author     Lorry Tar Creator <lorry-tar-importer@baserock.org>  2015-04-22 10:21:45 +0000
committer  <>  2015-04-25 21:44:09 +0000
commit     f80b5ea1605c9f9408c5aa386ba71c16d918ebbf
tree       bb7eafaa81fc4b8c5c215bc08d517fd158db234a /libsanitizer
parent     c27a97d04853380f1e80525391b3f0d156ed4c84
download   gcc-tarball-f80b5ea1605c9f9408c5aa386ba71c16d918ebbf.tar.gz

Imported from /home/lorry/working-area/delta_gcc-tarball/gcc-5.1.0.tar.bz2.gcc-5.1.0
Diffstat (limited to 'libsanitizer')
-rw-r--r--  libsanitizer/ChangeLog | 239
-rw-r--r--  libsanitizer/MERGE | 2
-rw-r--r--  libsanitizer/Makefile.in | 1
-rw-r--r--  libsanitizer/asan/Makefile.am | 10
-rw-r--r--  libsanitizer/asan/Makefile.in | 30
-rw-r--r--  libsanitizer/asan/asan_activation.cc | 72
-rw-r--r--  libsanitizer/asan/asan_activation.h | 21
-rw-r--r--  libsanitizer/asan/asan_allocator.h | 64
-rw-r--r--  libsanitizer/asan/asan_allocator2.cc | 203
-rw-r--r--  libsanitizer/asan/asan_debugging.cc | 139
-rw-r--r--  libsanitizer/asan/asan_dll_thunk.cc | 196
-rw-r--r--  libsanitizer/asan/asan_fake_stack.cc | 61
-rw-r--r--  libsanitizer/asan/asan_fake_stack.h | 8
-rw-r--r--  libsanitizer/asan/asan_flags.h | 68
-rw-r--r--  libsanitizer/asan/asan_globals.cc | 97
-rw-r--r--  libsanitizer/asan/asan_init_version.h | 30
-rw-r--r--  libsanitizer/asan/asan_intercepted_functions.h | 77
-rw-r--r--  libsanitizer/asan/asan_interceptors.cc | 246
-rw-r--r--  libsanitizer/asan/asan_interceptors.h | 74
-rw-r--r--  libsanitizer/asan/asan_interface_internal.h | 112
-rw-r--r--  libsanitizer/asan/asan_internal.h | 46
-rw-r--r--  libsanitizer/asan/asan_linux.cc | 149
-rw-r--r--  libsanitizer/asan/asan_mac.cc | 90
-rw-r--r--  libsanitizer/asan/asan_mac.h | 57
-rw-r--r--  libsanitizer/asan/asan_malloc_linux.cc | 115
-rw-r--r--  libsanitizer/asan/asan_malloc_mac.cc | 36
-rw-r--r--  libsanitizer/asan/asan_malloc_win.cc | 126
-rw-r--r--  libsanitizer/asan/asan_mapping.h | 93
-rw-r--r--  libsanitizer/asan/asan_new_delete.cc | 74
-rw-r--r--  libsanitizer/asan/asan_poisoning.cc | 164
-rw-r--r--  libsanitizer/asan/asan_poisoning.h | 32
-rw-r--r--  libsanitizer/asan/asan_posix.cc | 76
-rw-r--r--  libsanitizer/asan/asan_preinit.cc | 12
-rw-r--r--  libsanitizer/asan/asan_report.cc | 712
-rw-r--r--  libsanitizer/asan/asan_report.h | 76
-rw-r--r--  libsanitizer/asan/asan_rtl.cc | 475
-rw-r--r--  libsanitizer/asan/asan_stack.cc | 30
-rw-r--r--  libsanitizer/asan/asan_stack.h | 89
-rw-r--r--  libsanitizer/asan/asan_stats.cc | 13
-rw-r--r--  libsanitizer/asan/asan_thread.cc | 72
-rw-r--r--  libsanitizer/asan/asan_thread.h | 31
-rw-r--r--  libsanitizer/asan/asan_win.cc | 121
-rw-r--r--  libsanitizer/asan/asan_win_dll_thunk.cc | 374
-rw-r--r--  libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc | 50
-rw-r--r--  libsanitizer/asan/libtool-version | 2
-rwxr-xr-x  libsanitizer/configure | 116
-rw-r--r--  libsanitizer/configure.ac | 5
-rw-r--r--  libsanitizer/configure.tgt | 7
-rw-r--r--  libsanitizer/include/sanitizer/allocator_interface.h | 64
-rw-r--r--  libsanitizer/include/sanitizer/asan_interface.h | 109
-rw-r--r--  libsanitizer/include/sanitizer/common_interface_defs.h | 33
-rw-r--r--  libsanitizer/include/sanitizer/dfsan_interface.h | 11
-rw-r--r--  libsanitizer/include/sanitizer/lsan_interface.h | 31
-rw-r--r--  libsanitizer/include/sanitizer/msan_interface.h | 90
-rw-r--r--  libsanitizer/include/sanitizer/tsan_interface_atomic.h (renamed from libsanitizer/tsan/tsan_interface_atomic.h) | 163
-rw-r--r--  libsanitizer/interception/Makefile.am | 1
-rw-r--r--  libsanitizer/interception/Makefile.in | 4
-rw-r--r--  libsanitizer/interception/interception.h | 31
-rw-r--r--  libsanitizer/interception/interception_linux.cc | 6
-rw-r--r--  libsanitizer/interception/interception_linux.h | 20
-rw-r--r--  libsanitizer/interception/interception_type_test.cc | 12
-rw-r--r--  libsanitizer/interception/interception_win.cc | 185
-rw-r--r--  libsanitizer/interception/interception_win.h | 32
-rw-r--r--  libsanitizer/libbacktrace/Makefile.am | 1
-rw-r--r--  libsanitizer/libbacktrace/Makefile.in | 3
-rw-r--r--  libsanitizer/lsan/Makefile.am | 1
-rw-r--r--  libsanitizer/lsan/Makefile.in | 4
-rw-r--r--  libsanitizer/lsan/lsan.cc | 27
-rw-r--r--  libsanitizer/lsan/lsan.h | 20
-rw-r--r--  libsanitizer/lsan/lsan_allocator.cc | 50
-rw-r--r--  libsanitizer/lsan/lsan_allocator.h | 2
-rw-r--r--  libsanitizer/lsan/lsan_common.cc | 412
-rw-r--r--  libsanitizer/lsan/lsan_common.h | 49
-rw-r--r--  libsanitizer/lsan/lsan_common_linux.cc | 32
-rw-r--r--  libsanitizer/lsan/lsan_interceptors.cc | 50
-rw-r--r--  libsanitizer/lsan/lsan_preinit.cc | 6
-rw-r--r--  libsanitizer/sanitizer_common/Makefile.am | 21
-rw-r--r--  libsanitizer/sanitizer_common/Makefile.in | 64
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_addrhashmap.h | 340
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator.cc | 4
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator.h | 57
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator_interface.h | 36
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator_internal.h | 22
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_atomic.h | 3
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_atomic_clang.h | 71
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_atomic_clang_other.h | 95
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_atomic_clang_x86.h | 114
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h | 100
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_bitvector.h | 349
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_bvgraph.h | 163
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common.cc | 76
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common.h | 116
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc | 2543
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc | 557
-rwxr-xr-x  libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc | 60
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_interceptors_scanf.inc | 309
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc | 37
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc | 63
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_coverage.cc | 111
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_coverage_libcdep.cc | 470
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc | 125
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h | 414
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc | 187
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_deadlock_detector2.cc | 426
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h | 91
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_flags.cc | 204
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_flags.h | 56
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_freebsd.h | 135
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_interception.h | 23
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_internal_defs.h | 47
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_libc.cc | 2
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_libc.h | 6
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_libignore.cc | 11
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_linux.cc | 614
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_linux.h | 27
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc | 262
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_list.h | 17
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_mac.cc | 254
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_mac.h | 35
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_mutex.h | 84
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_persistent_allocator.cc | 17
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h | 69
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform.h | 84
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h | 280
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc | 14
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc | 452
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h | 483
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_posix.cc | 116
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc | 133
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_printf.cc | 31
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_procmaps.h | 77
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_procmaps_common.cc | 176
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_procmaps_freebsd.cc | 86
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc | 88
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc | 188
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_report_decorator.h | 12
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stackdepot.cc | 272
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stackdepot.h | 33
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h | 174
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace.cc | 169
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace.h | 64
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc | 66
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cc | 130
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.h | 60
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc | 58
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_suppressions.cc | 33
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_suppressions.h | 11
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer.cc | 11
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer.h | 53
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc | 53
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.h | 14
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cc | 21
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc | 471
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc | 104
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_syscall_generic.inc | 12
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_syscall_linux_x86_64.inc | 2
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_thread_registry.cc | 20
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_thread_registry.h | 8
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc | 129
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_tls_get_addr.h | 58
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_unwind_posix_libcdep.cc | 151
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_win.cc | 162
-rw-r--r--  libsanitizer/tsan/Makefile.am | 58
-rw-r--r--  libsanitizer/tsan/Makefile.in | 94
-rw-r--r--  libsanitizer/tsan/tsan_clock.cc | 454
-rw-r--r--  libsanitizer/tsan/tsan_clock.h | 95
-rw-r--r--  libsanitizer/tsan/tsan_defs.h | 27
-rw-r--r--  libsanitizer/tsan/tsan_dense_alloc.h | 135
-rw-r--r--  libsanitizer/tsan/tsan_fd.cc | 24
-rw-r--r--  libsanitizer/tsan/tsan_fd.h | 4
-rw-r--r--  libsanitizer/tsan/tsan_flags.cc | 82
-rw-r--r--  libsanitizer/tsan/tsan_flags.h | 24
-rw-r--r--  libsanitizer/tsan/tsan_interceptors.cc | 1063
-rw-r--r--  libsanitizer/tsan/tsan_interface_ann.cc | 19
-rw-r--r--  libsanitizer/tsan/tsan_interface_atomic.cc | 470
-rw-r--r--  libsanitizer/tsan/tsan_interface_java.cc | 166
-rw-r--r--  libsanitizer/tsan/tsan_interface_java.h | 7
-rw-r--r--  libsanitizer/tsan/tsan_mman.cc | 174
-rw-r--r--  libsanitizer/tsan/tsan_mman.h | 10
-rw-r--r--  libsanitizer/tsan/tsan_mutex.cc | 39
-rw-r--r--  libsanitizer/tsan/tsan_mutex.h | 10
-rw-r--r--  libsanitizer/tsan/tsan_mutexset.h | 5
-rw-r--r--  libsanitizer/tsan/tsan_platform.h | 320
-rw-r--r--  libsanitizer/tsan/tsan_platform_linux.cc | 322
-rw-r--r--  libsanitizer/tsan/tsan_platform_mac.cc | 57
-rw-r--r--  libsanitizer/tsan/tsan_platform_windows.cc | 16
-rw-r--r--  libsanitizer/tsan/tsan_report.cc | 152
-rw-r--r--  libsanitizer/tsan/tsan_report.h | 39
-rw-r--r--  libsanitizer/tsan/tsan_rtl.cc | 472
-rw-r--r--  libsanitizer/tsan/tsan_rtl.h | 285
-rw-r--r--  libsanitizer/tsan/tsan_rtl_amd64.S | 22
-rw-r--r--  libsanitizer/tsan/tsan_rtl_mutex.cc | 293
-rw-r--r--  libsanitizer/tsan/tsan_rtl_report.cc | 364
-rw-r--r--  libsanitizer/tsan/tsan_rtl_thread.cc | 83
-rw-r--r--  libsanitizer/tsan/tsan_stack_trace.cc | 44
-rw-r--r--  libsanitizer/tsan/tsan_stack_trace.h | 37
-rw-r--r--  libsanitizer/tsan/tsan_stat.cc | 358
-rw-r--r--  libsanitizer/tsan/tsan_stat.h | 359
-rw-r--r--  libsanitizer/tsan/tsan_suppressions.cc | 93
-rw-r--r--  libsanitizer/tsan/tsan_suppressions.h | 1
-rw-r--r--  libsanitizer/tsan/tsan_symbolize.cc | 79
-rw-r--r--  libsanitizer/tsan/tsan_symbolize.h | 2
-rw-r--r--  libsanitizer/tsan/tsan_symbolize_addr2line_linux.cc | 191
-rw-r--r--  libsanitizer/tsan/tsan_sync.cc | 419
-rw-r--r--  libsanitizer/tsan/tsan_sync.h | 100
-rw-r--r--  libsanitizer/tsan/tsan_trace.h | 20
-rw-r--r--  libsanitizer/tsan/tsan_update_shadow_word_inl.h | 13
-rw-r--r--  libsanitizer/tsan/tsan_vector.h | 22
-rw-r--r--  libsanitizer/ubsan/Makefile.am | 5
-rw-r--r--  libsanitizer/ubsan/Makefile.in | 15
-rw-r--r--  libsanitizer/ubsan/ubsan_diag.cc | 133
-rw-r--r--  libsanitizer/ubsan/ubsan_diag.h | 29
-rw-r--r--  libsanitizer/ubsan/ubsan_flags.cc | 61
-rw-r--r--  libsanitizer/ubsan/ubsan_flags.h | 38
-rw-r--r--  libsanitizer/ubsan/ubsan_handlers.cc | 311
-rw-r--r--  libsanitizer/ubsan/ubsan_handlers.h | 29
-rw-r--r--  libsanitizer/ubsan/ubsan_handlers_cxx.cc | 36
-rw-r--r--  libsanitizer/ubsan/ubsan_init.cc | 59
-rw-r--r--  libsanitizer/ubsan/ubsan_init.h | 22
-rw-r--r--  libsanitizer/ubsan/ubsan_value.cc | 1
-rw-r--r--  libsanitizer/ubsan/ubsan_value.h | 6
221 files changed, 19462 insertions, 8294 deletions
diff --git a/libsanitizer/ChangeLog b/libsanitizer/ChangeLog
index 23bcabede4..3bec2b5e04 100644
--- a/libsanitizer/ChangeLog
+++ b/libsanitizer/ChangeLog
@@ -1,17 +1,170 @@
-2014-10-30 Release Manager
+2015-04-22 Release Manager
- * GCC 4.9.2 released.
+ * GCC 5.1.0 released.
-2014-10-16 Yury Gribov <y.gribov@samsung.com>
+2015-03-23 Christophe Lyon <christophe.lyon@linaro.org>
- Backport from mainline
- 2014-05-14 Yury Gribov <y.gribov@samsung.com>
+ PR sanitizer/59009
+ * sanitizer_common/sanitizer_platform_limits_posix.cc: Cherry pick
+ upstream r230324.
+ * sanitizer_common/sanitizer_platform.h: Likewise.
+ * sanitizer_common/sanitizer_common_syscalls.inc: Likewise.
- PR sanitizer/61100
+2015-03-11 Bernd Edlinger <bernd.edlinger@hotmail.de>
- * Makefile.am (nodist_saninclude_HEADERS): Install
- public headers.
+ * tsan/tsan_rtl_report.cc (ScopedReport::AddThread): Cherry pick
+ upstream 224508 and 224755.
+
+2015-03-09 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/63958
+ Reapply:
+ 2014-10-14 David S. Miller <davem@davemloft.net>
+
+ * sanitizer_common/sanitizer_platform_limits_linux.cc (time_t):
+ Define at __kernel_time_t, as needed for sparc.
+ (struct __old_kernel_stat): Don't check if __sparc__ is defined.
+ * libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
+ (__sanitizer): Define struct___old_kernel_stat_sz,
+ struct_kernel_stat_sz, and struct_kernel_stat64_sz for sparc.
+ (__sanitizer_ipc_perm): Adjust for sparc targets.
+ (__sanitizer_shmid_ds): Likewsie.
+ (__sanitizer_sigaction): Likewise.
+ (IOC_SIZE): Likewsie.
+
+2015-02-27 Peter Bergner <bergner@vnet.ibm.com>
+
+ * configure.tgt: Enable build on powerpc*le-*-linux.
+
+2015-02-23 Jakub Jelinek <jakub@redhat.com>
+
+ PR bootstrap/63888
+ * asan/asan_globals.cc (RegisterGlobal): Disable detect_odr_violation
+ support until it is rewritten upstream.
+
+2015-01-26 Matthias Klose <doko@ubuntu.com>
+
+ * configure.ac: Move AM_ENABLE_MULTILIB before AC_PROG_CC.
+ * configure: Regenerate.
+
+2015-01-25 Venkataramanan Kumar <venkataramanan.kumar@linaro.org>
+
+ * configure.ac (TSAN_TARGET_DEPENDENT_OBJECTS): Undefine.
+ * configure: Regenerate.
+ * configure.tgt (TSAN_TARGET_DEPENDENT_OBJECTS): Define.
+
+2015-01-25 Venkataramanan Kumar <venkataramanan.kumar@linaro.org>
+
+ * configure.ac (TSAN_TARGET_DEPENDENT_OBJECTS): Define.
+ * configure: Regenerate.
+ * tsan/Makefile.am (EXTRA_libtsan_la_SOURCES): Define.
+ (libtsan_la_DEPENDENCIES): Likewise.
* Makefile.in: Regenerate.
+ * asan/Makefile.in: Regenerate.
+ * interception/Makefile.in: Regenerate.
+ * libbacktrace/Makefile.in: Regenerate.
+ * lsan/Makefile.in: Regenerate.
+ * sanitizer_common/Makefile.in: Regenerate.
+ * tsan/Makefile.in: Regenerate.
+ * ubsan/Makefile.in: Regenerate.
+
+2015-01-22 Jakub Jelinek <jakub@redhat.com>
+
+ * tsan/tsan_rtl.h: Cherry pick upstream r226829.
+
+2015-01-21 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/64435
+ * sanitizer_common/sanitizer_platform_limits_posix.h: Cherry pick
+ upstream r226637.
+ * sanitizer_common/sanitizer_platform_limits_posix.cc: Likewise.
+ * sanitizer_common/sanitizer_posix.cc: Cherry pick upstream r226639.
+
+2015-01-20 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/64632
+ * ubsan/ubsan_type_hash.cc: Cherry pick upstream r224972.
+
+2015-01-19 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/64435
+ * sanitizer_common/sanitizer_platform_limits_posix.cc: Cherry pick
+ upstream r223925.
+
+2015-01-13 Jakub Jelinek <jakub@redhat.com>
+
+ * sanitizer_common/sanitizer_deadlock_detector.h: Cherry pick
+ upstream r224518 and r224519.
+ * tsan/tsan_rtl_thread.cc: Cherry pick upstream r224702 and
+ r224834.
+
+2014-12-16 Jakub Jelinek <jakub@redhat.com>
+
+ * sanitizer_common/sanitizer_symbolizer_libbacktrace.cc,
+ sanitizer_common/sanitizer_symbolizer_libbacktrace.h,
+ sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc: Cherry pick
+ upstream r224308.
+
+2014-11-21 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR bootstrap/63784
+ * configure: Regenerated.
+
+2014-11-21 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/61137
+ * config/ia64/ia64.c (ia64_attribute_takes_identifier_p): New function.
+ (TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P): Redefine to it.
+
+2014-11-14 Uros Bizjak <ubizjak@gmail.com>
+
+ * sanitizer_common/Makefile.am (AM_CXXFLAGS): Use -std=gnu++11.
+ * asan/Makefile.am (AM_CXXFLAGS): Ditto.
+ * lsan/Makefile.am (AM_CXXFLAGS): Ditto.
+ * interception/Makefile.am (AM_CXXFLAGS): Ditto.
+ * tsan/Makefile.am (AM_CXXFLAGS): Ditto.
+ * libbacktrace/Makefile.am (AM_CXXFLAGS): Ditto.
+ * ubsan/Makefile.am (AM_CXXFLAGS): Ditto.
+ * sanitizer_common/Makefile.in: Regenerate.
+ * asan/Makefile.in: Ditto.
+ * lsan/Makefile.in: Ditto.
+ * interception/Makefile.in: Ditto.
+ * tsan/Makefile.in: Ditto.
+ * libbacktrace/Makefile.in: Ditto.
+ * ubsan/Makefile.in: Ditto.
+
+2014-11-13 Kostya Serebryany <kcc@google.com>
+
+ * All source files: Merge from upstream r221802.
+ * sanitizer_common/sanitizer_symbolizer_libbacktrace.cc
+ (LibbacktraceSymbolizer::SymbolizeData): Replace 'address'
+ with 'start' to follow the new interface.
+ * asan/Makefile.am (AM_CXXFLAGS): Added -std=c++11.
+ * interception/Makefile.am (AM_CXXFLAGS): Added -std=c++11.
+ * libbacktrace/Makefile.am (AM_CXXFLAGS): Added -std=c++11.
+ * lsan/Makefile.am (AM_CXXFLAGS): Added -std=c++11.
+ * sanitizer_common/Makefile.am (sanitizer_common_files): Added new
+ files.
+ (AM_CXXFLAGS): Added -std=c++11.
+ * tsan/Makefile.am (AM_CXXFLAGS): Added -std=c++11.
+ * ubsan/Makefile.am (AM_CXXFLAGS): Added -std=c++11.
+ * asan/Makefile.in: Regenerate.
+ * interception/Makefile.in: Regenerate.
+ * libbacktrace/Makefile.in: Regenerate.
+ * lsan/Makefile.in: Regenerate.
+ * sanitizer_common/Makefile.in: Regenerate.
+ * tsan/Makefile.in: Regenerate.
+ * ubsan/Makefile.in: Regenerate.
+
+2014-11-11 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
+
+ PR target/63610
+ * configure: Regenerate.
+
+2014-10-16 Martin Liska <mliska@suse.cz>
+
+ * asan/Makefile.am: IPA ICF pass is disabled.
+ * asan/Makefile.in: Likewise.
2014-10-14 David S. Miller <davem@davemloft.net>
@@ -26,13 +179,75 @@
(__sanitizer_sigaction): Likewsie.
(IOC_SIZE): Likewsie.
-2014-07-16 Release Manager
+2014-10-14 Jakub Jelinek <jakub@redhat.com>
+
+ * ubsan/Makefile.am (DEFS): Add -DPIC.
+ * ubsan/Makefile.in: Regenerated.
+
+2014-09-26 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * configure.tgt: Enable build on aarch64*-linux.
+
+2014-09-19 Kostya Serebryany <kcc@google.com>
+
+ * All source files: Merge from upstream r218156.
+ * asan/Makefile.am (asan_files): Added new files.
+ * asan/Makefile.in: Regenerate.
+ * ubsan/Makefile.am (ubsan_files): Added new files.
+ * ubsan/Makefile.in: Regenerate.
+ * tsan/Makefile.am (tsan_files): Added new files.
+ * tsan/Makefile.in: Regenerate.
+ * sanitizer_common/Makefile.am (sanitizer_common_files): Added new
+ files.
+ * sanitizer_common/Makefile.in: Regenerate.
+ * asan/libtool-version: Bump the libasan SONAME.
+
+2014-09-10 Jakub Jelinek <jakub@redhat.com>
+
+ * ubsan/ubsan_handlers.cc, ubsan/ubsan_handlers.h: Cherry pick
+ upstream r215485, r217389, r217391 and r217400.
+
+2014-06-23 Paolo Carlini <paolo.carlini@oracle.com>
+
+ * sanitizer_common/sanitizer_common_interceptors.inc:
+ Cherry pick upstream r211008.
+
+2014-06-11 Richard Biener <rguenther@suse.de>
+
+ * asan/asan_linux.cc: Cherry pick upstream r210012.
+
+2014-05-30 Jakub Jelinek <jakub@redhat.com>
+
+ * sanitizer_common/sanitizer_stacktrace.cc: Cherry pick upstream
+ r209879.
+ * sanitizer_common/sanitizer_common.h: Likewise.
+ * asan/asan_mapping.h: Likewise.
+ * asan/asan_linux.cc: Likewise.
+ * tsan/tsan_mman.cc: Cherry pick upstream r209744.
+ * sanitizer_common/sanitizer_allocator.h: Likewise.
+
+2014-05-23 Marek Polacek <polacek@redhat.com>
- * GCC 4.9.1 released.
+ * ubsan/ubsan_value.cc (getFloatValue): Handle 96-bit
+ floating-point types.
-2014-04-22 Release Manager
+2014-05-22 Kostya Serebryany <kcc@google.com>
- * GCC 4.9.0 released.
+ * All source files: Merge from upstream r209283.
+ * asan/Makefile.am (asan_files): Added new files.
+ * asan/Makefile.in: Regenerate.
+ * tsan/Makefile.am (tsan_files): Added new files.
+ * tsan/Makefile.in: Regenerate.
+ * sanitizer_common/Makefile.am (sanitizer_common_files): Added new
+ files.
+ * sanitizer_common/Makefile.in: Regenerate.
+
+2014-05-14 Yury Gribov <y.gribov@samsung.com>
+
+ PR sanitizer/61100
+ * Makefile.am (nodist_saninclude_HEADERS): Install
+ public headers.
+ * Makefile.in: Regenerate.
2014-03-07 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
diff --git a/libsanitizer/MERGE b/libsanitizer/MERGE
index 4688f0c869..ef893cb306 100644
--- a/libsanitizer/MERGE
+++ b/libsanitizer/MERGE
@@ -1,4 +1,4 @@
-196489
+221802
The first line of this file holds the svn revision number of the
last merge done from the master library sources.
diff --git a/libsanitizer/Makefile.in b/libsanitizer/Makefile.in
index 0b8924554d..79a1be686b 100644
--- a/libsanitizer/Makefile.in
+++ b/libsanitizer/Makefile.in
@@ -185,6 +185,7 @@ SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
+TSAN_TARGET_DEPENDENT_OBJECTS = @TSAN_TARGET_DEPENDENT_OBJECTS@
VERSION = @VERSION@
VIEW_FILE = @VIEW_FILE@
abs_builddir = @abs_builddir@
diff --git a/libsanitizer/asan/Makefile.am b/libsanitizer/asan/Makefile.am
index 3f07a834c6..54c74ce3e4 100644
--- a/libsanitizer/asan/Makefile.am
+++ b/libsanitizer/asan/Makefile.am
@@ -7,16 +7,18 @@ DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D
if USING_MAC_INTERPOSE
DEFS += -DMAC_INTERPOSE_FUNCTIONS -DMISSING_BLOCKS_SUPPORT
endif
-AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
+AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros -fno-ipa-icf
AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+AM_CXXFLAGS += -std=gnu++11
ACLOCAL_AMFLAGS = -I $(top_srcdir) -I $(top_srcdir)/config
toolexeclib_LTLIBRARIES = libasan.la
nodist_toolexeclib_HEADERS = libasan_preinit.o
asan_files = \
+ asan_activation.cc \
asan_allocator2.cc \
- asan_dll_thunk.cc \
+ asan_debugging.cc \
asan_fake_stack.cc \
asan_globals.cc \
asan_interceptors.cc \
@@ -33,7 +35,9 @@ asan_files = \
asan_stack.cc \
asan_stats.cc \
asan_thread.cc \
- asan_win.cc
+ asan_win.cc \
+ asan_win_dll_thunk.cc \
+ asan_win_dynamic_runtime_thunk.cc
libasan_la_SOURCES = $(asan_files)
libasan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/lsan/libsanitizer_lsan.la
diff --git a/libsanitizer/asan/Makefile.in b/libsanitizer/asan/Makefile.in
index 273eb4b264..e61ceda725 100644
--- a/libsanitizer/asan/Makefile.in
+++ b/libsanitizer/asan/Makefile.in
@@ -88,12 +88,14 @@ libasan_la_DEPENDENCIES = \
$(top_builddir)/sanitizer_common/libsanitizer_common.la \
$(top_builddir)/lsan/libsanitizer_lsan.la $(am__append_2) \
$(am__append_3) $(am__DEPENDENCIES_1)
-am__objects_1 = asan_allocator2.lo asan_dll_thunk.lo \
- asan_fake_stack.lo asan_globals.lo asan_interceptors.lo \
- asan_linux.lo asan_mac.lo asan_malloc_linux.lo \
- asan_malloc_mac.lo asan_malloc_win.lo asan_new_delete.lo \
- asan_poisoning.lo asan_posix.lo asan_report.lo asan_rtl.lo \
- asan_stack.lo asan_stats.lo asan_thread.lo asan_win.lo
+am__objects_1 = asan_activation.lo asan_allocator2.lo \
+ asan_debugging.lo asan_fake_stack.lo asan_globals.lo \
+ asan_interceptors.lo asan_linux.lo asan_mac.lo \
+ asan_malloc_linux.lo asan_malloc_mac.lo asan_malloc_win.lo \
+ asan_new_delete.lo asan_poisoning.lo asan_posix.lo \
+ asan_report.lo asan_rtl.lo asan_stack.lo asan_stats.lo \
+ asan_thread.lo asan_win.lo asan_win_dll_thunk.lo \
+ asan_win_dynamic_runtime_thunk.lo
am_libasan_la_OBJECTS = $(am__objects_1)
libasan_la_OBJECTS = $(am_libasan_la_OBJECTS)
libasan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
@@ -192,6 +194,7 @@ SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
+TSAN_TARGET_DEPENDENT_OBJECTS = @TSAN_TARGET_DEPENDENT_OBJECTS@
VERSION = @VERSION@
VIEW_FILE = @VIEW_FILE@
abs_builddir = @abs_builddir@
@@ -267,13 +270,15 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
-Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
-fomit-frame-pointer -funwind-tables -fvisibility=hidden \
- -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+ -Wno-variadic-macros -fno-ipa-icf \
+ $(LIBSTDCXX_RAW_CXX_CXXFLAGS) -std=gnu++11
ACLOCAL_AMFLAGS = -I $(top_srcdir) -I $(top_srcdir)/config
toolexeclib_LTLIBRARIES = libasan.la
nodist_toolexeclib_HEADERS = libasan_preinit.o
asan_files = \
+ asan_activation.cc \
asan_allocator2.cc \
- asan_dll_thunk.cc \
+ asan_debugging.cc \
asan_fake_stack.cc \
asan_globals.cc \
asan_interceptors.cc \
@@ -290,7 +295,9 @@ asan_files = \
asan_stack.cc \
asan_stats.cc \
asan_thread.cc \
- asan_win.cc
+ asan_win.cc \
+ asan_win_dll_thunk.cc \
+ asan_win_dynamic_runtime_thunk.cc
libasan_la_SOURCES = $(asan_files)
libasan_la_LIBADD = \
@@ -412,8 +419,9 @@ mostlyclean-compile:
distclean-compile:
-rm -f *.tab.c
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_activation.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator2.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_dll_thunk.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_debugging.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_fake_stack.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_globals.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_interceptors.Plo@am__quote@
@@ -431,6 +439,8 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stats.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_thread.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win_dll_thunk.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win_dynamic_runtime_thunk.Plo@am__quote@
.cc.o:
@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
diff --git a/libsanitizer/asan/asan_activation.cc b/libsanitizer/asan/asan_activation.cc
new file mode 100644
index 0000000000..235451c8ae
--- /dev/null
+++ b/libsanitizer/asan/asan_activation.cc
@@ -0,0 +1,72 @@
+//===-- asan_activation.cc --------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan activation/deactivation logic.
+//===----------------------------------------------------------------------===//
+
+#include "asan_activation.h"
+#include "asan_allocator.h"
+#include "asan_flags.h"
+#include "asan_internal.h"
+#include "sanitizer_common/sanitizer_flags.h"
+
+namespace __asan {
+
+static struct AsanDeactivatedFlags {
+ int quarantine_size;
+ int max_redzone;
+ int malloc_context_size;
+ bool poison_heap;
+} asan_deactivated_flags;
+
+static bool asan_is_deactivated;
+
+void AsanStartDeactivated() {
+ VReport(1, "Deactivating ASan\n");
+ // Save flag values.
+ asan_deactivated_flags.quarantine_size = flags()->quarantine_size;
+ asan_deactivated_flags.max_redzone = flags()->max_redzone;
+ asan_deactivated_flags.poison_heap = flags()->poison_heap;
+ asan_deactivated_flags.malloc_context_size =
+ common_flags()->malloc_context_size;
+
+ flags()->quarantine_size = 0;
+ flags()->max_redzone = 16;
+ flags()->poison_heap = false;
+ common_flags()->malloc_context_size = 0;
+
+ asan_is_deactivated = true;
+}
+
+void AsanActivate() {
+ if (!asan_is_deactivated) return;
+ VReport(1, "Activating ASan\n");
+
+ // Restore flag values.
+ // FIXME: this is not atomic, and there may be other threads alive.
+ flags()->quarantine_size = asan_deactivated_flags.quarantine_size;
+ flags()->max_redzone = asan_deactivated_flags.max_redzone;
+ flags()->poison_heap = asan_deactivated_flags.poison_heap;
+ common_flags()->malloc_context_size =
+ asan_deactivated_flags.malloc_context_size;
+
+ ParseExtraActivationFlags();
+
+ ReInitializeAllocator();
+
+ asan_is_deactivated = false;
+ VReport(
+ 1,
+ "quarantine_size %d, max_redzone %d, poison_heap %d, malloc_context_size "
+ "%d\n",
+ flags()->quarantine_size, flags()->max_redzone, flags()->poison_heap,
+ common_flags()->malloc_context_size);
+}
+
+} // namespace __asan
diff --git a/libsanitizer/asan/asan_activation.h b/libsanitizer/asan/asan_activation.h
new file mode 100644
index 0000000000..01f2d46d22
--- /dev/null
+++ b/libsanitizer/asan/asan_activation.h
@@ -0,0 +1,21 @@
+//===-- asan_activation.h ---------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan activation/deactivation logic.
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_ACTIVATION_H
+#define ASAN_ACTIVATION_H
+
+namespace __asan {
+void AsanStartDeactivated();
+void AsanActivate();
+} // namespace __asan
+
+#endif // ASAN_ACTIVATION_H
diff --git a/libsanitizer/asan/asan_allocator.h b/libsanitizer/asan/asan_allocator.h
index 763f4a58ef..d2f30af30d 100644
--- a/libsanitizer/asan/asan_allocator.h
+++ b/libsanitizer/asan/asan_allocator.h
@@ -15,6 +15,7 @@
#include "asan_internal.h"
#include "asan_interceptors.h"
+#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_list.h"
namespace __asan {
@@ -29,6 +30,7 @@ static const uptr kNumberOfSizeClasses = 255;
struct AsanChunk;
void InitializeAllocator();
+void ReInitializeAllocator();
class AsanChunkView {
public:
@@ -40,8 +42,9 @@ class AsanChunkView {
uptr UsedSize(); // Size requested by the user.
uptr AllocTid();
uptr FreeTid();
- void GetAllocStack(StackTrace *stack);
- void GetFreeStack(StackTrace *stack);
+ bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
+ StackTrace GetAllocStack();
+ StackTrace GetFreeStack();
bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
if (addr >= Beg() && (addr + access_size) <= End()) {
*offset = addr - Beg();
@@ -88,27 +91,66 @@ class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
uptr size_;
};
+struct AsanMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const;
+ void OnUnmap(uptr p, uptr size) const;
+};
+
+#if SANITIZER_CAN_USE_ALLOCATOR64
+# if defined(__powerpc64__)
+const uptr kAllocatorSpace = 0xa0000000000ULL;
+const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
+# else
+const uptr kAllocatorSpace = 0x600000000000ULL;
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+# endif
+typedef DefaultSizeClassMap SizeClassMap;
+typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
+ SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
+#else // Fallback to SizeClassAllocator32.
+static const uptr kRegionSizeLog = 20;
+static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+# if SANITIZER_WORDSIZE == 32
+typedef FlatByteMap<kNumRegions> ByteMap;
+# elif SANITIZER_WORDSIZE == 64
+typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
+# endif
+typedef CompactSizeClassMap SizeClassMap;
+typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 16,
+ SizeClassMap, kRegionSizeLog,
+ ByteMap,
+ AsanMapUnmapCallback> PrimaryAllocator;
+#endif // SANITIZER_CAN_USE_ALLOCATOR64
+
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
+ SecondaryAllocator> Allocator;
+
+
struct AsanThreadLocalMallocStorage {
uptr quarantine_cache[16];
- uptr allocator2_cache[96 * (512 * 8 + 16)]; // Opaque.
+ AllocatorCache allocator2_cache;
void CommitBack();
private:
// These objects are allocated via mmap() and are zero-initialized.
AsanThreadLocalMallocStorage() {}
};
-void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
+void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
AllocType alloc_type);
-void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type);
+void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type);
+void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
+ AllocType alloc_type);
-void *asan_malloc(uptr size, StackTrace *stack);
-void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack);
-void *asan_realloc(void *p, uptr size, StackTrace *stack);
-void *asan_valloc(uptr size, StackTrace *stack);
-void *asan_pvalloc(uptr size, StackTrace *stack);
+void *asan_malloc(uptr size, BufferedStackTrace *stack);
+void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack);
+void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack);
+void *asan_valloc(uptr size, BufferedStackTrace *stack);
+void *asan_pvalloc(uptr size, BufferedStackTrace *stack);
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
- StackTrace *stack);
+ BufferedStackTrace *stack);
uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp);
uptr asan_mz_size(const void *ptr);
diff --git a/libsanitizer/asan/asan_allocator2.cc b/libsanitizer/asan/asan_allocator2.cc
index b9d66dc7a4..33d9fea70c 100644
--- a/libsanitizer/asan/asan_allocator2.cc
+++ b/libsanitizer/asan/asan_allocator2.cc
@@ -17,8 +17,9 @@
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
+#include "asan_stack.h"
#include "asan_thread.h"
-#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
@@ -28,65 +29,30 @@
namespace __asan {
-struct AsanMapUnmapCallback {
- void OnMap(uptr p, uptr size) const {
- PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
- // Statistics.
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.mmaps++;
- thread_stats.mmaped += size;
- }
- void OnUnmap(uptr p, uptr size) const {
- PoisonShadow(p, size, 0);
- // We are about to unmap a chunk of user memory.
- // Mark the corresponding shadow memory as not needed.
- // Since asan's mapping is compacting, the shadow chunk may be
- // not page-aligned, so we only flush the page-aligned portion.
- uptr page_size = GetPageSizeCached();
- uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
- uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
- FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
- // Statistics.
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.munmaps++;
- thread_stats.munmaped += size;
- }
-};
-
-#if SANITIZER_WORDSIZE == 64
-#if defined(__powerpc64__)
-const uptr kAllocatorSpace = 0xa0000000000ULL;
-const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
-#else
-const uptr kAllocatorSpace = 0x600000000000ULL;
-const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
-#endif
-typedef DefaultSizeClassMap SizeClassMap;
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
- SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
-#elif SANITIZER_WORDSIZE == 32
-static const u64 kAddressSpaceSize = 1ULL << 32;
-typedef CompactSizeClassMap SizeClassMap;
-static const uptr kRegionSizeLog = 20;
-static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;
-typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
- SizeClassMap, kRegionSizeLog,
- FlatByteMap<kFlatByteMapSize>,
- AsanMapUnmapCallback> PrimaryAllocator;
-#endif
-
-typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
-typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
-typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
- SecondaryAllocator> Allocator;
+void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
+ PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
+ // Statistics.
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.mmaps++;
+ thread_stats.mmaped += size;
+}
+void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
+ PoisonShadow(p, size, 0);
+ // We are about to unmap a chunk of user memory.
+ // Mark the corresponding shadow memory as not needed.
+ FlushUnneededASanShadowMemory(p, size);
+ // Statistics.
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.munmaps++;
+ thread_stats.munmaped += size;
+}
// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
CHECK(ms);
- CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
- return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
+ return &ms->allocator2_cache;
}
static Allocator allocator;
@@ -132,7 +98,8 @@ static uptr ComputeRZLog(uptr user_requested_size) {
user_requested_size <= (1 << 14) - 256 ? 4 :
user_requested_size <= (1 << 15) - 512 ? 5 :
user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
- return Max(rz_log, RZSize2Log(flags()->redzone));
+ return Min(Max(rz_log, RZSize2Log(flags()->redzone)),
+ RZSize2Log(flags()->max_redzone));
}
// The memory chunk allocated from the underlying allocator looks like this:
@@ -199,23 +166,6 @@ struct AsanChunk: ChunkBase {
}
return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
}
- // If we don't use stack depot, we store the alloc/free stack traces
- // in the chunk itself.
- u32 *AllocStackBeg() {
- return (u32*)(Beg() - RZLog2Size(rz_log));
- }
- uptr AllocStackSize() {
- CHECK_LE(RZLog2Size(rz_log), kChunkHeaderSize);
- return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
- }
- u32 *FreeStackBeg() {
- return (u32*)(Beg() + kChunkHeader2Size);
- }
- uptr FreeStackSize() {
- if (user_requested_size < kChunkHeader2Size) return 0;
- uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
- return (available - kChunkHeader2Size) / sizeof(u32);
- }
bool AddrIsInside(uptr addr, bool locked_version = false) {
return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
}
@@ -230,20 +180,19 @@ uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
-static void GetStackTraceFromId(u32 id, StackTrace *stack) {
+static StackTrace GetStackTraceFromId(u32 id) {
CHECK(id);
- uptr size = 0;
- const uptr *trace = StackDepotGet(id, &size);
- CHECK(trace);
- stack->CopyFrom(trace, size);
+ StackTrace res = StackDepotGet(id);
+ CHECK(res.trace);
+ return res;
}
-void AsanChunkView::GetAllocStack(StackTrace *stack) {
- GetStackTraceFromId(chunk_->alloc_context_id, stack);
+StackTrace AsanChunkView::GetAllocStack() {
+ return GetStackTraceFromId(chunk_->alloc_context_id);
}
-void AsanChunkView::GetFreeStack(StackTrace *stack) {
- GetStackTraceFromId(chunk_->free_context_id, stack);
+StackTrace AsanChunkView::GetFreeStack() {
+ return GetStackTraceFromId(chunk_->free_context_id);
}
struct QuarantineCallback;
@@ -307,10 +256,14 @@ void InitializeAllocator() {
quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}
-static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
+void ReInitializeAllocator() {
+ quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
+}
+
+static void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
AllocType alloc_type, bool can_fill) {
- if (!asan_inited)
- __asan_init();
+ if (UNLIKELY(!asan_inited))
+ AsanInitFromRtl();
Flags &fl = *flags();
CHECK(stack);
const uptr min_alignment = SHADOW_GRANULARITY;
@@ -355,6 +308,16 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
AllocatorCache *cache = &fallback_allocator_cache;
allocated = allocator.Allocate(cache, needed_size, 8, false);
}
+
+ if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && flags()->poison_heap) {
+ // Heap poisoning is enabled, but the allocator provides an unpoisoned
+ // chunk. This is possible if flags()->poison_heap was disabled for some
+ // time, for example, due to flags()->start_disabled.
+ // Anyway, poison the block before using it for anything else.
+ uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
+ PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
+ }
+
uptr alloc_beg = reinterpret_cast<uptr>(allocated);
uptr alloc_end = alloc_beg + needed_size;
uptr beg_plus_redzone = alloc_beg + rz_size;
@@ -389,7 +352,7 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
meta[1] = chunk_beg;
}
- m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
+ m->alloc_context_id = StackDepotPut(*stack);
uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
// Unpoison the bulk of the memory region.
@@ -425,15 +388,16 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
return res;
}
-static void ReportInvalidFree(void *ptr, u8 chunk_state, StackTrace *stack) {
+static void ReportInvalidFree(void *ptr, u8 chunk_state,
+ BufferedStackTrace *stack) {
if (chunk_state == CHUNK_QUARANTINE)
ReportDoubleFree((uptr)ptr, stack);
else
ReportFreeNotMalloced((uptr)ptr, stack);
}
-static void AtomicallySetQuarantineFlag(AsanChunk *m,
- void *ptr, StackTrace *stack) {
+static void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
+ BufferedStackTrace *stack) {
u8 old_chunk_state = CHUNK_ALLOCATED;
// Flip the chunk_state atomically to avoid race on double-free.
if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
@@ -444,8 +408,8 @@ static void AtomicallySetQuarantineFlag(AsanChunk *m,
// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlag.
-static void QuarantineChunk(AsanChunk *m, void *ptr,
- StackTrace *stack, AllocType alloc_type) {
+static void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
+ AllocType alloc_type) {
CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
@@ -457,7 +421,7 @@ static void QuarantineChunk(AsanChunk *m, void *ptr,
CHECK_EQ(m->free_tid, kInvalidTid);
AsanThread *t = GetCurrentThread();
m->free_tid = t ? t->tid() : 0;
- m->free_context_id = StackDepotPut(stack->trace, stack->size);
+ m->free_context_id = StackDepotPut(*stack);
// Poison the region.
PoisonShadow(m->Beg(),
RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
@@ -481,19 +445,25 @@ static void QuarantineChunk(AsanChunk *m, void *ptr,
}
}
-static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
+static void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
+ AllocType alloc_type) {
uptr p = reinterpret_cast<uptr>(ptr);
if (p == 0) return;
uptr chunk_beg = p - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+ if (delete_size && flags()->new_delete_type_mismatch &&
+ delete_size != m->UsedSize()) {
+ ReportNewDeleteSizeMismatch(p, delete_size, stack);
+ }
ASAN_FREE_HOOK(ptr);
// Must mark the chunk as quarantined before any changes to its metadata.
AtomicallySetQuarantineFlag(m, ptr, stack);
QuarantineChunk(m, ptr, stack, alloc_type);
}
-static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
+static void *Reallocate(void *old_ptr, uptr new_size,
+ BufferedStackTrace *stack) {
CHECK(old_ptr && new_size);
uptr p = reinterpret_cast<uptr>(old_ptr);
uptr chunk_beg = p - kChunkHeaderSize;
@@ -513,7 +483,7 @@ static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
// If realloc() races with free(), we may start copying freed memory.
// However, we will report racy double-free later anyway.
REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
- Deallocate(old_ptr, stack, FROM_MALLOC);
+ Deallocate(old_ptr, 0, stack, FROM_MALLOC);
}
return new_ptr;
}
@@ -606,20 +576,25 @@ void PrintInternalAllocatorStats() {
allocator.PrintStats();
}
-void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
+void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
AllocType alloc_type) {
return Allocate(size, alignment, stack, alloc_type, true);
}
-void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
- Deallocate(ptr, stack, alloc_type);
+void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
+ Deallocate(ptr, 0, stack, alloc_type);
}
-void *asan_malloc(uptr size, StackTrace *stack) {
+void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
+ AllocType alloc_type) {
+ Deallocate(ptr, size, stack, alloc_type);
+}
+
+void *asan_malloc(uptr size, BufferedStackTrace *stack) {
return Allocate(size, 8, stack, FROM_MALLOC, true);
}
-void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
+void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
return AllocatorReturnNull();
void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
@@ -630,21 +605,21 @@ void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
return ptr;
}
-void *asan_realloc(void *p, uptr size, StackTrace *stack) {
+void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
if (p == 0)
return Allocate(size, 8, stack, FROM_MALLOC, true);
if (size == 0) {
- Deallocate(p, stack, FROM_MALLOC);
+ Deallocate(p, 0, stack, FROM_MALLOC);
return 0;
}
return Reallocate(p, size, stack);
}
-void *asan_valloc(uptr size, StackTrace *stack) {
+void *asan_valloc(uptr size, BufferedStackTrace *stack) {
return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}
-void *asan_pvalloc(uptr size, StackTrace *stack) {
+void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
uptr PageSize = GetPageSizeCached();
size = RoundUpTo(size, PageSize);
if (size == 0) {
@@ -655,7 +630,7 @@ void *asan_pvalloc(uptr size, StackTrace *stack) {
}
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
- StackTrace *stack) {
+ BufferedStackTrace *stack) {
void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
CHECK(IsAligned((uptr)ptr, alignment));
*memptr = ptr;
@@ -708,8 +683,12 @@ uptr PointsIntoChunk(void* p) {
__asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
if (!m) return 0;
uptr chunk = m->Beg();
- if ((m->chunk_state == __asan::CHUNK_ALLOCATED) &&
- m->AddrIsInside(addr, /*locked_version=*/true))
+ if (m->chunk_state != __asan::CHUNK_ALLOCATED)
+ return 0;
+ if (m->AddrIsInside(addr, /*locked_version=*/true))
+ return chunk;
+ if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
+ addr))
return chunk;
return 0;
}
@@ -774,23 +753,23 @@ using namespace __asan; // NOLINT
// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
-uptr __asan_get_estimated_allocated_size(uptr size) {
+uptr __sanitizer_get_estimated_allocated_size(uptr size) {
return size;
}
-bool __asan_get_ownership(const void *p) {
+int __sanitizer_get_ownership(const void *p) {
uptr ptr = reinterpret_cast<uptr>(p);
return (AllocationSize(ptr) > 0);
}
-uptr __asan_get_allocated_size(const void *p) {
+uptr __sanitizer_get_allocated_size(const void *p) {
if (p == 0) return 0;
uptr ptr = reinterpret_cast<uptr>(p);
uptr allocated_size = AllocationSize(ptr);
// Die if p is not malloced or if it is already freed.
if (allocated_size == 0) {
GET_STACK_TRACE_FATAL_HERE;
- ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
+ ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
}
return allocated_size;
}
@@ -799,12 +778,12 @@ uptr __asan_get_allocated_size(const void *p) {
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __asan_malloc_hook(void *ptr, uptr size) {
+void __sanitizer_malloc_hook(void *ptr, uptr size) {
(void)ptr;
(void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __asan_free_hook(void *ptr) {
+void __sanitizer_free_hook(void *ptr) {
(void)ptr;
}
} // extern "C"
diff --git a/libsanitizer/asan/asan_debugging.cc b/libsanitizer/asan/asan_debugging.cc
new file mode 100644
index 0000000000..3efad658a4
--- /dev/null
+++ b/libsanitizer/asan/asan_debugging.cc
@@ -0,0 +1,139 @@
+//===-- asan_debugging.cc -------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This file contains various functions that are generally useful to call when
+// using a debugger (LLDB, GDB).
+//===----------------------------------------------------------------------===//
+
+#include "asan_allocator.h"
+#include "asan_flags.h"
+#include "asan_internal.h"
+#include "asan_mapping.h"
+#include "asan_report.h"
+#include "asan_thread.h"
+
+namespace __asan {
+
+void GetInfoForStackVar(uptr addr, AddressDescription *descr, AsanThread *t) {
+ descr->name[0] = 0;
+ descr->region_address = 0;
+ descr->region_size = 0;
+ descr->region_kind = "stack";
+
+ AsanThread::StackFrameAccess access;
+ if (!t->GetStackFrameAccessByAddr(addr, &access))
+ return;
+ InternalMmapVector<StackVarDescr> vars(16);
+ if (!ParseFrameDescription(access.frame_descr, &vars)) {
+ return;
+ }
+
+ for (uptr i = 0; i < vars.size(); i++) {
+ if (access.offset <= vars[i].beg + vars[i].size) {
+ internal_strncat(descr->name, vars[i].name_pos,
+ Min(descr->name_size, vars[i].name_len));
+ descr->region_address = addr - (access.offset - vars[i].beg);
+ descr->region_size = vars[i].size;
+ return;
+ }
+ }
+}
+
+void GetInfoForHeapAddress(uptr addr, AddressDescription *descr) {
+ AsanChunkView chunk = FindHeapChunkByAddress(addr);
+
+ descr->name[0] = 0;
+ descr->region_address = 0;
+ descr->region_size = 0;
+
+ if (!chunk.IsValid()) {
+ descr->region_kind = "heap-invalid";
+ return;
+ }
+
+ descr->region_address = chunk.Beg();
+ descr->region_size = chunk.UsedSize();
+ descr->region_kind = "heap";
+}
+
+void AsanLocateAddress(uptr addr, AddressDescription *descr) {
+ if (DescribeAddressIfShadow(addr, descr, /* print */ false)) {
+ return;
+ }
+ if (GetInfoForAddressIfGlobal(addr, descr)) {
+ return;
+ }
+ asanThreadRegistry().Lock();
+ AsanThread *thread = FindThreadByStackAddress(addr);
+ asanThreadRegistry().Unlock();
+ if (thread) {
+ GetInfoForStackVar(addr, descr, thread);
+ return;
+ }
+ GetInfoForHeapAddress(addr, descr);
+}
+
+uptr AsanGetStack(uptr addr, uptr *trace, uptr size, u32 *thread_id,
+ bool alloc_stack) {
+ AsanChunkView chunk = FindHeapChunkByAddress(addr);
+ if (!chunk.IsValid()) return 0;
+
+ StackTrace stack(nullptr, 0);
+ if (alloc_stack) {
+ if (chunk.AllocTid() == kInvalidTid) return 0;
+ stack = chunk.GetAllocStack();
+ if (thread_id) *thread_id = chunk.AllocTid();
+ } else {
+ if (chunk.FreeTid() == kInvalidTid) return 0;
+ stack = chunk.GetFreeStack();
+ if (thread_id) *thread_id = chunk.FreeTid();
+ }
+
+ if (trace && size) {
+ size = Min(size, Min(stack.size, kStackTraceMax));
+ for (uptr i = 0; i < size; i++)
+ trace[i] = StackTrace::GetPreviousInstructionPc(stack.trace[i]);
+
+ return size;
+ }
+
+ return 0;
+}
+
+} // namespace __asan
+
+using namespace __asan;
+
+SANITIZER_INTERFACE_ATTRIBUTE
+const char *__asan_locate_address(uptr addr, char *name, uptr name_size,
+ uptr *region_address, uptr *region_size) {
+ AddressDescription descr = { name, name_size, 0, 0, 0 };
+ AsanLocateAddress(addr, &descr);
+ if (region_address) *region_address = descr.region_address;
+ if (region_size) *region_size = descr.region_size;
+ return descr.region_kind;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) {
+ return AsanGetStack(addr, trace, size, thread_id, /* alloc_stack */ true);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_get_free_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) {
+ return AsanGetStack(addr, trace, size, thread_id, /* alloc_stack */ false);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_get_shadow_mapping(uptr *shadow_scale, uptr *shadow_offset) {
+ if (shadow_scale)
+ *shadow_scale = SHADOW_SCALE;
+ if (shadow_offset)
+ *shadow_offset = SHADOW_OFFSET;
+}
diff --git a/libsanitizer/asan/asan_dll_thunk.cc b/libsanitizer/asan/asan_dll_thunk.cc
deleted file mode 100644
index 19c31f0def..0000000000
--- a/libsanitizer/asan/asan_dll_thunk.cc
+++ /dev/null
@@ -1,196 +0,0 @@
-//===-- asan_dll_thunk.cc -------------------------------------------------===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// This file defines a family of thunks that should be statically linked into
-// the DLLs that have ASan instrumentation in order to delegate the calls to the
-// shared runtime that lives in the main binary.
-// See https://code.google.com/p/address-sanitizer/issues/detail?id=209 for the
-// details.
-//===----------------------------------------------------------------------===//
-
-// Only compile this code when buidling asan_dll_thunk.lib
-// Using #ifdef rather than relying on Makefiles etc.
-// simplifies the build procedure.
-#ifdef ASAN_DLL_THUNK
-
-// ----------------- Helper functions and macros --------------------- {{{1
-extern "C" {
-void *__stdcall GetModuleHandleA(const char *module_name);
-void *__stdcall GetProcAddress(void *module, const char *proc_name);
-void abort();
-}
-
-static void *getRealProcAddressOrDie(const char *name) {
- void *ret = GetProcAddress(GetModuleHandleA(0), name);
- if (!ret)
- abort();
- return ret;
-}
-
-#define WRAP_V_V(name) \
- extern "C" void name() { \
- typedef void (*fntype)(); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- fn(); \
- }
-
-#define WRAP_V_W(name) \
- extern "C" void name(void *arg) { \
- typedef void (*fntype)(void *arg); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- fn(arg); \
- }
-
-#define WRAP_V_WW(name) \
- extern "C" void name(void *arg1, void *arg2) { \
- typedef void (*fntype)(void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- fn(arg1, arg2); \
- }
-
-#define WRAP_V_WWW(name) \
- extern "C" void name(void *arg1, void *arg2, void *arg3) { \
- typedef void *(*fntype)(void *, void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- fn(arg1, arg2, arg3); \
- }
-
-#define WRAP_W_V(name) \
- extern "C" void *name() { \
- typedef void *(*fntype)(); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(); \
- }
-
-#define WRAP_W_W(name) \
- extern "C" void *name(void *arg) { \
- typedef void *(*fntype)(void *arg); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg); \
- }
-
-#define WRAP_W_WW(name) \
- extern "C" void *name(void *arg1, void *arg2) { \
- typedef void *(*fntype)(void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2); \
- }
-
-#define WRAP_W_WWW(name) \
- extern "C" void *name(void *arg1, void *arg2, void *arg3) { \
- typedef void *(*fntype)(void *, void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2, arg3); \
- }
-
-#define WRAP_W_WWWW(name) \
- extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4) { \
- typedef void *(*fntype)(void *, void *, void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2, arg3, arg4); \
- }
-
-#define WRAP_W_WWWWW(name) \
- extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
- void *arg5) { \
- typedef void *(*fntype)(void *, void *, void *, void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2, arg3, arg4, arg5); \
- }
-
-#define WRAP_W_WWWWWW(name) \
- extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
- void *arg5, void *arg6) { \
- typedef void *(*fntype)(void *, void *, void *, void *, void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2, arg3, arg4, arg5, arg6); \
- }
-// }}}
-
-// ----------------- ASan own interface functions --------------------
-WRAP_W_V(__asan_should_detect_stack_use_after_return)
-
-extern "C" {
- int __asan_option_detect_stack_use_after_return;
-
- // Manually wrap __asan_init as we need to initialize
- // __asan_option_detect_stack_use_after_return afterwards.
- void __asan_init_v3() {
- typedef void (*fntype)();
- static fntype fn = (fntype)getRealProcAddressOrDie("__asan_init_v3");
- fn();
- __asan_option_detect_stack_use_after_return =
- (__asan_should_detect_stack_use_after_return() != 0);
- }
-}
-
-WRAP_V_V(__asan_handle_no_return)
-
-WRAP_V_W(__asan_report_store1)
-WRAP_V_W(__asan_report_store2)
-WRAP_V_W(__asan_report_store4)
-WRAP_V_W(__asan_report_store8)
-WRAP_V_W(__asan_report_store16)
-WRAP_V_WW(__asan_report_store_n)
-
-WRAP_V_W(__asan_report_load1)
-WRAP_V_W(__asan_report_load2)
-WRAP_V_W(__asan_report_load4)
-WRAP_V_W(__asan_report_load8)
-WRAP_V_W(__asan_report_load16)
-WRAP_V_WW(__asan_report_load_n)
-
-WRAP_V_WW(__asan_register_globals)
-WRAP_V_WW(__asan_unregister_globals)
-
-WRAP_W_WW(__asan_stack_malloc_0)
-WRAP_W_WW(__asan_stack_malloc_1)
-WRAP_W_WW(__asan_stack_malloc_2)
-WRAP_W_WW(__asan_stack_malloc_3)
-WRAP_W_WW(__asan_stack_malloc_4)
-WRAP_W_WW(__asan_stack_malloc_5)
-WRAP_W_WW(__asan_stack_malloc_6)
-WRAP_W_WW(__asan_stack_malloc_7)
-WRAP_W_WW(__asan_stack_malloc_8)
-WRAP_W_WW(__asan_stack_malloc_9)
-WRAP_W_WW(__asan_stack_malloc_10)
-
-WRAP_V_WWW(__asan_stack_free_0)
-WRAP_V_WWW(__asan_stack_free_1)
-WRAP_V_WWW(__asan_stack_free_2)
-WRAP_V_WWW(__asan_stack_free_4)
-WRAP_V_WWW(__asan_stack_free_5)
-WRAP_V_WWW(__asan_stack_free_6)
-WRAP_V_WWW(__asan_stack_free_7)
-WRAP_V_WWW(__asan_stack_free_8)
-WRAP_V_WWW(__asan_stack_free_9)
-WRAP_V_WWW(__asan_stack_free_10)
-
-// TODO(timurrrr): Add more interface functions on the as-needed basis.
-
-// ----------------- Memory allocation functions ---------------------
-WRAP_V_W(free)
-WRAP_V_WW(_free_dbg)
-
-WRAP_W_W(malloc)
-WRAP_W_WWWW(_malloc_dbg)
-
-WRAP_W_WW(calloc)
-WRAP_W_WWWWW(_calloc_dbg)
-WRAP_W_WWW(_calloc_impl)
-
-WRAP_W_WW(realloc)
-WRAP_W_WWW(_realloc_dbg)
-WRAP_W_WWW(_recalloc)
-
-WRAP_W_W(_msize)
-
-// TODO(timurrrr): Do we need to add _Crt* stuff here? (see asan_malloc_win.cc).
-
-#endif // ASAN_DLL_THUNK
diff --git a/libsanitizer/asan/asan_fake_stack.cc b/libsanitizer/asan/asan_fake_stack.cc
index cf4122472e..cfe96a0882 100644
--- a/libsanitizer/asan/asan_fake_stack.cc
+++ b/libsanitizer/asan/asan_fake_stack.cc
@@ -40,21 +40,32 @@ FakeStack *FakeStack::Create(uptr stack_size_log) {
stack_size_log = kMinStackSizeLog;
if (stack_size_log > kMaxStackSizeLog)
stack_size_log = kMaxStackSizeLog;
+ uptr size = RequiredSize(stack_size_log);
FakeStack *res = reinterpret_cast<FakeStack *>(
- MmapOrDie(RequiredSize(stack_size_log), "FakeStack"));
+ flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack")
+ : MmapOrDie(size, "FakeStack"));
res->stack_size_log_ = stack_size_log;
- if (common_flags()->verbosity) {
- u8 *p = reinterpret_cast<u8 *>(res);
- Report("T%d: FakeStack created: %p -- %p stack_size_log: %zd \n",
- GetCurrentTidOrInvalid(), p,
- p + FakeStack::RequiredSize(stack_size_log), stack_size_log);
- }
+ u8 *p = reinterpret_cast<u8 *>(res);
+ VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
+ "mmapped %zdK, noreserve=%d \n",
+ GetCurrentTidOrInvalid(), p,
+ p + FakeStack::RequiredSize(stack_size_log), stack_size_log,
+ size >> 10, flags()->uar_noreserve);
return res;
}
-void FakeStack::Destroy() {
+void FakeStack::Destroy(int tid) {
PoisonAll(0);
- UnmapOrDie(this, RequiredSize(stack_size_log_));
+ if (common_flags()->verbosity >= 2) {
+ InternalScopedString str(kNumberOfSizeClasses * 50);
+ for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
+ str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
+ NumberOfFrames(stack_size_log(), class_id));
+ Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
+ }
+ uptr size = RequiredSize(stack_size_log_);
+ FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size);
+ UnmapOrDie(this, size);
}
void FakeStack::PoisonAll(u8 magic) {
@@ -91,7 +102,7 @@ FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
return 0; // We are out of fake stack.
}
-uptr FakeStack::AddrIsInFakeStack(uptr ptr) {
+uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
uptr stack_size_log = this->stack_size_log();
uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
@@ -101,7 +112,10 @@ uptr FakeStack::AddrIsInFakeStack(uptr ptr) {
CHECK_LE(base, ptr);
CHECK_LT(ptr, base + (1UL << stack_size_log));
uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
- return base + pos * BytesInSizeClass(class_id);
+ uptr res = base + pos * BytesInSizeClass(class_id);
+ *frame_end = res + BytesInSizeClass(class_id);
+ *frame_beg = res + sizeof(FakeFrame);
+ return res;
}
void FakeStack::HandleNoReturn() {
@@ -195,14 +209,15 @@ ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) {
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
+using namespace __asan;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
__asan_stack_malloc_##class_id(uptr size, uptr real_stack) { \
- return __asan::OnMalloc(class_id, size, real_stack); \
+ return OnMalloc(class_id, size, real_stack); \
} \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
uptr ptr, uptr size, uptr real_stack) { \
- __asan::OnFree(ptr, class_id, size, real_stack); \
+ OnFree(ptr, class_id, size, real_stack); \
}
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
@@ -216,3 +231,23 @@ DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
+ void **end) {
+ FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
+ if (!fs) return 0;
+ uptr frame_beg, frame_end;
+ FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
+ reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
+ if (!frame) return 0;
+ if (frame->magic != kCurrentStackFrameMagic)
+ return 0;
+ if (beg) *beg = reinterpret_cast<void*>(frame_beg);
+ if (end) *end = reinterpret_cast<void*>(frame_end);
+ return reinterpret_cast<void*>(frame->real_stack);
+}
+} // extern "C"
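The two exported helpers above give a conservative scanner or a debugger a way to translate an address that points into the use-after-return fake stack back to the frame that owns it. A minimal sketch of such a lookup follows; the declarations are repeated locally and `TranslateFakeStackAddr` is an illustrative name, not part of the runtime.

// Illustrative use of the fake-stack query interface; not part of the patch.
#include <cstdio>
extern "C" {
void *__asan_get_current_fake_stack();
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
                                   void **end);
}

void *TranslateFakeStackAddr(void *addr) {
  // May be null if this thread has no fake stack; the query below handles a
  // null fake stack and simply reports "not ours".
  void *fake_stack = __asan_get_current_fake_stack();
  void *beg = nullptr, *end = nullptr;
  // Returns the real stack address recorded in the frame, or null if `addr`
  // does not fall inside a live frame of this thread's fake stack.
  void *real_frame = __asan_addr_is_in_fake_stack(fake_stack, addr, &beg, &end);
  if (real_frame)
    printf("%p is in fake frame [%p, %p), real stack frame at %p\n", addr, beg,
           end, real_frame);
  return real_frame ? real_frame : addr;
}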
diff --git a/libsanitizer/asan/asan_fake_stack.h b/libsanitizer/asan/asan_fake_stack.h
index 5196025681..550a86e297 100644
--- a/libsanitizer/asan/asan_fake_stack.h
+++ b/libsanitizer/asan/asan_fake_stack.h
@@ -63,7 +63,7 @@ class FakeStack {
// CTOR: create the FakeStack as a single mmap-ed object.
static FakeStack *Create(uptr stack_size_log);
- void Destroy();
+ void Destroy(int tid);
// stack_size_log is at least 15 (stack_size >= 32K).
static uptr SizeRequiredForFlags(uptr stack_size_log) {
@@ -127,7 +127,11 @@ class FakeStack {
void PoisonAll(u8 magic);
// Return the beginning of the FakeFrame or 0 if the address is not ours.
- uptr AddrIsInFakeStack(uptr addr);
+ uptr AddrIsInFakeStack(uptr addr, uptr *frame_beg, uptr *frame_end);
+ USED uptr AddrIsInFakeStack(uptr addr) {
+ uptr t1, t2;
+ return AddrIsInFakeStack(addr, &t1, &t2);
+ }
// Number of bytes in a fake frame of this size class.
static uptr BytesInSizeClass(uptr class_id) {
diff --git a/libsanitizer/asan/asan_flags.h b/libsanitizer/asan/asan_flags.h
index 62b5d3215d..0b6a9857be 100644
--- a/libsanitizer/asan/asan_flags.h
+++ b/libsanitizer/asan/asan_flags.h
@@ -26,88 +26,44 @@
namespace __asan {
struct Flags {
- // Size (in bytes) of quarantine used to detect use-after-free errors.
- // Lower value may reduce memory usage but increase the chance of
- // false negatives.
+ // Flag descriptions are in asan_rtl.cc.
int quarantine_size;
- // Size (in bytes) of redzones around heap objects.
- // Requirement: redzone >= 32, is a power of two.
int redzone;
- // If set, prints some debugging information and does additional checks.
+ int max_redzone;
bool debug;
- // Controls the way to handle globals (0 - don't detect buffer overflow
- // on globals, 1 - detect buffer overflow, 2 - print data about registered
- // globals).
int report_globals;
- // If set, attempts to catch initialization order issues.
bool check_initialization_order;
- // If set, uses custom wrappers and replacements for libc string functions
- // to find more errors.
bool replace_str;
- // If set, uses custom wrappers for memset/memcpy/memmove intinsics.
bool replace_intrin;
- // Used on Mac only.
bool mac_ignore_invalid_free;
- // Enables stack-use-after-return checking at run-time.
bool detect_stack_use_after_return;
- // The minimal fake stack size log.
- int uar_stack_size_log;
- // ASan allocator flag. max_malloc_fill_size is the maximal amount of bytes
- // that will be filled with malloc_fill_byte on malloc.
+ int min_uar_stack_size_log;
+ int max_uar_stack_size_log;
+ bool uar_noreserve;
int max_malloc_fill_size, malloc_fill_byte;
- // Override exit status if something was reported.
int exitcode;
- // If set, user may manually mark memory regions as poisoned or unpoisoned.
bool allow_user_poisoning;
- // Number of seconds to sleep between printing an error report and
- // terminating application. Useful for debug purposes (when one needs
- // to attach gdb, for example).
int sleep_before_dying;
- // If set, registers ASan custom segv handler.
- bool handle_segv;
- // If set, allows user register segv handler even if ASan registers one.
- bool allow_user_segv_handler;
- // If set, uses alternate stack for signal handling.
- bool use_sigaltstack;
- // Allow the users to work around the bug in Nvidia drivers prior to 295.*.
bool check_malloc_usable_size;
- // If set, explicitly unmaps (huge) shadow at exit.
bool unmap_shadow_on_exit;
- // If set, calls abort() instead of _exit() after printing an error report.
bool abort_on_error;
- // Print various statistics after printing an error message or if atexit=1.
bool print_stats;
- // Print the legend for the shadow bytes.
bool print_legend;
- // If set, prints ASan exit stats even after program terminates successfully.
bool atexit;
- // If set, coverage information will be dumped at shutdown time if the
- // appropriate instrumentation was enabled.
- bool coverage;
- // By default, disable core dumper on 64-bit - it makes little sense
- // to dump 16T+ core.
- bool disable_core;
- // Allow the tool to re-exec the program. This may interfere badly with the
- // debugger.
bool allow_reexec;
- // If set, prints not only thread creation stacks for threads in error report,
- // but also thread creation stacks for threads that created those threads,
- // etc. up to main thread.
bool print_full_thread_history;
- // Poison (or not) the heap memory on [de]allocation. Zero value is useful
- // for benchmarking the allocator or instrumentator.
bool poison_heap;
- // If true, poison partially addressable 8-byte aligned words (default=true).
- // This flag affects heap and global buffers, but not stack buffers.
bool poison_partial;
- // Report errors on malloc/delete, new/free, new/delete[], etc.
+ bool poison_array_cookie;
bool alloc_dealloc_mismatch;
- // If true, assume that memcmp(p1, p2, n) always reads n bytes before
- // comparing p1 and p2.
+ bool new_delete_type_mismatch;
bool strict_memcmp;
- // If true, assume that dynamic initializers can never access globals from
- // other modules, even if the latter are already initialized.
bool strict_init_order;
+ bool start_deactivated;
+ int detect_invalid_pointer_pairs;
+ bool detect_container_overflow;
+ int detect_odr_violation;
+ bool dump_instruction_bytes;
};
extern Flags asan_flags_dont_use_directly;
diff --git a/libsanitizer/asan/asan_globals.cc b/libsanitizer/asan/asan_globals.cc
index e97850a854..4bb88cfa00 100644
--- a/libsanitizer/asan/asan_globals.cc
+++ b/libsanitizer/asan/asan_globals.cc
@@ -20,6 +20,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __asan {
@@ -43,6 +44,14 @@ typedef InternalMmapVector<DynInitGlobal> VectorOfGlobals;
// Lazy-initialized and never deleted.
static VectorOfGlobals *dynamic_init_globals;
+// We want to remember where a certain range of globals was registered.
+struct GlobalRegistrationSite {
+ u32 stack_id;
+ Global *g_first, *g_last;
+};
+typedef InternalMmapVector<GlobalRegistrationSite> GlobalRegistrationSiteVector;
+static GlobalRegistrationSiteVector *global_registration_site_vector;
+
ALWAYS_INLINE void PoisonShadowForGlobal(const Global *g, u8 value) {
FastPoisonShadow(g->beg, g->size_with_redzone, value);
}
@@ -60,25 +69,74 @@ ALWAYS_INLINE void PoisonRedZones(const Global &g) {
}
}
+const uptr kMinimalDistanceFromAnotherGlobal = 64;
+
+bool IsAddressNearGlobal(uptr addr, const __asan_global &g) {
+ if (addr <= g.beg - kMinimalDistanceFromAnotherGlobal) return false;
+ if (addr >= g.beg + g.size_with_redzone) return false;
+ return true;
+}
+
static void ReportGlobal(const Global &g, const char *prefix) {
- Report("%s Global: beg=%p size=%zu/%zu name=%s module=%s dyn_init=%zu\n",
- prefix, (void*)g.beg, g.size, g.size_with_redzone, g.name,
+ Report("%s Global[%p]: beg=%p size=%zu/%zu name=%s module=%s dyn_init=%zu\n",
+ prefix, &g, (void *)g.beg, g.size, g.size_with_redzone, g.name,
g.module_name, g.has_dynamic_init);
+ if (g.location) {
+ Report(" location (%p): name=%s[%p], %d %d\n", g.location,
+ g.location->filename, g.location->filename, g.location->line_no,
+ g.location->column_no);
+ }
}
-bool DescribeAddressIfGlobal(uptr addr, uptr size) {
+static bool DescribeOrGetInfoIfGlobal(uptr addr, uptr size, bool print,
+ Global *output_global) {
if (!flags()->report_globals) return false;
BlockingMutexLock lock(&mu_for_globals);
bool res = false;
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
const Global &g = *l->g;
- if (flags()->report_globals >= 2)
- ReportGlobal(g, "Search");
- res |= DescribeAddressRelativeToGlobal(addr, size, g);
+ if (print) {
+ if (flags()->report_globals >= 2)
+ ReportGlobal(g, "Search");
+ res |= DescribeAddressRelativeToGlobal(addr, size, g);
+ } else {
+ if (IsAddressNearGlobal(addr, g)) {
+ CHECK(output_global);
+ *output_global = g;
+ return true;
+ }
+ }
}
return res;
}
+bool DescribeAddressIfGlobal(uptr addr, uptr size) {
+ return DescribeOrGetInfoIfGlobal(addr, size, /* print */ true,
+ /* output_global */ nullptr);
+}
+
+bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr) {
+ Global g = {};
+ if (DescribeOrGetInfoIfGlobal(addr, /* size */ 1, /* print */ false, &g)) {
+ internal_strncpy(descr->name, g.name, descr->name_size);
+ descr->region_address = g.beg;
+ descr->region_size = g.size;
+ descr->region_kind = "global";
+ return true;
+ }
+ return false;
+}
+
+u32 FindRegistrationSite(const Global *g) {
+ CHECK(global_registration_site_vector);
+ for (uptr i = 0, n = global_registration_site_vector->size(); i < n; i++) {
+ GlobalRegistrationSite &grs = (*global_registration_site_vector)[i];
+ if (g >= grs.g_first && g <= grs.g_last)
+ return grs.stack_id;
+ }
+ return 0;
+}
+
// Register a global variable.
// This function may be called more than once for every global
// so we store the globals in a map.
@@ -90,6 +148,22 @@ static void RegisterGlobal(const Global *g) {
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
+ // This "ODR violation" detection is fundamentally incompatible with
+ // how GCC registers globals. Disable as useless until rewritten upstream.
+ if (0 && flags()->detect_odr_violation) {
+ // Try detecting ODR (One Definition Rule) violation, i.e. the situation
+ // where two globals with the same name are defined in different modules.
+ if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
+ // This check may not be enough: if the first global is much larger
+ // the entire redzone of the second global may be within the first global.
+ for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+ if (g->beg == l->g->beg &&
+ (flags()->detect_odr_violation >= 2 || g->size != l->g->size))
+ ReportODRViolation(g, FindRegistrationSite(g),
+ l->g, FindRegistrationSite(l->g));
+ }
+ }
+ }
if (flags()->poison_heap)
PoisonRedZones(*g);
ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals;
@@ -142,7 +216,18 @@ using namespace __asan; // NOLINT
// Register an array of globals.
void __asan_register_globals(__asan_global *globals, uptr n) {
if (!flags()->report_globals) return;
+ GET_STACK_TRACE_FATAL_HERE;
+ u32 stack_id = StackDepotPut(stack);
BlockingMutexLock lock(&mu_for_globals);
+ if (!global_registration_site_vector)
+ global_registration_site_vector =
+ new(allocator_for_globals) GlobalRegistrationSiteVector(128);
+ GlobalRegistrationSite site = {stack_id, &globals[0], &globals[n - 1]};
+ global_registration_site_vector->push_back(site);
+ if (flags()->report_globals >= 2) {
+ PRINT_CURRENT_STACK();
+ Printf("=== ID %d; %p %p\n", stack_id, &globals[0], &globals[n - 1]);
+ }
for (uptr i = 0; i < n; i++) {
RegisterGlobal(&globals[i]);
}
diff --git a/libsanitizer/asan/asan_init_version.h b/libsanitizer/asan/asan_init_version.h
new file mode 100644
index 0000000000..da232513a0
--- /dev/null
+++ b/libsanitizer/asan/asan_init_version.h
@@ -0,0 +1,30 @@
+//===-- asan_init_version.h -------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This header defines a versioned __asan_init function to be called at the
+// startup of the instrumented program.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_INIT_VERSION_H
+#define ASAN_INIT_VERSION_H
+
+extern "C" {
+ // Every time the ASan ABI changes we also change the version number in the
+ // __asan_init function name. Objects built with incompatible ASan ABI
+ // versions will not link with run-time.
+ // Changes between ABI versions:
+ // v1=>v2: added 'module_name' to __asan_global
+ // v2=>v3: stack frame description (created by the compiler)
+ // contains the function PC as the 3-rd field (see
+ // DescribeAddressIfStack).
+ // v3=>v4: added '__asan_global_source_location' to __asan_global.
+ #define __asan_init __asan_init_v4
+ #define __asan_init_name "__asan_init_v4"
+}
+
+#endif // ASAN_INIT_VERSION_H
diff --git a/libsanitizer/asan/asan_intercepted_functions.h b/libsanitizer/asan/asan_intercepted_functions.h
deleted file mode 100644
index 19b53363a5..0000000000
--- a/libsanitizer/asan/asan_intercepted_functions.h
+++ /dev/null
@@ -1,77 +0,0 @@
-//===-- asan_intercepted_functions.h ----------------------------*- C++ -*-===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// ASan-private header containing prototypes for wrapper functions and wrappers
-//===----------------------------------------------------------------------===//
-#ifndef ASAN_INTERCEPTED_FUNCTIONS_H
-#define ASAN_INTERCEPTED_FUNCTIONS_H
-
-#include "sanitizer_common/sanitizer_platform_interceptors.h"
-
-// Use macro to describe if specific function should be
-// intercepted on a given platform.
-#if !SANITIZER_WINDOWS
-# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
-# define ASAN_INTERCEPT__LONGJMP 1
-# define ASAN_INTERCEPT_STRDUP 1
-# define ASAN_INTERCEPT_INDEX 1
-# define ASAN_INTERCEPT_PTHREAD_CREATE 1
-# define ASAN_INTERCEPT_MLOCKX 1
-#else
-# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
-# define ASAN_INTERCEPT__LONGJMP 0
-# define ASAN_INTERCEPT_STRDUP 0
-# define ASAN_INTERCEPT_INDEX 0
-# define ASAN_INTERCEPT_PTHREAD_CREATE 0
-# define ASAN_INTERCEPT_MLOCKX 0
-#endif
-
-#if SANITIZER_LINUX
-# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
-#else
-# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
-#endif
-
-#if !SANITIZER_MAC
-# define ASAN_INTERCEPT_STRNLEN 1
-#else
-# define ASAN_INTERCEPT_STRNLEN 0
-#endif
-
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-# define ASAN_INTERCEPT_SWAPCONTEXT 1
-#else
-# define ASAN_INTERCEPT_SWAPCONTEXT 0
-#endif
-
-#if !SANITIZER_ANDROID && !SANITIZER_WINDOWS
-# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 1
-#else
-# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 0
-#endif
-
-#if !SANITIZER_WINDOWS
-# define ASAN_INTERCEPT_SIGLONGJMP 1
-#else
-# define ASAN_INTERCEPT_SIGLONGJMP 0
-#endif
-
-#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS
-# define ASAN_INTERCEPT___CXA_THROW 1
-#else
-# define ASAN_INTERCEPT___CXA_THROW 0
-#endif
-
-#if !SANITIZER_WINDOWS
-# define ASAN_INTERCEPT___CXA_ATEXIT 1
-#else
-# define ASAN_INTERCEPT___CXA_ATEXIT 0
-#endif
-
-#endif // ASAN_INTERCEPTED_FUNCTIONS_H
diff --git a/libsanitizer/asan/asan_interceptors.cc b/libsanitizer/asan/asan_interceptors.cc
index decbfea5f7..182b7842ed 100644
--- a/libsanitizer/asan/asan_interceptors.cc
+++ b/libsanitizer/asan/asan_interceptors.cc
@@ -12,14 +12,12 @@
#include "asan_interceptors.h"
#include "asan_allocator.h"
-#include "asan_intercepted_functions.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
-#include "interception/interception.h"
#include "sanitizer_common/sanitizer_libc.h"
namespace __asan {
@@ -43,6 +41,10 @@ static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) {
uptr __offset = (uptr)(offset); \
uptr __size = (uptr)(size); \
uptr __bad = 0; \
+ if (__offset > __offset + __size) { \
+ GET_STACK_TRACE_FATAL_HERE; \
+ ReportStringFunctionSizeOverflow(__offset, __size, &stack); \
+ } \
if (!QuickCheckForUnpoisonedRegion(__offset, __size) && \
(__bad = __asan_region_is_poisoned(__offset, __size))) { \
GET_CURRENT_PC_BP_SP; \
@@ -70,13 +72,6 @@ static inline bool RangesOverlap(const char *offset1, uptr length1,
} \
} while (0)
-#define ENSURE_ASAN_INITED() do { \
- CHECK(!asan_init_is_running); \
- if (!asan_inited) { \
- __asan_init(); \
- } \
-} while (0)
-
static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
#if ASAN_INTERCEPT_STRNLEN
if (REAL(strnlen) != 0) {
@@ -106,11 +101,10 @@ DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
#if !SANITIZER_MAC
-#define ASAN_INTERCEPT_FUNC(name) \
- do { \
- if ((!INTERCEPT_FUNCTION(name) || !REAL(name)) && \
- common_flags()->verbosity > 0) \
- Report("AddressSanitizer: failed to intercept '" #name "'\n"); \
+#define ASAN_INTERCEPT_FUNC(name) \
+ do { \
+ if ((!INTERCEPT_FUNCTION(name) || !REAL(name))) \
+ VReport(1, "AddressSanitizer: failed to intercept '" #name "'\n"); \
} while (0)
#else
// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
@@ -118,19 +112,18 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
#endif // SANITIZER_MAC
#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
-#define COMMON_INTERCEPTOR_UNPOISON_PARAM(ctx, count) \
- do { \
- } while (false)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
ASAN_WRITE_RANGE(ptr, size)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) ASAN_READ_RANGE(ptr, size)
-#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
- do { \
- if (asan_init_is_running) return REAL(func)(__VA_ARGS__); \
- ctx = 0; \
- (void) ctx; \
- if (SANITIZER_MAC && !asan_inited) return REAL(func)(__VA_ARGS__); \
- ENSURE_ASAN_INITED(); \
+#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ do { \
+ if (asan_init_is_running) \
+ return REAL(func)(__VA_ARGS__); \
+ ctx = 0; \
+ (void) ctx; \
+ if (SANITIZER_MAC && UNLIKELY(!asan_inited)) \
+ return REAL(func)(__VA_ARGS__); \
+ ENSURE_ASAN_INITED(); \
} while (false)
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
do { \
@@ -151,6 +144,9 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
} while (false)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res) CovUpdateMapping()
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CovUpdateMapping()
+#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
#include "sanitizer_common/sanitizer_common_interceptors.inc"
#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(p, s)
@@ -194,20 +190,41 @@ INTERCEPTOR(int, pthread_create, void *thread,
#endif // ASAN_INTERCEPT_PTHREAD_CREATE
#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
+
+#if SANITIZER_ANDROID
+INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
+ if (!AsanInterceptsSignal(signum) ||
+ common_flags()->allow_user_segv_handler) {
+ return REAL(bsd_signal)(signum, handler);
+ }
+ return 0;
+}
+#else
INTERCEPTOR(void*, signal, int signum, void *handler) {
- if (!AsanInterceptsSignal(signum) || flags()->allow_user_segv_handler) {
+ if (!AsanInterceptsSignal(signum) ||
+ common_flags()->allow_user_segv_handler) {
return REAL(signal)(signum, handler);
}
return 0;
}
+#endif
INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
struct sigaction *oldact) {
- if (!AsanInterceptsSignal(signum) || flags()->allow_user_segv_handler) {
+ if (!AsanInterceptsSignal(signum) ||
+ common_flags()->allow_user_segv_handler) {
return REAL(sigaction)(signum, act, oldact);
}
return 0;
}
+
+namespace __sanitizer {
+int real_sigaction(int signum, const void *act, void *oldact) {
+ return REAL(sigaction)(signum,
+ (struct sigaction *)act, (struct sigaction *)oldact);
+}
+} // namespace __sanitizer
+
#elif SANITIZER_POSIX
// We need to have defined REAL(sigaction) on posix systems.
DEFINE_REAL(int, sigaction, int signum, const struct sigaction *act,
@@ -277,45 +294,36 @@ INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
}
#endif
-// intercept mlock and friends.
-// Since asan maps 16T of RAM, mlock is completely unfriendly to asan.
-// All functions return 0 (success).
-static void MlockIsUnsupported() {
- static bool printed = false;
- if (printed) return;
- printed = true;
- if (common_flags()->verbosity > 0) {
- Printf("INFO: AddressSanitizer ignores "
- "mlock/mlockall/munlock/munlockall\n");
- }
-}
-
-INTERCEPTOR(int, mlock, const void *addr, uptr len) {
- MlockIsUnsupported();
- return 0;
-}
-
-INTERCEPTOR(int, munlock, const void *addr, uptr len) {
- MlockIsUnsupported();
- return 0;
+#if SANITIZER_WINDOWS
+INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) {
+ CHECK(REAL(RaiseException));
+ __asan_handle_no_return();
+ REAL(RaiseException)(a, b, c, d);
}
-INTERCEPTOR(int, mlockall, int flags) {
- MlockIsUnsupported();
- return 0;
+INTERCEPTOR(int, _except_handler3, void *a, void *b, void *c, void *d) {
+ CHECK(REAL(_except_handler3));
+ __asan_handle_no_return();
+ return REAL(_except_handler3)(a, b, c, d);
}
-INTERCEPTOR(int, munlockall, void) {
- MlockIsUnsupported();
- return 0;
+#if ASAN_DYNAMIC
+// This handler is named differently in -MT and -MD CRTs.
+#define _except_handler4 _except_handler4_common
+#endif
+INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
+ CHECK(REAL(_except_handler4));
+ __asan_handle_no_return();
+ return REAL(_except_handler4)(a, b, c, d);
}
+#endif
static inline int CharCmp(unsigned char c1, unsigned char c2) {
return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1;
}
INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) {
- if (!asan_inited) return internal_memcmp(a1, a2, size);
+ if (UNLIKELY(!asan_inited)) return internal_memcmp(a1, a2, size);
ENSURE_ASAN_INITED();
if (flags()->replace_intrin) {
if (flags()->strict_memcmp) {
@@ -342,24 +350,8 @@ INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) {
return REAL(memcmp(a1, a2, size));
}
-#define MEMMOVE_BODY { \
- if (!asan_inited) return internal_memmove(to, from, size); \
- if (asan_init_is_running) { \
- return REAL(memmove)(to, from, size); \
- } \
- ENSURE_ASAN_INITED(); \
- if (flags()->replace_intrin) { \
- ASAN_READ_RANGE(from, size); \
- ASAN_WRITE_RANGE(to, size); \
- } \
- return internal_memmove(to, from, size); \
-}
-
-INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) MEMMOVE_BODY
-
-INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
-#if !SANITIZER_MAC
- if (!asan_inited) return internal_memcpy(to, from, size);
+void *__asan_memcpy(void *to, const void *from, uptr size) {
+ if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size);
// memcpy is called during __asan_init() from the internals
// of printf(...).
if (asan_init_is_running) {
@@ -375,23 +367,11 @@ INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
ASAN_READ_RANGE(from, size);
ASAN_WRITE_RANGE(to, size);
}
- // Interposing of resolver functions is broken on Mac OS 10.7 and 10.8, so
- // calling REAL(memcpy) here leads to infinite recursion.
- // See also http://code.google.com/p/address-sanitizer/issues/detail?id=116.
- return internal_memcpy(to, from, size);
-#else
- // At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced
- // with WRAP(memcpy). As a result, false positives are reported for memmove()
- // calls. If we just disable error reporting with
- // ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with
- // internal_memcpy(), which may lead to crashes, see
- // http://llvm.org/bugs/show_bug.cgi?id=16362.
- MEMMOVE_BODY
-#endif // !SANITIZER_MAC
+ return REAL(memcpy)(to, from, size);
}
-INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
- if (!asan_inited) return internal_memset(block, c, size);
+void *__asan_memset(void *block, int c, uptr size) {
+ if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size);
// memset is called inside Printf.
if (asan_init_is_running) {
return REAL(memset)(block, c, size);
@@ -403,8 +383,41 @@ INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
return REAL(memset)(block, c, size);
}
+void *__asan_memmove(void *to, const void *from, uptr size) {
+ if (UNLIKELY(!asan_inited))
+ return internal_memmove(to, from, size);
+ ENSURE_ASAN_INITED();
+ if (flags()->replace_intrin) {
+ ASAN_READ_RANGE(from, size);
+ ASAN_WRITE_RANGE(to, size);
+ }
+ return internal_memmove(to, from, size);
+}
+
+INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) {
+ return __asan_memmove(to, from, size);
+}
+
+INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
+#if !SANITIZER_MAC
+ return __asan_memcpy(to, from, size);
+#else
+ // At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced
+ // with WRAP(memcpy). As a result, false positives are reported for memmove()
+ // calls. If we just disable error reporting with
+ // ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with
+ // internal_memcpy(), which may lead to crashes, see
+ // http://llvm.org/bugs/show_bug.cgi?id=16362.
+ return __asan_memmove(to, from, size);
+#endif // !SANITIZER_MAC
+}
+
+INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
+ return __asan_memset(block, c, size);
+}
+
INTERCEPTOR(char*, strchr, const char *str, int c) {
- if (!asan_inited) return internal_strchr(str, c);
+ if (UNLIKELY(!asan_inited)) return internal_strchr(str, c);
// strchr is called inside create_purgeable_zone() when MallocGuardEdges=1 is
// used.
if (asan_init_is_running) {
@@ -473,7 +486,7 @@ INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
#if SANITIZER_MAC
- if (!asan_inited) return REAL(strcpy)(to, from); // NOLINT
+ if (UNLIKELY(!asan_inited)) return REAL(strcpy)(to, from); // NOLINT
#endif
// strcpy is called from malloc_default_purgeable_zone()
// in __asan::ReplaceSystemAlloc() on Mac.
@@ -492,7 +505,7 @@ INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
#if ASAN_INTERCEPT_STRDUP
INTERCEPTOR(char*, strdup, const char *s) {
- if (!asan_inited) return internal_strdup(s);
+ if (UNLIKELY(!asan_inited)) return internal_strdup(s);
ENSURE_ASAN_INITED();
uptr length = REAL(strlen)(s);
if (flags()->replace_str) {
@@ -505,23 +518,23 @@ INTERCEPTOR(char*, strdup, const char *s) {
}
#endif
-INTERCEPTOR(uptr, strlen, const char *s) {
- if (!asan_inited) return internal_strlen(s);
+INTERCEPTOR(SIZE_T, strlen, const char *s) {
+ if (UNLIKELY(!asan_inited)) return internal_strlen(s);
// strlen is called from malloc_default_purgeable_zone()
// in __asan::ReplaceSystemAlloc() on Mac.
if (asan_init_is_running) {
return REAL(strlen)(s);
}
ENSURE_ASAN_INITED();
- uptr length = REAL(strlen)(s);
+ SIZE_T length = REAL(strlen)(s);
if (flags()->replace_str) {
ASAN_READ_RANGE(s, length + 1);
}
return length;
}
-INTERCEPTOR(uptr, wcslen, const wchar_t *s) {
- uptr length = REAL(wcslen)(s);
+INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
+ SIZE_T length = REAL(wcslen)(s);
if (!asan_init_is_running) {
ENSURE_ASAN_INITED();
ASAN_READ_RANGE(s, (length + 1) * sizeof(wchar_t));
@@ -588,7 +601,7 @@ INTERCEPTOR(long, strtol, const char *nptr, // NOLINT
INTERCEPTOR(int, atoi, const char *nptr) {
#if SANITIZER_MAC
- if (!asan_inited) return REAL(atoi)(nptr);
+ if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr);
#endif
ENSURE_ASAN_INITED();
if (!flags()->replace_str) {
@@ -607,7 +620,7 @@ INTERCEPTOR(int, atoi, const char *nptr) {
INTERCEPTOR(long, atol, const char *nptr) { // NOLINT
#if SANITIZER_MAC
- if (!asan_inited) return REAL(atol)(nptr);
+ if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr);
#endif
ENSURE_ASAN_INITED();
if (!flags()->replace_str) {
@@ -664,7 +677,7 @@ static void AtCxaAtexit(void *unused) {
INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
void *dso_handle) {
#if SANITIZER_MAC
- if (!asan_inited) return REAL(__cxa_atexit)(func, arg, dso_handle);
+ if (UNLIKELY(!asan_inited)) return REAL(__cxa_atexit)(func, arg, dso_handle);
#endif
ENSURE_ASAN_INITED();
int res = REAL(__cxa_atexit)(func, arg, dso_handle);
@@ -673,6 +686,16 @@ INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
}
#endif // ASAN_INTERCEPT___CXA_ATEXIT
+#if ASAN_INTERCEPT_FORK
+INTERCEPTOR(int, fork, void) {
+ ENSURE_ASAN_INITED();
+ if (common_flags()->coverage) CovBeforeFork();
+ int pid = REAL(fork)();
+ if (common_flags()->coverage) CovAfterFork(pid);
+ return pid;
+}
+#endif // ASAN_INTERCEPT_FORK
+
#if SANITIZER_WINDOWS
INTERCEPTOR_WINAPI(DWORD, CreateThread,
void* security, uptr stack_size,
@@ -694,6 +717,9 @@ INTERCEPTOR_WINAPI(DWORD, CreateThread,
namespace __asan {
void InitializeWindowsInterceptors() {
ASAN_INTERCEPT_FUNC(CreateThread);
+ ASAN_INTERCEPT_FUNC(RaiseException);
+ ASAN_INTERCEPT_FUNC(_except_handler3);
+ ASAN_INTERCEPT_FUNC(_except_handler4);
}
} // namespace __asan
@@ -705,7 +731,7 @@ void InitializeAsanInterceptors() {
static bool was_called_once;
CHECK(was_called_once == false);
was_called_once = true;
- SANITIZER_COMMON_INTERCEPTORS_INIT;
+ InitializeCommonInterceptors();
// Intercept mem* functions.
ASAN_INTERCEPT_FUNC(memcmp);
@@ -741,20 +767,16 @@ void InitializeAsanInterceptors() {
ASAN_INTERCEPT_FUNC(strtoll);
#endif
-#if ASAN_INTERCEPT_MLOCKX
- // Intercept mlock/munlock.
- ASAN_INTERCEPT_FUNC(mlock);
- ASAN_INTERCEPT_FUNC(munlock);
- ASAN_INTERCEPT_FUNC(mlockall);
- ASAN_INTERCEPT_FUNC(munlockall);
-#endif
-
  // Intercept signal- and jump-related functions.
ASAN_INTERCEPT_FUNC(longjmp);
#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
ASAN_INTERCEPT_FUNC(sigaction);
+#if SANITIZER_ANDROID
+ ASAN_INTERCEPT_FUNC(bsd_signal);
+#else
ASAN_INTERCEPT_FUNC(signal);
#endif
+#endif
#if ASAN_INTERCEPT_SWAPCONTEXT
ASAN_INTERCEPT_FUNC(swapcontext);
#endif
@@ -767,7 +789,7 @@ void InitializeAsanInterceptors() {
// Intercept exception handling functions.
#if ASAN_INTERCEPT___CXA_THROW
- INTERCEPT_FUNCTION(__cxa_throw);
+ ASAN_INTERCEPT_FUNC(__cxa_throw);
#endif
// Intercept threading-related functions
@@ -780,14 +802,16 @@ void InitializeAsanInterceptors() {
ASAN_INTERCEPT_FUNC(__cxa_atexit);
#endif
+#if ASAN_INTERCEPT_FORK
+ ASAN_INTERCEPT_FUNC(fork);
+#endif
+
// Some Windows-specific interceptors.
#if SANITIZER_WINDOWS
InitializeWindowsInterceptors();
#endif
- if (common_flags()->verbosity > 0) {
- Report("AddressSanitizer: libc interceptors initialized\n");
- }
+ VReport(1, "AddressSanitizer: libc interceptors initialized\n");
}
} // namespace __asan
diff --git a/libsanitizer/asan/asan_interceptors.h b/libsanitizer/asan/asan_interceptors.h
index cae4c7f012..95a75db4e0 100644
--- a/libsanitizer/asan/asan_interceptors.h
+++ b/libsanitizer/asan/asan_interceptors.h
@@ -13,13 +13,76 @@
#define ASAN_INTERCEPTORS_H
#include "asan_internal.h"
-#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_interception.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+
+// Use macros to describe whether a specific function should be
+// intercepted on a given platform.
+#if !SANITIZER_WINDOWS
+# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
+# define ASAN_INTERCEPT__LONGJMP 1
+# define ASAN_INTERCEPT_STRDUP 1
+# define ASAN_INTERCEPT_INDEX 1
+# define ASAN_INTERCEPT_PTHREAD_CREATE 1
+# define ASAN_INTERCEPT_FORK 1
+#else
+# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
+# define ASAN_INTERCEPT__LONGJMP 0
+# define ASAN_INTERCEPT_STRDUP 0
+# define ASAN_INTERCEPT_INDEX 0
+# define ASAN_INTERCEPT_PTHREAD_CREATE 0
+# define ASAN_INTERCEPT_FORK 0
+#endif
+
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
+# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
+#else
+# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
+#endif
+
+#if !SANITIZER_MAC
+# define ASAN_INTERCEPT_STRNLEN 1
+#else
+# define ASAN_INTERCEPT_STRNLEN 0
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# define ASAN_INTERCEPT_SWAPCONTEXT 1
+#else
+# define ASAN_INTERCEPT_SWAPCONTEXT 0
+#endif
+
+#if !SANITIZER_WINDOWS
+# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 1
+#else
+# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 0
+#endif
+
+#if !SANITIZER_WINDOWS
+# define ASAN_INTERCEPT_SIGLONGJMP 1
+#else
+# define ASAN_INTERCEPT_SIGLONGJMP 0
+#endif
+
+// Android bug: https://code.google.com/p/android/issues/detail?id=61799
+#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && \
+ !(SANITIZER_ANDROID && defined(__i386))
+# define ASAN_INTERCEPT___CXA_THROW 1
+#else
+# define ASAN_INTERCEPT___CXA_THROW 0
+#endif
+
+#if !SANITIZER_WINDOWS
+# define ASAN_INTERCEPT___CXA_ATEXIT 1
+#else
+# define ASAN_INTERCEPT___CXA_ATEXIT 0
+#endif
DECLARE_REAL(int, memcmp, const void *a1, const void *a2, uptr size)
DECLARE_REAL(void*, memcpy, void *to, const void *from, uptr size)
DECLARE_REAL(void*, memset, void *block, int c, uptr size)
DECLARE_REAL(char*, strchr, const char *str, int c)
-DECLARE_REAL(uptr, strlen, const char *s)
+DECLARE_REAL(SIZE_T, strlen, const char *s)
DECLARE_REAL(char*, strncpy, char *to, const char *from, uptr size)
DECLARE_REAL(uptr, strnlen, const char *s, uptr maxlen)
DECLARE_REAL(char*, strstr, const char *s1, const char *s2)
@@ -31,6 +94,13 @@ namespace __asan {
void InitializeAsanInterceptors();
+#define ENSURE_ASAN_INITED() do { \
+ CHECK(!asan_init_is_running); \
+ if (UNLIKELY(!asan_inited)) { \
+ AsanInitFromRtl(); \
+ } \
+} while (0)
+
} // namespace __asan
#endif // ASAN_INTERCEPTORS_H
diff --git a/libsanitizer/asan/asan_interface_internal.h b/libsanitizer/asan/asan_interface_internal.h
index 7deed9f460..939327eb8a 100644
--- a/libsanitizer/asan/asan_interface_internal.h
+++ b/libsanitizer/asan/asan_interface_internal.h
@@ -15,21 +15,24 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "asan_init_version.h"
+
using __sanitizer::uptr;
extern "C" {
// This function should be called at the very beginning of the process,
// before any instrumented code is executed and before any call to malloc.
- // Everytime the asan ABI changes we also change the version number in this
- // name. Objects build with incompatible asan ABI version
- // will not link with run-time.
- // Changes between ABI versions:
- // v1=>v2: added 'module_name' to __asan_global
- // v2=>v3: stack frame description (created by the compiler)
- // contains the function PC as the 3-rd field (see
- // DescribeAddressIfStack).
- SANITIZER_INTERFACE_ATTRIBUTE void __asan_init_v3();
- #define __asan_init __asan_init_v3
+ // Please note that __asan_init is a macro that is replaced with
+ // __asan_init_vXXX at compile-time.
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_init();
+
+ // This structure is used to describe the source location of a place where
+ // global was defined.
+ struct __asan_global_source_location {
+ const char *filename;
+ int line_no;
+ int column_no;
+ };
// This structure describes an instrumented global variable.
struct __asan_global {
@@ -40,6 +43,8 @@ extern "C" {
const char *module_name; // Module name as a C string. This pointer is a
// unique identifier of a module.
uptr has_dynamic_init; // Non-zero if the global has dynamic initializer.
+ __asan_global_source_location *location; // Source location of a global,
+ // or NULL if it is unknown.
};
// These two functions should be called by the instrumented code.
@@ -75,7 +80,7 @@ extern "C" {
void __asan_unpoison_memory_region(void const volatile *addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
- bool __asan_address_is_poisoned(void const volatile *addr);
+ int __asan_address_is_poisoned(void const volatile *addr);
SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_region_is_poisoned(uptr beg, uptr size);
@@ -84,8 +89,41 @@ extern "C" {
void __asan_describe_address(uptr addr);
SANITIZER_INTERFACE_ATTRIBUTE
+ int __asan_report_present();
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_report_pc();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_report_bp();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_report_sp();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_report_address();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ int __asan_get_report_access_type();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_report_access_size();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ const char * __asan_get_report_description();
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ const char * __asan_locate_address(uptr addr, char *name, uptr name_size,
+ uptr *region_address, uptr *region_size);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size,
+ u32 *thread_id);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_free_stack(uptr addr, uptr *trace, uptr size,
+ u32 *thread_id);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_get_shadow_mapping(uptr *shadow_scale, uptr *shadow_offset);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
void __asan_report_error(uptr pc, uptr bp, uptr sp,
- uptr addr, bool is_write, uptr access_size);
+ uptr addr, int is_write, uptr access_size);
SANITIZER_INTERFACE_ATTRIBUTE
int __asan_set_error_exit_code(int exit_code);
@@ -97,32 +135,46 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __asan_on_error();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- /* OPTIONAL */ bool __asan_symbolize(const void *pc, char *out_buffer,
- int out_size);
-
- SANITIZER_INTERFACE_ATTRIBUTE
- uptr __asan_get_estimated_allocated_size(uptr size);
-
- SANITIZER_INTERFACE_ATTRIBUTE bool __asan_get_ownership(const void *p);
- SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_allocated_size(const void *p);
- SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_current_allocated_bytes();
- SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_heap_size();
- SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_free_bytes();
- SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_unmapped_bytes();
SANITIZER_INTERFACE_ATTRIBUTE void __asan_print_accumulated_stats();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ const char* __asan_default_options();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- /* OPTIONAL */ void __asan_malloc_hook(void *ptr, uptr size);
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- /* OPTIONAL */ void __asan_free_hook(void *ptr);
-
// Global flag, copy of ASAN_OPTIONS=detect_stack_use_after_return
SANITIZER_INTERFACE_ATTRIBUTE
extern int __asan_option_detect_stack_use_after_return;
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ extern uptr *__asan_test_only_reported_buggy_pointer;
+
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load1(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load2(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load4(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load8(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load16(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store1(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store2(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store4(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store8(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store16(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_loadN(uptr p, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_storeN(uptr p, uptr size);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void* __asan_memcpy(void *dst, const void *src, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void* __asan_memset(void *s, int c, uptr n);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void* __asan_memmove(void* dest, const void* src, uptr n);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_poison_cxx_array_cookie(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_load_cxx_array_cookie(uptr *p);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_poison_intra_object_redzone(uptr p, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_unpoison_intra_object_redzone(uptr p, uptr size);
} // extern "C"
#endif // ASAN_INTERFACE_INTERNAL_H
diff --git a/libsanitizer/asan/asan_internal.h b/libsanitizer/asan/asan_internal.h
index ede273a717..8911575d84 100644
--- a/libsanitizer/asan/asan_internal.h
+++ b/libsanitizer/asan/asan_internal.h
@@ -28,26 +28,11 @@
// Build-time configuration options.
-// If set, asan will install its own SEGV signal handler.
-#ifndef ASAN_NEEDS_SEGV
-# if SANITIZER_ANDROID == 1
-# define ASAN_NEEDS_SEGV 0
-# else
-# define ASAN_NEEDS_SEGV 1
-# endif
-#endif
-
// If set, asan will intercept C++ exception api call(s).
#ifndef ASAN_HAS_EXCEPTIONS
# define ASAN_HAS_EXCEPTIONS 1
#endif
-// If set, asan uses the values of SHADOW_SCALE and SHADOW_OFFSET
-// provided by the instrumented objects. Otherwise constants are used.
-#ifndef ASAN_FLEXIBLE_MAPPING_AND_OFFSET
-# define ASAN_FLEXIBLE_MAPPING_AND_OFFSET 0
-#endif
-
// If set, values like allocator chunk size, as well as defaults for some flags
// will be changed towards less memory overhead.
#ifndef ASAN_LOW_MEMORY
@@ -58,36 +43,41 @@
# endif
#endif
-#ifndef ASAN_USE_PREINIT_ARRAY
-# define ASAN_USE_PREINIT_ARRAY (SANITIZER_LINUX && !SANITIZER_ANDROID)
+#ifndef ASAN_DYNAMIC
+# ifdef PIC
+# define ASAN_DYNAMIC 1
+# else
+# define ASAN_DYNAMIC 0
+# endif
#endif
// All internal functions in asan reside inside the __asan namespace
// to avoid namespace collisions with the user programs.
-// Seperate namespace also makes it simpler to distinguish the asan run-time
+// Separate namespace also makes it simpler to distinguish the asan run-time
// functions from the instrumented user code in a profile.
namespace __asan {
class AsanThread;
using __sanitizer::StackTrace;
+void AsanInitFromRtl();
+
// asan_rtl.cc
void NORETURN ShowStatsAndAbort();
-void ReplaceOperatorsNewAndDelete();
// asan_malloc_linux.cc / asan_malloc_mac.cc
void ReplaceSystemMalloc();
// asan_linux.cc / asan_mac.cc / asan_win.cc
void *AsanDoesNotSupportStaticLinkage();
+void AsanCheckDynamicRTPrereqs();
+void AsanCheckIncompatibleRT();
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);
+void AsanOnSIGSEGV(int, void *siginfo, void *context);
void MaybeReexec();
bool AsanInterceptsSignal(int signum);
-void SetAlternateSignalStack();
-void UnsetAlternateSignalStack();
-void InstallSignalHandlers();
void ReadContextStack(void *context, uptr *stack, uptr *ssize);
void AsanPlatformThreadInit();
void StopInitOrderChecking();
@@ -100,7 +90,11 @@ void PlatformTSDDtor(void *tsd);
void AppendToErrorMessageBuffer(const char *buffer);
-// Platfrom-specific options.
+void ParseExtraActivationFlags();
+
+void *AsanDlSymNext(const char *sym);
+
+// Platform-specific options.
#if SANITIZER_MAC
bool PlatformHasDifferentMemcpyAndMemmove();
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
@@ -112,9 +106,9 @@ bool PlatformHasDifferentMemcpyAndMemmove();
// Add convenient macro for interface functions that may be represented as
// weak hooks.
#define ASAN_MALLOC_HOOK(ptr, size) \
- if (&__asan_malloc_hook) __asan_malloc_hook(ptr, size)
+ if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(ptr, size)
#define ASAN_FREE_HOOK(ptr) \
- if (&__asan_free_hook) __asan_free_hook(ptr)
+ if (&__sanitizer_free_hook) __sanitizer_free_hook(ptr)
#define ASAN_ON_ERROR() \
if (&__asan_on_error) __asan_on_error()
@@ -138,6 +132,8 @@ const int kAsanContiguousContainerOOBMagic = 0xfc;
const int kAsanStackUseAfterScopeMagic = 0xf8;
const int kAsanGlobalRedzoneMagic = 0xf9;
const int kAsanInternalHeapMagic = 0xfe;
+const int kAsanArrayCookieMagic = 0xac;
+const int kAsanIntraObjectRedzone = 0xbb;
static const uptr kCurrentStackFrameMagic = 0x41B58AB3;
static const uptr kRetiredStackFrameMagic = 0x45E0360E;
diff --git a/libsanitizer/asan/asan_linux.cc b/libsanitizer/asan/asan_linux.cc
index 0692eb1f45..c504168b61 100644
--- a/libsanitizer/asan/asan_linux.cc
+++ b/libsanitizer/asan/asan_linux.cc
@@ -11,11 +11,13 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_freebsd.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
@@ -24,18 +26,43 @@
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
+#include <dlfcn.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
#include <unwind.h>
-#if !SANITIZER_ANDROID
-// FIXME: where to get ucontext on Android?
-#include <sys/ucontext.h>
+#if SANITIZER_FREEBSD
+#include <sys/link_elf.h>
#endif
+#if SANITIZER_ANDROID || SANITIZER_FREEBSD
+#include <ucontext.h>
extern "C" void* _DYNAMIC;
+#else
+#include <sys/ucontext.h>
+#include <link.h>
+#endif
+
+// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in
+// 32-bit mode.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32) && \
+ __FreeBSD_version <= 902001 // v9.2
+#define ucontext_t xucontext_t
+#endif
+
+typedef enum {
+ ASAN_RT_VERSION_UNDEFINED = 0,
+ ASAN_RT_VERSION_DYNAMIC,
+ ASAN_RT_VERSION_STATIC,
+} asan_rt_version_t;
+
+// FIXME: perhaps also store abi version here?
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+asan_rt_version_t __asan_rt_version;
+}
namespace __asan {
@@ -48,38 +75,126 @@ void *AsanDoesNotSupportStaticLinkage() {
return &_DYNAMIC; // defined in link.h
}
-void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
#if SANITIZER_ANDROID
- *pc = *sp = *bp = 0;
-#elif defined(__arm__)
+// FIXME: should we do anything for Android?
+void AsanCheckDynamicRTPrereqs() {}
+void AsanCheckIncompatibleRT() {}
+#else
+static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,
+ void *data) {
+ // Continue until the first dynamic library is found
+ if (!info->dlpi_name || info->dlpi_name[0] == 0)
+ return 0;
+
+ // Ignore vDSO
+ if (internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0)
+ return 0;
+
+ *(const char **)data = info->dlpi_name;
+ return 1;
+}
+
+static bool IsDynamicRTName(const char *libname) {
+ return internal_strstr(libname, "libclang_rt.asan") ||
+ internal_strstr(libname, "libasan.so");
+}
+
+static void ReportIncompatibleRT() {
+ Report("Your application is linked against incompatible ASan runtimes.\n");
+ Die();
+}
+
+void AsanCheckDynamicRTPrereqs() {
+ // Ensure that dynamic RT is the first DSO in the list
+ const char *first_dso_name = 0;
+ dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name);
+ if (first_dso_name && !IsDynamicRTName(first_dso_name)) {
+ Report("ASan runtime does not come first in initial library list; "
+ "you should either link runtime to your application or "
+ "manually preload it with LD_PRELOAD.\n");
+ Die();
+ }
+}
+
+void AsanCheckIncompatibleRT() {
+ if (ASAN_DYNAMIC) {
+ if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) {
+ __asan_rt_version = ASAN_RT_VERSION_DYNAMIC;
+ } else if (__asan_rt_version != ASAN_RT_VERSION_DYNAMIC) {
+ ReportIncompatibleRT();
+ }
+ } else {
+ if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) {
+ // Ensure that dynamic runtime is not present. We should detect it
+ // as early as possible, otherwise ASan interceptors could bind to
+ // the functions in dynamic ASan runtime instead of the functions in
+ // system libraries, causing crashes later in ASan initialization.
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ char filename[128];
+ while (proc_maps.Next(0, 0, 0, filename, sizeof(filename), 0)) {
+ if (IsDynamicRTName(filename)) {
+ Report("Your application is linked against "
+ "incompatible ASan runtimes.\n");
+ Die();
+ }
+ }
+ __asan_rt_version = ASAN_RT_VERSION_STATIC;
+ } else if (__asan_rt_version != ASAN_RT_VERSION_STATIC) {
+ ReportIncompatibleRT();
+ }
+ }
+}
+#endif // SANITIZER_ANDROID
+
+void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
+#if defined(__arm__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.arm_pc;
*bp = ucontext->uc_mcontext.arm_fp;
*sp = ucontext->uc_mcontext.arm_sp;
-# elif defined(__hppa__)
+#elif defined(__aarch64__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.pc;
+ *bp = ucontext->uc_mcontext.regs[29];
+ *sp = ucontext->uc_mcontext.sp;
+#elif defined(__hppa__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.sc_iaoq[0];
/* GCC uses %r3 whenever a frame pointer is needed. */
*bp = ucontext->uc_mcontext.sc_gr[3];
*sp = ucontext->uc_mcontext.sc_gr[30];
-# elif defined(__x86_64__)
+#elif defined(__x86_64__)
+# if SANITIZER_FREEBSD
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.mc_rip;
+ *bp = ucontext->uc_mcontext.mc_rbp;
+ *sp = ucontext->uc_mcontext.mc_rsp;
+# else
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[REG_RIP];
*bp = ucontext->uc_mcontext.gregs[REG_RBP];
*sp = ucontext->uc_mcontext.gregs[REG_RSP];
-# elif defined(__i386__)
+# endif
+#elif defined(__i386__)
+# if SANITIZER_FREEBSD
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.mc_eip;
+ *bp = ucontext->uc_mcontext.mc_ebp;
+ *sp = ucontext->uc_mcontext.mc_esp;
+# else
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[REG_EIP];
*bp = ucontext->uc_mcontext.gregs[REG_EBP];
*sp = ucontext->uc_mcontext.gregs[REG_ESP];
-# elif defined(__powerpc__) || defined(__powerpc64__)
+# endif
+#elif defined(__powerpc__) || defined(__powerpc64__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.regs->nip;
*sp = ucontext->uc_mcontext.regs->gpr[PT_R1];
// The powerpc{,64}-linux ABIs do not specify r31 as the frame
// pointer, but GCC always uses r31 when we need a frame pointer.
*bp = ucontext->uc_mcontext.regs->gpr[PT_R31];
-# elif defined(__sparc__)
+#elif defined(__sparc__)
ucontext_t *ucontext = (ucontext_t*)context;
uptr *stk_ptr;
# if defined (__arch64__)
@@ -93,7 +208,7 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
stk_ptr = (uptr *) *sp;
*bp = stk_ptr[15];
# endif
-# elif defined(__mips__)
+#elif defined(__mips__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[31];
*bp = ucontext->uc_mcontext.gregs[30];
@@ -104,7 +219,7 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
}
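For context, the 'context' argument reaching GetPcSpBp() originates from a SA_SIGINFO signal handler, whose third parameter is a ucontext_t in disguise. A minimal standalone sketch for x86_64/Linux, using the same glibc gregs indices as the #else branch above (illustrative only, not the runtime's actual handler installation):

    #define _GNU_SOURCE   // exposes REG_RIP/REG_RSP/REG_RBP in <sys/ucontext.h>
    #include <signal.h>
    #include <stdio.h>
    #include <ucontext.h>
    #include <unistd.h>

    static void segv_handler(int sig, siginfo_t *info, void *context) {
      ucontext_t *uc = (ucontext_t *)context;
      unsigned long pc = uc->uc_mcontext.gregs[REG_RIP];  // x86_64/Linux layout
      unsigned long sp = uc->uc_mcontext.gregs[REG_RSP];
      unsigned long bp = uc->uc_mcontext.gregs[REG_RBP];
      // fprintf is not async-signal-safe; fine for a demo, not a real handler.
      fprintf(stderr, "SEGV at %p, pc=%lx sp=%lx bp=%lx\n",
              info->si_addr, pc, sp, bp);
      _exit(1);
    }

    static void install_segv_handler(void) {
      struct sigaction sa = {};
      sa.sa_sigaction = segv_handler;
      sa.sa_flags = SA_SIGINFO;   // request the siginfo/ucontext arguments
      sigaction(SIGSEGV, &sa, 0);
    }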
bool AsanInterceptsSignal(int signum) {
- return signum == SIGSEGV && flags()->handle_segv;
+ return signum == SIGSEGV && common_flags()->handle_segv;
}
void AsanPlatformThreadInit() {
@@ -123,6 +238,10 @@ void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
}
#endif
+void *AsanDlSymNext(const char *sym) {
+ return dlsym(RTLD_NEXT, sym);
+}
+
} // namespace __asan
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
diff --git a/libsanitizer/asan/asan_mac.cc b/libsanitizer/asan/asan_mac.cc
index 8d01843afa..70823bdef9 100644
--- a/libsanitizer/asan/asan_mac.cc
+++ b/libsanitizer/asan/asan_mac.cc
@@ -15,12 +15,12 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
-#include "asan_mac.h"
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_mac.h"
#include <crt_externs.h> // for _NSGetArgv
#include <dlfcn.h> // for dladdr()
@@ -51,43 +51,6 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
# endif // SANITIZER_WORDSIZE
}
-MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED;
-
-MacosVersion GetMacosVersionInternal() {
- int mib[2] = { CTL_KERN, KERN_OSRELEASE };
- char version[100];
- uptr len = 0, maxlen = sizeof(version) / sizeof(version[0]);
- for (uptr i = 0; i < maxlen; i++) version[i] = '\0';
- // Get the version length.
- CHECK_NE(sysctl(mib, 2, 0, &len, 0, 0), -1);
- CHECK_LT(len, maxlen);
- CHECK_NE(sysctl(mib, 2, version, &len, 0, 0), -1);
- switch (version[0]) {
- case '9': return MACOS_VERSION_LEOPARD;
- case '1': {
- switch (version[1]) {
- case '0': return MACOS_VERSION_SNOW_LEOPARD;
- case '1': return MACOS_VERSION_LION;
- case '2': return MACOS_VERSION_MOUNTAIN_LION;
- case '3': return MACOS_VERSION_MAVERICKS;
- default: return MACOS_VERSION_UNKNOWN;
- }
- }
- default: return MACOS_VERSION_UNKNOWN;
- }
-}
-
-MacosVersion GetMacosVersion() {
- atomic_uint32_t *cache =
- reinterpret_cast<atomic_uint32_t*>(&cached_macos_version);
- MacosVersion result =
- static_cast<MacosVersion>(atomic_load(cache, memory_order_acquire));
- if (result == MACOS_VERSION_UNINITIALIZED) {
- result = GetMacosVersionInternal();
- atomic_store(cache, result, memory_order_release);
- }
- return result;
-}
bool PlatformHasDifferentMemcpyAndMemmove() {
// On OS X 10.7 memcpy() and memmove() are both resolved
@@ -172,12 +135,10 @@ void MaybeReexec() {
// Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name.
setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0);
}
- if (common_flags()->verbosity >= 1) {
- Report("exec()-ing the program with\n");
- Report("%s=%s\n", kDyldInsertLibraries, new_env);
- Report("to enable ASan wrappers.\n");
- Report("Set ASAN_OPTIONS=allow_reexec=0 to disable this.\n");
- }
+ VReport(1, "exec()-ing the program with\n");
+ VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env);
+ VReport(1, "to enable ASan wrappers.\n");
+ VReport(1, "Set ASAN_OPTIONS=allow_reexec=0 to disable this.\n");
execv(program_name, *_NSGetArgv());
} else {
// DYLD_INSERT_LIBRARIES is set and contains the runtime library.
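The re-exec path above depends on dyld honouring DYLD_INSERT_LIBRARIES. A stripped-down sketch of the same trick with public macOS APIs (error handling and the real dylib path elided; illustrative only):

    #include <crt_externs.h>   // _NSGetArgv
    #include <mach-o/dyld.h>   // _NSGetExecutablePath
    #include <stdint.h>
    #include <stdlib.h>
    #include <unistd.h>

    static void ReexecWithInsertedDylib(const char *dylib_path) {
      // Ask dyld to load the runtime before any other image on the next exec.
      setenv("DYLD_INSERT_LIBRARIES", dylib_path, /*overwrite=*/1);
      char program[4096];
      uint32_t size = sizeof(program);
      if (_NSGetExecutablePath(program, &size) != 0) return;  // buffer too small
      execv(program, *_NSGetArgv());  // does not return on success
    }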
@@ -236,8 +197,15 @@ void *AsanDoesNotSupportStaticLinkage() {
return 0;
}
+// No-op. Mac does not support static linkage anyway.
+void AsanCheckDynamicRTPrereqs() {}
+
+// No-op. Mac does not support static linkage anyway.
+void AsanCheckIncompatibleRT() {}
+
bool AsanInterceptsSignal(int signum) {
- return (signum == SIGSEGV || signum == SIGBUS) && flags()->handle_segv;
+ return (signum == SIGSEGV || signum == SIGBUS) &&
+ common_flags()->handle_segv;
}
void AsanPlatformThreadInit() {
@@ -309,11 +277,10 @@ extern "C"
void asan_dispatch_call_block_and_release(void *block) {
GET_STACK_TRACE_THREAD;
asan_block_context_t *context = (asan_block_context_t*)block;
- if (common_flags()->verbosity >= 2) {
- Report("asan_dispatch_call_block_and_release(): "
- "context: %p, pthread_self: %p\n",
- block, pthread_self());
- }
+ VReport(2,
+ "asan_dispatch_call_block_and_release(): "
+ "context: %p, pthread_self: %p\n",
+ block, pthread_self());
asan_register_worker_thread(context->parent_tid, &stack);
// Call the original dispatcher for the block.
context->func(context->block);
@@ -328,7 +295,7 @@ using namespace __asan; // NOLINT
// The caller retains control of the allocated context.
extern "C"
asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
- StackTrace *stack) {
+ BufferedStackTrace *stack) {
asan_block_context_t *asan_ctxt =
(asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack);
asan_ctxt->block = ctxt;
@@ -347,10 +314,10 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
if (common_flags()->verbosity >= 2) { \
Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
asan_ctxt, pthread_self()); \
- PRINT_CURRENT_STACK(); \
- } \
- return REAL(dispatch_x_f)(dq, (void*)asan_ctxt, \
- asan_dispatch_call_block_and_release); \
+ PRINT_CURRENT_STACK(); \
+ } \
+ return REAL(dispatch_x_f)(dq, (void*)asan_ctxt, \
+ asan_dispatch_call_block_and_release); \
}
INTERCEPT_DISPATCH_X_F_3(dispatch_async_f)
@@ -386,7 +353,6 @@ INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
#if !defined(MISSING_BLOCKS_SUPPORT)
extern "C" {
-// FIXME: consolidate these declarations with asan_intercepted_functions.h.
void dispatch_async(dispatch_queue_t dq, void(^work)(void));
void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
void(^work)(void));
@@ -406,32 +372,44 @@ void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void));
work(); \
}
+// Forces the compiler to generate a frame pointer in the function.
+#define ENABLE_FRAME_POINTER \
+ do { \
+ volatile uptr enable_fp; \
+ enable_fp = GET_CURRENT_FRAME(); \
+ } while (0)
+
INTERCEPTOR(void, dispatch_async,
dispatch_queue_t dq, void(^work)(void)) {
+ ENABLE_FRAME_POINTER;
GET_ASAN_BLOCK(work);
REAL(dispatch_async)(dq, asan_block);
}
INTERCEPTOR(void, dispatch_group_async,
dispatch_group_t dg, dispatch_queue_t dq, void(^work)(void)) {
+ ENABLE_FRAME_POINTER;
GET_ASAN_BLOCK(work);
REAL(dispatch_group_async)(dg, dq, asan_block);
}
INTERCEPTOR(void, dispatch_after,
dispatch_time_t when, dispatch_queue_t queue, void(^work)(void)) {
+ ENABLE_FRAME_POINTER;
GET_ASAN_BLOCK(work);
REAL(dispatch_after)(when, queue, asan_block);
}
INTERCEPTOR(void, dispatch_source_set_cancel_handler,
dispatch_source_t ds, void(^work)(void)) {
+ ENABLE_FRAME_POINTER;
GET_ASAN_BLOCK(work);
REAL(dispatch_source_set_cancel_handler)(ds, asan_block);
}
INTERCEPTOR(void, dispatch_source_set_event_handler,
dispatch_source_t ds, void(^work)(void)) {
+ ENABLE_FRAME_POINTER;
GET_ASAN_BLOCK(work);
REAL(dispatch_source_set_event_handler)(ds, asan_block);
}
diff --git a/libsanitizer/asan/asan_mac.h b/libsanitizer/asan/asan_mac.h
deleted file mode 100644
index 2d1d4b0bfb..0000000000
--- a/libsanitizer/asan/asan_mac.h
+++ /dev/null
@@ -1,57 +0,0 @@
-//===-- asan_mac.h ----------------------------------------------*- C++ -*-===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// Mac-specific ASan definitions.
-//===----------------------------------------------------------------------===//
-#ifndef ASAN_MAC_H
-#define ASAN_MAC_H
-
-// CF_RC_BITS, the layout of CFRuntimeBase and __CFStrIsConstant are internal
-// and subject to change in further CoreFoundation versions. Apple does not
-// guarantee any binary compatibility from release to release.
-
-// See http://opensource.apple.com/source/CF/CF-635.15/CFInternal.h
-#if defined(__BIG_ENDIAN__)
-#define CF_RC_BITS 0
-#endif
-
-#if defined(__LITTLE_ENDIAN__)
-#define CF_RC_BITS 3
-#endif
-
-// See http://opensource.apple.com/source/CF/CF-635.15/CFRuntime.h
-typedef struct __CFRuntimeBase {
- uptr _cfisa;
- u8 _cfinfo[4];
-#if __LP64__
- u32 _rc;
-#endif
-} CFRuntimeBase;
-
-enum MacosVersion {
- MACOS_VERSION_UNINITIALIZED = 0,
- MACOS_VERSION_UNKNOWN,
- MACOS_VERSION_LEOPARD,
- MACOS_VERSION_SNOW_LEOPARD,
- MACOS_VERSION_LION,
- MACOS_VERSION_MOUNTAIN_LION,
- MACOS_VERSION_MAVERICKS
-};
-
-// Used by asan_malloc_mac.cc and asan_mac.cc
-extern "C" void __CFInitialize();
-
-namespace __asan {
-
-MacosVersion GetMacosVersion();
-void MaybeReplaceCFAllocator();
-
-} // namespace __asan
-
-#endif // ASAN_MAC_H
diff --git a/libsanitizer/asan/asan_malloc_linux.cc b/libsanitizer/asan/asan_malloc_linux.cc
index e3495cb090..d03f1bb89c 100644
--- a/libsanitizer/asan/asan_malloc_linux.cc
+++ b/libsanitizer/asan/asan_malloc_linux.cc
@@ -13,48 +13,14 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_stack.h"
-#if SANITIZER_ANDROID
-DECLARE_REAL_AND_INTERCEPTOR(void*, malloc, uptr size)
-DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
-DECLARE_REAL_AND_INTERCEPTOR(void*, calloc, uptr nmemb, uptr size)
-DECLARE_REAL_AND_INTERCEPTOR(void*, realloc, void *ptr, uptr size)
-DECLARE_REAL_AND_INTERCEPTOR(void*, memalign, uptr boundary, uptr size)
-
-struct MallocDebug {
- void* (*malloc)(uptr bytes);
- void (*free)(void* mem);
- void* (*calloc)(uptr n_elements, uptr elem_size);
- void* (*realloc)(void* oldMem, uptr bytes);
- void* (*memalign)(uptr alignment, uptr bytes);
-};
-
-const MallocDebug asan_malloc_dispatch ALIGNED(32) = {
- WRAP(malloc), WRAP(free), WRAP(calloc), WRAP(realloc), WRAP(memalign)
-};
-
-extern "C" const MallocDebug* __libc_malloc_dispatch;
-
-namespace __asan {
-void ReplaceSystemMalloc() {
- __libc_malloc_dispatch = &asan_malloc_dispatch;
-}
-} // namespace __asan
-
-#else // ANDROID
-
-namespace __asan {
-void ReplaceSystemMalloc() {
-}
-} // namespace __asan
-#endif // ANDROID
-
// ---------------------- Replacement functions ---------------- {{{1
using namespace __asan; // NOLINT
@@ -74,7 +40,7 @@ INTERCEPTOR(void*, malloc, uptr size) {
}
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
- if (!asan_inited) {
+ if (UNLIKELY(!asan_inited)) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const uptr kCallocPoolSize = 1024;
static uptr calloc_memory_for_dlsym[kCallocPoolSize];
@@ -99,8 +65,17 @@ INTERCEPTOR(void*, memalign, uptr boundary, uptr size) {
return asan_memalign(boundary, size, &stack, FROM_MALLOC);
}
-INTERCEPTOR(void*, __libc_memalign, uptr align, uptr s)
- ALIAS("memalign");
+INTERCEPTOR(void*, aligned_alloc, uptr boundary, uptr size) {
+ GET_STACK_TRACE_MALLOC;
+ return asan_memalign(boundary, size, &stack, FROM_MALLOC);
+}
+
+INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) {
+ GET_STACK_TRACE_MALLOC;
+ void *res = asan_memalign(boundary, size, &stack, FROM_MALLOC);
+ DTLS_on_libc_memalign(res, size * boundary);
+ return res;
+}
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
GET_CURRENT_PC_BP_SP;
@@ -146,4 +121,64 @@ INTERCEPTOR(void, malloc_stats, void) {
__asan_print_accumulated_stats();
}
-#endif // SANITIZER_LINUX
+#if SANITIZER_ANDROID
+// Format of __libc_malloc_dispatch has changed in Android L.
+// While we are moving towards a solution that does not depend on bionic
+// internals, here is something to support both K* and L releases.
+struct MallocDebugK {
+ void *(*malloc)(uptr bytes);
+ void (*free)(void *mem);
+ void *(*calloc)(uptr n_elements, uptr elem_size);
+ void *(*realloc)(void *oldMem, uptr bytes);
+ void *(*memalign)(uptr alignment, uptr bytes);
+ uptr (*malloc_usable_size)(void *mem);
+};
+
+struct MallocDebugL {
+ void *(*calloc)(uptr n_elements, uptr elem_size);
+ void (*free)(void *mem);
+ fake_mallinfo (*mallinfo)(void);
+ void *(*malloc)(uptr bytes);
+ uptr (*malloc_usable_size)(void *mem);
+ void *(*memalign)(uptr alignment, uptr bytes);
+ int (*posix_memalign)(void **memptr, uptr alignment, uptr size);
+ void* (*pvalloc)(uptr size);
+ void *(*realloc)(void *oldMem, uptr bytes);
+ void* (*valloc)(uptr size);
+};
+
+ALIGNED(32) const MallocDebugK asan_malloc_dispatch_k = {
+ WRAP(malloc), WRAP(free), WRAP(calloc),
+ WRAP(realloc), WRAP(memalign), WRAP(malloc_usable_size)};
+
+ALIGNED(32) const MallocDebugL asan_malloc_dispatch_l = {
+ WRAP(calloc), WRAP(free), WRAP(mallinfo),
+ WRAP(malloc), WRAP(malloc_usable_size), WRAP(memalign),
+ WRAP(posix_memalign), WRAP(pvalloc), WRAP(realloc),
+ WRAP(valloc)};
+
+namespace __asan {
+void ReplaceSystemMalloc() {
+ void **__libc_malloc_dispatch_p =
+ (void **)AsanDlSymNext("__libc_malloc_dispatch");
+ if (__libc_malloc_dispatch_p) {
+ // Decide on K vs L dispatch format by the presence of
+ // __libc_malloc_default_dispatch export in libc.
+ void *default_dispatch_p = AsanDlSymNext("__libc_malloc_default_dispatch");
+ if (default_dispatch_p)
+ *__libc_malloc_dispatch_p = (void *)&asan_malloc_dispatch_k;
+ else
+ *__libc_malloc_dispatch_p = (void *)&asan_malloc_dispatch_l;
+ }
+}
+} // namespace __asan
+
+#else // SANITIZER_ANDROID
+
+namespace __asan {
+void ReplaceSystemMalloc() {
+}
+} // namespace __asan
+#endif // SANITIZER_ANDROID
+
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
diff --git a/libsanitizer/asan/asan_malloc_mac.cc b/libsanitizer/asan/asan_malloc_mac.cc
index 342e806e3b..6a93ce1e80 100644
--- a/libsanitizer/asan/asan_malloc_mac.cc
+++ b/libsanitizer/asan/asan_malloc_mac.cc
@@ -22,10 +22,10 @@
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
-#include "asan_mac.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
+#include "sanitizer_common/sanitizer_mac.h"
// Similar code is used in Google Perftools,
// http://code.google.com/p/google-perftools.
@@ -39,7 +39,7 @@ static malloc_zone_t asan_zone;
INTERCEPTOR(malloc_zone_t *, malloc_create_zone,
vm_size_t start_size, unsigned zone_flags) {
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
uptr page_size = GetPageSizeCached();
uptr allocated_size = RoundUpTo(sizeof(asan_zone), page_size);
@@ -58,34 +58,34 @@ INTERCEPTOR(malloc_zone_t *, malloc_create_zone,
}
INTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) {
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
return &asan_zone;
}
INTERCEPTOR(malloc_zone_t *, malloc_default_purgeable_zone, void) {
// FIXME: ASan should support purgeable allocations.
// https://code.google.com/p/address-sanitizer/issues/detail?id=139
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
return &asan_zone;
}
INTERCEPTOR(void, malloc_make_purgeable, void *ptr) {
// FIXME: ASan should support purgeable allocations. Ignoring them is fine
// for now.
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
}
INTERCEPTOR(int, malloc_make_nonpurgeable, void *ptr) {
// FIXME: ASan should support purgeable allocations. Ignoring them is fine
// for now.
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
// Must return 0 if the contents were not purged since the last call to
// malloc_make_purgeable().
return 0;
}
INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
// Allocate |strlen("asan-") + 1 + internal_strlen(name)| bytes.
size_t buflen = 6 + (name ? internal_strlen(name) : 0);
InternalScopedBuffer<char> new_name(buflen);
@@ -100,44 +100,44 @@ INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
}
INTERCEPTOR(void *, malloc, size_t size) {
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
void *res = asan_malloc(size, &stack);
return res;
}
INTERCEPTOR(void, free, void *ptr) {
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
if (!ptr) return;
GET_STACK_TRACE_FREE;
asan_free(ptr, &stack, FROM_MALLOC);
}
INTERCEPTOR(void *, realloc, void *ptr, size_t size) {
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
}
INTERCEPTOR(void *, calloc, size_t nmemb, size_t size) {
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}
INTERCEPTOR(void *, valloc, size_t size) {
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
return asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
}
INTERCEPTOR(size_t, malloc_good_size, size_t size) {
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
return asan_zone.introspect->good_size(&asan_zone, size);
}
INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) {
- if (!asan_inited) __asan_init();
+ ENSURE_ASAN_INITED();
CHECK(memptr);
GET_STACK_TRACE_MALLOC;
void *result = asan_memalign(alignment, size, &stack, FROM_MALLOC);
@@ -157,7 +157,7 @@ size_t mz_size(malloc_zone_t* zone, const void* ptr) {
}
void *mz_malloc(malloc_zone_t *zone, size_t size) {
- if (!asan_inited) {
+ if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_malloc(system_malloc_zone, size);
}
@@ -166,7 +166,7 @@ void *mz_malloc(malloc_zone_t *zone, size_t size) {
}
void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
- if (!asan_inited) {
+ if (UNLIKELY(!asan_inited)) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const size_t kCallocPoolSize = 1024;
static uptr calloc_memory_for_dlsym[kCallocPoolSize];
@@ -182,7 +182,7 @@ void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
}
void *mz_valloc(malloc_zone_t *zone, size_t size) {
- if (!asan_inited) {
+ if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_valloc(system_malloc_zone, size);
}
@@ -240,7 +240,7 @@ void mz_destroy(malloc_zone_t* zone) {
#if defined(MAC_OS_X_VERSION_10_6) && \
MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
- if (!asan_inited) {
+ if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_memalign(system_malloc_zone, align, size);
}
diff --git a/libsanitizer/asan/asan_malloc_win.cc b/libsanitizer/asan/asan_malloc_win.cc
index 1f2495ffc5..bbcf80e96b 100644
--- a/libsanitizer/asan/asan_malloc_win.cc
+++ b/libsanitizer/asan/asan_malloc_win.cc
@@ -17,75 +17,81 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_stack.h"
-#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_interception.h"
#include <stddef.h>
-// ---------------------- Replacement functions ---------------- {{{1
using namespace __asan; // NOLINT
-// FIXME: Simply defining functions with the same signature in *.obj
-// files overrides the standard functions in *.lib
-// This works well for simple helloworld-like tests but might need to be
-// revisited in the future.
+// MT: Simply defining functions with the same signature in *.obj
+// files overrides the standard functions in the CRT.
+// MD: Memory allocation functions are defined in the CRT .dll,
+// so we have to intercept them before they are called for the first time.
+
+#if ASAN_DYNAMIC
+# define ALLOCATION_FUNCTION_ATTRIBUTE
+#else
+# define ALLOCATION_FUNCTION_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
+#endif
extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
void free(void *ptr) {
GET_STACK_TRACE_FREE;
return asan_free(ptr, &stack, FROM_MALLOC);
}
-SANITIZER_INTERFACE_ATTRIBUTE
-void _free_dbg(void* ptr, int) {
+ALLOCATION_FUNCTION_ATTRIBUTE
+void _free_dbg(void *ptr, int) {
free(ptr);
}
+ALLOCATION_FUNCTION_ATTRIBUTE
void cfree(void *ptr) {
- CHECK(!"cfree() should not be used on Windows?");
+ CHECK(!"cfree() should not be used on Windows");
}
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
void *malloc(size_t size) {
GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
}
-SANITIZER_INTERFACE_ATTRIBUTE
-void* _malloc_dbg(size_t size, int , const char*, int) {
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_malloc_dbg(size_t size, int, const char *, int) {
return malloc(size);
}
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
void *calloc(size_t nmemb, size_t size) {
GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}
-SANITIZER_INTERFACE_ATTRIBUTE
-void* _calloc_dbg(size_t n, size_t size, int, const char*, int) {
- return calloc(n, size);
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_calloc_dbg(size_t nmemb, size_t size, int, const char *, int) {
+ return calloc(nmemb, size);
}
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
void *_calloc_impl(size_t nmemb, size_t size, int *errno_tmp) {
return calloc(nmemb, size);
}
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
void *realloc(void *ptr, size_t size) {
GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
}
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
void *_realloc_dbg(void *ptr, size_t size, int) {
CHECK(!"_realloc_dbg should not exist!");
return 0;
}
-SANITIZER_INTERFACE_ATTRIBUTE
-void* _recalloc(void* p, size_t n, size_t elem_size) {
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_recalloc(void *p, size_t n, size_t elem_size) {
if (!p)
return calloc(n, elem_size);
const size_t size = n * elem_size;
@@ -94,13 +100,28 @@ void* _recalloc(void* p, size_t n, size_t elem_size) {
return realloc(p, size);
}
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
size_t _msize(void *ptr) {
GET_CURRENT_PC_BP_SP;
(void)sp;
return asan_malloc_usable_size(ptr, pc, bp);
}
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_expand(void *memblock, size_t size) {
+ // _expand is used in realloc-like functions to resize the buffer if possible.
+ // We don't want memory to stand still while resizing buffers, so return 0.
+ return 0;
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_expand_dbg(void *memblock, size_t size) {
+ return _expand(memblock, size);
+}
+
+// TODO(timurrrr): Might want to add support for _aligned_* allocation
+// functions to detect a bit more bugs. Those functions seem to wrap malloc().
+
int _CrtDbgReport(int, const char*, int,
const char*, const char*, ...) {
ShowStatsAndAbort();
@@ -116,37 +137,38 @@ int _CrtSetReportMode(int, int) {
}
} // extern "C"
-using __interception::GetRealFunctionAddress;
-
-// We don't want to include "windows.h" in this file to avoid extra attributes
-// set on malloc/free etc (e.g. dllimport), so declare a few things manually:
-extern "C" int __stdcall VirtualProtect(void* addr, size_t size,
- DWORD prot, DWORD *old_prot);
-const int PAGE_EXECUTE_READWRITE = 0x40;
-
namespace __asan {
void ReplaceSystemMalloc() {
-#if defined(_DLL)
-# ifdef _WIN64
-# error ReplaceSystemMalloc was not tested on x64
-# endif
- char *crt_malloc;
- if (GetRealFunctionAddress("malloc", (void**)&crt_malloc)) {
- // Replace malloc in the CRT dll with a jump to our malloc.
- DWORD old_prot, unused;
- CHECK(VirtualProtect(crt_malloc, 16, PAGE_EXECUTE_READWRITE, &old_prot));
- REAL(memset)(crt_malloc, 0xCC /* int 3 */, 16); // just in case.
-
- ptrdiff_t jmp_offset = (char*)malloc - (char*)crt_malloc - 5;
- crt_malloc[0] = 0xE9; // jmp, should be followed by an offset.
- REAL(memcpy)(crt_malloc + 1, &jmp_offset, sizeof(jmp_offset));
-
- CHECK(VirtualProtect(crt_malloc, 16, old_prot, &unused));
-
- // FYI: FlushInstructionCache is needed on Itanium etc but not on x86/x64.
- }
-
- // FIXME: investigate whether anything else is needed.
+#if defined(ASAN_DYNAMIC)
+ // We don't check the result because CRT might not be used in the process.
+ __interception::OverrideFunction("free", (uptr)free);
+ __interception::OverrideFunction("malloc", (uptr)malloc);
+ __interception::OverrideFunction("_malloc_crt", (uptr)malloc);
+ __interception::OverrideFunction("calloc", (uptr)calloc);
+ __interception::OverrideFunction("_calloc_crt", (uptr)calloc);
+ __interception::OverrideFunction("realloc", (uptr)realloc);
+ __interception::OverrideFunction("_realloc_crt", (uptr)realloc);
+ __interception::OverrideFunction("_recalloc", (uptr)_recalloc);
+ __interception::OverrideFunction("_recalloc_crt", (uptr)_recalloc);
+ __interception::OverrideFunction("_msize", (uptr)_msize);
+ __interception::OverrideFunction("_expand", (uptr)_expand);
+
+ // Override different versions of 'operator new' and 'operator delete'.
+ // No need to override the nothrow versions as they just wrap the throw
+ // versions.
+ // FIXME: Unfortunately, MSVC miscompiles the statements that take the
+ // addresses of the array versions of these operators,
+ // see https://connect.microsoft.com/VisualStudio/feedbackdetail/view/946992
+ // We might want to try to work around this by [inline] assembly or compiling
+ // parts of the RTL with Clang.
+ void *(*op_new)(size_t sz) = operator new;
+ void (*op_delete)(void *p) = operator delete;
+ void *(*op_array_new)(size_t sz) = operator new[];
+ void (*op_array_delete)(void *p) = operator delete[];
+ __interception::OverrideFunction("??2@YAPAXI@Z", (uptr)op_new);
+ __interception::OverrideFunction("??3@YAXPAX@Z", (uptr)op_delete);
+ __interception::OverrideFunction("??_U@YAPAXI@Z", (uptr)op_array_new);
+ __interception::OverrideFunction("??_V@YAXPAX@Z", (uptr)op_array_delete);
#endif
}
} // namespace __asan
diff --git a/libsanitizer/asan/asan_mapping.h b/libsanitizer/asan/asan_mapping.h
index 1e37bc26e9..907704d7fc 100644
--- a/libsanitizer/asan/asan_mapping.h
+++ b/libsanitizer/asan/asan_mapping.h
@@ -41,54 +41,87 @@
// || `[0x00007fff8000, 0x00008fff6fff]` || LowShadow ||
// || `[0x000000000000, 0x00007fff7fff]` || LowMem ||
//
-// Default Linux/i386 mapping:
+// Default Linux/i386 mapping on x86_64 machine:
// || `[0x40000000, 0xffffffff]` || HighMem ||
// || `[0x28000000, 0x3fffffff]` || HighShadow ||
// || `[0x24000000, 0x27ffffff]` || ShadowGap ||
// || `[0x20000000, 0x23ffffff]` || LowShadow ||
// || `[0x00000000, 0x1fffffff]` || LowMem ||
//
+// Default Linux/i386 mapping on i386 machine
+// (addresses starting with 0xc0000000 are reserved
+// for kernel and thus not sanitized):
+// || `[0x38000000, 0xbfffffff]` || HighMem ||
+// || `[0x27000000, 0x37ffffff]` || HighShadow ||
+// || `[0x24000000, 0x26ffffff]` || ShadowGap ||
+// || `[0x20000000, 0x23ffffff]` || LowShadow ||
+// || `[0x00000000, 0x1fffffff]` || LowMem ||
+//
// Default Linux/MIPS mapping:
-// || `[0x2aaa8000, 0xffffffff]` || HighMem ||
-// || `[0x0fffd000, 0x2aaa7fff]` || HighShadow ||
-// || `[0x0bffd000, 0x0fffcfff]` || ShadowGap ||
-// || `[0x0aaa8000, 0x0bffcfff]` || LowShadow ||
-// || `[0x00000000, 0x0aaa7fff]` || LowMem ||
+// || `[0x2aaa0000, 0xffffffff]` || HighMem ||
+// || `[0x0fff4000, 0x2aa9ffff]` || HighShadow ||
+// || `[0x0bff4000, 0x0fff3fff]` || ShadowGap ||
+// || `[0x0aaa0000, 0x0bff3fff]` || LowShadow ||
+// || `[0x00000000, 0x0aa9ffff]` || LowMem ||
+//
+// Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
+// || `[0x500000000000, 0x7fffffffffff]` || HighMem ||
+// || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow ||
+// || `[0x480000000000, 0x49ffffffffff]` || ShadowGap ||
+// || `[0x400000000000, 0x47ffffffffff]` || LowShadow ||
+// || `[0x000000000000, 0x3fffffffffff]` || LowMem ||
+//
+// Shadow mapping on FreeBSD/i386 with SHADOW_OFFSET == 0x40000000:
+// || `[0x60000000, 0xffffffff]` || HighMem ||
+// || `[0x4c000000, 0x5fffffff]` || HighShadow ||
+// || `[0x48000000, 0x4bffffff]` || ShadowGap ||
+// || `[0x40000000, 0x47ffffff]` || LowShadow ||
+// || `[0x00000000, 0x3fffffff]` || LowMem ||
static const u64 kDefaultShadowScale = 3;
-static const u64 kDefaultShadowOffset32 = 1ULL << 29;
+static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
+static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
+static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
+static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
+static const u64 kMIPS64_ShadowOffset64 = 1ULL << 36;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
-static const u64 kMIPS32_ShadowOffset32 = 0x0aaa8000;
+static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
+static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
-#if ASAN_FLEXIBLE_MAPPING_AND_OFFSET == 1
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_scale;
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_offset;
-# define SHADOW_SCALE (__asan_mapping_scale)
-# define SHADOW_OFFSET (__asan_mapping_offset)
+#define SHADOW_SCALE kDefaultShadowScale
+#if SANITIZER_ANDROID
+# define SHADOW_OFFSET (0)
#else
-# define SHADOW_SCALE kDefaultShadowScale
-# if SANITIZER_ANDROID
-# define SHADOW_OFFSET (0)
-# else
-# if SANITIZER_WORDSIZE == 32
-# if defined(__mips__)
-# define SHADOW_OFFSET kMIPS32_ShadowOffset32
-# else
-# define SHADOW_OFFSET kDefaultShadowOffset32
-# endif
+# if SANITIZER_WORDSIZE == 32
+# if defined(__mips__)
+# define SHADOW_OFFSET kMIPS32_ShadowOffset32
+# elif SANITIZER_FREEBSD
+# define SHADOW_OFFSET kFreeBSD_ShadowOffset32
# else
-# if defined(__powerpc64__)
+# if SANITIZER_IOS
+# define SHADOW_OFFSET kIosShadowOffset32
+# else
+# define SHADOW_OFFSET kDefaultShadowOffset32
+# endif
+# endif
+# else
+# if defined(__aarch64__)
+# define SHADOW_OFFSET kAArch64_ShadowOffset64
+# elif defined(__powerpc64__)
# define SHADOW_OFFSET kPPC64_ShadowOffset64
-# elif SANITIZER_MAC
-# define SHADOW_OFFSET kDefaultShadowOffset64
-# else
-# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
-# endif
+# elif SANITIZER_FREEBSD
+# define SHADOW_OFFSET kFreeBSD_ShadowOffset64
+# elif SANITIZER_MAC
+# define SHADOW_OFFSET kDefaultShadowOffset64
+# elif defined(__mips64)
+# define SHADOW_OFFSET kMIPS64_ShadowOffset64
+# else
+# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
# endif
# endif
-#endif // ASAN_FLEXIBLE_MAPPING_AND_OFFSET
+#endif
#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)
#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET))
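A worked example of the mapping, assuming the default Linux/x86_64 parameters above (SHADOW_SCALE == 3, SHADOW_OFFSET == kDefaultShort64bitShadowOffset); the application address is made up:

    #include <stdint.h>
    #include <stdio.h>

    int main() {
      const uint64_t kShadowScale  = 3;             // 8 app bytes per shadow byte
      const uint64_t kShadowOffset = 0x7fff8000ULL; // kDefaultShort64bitShadowOffset
      uint64_t app = 0x602000000010ULL;             // some heap address in HighMem
      uint64_t shadow = (app >> kShadowScale) + kShadowOffset;
      // (0x602000000010 >> 3) + 0x7fff8000 == 0xc047fff8002, inside HighShadow.
      printf("shadow(%#llx) = %#llx\n",
             (unsigned long long)app, (unsigned long long)shadow);
      return 0;
    }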
diff --git a/libsanitizer/asan/asan_new_delete.cc b/libsanitizer/asan/asan_new_delete.cc
index beac8cdbdd..9d6660ec7b 100644
--- a/libsanitizer/asan/asan_new_delete.cc
+++ b/libsanitizer/asan/asan_new_delete.cc
@@ -14,20 +14,21 @@
#include "asan_internal.h"
#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_interception.h"
+
#include <stddef.h>
-namespace __asan {
-// This function is a no-op. We need it to make sure that object file
-// with our replacements will actually be loaded from static ASan
-// run-time library at link-time.
-void ReplaceOperatorsNewAndDelete() { }
-}
+// C++ operators can't have visibility attributes on Windows.
+#if SANITIZER_WINDOWS
+# define CXX_OPERATOR_ATTRIBUTE
+#else
+# define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
+#endif
using namespace __asan; // NOLINT
-// On Android new() goes through malloc interceptors.
-// See also https://code.google.com/p/address-sanitizer/issues/detail?id=131.
-#if !SANITIZER_ANDROID
+// This code has issues on OSX.
+// See https://code.google.com/p/address-sanitizer/issues/detail?id=131.
// Fake std::nothrow_t to avoid including <new>.
namespace std {
@@ -46,14 +47,23 @@ struct nothrow_t {};
// To make sure that C++ allocation/deallocation operators are overridden on
// OS X we need to intercept them using their mangled names.
#if !SANITIZER_MAC
-INTERCEPTOR_ATTRIBUTE
+// FreeBSD prior to v9.2 has a wrong definition of 'size_t'.
+// http://svnweb.freebsd.org/base?view=revision&revision=232261
+#if SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
+#include <sys/param.h>
+#if __FreeBSD_version <= 902001 // v9.2
+#define size_t unsigned
+#endif // __FreeBSD_version
+#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
+
+CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size) { OPERATOR_NEW_BODY(FROM_NEW); }
-INTERCEPTOR_ATTRIBUTE
+CXX_OPERATOR_ATTRIBUTE
void *operator new[](size_t size) { OPERATOR_NEW_BODY(FROM_NEW_BR); }
-INTERCEPTOR_ATTRIBUTE
+CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size, std::nothrow_t const&)
{ OPERATOR_NEW_BODY(FROM_NEW); }
-INTERCEPTOR_ATTRIBUTE
+CXX_OPERATOR_ATTRIBUTE
void *operator new[](size_t size, std::nothrow_t const&)
{ OPERATOR_NEW_BODY(FROM_NEW_BR); }
@@ -77,16 +87,32 @@ INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
asan_free(ptr, &stack, type);
#if !SANITIZER_MAC
-INTERCEPTOR_ATTRIBUTE
-void operator delete(void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW); }
-INTERCEPTOR_ATTRIBUTE
-void operator delete[](void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW_BR); }
-INTERCEPTOR_ATTRIBUTE
-void operator delete(void *ptr, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY(FROM_NEW); }
-INTERCEPTOR_ATTRIBUTE
-void operator delete[](void *ptr, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr) throw() {
+ OPERATOR_DELETE_BODY(FROM_NEW);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr) throw() {
+ OPERATOR_DELETE_BODY(FROM_NEW_BR);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(FROM_NEW);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(FROM_NEW_BR);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size) throw() {
+ GET_STACK_TRACE_FREE;
+ asan_sized_free(ptr, size, &stack, FROM_NEW);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size) throw() {
+ GET_STACK_TRACE_FREE;
+ asan_sized_free(ptr, size, &stack, FROM_NEW_BR);
+}
#else // SANITIZER_MAC
INTERCEPTOR(void, _ZdlPv, void *ptr) {
@@ -102,5 +128,3 @@ INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
#endif
-
-#endif
diff --git a/libsanitizer/asan/asan_poisoning.cc b/libsanitizer/asan/asan_poisoning.cc
index 86d49909b6..c0f3991c1f 100644
--- a/libsanitizer/asan/asan_poisoning.cc
+++ b/libsanitizer/asan/asan_poisoning.cc
@@ -11,6 +11,8 @@
//===----------------------------------------------------------------------===//
#include "asan_poisoning.h"
+#include "asan_report.h"
+#include "asan_stack.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_flags.h"
@@ -48,6 +50,36 @@ struct ShadowSegmentEndpoint {
}
};
+void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+ // Since asan's mapping is compacting, the shadow chunk may not be
+ // page-aligned, so we only flush the page-aligned portion.
+ uptr page_size = GetPageSizeCached();
+ uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
+ uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
+ FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
+}
+
+void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
+ uptr end = ptr + size;
+ if (common_flags()->verbosity) {
+ Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
+ poison ? "" : "un", ptr, end, size);
+ if (common_flags()->verbosity >= 2)
+ PRINT_CURRENT_STACK();
+ }
+ CHECK(size);
+ CHECK_LE(size, 4096);
+ CHECK(IsAligned(end, SHADOW_GRANULARITY));
+ if (!IsAligned(ptr, SHADOW_GRANULARITY)) {
+ *(u8 *)MemToShadow(ptr) =
+ poison ? static_cast<u8>(ptr % SHADOW_GRANULARITY) : 0;
+ ptr |= SHADOW_GRANULARITY - 1;
+ ptr++;
+ }
+ for (; ptr < end; ptr += SHADOW_GRANULARITY)
+ *(u8*)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0;
+}
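The per-granule encoding used here is the standard ASan one: a shadow byte of 0 means the whole 8-byte granule is addressable, a value k in 1..7 means only its first k bytes are, and magic values such as kAsanIntraObjectRedzone mean fully poisoned. A minimal sketch of how a single-byte access is judged under that encoding, assuming SHADOW_GRANULARITY == 8:

    // Treats shadow bytes >= 0x80 (stored as negative s8 values) as redzone magic.
    static inline bool ByteIsPoisoned(unsigned long addr, signed char shadow_value) {
      if (shadow_value == 0) return false;             // fully addressable granule
      if (shadow_value < 0) return true;               // redzone / magic value
      return (signed char)(addr & 7) >= shadow_value;  // partially addressable
    }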
+
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
@@ -67,10 +99,8 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
if (!flags()->allow_user_poisoning || size == 0) return;
uptr beg_addr = (uptr)addr;
uptr end_addr = beg_addr + size;
- if (common_flags()->verbosity >= 1) {
- Printf("Trying to poison memory region [%p, %p)\n",
- (void*)beg_addr, (void*)end_addr);
- }
+ VPrintf(1, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
+ (void *)end_addr);
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
@@ -109,10 +139,8 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
if (!flags()->allow_user_poisoning || size == 0) return;
uptr beg_addr = (uptr)addr;
uptr end_addr = beg_addr + size;
- if (common_flags()->verbosity >= 1) {
- Printf("Trying to unpoison memory region [%p, %p)\n",
- (void*)beg_addr, (void*)end_addr);
- }
+ VPrintf(1, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
+ (void *)end_addr);
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
@@ -137,7 +165,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
}
}
-bool __asan_address_is_poisoned(void const volatile *addr) {
+int __asan_address_is_poisoned(void const volatile *addr) {
return __asan::AddressIsPoisoned((uptr)addr);
}
@@ -146,6 +174,7 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) {
uptr end = beg + size;
if (!AddrIsInMem(beg)) return beg;
if (!AddrIsInMem(end)) return end;
+ CHECK_LT(beg, end);
uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
uptr shadow_beg = MemToShadow(aligned_b);
@@ -217,6 +246,36 @@ void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
*p = x;
}
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_poison_cxx_array_cookie(uptr p) {
+ if (SANITIZER_WORDSIZE != 64) return;
+ if (!flags()->poison_array_cookie) return;
+ uptr s = MEM_TO_SHADOW(p);
+ *reinterpret_cast<u8*>(s) = kAsanArrayCookieMagic;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_load_cxx_array_cookie(uptr *p) {
+ if (SANITIZER_WORDSIZE != 64) return *p;
+ if (!flags()->poison_array_cookie) return *p;
+ uptr s = MEM_TO_SHADOW(reinterpret_cast<uptr>(p));
+ u8 sval = *reinterpret_cast<u8*>(s);
+ if (sval == kAsanArrayCookieMagic) return *p;
+ // If sval is not kAsanArrayCookieMagic it can only be freed memory,
+ // which means that we are going to get double-free. So, return 0 to avoid
+ // infinite loop of destructors. We don't want to report a double-free here
+ // though, so print a warning just in case.
+ // CHECK_EQ(sval, kAsanHeapFreeMagic);
+ if (sval == kAsanHeapFreeMagic) {
+ Report("AddressSanitizer: loaded array cookie from free-d memory; "
+ "expect a double-free report\n");
+ return 0;
+ }
+ // The cookie may remain unpoisoned if e.g. it comes from a custom
+ // operator new defined inside a class.
+ return *p;
+}
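For background (an assumption about the surrounding ABI, not something this file spells out): under the Itanium C++ ABI, new[] of a type with a non-trivial destructor stores the element count in a word just before the returned array, and delete[] reads it back to know how many destructors to run. Poisoning that cookie's shadow catches stray accesses to it, while the tolerant load above avoids destructor loops on already-freed memory. Illustrative sketch:

    struct Widget { ~Widget() {} };   // user-provided dtor => new[] stores a cookie

    void cookie_demo() {
      Widget *w = new Widget[4];      // heap block: [cookie = 4][w[0] .. w[3]]
      // In an ASan-instrumented build the compiler is expected to poison the
      // cookie's shadow after new[] and to read it back through
      // __asan_load_cxx_array_cookie before running destructors in delete[].
      delete[] w;
    }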
+
// This is a simplified version of __asan_(un)poison_memory_region, which
// assumes that left border of region to be poisoned is properly aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
@@ -243,14 +302,12 @@ static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
}
void __asan_poison_stack_memory(uptr addr, uptr size) {
- if (common_flags()->verbosity > 0)
- Report("poisoning: %p %zx\n", (void*)addr, size);
+ VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
PoisonAlignedStackMemory(addr, size, true);
}
void __asan_unpoison_stack_memory(uptr addr, uptr size) {
- if (common_flags()->verbosity > 0)
- Report("unpoisoning: %p %zx\n", (void*)addr, size);
+ VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size);
PoisonAlignedStackMemory(addr, size, false);
}
@@ -258,33 +315,40 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
const void *end_p,
const void *old_mid_p,
const void *new_mid_p) {
- if (common_flags()->verbosity >= 2)
- Printf("contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
- new_mid_p);
+ if (!flags()->detect_container_overflow) return;
+ VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
+ new_mid_p);
uptr beg = reinterpret_cast<uptr>(beg_p);
- uptr end= reinterpret_cast<uptr>(end_p);
+ uptr end = reinterpret_cast<uptr>(end_p);
uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
uptr granularity = SHADOW_GRANULARITY;
- CHECK(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
- IsAligned(beg, granularity));
+ if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
+ IsAligned(beg, granularity))) {
+ GET_STACK_TRACE_FATAL_HERE;
+ ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid,
+ &stack);
+ }
CHECK_LE(end - beg,
FIRST_32_SECOND_64(1UL << 30, 1UL << 34)); // Sanity check.
uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
uptr d1 = RoundDownTo(old_mid, granularity);
- uptr d2 = RoundUpTo(old_mid, granularity);
+ // uptr d2 = RoundUpTo(old_mid, granularity);
// Currently we should be in this state:
// [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
// Make a quick sanity check that we are indeed in this state.
- if (d1 != d2)
- CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
+ //
+ // FIXME: Two of these three checks are disabled until we fix
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=258.
+ // if (d1 != d2)
+ // CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
if (a + granularity <= d1)
CHECK_EQ(*(u8*)MemToShadow(a), 0);
- if (d2 + granularity <= c && c <= end)
- CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
- kAsanContiguousContainerOOBMagic);
+ // if (d2 + granularity <= c && c <= end)
+ // CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
+ // kAsanContiguousContainerOOBMagic);
uptr b1 = RoundDownTo(new_mid, granularity);
uptr b2 = RoundUpTo(new_mid, granularity);
@@ -297,3 +361,53 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
*(u8*)MemToShadow(b1) = static_cast<u8>(new_mid - b1);
}
}
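The typical caller of this annotation is a vector-like container that wants accesses into its unused capacity reported as container-overflow. A hedged standalone sketch (the parameter names are illustrative, not taken from any real container):

    #include <sanitizer/common_interface_defs.h>

    void NotifyResize(char *data, unsigned long capacity,
                      unsigned long old_size, unsigned long new_size) {
      __sanitizer_annotate_contiguous_container(
          data,              // beg: start of the allocation
          data + capacity,   // end: end of the allocation
          data + old_size,   // old_mid: previously used prefix
          data + new_size);  // new_mid: used prefix from now on
      // Afterwards [data + new_size, data + capacity) is poisoned, so reads or
      // writes into the unused capacity trigger a container-overflow report.
    }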
+
+int __sanitizer_verify_contiguous_container(const void *beg_p,
+ const void *mid_p,
+ const void *end_p) {
+ if (!flags()->detect_container_overflow) return 1;
+ uptr beg = reinterpret_cast<uptr>(beg_p);
+ uptr end = reinterpret_cast<uptr>(end_p);
+ uptr mid = reinterpret_cast<uptr>(mid_p);
+ CHECK_LE(beg, mid);
+ CHECK_LE(mid, end);
+ // Check some bytes starting from beg, some bytes around mid, and some bytes
+ // ending with end.
+ uptr kMaxRangeToCheck = 32;
+ uptr r1_beg = beg;
+ uptr r1_end = Min(end + kMaxRangeToCheck, mid);
+ uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
+ uptr r2_end = Min(end, mid + kMaxRangeToCheck);
+ uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
+ uptr r3_end = end;
+ for (uptr i = r1_beg; i < r1_end; i++)
+ if (AddressIsPoisoned(i))
+ return 0;
+ for (uptr i = r2_beg; i < mid; i++)
+ if (AddressIsPoisoned(i))
+ return 0;
+ for (uptr i = mid; i < r2_end; i++)
+ if (!AddressIsPoisoned(i))
+ return 0;
+ for (uptr i = r3_beg; i < r3_end; i++)
+ if (!AddressIsPoisoned(i))
+ return 0;
+ return 1;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_poison_intra_object_redzone(uptr ptr, uptr size) {
+ AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true);
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) {
+ AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, false);
+}
+
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+bool WordIsPoisoned(uptr addr) {
+ return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0);
+}
+}
diff --git a/libsanitizer/asan/asan_poisoning.h b/libsanitizer/asan/asan_poisoning.h
index da79a0ff2e..9644b7d840 100644
--- a/libsanitizer/asan/asan_poisoning.h
+++ b/libsanitizer/asan/asan_poisoning.h
@@ -13,6 +13,7 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mapping.h"
+#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
@@ -35,7 +36,32 @@ ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
uptr shadow_end = MEM_TO_SHADOW(
aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;
- REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
+ // FIXME: Page states are different on Windows, so using the same interface
+ // for mapping shadow and zeroing out pages doesn't "just work", so we should
+ // probably provide higher-level interface for these operations.
+ // For now, just memset on Windows.
+ if (value ||
+ SANITIZER_WINDOWS == 1 ||
+ shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
+ REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
+ } else {
+ uptr page_size = GetPageSizeCached();
+ uptr page_beg = RoundUpTo(shadow_beg, page_size);
+ uptr page_end = RoundDownTo(shadow_end, page_size);
+
+ if (page_beg >= page_end) {
+ REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
+ } else {
+ if (page_beg != shadow_beg) {
+ REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
+ }
+ if (page_end != shadow_end) {
+ REAL(memset)((void *)page_end, 0, shadow_end - page_end);
+ }
+ void *res = MmapFixedNoReserve(page_beg, page_end - page_beg);
+ CHECK_EQ(page_beg, res);
+ }
+ }
}
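The idea here is that for a large all-zero fill, replacing the pages is cheaper than writing every byte. A rough POSIX-level sketch of the same trade-off (the threshold argument is an illustrative knob, not the clear_shadow_mmap_threshold flag itself):

    #include <string.h>
    #include <sys/mman.h>

    static void ZeroRange(void *p, unsigned long size, unsigned long threshold) {
      if (size < threshold) {
        memset(p, 0, size);  // small range: just write the zeroes
      } else {
        // Large, page-aligned range: MAP_FIXED atomically replaces the existing
        // pages with fresh zero-filled anonymous pages.
        mmap(p, size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_NORESERVE, -1, 0);
      }
    }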
ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
@@ -55,4 +81,8 @@ ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
}
}
+// Calls __sanitizer::FlushUnneededShadowMemory() on
+// [MemToShadow(p), MemToShadow(p+size)] with proper rounding.
+void FlushUnneededASanShadowMemory(uptr p, uptr size);
+
} // namespace __asan
diff --git a/libsanitizer/asan/asan_posix.cc b/libsanitizer/asan/asan_posix.cc
index ac4ec9e019..06d24d4e0e 100644
--- a/libsanitizer/asan/asan_posix.cc
+++ b/libsanitizer/asan/asan_posix.cc
@@ -11,7 +11,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX || SANITIZER_MAC
+#if SANITIZER_POSIX
#include "asan_internal.h"
#include "asan_interceptors.h"
@@ -28,70 +28,28 @@
#include <sys/resource.h>
#include <unistd.h>
-static const uptr kAltStackSize = SIGSTKSZ * 4; // SIGSTKSZ is not enough.
-
namespace __asan {
-static void MaybeInstallSigaction(int signum,
- void (*handler)(int, siginfo_t *, void *)) {
- if (!AsanInterceptsSignal(signum))
- return;
- struct sigaction sigact;
- REAL(memset)(&sigact, 0, sizeof(sigact));
- sigact.sa_sigaction = handler;
- sigact.sa_flags = SA_SIGINFO;
- if (flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK;
- CHECK_EQ(0, REAL(sigaction)(signum, &sigact, 0));
- if (common_flags()->verbosity >= 1) {
- Report("Installed the sigaction for signal %d\n", signum);
- }
-}
-
-static void ASAN_OnSIGSEGV(int, siginfo_t *siginfo, void *context) {
- uptr addr = (uptr)siginfo->si_addr;
+void AsanOnSIGSEGV(int, void *siginfo, void *context) {
+ ScopedDeadlySignal signal_scope(GetCurrentThread());
+ uptr addr = (uptr)((siginfo_t*)siginfo)->si_addr;
+ int code = (int)((siginfo_t*)siginfo)->si_code;
// Write the first message using the bullet-proof write.
if (13 != internal_write(2, "ASAN:SIGSEGV\n", 13)) Die();
uptr pc, sp, bp;
GetPcSpBp(context, &pc, &sp, &bp);
- ReportSIGSEGV(pc, sp, bp, addr);
-}
-
-void SetAlternateSignalStack() {
- stack_t altstack, oldstack;
- CHECK_EQ(0, sigaltstack(0, &oldstack));
- // If the alternate stack is already in place, do nothing.
- if ((oldstack.ss_flags & SS_DISABLE) == 0) return;
- // TODO(glider): the mapped stack should have the MAP_STACK flag in the
- // future. It is not required by man 2 sigaltstack now (they're using
- // malloc()).
- void* base = MmapOrDie(kAltStackSize, __FUNCTION__);
- altstack.ss_sp = base;
- altstack.ss_flags = 0;
- altstack.ss_size = kAltStackSize;
- CHECK_EQ(0, sigaltstack(&altstack, 0));
- if (common_flags()->verbosity > 0) {
- Report("Alternative stack for T%d set: [%p,%p)\n",
- GetCurrentTidOrInvalid(),
- altstack.ss_sp, (char*)altstack.ss_sp + altstack.ss_size);
- }
-}
-
-void UnsetAlternateSignalStack() {
- stack_t altstack, oldstack;
- altstack.ss_sp = 0;
- altstack.ss_flags = SS_DISABLE;
- altstack.ss_size = 0;
- CHECK_EQ(0, sigaltstack(&altstack, &oldstack));
- UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
-}
-void InstallSignalHandlers() {
- // Set the alternate signal stack for the main thread.
- // This will cause SetAlternateSignalStack to be called twice, but the stack
- // will be actually set only once.
- if (flags()->use_sigaltstack) SetAlternateSignalStack();
- MaybeInstallSigaction(SIGSEGV, ASAN_OnSIGSEGV);
- MaybeInstallSigaction(SIGBUS, ASAN_OnSIGSEGV);
+ // Access at a reasonable offset above SP, or slightly below it (to account
+ // for x86_64 or PowerPC redzone, ARM push of multiple registers, etc) is
+ // probably a stack overflow.
+ // We also check si_code to filter out a SEGV caused by something other
+ // than hitting the guard page or unmapped memory, for example an
+ // unaligned memory access.
+ if (addr + 512 > sp && addr < sp + 0xFFFF &&
+ (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
+ ReportStackOverflow(pc, sp, bp, context, addr);
+ else
+ ReportSIGSEGV("SEGV", pc, sp, bp, context, addr);
}
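A standalone restatement of the classifier above, with one worked example (the numbers are made up):

    static bool LooksLikeStackOverflow(unsigned long addr, unsigned long sp,
                                       bool maperr_or_accerr) {
      return addr + 512 > sp && addr < sp + 0xFFFF && maperr_or_accerr;
    }
    // Example: sp = 0x7ffc10000000 and addr = 0x7ffc0fffff80 (128 bytes below
    // sp, e.g. a push that landed in the guard page). addr + 512 > sp and
    // addr < sp + 0xFFFF hold, and si_code is SEGV_MAPERR, so the report is
    // "stack-overflow" rather than a generic SEGV.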
// ---------------------- TSD ---------------- {{{1
@@ -125,4 +83,4 @@ void PlatformTSDDtor(void *tsd) {
}
} // namespace __asan
-#endif // SANITIZER_LINUX || SANITIZER_MAC
+#endif // SANITIZER_POSIX
diff --git a/libsanitizer/asan/asan_preinit.cc b/libsanitizer/asan/asan_preinit.cc
index 3104240153..2ce1fb9b66 100644
--- a/libsanitizer/asan/asan_preinit.cc
+++ b/libsanitizer/asan/asan_preinit.cc
@@ -8,22 +8,12 @@
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Call __asan_init at the very early stage of process startup.
-// On Linux we use .preinit_array section (unless PIC macro is defined).
//===----------------------------------------------------------------------===//
#include "asan_internal.h"
-#if ASAN_USE_PREINIT_ARRAY && !defined(PIC)
- // On Linux, we force __asan_init to be called before anyone else
- // by placing it into .preinit_array section.
- // FIXME: do we have anything like this on Mac?
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
// The symbol is called __local_asan_preinit, because it's not intended to be
// exported.
__attribute__((section(".preinit_array"), used))
void (*__local_asan_preinit)(void) = __asan_init;
-#elif SANITIZER_WINDOWS && defined(_DLL)
- // On Windows, when using dynamic CRT (/MD), we can put a pointer
- // to __asan_init into the global list of C initializers.
- // See crt0dat.c in the CRT sources for the details.
- #pragma section(".CRT$XIB", long, read) // NOLINT
- __declspec(allocate(".CRT$XIB")) void (*__asan_preinit)() = __asan_init;
#endif
diff --git a/libsanitizer/asan/asan_report.cc b/libsanitizer/asan/asan_report.cc
index 70c4b481a2..fcccb70ff8 100644
--- a/libsanitizer/asan/asan_report.cc
+++ b/libsanitizer/asan/asan_report.cc
@@ -29,6 +29,19 @@ static char *error_message_buffer = 0;
static uptr error_message_buffer_pos = 0;
static uptr error_message_buffer_size = 0;
+struct ReportData {
+ uptr pc;
+ uptr sp;
+ uptr bp;
+ uptr addr;
+ bool is_write;
+ uptr access_size;
+ const char *description;
+};
+
+static bool report_happened = false;
+static ReportData report_data = {};
+
void AppendToErrorMessageBuffer(const char *buffer) {
if (error_message_buffer) {
uptr length = internal_strlen(buffer);
@@ -43,11 +56,9 @@ void AppendToErrorMessageBuffer(const char *buffer) {
}
// ---------------------- Decorator ------------------------------ {{{1
-class Decorator: private __sanitizer::AnsiColorDecorator {
+class Decorator: public __sanitizer::SanitizerCommonDecorator {
public:
- Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { }
- const char *Warning() { return Red(); }
- const char *EndWarning() { return Default(); }
+ Decorator() : SanitizerCommonDecorator() { }
const char *Access() { return Blue(); }
const char *EndAccess() { return Default(); }
const char *Location() { return Green(); }
@@ -59,6 +70,7 @@ class Decorator: private __sanitizer::AnsiColorDecorator {
switch (byte) {
case kAsanHeapLeftRedzoneMagic:
case kAsanHeapRightRedzoneMagic:
+ case kAsanArrayCookieMagic:
return Red();
case kAsanHeapFreeMagic:
return Magenta();
@@ -80,77 +92,117 @@ class Decorator: private __sanitizer::AnsiColorDecorator {
return Red();
case kAsanInternalHeapMagic:
return Yellow();
+ case kAsanIntraObjectRedzone:
+ return Yellow();
default:
return Default();
}
}
const char *EndShadowByte() { return Default(); }
+ const char *MemoryByte() { return Magenta(); }
+ const char *EndMemoryByte() { return Default(); }
};
// ---------------------- Helper functions ----------------------- {{{1
-static void PrintShadowByte(const char *before, u8 byte,
- const char *after = "\n") {
+static void PrintMemoryByte(InternalScopedString *str, const char *before,
+ u8 byte, bool in_shadow, const char *after = "\n") {
Decorator d;
- Printf("%s%s%x%x%s%s", before,
- d.ShadowByte(byte), byte >> 4, byte & 15, d.EndShadowByte(), after);
+ str->append("%s%s%x%x%s%s", before,
+ in_shadow ? d.ShadowByte(byte) : d.MemoryByte(),
+ byte >> 4, byte & 15,
+ in_shadow ? d.EndShadowByte() : d.EndMemoryByte(), after);
+}
+
+static void PrintShadowByte(InternalScopedString *str, const char *before,
+ u8 byte, const char *after = "\n") {
+ PrintMemoryByte(str, before, byte, /*in_shadow*/true, after);
}
-static void PrintShadowBytes(const char *before, u8 *bytes,
- u8 *guilty, uptr n) {
+static void PrintShadowBytes(InternalScopedString *str, const char *before,
+ u8 *bytes, u8 *guilty, uptr n) {
Decorator d;
- if (before)
- Printf("%s%p:", before, bytes);
+ if (before) str->append("%s%p:", before, bytes);
for (uptr i = 0; i < n; i++) {
u8 *p = bytes + i;
- const char *before = p == guilty ? "[" :
- (p - 1 == guilty && i != 0) ? "" : " ";
+ const char *before =
+ p == guilty ? "[" : (p - 1 == guilty && i != 0) ? "" : " ";
const char *after = p == guilty ? "]" : "";
- PrintShadowByte(before, *p, after);
+ PrintShadowByte(str, before, *p, after);
}
- Printf("\n");
-}
-
-static void PrintLegend() {
- Printf("Shadow byte legend (one shadow byte represents %d "
- "application bytes):\n", (int)SHADOW_GRANULARITY);
- PrintShadowByte(" Addressable: ", 0);
- Printf(" Partially addressable: ");
- for (u8 i = 1; i < SHADOW_GRANULARITY; i++)
- PrintShadowByte("", i, " ");
- Printf("\n");
- PrintShadowByte(" Heap left redzone: ", kAsanHeapLeftRedzoneMagic);
- PrintShadowByte(" Heap right redzone: ", kAsanHeapRightRedzoneMagic);
- PrintShadowByte(" Freed heap region: ", kAsanHeapFreeMagic);
- PrintShadowByte(" Stack left redzone: ", kAsanStackLeftRedzoneMagic);
- PrintShadowByte(" Stack mid redzone: ", kAsanStackMidRedzoneMagic);
- PrintShadowByte(" Stack right redzone: ", kAsanStackRightRedzoneMagic);
- PrintShadowByte(" Stack partial redzone: ", kAsanStackPartialRedzoneMagic);
- PrintShadowByte(" Stack after return: ", kAsanStackAfterReturnMagic);
- PrintShadowByte(" Stack use after scope: ", kAsanStackUseAfterScopeMagic);
- PrintShadowByte(" Global redzone: ", kAsanGlobalRedzoneMagic);
- PrintShadowByte(" Global init order: ", kAsanInitializationOrderMagic);
- PrintShadowByte(" Poisoned by user: ", kAsanUserPoisonedMemoryMagic);
- PrintShadowByte(" Contiguous container OOB:",
+ str->append("\n");
+}
+
+static void PrintLegend(InternalScopedString *str) {
+ str->append(
+ "Shadow byte legend (one shadow byte represents %d "
+ "application bytes):\n",
+ (int)SHADOW_GRANULARITY);
+ PrintShadowByte(str, " Addressable: ", 0);
+ str->append(" Partially addressable: ");
+ for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " ");
+ str->append("\n");
+ PrintShadowByte(str, " Heap left redzone: ",
+ kAsanHeapLeftRedzoneMagic);
+ PrintShadowByte(str, " Heap right redzone: ",
+ kAsanHeapRightRedzoneMagic);
+ PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic);
+ PrintShadowByte(str, " Stack left redzone: ",
+ kAsanStackLeftRedzoneMagic);
+ PrintShadowByte(str, " Stack mid redzone: ",
+ kAsanStackMidRedzoneMagic);
+ PrintShadowByte(str, " Stack right redzone: ",
+ kAsanStackRightRedzoneMagic);
+ PrintShadowByte(str, " Stack partial redzone: ",
+ kAsanStackPartialRedzoneMagic);
+ PrintShadowByte(str, " Stack after return: ",
+ kAsanStackAfterReturnMagic);
+ PrintShadowByte(str, " Stack use after scope: ",
+ kAsanStackUseAfterScopeMagic);
+ PrintShadowByte(str, " Global redzone: ", kAsanGlobalRedzoneMagic);
+ PrintShadowByte(str, " Global init order: ",
+ kAsanInitializationOrderMagic);
+ PrintShadowByte(str, " Poisoned by user: ",
+ kAsanUserPoisonedMemoryMagic);
+ PrintShadowByte(str, " Container overflow: ",
kAsanContiguousContainerOOBMagic);
- PrintShadowByte(" ASan internal: ", kAsanInternalHeapMagic);
+ PrintShadowByte(str, " Array cookie: ",
+ kAsanArrayCookieMagic);
+ PrintShadowByte(str, " Intra object redzone: ",
+ kAsanIntraObjectRedzone);
+ PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
}
-static void PrintShadowMemoryForAddress(uptr addr) {
- if (!AddrIsInMem(addr))
+void MaybeDumpInstructionBytes(uptr pc) {
+ if (!flags()->dump_instruction_bytes || (pc < GetPageSizeCached()))
return;
+ InternalScopedString str(1024);
+ str.append("First 16 instruction bytes at pc: ");
+ if (IsAccessibleMemoryRange(pc, 16)) {
+ for (int i = 0; i < 16; ++i) {
+ PrintMemoryByte(&str, "", ((u8 *)pc)[i], /*in_shadow*/false, " ");
+ }
+ str.append("\n");
+ } else {
+ str.append("unaccessible\n");
+ }
+ Report("%s", str.data());
+}
+
+static void PrintShadowMemoryForAddress(uptr addr) {
+ if (!AddrIsInMem(addr)) return;
uptr shadow_addr = MemToShadow(addr);
const uptr n_bytes_per_row = 16;
uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
- Printf("Shadow bytes around the buggy address:\n");
+ InternalScopedString str(4096 * 8);
+ str.append("Shadow bytes around the buggy address:\n");
for (int i = -5; i <= 5; i++) {
const char *prefix = (i == 0) ? "=>" : " ";
- PrintShadowBytes(prefix,
- (u8*)(aligned_shadow + i * n_bytes_per_row),
- (u8*)shadow_addr, n_bytes_per_row);
+ PrintShadowBytes(&str, prefix, (u8 *)(aligned_shadow + i * n_bytes_per_row),
+ (u8 *)shadow_addr, n_bytes_per_row);
}
- if (flags()->print_legend)
- PrintLegend();
+ if (flags()->print_legend) PrintLegend(&str);
+ Printf("%s", str.data());
}
static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
@@ -182,62 +234,89 @@ static bool IsASCII(unsigned char c) {
static const char *MaybeDemangleGlobalName(const char *name) {
// We can spoil names of globals with C linkage, so use a heuristic
// approach to check if the name should be demangled.
- return (name[0] == '_' && name[1] == 'Z')
- ? Symbolizer::Get()->Demangle(name)
- : name;
+ bool should_demangle = false;
+ if (name[0] == '_' && name[1] == 'Z')
+ should_demangle = true;
+ else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?')
+ should_demangle = true;
+
+ return should_demangle ? Symbolizer::GetOrInit()->Demangle(name) : name;
}
// Check if the global is a zero-terminated ASCII string. If so, print it.
-static void PrintGlobalNameIfASCII(const __asan_global &g) {
+static void PrintGlobalNameIfASCII(InternalScopedString *str,
+ const __asan_global &g) {
for (uptr p = g.beg; p < g.beg + g.size - 1; p++) {
unsigned char c = *(unsigned char*)p;
if (c == '\0' || !IsASCII(c)) return;
}
if (*(char*)(g.beg + g.size - 1) != '\0') return;
- Printf(" '%s' is ascii string '%s'\n",
- MaybeDemangleGlobalName(g.name), (char*)g.beg);
+ str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
+ (char *)g.beg);
+}
+
+static const char *GlobalFilename(const __asan_global &g) {
+ const char *res = g.module_name;
+ // Prefer the filename from the source location, if it is available.
+ if (g.location)
+ res = g.location->filename;
+ CHECK(res);
+ return res;
+}
+
+static void PrintGlobalLocation(InternalScopedString *str,
+ const __asan_global &g) {
+ str->append("%s", GlobalFilename(g));
+ if (!g.location)
+ return;
+ if (g.location->line_no)
+ str->append(":%d", g.location->line_no);
+ if (g.location->column_no)
+ str->append(":%d", g.location->column_no);
}
bool DescribeAddressRelativeToGlobal(uptr addr, uptr size,
const __asan_global &g) {
- static const uptr kMinimalDistanceFromAnotherGlobal = 64;
- if (addr <= g.beg - kMinimalDistanceFromAnotherGlobal) return false;
- if (addr >= g.beg + g.size_with_redzone) return false;
+ if (!IsAddressNearGlobal(addr, g)) return false;
+ InternalScopedString str(4096);
Decorator d;
- Printf("%s", d.Location());
+ str.append("%s", d.Location());
if (addr < g.beg) {
- Printf("%p is located %zd bytes to the left", (void*)addr, g.beg - addr);
+ str.append("%p is located %zd bytes to the left", (void *)addr,
+ g.beg - addr);
} else if (addr + size > g.beg + g.size) {
if (addr < g.beg + g.size)
addr = g.beg + g.size;
- Printf("%p is located %zd bytes to the right", (void*)addr,
- addr - (g.beg + g.size));
+ str.append("%p is located %zd bytes to the right", (void *)addr,
+ addr - (g.beg + g.size));
} else {
// Can it happen?
- Printf("%p is located %zd bytes inside", (void*)addr, addr - g.beg);
+ str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg);
}
- Printf(" of global variable '%s' from '%s' (0x%zx) of size %zu\n",
- MaybeDemangleGlobalName(g.name), g.module_name, g.beg, g.size);
- Printf("%s", d.EndLocation());
- PrintGlobalNameIfASCII(g);
+ str.append(" of global variable '%s' defined in '",
+ MaybeDemangleGlobalName(g.name));
+ PrintGlobalLocation(&str, g);
+ str.append("' (0x%zx) of size %zu\n", g.beg, g.size);
+ str.append("%s", d.EndLocation());
+ PrintGlobalNameIfASCII(&str, g);
+ Printf("%s", str.data());
return true;
}
-bool DescribeAddressIfShadow(uptr addr) {
+bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr, bool print) {
if (AddrIsInMem(addr))
return false;
- static const char kAddrInShadowReport[] =
- "Address %p is located in the %s.\n";
- if (AddrIsInShadowGap(addr)) {
- Printf(kAddrInShadowReport, addr, "shadow gap area");
- return true;
- }
- if (AddrIsInHighShadow(addr)) {
- Printf(kAddrInShadowReport, addr, "high shadow area");
- return true;
- }
- if (AddrIsInLowShadow(addr)) {
- Printf(kAddrInShadowReport, addr, "low shadow area");
+ const char *area_type = nullptr;
+ if (AddrIsInShadowGap(addr)) area_type = "shadow gap";
+ else if (AddrIsInHighShadow(addr)) area_type = "high shadow";
+ else if (AddrIsInLowShadow(addr)) area_type = "low shadow";
+ if (area_type != nullptr) {
+ if (print) {
+ Printf("Address %p is located in the %s area.\n", addr, area_type);
+ } else {
+ CHECK(descr);
+ descr->region_kind = area_type;
+ }
return true;
}
CHECK(0 && "Address is not in memory and not in shadow?");
@@ -264,16 +343,15 @@ const char *ThreadNameWithParenthesis(u32 tid, char buff[],
return ThreadNameWithParenthesis(t, buff, buff_len);
}
-void PrintAccessAndVarIntersection(const char *var_name,
- uptr var_beg, uptr var_size,
- uptr addr, uptr access_size,
- uptr prev_var_end, uptr next_var_beg) {
- uptr var_end = var_beg + var_size;
+static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
+ uptr access_size, uptr prev_var_end,
+ uptr next_var_beg) {
+ uptr var_end = var.beg + var.size;
uptr addr_end = addr + access_size;
const char *pos_descr = 0;
- // If the variable [var_beg, var_end) is the nearest variable to the
+ // If the variable [var.beg, var_end) is the nearest variable to the
// current memory access, indicate it in the log.
- if (addr >= var_beg) {
+ if (addr >= var.beg) {
if (addr_end <= var_end)
pos_descr = "is inside"; // May happen if this is a use-after-return.
else if (addr < var_end)
@@ -282,59 +360,77 @@ void PrintAccessAndVarIntersection(const char *var_name,
next_var_beg - addr_end >= addr - var_end)
pos_descr = "overflows";
} else {
- if (addr_end > var_beg)
+ if (addr_end > var.beg)
pos_descr = "partially underflows";
else if (addr >= prev_var_end &&
- addr - prev_var_end >= var_beg - addr_end)
+ addr - prev_var_end >= var.beg - addr_end)
pos_descr = "underflows";
}
- Printf(" [%zd, %zd) '%s'", var_beg, var_beg + var_size, var_name);
+ InternalScopedString str(1024);
+ str.append(" [%zd, %zd)", var.beg, var_end);
+ // Render variable name.
+ str.append(" '");
+ for (uptr i = 0; i < var.name_len; ++i) {
+ str.append("%c", var.name_pos[i]);
+ }
+ str.append("'");
if (pos_descr) {
Decorator d;
// FIXME: we may want to also print the size of the access here,
// but in case of accesses generated by memset it may be confusing.
- Printf("%s <== Memory access at offset %zd %s this variable%s\n",
- d.Location(), addr, pos_descr, d.EndLocation());
+ str.append("%s <== Memory access at offset %zd %s this variable%s\n",
+ d.Location(), addr, pos_descr, d.EndLocation());
} else {
- Printf("\n");
+ str.append("\n");
}
+ Printf("%s", str.data());
}
-struct StackVarDescr {
- uptr beg;
- uptr size;
- const char *name_pos;
- uptr name_len;
-};
+bool ParseFrameDescription(const char *frame_descr,
+ InternalMmapVector<StackVarDescr> *vars) {
+ CHECK(frame_descr);
+ char *p;
+ // This string is created by the compiler and has the following form:
+ // "n alloc_1 alloc_2 ... alloc_n"
+ // where alloc_i looks like "offset size len ObjectName".
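+ // For example, a frame with two locals could be encoded as
+ //   "2 32 120 3 buf 176 8 3 idx"
+ // i.e. 'buf' of size 120 at frame offset 32 and 'idx' of size 8 at
+ // offset 176 (the number before each name is the length of that name).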
+ uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10);
+ if (n_objects == 0)
+ return false;
+
+ for (uptr i = 0; i < n_objects; i++) {
+ uptr beg = (uptr)internal_simple_strtoll(p, &p, 10);
+ uptr size = (uptr)internal_simple_strtoll(p, &p, 10);
+ uptr len = (uptr)internal_simple_strtoll(p, &p, 10);
+ if (beg == 0 || size == 0 || *p != ' ') {
+ return false;
+ }
+ p++;
+ StackVarDescr var = {beg, size, p, len};
+ vars->push_back(var);
+ p += len;
+ }
+
+ return true;
+}
bool DescribeAddressIfStack(uptr addr, uptr access_size) {
AsanThread *t = FindThreadByStackAddress(addr);
if (!t) return false;
- const uptr kBufSize = 4095;
- char buf[kBufSize];
- uptr offset = 0;
- uptr frame_pc = 0;
- char tname[128];
- const char *frame_descr = t->GetFrameNameByAddr(addr, &offset, &frame_pc);
-#ifdef __powerpc64__
- // On PowerPC64, the address of a function actually points to a
- // three-doubleword data structure with the first field containing
- // the address of the function's code.
- frame_pc = *reinterpret_cast<uptr *>(frame_pc);
-#endif
-
- // This string is created by the compiler and has the following form:
- // "n alloc_1 alloc_2 ... alloc_n"
- // where alloc_i looks like "offset size len ObjectName ".
- CHECK(frame_descr);
Decorator d;
+ char tname[128];
Printf("%s", d.Location());
- Printf("Address %p is located in stack of thread T%d%s "
- "at offset %zu in frame\n",
- addr, t->tid(),
- ThreadNameWithParenthesis(t->tid(), tname, sizeof(tname)),
- offset);
+ Printf("Address %p is located in stack of thread T%d%s", addr, t->tid(),
+ ThreadNameWithParenthesis(t->tid(), tname, sizeof(tname)));
+
+ // Try to fetch precise stack frame for this access.
+ AsanThread::StackFrameAccess access;
+ if (!t->GetStackFrameAccessByAddr(addr, &access)) {
+ Printf("%s\n", d.EndLocation());
+ return true;
+ }
+ Printf(" at offset %zu in frame%s\n", access.offset, d.EndLocation());
+
// Now we print the frame where the alloca has happened.
// We print this frame as a stack trace with one element.
// The symbolizer may print more than one frame if inlining was involved.
@@ -342,50 +438,42 @@ bool DescribeAddressIfStack(uptr addr, uptr access_size) {
// previously. That's unfortunate, but I have no better solution,
// especially given that the alloca may be from an entirely different place
// (e.g. use-after-scope, or different thread's stack).
- StackTrace alloca_stack;
- alloca_stack.trace[0] = frame_pc + 16;
- alloca_stack.size = 1;
+#if defined(__powerpc64__) && defined(__BIG_ENDIAN__)
+ // On PowerPC64 ELFv1, the address of a function actually points to a
+ // three-doubleword data structure with the first field containing
+ // the address of the function's code.
+ access.frame_pc = *reinterpret_cast<uptr *>(access.frame_pc);
+#endif
+ access.frame_pc += 16;
Printf("%s", d.EndLocation());
- PrintStack(&alloca_stack);
+ StackTrace alloca_stack(&access.frame_pc, 1);
+ alloca_stack.Print();
+
+ InternalMmapVector<StackVarDescr> vars(16);
+ if (!ParseFrameDescription(access.frame_descr, &vars)) {
+ Printf("AddressSanitizer can't parse the stack frame "
+ "descriptor: |%s|\n", access.frame_descr);
+ // 'addr' is a stack address, so return true even if we can't parse frame
+ return true;
+ }
+ uptr n_objects = vars.size();
// Report the number of stack objects.
- char *p;
- uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10);
- CHECK_GT(n_objects, 0);
Printf(" This frame has %zu object(s):\n", n_objects);
// Report all objects in this frame.
- InternalScopedBuffer<StackVarDescr> vars(n_objects);
for (uptr i = 0; i < n_objects; i++) {
- uptr beg, size;
- uptr len;
- beg = (uptr)internal_simple_strtoll(p, &p, 10);
- size = (uptr)internal_simple_strtoll(p, &p, 10);
- len = (uptr)internal_simple_strtoll(p, &p, 10);
- if (beg == 0 || size == 0 || *p != ' ') {
- Printf("AddressSanitizer can't parse the stack frame "
- "descriptor: |%s|\n", frame_descr);
- break;
- }
- p++;
- vars[i].beg = beg;
- vars[i].size = size;
- vars[i].name_pos = p;
- vars[i].name_len = len;
- p += len;
- }
- for (uptr i = 0; i < n_objects; i++) {
- buf[0] = 0;
- internal_strncat(buf, vars[i].name_pos,
- static_cast<uptr>(Min(kBufSize, vars[i].name_len)));
uptr prev_var_end = i ? vars[i - 1].beg + vars[i - 1].size : 0;
uptr next_var_beg = i + 1 < n_objects ? vars[i + 1].beg : ~(0UL);
- PrintAccessAndVarIntersection(buf, vars[i].beg, vars[i].size,
- offset, access_size,
+ PrintAccessAndVarIntersection(vars[i], access.offset, access_size,
prev_var_end, next_var_beg);
}
Printf("HINT: this may be a false positive if your program uses "
- "some custom stack unwind mechanism or swapcontext\n"
- " (longjmp and C++ exceptions *are* supported)\n");
+ "some custom stack unwind mechanism or swapcontext\n");
+ if (SANITIZER_WINDOWS)
+ Printf(" (longjmp, SEH and C++ exceptions *are* supported)\n");
+ else
+ Printf(" (longjmp and C++ exceptions *are* supported)\n");
+
DescribeThread(t);
return true;
}
@@ -394,24 +482,26 @@ static void DescribeAccessToHeapChunk(AsanChunkView chunk, uptr addr,
uptr access_size) {
sptr offset;
Decorator d;
- Printf("%s", d.Location());
+ InternalScopedString str(4096);
+ str.append("%s", d.Location());
if (chunk.AddrIsAtLeft(addr, access_size, &offset)) {
- Printf("%p is located %zd bytes to the left of", (void*)addr, offset);
+ str.append("%p is located %zd bytes to the left of", (void *)addr, offset);
} else if (chunk.AddrIsAtRight(addr, access_size, &offset)) {
if (offset < 0) {
addr -= offset;
offset = 0;
}
- Printf("%p is located %zd bytes to the right of", (void*)addr, offset);
+ str.append("%p is located %zd bytes to the right of", (void *)addr, offset);
} else if (chunk.AddrIsInside(addr, access_size, &offset)) {
- Printf("%p is located %zd bytes inside of", (void*)addr, offset);
+ str.append("%p is located %zd bytes inside of", (void*)addr, offset);
} else {
- Printf("%p is located somewhere around (this is AddressSanitizer bug!)",
- (void*)addr);
+ str.append("%p is located somewhere around (this is AddressSanitizer bug!)",
+ (void *)addr);
}
- Printf(" %zu-byte region [%p,%p)\n", chunk.UsedSize(),
- (void*)(chunk.Beg()), (void*)(chunk.End()));
- Printf("%s", d.EndLocation());
+ str.append(" %zu-byte region [%p,%p)\n", chunk.UsedSize(),
+ (void *)(chunk.Beg()), (void *)(chunk.End()));
+ str.append("%s", d.EndLocation());
+ Printf("%s", str.data());
}
void DescribeHeapAddress(uptr addr, uptr access_size) {
@@ -426,8 +516,7 @@ void DescribeHeapAddress(uptr addr, uptr access_size) {
asanThreadRegistry().CheckLocked();
AsanThreadContext *alloc_thread =
GetThreadContextByTidLocked(chunk.AllocTid());
- StackTrace alloc_stack;
- chunk.GetAllocStack(&alloc_stack);
+ StackTrace alloc_stack = chunk.GetAllocStack();
char tname[128];
Decorator d;
AsanThreadContext *free_thread = 0;
@@ -437,9 +526,8 @@ void DescribeHeapAddress(uptr addr, uptr access_size) {
free_thread->tid,
ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)),
d.EndAllocation());
- StackTrace free_stack;
- chunk.GetFreeStack(&free_stack);
- PrintStack(&free_stack);
+ StackTrace free_stack = chunk.GetFreeStack();
+ free_stack.Print();
Printf("%spreviously allocated by thread T%d%s here:%s\n",
d.Allocation(), alloc_thread->tid,
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
@@ -450,7 +538,7 @@ void DescribeHeapAddress(uptr addr, uptr access_size) {
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
d.EndAllocation());
}
- PrintStack(&alloc_stack);
+ alloc_stack.Print();
DescribeThread(GetCurrentThread());
if (free_thread)
DescribeThread(free_thread);
@@ -481,15 +569,14 @@ void DescribeThread(AsanThreadContext *context) {
}
context->announced = true;
char tname[128];
- Printf("Thread T%d%s", context->tid,
- ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
- Printf(" created by T%d%s here:\n",
- context->parent_tid,
- ThreadNameWithParenthesis(context->parent_tid,
- tname, sizeof(tname)));
- uptr stack_size;
- const uptr *stack_trace = StackDepotGet(context->stack_id, &stack_size);
- PrintStack(stack_trace, stack_size);
+ InternalScopedString str(1024);
+ str.append("Thread T%d%s", context->tid,
+ ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
+ str.append(
+ " created by T%d%s here:\n", context->parent_tid,
+ ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname)));
+ Printf("%s", str.data());
+ StackDepotGet(context->stack_id).Print();
// Recursively describe the parent thread if needed.
if (flags()->print_full_thread_history) {
AsanThreadContext *parent_context =
@@ -504,14 +591,14 @@ void DescribeThread(AsanThreadContext *context) {
// immediately after printing error report.
class ScopedInErrorReport {
public:
- ScopedInErrorReport() {
+ explicit ScopedInErrorReport(ReportData *report = nullptr) {
static atomic_uint32_t num_calls;
static u32 reporting_thread_tid;
if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) {
// Do not print more than one report, otherwise they will mix up.
// Error reporting functions shouldn't return in this situation, as
// they are defined as no-return.
- Report("AddressSanitizer: while reporting a bug found another one."
+ Report("AddressSanitizer: while reporting a bug found another one. "
"Ignoring.\n");
u32 current_tid = GetCurrentTidOrInvalid();
if (current_tid != reporting_thread_tid) {
@@ -524,6 +611,8 @@ class ScopedInErrorReport {
// Die() to bypass any additional checks.
internal__exit(flags()->exitcode);
}
+ if (report) report_data = *report;
+ report_happened = true;
ASAN_ON_ERROR();
// Make sure the registry and sanitizer report mutexes are locked while
// we're printing an error report.
@@ -539,6 +628,8 @@ class ScopedInErrorReport {
NORETURN ~ScopedInErrorReport() {
// Make sure the current thread is announced.
DescribeThread(GetCurrentThread());
+ // We may want to grab this lock again when printing stats.
+ asanThreadRegistry().Unlock();
// Print memory stats.
if (flags()->print_stats)
__asan_print_accumulated_stats();
@@ -550,22 +641,43 @@ class ScopedInErrorReport {
}
};
-void ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr) {
+void ReportStackOverflow(uptr pc, uptr sp, uptr bp, void *context, uptr addr) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: SEGV on unknown address %p"
- " (pc %p sp %p bp %p T%d)\n",
- (void*)addr, (void*)pc, (void*)sp, (void*)bp,
- GetCurrentTidOrInvalid());
+ Report(
+ "ERROR: AddressSanitizer: stack-overflow on address %p"
+ " (pc %p bp %p sp %p T%d)\n",
+ (void *)addr, (void *)pc, (void *)bp, (void *)sp,
+ GetCurrentTidOrInvalid());
Printf("%s", d.EndWarning());
- GET_STACK_TRACE_FATAL(pc, bp);
- PrintStack(&stack);
+ GET_STACK_TRACE_SIGNAL(pc, bp, context);
+ stack.Print();
+ ReportErrorSummary("stack-overflow", &stack);
+}
+
+void ReportSIGSEGV(const char *description, uptr pc, uptr sp, uptr bp,
+ void *context, uptr addr) {
+ ScopedInErrorReport in_report;
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report(
+ "ERROR: AddressSanitizer: %s on unknown address %p"
+ " (pc %p bp %p sp %p T%d)\n",
+ description, (void *)addr, (void *)pc, (void *)bp, (void *)sp,
+ GetCurrentTidOrInvalid());
+ if (pc < GetPageSizeCached()) {
+ Report("Hint: pc points to the zero page.\n");
+ }
+ Printf("%s", d.EndWarning());
+ GET_STACK_TRACE_SIGNAL(pc, bp, context);
+ stack.Print();
+ MaybeDumpInstructionBytes(pc);
Printf("AddressSanitizer can not provide additional info.\n");
ReportErrorSummary("SEGV", &stack);
}
-void ReportDoubleFree(uptr addr, StackTrace *free_stack) {
+void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
@@ -578,12 +690,36 @@ void ReportDoubleFree(uptr addr, StackTrace *free_stack) {
Printf("%s", d.EndWarning());
CHECK_GT(free_stack->size, 0);
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- PrintStack(&stack);
+ stack.Print();
DescribeHeapAddress(addr, 1);
ReportErrorSummary("double-free", &stack);
}
-void ReportFreeNotMalloced(uptr addr, StackTrace *free_stack) {
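+// Reports a mismatch between the size recorded at allocation time and the
+// size passed to a sized operator delete. A common cause is deleting an
+// object through a pointer to a base class that lacks a virtual destructor.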
+void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
+ BufferedStackTrace *free_stack) {
+ ScopedInErrorReport in_report;
+ Decorator d;
+ Printf("%s", d.Warning());
+ char tname[128];
+ u32 curr_tid = GetCurrentTidOrInvalid();
+ Report("ERROR: AddressSanitizer: new-delete-type-mismatch on %p in "
+ "thread T%d%s:\n",
+ addr, curr_tid,
+ ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
+ Printf("%s object passed to delete has wrong type:\n", d.EndWarning());
+ Printf(" size of the allocated type: %zd bytes;\n"
+ " size of the deallocated type: %zd bytes.\n",
+ asan_mz_size(reinterpret_cast<void*>(addr)), delete_size);
+ CHECK_GT(free_stack->size, 0);
+ GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
+ stack.Print();
+ DescribeHeapAddress(addr, 1);
+ ReportErrorSummary("new-delete-type-mismatch", &stack);
+ Report("HINT: if you don't care about these warnings you may set "
+ "ASAN_OPTIONS=new_delete_type_mismatch=0\n");
+}
+
+void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
@@ -595,12 +731,12 @@ void ReportFreeNotMalloced(uptr addr, StackTrace *free_stack) {
Printf("%s", d.EndWarning());
CHECK_GT(free_stack->size, 0);
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- PrintStack(&stack);
+ stack.Print();
DescribeHeapAddress(addr, 1);
ReportErrorSummary("bad-free", &stack);
}
-void ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
+void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
AllocType alloc_type,
AllocType dealloc_type) {
static const char *alloc_names[] =
@@ -616,14 +752,14 @@ void ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
Printf("%s", d.EndWarning());
CHECK_GT(free_stack->size, 0);
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- PrintStack(&stack);
+ stack.Print();
DescribeHeapAddress(addr, 1);
ReportErrorSummary("alloc-dealloc-mismatch", &stack);
Report("HINT: if you don't care about these warnings you may set "
"ASAN_OPTIONS=alloc_dealloc_mismatch=0\n");
}
-void ReportMallocUsableSizeNotOwned(uptr addr, StackTrace *stack) {
+void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
@@ -631,27 +767,29 @@ void ReportMallocUsableSizeNotOwned(uptr addr, StackTrace *stack) {
"malloc_usable_size() for pointer which is "
"not owned: %p\n", addr);
Printf("%s", d.EndWarning());
- PrintStack(stack);
+ stack->Print();
DescribeHeapAddress(addr, 1);
ReportErrorSummary("bad-malloc_usable_size", stack);
}
-void ReportAsanGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack) {
+void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
+ BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: attempting to call "
- "__asan_get_allocated_size() for pointer which is "
+ "__sanitizer_get_allocated_size() for pointer which is "
"not owned: %p\n", addr);
Printf("%s", d.EndWarning());
- PrintStack(stack);
+ stack->Print();
DescribeHeapAddress(addr, 1);
- ReportErrorSummary("bad-__asan_get_allocated_size", stack);
+ ReportErrorSummary("bad-__sanitizer_get_allocated_size", stack);
}
-void ReportStringFunctionMemoryRangesOverlap(
- const char *function, const char *offset1, uptr length1,
- const char *offset2, uptr length2, StackTrace *stack) {
+void ReportStringFunctionMemoryRangesOverlap(const char *function,
+ const char *offset1, uptr length1,
+ const char *offset2, uptr length2,
+ BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
Decorator d;
char bug_type[100];
@@ -661,44 +799,129 @@ void ReportStringFunctionMemoryRangesOverlap(
"memory ranges [%p,%p) and [%p, %p) overlap\n", \
bug_type, offset1, offset1 + length1, offset2, offset2 + length2);
Printf("%s", d.EndWarning());
- PrintStack(stack);
+ stack->Print();
DescribeAddress((uptr)offset1, length1);
DescribeAddress((uptr)offset2, length2);
ReportErrorSummary(bug_type, stack);
}
+void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
+ BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report;
+ Decorator d;
+ const char *bug_type = "negative-size-param";
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", bug_type, size);
+ Printf("%s", d.EndWarning());
+ stack->Print();
+ DescribeAddress(offset, size);
+ ReportErrorSummary(bug_type, stack);
+}
+
+void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
+ uptr old_mid, uptr new_mid,
+ BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report;
+ Report("ERROR: AddressSanitizer: bad parameters to "
+ "__sanitizer_annotate_contiguous_container:\n"
+ " beg : %p\n"
+ " end : %p\n"
+ " old_mid : %p\n"
+ " new_mid : %p\n",
+ beg, end, old_mid, new_mid);
+ stack->Print();
+ ReportErrorSummary("bad-__sanitizer_annotate_contiguous_container", stack);
+}
+
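+// Typically reached when the same global is registered more than once, e.g.
+// when a global with the same name is defined both in the main executable
+// and in an instrumented shared library.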
+void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
+ const __asan_global *g2, u32 stack_id2) {
+ ScopedInErrorReport in_report;
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: odr-violation (%p):\n", g1->beg);
+ Printf("%s", d.EndWarning());
+ InternalScopedString g1_loc(256), g2_loc(256);
+ PrintGlobalLocation(&g1_loc, *g1);
+ PrintGlobalLocation(&g2_loc, *g2);
+ Printf(" [1] size=%zd '%s' %s\n", g1->size,
+ MaybeDemangleGlobalName(g1->name), g1_loc.data());
+ Printf(" [2] size=%zd '%s' %s\n", g2->size,
+ MaybeDemangleGlobalName(g2->name), g2_loc.data());
+ if (stack_id1 && stack_id2) {
+ Printf("These globals were registered at these points:\n");
+ Printf(" [1]:\n");
+ StackDepotGet(stack_id1).Print();
+ Printf(" [2]:\n");
+ StackDepotGet(stack_id2).Print();
+ }
+ Report("HINT: if you don't care about these warnings you may set "
+ "ASAN_OPTIONS=detect_odr_violation=0\n");
+ InternalScopedString error_msg(256);
+ error_msg.append("odr-violation: global '%s' at %s",
+ MaybeDemangleGlobalName(g1->name), g1_loc.data());
+ ReportErrorSummary(error_msg.data());
+}
+
+// ----------------------- CheckForInvalidPointerPair ----------- {{{1
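+// The __sanitizer_ptr_cmp / __sanitizer_ptr_sub entry points (defined at the
+// bottom of this file) are called from instrumented pointer comparisons and
+// subtractions; a report is produced when the two pointers point into
+// different heap chunks, or only one of them points into the heap.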
+static NOINLINE void
+ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp, uptr a1, uptr a2) {
+ ScopedInErrorReport in_report;
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: invalid-pointer-pair: %p %p\n", a1, a2);
+ Printf("%s", d.EndWarning());
+ GET_STACK_TRACE_FATAL(pc, bp);
+ stack.Print();
+ DescribeAddress(a1, 1);
+ DescribeAddress(a2, 1);
+ ReportErrorSummary("invalid-pointer-pair", &stack);
+}
+
+static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
+ if (!flags()->detect_invalid_pointer_pairs) return;
+ uptr a1 = reinterpret_cast<uptr>(p1);
+ uptr a2 = reinterpret_cast<uptr>(p2);
+ AsanChunkView chunk1 = FindHeapChunkByAddress(a1);
+ AsanChunkView chunk2 = FindHeapChunkByAddress(a2);
+ bool valid1 = chunk1.IsValid();
+ bool valid2 = chunk2.IsValid();
+ if ((valid1 != valid2) || (valid1 && valid2 && !chunk1.Eq(chunk2))) {
+ GET_CALLER_PC_BP_SP;
+ return ReportInvalidPointerPair(pc, bp, sp, a1, a2);
+ }
+}
// ----------------------- Mac-specific reports ----------------- {{{1
-void WarnMacFreeUnallocated(
- uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack) {
+void WarnMacFreeUnallocated(uptr addr, uptr zone_ptr, const char *zone_name,
+ BufferedStackTrace *stack) {
// Just print a warning here.
Printf("free_common(%p) -- attempting to free unallocated memory.\n"
"AddressSanitizer is ignoring this error on Mac OS now.\n",
addr);
PrintZoneForPointer(addr, zone_ptr, zone_name);
- PrintStack(stack);
+ stack->Print();
DescribeHeapAddress(addr, 1);
}
-void ReportMacMzReallocUnknown(
- uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack) {
+void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
+ BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
Printf("mz_realloc(%p) -- attempting to realloc unallocated memory.\n"
"This is an unrecoverable problem, exiting now.\n",
addr);
PrintZoneForPointer(addr, zone_ptr, zone_name);
- PrintStack(stack);
+ stack->Print();
DescribeHeapAddress(addr, 1);
}
-void ReportMacCfReallocUnknown(
- uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack) {
+void ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
+ BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
Printf("cf_realloc(%p) -- attempting to realloc unallocated memory.\n"
"This is an unrecoverable problem, exiting now.\n",
addr);
PrintZoneForPointer(addr, zone_ptr, zone_name);
- PrintStack(stack);
+ stack->Print();
DescribeHeapAddress(addr, 1);
}
@@ -707,10 +930,8 @@ void ReportMacCfReallocUnknown(
// --------------------------- Interface --------------------- {{{1
using namespace __asan; // NOLINT
-void __asan_report_error(uptr pc, uptr bp, uptr sp,
- uptr addr, bool is_write, uptr access_size) {
- ScopedInErrorReport in_report;
-
+void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
+ uptr access_size) {
// Determine the error type.
const char *bug_descr = "unknown-crash";
if (AddrIsInMem(addr)) {
@@ -724,6 +945,7 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp,
switch (*shadow_addr) {
case kAsanHeapLeftRedzoneMagic:
case kAsanHeapRightRedzoneMagic:
+ case kAsanArrayCookieMagic:
bug_descr = "heap-buffer-overflow";
break;
case kAsanHeapFreeMagic:
@@ -755,12 +977,20 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp,
case kAsanGlobalRedzoneMagic:
bug_descr = "global-buffer-overflow";
break;
+ case kAsanIntraObjectRedzone:
+ bug_descr = "intra-object-overflow";
+ break;
}
}
+
+ ReportData report = { pc, sp, bp, addr, (bool)is_write, access_size,
+ bug_descr };
+ ScopedInErrorReport in_report(&report);
+
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s on address "
- "%p at pc 0x%zx bp 0x%zx sp 0x%zx\n",
+ "%p at pc %p bp %p sp %p\n",
bug_descr, (void*)addr, pc, bp, sp);
Printf("%s", d.EndWarning());
@@ -774,7 +1004,7 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp,
d.EndAccess());
GET_STACK_TRACE_FATAL(pc, bp);
- PrintStack(&stack);
+ stack.Print();
DescribeAddress(addr, access_size);
ReportErrorSummary(bug_descr, &stack);
@@ -786,14 +1016,60 @@ void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
if (callback) {
error_message_buffer_size = 1 << 16;
error_message_buffer =
- (char*)MmapOrDie(error_message_buffer_size, __FUNCTION__);
+ (char*)MmapOrDie(error_message_buffer_size, __func__);
error_message_buffer_pos = 0;
}
}
void __asan_describe_address(uptr addr) {
+ // Thread registry must be locked while we're describing an address.
+ asanThreadRegistry().Lock();
DescribeAddress(addr, 1);
+ asanThreadRegistry().Unlock();
+}
+
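+// The accessors below expose the data captured in report_data, so that an
+// attached debugger or an __asan_on_error() callback can query the most
+// recent error: its pc/bp/sp, the faulting address, the access type and
+// size, and a short description of the bug.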
+int __asan_report_present() {
+ return report_happened ? 1 : 0;
+}
+
+uptr __asan_get_report_pc() {
+ return report_data.pc;
+}
+
+uptr __asan_get_report_bp() {
+ return report_data.bp;
+}
+
+uptr __asan_get_report_sp() {
+ return report_data.sp;
+}
+
+uptr __asan_get_report_address() {
+ return report_data.addr;
+}
+
+int __asan_get_report_access_type() {
+ return report_data.is_write ? 1 : 0;
+}
+
+uptr __asan_get_report_access_size() {
+ return report_data.access_size;
+}
+
+const char *__asan_get_report_description() {
+ return report_data.description;
+}
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_ptr_sub(void *a, void *b) {
+ CheckForInvalidPointerPair(a, b);
+}
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_ptr_cmp(void *a, void *b) {
+ CheckForInvalidPointerPair(a, b);
}
+} // extern "C"
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default implementation of __asan_on_error that does nothing
diff --git a/libsanitizer/asan/asan_report.h b/libsanitizer/asan/asan_report.h
index e4c756e557..b31b86dd08 100644
--- a/libsanitizer/asan/asan_report.h
+++ b/libsanitizer/asan/asan_report.h
@@ -16,13 +16,33 @@
namespace __asan {
+struct StackVarDescr {
+ uptr beg;
+ uptr size;
+ const char *name_pos;
+ uptr name_len;
+};
+
+struct AddressDescription {
+ char *name;
+ uptr name_size;
+ uptr region_address;
+ uptr region_size;
+ const char *region_kind;
+};
+
// The following functions prints address description depending
// on the memory type (shadow/heap/stack/global).
void DescribeHeapAddress(uptr addr, uptr access_size);
bool DescribeAddressIfGlobal(uptr addr, uptr access_size);
bool DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
const __asan_global &g);
-bool DescribeAddressIfShadow(uptr addr);
+bool IsAddressNearGlobal(uptr addr, const __asan_global &g);
+bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr);
+bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr = nullptr,
+ bool print = true);
+bool ParseFrameDescription(const char *frame_descr,
+ InternalMmapVector<StackVarDescr> *vars);
bool DescribeAddressIfStack(uptr addr, uptr access_size);
// Determines memory type on its own.
void DescribeAddress(uptr addr, uptr access_size);
@@ -30,26 +50,46 @@ void DescribeAddress(uptr addr, uptr access_size);
void DescribeThread(AsanThreadContext *context);
// Different kinds of error reports.
-void NORETURN ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr);
-void NORETURN ReportDoubleFree(uptr addr, StackTrace *free_stack);
-void NORETURN ReportFreeNotMalloced(uptr addr, StackTrace *free_stack);
-void NORETURN ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
+void NORETURN
+ ReportStackOverflow(uptr pc, uptr sp, uptr bp, void *context, uptr addr);
+void NORETURN ReportSIGSEGV(const char *description, uptr pc, uptr sp, uptr bp,
+ void *context, uptr addr);
+void NORETURN ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
+ BufferedStackTrace *free_stack);
+void NORETURN ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);
+void NORETURN ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack);
+void NORETURN ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
AllocType alloc_type,
AllocType dealloc_type);
-void NORETURN ReportMallocUsableSizeNotOwned(uptr addr,
- StackTrace *stack);
-void NORETURN ReportAsanGetAllocatedSizeNotOwned(uptr addr,
- StackTrace *stack);
-void NORETURN ReportStringFunctionMemoryRangesOverlap(
- const char *function, const char *offset1, uptr length1,
- const char *offset2, uptr length2, StackTrace *stack);
+void NORETURN
+ ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack);
+void NORETURN
+ ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
+ BufferedStackTrace *stack);
+void NORETURN
+ ReportStringFunctionMemoryRangesOverlap(const char *function,
+ const char *offset1, uptr length1,
+ const char *offset2, uptr length2,
+ BufferedStackTrace *stack);
+void NORETURN ReportStringFunctionSizeOverflow(uptr offset, uptr size,
+ BufferedStackTrace *stack);
+void NORETURN
+ ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
+ uptr old_mid, uptr new_mid,
+ BufferedStackTrace *stack);
+
+void NORETURN
+ReportODRViolation(const __asan_global *g1, u32 stack_id1,
+ const __asan_global *g2, u32 stack_id2);
// Mac-specific errors and warnings.
-void WarnMacFreeUnallocated(
- uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack);
-void NORETURN ReportMacMzReallocUnknown(
- uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack);
-void NORETURN ReportMacCfReallocUnknown(
- uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack);
+void WarnMacFreeUnallocated(uptr addr, uptr zone_ptr, const char *zone_name,
+ BufferedStackTrace *stack);
+void NORETURN ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr,
+ const char *zone_name,
+ BufferedStackTrace *stack);
+void NORETURN ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr,
+ const char *zone_name,
+ BufferedStackTrace *stack);
} // namespace __asan
diff --git a/libsanitizer/asan/asan_rtl.cc b/libsanitizer/asan/asan_rtl.cc
index 537d40612a..2c599047da 100644
--- a/libsanitizer/asan/asan_rtl.cc
+++ b/libsanitizer/asan/asan_rtl.cc
@@ -9,6 +9,7 @@
//
// Main file of the ASan run-time library.
//===----------------------------------------------------------------------===//
+#include "asan_activation.h"
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_interface_internal.h"
@@ -26,6 +27,7 @@
#include "lsan/lsan_common.h"
int __asan_option_detect_stack_use_after_return; // Global interface symbol.
+uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan.
namespace __asan {
@@ -49,7 +51,7 @@ static void AsanDie() {
UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
}
}
- if (flags()->coverage)
+ if (common_flags()->coverage)
__sanitizer_cov_dump();
if (death_callback)
death_callback();
@@ -60,10 +62,10 @@ static void AsanDie() {
static void AsanCheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2) {
- Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n",
- file, line, cond, (uptr)v1, (uptr)v2);
+ Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
+ line, cond, (uptr)v1, (uptr)v2);
// FIXME: check for infinite recursion without a thread-local counter here.
- PRINT_CURRENT_STACK();
+ PRINT_CURRENT_STACK_CHECK();
Die();
}
@@ -76,7 +78,7 @@ static const char *MaybeCallAsanDefaultOptions() {
return (&__asan_default_options) ? __asan_default_options() : "";
}
-static const char *MaybeUseAsanDefaultOptionsCompileDefiniton() {
+static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
#ifdef ASAN_DEFAULT_OPTIONS
// Stringize the macro value.
# define ASAN_STRINGIZE(x) #x
@@ -91,56 +93,159 @@ static void ParseFlagsFromString(Flags *f, const char *str) {
CommonFlags *cf = common_flags();
ParseCommonFlagsFromString(cf, str);
CHECK((uptr)cf->malloc_context_size <= kStackTraceMax);
-
- ParseFlag(str, &f->quarantine_size, "quarantine_size");
- ParseFlag(str, &f->redzone, "redzone");
+ // Please write meaningful flag descriptions when adding new flags.
+ ParseFlag(str, &f->quarantine_size, "quarantine_size",
+ "Size (in bytes) of quarantine used to detect use-after-free "
+ "errors. Lower value may reduce memory usage but increase the "
+ "chance of false negatives.");
+ ParseFlag(str, &f->redzone, "redzone",
+ "Minimal size (in bytes) of redzones around heap objects. "
+ "Requirement: redzone >= 16, is a power of two.");
+ ParseFlag(str, &f->max_redzone, "max_redzone",
+ "Maximal size (in bytes) of redzones around heap objects.");
CHECK_GE(f->redzone, 16);
+ CHECK_GE(f->max_redzone, f->redzone);
+ CHECK_LE(f->max_redzone, 2048);
CHECK(IsPowerOfTwo(f->redzone));
-
- ParseFlag(str, &f->debug, "debug");
- ParseFlag(str, &f->report_globals, "report_globals");
- ParseFlag(str, &f->check_initialization_order, "check_initialization_order");
-
- ParseFlag(str, &f->replace_str, "replace_str");
- ParseFlag(str, &f->replace_intrin, "replace_intrin");
- ParseFlag(str, &f->mac_ignore_invalid_free, "mac_ignore_invalid_free");
+ CHECK(IsPowerOfTwo(f->max_redzone));
+
+ ParseFlag(str, &f->debug, "debug",
+ "If set, prints some debugging information and does additional checks.");
+ ParseFlag(str, &f->report_globals, "report_globals",
+ "Controls the way to handle globals (0 - don't detect buffer overflow on "
+ "globals, 1 - detect buffer overflow, 2 - print data about registered "
+ "globals).");
+
+ ParseFlag(str, &f->check_initialization_order,
+ "check_initialization_order",
+ "If set, attempts to catch initialization order issues.");
+
+ ParseFlag(str, &f->replace_str, "replace_str",
+ "If set, uses custom wrappers and replacements for libc string functions "
+ "to find more errors.");
+
+ ParseFlag(str, &f->replace_intrin, "replace_intrin",
+ "If set, uses custom wrappers for memset/memcpy/memmove intinsics.");
+ ParseFlag(str, &f->mac_ignore_invalid_free, "mac_ignore_invalid_free",
+ "Ignore invalid free() calls to work around some bugs. Used on OS X "
+ "only.");
ParseFlag(str, &f->detect_stack_use_after_return,
- "detect_stack_use_after_return");
- ParseFlag(str, &f->uar_stack_size_log, "uar_stack_size_log");
- ParseFlag(str, &f->max_malloc_fill_size, "max_malloc_fill_size");
- ParseFlag(str, &f->malloc_fill_byte, "malloc_fill_byte");
- ParseFlag(str, &f->exitcode, "exitcode");
- ParseFlag(str, &f->allow_user_poisoning, "allow_user_poisoning");
- ParseFlag(str, &f->sleep_before_dying, "sleep_before_dying");
- ParseFlag(str, &f->handle_segv, "handle_segv");
- ParseFlag(str, &f->allow_user_segv_handler, "allow_user_segv_handler");
- ParseFlag(str, &f->use_sigaltstack, "use_sigaltstack");
- ParseFlag(str, &f->check_malloc_usable_size, "check_malloc_usable_size");
- ParseFlag(str, &f->unmap_shadow_on_exit, "unmap_shadow_on_exit");
- ParseFlag(str, &f->abort_on_error, "abort_on_error");
- ParseFlag(str, &f->print_stats, "print_stats");
- ParseFlag(str, &f->print_legend, "print_legend");
- ParseFlag(str, &f->atexit, "atexit");
- ParseFlag(str, &f->coverage, "coverage");
- ParseFlag(str, &f->disable_core, "disable_core");
- ParseFlag(str, &f->allow_reexec, "allow_reexec");
- ParseFlag(str, &f->print_full_thread_history, "print_full_thread_history");
- ParseFlag(str, &f->poison_heap, "poison_heap");
- ParseFlag(str, &f->poison_partial, "poison_partial");
- ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch");
- ParseFlag(str, &f->strict_memcmp, "strict_memcmp");
- ParseFlag(str, &f->strict_init_order, "strict_init_order");
+ "detect_stack_use_after_return",
+ "Enables stack-use-after-return checking at run-time.");
+ ParseFlag(str, &f->min_uar_stack_size_log, "min_uar_stack_size_log",
+ "Minimum fake stack size log.");
+ ParseFlag(str, &f->max_uar_stack_size_log, "max_uar_stack_size_log",
+ "Maximum fake stack size log.");
+ ParseFlag(str, &f->uar_noreserve, "uar_noreserve",
+ "Use mmap with 'norserve' flag to allocate fake stack.");
+ ParseFlag(str, &f->max_malloc_fill_size, "max_malloc_fill_size",
+ "ASan allocator flag. max_malloc_fill_size is the maximal amount of "
+ "bytes that will be filled with malloc_fill_byte on malloc.");
+ ParseFlag(str, &f->malloc_fill_byte, "malloc_fill_byte",
+ "Value used to fill the newly allocated memory.");
+ ParseFlag(str, &f->exitcode, "exitcode",
+ "Override the program exit status if the tool found an error.");
+ ParseFlag(str, &f->allow_user_poisoning, "allow_user_poisoning",
+ "If set, user may manually mark memory regions as poisoned or "
+ "unpoisoned.");
+ ParseFlag(str, &f->sleep_before_dying, "sleep_before_dying",
+ "Number of seconds to sleep between printing an error report and "
+ "terminating the program. Useful for debugging purposes (e.g. when one "
+ "needs to attach gdb).");
+
+ ParseFlag(str, &f->check_malloc_usable_size, "check_malloc_usable_size",
+ "Allows the users to work around the bug in Nvidia drivers prior to "
+ "295.*.");
+
+ ParseFlag(str, &f->unmap_shadow_on_exit, "unmap_shadow_on_exit",
+ "If set, explicitly unmaps the (huge) shadow at exit.");
+ ParseFlag(str, &f->abort_on_error, "abort_on_error",
+ "If set, the tool calls abort() instead of _exit() after printing the "
+ "error report.");
+ ParseFlag(str, &f->print_stats, "print_stats",
+ "Print various statistics after printing an error message or if "
+ "atexit=1.");
+ ParseFlag(str, &f->print_legend, "print_legend",
+ "Print the legend for the shadow bytes.");
+ ParseFlag(str, &f->atexit, "atexit",
+ "If set, prints ASan exit stats even after program terminates "
+ "successfully.");
+
+ ParseFlag(str, &f->allow_reexec, "allow_reexec",
+ "Allow the tool to re-exec the program. This may interfere badly with "
+ "the debugger.");
+
+ ParseFlag(str, &f->print_full_thread_history,
+ "print_full_thread_history",
+ "If set, prints thread creation stacks for the threads involved in the "
+ "report and their ancestors up to the main thread.");
+
+ ParseFlag(str, &f->poison_heap, "poison_heap",
+ "Poison (or not) the heap memory on [de]allocation. Zero value is useful "
+ "for benchmarking the allocator or instrumentator.");
+
+ ParseFlag(str, &f->poison_array_cookie, "poison_array_cookie",
+ "Poison (or not) the array cookie after operator new[].");
+
+ ParseFlag(str, &f->poison_partial, "poison_partial",
+ "If true, poison partially addressable 8-byte aligned words "
+ "(default=true). This flag affects heap and global buffers, but not "
+ "stack buffers.");
+
+ ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch",
+ "Report errors on malloc/delete, new/free, new/delete[], etc.");
+
+ ParseFlag(str, &f->new_delete_type_mismatch, "new_delete_type_mismatch",
+ "Report errors on mismatch betwen size of new and delete.");
+
+ ParseFlag(str, &f->strict_memcmp, "strict_memcmp",
+ "If true, assume that memcmp(p1, p2, n) always reads n bytes before "
+ "comparing p1 and p2.");
+
+ ParseFlag(str, &f->strict_init_order, "strict_init_order",
+ "If true, assume that dynamic initializers can never access globals from "
+ "other modules, even if the latter are already initialized.");
+
+ ParseFlag(str, &f->start_deactivated, "start_deactivated",
+ "If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap "
+ "poisoning) to reduce memory consumption as much as possible, and "
+ "restores them to original values when the first instrumented module is "
+ "loaded into the process. This is mainly intended to be used on "
+ "Android. ");
+
+ ParseFlag(str, &f->detect_invalid_pointer_pairs,
+ "detect_invalid_pointer_pairs",
+ "If non-zero, try to detect operations like <, <=, >, >= and - on "
+ "invalid pointer pairs (e.g. when pointers belong to different objects). "
+ "The bigger the value the harder we try.");
+
+ ParseFlag(str, &f->detect_container_overflow,
+ "detect_container_overflow",
+ "If true, honor the container overflow annotations. "
+ "See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow");
+
+ ParseFlag(str, &f->detect_odr_violation, "detect_odr_violation",
+ "If >=2, detect violation of One-Definition-Rule (ODR); "
+ "If ==1, detect ODR-violation only if the two variables "
+ "have different sizes");
+
+ ParseFlag(str, &f->dump_instruction_bytes, "dump_instruction_bytes",
+ "If true, dump 16 bytes starting at the instruction that caused SEGV");
}
void InitializeFlags(Flags *f, const char *env) {
CommonFlags *cf = common_flags();
SetCommonFlagsDefaults(cf);
+ cf->detect_leaks = CAN_SANITIZE_LEAKS;
cf->external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
cf->malloc_context_size = kDefaultMallocContextSize;
+ cf->intercept_tls_get_addr = true;
+ cf->coverage = false;
internal_memset(f, 0, sizeof(*f));
f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28;
f->redzone = 16;
+ f->max_redzone = 2048;
f->debug = false;
f->report_globals = 1;
f->check_initialization_order = false;
@@ -148,53 +253,58 @@ void InitializeFlags(Flags *f, const char *env) {
f->replace_intrin = true;
f->mac_ignore_invalid_free = false;
f->detect_stack_use_after_return = false; // Also needs the compiler flag.
- f->uar_stack_size_log = 0;
+ f->min_uar_stack_size_log = 16; // We can't do smaller anyway.
+ f->max_uar_stack_size_log = 20; // 1Mb per size class, i.e. ~11Mb per thread.
+ f->uar_noreserve = false;
f->max_malloc_fill_size = 0x1000; // By default, fill only the first 4K.
f->malloc_fill_byte = 0xbe;
f->exitcode = ASAN_DEFAULT_FAILURE_EXITCODE;
f->allow_user_poisoning = true;
f->sleep_before_dying = 0;
- f->handle_segv = ASAN_NEEDS_SEGV;
- f->allow_user_segv_handler = false;
- f->use_sigaltstack = false;
f->check_malloc_usable_size = true;
f->unmap_shadow_on_exit = false;
f->abort_on_error = false;
f->print_stats = false;
f->print_legend = true;
f->atexit = false;
- f->coverage = false;
- f->disable_core = (SANITIZER_WORDSIZE == 64);
f->allow_reexec = true;
f->print_full_thread_history = true;
f->poison_heap = true;
+ f->poison_array_cookie = true;
f->poison_partial = true;
// Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=131
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=309
// TODO(glider,timurrrr): Fix known issues and enable this back.
f->alloc_dealloc_mismatch = (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0);
+ f->new_delete_type_mismatch = true;
f->strict_memcmp = true;
f->strict_init_order = false;
+ f->start_deactivated = false;
+ f->detect_invalid_pointer_pairs = 0;
+ f->detect_container_overflow = true;
+ f->detect_odr_violation = 2;
+ f->dump_instruction_bytes = false;
// Override from compile definition.
- ParseFlagsFromString(f, MaybeUseAsanDefaultOptionsCompileDefiniton());
+ ParseFlagsFromString(f, MaybeUseAsanDefaultOptionsCompileDefinition());
// Override from user-specified string.
ParseFlagsFromString(f, MaybeCallAsanDefaultOptions());
- if (cf->verbosity) {
- Report("Using the defaults from __asan_default_options: %s\n",
- MaybeCallAsanDefaultOptions());
- }
+ VReport(1, "Using the defaults from __asan_default_options: %s\n",
+ MaybeCallAsanDefaultOptions());
// Override from command line.
ParseFlagsFromString(f, env);
+ if (common_flags()->help) {
+ PrintFlagDescriptions();
+ }
-#if !CAN_SANITIZE_LEAKS
- if (cf->detect_leaks) {
+ if (!CAN_SANITIZE_LEAKS && cf->detect_leaks) {
Report("%s: detect_leaks is not supported on this platform.\n",
SanitizerToolName);
cf->detect_leaks = false;
}
-#endif
// Make "strict_init_order" imply "check_initialization_order".
// TODO(samsonov): Use a single runtime flag for an init-order checker.
@@ -203,6 +313,17 @@ void InitializeFlags(Flags *f, const char *env) {
}
}
+// Parse flags that may change between startup and activation.
+// On Android they come from a system property.
+// On other platforms this is no-op.
+void ParseExtraActivationFlags() {
+ char buf[100];
+ GetExtraActivationFlags(buf, sizeof(buf));
+ ParseFlagsFromString(flags(), buf);
+ if (buf[0] != '\0')
+ VReport(1, "Extra activation flags: %s\n", buf);
+}
+
// -------------------------- Globals --------------------- {{{1
int asan_inited;
bool asan_init_is_running;
@@ -224,6 +345,7 @@ static void ReserveShadowMemoryRange(uptr beg, uptr end) {
CHECK_EQ((beg % GetPageSizeCached()), 0);
CHECK_EQ(((end + 1) % GetPageSizeCached()), 0);
uptr size = end - beg + 1;
+ DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
void *res = MmapFixedNoReserve(beg, size);
if (res != (void*)beg) {
Report("ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
@@ -269,6 +391,53 @@ void __asan_report_ ## type ## _n(uptr addr, uptr size) { \
ASAN_REPORT_ERROR_N(load, false)
ASAN_REPORT_ERROR_N(store, true)
+#define ASAN_MEMORY_ACCESS_CALLBACK(type, is_write, size) \
+ extern "C" NOINLINE INTERFACE_ATTRIBUTE void __asan_##type##size(uptr addr); \
+ void __asan_##type##size(uptr addr) { \
+ uptr sp = MEM_TO_SHADOW(addr); \
+ uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \
+ : *reinterpret_cast<u16 *>(sp); \
+ if (UNLIKELY(s)) { \
+ if (UNLIKELY(size >= SHADOW_GRANULARITY || \
+ ((s8)((addr & (SHADOW_GRANULARITY - 1)) + size - 1)) >= \
+ (s8)s)) { \
+ if (__asan_test_only_reported_buggy_pointer) { \
+ *__asan_test_only_reported_buggy_pointer = addr; \
+ } else { \
+ GET_CALLER_PC_BP_SP; \
+ __asan_report_error(pc, bp, sp, addr, is_write, size); \
+ } \
+ } \
+ } \
+ }
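+
+// For sub-granularity accesses the shadow byte holds the number of
+// addressable bytes at the start of the 8-byte granule. E.g. with the
+// default SHADOW_GRANULARITY of 8, a 2-byte load at an address whose low
+// three bits are 5 is reported whenever the shadow value is non-zero and,
+// read as a signed byte, is <= 6 (= 5 + 2 - 1).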
+
+ASAN_MEMORY_ACCESS_CALLBACK(load, false, 1)
+ASAN_MEMORY_ACCESS_CALLBACK(load, false, 2)
+ASAN_MEMORY_ACCESS_CALLBACK(load, false, 4)
+ASAN_MEMORY_ACCESS_CALLBACK(load, false, 8)
+ASAN_MEMORY_ACCESS_CALLBACK(load, false, 16)
+ASAN_MEMORY_ACCESS_CALLBACK(store, true, 1)
+ASAN_MEMORY_ACCESS_CALLBACK(store, true, 2)
+ASAN_MEMORY_ACCESS_CALLBACK(store, true, 4)
+ASAN_MEMORY_ACCESS_CALLBACK(store, true, 8)
+ASAN_MEMORY_ACCESS_CALLBACK(store, true, 16)
+
+extern "C"
+NOINLINE INTERFACE_ATTRIBUTE void __asan_loadN(uptr addr, uptr size) {
+ if (__asan_region_is_poisoned(addr, size)) {
+ GET_CALLER_PC_BP_SP;
+ __asan_report_error(pc, bp, sp, addr, false, size);
+ }
+}
+
+extern "C"
+NOINLINE INTERFACE_ATTRIBUTE void __asan_storeN(uptr addr, uptr size) {
+ if (__asan_region_is_poisoned(addr, size)) {
+ GET_CALLER_PC_BP_SP;
+ __asan_report_error(pc, bp, sp, addr, true, size);
+ }
+}
+
// Force the linker to keep the symbols for various ASan interface functions.
// We want to keep those in the executable in order to let the instrumented
// dynamic libraries access the symbol even if it is not used by the executable
@@ -295,13 +464,6 @@ static NOINLINE void force_interface_symbols() {
case 15: __asan_set_error_report_callback(0); break;
case 16: __asan_handle_no_return(); break;
case 17: __asan_address_is_poisoned(0); break;
- case 18: __asan_get_allocated_size(0); break;
- case 19: __asan_get_current_allocated_bytes(); break;
- case 20: __asan_get_estimated_allocated_size(0); break;
- case 21: __asan_get_free_bytes(); break;
- case 22: __asan_get_heap_size(); break;
- case 23: __asan_get_ownership(0); break;
- case 24: __asan_get_unmapped_bytes(); break;
case 25: __asan_poison_memory_region(0, 0); break;
case 26: __asan_unpoison_memory_region(0, 0); break;
case 27: __asan_set_error_exit_code(0); break;
@@ -372,7 +534,8 @@ static void PrintAddressSpaceLayout() {
(void*)MEM_TO_SHADOW(kMidShadowEnd));
}
Printf("\n");
- Printf("red_zone=%zu\n", (uptr)flags()->redzone);
+ Printf("redzone=%zu\n", (uptr)flags()->redzone);
+ Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone);
Printf("quarantine_size=%zuM\n", (uptr)flags()->quarantine_size >> 20);
Printf("malloc_context_size=%zu\n",
(uptr)common_flags()->malloc_context_size);
@@ -387,59 +550,17 @@ static void PrintAddressSpaceLayout() {
kHighShadowBeg > kMidMemEnd);
}
-} // namespace __asan
-
-// ---------------------- Interface ---------------- {{{1
-using namespace __asan; // NOLINT
-
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-const char* __asan_default_options() { return ""; }
-} // extern "C"
-#endif
-
-int NOINLINE __asan_set_error_exit_code(int exit_code) {
- int old = flags()->exitcode;
- flags()->exitcode = exit_code;
- return old;
-}
-
-void NOINLINE __asan_handle_no_return() {
- int local_stack;
- AsanThread *curr_thread = GetCurrentThread();
- CHECK(curr_thread);
- uptr PageSize = GetPageSizeCached();
- uptr top = curr_thread->stack_top();
- uptr bottom = ((uptr)&local_stack - PageSize) & ~(PageSize-1);
- static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
- if (top - bottom > kMaxExpectedCleanupSize) {
- static bool reported_warning = false;
- if (reported_warning)
- return;
- reported_warning = true;
- Report("WARNING: ASan is ignoring requested __asan_handle_no_return: "
- "stack top: %p; bottom %p; size: %p (%zd)\n"
- "False positive error reports may follow\n"
- "For details see "
- "http://code.google.com/p/address-sanitizer/issues/detail?id=189\n",
- top, bottom, top - bottom, top - bottom);
- return;
- }
- PoisonShadow(bottom, top - bottom, 0);
- if (curr_thread->has_fake_stack())
- curr_thread->fake_stack()->HandleNoReturn();
-}
-
-void NOINLINE __asan_set_death_callback(void (*callback)(void)) {
- death_callback = callback;
-}
-
-void __asan_init() {
- if (asan_inited) return;
+static void AsanInitInternal() {
+ if (LIKELY(asan_inited)) return;
SanitizerToolName = "AddressSanitizer";
CHECK(!asan_init_is_running && "ASan init calls itself!");
asan_init_is_running = true;
+
+ // Initialize flags. This must be done early, because most of the
+ // initialization steps look at flags().
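+ // ASAN_OPTIONS is conventionally a colon-separated list of name=value
+ // pairs, e.g. ASAN_OPTIONS=redzone=64:max_redzone=1024:print_legend=0
+ // (illustrative values).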
+ const char *options = GetEnv("ASAN_OPTIONS");
+ InitializeFlags(flags(), options);
+
InitializeHighMemEnd();
// Make sure we are not statically linked.
@@ -450,18 +571,21 @@ void __asan_init() {
SetCheckFailedCallback(AsanCheckFailed);
SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
- // Initialize flags. This must be done early, because most of the
- // initialization steps look at flags().
- const char *options = GetEnv("ASAN_OPTIONS");
- InitializeFlags(flags(), options);
+ if (!flags()->start_deactivated)
+ ParseExtraActivationFlags();
+
__sanitizer_set_report_path(common_flags()->log_path);
__asan_option_detect_stack_use_after_return =
flags()->detect_stack_use_after_return;
+ CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
- if (common_flags()->verbosity && options) {
- Report("Parsed ASAN_OPTIONS: %s\n", options);
+ if (options) {
+ VReport(1, "Parsed ASAN_OPTIONS: %s\n", options);
}
+ if (flags()->start_deactivated)
+ AsanStartDeactivated();
+
// Re-exec ourselves if we need to set additional env or command line args.
MaybeReexec();
@@ -470,8 +594,12 @@ void __asan_init() {
InitializeAsanInterceptors();
+ // Enable system log ("adb logcat") on Android.
+ // Doing this before interceptors are initialized crashes in:
+ // AsanInitInternal -> android_log_write -> __interceptor_strcmp
+ AndroidLogInit();
+
ReplaceSystemMalloc();
- ReplaceOperatorsNewAndDelete();
uptr shadow_start = kLowShadowBeg;
if (kLowShadowBeg)
@@ -479,7 +607,8 @@ void __asan_init() {
bool full_shadow_is_available =
MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
-#if SANITIZER_LINUX && defined(__x86_64__) && !ASAN_FIXED_MAPPING
+#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \
+ !ASAN_FIXED_MAPPING
if (!full_shadow_is_available) {
kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0;
kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0;
@@ -489,9 +618,7 @@ void __asan_init() {
if (common_flags()->verbosity)
PrintAddressSpaceLayout();
- if (flags()->disable_core) {
- DisableCoreDumper();
- }
+ DisableCoreDumperIfNecessary();
if (full_shadow_is_available) {
// mmap the low shadow plus at least one page at the left.
@@ -501,6 +628,7 @@ void __asan_init() {
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd);
// protect the gap.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
+ CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
} else if (kMidMemBeg &&
MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) &&
MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
@@ -523,19 +651,10 @@ void __asan_init() {
}
AsanTSDInit(PlatformTSDDtor);
- InstallSignalHandlers();
+ InstallDeadlySignalHandlers(AsanOnSIGSEGV);
- // Allocator should be initialized before starting external symbolizer, as
- // fork() on Mac locks the allocator.
InitializeAllocator();
- // Start symbolizer process if necessary.
- if (common_flags()->symbolize) {
- Symbolizer::Init(common_flags()->external_symbolizer_path);
- } else {
- Symbolizer::Disable();
- }
-
// On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
// should be set to 1 prior to initializing the threads.
asan_inited = 1;
@@ -544,8 +663,10 @@ void __asan_init() {
if (flags()->atexit)
Atexit(asan_atexit);
- if (flags()->coverage)
+ if (common_flags()->coverage) {
+ __sanitizer_cov_init();
Atexit(__sanitizer_cov_dump);
+ }
// interceptors
InitTlsSize();
@@ -559,15 +680,93 @@ void __asan_init() {
SetCurrentThread(main_thread);
main_thread->ThreadStart(internal_getpid());
force_interface_symbols(); // no-op.
+ SanitizerInitializeUnwinder();
#if CAN_SANITIZE_LEAKS
- __lsan::InitCommonLsan();
+ __lsan::InitCommonLsan(false);
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
Atexit(__lsan::DoLeakCheck);
}
#endif // CAN_SANITIZE_LEAKS
- if (common_flags()->verbosity) {
- Report("AddressSanitizer Init done\n");
+ VReport(1, "AddressSanitizer Init done\n");
+}
+
+// Initialize as requested from some part of the ASan runtime library
+// (interceptors, allocator, etc.).
+void AsanInitFromRtl() {
+ AsanInitInternal();
+}
+
+#if ASAN_DYNAMIC
+// Initialize the runtime in case it's LD_PRELOAD-ed into an unsanitized
+// executable (and thus the normal initializer from .preinit_array hasn't run).
+
+class AsanInitializer {
+public: // NOLINT
+ AsanInitializer() {
+ AsanCheckIncompatibleRT();
+ AsanCheckDynamicRTPrereqs();
+ if (UNLIKELY(!asan_inited))
+ __asan_init();
}
+};
+
+static AsanInitializer asan_initializer;
+#endif // ASAN_DYNAMIC
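The AsanInitializer object above leans on ordinary C++ dynamic initialization: a namespace-scope object inside the preloaded runtime is constructed when the shared object is loaded, before main() runs. A minimal stand-alone sketch of that technique (plain C++, none of the ASan symbols):

#include <cstdio>

namespace {
struct EarlyInit {
  // Runs during dynamic initialization of this translation unit, i.e. before
  // main() -- the same hook AsanInitializer uses to bootstrap the runtime.
  EarlyInit() { std::puts("runtime bootstrap runs before main()"); }
};
EarlyInit early_init;
}  // namespace

int main() {
  std::puts("main() starts afterwards");
  return 0;
}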
+
+} // namespace __asan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan; // NOLINT
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char* __asan_default_options() { return ""; }
+} // extern "C"
+#endif
+
+int NOINLINE __asan_set_error_exit_code(int exit_code) {
+ int old = flags()->exitcode;
+ flags()->exitcode = exit_code;
+ return old;
+}
+
+void NOINLINE __asan_handle_no_return() {
+ int local_stack;
+ AsanThread *curr_thread = GetCurrentThread();
+ CHECK(curr_thread);
+ uptr PageSize = GetPageSizeCached();
+ uptr top = curr_thread->stack_top();
+ uptr bottom = ((uptr)&local_stack - PageSize) & ~(PageSize-1);
+ static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
+ if (top - bottom > kMaxExpectedCleanupSize) {
+ static bool reported_warning = false;
+ if (reported_warning)
+ return;
+ reported_warning = true;
+ Report("WARNING: ASan is ignoring requested __asan_handle_no_return: "
+ "stack top: %p; bottom %p; size: %p (%zd)\n"
+ "False positive error reports may follow\n"
+ "For details see "
+ "http://code.google.com/p/address-sanitizer/issues/detail?id=189\n",
+ top, bottom, top - bottom, top - bottom);
+ return;
+ }
+ PoisonShadow(bottom, top - bottom, 0);
+ if (curr_thread->has_fake_stack())
+ curr_thread->fake_stack()->HandleNoReturn();
+}
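The interesting part of __asan_handle_no_return above is how it approximates the region to unpoison: everything between a page-aligned address just below the current stack pointer and the thread's stack top. A stand-alone toy of that arithmetic, assuming a 4 KiB page size (the real code asks GetPageSizeCached()):

#include <cstdint>
#include <cstdio>

int main() {
  int local_stack;                    // its address approximates the stack pointer
  const uintptr_t kPageSize = 4096;   // assumed here; the runtime queries the OS
  uintptr_t approx_sp = (uintptr_t)&local_stack;
  // One page of slack, then round down to a page boundary; [bottom, top) then
  // covers every frame a longjmp or exception might abandon.
  uintptr_t bottom = (approx_sp - kPageSize) & ~(kPageSize - 1);
  std::printf("sp ~= %p, unpoison from %p upwards\n",
              (void *)approx_sp, (void *)bottom);
  return 0;
}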
+
+void NOINLINE __asan_set_death_callback(void (*callback)(void)) {
+ death_callback = callback;
+}
+
+// Initialize as requested from instrumented application code.
+// We use this call as a trigger to wake up ASan from the deactivated state.
+void __asan_init() {
+ AsanCheckIncompatibleRT();
+ AsanActivate();
+ AsanInitInternal();
}
diff --git a/libsanitizer/asan/asan_stack.cc b/libsanitizer/asan/asan_stack.cc
index 24cccbd196..96178e8247 100644
--- a/libsanitizer/asan/asan_stack.cc
+++ b/libsanitizer/asan/asan_stack.cc
@@ -10,40 +10,10 @@
// Code for ASan stack trace.
//===----------------------------------------------------------------------===//
#include "asan_internal.h"
-#include "asan_flags.h"
#include "asan_stack.h"
-#include "sanitizer_common/sanitizer_flags.h"
-
-namespace __asan {
-
-static bool MaybeCallAsanSymbolize(const void *pc, char *out_buffer,
- int out_size) {
- return (&__asan_symbolize) ? __asan_symbolize(pc, out_buffer, out_size)
- : false;
-}
-
-void PrintStack(const uptr *trace, uptr size) {
- StackTrace::PrintStack(trace, size, MaybeCallAsanSymbolize);
-}
-
-void PrintStack(StackTrace *stack) {
- PrintStack(stack->trace, stack->size);
-}
-
-} // namespace __asan
// ------------------ Interface -------------- {{{1
-// Provide default implementation of __asan_symbolize that does nothing
-// and may be overriden by user if he wants to use his own symbolization.
-// ASan on Windows has its own implementation of this.
-#if !SANITIZER_WINDOWS && !SANITIZER_SUPPORTS_WEAK_HOOKS
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
-bool __asan_symbolize(const void *pc, char *out_buffer, int out_size) {
- return false;
-}
-#endif
-
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
diff --git a/libsanitizer/asan/asan_stack.h b/libsanitizer/asan/asan_stack.h
index df7a9805f9..640c4cf4f7 100644
--- a/libsanitizer/asan/asan_stack.h
+++ b/libsanitizer/asan/asan_stack.h
@@ -19,53 +19,74 @@
namespace __asan {
-void PrintStack(StackTrace *stack);
-void PrintStack(const uptr *trace, uptr size);
-
-} // namespace __asan
-
// Get the stack trace with the given pc and bp.
// The pc will be in the position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame.
+ALWAYS_INLINE
+void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
+ uptr pc, uptr bp, void *context,
+ bool fast) {
#if SANITIZER_WINDOWS
-#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
- StackTrace stack; \
- stack.Unwind(max_s, pc, bp, 0, 0, fast)
+ stack->Unwind(max_depth, pc, bp, context, 0, 0, fast);
#else
-#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
- StackTrace stack; \
- { \
- AsanThread *t; \
- stack.size = 0; \
- if (asan_inited) { \
- if ((t = GetCurrentThread()) && !t->isUnwinding()) { \
- uptr stack_top = t->stack_top(); \
- uptr stack_bottom = t->stack_bottom(); \
- ScopedUnwinding unwind_scope(t); \
- stack.Unwind(max_s, pc, bp, stack_top, stack_bottom, fast); \
- } else if (t == 0 && !fast) { \
- /* If GetCurrentThread() has failed, try to do slow unwind anyways. */ \
- stack.Unwind(max_s, pc, bp, 0, 0, false); \
- } \
- } \
+ AsanThread *t;
+ stack->size = 0;
+ if (LIKELY(asan_inited)) {
+ if ((t = GetCurrentThread()) && !t->isUnwinding()) {
+ // On FreeBSD the slow unwinding that leverages _Unwind_Backtrace()
+ // yields the call stack of the signal's handler and not of the code
+ // that raised the signal (as it does on Linux).
+ if (SANITIZER_FREEBSD && t->isInDeadlySignal()) fast = true;
+ uptr stack_top = t->stack_top();
+ uptr stack_bottom = t->stack_bottom();
+ ScopedUnwinding unwind_scope(t);
+ stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast);
+ } else if (t == 0 && !fast) {
+ /* If GetCurrentThread() has failed, try to do slow unwind anyways. */
+ stack->Unwind(max_depth, pc, bp, context, 0, 0, false);
+ }
}
#endif // SANITIZER_WINDOWS
+}
+
+} // namespace __asan
 // NOTE: A rule of thumb is to retrieve the stack trace in the interceptors
 // as early as possible (in functions exposed to the user), as we generally
 // don't want the stack trace to contain functions from ASan internals.
-#define GET_STACK_TRACE(max_size, fast) \
- GET_STACK_TRACE_WITH_PC_AND_BP(max_size, \
- StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), fast)
+#define GET_STACK_TRACE(max_size, fast) \
+ BufferedStackTrace stack; \
+ if (max_size <= 2) { \
+ stack.size = max_size; \
+ if (max_size > 0) { \
+ stack.top_frame_bp = GET_CURRENT_FRAME(); \
+ stack.trace_buffer[0] = StackTrace::GetCurrentPc(); \
+ if (max_size > 1) \
+ stack.trace_buffer[1] = GET_CALLER_PC(); \
+ } \
+ } else { \
+ GetStackTraceWithPcBpAndContext(&stack, max_size, \
+ StackTrace::GetCurrentPc(), \
+ GET_CURRENT_FRAME(), 0, fast); \
+ }
+
+#define GET_STACK_TRACE_FATAL(pc, bp) \
+ BufferedStackTrace stack; \
+ GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, 0, \
+ common_flags()->fast_unwind_on_fatal)
-#define GET_STACK_TRACE_FATAL(pc, bp) \
- GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp, \
- common_flags()->fast_unwind_on_fatal)
+#define GET_STACK_TRACE_SIGNAL(pc, bp, context) \
+ BufferedStackTrace stack; \
+ GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context, \
+ common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
+#define GET_STACK_TRACE_CHECK_HERE \
+ GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check)
+
#define GET_STACK_TRACE_THREAD \
GET_STACK_TRACE(kStackTraceMax, true)
@@ -78,7 +99,13 @@ void PrintStack(const uptr *trace, uptr size);
#define PRINT_CURRENT_STACK() \
{ \
GET_STACK_TRACE_FATAL_HERE; \
- PrintStack(&stack); \
+ stack.Print(); \
+ }
+
+#define PRINT_CURRENT_STACK_CHECK() \
+ { \
+ GET_STACK_TRACE_CHECK_HERE; \
+ stack.Print(); \
}
#endif // ASAN_STACK_H
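For one- or two-frame traces the GET_STACK_TRACE macro above skips the unwinder entirely and records the PCs directly; in sanitizer_common, GET_CURRENT_FRAME() and GET_CALLER_PC() boil down to GCC builtins. A stand-alone sketch of that fast path, with no sanitizer types involved:

#include <cstdio>

__attribute__((noinline)) void capture_shallow_trace() {
  // Roughly what the max_size <= 2 branch stores into the BufferedStackTrace:
  void *top_frame_bp = __builtin_frame_address(0);   // ~ GET_CURRENT_FRAME()
  void *caller_pc    = __builtin_return_address(0);  // ~ GET_CALLER_PC()
  std::printf("bp=%p caller pc=%p\n", top_frame_bp, caller_pc);
}

int main() {
  capture_shallow_trace();
  return 0;
}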
diff --git a/libsanitizer/asan/asan_stats.cc b/libsanitizer/asan/asan_stats.cc
index 71c8582e81..fbd636ea55 100644
--- a/libsanitizer/asan/asan_stats.cc
+++ b/libsanitizer/asan/asan_stats.cc
@@ -13,6 +13,7 @@
#include "asan_internal.h"
#include "asan_stats.h"
#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
@@ -127,8 +128,8 @@ static void PrintAccumulatedStats() {
BlockingMutexLock lock(&print_lock);
stats.Print();
StackDepotStats *stack_depot_stats = StackDepotGetStats();
- Printf("Stats: StackDepot: %zd ids; %zdM mapped\n",
- stack_depot_stats->n_uniq_ids, stack_depot_stats->mapped >> 20);
+ Printf("Stats: StackDepot: %zd ids; %zdM allocated\n",
+ stack_depot_stats->n_uniq_ids, stack_depot_stats->allocated >> 20);
PrintInternalAllocatorStats();
}
@@ -137,7 +138,7 @@ static void PrintAccumulatedStats() {
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
-uptr __asan_get_current_allocated_bytes() {
+uptr __sanitizer_get_current_allocated_bytes() {
AsanStats stats;
GetAccumulatedStats(&stats);
uptr malloced = stats.malloced;
@@ -147,13 +148,13 @@ uptr __asan_get_current_allocated_bytes() {
return (malloced > freed) ? malloced - freed : 1;
}
-uptr __asan_get_heap_size() {
+uptr __sanitizer_get_heap_size() {
AsanStats stats;
GetAccumulatedStats(&stats);
return stats.mmaped - stats.munmaped;
}
-uptr __asan_get_free_bytes() {
+uptr __sanitizer_get_free_bytes() {
AsanStats stats;
GetAccumulatedStats(&stats);
uptr total_free = stats.mmaped
@@ -167,7 +168,7 @@ uptr __asan_get_free_bytes() {
return (total_free > total_used) ? total_free - total_used : 1;
}
-uptr __asan_get_unmapped_bytes() {
+uptr __sanitizer_get_unmapped_bytes() {
return 0;
}
diff --git a/libsanitizer/asan/asan_thread.cc b/libsanitizer/asan/asan_thread.cc
index 5a9c2dddff..95ca672009 100644
--- a/libsanitizer/asan/asan_thread.cc
+++ b/libsanitizer/asan/asan_thread.cc
@@ -18,6 +18,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan/lsan_common.h"
namespace __asan {
@@ -27,7 +28,7 @@ namespace __asan {
void AsanThreadContext::OnCreated(void *arg) {
CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
if (args->stack)
- stack_id = StackDepotPut(args->stack->trace, args->stack->size);
+ stack_id = StackDepotPut(*args->stack);
thread = args->thread;
thread->set_context(this);
}
@@ -76,7 +77,7 @@ AsanThread *AsanThread::Create(thread_callback_t start_routine,
void *arg) {
uptr PageSize = GetPageSizeCached();
uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
- AsanThread *thread = (AsanThread*)MmapOrDie(size, __FUNCTION__);
+ AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
thread->start_routine_ = start_routine;
thread->arg_ = arg;
@@ -85,28 +86,27 @@ AsanThread *AsanThread::Create(thread_callback_t start_routine,
void AsanThread::TSDDtor(void *tsd) {
AsanThreadContext *context = (AsanThreadContext*)tsd;
- if (common_flags()->verbosity >= 1)
- Report("T%d TSDDtor\n", context->tid);
+ VReport(1, "T%d TSDDtor\n", context->tid);
if (context->thread)
context->thread->Destroy();
}
void AsanThread::Destroy() {
- if (common_flags()->verbosity >= 1) {
- Report("T%d exited\n", tid());
- }
+ int tid = this->tid();
+ VReport(1, "T%d exited\n", tid);
malloc_storage().CommitBack();
- if (flags()->use_sigaltstack) UnsetAlternateSignalStack();
- asanThreadRegistry().FinishThread(tid());
+ if (common_flags()->use_sigaltstack) UnsetAlternateSignalStack();
+ asanThreadRegistry().FinishThread(tid);
FlushToDeadThreadStats(&stats_);
// We also clear the shadow on thread destruction because
// some code may still be executing in later TSD destructors
// and we don't want it to have any poisoned stack.
ClearShadowForThreadStackAndTLS();
- DeleteFakeStack();
+ DeleteFakeStack(tid);
uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
UnmapOrDie(this, size);
+ DTLS_Destroy();
}
 // We want to create the FakeStack lazily on the first use, but not earlier
@@ -121,13 +121,16 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
// 1 -- being initialized
// ptr -- initialized
// This CAS checks if the state was 0 and if so changes it to state 1,
- // if that was successfull, it initilizes the pointer.
+ // if that was successful, it initializes the pointer.
if (atomic_compare_exchange_strong(
reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
memory_order_relaxed)) {
uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
- if (flags()->uar_stack_size_log)
- stack_size_log = static_cast<uptr>(flags()->uar_stack_size_log);
+ CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
+ stack_size_log =
+ Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
+ stack_size_log =
+ Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
fake_stack_ = FakeStack::Create(stack_size_log);
SetTLSFakeStack(fake_stack_);
return fake_stack_;
@@ -136,24 +139,24 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
}
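The 0 / 1 / pointer state machine used by AsyncSignalSafeLazyInitFakeStack is a generic lock-free lazy-initialization pattern. A stand-alone sketch with C++11 atomics (the runtime uses its own atomic wrappers, not <atomic>):

#include <atomic>
#include <cstdint>
#include <cstdio>

// 0 = not initialized, 1 = some thread is initializing, otherwise the pointer.
static std::atomic<std::uintptr_t> g_state{0};

int *LazyGet() {
  std::uintptr_t expected = 0;
  if (g_state.compare_exchange_strong(expected, 1, std::memory_order_relaxed)) {
    int *p = new int(42);                                // one-time expensive init
    g_state.store((std::uintptr_t)p, std::memory_order_release);
    return p;
  }
  // Either fully initialized or still being initialized; never block, just
  // report "not ready yet" -- that is what keeps the real code signal-safe.
  std::uintptr_t v = g_state.load(std::memory_order_acquire);
  return v > 1 ? (int *)v : nullptr;
}

int main() {
  int *p = LazyGet();
  std::printf("%d\n", p ? *p : -1);
  return 0;
}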
void AsanThread::Init() {
+ fake_stack_ = 0; // Will be initialized lazily if needed.
+ CHECK_EQ(this->stack_size(), 0U);
SetThreadStackAndTls();
+ CHECK_GT(this->stack_size(), 0U);
CHECK(AddrIsInMem(stack_bottom_));
CHECK(AddrIsInMem(stack_top_ - 1));
ClearShadowForThreadStackAndTLS();
- if (common_flags()->verbosity >= 1) {
- int local = 0;
- Report("T%d: stack [%p,%p) size 0x%zx; local=%p\n",
- tid(), (void*)stack_bottom_, (void*)stack_top_,
- stack_top_ - stack_bottom_, &local);
- }
- fake_stack_ = 0; // Will be initialized lazily if needed.
+ int local = 0;
+ VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
+ (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
+ &local);
AsanPlatformThreadInit();
}
thread_return_t AsanThread::ThreadStart(uptr os_id) {
Init();
asanThreadRegistry().StartThread(tid(), os_id, 0);
- if (flags()->use_sigaltstack) SetAlternateSignalStack();
+ if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
if (!start_routine_) {
// start_routine_ == 0 if we're on the main thread or on one of the
@@ -193,17 +196,18 @@ void AsanThread::ClearShadowForThreadStackAndTLS() {
PoisonShadow(tls_begin_, tls_end_ - tls_begin_, 0);
}
-const char *AsanThread::GetFrameNameByAddr(uptr addr, uptr *offset,
- uptr *frame_pc) {
+bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
+ StackFrameAccess *access) {
uptr bottom = 0;
if (AddrIsInStack(addr)) {
bottom = stack_bottom();
} else if (has_fake_stack()) {
bottom = fake_stack()->AddrIsInFakeStack(addr);
CHECK(bottom);
- *offset = addr - bottom;
- *frame_pc = ((uptr*)bottom)[2];
- return (const char *)((uptr*)bottom)[1];
+ access->offset = addr - bottom;
+ access->frame_pc = ((uptr*)bottom)[2];
+ access->frame_descr = (const char *)((uptr*)bottom)[1];
+ return true;
}
uptr aligned_addr = addr & ~(SANITIZER_WORDSIZE/8 - 1); // align addr.
u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
@@ -220,15 +224,15 @@ const char *AsanThread::GetFrameNameByAddr(uptr addr, uptr *offset,
}
if (shadow_ptr < shadow_bottom) {
- *offset = 0;
- return "UNKNOWN";
+ return false;
}
uptr* ptr = (uptr*)SHADOW_TO_MEM((uptr)(shadow_ptr + 1));
CHECK(ptr[0] == kCurrentStackFrameMagic);
- *offset = addr - (uptr)ptr;
- *frame_pc = ptr[2];
- return (const char*)ptr[1];
+ access->offset = addr - (uptr)ptr;
+ access->frame_pc = ptr[2];
+ access->frame_descr = (const char*)ptr[1];
+ return true;
}
static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
@@ -265,10 +269,8 @@ AsanThread *GetCurrentThread() {
void SetCurrentThread(AsanThread *t) {
CHECK(t->context());
- if (common_flags()->verbosity >= 2) {
- Report("SetCurrentThread: %p for thread %p\n",
- t->context(), (void*)GetThreadSelf());
- }
+ VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(),
+ (void *)GetThreadSelf());
// Make sure we do not reset the current AsanThread.
CHECK_EQ(0, AsanTSDGet());
AsanTSDSet(t->context());
diff --git a/libsanitizer/asan/asan_thread.h b/libsanitizer/asan/asan_thread.h
index 5a917fa9a3..f5ea53d051 100644
--- a/libsanitizer/asan/asan_thread.h
+++ b/libsanitizer/asan/asan_thread.h
@@ -15,7 +15,6 @@
#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_fake_stack.h"
-#include "asan_stack.h"
#include "asan_stats.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
@@ -70,18 +69,23 @@ class AsanThread {
AsanThreadContext *context() { return context_; }
void set_context(AsanThreadContext *context) { context_ = context; }
- const char *GetFrameNameByAddr(uptr addr, uptr *offset, uptr *frame_pc);
+ struct StackFrameAccess {
+ uptr offset;
+ uptr frame_pc;
+ const char *frame_descr;
+ };
+ bool GetStackFrameAccessByAddr(uptr addr, StackFrameAccess *access);
bool AddrIsInStack(uptr addr) {
return addr >= stack_bottom_ && addr < stack_top_;
}
- void DeleteFakeStack() {
+ void DeleteFakeStack(int tid) {
if (!fake_stack_) return;
FakeStack *t = fake_stack_;
fake_stack_ = 0;
SetTLSFakeStack(0);
- t->Destroy();
+ t->Destroy(tid);
}
bool has_fake_stack() {
@@ -102,6 +106,10 @@ class AsanThread {
bool isUnwinding() const { return unwinding_; }
void setUnwinding(bool b) { unwinding_ = b; }
+ // True if we are in a deadly signal handler.
+ bool isInDeadlySignal() const { return in_deadly_signal_; }
+ void setInDeadlySignal(bool b) { in_deadly_signal_ = b; }
+
AsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
AsanStats &stats() { return stats_; }
@@ -127,6 +135,7 @@ class AsanThread {
AsanThreadLocalMallocStorage malloc_storage_;
AsanStats stats_;
bool unwinding_;
+ bool in_deadly_signal_;
};
// ScopedUnwinding is a scope for stacktracing member of a context
@@ -141,6 +150,20 @@ class ScopedUnwinding {
AsanThread *thread;
};
+// ScopedDeadlySignal is a scope for handling deadly signals.
+class ScopedDeadlySignal {
+ public:
+ explicit ScopedDeadlySignal(AsanThread *t) : thread(t) {
+ if (thread) thread->setInDeadlySignal(true);
+ }
+ ~ScopedDeadlySignal() {
+ if (thread) thread->setInDeadlySignal(false);
+ }
+
+ private:
+ AsanThread *thread;
+};
+
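ScopedDeadlySignal, like ScopedUnwinding above it, is a plain RAII flag: the bit is guaranteed to be cleared on every exit path from the guarded code. A minimal stand-alone version of the pattern, with none of the ASan types:

#include <cstdio>

struct ThreadState { bool in_deadly_signal = false; };

class ScopedFlag {
 public:
  explicit ScopedFlag(ThreadState *t) : t_(t) {
    if (t_) t_->in_deadly_signal = true;
  }
  ~ScopedFlag() {
    if (t_) t_->in_deadly_signal = false;  // cleared even on early return
  }
 private:
  ThreadState *t_;
};

static void FakeSignalHandler(ThreadState *t) {
  ScopedFlag scope(t);
  std::printf("inside handler: %d\n", t->in_deadly_signal);
}

int main() {
  ThreadState t;
  FakeSignalHandler(&t);
  std::printf("after handler: %d\n", t.in_deadly_signal);
  return 0;
}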
struct CreateThreadContextArgs {
AsanThread *thread;
StackTrace *stack;
diff --git a/libsanitizer/asan/asan_win.cc b/libsanitizer/asan/asan_win.cc
index 8ffa58faa3..b0028763b1 100644
--- a/libsanitizer/asan/asan_win.cc
+++ b/libsanitizer/asan/asan_win.cc
@@ -19,6 +19,7 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
+#include "asan_report.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h"
@@ -33,11 +34,6 @@ extern "C" {
namespace __asan {
-// ---------------------- Stacktraces, symbols, etc. ---------------- {{{1
-static BlockingMutex dbghelp_lock(LINKER_INITIALIZED);
-static bool dbghelp_initialized = false;
-#pragma comment(lib, "dbghelp.lib")
-
// ---------------------- TSD ---------------- {{{1
static bool tsd_key_inited = false;
@@ -73,17 +69,9 @@ void *AsanDoesNotSupportStaticLinkage() {
return 0;
}
-void SetAlternateSignalStack() {
- // FIXME: Decide what to do on Windows.
-}
-
-void UnsetAlternateSignalStack() {
- // FIXME: Decide what to do on Windows.
-}
+void AsanCheckDynamicRTPrereqs() {}
-void InstallSignalHandlers() {
- // FIXME: Decide what to do on Windows.
-}
+void AsanCheckIncompatibleRT() {}
void AsanPlatformThreadInit() {
// Nothing here for now.
@@ -93,54 +81,71 @@ void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED();
}
-} // namespace __asan
+void AsanOnSIGSEGV(int, void *siginfo, void *context) {
+ UNIMPLEMENTED();
+}
-// ---------------------- Interface ---------------- {{{1
-using namespace __asan; // NOLINT
+static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE NOINLINE
-bool __asan_symbolize(const void *addr, char *out_buffer, int buffer_size) {
- BlockingMutexLock lock(&dbghelp_lock);
- if (!dbghelp_initialized) {
- SymSetOptions(SYMOPT_DEFERRED_LOADS |
- SYMOPT_UNDNAME |
- SYMOPT_LOAD_LINES);
- CHECK(SymInitialize(GetCurrentProcess(), 0, TRUE));
- // FIXME: We don't call SymCleanup() on exit yet - should we?
- dbghelp_initialized = true;
- }
+static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
+ EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
+ CONTEXT *context = info->ContextRecord;
+ uptr pc = (uptr)exception_record->ExceptionAddress;
+#ifdef _WIN64
+ uptr bp = (uptr)context->Rbp, sp = (uptr)context->Rsp;
+#else
+ uptr bp = (uptr)context->Ebp, sp = (uptr)context->Esp;
+#endif
- // See http://msdn.microsoft.com/en-us/library/ms680578(VS.85).aspx
- char buffer[sizeof(SYMBOL_INFO) + MAX_SYM_NAME * sizeof(CHAR)];
- PSYMBOL_INFO symbol = (PSYMBOL_INFO)buffer;
- symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
- symbol->MaxNameLen = MAX_SYM_NAME;
- DWORD64 offset = 0;
- BOOL got_objname = SymFromAddr(GetCurrentProcess(),
- (DWORD64)addr, &offset, symbol);
- if (!got_objname)
- return false;
-
- DWORD unused;
- IMAGEHLP_LINE64 info;
- info.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
- BOOL got_fileline = SymGetLineFromAddr64(GetCurrentProcess(),
- (DWORD64)addr, &unused, &info);
- int written = 0;
- out_buffer[0] = '\0';
- // FIXME: it might be useful to print out 'obj' or 'obj+offset' info too.
- if (got_fileline) {
- written += internal_snprintf(out_buffer + written, buffer_size - written,
- " %s %s:%d", symbol->Name,
- info.FileName, info.LineNumber);
- } else {
- written += internal_snprintf(out_buffer + written, buffer_size - written,
- " %s+0x%p", symbol->Name, offset);
+ if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
+ exception_record->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) {
+ const char *description =
+ (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
+ ? "access-violation"
+ : "in-page-error";
+ uptr access_addr = exception_record->ExceptionInformation[1];
+ ReportSIGSEGV(description, pc, sp, bp, context, access_addr);
}
- return true;
+
+ // FIXME: Handle EXCEPTION_STACK_OVERFLOW here.
+
+ return default_seh_handler(info);
}
-} // extern "C"
+// We want to install our own exception handler (EH) to print helpful reports
+// on access violations and whatnot. Unfortunately, the CRT initializers assume
+// they are run before any user code and drop any previously-installed EHs on
+// the floor, so we can't install our handler inside __asan_init.
+// (See crt0dat.c in the CRT sources for the details)
+//
+// Things get even more complicated with the dynamic runtime, as it finishes its
+// initialization before the .exe module CRT begins to initialize.
+//
+// For the static runtime (-MT), it's enough to put a callback to
+// __asan_set_seh_filter in the last section for C initializers.
+//
+// For the dynamic runtime (-MD), we want to link the same
+// asan_dynamic_runtime_thunk.lib to all the modules, thus __asan_set_seh_filter
+// will be called for each instrumented module. This ensures that at least one
+// __asan_set_seh_filter call happens after the .exe module CRT is initialized.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+int __asan_set_seh_filter() {
+ // We should only store the previous handler if it's not our own handler in
+ // order to avoid loops in the EH chain.
+ auto prev_seh_handler = SetUnhandledExceptionFilter(SEHHandler);
+ if (prev_seh_handler != &SEHHandler)
+ default_seh_handler = prev_seh_handler;
+ return 0;
+}
+
+#if !ASAN_DYNAMIC
+// Put a pointer to __asan_set_seh_filter at the end of the global list
+// of C initializers, after the default EH is set by the CRT.
+#pragma section(".CRT$XIZ", long, read) // NOLINT
+static __declspec(allocate(".CRT$XIZ"))
+ int (*__intercept_seh)() = __asan_set_seh_filter;
+#endif
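The #pragma section / __declspec(allocate) pair is the standard MSVC way to append a callback to the CRT's initializer table; the same trick reappears in the new asan_win_dll_thunk.cc and asan_win_dynamic_runtime_thunk.cc files later in this patch. A stand-alone sketch, assuming an MSVC build (the guarded block compiles away elsewhere):

#include <cstdio>

#ifdef _MSC_VER
// The callback returns int; as noted for the DLL thunk, a non-zero return from
// a .CRT$XI* callback can make CRT initialization fail, so return 0.
static int EarlyHello() {
  std::puts("CRT initializer section callback");
  return 0;
}
#pragma section(".CRT$XIZ", long, read)
__declspec(allocate(".CRT$XIZ")) static int (*early_hello_ptr)() = EarlyHello;
#endif

int main() {
  std::puts("main");
  return 0;
}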
+
+} // namespace __asan
#endif // _WIN32
diff --git a/libsanitizer/asan/asan_win_dll_thunk.cc b/libsanitizer/asan/asan_win_dll_thunk.cc
new file mode 100644
index 0000000000..6adb7d2e94
--- /dev/null
+++ b/libsanitizer/asan/asan_win_dll_thunk.cc
@@ -0,0 +1,374 @@
+//===-- asan_win_dll_thunk.cc ---------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This file defines a family of thunks that should be statically linked into
+// the DLLs that have ASan instrumentation in order to delegate the calls to the
+// shared runtime that lives in the main binary.
+// See https://code.google.com/p/address-sanitizer/issues/detail?id=209 for the
+// details.
+//===----------------------------------------------------------------------===//
+
+// Only compile this code when building asan_dll_thunk.lib
+// Using #ifdef rather than relying on Makefiles etc.
+// simplifies the build procedure.
+#ifdef ASAN_DLL_THUNK
+#include "asan_init_version.h"
+#include "sanitizer_common/sanitizer_interception.h"
+
+// ---------- Function interception helper functions and macros ----------- {{{1
+extern "C" {
+void *__stdcall GetModuleHandleA(const char *module_name);
+void *__stdcall GetProcAddress(void *module, const char *proc_name);
+void abort();
+}
+
+static void *getRealProcAddressOrDie(const char *name) {
+ void *ret = GetProcAddress(GetModuleHandleA(0), name);
+ if (!ret)
+ abort();
+ return ret;
+}
+
+// We need to intercept some functions (e.g. ASan interface, memory allocator --
+// let's call them "hooks") exported by the DLL thunk and forward the hooks to
+// the runtime in the main module.
+// However, we don't want to keep two lists of these hooks.
+// To avoid that, the list of hooks should be defined using the
+// INTERCEPT_WHEN_POSSIBLE macro. Then, all these hooks can be intercepted
+// at once by calling INTERCEPT_HOOKS().
+
+// Use macro+template magic to automatically generate the list of hooks.
+// Each hook at line LINE defines a template class with a static
+// FunctionInterceptor<LINE>::Execute() method intercepting the hook.
+// The default implementation of FunctionInterceptor<LINE> is to call
+// the Execute() method corresponding to the previous line.
+template<int LINE>
+struct FunctionInterceptor {
+ static void Execute() { FunctionInterceptor<LINE-1>::Execute(); }
+};
+
+// There shouldn't be any hooks with negative definition line number.
+template<>
+struct FunctionInterceptor<0> {
+ static void Execute() {}
+};
+
+#define INTERCEPT_WHEN_POSSIBLE(main_function, dll_function) \
+ template<> struct FunctionInterceptor<__LINE__> { \
+ static void Execute() { \
+ void *wrapper = getRealProcAddressOrDie(main_function); \
+ if (!__interception::OverrideFunction((uptr)dll_function, \
+ (uptr)wrapper, 0)) \
+ abort(); \
+ FunctionInterceptor<__LINE__-1>::Execute(); \
+ } \
+ };
+
+// Special case of hooks -- ASan's own interface functions. Those are only called
+// after __asan_init, thus an empty implementation is sufficient.
+#define INTERFACE_FUNCTION(name) \
+ extern "C" __declspec(noinline) void name() { \
+ volatile int prevent_icf = (__LINE__ << 8); (void)prevent_icf; \
+ __debugbreak(); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name)
+
+// INTERCEPT_HOOKS must be used after the last INTERCEPT_WHEN_POSSIBLE.
+#define INTERCEPT_HOOKS FunctionInterceptor<__LINE__>::Execute
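The FunctionInterceptor<__LINE__> trick builds a compile-time chain of hooks with no central registry: each INTERCEPT_WHEN_POSSIBLE specialization does its work and then delegates to the previous line's specialization, and unspecialized lines simply fall through. A stand-alone toy of the same chaining (registration only, no actual interception):

#include <cstdio>

template <int Line> struct Step {
  static void Run() { Step<Line - 1>::Run(); }  // unregistered line: fall through
};
template <> struct Step<0> {
  static void Run() {}  // chain terminator
};

#define REGISTER_STEP(msg)                    \
  template <> struct Step<__LINE__> {         \
    static void Run() {                       \
      std::puts(msg);                         \
      Step<__LINE__ - 1>::Run();              \
    }                                         \
  };

REGISTER_STEP("hook A")
REGISTER_STEP("hook B")

int main() {
  Step<__LINE__>::Run();  // like INTERCEPT_HOOKS(): walk back over all lines
  return 0;
}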
+
+// We can't define our own version of strlen etc. because that would lead to
+// link-time or even type mismatch errors. Instead, we can declare a function
+// just to be able to get its address. We may miss the first few calls to these
+// functions since they can be called before __asan_init, but that would only
+// lead to false negatives in the startup code before the user's global
+// initializers, which isn't a big deal.
+#define INTERCEPT_LIBRARY_FUNCTION(name) \
+ extern "C" void name(); \
+ INTERCEPT_WHEN_POSSIBLE(WRAPPER_NAME(name), name)
+
+// Disable compiler warnings that show up if we declare our own version
+// of a compiler intrinsic (e.g. strlen).
+#pragma warning(disable: 4391)
+#pragma warning(disable: 4392)
+
+static void InterceptHooks();
+// }}}
+
+// ---------- Function wrapping helpers ----------------------------------- {{{1
+#define WRAP_V_V(name) \
+ extern "C" void name() { \
+ typedef void (*fntype)(); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ fn(); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_V_W(name) \
+ extern "C" void name(void *arg) { \
+ typedef void (*fntype)(void *arg); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ fn(arg); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_V_WW(name) \
+ extern "C" void name(void *arg1, void *arg2) { \
+ typedef void (*fntype)(void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ fn(arg1, arg2); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_V_WWW(name) \
+ extern "C" void name(void *arg1, void *arg2, void *arg3) { \
+ typedef void *(*fntype)(void *, void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ fn(arg1, arg2, arg3); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_V(name) \
+ extern "C" void *name() { \
+ typedef void *(*fntype)(); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_W(name) \
+ extern "C" void *name(void *arg) { \
+ typedef void *(*fntype)(void *arg); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(arg); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_WW(name) \
+ extern "C" void *name(void *arg1, void *arg2) { \
+ typedef void *(*fntype)(void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(arg1, arg2); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_WWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3) { \
+ typedef void *(*fntype)(void *, void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(arg1, arg2, arg3); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_WWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4) { \
+ typedef void *(*fntype)(void *, void *, void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_WWWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
+ void *arg5) { \
+ typedef void *(*fntype)(void *, void *, void *, void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4, arg5); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_WWWWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
+ void *arg5, void *arg6) { \
+ typedef void *(*fntype)(void *, void *, void *, void *, void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4, arg5, arg6); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+// }}}
+
+// ----------------- ASan own interface functions --------------------
+// Don't use the INTERFACE_FUNCTION machinery for this function as we actually
+// want to call it in the __asan_init interceptor.
+WRAP_W_V(__asan_should_detect_stack_use_after_return)
+
+extern "C" {
+ int __asan_option_detect_stack_use_after_return;
+
+ // Manually wrap __asan_init as we need to initialize
+ // __asan_option_detect_stack_use_after_return afterwards.
+ void __asan_init() {
+ typedef void (*fntype)();
+ static fntype fn = 0;
+ // __asan_init is expected to be called by only one thread.
+ if (fn) return;
+
+ fn = (fntype)getRealProcAddressOrDie(__asan_init_name);
+ fn();
+ __asan_option_detect_stack_use_after_return =
+ (__asan_should_detect_stack_use_after_return() != 0);
+
+ InterceptHooks();
+ }
+}
+
+INTERFACE_FUNCTION(__asan_handle_no_return)
+
+INTERFACE_FUNCTION(__asan_report_store1)
+INTERFACE_FUNCTION(__asan_report_store2)
+INTERFACE_FUNCTION(__asan_report_store4)
+INTERFACE_FUNCTION(__asan_report_store8)
+INTERFACE_FUNCTION(__asan_report_store16)
+INTERFACE_FUNCTION(__asan_report_store_n)
+
+INTERFACE_FUNCTION(__asan_report_load1)
+INTERFACE_FUNCTION(__asan_report_load2)
+INTERFACE_FUNCTION(__asan_report_load4)
+INTERFACE_FUNCTION(__asan_report_load8)
+INTERFACE_FUNCTION(__asan_report_load16)
+INTERFACE_FUNCTION(__asan_report_load_n)
+
+INTERFACE_FUNCTION(__asan_store1)
+INTERFACE_FUNCTION(__asan_store2)
+INTERFACE_FUNCTION(__asan_store4)
+INTERFACE_FUNCTION(__asan_store8)
+INTERFACE_FUNCTION(__asan_store16)
+INTERFACE_FUNCTION(__asan_storeN)
+
+INTERFACE_FUNCTION(__asan_load1)
+INTERFACE_FUNCTION(__asan_load2)
+INTERFACE_FUNCTION(__asan_load4)
+INTERFACE_FUNCTION(__asan_load8)
+INTERFACE_FUNCTION(__asan_load16)
+INTERFACE_FUNCTION(__asan_loadN)
+
+INTERFACE_FUNCTION(__asan_memcpy);
+INTERFACE_FUNCTION(__asan_memset);
+INTERFACE_FUNCTION(__asan_memmove);
+
+INTERFACE_FUNCTION(__asan_register_globals)
+INTERFACE_FUNCTION(__asan_unregister_globals)
+
+INTERFACE_FUNCTION(__asan_before_dynamic_init)
+INTERFACE_FUNCTION(__asan_after_dynamic_init)
+
+INTERFACE_FUNCTION(__asan_poison_stack_memory)
+INTERFACE_FUNCTION(__asan_unpoison_stack_memory)
+
+INTERFACE_FUNCTION(__asan_poison_memory_region)
+INTERFACE_FUNCTION(__asan_unpoison_memory_region)
+
+INTERFACE_FUNCTION(__asan_address_is_poisoned)
+INTERFACE_FUNCTION(__asan_region_is_poisoned)
+
+INTERFACE_FUNCTION(__asan_get_current_fake_stack)
+INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack)
+
+INTERFACE_FUNCTION(__asan_stack_malloc_0)
+INTERFACE_FUNCTION(__asan_stack_malloc_1)
+INTERFACE_FUNCTION(__asan_stack_malloc_2)
+INTERFACE_FUNCTION(__asan_stack_malloc_3)
+INTERFACE_FUNCTION(__asan_stack_malloc_4)
+INTERFACE_FUNCTION(__asan_stack_malloc_5)
+INTERFACE_FUNCTION(__asan_stack_malloc_6)
+INTERFACE_FUNCTION(__asan_stack_malloc_7)
+INTERFACE_FUNCTION(__asan_stack_malloc_8)
+INTERFACE_FUNCTION(__asan_stack_malloc_9)
+INTERFACE_FUNCTION(__asan_stack_malloc_10)
+
+INTERFACE_FUNCTION(__asan_stack_free_0)
+INTERFACE_FUNCTION(__asan_stack_free_1)
+INTERFACE_FUNCTION(__asan_stack_free_2)
+INTERFACE_FUNCTION(__asan_stack_free_4)
+INTERFACE_FUNCTION(__asan_stack_free_5)
+INTERFACE_FUNCTION(__asan_stack_free_6)
+INTERFACE_FUNCTION(__asan_stack_free_7)
+INTERFACE_FUNCTION(__asan_stack_free_8)
+INTERFACE_FUNCTION(__asan_stack_free_9)
+INTERFACE_FUNCTION(__asan_stack_free_10)
+
+INTERFACE_FUNCTION(__sanitizer_cov_module_init)
+
+// TODO(timurrrr): Add more interface functions on an as-needed basis.
+
+// ----------------- Memory allocation functions ---------------------
+WRAP_V_W(free)
+WRAP_V_WW(_free_dbg)
+
+WRAP_W_W(malloc)
+WRAP_W_WWWW(_malloc_dbg)
+
+WRAP_W_WW(calloc)
+WRAP_W_WWWWW(_calloc_dbg)
+WRAP_W_WWW(_calloc_impl)
+
+WRAP_W_WW(realloc)
+WRAP_W_WWW(_realloc_dbg)
+WRAP_W_WWW(_recalloc)
+
+WRAP_W_W(_msize)
+WRAP_W_W(_expand)
+WRAP_W_W(_expand_dbg)
+
+// TODO(timurrrr): Might want to add support for _aligned_* allocation
+// functions to detect a bit more bugs. Those functions seem to wrap malloc().
+
+// TODO(timurrrr): Do we need to add _Crt* stuff here? (see asan_malloc_win.cc).
+
+INTERCEPT_LIBRARY_FUNCTION(atoi);
+INTERCEPT_LIBRARY_FUNCTION(atol);
+INTERCEPT_LIBRARY_FUNCTION(_except_handler3);
+
+// _except_handler4 checks -GS cookie which is different for each module, so we
+// can't use INTERCEPT_LIBRARY_FUNCTION(_except_handler4).
+INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
+ __asan_handle_no_return();
+ return REAL(_except_handler4)(a, b, c, d);
+}
+
+INTERCEPT_LIBRARY_FUNCTION(frexp);
+INTERCEPT_LIBRARY_FUNCTION(longjmp);
+INTERCEPT_LIBRARY_FUNCTION(memchr);
+INTERCEPT_LIBRARY_FUNCTION(memcmp);
+INTERCEPT_LIBRARY_FUNCTION(memcpy);
+INTERCEPT_LIBRARY_FUNCTION(memmove);
+INTERCEPT_LIBRARY_FUNCTION(memset);
+INTERCEPT_LIBRARY_FUNCTION(strcat); // NOLINT
+INTERCEPT_LIBRARY_FUNCTION(strchr);
+INTERCEPT_LIBRARY_FUNCTION(strcmp);
+INTERCEPT_LIBRARY_FUNCTION(strcpy); // NOLINT
+INTERCEPT_LIBRARY_FUNCTION(strlen);
+INTERCEPT_LIBRARY_FUNCTION(strncat);
+INTERCEPT_LIBRARY_FUNCTION(strncmp);
+INTERCEPT_LIBRARY_FUNCTION(strncpy);
+INTERCEPT_LIBRARY_FUNCTION(strnlen);
+INTERCEPT_LIBRARY_FUNCTION(strtol);
+INTERCEPT_LIBRARY_FUNCTION(wcslen);
+
+// Must be after all the interceptor declarations due to the way INTERCEPT_HOOKS
+// is defined.
+void InterceptHooks() {
+ INTERCEPT_HOOKS();
+ INTERCEPT_FUNCTION(_except_handler4);
+}
+
+// We want to call __asan_init before C/C++ initializers/constructors are
+// executed, otherwise functions like memset might be invoked.
+// For some strange reason, merely linking in asan_preinit.cc doesn't work
+// as the callback is never called... Is link.exe doing something too smart?
+
+// In DLLs, the callbacks are expected to return 0,
+// otherwise CRT initialization fails.
+static int call_asan_init() {
+ __asan_init();
+ return 0;
+}
+#pragma section(".CRT$XIB", long, read) // NOLINT
+__declspec(allocate(".CRT$XIB")) int (*__asan_preinit)() = call_asan_init;
+
+#endif // ASAN_DLL_THUNK
diff --git a/libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc b/libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc
new file mode 100644
index 0000000000..1b59677eef
--- /dev/null
+++ b/libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc
@@ -0,0 +1,50 @@
+//===-- asan_win_dynamic_runtime_thunk.cc --------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This file defines things that need to be present in the application modules
+// to interact with the ASan DLL runtime correctly and can't be implemented
+// using the default "import library" generated when linking the DLL RTL.
+//
+// This includes:
+// - forwarding the detect_stack_use_after_return runtime option
+// - installing a custom SEH handler
+//
+//===----------------------------------------------------------------------===//
+
+// Only compile this code when building asan_dynamic_runtime_thunk.lib
+// Using #ifdef rather than relying on Makefiles etc.
+// simplifies the build procedure.
+#ifdef ASAN_DYNAMIC_RUNTIME_THUNK
+extern "C" {
+__declspec(dllimport) int __asan_set_seh_filter();
+__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
+
+// Define a copy of __asan_option_detect_stack_use_after_return that should be
+// used when linking an MD runtime with a set of object files on Windows.
+//
+// The ASan MD runtime dllexports '__asan_option_detect_stack_use_after_return',
+// so normally we would just dllimport it. Unfortunately, the dllimport
+// attribute adds an __imp_ prefix to the symbol name of a variable.
+// Since in general we don't know if a given TU is going to be used
+// with an MT or MD runtime and we don't want to use ugly __imp_ names on
+// Windows just to work around this issue, let's clone the variable, which is
+// constant after initialization anyway.
+int __asan_option_detect_stack_use_after_return =
+ __asan_should_detect_stack_use_after_return();
+
+// Set the ASan-specific SEH handler at the end of CRT initialization of each
+// module (see asan_win.cc for the details).
+//
+// Unfortunately, putting a pointer to __asan_set_seh_filter into
+// __asan_intercept_seh gets optimized out, so we have to use an extra function.
+static int SetSEHFilter() { return __asan_set_seh_filter(); }
+#pragma section(".CRT$XIZ", long, read) // NOLINT
+__declspec(allocate(".CRT$XIZ")) int (*__asan_seh_interceptor)() = SetSEHFilter;
+}
+#endif // ASAN_DYNAMIC_RUNTIME_THUNK
diff --git a/libsanitizer/asan/libtool-version b/libsanitizer/asan/libtool-version
index 9a16cf5784..f18f40737c 100644
--- a/libsanitizer/asan/libtool-version
+++ b/libsanitizer/asan/libtool-version
@@ -3,4 +3,4 @@
# a separate file so that version updates don't involve re-running
# automake.
# CURRENT:REVISION:AGE
-1:0:0
+2:0:0
diff --git a/libsanitizer/configure b/libsanitizer/configure
index 5e4840ffea..9ded4cc778 100755
--- a/libsanitizer/configure
+++ b/libsanitizer/configure
@@ -604,6 +604,7 @@ ac_subst_vars='am__EXEEXT_FALSE
am__EXEEXT_TRUE
LTLIBOBJS
LIBOBJS
+TSAN_TARGET_DEPENDENT_OBJECTS
LIBBACKTRACE_SUPPORTED_FALSE
LIBBACKTRACE_SUPPORTED_TRUE
BACKTRACE_SUPPORTS_THREADS
@@ -659,7 +660,6 @@ toolexecdir
MAINT
MAINTAINER_MODE_FALSE
MAINTAINER_MODE_TRUE
-multi_basedir
am__fastdepCC_FALSE
am__fastdepCC_TRUE
CCDEPMODE
@@ -717,6 +717,7 @@ CPPFLAGS
LDFLAGS
CFLAGS
CC
+multi_basedir
target_alias
host_alias
build_alias
@@ -758,9 +759,9 @@ SHELL'
ac_subst_files=''
ac_user_opts='
enable_option_checking
+enable_multilib
enable_version_specific_runtime_libs
enable_dependency_tracking
-enable_multilib
enable_maintainer_mode
enable_shared
enable_static
@@ -1404,10 +1405,10 @@ Optional Features:
--disable-option-checking ignore unrecognized --enable/--with options
--disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
--enable-FEATURE[=ARG] include FEATURE [ARG=yes]
+ --enable-multilib build many library versions (default)
--enable-version-specific-runtime-libs Specify that runtime libraries should be installed in a compiler-specific directory
--disable-dependency-tracking speeds up one-time build
--enable-dependency-tracking do not reject slow dependency extractors
- --enable-multilib build many library versions (default)
--enable-maintainer-mode enable make rules and dependencies not useful
(and sometimes confusing) to the casual installer
--enable-shared[=PKGS] build shared libraries [default=yes]
@@ -2625,6 +2626,44 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
+# Default to --enable-multilib
+# Check whether --enable-multilib was given.
+if test "${enable_multilib+set}" = set; then :
+ enableval=$enable_multilib; case "$enableval" in
+ yes) multilib=yes ;;
+ no) multilib=no ;;
+ *) as_fn_error "bad value $enableval for multilib option" "$LINENO" 5 ;;
+ esac
+else
+ multilib=yes
+fi
+
+
+# We may get other options which we leave undocumented:
+# --with-target-subdir, --with-multisrctop, --with-multisubdir
+# See config-ml.in if you want the gory details.
+
+if test "$srcdir" = "."; then
+ if test "$with_target_subdir" != "."; then
+ multi_basedir="$srcdir/$with_multisrctop../.."
+ else
+ multi_basedir="$srcdir/$with_multisrctop.."
+ fi
+else
+ multi_basedir="$srcdir/.."
+fi
+
+
+# Even if the default multilib is not a cross compilation,
+# it may be that some of the other multilibs are.
+if test $cross_compiling = no && test $multilib = yes \
+ && test "x${with_multisubdir}" != x ; then
+ cross_compiling=maybe
+fi
+
+ac_config_commands="$ac_config_commands default-1"
+
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for --enable-version-specific-runtime-libs" >&5
$as_echo_n "checking for --enable-version-specific-runtime-libs... " >&6; }
# Check whether --enable-version-specific-runtime-libs was given.
@@ -4701,43 +4740,6 @@ fi
-# Default to --enable-multilib
-# Check whether --enable-multilib was given.
-if test "${enable_multilib+set}" = set; then :
- enableval=$enable_multilib; case "$enableval" in
- yes) multilib=yes ;;
- no) multilib=no ;;
- *) as_fn_error "bad value $enableval for multilib option" "$LINENO" 5 ;;
- esac
-else
- multilib=yes
-fi
-
-
-# We may get other options which we leave undocumented:
-# --with-target-subdir, --with-multisrctop, --with-multisubdir
-# See config-ml.in if you want the gory details.
-
-if test "$srcdir" = "."; then
- if test "$with_target_subdir" != "."; then
- multi_basedir="$srcdir/$with_multisrctop../.."
- else
- multi_basedir="$srcdir/$with_multisrctop.."
- fi
-else
- multi_basedir="$srcdir/.."
-fi
-
-
-# Even if the default multilib is not a cross compilation,
-# it may be that some of the other multilibs are.
-if test $cross_compiling = no && test $multilib = yes \
- && test "x${with_multisubdir}" != x ; then
- cross_compiling=maybe
-fi
-
-ac_config_commands="$ac_config_commands default-1"
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable maintainer-specific portions of Makefiles" >&5
$as_echo_n "checking whether to enable maintainer-specific portions of Makefiles... " >&6; }
@@ -8506,7 +8508,7 @@ $as_echo "$lt_cv_ld_force_load" >&6; }
case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in
10.0,*86*-darwin8*|10.0,*-darwin[91]*)
_lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;;
- 10.[012]*)
+ 10.[012][,.]*)
_lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;;
10.*)
_lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;;
@@ -9809,7 +9811,7 @@ _LT_EOF
if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \
&& test "$tmp_diet" = no
then
- tmp_addflag=
+ tmp_addflag=' $pic_flag'
tmp_sharedflag='-shared'
case $cc_basename,$host_cpu in
pgcc*) # Portland Group C compiler
@@ -12019,7 +12021,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 12022 "configure"
+#line 12024 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -12125,7 +12127,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 12128 "configure"
+#line 12130 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -12720,8 +12722,8 @@ with_gnu_ld=$lt_cv_prog_gnu_ld
# Check if GNU C++ uses GNU ld as the underlying linker, since the
# archiving commands below assume that GNU ld is being used.
if test "$with_gnu_ld" = yes; then
- archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
- archive_expsym_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir'
export_dynamic_flag_spec_CXX='${wl}--export-dynamic'
@@ -16362,6 +16364,8 @@ if test "x$TSAN_SUPPORTED" = "xyes"; then
fi
+
+
cat >confcache <<\_ACEOF
# This file is a shell script that caches the results of configure
# tests run on this system so they can be shared between configure
@@ -17090,7 +17094,6 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
#
# INIT-COMMANDS
#
-AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"
srcdir="$srcdir"
host="$host"
@@ -17105,6 +17108,7 @@ CC="$CC"
CXX="$CXX"
GFORTRAN="$GFORTRAN"
GCJ="$GCJ"
+AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"
# The HP-UX ksh and POSIX shell print the target directory to stdout
@@ -17492,8 +17496,8 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
for ac_config_target in $ac_config_targets
do
case $ac_config_target in
- "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;;
"default-1") CONFIG_COMMANDS="$CONFIG_COMMANDS default-1" ;;
+ "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;;
"libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;;
"gstdint.h") CONFIG_COMMANDS="$CONFIG_COMMANDS gstdint.h" ;;
"Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
@@ -18091,6 +18095,14 @@ $as_echo "$as_me: executing $ac_file commands" >&6;}
case $ac_file$ac_mode in
+ "default-1":C)
+# Only add multilib support code if we just rebuilt the top-level
+# Makefile.
+case " $CONFIG_FILES " in
+ *" Makefile "*)
+ ac_file=Makefile . ${multi_basedir}/config-ml.in
+ ;;
+esac ;;
"depfiles":C) test x"$AMDEP_TRUE" != x"" || {
# Autoconf 2.62 quotes --file arguments for eval, but not when files
# are listed without --file. Let's play safe and only enable the eval
@@ -18186,14 +18198,6 @@ $as_echo X"$file" |
done
}
;;
- "default-1":C)
-# Only add multilib support code if we just rebuilt the top-level
-# Makefile.
-case " $CONFIG_FILES " in
- *" Makefile "*)
- ac_file=Makefile . ${multi_basedir}/config-ml.in
- ;;
-esac ;;
"libtool":C)
# See if we are running on zsh, and set the options which allow our
diff --git a/libsanitizer/configure.ac b/libsanitizer/configure.ac
index e6721312d0..031c2710a8 100644
--- a/libsanitizer/configure.ac
+++ b/libsanitizer/configure.ac
@@ -5,6 +5,8 @@ AC_PREREQ([2.64])
AC_INIT(package-unused, version-unused, libsanitizer)
AC_CONFIG_SRCDIR([include/sanitizer/common_interface_defs.h])
+AM_ENABLE_MULTILIB(, ..)
+
AC_MSG_CHECKING([for --enable-version-specific-runtime-libs])
AC_ARG_ENABLE(version-specific-runtime-libs,
[ --enable-version-specific-runtime-libs Specify that runtime libraries should be installed in a compiler-specific directory ],
@@ -26,7 +28,6 @@ AC_SUBST(target_alias)
GCC_LIBSTDCXX_RAW_CXX_FLAGS
AM_INIT_AUTOMAKE(foreign no-dist)
-AM_ENABLE_MULTILIB(, ..)
AM_MAINTAINER_MODE
# Calculate toolexeclibdir
@@ -346,4 +347,6 @@ _EOF
])
fi
+AC_SUBST([TSAN_TARGET_DEPENDENT_OBJECTS])
+
AC_OUTPUT
diff --git a/libsanitizer/configure.tgt b/libsanitizer/configure.tgt
index 6de4a65715..ac0eb7a446 100644
--- a/libsanitizer/configure.tgt
+++ b/libsanitizer/configure.tgt
@@ -19,22 +19,23 @@
# lets us skip running autoconf when modifying target specific information.
# Filter out unsupported systems.
+TSAN_TARGET_DEPENDENT_OBJECTS=
case "${target}" in
x86_64-*-linux* | i?86-*-linux*)
if test x$ac_cv_sizeof_void_p = x8; then
TSAN_SUPPORTED=yes
LSAN_SUPPORTED=yes
+ TSAN_TARGET_DEPENDENT_OBJECTS=tsan_rtl_amd64.lo
fi
;;
- powerpc*le-*-linux*)
- UNSUPPORTED=1
- ;;
powerpc*-*-linux*)
;;
sparc*-*-linux*)
;;
arm*-*-linux*)
;;
+ aarch64*-*-linux*)
+ ;;
x86_64-*-darwin[1]* | i?86-*-darwin[1]*)
TSAN_SUPPORTED=no
;;
diff --git a/libsanitizer/include/sanitizer/allocator_interface.h b/libsanitizer/include/sanitizer/allocator_interface.h
new file mode 100644
index 0000000000..97a72a258e
--- /dev/null
+++ b/libsanitizer/include/sanitizer/allocator_interface.h
@@ -0,0 +1,64 @@
+//===-- allocator_interface.h ---------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Public interface header for allocator used in sanitizers (ASan/TSan/MSan).
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_INTERFACE_H
+#define SANITIZER_ALLOCATOR_INTERFACE_H
+
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+ /* Returns the estimated number of bytes that will be reserved by the
+ allocator for a request of "size" bytes. If the allocator can't allocate
+ that much memory, returns the maximal possible allocation size, otherwise
+ returns "size". */
+ size_t __sanitizer_get_estimated_allocated_size(size_t size);
+
+ /* Returns true if p was returned by the allocator and
+ is not yet freed. */
+ int __sanitizer_get_ownership(const volatile void *p);
+
+ /* Returns the number of bytes reserved for the pointer p.
+ Requires (get_ownership(p) == true) or (p == 0). */
+ size_t __sanitizer_get_allocated_size(const volatile void *p);
+
+ /* Number of bytes, allocated and not yet freed by the application. */
+ size_t __sanitizer_get_current_allocated_bytes();
+
+ /* Number of bytes, mmaped by the allocator to fulfill allocation requests.
+ Generally, for request of X bytes, allocator can reserve and add to free
+ lists a large number of chunks of size X to use them for future requests.
+ All these chunks count toward the heap size. Currently, allocator never
+ releases memory to OS (instead, it just puts freed chunks to free
+ lists). */
+ size_t __sanitizer_get_heap_size();
+
+ /* Number of bytes, mmaped by the allocator, which can be used to fulfill
+ allocation requests. When a user program frees memory chunk, it can first
+ fall into quarantine and will count toward __sanitizer_get_free_bytes()
+ later. */
+ size_t __sanitizer_get_free_bytes();
+
+ /* Number of bytes in unmapped pages, that are released to OS. Currently,
+ always returns 0. */
+ size_t __sanitizer_get_unmapped_bytes();
+
+ /* Malloc hooks that may be optionally provided by user.
+ __sanitizer_malloc_hook(ptr, size) is called immediately after
+ allocation of "size" bytes, which returned "ptr".
+ __sanitizer_free_hook(ptr) is called immediately before
+ deallocation of "ptr". */
+ void __sanitizer_malloc_hook(const volatile void *ptr, size_t size);
+ void __sanitizer_free_hook(const volatile void *ptr);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif
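
The allocator interface above is shared by the sanitizer runtimes, and the two hook functions are declared so that the user program may define them. A minimal sketch of how an application could consume this header follows; the fprintf logging is purely illustrative and assumes the program is linked against a sanitizer runtime that exports these __sanitizer_* entry points.

    // Illustrative only; requires linking with an ASan/MSan/LSan runtime.
    #include <sanitizer/allocator_interface.h>
    #include <stdio.h>
    #include <stdlib.h>

    // Optional hooks: the runtime calls these around every allocation/free.
    extern "C" void __sanitizer_malloc_hook(const volatile void *ptr, size_t size) {
      fprintf(stderr, "alloc %p (%zu bytes)\n", (void *)ptr, size);
    }
    extern "C" void __sanitizer_free_hook(const volatile void *ptr) {
      fprintf(stderr, "free  %p\n", (void *)ptr);
    }

    int main() {
      void *p = malloc(128);
      if (__sanitizer_get_ownership(p))
        fprintf(stderr, "reserved %zu bytes\n", __sanitizer_get_allocated_size(p));
      fprintf(stderr, "heap=%zu live=%zu\n", __sanitizer_get_heap_size(),
              __sanitizer_get_current_allocated_bytes());
      free(p);
      return 0;
    }
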
diff --git a/libsanitizer/include/sanitizer/asan_interface.h b/libsanitizer/include/sanitizer/asan_interface.h
index 0016339e48..023fa29c60 100644
--- a/libsanitizer/include/sanitizer/asan_interface.h
+++ b/libsanitizer/include/sanitizer/asan_interface.h
@@ -48,22 +48,65 @@ extern "C" {
((void)(addr), (void)(size))
#endif
- // Returns true iff addr is poisoned (i.e. 1-byte read/write access to this
+ // Returns 1 if addr is poisoned (i.e. 1-byte read/write access to this
// address will result in error report from AddressSanitizer).
- bool __asan_address_is_poisoned(void const volatile *addr);
+ // Otherwise returns 0.
+ int __asan_address_is_poisoned(void const volatile *addr);
- // If at least on byte in [beg, beg+size) is poisoned, return the address
+ // If at least one byte in [beg, beg+size) is poisoned, return the address
// of the first such byte. Otherwise return 0.
void *__asan_region_is_poisoned(void *beg, size_t size);
// Print the description of addr (useful when debugging in gdb).
void __asan_describe_address(void *addr);
+ // Useful for calling from a debugger to get information about an ASan error.
+ // Returns 1 if an error has been (or is being) reported, otherwise returns 0.
+ int __asan_report_present();
+
+ // Useful for calling from a debugger to get information about an ASan error.
+ // If an error has been (or is being) reported, the following functions return
+ // the pc, bp, sp, address, access type (0 = read, 1 = write), access size and
+ // bug description (e.g. "heap-use-after-free"). Otherwise they return 0.
+ void *__asan_get_report_pc();
+ void *__asan_get_report_bp();
+ void *__asan_get_report_sp();
+ void *__asan_get_report_address();
+ int __asan_get_report_access_type();
+ size_t __asan_get_report_access_size();
+ const char *__asan_get_report_description();
+
+ // Useful for calling from the debugger to get information about a pointer.
+ // Returns the category of the given pointer as a constant string.
+ // Possible return values are "global", "stack", "stack-fake", "heap",
+ // "heap-invalid", "shadow-low", "shadow-gap", "shadow-high", "unknown".
+ // If global or stack, tries to also return the variable name, address and
+ // size. If heap, tries to return the chunk address and size. 'name' should
+ // point to an allocated buffer of size 'name_size'.
+ const char *__asan_locate_address(void *addr, char *name, size_t name_size,
+ void **region_address, size_t *region_size);
+
+ // Useful for calling from the debugger to get the allocation stack trace
+ // and thread ID for a heap address. Stores up to 'size' frames into 'trace',
+ // returns the number of stored frames or 0 on error.
+ size_t __asan_get_alloc_stack(void *addr, void **trace, size_t size,
+ int *thread_id);
+
+ // Useful for calling from the debugger to get the free stack trace
+ // and thread ID for a heap address. Stores up to 'size' frames into 'trace',
+ // returns the number of stored frames or 0 on error.
+ size_t __asan_get_free_stack(void *addr, void **trace, size_t size,
+ int *thread_id);
+
+ // Useful for calling from the debugger to get the current shadow memory
+ // mapping.
+ void __asan_get_shadow_mapping(size_t *shadow_scale, size_t *shadow_offset);
+
// This is an internal function that is called to report an error.
// However it is still a part of the interface because users may want to
// set a breakpoint on this function in a debugger.
void __asan_report_error(void *pc, void *bp, void *sp,
- void *addr, bool is_write, size_t access_size);
+ void *addr, int is_write, size_t access_size);
// Sets the exit code to use when reporting an error.
// Returns the old value.
@@ -80,40 +123,6 @@ extern "C" {
// the program crashes before ASan report is printed.
void __asan_on_error();
- // User may provide its own implementation for symbolization function.
- // It should print the description of instruction at address "pc" to
- // "out_buffer". Description should be at most "out_size" bytes long.
- // User-specified function should return true if symbolization was
- // successful.
- bool __asan_symbolize(const void *pc, char *out_buffer,
- int out_size);
-
- // Returns the estimated number of bytes that will be reserved by allocator
- // for request of "size" bytes. If ASan allocator can't allocate that much
- // memory, returns the maximal possible allocation size, otherwise returns
- // "size".
- size_t __asan_get_estimated_allocated_size(size_t size);
- // Returns true if p was returned by the ASan allocator and
- // is not yet freed.
- bool __asan_get_ownership(const void *p);
- // Returns the number of bytes reserved for the pointer p.
- // Requires (get_ownership(p) == true) or (p == 0).
- size_t __asan_get_allocated_size(const void *p);
- // Number of bytes, allocated and not yet freed by the application.
- size_t __asan_get_current_allocated_bytes();
- // Number of bytes, mmaped by asan allocator to fulfill allocation requests.
- // Generally, for request of X bytes, allocator can reserve and add to free
- // lists a large number of chunks of size X to use them for future requests.
- // All these chunks count toward the heap size. Currently, allocator never
- // releases memory to OS (instead, it just puts freed chunks to free lists).
- size_t __asan_get_heap_size();
- // Number of bytes, mmaped by asan allocator, which can be used to fulfill
- // allocation requests. When a user program frees memory chunk, it can first
- // fall into quarantine and will count toward __asan_get_free_bytes() later.
- size_t __asan_get_free_bytes();
- // Number of bytes in unmapped pages, that are released to OS. Currently,
- // always returns 0.
- size_t __asan_get_unmapped_bytes();
// Prints accumulated stats to stderr. Used for debugging.
void __asan_print_accumulated_stats();
@@ -121,13 +130,23 @@ extern "C" {
// a string containing ASan runtime options. See asan_flags.h for details.
const char* __asan_default_options();
- // Malloc hooks that may be optionally provided by user.
- // __asan_malloc_hook(ptr, size) is called immediately after
- // allocation of "size" bytes, which returned "ptr".
- // __asan_free_hook(ptr) is called immediately before
- // deallocation of "ptr".
- void __asan_malloc_hook(void *ptr, size_t size);
- void __asan_free_hook(void *ptr);
+  // The following two functions facilitate garbage collection in the presence
+  // of ASan's fake stack.
+
+  // Returns an opaque handle to be used later in __asan_addr_is_in_fake_stack.
+ // Returns NULL if the current thread does not have a fake stack.
+ void *__asan_get_current_fake_stack();
+
+ // If fake_stack is non-NULL and addr belongs to a fake frame in
+ // fake_stack, returns the address on real stack that corresponds to
+ // the fake frame and sets beg/end to the boundaries of this fake frame.
+ // Otherwise returns NULL and does not touch beg/end.
+ // If beg/end are NULL, they are not touched.
+ // This function may be called from a thread other than the owner of
+  // fake_stack, but the owner thread needs to be alive.
+ void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
+ void **end);
+
#ifdef __cplusplus
} // extern "C"
#endif
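
The two fake-stack entry points added above exist so that a conservative garbage collector can resolve pointers into ASan's fake frames back to real stack memory. A hedged sketch of that usage follows; ScanWord() and the stack bounds are hypothetical GC-side details, only the __asan_* calls come from this header.

    // Sketch only; ScanWord() is a placeholder for the GC's own marking routine.
    #include <sanitizer/asan_interface.h>

    void ScanWord(void *p);  // hypothetical GC routine, defined elsewhere

    void ScanThreadStack(void **stack_begin, void **stack_end) {
      void *fake_stack = __asan_get_current_fake_stack();  // may be NULL
      for (void **p = stack_begin; p < stack_end; ++p) {
        void *beg = 0, *end = 0;
        // If *p points into a fake frame, scan the corresponding fake frame
        // instead of the raw value.
        if (__asan_addr_is_in_fake_stack(fake_stack, *p, &beg, &end)) {
          for (void **q = (void **)beg; q < (void **)end; ++q)
            ScanWord(*q);
        } else {
          ScanWord(*p);
        }
      }
    }
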
diff --git a/libsanitizer/include/sanitizer/common_interface_defs.h b/libsanitizer/include/sanitizer/common_interface_defs.h
index db8b3b543e..3aba519327 100644
--- a/libsanitizer/include/sanitizer/common_interface_defs.h
+++ b/libsanitizer/include/sanitizer/common_interface_defs.h
@@ -22,13 +22,28 @@
#ifdef __cplusplus
extern "C" {
#endif
+ // Arguments for __sanitizer_sandbox_on_notify() below.
+ typedef struct {
+ // Enable sandbox support in sanitizer coverage.
+ int coverage_sandboxed;
+ // File descriptor to write coverage data to. If -1 is passed, a file will
+    // be pre-opened by __sanitizer_sandbox_on_notify(). This field has no
+ // effect if coverage_sandboxed == 0.
+ intptr_t coverage_fd;
+ // If non-zero, split the coverage data into well-formed blocks. This is
+ // useful when coverage_fd is a socket descriptor. Each block will contain
+ // a header, allowing data from multiple processes to be sent over the same
+ // socket.
+ unsigned int coverage_max_block_size;
+ } __sanitizer_sandbox_arguments;
+
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
void __sanitizer_set_report_path(const char *path);
// Notify the tools that the sandbox is going to be turned on. The reserved
// parameter will be used in the future to hold a structure with functions
// that the tools may call to bypass the sandbox.
- void __sanitizer_sandbox_on_notify(void *reserved);
+ void __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
// This function is called by the tool when it has just finished reporting
// an error. 'error_summary' is a one-line string that summarizes
@@ -45,8 +60,14 @@ extern "C" {
void __sanitizer_unaligned_store32(void *p, uint32_t x);
void __sanitizer_unaligned_store64(void *p, uint64_t x);
+ // Initialize coverage.
+ void __sanitizer_cov_init();
// Record and dump coverage info.
void __sanitizer_cov_dump();
+ // Open <name>.sancov.packed in the coverage directory and return the file
+ // descriptor. Returns -1 on failure, or if coverage dumping is disabled.
+ // This is intended for use by sandboxing code.
+ intptr_t __sanitizer_maybe_open_cov_file(const char *name);
// Annotate the current state of a contiguous container, such as
// std::vector, std::string or similar.
@@ -54,7 +75,7 @@ extern "C" {
// in a contiguous region of memory. The container owns the region of memory
// [beg, end); the memory [beg, mid) is used to store the current elements
// and the memory [mid, end) is reserved for future elements;
- // end <= mid <= end. For example, in "std::vector<> v"
+ // beg <= mid <= end. For example, in "std::vector<> v"
// beg = &v[0];
// end = beg + v.capacity() * sizeof(v[0]);
// mid = beg + v.size() * sizeof(v[0]);
@@ -82,6 +103,14 @@ extern "C" {
const void *end,
const void *old_mid,
const void *new_mid);
+ // Returns true if the contiguous container [beg, end) is properly poisoned
+ // (e.g. with __sanitizer_annotate_contiguous_container), i.e. if
+ // - [beg, mid) is addressable,
+ // - [mid, end) is unaddressable.
+ // Full verification requires O(end-beg) time; this function tries to avoid
+ // such complexity by touching only parts of the container around beg/mid/end.
+ int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
+ const void *end);
// Print the stack trace leading to this call. Useful for debugging user code.
void __sanitizer_print_stack_trace();
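
For the container-annotation functions, a hedged sketch of the intended protocol is given below; MyVec and SetSize() are hypothetical, only the __sanitizer_* calls and their argument order come from this header.

    // Sketch of annotating a vector-like buffer after its size changes.
    #include <sanitizer/common_interface_defs.h>
    #include <assert.h>
    #include <stddef.h>

    struct MyVec {
      char *beg;        // start of the allocation
      size_t size;      // elements currently in use
      size_t capacity;  // total allocated elements
    };

    void SetSize(MyVec *v, size_t new_size) {
      const void *beg = v->beg;
      const void *end = v->beg + v->capacity;
      const void *old_mid = v->beg + v->size;
      const void *new_mid = v->beg + new_size;
      // [beg, new_mid) becomes addressable, [new_mid, end) becomes poisoned.
      __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid);
      v->size = new_size;
      assert(__sanitizer_verify_contiguous_container(beg, new_mid, end));
    }
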
diff --git a/libsanitizer/include/sanitizer/dfsan_interface.h b/libsanitizer/include/sanitizer/dfsan_interface.h
index dd938483d1..c1b160205a 100644
--- a/libsanitizer/include/sanitizer/dfsan_interface.h
+++ b/libsanitizer/include/sanitizer/dfsan_interface.h
@@ -37,6 +37,9 @@ struct dfsan_label_info {
void *userdata;
};
+/// Signature of the callback argument to dfsan_set_write_callback().
+typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
+
/// Computes the union of \c l1 and \c l2, possibly creating a union label in
/// the process.
dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
@@ -72,6 +75,14 @@ int dfsan_has_label(dfsan_label label, dfsan_label elem);
/// that label, else returns 0.
dfsan_label dfsan_has_label_with_desc(dfsan_label label, const char *desc);
+/// Returns the number of labels allocated.
+size_t dfsan_get_label_count(void);
+
+/// Sets a callback to be invoked on calls to write(). The callback is invoked
+/// before the write is done. The write is not guaranteed to succeed when the
+/// callback executes. Pass in NULL to remove any callback.
+void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
+
#ifdef __cplusplus
} // extern "C"
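
A hedged example of the new write-callback hook follows; the callback body and the workload are placeholders, and the program must be built with DFSan instrumentation.

    // Illustrative only; requires a DFSan-instrumented build.
    #include <sanitizer/dfsan_interface.h>
    #include <stdio.h>
    #include <unistd.h>

    static void my_write_callback(int fd, const void *buf, size_t count) {
      // Invoked before each write(); the write itself may still fail.
      fprintf(stderr, "about to write %zu bytes to fd %d\n", count, fd);
    }

    int main() {
      dfsan_set_write_callback(my_write_callback);
      write(STDOUT_FILENO, "hello\n", 6);
      dfsan_set_write_callback(NULL);  // remove the callback again
      return 0;
    }
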
diff --git a/libsanitizer/include/sanitizer/lsan_interface.h b/libsanitizer/include/sanitizer/lsan_interface.h
index ec9c730eee..95e79245ec 100644
--- a/libsanitizer/include/sanitizer/lsan_interface.h
+++ b/libsanitizer/include/sanitizer/lsan_interface.h
@@ -21,13 +21,24 @@ extern "C" {
// be treated as non-leaks. Disable/enable pairs may be nested.
void __lsan_disable();
void __lsan_enable();
+
// The heap object into which p points will be treated as a non-leak.
void __lsan_ignore_object(const void *p);
- // The user may optionally provide this function to disallow leak checking
- // for the program it is linked into (if the return value is non-zero). This
- // function must be defined as returning a constant value; any behavior beyond
- // that is unsupported.
- int __lsan_is_turned_off();
+
+ // Memory regions registered through this interface will be treated as sources
+ // of live pointers during leak checking. Useful if you store pointers in
+ // mapped memory.
+ // Points of note:
+ // - __lsan_unregister_root_region() must be called with the same pointer and
+ // size that have earlier been passed to __lsan_register_root_region()
+ // - LSan will skip any inaccessible memory when scanning a root region. E.g.,
+ // if you map memory within a larger region that you have mprotect'ed, you can
+ // register the entire large region.
+ // - the implementation is not optimized for performance. This interface is
+ // intended to be used for a small number of relatively static regions.
+ void __lsan_register_root_region(const void *p, size_t size);
+ void __lsan_unregister_root_region(const void *p, size_t size);
+
// Calling this function makes LSan enter the leak checking phase immediately.
// Use this if normal end-of-process leak checking happens too late (e.g. if
// you have intentional memory leaks in your shutdown code). Calling this
@@ -35,6 +46,16 @@ extern "C" {
// most once per process. This function will terminate the process if there
// are memory leaks and the exit_code flag is non-zero.
void __lsan_do_leak_check();
+
+ // The user may optionally provide this function to disallow leak checking
+ // for the program it is linked into (if the return value is non-zero). This
+ // function must be defined as returning a constant value; any behavior beyond
+ // that is unsupported.
+ int __lsan_is_turned_off();
+
+ // This function may be optionally provided by the user and should return
+ // a string containing LSan suppressions.
+ const char *__lsan_default_suppressions();
#ifdef __cplusplus
} // extern "C"
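
The root-region registration added above is most useful for pointer tables kept in raw mapped memory. A minimal sketch follows, with the mmap parameters chosen only for illustration.

    // Sketch: keep heap pointers in a mapped table without LSan reporting leaks.
    #include <sanitizer/lsan_interface.h>
    #include <stddef.h>
    #include <sys/mman.h>

    void *RegisterPointerTable(size_t bytes) {
      void *p = mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED)
        return NULL;
      __lsan_register_root_region(p, bytes);  // pointers stored here stay "live"
      return p;
    }

    void UnregisterPointerTable(void *p, size_t bytes) {
      // Must use the same pointer/size that were passed at registration time.
      __lsan_unregister_root_region(p, bytes);
      munmap(p, bytes);
    }
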
diff --git a/libsanitizer/include/sanitizer/msan_interface.h b/libsanitizer/include/sanitizer/msan_interface.h
index f531cf347c..ea7ad1d7c9 100644
--- a/libsanitizer/include/sanitizer/msan_interface.h
+++ b/libsanitizer/include/sanitizer/msan_interface.h
@@ -17,13 +17,6 @@
#ifdef __cplusplus
extern "C" {
#endif
-
-#if __has_feature(memory_sanitizer)
- /* Returns a string describing a stack origin.
- Return NULL if the origin is invalid, or is not a stack origin. */
- const char *__msan_get_origin_descr_if_stack(uint32_t id);
-
-
/* Set raw origin for the memory range. */
void __msan_set_origin(const volatile void *a, size_t size, uint32_t origin);
@@ -39,6 +32,10 @@ extern "C" {
/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);
+ /* Make a null-terminated string fully initialized (without changing its
+ contents). */
+ void __msan_unpoison_string(const volatile char *a);
+
/* Make memory region fully uninitialized (without changing its contents). */
void __msan_poison(const volatile void *a, size_t size);
@@ -51,6 +48,10 @@ extern "C" {
memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);
+  /* Checks that the memory range is fully initialized and reports an error if it
+ * is not. */
+ void __msan_check_mem_is_initialized(const volatile void *x, size_t size);
+
/* Set exit code when error(s) were detected.
Value of 0 means don't change the program exit code. */
void __msan_set_exit_code(int exit_code);
@@ -67,13 +68,13 @@ extern "C" {
modules that were compiled without the corresponding compiler flag. */
void __msan_set_keep_going(int keep_going);
- /* Print shadow and origin for the memory range to stdout in a human-readable
+ /* Print shadow and origin for the memory range to stderr in a human-readable
format. */
void __msan_print_shadow(const volatile void *x, size_t size);
- /* Print current function arguments shadow and origin to stdout in a
+ /* Print shadow for the memory range to stderr in a minimalistic
human-readable format. */
- void __msan_print_param_shadow();
+ void __msan_dump_shadow(const volatile void *x, size_t size);
/* Returns true if running under a dynamic tool (DynamoRio-based). */
int __msan_has_dynamic_component();
@@ -86,72 +87,9 @@ extern "C" {
a string containing Msan runtime options. See msan_flags.h for details. */
const char* __msan_default_options();
-
- /***********************************/
- /* Allocator statistics interface. */
-
- /* Returns the estimated number of bytes that will be reserved by allocator
- for request of "size" bytes. If Msan allocator can't allocate that much
- memory, returns the maximal possible allocation size, otherwise returns
- "size". */
- size_t __msan_get_estimated_allocated_size(size_t size);
-
- /* Returns true if p was returned by the Msan allocator and
- is not yet freed. */
- int __msan_get_ownership(const volatile void *p);
-
- /* Returns the number of bytes reserved for the pointer p.
- Requires (get_ownership(p) == true) or (p == 0). */
- size_t __msan_get_allocated_size(const volatile void *p);
-
- /* Number of bytes, allocated and not yet freed by the application. */
- size_t __msan_get_current_allocated_bytes();
-
- /* Number of bytes, mmaped by msan allocator to fulfill allocation requests.
- Generally, for request of X bytes, allocator can reserve and add to free
- lists a large number of chunks of size X to use them for future requests.
- All these chunks count toward the heap size. Currently, allocator never
- releases memory to OS (instead, it just puts freed chunks to free
- lists). */
- size_t __msan_get_heap_size();
-
- /* Number of bytes, mmaped by msan allocator, which can be used to fulfill
- allocation requests. When a user program frees memory chunk, it can first
- fall into quarantine and will count toward __msan_get_free_bytes()
- later. */
- size_t __msan_get_free_bytes();
-
- /* Number of bytes in unmapped pages, that are released to OS. Currently,
- always returns 0. */
- size_t __msan_get_unmapped_bytes();
-
- /* Malloc hooks that may be optionally provided by user.
- __msan_malloc_hook(ptr, size) is called immediately after
- allocation of "size" bytes, which returned "ptr".
- __msan_free_hook(ptr) is called immediately before
- deallocation of "ptr". */
- void __msan_malloc_hook(const volatile void *ptr, size_t size);
- void __msan_free_hook(const volatile void *ptr);
-
-#else // __has_feature(memory_sanitizer)
-
-#define __msan_get_origin_descr_if_stack(id) ((const char*)0)
-#define __msan_set_origin(a, size, origin)
-#define __msan_get_origin(a) ((uint32_t)-1)
-#define __msan_get_track_origins() (0)
-#define __msan_get_umr_origin() ((uint32_t)-1)
-#define __msan_unpoison(a, size)
-#define __msan_poison(a, size)
-#define __msan_partial_poison(data, shadow, size)
-#define __msan_test_shadow(x, size) ((intptr_t)-1)
-#define __msan_set_exit_code(exit_code)
-#define __msan_set_expect_umr(expect_umr)
-#define __msan_print_shadow(x, size)
-#define __msan_print_param_shadow()
-#define __msan_has_dynamic_component() (0)
-#define __msan_allocated_memory(data, size)
-
-#endif // __has_feature(memory_sanitizer)
+ /* Sets the callback to be called right before death on error.
+ Passing 0 will unset the callback. */
+ void __msan_set_death_callback(void (*callback)(void));
#ifdef __cplusplus
} // extern "C"
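
A hedged sketch of the new MSan entry points in use; the buffer handling is hypothetical and the death-callback body is a placeholder.

    // Illustrative only; requires an MSan-instrumented build.
    #include <sanitizer/msan_interface.h>
    #include <string.h>

    static void OnMsanDeath(void) {
      // Placeholder: flush application logs before MSan terminates the process.
    }

    void Example(char *buf, size_t n) {
      __msan_set_death_callback(OnMsanDeath);
      memset(buf, 'x', n - 1);
      buf[n - 1] = '\0';
      // E.g. after filling the buffer through uninstrumented code, declare the
      // string initialized, then assert that the whole region really is.
      __msan_unpoison_string(buf);
      __msan_check_mem_is_initialized(buf, n);
    }
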
diff --git a/libsanitizer/tsan/tsan_interface_atomic.h b/libsanitizer/include/sanitizer/tsan_interface_atomic.h
index c500614acc..d19c910941 100644
--- a/libsanitizer/tsan/tsan_interface_atomic.h
+++ b/libsanitizer/include/sanitizer/tsan_interface_atomic.h
@@ -7,14 +7,11 @@
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
+// Public interface header for TSan atomics.
//===----------------------------------------------------------------------===//
#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H
-#ifndef INTERFACE_ATTRIBUTE
-# define INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
-#endif
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -23,14 +20,12 @@ typedef char __tsan_atomic8;
typedef short __tsan_atomic16; // NOLINT
typedef int __tsan_atomic32;
typedef long __tsan_atomic64; // NOLINT
-
#if defined(__SIZEOF_INT128__) \
|| (__clang_major__ * 100 + __clang_minor__ >= 302)
__extension__ typedef __int128 __tsan_atomic128;
-#define __TSAN_HAS_INT128 1
+# define __TSAN_HAS_INT128 1
#else
-typedef char __tsan_atomic128;
-#define __TSAN_HAS_INT128 0
+# define __TSAN_HAS_INT128 0
#endif
// Part of ABI, do not change.
@@ -45,159 +40,181 @@ typedef enum {
} __tsan_memory_order;
__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
+#endif
void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_sub(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_sub(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_sub(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_sub(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
__tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
__tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
__tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
__tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
+#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
__tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
+#endif
int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
__tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
__tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
__tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
__tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
+#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
__tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
+#endif
__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+#endif
-void __tsan_atomic_thread_fence(__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-void __tsan_atomic_signal_fence(__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+void __tsan_atomic_thread_fence(__tsan_memory_order mo);
+void __tsan_atomic_signal_fence(__tsan_memory_order mo);
#ifdef __cplusplus
} // extern "C"
#endif
-#undef INTERFACE_ATTRIBUTE
-
-#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
+#endif // TSAN_INTERFACE_ATOMIC_H
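
These entry points mirror the C11/C++11 atomic operations; normally the compiler emits the calls itself when building under TSan, so invoking them by hand is only a sketch to illustrate the signatures. The __tsan_memory_order_acquire/release enumerator names are assumed from the (unchanged) enum above.

    // Sketch of a spinlock expressed directly in terms of the TSan atomic ABI.
    #include <sanitizer/tsan_interface_atomic.h>

    static volatile __tsan_atomic32 lock_word;

    void Lock() {
      // Spin until the previous value was 0 (i.e. we took the lock).
      while (__tsan_atomic32_exchange(&lock_word, 1, __tsan_memory_order_acquire))
        ;
    }

    void Unlock() {
      __tsan_atomic32_store(&lock_word, 0, __tsan_memory_order_release);
    }
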
diff --git a/libsanitizer/interception/Makefile.am b/libsanitizer/interception/Makefile.am
index e9fbe6a467..4fb69a963d 100644
--- a/libsanitizer/interception/Makefile.am
+++ b/libsanitizer/interception/Makefile.am
@@ -6,6 +6,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+AM_CXXFLAGS += -std=gnu++11
ACLOCAL_AMFLAGS = -I m4
noinst_LTLIBRARIES = libinterception.la
diff --git a/libsanitizer/interception/Makefile.in b/libsanitizer/interception/Makefile.in
index 5ac217dcc4..0e261b4266 100644
--- a/libsanitizer/interception/Makefile.in
+++ b/libsanitizer/interception/Makefile.in
@@ -150,6 +150,7 @@ SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
+TSAN_TARGET_DEPENDENT_OBJECTS = @TSAN_TARGET_DEPENDENT_OBJECTS@
VERSION = @VERSION@
VIEW_FILE = @VIEW_FILE@
abs_builddir = @abs_builddir@
@@ -225,7 +226,8 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
-Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
-fomit-frame-pointer -funwind-tables -fvisibility=hidden \
- -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+ -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) \
+ -std=gnu++11
ACLOCAL_AMFLAGS = -I m4
noinst_LTLIBRARIES = libinterception.la
interception_files = \
diff --git a/libsanitizer/interception/interception.h b/libsanitizer/interception/interception.h
index 7393f4c26b..458928f14b 100644
--- a/libsanitizer/interception/interception.h
+++ b/libsanitizer/interception/interception.h
@@ -13,7 +13,8 @@
#ifndef INTERCEPTION_H
#define INTERCEPTION_H
-#if !defined(__linux__) && !defined(__APPLE__) && !defined(_WIN32)
+#if !defined(__linux__) && !defined(__FreeBSD__) && \
+ !defined(__APPLE__) && !defined(_WIN32)
# error "Interception doesn't work on this operating system."
#endif
@@ -119,19 +120,23 @@ const interpose_substitution substitution_##func_name[] \
# define DECLARE_WRAPPER(ret_type, func, ...)
#elif defined(_WIN32)
-# if defined(_DLL) // DLL CRT
-# define WRAP(x) x
-# define WRAPPER_NAME(x) #x
-# define INTERCEPTOR_ATTRIBUTE
-# else // Static CRT
-# define WRAP(x) wrap_##x
-# define WRAPPER_NAME(x) "wrap_"#x
-# define INTERCEPTOR_ATTRIBUTE
-# endif
+# define WRAP(x) __asan_wrap_##x
+# define WRAPPER_NAME(x) "__asan_wrap_"#x
+# define INTERCEPTOR_ATTRIBUTE __declspec(dllexport)
# define DECLARE_WRAPPER(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__);
# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__);
+#elif defined(__FreeBSD__)
+# define WRAP(x) __interceptor_ ## x
+# define WRAPPER_NAME(x) "__interceptor_" #x
+# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
+// FreeBSD's dynamic linker (non-compliantly) gives non-weak symbols higher
+// priority than weak ones, so weak aliases won't work for indirect calls
+// in position-independent (-fPIC / -fPIE) mode.
+# define DECLARE_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__) \
+ __attribute__((alias("__interceptor_" #func), visibility("default")));
#else
# define WRAP(x) __interceptor_ ## x
# define WRAPPER_NAME(x) "__interceptor_" #x
@@ -233,11 +238,11 @@ typedef unsigned long uptr; // NOLINT
#define INCLUDED_FROM_INTERCEPTION_LIB
-#if defined(__linux__)
+#if defined(__linux__) || defined(__FreeBSD__)
# include "interception_linux.h"
-# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX(func)
+# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
- INTERCEPT_FUNCTION_VER_LINUX(func, symver)
+ INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver)
#elif defined(__APPLE__)
# include "interception_mac.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func)
diff --git a/libsanitizer/interception/interception_linux.cc b/libsanitizer/interception/interception_linux.cc
index 0a8df474ab..0a8305b0bf 100644
--- a/libsanitizer/interception/interception_linux.cc
+++ b/libsanitizer/interception/interception_linux.cc
@@ -10,10 +10,10 @@
// Linux-specific interception methods.
//===----------------------------------------------------------------------===//
-#ifdef __linux__
+#if defined(__linux__) || defined(__FreeBSD__)
#include "interception.h"
-#include <dlfcn.h> // for dlsym
+#include <dlfcn.h> // for dlsym() and dlvsym()
namespace __interception {
bool GetRealFunctionAddress(const char *func_name, uptr *func_addr,
@@ -31,4 +31,4 @@ void *GetFuncAddrVer(const char *func_name, const char *ver) {
} // namespace __interception
-#endif // __linux__
+#endif // __linux__ || __FreeBSD__
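
For readers unfamiliar with the mechanism, the lookup behind GetRealFunctionAddress()/GetFuncAddrVer() is essentially a dlsym()/dlvsym() query for the next definition of the symbol. The sketch below is a simplified stand-in, not the exact libsanitizer implementation.

    // Simplified illustration of a dlsym/dlvsym-based "real function" lookup.
    #define _GNU_SOURCE  // for RTLD_NEXT and dlvsym()
    #include <dlfcn.h>
    #include <stdint.h>

    typedef uintptr_t uptr;

    bool MyGetRealFunctionAddress(const char *func_name, uptr *func_addr) {
      *func_addr = (uptr)dlsym(RTLD_NEXT, func_name);  // next definition after ours
      return *func_addr != 0;
    }

    void *MyGetFuncAddrVer(const char *func_name, const char *ver) {
      return dlvsym(RTLD_NEXT, func_name, ver);  // versioned symbol, e.g. "GLIBC_2.2.5"
    }
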
diff --git a/libsanitizer/interception/interception_linux.h b/libsanitizer/interception/interception_linux.h
index 5ab24db438..e2cc4c19df 100644
--- a/libsanitizer/interception/interception_linux.h
+++ b/libsanitizer/interception/interception_linux.h
@@ -10,7 +10,7 @@
// Linux-specific interception methods.
//===----------------------------------------------------------------------===//
-#ifdef __linux__
+#if defined(__linux__) || defined(__FreeBSD__)
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
# error "interception_linux.h should be included from interception library only"
@@ -26,20 +26,20 @@ bool GetRealFunctionAddress(const char *func_name, uptr *func_addr,
void *GetFuncAddrVer(const char *func_name, const char *ver);
} // namespace __interception
-#define INTERCEPT_FUNCTION_LINUX(func) \
- ::__interception::GetRealFunctionAddress( \
- #func, (::__interception::uptr*)&REAL(func), \
- (::__interception::uptr)&(func), \
- (::__interception::uptr)&WRAP(func))
+#define INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) \
+ ::__interception::GetRealFunctionAddress( \
+ #func, (::__interception::uptr *)&__interception::PTR_TO_REAL(func), \
+ (::__interception::uptr) & (func), \
+ (::__interception::uptr) & WRAP(func))
#if !defined(__ANDROID__) // android does not have dlvsym
-# define INTERCEPT_FUNCTION_VER_LINUX(func, symver) \
+# define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
::__interception::real_##func = (func##_f)(unsigned long) \
::__interception::GetFuncAddrVer(#func, symver)
#else
-# define INTERCEPT_FUNCTION_VER_LINUX(func, symver) \
- INTERCEPT_FUNCTION_LINUX(func)
+# define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
+ INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
#endif // !defined(__ANDROID__)
#endif // INTERCEPTION_LINUX_H
-#endif // __linux__
+#endif // __linux__ || __FreeBSD__
diff --git a/libsanitizer/interception/interception_type_test.cc b/libsanitizer/interception/interception_type_test.cc
index f664eeeb04..6ba45e26dd 100644
--- a/libsanitizer/interception/interception_type_test.cc
+++ b/libsanitizer/interception/interception_type_test.cc
@@ -17,13 +17,13 @@
#include <stddef.h>
#include <stdint.h>
-COMPILER_CHECK(sizeof(SIZE_T) == sizeof(size_t));
-COMPILER_CHECK(sizeof(SSIZE_T) == sizeof(ssize_t));
-COMPILER_CHECK(sizeof(PTRDIFF_T) == sizeof(ptrdiff_t));
-COMPILER_CHECK(sizeof(INTMAX_T) == sizeof(intmax_t));
+COMPILER_CHECK(sizeof(::SIZE_T) == sizeof(size_t));
+COMPILER_CHECK(sizeof(::SSIZE_T) == sizeof(ssize_t));
+COMPILER_CHECK(sizeof(::PTRDIFF_T) == sizeof(ptrdiff_t));
+COMPILER_CHECK(sizeof(::INTMAX_T) == sizeof(intmax_t));
#ifndef __APPLE__
-COMPILER_CHECK(sizeof(OFF64_T) == sizeof(off64_t));
+COMPILER_CHECK(sizeof(::OFF64_T) == sizeof(off64_t));
#endif
// The following are the cases when pread (and friends) is used instead of
@@ -31,7 +31,7 @@ COMPILER_CHECK(sizeof(OFF64_T) == sizeof(off64_t));
// rest (they depend on _FILE_OFFSET_BITS setting when building an application).
# if defined(__ANDROID__) || !defined _FILE_OFFSET_BITS || \
_FILE_OFFSET_BITS != 64
-COMPILER_CHECK(sizeof(OFF_T) == sizeof(off_t));
+COMPILER_CHECK(sizeof(::OFF_T) == sizeof(off_t));
# endif
#endif
diff --git a/libsanitizer/interception/interception_win.cc b/libsanitizer/interception/interception_win.cc
index 443bdce185..f9c2e9b3ad 100644
--- a/libsanitizer/interception/interception_win.cc
+++ b/libsanitizer/interception/interception_win.cc
@@ -17,20 +17,6 @@
namespace __interception {
-bool GetRealFunctionAddress(const char *func_name, uptr *func_addr) {
- const char *DLLS[] = {
- "msvcr80.dll",
- "msvcr90.dll",
- "kernel32.dll",
- NULL
- };
- *func_addr = 0;
- for (size_t i = 0; *func_addr == 0 && DLLS[i]; ++i) {
- *func_addr = (uptr)GetProcAddress(GetModuleHandleA(DLLS[i]), func_name);
- }
- return (*func_addr != 0);
-}
-
// FIXME: internal_str* and internal_mem* functions should be moved from the
// ASan sources into interception/.
@@ -54,96 +40,175 @@ static void WriteJumpInstruction(char *jmp_from, char *to) {
*(ptrdiff_t*)(jmp_from + 1) = offset;
}
-bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func) {
-#ifdef _WIN64
-# error OverrideFunction was not tested on x64
-#endif
- // Basic idea:
- // We write 5 bytes (jmp-to-new_func) at the beginning of the 'old_func'
- // to override it. We want to be able to execute the original 'old_func' from
- // the wrapper, so we need to keep the leading 5+ bytes ('head') of the
- // original instructions somewhere with a "jmp old_func+head".
- // We call these 'head'+5 bytes of instructions a "trampoline".
-
+static char *GetMemoryForTrampoline(size_t size) {
// Trampolines are allocated from a common pool.
const int POOL_SIZE = 1024;
static char *pool = NULL;
static size_t pool_used = 0;
- if (pool == NULL) {
- pool = (char*)VirtualAlloc(NULL, POOL_SIZE,
- MEM_RESERVE | MEM_COMMIT,
- PAGE_EXECUTE_READWRITE);
- // FIXME: set PAGE_EXECUTE_READ access after setting all interceptors?
- if (pool == NULL)
- return false;
+ if (!pool) {
+ pool = (char *)VirtualAlloc(NULL, POOL_SIZE, MEM_RESERVE | MEM_COMMIT,
+ PAGE_EXECUTE_READWRITE);
+ // FIXME: Might want to apply PAGE_EXECUTE_READ access after all the
+ // interceptors are in place.
+ if (!pool)
+ return NULL;
_memset(pool, 0xCC /* int 3 */, POOL_SIZE);
}
- char* old_bytes = (char*)old_func;
- char* trampoline = pool + pool_used;
+ if (pool_used + size > POOL_SIZE)
+ return NULL;
+
+ char *ret = pool + pool_used;
+ pool_used += size;
+ return ret;
+}
- // Find out the number of bytes of the instructions we need to copy to the
- // island and store it in 'head'.
- size_t head = 0;
- while (head < 5) {
- switch (old_bytes[head]) {
+// Returns 0 on error.
+static size_t RoundUpToInstrBoundary(size_t size, char *code) {
+ size_t cursor = 0;
+ while (cursor < size) {
+ switch (code[cursor]) {
+ case '\x51': // push ecx
+ case '\x52': // push edx
+ case '\x53': // push ebx
+ case '\x54': // push esp
case '\x55': // push ebp
case '\x56': // push esi
case '\x57': // push edi
- head++;
+ case '\x5D': // pop ebp
+ cursor++;
+ continue;
+ case '\x6A': // 6A XX = push XX
+ cursor += 2;
+ continue;
+ case '\xE9': // E9 XX YY ZZ WW = jmp WWZZYYXX
+ cursor += 5;
continue;
}
- switch (*(unsigned short*)(old_bytes + head)) { // NOLINT
+ switch (*(unsigned short*)(code + cursor)) { // NOLINT
case 0xFF8B: // 8B FF = mov edi, edi
case 0xEC8B: // 8B EC = mov ebp, esp
case 0xC033: // 33 C0 = xor eax, eax
- head += 2;
+ cursor += 2;
continue;
+ case 0x458B: // 8B 45 XX = mov eax, dword ptr [ebp+XXh]
+ case 0x5D8B: // 8B 5D XX = mov ebx, dword ptr [ebp+XXh]
case 0xEC83: // 83 EC XX = sub esp, XX
- head += 3;
+ case 0x75FF: // FF 75 XX = push dword ptr [ebp+XXh]
+ cursor += 3;
continue;
case 0xC1F7: // F7 C1 XX YY ZZ WW = test ecx, WWZZYYXX
- head += 6;
+ case 0x25FF: // FF 25 XX YY ZZ WW = jmp dword ptr ds:[WWZZYYXX]
+ cursor += 6;
+ continue;
+      case 0x3D83: // 83 3D XX YY ZZ WW TT = cmp dword ptr [WWZZYYXX], TT
+ cursor += 7;
continue;
}
- switch (0x00FFFFFF & *(unsigned int*)(old_bytes + head)) {
+ switch (0x00FFFFFF & *(unsigned int*)(code + cursor)) {
      case 0x24448A: // 8A 44 24 XX = mov al, byte ptr [esp+XXh]
+ case 0x24448B: // 8B 44 24 XX = mov eax, dword ptr [esp+XXh]
case 0x244C8B: // 8B 4C 24 XX = mov ecx, dword ptr [esp+XXh]
case 0x24548B: // 8B 54 24 XX = mov edx, dword ptr [esp+XXh]
+ case 0x24748B: // 8B 74 24 XX = mov esi, dword ptr [esp+XXh]
case 0x247C8B: // 8B 7C 24 XX = mov edi, dword ptr [esp+XXh]
- head += 4;
+ cursor += 4;
continue;
}
// Unknown instruction!
- return false;
+ // FIXME: Unknown instruction failures might happen when we add a new
+ // interceptor or a new compiler version. In either case, they should result
+ // in visible and readable error messages. However, merely calling abort()
+ // leads to an infinite recursion in CheckFailed.
+ // Do we have a good way to abort with an error message here?
+ __debugbreak();
+ return 0;
}
- if (pool_used + head + 5 > POOL_SIZE)
- return false;
+ return cursor;
+}
+
+bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func) {
+#ifdef _WIN64
+#error OverrideFunction is not yet supported on x64
+#endif
+ // Function overriding works basically like this:
+ // We write "jmp <new_func>" (5 bytes) at the beginning of the 'old_func'
+ // to override it.
+ // We might want to be able to execute the original 'old_func' from the
+  // wrapper; in this case we need to keep the leading 5+ bytes ('head')
+ // of the original code somewhere with a "jmp <old_func+head>".
+ // We call these 'head'+5 bytes of instructions a "trampoline".
+ char *old_bytes = (char *)old_func;
+
+ // We'll need at least 5 bytes for a 'jmp'.
+ size_t head = 5;
+ if (orig_old_func) {
+ // Find out the number of bytes of the instructions we need to copy
+ // to the trampoline and store it in 'head'.
+ head = RoundUpToInstrBoundary(head, old_bytes);
+ if (!head)
+ return false;
- // Now put the "jump to trampoline" instruction into the original code.
+ // Put the needed instructions into the trampoline bytes.
+ char *trampoline = GetMemoryForTrampoline(head + 5);
+ if (!trampoline)
+ return false;
+ _memcpy(trampoline, old_bytes, head);
+ WriteJumpInstruction(trampoline + head, old_bytes + head);
+ *orig_old_func = (uptr)trampoline;
+ }
+
+ // Now put the "jmp <new_func>" instruction at the original code location.
+ // We should preserve the EXECUTE flag as some of our own code might be
+ // located in the same page (sic!). FIXME: might consider putting the
+ // __interception code into a separate section or something?
DWORD old_prot, unused_prot;
- if (!VirtualProtect((void*)old_func, head, PAGE_EXECUTE_READWRITE,
+ if (!VirtualProtect((void *)old_bytes, head, PAGE_EXECUTE_READWRITE,
&old_prot))
return false;
- // Put the needed instructions into the trampoline bytes.
- _memcpy(trampoline, old_bytes, head);
- WriteJumpInstruction(trampoline + head, old_bytes + head);
- *orig_old_func = (uptr)trampoline;
- pool_used += head + 5;
-
- // Intercept the 'old_func'.
- WriteJumpInstruction(old_bytes, (char*)new_func);
+ WriteJumpInstruction(old_bytes, (char *)new_func);
_memset(old_bytes + 5, 0xCC /* int 3 */, head - 5);
- if (!VirtualProtect((void*)old_func, head, old_prot, &unused_prot))
+ // Restore the original permissions.
+ if (!VirtualProtect((void *)old_bytes, head, old_prot, &unused_prot))
return false; // not clear if this failure bothers us.
return true;
}
+static const void **InterestingDLLsAvailable() {
+ const char *InterestingDLLs[] = {"kernel32.dll",
+ "msvcr110.dll", // VS2012
+ "msvcr120.dll", // VS2013
+ NULL};
+ static void *result[ARRAY_SIZE(InterestingDLLs)] = { 0 };
+ if (!result[0]) {
+ for (size_t i = 0, j = 0; InterestingDLLs[i]; ++i) {
+ if (HMODULE h = GetModuleHandleA(InterestingDLLs[i]))
+ result[j++] = (void *)h;
+ }
+ }
+ return (const void **)&result[0];
+}
+
+static bool GetFunctionAddressInDLLs(const char *func_name, uptr *func_addr) {
+ *func_addr = 0;
+ const void **DLLs = InterestingDLLsAvailable();
+ for (size_t i = 0; *func_addr == 0 && DLLs[i]; ++i)
+ *func_addr = (uptr)GetProcAddress((HMODULE)DLLs[i], func_name);
+ return (*func_addr != 0);
+}
+
+bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func) {
+ uptr orig_func;
+ if (!GetFunctionAddressInDLLs(name, &orig_func))
+ return false;
+ return OverrideFunction(orig_func, new_func, orig_old_func);
+}
+
} // namespace __interception
#endif // _WIN32
diff --git a/libsanitizer/interception/interception_win.h b/libsanitizer/interception/interception_win.h
index b46ad0dc66..e9c6200d1b 100644
--- a/libsanitizer/interception/interception_win.h
+++ b/libsanitizer/interception/interception_win.h
@@ -20,27 +20,29 @@
#define INTERCEPTION_WIN_H
namespace __interception {
-// returns true if a function with the given name was found.
-bool GetRealFunctionAddress(const char *func_name, uptr *func_addr);
+// All the functions in the OverrideFunction() family return true on success,
+// false on failure (including "couldn't find the function").
-// returns true if the old function existed, false on failure.
-bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func);
+// Overrides a function by its address.
+bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func = 0);
+
+// Overrides a function in a system DLL or DLL CRT by its exported name.
+bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func = 0);
} // namespace __interception
-#if defined(_DLL)
-# define INTERCEPT_FUNCTION_WIN(func) \
- ::__interception::GetRealFunctionAddress( \
- #func, (::__interception::uptr*)&REAL(func))
+#if defined(INTERCEPTION_DYNAMIC_CRT)
+#define INTERCEPT_FUNCTION_WIN(func) \
+ ::__interception::OverrideFunction(#func, \
+ (::__interception::uptr)WRAP(func), \
+ (::__interception::uptr *)&REAL(func))
#else
-# define INTERCEPT_FUNCTION_WIN(func) \
- ::__interception::OverrideFunction( \
- (::__interception::uptr)func, \
- (::__interception::uptr)WRAP(func), \
- (::__interception::uptr*)&REAL(func))
+#define INTERCEPT_FUNCTION_WIN(func) \
+ ::__interception::OverrideFunction((::__interception::uptr)func, \
+ (::__interception::uptr)WRAP(func), \
+ (::__interception::uptr *)&REAL(func))
#endif
-#define INTERCEPT_FUNCTION_VER_WIN(func, symver) \
- INTERCEPT_FUNCTION_WIN(func)
+#define INTERCEPT_FUNCTION_VER_WIN(func, symver) INTERCEPT_FUNCTION_WIN(func)
#endif // INTERCEPTION_WIN_H
#endif // _WIN32
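
A hedged usage sketch of the by-name OverrideFunction() overload declared above; the strlen replacement is hypothetical, and the call only succeeds if one of the searched DLLs actually exports that name.

    // Windows-only sketch, assuming it is built as part of the interception library.
    #include <string.h>
    #include "interception.h"

    static size_t (*real_strlen)(const char *);

    static size_t my_strlen(const char *s) {
      // Extra checking could go here before delegating to the real function.
      return real_strlen(s);
    }

    void InstallStrlenHook() {
      // Looks the export up in the known system/CRT DLLs and patches its entry.
      ::__interception::OverrideFunction("strlen",
                                         (::__interception::uptr)&my_strlen,
                                         (::__interception::uptr *)&real_strlen);
    }
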
diff --git a/libsanitizer/libbacktrace/Makefile.am b/libsanitizer/libbacktrace/Makefile.am
index fd9eb65f7e..34bfb1eef2 100644
--- a/libsanitizer/libbacktrace/Makefile.am
+++ b/libsanitizer/libbacktrace/Makefile.am
@@ -40,6 +40,7 @@ C_WARN_FLAGS = $(WARN_FLAGS) -Wstrict-prototypes -Wmissing-prototypes -Wold-styl
CXX_WARN_FLAGS = $(WARN_FLAGS) -Wno-unused-parameter
AM_CFLAGS = $(C_WARN_FLAGS)
AM_CXXFLAGS = $(CXX_WARN_FLAGS) -fno-rtti -fno-exceptions
+AM_CXXFLAGS += -std=gnu++11
noinst_LTLIBRARIES = libsanitizer_libbacktrace.la
diff --git a/libsanitizer/libbacktrace/Makefile.in b/libsanitizer/libbacktrace/Makefile.in
index 0dfb53d2c9..7d2e2443e3 100644
--- a/libsanitizer/libbacktrace/Makefile.in
+++ b/libsanitizer/libbacktrace/Makefile.in
@@ -192,6 +192,7 @@ SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
+TSAN_TARGET_DEPENDENT_OBJECTS = @TSAN_TARGET_DEPENDENT_OBJECTS@
VERSION = @VERSION@
VIEW_FILE = @VIEW_FILE@
abs_builddir = @abs_builddir@
@@ -270,7 +271,7 @@ WARN_FLAGS = -W -Wall -Wwrite-strings -Wmissing-format-attribute \
C_WARN_FLAGS = $(WARN_FLAGS) -Wstrict-prototypes -Wmissing-prototypes -Wold-style-definition
CXX_WARN_FLAGS = $(WARN_FLAGS) -Wno-unused-parameter
AM_CFLAGS = $(C_WARN_FLAGS)
-AM_CXXFLAGS = $(CXX_WARN_FLAGS) -fno-rtti -fno-exceptions
+AM_CXXFLAGS = $(CXX_WARN_FLAGS) -fno-rtti -fno-exceptions -std=gnu++11
noinst_LTLIBRARIES = libsanitizer_libbacktrace.la
libsanitizer_libbacktrace_la_SOURCES = \
../../libbacktrace/backtrace.h \
diff --git a/libsanitizer/lsan/Makefile.am b/libsanitizer/lsan/Makefile.am
index 7a508c1268..03ec4fe91f 100644
--- a/libsanitizer/lsan/Makefile.am
+++ b/libsanitizer/lsan/Makefile.am
@@ -6,6 +6,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+AM_CXXFLAGS += -std=gnu++11
ACLOCAL_AMFLAGS = -I m4
noinst_LTLIBRARIES = libsanitizer_lsan.la
diff --git a/libsanitizer/lsan/Makefile.in b/libsanitizer/lsan/Makefile.in
index 47caebc09f..3ad44012e9 100644
--- a/libsanitizer/lsan/Makefile.in
+++ b/libsanitizer/lsan/Makefile.in
@@ -185,6 +185,7 @@ SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
+TSAN_TARGET_DEPENDENT_OBJECTS = @TSAN_TARGET_DEPENDENT_OBJECTS@
VERSION = @VERSION@
VIEW_FILE = @VIEW_FILE@
abs_builddir = @abs_builddir@
@@ -260,7 +261,8 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
-Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
-fomit-frame-pointer -funwind-tables -fvisibility=hidden \
- -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+ -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) \
+ -std=gnu++11
ACLOCAL_AMFLAGS = -I m4
noinst_LTLIBRARIES = libsanitizer_lsan.la
@LSAN_SUPPORTED_TRUE@toolexeclib_LTLIBRARIES = liblsan.la
diff --git a/libsanitizer/lsan/lsan.cc b/libsanitizer/lsan/lsan.cc
index 270979a78e..61792d9c9e 100644
--- a/libsanitizer/lsan/lsan.cc
+++ b/libsanitizer/lsan/lsan.cc
@@ -23,14 +23,9 @@ bool lsan_init_is_running;
namespace __lsan {
-static void InitializeCommonFlags() {
- CommonFlags *cf = common_flags();
- SetCommonFlagsDefaults(cf);
- cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
- cf->malloc_context_size = 30;
- cf->detect_leaks = true;
-
- ParseCommonFlagsFromString(cf, GetEnv("LSAN_OPTIONS"));
+///// Interface to the common LSan module. /////
+bool WordIsPoisoned(uptr addr) {
+ return false;
}
} // namespace __lsan
@@ -43,7 +38,7 @@ extern "C" void __lsan_init() {
return;
lsan_init_is_running = true;
SanitizerToolName = "LeakSanitizer";
- InitializeCommonFlags();
+ InitCommonLsan(true);
InitializeAllocator();
InitTlsSize();
InitializeInterceptors();
@@ -53,16 +48,14 @@ extern "C" void __lsan_init() {
ThreadStart(tid, GetTid());
SetCurrentThread(tid);
- // Start symbolizer process if necessary.
- if (common_flags()->symbolize) {
- Symbolizer::Init(common_flags()->external_symbolizer_path);
- } else {
- Symbolizer::Disable();
- }
-
- InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
Atexit(DoLeakCheck);
lsan_inited = true;
lsan_init_is_running = false;
}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ GET_STACK_TRACE_FATAL;
+ stack.Print();
+}
diff --git a/libsanitizer/lsan/lsan.h b/libsanitizer/lsan/lsan.h
index 8a5030ce87..ee2fc02cf0 100644
--- a/libsanitizer/lsan/lsan.h
+++ b/libsanitizer/lsan/lsan.h
@@ -13,6 +13,26 @@
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
+#define GET_STACK_TRACE(max_size, fast) \
+ BufferedStackTrace stack; \
+ { \
+ uptr stack_top = 0, stack_bottom = 0; \
+ ThreadContext *t; \
+ if (fast && (t = CurrentThreadContext())) { \
+ stack_top = t->stack_end(); \
+ stack_bottom = t->stack_begin(); \
+ } \
+ stack.Unwind(max_size, StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
+ /* context */ 0, stack_top, stack_bottom, fast); \
+ }
+
+#define GET_STACK_TRACE_FATAL \
+ GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
+
+#define GET_STACK_TRACE_MALLOC \
+ GET_STACK_TRACE(__sanitizer::common_flags()->malloc_context_size, \
+ common_flags()->fast_unwind_on_malloc)
+
namespace __lsan {
void InitializeInterceptors();
diff --git a/libsanitizer/lsan/lsan_allocator.cc b/libsanitizer/lsan/lsan_allocator.cc
index ce47dfcd21..2d406a0f85 100644
--- a/libsanitizer/lsan/lsan_allocator.cc
+++ b/libsanitizer/lsan/lsan_allocator.cc
@@ -13,6 +13,7 @@
#include "lsan_allocator.h"
#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
@@ -51,7 +52,7 @@ void AllocatorThreadFinish() {
allocator.SwallowCache(&cache);
}
-static ChunkMetadata *Metadata(void *p) {
+static ChunkMetadata *Metadata(const void *p) {
return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}
@@ -60,7 +61,7 @@ static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
ChunkMetadata *m = Metadata(p);
CHECK(m);
m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
- m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
+ m->stack_trace_id = StackDepotPut(stack);
m->requested_size = size;
atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}
@@ -85,10 +86,12 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
if (cleared && allocator.FromPrimary(p))
memset(p, 0, size);
RegisterAllocation(stack, p, size);
+ if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
return p;
}
void Deallocate(void *p) {
+ if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
RegisterDeallocation(p);
allocator.Deallocate(&cache, p);
}
@@ -111,7 +114,7 @@ void GetAllocatorCacheRange(uptr *begin, uptr *end) {
*end = *begin + sizeof(cache);
}
-uptr GetMallocUsableSize(void *p) {
+uptr GetMallocUsableSize(const void *p) {
ChunkMetadata *m = Metadata(p);
if (!m) return 0;
return m->requested_size;
@@ -141,7 +144,11 @@ uptr PointsIntoChunk(void* p) {
if (addr < chunk) return 0;
ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
CHECK(m);
- if (m->allocated && addr < chunk + m->requested_size)
+ if (!m->allocated)
+ return 0;
+ if (addr < chunk + m->requested_size)
+ return chunk;
+ if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
return chunk;
return 0;
}
@@ -194,3 +201,38 @@ IgnoreObjectResult IgnoreObjectLocked(const void *p) {
}
}
} // namespace __lsan
+
+using namespace __lsan;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_current_allocated_bytes() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatAllocated];
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_heap_size() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatMapped];
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_free_bytes() { return 0; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_unmapped_bytes() { return 0; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_get_ownership(const void *p) { return Metadata(p) != 0; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_allocated_size(const void *p) {
+ return GetMallocUsableSize(p);
+}
+} // extern "C"
diff --git a/libsanitizer/lsan/lsan_allocator.h b/libsanitizer/lsan/lsan_allocator.h
index 61ea86572e..aae0d28dc4 100644
--- a/libsanitizer/lsan/lsan_allocator.h
+++ b/libsanitizer/lsan/lsan_allocator.h
@@ -23,7 +23,7 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
void Deallocate(void *p);
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
uptr alignment);
-uptr GetMallocUsableSize(void *p);
+uptr GetMallocUsableSize(const void *p);
template<typename Callable>
void ForEachChunk(const Callable &callback);
diff --git a/libsanitizer/lsan/lsan_common.cc b/libsanitizer/lsan/lsan_common.cc
index bbc5b5f037..aa79a7e41c 100644
--- a/libsanitizer/lsan/lsan_common.cc
+++ b/libsanitizer/lsan/lsan_common.cc
@@ -15,6 +15,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
@@ -24,7 +25,8 @@
#if CAN_SANITIZE_LEAKS
namespace __lsan {
-// This mutex is used to prevent races between DoLeakCheck and IgnoreObject.
+// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
+// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);
THREADLOCAL int disable_counter;
@@ -32,65 +34,90 @@ bool DisabledInThisThread() { return disable_counter > 0; }
Flags lsan_flags;
-static void InitializeFlags() {
+static void InitializeFlags(bool standalone) {
Flags *f = flags();
// Default values.
f->report_objects = false;
f->resolution = 0;
f->max_leaks = 0;
f->exitcode = 23;
- f->suppressions="";
f->use_registers = true;
f->use_globals = true;
f->use_stacks = true;
f->use_tls = true;
+ f->use_root_regions = true;
f->use_unaligned = false;
- f->verbosity = 0;
+ f->use_poisoned = false;
f->log_pointers = false;
f->log_threads = false;
const char *options = GetEnv("LSAN_OPTIONS");
if (options) {
- ParseFlag(options, &f->use_registers, "use_registers");
- ParseFlag(options, &f->use_globals, "use_globals");
- ParseFlag(options, &f->use_stacks, "use_stacks");
- ParseFlag(options, &f->use_tls, "use_tls");
- ParseFlag(options, &f->use_unaligned, "use_unaligned");
- ParseFlag(options, &f->report_objects, "report_objects");
- ParseFlag(options, &f->resolution, "resolution");
+ ParseFlag(options, &f->use_registers, "use_registers", "");
+ ParseFlag(options, &f->use_globals, "use_globals", "");
+ ParseFlag(options, &f->use_stacks, "use_stacks", "");
+ ParseFlag(options, &f->use_tls, "use_tls", "");
+ ParseFlag(options, &f->use_root_regions, "use_root_regions", "");
+ ParseFlag(options, &f->use_unaligned, "use_unaligned", "");
+ ParseFlag(options, &f->use_poisoned, "use_poisoned", "");
+ ParseFlag(options, &f->report_objects, "report_objects", "");
+ ParseFlag(options, &f->resolution, "resolution", "");
CHECK_GE(&f->resolution, 0);
- ParseFlag(options, &f->max_leaks, "max_leaks");
+ ParseFlag(options, &f->max_leaks, "max_leaks", "");
CHECK_GE(&f->max_leaks, 0);
- ParseFlag(options, &f->verbosity, "verbosity");
- ParseFlag(options, &f->log_pointers, "log_pointers");
- ParseFlag(options, &f->log_threads, "log_threads");
- ParseFlag(options, &f->exitcode, "exitcode");
- ParseFlag(options, &f->suppressions, "suppressions");
+ ParseFlag(options, &f->log_pointers, "log_pointers", "");
+ ParseFlag(options, &f->log_threads, "log_threads", "");
+ ParseFlag(options, &f->exitcode, "exitcode", "");
}
+
+ // Set defaults for common flags (only in standalone mode) and parse
+ // them from LSAN_OPTIONS.
+ CommonFlags *cf = common_flags();
+ if (standalone) {
+ SetCommonFlagsDefaults(cf);
+ cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
+ cf->malloc_context_size = 30;
+ cf->detect_leaks = true;
+ }
+ ParseCommonFlagsFromString(cf, options);
}
-SuppressionContext *suppression_ctx;
+#define LOG_POINTERS(...) \
+ do { \
+ if (flags()->log_pointers) Report(__VA_ARGS__); \
+ } while (0);
+
+#define LOG_THREADS(...) \
+ do { \
+ if (flags()->log_threads) Report(__VA_ARGS__); \
+ } while (0);
+
+static bool suppressions_inited = false;
void InitializeSuppressions() {
- CHECK(!suppression_ctx);
- ALIGNED(64) static char placeholder_[sizeof(SuppressionContext)];
- suppression_ctx = new(placeholder_) SuppressionContext;
- char *suppressions_from_file;
- uptr buffer_size;
- if (ReadFileToBuffer(flags()->suppressions, &suppressions_from_file,
- &buffer_size, 1 << 26 /* max_len */))
- suppression_ctx->Parse(suppressions_from_file);
- if (flags()->suppressions[0] && !buffer_size) {
- Printf("LeakSanitizer: failed to read suppressions file '%s'\n",
- flags()->suppressions);
- Die();
- }
+ CHECK(!suppressions_inited);
+ SuppressionContext::InitIfNecessary();
if (&__lsan_default_suppressions)
- suppression_ctx->Parse(__lsan_default_suppressions());
+ SuppressionContext::Get()->Parse(__lsan_default_suppressions());
+ suppressions_inited = true;
+}
+
+struct RootRegion {
+ const void *begin;
+ uptr size;
+};
+
+InternalMmapVector<RootRegion> *root_regions;
+
+void InitializeRootRegions() {
+ CHECK(!root_regions);
+ ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
+ root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
}
-void InitCommonLsan() {
- InitializeFlags();
+void InitCommonLsan(bool standalone) {
+ InitializeFlags(standalone);
+ InitializeRootRegions();
if (common_flags()->detect_leaks) {
// Initialization which can fail or print warnings should only be done if
// LSan is actually enabled.
@@ -99,9 +126,9 @@ void InitCommonLsan() {
}
}
-class Decorator: private __sanitizer::AnsiColorDecorator {
+class Decorator: public __sanitizer::SanitizerCommonDecorator {
public:
- Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { }
+ Decorator() : SanitizerCommonDecorator() { }
const char *Error() { return Red(); }
const char *Leak() { return Blue(); }
const char *End() { return Default(); }
@@ -130,8 +157,7 @@ void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier,
const char *region_type, ChunkTag tag) {
const uptr alignment = flags()->pointer_alignment();
- if (flags()->log_pointers)
- Report("Scanning %s range %p-%p.\n", region_type, begin, end);
+ LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
uptr pp = begin;
if (pp % alignment)
pp = pp + alignment - pp % alignment;
@@ -146,10 +172,19 @@ void ScanRangeForPointers(uptr begin, uptr end,
// Reachable beats ignored beats leaked.
if (m.tag() == kReachable) continue;
if (m.tag() == kIgnored && tag != kReachable) continue;
+
+ // Do this check relatively late so we can log only the interesting cases.
+ if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
+ LOG_POINTERS(
+ "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
+ "%zu.\n",
+ pp, p, chunk, chunk + m.requested_size(), m.requested_size());
+ continue;
+ }
+
m.set_tag(tag);
- if (flags()->log_pointers)
- Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
- chunk, chunk + m.requested_size(), m.requested_size());
+ LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
+ chunk, chunk + m.requested_size(), m.requested_size());
if (frontier)
frontier->push_back(chunk);
}
@@ -168,7 +203,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
uptr registers_end = registers_begin + registers.size();
for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
- if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
+ LOG_THREADS("Processing thread %d.\n", os_id);
uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
&tls_begin, &tls_end,
@@ -176,8 +211,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
if (!thread_found) {
// If a thread can't be found in the thread registry, it's probably in the
// process of destruction. Log this event and move on.
- if (flags()->log_threads)
- Report("Thread %d not found in registry.\n", os_id);
+ LOG_THREADS("Thread %d not found in registry.\n", os_id);
continue;
}
uptr sp;
@@ -194,14 +228,12 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
"REGISTERS", kReachable);
if (flags()->use_stacks) {
- if (flags()->log_threads)
- Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
+ LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
if (sp < stack_begin || sp >= stack_end) {
// SP is outside the recorded stack range (e.g. the thread is running a
// signal handler on alternate stack). Again, consider the entire stack
// range to be reachable.
- if (flags()->log_threads)
- Report("WARNING: stack pointer not in stack range.\n");
+ LOG_THREADS("WARNING: stack pointer not in stack range.\n");
} else {
// Shrink the stack range to ignore out-of-scope values.
stack_begin = sp;
@@ -212,7 +244,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
}
if (flags()->use_tls) {
- if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
+ LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
if (cache_begin == cache_end) {
ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
} else {
@@ -230,6 +262,37 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
}
}
+static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
+ uptr root_end) {
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ uptr begin, end, prot;
+ while (proc_maps.Next(&begin, &end,
+ /*offset*/ 0, /*filename*/ 0, /*filename_size*/ 0,
+ &prot)) {
+ uptr intersection_begin = Max(root_begin, begin);
+ uptr intersection_end = Min(end, root_end);
+ if (intersection_begin >= intersection_end) continue;
+ bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
+ LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
+ root_begin, root_end, begin, end,
+ is_readable ? "readable" : "unreadable");
+ if (is_readable)
+ ScanRangeForPointers(intersection_begin, intersection_end, frontier,
+ "ROOT", kReachable);
+ }
+}
+
+// Scans root regions for heap pointers.
+static void ProcessRootRegions(Frontier *frontier) {
+ if (!flags()->use_root_regions) return;
+ CHECK(root_regions);
+ for (uptr i = 0; i < root_regions->size(); i++) {
+ RootRegion region = (*root_regions)[i];
+ uptr begin_addr = reinterpret_cast<uptr>(region.begin);
+ ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
+ }
+}
+
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
while (frontier->size()) {
uptr next_chunk = frontier->back();
@@ -264,41 +327,37 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
// Holds the flood fill frontier.
- Frontier frontier(GetPageSizeCached());
+ Frontier frontier(1);
- if (flags()->use_globals)
- ProcessGlobalRegions(&frontier);
+ ProcessGlobalRegions(&frontier);
ProcessThreads(suspended_threads, &frontier);
+ ProcessRootRegions(&frontier);
FloodFillTag(&frontier, kReachable);
// The check here is relatively expensive, so we do this in a separate flood
// fill. That way we can skip the check for chunks that are reachable
// otherwise.
- if (flags()->log_pointers)
- Report("Processing platform-specific allocations.\n");
+ LOG_POINTERS("Processing platform-specific allocations.\n");
ProcessPlatformSpecificAllocations(&frontier);
FloodFillTag(&frontier, kReachable);
- if (flags()->log_pointers)
- Report("Scanning ignored chunks.\n");
+ LOG_POINTERS("Scanning ignored chunks.\n");
CHECK_EQ(0, frontier.size());
ForEachChunk(CollectIgnoredCb, &frontier);
FloodFillTag(&frontier, kIgnored);
// Iterate over leaked chunks and mark those that are reachable from other
// leaked chunks.
- if (flags()->log_pointers)
- Report("Scanning leaked chunks.\n");
+ LOG_POINTERS("Scanning leaked chunks.\n");
ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
}
static void PrintStackTraceById(u32 stack_trace_id) {
CHECK(stack_trace_id);
- uptr size = 0;
- const uptr *trace = StackDepotGet(stack_trace_id, &size);
- StackTrace::PrintStack(trace, size);
+ StackDepotGet(stack_trace_id).Print();
}
-// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport.
+// ForEachChunk callback. Aggregates information about unreachable chunks into
+// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
CHECK(arg);
LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
@@ -307,32 +366,22 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
if (!m.allocated()) return;
if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
uptr resolution = flags()->resolution;
+ u32 stack_trace_id = 0;
if (resolution > 0) {
- uptr size = 0;
- const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
- size = Min(size, resolution);
- leak_report->Add(StackDepotPut(trace, size), m.requested_size(), m.tag());
+ StackTrace stack = StackDepotGet(m.stack_trace_id());
+ stack.size = Min(stack.size, resolution);
+ stack_trace_id = StackDepotPut(stack);
} else {
- leak_report->Add(m.stack_trace_id(), m.requested_size(), m.tag());
+ stack_trace_id = m.stack_trace_id();
}
- }
-}
-
-// ForEachChunkCallback. Prints addresses of unreachable chunks.
-static void PrintLeakedCb(uptr chunk, void *arg) {
- chunk = GetUserBegin(chunk);
- LsanMetadata m(chunk);
- if (!m.allocated()) return;
- if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
- Printf("%s leaked %zu byte object at %p.\n",
- m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
- m.requested_size(), chunk);
+ leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
+ m.tag());
}
}
static void PrintMatchedSuppressions() {
InternalMmapVector<Suppression *> matched(1);
- suppression_ctx->GetMatched(&matched);
+ SuppressionContext::Get()->GetMatched(&matched);
if (!matched.size())
return;
const char *line = "-----------------------------------------------------";
@@ -345,12 +394,6 @@ static void PrintMatchedSuppressions() {
Printf("%s\n\n", line);
}
-static void PrintLeaked() {
- Printf("\n");
- Printf("Reporting individual objects:\n");
- ForEachChunk(PrintLeakedCb, 0 /* arg */);
-}
-
struct DoLeakCheckParam {
bool success;
LeakReport leak_report;
@@ -361,11 +404,8 @@ static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
CHECK(param);
CHECK(!param->success);
- CHECK(param->leak_report.IsEmpty());
ClassifyAllChunks(suspended_threads);
ForEachChunk(CollectLeaksCb, &param->leak_report);
- if (!param->leak_report.IsEmpty() && flags()->report_objects)
- PrintLeaked();
param->success = true;
}
@@ -376,7 +416,7 @@ void DoLeakCheck() {
if (already_done) return;
already_done = true;
if (&__lsan_is_turned_off && __lsan_is_turned_off())
- return;
+ return;
DoLeakCheckParam param;
param.success = false;
@@ -390,8 +430,9 @@ void DoLeakCheck() {
Report("LeakSanitizer has encountered a fatal error.\n");
Die();
}
- uptr have_unsuppressed = param.leak_report.ApplySuppressions();
- if (have_unsuppressed) {
+ param.leak_report.ApplySuppressions();
+ uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
+ if (unsuppressed_count > 0) {
Decorator d;
Printf("\n"
"================================================================="
@@ -399,38 +440,52 @@ void DoLeakCheck() {
Printf("%s", d.Error());
Report("ERROR: LeakSanitizer: detected memory leaks\n");
Printf("%s", d.End());
- param.leak_report.PrintLargest(flags()->max_leaks);
+ param.leak_report.ReportTopLeaks(flags()->max_leaks);
}
- if (have_unsuppressed || (flags()->verbosity >= 1)) {
+ if (common_flags()->print_suppressions)
PrintMatchedSuppressions();
+ if (unsuppressed_count > 0) {
param.leak_report.PrintSummary();
+ if (flags()->exitcode) {
+ if (common_flags()->coverage)
+ __sanitizer_cov_dump();
+ internal__exit(flags()->exitcode);
+ }
}
- if (have_unsuppressed && flags()->exitcode)
- internal__exit(flags()->exitcode);
}
static Suppression *GetSuppressionForAddr(uptr addr) {
+ Suppression *s;
+
+ // Suppress by module name.
+ const char *module_name;
+ uptr module_offset;
+ if (Symbolizer::GetOrInit()
+ ->GetModuleNameAndOffsetForPC(addr, &module_name, &module_offset) &&
+ SuppressionContext::Get()->Match(module_name, SuppressionLeak, &s))
+ return s;
+
+ // Suppress by file or function name.
static const uptr kMaxAddrFrames = 16;
InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
- uptr addr_frames_num = Symbolizer::Get()->SymbolizeCode(
+ uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC(
addr, addr_frames.data(), kMaxAddrFrames);
for (uptr i = 0; i < addr_frames_num; i++) {
- Suppression* s;
- if (suppression_ctx->Match(addr_frames[i].function, SuppressionLeak, &s) ||
- suppression_ctx->Match(addr_frames[i].file, SuppressionLeak, &s) ||
- suppression_ctx->Match(addr_frames[i].module, SuppressionLeak, &s))
+ if (SuppressionContext::Get()->Match(addr_frames[i].function,
+ SuppressionLeak, &s) ||
+ SuppressionContext::Get()->Match(addr_frames[i].file, SuppressionLeak,
+ &s))
return s;
}
return 0;
}
static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
- uptr size = 0;
- const uptr *trace = StackDepotGet(stack_trace_id, &size);
- for (uptr i = 0; i < size; i++) {
- Suppression *s =
- GetSuppressionForAddr(StackTrace::GetPreviousInstructionPc(trace[i]));
+ StackTrace stack = StackDepotGet(stack_trace_id);
+ for (uptr i = 0; i < stack.size; i++) {
+ Suppression *s = GetSuppressionForAddr(
+ StackTrace::GetPreviousInstructionPc(stack.trace[i]));
if (s) return s;
}
return 0;
@@ -439,26 +494,35 @@ static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
///// LeakReport implementation. /////
// A hard limit on the number of distinct leaks, to avoid quadratic complexity
-// in LeakReport::Add(). We don't expect to ever see this many leaks in
-// real-world applications.
+// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
+// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;
-void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
+void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
+ uptr leaked_size, ChunkTag tag) {
CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
bool is_directly_leaked = (tag == kDirectlyLeaked);
- for (uptr i = 0; i < leaks_.size(); i++)
+ uptr i;
+ for (i = 0; i < leaks_.size(); i++) {
if (leaks_[i].stack_trace_id == stack_trace_id &&
leaks_[i].is_directly_leaked == is_directly_leaked) {
leaks_[i].hit_count++;
leaks_[i].total_size += leaked_size;
- return;
+ break;
}
- if (leaks_.size() == kMaxLeaksConsidered) return;
- Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
- is_directly_leaked, /* is_suppressed */ false };
- leaks_.push_back(leak);
+ }
+ if (i == leaks_.size()) {
+ if (leaks_.size() == kMaxLeaksConsidered) return;
+ Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
+ is_directly_leaked, /* is_suppressed */ false };
+ leaks_.push_back(leak);
+ }
+ if (flags()->report_objects) {
+ LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
+ leaked_objects_.push_back(obj);
+ }
}
static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
@@ -468,7 +532,7 @@ static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
return leak1.is_directly_leaked;
}
-void LeakReport::PrintLargest(uptr num_leaks_to_print) {
+void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
CHECK(leaks_.size() <= kMaxLeaksConsidered);
Printf("\n");
if (leaks_.size() == kMaxLeaksConsidered)
@@ -476,31 +540,49 @@ void LeakReport::PrintLargest(uptr num_leaks_to_print) {
"reported.\n",
kMaxLeaksConsidered);
- uptr unsuppressed_count = 0;
- for (uptr i = 0; i < leaks_.size(); i++)
- if (!leaks_[i].is_suppressed) unsuppressed_count++;
- if (num_leaks_to_print > 0 && num_leaks_to_print < unsuppressed_count)
- Printf("The %zu largest leak(s):\n", num_leaks_to_print);
+ uptr unsuppressed_count = UnsuppressedLeakCount();
+ if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
+ Printf("The %zu top leak(s):\n", num_leaks_to_report);
InternalSort(&leaks_, leaks_.size(), LeakComparator);
- uptr leaks_printed = 0;
- Decorator d;
+ uptr leaks_reported = 0;
for (uptr i = 0; i < leaks_.size(); i++) {
if (leaks_[i].is_suppressed) continue;
- Printf("%s", d.Leak());
- Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
- leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
- leaks_[i].total_size, leaks_[i].hit_count);
- Printf("%s", d.End());
- PrintStackTraceById(leaks_[i].stack_trace_id);
- leaks_printed++;
- if (leaks_printed == num_leaks_to_print) break;
+ PrintReportForLeak(i);
+ leaks_reported++;
+ if (leaks_reported == num_leaks_to_report) break;
}
- if (leaks_printed < unsuppressed_count) {
- uptr remaining = unsuppressed_count - leaks_printed;
+ if (leaks_reported < unsuppressed_count) {
+ uptr remaining = unsuppressed_count - leaks_reported;
Printf("Omitting %zu more leak(s).\n", remaining);
}
}
+void LeakReport::PrintReportForLeak(uptr index) {
+ Decorator d;
+ Printf("%s", d.Leak());
+ Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
+ leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
+ leaks_[index].total_size, leaks_[index].hit_count);
+ Printf("%s", d.End());
+
+ PrintStackTraceById(leaks_[index].stack_trace_id);
+
+ if (flags()->report_objects) {
+ Printf("Objects leaked above:\n");
+ PrintLeakedObjectsForLeak(index);
+ Printf("\n");
+ }
+}
+
+void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
+ u32 leak_id = leaks_[index].id;
+ for (uptr j = 0; j < leaked_objects_.size(); j++) {
+ if (leaked_objects_[j].leak_id == leak_id)
+ Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
+ leaked_objects_[j].size);
+ }
+}
+
void LeakReport::PrintSummary() {
CHECK(leaks_.size() <= kMaxLeaksConsidered);
uptr bytes = 0, allocations = 0;
@@ -516,20 +598,24 @@ void LeakReport::PrintSummary() {
ReportErrorSummary(summary.data());
}
-uptr LeakReport::ApplySuppressions() {
- uptr unsuppressed_count = 0;
+void LeakReport::ApplySuppressions() {
for (uptr i = 0; i < leaks_.size(); i++) {
Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
if (s) {
s->weight += leaks_[i].total_size;
s->hit_count += leaks_[i].hit_count;
leaks_[i].is_suppressed = true;
- } else {
- unsuppressed_count++;
}
}
- return unsuppressed_count;
}
+
+uptr LeakReport::UnsuppressedLeakCount() {
+ uptr result = 0;
+ for (uptr i = 0; i < leaks_.size(); i++)
+ if (!leaks_[i].is_suppressed) result++;
+ return result;
+}
+
} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS
@@ -545,13 +631,51 @@ void __lsan_ignore_object(const void *p) {
// locked.
BlockingMutexLock l(&global_mutex);
IgnoreObjectResult res = IgnoreObjectLocked(p);
- if (res == kIgnoreObjectInvalid && flags()->verbosity >= 2)
- Report("__lsan_ignore_object(): no heap object found at %p", p);
- if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 2)
- Report("__lsan_ignore_object(): "
+ if (res == kIgnoreObjectInvalid)
+ VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
+ if (res == kIgnoreObjectAlreadyIgnored)
+ VReport(1, "__lsan_ignore_object(): "
"heap object at %p is already being ignored\n", p);
- if (res == kIgnoreObjectSuccess && flags()->verbosity >= 3)
- Report("__lsan_ignore_object(): ignoring heap object at %p\n", p);
+ if (res == kIgnoreObjectSuccess)
+ VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_register_root_region(const void *begin, uptr size) {
+#if CAN_SANITIZE_LEAKS
+ BlockingMutexLock l(&global_mutex);
+ CHECK(root_regions);
+ RootRegion region = {begin, size};
+ root_regions->push_back(region);
+ VReport(1, "Registered root region at %p of size %llu\n", begin, size);
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_unregister_root_region(const void *begin, uptr size) {
+#if CAN_SANITIZE_LEAKS
+ BlockingMutexLock l(&global_mutex);
+ CHECK(root_regions);
+ bool removed = false;
+ for (uptr i = 0; i < root_regions->size(); i++) {
+ RootRegion region = (*root_regions)[i];
+ if (region.begin == begin && region.size == size) {
+ removed = true;
+ uptr last_index = root_regions->size() - 1;
+ (*root_regions)[i] = (*root_regions)[last_index];
+ root_regions->pop_back();
+ VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
+ break;
+ }
+ }
+ if (!removed) {
+ Report(
+ "__lsan_unregister_root_region(): region at %p of size %llu has not "
+ "been registered.\n",
+ begin, size);
+ Die();
+ }
#endif // CAN_SANITIZE_LEAKS
}
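
The two entry points above let a program add and remove custom root regions for the leak scan (consumed by ProcessRootRegions() earlier in this file). A hedged usage sketch, assuming the declarations come from <sanitizer/lsan_interface.h>:

    // Sketch: treat a manually managed arena as a root for leak detection.
    #include <sanitizer/lsan_interface.h>
    #include <stdlib.h>
    #include <string.h>

    int main() {
      const size_t kArenaSize = 1 << 20;
      void *arena = malloc(kArenaSize);
      memset(arena, 0, kArenaSize);
      // Pointers stored inside [arena, arena + kArenaSize) now keep their
      // targets reachable during the leak check.
      __lsan_register_root_region(arena, kArenaSize);
      /* ... use the arena ... */
      __lsan_unregister_root_region(arena, kArenaSize);
      free(arena);
      return 0;
    }
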
diff --git a/libsanitizer/lsan/lsan_common.h b/libsanitizer/lsan/lsan_common.h
index 5d9b4eb62e..72523d9ff5 100644
--- a/libsanitizer/lsan/lsan_common.h
+++ b/libsanitizer/lsan/lsan_common.h
@@ -19,7 +19,7 @@
#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
-#if SANITIZER_LINUX && defined(__x86_64__)
+#if SANITIZER_LINUX && defined(__x86_64__) && (SANITIZER_WORDSIZE == 64)
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
@@ -49,8 +49,6 @@ struct Flags {
int max_leaks;
// If nonzero kill the process with this exit code upon finding leaks.
int exitcode;
- // Suppressions file name.
- const char* suppressions;
// Flags controlling the root set of reachable memory.
// Global variables (.data and .bss).
@@ -61,12 +59,13 @@ struct Flags {
bool use_registers;
// TLS and thread-specific storage.
bool use_tls;
+ // Regions added via __lsan_register_root_region().
+ bool use_root_regions;
// Consider unaligned pointers valid.
bool use_unaligned;
-
- // User-visible verbosity.
- int verbosity;
+ // Consider pointers found in poisoned memory to be valid.
+ bool use_poisoned;
// Debug logging.
bool log_pointers;
@@ -77,6 +76,7 @@ extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
struct Leak {
+ u32 id;
uptr hit_count;
uptr total_size;
u32 stack_trace_id;
@@ -84,17 +84,31 @@ struct Leak {
bool is_suppressed;
};
+struct LeakedObject {
+ u32 leak_id;
+ uptr addr;
+ uptr size;
+};
+
// Aggregates leaks by stack trace prefix.
class LeakReport {
public:
- LeakReport() : leaks_(1) {}
- void Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag);
- void PrintLargest(uptr max_leaks);
+ LeakReport() : next_id_(0), leaks_(1), leaked_objects_(1) {}
+ void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
+ ChunkTag tag);
+ void ReportTopLeaks(uptr max_leaks);
void PrintSummary();
- bool IsEmpty() { return leaks_.size() == 0; }
- uptr ApplySuppressions();
+ void ApplySuppressions();
+ uptr UnsuppressedLeakCount();
+
+
private:
+ void PrintReportForLeak(uptr index);
+ void PrintLeakedObjectsForLeak(uptr index);
+
+ u32 next_id_;
InternalMmapVector<Leak> leaks_;
+ InternalMmapVector<LeakedObject> leaked_objects_;
};
typedef InternalMmapVector<uptr> Frontier;
@@ -115,10 +129,19 @@ enum IgnoreObjectResult {
};
// Functions called from the parent tool.
-void InitCommonLsan();
+void InitCommonLsan(bool standalone);
void DoLeakCheck();
bool DisabledInThisThread();
+// Special case for "new T[0]" where T is a type with DTOR.
+// new T[0] will allocate one word for the array size (0) and store a pointer
+// to the end of allocated chunk.
+inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+ return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
+ *reinterpret_cast<uptr *>(chunk_beg) == 0;
+}
+
// The following must be implemented in the parent tool.
void ForEachChunk(ForEachChunkCallback callback, void *arg);
@@ -127,6 +150,8 @@ void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
void UnlockAllocator();
+// Returns true if [addr, addr + sizeof(void *)) is poisoned.
+bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
void LockThreadRegistry();
void UnlockThreadRegistry();
diff --git a/libsanitizer/lsan/lsan_common_linux.cc b/libsanitizer/lsan/lsan_common_linux.cc
index 80d2459a9a..b17156ce6b 100644
--- a/libsanitizer/lsan/lsan_common_linux.cc
+++ b/libsanitizer/lsan/lsan_common_linux.cc
@@ -17,6 +17,7 @@
#include <link.h>
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
@@ -41,11 +42,11 @@ void InitializePlatformSpecificModules() {
return;
}
if (num_matches == 0)
- Report("LeakSanitizer: Dynamic linker not found. "
- "TLS will not be handled correctly.\n");
+ VReport(1, "LeakSanitizer: Dynamic linker not found. "
+ "TLS will not be handled correctly.\n");
else if (num_matches > 1)
- Report("LeakSanitizer: Multiple modules match \"%s\". "
- "TLS will not be handled correctly.\n", kLinkerName);
+ VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
+ "TLS will not be handled correctly.\n", kLinkerName);
linker = 0;
}
@@ -81,6 +82,7 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
+ if (!flags()->use_globals) return;
// FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of
// deadlocking by running this under StopTheWorld. However, the lock is
// reentrant, so we should be able to fix this by acquiring the lock before
@@ -90,11 +92,10 @@ void ProcessGlobalRegions(Frontier *frontier) {
static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
CHECK(stack_id);
- uptr size = 0;
- const uptr *trace = map->Get(stack_id, &size);
+ StackTrace stack = map->Get(stack_id);
// The top frame is our malloc/calloc/etc. The next frame is the caller.
- if (size >= 2)
- return trace[1];
+ if (stack.size >= 2)
+ return stack.trace[1];
return 0;
}
@@ -127,6 +128,21 @@ static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
// Handles dynamically allocated TLS blocks by treating all chunks allocated
// from ld-linux.so as reachable.
+// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
+// They are allocated with a __libc_memalign() call in allocate_and_init()
+// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
+// blocks, but we can make sure they come from our own allocator by intercepting
+// __libc_memalign(). On top of that, there is no easy way to reach them. Their
+// addresses are stored in a dynamically allocated array (the DTV) which is
+// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
+// being reachable from the static TLS, and the dynamic TLS being reachable from
+// the DTV. This is because the initial DTV is allocated before our interception
+// mechanism kicks in, and thus we don't recognize it as allocated memory. We
+// can't special-case it either, since we don't know its size.
+// Our solution is to include in the root set all allocations made from
+// ld-linux.so (which is where allocate_and_init() is implemented). This is
+// guaranteed to include all dynamic TLS blocks (and possibly other allocations
+// which we don't care about).
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
if (!flags()->use_tls) return;
if (!linker) return;
diff --git a/libsanitizer/lsan/lsan_interceptors.cc b/libsanitizer/lsan/lsan_interceptors.cc
index 1940902ef8..ac05dfaa54 100644
--- a/libsanitizer/lsan/lsan_interceptors.cc
+++ b/libsanitizer/lsan/lsan_interceptors.cc
@@ -10,11 +10,11 @@
//
//===----------------------------------------------------------------------===//
-#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interception.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
@@ -32,21 +32,6 @@ int pthread_key_create(unsigned *key, void (*destructor)(void* v));
int pthread_setspecific(unsigned key, const void *v);
}
-#define GET_STACK_TRACE \
- StackTrace stack; \
- { \
- uptr stack_top = 0, stack_bottom = 0; \
- ThreadContext *t; \
- bool fast = common_flags()->fast_unwind_on_malloc; \
- if (fast && (t = CurrentThreadContext())) { \
- stack_top = t->stack_end(); \
- stack_bottom = t->stack_begin(); \
- } \
- stack.Unwind(__sanitizer::common_flags()->malloc_context_size, \
- StackTrace::GetCurrentPc(), \
- GET_CURRENT_FRAME(), stack_top, stack_bottom, fast); \
- }
-
#define ENSURE_LSAN_INITED do { \
CHECK(!lsan_init_is_running); \
if (!lsan_inited) \
@@ -63,7 +48,7 @@ namespace std {
INTERCEPTOR(void*, malloc, uptr size) {
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
return Allocate(stack, size, 1, kAlwaysClearMemory);
}
@@ -86,26 +71,32 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
}
if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
size *= nmemb;
return Allocate(stack, size, 1, true);
}
INTERCEPTOR(void*, realloc, void *q, uptr size) {
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
return Reallocate(stack, q, size, 1);
}
INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
+ return Allocate(stack, size, alignment, kAlwaysClearMemory);
+}
+
+INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
return Allocate(stack, size, alignment, kAlwaysClearMemory);
}
INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
*memptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
// FIXME: Return ENOMEM if user requested more than max alloc size.
return 0;
@@ -113,7 +104,7 @@ INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
INTERCEPTOR(void*, valloc, uptr size) {
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
if (size == 0)
size = GetPageSizeCached();
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
@@ -140,7 +131,7 @@ INTERCEPTOR(int, mallopt, int cmd, int value) {
INTERCEPTOR(void*, pvalloc, uptr size) {
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
uptr PageSize = GetPageSizeCached();
size = RoundUpTo(size, PageSize);
if (size == 0) {
@@ -150,11 +141,11 @@ INTERCEPTOR(void*, pvalloc, uptr size) {
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
}
-INTERCEPTOR(void, cfree, void *p) ALIAS("free");
+INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free));
#define OPERATOR_NEW_BODY \
ENSURE_LSAN_INITED; \
- GET_STACK_TRACE; \
+ GET_STACK_TRACE_MALLOC; \
return Allocate(stack, size, 1, kAlwaysClearMemory);
INTERCEPTOR_ATTRIBUTE
@@ -171,9 +162,9 @@ void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
Deallocate(ptr);
INTERCEPTOR_ATTRIBUTE
-void operator delete(void *ptr) { OPERATOR_DELETE_BODY; }
+void operator delete(void *ptr) throw() { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
-void operator delete[](void *ptr) { OPERATOR_DELETE_BODY; }
+void operator delete[](void *ptr) throw() { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
@@ -183,7 +174,8 @@ void operator delete[](void *ptr, std::nothrow_t const &) {
// We need this to intercept the __libc_memalign calls that are used to
// allocate dynamic TLS space in ld-linux.so.
-INTERCEPTOR(void *, __libc_memalign, uptr align, uptr s) ALIAS("memalign");
+INTERCEPTOR(void *, __libc_memalign, uptr align, uptr s)
+ ALIAS(WRAPPER_NAME(memalign));
///// Thread initialization and finalization. /////
@@ -236,7 +228,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
pthread_attr_init(&myattr);
attr = &myattr;
}
- AdjustStackSizeLinux(attr);
+ AdjustStackSize(attr);
int detached = 0;
pthread_attr_getdetachstate(attr, &detached);
ThreadParam p;
diff --git a/libsanitizer/lsan/lsan_preinit.cc b/libsanitizer/lsan/lsan_preinit.cc
index 856f9f7878..d1efd31406 100644
--- a/libsanitizer/lsan/lsan_preinit.cc
+++ b/libsanitizer/lsan/lsan_preinit.cc
@@ -12,11 +12,7 @@
#include "lsan.h"
-#ifndef LSAN_USE_PREINIT_ARRAY
-#define LSAN_USE_PREINIT_ARRAY 1
-#endif
-
-#if LSAN_USE_PREINIT_ARRAY && !defined(PIC)
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
// We force __lsan_init to be called before anyone else by placing it into
// .preinit_array section.
__attribute__((section(".preinit_array"), used))
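
The preinit hook now keys off SANITIZER_CAN_USE_PREINIT_ARRAY instead of a local macro. For readers unfamiliar with the mechanism, a generic sketch of a .preinit_array entry follows; it is illustrative only, the actual initializer used by LSan is not shown in this hunk, and .preinit_array applies only to the main executable, which is why PIC builds were previously excluded:

    // Sketch: run a function before main() and before C++ constructors.
    #include <unistd.h>

    static void early_init(void) {
      const char msg[] = "early_init runs from .preinit_array\n";
      write(2, msg, sizeof(msg) - 1);  // avoid stdio this early in startup
    }

    // The loader walks .preinit_array and calls each stored function pointer.
    __attribute__((section(".preinit_array"), used))
    static void (*early_init_ptr)(void) = early_init;

    int main(void) { return 0; }
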
diff --git a/libsanitizer/sanitizer_common/Makefile.am b/libsanitizer/sanitizer_common/Makefile.am
index 8e9038dc8c..28ce39b64c 100644
--- a/libsanitizer/sanitizer_common/Makefile.am
+++ b/libsanitizer/sanitizer_common/Makefile.am
@@ -6,6 +6,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+AM_CXXFLAGS += -std=gnu++11
if LIBBACKTRACE_SUPPORTED
AM_CXXFLAGS += -DSANITIZER_LIBBACKTRACE -DSANITIZER_CP_DEMANGLE \
-I $(top_srcdir)/../libbacktrace \
@@ -21,31 +22,43 @@ sanitizer_common_files = \
sanitizer_allocator.cc \
sanitizer_common.cc \
sanitizer_common_libcdep.cc \
- sanitizer_coverage.cc \
+ sanitizer_coverage_libcdep.cc \
+ sanitizer_coverage_mapping_libcdep.cc \
+ sanitizer_deadlock_detector1.cc \
+ sanitizer_deadlock_detector2.cc \
sanitizer_flags.cc \
sanitizer_libc.cc \
sanitizer_libignore.cc \
sanitizer_linux.cc \
sanitizer_linux_libcdep.cc \
sanitizer_mac.cc \
+ sanitizer_persistent_allocator.cc \
sanitizer_platform_limits_linux.cc \
sanitizer_platform_limits_posix.cc \
- sanitizer_posix_libcdep.cc \
sanitizer_posix.cc \
+ sanitizer_posix_libcdep.cc \
sanitizer_printf.cc \
+ sanitizer_procmaps_common.cc \
+ sanitizer_procmaps_freebsd.cc \
+ sanitizer_procmaps_linux.cc \
+ sanitizer_procmaps_mac.cc \
sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \
sanitizer_stacktrace_libcdep.cc \
+ sanitizer_stacktrace_printer.cc \
sanitizer_stoptheworld_linux_libcdep.cc \
sanitizer_suppressions.cc \
- sanitizer_symbolizer_posix_libcdep.cc \
- sanitizer_symbolizer_win.cc \
sanitizer_symbolizer.cc \
sanitizer_symbolizer_libbacktrace.cc \
sanitizer_symbolizer_libcdep.cc \
+ sanitizer_symbolizer_posix_libcdep.cc \
+ sanitizer_symbolizer_win.cc \
sanitizer_thread_registry.cc \
+ sanitizer_tls_get_addr.cc \
+ sanitizer_unwind_posix_libcdep.cc \
sanitizer_win.cc
+
libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
# Work around what appears to be a GNU make bug handling MAKEFLAGS
diff --git a/libsanitizer/sanitizer_common/Makefile.in b/libsanitizer/sanitizer_common/Makefile.in
index e9fd115e97..4a0e72762a 100644
--- a/libsanitizer/sanitizer_common/Makefile.in
+++ b/libsanitizer/sanitizer_common/Makefile.in
@@ -64,19 +64,28 @@ CONFIG_CLEAN_VPATH_FILES =
LTLIBRARIES = $(noinst_LTLIBRARIES)
libsanitizer_common_la_LIBADD =
am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
- sanitizer_common_libcdep.lo sanitizer_coverage.lo \
- sanitizer_flags.lo sanitizer_libc.lo sanitizer_libignore.lo \
- sanitizer_linux.lo sanitizer_linux_libcdep.lo sanitizer_mac.lo \
+ sanitizer_common_libcdep.lo sanitizer_coverage_libcdep.lo \
+ sanitizer_coverage_mapping_libcdep.lo \
+ sanitizer_deadlock_detector1.lo \
+ sanitizer_deadlock_detector2.lo sanitizer_flags.lo \
+ sanitizer_libc.lo sanitizer_libignore.lo sanitizer_linux.lo \
+ sanitizer_linux_libcdep.lo sanitizer_mac.lo \
+ sanitizer_persistent_allocator.lo \
sanitizer_platform_limits_linux.lo \
- sanitizer_platform_limits_posix.lo sanitizer_posix_libcdep.lo \
- sanitizer_posix.lo sanitizer_printf.lo sanitizer_stackdepot.lo \
- sanitizer_stacktrace.lo sanitizer_stacktrace_libcdep.lo \
+ sanitizer_platform_limits_posix.lo sanitizer_posix.lo \
+ sanitizer_posix_libcdep.lo sanitizer_printf.lo \
+ sanitizer_procmaps_common.lo sanitizer_procmaps_freebsd.lo \
+ sanitizer_procmaps_linux.lo sanitizer_procmaps_mac.lo \
+ sanitizer_stackdepot.lo sanitizer_stacktrace.lo \
+ sanitizer_stacktrace_libcdep.lo \
+ sanitizer_stacktrace_printer.lo \
sanitizer_stoptheworld_linux_libcdep.lo \
- sanitizer_suppressions.lo \
- sanitizer_symbolizer_posix_libcdep.lo \
- sanitizer_symbolizer_win.lo sanitizer_symbolizer.lo \
+ sanitizer_suppressions.lo sanitizer_symbolizer.lo \
sanitizer_symbolizer_libbacktrace.lo \
- sanitizer_symbolizer_libcdep.lo sanitizer_thread_registry.lo \
+ sanitizer_symbolizer_libcdep.lo \
+ sanitizer_symbolizer_posix_libcdep.lo \
+ sanitizer_symbolizer_win.lo sanitizer_thread_registry.lo \
+ sanitizer_tls_get_addr.lo sanitizer_unwind_posix_libcdep.lo \
sanitizer_win.lo
am_libsanitizer_common_la_OBJECTS = $(am__objects_1)
libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS)
@@ -169,6 +178,7 @@ SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
+TSAN_TARGET_DEPENDENT_OBJECTS = @TSAN_TARGET_DEPENDENT_OBJECTS@
VERSION = @VERSION@
VIEW_FILE = @VIEW_FILE@
abs_builddir = @abs_builddir@
@@ -245,36 +255,47 @@ AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
-Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
-fomit-frame-pointer -funwind-tables -fvisibility=hidden \
-Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) \
- $(am__append_1)
+ -std=gnu++11 $(am__append_1)
ACLOCAL_AMFLAGS = -I m4
noinst_LTLIBRARIES = libsanitizer_common.la
sanitizer_common_files = \
sanitizer_allocator.cc \
sanitizer_common.cc \
sanitizer_common_libcdep.cc \
- sanitizer_coverage.cc \
+ sanitizer_coverage_libcdep.cc \
+ sanitizer_coverage_mapping_libcdep.cc \
+ sanitizer_deadlock_detector1.cc \
+ sanitizer_deadlock_detector2.cc \
sanitizer_flags.cc \
sanitizer_libc.cc \
sanitizer_libignore.cc \
sanitizer_linux.cc \
sanitizer_linux_libcdep.cc \
sanitizer_mac.cc \
+ sanitizer_persistent_allocator.cc \
sanitizer_platform_limits_linux.cc \
sanitizer_platform_limits_posix.cc \
- sanitizer_posix_libcdep.cc \
sanitizer_posix.cc \
+ sanitizer_posix_libcdep.cc \
sanitizer_printf.cc \
+ sanitizer_procmaps_common.cc \
+ sanitizer_procmaps_freebsd.cc \
+ sanitizer_procmaps_linux.cc \
+ sanitizer_procmaps_mac.cc \
sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \
sanitizer_stacktrace_libcdep.cc \
+ sanitizer_stacktrace_printer.cc \
sanitizer_stoptheworld_linux_libcdep.cc \
sanitizer_suppressions.cc \
- sanitizer_symbolizer_posix_libcdep.cc \
- sanitizer_symbolizer_win.cc \
sanitizer_symbolizer.cc \
sanitizer_symbolizer_libbacktrace.cc \
sanitizer_symbolizer_libcdep.cc \
+ sanitizer_symbolizer_posix_libcdep.cc \
+ sanitizer_symbolizer_win.cc \
sanitizer_thread_registry.cc \
+ sanitizer_tls_get_addr.cc \
+ sanitizer_unwind_posix_libcdep.cc \
sanitizer_win.cc
libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
@@ -373,21 +394,30 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_allocator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common_libcdep.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage_libcdep.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage_mapping_libcdep.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector1.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector2.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libc.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libignore.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mac.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_persistent_allocator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_platform_limits_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_platform_limits_posix.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_posix.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_printf.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_common.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_freebsd.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_linux.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stackdepot.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace_libcdep.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace_printer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stoptheworld_linux_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_suppressions.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer.Plo@am__quote@
@@ -396,6 +426,8 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_registry.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_tls_get_addr.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_unwind_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_win.Plo@am__quote@
.cc.o:
diff --git a/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h b/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h
new file mode 100644
index 0000000000..9e15d51c66
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h
@@ -0,0 +1,340 @@
+//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Concurrent uptr->T hashmap.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ADDRHASHMAP_H
+#define SANITIZER_ADDRHASHMAP_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_allocator_internal.h"
+
+namespace __sanitizer {
+
+// Concurrent uptr->T hashmap.
+// T must be a POD type, kSize is preferably a prime but can be any number.
+// Usage example:
+//
+// typedef AddrHashMap<uptr, 11> Map;
+// Map m;
+// {
+// Map::Handle h(&m, addr);
+// use h.operator->() to access the data
+// if h.created() then the element was just created, and the current thread
+// has exclusive access to it
+// otherwise the current thread has only read access to the data
+// }
+// {
+// Map::Handle h(&m, addr, true);
+// this will remove the data from the map in Handle dtor
+// the current thread has exclusive access to the data
+// if !h.exists() then the element never existed
+// }
+template<typename T, uptr kSize>
+class AddrHashMap {
+ private:
+ struct Cell {
+ atomic_uintptr_t addr;
+ T val;
+ };
+
+ struct AddBucket {
+ uptr cap;
+ uptr size;
+ Cell cells[1]; // variable len
+ };
+
+ static const uptr kBucketSize = 3;
+
+ struct Bucket {
+ RWMutex mtx;
+ atomic_uintptr_t add;
+ Cell cells[kBucketSize];
+ };
+
+ public:
+ AddrHashMap();
+
+ class Handle {
+ public:
+ Handle(AddrHashMap<T, kSize> *map, uptr addr);
+ Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove);
+ Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove, bool create);
+
+ ~Handle();
+ T *operator->();
+ bool created() const;
+ bool exists() const;
+
+ private:
+ friend AddrHashMap<T, kSize>;
+ AddrHashMap<T, kSize> *map_;
+ Bucket *bucket_;
+ Cell *cell_;
+ uptr addr_;
+ uptr addidx_;
+ bool created_;
+ bool remove_;
+ bool create_;
+ };
+
+ private:
+ friend class Handle;
+ Bucket *table_;
+
+ void acquire(Handle *h);
+ void release(Handle *h);
+ uptr calcHash(uptr addr);
+};
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
+ map_ = map;
+ addr_ = addr;
+ remove_ = false;
+ create_ = true;
+ map_->acquire(this);
+}
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
+ bool remove) {
+ map_ = map;
+ addr_ = addr;
+ remove_ = remove;
+ create_ = true;
+ map_->acquire(this);
+}
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
+ bool remove, bool create) {
+ map_ = map;
+ addr_ = addr;
+ remove_ = remove;
+ create_ = create;
+ map_->acquire(this);
+}
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::Handle::~Handle() {
+ map_->release(this);
+}
+
+template <typename T, uptr kSize>
+T *AddrHashMap<T, kSize>::Handle::operator->() {
+ return &cell_->val;
+}
+
+template<typename T, uptr kSize>
+bool AddrHashMap<T, kSize>::Handle::created() const {
+ return created_;
+}
+
+template<typename T, uptr kSize>
+bool AddrHashMap<T, kSize>::Handle::exists() const {
+ return cell_ != 0;
+}
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::AddrHashMap() {
+ table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
+}
+
+template<typename T, uptr kSize>
+void AddrHashMap<T, kSize>::acquire(Handle *h) {
+ uptr addr = h->addr_;
+ uptr hash = calcHash(addr);
+ Bucket *b = &table_[hash];
+
+ h->created_ = false;
+ h->addidx_ = -1U;
+ h->bucket_ = b;
+ h->cell_ = 0;
+
+ // If we want to remove the element, we need exclusive access to the bucket,
+ // so skip the lock-free phase.
+ if (h->remove_)
+ goto locked;
+
+ retry:
+ // First try to find an existing element w/o read mutex.
+ CHECK(!h->remove_);
+ // Check the embed cells.
+ for (uptr i = 0; i < kBucketSize; i++) {
+ Cell *c = &b->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
+ if (addr1 == addr) {
+ h->cell_ = c;
+ return;
+ }
+ }
+
+ // Check the add cells with read lock.
+ if (atomic_load(&b->add, memory_order_relaxed)) {
+ b->mtx.ReadLock();
+ AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
+ for (uptr i = 0; i < add->size; i++) {
+ Cell *c = &add->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (addr1 == addr) {
+ h->addidx_ = i;
+ h->cell_ = c;
+ return;
+ }
+ }
+ b->mtx.ReadUnlock();
+ }
+
+ locked:
+ // Re-check existence under write lock.
+ // Embed cells.
+ b->mtx.Lock();
+ for (uptr i = 0; i < kBucketSize; i++) {
+ Cell *c = &b->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (addr1 == addr) {
+ if (h->remove_) {
+ h->cell_ = c;
+ return;
+ }
+ b->mtx.Unlock();
+ goto retry;
+ }
+ }
+
+ // Add cells.
+ AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
+ if (add) {
+ for (uptr i = 0; i < add->size; i++) {
+ Cell *c = &add->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (addr1 == addr) {
+ if (h->remove_) {
+ h->addidx_ = i;
+ h->cell_ = c;
+ return;
+ }
+ b->mtx.Unlock();
+ goto retry;
+ }
+ }
+ }
+
+ // The element does not exist, no need to create it if we want to remove.
+ if (h->remove_ || !h->create_) {
+ b->mtx.Unlock();
+ return;
+ }
+
+ // Now try to create it under the mutex.
+ h->created_ = true;
+ // See if we have a free embed cell.
+ for (uptr i = 0; i < kBucketSize; i++) {
+ Cell *c = &b->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (addr1 == 0) {
+ h->cell_ = c;
+ return;
+ }
+ }
+
+ // Store in the add cells.
+ if (add == 0) {
+ // Allocate a new add array.
+ const uptr kInitSize = 64;
+ add = (AddBucket*)InternalAlloc(kInitSize);
+ internal_memset(add, 0, kInitSize);
+ add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
+ add->size = 0;
+ atomic_store(&b->add, (uptr)add, memory_order_relaxed);
+ }
+ if (add->size == add->cap) {
+ // Grow existing add array.
+ uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);
+ uptr newsize = oldsize * 2;
+ AddBucket *add1 = (AddBucket*)InternalAlloc(newsize);
+ internal_memset(add1, 0, newsize);
+ add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
+ add1->size = add->size;
+ internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));
+ InternalFree(add);
+ atomic_store(&b->add, (uptr)add1, memory_order_relaxed);
+ add = add1;
+ }
+ // Store.
+ uptr i = add->size++;
+ Cell *c = &add->cells[i];
+ CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
+ h->addidx_ = i;
+ h->cell_ = c;
+}
+
+template<typename T, uptr kSize>
+void AddrHashMap<T, kSize>::release(Handle *h) {
+ if (h->cell_ == 0)
+ return;
+ Bucket *b = h->bucket_;
+ Cell *c = h->cell_;
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (h->created_) {
+ // Denote completion of insertion.
+ CHECK_EQ(addr1, 0);
+ // After the following store, the element becomes available
+ // for lock-free reads.
+ atomic_store(&c->addr, h->addr_, memory_order_release);
+ b->mtx.Unlock();
+ } else if (h->remove_) {
+ // Denote that the cell is empty now.
+ CHECK_EQ(addr1, h->addr_);
+ atomic_store(&c->addr, 0, memory_order_release);
+ // See if we need to compact the bucket.
+ AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
+ if (h->addidx_ == -1U) {
+ // Removed from embed array, move an add element into the freed cell.
+ if (add && add->size != 0) {
+ uptr last = --add->size;
+ Cell *c1 = &add->cells[last];
+ c->val = c1->val;
+ uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
+ atomic_store(&c->addr, addr1, memory_order_release);
+ atomic_store(&c1->addr, 0, memory_order_release);
+ }
+ } else {
+ // Removed from add array, compact it.
+ uptr last = --add->size;
+ Cell *c1 = &add->cells[last];
+ if (c != c1) {
+ *c = *c1;
+ atomic_store(&c1->addr, 0, memory_order_relaxed);
+ }
+ }
+ if (add && add->size == 0) {
+ // FIXME(dvyukov): free add?
+ }
+ b->mtx.Unlock();
+ } else {
+ CHECK_EQ(addr1, h->addr_);
+ if (h->addidx_ != -1U)
+ b->mtx.ReadUnlock();
+ }
+}
+
+template<typename T, uptr kSize>
+uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
+ addr += addr << 10;
+ addr ^= addr >> 6;
+ return addr % kSize;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ADDRHASHMAP_H
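For reference, a minimal usage sketch of the Handle API implemented above. The value type, table size, and function names here are hypothetical; the real consumer is the interceptor metadata map that appears later in sanitizer_common_interceptors.inc.

// Illustrative only; assumes sanitizer_addrhashmap.h and namespace __sanitizer.
struct Metadata {                  // hypothetical payload
  uptr allocated_size;
};
typedef AddrHashMap<Metadata, 31051> MetadataMap;
static MetadataMap *metadata_map;  // allocated once at startup, e.g. via placement new

void RecordAlloc(uptr addr, uptr size) {
  MetadataMap::Handle h(metadata_map, addr);  // create-if-missing (default ctor form)
  CHECK(h.created());                         // expect a fresh cell for a new address
  h->allocated_size = size;                   // operator-> exposes the stored value
}

uptr LookupAlloc(uptr addr) {
  // remove = false, create = false: pure lookup, no insertion.
  MetadataMap::Handle h(metadata_map, addr, /*remove*/ false, /*create*/ false);
  return h.exists() ? h->allocated_size : 0;
}

void RecordFree(uptr addr) {
  // remove = true: the cell is erased when the handle goes out of scope.
  MetadataMap::Handle h(metadata_map, addr, /*remove*/ true);
  CHECK(h.exists());
}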
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator.cc b/libsanitizer/sanitizer_common/sanitizer_allocator.cc
index 9d38e9429a..f4e3af1b06 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator.cc
@@ -17,7 +17,7 @@
namespace __sanitizer {
// ThreadSanitizer for Go uses libc malloc/free.
-#if defined(SANITIZER_GO)
+#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
extern "C" void __libc_free(void *ptr);
@@ -115,7 +115,7 @@ void *LowLevelAllocator::Allocate(uptr size) {
if (allocated_end_ - allocated_current_ < (sptr)size) {
uptr size_to_allocate = Max(size, GetPageSizeCached());
allocated_current_ =
- (char*)MmapOrDie(size_to_allocate, __FUNCTION__);
+ (char*)MmapOrDie(size_to_allocate, __func__);
allocated_end_ = allocated_current_ + size_to_allocate;
if (low_level_alloc_callback) {
low_level_alloc_callback((uptr)allocated_current_,
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator.h b/libsanitizer/sanitizer_common/sanitizer_allocator.h
index 8ba825f14e..dd5539a208 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator.h
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator.h
@@ -196,14 +196,12 @@ template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;
// Memory allocator statistics
enum AllocatorStat {
- AllocatorStatMalloced,
- AllocatorStatFreed,
- AllocatorStatMmapped,
- AllocatorStatUnmapped,
+ AllocatorStatAllocated,
+ AllocatorStatMapped,
AllocatorStatCount
};
-typedef u64 AllocatorStatCounters[AllocatorStatCount];
+typedef uptr AllocatorStatCounters[AllocatorStatCount];
// Per-thread stats, live in per-thread cache.
class AllocatorStats {
@@ -212,16 +210,21 @@ class AllocatorStats {
internal_memset(this, 0, sizeof(*this));
}
- void Add(AllocatorStat i, u64 v) {
+ void Add(AllocatorStat i, uptr v) {
v += atomic_load(&stats_[i], memory_order_relaxed);
atomic_store(&stats_[i], v, memory_order_relaxed);
}
- void Set(AllocatorStat i, u64 v) {
+ void Sub(AllocatorStat i, uptr v) {
+ v = atomic_load(&stats_[i], memory_order_relaxed) - v;
atomic_store(&stats_[i], v, memory_order_relaxed);
}
- u64 Get(AllocatorStat i) const {
+ void Set(AllocatorStat i, uptr v) {
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ uptr Get(AllocatorStat i) const {
return atomic_load(&stats_[i], memory_order_relaxed);
}
@@ -229,7 +232,7 @@ class AllocatorStats {
friend class AllocatorGlobalStats;
AllocatorStats *next_;
AllocatorStats *prev_;
- atomic_uint64_t stats_[AllocatorStatCount];
+ atomic_uintptr_t stats_[AllocatorStatCount];
};
// Global stats, used for aggregation and querying.
@@ -258,7 +261,7 @@ class AllocatorGlobalStats : public AllocatorStats {
}
void Get(AllocatorStatCounters s) const {
- internal_memset(s, 0, AllocatorStatCount * sizeof(u64));
+ internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
SpinMutexLock l(&mu_);
const AllocatorStats *stats = this;
for (;;) {
@@ -268,6 +271,9 @@ class AllocatorGlobalStats : public AllocatorStats {
if (stats == this)
break;
}
+ // All stats must be non-negative.
+ for (int i = 0; i < AllocatorStatCount; i++)
+ s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
}
private:
@@ -453,6 +459,11 @@ class SizeClassAllocator64 {
}
}
+ static uptr AdditionalSize() {
+ return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
+ GetPageSizeCached());
+ }
+
typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses;
static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
@@ -482,11 +493,6 @@ class SizeClassAllocator64 {
};
COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
- static uptr AdditionalSize() {
- return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
- GetPageSizeCached());
- }
-
RegionInfo *GetRegionInfo(uptr class_id) {
CHECK_LT(class_id, kNumClasses);
RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
@@ -520,7 +526,7 @@ class SizeClassAllocator64 {
map_size += kUserMapSize;
CHECK_GE(region->mapped_user + map_size, end_idx);
MapWithCallback(region_beg + region->mapped_user, map_size);
- stat->Add(AllocatorStatMmapped, map_size);
+ stat->Add(AllocatorStatMapped, map_size);
region->mapped_user += map_size;
}
uptr total_count = (region->mapped_user - beg_idx - size)
@@ -839,7 +845,7 @@ class SizeClassAllocator32 {
uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
"SizeClassAllocator32"));
MapUnmapCallback().OnMap(res, kRegionSize);
- stat->Add(AllocatorStatMmapped, kRegionSize);
+ stat->Add(AllocatorStatMapped, kRegionSize);
CHECK_EQ(0U, (res & (kRegionSize - 1)));
possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
return res;
@@ -905,7 +911,7 @@ struct SizeClassAllocatorLocalCache {
void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
- stats_.Add(AllocatorStatMalloced, SizeClassMap::Size(class_id));
+ stats_.Add(AllocatorStatAllocated, SizeClassMap::Size(class_id));
PerClass *c = &per_class_[class_id];
if (UNLIKELY(c->count == 0))
Refill(allocator, class_id);
@@ -920,7 +926,7 @@ struct SizeClassAllocatorLocalCache {
// If the first allocator call on a new thread is a deallocation, then
// max_count will be zero, leading to check failure.
InitCache();
- stats_.Add(AllocatorStatFreed, SizeClassMap::Size(class_id));
+ stats_.Sub(AllocatorStatAllocated, SizeClassMap::Size(class_id));
PerClass *c = &per_class_[class_id];
CHECK_NE(c->max_count, 0UL);
if (UNLIKELY(c->count == c->max_count))
@@ -1007,12 +1013,15 @@ class LargeMmapAllocator {
if (map_size < size) return AllocatorReturnNull(); // Overflow.
uptr map_beg = reinterpret_cast<uptr>(
MmapOrDie(map_size, "LargeMmapAllocator"));
+ CHECK(IsAligned(map_beg, page_size_));
MapUnmapCallback().OnMap(map_beg, map_size);
uptr map_end = map_beg + map_size;
uptr res = map_beg + page_size_;
if (res & (alignment - 1)) // Align.
res += alignment - (res & (alignment - 1));
- CHECK_EQ(0, res & (alignment - 1));
+ CHECK(IsAligned(res, alignment));
+ CHECK(IsAligned(res, page_size_));
+ CHECK_GE(res + size, map_beg);
CHECK_LE(res + size, map_end);
Header *h = GetHeader(res);
h->size = size;
@@ -1031,8 +1040,8 @@ class LargeMmapAllocator {
stats.currently_allocated += map_size;
stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
stats.by_size_log[size_log]++;
- stat->Add(AllocatorStatMalloced, map_size);
- stat->Add(AllocatorStatMmapped, map_size);
+ stat->Add(AllocatorStatAllocated, map_size);
+ stat->Add(AllocatorStatMapped, map_size);
}
return reinterpret_cast<void*>(res);
}
@@ -1050,8 +1059,8 @@ class LargeMmapAllocator {
chunks_sorted_ = false;
stats.n_frees++;
stats.currently_allocated -= h->map_size;
- stat->Add(AllocatorStatFreed, h->map_size);
- stat->Add(AllocatorStatUnmapped, h->map_size);
+ stat->Sub(AllocatorStatAllocated, h->map_size);
+ stat->Sub(AllocatorStatMapped, h->map_size);
}
MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
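The stats overhaul above replaces four monotonic counters (Malloced/Freed/Mmapped/Unmapped) with two net counters updated through the new Sub(). In effect the allocator paths shown in this hunk now do the following (sketch, not new code in the patch):

// Net accounting after the rename (illustrative).
void OnAllocate(AllocatorStats *stats, uptr size) {
  stats->Add(AllocatorStatAllocated, size);   // previously Add(AllocatorStatMalloced, ...)
}
void OnDeallocate(AllocatorStats *stats, uptr size) {
  stats->Sub(AllocatorStatAllocated, size);   // previously Add(AllocatorStatFreed, ...)
}
// AllocatorGlobalStats::Get() clamps a transiently negative sum to zero, since
// the per-thread deltas are summed without synchronization.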
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_interface.h b/libsanitizer/sanitizer_common/sanitizer_allocator_interface.h
new file mode 100644
index 0000000000..fd5bed30c4
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator_interface.h
@@ -0,0 +1,36 @@
+//===-- sanitizer_allocator_interface.h ------------------------- C++ -----===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Re-declaration of functions from public sanitizer allocator interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_INTERFACE_H
+#define SANITIZER_ALLOCATOR_INTERFACE_H
+
+#include "sanitizer_internal_defs.h"
+
+using __sanitizer::uptr;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_estimated_allocated_size(uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_get_ownership(const void *p);
+SANITIZER_INTERFACE_ATTRIBUTE uptr
+__sanitizer_get_allocated_size(const void *p);
+SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_current_allocated_bytes();
+SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_heap_size();
+SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_free_bytes();
+SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_unmapped_bytes();
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ /* OPTIONAL */ void __sanitizer_malloc_hook(void *ptr, uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ /* OPTIONAL */ void __sanitizer_free_hook(void *ptr);
+} // extern "C"
+
+#endif // SANITIZER_ALLOCATOR_INTERFACE_H
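These declarations mirror the public header added under include/sanitizer (see the diffstat). From an instrumented program, for example one built with -fsanitize=address, the interface can be queried directly; a small sketch:

// Illustrative user-side query of the allocator interface.
#include <sanitizer/allocator_interface.h>
#include <stdio.h>
#include <stdlib.h>

int main() {
  void *p = malloc(1 << 20);
  printf("currently allocated: %zu bytes, heap size: %zu bytes\n",
         __sanitizer_get_current_allocated_bytes(),
         __sanitizer_get_heap_size());
  printf("owned by the allocator: %d, usable size: %zu bytes\n",
         __sanitizer_get_ownership(p),
         __sanitizer_get_allocated_size(p));
  free(p);
  return 0;
}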
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h b/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h
index efdb89e368..5e8cc10770 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h
@@ -22,40 +22,26 @@ namespace __sanitizer {
typedef CompactSizeClassMap InternalSizeClassMap;
static const uptr kInternalAllocatorSpace = 0;
-#if SANITIZER_WORDSIZE == 32
-static const u64 kInternalAllocatorSize = (1ULL << 32);
+static const u64 kInternalAllocatorSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kInternalAllocatorRegionSizeLog = 20;
+#if SANITIZER_WORDSIZE == 32
static const uptr kInternalAllocatorNumRegions =
kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog;
typedef FlatByteMap<kInternalAllocatorNumRegions> ByteMap;
#else
-static const u64 kInternalAllocatorSize = (1ULL << 47);
-static const uptr kInternalAllocatorRegionSizeLog = 24;
static const uptr kInternalAllocatorNumRegions =
kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog;
typedef TwoLevelByteMap<(kInternalAllocatorNumRegions >> 12), 1 << 12> ByteMap;
#endif
typedef SizeClassAllocator32<
- kInternalAllocatorSpace, kInternalAllocatorSize, 16, InternalSizeClassMap,
+ kInternalAllocatorSpace, kInternalAllocatorSize, 0, InternalSizeClassMap,
kInternalAllocatorRegionSizeLog, ByteMap> PrimaryInternalAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
InternalAllocatorCache;
-// We don't want our internal allocator to do any map/unmap operations from
-// LargeMmapAllocator.
-struct CrashOnMapUnmap {
- void OnMap(uptr p, uptr size) const {
- RAW_CHECK_MSG(0, "Unexpected mmap in InternalAllocator!");
- }
- void OnUnmap(uptr p, uptr size) const {
- RAW_CHECK_MSG(0, "Unexpected munmap in InternalAllocator!");
- }
-};
-
typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
- LargeMmapAllocator<CrashOnMapUnmap> >
- InternalAllocator;
+ LargeMmapAllocator<> > InternalAllocator;
void *InternalAlloc(uptr size, InternalAllocatorCache *cache = 0);
void InternalFree(void *p, InternalAllocatorCache *cache = 0);
diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic.h b/libsanitizer/sanitizer_common/sanitizer_atomic.h
index f2bf23588a..6387664f63 100644
--- a/libsanitizer/sanitizer_common/sanitizer_atomic.h
+++ b/libsanitizer/sanitizer_common/sanitizer_atomic.h
@@ -42,7 +42,8 @@ struct atomic_uint32_t {
struct atomic_uint64_t {
typedef u64 Type;
- volatile Type val_dont_use;
+  // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
+ volatile ALIGNED(8) Type val_dont_use;
};
struct atomic_uintptr_t {
diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h b/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h
index 88819e32a7..c600999e67 100644
--- a/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h
+++ b/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h
@@ -13,8 +13,26 @@
#ifndef SANITIZER_ATOMIC_CLANG_H
#define SANITIZER_ATOMIC_CLANG_H
+#if defined(__i386__) || defined(__x86_64__)
+# include "sanitizer_atomic_clang_x86.h"
+#else
+# include "sanitizer_atomic_clang_other.h"
+#endif
+
namespace __sanitizer {
+// We would like to just use compiler builtin atomic operations
+// for loads and stores, but they are mostly broken in clang:
+// - they lead to vastly inefficient code generation
+// (http://llvm.org/bugs/show_bug.cgi?id=17281)
+// - 64-bit atomic operations are not implemented on x86_32
+// (http://llvm.org/bugs/show_bug.cgi?id=15034)
+// - they are not implemented on ARM
+// error: undefined reference to '__atomic_load_4'
+
+// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+// for mappings of the memory model to different processors.
+
INLINE void atomic_signal_fence(memory_order) {
__asm__ __volatile__("" ::: "memory");
}
@@ -23,59 +41,6 @@ INLINE void atomic_thread_fence(memory_order) {
__sync_synchronize();
}
-INLINE void proc_yield(int cnt) {
- __asm__ __volatile__("" ::: "memory");
-#if defined(__i386__) || defined(__x86_64__)
- for (int i = 0; i < cnt; i++)
- __asm__ __volatile__("pause");
-#endif
- __asm__ __volatile__("" ::: "memory");
-}
-
-template<typename T>
-INLINE typename T::Type atomic_load(
- const volatile T *a, memory_order mo) {
- DCHECK(mo & (memory_order_relaxed | memory_order_consume
- | memory_order_acquire | memory_order_seq_cst));
- DCHECK(!((uptr)a % sizeof(*a)));
- typename T::Type v;
- // FIXME:
- // 64-bit atomic operations are not atomic on 32-bit platforms.
- // The implementation lacks necessary memory fences on ARM/PPC.
- // We would like to use compiler builtin atomic operations,
- // but they are mostly broken:
- // - they lead to vastly inefficient code generation
- // (http://llvm.org/bugs/show_bug.cgi?id=17281)
- // - 64-bit atomic operations are not implemented on x86_32
- // (http://llvm.org/bugs/show_bug.cgi?id=15034)
- // - they are not implemented on ARM
- // error: undefined reference to '__atomic_load_4'
- if (mo == memory_order_relaxed) {
- v = a->val_dont_use;
- } else {
- atomic_signal_fence(memory_order_seq_cst);
- v = a->val_dont_use;
- atomic_signal_fence(memory_order_seq_cst);
- }
- return v;
-}
-
-template<typename T>
-INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
- DCHECK(mo & (memory_order_relaxed | memory_order_release
- | memory_order_seq_cst));
- DCHECK(!((uptr)a % sizeof(*a)));
- if (mo == memory_order_relaxed) {
- a->val_dont_use = v;
- } else {
- atomic_signal_fence(memory_order_seq_cst);
- a->val_dont_use = v;
- atomic_signal_fence(memory_order_seq_cst);
- }
- if (mo == memory_order_seq_cst)
- atomic_thread_fence(memory_order_seq_cst);
-}
-
template<typename T>
INLINE typename T::Type atomic_fetch_add(volatile T *a,
typename T::Type v, memory_order mo) {
diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic_clang_other.h b/libsanitizer/sanitizer_common/sanitizer_atomic_clang_other.h
new file mode 100644
index 0000000000..c66c0992e1
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_atomic_clang_other.h
@@ -0,0 +1,95 @@
+//===-- sanitizer_atomic_clang_other.h --------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
+#define SANITIZER_ATOMIC_CLANG_OTHER_H
+
+namespace __sanitizer {
+
+INLINE void proc_yield(int cnt) {
+ __asm__ __volatile__("" ::: "memory");
+}
+
+template<typename T>
+INLINE typename T::Type atomic_load(
+ const volatile T *a, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_consume
+ | memory_order_acquire | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ typename T::Type v;
+
+ if (sizeof(*a) < 8 || sizeof(void*) == 8) {
+ // Assume that aligned loads are atomic.
+ if (mo == memory_order_relaxed) {
+ v = a->val_dont_use;
+ } else if (mo == memory_order_consume) {
+ // Assume that processor respects data dependencies
+ // (and that compiler won't break them).
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ __asm__ __volatile__("" ::: "memory");
+ } else if (mo == memory_order_acquire) {
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ __sync_synchronize();
+ } else { // seq_cst
+ // E.g. on POWER we need a hw fence even before the store.
+ __sync_synchronize();
+ v = a->val_dont_use;
+ __sync_synchronize();
+ }
+ } else {
+ // 64-bit load on 32-bit platform.
+ // Gross, but simple and reliable.
+ // Assume that it is not in read-only memory.
+ v = __sync_fetch_and_add(
+ const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
+ }
+ return v;
+}
+
+template<typename T>
+INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_release
+ | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+
+ if (sizeof(*a) < 8 || sizeof(void*) == 8) {
+    // Assume that aligned stores are atomic.
+ if (mo == memory_order_relaxed) {
+ a->val_dont_use = v;
+ } else if (mo == memory_order_release) {
+ __sync_synchronize();
+ a->val_dont_use = v;
+ __asm__ __volatile__("" ::: "memory");
+ } else { // seq_cst
+ __sync_synchronize();
+ a->val_dont_use = v;
+ __sync_synchronize();
+ }
+ } else {
+ // 64-bit store on 32-bit platform.
+ // Gross, but simple and reliable.
+ typename T::Type cmp = a->val_dont_use;
+ typename T::Type cur;
+ for (;;) {
+ cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
+ if (cmp == v)
+ break;
+ cmp = cur;
+ }
+ }
+}
+
+} // namespace __sanitizer
+
+#endif // #ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
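The split keeps the internal atomics API unchanged; the acquire/release pairing it provides is what structures such as the AddrHashMap above rely on. A small sketch of that pattern (assumes sanitizer_atomic.h and namespace __sanitizer; the names are made up):

atomic_uintptr_t ready;   // zero-initialized at namespace scope
uptr payload;

void Publish(uptr v) {
  payload = v;                                    // plain write
  atomic_store(&ready, 1, memory_order_release);  // release: publishes the write above
}

bool TryConsume(uptr *out) {
  if (atomic_load(&ready, memory_order_acquire) == 0)  // acquire: pairs with the release store
    return false;
  *out = payload;  // guaranteed to observe the value written before the release store
  return true;
}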
diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic_clang_x86.h b/libsanitizer/sanitizer_common/sanitizer_atomic_clang_x86.h
new file mode 100644
index 0000000000..5df210eca7
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_atomic_clang_x86.h
@@ -0,0 +1,114 @@
+//===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_CLANG_X86_H
+#define SANITIZER_ATOMIC_CLANG_X86_H
+
+namespace __sanitizer {
+
+INLINE void proc_yield(int cnt) {
+ __asm__ __volatile__("" ::: "memory");
+ for (int i = 0; i < cnt; i++)
+ __asm__ __volatile__("pause");
+ __asm__ __volatile__("" ::: "memory");
+}
+
+template<typename T>
+INLINE typename T::Type atomic_load(
+ const volatile T *a, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_consume
+ | memory_order_acquire | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ typename T::Type v;
+
+ if (sizeof(*a) < 8 || sizeof(void*) == 8) {
+ // Assume that aligned loads are atomic.
+ if (mo == memory_order_relaxed) {
+ v = a->val_dont_use;
+ } else if (mo == memory_order_consume) {
+ // Assume that processor respects data dependencies
+ // (and that compiler won't break them).
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ __asm__ __volatile__("" ::: "memory");
+ } else if (mo == memory_order_acquire) {
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ // On x86 loads are implicitly acquire.
+ __asm__ __volatile__("" ::: "memory");
+ } else { // seq_cst
+      // On x86 plain MOV is enough for seq_cst load.
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ __asm__ __volatile__("" ::: "memory");
+ }
+ } else {
+ // 64-bit load on 32-bit platform.
+ __asm__ __volatile__(
+ "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
+ "movq %%mm0, %0;" // (ptr could be read-only)
+ "emms;" // Empty mmx state/Reset FP regs
+ : "=m" (v)
+ : "m" (a->val_dont_use)
+ : // mark the FP stack and mmx registers as clobbered
+ "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
+#ifdef __MMX__
+ "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
+#endif // #ifdef __MMX__
+ "memory");
+ }
+ return v;
+}
+
+template<typename T>
+INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_release
+ | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+
+ if (sizeof(*a) < 8 || sizeof(void*) == 8) {
+    // Assume that aligned stores are atomic.
+ if (mo == memory_order_relaxed) {
+ a->val_dont_use = v;
+ } else if (mo == memory_order_release) {
+ // On x86 stores are implicitly release.
+ __asm__ __volatile__("" ::: "memory");
+ a->val_dont_use = v;
+ __asm__ __volatile__("" ::: "memory");
+ } else { // seq_cst
+ // On x86 stores are implicitly release.
+ __asm__ __volatile__("" ::: "memory");
+ a->val_dont_use = v;
+ __sync_synchronize();
+ }
+ } else {
+ // 64-bit store on 32-bit platform.
+ __asm__ __volatile__(
+ "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
+ "movq %%mm0, %0;"
+ "emms;" // Empty mmx state/Reset FP regs
+ : "=m" (a->val_dont_use)
+ : "m" (v)
+ : // mark the FP stack and mmx registers as clobbered
+ "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
+#ifdef __MMX__
+ "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
+#endif // #ifdef __MMX__
+ "memory");
+ if (mo == memory_order_seq_cst)
+ __sync_synchronize();
+ }
+}
+
+} // namespace __sanitizer
+
+#endif // #ifndef SANITIZER_ATOMIC_CLANG_X86_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h b/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
index dac7c19199..1198dec3e9 100644
--- a/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
+++ b/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
@@ -22,30 +22,29 @@ extern "C" void _mm_pause();
extern "C" long _InterlockedExchangeAdd( // NOLINT
long volatile * Addend, long Value); // NOLINT
#pragma intrinsic(_InterlockedExchangeAdd)
-
-#ifdef _WIN64
+extern "C" short _InterlockedCompareExchange16( // NOLINT
+ short volatile *Destination, // NOLINT
+ short Exchange, short Comparand); // NOLINT
+#pragma intrinsic(_InterlockedCompareExchange16)
+extern "C"
+long long _InterlockedCompareExchange64( // NOLINT
+ long long volatile *Destination, // NOLINT
+ long long Exchange, long long Comparand); // NOLINT
+#pragma intrinsic(_InterlockedCompareExchange64)
extern "C" void *_InterlockedCompareExchangePointer(
void *volatile *Destination,
void *Exchange, void *Comparand);
#pragma intrinsic(_InterlockedCompareExchangePointer)
-#else
-// There's no _InterlockedCompareExchangePointer intrinsic on x86,
-// so call _InterlockedCompareExchange instead.
extern "C"
long __cdecl _InterlockedCompareExchange( // NOLINT
long volatile *Destination, // NOLINT
long Exchange, long Comparand); // NOLINT
#pragma intrinsic(_InterlockedCompareExchange)
-inline static void *_InterlockedCompareExchangePointer(
- void *volatile *Destination,
- void *Exchange, void *Comparand) {
- return reinterpret_cast<void*>(
- _InterlockedCompareExchange(
- reinterpret_cast<long volatile*>(Destination), // NOLINT
- reinterpret_cast<long>(Exchange), // NOLINT
- reinterpret_cast<long>(Comparand))); // NOLINT
-}
+#ifdef _WIN64
+extern "C" long long _InterlockedExchangeAdd64( // NOLINT
+ long long volatile * Addend, long long Value); // NOLINT
+#pragma intrinsic(_InterlockedExchangeAdd64)
#endif
namespace __sanitizer {
@@ -106,6 +105,40 @@ INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a,
(volatile long*)&a->val_dont_use, (long)v); // NOLINT
}
+INLINE uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
+ uptr v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+#ifdef _WIN64
+ return (uptr)_InterlockedExchangeAdd64(
+ (volatile long long*)&a->val_dont_use, (long long)v); // NOLINT
+#else
+ return (uptr)_InterlockedExchangeAdd(
+ (volatile long*)&a->val_dont_use, (long)v); // NOLINT
+#endif
+}
+
+INLINE u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
+ u32 v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return (u32)_InterlockedExchangeAdd(
+ (volatile long*)&a->val_dont_use, -(long)v); // NOLINT
+}
+
+INLINE uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
+ uptr v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+#ifdef _WIN64
+ return (uptr)_InterlockedExchangeAdd64(
+ (volatile long long*)&a->val_dont_use, -(long long)v); // NOLINT
+#else
+ return (uptr)_InterlockedExchangeAdd(
+ (volatile long*)&a->val_dont_use, -(long)v); // NOLINT
+#endif
+}
+
INLINE u8 atomic_exchange(volatile atomic_uint8_t *a,
u8 v, memory_order mo) {
(void)mo;
@@ -166,6 +199,45 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
return false;
}
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
+ u16 *cmp,
+ u16 xchg,
+ memory_order mo) {
+ u16 cmpv = *cmp;
+ u16 prev = (u16)_InterlockedCompareExchange16(
+ (volatile short*)&a->val_dont_use, (short)xchg, (short)cmpv);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
+ u32 *cmp,
+ u32 xchg,
+ memory_order mo) {
+ u32 cmpv = *cmp;
+ u32 prev = (u32)_InterlockedCompareExchange(
+ (volatile long*)&a->val_dont_use, (long)xchg, (long)cmpv);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
+ u64 *cmp,
+ u64 xchg,
+ memory_order mo) {
+ u64 cmpv = *cmp;
+ u64 prev = (u64)_InterlockedCompareExchange64(
+ (volatile long long*)&a->val_dont_use, (long long)xchg, (long long)cmpv);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
typename T::Type *cmp,
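The new _strong overloads above keep the existing contract: return true on success, and on failure write the observed value back through cmp. That supports the usual CAS-update loop, sketched here with a made-up helper:

// Lock-free increment with an upper bound (illustrative).
uptr BumpWithCap(atomic_uintptr_t *counter, uptr cap) {
  uptr cur = atomic_load(counter, memory_order_relaxed);
  for (;;) {
    if (cur >= cap)
      return cur;                                // nothing to do
    // On failure 'cur' is refreshed with the value actually observed.
    if (atomic_compare_exchange_strong(counter, &cur, cur + 1,
                                       memory_order_relaxed))
      return cur + 1;
  }
}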
diff --git a/libsanitizer/sanitizer_common/sanitizer_bitvector.h b/libsanitizer/sanitizer_common/sanitizer_bitvector.h
new file mode 100644
index 0000000000..bb2872facd
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_bitvector.h
@@ -0,0 +1,349 @@
+//===-- sanitizer_bitvector.h -----------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Specialized BitVector implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_BITVECTOR_H
+#define SANITIZER_BITVECTOR_H
+
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+// Fixed size bit vector based on a single basic integer.
+template <class basic_int_t = uptr>
+class BasicBitVector {
+ public:
+ enum SizeEnum { kSize = sizeof(basic_int_t) * 8 };
+
+ uptr size() const { return kSize; }
+ // No CTOR.
+ void clear() { bits_ = 0; }
+ void setAll() { bits_ = ~(basic_int_t)0; }
+ bool empty() const { return bits_ == 0; }
+
+ // Returns true if the bit has changed from 0 to 1.
+ bool setBit(uptr idx) {
+ basic_int_t old = bits_;
+ bits_ |= mask(idx);
+ return bits_ != old;
+ }
+
+ // Returns true if the bit has changed from 1 to 0.
+ bool clearBit(uptr idx) {
+ basic_int_t old = bits_;
+ bits_ &= ~mask(idx);
+ return bits_ != old;
+ }
+
+ bool getBit(uptr idx) const { return (bits_ & mask(idx)) != 0; }
+
+ uptr getAndClearFirstOne() {
+ CHECK(!empty());
+ uptr idx = LeastSignificantSetBitIndex(bits_);
+ clearBit(idx);
+ return idx;
+ }
+
+ // Do "this |= v" and return whether new bits have been added.
+ bool setUnion(const BasicBitVector &v) {
+ basic_int_t old = bits_;
+ bits_ |= v.bits_;
+ return bits_ != old;
+ }
+
+ // Do "this &= v" and return whether any bits have been removed.
+ bool setIntersection(const BasicBitVector &v) {
+ basic_int_t old = bits_;
+ bits_ &= v.bits_;
+ return bits_ != old;
+ }
+
+ // Do "this &= ~v" and return whether any bits have been removed.
+ bool setDifference(const BasicBitVector &v) {
+ basic_int_t old = bits_;
+ bits_ &= ~v.bits_;
+ return bits_ != old;
+ }
+
+ void copyFrom(const BasicBitVector &v) { bits_ = v.bits_; }
+
+ // Returns true if 'this' intersects with 'v'.
+ bool intersectsWith(const BasicBitVector &v) const {
+ return (bits_ & v.bits_) != 0;
+ }
+
+ // for (BasicBitVector<>::Iterator it(bv); it.hasNext();) {
+ // uptr idx = it.next();
+ // use(idx);
+ // }
+ class Iterator {
+ public:
+ Iterator() { }
+ explicit Iterator(const BasicBitVector &bv) : bv_(bv) {}
+ bool hasNext() const { return !bv_.empty(); }
+ uptr next() { return bv_.getAndClearFirstOne(); }
+ void clear() { bv_.clear(); }
+ private:
+ BasicBitVector bv_;
+ };
+
+ private:
+ basic_int_t mask(uptr idx) const {
+ CHECK_LT(idx, size());
+ return (basic_int_t)1UL << idx;
+ }
+ basic_int_t bits_;
+};
+
+// Fixed size bit vector of (kLevel1Size*BV::kSize**2) bits.
+// The implementation is optimized for better performance on
+// sparse bit vectors, i.e. those with few set bits.
+template <uptr kLevel1Size = 1, class BV = BasicBitVector<> >
+class TwoLevelBitVector {
+ // This is essentially a 2-level bit vector.
+ // Set bit in the first level BV indicates that there are set bits
+ // in the corresponding BV of the second level.
+ // This structure allows O(kLevel1Size) time for clear() and empty(),
+  // as well as fast handling of sparse BVs.
+ public:
+ enum SizeEnum { kSize = BV::kSize * BV::kSize * kLevel1Size };
+ // No CTOR.
+
+ uptr size() const { return kSize; }
+
+ void clear() {
+ for (uptr i = 0; i < kLevel1Size; i++)
+ l1_[i].clear();
+ }
+
+ void setAll() {
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ l1_[i0].setAll();
+ for (uptr i1 = 0; i1 < BV::kSize; i1++)
+ l2_[i0][i1].setAll();
+ }
+ }
+
+ bool empty() const {
+ for (uptr i = 0; i < kLevel1Size; i++)
+ if (!l1_[i].empty())
+ return false;
+ return true;
+ }
+
+ // Returns true if the bit has changed from 0 to 1.
+ bool setBit(uptr idx) {
+ check(idx);
+ uptr i0 = idx0(idx);
+ uptr i1 = idx1(idx);
+ uptr i2 = idx2(idx);
+ if (!l1_[i0].getBit(i1)) {
+ l1_[i0].setBit(i1);
+ l2_[i0][i1].clear();
+ }
+ bool res = l2_[i0][i1].setBit(i2);
+ // Printf("%s: %zd => %zd %zd %zd; %d\n", __func__,
+ // idx, i0, i1, i2, res);
+ return res;
+ }
+
+ bool clearBit(uptr idx) {
+ check(idx);
+ uptr i0 = idx0(idx);
+ uptr i1 = idx1(idx);
+ uptr i2 = idx2(idx);
+ bool res = false;
+ if (l1_[i0].getBit(i1)) {
+ res = l2_[i0][i1].clearBit(i2);
+ if (l2_[i0][i1].empty())
+ l1_[i0].clearBit(i1);
+ }
+ return res;
+ }
+
+ bool getBit(uptr idx) const {
+ check(idx);
+ uptr i0 = idx0(idx);
+ uptr i1 = idx1(idx);
+ uptr i2 = idx2(idx);
+ // Printf("%s: %zd => %zd %zd %zd\n", __func__, idx, i0, i1, i2);
+ return l1_[i0].getBit(i1) && l2_[i0][i1].getBit(i2);
+ }
+
+ uptr getAndClearFirstOne() {
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ if (l1_[i0].empty()) continue;
+ uptr i1 = l1_[i0].getAndClearFirstOne();
+ uptr i2 = l2_[i0][i1].getAndClearFirstOne();
+ if (!l2_[i0][i1].empty())
+ l1_[i0].setBit(i1);
+ uptr res = i0 * BV::kSize * BV::kSize + i1 * BV::kSize + i2;
+ // Printf("getAndClearFirstOne: %zd %zd %zd => %zd\n", i0, i1, i2, res);
+ return res;
+ }
+ CHECK(0);
+ return 0;
+ }
+
+ // Do "this |= v" and return whether new bits have been added.
+ bool setUnion(const TwoLevelBitVector &v) {
+ bool res = false;
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ BV t = v.l1_[i0];
+ while (!t.empty()) {
+ uptr i1 = t.getAndClearFirstOne();
+ if (l1_[i0].setBit(i1))
+ l2_[i0][i1].clear();
+ if (l2_[i0][i1].setUnion(v.l2_[i0][i1]))
+ res = true;
+ }
+ }
+ return res;
+ }
+
+ // Do "this &= v" and return whether any bits have been removed.
+ bool setIntersection(const TwoLevelBitVector &v) {
+ bool res = false;
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ if (l1_[i0].setIntersection(v.l1_[i0]))
+ res = true;
+ if (!l1_[i0].empty()) {
+ BV t = l1_[i0];
+ while (!t.empty()) {
+ uptr i1 = t.getAndClearFirstOne();
+ if (l2_[i0][i1].setIntersection(v.l2_[i0][i1]))
+ res = true;
+ if (l2_[i0][i1].empty())
+ l1_[i0].clearBit(i1);
+ }
+ }
+ }
+ return res;
+ }
+
+ // Do "this &= ~v" and return whether any bits have been removed.
+ bool setDifference(const TwoLevelBitVector &v) {
+ bool res = false;
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ BV t = l1_[i0];
+ t.setIntersection(v.l1_[i0]);
+ while (!t.empty()) {
+ uptr i1 = t.getAndClearFirstOne();
+ if (l2_[i0][i1].setDifference(v.l2_[i0][i1]))
+ res = true;
+ if (l2_[i0][i1].empty())
+ l1_[i0].clearBit(i1);
+ }
+ }
+ return res;
+ }
+
+ void copyFrom(const TwoLevelBitVector &v) {
+ clear();
+ setUnion(v);
+ }
+
+ // Returns true if 'this' intersects with 'v'.
+ bool intersectsWith(const TwoLevelBitVector &v) const {
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ BV t = l1_[i0];
+ t.setIntersection(v.l1_[i0]);
+ while (!t.empty()) {
+ uptr i1 = t.getAndClearFirstOne();
+ if (!v.l1_[i0].getBit(i1)) continue;
+ if (l2_[i0][i1].intersectsWith(v.l2_[i0][i1]))
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // for (TwoLevelBitVector<>::Iterator it(bv); it.hasNext();) {
+ // uptr idx = it.next();
+ // use(idx);
+ // }
+ class Iterator {
+ public:
+ Iterator() { }
+ explicit Iterator(const TwoLevelBitVector &bv) : bv_(bv), i0_(0), i1_(0) {
+ it1_.clear();
+ it2_.clear();
+ }
+
+ bool hasNext() const {
+ if (it1_.hasNext()) return true;
+ for (uptr i = i0_; i < kLevel1Size; i++)
+ if (!bv_.l1_[i].empty()) return true;
+ return false;
+ }
+
+ uptr next() {
+ // Printf("++++: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
+ // it2_.hasNext(), kSize);
+ if (!it1_.hasNext() && !it2_.hasNext()) {
+ for (; i0_ < kLevel1Size; i0_++) {
+ if (bv_.l1_[i0_].empty()) continue;
+ it1_ = typename BV::Iterator(bv_.l1_[i0_]);
+ // Printf("+i0: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
+ // it2_.hasNext(), kSize);
+ break;
+ }
+ }
+ if (!it2_.hasNext()) {
+ CHECK(it1_.hasNext());
+ i1_ = it1_.next();
+ it2_ = typename BV::Iterator(bv_.l2_[i0_][i1_]);
+ // Printf("++i1: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
+ // it2_.hasNext(), kSize);
+ }
+ CHECK(it2_.hasNext());
+ uptr i2 = it2_.next();
+ uptr res = i0_ * BV::kSize * BV::kSize + i1_ * BV::kSize + i2;
+ // Printf("+ret: %zd %zd; %d %d; size %zd; res: %zd\n", i0_, i1_,
+ // it1_.hasNext(), it2_.hasNext(), kSize, res);
+ if (!it1_.hasNext() && !it2_.hasNext())
+ i0_++;
+ return res;
+ }
+
+ private:
+ const TwoLevelBitVector &bv_;
+ uptr i0_, i1_;
+ typename BV::Iterator it1_, it2_;
+ };
+
+ private:
+ void check(uptr idx) const { CHECK_LE(idx, size()); }
+
+ uptr idx0(uptr idx) const {
+ uptr res = idx / (BV::kSize * BV::kSize);
+ CHECK_LE(res, kLevel1Size);
+ return res;
+ }
+
+ uptr idx1(uptr idx) const {
+ uptr res = (idx / BV::kSize) % BV::kSize;
+ CHECK_LE(res, BV::kSize);
+ return res;
+ }
+
+ uptr idx2(uptr idx) const {
+ uptr res = idx % BV::kSize;
+ CHECK_LE(res, BV::kSize);
+ return res;
+ }
+
+ BV l1_[kLevel1Size];
+ BV l2_[kLevel1Size][BV::kSize];
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_BITVECTOR_H
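Putting the iterator comment above together into a self-contained sketch (sizes and indices are arbitrary; assumes sanitizer_bitvector.h and namespace __sanitizer):

typedef TwoLevelBitVector<1, BasicBitVector<u64> > BV;  // 1 * 64 * 64 = 4096 bits

void Example() {
  BV bv;          // no constructor, so clear() before first use
  bv.clear();
  bv.setBit(5);
  bv.setBit(4000);
  for (BV::Iterator it(bv); it.hasNext();) {
    uptr idx = it.next();   // yields 5, then 4000
    (void)idx;
  }
}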
diff --git a/libsanitizer/sanitizer_common/sanitizer_bvgraph.h b/libsanitizer/sanitizer_common/sanitizer_bvgraph.h
new file mode 100644
index 0000000000..6ef0e81e04
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_bvgraph.h
@@ -0,0 +1,163 @@
+//===-- sanitizer_bvgraph.h -------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime.
+// BVGraph -- a directed graph.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_BVGRAPH_H
+#define SANITIZER_BVGRAPH_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_bitvector.h"
+
+namespace __sanitizer {
+
+// Directed graph of fixed size implemented as an array of bit vectors.
+// Not thread-safe, all accesses should be protected by an external lock.
+template<class BV>
+class BVGraph {
+ public:
+ enum SizeEnum { kSize = BV::kSize };
+ uptr size() const { return kSize; }
+ // No CTOR.
+ void clear() {
+ for (uptr i = 0; i < size(); i++)
+ v[i].clear();
+ }
+
+ bool empty() const {
+ for (uptr i = 0; i < size(); i++)
+ if (!v[i].empty())
+ return false;
+ return true;
+ }
+
+ // Returns true if a new edge was added.
+ bool addEdge(uptr from, uptr to) {
+ check(from, to);
+ return v[from].setBit(to);
+ }
+
+  // Returns the number of new edges added; their source nodes are stored in
+  // added_edges (at most max_added_edges of them).
+ uptr addEdges(const BV &from, uptr to, uptr added_edges[],
+ uptr max_added_edges) {
+ uptr res = 0;
+ t1.copyFrom(from);
+ while (!t1.empty()) {
+ uptr node = t1.getAndClearFirstOne();
+ if (v[node].setBit(to))
+ if (res < max_added_edges)
+ added_edges[res++] = node;
+ }
+ return res;
+ }
+
+ // *EXPERIMENTAL*
+  // Returns true if an edge from=>to exists.
+ // This function does not use any global state except for 'this' itself,
+ // and thus can be called from different threads w/o locking.
+ // This would be racy.
+ // FIXME: investigate how much we can prove about this race being "benign".
+ bool hasEdge(uptr from, uptr to) { return v[from].getBit(to); }
+
+ // Returns true if the edge from=>to was removed.
+ bool removeEdge(uptr from, uptr to) {
+ return v[from].clearBit(to);
+ }
+
+ // Returns true if at least one edge *=>to was removed.
+ bool removeEdgesTo(const BV &to) {
+ bool res = 0;
+ for (uptr from = 0; from < size(); from++) {
+ if (v[from].setDifference(to))
+ res = true;
+ }
+ return res;
+ }
+
+ // Returns true if at least one edge from=>* was removed.
+ bool removeEdgesFrom(const BV &from) {
+ bool res = false;
+ t1.copyFrom(from);
+ while (!t1.empty()) {
+ uptr idx = t1.getAndClearFirstOne();
+ if (!v[idx].empty()) {
+ v[idx].clear();
+ res = true;
+ }
+ }
+ return res;
+ }
+
+ void removeEdgesFrom(uptr from) {
+ return v[from].clear();
+ }
+
+ bool hasEdge(uptr from, uptr to) const {
+ check(from, to);
+ return v[from].getBit(to);
+ }
+
+ // Returns true if there is a path from the node 'from'
+ // to any of the nodes in 'targets'.
+ bool isReachable(uptr from, const BV &targets) {
+ BV &to_visit = t1,
+ &visited = t2;
+ to_visit.copyFrom(v[from]);
+ visited.clear();
+ visited.setBit(from);
+ while (!to_visit.empty()) {
+ uptr idx = to_visit.getAndClearFirstOne();
+ if (visited.setBit(idx))
+ to_visit.setUnion(v[idx]);
+ }
+ return targets.intersectsWith(visited);
+ }
+
+ // Finds a path from 'from' to one of the nodes in 'target',
+ // stores up to 'path_size' items of the path into 'path',
+ // returns the path length, or 0 if there is no path of size 'path_size'.
+ uptr findPath(uptr from, const BV &targets, uptr *path, uptr path_size) {
+ if (path_size == 0)
+ return 0;
+ path[0] = from;
+ if (targets.getBit(from))
+ return 1;
+ // The function is recursive, so we don't want to create BV on stack.
+ // Instead of a getAndClearFirstOne loop we use the slower iterator.
+ for (typename BV::Iterator it(v[from]); it.hasNext(); ) {
+ uptr idx = it.next();
+ if (uptr res = findPath(idx, targets, path + 1, path_size - 1))
+ return res + 1;
+ }
+ return 0;
+ }
+
+ // Same as findPath, but finds a shortest path.
+ uptr findShortestPath(uptr from, const BV &targets, uptr *path,
+ uptr path_size) {
+ for (uptr p = 1; p <= path_size; p++)
+ if (findPath(from, targets, path, p) == p)
+ return p;
+ return 0;
+ }
+
+ private:
+ void check(uptr idx1, uptr idx2) const {
+ CHECK_LT(idx1, size());
+ CHECK_LT(idx2, size());
+ }
+ BV v[kSize];
+ // Keep temporary vectors here since we can not create large objects on stack.
+ BV t1, t2;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_BVGRAPH_H
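A sketch of how a lock-order graph could be driven through this interface. The deadlock-detection framing and the helper names are illustrative, not part of the patch:

typedef TwoLevelBitVector<> MutexBV;    // default size: 64 * 64 = 4096 nodes on 64-bit
static BVGraph<MutexBV> g;              // edge A=>B means "A was held while acquiring B"

bool WouldDeadlock(uptr cur_mutex, uptr new_mutex) {
  MutexBV targets;
  targets.clear();
  targets.setBit(cur_mutex);
  // A cycle would appear iff new_mutex can already reach cur_mutex.
  return g.isReachable(new_mutex, targets);
}

void OnAcquire(uptr cur_mutex, uptr new_mutex) {
  if (!WouldDeadlock(cur_mutex, new_mutex))
    g.addEdge(cur_mutex, new_mutex);
}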
diff --git a/libsanitizer/sanitizer_common/sanitizer_common.cc b/libsanitizer/sanitizer_common/sanitizer_common.cc
index 0d93527aa5..f06efffb32 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_common.cc
@@ -12,8 +12,6 @@
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
-#include "sanitizer_stacktrace.h"
-#include "sanitizer_symbolizer.h"
namespace __sanitizer {
@@ -91,7 +89,7 @@ uptr ReadFileToBuffer(const char *file_name, char **buff,
if (internal_iserror(openrv)) return 0;
fd_t fd = openrv;
UnmapOrDie(*buff, *buff_size);
- *buff = (char*)MmapOrDie(size, __FUNCTION__);
+ *buff = (char*)MmapOrDie(size, __func__);
*buff_size = size;
// Read up to one page at a time.
read_len = 0;
@@ -155,23 +153,12 @@ const char *StripPathPrefix(const char *filepath,
return pos;
}
-void PrintSourceLocation(InternalScopedString *buffer, const char *file,
- int line, int column) {
- CHECK(file);
- buffer->append("%s",
- StripPathPrefix(file, common_flags()->strip_path_prefix));
- if (line > 0) {
- buffer->append(":%d", line);
- if (column > 0)
- buffer->append(":%d", column);
- }
-}
-
-void PrintModuleAndOffset(InternalScopedString *buffer, const char *module,
- uptr offset) {
- buffer->append("(%s+0x%zx)",
- StripPathPrefix(module, common_flags()->strip_path_prefix),
- offset);
+const char *StripModuleName(const char *module) {
+ if (module == 0)
+ return 0;
+ if (const char *slash_pos = internal_strrchr(module, '/'))
+ return slash_pos + 1;
+ return module;
}
void ReportErrorSummary(const char *error_message) {
@@ -195,31 +182,17 @@ void ReportErrorSummary(const char *error_type, const char *file,
ReportErrorSummary(buff.data());
}
-void ReportErrorSummary(const char *error_type, StackTrace *stack) {
- if (!common_flags()->print_summary)
- return;
- AddressInfo ai;
-#if !SANITIZER_GO
- if (stack->size > 0 && Symbolizer::Get()->IsAvailable()) {
- // Currently, we include the first stack frame into the report summary.
- // Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
- uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
- Symbolizer::Get()->SymbolizeCode(pc, &ai, 1);
- }
-#endif
- ReportErrorSummary(error_type, ai.file, ai.line, ai.function);
-}
-
LoadedModule::LoadedModule(const char *module_name, uptr base_address) {
full_name_ = internal_strdup(module_name);
base_address_ = base_address;
n_ranges_ = 0;
}
-void LoadedModule::addAddressRange(uptr beg, uptr end) {
+void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable) {
CHECK_LT(n_ranges_, kMaxNumberOfAddressRanges);
ranges_[n_ranges_].beg = beg;
ranges_[n_ranges_].end = end;
+ exec_[n_ranges_] = executable;
n_ranges_++;
}
@@ -231,15 +204,23 @@ bool LoadedModule::containsAddress(uptr address) const {
return false;
}
-char *StripModuleName(const char *module) {
- if (module == 0)
- return 0;
- const char *short_module_name = internal_strrchr(module, '/');
- if (short_module_name)
- short_module_name += 1;
- else
- short_module_name = module;
- return internal_strdup(short_module_name);
+static atomic_uintptr_t g_total_mmaped;
+
+void IncreaseTotalMmap(uptr size) {
+ if (!common_flags()->mmap_limit_mb) return;
+ uptr total_mmaped =
+ atomic_fetch_add(&g_total_mmaped, size, memory_order_relaxed) + size;
+ if ((total_mmaped >> 20) > common_flags()->mmap_limit_mb) {
+ // Since for now mmap_limit_mb is not a user-facing flag, just CHECK.
+ uptr mmap_limit_mb = common_flags()->mmap_limit_mb;
+ common_flags()->mmap_limit_mb = 0; // Allow mmap in CHECK.
+ RAW_CHECK(total_mmaped >> 20 < mmap_limit_mb);
+ }
+}
+
+void DecreaseTotalMmap(uptr size) {
+ if (!common_flags()->mmap_limit_mb) return;
+ atomic_fetch_sub(&g_total_mmaped, size, memory_order_relaxed);
}
} // namespace __sanitizer
@@ -274,11 +255,6 @@ void __sanitizer_set_report_path(const char *path) {
}
}
-void NOINLINE __sanitizer_sandbox_on_notify(void *reserved) {
- (void)reserved;
- PrepareForSandboxing();
-}
-
void __sanitizer_report_error_summary(const char *error_summary) {
Printf("%s\n", error_summary);
}
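Note the ownership change in StripModuleName: it now returns a pointer into the caller's string rather than a fresh internal_strdup copy, so the original buffer must stay alive. A tiny sketch of the new usage:

const char *path = "/usr/lib/libfoo.so";           // example input
const char *short_name = StripModuleName(path);    // points at "libfoo.so" inside 'path'
// Previously the result was a heap copy that the caller had to release with InternalFree().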
diff --git a/libsanitizer/sanitizer_common/sanitizer_common.h b/libsanitizer/sanitizer_common/sanitizer_common.h
index 07d1b63db5..a892491387 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common.h
+++ b/libsanitizer/sanitizer_common/sanitizer_common.h
@@ -17,6 +17,7 @@
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
+#include "sanitizer_flags.h"
namespace __sanitizer {
struct StackTrace;
@@ -26,13 +27,15 @@ const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;
#if defined(__powerpc__) || defined(__powerpc64__)
-const uptr kCacheLineSize = 128;
+ const uptr kCacheLineSize = 128;
#else
-const uptr kCacheLineSize = 64;
+ const uptr kCacheLineSize = 64;
#endif
const uptr kMaxPathLength = 512;
+const uptr kMaxThreadStackSize = 1 << 30; // 1Gb
+
extern const char *SanitizerToolName; // Can be changed by the tool.
uptr GetPageSize();
@@ -51,6 +54,7 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
void *MmapOrDie(uptr size, const char *mem_type);
void UnmapOrDie(void *addr, uptr size);
void *MmapFixedNoReserve(uptr fixed_addr, uptr size);
+void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
void *Mprotect(uptr fixed_addr, uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
@@ -58,6 +62,8 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
void FlushUnneededShadowMemory(uptr addr, uptr size);
+void IncreaseTotalMmap(uptr size);
+void DecreaseTotalMmap(uptr size);
// InternalScopedBuffer can be used instead of large stack arrays to
// keep frame size low.
@@ -123,9 +129,18 @@ void RawWrite(const char *buffer);
bool PrintsToTty();
// Caching version of PrintsToTty(). Not thread-safe.
bool PrintsToTtyCached();
+bool ColorizeReports();
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
+#define VReport(level, ...) \
+ do { \
+ if ((uptr)common_flags()->verbosity >= (level)) Report(__VA_ARGS__); \
+ } while (0)
+#define VPrintf(level, ...) \
+ do { \
+ if ((uptr)common_flags()->verbosity >= (level)) Printf(__VA_ARGS__); \
+ } while (0)
// Can be used to prevent mixing error reports from different sanitizers.
extern StaticSpinMutex CommonSanitizerReportMutex;
@@ -148,17 +163,18 @@ uptr ReadFileToBuffer(const char *file_name, char **buff,
// (or NULL if the mapping fails). Stores the size of mmaped region
// in '*buff_size'.
void *MapFileToMemory(const char *file_name, uptr *buff_size);
+void *MapWritableFileToMemory(void *addr, uptr size, uptr fd, uptr offset);
+
+bool IsAccessibleMemoryRange(uptr beg, uptr size);
// Error report formatting.
const char *StripPathPrefix(const char *filepath,
const char *strip_file_prefix);
-void PrintSourceLocation(InternalScopedString *buffer, const char *file,
- int line, int column);
-void PrintModuleAndOffset(InternalScopedString *buffer,
- const char *module, uptr offset);
+// Strip the directories from the module name.
+const char *StripModuleName(const char *module);
// OS
-void DisableCoreDumper();
+void DisableCoreDumperIfNecessary();
void DumpProcessMap();
bool FileExists(const char *filename);
const char *GetEnv(const char *name);
@@ -169,7 +185,16 @@ u32 GetUid();
void ReExec();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
-void PrepareForSandboxing();
+bool AddressSpaceIsUnlimited();
+void SetAddressSpaceUnlimited();
+void AdjustStackSize(void *attr);
+void PrepareForSandboxing(__sanitizer_sandbox_arguments *args);
+void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
+void SetSandboxingCallback(void (*f)());
+
+void CovUpdateMapping(uptr caller_pc = 0);
+void CovBeforeFork();
+void CovAfterFork(int child_pid);
void InitTlsSize();
uptr GetTlsSize();
@@ -180,9 +205,6 @@ void SleepForMillis(int millis);
u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
-// Strip the directories from the module name, return a new string allocated
-// with internal_strdup.
-char *StripModuleName(const char *module);
// Exit
void NORETURN Abort();
@@ -206,6 +228,14 @@ typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);
+// Functions related to signal handling.
+typedef void (*SignalHandlerType)(int, void *, void *);
+bool IsDeadlySignal(int signum);
+void InstallDeadlySignalHandlers(SignalHandlerType handler);
+// Alternative signal stack (POSIX-only).
+void SetAlternateSignalStack();
+void UnsetAlternateSignalStack();
+
// We don't want a summary too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string:
@@ -213,7 +243,7 @@ const int kMaxSummaryLength = 1024;
// and pass it to __sanitizer_report_error_summary.
void ReportErrorSummary(const char *error_message);
// Same as above, but construct error_message as:
-// error_type: file:line function
+// error_type file:line function
void ReportErrorSummary(const char *error_type, const char *file,
int line, const char *function);
void ReportErrorSummary(const char *error_type, StackTrace *trace);
@@ -243,6 +273,19 @@ INLINE uptr MostSignificantSetBitIndex(uptr x) {
return up;
}
+INLINE uptr LeastSignificantSetBitIndex(uptr x) {
+ CHECK_NE(x, 0U);
+ unsigned long up; // NOLINT
+#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
+ up = __builtin_ctzl(x);
+#elif defined(_WIN64)
+ _BitScanForward64(&up, x);
+#else
+ _BitScanForward(&up, x);
+#endif
+ return up;
+}
+
INLINE bool IsPowerOfTwo(uptr x) {
return (x & (x - 1)) == 0;
}
@@ -307,12 +350,6 @@ INLINE int ToLower(int c) {
return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}
-#if SANITIZER_WORDSIZE == 64
-# define FIRST_32_SECOND_64(a, b) (b)
-#else
-# define FIRST_32_SECOND_64(a, b) (a)
-#endif
-
// A low-level vector based on mmap. May incur a significant memory overhead for
// small vectors.
// WARNING: The current implementation supports only POD types.
@@ -442,12 +479,17 @@ uptr InternalBinarySearch(const Container &v, uptr first, uptr last,
class LoadedModule {
public:
LoadedModule(const char *module_name, uptr base_address);
- void addAddressRange(uptr beg, uptr end);
+ void addAddressRange(uptr beg, uptr end, bool executable);
bool containsAddress(uptr address) const;
const char *full_name() const { return full_name_; }
uptr base_address() const { return base_address_; }
+ uptr n_ranges() const { return n_ranges_; }
+ uptr address_range_start(int i) const { return ranges_[i].beg; }
+ uptr address_range_end(int i) const { return ranges_[i].end; }
+ bool address_range_executable(int i) const { return exec_[i]; }
+
private:
struct AddressRange {
uptr beg;
@@ -457,6 +499,7 @@ class LoadedModule {
uptr base_address_;
static const uptr kMaxNumberOfAddressRanges = 6;
AddressRange ranges_[kMaxNumberOfAddressRanges];
+ bool exec_[kMaxNumberOfAddressRanges];
uptr n_ranges_;
};
@@ -477,6 +520,36 @@ const uptr kPthreadDestructorIterations = 0;
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);
+
+#if (SANITIZER_FREEBSD || SANITIZER_LINUX) && !defined(SANITIZER_GO)
+extern uptr indirect_call_wrapper;
+void SetIndirectCallWrapper(uptr wrapper);
+
+template <typename F>
+F IndirectExternCall(F f) {
+ typedef F (*WrapF)(F);
+ return indirect_call_wrapper ? ((WrapF)indirect_call_wrapper)(f) : f;
+}
+#else
+INLINE void SetIndirectCallWrapper(uptr wrapper) {}
+template <typename F>
+F IndirectExternCall(F f) {
+ return f;
+}
+#endif
+
+#if SANITIZER_ANDROID
+// Initialize Android logging. Any writes before this are silently lost.
+void AndroidLogInit();
+void AndroidLogWrite(const char *buffer);
+void GetExtraActivationFlags(char *buf, uptr size);
+void SanitizerInitializeUnwinder();
+#else
+INLINE void AndroidLogInit() {}
+INLINE void AndroidLogWrite(const char *buffer_unused) {}
+INLINE void GetExtraActivationFlags(char *buf, uptr size) { *buf = '\0'; }
+INLINE void SanitizerInitializeUnwinder() {}
+#endif
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
@@ -484,4 +557,9 @@ inline void *operator new(__sanitizer::operator_new_size_type size,
return alloc.Allocate(size);
}
+struct StackDepotStats {
+ uptr n_uniq_ids;
+ uptr allocated;
+};
+
#endif // SANITIZER_COMMON_H
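The VReport/VPrintf macros added above gate output on common_flags()->verbosity and replace the hand-written "if (common_flags()->verbosity) Report(...)" pattern. A short sketch of typical use inside the runtime (the function and messages are made up):

void MapShadow(uptr beg, uptr end) {
  // ... actual mapping elided ...
  VReport(1, "Shadow mapped\n");                              // shown at verbosity >= 1
  VPrintf(2, "  range: %p-%p\n", (void *)beg, (void *)end);   // shown at verbosity >= 2
}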
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
index e301dc17bd..10f321838e 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
@@ -11,6 +11,7 @@
// This file should be included into the tool's interceptor file,
// which has to define its own macros:
// COMMON_INTERCEPTOR_ENTER
+// COMMON_INTERCEPTOR_ENTER_NOIGNORE
// COMMON_INTERCEPTOR_READ_RANGE
// COMMON_INTERCEPTOR_WRITE_RANGE
// COMMON_INTERCEPTOR_INITIALIZE_RANGE
@@ -24,9 +25,13 @@
// COMMON_INTERCEPTOR_MUTEX_REPAIR
// COMMON_INTERCEPTOR_SET_PTHREAD_NAME
// COMMON_INTERCEPTOR_HANDLE_RECVMSG
+// COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
//===----------------------------------------------------------------------===//
#include "interception/interception.h"
+#include "sanitizer_addrhashmap.h"
+#include "sanitizer_placement_new.h"
#include "sanitizer_platform_interceptors.h"
+#include "sanitizer_tls_get_addr.h"
#include <stdarg.h>
@@ -34,8 +39,16 @@
#define va_copy(dst, src) ((dst) = (src))
#endif // _WIN32
+#if SANITIZER_FREEBSD
+#define pthread_setname_np pthread_set_name_np
+#endif
+
#ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE
-#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, p, size) {}
+#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(p, size) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_UNPOISON_PARAM
+#define COMMON_INTERCEPTOR_UNPOISON_PARAM(count) {}
#endif
#ifndef COMMON_INTERCEPTOR_FD_ACCESS
@@ -58,14 +71,87 @@
#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) ((void)(msg))
#endif
+#ifndef COMMON_INTERCEPTOR_FILE_OPEN
+#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_FILE_CLOSE
+#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_LIBRARY_LOADED
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, map) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_LIBRARY_UNLOADED
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_ENTER_NOIGNORE
+#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, ...) \
+ COMMON_INTERCEPTOR_ENTER(ctx, __VA_ARGS__)
+#endif
+
+#ifndef COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
+#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (0)
+#endif
+
+struct FileMetadata {
+ // For open_memstream().
+ char **addr;
+ SIZE_T *size;
+};
+
+struct CommonInterceptorMetadata {
+ enum {
+ CIMT_INVALID = 0,
+ CIMT_FILE
+ } type;
+ union {
+ FileMetadata file;
+ };
+};
+
+typedef AddrHashMap<CommonInterceptorMetadata, 31051> MetadataHashMap;
+
+static MetadataHashMap *interceptor_metadata_map;
+
+#if SI_NOT_WINDOWS
+UNUSED static void SetInterceptorMetadata(__sanitizer_FILE *addr,
+ const FileMetadata &file) {
+ MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr);
+ CHECK(h.created());
+ h->type = CommonInterceptorMetadata::CIMT_FILE;
+ h->file = file;
+}
+
+UNUSED static const FileMetadata *GetInterceptorMetadata(
+ __sanitizer_FILE *addr) {
+ MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr,
+ /* remove */ false,
+ /* create */ false);
+ if (h.exists()) {
+ CHECK(!h.created());
+ CHECK(h->type == CommonInterceptorMetadata::CIMT_FILE);
+ return &h->file;
+ } else {
+ return 0;
+ }
+}
+
+UNUSED static void DeleteInterceptorMetadata(void *addr) {
+ MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr, true);
+ CHECK(h.exists());
+}
+#endif // SI_NOT_WINDOWS
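A minimal sketch of how these metadata helpers are meant to be used, assuming an open_memstream-style interceptor records the caller's pointers; RememberMemstream is an illustrative name, not part of this patch.
// Sketch: record the caller's buffer/size pointers for a stream returned by
// open_memstream(), keyed by the FILE* address, so later interceptors can
// look them up with GetInterceptorMetadata() and unpoison the buffer.
static __sanitizer_FILE *RememberMemstream(__sanitizer_FILE *f, char **ptr,
                                            SIZE_T *sizeloc) {
  if (f) {
    FileMetadata meta = {ptr, sizeloc};
    SetInterceptorMetadata(f, meta);
  }
  return f;
}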
+
#if SANITIZER_INTERCEPT_TEXTDOMAIN
INTERCEPTOR(char*, textdomain, const char *domainname) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, textdomain, domainname);
char* domain = REAL(textdomain)(domainname);
if (domain) {
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, domain,
- REAL(strlen)(domain) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(domain, REAL(strlen)(domain) + 1);
}
return domain;
}
@@ -95,6 +181,8 @@ INTERCEPTOR(int, strcmp, const char *s1, const char *s2) {
}
INTERCEPTOR(int, strncmp, const char *s1, const char *s2, uptr size) {
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_strncmp(s1, s2, size);
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strncmp, s1, s2, size);
unsigned char c1 = 0, c2 = 0;
@@ -160,12 +248,41 @@ INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T n) {
#define INIT_STRNCASECMP
#endif
+#if SANITIZER_INTERCEPT_MEMCHR
+INTERCEPTOR(void*, memchr, const void *s, int c, SIZE_T n) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, memchr, s, c, n);
+ void *res = REAL(memchr)(s, c, n);
+ uptr len = res ? (char*)res - (char*)s + 1 : n;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, len);
+ return res;
+}
+
+#define INIT_MEMCHR COMMON_INTERCEPT_FUNCTION(memchr)
+#else
+#define INIT_MEMCHR
+#endif
+
+#if SANITIZER_INTERCEPT_MEMRCHR
+INTERCEPTOR(void*, memrchr, const void *s, int c, SIZE_T n) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, memrchr, s, c, n);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, n);
+ return REAL(memrchr)(s, c, n);
+}
+
+#define INIT_MEMRCHR COMMON_INTERCEPT_FUNCTION(memrchr)
+#else
+#define INIT_MEMRCHR
+#endif
+
#if SANITIZER_INTERCEPT_FREXP
INTERCEPTOR(double, frexp, double x, int *exp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, frexp, x, exp);
- double res = REAL(frexp)(x, exp);
+ // Assuming frexp() always writes to |exp|.
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
+ double res = REAL(frexp)(x, exp);
return res;
}
@@ -178,6 +295,9 @@ INTERCEPTOR(double, frexp, double x, int *exp) {
INTERCEPTOR(float, frexpf, float x, int *exp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, frexpf, x, exp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
float res = REAL(frexpf)(x, exp);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
return res;
@@ -186,6 +306,9 @@ INTERCEPTOR(float, frexpf, float x, int *exp) {
INTERCEPTOR(long double, frexpl, long double x, int *exp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, frexpl, x, exp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
long double res = REAL(frexpl)(x, exp);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
return res;
@@ -224,6 +347,9 @@ INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, read, fd, ptr, count);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(read)(fd, ptr, count);
if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
@@ -239,6 +365,9 @@ INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pread, fd, ptr, count, offset);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(pread)(fd, ptr, count, offset);
if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
@@ -254,6 +383,9 @@ INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pread64, fd, ptr, count, offset);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(pread64)(fd, ptr, count, offset);
if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
@@ -432,9 +564,11 @@ INTERCEPTOR(int, prctl, int option, unsigned long arg2,
INTERCEPTOR(unsigned long, time, unsigned long *t) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, time, t);
- unsigned long res = REAL(time)(t);
+ unsigned long local_t;
+ unsigned long res = REAL(time)(&local_t);
if (t && res != (unsigned long)-1) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, t, sizeof(*t));
+ *t = local_t;
}
return res;
}
@@ -449,7 +583,7 @@ static void unpoison_tm(void *ctx, __sanitizer_tm *tm) {
if (tm->tm_zone) {
// Can not use COMMON_INTERCEPTOR_WRITE_RANGE here, because tm->tm_zone
// can point to shared memory and tsan would report a data race.
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, tm->tm_zone,
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(tm->tm_zone,
REAL(strlen(tm->tm_zone)) + 1);
}
}
@@ -496,6 +630,9 @@ INTERCEPTOR(__sanitizer_tm *, gmtime_r, unsigned long *timep, void *result) {
INTERCEPTOR(char *, ctime, unsigned long *timep) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ctime, timep);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(ctime)(timep);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
@@ -506,6 +643,9 @@ INTERCEPTOR(char *, ctime, unsigned long *timep) {
INTERCEPTOR(char *, ctime_r, unsigned long *timep, char *result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ctime_r, timep, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(ctime_r)(timep, result);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
@@ -516,6 +656,9 @@ INTERCEPTOR(char *, ctime_r, unsigned long *timep, char *result) {
INTERCEPTOR(char *, asctime, __sanitizer_tm *tm) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, asctime, tm);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(asctime)(tm);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm));
@@ -526,6 +669,9 @@ INTERCEPTOR(char *, asctime, __sanitizer_tm *tm) {
INTERCEPTOR(char *, asctime_r, __sanitizer_tm *tm, char *result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, asctime_r, tm, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(asctime_r)(tm, result);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm));
@@ -533,6 +679,20 @@ INTERCEPTOR(char *, asctime_r, __sanitizer_tm *tm, char *result) {
}
return res;
}
+INTERCEPTOR(long, mktime, __sanitizer_tm *tm) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, mktime, tm);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_sec, sizeof(tm->tm_sec));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_min, sizeof(tm->tm_min));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_hour, sizeof(tm->tm_hour));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_mday, sizeof(tm->tm_mday));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_mon, sizeof(tm->tm_mon));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_year, sizeof(tm->tm_year));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_isdst, sizeof(tm->tm_isdst));
+ long res = REAL(mktime)(tm);
+ if (res != -1) unpoison_tm(ctx, tm);
+ return res;
+}
#define INIT_LOCALTIME_AND_FRIENDS \
COMMON_INTERCEPT_FUNCTION(localtime); \
COMMON_INTERCEPT_FUNCTION(localtime_r); \
@@ -541,7 +701,8 @@ INTERCEPTOR(char *, asctime_r, __sanitizer_tm *tm, char *result) {
COMMON_INTERCEPT_FUNCTION(ctime); \
COMMON_INTERCEPT_FUNCTION(ctime_r); \
COMMON_INTERCEPT_FUNCTION(asctime); \
- COMMON_INTERCEPT_FUNCTION(asctime_r);
+ COMMON_INTERCEPT_FUNCTION(asctime_r); \
+ COMMON_INTERCEPT_FUNCTION(mktime);
#else
#define INIT_LOCALTIME_AND_FRIENDS
#endif // SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS
@@ -552,6 +713,9 @@ INTERCEPTOR(char *, strptime, char *s, char *format, __sanitizer_tm *tm) {
COMMON_INTERCEPTOR_ENTER(ctx, strptime, s, format, tm);
if (format)
COMMON_INTERCEPTOR_READ_RANGE(ctx, format, REAL(strlen)(format) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(strptime)(s, format, tm);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, s, res - s);
@@ -567,9 +731,23 @@ INTERCEPTOR(char *, strptime, char *s, char *format, __sanitizer_tm *tm) {
#define INIT_STRPTIME
#endif
-#if SANITIZER_INTERCEPT_SCANF
+#if SANITIZER_INTERCEPT_SCANF || SANITIZER_INTERCEPT_PRINTF
+#include "sanitizer_common_interceptors_format.inc"
+
+#define FORMAT_INTERCEPTOR_IMPL(name, vname, ...) \
+ { \
+ void *ctx; \
+ va_list ap; \
+ va_start(ap, format); \
+ COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__, ap); \
+ int res = WRAP(vname)(__VA_ARGS__, ap); \
+ va_end(ap); \
+ return res; \
+ }
-#include "sanitizer_common_interceptors_scanf.inc"
+#endif
+
+#if SANITIZER_INTERCEPT_SCANF
#define VSCANF_INTERCEPTOR_IMPL(vname, allowGnuMalloc, ...) \
{ \
@@ -605,35 +783,24 @@ INTERCEPTOR(int, __isoc99_vfscanf, void *stream, const char *format, va_list ap)
VSCANF_INTERCEPTOR_IMPL(__isoc99_vfscanf, false, stream, format, ap)
#endif // SANITIZER_INTERCEPT_ISOC99_SCANF
-#define SCANF_INTERCEPTOR_IMPL(name, vname, ...) \
- { \
- void *ctx; \
- va_list ap; \
- va_start(ap, format); \
- COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__, ap); \
- int res = vname(__VA_ARGS__, ap); \
- va_end(ap); \
- return res; \
- }
-
INTERCEPTOR(int, scanf, const char *format, ...)
-SCANF_INTERCEPTOR_IMPL(scanf, vscanf, format)
+FORMAT_INTERCEPTOR_IMPL(scanf, vscanf, format)
INTERCEPTOR(int, fscanf, void *stream, const char *format, ...)
-SCANF_INTERCEPTOR_IMPL(fscanf, vfscanf, stream, format)
+FORMAT_INTERCEPTOR_IMPL(fscanf, vfscanf, stream, format)
INTERCEPTOR(int, sscanf, const char *str, const char *format, ...)
-SCANF_INTERCEPTOR_IMPL(sscanf, vsscanf, str, format)
+FORMAT_INTERCEPTOR_IMPL(sscanf, vsscanf, str, format)
#if SANITIZER_INTERCEPT_ISOC99_SCANF
INTERCEPTOR(int, __isoc99_scanf, const char *format, ...)
-SCANF_INTERCEPTOR_IMPL(__isoc99_scanf, __isoc99_vscanf, format)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_scanf, __isoc99_vscanf, format)
INTERCEPTOR(int, __isoc99_fscanf, void *stream, const char *format, ...)
-SCANF_INTERCEPTOR_IMPL(__isoc99_fscanf, __isoc99_vfscanf, stream, format)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_fscanf, __isoc99_vfscanf, stream, format)
INTERCEPTOR(int, __isoc99_sscanf, const char *str, const char *format, ...)
-SCANF_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
#endif
#endif
@@ -662,6 +829,180 @@ SCANF_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
#define INIT_ISOC99_SCANF
#endif
+#if SANITIZER_INTERCEPT_PRINTF
+
+#define VPRINTF_INTERCEPTOR_ENTER(vname, ...) \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__); \
+ va_list aq; \
+ va_copy(aq, ap);
+
+#define VPRINTF_INTERCEPTOR_RETURN() \
+ va_end(aq);
+
+#define VPRINTF_INTERCEPTOR_IMPL(vname, ...) \
+ { \
+ VPRINTF_INTERCEPTOR_ENTER(vname, __VA_ARGS__); \
+ if (common_flags()->check_printf) \
+ printf_common(ctx, format, aq); \
+ int res = REAL(vname)(__VA_ARGS__); \
+ VPRINTF_INTERCEPTOR_RETURN(); \
+ return res; \
+ }
+
+// FIXME: under ASan the REAL() call below may write to freed memory and
+// corrupt its metadata. See
+// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+#define VSPRINTF_INTERCEPTOR_IMPL(vname, str, ...) \
+ { \
+ VPRINTF_INTERCEPTOR_ENTER(vname, str, __VA_ARGS__) \
+ if (common_flags()->check_printf) { \
+ printf_common(ctx, format, aq); \
+ } \
+ int res = REAL(vname)(str, __VA_ARGS__); \
+ if (res >= 0) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, str, res + 1); \
+ } \
+ VPRINTF_INTERCEPTOR_RETURN(); \
+ return res; \
+ }
+
+// FIXME: under ASan the REAL() call below may write to freed memory and
+// corrupt its metadata. See
+// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+#define VSNPRINTF_INTERCEPTOR_IMPL(vname, str, size, ...) \
+ { \
+ VPRINTF_INTERCEPTOR_ENTER(vname, str, size, __VA_ARGS__) \
+ if (common_flags()->check_printf) { \
+ printf_common(ctx, format, aq); \
+ } \
+ int res = REAL(vname)(str, size, __VA_ARGS__); \
+ if (res >= 0) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, str, Min(size, (SIZE_T)(res + 1))); \
+ } \
+ VPRINTF_INTERCEPTOR_RETURN(); \
+ return res; \
+ }
+
+// FIXME: under ASan the REAL() call below may write to freed memory and
+// corrupt its metadata. See
+// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+#define VASPRINTF_INTERCEPTOR_IMPL(vname, strp, ...) \
+ { \
+ VPRINTF_INTERCEPTOR_ENTER(vname, strp, __VA_ARGS__) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, strp, sizeof(char *)); \
+ if (common_flags()->check_printf) { \
+ printf_common(ctx, format, aq); \
+ } \
+ int res = REAL(vname)(strp, __VA_ARGS__); \
+ if (res >= 0) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *strp, res + 1); \
+ } \
+ VPRINTF_INTERCEPTOR_RETURN(); \
+ return res; \
+ }
+
+INTERCEPTOR(int, vprintf, const char *format, va_list ap)
+VPRINTF_INTERCEPTOR_IMPL(vprintf, format, ap)
+
+INTERCEPTOR(int, vfprintf, __sanitizer_FILE *stream, const char *format,
+ va_list ap)
+VPRINTF_INTERCEPTOR_IMPL(vfprintf, stream, format, ap)
+
+INTERCEPTOR(int, vsnprintf, char *str, SIZE_T size, const char *format,
+ va_list ap)
+VSNPRINTF_INTERCEPTOR_IMPL(vsnprintf, str, size, format, ap)
+
+INTERCEPTOR(int, vsprintf, char *str, const char *format, va_list ap)
+VSPRINTF_INTERCEPTOR_IMPL(vsprintf, str, format, ap)
+
+INTERCEPTOR(int, vasprintf, char **strp, const char *format, va_list ap)
+VASPRINTF_INTERCEPTOR_IMPL(vasprintf, strp, format, ap)
+
+#if SANITIZER_INTERCEPT_ISOC99_PRINTF
+INTERCEPTOR(int, __isoc99_vprintf, const char *format, va_list ap)
+VPRINTF_INTERCEPTOR_IMPL(__isoc99_vprintf, format, ap)
+
+INTERCEPTOR(int, __isoc99_vfprintf, __sanitizer_FILE *stream,
+ const char *format, va_list ap)
+VPRINTF_INTERCEPTOR_IMPL(__isoc99_vfprintf, stream, format, ap)
+
+INTERCEPTOR(int, __isoc99_vsnprintf, char *str, SIZE_T size, const char *format,
+ va_list ap)
+VSNPRINTF_INTERCEPTOR_IMPL(__isoc99_vsnprintf, str, size, format, ap)
+
+INTERCEPTOR(int, __isoc99_vsprintf, char *str, const char *format,
+ va_list ap)
+VSPRINTF_INTERCEPTOR_IMPL(__isoc99_vsprintf, str, format, ap)
+
+#endif // SANITIZER_INTERCEPT_ISOC99_PRINTF
+
+INTERCEPTOR(int, printf, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(printf, vprintf, format)
+
+INTERCEPTOR(int, fprintf, __sanitizer_FILE *stream, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(fprintf, vfprintf, stream, format)
+
+INTERCEPTOR(int, sprintf, char *str, const char *format, ...) // NOLINT
+FORMAT_INTERCEPTOR_IMPL(sprintf, vsprintf, str, format) // NOLINT
+
+INTERCEPTOR(int, snprintf, char *str, SIZE_T size, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(snprintf, vsnprintf, str, size, format)
+
+INTERCEPTOR(int, asprintf, char **strp, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(asprintf, vasprintf, strp, format)
+
+#if SANITIZER_INTERCEPT_ISOC99_PRINTF
+INTERCEPTOR(int, __isoc99_printf, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_printf, __isoc99_vprintf, format)
+
+INTERCEPTOR(int, __isoc99_fprintf, __sanitizer_FILE *stream, const char *format,
+ ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_fprintf, __isoc99_vfprintf, stream, format)
+
+INTERCEPTOR(int, __isoc99_sprintf, char *str, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_sprintf, __isoc99_vsprintf, str, format)
+
+INTERCEPTOR(int, __isoc99_snprintf, char *str, SIZE_T size,
+ const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_snprintf, __isoc99_vsnprintf, str, size,
+ format)
+
+#endif // SANITIZER_INTERCEPT_ISOC99_PRINTF
+
+#endif // SANITIZER_INTERCEPT_PRINTF
+
+#if SANITIZER_INTERCEPT_PRINTF
+#define INIT_PRINTF \
+ COMMON_INTERCEPT_FUNCTION(printf); \
+ COMMON_INTERCEPT_FUNCTION(sprintf); \
+ COMMON_INTERCEPT_FUNCTION(snprintf); \
+ COMMON_INTERCEPT_FUNCTION(asprintf); \
+ COMMON_INTERCEPT_FUNCTION(fprintf); \
+ COMMON_INTERCEPT_FUNCTION(vprintf); \
+ COMMON_INTERCEPT_FUNCTION(vsprintf); \
+ COMMON_INTERCEPT_FUNCTION(vsnprintf); \
+ COMMON_INTERCEPT_FUNCTION(vasprintf); \
+ COMMON_INTERCEPT_FUNCTION(vfprintf);
+#else
+#define INIT_PRINTF
+#endif
+
+#if SANITIZER_INTERCEPT_ISOC99_PRINTF
+#define INIT_ISOC99_PRINTF \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_printf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_sprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_snprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_fprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vsprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vsnprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vfprintf);
+#else
+#define INIT_ISOC99_PRINTF
+#endif
+
#if SANITIZER_INTERCEPT_IOCTL
#include "sanitizer_common_interceptors_ioctl.inc"
INTERCEPTOR(int, ioctl, int d, unsigned request, void *arg) {
@@ -675,7 +1016,14 @@ INTERCEPTOR(int, ioctl, int d, unsigned request, void *arg) {
if (!common_flags()->handle_ioctl) return REAL(ioctl)(d, request, arg);
const ioctl_desc *desc = ioctl_lookup(request);
- if (!desc) Printf("WARNING: unknown ioctl %x\n", request);
+ ioctl_desc decoded_desc;
+ if (!desc) {
+ VPrintf(2, "Decoding unknown ioctl 0x%x\n", request);
+ if (!ioctl_decode(request, &decoded_desc))
+ Printf("WARNING: failed decoding unknown ioctl 0x%x\n", request);
+ else
+ desc = &decoded_desc;
+ }
if (desc) ioctl_common_pre(ctx, desc, d, request, arg);
int res = REAL(ioctl)(d, request, arg);
@@ -690,35 +1038,88 @@ INTERCEPTOR(int, ioctl, int d, unsigned request, void *arg) {
#define INIT_IOCTL
#endif
+#if SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS || \
+ SANITIZER_INTERCEPT_GETPWENT || SANITIZER_INTERCEPT_FGETPWENT || \
+ SANITIZER_INTERCEPT_GETPWENT_R || SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
+static void unpoison_passwd(void *ctx, __sanitizer_passwd *pwd) {
+ if (pwd) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, sizeof(*pwd));
+ if (pwd->pw_name)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_name,
+ REAL(strlen)(pwd->pw_name) + 1);
+ if (pwd->pw_passwd)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_passwd,
+ REAL(strlen)(pwd->pw_passwd) + 1);
+#if !SANITIZER_ANDROID
+ if (pwd->pw_gecos)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_gecos,
+ REAL(strlen)(pwd->pw_gecos) + 1);
+#endif
+#if SANITIZER_MAC
+ if (pwd->pw_class)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_class,
+ REAL(strlen)(pwd->pw_class) + 1);
+#endif
+ if (pwd->pw_dir)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_dir,
+ REAL(strlen)(pwd->pw_dir) + 1);
+ if (pwd->pw_shell)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_shell,
+ REAL(strlen)(pwd->pw_shell) + 1);
+ }
+}
+
+static void unpoison_group(void *ctx, __sanitizer_group *grp) {
+ if (grp) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp, sizeof(*grp));
+ if (grp->gr_name)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(grp->gr_name,
+ REAL(strlen)(grp->gr_name) + 1);
+ if (grp->gr_passwd)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(grp->gr_passwd,
+ REAL(strlen)(grp->gr_passwd) + 1);
+ char **p = grp->gr_mem;
+ for (; *p; ++p) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*p, REAL(strlen)(*p) + 1);
+ }
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(grp->gr_mem,
+ (p - grp->gr_mem + 1) * sizeof(*p));
+ }
+}
+#endif // SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS ||
+ // SANITIZER_INTERCEPT_GETPWENT || SANITIZER_INTERCEPT_FGETPWENT ||
+ // SANITIZER_INTERCEPT_GETPWENT_R ||
+ // SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
+
#if SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
-INTERCEPTOR(void *, getpwnam, const char *name) {
+INTERCEPTOR(__sanitizer_passwd *, getpwnam, const char *name) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpwnam, name);
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
- void *res = REAL(getpwnam)(name);
- if (res != 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_passwd_sz);
+ __sanitizer_passwd *res = REAL(getpwnam)(name);
+ if (res != 0) unpoison_passwd(ctx, res);
return res;
}
-INTERCEPTOR(void *, getpwuid, u32 uid) {
+INTERCEPTOR(__sanitizer_passwd *, getpwuid, u32 uid) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpwuid, uid);
- void *res = REAL(getpwuid)(uid);
- if (res != 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_passwd_sz);
+ __sanitizer_passwd *res = REAL(getpwuid)(uid);
+ if (res != 0) unpoison_passwd(ctx, res);
return res;
}
-INTERCEPTOR(void *, getgrnam, const char *name) {
+INTERCEPTOR(__sanitizer_group *, getgrnam, const char *name) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgrnam, name);
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
- void *res = REAL(getgrnam)(name);
- if (res != 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_group_sz);
+ __sanitizer_group *res = REAL(getgrnam)(name);
+ if (res != 0) unpoison_group(ctx, res);
return res;
}
-INTERCEPTOR(void *, getgrgid, u32 gid) {
+INTERCEPTOR(__sanitizer_group *, getgrgid, u32 gid) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgrgid, gid);
- void *res = REAL(getgrgid)(gid);
- if (res != 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_group_sz);
+ __sanitizer_group *res = REAL(getgrgid)(gid);
+ if (res != 0) unpoison_group(ctx, res);
return res;
}
#define INIT_GETPWNAM_AND_FRIENDS \
@@ -731,50 +1132,66 @@ INTERCEPTOR(void *, getgrgid, u32 gid) {
#endif
#if SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
-INTERCEPTOR(int, getpwnam_r, const char *name, void *pwd, char *buf,
- SIZE_T buflen, void **result) {
+INTERCEPTOR(int, getpwnam_r, const char *name, __sanitizer_passwd *pwd,
+ char *buf, SIZE_T buflen, __sanitizer_passwd **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpwnam_r, name, pwd, buf, buflen, result);
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getpwnam_r)(name, pwd, buf, buflen, result);
if (!res) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, struct_passwd_sz);
+ if (result && *result) unpoison_passwd(ctx, *result);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
}
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
return res;
}
-INTERCEPTOR(int, getpwuid_r, u32 uid, void *pwd, char *buf, SIZE_T buflen,
- void **result) {
+INTERCEPTOR(int, getpwuid_r, u32 uid, __sanitizer_passwd *pwd, char *buf,
+ SIZE_T buflen, __sanitizer_passwd **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpwuid_r, uid, pwd, buf, buflen, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getpwuid_r)(uid, pwd, buf, buflen, result);
if (!res) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, struct_passwd_sz);
+ if (result && *result) unpoison_passwd(ctx, *result);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
}
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
return res;
}
-INTERCEPTOR(int, getgrnam_r, const char *name, void *grp, char *buf,
- SIZE_T buflen, void **result) {
+INTERCEPTOR(int, getgrnam_r, const char *name, __sanitizer_group *grp,
+ char *buf, SIZE_T buflen, __sanitizer_group **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgrnam_r, name, grp, buf, buflen, result);
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getgrnam_r)(name, grp, buf, buflen, result);
if (!res) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp, struct_group_sz);
+ if (result && *result) unpoison_group(ctx, *result);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
}
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
return res;
}
-INTERCEPTOR(int, getgrgid_r, u32 gid, void *grp, char *buf, SIZE_T buflen,
- void **result) {
+INTERCEPTOR(int, getgrgid_r, u32 gid, __sanitizer_group *grp, char *buf,
+ SIZE_T buflen, __sanitizer_group **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgrgid_r, gid, grp, buf, buflen, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getgrgid_r)(gid, grp, buf, buflen, result);
if (!res) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp, struct_group_sz);
+ if (result && *result) unpoison_group(ctx, *result);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
}
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
return res;
}
#define INIT_GETPWNAM_R_AND_FRIENDS \
@@ -786,10 +1203,160 @@ INTERCEPTOR(int, getgrgid_r, u32 gid, void *grp, char *buf, SIZE_T buflen,
#define INIT_GETPWNAM_R_AND_FRIENDS
#endif
+#if SANITIZER_INTERCEPT_GETPWENT
+INTERCEPTOR(__sanitizer_passwd *, getpwent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwent, dummy);
+ __sanitizer_passwd *res = REAL(getpwent)(dummy);
+ if (res != 0) unpoison_passwd(ctx, res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_group *, getgrent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrent, dummy);
+ __sanitizer_group *res = REAL(getgrent)(dummy);
+  if (res != 0) unpoison_group(ctx, res);
+ return res;
+}
+#define INIT_GETPWENT \
+ COMMON_INTERCEPT_FUNCTION(getpwent); \
+ COMMON_INTERCEPT_FUNCTION(getgrent);
+#else
+#define INIT_GETPWENT
+#endif
+
+#if SANITIZER_INTERCEPT_FGETPWENT
+INTERCEPTOR(__sanitizer_passwd *, fgetpwent, void *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetpwent, fp);
+ __sanitizer_passwd *res = REAL(fgetpwent)(fp);
+ if (res != 0) unpoison_passwd(ctx, res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_group *, fgetgrent, void *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetgrent, fp);
+ __sanitizer_group *res = REAL(fgetgrent)(fp);
+ if (res != 0) unpoison_group(ctx, res);
+ return res;
+}
+#define INIT_FGETPWENT \
+ COMMON_INTERCEPT_FUNCTION(fgetpwent); \
+ COMMON_INTERCEPT_FUNCTION(fgetgrent);
+#else
+#define INIT_FGETPWENT
+#endif
+
+#if SANITIZER_INTERCEPT_GETPWENT_R
+INTERCEPTOR(int, getpwent_r, __sanitizer_passwd *pwbuf, char *buf,
+ SIZE_T buflen, __sanitizer_passwd **pwbufp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwent_r, pwbuf, buf, buflen, pwbufp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(getpwent_r)(pwbuf, buf, buflen, pwbufp);
+ if (!res) {
+ if (pwbufp && *pwbufp) unpoison_passwd(ctx, *pwbufp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
+ }
+ if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));
+ return res;
+}
+INTERCEPTOR(int, fgetpwent_r, void *fp, __sanitizer_passwd *pwbuf, char *buf,
+ SIZE_T buflen, __sanitizer_passwd **pwbufp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetpwent_r, fp, pwbuf, buf, buflen, pwbufp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(fgetpwent_r)(fp, pwbuf, buf, buflen, pwbufp);
+ if (!res) {
+ if (pwbufp && *pwbufp) unpoison_passwd(ctx, *pwbufp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
+ }
+ if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));
+ return res;
+}
+INTERCEPTOR(int, getgrent_r, __sanitizer_group *pwbuf, char *buf, SIZE_T buflen,
+ __sanitizer_group **pwbufp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrent_r, pwbuf, buf, buflen, pwbufp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(getgrent_r)(pwbuf, buf, buflen, pwbufp);
+ if (!res) {
+ if (pwbufp && *pwbufp) unpoison_group(ctx, *pwbufp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
+ }
+ if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));
+ return res;
+}
+INTERCEPTOR(int, fgetgrent_r, void *fp, __sanitizer_group *pwbuf, char *buf,
+ SIZE_T buflen, __sanitizer_group **pwbufp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetgrent_r, fp, pwbuf, buf, buflen, pwbufp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(fgetgrent_r)(fp, pwbuf, buf, buflen, pwbufp);
+ if (!res) {
+ if (pwbufp && *pwbufp) unpoison_group(ctx, *pwbufp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
+ }
+ if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));
+ return res;
+}
+#define INIT_GETPWENT_R \
+ COMMON_INTERCEPT_FUNCTION(getpwent_r); \
+ COMMON_INTERCEPT_FUNCTION(fgetpwent_r); \
+ COMMON_INTERCEPT_FUNCTION(getgrent_r); \
+ COMMON_INTERCEPT_FUNCTION(fgetgrent_r);
+#else
+#define INIT_GETPWENT_R
+#endif
+
+#if SANITIZER_INTERCEPT_SETPWENT
+// The only thing these interceptors do is disable any nested interceptors.
+// These functions may open nss modules and call uninstrumented functions from
+// them, and we don't want things like strlen() to trigger.
+INTERCEPTOR(void, setpwent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setpwent, dummy);
+ REAL(setpwent)(dummy);
+}
+INTERCEPTOR(void, endpwent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, endpwent, dummy);
+ REAL(endpwent)(dummy);
+}
+INTERCEPTOR(void, setgrent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setgrent, dummy);
+ REAL(setgrent)(dummy);
+}
+INTERCEPTOR(void, endgrent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, endgrent, dummy);
+ REAL(endgrent)(dummy);
+}
+#define INIT_SETPWENT \
+ COMMON_INTERCEPT_FUNCTION(setpwent); \
+ COMMON_INTERCEPT_FUNCTION(endpwent); \
+ COMMON_INTERCEPT_FUNCTION(setgrent); \
+ COMMON_INTERCEPT_FUNCTION(endgrent);
+#else
+#define INIT_SETPWENT
+#endif
+
#if SANITIZER_INTERCEPT_CLOCK_GETTIME
INTERCEPTOR(int, clock_getres, u32 clk_id, void *tp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, clock_getres, clk_id, tp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(clock_getres)(clk_id, tp);
if (!res && tp) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, struct_timespec_sz);
@@ -799,6 +1366,9 @@ INTERCEPTOR(int, clock_getres, u32 clk_id, void *tp) {
INTERCEPTOR(int, clock_gettime, u32 clk_id, void *tp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, clock_gettime, clk_id, tp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(clock_gettime)(clk_id, tp);
if (!res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, struct_timespec_sz);
@@ -823,6 +1393,9 @@ INTERCEPTOR(int, clock_settime, u32 clk_id, const void *tp) {
INTERCEPTOR(int, getitimer, int which, void *curr_value) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getitimer, which, curr_value);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getitimer)(which, curr_value);
if (!res && curr_value) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, struct_itimerval_sz);
@@ -834,6 +1407,9 @@ INTERCEPTOR(int, setitimer, int which, const void *new_value, void *old_value) {
COMMON_INTERCEPTOR_ENTER(ctx, setitimer, which, new_value, old_value);
if (new_value)
COMMON_INTERCEPTOR_READ_RANGE(ctx, new_value, struct_itimerval_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(setitimer)(which, new_value, old_value);
if (!res && old_value) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, old_value, struct_itimerval_sz);
@@ -861,34 +1437,33 @@ static void unpoison_glob_t(void *ctx, __sanitizer_glob_t *pglob) {
}
static THREADLOCAL __sanitizer_glob_t *pglob_copy;
-static THREADLOCAL void *glob_ctx;
static void wrapped_gl_closedir(void *dir) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(glob_ctx, 1);
- pglob_copy->gl_closedir(dir);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ IndirectExternCall(pglob_copy->gl_closedir)(dir);
}
static void *wrapped_gl_readdir(void *dir) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(glob_ctx, 1);
- return pglob_copy->gl_readdir(dir);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ return IndirectExternCall(pglob_copy->gl_readdir)(dir);
}
static void *wrapped_gl_opendir(const char *s) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(glob_ctx, 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(glob_ctx, s, REAL(strlen)(s) + 1);
- return pglob_copy->gl_opendir(s);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+ return IndirectExternCall(pglob_copy->gl_opendir)(s);
}
static int wrapped_gl_lstat(const char *s, void *st) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(glob_ctx, 2);
- COMMON_INTERCEPTOR_WRITE_RANGE(glob_ctx, s, REAL(strlen)(s) + 1);
- return pglob_copy->gl_lstat(s, st);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+ return IndirectExternCall(pglob_copy->gl_lstat)(s, st);
}
static int wrapped_gl_stat(const char *s, void *st) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(glob_ctx, 2);
- COMMON_INTERCEPTOR_WRITE_RANGE(glob_ctx, s, REAL(strlen)(s) + 1);
- return pglob_copy->gl_stat(s, st);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+ return IndirectExternCall(pglob_copy->gl_stat)(s, st);
}
INTERCEPTOR(int, glob, const char *pattern, int flags,
@@ -907,7 +1482,6 @@ INTERCEPTOR(int, glob, const char *pattern, int flags,
Swap(pglob->gl_lstat, glob_copy.gl_lstat);
Swap(pglob->gl_stat, glob_copy.gl_stat);
pglob_copy = &glob_copy;
- glob_ctx = ctx;
}
int res = REAL(glob)(pattern, flags, errfunc, pglob);
if (flags & glob_altdirfunc) {
@@ -918,7 +1492,6 @@ INTERCEPTOR(int, glob, const char *pattern, int flags,
Swap(pglob->gl_stat, glob_copy.gl_stat);
}
pglob_copy = 0;
- glob_ctx = 0;
if ((!res || res == glob_nomatch) && pglob) unpoison_glob_t(ctx, pglob);
return res;
}
@@ -939,7 +1512,6 @@ INTERCEPTOR(int, glob64, const char *pattern, int flags,
Swap(pglob->gl_lstat, glob_copy.gl_lstat);
Swap(pglob->gl_stat, glob_copy.gl_stat);
pglob_copy = &glob_copy;
- glob_ctx = ctx;
}
int res = REAL(glob64)(pattern, flags, errfunc, pglob);
if (flags & glob_altdirfunc) {
@@ -950,7 +1522,6 @@ INTERCEPTOR(int, glob64, const char *pattern, int flags,
Swap(pglob->gl_stat, glob_copy.gl_stat);
}
pglob_copy = 0;
- glob_ctx = 0;
if ((!res || res == glob_nomatch) && pglob) unpoison_glob_t(ctx, pglob);
return res;
}
@@ -968,15 +1539,27 @@ INTERCEPTOR(int, glob64, const char *pattern, int flags,
INTERCEPTOR_WITH_SUFFIX(int, wait, int *status) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wait, status);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(wait)(status);
if (res != -1 && status)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
return res;
}
+// On FreeBSD id_t is always 64-bit wide.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
+INTERCEPTOR_WITH_SUFFIX(int, waitid, int idtype, long long id, void *infop,
+ int options) {
+#else
INTERCEPTOR_WITH_SUFFIX(int, waitid, int idtype, int id, void *infop,
int options) {
+#endif
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, waitid, idtype, id, infop, options);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(waitid)(idtype, id, infop, options);
if (res != -1 && infop)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, infop, siginfo_t_sz);
@@ -985,6 +1568,9 @@ INTERCEPTOR_WITH_SUFFIX(int, waitid, int idtype, int id, void *infop,
INTERCEPTOR_WITH_SUFFIX(int, waitpid, int pid, int *status, int options) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, waitpid, pid, status, options);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(waitpid)(pid, status, options);
if (res != -1 && status)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
@@ -993,6 +1579,9 @@ INTERCEPTOR_WITH_SUFFIX(int, waitpid, int pid, int *status, int options) {
INTERCEPTOR(int, wait3, int *status, int options, void *rusage) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wait3, status, options, rusage);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(wait3)(status, options, rusage);
if (res != -1) {
if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
@@ -1000,9 +1589,28 @@ INTERCEPTOR(int, wait3, int *status, int options, void *rusage) {
}
return res;
}
+#if SANITIZER_ANDROID
+INTERCEPTOR(int, __wait4, int pid, int *status, int options, void *rusage) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __wait4, pid, status, options, rusage);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(__wait4)(pid, status, options, rusage);
+ if (res != -1) {
+ if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
+ if (rusage) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz);
+ }
+ return res;
+}
+#define INIT_WAIT4 COMMON_INTERCEPT_FUNCTION(__wait4);
+#else
INTERCEPTOR(int, wait4, int pid, int *status, int options, void *rusage) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wait4, pid, status, options, rusage);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(wait4)(pid, status, options, rusage);
if (res != -1) {
if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
@@ -1010,14 +1618,16 @@ INTERCEPTOR(int, wait4, int pid, int *status, int options, void *rusage) {
}
return res;
}
+#define INIT_WAIT4 COMMON_INTERCEPT_FUNCTION(wait4);
+#endif // SANITIZER_ANDROID
#define INIT_WAIT \
COMMON_INTERCEPT_FUNCTION(wait); \
COMMON_INTERCEPT_FUNCTION(waitid); \
COMMON_INTERCEPT_FUNCTION(waitpid); \
- COMMON_INTERCEPT_FUNCTION(wait3); \
- COMMON_INTERCEPT_FUNCTION(wait4);
+ COMMON_INTERCEPT_FUNCTION(wait3);
#else
#define INIT_WAIT
+#define INIT_WAIT4
#endif
#if SANITIZER_INTERCEPT_INET
@@ -1027,6 +1637,9 @@ INTERCEPTOR(char *, inet_ntop, int af, const void *src, char *dst, u32 size) {
uptr sz = __sanitizer_in_addr_sz(af);
if (sz) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sz);
// FIXME: figure out read size based on the address family.
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(inet_ntop)(af, src, dst, size);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
return res;
@@ -1035,6 +1648,9 @@ INTERCEPTOR(int, inet_pton, int af, const char *src, void *dst) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, inet_pton, af, src, dst);
// FIXME: figure out read size based on the address family.
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(inet_pton)(af, src, dst);
if (res == 1) {
uptr sz = __sanitizer_in_addr_sz(af);
@@ -1054,6 +1670,9 @@ INTERCEPTOR(int, inet_aton, const char *cp, void *dst) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, inet_aton, cp, dst);
if (cp) COMMON_INTERCEPTOR_READ_RANGE(ctx, cp, REAL(strlen)(cp) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(inet_aton)(cp, dst);
if (res != 0) {
uptr sz = __sanitizer_in_addr_sz(af_inet);
@@ -1070,6 +1689,9 @@ INTERCEPTOR(int, inet_aton, const char *cp, void *dst) {
INTERCEPTOR(int, pthread_getschedparam, uptr thread, int *policy, int *param) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pthread_getschedparam, thread, policy, param);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(pthread_getschedparam)(thread, policy, param);
if (res == 0) {
if (policy) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, policy, sizeof(*policy));
@@ -1094,6 +1716,9 @@ INTERCEPTOR(int, getaddrinfo, char *node, char *service,
COMMON_INTERCEPTOR_READ_RANGE(ctx, service, REAL(strlen)(service) + 1);
if (hints)
COMMON_INTERCEPTOR_READ_RANGE(ctx, hints, sizeof(__sanitizer_addrinfo));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getaddrinfo)(node, service, hints, out);
if (res == 0 && out) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, out, sizeof(*out));
@@ -1123,6 +1748,9 @@ INTERCEPTOR(int, getnameinfo, void *sockaddr, unsigned salen, char *host,
serv, servlen, flags);
// FIXME: consider adding READ_RANGE(sockaddr, salen)
// There is padding in in_addr that may make this too noisy
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res =
REAL(getnameinfo)(sockaddr, salen, host, hostlen, serv, servlen, flags);
if (res == 0) {
@@ -1144,6 +1772,9 @@ INTERCEPTOR(int, getsockname, int sock_fd, void *addr, int *addrlen) {
COMMON_INTERCEPTOR_ENTER(ctx, getsockname, sock_fd, addr, addrlen);
COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
int addrlen_in = *addrlen;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getsockname)(sock_fd, addr, addrlen);
if (res == 0) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addrlen_in, *addrlen));
@@ -1221,23 +1852,54 @@ INTERCEPTOR(struct __sanitizer_hostent *, gethostbyname2, char *name, int af) {
#endif
#if SANITIZER_INTERCEPT_GETHOSTBYNAME_R
+INTERCEPTOR(int, gethostbyname_r, char *name, struct __sanitizer_hostent *ret,
+ char *buf, SIZE_T buflen, __sanitizer_hostent **result,
+ int *h_errnop) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname_r, name, ret, buf, buflen, result,
+ h_errnop);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(gethostbyname_r)(name, ret, buf, buflen, result, h_errnop);
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
+ }
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ return res;
+}
+#define INIT_GETHOSTBYNAME_R COMMON_INTERCEPT_FUNCTION(gethostbyname_r);
+#else
+#define INIT_GETHOSTBYNAME_R
+#endif
+
+#if SANITIZER_INTERCEPT_GETHOSTENT_R
INTERCEPTOR(int, gethostent_r, struct __sanitizer_hostent *ret, char *buf,
SIZE_T buflen, __sanitizer_hostent **result, int *h_errnop) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, gethostent_r, ret, buf, buflen, result,
h_errnop);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(gethostent_r)(ret, buf, buflen, result, h_errnop);
- if (res == 0) {
- if (result) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
- if (*result) write_hostent(ctx, *result);
- }
- if (h_errnop)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
}
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
return res;
}
+#define INIT_GETHOSTENT_R \
+ COMMON_INTERCEPT_FUNCTION(gethostent_r);
+#else
+#define INIT_GETHOSTENT_R
+#endif
+#if SANITIZER_INTERCEPT_GETHOSTBYADDR_R
INTERCEPTOR(int, gethostbyaddr_r, void *addr, int len, int type,
struct __sanitizer_hostent *ret, char *buf, SIZE_T buflen,
__sanitizer_hostent **result, int *h_errnop) {
@@ -1245,62 +1907,49 @@ INTERCEPTOR(int, gethostbyaddr_r, void *addr, int len, int type,
COMMON_INTERCEPTOR_ENTER(ctx, gethostbyaddr_r, addr, len, type, ret, buf,
buflen, result, h_errnop);
COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, len);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(gethostbyaddr_r)(addr, len, type, ret, buf, buflen, result,
h_errnop);
- if (res == 0) {
- if (result) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
- if (*result) write_hostent(ctx, *result);
- }
- if (h_errnop)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
- }
- return res;
-}
-
-INTERCEPTOR(int, gethostbyname_r, char *name, struct __sanitizer_hostent *ret,
- char *buf, SIZE_T buflen, __sanitizer_hostent **result,
- int *h_errnop) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname_r, name, ret, buf, buflen, result,
- h_errnop);
- int res = REAL(gethostbyname_r)(name, ret, buf, buflen, result, h_errnop);
- if (res == 0) {
- if (result) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
- if (*result) write_hostent(ctx, *result);
- }
- if (h_errnop)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
}
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
return res;
}
+#define INIT_GETHOSTBYADDR_R \
+ COMMON_INTERCEPT_FUNCTION(gethostbyaddr_r);
+#else
+#define INIT_GETHOSTBYADDR_R
+#endif
+#if SANITIZER_INTERCEPT_GETHOSTBYNAME2_R
INTERCEPTOR(int, gethostbyname2_r, char *name, int af,
struct __sanitizer_hostent *ret, char *buf, SIZE_T buflen,
__sanitizer_hostent **result, int *h_errnop) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname2_r, name, af, ret, buf, buflen,
result, h_errnop);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res =
REAL(gethostbyname2_r)(name, af, ret, buf, buflen, result, h_errnop);
- if (res == 0) {
- if (result) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
- if (*result) write_hostent(ctx, *result);
- }
- if (h_errnop)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
}
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
return res;
}
-#define INIT_GETHOSTBYNAME_R \
- COMMON_INTERCEPT_FUNCTION(gethostent_r); \
- COMMON_INTERCEPT_FUNCTION(gethostbyaddr_r); \
- COMMON_INTERCEPT_FUNCTION(gethostbyname_r); \
+#define INIT_GETHOSTBYNAME2_R \
COMMON_INTERCEPT_FUNCTION(gethostbyname2_r);
#else
-#define INIT_GETHOSTBYNAME_R
+#define INIT_GETHOSTBYNAME2_R
#endif
#if SANITIZER_INTERCEPT_GETSOCKOPT
@@ -1310,6 +1959,9 @@ INTERCEPTOR(int, getsockopt, int sockfd, int level, int optname, void *optval,
COMMON_INTERCEPTOR_ENTER(ctx, getsockopt, sockfd, level, optname, optval,
optlen);
if (optlen) COMMON_INTERCEPTOR_READ_RANGE(ctx, optlen, sizeof(*optlen));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getsockopt)(sockfd, level, optname, optval, optlen);
if (res == 0)
if (optval && optlen) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, optval, *optlen);
@@ -1324,7 +1976,7 @@ INTERCEPTOR(int, getsockopt, int sockfd, int level, int optname, void *optval,
INTERCEPTOR(int, accept, int fd, void *addr, unsigned *addrlen) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, accept, fd, addr, addrlen);
- unsigned addrlen0;
+ unsigned addrlen0 = 0;
if (addrlen) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
addrlen0 = *addrlen;
@@ -1346,11 +1998,14 @@ INTERCEPTOR(int, accept, int fd, void *addr, unsigned *addrlen) {
INTERCEPTOR(int, accept4, int fd, void *addr, unsigned *addrlen, int f) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, accept4, fd, addr, addrlen, f);
- unsigned addrlen0;
+ unsigned addrlen0 = 0;
if (addrlen) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
addrlen0 = *addrlen;
}
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int fd2 = REAL(accept4)(fd, addr, addrlen, f);
if (fd2 >= 0) {
if (fd >= 0) COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, fd2);
@@ -1368,6 +2023,9 @@ INTERCEPTOR(int, accept4, int fd, void *addr, unsigned *addrlen, int f) {
INTERCEPTOR(double, modf, double x, double *iptr) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, modf, x, iptr);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
double res = REAL(modf)(x, iptr);
if (iptr) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
@@ -1377,6 +2035,9 @@ INTERCEPTOR(double, modf, double x, double *iptr) {
INTERCEPTOR(float, modff, float x, float *iptr) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, modff, x, iptr);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
float res = REAL(modff)(x, iptr);
if (iptr) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
@@ -1386,6 +2047,9 @@ INTERCEPTOR(float, modff, float x, float *iptr) {
INTERCEPTOR(long double, modfl, long double x, long double *iptr) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, modfl, x, iptr);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
long double res = REAL(modfl)(x, iptr);
if (iptr) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
@@ -1418,6 +2082,9 @@ INTERCEPTOR(SSIZE_T, recvmsg, int fd, struct __sanitizer_msghdr *msg,
int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, recvmsg, fd, msg, flags);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(recvmsg)(fd, msg, flags);
if (res >= 0) {
if (fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
@@ -1439,6 +2106,9 @@ INTERCEPTOR(int, getpeername, int sockfd, void *addr, unsigned *addrlen) {
COMMON_INTERCEPTOR_ENTER(ctx, getpeername, sockfd, addr, addrlen);
unsigned addr_sz;
if (addrlen) addr_sz = *addrlen;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getpeername)(sockfd, addr, addrlen);
if (!res && addr && addrlen)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addr_sz, *addrlen));
@@ -1452,6 +2122,9 @@ INTERCEPTOR(int, getpeername, int sockfd, void *addr, unsigned *addrlen) {
#if SANITIZER_INTERCEPT_SYSINFO
INTERCEPTOR(int, sysinfo, void *info) {
void *ctx;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
COMMON_INTERCEPTOR_ENTER(ctx, sysinfo, info);
int res = REAL(sysinfo)(info);
if (!res && info)
@@ -1467,6 +2140,9 @@ INTERCEPTOR(int, sysinfo, void *info) {
INTERCEPTOR(__sanitizer_dirent *, readdir, void *dirp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, readdir, dirp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
__sanitizer_dirent *res = REAL(readdir)(dirp);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
return res;
@@ -1476,6 +2152,9 @@ INTERCEPTOR(int, readdir_r, void *dirp, __sanitizer_dirent *entry,
__sanitizer_dirent **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, readdir_r, dirp, entry, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(readdir_r)(dirp, entry, result);
if (!res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
@@ -1496,6 +2175,9 @@ INTERCEPTOR(int, readdir_r, void *dirp, __sanitizer_dirent *entry,
INTERCEPTOR(__sanitizer_dirent64 *, readdir64, void *dirp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, readdir64, dirp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
__sanitizer_dirent64 *res = REAL(readdir64)(dirp);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
return res;
@@ -1505,6 +2187,9 @@ INTERCEPTOR(int, readdir64_r, void *dirp, __sanitizer_dirent64 *entry,
__sanitizer_dirent64 **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, readdir64_r, dirp, entry, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(readdir64_r)(dirp, entry, result);
if (!res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
@@ -1540,10 +2225,13 @@ INTERCEPTOR(uptr, ptrace, int request, int pid, void *addr, void *data) {
}
}
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
uptr res = REAL(ptrace)(request, pid, addr, data);
if (!res && data) {
- // Note that PEEK* requests assing different meaning to the return value.
+ // Note that PEEK* requests assign different meaning to the return value.
// This function does not handle them (nor does it need to).
if (request == ptrace_getregs)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_regs_struct_sz);
@@ -1553,6 +2241,8 @@ INTERCEPTOR(uptr, ptrace, int request, int pid, void *addr, void *data) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_fpxregs_struct_sz);
else if (request == ptrace_getsiginfo)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, siginfo_t_sz);
+ else if (request == ptrace_geteventmsg)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, sizeof(unsigned long));
else if (request == ptrace_getregset) {
__sanitizer_iovec *iov = (__sanitizer_iovec *)data;
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iov->iov_base, iov->iov_len);
@@ -1586,6 +2276,9 @@ INTERCEPTOR(char *, setlocale, int category, char *locale) {
INTERCEPTOR(char *, getcwd, char *buf, SIZE_T size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getcwd, buf, size);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(getcwd)(buf, size);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
return res;
@@ -1599,6 +2292,9 @@ INTERCEPTOR(char *, getcwd, char *buf, SIZE_T size) {
INTERCEPTOR(char *, get_current_dir_name, int fake) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, get_current_dir_name, fake);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(get_current_dir_name)(fake);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
return res;
@@ -1614,6 +2310,9 @@ INTERCEPTOR(char *, get_current_dir_name, int fake) {
INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strtoimax, nptr, endptr, base);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
INTMAX_T res = REAL(strtoimax)(nptr, endptr, base);
if (endptr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, endptr, sizeof(*endptr));
return res;
@@ -1622,6 +2321,9 @@ INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) {
INTERCEPTOR(INTMAX_T, strtoumax, const char *nptr, char **endptr, int base) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strtoumax, nptr, endptr, base);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
INTMAX_T res = REAL(strtoumax)(nptr, endptr, base);
if (endptr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, endptr, sizeof(*endptr));
return res;
@@ -1638,6 +2340,9 @@ INTERCEPTOR(INTMAX_T, strtoumax, const char *nptr, char **endptr, int base) {
INTERCEPTOR(SIZE_T, mbstowcs, wchar_t *dest, const char *src, SIZE_T len) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, mbstowcs, dest, src, len);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(mbstowcs)(dest, src, len);
if (res != (SIZE_T) - 1 && dest) {
SIZE_T write_cnt = res + (res < len);
@@ -1652,6 +2357,9 @@ INTERCEPTOR(SIZE_T, mbsrtowcs, wchar_t *dest, const char **src, SIZE_T len,
COMMON_INTERCEPTOR_ENTER(ctx, mbsrtowcs, dest, src, len, ps);
if (src) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(mbsrtowcs)(dest, src, len, ps);
if (res != (SIZE_T)(-1) && dest && src) {
// This function, and several others, may or may not write the terminating
@@ -1679,6 +2387,9 @@ INTERCEPTOR(SIZE_T, mbsnrtowcs, wchar_t *dest, const char **src, SIZE_T nms,
if (nms) COMMON_INTERCEPTOR_READ_RANGE(ctx, *src, nms);
}
if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(mbsnrtowcs)(dest, src, nms, len, ps);
if (res != (SIZE_T)(-1) && dest && src) {
SIZE_T write_cnt = res + !*src;
@@ -1696,6 +2407,9 @@ INTERCEPTOR(SIZE_T, mbsnrtowcs, wchar_t *dest, const char **src, SIZE_T nms,
INTERCEPTOR(SIZE_T, wcstombs, char *dest, const wchar_t *src, SIZE_T len) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wcstombs, dest, src, len);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(wcstombs)(dest, src, len);
if (res != (SIZE_T) - 1 && dest) {
SIZE_T write_cnt = res + (res < len);
@@ -1710,6 +2424,9 @@ INTERCEPTOR(SIZE_T, wcsrtombs, char *dest, const wchar_t **src, SIZE_T len,
COMMON_INTERCEPTOR_ENTER(ctx, wcsrtombs, dest, src, len, ps);
if (src) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(wcsrtombs)(dest, src, len, ps);
if (res != (SIZE_T) - 1 && dest && src) {
SIZE_T write_cnt = res + !*src;
@@ -1735,6 +2452,9 @@ INTERCEPTOR(SIZE_T, wcsnrtombs, char *dest, const wchar_t **src, SIZE_T nms,
if (nms) COMMON_INTERCEPTOR_READ_RANGE(ctx, *src, nms);
}
if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(wcsnrtombs)(dest, src, nms, len, ps);
if (res != (SIZE_T) - 1 && dest && src) {
SIZE_T write_cnt = res + !*src;
@@ -1752,6 +2472,9 @@ INTERCEPTOR(SIZE_T, wcsnrtombs, char *dest, const wchar_t **src, SIZE_T nms,
INTERCEPTOR(int, tcgetattr, int fd, void *termios_p) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, tcgetattr, fd, termios_p);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(tcgetattr)(fd, termios_p);
if (!res && termios_p)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, termios_p, struct_termios_sz);
@@ -1806,6 +2529,9 @@ INTERCEPTOR(char *, canonicalize_file_name, const char *path) {
INTERCEPTOR(SIZE_T, confstr, int name, char *buf, SIZE_T len) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, confstr, name, buf, len);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(confstr)(name, buf, len);
if (buf && res)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res < len ? res : len);
@@ -1820,6 +2546,9 @@ INTERCEPTOR(SIZE_T, confstr, int name, char *buf, SIZE_T len) {
INTERCEPTOR(int, sched_getaffinity, int pid, SIZE_T cpusetsize, void *mask) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sched_getaffinity, pid, cpusetsize, mask);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sched_getaffinity)(pid, cpusetsize, mask);
if (mask && !res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mask, cpusetsize);
return res;
@@ -1834,7 +2563,7 @@ INTERCEPTOR(char *, strerror, int errnum) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strerror, errnum);
char *res = REAL(strerror)(errnum);
- if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
return res;
}
#define INIT_STRERROR COMMON_INTERCEPT_FUNCTION(strerror);
@@ -1846,6 +2575,9 @@ INTERCEPTOR(char *, strerror, int errnum) {
INTERCEPTOR(char *, strerror_r, int errnum, char *buf, SIZE_T buflen) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strerror_r, errnum, buf, buflen);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(strerror_r)(errnum, buf, buflen);
// There are 2 versions of strerror_r:
// * POSIX version returns 0 on success, negative error code on failure,
@@ -1874,6 +2606,9 @@ INTERCEPTOR(char *, strerror_r, int errnum, char *buf, SIZE_T buflen) {
INTERCEPTOR(int, __xpg_strerror_r, int errnum, char *buf, SIZE_T buflen) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, __xpg_strerror_r, errnum, buf, buflen);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(__xpg_strerror_r)(errnum, buf, buflen);
// This version always returns a null-terminated string.
if (buf && buflen)
@@ -1890,24 +2625,23 @@ typedef int (*scandir_filter_f)(const struct __sanitizer_dirent *);
typedef int (*scandir_compar_f)(const struct __sanitizer_dirent **,
const struct __sanitizer_dirent **);
-static THREADLOCAL void *scandir_ctx;
static THREADLOCAL scandir_filter_f scandir_filter;
static THREADLOCAL scandir_compar_f scandir_compar;
static int wrapped_scandir_filter(const struct __sanitizer_dirent *dir) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(scandir_ctx, 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir_ctx, dir, dir->d_reclen);
- return scandir_filter(dir);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);
+ return IndirectExternCall(scandir_filter)(dir);
}
static int wrapped_scandir_compar(const struct __sanitizer_dirent **a,
const struct __sanitizer_dirent **b) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(scandir_ctx, 2);
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir_ctx, a, sizeof(*a));
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir_ctx, *a, (*a)->d_reclen);
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir_ctx, b, sizeof(*b));
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir_ctx, *b, (*b)->d_reclen);
- return scandir_compar(a, b);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);
+ return IndirectExternCall(scandir_compar)(a, b);
}
INTERCEPTOR(int, scandir, char *dirp, __sanitizer_dirent ***namelist,
@@ -1915,13 +2649,13 @@ INTERCEPTOR(int, scandir, char *dirp, __sanitizer_dirent ***namelist,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, scandir, dirp, namelist, filter, compar);
if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, REAL(strlen)(dirp) + 1);
- CHECK_EQ(0, scandir_ctx);
- scandir_ctx = ctx;
scandir_filter = filter;
scandir_compar = compar;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(scandir)(dirp, namelist, filter ? wrapped_scandir_filter : 0,
compar ? wrapped_scandir_compar : 0);
- scandir_ctx = 0;
scandir_filter = 0;
scandir_compar = 0;
if (namelist && res > 0) {
@@ -1943,24 +2677,23 @@ typedef int (*scandir64_filter_f)(const struct __sanitizer_dirent64 *);
typedef int (*scandir64_compar_f)(const struct __sanitizer_dirent64 **,
const struct __sanitizer_dirent64 **);
-static THREADLOCAL void *scandir64_ctx;
static THREADLOCAL scandir64_filter_f scandir64_filter;
static THREADLOCAL scandir64_compar_f scandir64_compar;
static int wrapped_scandir64_filter(const struct __sanitizer_dirent64 *dir) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(scandir64_ctx, 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir64_ctx, dir, dir->d_reclen);
- return scandir64_filter(dir);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);
+ return IndirectExternCall(scandir64_filter)(dir);
}
static int wrapped_scandir64_compar(const struct __sanitizer_dirent64 **a,
const struct __sanitizer_dirent64 **b) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(scandir64_ctx, 2);
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir64_ctx, a, sizeof(*a));
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir64_ctx, *a, (*a)->d_reclen);
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir64_ctx, b, sizeof(*b));
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir64_ctx, *b, (*b)->d_reclen);
- return scandir64_compar(a, b);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);
+ return IndirectExternCall(scandir64_compar)(a, b);
}
INTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist,
@@ -1968,14 +2701,14 @@ INTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, scandir64, dirp, namelist, filter, compar);
if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, REAL(strlen)(dirp) + 1);
- CHECK_EQ(0, scandir64_ctx);
- scandir64_ctx = ctx;
scandir64_filter = filter;
scandir64_compar = compar;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res =
REAL(scandir64)(dirp, namelist, filter ? wrapped_scandir64_filter : 0,
compar ? wrapped_scandir64_compar : 0);
- scandir64_ctx = 0;
scandir64_filter = 0;
scandir64_compar = 0;
if (namelist && res > 0) {
@@ -1996,6 +2729,9 @@ INTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist,
INTERCEPTOR(int, getgroups, int size, u32 *lst) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgroups, size, lst);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getgroups)(size, lst);
if (res && lst) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lst, res * sizeof(*lst));
return res;
@@ -2059,6 +2795,9 @@ INTERCEPTOR(int, wordexp, char *s, __sanitizer_wordexp_t *p, int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wordexp, s, p, flags);
if (s) COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(wordexp)(s, p, flags);
if (!res && p) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
@@ -2082,6 +2821,9 @@ INTERCEPTOR(int, sigwait, __sanitizer_sigset_t *set, int *sig) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigwait, set, sig);
// FIXME: read sigset_t when all of sigemptyset, etc are intercepted
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigwait)(set, sig);
if (!res && sig) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sig, sizeof(*sig));
return res;
@@ -2096,6 +2838,9 @@ INTERCEPTOR(int, sigwaitinfo, __sanitizer_sigset_t *set, void *info) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigwaitinfo, set, info);
// FIXME: read sigset_t when all of sigemptyset, etc are intercepted
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigwaitinfo)(set, info);
if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);
return res;
@@ -2112,6 +2857,9 @@ INTERCEPTOR(int, sigtimedwait, __sanitizer_sigset_t *set, void *info,
COMMON_INTERCEPTOR_ENTER(ctx, sigtimedwait, set, info, timeout);
if (timeout) COMMON_INTERCEPTOR_READ_RANGE(ctx, timeout, struct_timespec_sz);
// FIXME: read sigset_t when all of sigemptyset, etc are intercepted
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigtimedwait)(set, info, timeout);
if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);
return res;
@@ -2125,6 +2873,9 @@ INTERCEPTOR(int, sigtimedwait, __sanitizer_sigset_t *set, void *info,
INTERCEPTOR(int, sigemptyset, __sanitizer_sigset_t *set) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigemptyset, set);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigemptyset)(set);
if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
return res;
@@ -2133,6 +2884,9 @@ INTERCEPTOR(int, sigemptyset, __sanitizer_sigset_t *set) {
INTERCEPTOR(int, sigfillset, __sanitizer_sigset_t *set) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigfillset, set);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigfillset)(set);
if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
return res;
@@ -2148,6 +2902,9 @@ INTERCEPTOR(int, sigfillset, __sanitizer_sigset_t *set) {
INTERCEPTOR(int, sigpending, __sanitizer_sigset_t *set) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigpending, set);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigpending)(set);
if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
return res;
@@ -2163,6 +2920,9 @@ INTERCEPTOR(int, sigprocmask, int how, __sanitizer_sigset_t *set,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigprocmask, how, set, oldset);
// FIXME: read sigset_t when all of sigemptyset, etc are intercepted
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigprocmask)(how, set, oldset);
if (!res && oldset)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldset, sizeof(*oldset));
@@ -2177,6 +2937,9 @@ INTERCEPTOR(int, sigprocmask, int how, __sanitizer_sigset_t *set,
INTERCEPTOR(int, backtrace, void **buffer, int size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, backtrace, buffer, size);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(backtrace)(buffer, size);
if (res && buffer)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buffer, res * sizeof(*buffer));
@@ -2188,6 +2951,9 @@ INTERCEPTOR(char **, backtrace_symbols, void **buffer, int size) {
COMMON_INTERCEPTOR_ENTER(ctx, backtrace_symbols, buffer, size);
if (buffer && size)
COMMON_INTERCEPTOR_READ_RANGE(ctx, buffer, size * sizeof(*buffer));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char **res = REAL(backtrace_symbols)(buffer, size);
if (res && size) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, size * sizeof(*res));
@@ -2243,53 +3009,6 @@ INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
#define INIT_PTHREAD_MUTEX_UNLOCK
#endif
-#if SANITIZER_INTERCEPT_PTHREAD_COND
-INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, pthread_cond_wait, c, m);
- COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, c, pthread_cond_t_sz);
- int res = REAL(pthread_cond_wait)(c, m);
- COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m);
- return res;
-}
-
-INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, pthread_cond_init, c, a);
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, c, pthread_cond_t_sz);
- return REAL(pthread_cond_init)(c, a);
-}
-
-INTERCEPTOR(int, pthread_cond_signal, void *c) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, pthread_cond_signal, c);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, c, pthread_cond_t_sz);
- return REAL(pthread_cond_signal)(c);
-}
-
-INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, pthread_cond_broadcast, c);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, c, pthread_cond_t_sz);
- return REAL(pthread_cond_broadcast)(c);
-}
-
-#define INIT_PTHREAD_COND_WAIT \
- INTERCEPT_FUNCTION_VER(pthread_cond_wait, "GLIBC_2.3.2")
-#define INIT_PTHREAD_COND_INIT \
- INTERCEPT_FUNCTION_VER(pthread_cond_init, "GLIBC_2.3.2")
-#define INIT_PTHREAD_COND_SIGNAL \
- INTERCEPT_FUNCTION_VER(pthread_cond_signal, "GLIBC_2.3.2")
-#define INIT_PTHREAD_COND_BROADCAST \
- INTERCEPT_FUNCTION_VER(pthread_cond_broadcast, "GLIBC_2.3.2")
-#else
-#define INIT_PTHREAD_COND_WAIT
-#define INIT_PTHREAD_COND_INIT
-#define INIT_PTHREAD_COND_SIGNAL
-#define INIT_PTHREAD_COND_BROADCAST
-#endif
-
#if SANITIZER_INTERCEPT_GETMNTENT || SANITIZER_INTERCEPT_GETMNTENT_R
static void write_mntent(void *ctx, __sanitizer_mntent *mnt) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt, sizeof(*mnt));
@@ -2340,6 +3059,9 @@ INTERCEPTOR(int, statfs, char *path, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statfs, path, buf);
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(statfs)(path, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs_sz);
return res;
@@ -2347,6 +3069,9 @@ INTERCEPTOR(int, statfs, char *path, void *buf) {
INTERCEPTOR(int, fstatfs, int fd, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fstatfs, fd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(fstatfs)(fd, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs_sz);
return res;
@@ -2363,6 +3088,9 @@ INTERCEPTOR(int, statfs64, char *path, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statfs64, path, buf);
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(statfs64)(path, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs64_sz);
return res;
@@ -2370,6 +3098,9 @@ INTERCEPTOR(int, statfs64, char *path, void *buf) {
INTERCEPTOR(int, fstatfs64, int fd, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fstatfs64, fd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(fstatfs64)(fd, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs64_sz);
return res;
@@ -2386,6 +3117,9 @@ INTERCEPTOR(int, statvfs, char *path, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statvfs, path, buf);
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(statvfs)(path, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
return res;
@@ -2393,6 +3127,9 @@ INTERCEPTOR(int, statvfs, char *path, void *buf) {
INTERCEPTOR(int, fstatvfs, int fd, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs, fd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(fstatvfs)(fd, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
return res;
@@ -2409,6 +3146,9 @@ INTERCEPTOR(int, statvfs64, char *path, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statvfs64, path, buf);
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(statvfs64)(path, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs64_sz);
return res;
@@ -2416,6 +3156,9 @@ INTERCEPTOR(int, statvfs64, char *path, void *buf) {
INTERCEPTOR(int, fstatvfs64, int fd, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs64, fd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(fstatvfs64)(fd, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs64_sz);
return res;
@@ -2440,13 +3183,13 @@ INTERCEPTOR(int, initgroups, char *user, u32 group) {
#define INIT_INITGROUPS
#endif
-#if SANITIZER_INTERCEPT_ETHER
+#if SANITIZER_INTERCEPT_ETHER_NTOA_ATON
INTERCEPTOR(char *, ether_ntoa, __sanitizer_ether_addr *addr) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_ntoa, addr);
if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
char *res = REAL(ether_ntoa)(addr);
- if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
return res;
}
INTERCEPTOR(__sanitizer_ether_addr *, ether_aton, char *buf) {
@@ -2454,13 +3197,24 @@ INTERCEPTOR(__sanitizer_ether_addr *, ether_aton, char *buf) {
COMMON_INTERCEPTOR_ENTER(ctx, ether_aton, buf);
if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
__sanitizer_ether_addr *res = REAL(ether_aton)(buf);
- if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, res, sizeof(*res));
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, sizeof(*res));
return res;
}
+#define INIT_ETHER_NTOA_ATON \
+ COMMON_INTERCEPT_FUNCTION(ether_ntoa); \
+ COMMON_INTERCEPT_FUNCTION(ether_aton);
+#else
+#define INIT_ETHER_NTOA_ATON
+#endif
+
+#if SANITIZER_INTERCEPT_ETHER_HOST
INTERCEPTOR(int, ether_ntohost, char *hostname, __sanitizer_ether_addr *addr) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_ntohost, hostname, addr);
if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(ether_ntohost)(hostname, addr);
if (!res && hostname)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
@@ -2471,6 +3225,9 @@ INTERCEPTOR(int, ether_hostton, char *hostname, __sanitizer_ether_addr *addr) {
COMMON_INTERCEPTOR_ENTER(ctx, ether_hostton, hostname, addr);
if (hostname)
COMMON_INTERCEPTOR_READ_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(ether_hostton)(hostname, addr);
if (!res && addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
return res;
@@ -2480,6 +3237,9 @@ INTERCEPTOR(int, ether_line, char *line, __sanitizer_ether_addr *addr,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_line, line, addr, hostname);
if (line) COMMON_INTERCEPTOR_READ_RANGE(ctx, line, REAL(strlen)(line) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(ether_line)(line, addr, hostname);
if (!res) {
if (addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
@@ -2488,14 +3248,12 @@ INTERCEPTOR(int, ether_line, char *line, __sanitizer_ether_addr *addr,
}
return res;
}
-#define INIT_ETHER \
- COMMON_INTERCEPT_FUNCTION(ether_ntoa); \
- COMMON_INTERCEPT_FUNCTION(ether_aton); \
+#define INIT_ETHER_HOST \
COMMON_INTERCEPT_FUNCTION(ether_ntohost); \
COMMON_INTERCEPT_FUNCTION(ether_hostton); \
COMMON_INTERCEPT_FUNCTION(ether_line);
#else
-#define INIT_ETHER
+#define INIT_ETHER_HOST
#endif
#if SANITIZER_INTERCEPT_ETHER_R
@@ -2503,6 +3261,9 @@ INTERCEPTOR(char *, ether_ntoa_r, __sanitizer_ether_addr *addr, char *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_ntoa_r, addr, buf);
if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(ether_ntoa_r)(addr, buf);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
return res;
@@ -2512,6 +3273,9 @@ INTERCEPTOR(__sanitizer_ether_addr *, ether_aton_r, char *buf,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_aton_r, buf, addr);
if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
__sanitizer_ether_addr *res = REAL(ether_aton_r)(buf, addr);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, sizeof(*res));
return res;
@@ -2527,6 +3291,9 @@ INTERCEPTOR(__sanitizer_ether_addr *, ether_aton_r, char *buf,
INTERCEPTOR(int, shmctl, int shmid, int cmd, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, shmctl, shmid, cmd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(shmctl)(shmid, cmd, buf);
if (res >= 0) {
unsigned sz = 0;
@@ -2549,6 +3316,9 @@ INTERCEPTOR(int, shmctl, int shmid, int cmd, void *buf) {
INTERCEPTOR(int, random_r, void *buf, u32 *result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, random_r, buf, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(random_r)(buf, result);
if (!res && result)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
@@ -2559,16 +3329,33 @@ INTERCEPTOR(int, random_r, void *buf, u32 *result) {
#define INIT_RANDOM_R
#endif
-#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET || \
- SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSSCHED
-#define INTERCEPTOR_PTHREAD_ATTR_GET(what, sz) \
- INTERCEPTOR(int, pthread_attr_get##what, void *attr, void *r) { \
- void *ctx; \
- COMMON_INTERCEPTOR_ENTER(ctx, pthread_attr_get##what, attr, r); \
- int res = REAL(pthread_attr_get##what)(attr, r); \
- if (!res && r) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, r, sz); \
- return res; \
+// FIXME: under ASan the REAL() call below may write to freed memory and corrupt
+// its metadata. See
+// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET || \
+ SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSSCHED || \
+ SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GET || \
+ SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GET || \
+ SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GET || \
+ SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GET
+#define INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(fn, sz) \
+ INTERCEPTOR(int, fn, void *attr, void *r) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, fn, attr, r); \
+ int res = REAL(fn)(attr, r); \
+ if (!res && r) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, r, sz); \
+ return res; \
}
+#define INTERCEPTOR_PTHREAD_ATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_attr_get##what, sz)
+#define INTERCEPTOR_PTHREAD_MUTEXATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_mutexattr_get##what, sz)
+#define INTERCEPTOR_PTHREAD_RWLOCKATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_rwlockattr_get##what, sz)
+#define INTERCEPTOR_PTHREAD_CONDATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_condattr_get##what, sz)
+#define INTERCEPTOR_PTHREAD_BARRIERATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_barrierattr_get##what, sz)
#endif
#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET
@@ -2581,6 +3368,9 @@ INTERCEPTOR_PTHREAD_ATTR_GET(stacksize, sizeof(SIZE_T))
INTERCEPTOR(int, pthread_attr_getstack, void *attr, void **addr, SIZE_T *size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pthread_attr_getstack, attr, addr, size);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(pthread_attr_getstack)(attr, addr, size);
if (!res) {
if (addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
@@ -2592,9 +3382,13 @@ INTERCEPTOR(int, pthread_attr_getstack, void *attr, void **addr, SIZE_T *size) {
// We may need to call the real pthread_attr_getstack from the run-time
// in sanitizer_common, but we don't want to include the interception headers
// there. So, just define this function here.
-int __sanitizer_pthread_attr_getstack(void *attr, void **addr, SIZE_T *size) {
+namespace __sanitizer {
+extern "C" {
+int real_pthread_attr_getstack(void *attr, void **addr, SIZE_T *size) {
return REAL(pthread_attr_getstack)(attr, addr, size);
}
+} // extern "C"
+} // namespace __sanitizer
#define INIT_PTHREAD_ATTR_GET \
COMMON_INTERCEPT_FUNCTION(pthread_attr_getdetachstate); \
@@ -2623,6 +3417,9 @@ INTERCEPTOR(int, pthread_attr_getaffinity_np, void *attr, SIZE_T cpusetsize,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pthread_attr_getaffinity_np, attr, cpusetsize,
cpuset);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(pthread_attr_getaffinity_np)(attr, cpusetsize, cpuset);
if (!res && cpusetsize && cpuset)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cpuset, cpusetsize);
@@ -2635,6 +3432,94 @@ INTERCEPTOR(int, pthread_attr_getaffinity_np, void *attr, SIZE_T cpusetsize,
#define INIT_PTHREAD_ATTR_GETAFFINITY_NP
#endif
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(pshared, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETPSHARED \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getpshared);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETPSHARED
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(type, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETTYPE \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_gettype);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETTYPE
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(protocol, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETPROTOCOL \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getprotocol);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETPROTOCOL
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(prioceiling, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getprioceiling);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(robust, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETROBUST \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getrobust);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETROBUST
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(robust_np, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETROBUST_NP \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getrobust_np);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETROBUST_NP
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED
+INTERCEPTOR_PTHREAD_RWLOCKATTR_GET(pshared, sizeof(int))
+#define INIT_PTHREAD_RWLOCKATTR_GETPSHARED \
+ COMMON_INTERCEPT_FUNCTION(pthread_rwlockattr_getpshared);
+#else
+#define INIT_PTHREAD_RWLOCKATTR_GETPSHARED
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP
+INTERCEPTOR_PTHREAD_RWLOCKATTR_GET(kind_np, sizeof(int))
+#define INIT_PTHREAD_RWLOCKATTR_GETKIND_NP \
+ COMMON_INTERCEPT_FUNCTION(pthread_rwlockattr_getkind_np);
+#else
+#define INIT_PTHREAD_RWLOCKATTR_GETKIND_NP
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED
+INTERCEPTOR_PTHREAD_CONDATTR_GET(pshared, sizeof(int))
+#define INIT_PTHREAD_CONDATTR_GETPSHARED \
+ COMMON_INTERCEPT_FUNCTION(pthread_condattr_getpshared);
+#else
+#define INIT_PTHREAD_CONDATTR_GETPSHARED
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK
+INTERCEPTOR_PTHREAD_CONDATTR_GET(clock, sizeof(int))
+#define INIT_PTHREAD_CONDATTR_GETCLOCK \
+ COMMON_INTERCEPT_FUNCTION(pthread_condattr_getclock);
+#else
+#define INIT_PTHREAD_CONDATTR_GETCLOCK
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED
+INTERCEPTOR_PTHREAD_BARRIERATTR_GET(pshared, sizeof(int)) // !mac !android
+#define INIT_PTHREAD_BARRIERATTR_GETPSHARED \
+ COMMON_INTERCEPT_FUNCTION(pthread_barrierattr_getpshared);
+#else
+#define INIT_PTHREAD_BARRIERATTR_GETPSHARED
+#endif
+
#if SANITIZER_INTERCEPT_TMPNAM
INTERCEPTOR(char *, tmpnam, char *s) {
void *ctx;
@@ -2642,9 +3527,12 @@ INTERCEPTOR(char *, tmpnam, char *s) {
char *res = REAL(tmpnam)(s);
if (res) {
if (s)
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
else
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
}
return res;
}
@@ -2657,6 +3545,9 @@ INTERCEPTOR(char *, tmpnam, char *s) {
INTERCEPTOR(char *, tmpnam_r, char *s) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, tmpnam_r, s);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(tmpnam_r)(s);
if (res && s) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
return res;
@@ -2673,7 +3564,7 @@ INTERCEPTOR(char *, tempnam, char *dir, char *pfx) {
if (dir) COMMON_INTERCEPTOR_READ_RANGE(ctx, dir, REAL(strlen)(dir) + 1);
if (pfx) COMMON_INTERCEPTOR_READ_RANGE(ctx, pfx, REAL(strlen)(pfx) + 1);
char *res = REAL(tempnam)(dir, pfx);
- if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
return res;
}
#define INIT_TEMPNAM COMMON_INTERCEPT_FUNCTION(tempnam);
@@ -2697,6 +3588,9 @@ INTERCEPTOR(int, pthread_setname_np, uptr thread, const char *name) {
INTERCEPTOR(void, sincos, double x, double *sin, double *cos) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sincos, x, sin, cos);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
REAL(sincos)(x, sin, cos);
if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));
if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));
@@ -2704,6 +3598,9 @@ INTERCEPTOR(void, sincos, double x, double *sin, double *cos) {
INTERCEPTOR(void, sincosf, float x, float *sin, float *cos) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sincosf, x, sin, cos);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
REAL(sincosf)(x, sin, cos);
if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));
if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));
@@ -2711,6 +3608,9 @@ INTERCEPTOR(void, sincosf, float x, float *sin, float *cos) {
INTERCEPTOR(void, sincosl, long double x, long double *sin, long double *cos) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sincosl, x, sin, cos);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
REAL(sincosl)(x, sin, cos);
if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));
if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));
@@ -2727,6 +3627,9 @@ INTERCEPTOR(void, sincosl, long double x, long double *sin, long double *cos) {
INTERCEPTOR(double, remquo, double x, double y, int *quo) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, remquo, x, y, quo);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
double res = REAL(remquo)(x, y, quo);
if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));
return res;
@@ -2734,6 +3637,9 @@ INTERCEPTOR(double, remquo, double x, double y, int *quo) {
INTERCEPTOR(float, remquof, float x, float y, int *quo) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, remquof, x, y, quo);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
float res = REAL(remquof)(x, y, quo);
if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));
return res;
@@ -2741,6 +3647,9 @@ INTERCEPTOR(float, remquof, float x, float y, int *quo) {
INTERCEPTOR(long double, remquol, long double x, long double y, int *quo) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, remquol, x, y, quo);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
long double res = REAL(remquol)(x, y, quo);
if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));
return res;
@@ -2788,6 +3697,9 @@ INTERCEPTOR(long double, lgammal, long double x) {
INTERCEPTOR(double, lgamma_r, double x, int *signp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, lgamma_r, x, signp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
double res = REAL(lgamma_r)(x, signp);
if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));
return res;
@@ -2795,29 +3707,43 @@ INTERCEPTOR(double, lgamma_r, double x, int *signp) {
INTERCEPTOR(float, lgammaf_r, float x, int *signp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, lgammaf_r, x, signp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
float res = REAL(lgammaf_r)(x, signp);
if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));
return res;
}
+#define INIT_LGAMMA_R \
+ COMMON_INTERCEPT_FUNCTION(lgamma_r); \
+ COMMON_INTERCEPT_FUNCTION(lgammaf_r);
+#else
+#define INIT_LGAMMA_R
+#endif
+
+#if SANITIZER_INTERCEPT_LGAMMAL_R
INTERCEPTOR(long double, lgammal_r, long double x, int *signp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, lgammal_r, x, signp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
long double res = REAL(lgammal_r)(x, signp);
if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));
return res;
}
-#define INIT_LGAMMA_R \
- COMMON_INTERCEPT_FUNCTION(lgamma_r); \
- COMMON_INTERCEPT_FUNCTION(lgammaf_r); \
- COMMON_INTERCEPT_FUNCTION(lgammal_r);
+#define INIT_LGAMMAL_R COMMON_INTERCEPT_FUNCTION(lgammal_r);
#else
-#define INIT_LGAMMA_R
+#define INIT_LGAMMAL_R
#endif
#if SANITIZER_INTERCEPT_DRAND48_R
INTERCEPTOR(int, drand48_r, void *buffer, double *result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, drand48_r, buffer, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(drand48_r)(buffer, result);
if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
return res;
@@ -2825,6 +3751,9 @@ INTERCEPTOR(int, drand48_r, void *buffer, double *result) {
INTERCEPTOR(int, lrand48_r, void *buffer, long *result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, lrand48_r, buffer, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(lrand48_r)(buffer, result);
if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
return res;
@@ -2836,10 +3765,25 @@ INTERCEPTOR(int, lrand48_r, void *buffer, long *result) {
#define INIT_DRAND48_R
#endif
+#if SANITIZER_INTERCEPT_RAND_R
+INTERCEPTOR(int, rand_r, unsigned *seedp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, rand_r, seedp);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, seedp, sizeof(*seedp));
+ return REAL(rand_r)(seedp);
+}
+#define INIT_RAND_R COMMON_INTERCEPT_FUNCTION(rand_r);
+#else
+#define INIT_RAND_R
+#endif
+
#if SANITIZER_INTERCEPT_GETLINE
INTERCEPTOR(SSIZE_T, getline, char **lineptr, SIZE_T *n, void *stream) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getline, lineptr, n, stream);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(getline)(lineptr, n, stream);
if (res > 0) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineptr, sizeof(*lineptr));
@@ -2848,11 +3792,14 @@ INTERCEPTOR(SSIZE_T, getline, char **lineptr, SIZE_T *n, void *stream) {
}
return res;
}
-INTERCEPTOR(SSIZE_T, getdelim, char **lineptr, SIZE_T *n, int delim,
+INTERCEPTOR(SSIZE_T, __getdelim, char **lineptr, SIZE_T *n, int delim,
void *stream) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, getdelim, lineptr, n, delim, stream);
- SSIZE_T res = REAL(getdelim)(lineptr, n, delim, stream);
+ COMMON_INTERCEPTOR_ENTER(ctx, __getdelim, lineptr, n, delim, stream);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ SSIZE_T res = REAL(__getdelim)(lineptr, n, delim, stream);
if (res > 0) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineptr, sizeof(*lineptr));
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
@@ -2860,8 +3807,13 @@ INTERCEPTOR(SSIZE_T, getdelim, char **lineptr, SIZE_T *n, int delim,
}
return res;
}
-#define INIT_GETLINE \
- COMMON_INTERCEPT_FUNCTION(getline); \
+INTERCEPTOR(SSIZE_T, getdelim, char **lineptr, SIZE_T *n, int delim,
+ void *stream) {
+ return __getdelim(lineptr, n, delim, stream);
+}
+#define INIT_GETLINE \
+ COMMON_INTERCEPT_FUNCTION(getline); \
+ COMMON_INTERCEPT_FUNCTION(__getdelim); \
COMMON_INTERCEPT_FUNCTION(getdelim);
#else
#define INIT_GETLINE
@@ -2880,6 +3832,9 @@ INTERCEPTOR(SIZE_T, iconv, void *cd, char **inbuf, SIZE_T *inbytesleft,
if (outbytesleft)
COMMON_INTERCEPTOR_READ_RANGE(ctx, outbytesleft, sizeof(*outbytesleft));
void *outbuf_orig = outbuf ? *outbuf : 0;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(iconv)(cd, inbuf, inbytesleft, outbuf, outbytesleft);
if (res != (SIZE_T) - 1 && outbuf && *outbuf > outbuf_orig) {
SIZE_T sz = (char *)*outbuf - (char *)outbuf_orig;
@@ -2896,6 +3851,9 @@ INTERCEPTOR(SIZE_T, iconv, void *cd, char **inbuf, SIZE_T *inbytesleft,
INTERCEPTOR(__sanitizer_clock_t, times, void *tms) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, times, tms);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
__sanitizer_clock_t res = REAL(times)(tms);
if (res != (__sanitizer_clock_t)-1 && tms)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tms, struct_tms_sz);
@@ -2906,117 +3864,1032 @@ INTERCEPTOR(__sanitizer_clock_t, times, void *tms) {
#define INIT_TIMES
#endif
-#define SANITIZER_COMMON_INTERCEPTORS_INIT \
- INIT_TEXTDOMAIN; \
- INIT_STRCMP; \
- INIT_STRNCMP; \
- INIT_STRCASECMP; \
- INIT_STRNCASECMP; \
- INIT_READ; \
- INIT_PREAD; \
- INIT_PREAD64; \
- INIT_READV; \
- INIT_PREADV; \
- INIT_PREADV64; \
- INIT_WRITE; \
- INIT_PWRITE; \
- INIT_PWRITE64; \
- INIT_WRITEV; \
- INIT_PWRITEV; \
- INIT_PWRITEV64; \
- INIT_PRCTL; \
- INIT_LOCALTIME_AND_FRIENDS; \
- INIT_STRPTIME; \
- INIT_SCANF; \
- INIT_ISOC99_SCANF; \
- INIT_FREXP; \
- INIT_FREXPF_FREXPL; \
- INIT_GETPWNAM_AND_FRIENDS; \
- INIT_GETPWNAM_R_AND_FRIENDS; \
- INIT_CLOCK_GETTIME; \
- INIT_GETITIMER; \
- INIT_TIME; \
- INIT_GLOB; \
- INIT_WAIT; \
- INIT_INET; \
- INIT_PTHREAD_GETSCHEDPARAM; \
- INIT_GETADDRINFO; \
- INIT_GETNAMEINFO; \
- INIT_GETSOCKNAME; \
- INIT_GETHOSTBYNAME; \
- INIT_GETHOSTBYNAME_R; \
- INIT_GETSOCKOPT; \
- INIT_ACCEPT; \
- INIT_ACCEPT4; \
- INIT_MODF; \
- INIT_RECVMSG; \
- INIT_GETPEERNAME; \
- INIT_IOCTL; \
- INIT_INET_ATON; \
- INIT_SYSINFO; \
- INIT_READDIR; \
- INIT_READDIR64; \
- INIT_PTRACE; \
- INIT_SETLOCALE; \
- INIT_GETCWD; \
- INIT_GET_CURRENT_DIR_NAME; \
- INIT_STRTOIMAX; \
- INIT_MBSTOWCS; \
- INIT_MBSNRTOWCS; \
- INIT_WCSTOMBS; \
- INIT_WCSNRTOMBS; \
- INIT_TCGETATTR; \
- INIT_REALPATH; \
- INIT_CANONICALIZE_FILE_NAME; \
- INIT_CONFSTR; \
- INIT_SCHED_GETAFFINITY; \
- INIT_STRERROR; \
- INIT_STRERROR_R; \
- INIT_XPG_STRERROR_R; \
- INIT_SCANDIR; \
- INIT_SCANDIR64; \
- INIT_GETGROUPS; \
- INIT_POLL; \
- INIT_PPOLL; \
- INIT_WORDEXP; \
- INIT_SIGWAIT; \
- INIT_SIGWAITINFO; \
- INIT_SIGTIMEDWAIT; \
- INIT_SIGSETOPS; \
- INIT_SIGPENDING; \
- INIT_SIGPROCMASK; \
- INIT_BACKTRACE; \
- INIT__EXIT; \
- INIT_PTHREAD_MUTEX_LOCK; \
- INIT_PTHREAD_MUTEX_UNLOCK; \
- INIT_PTHREAD_COND_WAIT; \
- INIT_PTHREAD_COND_INIT; \
- INIT_PTHREAD_COND_SIGNAL; \
- INIT_PTHREAD_COND_BROADCAST; \
- INIT_GETMNTENT; \
- INIT_GETMNTENT_R; \
- INIT_STATFS; \
- INIT_STATFS64; \
- INIT_STATVFS; \
- INIT_STATVFS64; \
- INIT_INITGROUPS; \
- INIT_ETHER; \
- INIT_ETHER_R; \
- INIT_SHMCTL; \
- INIT_RANDOM_R; \
- INIT_PTHREAD_ATTR_GET; \
- INIT_PTHREAD_ATTR_GETINHERITSCHED; \
- INIT_PTHREAD_ATTR_GETAFFINITY_NP; \
- INIT_TMPNAM; \
- INIT_TMPNAM_R; \
- INIT_TEMPNAM; \
- INIT_PTHREAD_SETNAME_NP; \
- INIT_SINCOS; \
- INIT_REMQUO; \
- INIT_LGAMMA; \
- INIT_LGAMMA_R; \
- INIT_DRAND48_R; \
- INIT_GETLINE; \
- INIT_ICONV; \
- INIT_TIMES; \
-/**/
+#if SANITIZER_INTERCEPT_TLS_GET_ADDR
+#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_addr)
+INTERCEPTOR(void *, __tls_get_addr, void *arg) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr, arg);
+ void *res = REAL(__tls_get_addr)(arg);
+ DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res);
+ if (dtv) {
+ // New DTLS block has been allocated.
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE((void *)dtv->beg, dtv->size);
+ }
+ return res;
+}
+#else
+#define INIT_TLS_GET_ADDR
+#endif
+
+#if SANITIZER_INTERCEPT_LISTXATTR
+INTERCEPTOR(SSIZE_T, listxattr, const char *path, char *list, SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, listxattr, path, list, size);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ SSIZE_T res = REAL(listxattr)(path, list, size);
+ // Here and below, size == 0 is a special case where nothing is written to the
+ // buffer, and res contains the desired buffer size.
+ if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, llistxattr, const char *path, char *list, SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, llistxattr, path, list, size);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ SSIZE_T res = REAL(llistxattr)(path, list, size);
+ if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, flistxattr, int fd, char *list, SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, flistxattr, fd, list, size);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ SSIZE_T res = REAL(flistxattr)(fd, list, size);
+ if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);
+ return res;
+}
+#define INIT_LISTXATTR \
+ COMMON_INTERCEPT_FUNCTION(listxattr); \
+ COMMON_INTERCEPT_FUNCTION(llistxattr); \
+ COMMON_INTERCEPT_FUNCTION(flistxattr);
+#else
+#define INIT_LISTXATTR
+#endif
+
+#if SANITIZER_INTERCEPT_GETXATTR
+INTERCEPTOR(SSIZE_T, getxattr, const char *path, const char *name, char *value,
+ SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getxattr, path, name, value, size);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ SSIZE_T res = REAL(getxattr)(path, name, value, size);
+ if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, lgetxattr, const char *path, const char *name, char *value,
+ SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lgetxattr, path, name, value, size);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ SSIZE_T res = REAL(lgetxattr)(path, name, value, size);
+ if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, fgetxattr, int fd, const char *name, char *value,
+ SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetxattr, fd, name, value, size);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ SSIZE_T res = REAL(fgetxattr)(fd, name, value, size);
+ if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
+ return res;
+}
+#define INIT_GETXATTR \
+ COMMON_INTERCEPT_FUNCTION(getxattr); \
+ COMMON_INTERCEPT_FUNCTION(lgetxattr); \
+ COMMON_INTERCEPT_FUNCTION(fgetxattr);
+#else
+#define INIT_GETXATTR
+#endif
+
+#if SANITIZER_INTERCEPT_GETRESID
+INTERCEPTOR(int, getresuid, void *ruid, void *euid, void *suid) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getresuid, ruid, euid, suid);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(getresuid)(ruid, euid, suid);
+ if (res >= 0) {
+ if (ruid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ruid, uid_t_sz);
+ if (euid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, euid, uid_t_sz);
+ if (suid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, suid, uid_t_sz);
+ }
+ return res;
+}
+INTERCEPTOR(int, getresgid, void *rgid, void *egid, void *sgid) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getresgid, rgid, egid, sgid);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(getresgid)(rgid, egid, sgid);
+ if (res >= 0) {
+ if (rgid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rgid, gid_t_sz);
+ if (egid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, egid, gid_t_sz);
+ if (sgid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sgid, gid_t_sz);
+ }
+ return res;
+}
+#define INIT_GETRESID \
+ COMMON_INTERCEPT_FUNCTION(getresuid); \
+ COMMON_INTERCEPT_FUNCTION(getresgid);
+#else
+#define INIT_GETRESID
+#endif
+
+#if SANITIZER_INTERCEPT_GETIFADDRS
+// As long as getifaddrs()/freeifaddrs() use calloc()/free(), we don't need to
+// intercept freeifaddrs(). If that ceases to be the case, we might need to
+// intercept it to poison the memory again.
+INTERCEPTOR(int, getifaddrs, __sanitizer_ifaddrs **ifap) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getifaddrs, ifap);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(getifaddrs)(ifap);
+ if (res == 0 && ifap) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifap, sizeof(void *));
+ __sanitizer_ifaddrs *p = *ifap;
+ while (p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(__sanitizer_ifaddrs));
+ if (p->ifa_name)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_name,
+ REAL(strlen)(p->ifa_name) + 1);
+ if (p->ifa_addr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_addr, struct_sockaddr_sz);
+ if (p->ifa_netmask)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_netmask, struct_sockaddr_sz);
+ // On Linux this is a union, but the other member also points to a
+ // struct sockaddr, so the following is sufficient.
+ if (p->ifa_dstaddr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_dstaddr, struct_sockaddr_sz);
+ // FIXME(smatveev): Unpoison p->ifa_data as well.
+ p = p->ifa_next;
+ }
+ }
+ return res;
+}
+#define INIT_GETIFADDRS \
+ COMMON_INTERCEPT_FUNCTION(getifaddrs);
+#else
+#define INIT_GETIFADDRS
+#endif
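The calloc()/free() note at the top of this block is easy to see from ordinary client code. Below is a minimal standalone sketch (plain glibc on Linux, not sanitizer code): every field it dereferences corresponds to one WRITE_RANGE the interceptor above marks as initialized.

    #include <ifaddrs.h>
    #include <stdio.h>

    int main() {
      struct ifaddrs *ifap;
      if (getifaddrs(&ifap) != 0) return 1;
      for (struct ifaddrs *p = ifap; p; p = p->ifa_next) {
        // Each read here matches one range the interceptor unpoisons:
        // the node itself, ifa_name, and the sockaddr members.
        printf("%s family=%d\n", p->ifa_name,
               p->ifa_addr ? p->ifa_addr->sa_family : -1);
      }
      freeifaddrs(ifap);  // glibc releases the calloc'ed nodes with free()
      return 0;
    }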
+
+#if SANITIZER_INTERCEPT_IF_INDEXTONAME
+INTERCEPTOR(char *, if_indextoname, unsigned int ifindex, char* ifname) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, if_indextoname, ifindex, ifname);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ char *res = REAL(if_indextoname)(ifindex, ifname);
+ if (res && ifname)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifname, REAL(strlen)(ifname) + 1);
+ return res;
+}
+INTERCEPTOR(unsigned int, if_nametoindex, const char* ifname) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, if_nametoindex, ifname);
+ if (ifname)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ifname, REAL(strlen)(ifname) + 1);
+ return REAL(if_nametoindex)(ifname);
+}
+#define INIT_IF_INDEXTONAME \
+ COMMON_INTERCEPT_FUNCTION(if_indextoname); \
+ COMMON_INTERCEPT_FUNCTION(if_nametoindex);
+#else
+#define INIT_IF_INDEXTONAME
+#endif
+
+#if SANITIZER_INTERCEPT_CAPGET
+INTERCEPTOR(int, capget, void *hdrp, void *datap) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, capget, hdrp, datap);
+ if (hdrp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, hdrp, __user_cap_header_struct_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(capget)(hdrp, datap);
+ if (res == 0 && datap)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datap, __user_cap_data_struct_sz);
+ // We can also return -1 and write to hdrp->version if the version passed in
+ // hdrp->version is unsupported. But that's not a trivial condition to check,
+ // and anyway COMMON_INTERCEPTOR_READ_RANGE protects us to some extent.
+ return res;
+}
+INTERCEPTOR(int, capset, void *hdrp, const void *datap) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, capset, hdrp, datap);
+ if (hdrp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, hdrp, __user_cap_header_struct_sz);
+ if (datap)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, datap, __user_cap_data_struct_sz);
+ return REAL(capset)(hdrp, datap);
+}
+#define INIT_CAPGET \
+ COMMON_INTERCEPT_FUNCTION(capget); \
+ COMMON_INTERCEPT_FUNCTION(capset);
+#else
+#define INIT_CAPGET
+#endif
+
+#if SANITIZER_INTERCEPT_AEABI_MEM
+DECLARE_REAL_AND_INTERCEPTOR(void *, memmove, void *, const void *, uptr);
+DECLARE_REAL_AND_INTERCEPTOR(void *, memcpy, void *, const void *, uptr);
+DECLARE_REAL_AND_INTERCEPTOR(void *, memset, void *, int, uptr);
+
+INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) {
+ return WRAP(memmove)(to, from, size);
+}
+INTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, uptr size) {
+ return WRAP(memmove)(to, from, size);
+}
+INTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, uptr size) {
+ return WRAP(memmove)(to, from, size);
+}
+INTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, uptr size) {
+ return WRAP(memcpy)(to, from, size);
+}
+INTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, uptr size) {
+ return WRAP(memcpy)(to, from, size);
+}
+INTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, uptr size) {
+ return WRAP(memcpy)(to, from, size);
+}
+// Note the argument order.
+INTERCEPTOR(void *, __aeabi_memset, void *block, uptr size, int c) {
+ return WRAP(memset)(block, c, size);
+}
+INTERCEPTOR(void *, __aeabi_memset4, void *block, uptr size, int c) {
+ return WRAP(memset)(block, c, size);
+}
+INTERCEPTOR(void *, __aeabi_memset8, void *block, uptr size, int c) {
+ return WRAP(memset)(block, c, size);
+}
+INTERCEPTOR(void *, __aeabi_memclr, void *block, uptr size) {
+ return WRAP(memset)(block, 0, size);
+}
+INTERCEPTOR(void *, __aeabi_memclr4, void *block, uptr size) {
+ return WRAP(memset)(block, 0, size);
+}
+INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
+ return WRAP(memset)(block, 0, size);
+}
+#define INIT_AEABI_MEM \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr8);
+#else
+#define INIT_AEABI_MEM
+#endif // SANITIZER_INTERCEPT_AEABI_MEM
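The "Note the argument order" comment above is the whole point of these wrappers: the ARM EABI helpers take (dest, n, c) while ISO memset takes (dest, c, n). A minimal standalone illustration of the swap, where my_aeabi_memset is a hypothetical stand-in for the runtime helper:

    #include <cstring>
    #include <cstdio>

    static void *my_aeabi_memset(void *block, size_t n, int c) {  // EABI order
      return memset(block, c, n);                                 // ISO order
    }

    int main() {
      char buf[8];
      my_aeabi_memset(buf, sizeof(buf), 'x');
      printf("%.8s\n", buf);  // prints "xxxxxxxx"
      return 0;
    }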
+
+#if SANITIZER_INTERCEPT___BZERO
+DECLARE_REAL_AND_INTERCEPTOR(void *, memset, void *, int, uptr);
+
+INTERCEPTOR(void *, __bzero, void *block, uptr size) {
+ return WRAP(memset)(block, 0, size);
+}
+#define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero);
+#else
+#define INIT___BZERO
+#endif // SANITIZER_INTERCEPT___BZERO
+
+#if SANITIZER_INTERCEPT_FTIME
+INTERCEPTOR(int, ftime, __sanitizer_timeb *tp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ftime, tp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(ftime)(tp);
+ if (tp)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, sizeof(*tp));
+ return res;
+}
+#define INIT_FTIME COMMON_INTERCEPT_FUNCTION(ftime);
+#else
+#define INIT_FTIME
+#endif // SANITIZER_INTERCEPT_FTIME
+
+#if SANITIZER_INTERCEPT_XDR
+INTERCEPTOR(void, xdrmem_create, __sanitizer_XDR *xdrs, uptr addr,
+ unsigned size, int op) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdrmem_create, xdrs, addr, size, op);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ REAL(xdrmem_create)(xdrs, addr, size, op);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdrs, sizeof(*xdrs));
+ if (op == __sanitizer_XDR_ENCODE) {
+ // It's not obvious how much data individual xdr_ routines write.
+ // Simply unpoison the entire target buffer in advance.
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (void *)addr, size);
+ }
+}
+
+INTERCEPTOR(void, xdrstdio_create, __sanitizer_XDR *xdrs, void *file, int op) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdrstdio_create, xdrs, file, op);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ REAL(xdrstdio_create)(xdrs, file, op);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdrs, sizeof(*xdrs));
+}
+
+// FIXME: under ASan the call below may write to freed memory and corrupt
+// its metadata. See
+// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+#define XDR_INTERCEPTOR(F, T) \
+ INTERCEPTOR(int, F, __sanitizer_XDR *xdrs, T *p) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, F, xdrs, p); \
+ if (p && xdrs->x_op == __sanitizer_XDR_ENCODE) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p)); \
+ int res = REAL(F)(xdrs, p); \
+ if (res && p && xdrs->x_op == __sanitizer_XDR_DECODE) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p)); \
+ return res; \
+ }
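The macro above encodes the key asymmetry: with x_op == XDR_ENCODE the library reads *p, with XDR_DECODE it writes *p. A standalone sketch of that round trip, assuming the SunRPC/TI-RPC headers are available (link with -ltirpc on recent glibc); it is an illustration only, not part of the sanitizer build:

    #include <rpc/xdr.h>
    #include <cstdio>

    int main() {
      char buf[64];
      XDR xdrs;
      int in = 42, out = 0;

      xdrmem_create(&xdrs, buf, sizeof(buf), XDR_ENCODE);
      xdr_int(&xdrs, &in);           // reads 'in', writes into 'buf'

      xdrmem_create(&xdrs, buf, sizeof(buf), XDR_DECODE);
      xdr_int(&xdrs, &out);          // reads 'buf', writes 'out'
      printf("%d\n", out);           // 42
      return 0;
    }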
+
+XDR_INTERCEPTOR(xdr_short, short)
+XDR_INTERCEPTOR(xdr_u_short, unsigned short)
+XDR_INTERCEPTOR(xdr_int, int)
+XDR_INTERCEPTOR(xdr_u_int, unsigned)
+XDR_INTERCEPTOR(xdr_long, long)
+XDR_INTERCEPTOR(xdr_u_long, unsigned long)
+XDR_INTERCEPTOR(xdr_hyper, long long)
+XDR_INTERCEPTOR(xdr_u_hyper, unsigned long long)
+XDR_INTERCEPTOR(xdr_longlong_t, long long)
+XDR_INTERCEPTOR(xdr_u_longlong_t, unsigned long long)
+XDR_INTERCEPTOR(xdr_int8_t, u8)
+XDR_INTERCEPTOR(xdr_uint8_t, u8)
+XDR_INTERCEPTOR(xdr_int16_t, u16)
+XDR_INTERCEPTOR(xdr_uint16_t, u16)
+XDR_INTERCEPTOR(xdr_int32_t, u32)
+XDR_INTERCEPTOR(xdr_uint32_t, u32)
+XDR_INTERCEPTOR(xdr_int64_t, u64)
+XDR_INTERCEPTOR(xdr_uint64_t, u64)
+XDR_INTERCEPTOR(xdr_quad_t, long long)
+XDR_INTERCEPTOR(xdr_u_quad_t, unsigned long long)
+XDR_INTERCEPTOR(xdr_bool, bool)
+XDR_INTERCEPTOR(xdr_enum, int)
+XDR_INTERCEPTOR(xdr_char, char)
+XDR_INTERCEPTOR(xdr_u_char, unsigned char)
+XDR_INTERCEPTOR(xdr_float, float)
+XDR_INTERCEPTOR(xdr_double, double)
+
+// FIXME: intercept xdr_array, opaque, union, vector, reference, pointer,
+// wrapstring, sizeof
+
+INTERCEPTOR(int, xdr_bytes, __sanitizer_XDR *xdrs, char **p, unsigned *sizep,
+ unsigned maxsize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdr_bytes, xdrs, p, sizep, maxsize);
+ if (p && sizep && xdrs->x_op == __sanitizer_XDR_ENCODE) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sizep, sizeof(*sizep));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, *sizep);
+ }
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(xdr_bytes)(xdrs, p, sizep, maxsize);
+ if (p && sizep && xdrs->x_op == __sanitizer_XDR_DECODE) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizep, sizeof(*sizep));
+ if (res && *p && *sizep) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, *sizep);
+ }
+ return res;
+}
+
+INTERCEPTOR(int, xdr_string, __sanitizer_XDR *xdrs, char **p,
+ unsigned maxsize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdr_string, xdrs, p, maxsize);
+ if (p && xdrs->x_op == __sanitizer_XDR_ENCODE) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ }
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(xdr_string)(xdrs, p, maxsize);
+ if (p && xdrs->x_op == __sanitizer_XDR_DECODE) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
+ if (res && *p)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ }
+ return res;
+}
+
+#define INIT_XDR \
+ COMMON_INTERCEPT_FUNCTION(xdrmem_create); \
+ COMMON_INTERCEPT_FUNCTION(xdrstdio_create); \
+ COMMON_INTERCEPT_FUNCTION(xdr_short); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_short); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_int); \
+ COMMON_INTERCEPT_FUNCTION(xdr_long); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_long); \
+ COMMON_INTERCEPT_FUNCTION(xdr_hyper); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_hyper); \
+ COMMON_INTERCEPT_FUNCTION(xdr_longlong_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_longlong_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int8_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_uint8_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int16_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_uint16_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int32_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_uint32_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int64_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_uint64_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_quad_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_quad_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_bool); \
+ COMMON_INTERCEPT_FUNCTION(xdr_enum); \
+ COMMON_INTERCEPT_FUNCTION(xdr_char); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_char); \
+ COMMON_INTERCEPT_FUNCTION(xdr_float); \
+ COMMON_INTERCEPT_FUNCTION(xdr_double); \
+ COMMON_INTERCEPT_FUNCTION(xdr_bytes); \
+ COMMON_INTERCEPT_FUNCTION(xdr_string);
+#else
+#define INIT_XDR
+#endif // SANITIZER_INTERCEPT_XDR
+
+#if SANITIZER_INTERCEPT_TSEARCH
+INTERCEPTOR(void *, tsearch, void *key, void **rootp,
+ int (*compar)(const void *, const void *)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, tsearch, key, rootp, compar);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ void *res = REAL(tsearch)(key, rootp, compar);
+ if (res && *(void **)res == key)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, sizeof(void *));
+ return res;
+}
+#define INIT_TSEARCH COMMON_INTERCEPT_FUNCTION(tsearch);
+#else
+#define INIT_TSEARCH
+#endif
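The `*(void **)res == key` test above relies on the layout of the node tsearch() returns: it begins with the stored key pointer, which equals the argument only when this call actually inserted a new node. A small standalone check of that behaviour with plain glibc search.h (the tiny tree is deliberately leaked):

    #include <search.h>
    #include <cstdio>
    #include <cstring>

    static int cmp(const void *a, const void *b) {
      return strcmp((const char *)a, (const char *)b);
    }

    int main() {
      void *root = nullptr;
      const char *k1 = "apple";
      char k2[] = "apple";               // equal contents, different address
      void *n1 = tsearch(k1, &root, cmp);
      void *n2 = tsearch(k2, &root, cmp);
      printf("first insert fresh: %d\n", *(const char **)n1 == k1);  // 1
      printf("second insert fresh: %d\n", *(char **)n2 == k2);       // 0 (found k1)
      return 0;
    }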
+
+#if SANITIZER_INTERCEPT_LIBIO_INTERNALS || SANITIZER_INTERCEPT_FOPEN || \
+ SANITIZER_INTERCEPT_OPEN_MEMSTREAM
+void unpoison_file(__sanitizer_FILE *fp) {
+#if SANITIZER_HAS_STRUCT_FILE
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp, sizeof(*fp));
+ if (fp->_IO_read_base && fp->_IO_read_base < fp->_IO_read_end)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp->_IO_read_base,
+ fp->_IO_read_end - fp->_IO_read_base);
+#endif // SANITIZER_HAS_STRUCT_FILE
+}
+#endif
+
+#if SANITIZER_INTERCEPT_LIBIO_INTERNALS
+// These functions are called when a .c source is built with -O2.
+INTERCEPTOR(int, __uflow, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __uflow, fp);
+ int res = REAL(__uflow)(fp);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __underflow, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __underflow, fp);
+ int res = REAL(__underflow)(fp);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __overflow, __sanitizer_FILE *fp, int ch) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __overflow, fp, ch);
+ int res = REAL(__overflow)(fp, ch);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __wuflow, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __wuflow, fp);
+ int res = REAL(__wuflow)(fp);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __wunderflow, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __wunderflow, fp);
+ int res = REAL(__wunderflow)(fp);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __woverflow, __sanitizer_FILE *fp, int ch) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __woverflow, fp, ch);
+ int res = REAL(__woverflow)(fp, ch);
+ unpoison_file(fp);
+ return res;
+}
+#define INIT_LIBIO_INTERNALS \
+ COMMON_INTERCEPT_FUNCTION(__uflow); \
+ COMMON_INTERCEPT_FUNCTION(__underflow); \
+ COMMON_INTERCEPT_FUNCTION(__overflow); \
+ COMMON_INTERCEPT_FUNCTION(__wuflow); \
+ COMMON_INTERCEPT_FUNCTION(__wunderflow); \
+ COMMON_INTERCEPT_FUNCTION(__woverflow);
+#else
+#define INIT_LIBIO_INTERNALS
+#endif
+
+#if SANITIZER_INTERCEPT_FOPEN
+INTERCEPTOR(__sanitizer_FILE *, fopen, const char *path, const char *mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fopen, path, mode);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ __sanitizer_FILE *res = REAL(fopen)(path, mode);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
+ if (res) unpoison_file(res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, fdopen, int fd, const char *mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fdopen, fd, mode);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ __sanitizer_FILE *res = REAL(fdopen)(fd, mode);
+ if (res) unpoison_file(res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, freopen, const char *path, const char *mode,
+ __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, freopen, path, mode, fp);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
+ __sanitizer_FILE *res = REAL(freopen)(path, mode, fp);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
+ if (res) unpoison_file(res);
+ return res;
+}
+#define INIT_FOPEN \
+ COMMON_INTERCEPT_FUNCTION(fopen); \
+ COMMON_INTERCEPT_FUNCTION(fdopen); \
+ COMMON_INTERCEPT_FUNCTION(freopen);
+#else
+#define INIT_FOPEN
+#endif
+
+#if SANITIZER_INTERCEPT_FOPEN64
+INTERCEPTOR(__sanitizer_FILE *, fopen64, const char *path, const char *mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fopen64, path, mode);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ __sanitizer_FILE *res = REAL(fopen64)(path, mode);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
+ if (res) unpoison_file(res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, freopen64, const char *path, const char *mode,
+ __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, freopen64, path, mode, fp);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
+ __sanitizer_FILE *res = REAL(freopen64)(path, mode, fp);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
+ if (res) unpoison_file(res);
+ return res;
+}
+#define INIT_FOPEN64 \
+ COMMON_INTERCEPT_FUNCTION(fopen64); \
+ COMMON_INTERCEPT_FUNCTION(freopen64);
+#else
+#define INIT_FOPEN64
+#endif
+
+#if SANITIZER_INTERCEPT_OPEN_MEMSTREAM
+INTERCEPTOR(__sanitizer_FILE *, open_memstream, char **ptr, SIZE_T *sizeloc) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, open_memstream, ptr, sizeloc);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ __sanitizer_FILE *res = REAL(open_memstream)(ptr, sizeloc);
+ if (res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, sizeof(*ptr));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizeloc, sizeof(*sizeloc));
+ unpoison_file(res);
+ FileMetadata file = {ptr, sizeloc};
+ SetInterceptorMetadata(res, file);
+ }
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, open_wmemstream, wchar_t **ptr,
+ SIZE_T *sizeloc) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, open_wmemstream, ptr, sizeloc);
+ __sanitizer_FILE *res = REAL(open_wmemstream)(ptr, sizeloc);
+ if (res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, sizeof(*ptr));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizeloc, sizeof(*sizeloc));
+ unpoison_file(res);
+ FileMetadata file = {(char **)ptr, sizeloc};
+ SetInterceptorMetadata(res, file);
+ }
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, fmemopen, void *buf, SIZE_T size,
+ const char *mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fmemopen, buf, size, mode);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ __sanitizer_FILE *res = REAL(fmemopen)(buf, size, mode);
+ if (res) unpoison_file(res);
+ return res;
+}
+#define INIT_OPEN_MEMSTREAM \
+ COMMON_INTERCEPT_FUNCTION(open_memstream); \
+ COMMON_INTERCEPT_FUNCTION(open_wmemstream); \
+ COMMON_INTERCEPT_FUNCTION(fmemopen);
+#else
+#define INIT_OPEN_MEMSTREAM
+#endif
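The FileMetadata recorded here is what lets the fflush/fclose interceptors further down unpoison the stream's backing buffer. Plain open_memstream usage shows why that is the right moment: *ptr and *sizeloc only describe the written data after a flush or close. A standalone sketch, assuming ordinary glibc:

    #include <cstdio>
    #include <cstdlib>

    int main() {
      char *buf = nullptr;
      size_t size = 0;
      FILE *f = open_memstream(&buf, &size);
      fprintf(f, "hello %d", 42);
      fflush(f);                    // now buf/size describe the written data
      printf("%zu: %s\n", size, buf);
      fclose(f);
      free(buf);                    // caller owns the buffer after fclose
      return 0;
    }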
+
+#if SANITIZER_INTERCEPT_OBSTACK
+static void initialize_obstack(__sanitizer_obstack *obstack) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(obstack, sizeof(*obstack));
+ if (obstack->chunk)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(obstack->chunk,
+ sizeof(*obstack->chunk));
+}
+
+INTERCEPTOR(int, _obstack_begin_1, __sanitizer_obstack *obstack, int sz,
+ int align, void *(*alloc_fn)(uptr arg, uptr sz),
+ void (*free_fn)(uptr arg, void *p)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, _obstack_begin_1, obstack, sz, align, alloc_fn,
+ free_fn);
+ int res = REAL(_obstack_begin_1)(obstack, sz, align, alloc_fn, free_fn);
+ if (res) initialize_obstack(obstack);
+ return res;
+}
+INTERCEPTOR(int, _obstack_begin, __sanitizer_obstack *obstack, int sz,
+ int align, void *(*alloc_fn)(uptr sz), void (*free_fn)(void *p)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, _obstack_begin, obstack, sz, align, alloc_fn,
+ free_fn);
+ int res = REAL(_obstack_begin)(obstack, sz, align, alloc_fn, free_fn);
+ if (res) initialize_obstack(obstack);
+ return res;
+}
+INTERCEPTOR(void, _obstack_newchunk, __sanitizer_obstack *obstack, int length) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, _obstack_newchunk, obstack, length);
+ REAL(_obstack_newchunk)(obstack, length);
+ if (obstack->chunk)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(
+ obstack->chunk, obstack->next_free - (char *)obstack->chunk);
+}
+#define INIT_OBSTACK \
+ COMMON_INTERCEPT_FUNCTION(_obstack_begin_1); \
+ COMMON_INTERCEPT_FUNCTION(_obstack_begin); \
+ COMMON_INTERCEPT_FUNCTION(_obstack_newchunk);
+#else
+#define INIT_OBSTACK
+#endif
+
+#if SANITIZER_INTERCEPT_FFLUSH
+INTERCEPTOR(int, fflush, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fflush, fp);
+ int res = REAL(fflush)(fp);
+ // FIXME: handle fp == NULL
+ if (fp) {
+ const FileMetadata *m = GetInterceptorMetadata(fp);
+ if (m) COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size);
+ }
+ return res;
+}
+#define INIT_FFLUSH COMMON_INTERCEPT_FUNCTION(fflush);
+#else
+#define INIT_FFLUSH
+#endif
+
+#if SANITIZER_INTERCEPT_FCLOSE
+INTERCEPTOR(int, fclose, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fclose, fp);
+ if (fp) {
+ COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
+ const FileMetadata *m = GetInterceptorMetadata(fp);
+ if (m) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size);
+ DeleteInterceptorMetadata(fp);
+ }
+ }
+ return REAL(fclose)(fp);
+}
+#define INIT_FCLOSE COMMON_INTERCEPT_FUNCTION(fclose);
+#else
+#define INIT_FCLOSE
+#endif
+
+#if SANITIZER_INTERCEPT_DLOPEN_DLCLOSE
+INTERCEPTOR(void*, dlopen, const char *filename, int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlopen, filename, flag);
+ void *res = REAL(dlopen)(filename, flag);
+ COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res);
+ return res;
+}
+
+INTERCEPTOR(int, dlclose, void *handle) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlclose, handle);
+ int res = REAL(dlclose)(handle);
+ COMMON_INTERCEPTOR_LIBRARY_UNLOADED();
+ return res;
+}
+#define INIT_DLOPEN_DLCLOSE \
+ COMMON_INTERCEPT_FUNCTION(dlopen); \
+ COMMON_INTERCEPT_FUNCTION(dlclose);
+#else
+#define INIT_DLOPEN_DLCLOSE
+#endif
+
+#if SANITIZER_INTERCEPT_GETPASS
+INTERCEPTOR(char *, getpass, const char *prompt) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpass, prompt);
+ if (prompt)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, prompt, REAL(strlen)(prompt)+1);
+ char *res = REAL(getpass)(prompt);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res)+1);
+ return res;
+}
+
+#define INIT_GETPASS COMMON_INTERCEPT_FUNCTION(getpass);
+#else
+#define INIT_GETPASS
+#endif
+
+#if SANITIZER_INTERCEPT_TIMERFD
+INTERCEPTOR(int, timerfd_settime, int fd, int flags, void *new_value,
+ void *old_value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, timerfd_settime, fd, flags, new_value,
+ old_value);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, new_value, struct_itimerspec_sz);
+ int res = REAL(timerfd_settime)(fd, flags, new_value, old_value);
+ if (res != -1 && old_value)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, old_value, struct_itimerspec_sz);
+ return res;
+}
+
+INTERCEPTOR(int, timerfd_gettime, int fd, void *curr_value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, timerfd_gettime, fd, curr_value);
+ int res = REAL(timerfd_gettime)(fd, curr_value);
+ if (res != -1 && curr_value)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, struct_itimerspec_sz);
+ return res;
+}
+#define INIT_TIMERFD \
+ COMMON_INTERCEPT_FUNCTION(timerfd_settime); \
+ COMMON_INTERCEPT_FUNCTION(timerfd_gettime);
+#else
+#define INIT_TIMERFD
+#endif
+
+#if SANITIZER_INTERCEPT_MLOCKX
+// Linux kernel has a bug that leads to kernel deadlock if a process
+// maps TBs of memory and then calls mlock().
+static void MlockIsUnsupported() {
+ static atomic_uint8_t printed;
+ if (atomic_exchange(&printed, 1, memory_order_relaxed))
+ return;
+ VPrintf(1, "INFO: %s ignores mlock/mlockall/munlock/munlockall\n",
+ SanitizerToolName);
+}
+
+INTERCEPTOR(int, mlock, const void *addr, uptr len) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+INTERCEPTOR(int, munlock, const void *addr, uptr len) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+INTERCEPTOR(int, mlockall, int flags) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+INTERCEPTOR(int, munlockall, void) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+#define INIT_MLOCKX \
+ COMMON_INTERCEPT_FUNCTION(mlock); \
+ COMMON_INTERCEPT_FUNCTION(munlock); \
+ COMMON_INTERCEPT_FUNCTION(mlockall); \
+ COMMON_INTERCEPT_FUNCTION(munlockall);
+
+#else
+#define INIT_MLOCKX
+#endif // SANITIZER_INTERCEPT_MLOCKX
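MlockIsUnsupported() uses atomic_exchange as a print-once guard. The same idiom in portable C++, with std::atomic standing in for the sanitizer's own atomic_uint8_t:

    #include <atomic>
    #include <cstdio>

    static void WarnOnce() {
      static std::atomic<bool> printed{false};
      if (printed.exchange(true, std::memory_order_relaxed)) return;
      fprintf(stderr, "INFO: mlock/mlockall/munlock/munlockall are ignored\n");
    }

    int main() {
      for (int i = 0; i < 3; ++i) WarnOnce();  // message appears exactly once
      return 0;
    }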
+
+static void InitializeCommonInterceptors() {
+ static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1];
+ interceptor_metadata_map = new((void *)&metadata_mem) MetadataHashMap();
+
+ INIT_TEXTDOMAIN;
+ INIT_STRCMP;
+ INIT_STRNCMP;
+ INIT_STRCASECMP;
+ INIT_STRNCASECMP;
+ INIT_MEMCHR;
+ INIT_MEMRCHR;
+ INIT_READ;
+ INIT_PREAD;
+ INIT_PREAD64;
+ INIT_READV;
+ INIT_PREADV;
+ INIT_PREADV64;
+ INIT_WRITE;
+ INIT_PWRITE;
+ INIT_PWRITE64;
+ INIT_WRITEV;
+ INIT_PWRITEV;
+ INIT_PWRITEV64;
+ INIT_PRCTL;
+ INIT_LOCALTIME_AND_FRIENDS;
+ INIT_STRPTIME;
+ INIT_SCANF;
+ INIT_ISOC99_SCANF;
+ INIT_PRINTF;
+ INIT_ISOC99_PRINTF;
+ INIT_FREXP;
+ INIT_FREXPF_FREXPL;
+ INIT_GETPWNAM_AND_FRIENDS;
+ INIT_GETPWNAM_R_AND_FRIENDS;
+ INIT_GETPWENT;
+ INIT_FGETPWENT;
+ INIT_GETPWENT_R;
+ INIT_SETPWENT;
+ INIT_CLOCK_GETTIME;
+ INIT_GETITIMER;
+ INIT_TIME;
+ INIT_GLOB;
+ INIT_WAIT;
+ INIT_WAIT4;
+ INIT_INET;
+ INIT_PTHREAD_GETSCHEDPARAM;
+ INIT_GETADDRINFO;
+ INIT_GETNAMEINFO;
+ INIT_GETSOCKNAME;
+ INIT_GETHOSTBYNAME;
+ INIT_GETHOSTBYNAME_R;
+ INIT_GETHOSTBYNAME2_R;
+ INIT_GETHOSTBYADDR_R;
+ INIT_GETHOSTENT_R;
+ INIT_GETSOCKOPT;
+ INIT_ACCEPT;
+ INIT_ACCEPT4;
+ INIT_MODF;
+ INIT_RECVMSG;
+ INIT_GETPEERNAME;
+ INIT_IOCTL;
+ INIT_INET_ATON;
+ INIT_SYSINFO;
+ INIT_READDIR;
+ INIT_READDIR64;
+ INIT_PTRACE;
+ INIT_SETLOCALE;
+ INIT_GETCWD;
+ INIT_GET_CURRENT_DIR_NAME;
+ INIT_STRTOIMAX;
+ INIT_MBSTOWCS;
+ INIT_MBSNRTOWCS;
+ INIT_WCSTOMBS;
+ INIT_WCSNRTOMBS;
+ INIT_TCGETATTR;
+ INIT_REALPATH;
+ INIT_CANONICALIZE_FILE_NAME;
+ INIT_CONFSTR;
+ INIT_SCHED_GETAFFINITY;
+ INIT_STRERROR;
+ INIT_STRERROR_R;
+ INIT_XPG_STRERROR_R;
+ INIT_SCANDIR;
+ INIT_SCANDIR64;
+ INIT_GETGROUPS;
+ INIT_POLL;
+ INIT_PPOLL;
+ INIT_WORDEXP;
+ INIT_SIGWAIT;
+ INIT_SIGWAITINFO;
+ INIT_SIGTIMEDWAIT;
+ INIT_SIGSETOPS;
+ INIT_SIGPENDING;
+ INIT_SIGPROCMASK;
+ INIT_BACKTRACE;
+ INIT__EXIT;
+ INIT_PTHREAD_MUTEX_LOCK;
+ INIT_PTHREAD_MUTEX_UNLOCK;
+ INIT_GETMNTENT;
+ INIT_GETMNTENT_R;
+ INIT_STATFS;
+ INIT_STATFS64;
+ INIT_STATVFS;
+ INIT_STATVFS64;
+ INIT_INITGROUPS;
+ INIT_ETHER_NTOA_ATON;
+ INIT_ETHER_HOST;
+ INIT_ETHER_R;
+ INIT_SHMCTL;
+ INIT_RANDOM_R;
+ INIT_PTHREAD_ATTR_GET;
+ INIT_PTHREAD_ATTR_GETINHERITSCHED;
+ INIT_PTHREAD_ATTR_GETAFFINITY_NP;
+ INIT_PTHREAD_MUTEXATTR_GETPSHARED;
+ INIT_PTHREAD_MUTEXATTR_GETTYPE;
+ INIT_PTHREAD_MUTEXATTR_GETPROTOCOL;
+ INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING;
+ INIT_PTHREAD_MUTEXATTR_GETROBUST;
+ INIT_PTHREAD_MUTEXATTR_GETROBUST_NP;
+ INIT_PTHREAD_RWLOCKATTR_GETPSHARED;
+ INIT_PTHREAD_RWLOCKATTR_GETKIND_NP;
+ INIT_PTHREAD_CONDATTR_GETPSHARED;
+ INIT_PTHREAD_CONDATTR_GETCLOCK;
+ INIT_PTHREAD_BARRIERATTR_GETPSHARED;
+ INIT_TMPNAM;
+ INIT_TMPNAM_R;
+ INIT_TEMPNAM;
+ INIT_PTHREAD_SETNAME_NP;
+ INIT_SINCOS;
+ INIT_REMQUO;
+ INIT_LGAMMA;
+ INIT_LGAMMA_R;
+ INIT_LGAMMAL_R;
+ INIT_DRAND48_R;
+ INIT_RAND_R;
+ INIT_GETLINE;
+ INIT_ICONV;
+ INIT_TIMES;
+ INIT_TLS_GET_ADDR;
+ INIT_LISTXATTR;
+ INIT_GETXATTR;
+ INIT_GETRESID;
+ INIT_GETIFADDRS;
+ INIT_IF_INDEXTONAME;
+ INIT_CAPGET;
+ INIT_AEABI_MEM;
+ INIT___BZERO;
+ INIT_FTIME;
+ INIT_XDR;
+ INIT_TSEARCH;
+ INIT_LIBIO_INTERNALS;
+ INIT_FOPEN;
+ INIT_FOPEN64;
+ INIT_OPEN_MEMSTREAM;
+ INIT_OBSTACK;
+ INIT_FFLUSH;
+ INIT_FCLOSE;
+ INIT_DLOPEN_DLCLOSE;
+ INIT_GETPASS;
+ INIT_TIMERFD;
+ INIT_MLOCKX;
+}
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc
new file mode 100644
index 0000000000..26cbe68dc9
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc
@@ -0,0 +1,557 @@
+//===-- sanitizer_common_interceptors_format.inc ----------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Scanf/printf implementation for use in *Sanitizer interceptors.
+// Follows http://pubs.opengroup.org/onlinepubs/9699919799/functions/fscanf.html
+// and http://pubs.opengroup.org/onlinepubs/9699919799/functions/fprintf.html
+// with a few common GNU extensions.
+//
+//===----------------------------------------------------------------------===//
+#include <stdarg.h>
+
+static const char *parse_number(const char *p, int *out) {
+ *out = internal_atoll(p);
+ while (*p >= '0' && *p <= '9')
+ ++p;
+ return p;
+}
+
+static const char *maybe_parse_param_index(const char *p, int *out) {
+ // n$
+ if (*p >= '0' && *p <= '9') {
+ int number;
+ const char *q = parse_number(p, &number);
+ CHECK(q);
+ if (*q == '$') {
+ *out = number;
+ p = q + 1;
+ }
+ }
+
+ // Otherwise, do not change p. This will be re-parsed later as the field
+ // width.
+ return p;
+}
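To make the `n$` handling concrete, here is a self-contained mirror of the two helpers (libc atoi/isdigit stand in for internal_atoll and the digit test): "3$d" consumes the positional index, while "10s" is left untouched so the 10 is later re-parsed as a field width.

    #include <cassert>
    #include <cstdlib>
    #include <cctype>

    static const char *parse_number(const char *p, int *out) {
      *out = atoi(p);
      while (isdigit((unsigned char)*p)) ++p;
      return p;
    }

    static const char *maybe_parse_param_index(const char *p, int *out) {
      if (isdigit((unsigned char)*p)) {
        int number;
        const char *q = parse_number(p, &number);
        if (*q == '$') { *out = number; p = q + 1; }
      }
      return p;
    }

    int main() {
      int idx = -1;
      // "%3$d": the 3 is a positional argument index.
      assert(*maybe_parse_param_index("3$d", &idx) == 'd' && idx == 3);
      // "%10s": no '$', so p is left alone and 10 becomes the field width.
      idx = -1;
      assert(*maybe_parse_param_index("10s", &idx) == '1' && idx == -1);
      return 0;
    }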
+
+static bool char_is_one_of(char c, const char *s) {
+ return !!internal_strchr(s, c);
+}
+
+static const char *maybe_parse_length_modifier(const char *p, char ll[2]) {
+ if (char_is_one_of(*p, "jztLq")) {
+ ll[0] = *p;
+ ++p;
+ } else if (*p == 'h') {
+ ll[0] = 'h';
+ ++p;
+ if (*p == 'h') {
+ ll[1] = 'h';
+ ++p;
+ }
+ } else if (*p == 'l') {
+ ll[0] = 'l';
+ ++p;
+ if (*p == 'l') {
+ ll[1] = 'l';
+ ++p;
+ }
+ }
+ return p;
+}
+
+// Returns true if the character is an integer conversion specifier.
+static bool format_is_integer_conv(char c) {
+ return char_is_one_of(c, "diouxXn");
+}
+
+// Returns true if the character is a floating-point conversion specifier.
+static bool format_is_float_conv(char c) {
+ return char_is_one_of(c, "aAeEfFgG");
+}
+
+// Returns string output character size for string-like conversions,
+// or 0 if the conversion is invalid.
+static int format_get_char_size(char convSpecifier,
+ const char lengthModifier[2]) {
+ if (char_is_one_of(convSpecifier, "CS")) {
+ return sizeof(wchar_t);
+ }
+
+ if (char_is_one_of(convSpecifier, "cs[")) {
+ if (lengthModifier[0] == 'l' && lengthModifier[1] == '\0')
+ return sizeof(wchar_t);
+ else if (lengthModifier[0] == '\0')
+ return sizeof(char);
+ }
+
+ return 0;
+}
+
+enum FormatStoreSize {
+ // Store size not known in advance; can be calculated as wcslen() of the
+ // destination buffer.
+ FSS_WCSLEN = -2,
+ // Store size not known in advance; can be calculated as strlen() of the
+ // destination buffer.
+ FSS_STRLEN = -1,
+ // Invalid conversion specifier.
+ FSS_INVALID = 0
+};
+
+// Returns the memory size of a format directive (if >0), or a value of
+// FormatStoreSize.
+static int format_get_value_size(char convSpecifier,
+ const char lengthModifier[2],
+ bool promote_float) {
+ if (format_is_integer_conv(convSpecifier)) {
+ switch (lengthModifier[0]) {
+ case 'h':
+ return lengthModifier[1] == 'h' ? sizeof(char) : sizeof(short);
+ case 'l':
+ return lengthModifier[1] == 'l' ? sizeof(long long) : sizeof(long);
+ case 'q':
+ return sizeof(long long);
+ case 'L':
+ return sizeof(long long);
+ case 'j':
+ return sizeof(INTMAX_T);
+ case 'z':
+ return sizeof(SIZE_T);
+ case 't':
+ return sizeof(PTRDIFF_T);
+ case 0:
+ return sizeof(int);
+ default:
+ return FSS_INVALID;
+ }
+ }
+
+ if (format_is_float_conv(convSpecifier)) {
+ switch (lengthModifier[0]) {
+ case 'L':
+ case 'q':
+ return sizeof(long double);
+ case 'l':
+ return lengthModifier[1] == 'l' ? sizeof(long double)
+ : sizeof(double);
+ case 0:
+ // Printf promotes floats to doubles but scanf does not
+ return promote_float ? sizeof(double) : sizeof(float);
+ default:
+ return FSS_INVALID;
+ }
+ }
+
+ if (convSpecifier == 'p') {
+ if (lengthModifier[0] != 0)
+ return FSS_INVALID;
+ return sizeof(void *);
+ }
+
+ return FSS_INVALID;
+}
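The promote_float flag exists because of C's default argument promotions: a float passed through `...` arrives as a double, so printf must account for sizeof(double), while scanf receives a float* and stores only sizeof(float). A quick standalone check of the promotion rule (sizes shown are the typical 4 vs 8):

    #include <cstdarg>
    #include <cstdio>

    static double take_variadic(int n, ...) {
      va_list ap;
      va_start(ap, n);
      double d = va_arg(ap, double);  // a float argument arrives promoted
      va_end(ap);
      return d;
    }

    int main() {
      float f = 1.5f;
      printf("%g\n", take_variadic(1, f));                      // 1.5
      printf("%zu vs %zu\n", sizeof(float), sizeof(double));    // 4 vs 8 (typically)
      return 0;
    }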
+
+struct ScanfDirective {
+ int argIdx; // argument index, or -1 if not specified ("%n$")
+ int fieldWidth;
+ const char *begin;
+ const char *end;
+ bool suppressed; // suppress assignment ("*")
+ bool allocate; // allocate space ("m")
+ char lengthModifier[2];
+ char convSpecifier;
+ bool maybeGnuMalloc;
+};
+
+// Parse scanf format string. If a valid directive is encountered, it is
+// returned in dir. This function returns the pointer to the first
+// unprocessed character, or 0 in case of error.
+// In case of the end-of-string, a pointer to the closing \0 is returned.
+static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
+ ScanfDirective *dir) {
+ internal_memset(dir, 0, sizeof(*dir));
+ dir->argIdx = -1;
+
+ while (*p) {
+ if (*p != '%') {
+ ++p;
+ continue;
+ }
+ dir->begin = p;
+ ++p;
+ // %%
+ if (*p == '%') {
+ ++p;
+ continue;
+ }
+ if (*p == '\0') {
+ return 0;
+ }
+ // %n$
+ p = maybe_parse_param_index(p, &dir->argIdx);
+ CHECK(p);
+ // *
+ if (*p == '*') {
+ dir->suppressed = true;
+ ++p;
+ }
+ // Field width
+ if (*p >= '0' && *p <= '9') {
+ p = parse_number(p, &dir->fieldWidth);
+ CHECK(p);
+      if (dir->fieldWidth <= 0)  // A specified width must be positive.
+ return 0;
+ }
+ // m
+ if (*p == 'm') {
+ dir->allocate = true;
+ ++p;
+ }
+ // Length modifier.
+ p = maybe_parse_length_modifier(p, dir->lengthModifier);
+ // Conversion specifier.
+ dir->convSpecifier = *p++;
+ // Consume %[...] expression.
+ if (dir->convSpecifier == '[') {
+ if (*p == '^')
+ ++p;
+ if (*p == ']')
+ ++p;
+ while (*p && *p != ']')
+ ++p;
+ if (*p == 0)
+ return 0; // unexpected end of string
+ // Consume the closing ']'.
+ ++p;
+ }
+ // This is unfortunately ambiguous between old GNU extension
+ // of %as, %aS and %a[...] and newer POSIX %a followed by
+ // letters s, S or [.
+ if (allowGnuMalloc && dir->convSpecifier == 'a' &&
+ !dir->lengthModifier[0]) {
+ if (*p == 's' || *p == 'S') {
+ dir->maybeGnuMalloc = true;
+ ++p;
+ } else if (*p == '[') {
+ // Watch for %a[h-j%d], if % appears in the
+ // [...] range, then we need to give up, we don't know
+ // if scanf will parse it as POSIX %a [h-j %d ] or
+ // GNU allocation of string with range dh-j plus %.
+ const char *q = p + 1;
+ if (*q == '^')
+ ++q;
+ if (*q == ']')
+ ++q;
+ while (*q && *q != ']' && *q != '%')
+ ++q;
+ if (*q == 0 || *q == '%')
+ return 0;
+ p = q + 1; // Consume the closing ']'.
+ dir->maybeGnuMalloc = true;
+ }
+ }
+ dir->end = p;
+ break;
+ }
+ return p;
+}
+
+static int scanf_get_value_size(ScanfDirective *dir) {
+ if (dir->allocate) {
+ if (!char_is_one_of(dir->convSpecifier, "cCsS["))
+ return FSS_INVALID;
+ return sizeof(char *);
+ }
+
+ if (dir->maybeGnuMalloc) {
+ if (dir->convSpecifier != 'a' || dir->lengthModifier[0])
+ return FSS_INVALID;
+ // This is ambiguous, so check the smaller size of char * (if it is
+ // a GNU extension of %as, %aS or %a[...]) and float (if it is
+ // POSIX %a followed by s, S or [ letters).
+ return sizeof(char *) < sizeof(float) ? sizeof(char *) : sizeof(float);
+ }
+
+ if (char_is_one_of(dir->convSpecifier, "cCsS[")) {
+ bool needsTerminator = char_is_one_of(dir->convSpecifier, "sS[");
+ unsigned charSize =
+ format_get_char_size(dir->convSpecifier, dir->lengthModifier);
+ if (charSize == 0)
+ return FSS_INVALID;
+ if (dir->fieldWidth == 0) {
+ if (!needsTerminator)
+ return charSize;
+ return (charSize == sizeof(char)) ? FSS_STRLEN : FSS_WCSLEN;
+ }
+ return (dir->fieldWidth + needsTerminator) * charSize;
+ }
+
+ return format_get_value_size(dir->convSpecifier, dir->lengthModifier, false);
+}
+
+// Common part of *scanf interceptors.
+// Process format string and va_list, and report all store ranges.
+// Stops when "consuming" n_inputs input items.
+static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
+ const char *format, va_list aq) {
+ CHECK_GT(n_inputs, 0);
+ const char *p = format;
+
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, format, internal_strlen(format) + 1);
+
+ while (*p) {
+ ScanfDirective dir;
+ p = scanf_parse_next(p, allowGnuMalloc, &dir);
+ if (!p)
+ break;
+ if (dir.convSpecifier == 0) {
+ // This can only happen at the end of the format string.
+ CHECK_EQ(*p, 0);
+ break;
+ }
+ // Here the directive is valid. Do what it says.
+ if (dir.argIdx != -1) {
+ // Unsupported.
+ break;
+ }
+ if (dir.suppressed)
+ continue;
+ int size = scanf_get_value_size(&dir);
+ if (size == FSS_INVALID) {
+ Report("WARNING: unexpected format specifier in scanf interceptor: "
+ "%.*s\n", dir.end - dir.begin, dir.begin);
+ break;
+ }
+ void *argp = va_arg(aq, void *);
+ if (dir.convSpecifier != 'n')
+ --n_inputs;
+ if (n_inputs < 0)
+ break;
+ if (size == FSS_STRLEN) {
+ size = internal_strlen((const char *)argp) + 1;
+ } else if (size == FSS_WCSLEN) {
+ // FIXME: actually use wcslen() to calculate it.
+ size = 0;
+ }
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
+ }
+}
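For a concrete call this means: with plain glibc sscanf and the format "%d %5s", the code above reports a store of sizeof(int) for the first argument and fieldWidth + 1 == 6 bytes for the second, which is exactly the buffer the caller must provide.

    #include <cstdio>

    int main() {
      int n = 0;
      char word[6];                        // the 6 bytes the checker expects
      sscanf("123 hello", "%d %5s", &n, word);
      printf("%d '%s'\n", n, word);        // 123 'hello'
      return 0;
    }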
+
+#if SANITIZER_INTERCEPT_PRINTF
+
+struct PrintfDirective {
+ int fieldWidth;
+ int fieldPrecision;
+ int argIdx; // width argument index, or -1 if not specified ("%*n$")
+ int precisionIdx; // precision argument index, or -1 if not specified (".*n$")
+ const char *begin;
+ const char *end;
+ bool starredWidth;
+ bool starredPrecision;
+ char lengthModifier[2];
+ char convSpecifier;
+};
+
+static const char *maybe_parse_number(const char *p, int *out) {
+ if (*p >= '0' && *p <= '9')
+ p = parse_number(p, out);
+ return p;
+}
+
+static const char *maybe_parse_number_or_star(const char *p, int *out,
+ bool *star) {
+ if (*p == '*') {
+ *star = true;
+ ++p;
+ } else {
+ *star = false;
+ p = maybe_parse_number(p, out);
+ }
+ return p;
+}
+
+// Parse printf format string. Same as scanf_parse_next.
+static const char *printf_parse_next(const char *p, PrintfDirective *dir) {
+ internal_memset(dir, 0, sizeof(*dir));
+ dir->argIdx = -1;
+ dir->precisionIdx = -1;
+
+ while (*p) {
+ if (*p != '%') {
+ ++p;
+ continue;
+ }
+ dir->begin = p;
+ ++p;
+ // %%
+ if (*p == '%') {
+ ++p;
+ continue;
+ }
+ if (*p == '\0') {
+ return 0;
+ }
+ // %n$
+ p = maybe_parse_param_index(p, &dir->precisionIdx);
+ CHECK(p);
+ // Flags
+ while (char_is_one_of(*p, "'-+ #0")) {
+ ++p;
+ }
+ // Field width
+ p = maybe_parse_number_or_star(p, &dir->fieldWidth,
+ &dir->starredWidth);
+ if (!p)
+ return 0;
+ // Precision
+ if (*p == '.') {
+ ++p;
+ // Actual precision is optional (surprise!)
+ p = maybe_parse_number_or_star(p, &dir->fieldPrecision,
+ &dir->starredPrecision);
+ if (!p)
+ return 0;
+ // m$
+ if (dir->starredPrecision) {
+ p = maybe_parse_param_index(p, &dir->precisionIdx);
+ CHECK(p);
+ }
+ }
+ // Length modifier.
+ p = maybe_parse_length_modifier(p, dir->lengthModifier);
+ // Conversion specifier.
+ dir->convSpecifier = *p++;
+ dir->end = p;
+ break;
+ }
+ return p;
+}
+
+static int printf_get_value_size(PrintfDirective *dir) {
+ if (dir->convSpecifier == 'm') {
+ return sizeof(char *);
+ }
+
+ if (char_is_one_of(dir->convSpecifier, "cCsS")) {
+ unsigned charSize =
+ format_get_char_size(dir->convSpecifier, dir->lengthModifier);
+ if (charSize == 0)
+ return FSS_INVALID;
+ if (char_is_one_of(dir->convSpecifier, "sS")) {
+ return (charSize == sizeof(char)) ? FSS_STRLEN : FSS_WCSLEN;
+ }
+ return charSize;
+ }
+
+ return format_get_value_size(dir->convSpecifier, dir->lengthModifier, true);
+}
+
+#define SKIP_SCALAR_ARG(aq, convSpecifier, size) \
+ do { \
+ if (format_is_float_conv(convSpecifier)) { \
+ switch (size) { \
+ case 8: \
+ va_arg(*aq, double); \
+ break; \
+ case 12: \
+ va_arg(*aq, long double); \
+ break; \
+ case 16: \
+ va_arg(*aq, long double); \
+ break; \
+ default: \
+ Report("WARNING: unexpected floating-point arg size" \
+ " in printf interceptor: %d\n", size); \
+ return; \
+ } \
+ } else { \
+ switch (size) { \
+ case 1: \
+ case 2: \
+ case 4: \
+ va_arg(*aq, u32); \
+ break; \
+ case 8: \
+ va_arg(*aq, u64); \
+ break; \
+ default: \
+ Report("WARNING: unexpected arg size" \
+ " in printf interceptor: %d\n", size); \
+ return; \
+ } \
+ } \
+ } while (0)
+
+// Common part of *printf interceptors.
+// Process format string and va_list, and report all load ranges.
+static void printf_common(void *ctx, const char *format, va_list aq) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, format, internal_strlen(format) + 1);
+
+ const char *p = format;
+
+ while (*p) {
+ PrintfDirective dir;
+ p = printf_parse_next(p, &dir);
+ if (!p)
+ break;
+ if (dir.convSpecifier == 0) {
+ // This can only happen at the end of the format string.
+ CHECK_EQ(*p, 0);
+ break;
+ }
+ // Here the directive is valid. Do what it says.
+ if (dir.argIdx != -1 || dir.precisionIdx != -1) {
+ // Unsupported.
+ break;
+ }
+ if (dir.starredWidth) {
+ // Dynamic width
+ SKIP_SCALAR_ARG(&aq, 'd', sizeof(int));
+ }
+ if (dir.starredPrecision) {
+ // Dynamic precision
+ SKIP_SCALAR_ARG(&aq, 'd', sizeof(int));
+ }
+ int size = printf_get_value_size(&dir);
+ if (size == FSS_INVALID) {
+ Report("WARNING: unexpected format specifier in printf "
+ "interceptor: %.*s\n", dir.end - dir.begin, dir.begin);
+ break;
+ }
+ if (dir.convSpecifier == 'n') {
+ void *argp = va_arg(aq, void *);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
+ continue;
+ } else if (size == FSS_STRLEN) {
+ if (void *argp = va_arg(aq, void *)) {
+ if (dir.starredPrecision) {
+ // FIXME: properly support starred precision for strings.
+ size = 0;
+ } else if (dir.fieldPrecision > 0) {
+ // Won't read more than "precision" symbols.
+ size = internal_strnlen((const char *)argp, dir.fieldPrecision);
+ if (size < dir.fieldPrecision) size++;
+ } else {
+ // Whole string will be accessed.
+ size = internal_strlen((const char *)argp) + 1;
+ }
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);
+ }
+ } else if (size == FSS_WCSLEN) {
+ if (void *argp = va_arg(aq, void *)) {
+ // FIXME: Properly support wide-character strings (via wcsrtombs).
+ size = 0;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);
+ }
+ } else {
+ // Skip non-pointer args
+ SKIP_SCALAR_ARG(&aq, dir.convSpecifier, size);
+ }
+ }
+}
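And the corresponding load side: with glibc printf semantics, "%.3s" reads at most three bytes of its argument (the internal_strnlen path above), while a bare "%s" reads the full string plus its terminator. A standalone sketch:

    #include <cstdio>

    int main() {
      char partial[3] = {'a', 'b', 'c'};   // deliberately not NUL-terminated
      printf("%.3s\n", partial);           // reads exactly 3 bytes: "abc"
      const char *whole = "hello";
      printf("%s\n", whole);               // reads strlen(whole) + 1 bytes
      return 0;
    }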
+
+#endif // SANITIZER_INTERCEPT_PRINTF
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
index ac8cdae5b8..ca264d899a 100755
--- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
@@ -12,14 +12,18 @@
struct ioctl_desc {
unsigned req;
- // FIXME: support read+write arguments. Those are currently marked as WRITE.
+ // FIXME: support read+write arguments. Currently READWRITE and WRITE do the
+ // same thing.
+ // XXX: The declarations below may use WRITE instead of READWRITE, unless
+ // explicitly noted.
enum {
NONE,
READ,
WRITE,
+ READWRITE,
CUSTOM
- } type : 2;
- unsigned size : 30;
+ } type : 3;
+ unsigned size : 29;
const char* name;
};
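The bit-field widths change because the enum gained a fifth enumerator: values 0..4 need three bits, and shrinking size to 29 bits keeps type and size packed into the same 32-bit word as before. A reduced sketch of the layout with a compile-time check (the struct name here is illustrative only):

    struct demo_ioctl_desc {
      unsigned req;
      enum { NONE, READ, WRITE, READWRITE, CUSTOM } type : 3;  // CUSTOM == 4 needs 3 bits
      unsigned size : 29;                                      // 3 + 29 == 32
      const char *name;
    };
    static_assert(demo_ioctl_desc::CUSTOM == 4,
                  "five enumerators no longer fit in a 2-bit field");
    int main() { return 0; }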
@@ -487,11 +491,15 @@ static void ioctl_init() {
// Handle the most evil ioctls that encode argument value as part of request id.
static unsigned ioctl_request_fixup(unsigned req) {
#if SANITIZER_LINUX
- if ((req & ~0x3fff001fU) == IOCTL_EVIOCGBIT)
+ // Strip size and event number.
+ const unsigned kEviocgbitMask =
+ (IOC_SIZEMASK << IOC_SIZESHIFT) | EVIOC_EV_MAX;
+ if ((req & ~kEviocgbitMask) == IOCTL_EVIOCGBIT)
return IOCTL_EVIOCGBIT;
- if ((req & ~0x3fU) == IOCTL_EVIOCGABS)
+ // Strip absolute axis number.
+ if ((req & ~EVIOC_ABS_MAX) == IOCTL_EVIOCGABS)
return IOCTL_EVIOCGABS;
- if ((req & ~0x3fU) == IOCTL_EVIOCSABS)
+ if ((req & ~EVIOC_ABS_MAX) == IOCTL_EVIOCSABS)
return IOCTL_EVIOCSABS;
#endif
return req;
@@ -513,24 +521,56 @@ static const ioctl_desc *ioctl_table_lookup(unsigned req) {
return 0;
}
+static bool ioctl_decode(unsigned req, ioctl_desc *desc) {
+ CHECK(desc);
+ desc->req = req;
+ desc->name = "<DECODED_IOCTL>";
+ desc->size = IOC_SIZE(req);
+ // Sanity check.
+ if (desc->size > 0xFFFF) return false;
+ unsigned dir = IOC_DIR(req);
+ switch (dir) {
+ case IOC_NONE:
+ desc->type = ioctl_desc::NONE;
+ break;
+ case IOC_READ | IOC_WRITE:
+ desc->type = ioctl_desc::READWRITE;
+ break;
+ case IOC_READ:
+ desc->type = ioctl_desc::WRITE;
+ break;
+ case IOC_WRITE:
+ desc->type = ioctl_desc::READ;
+ break;
+ default:
+ return false;
+ }
+ // Size can be 0 iff type is NONE.
+ if ((desc->type == IOC_NONE) != (desc->size == 0)) return false;
+ // Sanity check.
+ if (IOC_TYPE(req) == 0) return false;
+ return true;
+}
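The mapping in ioctl_decode looks inverted at first: a request built with _IOR (direction IOC_READ, "the kernel writes this struct back to the caller") becomes ioctl_desc::WRITE, because from the application's side the argument buffer is written, and _IOW becomes READ. A standalone peek at the kernel encoding macros (Linux only; _IOC_* are the kernel spellings of the IOC_* constants used above, and the request numbers are hypothetical):

    #include <linux/ioctl.h>
    #include <cstdio>

    struct demo_args { int in; int out; };
    #define DEMO_GET _IOR('d', 1, struct demo_args)   // kernel fills the struct
    #define DEMO_SET _IOW('d', 2, struct demo_args)   // kernel reads the struct

    int main() {
      printf("GET: dir=%s size=%u\n",
             _IOC_DIR(DEMO_GET) == _IOC_READ ? "_IOC_READ" : "?",
             (unsigned)_IOC_SIZE(DEMO_GET));
      printf("SET: dir=%s size=%u\n",
             _IOC_DIR(DEMO_SET) == _IOC_WRITE ? "_IOC_WRITE" : "?",
             (unsigned)_IOC_SIZE(DEMO_SET));
      // _IOC_READ  => kernel writes *arg => ioctl_desc::WRITE (post-call unpoison)
      // _IOC_WRITE => kernel reads *arg  => ioctl_desc::READ  (pre-call check)
      return 0;
    }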
+
static const ioctl_desc *ioctl_lookup(unsigned req) {
req = ioctl_request_fixup(req);
const ioctl_desc *desc = ioctl_table_lookup(req);
if (desc) return desc;
// Try stripping access size from the request id.
- desc = ioctl_table_lookup(req & ~0x3fff0000U);
+ desc = ioctl_table_lookup(req & ~(IOC_SIZEMASK << IOC_SIZESHIFT));
// Sanity check: requests that encode access size are either read or write and
// have size of 0 in the table.
if (desc && desc->size == 0 &&
- (desc->type == ioctl_desc::WRITE || desc->type == ioctl_desc::READ))
+ (desc->type == ioctl_desc::READWRITE || desc->type == ioctl_desc::WRITE ||
+ desc->type == ioctl_desc::READ))
return desc;
return 0;
}
static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
unsigned request, void *arg) {
- if (desc->type == ioctl_desc::READ) {
+ if (desc->type == ioctl_desc::READ || desc->type == ioctl_desc::READWRITE) {
unsigned size = desc->size ? desc->size : IOC_SIZE(request);
COMMON_INTERCEPTOR_READ_RANGE(ctx, arg, size);
}
@@ -548,7 +588,7 @@ static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
static void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d,
unsigned request, void *arg) {
- if (desc->type == ioctl_desc::WRITE) {
+ if (desc->type == ioctl_desc::WRITE || desc->type == ioctl_desc::READWRITE) {
// FIXME: add verbose output
unsigned size = desc->size ? desc->size : IOC_SIZE(request);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, arg, size);
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_scanf.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_scanf.inc
deleted file mode 100644
index 2660dada2b..0000000000
--- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_scanf.inc
+++ /dev/null
@@ -1,309 +0,0 @@
-//===-- sanitizer_common_interceptors_scanf.inc -----------------*- C++ -*-===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Scanf implementation for use in *Sanitizer interceptors.
-// Follows http://pubs.opengroup.org/onlinepubs/9699919799/functions/fscanf.html
-// with a few common GNU extensions.
-//
-//===----------------------------------------------------------------------===//
-#include <stdarg.h>
-
-struct ScanfDirective {
- int argIdx; // argument index, or -1 of not specified ("%n$")
- int fieldWidth;
- bool suppressed; // suppress assignment ("*")
- bool allocate; // allocate space ("m")
- char lengthModifier[2];
- char convSpecifier;
- bool maybeGnuMalloc;
-};
-
-static const char *parse_number(const char *p, int *out) {
- *out = internal_atoll(p);
- while (*p >= '0' && *p <= '9')
- ++p;
- return p;
-}
-
-static bool char_is_one_of(char c, const char *s) {
- return !!internal_strchr(s, c);
-}
-
-// Parse scanf format string. If a valid directive in encountered, it is
-// returned in dir. This function returns the pointer to the first
-// unprocessed character, or 0 in case of error.
-// In case of the end-of-string, a pointer to the closing \0 is returned.
-static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
- ScanfDirective *dir) {
- internal_memset(dir, 0, sizeof(*dir));
- dir->argIdx = -1;
-
- while (*p) {
- if (*p != '%') {
- ++p;
- continue;
- }
- ++p;
- // %%
- if (*p == '%') {
- ++p;
- continue;
- }
- if (*p == '\0') {
- return 0;
- }
- // %n$
- if (*p >= '0' && *p <= '9') {
- int number;
- const char *q = parse_number(p, &number);
- if (*q == '$') {
- dir->argIdx = number;
- p = q + 1;
- }
- // Otherwise, do not change p. This will be re-parsed later as the field
- // width.
- }
- // *
- if (*p == '*') {
- dir->suppressed = true;
- ++p;
- }
- // Field width.
- if (*p >= '0' && *p <= '9') {
- p = parse_number(p, &dir->fieldWidth);
- if (dir->fieldWidth <= 0)
- return 0;
- }
- // m
- if (*p == 'm') {
- dir->allocate = true;
- ++p;
- }
- // Length modifier.
- if (char_is_one_of(*p, "jztLq")) {
- dir->lengthModifier[0] = *p;
- ++p;
- } else if (*p == 'h') {
- dir->lengthModifier[0] = 'h';
- ++p;
- if (*p == 'h') {
- dir->lengthModifier[1] = 'h';
- ++p;
- }
- } else if (*p == 'l') {
- dir->lengthModifier[0] = 'l';
- ++p;
- if (*p == 'l') {
- dir->lengthModifier[1] = 'l';
- ++p;
- }
- }
- // Conversion specifier.
- dir->convSpecifier = *p++;
- // Consume %[...] expression.
- if (dir->convSpecifier == '[') {
- if (*p == '^')
- ++p;
- if (*p == ']')
- ++p;
- while (*p && *p != ']')
- ++p;
- if (*p == 0)
- return 0; // unexpected end of string
- // Consume the closing ']'.
- ++p;
- }
- // This is unfortunately ambiguous between old GNU extension
- // of %as, %aS and %a[...] and newer POSIX %a followed by
- // letters s, S or [.
- if (allowGnuMalloc && dir->convSpecifier == 'a' &&
- !dir->lengthModifier[0]) {
- if (*p == 's' || *p == 'S') {
- dir->maybeGnuMalloc = true;
- ++p;
- } else if (*p == '[') {
- // Watch for %a[h-j%d], if % appears in the
- // [...] range, then we need to give up, we don't know
- // if scanf will parse it as POSIX %a [h-j %d ] or
- // GNU allocation of string with range dh-j plus %.
- const char *q = p + 1;
- if (*q == '^')
- ++q;
- if (*q == ']')
- ++q;
- while (*q && *q != ']' && *q != '%')
- ++q;
- if (*q == 0 || *q == '%')
- return 0;
- p = q + 1; // Consume the closing ']'.
- dir->maybeGnuMalloc = true;
- }
- }
- break;
- }
- return p;
-}
-
-// Returns true if the character is an integer conversion specifier.
-static bool scanf_is_integer_conv(char c) {
- return char_is_one_of(c, "diouxXn");
-}
-
-// Returns true if the character is an floating point conversion specifier.
-static bool scanf_is_float_conv(char c) {
- return char_is_one_of(c, "aAeEfFgG");
-}
-
-// Returns string output character size for string-like conversions,
-// or 0 if the conversion is invalid.
-static int scanf_get_char_size(ScanfDirective *dir) {
- if (char_is_one_of(dir->convSpecifier, "CS")) {
- // wchar_t
- return 0;
- }
-
- if (char_is_one_of(dir->convSpecifier, "cs[")) {
- if (dir->lengthModifier[0] == 'l')
- // wchar_t
- return 0;
- else if (dir->lengthModifier[0] == 0)
- return sizeof(char);
- else
- return 0;
- }
-
- return 0;
-}
-
-enum ScanfStoreSize {
- // Store size not known in advance; can be calculated as strlen() of the
- // destination buffer.
- SSS_STRLEN = -1,
- // Invalid conversion specifier.
- SSS_INVALID = 0
-};
-
-// Returns the store size of a scanf directive (if >0), or a value of
-// ScanfStoreSize.
-static int scanf_get_store_size(ScanfDirective *dir) {
- if (dir->allocate) {
- if (!char_is_one_of(dir->convSpecifier, "cCsS["))
- return SSS_INVALID;
- return sizeof(char *);
- }
-
- if (dir->maybeGnuMalloc) {
- if (dir->convSpecifier != 'a' || dir->lengthModifier[0])
- return SSS_INVALID;
- // This is ambiguous, so check the smaller size of char * (if it is
- // a GNU extension of %as, %aS or %a[...]) and float (if it is
- // POSIX %a followed by s, S or [ letters).
- return sizeof(char *) < sizeof(float) ? sizeof(char *) : sizeof(float);
- }
-
- if (scanf_is_integer_conv(dir->convSpecifier)) {
- switch (dir->lengthModifier[0]) {
- case 'h':
- return dir->lengthModifier[1] == 'h' ? sizeof(char) : sizeof(short);
- case 'l':
- return dir->lengthModifier[1] == 'l' ? sizeof(long long) : sizeof(long);
- case 'L':
- return sizeof(long long);
- case 'j':
- return sizeof(INTMAX_T);
- case 'z':
- return sizeof(SIZE_T);
- case 't':
- return sizeof(PTRDIFF_T);
- case 0:
- return sizeof(int);
- default:
- return SSS_INVALID;
- }
- }
-
- if (scanf_is_float_conv(dir->convSpecifier)) {
- switch (dir->lengthModifier[0]) {
- case 'L':
- case 'q':
- return sizeof(long double);
- case 'l':
- return dir->lengthModifier[1] == 'l' ? sizeof(long double)
- : sizeof(double);
- case 0:
- return sizeof(float);
- default:
- return SSS_INVALID;
- }
- }
-
- if (char_is_one_of(dir->convSpecifier, "sS[")) {
- unsigned charSize = scanf_get_char_size(dir);
- if (charSize == 0)
- return SSS_INVALID;
- if (dir->fieldWidth == 0)
- return SSS_STRLEN;
- return (dir->fieldWidth + 1) * charSize;
- }
-
- if (char_is_one_of(dir->convSpecifier, "cC")) {
- unsigned charSize = scanf_get_char_size(dir);
- if (charSize == 0)
- return SSS_INVALID;
- if (dir->fieldWidth == 0)
- return charSize;
- return dir->fieldWidth * charSize;
- }
-
- if (dir->convSpecifier == 'p') {
- if (dir->lengthModifier[1] != 0)
- return SSS_INVALID;
- return sizeof(void *);
- }
-
- return SSS_INVALID;
-}
-
-// Common part of *scanf interceptors.
-// Process format string and va_list, and report all store ranges.
-// Stops when "consuming" n_inputs input items.
-static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
- const char *format, va_list aq) {
- CHECK_GT(n_inputs, 0);
- const char *p = format;
-
- while (*p) {
- ScanfDirective dir;
- p = scanf_parse_next(p, allowGnuMalloc, &dir);
- if (!p)
- break;
- if (dir.convSpecifier == 0) {
- // This can only happen at the end of the format string.
- CHECK_EQ(*p, 0);
- break;
- }
- // Here the directive is valid. Do what it says.
- if (dir.argIdx != -1) {
- // Unsupported.
- break;
- }
- if (dir.suppressed)
- continue;
- int size = scanf_get_store_size(&dir);
- if (size == SSS_INVALID)
- break;
- void *argp = va_arg(aq, void *);
- if (dir.convSpecifier != 'n')
- --n_inputs;
- if (n_inputs < 0)
- break;
- if (size == SSS_STRLEN) {
- size = internal_strlen((const char *)argp) + 1;
- }
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
- }
-}
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc
index 215a61deab..b5251444f0 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc
@@ -10,6 +10,9 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_stacktrace.h"
+#include "sanitizer_symbolizer.h"
namespace __sanitizer {
@@ -32,4 +35,38 @@ bool PrintsToTtyCached() {
}
return prints_to_tty;
}
+
+bool ColorizeReports() {
+ const char *flag = common_flags()->color;
+ return internal_strcmp(flag, "always") == 0 ||
+ (internal_strcmp(flag, "auto") == 0 && PrintsToTtyCached());
+}
+
+static void (*sandboxing_callback)();
+void SetSandboxingCallback(void (*f)()) {
+ sandboxing_callback = f;
+}
+
+void ReportErrorSummary(const char *error_type, StackTrace *stack) {
+ if (!common_flags()->print_summary)
+ return;
+ AddressInfo ai;
+#if !SANITIZER_GO
+ if (stack->size > 0 && Symbolizer::GetOrInit()->CanReturnFileLineInfo()) {
+    // Currently, we include the first stack frame in the report summary.
+    // We may sometimes need to choose another frame (e.g. to skip memcpy/etc).
+ uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
+ Symbolizer::GetOrInit()->SymbolizePC(pc, &ai, 1);
+ }
+#endif
+ ReportErrorSummary(error_type, ai.file, ai.line, ai.function);
+}
+
} // namespace __sanitizer
+
+void NOINLINE
+__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args) {
+ PrepareForSandboxing(args);
+ if (sandboxing_callback)
+ sandboxing_callback();
+}
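The new __sanitizer_sandbox_on_notify() entry point lets a process tell the runtime that it is about to enter a sandbox: PrepareForSandboxing() runs first, then any callback registered through SetSandboxingCallback(). A hedged sketch of a caller, assuming the __sanitizer_sandbox_arguments fields (coverage_sandboxed, coverage_fd, coverage_max_block_size) from the updated common_interface_defs.h; the descriptor and block size below are placeholders:

    // Sketch of a client notifying the runtime before sandboxing itself.
    #include <sanitizer/common_interface_defs.h>

    void EnterSandbox(int coverage_socket_fd) {
      __sanitizer_sandbox_arguments args = {};
      args.coverage_sandboxed = 1;            // route coverage through a socket
      args.coverage_fd = coverage_socket_fd;  // pre-opened descriptor (placeholder)
      args.coverage_max_block_size = 4096;    // split packed data into 4K blocks
      __sanitizer_sandbox_on_notify(&args);   // must happen before privileges drop
      // ... install seccomp filters / drop privileges here ...
    }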
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc b/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc
index 75f7d1d1e8..104c27cd5d 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc
@@ -232,6 +232,7 @@ POST_SYSCALL(settimeofday)(long res, void *tv, void *tz) {
}
}
+#if !SANITIZER_ANDROID
PRE_SYSCALL(adjtimex)(void *txc_p) {}
POST_SYSCALL(adjtimex)(long res, void *txc_p) {
@@ -239,6 +240,7 @@ POST_SYSCALL(adjtimex)(long res, void *txc_p) {
if (txc_p) POST_WRITE(txc_p, struct_timex_sz);
}
}
+#endif
PRE_SYSCALL(times)(void *tbuf) {}
@@ -384,24 +386,21 @@ PRE_SYSCALL(acct)(const void *name) {
POST_SYSCALL(acct)(long res, const void *name) {}
-PRE_SYSCALL(capget)(void *header, void *dataptr) {}
+PRE_SYSCALL(capget)(void *header, void *dataptr) {
+ if (header) PRE_READ(header, __user_cap_header_struct_sz);
+}
POST_SYSCALL(capget)(long res, void *header, void *dataptr) {
- if (res >= 0) {
- if (header) POST_WRITE(header, __user_cap_header_struct_sz);
+ if (res >= 0)
if (dataptr) POST_WRITE(dataptr, __user_cap_data_struct_sz);
- }
}
PRE_SYSCALL(capset)(void *header, const void *data) {
+ if (header) PRE_READ(header, __user_cap_header_struct_sz);
if (data) PRE_READ(data, __user_cap_data_struct_sz);
}
-POST_SYSCALL(capset)(long res, void *header, const void *data) {
- if (res >= 0) {
- if (header) POST_WRITE(header, __user_cap_header_struct_sz);
- }
-}
+POST_SYSCALL(capset)(long res, void *header, const void *data) {}
PRE_SYSCALL(personality)(long personality) {}
@@ -494,6 +493,7 @@ POST_SYSCALL(clock_gettime)(long res, long which_clock, void *tp) {
}
}
+#if !SANITIZER_ANDROID
PRE_SYSCALL(clock_adjtime)(long which_clock, void *tx) {}
POST_SYSCALL(clock_adjtime)(long res, long which_clock, void *tx) {
@@ -501,6 +501,7 @@ POST_SYSCALL(clock_adjtime)(long res, long which_clock, void *tx) {
if (tx) POST_WRITE(tx, struct_timex_sz);
}
}
+#endif
PRE_SYSCALL(clock_getres)(long which_clock, void *tp) {}
@@ -828,6 +829,7 @@ POST_SYSCALL(stat)(long res, const void *filename, void *statbuf) {
}
}
+#if !SANITIZER_ANDROID
PRE_SYSCALL(statfs)(const void *path, void *buf) {
if (path)
PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
@@ -865,6 +867,7 @@ POST_SYSCALL(fstatfs64)(long res, long fd, long sz, void *buf) {
if (buf) POST_WRITE(buf, struct_statfs64_sz);
}
}
+#endif // !SANITIZER_ANDROID
PRE_SYSCALL(lstat)(const void *filename, void *statbuf) {
if (filename)
@@ -918,6 +921,7 @@ POST_SYSCALL(newfstat)(long res, long fd, void *statbuf) {
}
}
+#if !SANITIZER_ANDROID
PRE_SYSCALL(ustat)(long dev, void *ubuf) {}
POST_SYSCALL(ustat)(long res, long dev, void *ubuf) {
@@ -925,6 +929,7 @@ POST_SYSCALL(ustat)(long res, long dev, void *ubuf) {
if (ubuf) POST_WRITE(ubuf, struct_ustat_sz);
}
}
+#endif // !SANITIZER_ANDROID
PRE_SYSCALL(stat64)(const void *filename, void *statbuf) {
if (filename)
@@ -1002,8 +1007,8 @@ PRE_SYSCALL(getxattr)(const void *path, const void *name, void *value,
POST_SYSCALL(getxattr)(long res, const void *path, const void *name,
void *value, long size) {
- if (res >= 0) {
- if (value) POST_WRITE(value, size);
+ if (size && res > 0) {
+ if (value) POST_WRITE(value, res);
}
}
@@ -1017,8 +1022,8 @@ PRE_SYSCALL(lgetxattr)(const void *path, const void *name, void *value,
POST_SYSCALL(lgetxattr)(long res, const void *path, const void *name,
void *value, long size) {
- if (res >= 0) {
- if (value) POST_WRITE(value, size);
+ if (size && res > 0) {
+ if (value) POST_WRITE(value, res);
}
}
@@ -1029,8 +1034,8 @@ PRE_SYSCALL(fgetxattr)(long fd, const void *name, void *value, long size) {
POST_SYSCALL(fgetxattr)(long res, long fd, const void *name, void *value,
long size) {
- if (res >= 0) {
- if (value) POST_WRITE(value, size);
+ if (size && res > 0) {
+ if (value) POST_WRITE(value, res);
}
}
@@ -1040,8 +1045,8 @@ PRE_SYSCALL(listxattr)(const void *path, void *list, long size) {
}
POST_SYSCALL(listxattr)(long res, const void *path, void *list, long size) {
- if (res >= 0) {
- if (list) POST_WRITE(list, size);
+ if (size && res > 0) {
+ if (list) POST_WRITE(list, res);
}
}
@@ -1051,16 +1056,16 @@ PRE_SYSCALL(llistxattr)(const void *path, void *list, long size) {
}
POST_SYSCALL(llistxattr)(long res, const void *path, void *list, long size) {
- if (res >= 0) {
- if (list) POST_WRITE(list, size);
+ if (size && res > 0) {
+ if (list) POST_WRITE(list, res);
}
}
PRE_SYSCALL(flistxattr)(long fd, void *list, long size) {}
POST_SYSCALL(flistxattr)(long res, long fd, void *list, long size) {
- if (res >= 0) {
- if (list) POST_WRITE(list, size);
+ if (size && res > 0) {
+ if (list) POST_WRITE(list, res);
}
}
@@ -1319,13 +1324,13 @@ PRE_SYSCALL(io_submit)(long ctx_id, long nr, __sanitizer_iocb **iocbpp) {
} else if (op == iocb_cmd_pread && buf && len) {
POST_WRITE(buf, len);
} else if (op == iocb_cmd_pwritev) {
- __sanitizer_iovec *iovec = (__sanitizer_iovec*)iocbpp[i]->aio_buf;
+ __sanitizer_iovec *iovec = (__sanitizer_iovec*)buf;
for (uptr v = 0; v < len; v++)
- PRE_READ(iovec[i].iov_base, iovec[i].iov_len);
+ PRE_READ(iovec[v].iov_base, iovec[v].iov_len);
} else if (op == iocb_cmd_preadv) {
- __sanitizer_iovec *iovec = (__sanitizer_iovec*)iocbpp[i]->aio_buf;
+ __sanitizer_iovec *iovec = (__sanitizer_iovec*)buf;
for (uptr v = 0; v < len; v++)
- POST_WRITE(iovec[i].iov_base, iovec[i].iov_len);
+ POST_WRITE(iovec[v].iov_base, iovec[v].iov_len);
}
// See comment in io_getevents.
COMMON_SYSCALL_RELEASE(data);
@@ -1436,6 +1441,7 @@ PRE_SYSCALL(fchown)(long fd, long user, long group) {}
POST_SYSCALL(fchown)(long res, long fd, long user, long group) {}
+#if SANITIZER_USES_UID16_SYSCALLS
PRE_SYSCALL(chown16)(const void *filename, long user, long group) {
if (filename)
PRE_READ(filename,
@@ -1545,6 +1551,7 @@ POST_SYSCALL(getgid16)(long res) {}
PRE_SYSCALL(getegid16)() {}
POST_SYSCALL(getegid16)(long res) {}
+#endif // SANITIZER_USES_UID16_SYSCALLS
PRE_SYSCALL(utime)(void *filename, void *times) {}
@@ -2080,6 +2087,7 @@ POST_SYSCALL(msgrcv)(long res, long msqid, void *msgp, long msgsz, long msgtyp,
}
}
+#if !SANITIZER_ANDROID
PRE_SYSCALL(msgctl)(long msqid, long cmd, void *buf) {}
POST_SYSCALL(msgctl)(long res, long msqid, long cmd, void *buf) {
@@ -2087,6 +2095,7 @@ POST_SYSCALL(msgctl)(long res, long msqid, long cmd, void *buf) {
if (buf) POST_WRITE(buf, struct_msqid_ds_sz);
}
}
+#endif
PRE_SYSCALL(semget)(long key, long nsems, long semflg) {}
@@ -2288,7 +2297,7 @@ PRE_SYSCALL(ni_syscall)() {}
POST_SYSCALL(ni_syscall)(long res) {}
PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
-#if defined(__i386) || defined (__x86_64)
+#if !SANITIZER_ANDROID && (defined(__i386) || defined (__x86_64))
if (data) {
if (request == ptrace_setregs) {
PRE_READ((void *)data, struct_user_regs_struct_sz);
@@ -2307,7 +2316,7 @@ PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
}
POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
-#if defined(__i386) || defined (__x86_64)
+#if !SANITIZER_ANDROID && (defined(__i386) || defined (__x86_64))
if (res >= 0 && data) {
// Note that this is different from the interceptor in
// sanitizer_common_interceptors.inc.
diff --git a/libsanitizer/sanitizer_common/sanitizer_coverage.cc b/libsanitizer/sanitizer_common/sanitizer_coverage.cc
deleted file mode 100644
index e87b76c008..0000000000
--- a/libsanitizer/sanitizer_common/sanitizer_coverage.cc
+++ /dev/null
@@ -1,111 +0,0 @@
-//===-- sanitizer_coverage.cc ---------------------------------------------===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Sanitizer Coverage.
-// This file implements run-time support for a poor man's coverage tool.
-//
-// Compiler instrumentation:
-// For every function F the compiler injects the following code:
-// if (*Guard) {
-// __sanitizer_cov(&F);
-// *Guard = 1;
-// }
-// It's fine to call __sanitizer_cov more than once for a given function.
-//
-// Run-time:
-// - __sanitizer_cov(pc): record that we've executed a given PC.
-// - __sanitizer_cov_dump: dump the coverage data to disk.
-// For every module of the current process that has coverage data
-// this will create a file module_name.PID.sancov. The file format is simple:
-// it's just a sorted sequence of 4-byte offsets in the module.
-//
-// Eventually, this coverage implementation should be obsoleted by a more
-// powerful general purpose Clang/LLVM coverage instrumentation.
-// Consider this implementation as prototype.
-//
-// FIXME: support (or at least test with) dlclose.
-//===----------------------------------------------------------------------===//
-
-#include "sanitizer_allocator_internal.h"
-#include "sanitizer_common.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_mutex.h"
-#include "sanitizer_procmaps.h"
-#include "sanitizer_flags.h"
-
-struct CovData {
- BlockingMutex mu;
- InternalMmapVector<uptr> v;
-};
-
-static uptr cov_data_placeholder[sizeof(CovData) / sizeof(uptr)];
-COMPILER_CHECK(sizeof(cov_data_placeholder) >= sizeof(CovData));
-static CovData *cov_data = reinterpret_cast<CovData*>(cov_data_placeholder);
-
-namespace __sanitizer {
-
-// Simply add the pc into the vector under lock. If the function is called more
-// than once for a given PC it will be inserted multiple times, which is fine.
-static void CovAdd(uptr pc) {
- BlockingMutexLock lock(&cov_data->mu);
- cov_data->v.push_back(pc);
-}
-
-static inline bool CompareLess(const uptr &a, const uptr &b) {
- return a < b;
-}
-
-// Dump the coverage on disk.
-void CovDump() {
-#if !SANITIZER_WINDOWS
- BlockingMutexLock lock(&cov_data->mu);
- InternalMmapVector<uptr> &v = cov_data->v;
- InternalSort(&v, v.size(), CompareLess);
- InternalMmapVector<u32> offsets(v.size());
- const uptr *vb = v.data();
- const uptr *ve = vb + v.size();
- MemoryMappingLayout proc_maps(/*cache_enabled*/false);
- uptr mb, me, off, prot;
- InternalScopedBuffer<char> module(4096);
- InternalScopedBuffer<char> path(4096 * 2);
- for (int i = 0;
- proc_maps.Next(&mb, &me, &off, module.data(), module.size(), &prot);
- i++) {
- if ((prot & MemoryMappingLayout::kProtectionExecute) == 0)
- continue;
- if (vb >= ve) break;
- if (mb <= *vb && *vb < me) {
- offsets.clear();
- const uptr *old_vb = vb;
- CHECK_LE(off, *vb);
- for (; vb < ve && *vb < me; vb++) {
- uptr diff = *vb - (i ? mb : 0) + off;
- CHECK_LE(diff, 0xffffffffU);
- offsets.push_back(static_cast<u32>(diff));
- }
- char *module_name = StripModuleName(module.data());
- internal_snprintf((char *)path.data(), path.size(), "%s.%zd.sancov",
- module_name, internal_getpid());
- InternalFree(module_name);
- uptr fd = OpenFile(path.data(), true);
- internal_write(fd, offsets.data(), offsets.size() * sizeof(u32));
- internal_close(fd);
- if (common_flags()->verbosity)
- Report(" CovDump: %s: %zd PCs written\n", path.data(), vb - old_vb);
- }
- }
-#endif // !SANITIZER_WINDOWS
-}
-
-} // namespace __sanitizer
-
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(void *pc) {
- CovAdd(reinterpret_cast<uptr>(pc));
-}
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); }
-} // extern "C"
diff --git a/libsanitizer/sanitizer_common/sanitizer_coverage_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_coverage_libcdep.cc
new file mode 100644
index 0000000000..214e8a98d0
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_coverage_libcdep.cc
@@ -0,0 +1,470 @@
+//===-- sanitizer_coverage.cc ---------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Sanitizer Coverage.
+// This file implements run-time support for a poor man's coverage tool.
+//
+// Compiler instrumentation:
+// For every interesting basic block the compiler injects the following code:
+// if (*Guard) {
+// __sanitizer_cov();
+// *Guard = 1;
+// }
+// It's fine to call __sanitizer_cov more than once for a given block.
+//
+// Run-time:
+// - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC).
+// - __sanitizer_cov_dump: dump the coverage data to disk.
+// For every module of the current process that has coverage data
+// this will create a file module_name.PID.sancov. The file format is simple:
+// it's just a sorted sequence of 4-byte offsets in the module.
+//
+// Eventually, this coverage implementation should be obsoleted by a more
+// powerful general purpose Clang/LLVM coverage instrumentation.
+// Consider this implementation a prototype.
+//
+// FIXME: support (or at least test with) dlclose.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_procmaps.h"
+#include "sanitizer_stacktrace.h"
+#include "sanitizer_symbolizer.h"
+#include "sanitizer_flags.h"
+
+atomic_uint32_t dump_once_guard; // Ensure that CovDump runs only once.
+
+// pc_array is the array containing the covered PCs.
+// To make the pc_array thread- and async-signal-safe it has to be large enough.
+// 128M counters "ought to be enough for anybody" (4M on 32-bit).
+
+// With coverage_direct=1 in ASAN_OPTIONS, pc_array memory is mapped to a file.
+// In this mode, __sanitizer_cov_dump does nothing, and CovUpdateMapping()
+// dumps the current memory layout to another file.
+
+static bool cov_sandboxed = false;
+static int cov_fd = kInvalidFd;
+static unsigned int cov_max_block_size = 0;
+
+namespace __sanitizer {
+
+class CoverageData {
+ public:
+ void Init();
+ void BeforeFork();
+ void AfterFork(int child_pid);
+ void Extend(uptr npcs);
+ void Add(uptr pc);
+ void IndirCall(uptr caller, uptr callee, uptr callee_cache[],
+ uptr cache_size);
+ void DumpCallerCalleePairs();
+
+ uptr *data();
+ uptr size();
+
+ private:
+  // Maximal size the pc array may ever grow to.
+ // We MmapNoReserve this space to ensure that the array is contiguous.
+ static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27);
+  // The amount by which the file mapping for the pc array is grown.
+ static const uptr kPcArrayMmapSize = 64 * 1024;
+
+ // pc_array is allocated with MmapNoReserveOrDie and so it uses only as
+ // much RAM as it really needs.
+ uptr *pc_array;
+ // Index of the first available pc_array slot.
+ atomic_uintptr_t pc_array_index;
+ // Array size.
+ atomic_uintptr_t pc_array_size;
+ // Current file mapped size of the pc array.
+ uptr pc_array_mapped_size;
+ // Descriptor of the file mapped pc array.
+ int pc_fd;
+
+ // Caller-Callee (cc) array, size and current index.
+ static const uptr kCcArrayMaxSize = FIRST_32_SECOND_64(1 << 18, 1 << 24);
+ uptr **cc_array;
+ atomic_uintptr_t cc_array_index;
+ atomic_uintptr_t cc_array_size;
+
+
+ StaticSpinMutex mu;
+
+ void DirectOpen();
+ void ReInit();
+};
+
+static CoverageData coverage_data;
+
+void CoverageData::DirectOpen() {
+ InternalScopedString path(1024);
+ internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.raw",
+ common_flags()->coverage_dir, internal_getpid());
+ pc_fd = OpenFile(path.data(), true);
+ if (internal_iserror(pc_fd)) {
+ Report(" Coverage: failed to open %s for writing\n", path.data());
+ Die();
+ }
+
+ pc_array_mapped_size = 0;
+ CovUpdateMapping();
+}
+
+void CoverageData::Init() {
+ pc_array = reinterpret_cast<uptr *>(
+ MmapNoReserveOrDie(sizeof(uptr) * kPcArrayMaxSize, "CovInit"));
+ pc_fd = kInvalidFd;
+ if (common_flags()->coverage_direct) {
+ atomic_store(&pc_array_size, 0, memory_order_relaxed);
+ atomic_store(&pc_array_index, 0, memory_order_relaxed);
+ } else {
+ atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
+ atomic_store(&pc_array_index, 0, memory_order_relaxed);
+ }
+
+ cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie(
+ sizeof(uptr *) * kCcArrayMaxSize, "CovInit::cc_array"));
+ atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed);
+ atomic_store(&cc_array_index, 0, memory_order_relaxed);
+}
+
+void CoverageData::ReInit() {
+ internal_munmap(pc_array, sizeof(uptr) * kPcArrayMaxSize);
+ if (pc_fd != kInvalidFd) internal_close(pc_fd);
+ if (common_flags()->coverage_direct) {
+ // In memory-mapped mode we must extend the new file to the known array
+ // size.
+ uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
+ Init();
+ if (size) Extend(size);
+ } else {
+ Init();
+ }
+}
+
+void CoverageData::BeforeFork() {
+ mu.Lock();
+}
+
+void CoverageData::AfterFork(int child_pid) {
+ // We are single-threaded so it's OK to release the lock early.
+ mu.Unlock();
+ if (child_pid == 0) ReInit();
+}
+
+// Extend the coverage PC array to fit 'npcs' additional elements.
+void CoverageData::Extend(uptr npcs) {
+ if (!common_flags()->coverage_direct) return;
+ SpinMutexLock l(&mu);
+
+ if (pc_fd == kInvalidFd) DirectOpen();
+ CHECK_NE(pc_fd, kInvalidFd);
+
+ uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
+ size += npcs * sizeof(uptr);
+
+ if (size > pc_array_mapped_size) {
+ uptr new_mapped_size = pc_array_mapped_size;
+ while (size > new_mapped_size) new_mapped_size += kPcArrayMmapSize;
+
+ // Extend the file and map the new space at the end of pc_array.
+ uptr res = internal_ftruncate(pc_fd, new_mapped_size);
+ int err;
+ if (internal_iserror(res, &err)) {
+ Printf("failed to extend raw coverage file: %d\n", err);
+ Die();
+ }
+ void *p = MapWritableFileToMemory(pc_array + pc_array_mapped_size,
+ new_mapped_size - pc_array_mapped_size,
+ pc_fd, pc_array_mapped_size);
+ CHECK_EQ(p, pc_array + pc_array_mapped_size);
+ pc_array_mapped_size = new_mapped_size;
+ }
+
+ atomic_store(&pc_array_size, size, memory_order_release);
+}
+
+// Atomically append the pc to the array. If the function is called more
+// than once for a given PC it will be inserted multiple times, which is fine.
+void CoverageData::Add(uptr pc) {
+ if (!pc_array) return;
+ uptr idx = atomic_fetch_add(&pc_array_index, 1, memory_order_relaxed);
+ CHECK_LT(idx * sizeof(uptr),
+ atomic_load(&pc_array_size, memory_order_acquire));
+ pc_array[idx] = pc;
+}
+
+// Registers a pair caller=>callee.
+// When a given caller is seen for the first time, the callee_cache is added
+// to the global array cc_array, callee_cache[0] is set to caller and
+// callee_cache[1] is set to cache_size.
+// Then we try to add the callee to callee_cache[2, cache_size) if it is
+// not there yet.
+// If the cache is full we drop the callee (may want to fix this later).
+void CoverageData::IndirCall(uptr caller, uptr callee, uptr callee_cache[],
+ uptr cache_size) {
+ if (!cc_array) return;
+ atomic_uintptr_t *atomic_callee_cache =
+ reinterpret_cast<atomic_uintptr_t *>(callee_cache);
+ uptr zero = 0;
+ if (atomic_compare_exchange_strong(&atomic_callee_cache[0], &zero, caller,
+ memory_order_seq_cst)) {
+ uptr idx = atomic_fetch_add(&cc_array_index, 1, memory_order_relaxed);
+ CHECK_LT(idx * sizeof(uptr),
+ atomic_load(&cc_array_size, memory_order_acquire));
+ callee_cache[1] = cache_size;
+ cc_array[idx] = callee_cache;
+ }
+ CHECK_EQ(atomic_load(&atomic_callee_cache[0], memory_order_relaxed), caller);
+ for (uptr i = 2; i < cache_size; i++) {
+ uptr was = 0;
+ if (atomic_compare_exchange_strong(&atomic_callee_cache[i], &was, callee,
+ memory_order_seq_cst))
+ return;
+ if (was == callee) // Already have this callee.
+ return;
+ }
+}
+
+uptr *CoverageData::data() {
+ return pc_array;
+}
+
+uptr CoverageData::size() {
+ return atomic_load(&pc_array_index, memory_order_relaxed);
+}
+
+// Block layout for packed file format: header, followed by module name (no
+// trailing zero), followed by data blob.
+struct CovHeader {
+ int pid;
+ unsigned int module_name_length;
+ unsigned int data_length;
+};
+
+static void CovWritePacked(int pid, const char *module, const void *blob,
+ unsigned int blob_size) {
+ if (cov_fd < 0) return;
+ unsigned module_name_length = internal_strlen(module);
+ CovHeader header = {pid, module_name_length, blob_size};
+
+ if (cov_max_block_size == 0) {
+ // Writing to a file. Just go ahead.
+ internal_write(cov_fd, &header, sizeof(header));
+ internal_write(cov_fd, module, module_name_length);
+ internal_write(cov_fd, blob, blob_size);
+ } else {
+ // Writing to a socket. We want to split the data into appropriately sized
+ // blocks.
+ InternalScopedBuffer<char> block(cov_max_block_size);
+ CHECK_EQ((uptr)block.data(), (uptr)(CovHeader *)block.data());
+ uptr header_size_with_module = sizeof(header) + module_name_length;
+ CHECK_LT(header_size_with_module, cov_max_block_size);
+ unsigned int max_payload_size =
+ cov_max_block_size - header_size_with_module;
+ char *block_pos = block.data();
+ internal_memcpy(block_pos, &header, sizeof(header));
+ block_pos += sizeof(header);
+ internal_memcpy(block_pos, module, module_name_length);
+ block_pos += module_name_length;
+ char *block_data_begin = block_pos;
+ char *blob_pos = (char *)blob;
+ while (blob_size > 0) {
+ unsigned int payload_size = Min(blob_size, max_payload_size);
+ blob_size -= payload_size;
+ internal_memcpy(block_data_begin, blob_pos, payload_size);
+ blob_pos += payload_size;
+ ((CovHeader *)block.data())->data_length = payload_size;
+ internal_write(cov_fd, block.data(),
+ header_size_with_module + payload_size);
+ }
+ }
+}
+
+// If packed = false: <name>.<pid>.sancov (name = module name).
+// If packed = true and name == 0: <pid>.sancov.packed.
+// If packed = true and name != 0: <name>.sancov.packed (name is
+// user-supplied).
+static int CovOpenFile(bool packed, const char* name) {
+ InternalScopedBuffer<char> path(1024);
+ if (!packed) {
+ CHECK(name);
+ internal_snprintf((char *)path.data(), path.size(), "%s/%s.%zd.sancov",
+ common_flags()->coverage_dir, name, internal_getpid());
+ } else {
+ if (!name)
+ internal_snprintf((char *)path.data(), path.size(),
+ "%s/%zd.sancov.packed", common_flags()->coverage_dir,
+ internal_getpid());
+ else
+ internal_snprintf((char *)path.data(), path.size(), "%s/%s.sancov.packed",
+ common_flags()->coverage_dir, name);
+ }
+ uptr fd = OpenFile(path.data(), true);
+ if (internal_iserror(fd)) {
+ Report(" SanitizerCoverage: failed to open %s for writing\n", path.data());
+ return -1;
+ }
+ return fd;
+}
+
+// This function dumps the caller=>callee pairs into a file as a sequence of
+// lines like "module_name offset".
+void CoverageData::DumpCallerCalleePairs() {
+ uptr max_idx = atomic_load(&cc_array_index, memory_order_relaxed);
+ if (!max_idx) return;
+ auto sym = Symbolizer::GetOrInit();
+ if (!sym)
+ return;
+ InternalScopedString out(32 << 20);
+ uptr total = 0;
+ for (uptr i = 0; i < max_idx; i++) {
+ uptr *cc_cache = cc_array[i];
+ CHECK(cc_cache);
+ uptr caller = cc_cache[0];
+ uptr n_callees = cc_cache[1];
+ const char *caller_module_name = "<unknown>";
+ uptr caller_module_address = 0;
+ sym->GetModuleNameAndOffsetForPC(caller, &caller_module_name,
+ &caller_module_address);
+ for (uptr j = 2; j < n_callees; j++) {
+ uptr callee = cc_cache[j];
+ if (!callee) break;
+ total++;
+ const char *callee_module_name = "<unknown>";
+ uptr callee_module_address = 0;
+ sym->GetModuleNameAndOffsetForPC(callee, &callee_module_name,
+ &callee_module_address);
+ out.append("%s 0x%zx\n%s 0x%zx\n", caller_module_name,
+ caller_module_address, callee_module_name,
+ callee_module_address);
+ }
+ }
+ int fd = CovOpenFile(false, "caller-callee");
+ if (fd < 0) return;
+ internal_write(fd, out.data(), out.length());
+ internal_close(fd);
+ VReport(1, " CovDump: %zd caller-callee pairs written\n", total);
+}
+
+// Dump the coverage on disk.
+static void CovDump() {
+ if (!common_flags()->coverage || common_flags()->coverage_direct) return;
+#if !SANITIZER_WINDOWS
+ if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
+ return;
+ uptr size = coverage_data.size();
+ InternalMmapVector<u32> offsets(size);
+ uptr *vb = coverage_data.data();
+ uptr *ve = vb + size;
+ SortArray(vb, size);
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ uptr mb, me, off, prot;
+ InternalScopedBuffer<char> module(4096);
+ InternalScopedBuffer<char> path(4096 * 2);
+ for (int i = 0;
+ proc_maps.Next(&mb, &me, &off, module.data(), module.size(), &prot);
+ i++) {
+ if ((prot & MemoryMappingLayout::kProtectionExecute) == 0)
+ continue;
+ while (vb < ve && *vb < mb) vb++;
+ if (vb >= ve) break;
+ if (*vb < me) {
+ offsets.clear();
+ const uptr *old_vb = vb;
+ CHECK_LE(off, *vb);
+ for (; vb < ve && *vb < me; vb++) {
+ uptr diff = *vb - (i ? mb : 0) + off;
+ CHECK_LE(diff, 0xffffffffU);
+ offsets.push_back(static_cast<u32>(diff));
+ }
+ const char *module_name = StripModuleName(module.data());
+ if (cov_sandboxed) {
+ if (cov_fd >= 0) {
+ CovWritePacked(internal_getpid(), module_name, offsets.data(),
+ offsets.size() * sizeof(u32));
+ VReport(1, " CovDump: %zd PCs written to packed file\n", vb - old_vb);
+ }
+ } else {
+ // One file per module per process.
+ internal_snprintf((char *)path.data(), path.size(), "%s/%s.%zd.sancov",
+ common_flags()->coverage_dir, module_name,
+ internal_getpid());
+ int fd = CovOpenFile(false /* packed */, module_name);
+ if (fd > 0) {
+ internal_write(fd, offsets.data(), offsets.size() * sizeof(u32));
+ internal_close(fd);
+ VReport(1, " CovDump: %s: %zd PCs written\n", path.data(),
+ vb - old_vb);
+ }
+ }
+ }
+ }
+ if (cov_fd >= 0)
+ internal_close(cov_fd);
+ coverage_data.DumpCallerCalleePairs();
+#endif // !SANITIZER_WINDOWS
+}
+
+void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
+ if (!args) return;
+ if (!common_flags()->coverage) return;
+ cov_sandboxed = args->coverage_sandboxed;
+ if (!cov_sandboxed) return;
+ cov_fd = args->coverage_fd;
+ cov_max_block_size = args->coverage_max_block_size;
+ if (cov_fd < 0)
+ // Pre-open the file now. The sandbox won't allow us to do it later.
+ cov_fd = CovOpenFile(true /* packed */, 0);
+}
+
+int MaybeOpenCovFile(const char *name) {
+ CHECK(name);
+ if (!common_flags()->coverage) return -1;
+ return CovOpenFile(true /* packed */, name);
+}
+
+void CovBeforeFork() {
+ coverage_data.BeforeFork();
+}
+
+void CovAfterFork(int child_pid) {
+ coverage_data.AfterFork(child_pid);
+}
+
+} // namespace __sanitizer
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov() {
+ coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()));
+}
+SANITIZER_INTERFACE_ATTRIBUTE void
+__sanitizer_cov_indir_call16(uptr callee, uptr callee_cache16[]) {
+ coverage_data.IndirCall(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
+ callee, callee_cache16, 16);
+}
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); }
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
+ coverage_data.Init();
+}
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_module_init(uptr npcs) {
+ if (!common_flags()->coverage || !common_flags()->coverage_direct) return;
+ if (SANITIZER_ANDROID) {
+ // dlopen/dlclose interceptors do not work on Android, so we rely on
+ // Extend() calls to update .sancov.map.
+ CovUpdateMapping(GET_CALLER_PC());
+ }
+ coverage_data.Extend(npcs);
+}
+SANITIZER_INTERFACE_ATTRIBUTE
+sptr __sanitizer_maybe_open_cov_file(const char *name) {
+ return MaybeOpenCovFile(name);
+}
+} // extern "C"
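The header comment above describes the *.sancov output as a sorted sequence of 4-byte offsets within a module. A small reader for that format, written as a separate illustration rather than part of the runtime:

    // Minimal reader for the <module>.<pid>.sancov format: a sorted sequence
    // of 4-byte offsets of executed PCs within the module.
    #include <cstdint>
    #include <cstdio>
    #include <fstream>
    #include <vector>

    int main(int argc, char **argv) {
      if (argc != 2) {
        std::fprintf(stderr, "usage: %s file.sancov\n", argv[0]);
        return 1;
      }
      std::ifstream in(argv[1], std::ios::binary);
      std::vector<uint32_t> offsets;
      uint32_t off;
      while (in.read(reinterpret_cast<char *>(&off), sizeof(off)))
        offsets.push_back(off);
      std::printf("%zu covered PCs\n", offsets.size());
      for (uint32_t o : offsets)
        std::printf("0x%x\n", o);  // offset of an executed PC in the module
      return 0;
    }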
diff --git a/libsanitizer/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc
new file mode 100644
index 0000000000..b88814b816
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc
@@ -0,0 +1,125 @@
+//===-- sanitizer_coverage_mapping.cc -------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Mmap-based implementation of sanitizer coverage.
+//
+// This is part of the implementation of code coverage that does not require
+// a __sanitizer_cov_dump() call. Data is stored in 2 files per process.
+//
+// $pid.sancov.map describes process memory layout in the following text-based
+// format:
+// <pointer size in bits> // 1 line, 32 or 64
+// <mapping start> <mapping end> <base address> <dso name> // repeated
+// ...
+// Mapping lines are NOT sorted. This file is updated every time memory layout
+// is changed (i.e. in dlopen() and dlclose() interceptors).
+//
+// $pid.sancov.raw is a binary dump of PC values, sizeof(uptr) each. Again, not
+// sorted. This file is extended by 64Kb at a time and mapped into memory. It
+// contains one or more 0 words at the end, up to the next 64Kb aligned offset.
+//
+// To convert these 2 files to the usual .sancov format, run sancov.py rawunpack
+// $pid.sancov.raw.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_procmaps.h"
+
+namespace __sanitizer {
+
+static const uptr kMaxNumberOfModules = 1 << 14;
+static const uptr kMaxTextSize = 64 * 1024;
+
+struct CachedMapping {
+ public:
+ bool NeedsUpdate(uptr pc) {
+ int new_pid = internal_getpid();
+ if (last_pid == new_pid && pc && pc >= last_range_start &&
+ pc < last_range_end)
+ return false;
+ last_pid = new_pid;
+ return true;
+ }
+
+ void SetModuleRange(uptr start, uptr end) {
+ last_range_start = start;
+ last_range_end = end;
+ }
+
+ private:
+ uptr last_range_start, last_range_end;
+ int last_pid;
+};
+
+static CachedMapping cached_mapping;
+static StaticSpinMutex mapping_mu;
+
+void CovUpdateMapping(uptr caller_pc) {
+ if (!common_flags()->coverage || !common_flags()->coverage_direct) return;
+
+ SpinMutexLock l(&mapping_mu);
+
+ if (!cached_mapping.NeedsUpdate(caller_pc))
+ return;
+
+ InternalScopedString text(kMaxTextSize);
+ InternalScopedBuffer<char> modules_data(kMaxNumberOfModules *
+ sizeof(LoadedModule));
+ LoadedModule *modules = (LoadedModule *)modules_data.data();
+ CHECK(modules);
+ int n_modules = GetListOfModules(modules, kMaxNumberOfModules,
+ /* filter */ 0);
+
+ text.append("%d\n", sizeof(uptr) * 8);
+ for (int i = 0; i < n_modules; ++i) {
+ const char *module_name = StripModuleName(modules[i].full_name());
+ for (unsigned j = 0; j < modules[i].n_ranges(); ++j) {
+ if (modules[i].address_range_executable(j)) {
+ uptr start = modules[i].address_range_start(j);
+ uptr end = modules[i].address_range_end(j);
+ uptr base = modules[i].base_address();
+ text.append("%zx %zx %zx %s\n", start, end, base, module_name);
+ if (caller_pc && caller_pc >= start && caller_pc < end)
+ cached_mapping.SetModuleRange(start, end);
+ }
+ }
+ }
+
+ int err;
+ InternalScopedString tmp_path(64 +
+ internal_strlen(common_flags()->coverage_dir));
+ uptr res = internal_snprintf((char *)tmp_path.data(), tmp_path.size(),
+ "%s/%zd.sancov.map.tmp", common_flags()->coverage_dir,
+ internal_getpid());
+ CHECK_LE(res, tmp_path.size());
+ uptr map_fd = OpenFile(tmp_path.data(), true);
+ if (internal_iserror(map_fd)) {
+ Report(" Coverage: failed to open %s for writing\n", tmp_path.data());
+ Die();
+ }
+
+ res = internal_write(map_fd, text.data(), text.length());
+ if (internal_iserror(res, &err)) {
+ Printf("sancov.map write failed: %d\n", err);
+ Die();
+ }
+ internal_close(map_fd);
+
+ InternalScopedString path(64 + internal_strlen(common_flags()->coverage_dir));
+ res = internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.map",
+ common_flags()->coverage_dir, internal_getpid());
+ CHECK_LE(res, path.size());
+ res = internal_rename(tmp_path.data(), path.data());
+ if (internal_iserror(res, &err)) {
+ Printf("sancov.map rename failed: %d\n", err);
+ Die();
+ }
+}
+
+} // namespace __sanitizer
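The file header above fixes the text layout of $pid.sancov.map: one line with the pointer size in bits, followed by unsorted "<mapping start> <mapping end> <base address> <dso name>" lines written in hexadecimal by CovUpdateMapping(). A short parser sketch for that layout (illustration only):

    // Illustration: parse the $pid.sancov.map text format described above.
    #include <cstdint>
    #include <cstdio>
    #include <fstream>
    #include <string>

    int main(int argc, char **argv) {
      if (argc != 2) return 1;
      std::ifstream in(argv[1]);
      int bits = 0;
      in >> bits;  // first line: 32 or 64
      uint64_t start, end, base;
      std::string dso;
      while (in >> std::hex >> start >> end >> base >> dso)
        std::printf("%s: [0x%llx, 0x%llx) base 0x%llx\n", dso.c_str(),
                    (unsigned long long)start, (unsigned long long)end,
                    (unsigned long long)base);
      return 0;
    }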
diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h
new file mode 100644
index 0000000000..5c8317554a
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h
@@ -0,0 +1,414 @@
+//===-- sanitizer_deadlock_detector.h ---------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime.
+// The deadlock detector maintains a directed graph of lock acquisitions.
+// When a lock event happens, the detector checks if the locks already held by
+// the current thread are reachable from the newly acquired lock.
+//
+// The detector can handle only a fixed number of simultaneously live locks
+// (a lock is alive if it has been locked at least once and has not been
+// destroyed). When the maximal number of locks is reached, the entire graph
+// is flushed and a new lock epoch is started. The node ids from the old
+// epochs cannot be used with any of the detector methods except for
+// nodeBelongsToCurrentEpoch().
+//
+// FIXME: this is work in progress, nothing really works yet.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_DEADLOCK_DETECTOR_H
+#define SANITIZER_DEADLOCK_DETECTOR_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_bvgraph.h"
+
+namespace __sanitizer {
+
+// Thread-local state for DeadlockDetector.
+// It contains the locks currently held by the owning thread.
+template <class BV>
+class DeadlockDetectorTLS {
+ public:
+ // No CTOR.
+ void clear() {
+ bv_.clear();
+ epoch_ = 0;
+ n_recursive_locks = 0;
+ n_all_locks_ = 0;
+ }
+
+ bool empty() const { return bv_.empty(); }
+
+ void ensureCurrentEpoch(uptr current_epoch) {
+ if (epoch_ == current_epoch) return;
+ bv_.clear();
+ epoch_ = current_epoch;
+ n_recursive_locks = 0;
+ n_all_locks_ = 0;
+ }
+
+ uptr getEpoch() const { return epoch_; }
+
+ // Returns true if this is the first (non-recursive) acquisition of this lock.
+ bool addLock(uptr lock_id, uptr current_epoch, u32 stk) {
+ // Printf("addLock: %zx %zx stk %u\n", lock_id, current_epoch, stk);
+ CHECK_EQ(epoch_, current_epoch);
+ if (!bv_.setBit(lock_id)) {
+ // The lock is already held by this thread, it must be recursive.
+ CHECK_LT(n_recursive_locks, ARRAY_SIZE(recursive_locks));
+ recursive_locks[n_recursive_locks++] = lock_id;
+ return false;
+ }
+ CHECK_LT(n_all_locks_, ARRAY_SIZE(all_locks_with_contexts_));
+ // lock_id < BV::kSize, can cast to a smaller int.
+ u32 lock_id_short = static_cast<u32>(lock_id);
+ LockWithContext l = {lock_id_short, stk};
+ all_locks_with_contexts_[n_all_locks_++] = l;
+ return true;
+ }
+
+ void removeLock(uptr lock_id) {
+ if (n_recursive_locks) {
+ for (sptr i = n_recursive_locks - 1; i >= 0; i--) {
+ if (recursive_locks[i] == lock_id) {
+ n_recursive_locks--;
+ Swap(recursive_locks[i], recursive_locks[n_recursive_locks]);
+ return;
+ }
+ }
+ }
+ // Printf("remLock: %zx %zx\n", lock_id, epoch_);
+ if (!bv_.clearBit(lock_id))
+ return; // probably addLock happened before flush
+ if (n_all_locks_) {
+ for (sptr i = n_all_locks_ - 1; i >= 0; i--) {
+ if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id)) {
+ Swap(all_locks_with_contexts_[i],
+ all_locks_with_contexts_[n_all_locks_ - 1]);
+ n_all_locks_--;
+ break;
+ }
+ }
+ }
+ }
+
+ u32 findLockContext(uptr lock_id) {
+ for (uptr i = 0; i < n_all_locks_; i++)
+ if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id))
+ return all_locks_with_contexts_[i].stk;
+ return 0;
+ }
+
+ const BV &getLocks(uptr current_epoch) const {
+ CHECK_EQ(epoch_, current_epoch);
+ return bv_;
+ }
+
+ uptr getNumLocks() const { return n_all_locks_; }
+ uptr getLock(uptr idx) const { return all_locks_with_contexts_[idx].lock; }
+
+ private:
+ BV bv_;
+ uptr epoch_;
+ uptr recursive_locks[64];
+ uptr n_recursive_locks;
+ struct LockWithContext {
+ u32 lock;
+ u32 stk;
+ };
+ LockWithContext all_locks_with_contexts_[64];
+ uptr n_all_locks_;
+};
+
+// DeadlockDetector.
+// For deadlock detection to work we need one global DeadlockDetector object
+// and one DeadlockDetectorTLS object per thread.
+// This class is not thread-safe: its methods should be protected by an
+// external lock unless explicitly stated otherwise.
+template <class BV>
+class DeadlockDetector {
+ public:
+ typedef BV BitVector;
+
+ uptr size() const { return g_.size(); }
+
+ // No CTOR.
+ void clear() {
+ current_epoch_ = 0;
+ available_nodes_.clear();
+ recycled_nodes_.clear();
+ g_.clear();
+ n_edges_ = 0;
+ }
+
+ // Allocate new deadlock detector node.
+ // If we are out of available nodes first try to recycle some.
+ // If there is nothing to recycle, flush the graph and increment the epoch.
+ // Associate 'data' (opaque user's object) with the new node.
+ uptr newNode(uptr data) {
+ if (!available_nodes_.empty())
+ return getAvailableNode(data);
+ if (!recycled_nodes_.empty()) {
+ // Printf("recycling: n_edges_ %zd\n", n_edges_);
+ for (sptr i = n_edges_ - 1; i >= 0; i--) {
+ if (recycled_nodes_.getBit(edges_[i].from) ||
+ recycled_nodes_.getBit(edges_[i].to)) {
+ Swap(edges_[i], edges_[n_edges_ - 1]);
+ n_edges_--;
+ }
+ }
+ CHECK(available_nodes_.empty());
+ // removeEdgesFrom was called in removeNode.
+ g_.removeEdgesTo(recycled_nodes_);
+ available_nodes_.setUnion(recycled_nodes_);
+ recycled_nodes_.clear();
+ return getAvailableNode(data);
+ }
+ // We are out of vacant nodes. Flush and increment the current_epoch_.
+ current_epoch_ += size();
+ recycled_nodes_.clear();
+ available_nodes_.setAll();
+ g_.clear();
+ n_edges_ = 0;
+ return getAvailableNode(data);
+ }
+
+ // Get data associated with the node created by newNode().
+ uptr getData(uptr node) const { return data_[nodeToIndex(node)]; }
+
+ bool nodeBelongsToCurrentEpoch(uptr node) {
+ return node && (node / size() * size()) == current_epoch_;
+ }
+
+ void removeNode(uptr node) {
+ uptr idx = nodeToIndex(node);
+ CHECK(!available_nodes_.getBit(idx));
+ CHECK(recycled_nodes_.setBit(idx));
+ g_.removeEdgesFrom(idx);
+ }
+
+ void ensureCurrentEpoch(DeadlockDetectorTLS<BV> *dtls) {
+ dtls->ensureCurrentEpoch(current_epoch_);
+ }
+
+ // Returns true if there is a cycle in the graph after this lock event.
+  // Ideally this should be called before the lock is acquired so that we can
+  // report a deadlock before a real deadlock happens.
+ bool onLockBefore(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
+ ensureCurrentEpoch(dtls);
+ uptr cur_idx = nodeToIndex(cur_node);
+ return g_.isReachable(cur_idx, dtls->getLocks(current_epoch_));
+ }
+
+ u32 findLockContext(DeadlockDetectorTLS<BV> *dtls, uptr node) {
+ return dtls->findLockContext(nodeToIndex(node));
+ }
+
+ // Add cur_node to the set of locks held currently by dtls.
+ void onLockAfter(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
+ ensureCurrentEpoch(dtls);
+ uptr cur_idx = nodeToIndex(cur_node);
+ dtls->addLock(cur_idx, current_epoch_, stk);
+ }
+
+ // Experimental *racy* fast path function.
+ // Returns true if all edges from the currently held locks to cur_node exist.
+ bool hasAllEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
+ uptr local_epoch = dtls->getEpoch();
+ // Read from current_epoch_ is racy.
+ if (cur_node && local_epoch == current_epoch_ &&
+ local_epoch == nodeToEpoch(cur_node)) {
+ uptr cur_idx = nodeToIndexUnchecked(cur_node);
+ for (uptr i = 0, n = dtls->getNumLocks(); i < n; i++) {
+ if (!g_.hasEdge(dtls->getLock(i), cur_idx))
+ return false;
+ }
+ return true;
+ }
+ return false;
+ }
+
+ // Adds edges from currently held locks to cur_node,
+ // returns the number of added edges, and puts the sources of added edges
+ // into added_edges[].
+ // Should be called before onLockAfter.
+ uptr addEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk,
+ int unique_tid) {
+ ensureCurrentEpoch(dtls);
+ uptr cur_idx = nodeToIndex(cur_node);
+ uptr added_edges[40];
+ uptr n_added_edges = g_.addEdges(dtls->getLocks(current_epoch_), cur_idx,
+ added_edges, ARRAY_SIZE(added_edges));
+ for (uptr i = 0; i < n_added_edges; i++) {
+ if (n_edges_ < ARRAY_SIZE(edges_)) {
+ Edge e = {(u16)added_edges[i], (u16)cur_idx,
+ dtls->findLockContext(added_edges[i]), stk,
+ unique_tid};
+ edges_[n_edges_++] = e;
+ }
+ // Printf("Edge%zd: %u %zd=>%zd in T%d\n",
+ // n_edges_, stk, added_edges[i], cur_idx, unique_tid);
+ }
+ return n_added_edges;
+ }
+
+ bool findEdge(uptr from_node, uptr to_node, u32 *stk_from, u32 *stk_to,
+ int *unique_tid) {
+ uptr from_idx = nodeToIndex(from_node);
+ uptr to_idx = nodeToIndex(to_node);
+ for (uptr i = 0; i < n_edges_; i++) {
+ if (edges_[i].from == from_idx && edges_[i].to == to_idx) {
+ *stk_from = edges_[i].stk_from;
+ *stk_to = edges_[i].stk_to;
+ *unique_tid = edges_[i].unique_tid;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // Test-only function. Handles the before/after lock events,
+ // returns true if there is a cycle.
+ bool onLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
+ ensureCurrentEpoch(dtls);
+ bool is_reachable = !isHeld(dtls, cur_node) && onLockBefore(dtls, cur_node);
+ addEdges(dtls, cur_node, stk, 0);
+ onLockAfter(dtls, cur_node, stk);
+ return is_reachable;
+ }
+
+ // Handles the try_lock event, returns false.
+ // When a try_lock event happens (i.e. a try_lock call succeeds) we need
+ // to add this lock to the currently held locks, but we should not try to
+ // change the lock graph or to detect a cycle. We may want to investigate
+ // whether a more aggressive strategy is possible for try_lock.
+ bool onTryLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
+ ensureCurrentEpoch(dtls);
+ uptr cur_idx = nodeToIndex(cur_node);
+ dtls->addLock(cur_idx, current_epoch_, stk);
+ return false;
+ }
+
+  // Returns true iff dtls is empty (no locks are currently held) and we can
+  // add the node to the currently held locks w/o changing the global state.
+ // This operation is thread-safe as it only touches the dtls.
+ bool onFirstLock(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
+ if (!dtls->empty()) return false;
+ if (dtls->getEpoch() && dtls->getEpoch() == nodeToEpoch(node)) {
+ dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
+ return true;
+ }
+ return false;
+ }
+
+ // Finds a path between the lock 'cur_node' (currently not held in dtls)
+ // and some currently held lock, returns the length of the path
+ // or 0 on failure.
+ uptr findPathToLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, uptr *path,
+ uptr path_size) {
+ tmp_bv_.copyFrom(dtls->getLocks(current_epoch_));
+ uptr idx = nodeToIndex(cur_node);
+ CHECK(!tmp_bv_.getBit(idx));
+ uptr res = g_.findShortestPath(idx, tmp_bv_, path, path_size);
+ for (uptr i = 0; i < res; i++)
+ path[i] = indexToNode(path[i]);
+ if (res)
+ CHECK_EQ(path[0], cur_node);
+ return res;
+ }
+
+ // Handle the unlock event.
+ // This operation is thread-safe as it only touches the dtls.
+ void onUnlock(DeadlockDetectorTLS<BV> *dtls, uptr node) {
+ if (dtls->getEpoch() == nodeToEpoch(node))
+ dtls->removeLock(nodeToIndexUnchecked(node));
+ }
+
+ // Tries to handle the lock event w/o writing to global state.
+ // Returns true on success.
+ // This operation is thread-safe as it only touches the dtls
+ // (modulo racy nature of hasAllEdges).
+ bool onLockFast(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
+ if (hasAllEdges(dtls, node)) {
+ dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
+ return true;
+ }
+ return false;
+ }
+
+ bool isHeld(DeadlockDetectorTLS<BV> *dtls, uptr node) const {
+ return dtls->getLocks(current_epoch_).getBit(nodeToIndex(node));
+ }
+
+ uptr testOnlyGetEpoch() const { return current_epoch_; }
+ bool testOnlyHasEdge(uptr l1, uptr l2) {
+ return g_.hasEdge(nodeToIndex(l1), nodeToIndex(l2));
+ }
+ // idx1 and idx2 are raw indices to g_, not lock IDs.
+ bool testOnlyHasEdgeRaw(uptr idx1, uptr idx2) {
+ return g_.hasEdge(idx1, idx2);
+ }
+
+ void Print() {
+ for (uptr from = 0; from < size(); from++)
+ for (uptr to = 0; to < size(); to++)
+ if (g_.hasEdge(from, to))
+ Printf(" %zx => %zx\n", from, to);
+ }
+
+ private:
+ void check_idx(uptr idx) const { CHECK_LT(idx, size()); }
+
+ void check_node(uptr node) const {
+ CHECK_GE(node, size());
+ CHECK_EQ(current_epoch_, nodeToEpoch(node));
+ }
+
+ uptr indexToNode(uptr idx) const {
+ check_idx(idx);
+ return idx + current_epoch_;
+ }
+
+ uptr nodeToIndexUnchecked(uptr node) const { return node % size(); }
+
+ uptr nodeToIndex(uptr node) const {
+ check_node(node);
+ return nodeToIndexUnchecked(node);
+ }
+
+ uptr nodeToEpoch(uptr node) const { return node / size() * size(); }
+
+ uptr getAvailableNode(uptr data) {
+ uptr idx = available_nodes_.getAndClearFirstOne();
+ data_[idx] = data;
+ return indexToNode(idx);
+ }
+
+ struct Edge {
+ u16 from;
+ u16 to;
+ u32 stk_from;
+ u32 stk_to;
+ int unique_tid;
+ };
+
+ uptr current_epoch_;
+ BV available_nodes_;
+ BV recycled_nodes_;
+ BV tmp_bv_;
+ BVGraph<BV> g_;
+ uptr data_[BV::kSize];
+ Edge edges_[BV::kSize * 32];
+ uptr n_edges_;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_DEADLOCK_DETECTOR_H
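The detector above records lock acquisitions as graph edges and reports a cycle when a lock that is already held becomes reachable from a newly acquired one. A usage sketch built on the test-only onLock() helper; TwoLevelBitVector<> is assumed to come from sanitizer_bitvector.h, matching the DDBV typedef used by sanitizer_deadlock_detector1.cc further below:

    // Sketch: detect an A->B, B->A lock-order inversion with the test-only
    // onLock() helper. Assumes TwoLevelBitVector<> from sanitizer_bitvector.h.
    #include "sanitizer_deadlock_detector.h"
    #include "sanitizer_bitvector.h"

    using namespace __sanitizer;

    bool Example() {
      typedef TwoLevelBitVector<> BV;
      // Static: the detector holds BV::kSize node slots and an edge table,
      // which is too large to keep on the stack.
      static DeadlockDetector<BV> d;
      static DeadlockDetectorTLS<BV> dtls;
      d.clear();
      dtls.clear();

      uptr a = d.newNode(0), b = d.newNode(0);
      d.onLock(&dtls, a);               // hold A
      d.onLock(&dtls, b);               // records the A -> B edge
      d.onUnlock(&dtls, b);
      d.onUnlock(&dtls, a);

      d.onLock(&dtls, b);               // hold B
      return d.onLock(&dtls, a);        // true: acquiring A closes the cycle
    }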
diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc
new file mode 100644
index 0000000000..fc6f5dcfbb
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc
@@ -0,0 +1,187 @@
+//===-- sanitizer_deadlock_detector1.cc -----------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Deadlock detector implementation based on NxN adjacency bit matrix.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_deadlock_detector_interface.h"
+#include "sanitizer_deadlock_detector.h"
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_mutex.h"
+
+#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
+
+namespace __sanitizer {
+
+typedef TwoLevelBitVector<> DDBV; // DeadlockDetector's bit vector.
+
+struct DDPhysicalThread {
+};
+
+struct DDLogicalThread {
+ u64 ctx;
+ DeadlockDetectorTLS<DDBV> dd;
+ DDReport rep;
+ bool report_pending;
+};
+
+struct DD : public DDetector {
+ SpinMutex mtx;
+ DeadlockDetector<DDBV> dd;
+ DDFlags flags;
+
+ explicit DD(const DDFlags *flags);
+
+ DDPhysicalThread* CreatePhysicalThread();
+ void DestroyPhysicalThread(DDPhysicalThread *pt);
+
+ DDLogicalThread* CreateLogicalThread(u64 ctx);
+ void DestroyLogicalThread(DDLogicalThread *lt);
+
+ void MutexInit(DDCallback *cb, DDMutex *m);
+ void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
+ void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock);
+ void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
+ void MutexDestroy(DDCallback *cb, DDMutex *m);
+
+ DDReport *GetReport(DDCallback *cb);
+
+ void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);
+ void ReportDeadlock(DDCallback *cb, DDMutex *m);
+};
+
+DDetector *DDetector::Create(const DDFlags *flags) {
+ (void)flags;
+ void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
+ return new(mem) DD(flags);
+}
+
+DD::DD(const DDFlags *flags)
+ : flags(*flags) {
+ dd.clear();
+}
+
+DDPhysicalThread* DD::CreatePhysicalThread() {
+ return 0;
+}
+
+void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
+}
+
+DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
+ DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(sizeof(*lt));
+ lt->ctx = ctx;
+ lt->dd.clear();
+ lt->report_pending = false;
+ return lt;
+}
+
+void DD::DestroyLogicalThread(DDLogicalThread *lt) {
+ lt->~DDLogicalThread();
+ InternalFree(lt);
+}
+
+void DD::MutexInit(DDCallback *cb, DDMutex *m) {
+ m->id = 0;
+ m->stk = cb->Unwind();
+}
+
+void DD::MutexEnsureID(DDLogicalThread *lt, DDMutex *m) {
+ if (!dd.nodeBelongsToCurrentEpoch(m->id))
+ m->id = dd.newNode(reinterpret_cast<uptr>(m));
+ dd.ensureCurrentEpoch(&lt->dd);
+}
+
+void DD::MutexBeforeLock(DDCallback *cb,
+ DDMutex *m, bool wlock) {
+ DDLogicalThread *lt = cb->lt;
+ if (lt->dd.empty()) return; // This will be the first lock held by lt.
+ if (dd.hasAllEdges(&lt->dd, m->id)) return; // We already have all edges.
+ SpinMutexLock lk(&mtx);
+ MutexEnsureID(lt, m);
+ if (dd.isHeld(&lt->dd, m->id))
+ return; // FIXME: allow this only for recursive locks.
+ if (dd.onLockBefore(&lt->dd, m->id)) {
+ // Actually add this edge now so that we have all the stack traces.
+ dd.addEdges(&lt->dd, m->id, cb->Unwind(), cb->UniqueTid());
+ ReportDeadlock(cb, m);
+ }
+}
+
+void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
+ DDLogicalThread *lt = cb->lt;
+ uptr path[10];
+ uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));
+ CHECK_GT(len, 0U); // Hm.. cycle of 10 locks? I'd like to see that.
+ CHECK_EQ(m->id, path[0]);
+ lt->report_pending = true;
+ DDReport *rep = &lt->rep;
+ rep->n = len;
+ for (uptr i = 0; i < len; i++) {
+ uptr from = path[i];
+ uptr to = path[(i + 1) % len];
+ DDMutex *m0 = (DDMutex*)dd.getData(from);
+ DDMutex *m1 = (DDMutex*)dd.getData(to);
+
+ u32 stk_from = -1U, stk_to = -1U;
+ int unique_tid = 0;
+ dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);
+ // Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
+ // unique_tid);
+ rep->loop[i].thr_ctx = unique_tid;
+ rep->loop[i].mtx_ctx0 = m0->ctx;
+ rep->loop[i].mtx_ctx1 = m1->ctx;
+ rep->loop[i].stk[0] = stk_to;
+ rep->loop[i].stk[1] = stk_from;
+ }
+}
+
+void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
+ DDLogicalThread *lt = cb->lt;
+ u32 stk = 0;
+ if (flags.second_deadlock_stack)
+ stk = cb->Unwind();
+ // Printf("T%p MutexLock: %zx stk %u\n", lt, m->id, stk);
+ if (dd.onFirstLock(&lt->dd, m->id, stk))
+ return;
+ if (dd.onLockFast(&lt->dd, m->id, stk))
+ return;
+
+ SpinMutexLock lk(&mtx);
+ MutexEnsureID(lt, m);
+ if (wlock) // Only a recursive rlock may be held.
+ CHECK(!dd.isHeld(&lt->dd, m->id));
+ if (!trylock)
+ dd.addEdges(&lt->dd, m->id, stk ? stk : cb->Unwind(), cb->UniqueTid());
+ dd.onLockAfter(&lt->dd, m->id, stk);
+}
+
+void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
+ // Printf("T%p MutexUnLock: %zx\n", cb->lt, m->id);
+ dd.onUnlock(&cb->lt->dd, m->id);
+}
+
+void DD::MutexDestroy(DDCallback *cb,
+ DDMutex *m) {
+ if (!m->id) return;
+ SpinMutexLock lk(&mtx);
+ if (dd.nodeBelongsToCurrentEpoch(m->id))
+ dd.removeNode(m->id);
+ m->id = 0;
+}
+
+DDReport *DD::GetReport(DDCallback *cb) {
+ if (!cb->lt->report_pending)
+ return 0;
+ cb->lt->report_pending = false;
+ return &cb->lt->rep;
+}
+
+} // namespace __sanitizer
+#endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector2.cc b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector2.cc
new file mode 100644
index 0000000000..bc96ad230d
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector2.cc
@@ -0,0 +1,426 @@
+//===-- sanitizer_deadlock_detector2.cc -----------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Deadlock detector implementation based on adjacency lists.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_deadlock_detector_interface.h"
+#include "sanitizer_common.h"
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_mutex.h"
+
+#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
+
+namespace __sanitizer {
+
+const int kMaxNesting = 64;
+const u32 kNoId = -1;
+const u32 kEndId = -2;
+const int kMaxLink = 8;
+const int kL1Size = 1024;
+const int kL2Size = 1024;
+const int kMaxMutex = kL1Size * kL2Size;
+
+struct Id {
+ u32 id;
+ u32 seq;
+
+ explicit Id(u32 id = 0, u32 seq = 0)
+ : id(id)
+ , seq(seq) {
+ }
+};
+
+struct Link {
+ u32 id;
+ u32 seq;
+ u32 tid;
+ u32 stk0;
+ u32 stk1;
+
+ explicit Link(u32 id = 0, u32 seq = 0, u32 tid = 0, u32 s0 = 0, u32 s1 = 0)
+ : id(id)
+ , seq(seq)
+ , tid(tid)
+ , stk0(s0)
+ , stk1(s1) {
+ }
+};
+
+struct DDPhysicalThread {
+ DDReport rep;
+ bool report_pending;
+ bool visited[kMaxMutex];
+ Link pending[kMaxMutex];
+ Link path[kMaxMutex];
+};
+
+struct ThreadMutex {
+ u32 id;
+ u32 stk;
+};
+
+struct DDLogicalThread {
+ u64 ctx;
+ ThreadMutex locked[kMaxNesting];
+ int nlocked;
+};
+
+struct Mutex {
+ StaticSpinMutex mtx;
+ u32 seq;
+ int nlink;
+ Link link[kMaxLink];
+};
+
+struct DD : public DDetector {
+ explicit DD(const DDFlags *flags);
+
+ DDPhysicalThread* CreatePhysicalThread();
+ void DestroyPhysicalThread(DDPhysicalThread *pt);
+
+ DDLogicalThread* CreateLogicalThread(u64 ctx);
+ void DestroyLogicalThread(DDLogicalThread *lt);
+
+ void MutexInit(DDCallback *cb, DDMutex *m);
+ void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
+ void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
+ bool trylock);
+ void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
+ void MutexDestroy(DDCallback *cb, DDMutex *m);
+
+ DDReport *GetReport(DDCallback *cb);
+
+ void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
+ void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
+ u32 allocateId(DDCallback *cb);
+ Mutex *getMutex(u32 id);
+ u32 getMutexId(Mutex *m);
+
+ DDFlags flags;
+
+ Mutex* mutex[kL1Size];
+
+ SpinMutex mtx;
+ InternalMmapVector<u32> free_id;
+ int id_gen;
+};
+
+DDetector *DDetector::Create(const DDFlags *flags) {
+ (void)flags;
+ void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
+ return new(mem) DD(flags);
+}
+
+DD::DD(const DDFlags *flags)
+ : flags(*flags)
+ , free_id(1024) {
+ id_gen = 0;
+}
+
+DDPhysicalThread* DD::CreatePhysicalThread() {
+ DDPhysicalThread *pt = (DDPhysicalThread*)MmapOrDie(sizeof(DDPhysicalThread),
+ "deadlock detector (physical thread)");
+ return pt;
+}
+
+void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
+ pt->~DDPhysicalThread();
+ UnmapOrDie(pt, sizeof(DDPhysicalThread));
+}
+
+DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
+ DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(
+ sizeof(DDLogicalThread));
+ lt->ctx = ctx;
+ lt->nlocked = 0;
+ return lt;
+}
+
+void DD::DestroyLogicalThread(DDLogicalThread *lt) {
+ lt->~DDLogicalThread();
+ InternalFree(lt);
+}
+
+void DD::MutexInit(DDCallback *cb, DDMutex *m) {
+ VPrintf(2, "#%llu: DD::MutexInit(%p)\n", cb->lt->ctx, m);
+ m->id = kNoId;
+ m->recursion = 0;
+ atomic_store(&m->owner, 0, memory_order_relaxed);
+}
+
+Mutex *DD::getMutex(u32 id) {
+ return &mutex[id / kL2Size][id % kL2Size];
+}
+
+u32 DD::getMutexId(Mutex *m) {
+ for (int i = 0; i < kL1Size; i++) {
+ Mutex *tab = mutex[i];
+ if (tab == 0)
+ break;
+ if (m >= tab && m < tab + kL2Size)
+ return i * kL2Size + (m - tab);
+ }
+ return -1;
+}
+
+u32 DD::allocateId(DDCallback *cb) {
+ u32 id = -1;
+ SpinMutexLock l(&mtx);
+ if (free_id.size() > 0) {
+ id = free_id.back();
+ free_id.pop_back();
+ } else {
+ CHECK_LT(id_gen, kMaxMutex);
+ if ((id_gen % kL2Size) == 0) {
+ mutex[id_gen / kL2Size] = (Mutex*)MmapOrDie(kL2Size * sizeof(Mutex),
+ "deadlock detector (mutex table)");
+ }
+ id = id_gen++;
+ }
+ CHECK_LE(id, kMaxMutex);
+ VPrintf(3, "#%llu: DD::allocateId assign id %d\n", cb->lt->ctx, id);
+ return id;
+}
+
+void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
+ VPrintf(2, "#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\n",
+ cb->lt->ctx, m, wlock, cb->lt->nlocked);
+ DDPhysicalThread *pt = cb->pt;
+ DDLogicalThread *lt = cb->lt;
+
+ uptr owner = atomic_load(&m->owner, memory_order_relaxed);
+ if (owner == (uptr)cb->lt) {
+ VPrintf(3, "#%llu: DD::MutexBeforeLock recursive\n",
+ cb->lt->ctx);
+ return;
+ }
+
+ CHECK_LE(lt->nlocked, kMaxNesting);
+
+ // FIXME(dvyukov): don't allocate id if lt->nlocked == 0?
+ if (m->id == kNoId)
+ m->id = allocateId(cb);
+
+ ThreadMutex *tm = &lt->locked[lt->nlocked++];
+ tm->id = m->id;
+ if (flags.second_deadlock_stack)
+ tm->stk = cb->Unwind();
+ if (lt->nlocked == 1) {
+ VPrintf(3, "#%llu: DD::MutexBeforeLock first mutex\n",
+ cb->lt->ctx);
+ return;
+ }
+
+ bool added = false;
+ Mutex *mtx = getMutex(m->id);
+ for (int i = 0; i < lt->nlocked - 1; i++) {
+ u32 id1 = lt->locked[i].id;
+ u32 stk1 = lt->locked[i].stk;
+ Mutex *mtx1 = getMutex(id1);
+ SpinMutexLock l(&mtx1->mtx);
+ if (mtx1->nlink == kMaxLink) {
+ // FIXME(dvyukov): check stale links
+ continue;
+ }
+ int li = 0;
+ for (; li < mtx1->nlink; li++) {
+ Link *link = &mtx1->link[li];
+ if (link->id == m->id) {
+ if (link->seq != mtx->seq) {
+ link->seq = mtx->seq;
+ link->tid = lt->ctx;
+ link->stk0 = stk1;
+ link->stk1 = cb->Unwind();
+ added = true;
+ VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
+ cb->lt->ctx, getMutexId(mtx1), m->id);
+ }
+ break;
+ }
+ }
+ if (li == mtx1->nlink) {
+ // FIXME(dvyukov): check stale links
+ Link *link = &mtx1->link[mtx1->nlink++];
+ link->id = m->id;
+ link->seq = mtx->seq;
+ link->tid = lt->ctx;
+ link->stk0 = stk1;
+ link->stk1 = cb->Unwind();
+ added = true;
+ VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
+ cb->lt->ctx, getMutexId(mtx1), m->id);
+ }
+ }
+
+ if (!added || mtx->nlink == 0) {
+ VPrintf(3, "#%llu: DD::MutexBeforeLock don't check\n",
+ cb->lt->ctx);
+ return;
+ }
+
+ CycleCheck(pt, lt, m);
+}
+
+void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
+ bool trylock) {
+ VPrintf(2, "#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\n",
+ cb->lt->ctx, m, wlock, trylock, cb->lt->nlocked);
+ DDLogicalThread *lt = cb->lt;
+
+ uptr owner = atomic_load(&m->owner, memory_order_relaxed);
+ if (owner == (uptr)cb->lt) {
+ VPrintf(3, "#%llu: DD::MutexAfterLock recursive\n", cb->lt->ctx);
+ CHECK(wlock);
+ m->recursion++;
+ return;
+ }
+ CHECK_EQ(owner, 0);
+ if (wlock) {
+ VPrintf(3, "#%llu: DD::MutexAfterLock set owner\n", cb->lt->ctx);
+ CHECK_EQ(m->recursion, 0);
+ m->recursion = 1;
+ atomic_store(&m->owner, (uptr)cb->lt, memory_order_relaxed);
+ }
+
+ if (!trylock)
+ return;
+
+ CHECK_LE(lt->nlocked, kMaxNesting);
+ if (m->id == kNoId)
+ m->id = allocateId(cb);
+ ThreadMutex *tm = &lt->locked[lt->nlocked++];
+ tm->id = m->id;
+ if (flags.second_deadlock_stack)
+ tm->stk = cb->Unwind();
+}
+
+void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
+ VPrintf(2, "#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\n",
+ cb->lt->ctx, m, wlock, cb->lt->nlocked);
+ DDLogicalThread *lt = cb->lt;
+
+ uptr owner = atomic_load(&m->owner, memory_order_relaxed);
+ if (owner == (uptr)cb->lt) {
+ VPrintf(3, "#%llu: DD::MutexBeforeUnlock recursive\n", cb->lt->ctx);
+ if (--m->recursion > 0)
+ return;
+ VPrintf(3, "#%llu: DD::MutexBeforeUnlock reset owner\n", cb->lt->ctx);
+ atomic_store(&m->owner, 0, memory_order_relaxed);
+ }
+ CHECK_NE(m->id, kNoId);
+ int last = lt->nlocked - 1;
+ for (int i = last; i >= 0; i--) {
+ if (cb->lt->locked[i].id == m->id) {
+ lt->locked[i] = lt->locked[last];
+ lt->nlocked--;
+ break;
+ }
+ }
+}
+
+void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
+ VPrintf(2, "#%llu: DD::MutexDestroy(%p)\n",
+ cb->lt->ctx, m);
+ DDLogicalThread *lt = cb->lt;
+
+ if (m->id == kNoId)
+ return;
+
+ // Remove the mutex from lt->locked if there.
+ int last = lt->nlocked - 1;
+ for (int i = last; i >= 0; i--) {
+ if (lt->locked[i].id == m->id) {
+ lt->locked[i] = lt->locked[last];
+ lt->nlocked--;
+ break;
+ }
+ }
+
+ // Clear and invalidate the mutex descriptor.
+ {
+ Mutex *mtx = getMutex(m->id);
+ SpinMutexLock l(&mtx->mtx);
+ mtx->seq++;
+ mtx->nlink = 0;
+ }
+
+ // Return id to cache.
+ {
+ SpinMutexLock l(&mtx);
+ free_id.push_back(m->id);
+ }
+}
+
+void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
+ DDMutex *m) {
+ internal_memset(pt->visited, 0, sizeof(pt->visited));
+ int npath = 0;
+ int npending = 0;
+ {
+ Mutex *mtx = getMutex(m->id);
+ SpinMutexLock l(&mtx->mtx);
+ for (int li = 0; li < mtx->nlink; li++)
+ pt->pending[npending++] = mtx->link[li];
+ }
+ while (npending > 0) {
+ Link link = pt->pending[--npending];
+ if (link.id == kEndId) {
+ npath--;
+ continue;
+ }
+ if (pt->visited[link.id])
+ continue;
+ Mutex *mtx1 = getMutex(link.id);
+ SpinMutexLock l(&mtx1->mtx);
+ if (mtx1->seq != link.seq)
+ continue;
+ pt->visited[link.id] = true;
+ if (mtx1->nlink == 0)
+ continue;
+ pt->path[npath++] = link;
+ pt->pending[npending++] = Link(kEndId);
+ if (link.id == m->id)
+ return Report(pt, lt, npath); // Bingo!
+ for (int li = 0; li < mtx1->nlink; li++) {
+ Link *link1 = &mtx1->link[li];
+ // Mutex *mtx2 = getMutex(link->id);
+ // FIXME(dvyukov): fast seq check
+ // FIXME(dvyukov): fast nlink != 0 check
+ // FIXME(dvyukov): fast pending check?
+ // FIXME(dvyukov): npending can be larger than kMaxMutex
+ pt->pending[npending++] = *link1;
+ }
+ }
+}
+
+void DD::Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath) {
+ DDReport *rep = &pt->rep;
+ rep->n = npath;
+ for (int i = 0; i < npath; i++) {
+ Link *link = &pt->path[i];
+ Link *link0 = &pt->path[i ? i - 1 : npath - 1];
+ rep->loop[i].thr_ctx = link->tid;
+ rep->loop[i].mtx_ctx0 = link0->id;
+ rep->loop[i].mtx_ctx1 = link->id;
+ rep->loop[i].stk[0] = flags.second_deadlock_stack ? link->stk0 : 0;
+ rep->loop[i].stk[1] = link->stk1;
+ }
+ pt->report_pending = true;
+}
+
+DDReport *DD::GetReport(DDCallback *cb) {
+ if (!cb->pt->report_pending)
+ return 0;
+ cb->pt->report_pending = false;
+ return &cb->pt->rep;
+}
+
+} // namespace __sanitizer
+#endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
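
A minimal standalone sketch (not part of the imported sources) of the two-level mutex table used by DD::getMutex()/DD::allocateId() above: the high bits of a dense mutex id select an L1 slot, the low bits an offset inside a lazily mmap-ed L2 block, so a lookup is two array indexings with no locking. The kL1Size/kL2Size values below are assumptions for illustration only.

    #include <cassert>
    #include <cstdio>

    namespace dd_sketch {
    const unsigned kL2Size = 1024;      // entries per lazily allocated block (assumed value)
    const unsigned kL1Size = 64;        // number of L1 slots (assumed value)
    struct Mutex { int nlink; };        // stand-in for the real per-mutex record

    Mutex *l1[kL1Size];                 // slot i stays null until the first id in block i is handed out

    Mutex *getMutex(unsigned id) {
      // Same arithmetic as DD::getMutex above: block index, then offset within the block.
      return &l1[id / kL2Size][id % kL2Size];
    }
    }  // namespace dd_sketch

    int main() {
      using namespace dd_sketch;
      // DD::allocateId mmaps a fresh block whenever id_gen crosses a kL2Size boundary;
      // plain new[] stands in for MmapOrDie here.
      l1[0] = new Mutex[kL2Size]();
      unsigned id = 7;
      assert(getMutex(id) == &l1[0][7]);
      std::printf("id %u -> block %u, offset %u\n", id, id / kL2Size, id % kL2Size);
      return 0;
    }
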
diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h
new file mode 100644
index 0000000000..59d8d93fe1
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h
@@ -0,0 +1,91 @@
+//===-- sanitizer_deadlock_detector_interface.h -----------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime.
+// Abstract deadlock detector interface.
+// FIXME: this is work in progress, nothing really works yet.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H
+#define SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H
+
+#ifndef SANITIZER_DEADLOCK_DETECTOR_VERSION
+# define SANITIZER_DEADLOCK_DETECTOR_VERSION 1
+#endif
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_atomic.h"
+
+namespace __sanitizer {
+
+// dd - deadlock detector.
+// lt - logical (user) thread.
+// pt - physical (OS) thread.
+
+struct DDPhysicalThread;
+struct DDLogicalThread;
+
+struct DDMutex {
+#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
+ uptr id;
+ u32 stk; // creation stack
+#elif SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
+ u32 id;
+ u32 recursion;
+ atomic_uintptr_t owner;
+#else
+# error "BAD SANITIZER_DEADLOCK_DETECTOR_VERSION"
+#endif
+ u64 ctx;
+};
+
+struct DDFlags {
+ bool second_deadlock_stack;
+};
+
+struct DDReport {
+ enum { kMaxLoopSize = 8 };
+ int n; // number of entries in loop
+ struct {
+ u64 thr_ctx; // user thread context
+ u64 mtx_ctx0; // user mutex context, start of the edge
+ u64 mtx_ctx1; // user mutex context, end of the edge
+ u32 stk[2]; // stack ids for the edge
+ } loop[kMaxLoopSize];
+};
+
+struct DDCallback {
+ DDPhysicalThread *pt;
+ DDLogicalThread *lt;
+
+ virtual u32 Unwind() { return 0; }
+ virtual int UniqueTid() { return 0; }
+};
+
+struct DDetector {
+ static DDetector *Create(const DDFlags *flags);
+
+ virtual DDPhysicalThread* CreatePhysicalThread() { return 0; }
+ virtual void DestroyPhysicalThread(DDPhysicalThread *pt) {}
+
+ virtual DDLogicalThread* CreateLogicalThread(u64 ctx) { return 0; }
+ virtual void DestroyLogicalThread(DDLogicalThread *lt) {}
+
+ virtual void MutexInit(DDCallback *cb, DDMutex *m) {}
+ virtual void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {}
+ virtual void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
+ bool trylock) {}
+ virtual void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {}
+ virtual void MutexDestroy(DDCallback *cb, DDMutex *m) {}
+
+ virtual DDReport *GetReport(DDCallback *cb) { return 0; }
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H
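
A hedged usage sketch (not part of the import) of the call order a tool is expected to follow around a user mutex operation when driving a DDetector. The stand-in types below only mirror the shape of the interface declared above; the real TSan wiring differs in detail.

    #include <cstdio>

    // Stand-ins that mirror the shape of DDMutex / DDetector declared above.
    struct FakeMutex {};
    struct FakeDetector {
      void MutexBeforeLock(FakeMutex *) { std::puts("before lock: record edges, run cycle check"); }
      void MutexAfterLock(FakeMutex *)  { std::puts("after lock: record ownership / recursion"); }
      void MutexBeforeUnlock(FakeMutex *) { std::puts("before unlock: drop mutex from held set"); }
      bool HasReport() { return false; }  // GetReport() returning non-null means a cycle was closed
    };

    int main() {
      FakeDetector dd;
      FakeMutex m;
      dd.MutexBeforeLock(&m);   // called before acquiring the real lock
      // ... the real pthread_mutex_lock() happens here ...
      dd.MutexAfterLock(&m);
      if (dd.HasReport())
        std::puts("deadlock cycle detected; report pending");
      dd.MutexBeforeUnlock(&m);
      // ... the real pthread_mutex_unlock() ...
      return 0;
    }
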
diff --git a/libsanitizer/sanitizer_common/sanitizer_flags.cc b/libsanitizer/sanitizer_common/sanitizer_flags.cc
index 90bb57dff9..d7e7118212 100644
--- a/libsanitizer/sanitizer_common/sanitizer_flags.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_flags.cc
@@ -13,39 +13,157 @@
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
+#include "sanitizer_list.h"
namespace __sanitizer {
+CommonFlags common_flags_dont_use;
+
+struct FlagDescription {
+ const char *name;
+ const char *description;
+ FlagDescription *next;
+};
+
+IntrusiveList<FlagDescription> flag_descriptions;
+
+// If set, the tool will install its own SEGV signal handler by default.
+#ifndef SANITIZER_NEEDS_SEGV
+# define SANITIZER_NEEDS_SEGV 1
+#endif
+
void SetCommonFlagsDefaults(CommonFlags *f) {
f->symbolize = true;
f->external_symbolizer_path = 0;
+ f->allow_addr2line = false;
f->strip_path_prefix = "";
+ f->fast_unwind_on_check = false;
f->fast_unwind_on_fatal = false;
f->fast_unwind_on_malloc = true;
f->handle_ioctl = false;
f->malloc_context_size = 1;
f->log_path = "stderr";
f->verbosity = 0;
- f->detect_leaks = false;
+ f->detect_leaks = true;
f->leak_check_at_exit = true;
f->allocator_may_return_null = false;
f->print_summary = true;
+ f->check_printf = true;
+ // TODO(glider): tools may want to set different defaults for handle_segv.
+ f->handle_segv = SANITIZER_NEEDS_SEGV;
+ f->allow_user_segv_handler = false;
+ f->use_sigaltstack = true;
+ f->detect_deadlocks = false;
+ f->clear_shadow_mmap_threshold = 64 * 1024;
+ f->color = "auto";
+ f->legacy_pthread_cond = false;
+ f->intercept_tls_get_addr = false;
+ f->coverage = false;
+ f->coverage_direct = SANITIZER_ANDROID;
+ f->coverage_dir = ".";
+ f->full_address_space = false;
+ f->suppressions = "";
+ f->print_suppressions = true;
+ f->disable_coredump = (SANITIZER_WORDSIZE == 64);
+ f->symbolize_inline_frames = true;
+ f->stack_trace_format = "DEFAULT";
}
void ParseCommonFlagsFromString(CommonFlags *f, const char *str) {
- ParseFlag(str, &f->symbolize, "symbolize");
- ParseFlag(str, &f->external_symbolizer_path, "external_symbolizer_path");
- ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix");
- ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal");
- ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc");
- ParseFlag(str, &f->handle_ioctl, "handle_ioctl");
- ParseFlag(str, &f->malloc_context_size, "malloc_context_size");
- ParseFlag(str, &f->log_path, "log_path");
- ParseFlag(str, &f->verbosity, "verbosity");
- ParseFlag(str, &f->detect_leaks, "detect_leaks");
- ParseFlag(str, &f->leak_check_at_exit, "leak_check_at_exit");
- ParseFlag(str, &f->allocator_may_return_null, "allocator_may_return_null");
- ParseFlag(str, &f->print_summary, "print_summary");
+ ParseFlag(str, &f->symbolize, "symbolize",
+ "If set, use the online symbolizer from common sanitizer runtime to turn "
+ "virtual addresses to file/line locations.");
+ ParseFlag(str, &f->external_symbolizer_path, "external_symbolizer_path",
+ "Path to external symbolizer. If empty, the tool will search $PATH for "
+ "the symbolizer.");
+ ParseFlag(str, &f->allow_addr2line, "allow_addr2line",
+ "If set, allows online symbolizer to run addr2line binary to symbolize "
+ "stack traces (addr2line will only be used if llvm-symbolizer binary is "
+ "unavailable.");
+ ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix",
+ "Strips this prefix from file paths in error reports.");
+ ParseFlag(str, &f->fast_unwind_on_check, "fast_unwind_on_check",
+ "If available, use the fast frame-pointer-based unwinder on "
+ "internal CHECK failures.");
+ ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal",
+ "If available, use the fast frame-pointer-based unwinder on fatal "
+ "errors.");
+ ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc",
+ "If available, use the fast frame-pointer-based unwinder on "
+ "malloc/free.");
+ ParseFlag(str, &f->handle_ioctl, "handle_ioctl",
+ "Intercept and handle ioctl requests.");
+ ParseFlag(str, &f->malloc_context_size, "malloc_context_size",
+ "Max number of stack frames kept for each allocation/deallocation.");
+ ParseFlag(str, &f->log_path, "log_path",
+ "Write logs to \"log_path.pid\". The special values are \"stdout\" and "
+ "\"stderr\". The default is \"stderr\".");
+ ParseFlag(str, &f->verbosity, "verbosity",
+ "Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).");
+ ParseFlag(str, &f->detect_leaks, "detect_leaks",
+ "Enable memory leak detection.");
+ ParseFlag(str, &f->leak_check_at_exit, "leak_check_at_exit",
+ "Invoke leak checking in an atexit handler. Has no effect if "
+ "detect_leaks=false, or if __lsan_do_leak_check() is called before the "
+ "handler has a chance to run.");
+ ParseFlag(str, &f->allocator_may_return_null, "allocator_may_return_null",
+ "If false, the allocator will crash instead of returning 0 on "
+ "out-of-memory.");
+ ParseFlag(str, &f->print_summary, "print_summary",
+ "If false, disable printing error summaries in addition to error "
+ "reports.");
+ ParseFlag(str, &f->check_printf, "check_printf",
+ "Check printf arguments.");
+ ParseFlag(str, &f->handle_segv, "handle_segv",
+ "If set, registers the tool's custom SEGV handler (both SIGBUS and "
+ "SIGSEGV on OSX).");
+ ParseFlag(str, &f->allow_user_segv_handler, "allow_user_segv_handler",
+ "If set, allows user to register a SEGV handler even if the tool "
+ "registers one.");
+ ParseFlag(str, &f->use_sigaltstack, "use_sigaltstack",
+ "If set, uses alternate stack for signal handling.");
+ ParseFlag(str, &f->detect_deadlocks, "detect_deadlocks",
+ "If set, deadlock detection is enabled.");
+ ParseFlag(str, &f->clear_shadow_mmap_threshold,
+ "clear_shadow_mmap_threshold",
+ "Large shadow regions are zero-filled using mmap(NORESERVE) instead of "
+ "memset(). This is the threshold size in bytes.");
+ ParseFlag(str, &f->color, "color",
+ "Colorize reports: (always|never|auto).");
+ ParseFlag(str, &f->legacy_pthread_cond, "legacy_pthread_cond",
+ "Enables support for dynamic libraries linked with libpthread 2.2.5.");
+ ParseFlag(str, &f->intercept_tls_get_addr, "intercept_tls_get_addr",
+ "Intercept __tls_get_addr.");
+ ParseFlag(str, &f->help, "help", "Print the flag descriptions.");
+ ParseFlag(str, &f->mmap_limit_mb, "mmap_limit_mb",
+ "Limit the amount of mmap-ed memory (excluding shadow) in Mb; "
+ "not a user-facing flag, used mosly for testing the tools");
+ ParseFlag(str, &f->coverage, "coverage",
+ "If set, coverage information will be dumped at program shutdown (if the "
+ "coverage instrumentation was enabled at compile time).");
+ ParseFlag(str, &f->coverage_direct, "coverage_direct",
+ "If set, coverage information will be dumped directly to a memory "
+ "mapped file. This way data is not lost even if the process is "
+ "suddenly killed.");
+ ParseFlag(str, &f->coverage_dir, "coverage_dir",
+ "Target directory for coverage dumps. Defaults to the current "
+ "directory.");
+ ParseFlag(str, &f->full_address_space, "full_address_space",
+ "Sanitize complete address space; "
+ "by default kernel area on 32-bit platforms will not be sanitized");
+ ParseFlag(str, &f->suppressions, "suppressions", "Suppressions file name.");
+ ParseFlag(str, &f->print_suppressions, "print_suppressions",
+ "Print matched suppressions at exit.");
+ ParseFlag(str, &f->disable_coredump, "disable_coredump",
+ "Disable core dumping. By default, disable_core=1 on 64-bit to avoid "
+ "dumping a 16T+ core file. Ignored on OSes that don't dump core by"
+ "default and for sanitizers that don't reserve lots of virtual memory.");
+ ParseFlag(str, &f->symbolize_inline_frames, "symbolize_inline_frames",
+ "Print inlined frames in stacktraces. Defaults to true.");
+ ParseFlag(str, &f->stack_trace_format, "stack_trace_format",
+ "Format string used to render stack frames. "
+ "See sanitizer_stacktrace_printer.h for the format description. "
+ "Use DEFAULT to get default format.");
// Do a sanity check for certain flags.
if (f->malloc_context_size < 1)
@@ -61,14 +179,17 @@ static bool GetFlagValue(const char *env, const char *name,
pos = internal_strstr(env, name);
if (pos == 0)
return false;
- if (pos != env && ((pos[-1] >= 'a' && pos[-1] <= 'z') || pos[-1] == '_')) {
+ const char *name_end = pos + internal_strlen(name);
+ if ((pos != env &&
+ ((pos[-1] >= 'a' && pos[-1] <= 'z') || pos[-1] == '_')) ||
+ *name_end != '=') {
// Seems to be middle of another flag name or value.
env = pos + 1;
continue;
}
+ pos = name_end;
break;
}
- pos += internal_strlen(name);
const char *end;
if (pos[0] != '=') {
end = pos;
@@ -100,9 +221,40 @@ static bool StartsWith(const char *flag, int flag_length, const char *value) {
(0 == internal_strncmp(flag, value, value_length));
}
-void ParseFlag(const char *env, bool *flag, const char *name) {
+static LowLevelAllocator allocator_for_flags;
+
+// The linear scan is suboptimal, but the number of flags is relatively small.
+bool FlagInDescriptionList(const char *name) {
+ IntrusiveList<FlagDescription>::Iterator it(&flag_descriptions);
+ while (it.hasNext()) {
+ if (!internal_strcmp(it.next()->name, name)) return true;
+ }
+ return false;
+}
+
+void AddFlagDescription(const char *name, const char *description) {
+ if (FlagInDescriptionList(name)) return;
+ FlagDescription *new_description = new(allocator_for_flags) FlagDescription;
+ new_description->name = name;
+ new_description->description = description;
+ flag_descriptions.push_back(new_description);
+}
+
+// TODO(glider): put the descriptions inside CommonFlags.
+void PrintFlagDescriptions() {
+ IntrusiveList<FlagDescription>::Iterator it(&flag_descriptions);
+ Printf("Available flags for %s:\n", SanitizerToolName);
+ while (it.hasNext()) {
+ FlagDescription *descr = it.next();
+ Printf("\t%s\n\t\t- %s\n", descr->name, descr->description);
+ }
+}
+
+void ParseFlag(const char *env, bool *flag,
+ const char *name, const char *descr) {
const char *value;
int value_length;
+ AddFlagDescription(name, descr);
if (!GetFlagValue(env, name, &value, &value_length))
return;
if (StartsWith(value, value_length, "0") ||
@@ -115,19 +267,31 @@ void ParseFlag(const char *env, bool *flag, const char *name) {
*flag = true;
}
-void ParseFlag(const char *env, int *flag, const char *name) {
+void ParseFlag(const char *env, int *flag,
+ const char *name, const char *descr) {
const char *value;
int value_length;
+ AddFlagDescription(name, descr);
if (!GetFlagValue(env, name, &value, &value_length))
return;
*flag = static_cast<int>(internal_atoll(value));
}
-static LowLevelAllocator allocator_for_flags;
+void ParseFlag(const char *env, uptr *flag,
+ const char *name, const char *descr) {
+ const char *value;
+ int value_length;
+ AddFlagDescription(name, descr);
+ if (!GetFlagValue(env, name, &value, &value_length))
+ return;
+ *flag = static_cast<uptr>(internal_atoll(value));
+}
-void ParseFlag(const char *env, const char **flag, const char *name) {
+void ParseFlag(const char *env, const char **flag,
+ const char *name, const char *descr) {
const char *value;
int value_length;
+ AddFlagDescription(name, descr);
if (!GetFlagValue(env, name, &value, &value_length))
return;
// Copy the flag value. Don't use locks here, as flags are parsed at
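
The GetFlagValue change earlier in this file tightens name matching so that a flag name counts as a hit only when it is not preceded by [a-z_] and is immediately followed by '='. A standalone sketch of just that rule (simplified; the retry loop of the real code is omitted): with it, searching for "coverage" inside "coverage_direct=1" no longer yields a bogus match.

    #include <cstring>
    #include <cstdio>

    // Returns true if the occurrence of 'name' at 'pos' inside 'env' is a real
    // flag assignment rather than the prefix of a longer flag name.
    static bool MatchesFlagName(const char *env, const char *pos, const char *name) {
      if (pos != env && ((pos[-1] >= 'a' && pos[-1] <= 'z') || pos[-1] == '_'))
        return false;                     // middle of another flag name
      return pos[strlen(name)] == '=';    // must be followed by '='
    }

    int main() {
      const char *env = "coverage_direct=1";
      std::printf("'coverage' matches: %d\n",
                  MatchesFlagName(env, strstr(env, "coverage"), "coverage"));               // prints 0
      std::printf("'coverage_direct' matches: %d\n",
                  MatchesFlagName(env, strstr(env, "coverage_direct"), "coverage_direct")); // prints 1
      return 0;
    }
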
diff --git a/libsanitizer/sanitizer_common/sanitizer_flags.h b/libsanitizer/sanitizer_common/sanitizer_flags.h
index 46ec092819..b7a5013181 100644
--- a/libsanitizer/sanitizer_common/sanitizer_flags.h
+++ b/libsanitizer/sanitizer_common/sanitizer_flags.h
@@ -16,51 +16,61 @@
namespace __sanitizer {
-void ParseFlag(const char *env, bool *flag, const char *name);
-void ParseFlag(const char *env, int *flag, const char *name);
-void ParseFlag(const char *env, const char **flag, const char *name);
+void ParseFlag(const char *env, bool *flag,
+ const char *name, const char *descr);
+void ParseFlag(const char *env, int *flag,
+ const char *name, const char *descr);
+void ParseFlag(const char *env, uptr *flag,
+ const char *name, const char *descr);
+void ParseFlag(const char *env, const char **flag,
+ const char *name, const char *descr);
struct CommonFlags {
- // If set, use the online symbolizer from common sanitizer runtime.
bool symbolize;
- // Path to external symbolizer. If it is NULL, symbolizer will be looked for
- // in PATH. If it is empty, external symbolizer will not be started.
const char *external_symbolizer_path;
- // Strips this prefix from file paths in error reports.
+ bool allow_addr2line;
const char *strip_path_prefix;
- // Use fast (frame-pointer-based) unwinder on fatal errors (if available).
+ bool fast_unwind_on_check;
bool fast_unwind_on_fatal;
- // Use fast (frame-pointer-based) unwinder on malloc/free (if available).
bool fast_unwind_on_malloc;
- // Intercept and handle ioctl requests.
bool handle_ioctl;
- // Max number of stack frames kept for each allocation/deallocation.
int malloc_context_size;
- // Write logs to "log_path.pid".
- // The special values are "stdout" and "stderr".
- // The default is "stderr".
const char *log_path;
- // Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).
int verbosity;
- // Enable memory leak detection.
bool detect_leaks;
- // Invoke leak checking in an atexit handler. Has no effect if
- // detect_leaks=false, or if __lsan_do_leak_check() is called before the
- // handler has a chance to run.
bool leak_check_at_exit;
- // If false, the allocator will crash instead of returning 0 on out-of-memory.
bool allocator_may_return_null;
- // If false, disable printing error summaries in addition to error reports.
bool print_summary;
+ bool check_printf;
+ bool handle_segv;
+ bool allow_user_segv_handler;
+ bool use_sigaltstack;
+ bool detect_deadlocks;
+ uptr clear_shadow_mmap_threshold;
+ const char *color;
+ bool legacy_pthread_cond;
+ bool intercept_tls_get_addr;
+ bool help;
+ uptr mmap_limit_mb;
+ bool coverage;
+ bool coverage_direct;
+ const char *coverage_dir;
+ bool full_address_space;
+ const char *suppressions;
+ bool print_suppressions;
+ bool disable_coredump;
+ bool symbolize_inline_frames;
+ const char *stack_trace_format;
};
inline CommonFlags *common_flags() {
- static CommonFlags f;
- return &f;
+ extern CommonFlags common_flags_dont_use;
+ return &common_flags_dont_use;
}
void SetCommonFlagsDefaults(CommonFlags *f);
void ParseCommonFlagsFromString(CommonFlags *f, const char *str);
+void PrintFlagDescriptions();
} // namespace __sanitizer
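
A usage sketch of the rewritten flags machinery (an assumption about the typical call site, not the exact per-tool code; it presumes the sanitizer_common headers are available in the translation unit): defaults are set first, then the tool's option string is parsed, and the new help flag triggers PrintFlagDescriptions().

    #include "sanitizer_common.h"
    #include "sanitizer_flags.h"

    namespace __sanitizer {

    void InitializeCommonFlagsSketch() {
      CommonFlags *cf = common_flags();
      SetCommonFlagsDefaults(cf);
      // ASAN_OPTIONS is used here only as an example; each tool reads its own variable.
      ParseCommonFlagsFromString(cf, GetEnv("ASAN_OPTIONS"));
      if (cf->help)
        PrintFlagDescriptions();
    }

    }  // namespace __sanitizer
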
diff --git a/libsanitizer/sanitizer_common/sanitizer_freebsd.h b/libsanitizer/sanitizer_common/sanitizer_freebsd.h
new file mode 100644
index 0000000000..47bb1313e6
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_freebsd.h
@@ -0,0 +1,135 @@
+//===-- sanitizer_freebsd.h -------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime. It contains FreeBSD-specific
+// definitions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_FREEBSD_H
+#define SANITIZER_FREEBSD_H
+
+#include "sanitizer_internal_defs.h"
+
+// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in
+// 32-bit mode.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
+# include <osreldate.h>
+# if __FreeBSD_version <= 902001 // v9.2
+# include <link.h>
+# include <sys/param.h>
+# include <ucontext.h>
+
+namespace __sanitizer {
+
+typedef unsigned long long __xuint64_t;
+
+typedef __int32_t __xregister_t;
+
+typedef struct __xmcontext {
+ __xregister_t mc_onstack;
+ __xregister_t mc_gs;
+ __xregister_t mc_fs;
+ __xregister_t mc_es;
+ __xregister_t mc_ds;
+ __xregister_t mc_edi;
+ __xregister_t mc_esi;
+ __xregister_t mc_ebp;
+ __xregister_t mc_isp;
+ __xregister_t mc_ebx;
+ __xregister_t mc_edx;
+ __xregister_t mc_ecx;
+ __xregister_t mc_eax;
+ __xregister_t mc_trapno;
+ __xregister_t mc_err;
+ __xregister_t mc_eip;
+ __xregister_t mc_cs;
+ __xregister_t mc_eflags;
+ __xregister_t mc_esp;
+ __xregister_t mc_ss;
+
+ int mc_len;
+ int mc_fpformat;
+ int mc_ownedfp;
+ __xregister_t mc_flags;
+
+ int mc_fpstate[128] __aligned(16);
+ __xregister_t mc_fsbase;
+ __xregister_t mc_gsbase;
+ __xregister_t mc_xfpustate;
+ __xregister_t mc_xfpustate_len;
+
+ int mc_spare2[4];
+} xmcontext_t;
+
+typedef struct __xucontext {
+ sigset_t uc_sigmask;
+ xmcontext_t uc_mcontext;
+
+ struct __ucontext *uc_link;
+ stack_t uc_stack;
+ int uc_flags;
+ int __spare__[4];
+} xucontext_t;
+
+struct xkinfo_vmentry {
+ int kve_structsize;
+ int kve_type;
+ __xuint64_t kve_start;
+ __xuint64_t kve_end;
+ __xuint64_t kve_offset;
+ __xuint64_t kve_vn_fileid;
+ __uint32_t kve_vn_fsid;
+ int kve_flags;
+ int kve_resident;
+ int kve_private_resident;
+ int kve_protection;
+ int kve_ref_count;
+ int kve_shadow_count;
+ int kve_vn_type;
+ __xuint64_t kve_vn_size;
+ __uint32_t kve_vn_rdev;
+ __uint16_t kve_vn_mode;
+ __uint16_t kve_status;
+ int _kve_ispare[12];
+ char kve_path[PATH_MAX];
+};
+
+typedef struct {
+ __uint32_t p_type;
+ __uint32_t p_offset;
+ __uint32_t p_vaddr;
+ __uint32_t p_paddr;
+ __uint32_t p_filesz;
+ __uint32_t p_memsz;
+ __uint32_t p_flags;
+ __uint32_t p_align;
+} XElf32_Phdr;
+
+struct xdl_phdr_info {
+ Elf_Addr dlpi_addr;
+ const char *dlpi_name;
+ const XElf32_Phdr *dlpi_phdr;
+ Elf_Half dlpi_phnum;
+ unsigned long long int dlpi_adds;
+ unsigned long long int dlpi_subs;
+ size_t dlpi_tls_modid;
+ void *dlpi_tls_data;
+};
+
+typedef int (*__xdl_iterate_hdr_callback)(struct xdl_phdr_info*, size_t, void*);
+typedef int xdl_iterate_phdr_t(__xdl_iterate_hdr_callback, void*);
+
+#define xdl_iterate_phdr(callback, param) \
+ (((xdl_iterate_phdr_t*) dl_iterate_phdr)((callback), (param)))
+
+} // namespace __sanitizer
+
+# endif // __FreeBSD_version <= 902001
+#endif // SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
+
+#endif // SANITIZER_FREEBSD_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_interception.h b/libsanitizer/sanitizer_common/sanitizer_interception.h
new file mode 100644
index 0000000000..2346fa859b
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_interception.h
@@ -0,0 +1,23 @@
+//===-- sanitizer_interception.h --------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Common macro definitions for interceptors.
+// Always use this header instead of interception/interception.h.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_INTERCEPTION_H
+#define SANITIZER_INTERCEPTION_H
+
+#include "interception/interception.h"
+#include "sanitizer_common.h"
+
+#if SANITIZER_LINUX && !defined(SANITIZER_GO)
+#undef REAL
+#define REAL(x) IndirectExternCall(__interception::PTR_TO_REAL(x))
+#endif
+
+#endif // SANITIZER_INTERCEPTION_H
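
A hedged sketch (illustration only, not code from this import) of the interceptor pattern this wrapper header feeds into: an interceptor does its tool-specific bookkeeping and then forwards to the original function through REAL(), which on Linux is routed through IndirectExternCall() by the redefinition above. strlen is used purely as an example.

    #include "sanitizer_interception.h"

    INTERCEPTOR(SIZE_T, strlen, const char *s) {
      // Tool-specific checks on 's' would go here (e.g. shadow/poisoning checks).
      return REAL(strlen)(s);
    }

    // During tool initialization the interceptor must be installed, e.g.:
    //   CHECK(INTERCEPT_FUNCTION(strlen));
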
diff --git a/libsanitizer/sanitizer_common/sanitizer_internal_defs.h b/libsanitizer/sanitizer_common/sanitizer_internal_defs.h
index 0dab7c2f6c..f1f243c17e 100644
--- a/libsanitizer/sanitizer_common/sanitizer_internal_defs.h
+++ b/libsanitizer/sanitizer_common/sanitizer_internal_defs.h
@@ -32,10 +32,13 @@
# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
#endif
-#if __LP64__ || defined(_WIN64)
-# define SANITIZER_WORDSIZE 64
+// We can use .preinit_array section on Linux to call sanitizer initialization
+// functions very early in the process startup (unless PIC macro is defined).
+// FIXME: do we have anything like this on Mac?
+#if SANITIZER_LINUX && !SANITIZER_ANDROID && !defined(PIC)
+# define SANITIZER_CAN_USE_PREINIT_ARRAY 1
#else
-# define SANITIZER_WORDSIZE 32
+# define SANITIZER_CAN_USE_PREINIT_ARRAY 0
#endif
// GCC does not understand __has_feature
@@ -57,7 +60,7 @@ typedef unsigned long uptr; // NOLINT
typedef signed long sptr; // NOLINT
#endif // defined(_WIN64)
#if defined(__x86_64__)
-// Since x32 uses ILP32 data model in 64-bit hardware mode, we must use
+// Since x32 uses ILP32 data model in 64-bit hardware mode, we must use
// 64-bit pointer to unwind stack frame.
typedef unsigned long long uhwptr; // NOLINT
#else
@@ -97,11 +100,15 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_path(const char *path);
- // Notify the tools that the sandbox is going to be turned on. The reserved
- // parameter will be used in the future to hold a structure with functions
- // that the tools may call to bypass the sandbox.
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_sandbox_on_notify(void *reserved);
+ typedef struct {
+ int coverage_sandboxed;
+ __sanitizer::sptr coverage_fd;
+ unsigned int coverage_max_block_size;
+ } __sanitizer_sandbox_arguments;
+
+ // Notify the tools that the sandbox is going to be turned on.
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+ __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
// This function is called by the tool when it has just finished reporting
// an error. 'error_summary' is a one-line string that summarizes
@@ -110,12 +117,16 @@ extern "C" {
void __sanitizer_report_error_summary(const char *error_summary);
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(void *pc);
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init();
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov();
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_annotate_contiguous_container(const void *beg,
const void *end,
const void *old_mid,
const void *new_mid);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
+ const void *end);
} // extern "C"
@@ -141,8 +152,6 @@ using namespace __sanitizer; // NOLINT
# define NOTHROW
# define LIKELY(x) (x)
# define UNLIKELY(x) (x)
-# define UNUSED
-# define USED
# define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */
#else // _MSC_VER
# define ALWAYS_INLINE inline __attribute__((always_inline))
@@ -157,8 +166,6 @@ using namespace __sanitizer; // NOLINT
# define NOTHROW throw()
# define LIKELY(x) __builtin_expect(!!(x), 1)
# define UNLIKELY(x) __builtin_expect(!!(x), 0)
-# define UNUSED __attribute__((unused))
-# define USED __attribute__((used))
# if defined(__i386__) || defined(__x86_64__)
// __builtin_prefetch(x) generates prefetchnt0 on x86
# define PREFETCH(x) __asm__("prefetchnta (%0)" : : "r" (x))
@@ -167,6 +174,14 @@ using namespace __sanitizer; // NOLINT
# endif
#endif // _MSC_VER
+#if !defined(_MSC_VER) || defined(__clang__)
+# define UNUSED __attribute__((unused))
+# define USED __attribute__((used))
+#else
+# define UNUSED
+# define USED
+#endif
+
// Unaligned versions of basic types.
typedef ALIGNED(1) u16 uu16;
typedef ALIGNED(1) u32 uu32;
@@ -197,7 +212,7 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
// Check macro
#define RAW_CHECK_MSG(expr, msg) do { \
- if (!(expr)) { \
+ if (UNLIKELY(!(expr))) { \
RawWrite(msg); \
Die(); \
} \
@@ -209,7 +224,7 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
do { \
__sanitizer::u64 v1 = (u64)(c1); \
__sanitizer::u64 v2 = (u64)(c2); \
- if (!(v1 op v2)) \
+ if (UNLIKELY(!(v1 op v2))) \
__sanitizer::CheckFailed(__FILE__, __LINE__, \
"(" #c1 ") " #op " (" #c2 ")", v1, v2); \
} while (false) \
diff --git a/libsanitizer/sanitizer_common/sanitizer_libc.cc b/libsanitizer/sanitizer_common/sanitizer_libc.cc
index 53c8755509..c13a66d88c 100644
--- a/libsanitizer/sanitizer_common/sanitizer_libc.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_libc.cc
@@ -16,7 +16,7 @@ namespace __sanitizer {
// Make the compiler think that something is going on there.
static inline void break_optimization(void *arg) {
-#if SANITIZER_WINDOWS
+#if _MSC_VER
// FIXME: make sure this is actually enough.
__asm;
#else
diff --git a/libsanitizer/sanitizer_common/sanitizer_libc.h b/libsanitizer/sanitizer_common/sanitizer_libc.h
index ae23bc45e9..680b888e21 100644
--- a/libsanitizer/sanitizer_common/sanitizer_libc.h
+++ b/libsanitizer/sanitizer_common/sanitizer_libc.h
@@ -72,6 +72,7 @@ uptr internal_open(const char *filename, int flags, u32 mode);
uptr internal_read(fd_t fd, void *buf, uptr count);
uptr internal_write(fd_t fd, const void *buf, uptr count);
+uptr internal_ftruncate(fd_t fd, uptr size);
// OS
uptr internal_filesize(fd_t fd); // -1 on error.
@@ -81,6 +82,7 @@ uptr internal_fstat(fd_t fd, void *buf);
uptr internal_dup2(int oldfd, int newfd);
uptr internal_readlink(const char *path, char *buf, uptr bufsize);
uptr internal_unlink(const char *path);
+uptr internal_rename(const char *oldpath, const char *newpath);
void NORETURN internal__exit(int exitcode);
uptr internal_lseek(fd_t fd, OFF_T offset, int whence);
@@ -89,12 +91,16 @@ uptr internal_waitpid(int pid, int *status, int options);
uptr internal_getpid();
uptr internal_getppid();
+int internal_fork();
+
// Threading
uptr internal_sched_yield();
// Error handling
bool internal_iserror(uptr retval, int *rverrno = 0);
+int internal_sigaction(int signum, const void *act, void *oldact);
+
} // namespace __sanitizer
#endif // SANITIZER_LIBC_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_libignore.cc b/libsanitizer/sanitizer_common/sanitizer_libignore.cc
index 310e811df1..9877d66865 100644
--- a/libsanitizer/sanitizer_common/sanitizer_libignore.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_libignore.cc
@@ -6,7 +6,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "sanitizer_libignore.h"
#include "sanitizer_flags.h"
@@ -74,9 +74,10 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
loaded = true;
if (lib->loaded)
continue;
- if (common_flags()->verbosity)
- Report("Matched called_from_lib suppression '%s' against library"
- " '%s'\n", lib->templ, module.data());
+ VReport(1,
+ "Matched called_from_lib suppression '%s' against library"
+ " '%s'\n",
+ lib->templ, module.data());
lib->loaded = true;
lib->name = internal_strdup(module.data());
const uptr idx = atomic_load(&loaded_count_, memory_order_relaxed);
@@ -100,4 +101,4 @@ void LibIgnore::OnLibraryUnloaded() {
} // namespace __sanitizer
-#endif // #if SANITIZER_LINUX
+#endif // #if SANITIZER_FREEBSD || SANITIZER_LINUX
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.cc b/libsanitizer/sanitizer_common/sanitizer_linux.cc
index 69c9c1063f..9feb307db9 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_linux.cc
@@ -11,9 +11,10 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_linux.h"
@@ -23,7 +24,10 @@
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
+#if !SANITIZER_FREEBSD
#include <asm/param.h>
+#endif
+
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
@@ -40,12 +44,28 @@
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
-#include <unwind.h>
+
+#if SANITIZER_FREEBSD
+#include <sys/sysctl.h>
+#include <machine/atomic.h>
+extern "C" {
+// <sys/umtx.h> must be included after <errno.h> and <sys/types.h> on
+// FreeBSD 9.2 and 10.0.
+#include <sys/umtx.h>
+}
+extern char **environ; // provided by crt1
+#endif // SANITIZER_FREEBSD
#if !SANITIZER_ANDROID
#include <sys/signal.h>
#endif
+#if SANITIZER_ANDROID
+#include <android/log.h>
+#include <sys/system_properties.h>
+#endif
+
+#if SANITIZER_LINUX
// <linux/time.h>
struct kernel_timeval {
long tv_sec;
@@ -55,11 +75,12 @@ struct kernel_timeval {
// <linux/futex.h> is broken on some linux distributions.
const int FUTEX_WAIT = 0;
const int FUTEX_WAKE = 1;
+#endif // SANITIZER_LINUX
-// Are we using 32-bit or 64-bit syscalls?
+// Are we using 32-bit or 64-bit Linux syscalls?
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
// but it still needs to use 64-bit syscalls.
-#if defined(__x86_64__) || SANITIZER_WORDSIZE == 64
+#if SANITIZER_LINUX && (defined(__x86_64__) || SANITIZER_WORDSIZE == 64)
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1
#else
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
@@ -67,7 +88,7 @@ const int FUTEX_WAKE = 1;
namespace __sanitizer {
-#ifdef __x86_64__
+#if SANITIZER_LINUX && defined(__x86_64__)
#include "sanitizer_syscall_linux_x86_64.inc"
#else
#include "sanitizer_syscall_generic.inc"
@@ -76,48 +97,66 @@ namespace __sanitizer {
// --------------- sanitizer_libc.h
uptr internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset) {
-#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
- return internal_syscall(__NR_mmap, (uptr)addr, length, prot, flags, fd,
+#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ return internal_syscall(SYSCALL(mmap), (uptr)addr, length, prot, flags, fd,
offset);
#else
- return internal_syscall(__NR_mmap2, addr, length, prot, flags, fd, offset);
+ return internal_syscall(SYSCALL(mmap2), addr, length, prot, flags, fd,
+ offset);
#endif
}
uptr internal_munmap(void *addr, uptr length) {
- return internal_syscall(__NR_munmap, (uptr)addr, length);
+ return internal_syscall(SYSCALL(munmap), (uptr)addr, length);
}
uptr internal_close(fd_t fd) {
- return internal_syscall(__NR_close, fd);
+ return internal_syscall(SYSCALL(close), fd);
}
uptr internal_open(const char *filename, int flags) {
- return internal_syscall(__NR_open, (uptr)filename, flags);
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags);
+#else
+ return internal_syscall(SYSCALL(open), (uptr)filename, flags);
+#endif
}
uptr internal_open(const char *filename, int flags, u32 mode) {
- return internal_syscall(__NR_open, (uptr)filename, flags, mode);
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags,
+ mode);
+#else
+ return internal_syscall(SYSCALL(open), (uptr)filename, flags, mode);
+#endif
}
uptr OpenFile(const char *filename, bool write) {
return internal_open(filename,
- write ? O_WRONLY | O_CREAT /*| O_CLOEXEC*/ : O_RDONLY, 0660);
+ write ? O_RDWR | O_CREAT /*| O_CLOEXEC*/ : O_RDONLY, 0660);
}
uptr internal_read(fd_t fd, void *buf, uptr count) {
sptr res;
- HANDLE_EINTR(res, (sptr)internal_syscall(__NR_read, fd, (uptr)buf, count));
+ HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(read), fd, (uptr)buf,
+ count));
return res;
}
uptr internal_write(fd_t fd, const void *buf, uptr count) {
sptr res;
- HANDLE_EINTR(res, (sptr)internal_syscall(__NR_write, fd, (uptr)buf, count));
+ HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(write), fd, (uptr)buf,
+ count));
+ return res;
+}
+
+uptr internal_ftruncate(fd_t fd, uptr size) {
+ sptr res;
+ HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(ftruncate), fd, size));
return res;
}
-#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS
+#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS && !SANITIZER_FREEBSD
static void stat64_to_stat(struct stat64 *in, struct stat *out) {
internal_memset(out, 0, sizeof(*out));
out->st_dev = in->st_dev;
@@ -138,33 +177,43 @@ static void stat64_to_stat(struct stat64 *in, struct stat *out) {
#endif
uptr internal_stat(const char *path, void *buf) {
-#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
- return internal_syscall(__NR_stat, (uptr)path, (uptr)buf);
+#if SANITIZER_FREEBSD
+ return internal_syscall(SYSCALL(stat), path, buf);
+#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path,
+ (uptr)buf, 0);
+#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ return internal_syscall(SYSCALL(stat), (uptr)path, (uptr)buf);
#else
struct stat64 buf64;
- int res = internal_syscall(__NR_stat64, path, &buf64);
+ int res = internal_syscall(SYSCALL(stat64), path, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
#endif
}
uptr internal_lstat(const char *path, void *buf) {
-#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
- return internal_syscall(__NR_lstat, (uptr)path, (uptr)buf);
+#if SANITIZER_FREEBSD
+ return internal_syscall(SYSCALL(lstat), path, buf);
+#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path,
+ (uptr)buf, AT_SYMLINK_NOFOLLOW);
+#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ return internal_syscall(SYSCALL(lstat), (uptr)path, (uptr)buf);
#else
struct stat64 buf64;
- int res = internal_syscall(__NR_lstat64, path, &buf64);
+ int res = internal_syscall(SYSCALL(lstat64), path, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
#endif
}
uptr internal_fstat(fd_t fd, void *buf) {
-#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
- return internal_syscall(__NR_fstat, fd, (uptr)buf);
+#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ return internal_syscall(SYSCALL(fstat), fd, (uptr)buf);
#else
struct stat64 buf64;
- int res = internal_syscall(__NR_fstat64, fd, &buf64);
+ int res = internal_syscall(SYSCALL(fstat64), fd, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
#endif
@@ -178,54 +227,104 @@ uptr internal_filesize(fd_t fd) {
}
uptr internal_dup2(int oldfd, int newfd) {
- return internal_syscall(__NR_dup2, oldfd, newfd);
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(dup3), oldfd, newfd, 0);
+#else
+ return internal_syscall(SYSCALL(dup2), oldfd, newfd);
+#endif
}
uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
- return internal_syscall(__NR_readlink, (uptr)path, (uptr)buf, bufsize);
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(readlinkat), AT_FDCWD,
+ (uptr)path, (uptr)buf, bufsize);
+#else
+ return internal_syscall(SYSCALL(readlink), (uptr)path, (uptr)buf, bufsize);
+#endif
}
uptr internal_unlink(const char *path) {
- return internal_syscall(__NR_unlink, (uptr)path);
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(unlinkat), AT_FDCWD, (uptr)path, 0);
+#else
+ return internal_syscall(SYSCALL(unlink), (uptr)path);
+#endif
+}
+
+uptr internal_rename(const char *oldpath, const char *newpath) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(renameat), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
+ (uptr)newpath);
+#else
+ return internal_syscall(SYSCALL(rename), (uptr)oldpath, (uptr)newpath);
+#endif
}
uptr internal_sched_yield() {
- return internal_syscall(__NR_sched_yield);
+ return internal_syscall(SYSCALL(sched_yield));
}
void internal__exit(int exitcode) {
- internal_syscall(__NR_exit_group, exitcode);
+#if SANITIZER_FREEBSD
+ internal_syscall(SYSCALL(exit), exitcode);
+#else
+ internal_syscall(SYSCALL(exit_group), exitcode);
+#endif
Die(); // Unreachable.
}
uptr internal_execve(const char *filename, char *const argv[],
char *const envp[]) {
- return internal_syscall(__NR_execve, (uptr)filename, (uptr)argv, (uptr)envp);
+ return internal_syscall(SYSCALL(execve), (uptr)filename, (uptr)argv,
+ (uptr)envp);
}
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
struct stat st;
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ if (internal_syscall(SYSCALL(newfstatat), AT_FDCWD, filename, &st, 0))
+#else
if (internal_stat(filename, &st))
+#endif
return false;
// Sanity check: filename is a regular file.
return S_ISREG(st.st_mode);
}
uptr GetTid() {
- return internal_syscall(__NR_gettid);
+#if SANITIZER_FREEBSD
+ return (uptr)pthread_self();
+#else
+ return internal_syscall(SYSCALL(gettid));
+#endif
}
u64 NanoTime() {
+#if SANITIZER_FREEBSD
+ timeval tv;
+#else
kernel_timeval tv;
+#endif
internal_memset(&tv, 0, sizeof(tv));
- internal_syscall(__NR_gettimeofday, (uptr)&tv, 0);
+ internal_syscall(SYSCALL(gettimeofday), (uptr)&tv, 0);
return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000;
}
-// Like getenv, but reads env directly from /proc and does not use libc.
-// This function should be called first inside __asan_init.
+// Like getenv, but reads env directly from /proc (on Linux) or parses the
+// 'environ' array (on FreeBSD) and does not use libc. This function should be
+// called first inside __asan_init.
const char *GetEnv(const char *name) {
+#if SANITIZER_FREEBSD
+ if (::environ != 0) {
+ uptr NameLen = internal_strlen(name);
+ for (char **Env = ::environ; *Env != 0; Env++) {
+ if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
+ return (*Env) + NameLen + 1;
+ }
+ }
+ return 0; // Not found.
+#elif SANITIZER_LINUX
static char *environ;
static uptr len;
static bool inited;
@@ -249,6 +348,9 @@ const char *GetEnv(const char *name) {
p = endp + 1;
}
return 0; // Not found.
+#else
+#error "Unsupported platform"
+#endif
}
extern "C" {
@@ -303,225 +405,16 @@ void ReExec() {
Die();
}
-void PrepareForSandboxing() {
- // Some kinds of sandboxes may forbid filesystem access, so we won't be able
- // to read the file mappings from /proc/self/maps. Luckily, neither the
- // process will be able to load additional libraries, so it's fine to use the
- // cached mappings.
- MemoryMappingLayout::CacheMemoryMappings();
- // Same for /proc/self/exe in the symbolizer.
-#if !SANITIZER_GO
- if (Symbolizer *sym = Symbolizer::GetOrNull())
- sym->PrepareForSandboxing();
-#endif
-}
-
-// ----------------- sanitizer_procmaps.h
-// Linker initialized.
-ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
-StaticSpinMutex MemoryMappingLayout::cache_lock_; // Linker initialized.
-
-MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
- proc_self_maps_.len =
- ReadFileToBuffer("/proc/self/maps", &proc_self_maps_.data,
- &proc_self_maps_.mmaped_size, 1 << 26);
- if (cache_enabled) {
- if (proc_self_maps_.mmaped_size == 0) {
- LoadFromCache();
- CHECK_GT(proc_self_maps_.len, 0);
- }
- } else {
- CHECK_GT(proc_self_maps_.mmaped_size, 0);
- }
- Reset();
- // FIXME: in the future we may want to cache the mappings on demand only.
- if (cache_enabled)
- CacheMemoryMappings();
-}
-
-MemoryMappingLayout::~MemoryMappingLayout() {
- // Only unmap the buffer if it is different from the cached one. Otherwise
- // it will be unmapped when the cache is refreshed.
- if (proc_self_maps_.data != cached_proc_self_maps_.data) {
- UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size);
- }
-}
-
-void MemoryMappingLayout::Reset() {
- current_ = proc_self_maps_.data;
-}
-
-// static
-void MemoryMappingLayout::CacheMemoryMappings() {
- SpinMutexLock l(&cache_lock_);
- // Don't invalidate the cache if the mappings are unavailable.
- ProcSelfMapsBuff old_proc_self_maps;
- old_proc_self_maps = cached_proc_self_maps_;
- cached_proc_self_maps_.len =
- ReadFileToBuffer("/proc/self/maps", &cached_proc_self_maps_.data,
- &cached_proc_self_maps_.mmaped_size, 1 << 26);
- if (cached_proc_self_maps_.mmaped_size == 0) {
- cached_proc_self_maps_ = old_proc_self_maps;
- } else {
- if (old_proc_self_maps.mmaped_size) {
- UnmapOrDie(old_proc_self_maps.data,
- old_proc_self_maps.mmaped_size);
- }
- }
-}
-
-void MemoryMappingLayout::LoadFromCache() {
- SpinMutexLock l(&cache_lock_);
- if (cached_proc_self_maps_.data) {
- proc_self_maps_ = cached_proc_self_maps_;
- }
-}
-
-// Parse a hex value in str and update str.
-static uptr ParseHex(char **str) {
- uptr x = 0;
- char *s;
- for (s = *str; ; s++) {
- char c = *s;
- uptr v = 0;
- if (c >= '0' && c <= '9')
- v = c - '0';
- else if (c >= 'a' && c <= 'f')
- v = c - 'a' + 10;
- else if (c >= 'A' && c <= 'F')
- v = c - 'A' + 10;
- else
- break;
- x = x * 16 + v;
- }
- *str = s;
- return x;
-}
-
-static bool IsOneOf(char c, char c1, char c2) {
- return c == c1 || c == c2;
-}
-
-static bool IsDecimal(char c) {
- return c >= '0' && c <= '9';
-}
-
-static bool IsHex(char c) {
- return (c >= '0' && c <= '9')
- || (c >= 'a' && c <= 'f');
-}
-
-static uptr ReadHex(const char *p) {
- uptr v = 0;
- for (; IsHex(p[0]); p++) {
- if (p[0] >= '0' && p[0] <= '9')
- v = v * 16 + p[0] - '0';
- else
- v = v * 16 + p[0] - 'a' + 10;
- }
- return v;
-}
-
-static uptr ReadDecimal(const char *p) {
- uptr v = 0;
- for (; IsDecimal(p[0]); p++)
- v = v * 10 + p[0] - '0';
- return v;
-}
-
-
-bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size,
- uptr *protection) {
- char *last = proc_self_maps_.data + proc_self_maps_.len;
- if (current_ >= last) return false;
- uptr dummy;
- if (!start) start = &dummy;
- if (!end) end = &dummy;
- if (!offset) offset = &dummy;
- char *next_line = (char*)internal_memchr(current_, '\n', last - current_);
- if (next_line == 0)
- next_line = last;
- // Example: 08048000-08056000 r-xp 00000000 03:0c 64593 /foo/bar
- *start = ParseHex(&current_);
- CHECK_EQ(*current_++, '-');
- *end = ParseHex(&current_);
- CHECK_EQ(*current_++, ' ');
- uptr local_protection = 0;
- CHECK(IsOneOf(*current_, '-', 'r'));
- if (*current_++ == 'r')
- local_protection |= kProtectionRead;
- CHECK(IsOneOf(*current_, '-', 'w'));
- if (*current_++ == 'w')
- local_protection |= kProtectionWrite;
- CHECK(IsOneOf(*current_, '-', 'x'));
- if (*current_++ == 'x')
- local_protection |= kProtectionExecute;
- CHECK(IsOneOf(*current_, 's', 'p'));
- if (*current_++ == 's')
- local_protection |= kProtectionShared;
- if (protection) {
- *protection = local_protection;
- }
- CHECK_EQ(*current_++, ' ');
- *offset = ParseHex(&current_);
- CHECK_EQ(*current_++, ' ');
- ParseHex(&current_);
- CHECK_EQ(*current_++, ':');
- ParseHex(&current_);
- CHECK_EQ(*current_++, ' ');
- while (IsDecimal(*current_))
- current_++;
- // Qemu may lack the trailing space.
- // http://code.google.com/p/address-sanitizer/issues/detail?id=160
- // CHECK_EQ(*current_++, ' ');
- // Skip spaces.
- while (current_ < next_line && *current_ == ' ')
- current_++;
- // Fill in the filename.
- uptr i = 0;
- while (current_ < next_line) {
- if (filename && i < filename_size - 1)
- filename[i++] = *current_;
- current_++;
- }
- if (filename && i < filename_size)
- filename[i] = 0;
- current_ = next_line + 1;
- return true;
-}
-
-// Gets the object name and the offset by walking MemoryMappingLayout.
-bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
- char filename[],
- uptr filename_size,
- uptr *protection) {
- return IterateForObjectNameAndOffset(addr, offset, filename, filename_size,
- protection);
-}
-
-void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {
- char *smaps = 0;
- uptr smaps_cap = 0;
- uptr smaps_len = ReadFileToBuffer("/proc/self/smaps",
- &smaps, &smaps_cap, 64<<20);
- uptr start = 0;
- bool file = false;
- const char *pos = smaps;
- while (pos < smaps + smaps_len) {
- if (IsHex(pos[0])) {
- start = ReadHex(pos);
- for (; *pos != '/' && *pos > '\n'; pos++) {}
- file = *pos == '/';
- } else if (internal_strncmp(pos, "Rss:", 4) == 0) {
- for (; *pos < '0' || *pos > '9'; pos++) {}
- uptr rss = ReadDecimal(pos) * 1024;
- cb(start, rss, file, stats, stats_size);
- }
- while (*pos++ != '\n') {}
- }
- UnmapOrDie(smaps, smaps_cap);
+// Stub implementation of GetThreadStackAndTls for Go.
+#if SANITIZER_GO
+void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
+ uptr *tls_addr, uptr *tls_size) {
+ *stk_addr = 0;
+ *stk_size = 0;
+ *tls_addr = 0;
+ *tls_size = 0;
}
+#endif // SANITIZER_GO
enum MutexState {
MtxUnlocked = 0,
@@ -541,16 +434,26 @@ void BlockingMutex::Lock() {
atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
return;
- while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked)
- internal_syscall(__NR_futex, (uptr)m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
+ while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
+#if SANITIZER_FREEBSD
+ _umtx_op(m, UMTX_OP_WAIT_UINT, MtxSleeping, 0, 0);
+#else
+ internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
+#endif
+ }
}
void BlockingMutex::Unlock() {
atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
u32 v = atomic_exchange(m, MtxUnlocked, memory_order_relaxed);
CHECK_NE(v, MtxUnlocked);
- if (v == MtxSleeping)
- internal_syscall(__NR_futex, (uptr)m, FUTEX_WAKE, 1, 0, 0, 0);
+ if (v == MtxSleeping) {
+#if SANITIZER_FREEBSD
+ _umtx_op(m, UMTX_OP_WAKE, 1, 0, 0);
+#else
+ internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAKE, 1, 0, 0, 0);
+#endif
+ }
}
void BlockingMutex::CheckLocked() {
@@ -563,71 +466,137 @@ void BlockingMutex::CheckLocked() {
// Note that getdents64 uses a different structure format. We only provide the
// 32-bit syscall here.
struct linux_dirent {
+#if SANITIZER_X32
+ u64 d_ino;
+ u64 d_off;
+#else
unsigned long d_ino;
unsigned long d_off;
+#endif
unsigned short d_reclen;
char d_name[256];
};
// Syscall wrappers.
uptr internal_ptrace(int request, int pid, void *addr, void *data) {
- return internal_syscall(__NR_ptrace, request, pid, (uptr)addr, (uptr)data);
+ return internal_syscall(SYSCALL(ptrace), request, pid, (uptr)addr,
+ (uptr)data);
}
uptr internal_waitpid(int pid, int *status, int options) {
- return internal_syscall(__NR_wait4, pid, (uptr)status, options,
+ return internal_syscall(SYSCALL(wait4), pid, (uptr)status, options,
0 /* rusage */);
}
uptr internal_getpid() {
- return internal_syscall(__NR_getpid);
+ return internal_syscall(SYSCALL(getpid));
}
uptr internal_getppid() {
- return internal_syscall(__NR_getppid);
+ return internal_syscall(SYSCALL(getppid));
}
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) {
- return internal_syscall(__NR_getdents, fd, (uptr)dirp, count);
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(getdents64), fd, (uptr)dirp, count);
+#else
+ return internal_syscall(SYSCALL(getdents), fd, (uptr)dirp, count);
+#endif
}
uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
- return internal_syscall(__NR_lseek, fd, offset, whence);
+ return internal_syscall(SYSCALL(lseek), fd, offset, whence);
}
+#if SANITIZER_LINUX
uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) {
- return internal_syscall(__NR_prctl, option, arg2, arg3, arg4, arg5);
+ return internal_syscall(SYSCALL(prctl), option, arg2, arg3, arg4, arg5);
}
+#endif
uptr internal_sigaltstack(const struct sigaltstack *ss,
struct sigaltstack *oss) {
- return internal_syscall(__NR_sigaltstack, (uptr)ss, (uptr)oss);
+ return internal_syscall(SYSCALL(sigaltstack), (uptr)ss, (uptr)oss);
}
-uptr internal_sigaction(int signum, const __sanitizer_kernel_sigaction_t *act,
- __sanitizer_kernel_sigaction_t *oldact) {
- return internal_syscall(__NR_rt_sigaction, signum, act, oldact,
- sizeof(__sanitizer_kernel_sigset_t));
+int internal_fork() {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(clone), SIGCHLD, 0);
+#else
+ return internal_syscall(SYSCALL(fork));
+#endif
+}
+
+#if SANITIZER_LINUX
+// Doesn't set sa_restorer, use with caution (see below).
+int internal_sigaction_norestorer(int signum, const void *act, void *oldact) {
+ __sanitizer_kernel_sigaction_t k_act, k_oldact;
+ internal_memset(&k_act, 0, sizeof(__sanitizer_kernel_sigaction_t));
+ internal_memset(&k_oldact, 0, sizeof(__sanitizer_kernel_sigaction_t));
+ const __sanitizer_sigaction *u_act = (__sanitizer_sigaction *)act;
+ __sanitizer_sigaction *u_oldact = (__sanitizer_sigaction *)oldact;
+ if (u_act) {
+ k_act.handler = u_act->handler;
+ k_act.sigaction = u_act->sigaction;
+ internal_memcpy(&k_act.sa_mask, &u_act->sa_mask,
+ sizeof(__sanitizer_kernel_sigset_t));
+ k_act.sa_flags = u_act->sa_flags;
+ // FIXME: most often sa_restorer is unset, however the kernel requires it
+ // to point to a valid signal restorer that calls the rt_sigreturn syscall.
+ // If sa_restorer passed to the kernel is NULL, the program may crash upon
+ // signal delivery or fail to unwind the stack in the signal handler.
+ // libc implementation of sigaction() passes its own restorer to
+ // rt_sigaction, so we need to do the same (we'll need to reimplement the
+ // restorers; for x86_64 the restorer address can be obtained from
+    // oldact->sa_restorer upon a call to sigaction(xxx, NULL, oldact)).
+ k_act.sa_restorer = u_act->sa_restorer;
+ }
+
+ uptr result = internal_syscall(SYSCALL(rt_sigaction), (uptr)signum,
+ (uptr)(u_act ? &k_act : NULL),
+ (uptr)(u_oldact ? &k_oldact : NULL),
+ (uptr)sizeof(__sanitizer_kernel_sigset_t));
+
+ if ((result == 0) && u_oldact) {
+ u_oldact->handler = k_oldact.handler;
+ u_oldact->sigaction = k_oldact.sigaction;
+ internal_memcpy(&u_oldact->sa_mask, &k_oldact.sa_mask,
+ sizeof(__sanitizer_kernel_sigset_t));
+ u_oldact->sa_flags = k_oldact.sa_flags;
+ u_oldact->sa_restorer = k_oldact.sa_restorer;
+ }
+ return result;
}
+#endif // SANITIZER_LINUX
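
Note: the recovery path hinted at in the FIXME above (reading the libc-installed restorer back via a query-only sigaction call) boils down to passing a null act pointer. A tiny standalone sketch, independent of this patch; the signal number and the printed check are arbitrary illustrations:

#include <signal.h>
#include <stdio.h>

int main() {
  struct sigaction old;
  // A null |act| only queries the current disposition without changing it;
  // per the FIXME above, the struct filled in by libc is also where a
  // previously installed restorer would be reported on Linux/x86_64.
  if (sigaction(SIGSEGV, NULL, &old) != 0)
    return 1;
  printf("SIGSEGV handler is SIG_DFL: %d\n", old.sa_handler == SIG_DFL);
  return 0;
}
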
-uptr internal_sigprocmask(int how, __sanitizer_kernel_sigset_t *set,
- __sanitizer_kernel_sigset_t *oldset) {
- return internal_syscall(__NR_rt_sigprocmask, (uptr)how, &set->sig[0],
- &oldset->sig[0], sizeof(__sanitizer_kernel_sigset_t));
+uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+#if SANITIZER_FREEBSD
+ return internal_syscall(SYSCALL(sigprocmask), how, set, oldset);
+#else
+ __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
+ __sanitizer_kernel_sigset_t *k_oldset = (__sanitizer_kernel_sigset_t *)oldset;
+ return internal_syscall(SYSCALL(rt_sigprocmask), (uptr)how,
+ (uptr)&k_set->sig[0], (uptr)&k_oldset->sig[0],
+ sizeof(__sanitizer_kernel_sigset_t));
+#endif
}
-void internal_sigfillset(__sanitizer_kernel_sigset_t *set) {
+void internal_sigfillset(__sanitizer_sigset_t *set) {
internal_memset(set, 0xff, sizeof(*set));
}
-void internal_sigdelset(__sanitizer_kernel_sigset_t *set, int signum) {
+#if SANITIZER_LINUX
+void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
signum -= 1;
CHECK_GE(signum, 0);
CHECK_LT(signum, sizeof(*set) * 8);
- const uptr idx = signum / (sizeof(set->sig[0]) * 8);
- const uptr bit = signum % (sizeof(set->sig[0]) * 8);
- set->sig[idx] &= ~(1 << bit);
+ __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
+ const uptr idx = signum / (sizeof(k_set->sig[0]) * 8);
+ const uptr bit = signum % (sizeof(k_set->sig[0]) * 8);
+ k_set->sig[idx] &= ~(1 << bit);
}
+#endif // SANITIZER_LINUX
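
The word/bit arithmetic in internal_sigdelset above is the usual packing of 1-based signal numbers into an array of machine words. A self-contained illustration with a hypothetical two-word, 64-bit-per-word set (the real __sanitizer_kernel_sigset_t layout is platform dependent):

#include <stdint.h>
#include <stdio.h>

struct TwoWordSigset { uint64_t sig[2]; };  // hypothetical layout

static void DelSignal(TwoWordSigset *set, int signum) {
  signum -= 1;                                              // bits are 0-based
  const unsigned idx = signum / (sizeof(set->sig[0]) * 8);  // word index
  const unsigned bit = signum % (sizeof(set->sig[0]) * 8);  // bit within word
  set->sig[idx] &= ~(1ULL << bit);
}

int main() {
  TwoWordSigset s = {{~0ULL, ~0ULL}};
  DelSignal(&s, 11);  // clears bit 10 of sig[0]
  printf("%llx %llx\n", (unsigned long long)s.sig[0],
         (unsigned long long)s.sig[1]);
  return 0;
}
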
// ThreadLister implementation.
ThreadLister::ThreadLister(int pid)
@@ -698,7 +667,7 @@ bool ThreadLister::GetDirectoryEntries() {
}
uptr GetPageSize() {
-#if defined(__x86_64__) || defined(__i386__)
+#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__))
return EXEC_PAGESIZE;
#else
return sysconf(_SC_PAGESIZE); // EXEC_PAGESIZE may not be trustworthy.
@@ -709,24 +678,32 @@ static char proc_self_exe_cache_str[kMaxPathLength];
static uptr proc_self_exe_cache_len = 0;
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
+ if (proc_self_exe_cache_len > 0) {
+ // If available, use the cached module name.
+ uptr module_name_len =
+ internal_snprintf(buf, buf_len, "%s", proc_self_exe_cache_str);
+ CHECK_LT(module_name_len, buf_len);
+ return module_name_len;
+ }
+#if SANITIZER_FREEBSD
+ const int Mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };
+ size_t Size = buf_len;
+ bool IsErr = (sysctl(Mib, 4, buf, &Size, NULL, 0) != 0);
+ int readlink_error = IsErr ? errno : 0;
+ uptr module_name_len = Size;
+#else
uptr module_name_len = internal_readlink(
"/proc/self/exe", buf, buf_len);
int readlink_error;
- if (internal_iserror(module_name_len, &readlink_error)) {
- if (proc_self_exe_cache_len) {
- // If available, use the cached module name.
- CHECK_LE(proc_self_exe_cache_len, buf_len);
- internal_strncpy(buf, proc_self_exe_cache_str, buf_len);
- module_name_len = internal_strlen(proc_self_exe_cache_str);
- } else {
- // We can't read /proc/self/exe for some reason, assume the name of the
- // binary is unknown.
- Report("WARNING: readlink(\"/proc/self/exe\") failed with errno %d, "
- "some stack frames may not be symbolized\n", readlink_error);
- module_name_len = internal_snprintf(buf, buf_len, "/proc/self/exe");
- }
+ bool IsErr = internal_iserror(module_name_len, &readlink_error);
+#endif
+ if (IsErr) {
+ // We can't read /proc/self/exe for some reason, assume the name of the
+ // binary is unknown.
+ Report("WARNING: readlink(\"/proc/self/exe\") failed with errno %d, "
+ "some stack frames may not be symbolized\n", readlink_error);
+ module_name_len = internal_snprintf(buf, buf_len, "/proc/self/exe");
CHECK_LT(module_name_len, buf_len);
- buf[module_name_len] = '\0';
}
return module_name_len;
}
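
For reference, the two branches above reduce to the following standalone sketch: sysctl with KERN_PROC_PATHNAME on FreeBSD and readlink of /proc/self/exe elsewhere. Error reporting and the cache consulted above are omitted, and the helper name is made up:

#include <unistd.h>
#if defined(__FreeBSD__)
#include <sys/types.h>
#include <sys/sysctl.h>
#endif

// Writes the executable path into |buf| and returns its length, or 0 on error.
static size_t binary_name(char *buf, size_t buf_len) {
#if defined(__FreeBSD__)
  int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };  // -1: self
  size_t size = buf_len;
  if (sysctl(mib, 4, buf, &size, NULL, 0) != 0)
    return 0;
  return size;
#else
  ssize_t len = readlink("/proc/self/exe", buf, buf_len - 1);
  if (len < 0)
    return 0;
  buf[len] = '\0';  // readlink does not null-terminate
  return (size_t)len;
#endif
}
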
@@ -753,8 +730,10 @@ bool LibraryNameIs(const char *full_name, const char *base_name) {
#if !SANITIZER_ANDROID
// Call cb for each region mapped by map.
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
+#if !SANITIZER_FREEBSD
typedef ElfW(Phdr) Elf_Phdr;
typedef ElfW(Ehdr) Elf_Ehdr;
+#endif // !SANITIZER_FREEBSD
char *base = (char *)map->l_addr;
Elf_Ehdr *ehdr = (Elf_Ehdr *)base;
char *phdrs = base + ehdr->e_phoff;
@@ -788,7 +767,7 @@ void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
}
#endif
-#if defined(__x86_64__)
+#if defined(__x86_64__) && SANITIZER_LINUX
// We cannot use glibc's clone wrapper, because it messes with the child
// task's TLS. It writes the PID and TID of the child task to its thread
// descriptor, but in our case the child task shares the thread descriptor with
@@ -807,7 +786,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
register void *r8 __asm__("r8") = newtls;
register int *r10 __asm__("r10") = child_tidptr;
__asm__ __volatile__(
- /* %rax = syscall(%rax = __NR_clone,
+ /* %rax = syscall(%rax = SYSCALL(clone),
* %rdi = flags,
* %rsi = child_stack,
* %rdx = parent_tidptr,
@@ -841,7 +820,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
/* Return to parent. */
"1:\n"
: "=a" (res)
- : "a"(__NR_clone), "i"(__NR_exit),
+ : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)),
"S"(child_stack),
"D"(flags),
"d"(parent_tidptr),
@@ -850,7 +829,46 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
: "rsp", "memory", "r11", "rcx");
return res;
}
-#endif // defined(__x86_64__)
+#endif // defined(__x86_64__) && SANITIZER_LINUX
+
+#if SANITIZER_ANDROID
+static atomic_uint8_t android_log_initialized;
+
+void AndroidLogInit() {
+ atomic_store(&android_log_initialized, 1, memory_order_release);
+}
+// This thing is not, strictly speaking, async signal safe, but it does not seem
+// to cause any issues. An alternative is writing to log devices directly, but
+// their location and message format might change in the future, so we'd really
+// like to avoid that.
+void AndroidLogWrite(const char *buffer) {
+ if (!atomic_load(&android_log_initialized, memory_order_acquire))
+ return;
+
+ char *copy = internal_strdup(buffer);
+ char *p = copy;
+ char *q;
+ // __android_log_write has an implicit message length limit.
+ // Print one line at a time.
+ do {
+ q = internal_strchr(p, '\n');
+ if (q) *q = '\0';
+ __android_log_write(ANDROID_LOG_INFO, NULL, p);
+ if (q) p = q + 1;
+ } while (q);
+ InternalFree(copy);
+}
+
+void GetExtraActivationFlags(char *buf, uptr size) {
+ CHECK(size > PROP_VALUE_MAX);
+ __system_property_get("asan.options", buf);
+}
+#endif
+
+bool IsDeadlySignal(int signum) {
+ return (signum == SIGSEGV) && common_flags()->handle_segv;
+}
+
} // namespace __sanitizer
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.h b/libsanitizer/sanitizer_common/sanitizer_linux.h
index 6422df142e..086834c3a2 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux.h
+++ b/libsanitizer/sanitizer_common/sanitizer_linux.h
@@ -12,7 +12,7 @@
#define SANITIZER_LINUX_H
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform_limits_posix.h"
@@ -27,20 +27,25 @@ struct linux_dirent;
// Syscall wrappers.
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
-uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
uptr internal_sigaltstack(const struct sigaltstack* ss,
struct sigaltstack* oss);
-uptr internal_sigaction(int signum, const __sanitizer_kernel_sigaction_t *act,
- __sanitizer_kernel_sigaction_t *oldact);
-uptr internal_sigprocmask(int how, __sanitizer_kernel_sigset_t *set,
- __sanitizer_kernel_sigset_t *oldset);
-void internal_sigfillset(__sanitizer_kernel_sigset_t *set);
-void internal_sigdelset(__sanitizer_kernel_sigset_t *set, int signum);
+uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset);
+void internal_sigfillset(__sanitizer_sigset_t *set);
-#ifdef __x86_64__
+// Linux-only syscalls.
+#if SANITIZER_LINUX
+uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
+// Used only by sanitizer_stoptheworld. Signal handlers that are actually used
+// (like the process-wide error reporting SEGV handler) must use
+// internal_sigaction instead.
+int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
+void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
+#if defined(__x86_64__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr);
#endif
+#endif // SANITIZER_LINUX
// This class reads thread IDs from /proc/<pid>/task using only syscalls.
class ThreadLister {
@@ -64,8 +69,6 @@ class ThreadLister {
int bytes_read_;
};
-void AdjustStackSizeLinux(void *attr);
-
// Exposed for testing.
uptr ThreadDescriptorSize();
uptr ThreadSelf();
@@ -84,5 +87,5 @@ void CacheBinaryName();
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr));
} // namespace __sanitizer
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
#endif // SANITIZER_LINUX_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc
index c9eb435be9..36aaafdcc4 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc
@@ -11,21 +11,35 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
+#include "sanitizer_freebsd.h"
#include "sanitizer_linux.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_atomic.h"
+#include "sanitizer_symbolizer.h"
+
+#if SANITIZER_ANDROID || SANITIZER_FREEBSD
+#include <dlfcn.h> // for dlsym()
+#endif
-#include <dlfcn.h>
#include <pthread.h>
-#include <sys/prctl.h>
+#include <signal.h>
#include <sys/resource.h>
-#include <unwind.h>
+
+#if SANITIZER_FREEBSD
+#include <pthread_np.h>
+#include <osreldate.h>
+#define pthread_getattr_np pthread_attr_get_np
+#endif
+
+#if SANITIZER_LINUX
+#include <sys/prctl.h>
+#endif
#if !SANITIZER_ANDROID
#include <elf.h>
@@ -33,17 +47,31 @@
#include <unistd.h>
#endif
+namespace __sanitizer {
+
// This function is defined elsewhere if we intercepted pthread_attr_getstack.
-SANITIZER_WEAK_ATTRIBUTE
-int __sanitizer_pthread_attr_getstack(void *attr, void **addr, size_t *size) {
- return pthread_attr_getstack((pthread_attr_t*)attr, addr, size);
+extern "C" {
+SANITIZER_WEAK_ATTRIBUTE int
+real_pthread_attr_getstack(void *attr, void **addr, size_t *size);
+} // extern "C"
+
+static int my_pthread_attr_getstack(void *attr, void **addr, size_t *size) {
+ if (real_pthread_attr_getstack)
+ return real_pthread_attr_getstack((pthread_attr_t *)attr, addr, size);
+ return pthread_attr_getstack((pthread_attr_t *)attr, addr, size);
}
-namespace __sanitizer {
+SANITIZER_WEAK_ATTRIBUTE int
+real_sigaction(int signum, const void *act, void *oldact);
+
+int internal_sigaction(int signum, const void *act, void *oldact) {
+ if (real_sigaction)
+ return real_sigaction(signum, act, oldact);
+ return sigaction(signum, (struct sigaction *)act, (struct sigaction *)oldact);
+}
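
The real_* indirection above leans on weak linkage: the declarations resolve to a null address unless some interceptor object file defines them, in which case calls are routed there. A minimal sketch of the same pattern, assuming SANITIZER_WEAK_ATTRIBUTE expands to __attribute__((weak)) as on the ELF targets covered here:

#include <signal.h>

extern "C" {
// Weak declaration: the address is null when nothing else defines the symbol.
__attribute__((weak)) int real_sigaction(int signum, const void *act,
                                         void *oldact);
}  // extern "C"

static int CallSigaction(int signum, const struct sigaction *act,
                         struct sigaction *oldact) {
  if (real_sigaction)  // an interceptor supplied the real entry point
    return real_sigaction(signum, act, oldact);
  return sigaction(signum, act, oldact);  // plain libc fallback
}
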
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
uptr *stack_bottom) {
- static const uptr kMaxThreadStackSize = 1 << 30; // 1Gb
CHECK(stack_top);
CHECK(stack_bottom);
if (at_initialization) {
@@ -77,10 +105,11 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
return;
}
pthread_attr_t attr;
+ pthread_attr_init(&attr);
CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
uptr stacksize = 0;
void *stackaddr = 0;
- __sanitizer_pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
+ my_pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
pthread_attr_destroy(&attr);
CHECK_LE(stacksize, kMaxThreadStackSize); // Sanity check.
@@ -88,8 +117,6 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
*stack_bottom = (uptr)stackaddr;
}
-// Does not compile for Go because dlsym() requires -ldl
-#ifndef SANITIZER_GO
bool SetEnv(const char *name, const char *value) {
void *f = dlsym(RTLD_NEXT, "setenv");
if (f == 0)
@@ -98,9 +125,8 @@ bool SetEnv(const char *name, const char *value) {
setenv_ft setenv_f;
CHECK_EQ(sizeof(setenv_f), sizeof(f));
internal_memcpy(&setenv_f, &f, sizeof(f));
- return setenv_f(name, value, 1) == 0;
+ return IndirectExternCall(setenv_f)(name, value, 1) == 0;
}
-#endif
bool SanitizerSetThreadName(const char *name) {
#ifdef PR_SET_NAME
@@ -123,61 +149,9 @@ bool SanitizerGetThreadName(char *name, int max_len) {
#endif
}
-#ifndef SANITIZER_GO
-//------------------------- SlowUnwindStack -----------------------------------
-#ifdef __arm__
-#define UNWIND_STOP _URC_END_OF_STACK
-#define UNWIND_CONTINUE _URC_NO_REASON
-#else
-#define UNWIND_STOP _URC_NORMAL_STOP
-#define UNWIND_CONTINUE _URC_NO_REASON
-#endif
-
-uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
-#ifdef __arm__
- uptr val;
- _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
- 15 /* r15 = PC */, _UVRSD_UINT32, &val);
- CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
- // Clear the Thumb bit.
- return val & ~(uptr)1;
-#else
- return _Unwind_GetIP(ctx);
-#endif
-}
-
-struct UnwindTraceArg {
- StackTrace *stack;
- uptr max_depth;
-};
-
-_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
- UnwindTraceArg *arg = (UnwindTraceArg*)param;
- CHECK_LT(arg->stack->size, arg->max_depth);
- uptr pc = Unwind_GetIP(ctx);
- arg->stack->trace[arg->stack->size++] = pc;
- if (arg->stack->size == arg->max_depth) return UNWIND_STOP;
- return UNWIND_CONTINUE;
-}
-
-void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
- size = 0;
- if (max_depth == 0)
- return;
- UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
- _Unwind_Backtrace(Unwind_Trace, &arg);
- // We need to pop a few frames so that pc is on top.
- uptr to_pop = LocatePcInTrace(pc);
- // trace[0] belongs to the current function so we always pop it.
- if (to_pop == 0)
- to_pop = 1;
- PopStackFrames(to_pop);
- trace[0] = pc;
-}
-
-#endif // !SANITIZER_GO
-
+#if !SANITIZER_FREEBSD
static uptr g_tls_size;
+#endif
#ifdef __i386__
# define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
@@ -186,7 +160,7 @@ static uptr g_tls_size;
#endif
void InitTlsSize() {
-#if !defined(SANITIZER_GO) && !SANITIZER_ANDROID
+#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID
typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
get_tls_func get_tls;
void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
@@ -196,32 +170,30 @@ void InitTlsSize() {
CHECK_NE(get_tls, 0);
size_t tls_size = 0;
size_t tls_align = 0;
- get_tls(&tls_size, &tls_align);
+ IndirectExternCall(get_tls)(&tls_size, &tls_align);
g_tls_size = tls_size;
-#endif
+#endif // !SANITIZER_FREEBSD && !SANITIZER_ANDROID
}
-uptr GetTlsSize() {
- return g_tls_size;
-}
-
-#if defined(__x86_64__) || defined(__i386__)
+#if (defined(__x86_64__) || defined(__i386__)) && SANITIZER_LINUX
// sizeof(struct thread) from glibc.
static atomic_uintptr_t kThreadDescriptorSize;
uptr ThreadDescriptorSize() {
- char buf[64];
uptr val = atomic_load(&kThreadDescriptorSize, memory_order_relaxed);
if (val)
return val;
#ifdef _CS_GNU_LIBC_VERSION
+ char buf[64];
uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));
if (len < sizeof(buf) && internal_strncmp(buf, "glibc 2.", 8) == 0) {
char *end;
int minor = internal_simple_strtoll(buf + 8, &end, 10);
if (end != buf + 8 && (*end == '\0' || *end == '.')) {
/* sizeof(struct thread) values from various glibc versions. */
- if (minor <= 3)
+ if (SANITIZER_X32)
+ val = 1728; // Assume only one particular version for x32.
+ else if (minor <= 3)
val = FIRST_32_SECOND_64(1104, 1696);
else if (minor == 4)
val = FIRST_32_SECOND_64(1120, 1728);
@@ -253,27 +225,79 @@ uptr ThreadSelfOffset() {
uptr ThreadSelf() {
uptr descr_addr;
-#ifdef __i386__
+# if defined(__i386__)
asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
-#else
+# elif defined(__x86_64__)
asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
-#endif
+# else
+# error "unsupported CPU arch"
+# endif
return descr_addr;
}
-#endif // defined(__x86_64__) || defined(__i386__)
+#endif // (defined(__x86_64__) || defined(__i386__)) && SANITIZER_LINUX
+
+#if SANITIZER_FREEBSD
+static void **ThreadSelfSegbase() {
+ void **segbase = 0;
+# if defined(__i386__)
+ // sysarch(I386_GET_GSBASE, segbase);
+ __asm __volatile("mov %%gs:0, %0" : "=r" (segbase));
+# elif defined(__x86_64__)
+ // sysarch(AMD64_GET_FSBASE, segbase);
+ __asm __volatile("movq %%fs:0, %0" : "=r" (segbase));
+# else
+# error "unsupported CPU arch for FreeBSD platform"
+# endif
+ return segbase;
+}
-void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
- uptr *tls_addr, uptr *tls_size) {
-#ifndef SANITIZER_GO
-#if defined(__x86_64__) || defined(__i386__)
- *tls_addr = ThreadSelf();
- *tls_size = GetTlsSize();
- *tls_addr -= *tls_size;
- *tls_addr += ThreadDescriptorSize();
+uptr ThreadSelf() {
+ return (uptr)ThreadSelfSegbase()[2];
+}
+#endif // SANITIZER_FREEBSD
+
+static void GetTls(uptr *addr, uptr *size) {
+#if SANITIZER_LINUX
+# if defined(__x86_64__) || defined(__i386__)
+ *addr = ThreadSelf();
+ *size = GetTlsSize();
+ *addr -= *size;
+ *addr += ThreadDescriptorSize();
+# else
+ *addr = 0;
+ *size = 0;
+# endif
+#elif SANITIZER_FREEBSD
+ void** segbase = ThreadSelfSegbase();
+ *addr = 0;
+ *size = 0;
+ if (segbase != 0) {
+ // tcbalign = 16
+ // tls_size = round(tls_static_space, tcbalign);
+ // dtv = segbase[1];
+ // dtv[2] = segbase - tls_static_space;
+ void **dtv = (void**) segbase[1];
+ *addr = (uptr) dtv[2];
+ *size = (*addr == 0) ? 0 : ((uptr) segbase[0] - (uptr) dtv[2]);
+ }
#else
- *tls_addr = 0;
- *tls_size = 0;
+# error "Unknown OS"
#endif
+}
+
+uptr GetTlsSize() {
+#if SANITIZER_FREEBSD
+ uptr addr, size;
+ GetTls(&addr, &size);
+ return size;
+#else
+ return g_tls_size;
+#endif
+}
+
+void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
+ uptr *tls_addr, uptr *tls_size) {
+ GetTls(tls_addr, tls_size);
uptr stack_top, stack_bottom;
GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
@@ -289,19 +313,13 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
*tls_addr = *stk_addr + *stk_size;
}
}
-#else // SANITIZER_GO
- *stk_addr = 0;
- *stk_size = 0;
- *tls_addr = 0;
- *tls_size = 0;
-#endif // SANITIZER_GO
}
-void AdjustStackSizeLinux(void *attr_) {
+void AdjustStackSize(void *attr_) {
pthread_attr_t *attr = (pthread_attr_t *)attr_;
uptr stackaddr = 0;
size_t stacksize = 0;
- __sanitizer_pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
+ my_pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
// GLibC will return (0 - stacksize) as the stack address in the case when
// stacksize is set, but stackaddr is not.
bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);
@@ -309,10 +327,11 @@ void AdjustStackSizeLinux(void *attr_) {
const uptr minstacksize = GetTlsSize() + 128*1024;
if (stacksize < minstacksize) {
if (!stack_set) {
- if (common_flags()->verbosity && stacksize != 0)
- Printf("Sanitizer: increasing stacksize %zu->%zu\n", stacksize,
- minstacksize);
- pthread_attr_setstacksize(attr, minstacksize);
+ if (stacksize != 0) {
+ VPrintf(1, "Sanitizer: increasing stacksize %zu->%zu\n", stacksize,
+ minstacksize);
+ pthread_attr_setstacksize(attr, minstacksize);
+ }
} else {
Printf("Sanitizer: pre-allocated stack size is insufficient: "
"%zu < %zu\n", stacksize, minstacksize);
@@ -324,10 +343,17 @@ void AdjustStackSizeLinux(void *attr_) {
#if SANITIZER_ANDROID
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
string_predicate_t filter) {
- return 0;
+ MemoryMappingLayout memory_mapping(false);
+ return memory_mapping.DumpListOfModules(modules, max_modules, filter);
}
#else // SANITIZER_ANDROID
+# if !SANITIZER_FREEBSD
typedef ElfW(Phdr) Elf_Phdr;
+# elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2
+# define Elf_Phdr XElf32_Phdr
+# define dl_phdr_info xdl_phdr_info
+# define dl_iterate_phdr(c, b) xdl_iterate_phdr((c), (b))
+# endif
struct DlIteratePhdrData {
LoadedModule *modules;
@@ -363,7 +389,8 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
if (phdr->p_type == PT_LOAD) {
uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
uptr cur_end = cur_beg + phdr->p_memsz;
- cur_module->addAddressRange(cur_beg, cur_end);
+ bool executable = phdr->p_flags & PF_X;
+ cur_module->addAddressRange(cur_beg, cur_end, executable);
}
}
return 0;
@@ -378,6 +405,27 @@ uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
}
#endif // SANITIZER_ANDROID
+uptr indirect_call_wrapper;
+
+void SetIndirectCallWrapper(uptr wrapper) {
+ CHECK(!indirect_call_wrapper);
+ CHECK(wrapper);
+ indirect_call_wrapper = wrapper;
+}
+
+void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
+ // Some kinds of sandboxes may forbid filesystem access, so we won't be able
+  // to read the file mappings from /proc/self/maps. Luckily, the process
+  // won't be able to load additional libraries either, so it's fine to use
+  // the cached mappings.
+ MemoryMappingLayout::CacheMemoryMappings();
+ // Same for /proc/self/exe in the symbolizer.
+#if !SANITIZER_GO
+ Symbolizer::GetOrInit()->PrepareForSandboxing();
+ CovPrepareForSandboxing(args);
+#endif
+}
+
} // namespace __sanitizer
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
diff --git a/libsanitizer/sanitizer_common/sanitizer_list.h b/libsanitizer/sanitizer_common/sanitizer_list.h
index 9692e01b8e..b72c548e38 100644
--- a/libsanitizer/sanitizer_common/sanitizer_list.h
+++ b/libsanitizer/sanitizer_common/sanitizer_list.h
@@ -24,6 +24,8 @@ namespace __sanitizer {
// non-zero-initialized objects before using.
template<class Item>
struct IntrusiveList {
+ friend class Iterator;
+
void clear() {
first_ = last_ = 0;
size_ = 0;
@@ -111,6 +113,21 @@ struct IntrusiveList {
}
}
+ class Iterator {
+ public:
+ explicit Iterator(IntrusiveList<Item> *list)
+ : list_(list), current_(list->first_) { }
+ Item *next() {
+ Item *ret = current_;
+ if (current_) current_ = current_->next;
+ return ret;
+ }
+ bool hasNext() const { return current_ != 0; }
+ private:
+ IntrusiveList<Item> *list_;
+ Item *current_;
+ };
+
// private, don't use directly.
uptr size_;
Item *first_;
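
Typical use of the Iterator added above is a hasNext()/next() loop. A short sketch with a hypothetical node type; the only requirement IntrusiveList places on items is a |next| pointer member:

#include "sanitizer_list.h"

struct ExampleNode {   // hypothetical item type, not part of this patch
  ExampleNode *next;   // link consumed by IntrusiveList
  int value;
};

static int SumValues(__sanitizer::IntrusiveList<ExampleNode> *list) {
  int sum = 0;
  __sanitizer::IntrusiveList<ExampleNode>::Iterator it(list);
  while (it.hasNext())
    sum += it.next()->value;
  return sum;
}
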
diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.cc b/libsanitizer/sanitizer_common/sanitizer_mac.cc
index 288e31ca07..17b931cb53 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mac.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_mac.cc
@@ -5,9 +5,8 @@
//
//===----------------------------------------------------------------------===//
//
-// This file is shared between AddressSanitizer and ThreadSanitizer
-// run-time libraries and implements mac-specific functions from
-// sanitizer_libc.h.
+// This file is shared between various sanitizers' runtime libraries and
+// implements OSX-specific functions.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
@@ -21,20 +20,22 @@
#include <stdio.h>
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
+#include "sanitizer_mac.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include <crt_externs.h> // for _NSGetEnviron
#include <fcntl.h>
-#include <mach-o/dyld.h>
-#include <mach-o/loader.h>
#include <pthread.h>
#include <sched.h>
+#include <signal.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
+#include <sys/sysctl.h>
#include <sys/types.h>
#include <unistd.h>
#include <libkern/OSAtomic.h>
@@ -118,6 +119,24 @@ uptr internal_getpid() {
return getpid();
}
+int internal_sigaction(int signum, const void *act, void *oldact) {
+ return sigaction(signum,
+ (struct sigaction *)act, (struct sigaction *)oldact);
+}
+
+int internal_fork() {
+ // TODO(glider): this may call user's pthread_atfork() handlers which is bad.
+ return fork();
+}
+
+uptr internal_rename(const char *oldpath, const char *newpath) {
+ return rename(oldpath, newpath);
+}
+
+uptr internal_ftruncate(fd_t fd, uptr size) {
+ return ftruncate(fd, size);
+}
+
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
struct stat st;
@@ -136,6 +155,20 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
CHECK(stack_top);
CHECK(stack_bottom);
uptr stacksize = pthread_get_stacksize_np(pthread_self());
+ // pthread_get_stacksize_np() returns an incorrect stack size for the main
+ // thread on Mavericks. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=261
+ if ((GetMacosVersion() >= MACOS_VERSION_MAVERICKS) && at_initialization &&
+ stacksize == (1 << 19)) {
+ struct rlimit rl;
+ CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
+ // Most often rl.rlim_cur will be the desired 8M.
+ if (rl.rlim_cur < kMaxThreadStackSize) {
+ stacksize = rl.rlim_cur;
+ } else {
+ stacksize = kMaxThreadStackSize;
+ }
+ }
void *stackaddr = pthread_get_stackaddr_np(pthread_self());
*stack_top = (uptr)stackaddr;
*stack_bottom = *stack_top - stacksize;
@@ -169,7 +202,8 @@ void ReExec() {
UNIMPLEMENTED();
}
-void PrepareForSandboxing() {
+void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
+ (void)args;
// Nothing here for now.
}
@@ -177,148 +211,6 @@ uptr GetPageSize() {
return sysconf(_SC_PAGESIZE);
}
-// ----------------- sanitizer_procmaps.h
-
-MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
- Reset();
-}
-
-MemoryMappingLayout::~MemoryMappingLayout() {
-}
-
-// More information about Mach-O headers can be found in mach-o/loader.h
-// Each Mach-O image has a header (mach_header or mach_header_64) starting with
-// a magic number, and a list of linker load commands directly following the
-// header.
-// A load command is at least two 32-bit words: the command type and the
-// command size in bytes. We're interested only in segment load commands
-// (LC_SEGMENT and LC_SEGMENT_64), which tell that a part of the file is mapped
-// into the task's address space.
-// The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or
-// segment_command_64 correspond to the memory address, memory size and the
-// file offset of the current memory segment.
-// Because these fields are taken from the images as is, one needs to add
-// _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.
-
-void MemoryMappingLayout::Reset() {
- // Count down from the top.
- // TODO(glider): as per man 3 dyld, iterating over the headers with
- // _dyld_image_count is thread-unsafe. We need to register callbacks for
- // adding and removing images which will invalidate the MemoryMappingLayout
- // state.
- current_image_ = _dyld_image_count();
- current_load_cmd_count_ = -1;
- current_load_cmd_addr_ = 0;
- current_magic_ = 0;
- current_filetype_ = 0;
-}
-
-// static
-void MemoryMappingLayout::CacheMemoryMappings() {
- // No-op on Mac for now.
-}
-
-void MemoryMappingLayout::LoadFromCache() {
- // No-op on Mac for now.
-}
-
-// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
-// Google Perftools, http://code.google.com/p/google-perftools.
-
-// NextSegmentLoad scans the current image for the next segment load command
-// and returns the start and end addresses and file offset of the corresponding
-// segment.
-// Note that the segment addresses are not necessarily sorted.
-template<u32 kLCSegment, typename SegmentCommand>
-bool MemoryMappingLayout::NextSegmentLoad(
- uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size, uptr *protection) {
- if (protection)
- UNIMPLEMENTED();
- const char* lc = current_load_cmd_addr_;
- current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
- if (((const load_command *)lc)->cmd == kLCSegment) {
- const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
- const SegmentCommand* sc = (const SegmentCommand *)lc;
- if (start) *start = sc->vmaddr + dlloff;
- if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
- if (offset) {
- if (current_filetype_ == /*MH_EXECUTE*/ 0x2) {
- *offset = sc->vmaddr;
- } else {
- *offset = sc->fileoff;
- }
- }
- if (filename) {
- internal_strncpy(filename, _dyld_get_image_name(current_image_),
- filename_size);
- }
- return true;
- }
- return false;
-}
-
-bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size,
- uptr *protection) {
- for (; current_image_ >= 0; current_image_--) {
- const mach_header* hdr = _dyld_get_image_header(current_image_);
- if (!hdr) continue;
- if (current_load_cmd_count_ < 0) {
- // Set up for this image;
- current_load_cmd_count_ = hdr->ncmds;
- current_magic_ = hdr->magic;
- current_filetype_ = hdr->filetype;
- switch (current_magic_) {
-#ifdef MH_MAGIC_64
- case MH_MAGIC_64: {
- current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header_64);
- break;
- }
-#endif
- case MH_MAGIC: {
- current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header);
- break;
- }
- default: {
- continue;
- }
- }
- }
-
- for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
- switch (current_magic_) {
- // current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
-#ifdef MH_MAGIC_64
- case MH_MAGIC_64: {
- if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
- start, end, offset, filename, filename_size, protection))
- return true;
- break;
- }
-#endif
- case MH_MAGIC: {
- if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
- start, end, offset, filename, filename_size, protection))
- return true;
- break;
- }
- }
- }
- // If we get here, no more load_cmd's in this image talk about
- // segments. Go on to the next image.
- }
- return false;
-}
-
-bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
- char filename[],
- uptr filename_size,
- uptr *protection) {
- return IterateForObjectNameAndOffset(addr, offset, filename, filename_size,
- protection);
-}
-
BlockingMutex::BlockingMutex(LinkerInitialized) {
// We assume that OS_SPINLOCK_INIT is zero
}
@@ -377,32 +269,50 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
string_predicate_t filter) {
MemoryMappingLayout memory_mapping(false);
- memory_mapping.Reset();
- uptr cur_beg, cur_end, cur_offset;
- InternalScopedBuffer<char> module_name(kMaxPathLength);
- uptr n_modules = 0;
- for (uptr i = 0;
- n_modules < max_modules &&
- memory_mapping.Next(&cur_beg, &cur_end, &cur_offset,
- module_name.data(), module_name.size(), 0);
- i++) {
- const char *cur_name = module_name.data();
- if (cur_name[0] == '\0')
- continue;
- if (filter && !filter(cur_name))
- continue;
- LoadedModule *cur_module = 0;
- if (n_modules > 0 &&
- 0 == internal_strcmp(cur_name, modules[n_modules - 1].full_name())) {
- cur_module = &modules[n_modules - 1];
- } else {
- void *mem = &modules[n_modules];
- cur_module = new(mem) LoadedModule(cur_name, cur_beg);
- n_modules++;
+ return memory_mapping.DumpListOfModules(modules, max_modules, filter);
+}
+
+bool IsDeadlySignal(int signum) {
+ return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv;
+}
+
+MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED;
+
+MacosVersion GetMacosVersionInternal() {
+ int mib[2] = { CTL_KERN, KERN_OSRELEASE };
+ char version[100];
+ uptr len = 0, maxlen = sizeof(version) / sizeof(version[0]);
+ for (uptr i = 0; i < maxlen; i++) version[i] = '\0';
+ // Get the version length.
+ CHECK_NE(sysctl(mib, 2, 0, &len, 0, 0), -1);
+ CHECK_LT(len, maxlen);
+ CHECK_NE(sysctl(mib, 2, version, &len, 0, 0), -1);
+ switch (version[0]) {
+ case '9': return MACOS_VERSION_LEOPARD;
+ case '1': {
+ switch (version[1]) {
+ case '0': return MACOS_VERSION_SNOW_LEOPARD;
+ case '1': return MACOS_VERSION_LION;
+ case '2': return MACOS_VERSION_MOUNTAIN_LION;
+ case '3': return MACOS_VERSION_MAVERICKS;
+ case '4': return MACOS_VERSION_YOSEMITE;
+ default: return MACOS_VERSION_UNKNOWN;
+ }
}
- cur_module->addAddressRange(cur_beg, cur_end);
+ default: return MACOS_VERSION_UNKNOWN;
+ }
+}
+
+MacosVersion GetMacosVersion() {
+ atomic_uint32_t *cache =
+ reinterpret_cast<atomic_uint32_t*>(&cached_macos_version);
+ MacosVersion result =
+ static_cast<MacosVersion>(atomic_load(cache, memory_order_acquire));
+ if (result == MACOS_VERSION_UNINITIALIZED) {
+ result = GetMacosVersionInternal();
+ atomic_store(cache, result, memory_order_release);
}
- return n_modules;
+ return result;
}
} // namespace __sanitizer
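
GetMacosVersionInternal above keys off the first characters of the Darwin kernel release string, where major version 13 maps to Mavericks and 14 to Yosemite. A standalone sketch of the same query, using sysctlbyname rather than the mib-array form for brevity:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int main() {
  char release[100] = {};
  size_t len = sizeof(release) - 1;
  // "kern.osrelease" is e.g. "13.4.0" on OS X 10.9 (Mavericks).
  if (sysctlbyname("kern.osrelease", release, &len, NULL, 0) != 0)
    return 1;
  printf("Darwin kernel release: %s\n", release);
  return 0;
}
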
diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.h b/libsanitizer/sanitizer_common/sanitizer_mac.h
new file mode 100644
index 0000000000..47739f71c3
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_mac.h
@@ -0,0 +1,35 @@
+//===-- sanitizer_mac.h -----------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries and
+// provides definitions for OSX-specific functions.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_MAC_H
+#define SANITIZER_MAC_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+
+namespace __sanitizer {
+
+enum MacosVersion {
+ MACOS_VERSION_UNINITIALIZED = 0,
+ MACOS_VERSION_UNKNOWN,
+ MACOS_VERSION_LEOPARD,
+ MACOS_VERSION_SNOW_LEOPARD,
+ MACOS_VERSION_LION,
+ MACOS_VERSION_MOUNTAIN_LION,
+ MACOS_VERSION_MAVERICKS,
+ MACOS_VERSION_YOSEMITE,
+};
+
+MacosVersion GetMacosVersion();
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC
+#endif // SANITIZER_MAC_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_mutex.h b/libsanitizer/sanitizer_common/sanitizer_mutex.h
index d78f43edaa..adc3add600 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mutex.h
+++ b/libsanitizer/sanitizer_common/sanitizer_mutex.h
@@ -81,6 +81,88 @@ class BlockingMutex {
uptr owner_; // for debugging
};
+// Reader-writer spin mutex.
+class RWMutex {
+ public:
+ RWMutex() {
+ atomic_store(&state_, kUnlocked, memory_order_relaxed);
+ }
+
+ ~RWMutex() {
+ CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
+ }
+
+ void Lock() {
+ u32 cmp = kUnlocked;
+ if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
+ memory_order_acquire))
+ return;
+ LockSlow();
+ }
+
+ void Unlock() {
+ u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
+ DCHECK_NE(prev & kWriteLock, 0);
+ (void)prev;
+ }
+
+ void ReadLock() {
+ u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
+ if ((prev & kWriteLock) == 0)
+ return;
+ ReadLockSlow();
+ }
+
+ void ReadUnlock() {
+ u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
+ DCHECK_EQ(prev & kWriteLock, 0);
+ DCHECK_GT(prev & ~kWriteLock, 0);
+ (void)prev;
+ }
+
+ void CheckLocked() {
+ CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
+ }
+
+ private:
+ atomic_uint32_t state_;
+
+ enum {
+ kUnlocked = 0,
+ kWriteLock = 1,
+ kReadLock = 2
+ };
+
+ void NOINLINE LockSlow() {
+ for (int i = 0;; i++) {
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ u32 cmp = atomic_load(&state_, memory_order_relaxed);
+ if (cmp == kUnlocked &&
+ atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
+ memory_order_acquire))
+ return;
+ }
+ }
+
+ void NOINLINE ReadLockSlow() {
+ for (int i = 0;; i++) {
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ u32 prev = atomic_load(&state_, memory_order_acquire);
+ if ((prev & kWriteLock) == 0)
+ return;
+ }
+ }
+
+ RWMutex(const RWMutex&);
+ void operator = (const RWMutex&);
+};
+
template<typename MutexType>
class GenericScopedLock {
public:
@@ -121,6 +203,8 @@ class GenericScopedReadLock {
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
+typedef GenericScopedLock<RWMutex> RWMutexLock;
+typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
} // namespace __sanitizer
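
The RWMutex added above is intended to be taken through the scoped-lock typedefs (RWMutexLock for writers, RWMutexReadLock for readers). A minimal sketch guarding a hypothetical shared counter:

#include "sanitizer_mutex.h"

namespace __sanitizer {

static RWMutex example_mtx;    // hypothetical shared state, for illustration
static int example_size = 0;

int ReadExampleSize() {
  RWMutexReadLock l(&example_mtx);  // shared: concurrent readers are allowed
  return example_size;
}

void GrowExample() {
  RWMutexLock l(&example_mtx);      // exclusive: writers are serialized
  example_size++;
}

}  // namespace __sanitizer
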
diff --git a/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.cc b/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.cc
new file mode 100644
index 0000000000..b989ed0c90
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.cc
@@ -0,0 +1,17 @@
+//===-- sanitizer_persistent_allocator.cc -----------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+#include "sanitizer_persistent_allocator.h"
+
+namespace __sanitizer {
+
+PersistentAllocator thePersistentAllocator;
+
+} // namespace __sanitizer
diff --git a/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h b/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h
new file mode 100644
index 0000000000..e29b7bd57a
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h
@@ -0,0 +1,69 @@
+//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A fast memory allocator that does not support free() nor realloc().
+// All allocations are forever.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H
+#define SANITIZER_PERSISTENT_ALLOCATOR_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+class PersistentAllocator {
+ public:
+ void *alloc(uptr size);
+
+ private:
+ void *tryAlloc(uptr size);
+ StaticSpinMutex mtx; // Protects alloc of new blocks for region allocator.
+ atomic_uintptr_t region_pos; // Region allocator for Node's.
+ atomic_uintptr_t region_end;
+};
+
+inline void *PersistentAllocator::tryAlloc(uptr size) {
+  // Optimistic lock-free allocation, essentially try to bump the region ptr.
+ for (;;) {
+ uptr cmp = atomic_load(&region_pos, memory_order_acquire);
+ uptr end = atomic_load(&region_end, memory_order_acquire);
+ if (cmp == 0 || cmp + size > end) return 0;
+ if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
+ memory_order_acquire))
+ return (void *)cmp;
+ }
+}
+
+inline void *PersistentAllocator::alloc(uptr size) {
+  // First, try to allocate optimistically.
+ void *s = tryAlloc(size);
+ if (s) return s;
+ // If failed, lock, retry and alloc new superblock.
+ SpinMutexLock l(&mtx);
+ for (;;) {
+ s = tryAlloc(size);
+ if (s) return s;
+ atomic_store(&region_pos, 0, memory_order_relaxed);
+ uptr allocsz = 64 * 1024;
+ if (allocsz < size) allocsz = size;
+ uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
+ atomic_store(&region_end, mem + allocsz, memory_order_release);
+ atomic_store(&region_pos, mem, memory_order_release);
+ }
+}
+
+extern PersistentAllocator thePersistentAllocator;
+inline void *PersistentAlloc(uptr sz) {
+ return thePersistentAllocator.alloc(sz);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_PERSISTENT_ALLOCATOR_H
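
A sketch of intended use: PersistentAlloc never releases memory, so it only suits data that must live until process exit. internal_strlen and internal_memcpy come from sanitizer_libc.h; the helper itself is made up for illustration:

#include "sanitizer_persistent_allocator.h"
#include "sanitizer_libc.h"

namespace __sanitizer {

// Copies |s| into storage that is never freed.
static const char *PersistentStrdup(const char *s) {
  uptr len = internal_strlen(s);
  char *mem = (char *)PersistentAlloc(len + 1);
  internal_memcpy(mem, s, len + 1);
  return mem;
}

}  // namespace __sanitizer
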
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform.h b/libsanitizer/sanitizer_common/sanitizer_platform.h
index 7693fe7ff1..55bec90b08 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform.h
@@ -11,7 +11,8 @@
#ifndef SANITIZER_PLATFORM_H
#define SANITIZER_PLATFORM_H
-#if !defined(__linux__) && !defined(__APPLE__) && !defined(_WIN32)
+#if !defined(__linux__) && !defined(__FreeBSD__) && \
+ !defined(__APPLE__) && !defined(_WIN32)
# error "This operating system is not supported"
#endif
@@ -21,6 +22,12 @@
# define SANITIZER_LINUX 0
#endif
+#if defined(__FreeBSD__)
+# define SANITIZER_FREEBSD 1
+#else
+# define SANITIZER_FREEBSD 0
+#endif
+
#if defined(__APPLE__)
# define SANITIZER_MAC 1
# include <TargetConditionals.h>
@@ -40,12 +47,83 @@
# define SANITIZER_WINDOWS 0
#endif
-#if defined(__ANDROID__) || defined(ANDROID)
+#if defined(__ANDROID__)
# define SANITIZER_ANDROID 1
#else
# define SANITIZER_ANDROID 0
#endif
-#define SANITIZER_POSIX (SANITIZER_LINUX || SANITIZER_MAC)
+#define SANITIZER_POSIX (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC)
+
+#if __LP64__ || defined(_WIN64)
+# define SANITIZER_WORDSIZE 64
+#else
+# define SANITIZER_WORDSIZE 32
+#endif
+
+#if SANITIZER_WORDSIZE == 64
+# define FIRST_32_SECOND_64(a, b) (b)
+#else
+# define FIRST_32_SECOND_64(a, b) (a)
+#endif
+
+#if defined(__x86_64__) && !defined(_LP64)
+# define SANITIZER_X32 1
+#else
+# define SANITIZER_X32 0
+#endif
+
+// By default we allow the use of SizeClassAllocator64 on 64-bit platforms.
+// But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64
+// does not work well and we need to fall back to SizeClassAllocator32.
+// For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or
+// change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.
+#ifndef SANITIZER_CAN_USE_ALLOCATOR64
+# if defined(__aarch64__) || defined(__mips64)
+# define SANITIZER_CAN_USE_ALLOCATOR64 0
+# else
+# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
+# endif
+#endif
+
+// The range of addresses which can be returned by mmap.
+// FIXME: this value should be different on different platforms,
+// e.g. on AArch64 it is most likely (1ULL << 39). Larger values will still work
+// but will consume more memory for TwoLevelByteMap.
+#if defined(__aarch64__)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 39)
+#else
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+#endif
+
+// The AArch64 linux port uses the canonical syscall set as mandated by
+// the upstream linux community for all new ports. Other ports may still
+// use legacy syscalls.
+#ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# if defined(__aarch64__) && SANITIZER_LINUX
+# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1
+# else
+# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0
+# endif
+#endif
+
+// uid16 syscalls can only be used when the following conditions are
+// met:
+// * target is one of arm32, x86-32, sparc32, sh or m68k
+// * libc version is libc5, glibc-2.0, glibc-2.1 or glibc-2.2 to 2.15
+// built against > linux-2.2 kernel headers
+// Since we don't want to include libc headers here, we check the
+// target only.
+#if defined(__arm__) || SANITIZER_X32 || defined(__sparc__)
+#define SANITIZER_USES_UID16_SYSCALLS 1
+#else
+#define SANITIZER_USES_UID16_SYSCALLS 0
+#endif
+
+#ifdef __mips__
+# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10)
+#else
+# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
+#endif
#endif // SANITIZER_PLATFORM_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
index f37d84b041..fc2d7ba990 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
@@ -27,6 +27,12 @@
# define SI_LINUX_NOT_ANDROID 0
#endif
+#if SANITIZER_FREEBSD
+# define SI_FREEBSD 1
+#else
+# define SI_FREEBSD 0
+#endif
+
#if SANITIZER_LINUX
# define SI_LINUX 1
#else
@@ -45,14 +51,16 @@
# define SI_IOS 0
#endif
-# define SANITIZER_INTERCEPT_STRCMP 1
-# define SANITIZER_INTERCEPT_TEXTDOMAIN SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_STRCASECMP SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_STRCMP 1
+#define SANITIZER_INTERCEPT_TEXTDOMAIN SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STRCASECMP SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_MEMCHR 1
+#define SANITIZER_INTERCEPT_MEMRCHR SI_LINUX
-# define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_WRITE SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PWRITE SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_WRITE SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PWRITE SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PREAD64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PWRITE64 SI_LINUX_NOT_ANDROID
@@ -65,109 +73,165 @@
#define SANITIZER_INTERCEPT_PREADV64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PWRITEV64 SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_PRCTL SI_LINUX
-
-# define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_STRPTIME SI_NOT_WINDOWS
-
-# define SANITIZER_INTERCEPT_SCANF SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX
-
-# define SANITIZER_INTERCEPT_FREXP 1
-# define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_NOT_WINDOWS
-
-# define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \
- SI_MAC || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_CLOCK_GETTIME SI_LINUX
-# define SANITIZER_INTERCEPT_GETITIMER SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_TIME SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_WAIT SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_INET SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETADDRINFO SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETNAMEINFO SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETSOCKNAME SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETHOSTBYNAME_R SI_LINUX
-# define SANITIZER_INTERCEPT_GETSOCKOPT SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_ACCEPT SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_ACCEPT4 SI_LINUX
-# define SANITIZER_INTERCEPT_MODF SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_RECVMSG SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETPEERNAME SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_IOCTL SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_INET_ATON SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_SYSINFO SI_LINUX
-# define SANITIZER_INTERCEPT_READDIR SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_PTRACE SI_LINUX_NOT_ANDROID && \
- (defined(__i386) || defined (__x86_64)) // NOLINT
-# define SANITIZER_INTERCEPT_SETLOCALE SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETCWD SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME SI_LINUX
-# define SANITIZER_INTERCEPT_STRTOIMAX SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_MBSTOWCS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_MBSNRTOWCS SI_MAC || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_WCSTOMBS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_WCSNRTOMBS SI_MAC || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX
-# define SANITIZER_INTERCEPT_REALPATH SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_CONFSTR SI_MAC || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_STRERROR SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_STRERROR_R SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SCANDIR SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_GETGROUPS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_POLL SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_WORDEXP SI_MAC || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SIGWAIT SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SIGSETOPS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_SIGPENDING SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_SIGPROCMASK SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_BACKTRACE SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX
-# define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_STATFS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_STATFS64 \
- (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_STATVFS SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_INITGROUPS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_ETHER SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_ETHER_R SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SHMCTL \
- (SI_LINUX_NOT_ANDROID && SANITIZER_WORDSIZE == 64)
-# define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
- SI_MAC || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_TMPNAM SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_TEMPNAM SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_SINCOS SI_LINUX
-# define SANITIZER_INTERCEPT_REMQUO SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_LGAMMA SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_LGAMMA_R SI_LINUX
-# define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_ICONV SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_TIMES SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PRCTL SI_LINUX
-// FIXME: getline seems to be available on OSX 10.7
-# define SANITIZER_INTERCEPT_GETLINE SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_STRPTIME SI_NOT_WINDOWS
+
+#define SANITIZER_INTERCEPT_SCANF SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX_NOT_ANDROID
+
+#ifndef SANITIZER_INTERCEPT_PRINTF
+# define SANITIZER_INTERCEPT_PRINTF SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_LINUX_NOT_ANDROID
+#endif
-# define SANITIZER_INTERCEPT__EXIT SI_LINUX
+#define SANITIZER_INTERCEPT_FREXP 1
+#define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PHTREAD_MUTEX SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PTHREAD_COND SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \
+ SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETPWENT SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_FGETPWENT SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETPWENT_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SETPWENT SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_CLOCK_GETTIME SI_LINUX
+#define SANITIZER_INTERCEPT_GETITIMER SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_TIME SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WAIT SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_INET SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETADDRINFO SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETNAMEINFO SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETSOCKNAME SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R SI_LINUX
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETHOSTENT_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETSOCKOPT SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_ACCEPT SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_ACCEPT4 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_MODF SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_RECVMSG SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETPEERNAME SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_IOCTL SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_INET_ATON SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_SYSINFO SI_LINUX
+#define SANITIZER_INTERCEPT_READDIR SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTRACE SI_LINUX_NOT_ANDROID && \
+ (defined(__i386) || defined (__x86_64)) // NOLINT
+#define SANITIZER_INTERCEPT_SETLOCALE SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETCWD SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STRTOIMAX SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_MBSTOWCS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_MBSNRTOWCS SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WCSTOMBS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_WCSNRTOMBS SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_REALPATH SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_CONFSTR SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STRERROR SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_STRERROR_R SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SCANDIR SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETGROUPS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_POLL SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WORDEXP (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SIGWAIT SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SIGSETOPS \
+ SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SIGPENDING SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_SIGPROCMASK SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_BACKTRACE SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX
+#define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STATFS SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STATFS64 \
+ (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STATVFS SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_INITGROUPS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_ETHER_HOST SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_ETHER_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SHMCTL \
+ (SI_LINUX_NOT_ANDROID && SANITIZER_WORDSIZE == 64)
+#define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
+ SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL \
+ SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING \
+ SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_TMPNAM SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_TEMPNAM SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_SINCOS SI_LINUX
+#define SANITIZER_INTERCEPT_REMQUO SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_LGAMMA SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_LGAMMA_R SI_LINUX
+#define SANITIZER_INTERCEPT_LGAMMAL_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_RAND_R SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_ICONV SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_TIMES SI_NOT_WINDOWS
+
+// FIXME: getline seems to be available on OSX 10.7
+#define SANITIZER_INTERCEPT_GETLINE SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT__EXIT SI_LINUX || SI_FREEBSD
+
+#define SANITIZER_INTERCEPT_PHTREAD_MUTEX SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP \
+ SI_FREEBSD || SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT_TLS_GET_ADDR SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT_LISTXATTR SI_LINUX
+#define SANITIZER_INTERCEPT_GETXATTR SI_LINUX
+#define SANITIZER_INTERCEPT_GETRESID SI_LINUX
+#define SANITIZER_INTERCEPT_GETIFADDRS SI_LINUX_NOT_ANDROID || SI_MAC
+#define SANITIZER_INTERCEPT_IF_INDEXTONAME SI_LINUX_NOT_ANDROID || SI_MAC
+#define SANITIZER_INTERCEPT_CAPGET SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_AEABI_MEM SI_LINUX && defined(__arm__)
+#define SANITIZER_INTERCEPT___BZERO SI_MAC
+#define SANITIZER_INTERCEPT_FTIME SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_XDR SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_TSEARCH SI_LINUX_NOT_ANDROID || SI_MAC
+#define SANITIZER_INTERCEPT_LIBIO_INTERNALS SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_FOPEN SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_FOPEN64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_OPEN_MEMSTREAM SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_OBSTACK SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_FFLUSH SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_FCLOSE SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_DLOPEN_DLCLOSE \
+ SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_MAC
+#define SANITIZER_INTERCEPT_GETPASS SI_LINUX_NOT_ANDROID || SI_MAC
+#define SANITIZER_INTERCEPT_TIMERFD SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT_MLOCKX SI_NOT_WINDOWS
#endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
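The SANITIZER_INTERCEPT_* switches defined above are consumed by the shared interceptor sources, where each switch gates the definition of one wrapper. Below is a minimal stand-alone sketch of that gating pattern, with the surrounding INTERCEPTOR()/COMMON_INTERCEPTOR_* machinery from sanitizer_common_interceptors.inc replaced by a plain wrapper function; apart from the SANITIZER_INTERCEPT_GETLINE name, everything here is simplified and illustrative.

// Sketch only: how a SANITIZER_INTERCEPT_* switch typically gates an
// interceptor.  The wrapper below is an ordinary function; the real code
// uses INTERCEPTOR() and COMMON_INTERCEPTOR_WRITE_RANGE() instead.
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

#define SI_LINUX_NOT_ANDROID 1                          // pretend: Linux/glibc
#define SANITIZER_INTERCEPT_GETLINE SI_LINUX_NOT_ANDROID

#if SANITIZER_INTERCEPT_GETLINE
static ssize_t wrapped_getline(char **lineptr, size_t *n, FILE *stream) {
  ssize_t res = getline(lineptr, n, stream);            // call the real libc
  if (res > 0)                                          // then report the write
    fprintf(stderr, "[intercept] getline wrote %zd bytes\n", res);
  return res;
}
#endif

int main(void) {
#if SANITIZER_INTERCEPT_GETLINE
  char *line = NULL;
  size_t cap = 0;
  wrapped_getline(&line, &cap, stdin);
  free(line);
#endif
  return 0;
}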
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
index ac56f33c84..a1f0432503 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
@@ -27,7 +27,7 @@
// are not defined anywhere in userspace headers. Fake them. This seems to work
// fine with newer headers, too.
#include <asm/posix_types.h>
-#if defined(__x86_64__)
+#if defined(__x86_64__) || defined(__mips__)
#include <sys/stat.h>
#else
#define ino_t __kernel_ino_t
@@ -49,21 +49,19 @@
#include <linux/aio_abi.h>
-#if SANITIZER_ANDROID
-#include <asm/statfs.h>
-#else
-#include <sys/statfs.h>
-#endif
-
#if !SANITIZER_ANDROID
+#include <sys/statfs.h>
#include <linux/perf_event.h>
#endif
namespace __sanitizer {
+#if !SANITIZER_ANDROID
unsigned struct_statfs64_sz = sizeof(struct statfs64);
+#endif
} // namespace __sanitizer
-#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__sparc__)
+#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\
+ && !defined(__mips__) && !defined(__sparc__)
COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
#endif
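The COMPILER_CHECK lines adjusted in this hunk turn a size mismatch into a build failure rather than a runtime surprise. The sketch below shows the underlying idea using the classic negative-array-size trick; the macro and struct names are illustrative, and the sanitizer's own COMPILER_CHECK may be defined differently.

/* Sketch: a build-time size check in the spirit of COMPILER_CHECK.
 * MY_COMPILER_CHECK and my_old_timeval are illustrative names only. */
#define MY_COMPILER_CHECK(pred) \
  typedef char my_compiler_check_failed[(pred) ? 1 : -1]

/* A hand-written mirror of some kernel/libc struct, plus its expected size. */
struct my_old_timeval { long tv_sec; long tv_usec; };
enum { my_old_timeval_sz = 2 * sizeof(long) };

/* Compiles only while the hard-coded size matches the real layout; if the
 * headers change, the array gets a negative size and the build stops here. */
MY_COMPILER_CHECK(sizeof(struct my_old_timeval) == my_old_timeval_sz);

int main(void) { return 0; }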
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc
index 196eb3b3c6..4971c4075b 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc
@@ -12,11 +12,20 @@
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX || SANITIZER_MAC
-
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_platform_limits_posix.h"
-
+#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC
+// Tests in this file assume that off_t-dependent data structures match the
+// libc ABI. For example, struct dirent here is what the readdir() function (as
+// exported from libc) returns, and not the user-facing "dirent", which
+// depends on _FILE_OFFSET_BITS setting.
+// To get this "true" dirent definition, we undefine _FILE_OFFSET_BITS below.
+#ifdef _FILE_OFFSET_BITS
+#undef _FILE_OFFSET_BITS
+#endif
+#if SANITIZER_FREEBSD
+#define _WANT_RTENTRY
+#include <sys/param.h>
+#include <sys/socketvar.h>
+#endif
#include <arpa/inet.h>
#include <dirent.h>
#include <errno.h>
@@ -31,6 +40,7 @@
#include <pwd.h>
#include <signal.h>
#include <stddef.h>
+#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/stat.h>
@@ -42,12 +52,15 @@
#include <time.h>
#include <wchar.h>
+#if !SANITIZER_ANDROID
+#include <sys/mount.h>
+#include <sys/timeb.h>
+#endif
+
#if SANITIZER_LINUX
+#include <malloc.h>
#include <mntent.h>
#include <netinet/ether.h>
-#include <utime.h>
-#include <sys/mount.h>
-#include <sys/ptrace.h>
#include <sys/sysinfo.h>
#include <sys/vt.h>
#include <linux/cdrom.h>
@@ -62,18 +75,63 @@
#include <linux/posix_types.h>
#endif
+#if SANITIZER_FREEBSD
+# include <sys/mount.h>
+# include <sys/sockio.h>
+# include <sys/socket.h>
+# include <sys/filio.h>
+# include <sys/signal.h>
+# include <sys/timespec.h>
+# include <sys/timex.h>
+# include <sys/mqueue.h>
+# include <sys/msg.h>
+# include <sys/ipc.h>
+# include <sys/msg.h>
+# include <sys/statvfs.h>
+# include <sys/soundcard.h>
+# include <sys/mtio.h>
+# include <sys/consio.h>
+# include <sys/kbio.h>
+# include <sys/link_elf.h>
+# include <netinet/ip_mroute.h>
+# include <netinet/in.h>
+# include <netinet/ip_compat.h>
+# include <net/ethernet.h>
+# include <net/ppp_defs.h>
+# include <glob.h>
+# include <term.h>
+
+#define _KERNEL // to declare 'shminfo' structure
+# include <sys/shm.h>
+#undef _KERNEL
+
+#undef INLINE // to avoid clashes with sanitizers' definitions
+#endif
+
+#if SANITIZER_FREEBSD || SANITIZER_IOS
+#undef IOC_DIRMASK
+#endif
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+# include <utime.h>
+# include <sys/ptrace.h>
+#endif
+
#if !SANITIZER_ANDROID
+#include <ifaddrs.h>
#include <sys/ucontext.h>
#include <wordexp.h>
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
#include <glob.h>
+#include <obstack.h>
#include <mqueue.h>
#include <net/if_ppp.h>
#include <netax25/ax25.h>
#include <netipx/ipx.h>
#include <netrom/netrom.h>
+#include <rpc/xdr.h>
#include <scsi/scsi.h>
#include <sys/mtio.h>
#include <sys/kd.h>
@@ -92,7 +150,6 @@
#include <linux/serial.h>
#include <sys/msg.h>
#include <sys/ipc.h>
-#include <sys/shm.h>
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
#if SANITIZER_ANDROID
@@ -112,16 +169,19 @@
#if SANITIZER_MAC
#include <net/ethernet.h>
#include <sys/filio.h>
-#include <sys/mount.h>
#include <sys/sockio.h>
#endif
+// Include these after system headers to avoid name clashes and ambiguities.
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_posix.h"
+
namespace __sanitizer {
unsigned struct_utsname_sz = sizeof(struct utsname);
unsigned struct_stat_sz = sizeof(struct stat);
-#if !SANITIZER_IOS
+#if !SANITIZER_IOS && !SANITIZER_FREEBSD
unsigned struct_stat64_sz = sizeof(struct stat64);
-#endif // !SANITIZER_IOS
+#endif // !SANITIZER_IOS && !SANITIZER_FREEBSD
unsigned struct_rusage_sz = sizeof(struct rusage);
unsigned struct_tm_sz = sizeof(struct tm);
unsigned struct_passwd_sz = sizeof(struct passwd);
@@ -134,46 +194,55 @@ namespace __sanitizer {
unsigned pid_t_sz = sizeof(pid_t);
unsigned timeval_sz = sizeof(timeval);
unsigned uid_t_sz = sizeof(uid_t);
+ unsigned gid_t_sz = sizeof(gid_t);
unsigned mbstate_t_sz = sizeof(mbstate_t);
unsigned sigset_t_sz = sizeof(sigset_t);
unsigned struct_timezone_sz = sizeof(struct timezone);
unsigned struct_tms_sz = sizeof(struct tms);
unsigned struct_sigevent_sz = sizeof(struct sigevent);
unsigned struct_sched_param_sz = sizeof(struct sched_param);
- unsigned struct_statfs_sz = sizeof(struct statfs);
+
#if SANITIZER_MAC && !SANITIZER_IOS
unsigned struct_statfs64_sz = sizeof(struct statfs64);
#endif // SANITIZER_MAC && !SANITIZER_IOS
#if !SANITIZER_ANDROID
+ unsigned struct_statfs_sz = sizeof(struct statfs);
+ unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
unsigned ucontext_t_sz = sizeof(ucontext_t);
#endif // !SANITIZER_ANDROID
#if SANITIZER_LINUX
- unsigned struct_rlimit_sz = sizeof(struct rlimit);
unsigned struct_epoll_event_sz = sizeof(struct epoll_event);
unsigned struct_sysinfo_sz = sizeof(struct sysinfo);
- unsigned struct_timespec_sz = sizeof(struct timespec);
unsigned __user_cap_header_struct_sz =
sizeof(struct __user_cap_header_struct);
unsigned __user_cap_data_struct_sz = sizeof(struct __user_cap_data_struct);
- unsigned struct_utimbuf_sz = sizeof(struct utimbuf);
unsigned struct_new_utsname_sz = sizeof(struct new_utsname);
unsigned struct_old_utsname_sz = sizeof(struct old_utsname);
unsigned struct_oldold_utsname_sz = sizeof(struct oldold_utsname);
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+ unsigned struct_rlimit_sz = sizeof(struct rlimit);
+ unsigned struct_timespec_sz = sizeof(struct timespec);
+ unsigned struct_utimbuf_sz = sizeof(struct utimbuf);
unsigned struct_itimerspec_sz = sizeof(struct itimerspec);
- unsigned struct_ustat_sz = sizeof(struct ustat);
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ unsigned struct_ustat_sz = sizeof(struct ustat);
unsigned struct_rlimit64_sz = sizeof(struct rlimit64);
+ unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
unsigned struct_timex_sz = sizeof(struct timex);
unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds);
unsigned struct_mq_attr_sz = sizeof(struct mq_attr);
unsigned struct_statvfs_sz = sizeof(struct statvfs);
- unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
-#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
uptr sig_ign = (uptr)SIG_IGN;
uptr sig_dfl = (uptr)SIG_DFL;
@@ -184,15 +253,17 @@ namespace __sanitizer {
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
unsigned struct_shminfo_sz = sizeof(struct shminfo);
unsigned struct_shm_info_sz = sizeof(struct shm_info);
int shmctl_ipc_stat = (int)IPC_STAT;
int shmctl_ipc_info = (int)IPC_INFO;
int shmctl_shm_info = (int)SHM_INFO;
- int shmctl_shm_stat = (int)SHM_INFO;
+ int shmctl_shm_stat = (int)SHM_STAT;
#endif
+ int map_fixed = MAP_FIXED;
+
int af_inet = (int)AF_INET;
int af_inet6 = (int)AF_INET6;
@@ -205,13 +276,13 @@ namespace __sanitizer {
return 0;
}
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
int glob_nomatch = GLOB_NOMATCH;
int glob_altdirfunc = GLOB_ALTDIRFUNC;
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
- (defined(__i386) || defined (__x86_64)) // NOLINT
+ (defined(__i386) || defined(__x86_64))
unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct);
#ifdef __x86_64
@@ -229,15 +300,22 @@ namespace __sanitizer {
int ptrace_setfpregs = PTRACE_SETFPREGS;
int ptrace_getfpxregs = PTRACE_GETFPXREGS;
int ptrace_setfpxregs = PTRACE_SETFPXREGS;
+ int ptrace_geteventmsg = PTRACE_GETEVENTMSG;
+#if (defined(PTRACE_GETSIGINFO) && defined(PTRACE_SETSIGINFO)) || \
+ (defined(PT_GETSIGINFO) && defined(PT_SETSIGINFO))
int ptrace_getsiginfo = PTRACE_GETSIGINFO;
int ptrace_setsiginfo = PTRACE_SETSIGINFO;
+#else
+ int ptrace_getsiginfo = -1;
+ int ptrace_setsiginfo = -1;
+#endif // PTRACE_GETSIGINFO/PTRACE_SETSIGINFO
#if defined(PTRACE_GETREGSET) && defined(PTRACE_SETREGSET)
int ptrace_getregset = PTRACE_GETREGSET;
int ptrace_setregset = PTRACE_SETREGSET;
#else
int ptrace_getregset = -1;
int ptrace_setregset = -1;
-#endif
+#endif // PTRACE_GETREGSET/PTRACE_SETREGSET
#endif
unsigned path_max = PATH_MAX;
@@ -257,15 +335,6 @@ namespace __sanitizer {
unsigned struct_cdrom_tocentry_sz = sizeof(struct cdrom_tocentry);
unsigned struct_cdrom_tochdr_sz = sizeof(struct cdrom_tochdr);
unsigned struct_cdrom_volctrl_sz = sizeof(struct cdrom_volctrl);
-#if SOUND_VERSION >= 0x040000
- unsigned struct_copr_buffer_sz = 0;
- unsigned struct_copr_debug_buf_sz = 0;
- unsigned struct_copr_msg_sz = 0;
-#else
- unsigned struct_copr_buffer_sz = sizeof(struct copr_buffer);
- unsigned struct_copr_debug_buf_sz = sizeof(struct copr_debug_buf);
- unsigned struct_copr_msg_sz = sizeof(struct copr_msg);
-#endif
unsigned struct_ff_effect_sz = sizeof(struct ff_effect);
unsigned struct_floppy_drive_params_sz = sizeof(struct floppy_drive_params);
unsigned struct_floppy_drive_struct_sz = sizeof(struct floppy_drive_struct);
@@ -279,23 +348,34 @@ namespace __sanitizer {
unsigned struct_hd_geometry_sz = sizeof(struct hd_geometry);
unsigned struct_input_absinfo_sz = sizeof(struct input_absinfo);
unsigned struct_input_id_sz = sizeof(struct input_id);
+ unsigned struct_mtpos_sz = sizeof(struct mtpos);
+ unsigned struct_termio_sz = sizeof(struct termio);
+ unsigned struct_vt_consize_sz = sizeof(struct vt_consize);
+ unsigned struct_vt_sizes_sz = sizeof(struct vt_sizes);
+ unsigned struct_vt_stat_sz = sizeof(struct vt_stat);
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+#if SOUND_VERSION >= 0x040000
+ unsigned struct_copr_buffer_sz = 0;
+ unsigned struct_copr_debug_buf_sz = 0;
+ unsigned struct_copr_msg_sz = 0;
+#else
+ unsigned struct_copr_buffer_sz = sizeof(struct copr_buffer);
+ unsigned struct_copr_debug_buf_sz = sizeof(struct copr_debug_buf);
+ unsigned struct_copr_msg_sz = sizeof(struct copr_msg);
+#endif
unsigned struct_midi_info_sz = sizeof(struct midi_info);
unsigned struct_mtget_sz = sizeof(struct mtget);
unsigned struct_mtop_sz = sizeof(struct mtop);
- unsigned struct_mtpos_sz = sizeof(struct mtpos);
unsigned struct_rtentry_sz = sizeof(struct rtentry);
unsigned struct_sbi_instrument_sz = sizeof(struct sbi_instrument);
unsigned struct_seq_event_rec_sz = sizeof(struct seq_event_rec);
unsigned struct_synth_info_sz = sizeof(struct synth_info);
- unsigned struct_termio_sz = sizeof(struct termio);
- unsigned struct_vt_consize_sz = sizeof(struct vt_consize);
unsigned struct_vt_mode_sz = sizeof(struct vt_mode);
- unsigned struct_vt_sizes_sz = sizeof(struct vt_sizes);
- unsigned struct_vt_stat_sz = sizeof(struct vt_stat);
-#endif
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
#if SANITIZER_LINUX && !SANITIZER_ANDROID
- unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info);
unsigned struct_ax25_parms_struct_sz = sizeof(struct ax25_parms_struct);
unsigned struct_cyclades_monitor_sz = sizeof(struct cyclades_monitor);
#if EV_VERSION > (0x010000)
@@ -310,7 +390,6 @@ namespace __sanitizer {
unsigned struct_kbsentry_sz = sizeof(struct kbsentry);
unsigned struct_mtconfiginfo_sz = sizeof(struct mtconfiginfo);
unsigned struct_nr_parms_struct_sz = sizeof(struct nr_parms_struct);
- unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
unsigned struct_scc_modem_sz = sizeof(struct scc_modem);
unsigned struct_scc_stat_sz = sizeof(struct scc_stat);
unsigned struct_serial_multiport_struct_sz
@@ -319,14 +398,19 @@ namespace __sanitizer {
unsigned struct_sockaddr_ax25_sz = sizeof(struct sockaddr_ax25);
unsigned struct_unimapdesc_sz = sizeof(struct unimapdesc);
unsigned struct_unimapinit_sz = sizeof(struct unimapinit);
-#endif
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+ unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info);
+ unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
+#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
#if !SANITIZER_ANDROID && !SANITIZER_MAC
unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
#endif
- unsigned IOCTL_NOT_PRESENT = 0;
+ const unsigned IOCTL_NOT_PRESENT = 0;
unsigned IOCTL_FIOASYNC = FIOASYNC;
unsigned IOCTL_FIOCLEX = FIOCLEX;
@@ -372,10 +456,11 @@ namespace __sanitizer {
unsigned IOCTL_TIOCSPGRP = TIOCSPGRP;
unsigned IOCTL_TIOCSTI = TIOCSTI;
unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ;
-#if (SANITIZER_LINUX && !SANITIZER_ANDROID)
+#if ((SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID)
unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT;
unsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT;
#endif
+
#if SANITIZER_LINUX
unsigned IOCTL_EVIOCGABS = EVIOCGABS(0);
unsigned IOCTL_EVIOCGBIT = EVIOCGBIT(0, 0);
@@ -466,9 +551,7 @@ namespace __sanitizer {
unsigned IOCTL_HDIO_SET_MULTCOUNT = HDIO_SET_MULTCOUNT;
unsigned IOCTL_HDIO_SET_NOWERR = HDIO_SET_NOWERR;
unsigned IOCTL_HDIO_SET_UNMASKINTR = HDIO_SET_UNMASKINTR;
- unsigned IOCTL_MTIOCGET = MTIOCGET;
unsigned IOCTL_MTIOCPOS = MTIOCPOS;
- unsigned IOCTL_MTIOCTOP = MTIOCTOP;
unsigned IOCTL_PPPIOCGASYNCMAP = PPPIOCGASYNCMAP;
unsigned IOCTL_PPPIOCGDEBUG = PPPIOCGDEBUG;
unsigned IOCTL_PPPIOCGFLAGS = PPPIOCGFLAGS;
@@ -501,7 +584,7 @@ namespace __sanitizer {
unsigned IOCTL_SIOCSIFMEM = SIOCSIFMEM;
unsigned IOCTL_SIOCSIFSLAVE = SIOCSIFSLAVE;
unsigned IOCTL_SIOCSRARP = SIOCSRARP;
-#if SOUND_VERSION >= 0x040000
+# if SOUND_VERSION >= 0x040000
unsigned IOCTL_SNDCTL_COPR_HALT = IOCTL_NOT_PRESENT;
unsigned IOCTL_SNDCTL_COPR_LOAD = IOCTL_NOT_PRESENT;
unsigned IOCTL_SNDCTL_COPR_RCODE = IOCTL_NOT_PRESENT;
@@ -518,7 +601,7 @@ namespace __sanitizer {
unsigned IOCTL_SOUND_PCM_READ_RATE = IOCTL_NOT_PRESENT;
unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS = IOCTL_NOT_PRESENT;
unsigned IOCTL_SOUND_PCM_WRITE_FILTER = IOCTL_NOT_PRESENT;
-#else
+# else // SOUND_VERSION
unsigned IOCTL_SNDCTL_COPR_HALT = SNDCTL_COPR_HALT;
unsigned IOCTL_SNDCTL_COPR_LOAD = SNDCTL_COPR_LOAD;
unsigned IOCTL_SNDCTL_COPR_RCODE = SNDCTL_COPR_RCODE;
@@ -535,7 +618,39 @@ namespace __sanitizer {
unsigned IOCTL_SOUND_PCM_READ_RATE = SOUND_PCM_READ_RATE;
unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS = SOUND_PCM_WRITE_CHANNELS;
unsigned IOCTL_SOUND_PCM_WRITE_FILTER = SOUND_PCM_WRITE_FILTER;
-#endif
+#endif // SOUND_VERSION
+ unsigned IOCTL_TCFLSH = TCFLSH;
+ unsigned IOCTL_TCGETA = TCGETA;
+ unsigned IOCTL_TCGETS = TCGETS;
+ unsigned IOCTL_TCSBRK = TCSBRK;
+ unsigned IOCTL_TCSBRKP = TCSBRKP;
+ unsigned IOCTL_TCSETA = TCSETA;
+ unsigned IOCTL_TCSETAF = TCSETAF;
+ unsigned IOCTL_TCSETAW = TCSETAW;
+ unsigned IOCTL_TCSETS = TCSETS;
+ unsigned IOCTL_TCSETSF = TCSETSF;
+ unsigned IOCTL_TCSETSW = TCSETSW;
+ unsigned IOCTL_TCXONC = TCXONC;
+ unsigned IOCTL_TIOCGLCKTRMIOS = TIOCGLCKTRMIOS;
+ unsigned IOCTL_TIOCGSOFTCAR = TIOCGSOFTCAR;
+ unsigned IOCTL_TIOCINQ = TIOCINQ;
+ unsigned IOCTL_TIOCLINUX = TIOCLINUX;
+ unsigned IOCTL_TIOCSERCONFIG = TIOCSERCONFIG;
+ unsigned IOCTL_TIOCSERGETLSR = TIOCSERGETLSR;
+ unsigned IOCTL_TIOCSERGWILD = TIOCSERGWILD;
+ unsigned IOCTL_TIOCSERSWILD = TIOCSERSWILD;
+ unsigned IOCTL_TIOCSLCKTRMIOS = TIOCSLCKTRMIOS;
+ unsigned IOCTL_TIOCSSOFTCAR = TIOCSSOFTCAR;
+ unsigned IOCTL_VT_DISALLOCATE = VT_DISALLOCATE;
+ unsigned IOCTL_VT_GETSTATE = VT_GETSTATE;
+ unsigned IOCTL_VT_RESIZE = VT_RESIZE;
+ unsigned IOCTL_VT_RESIZEX = VT_RESIZEX;
+ unsigned IOCTL_VT_SENDSIG = VT_SENDSIG;
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+ unsigned IOCTL_MTIOCGET = MTIOCGET;
+ unsigned IOCTL_MTIOCTOP = MTIOCTOP;
unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE = SNDCTL_DSP_GETBLKSIZE;
unsigned IOCTL_SNDCTL_DSP_GETFMTS = SNDCTL_DSP_GETFMTS;
unsigned IOCTL_SNDCTL_DSP_NONBLOCK = SNDCTL_DSP_NONBLOCK;
@@ -620,40 +735,14 @@ namespace __sanitizer {
unsigned IOCTL_SOUND_MIXER_WRITE_SYNTH = SOUND_MIXER_WRITE_SYNTH;
unsigned IOCTL_SOUND_MIXER_WRITE_TREBLE = SOUND_MIXER_WRITE_TREBLE;
unsigned IOCTL_SOUND_MIXER_WRITE_VOLUME = SOUND_MIXER_WRITE_VOLUME;
- unsigned IOCTL_TCFLSH = TCFLSH;
- unsigned IOCTL_TCGETA = TCGETA;
- unsigned IOCTL_TCGETS = TCGETS;
- unsigned IOCTL_TCSBRK = TCSBRK;
- unsigned IOCTL_TCSBRKP = TCSBRKP;
- unsigned IOCTL_TCSETA = TCSETA;
- unsigned IOCTL_TCSETAF = TCSETAF;
- unsigned IOCTL_TCSETAW = TCSETAW;
- unsigned IOCTL_TCSETS = TCSETS;
- unsigned IOCTL_TCSETSF = TCSETSF;
- unsigned IOCTL_TCSETSW = TCSETSW;
- unsigned IOCTL_TCXONC = TCXONC;
- unsigned IOCTL_TIOCGLCKTRMIOS = TIOCGLCKTRMIOS;
- unsigned IOCTL_TIOCGSOFTCAR = TIOCGSOFTCAR;
- unsigned IOCTL_TIOCINQ = TIOCINQ;
- unsigned IOCTL_TIOCLINUX = TIOCLINUX;
- unsigned IOCTL_TIOCSERCONFIG = TIOCSERCONFIG;
- unsigned IOCTL_TIOCSERGETLSR = TIOCSERGETLSR;
- unsigned IOCTL_TIOCSERGWILD = TIOCSERGWILD;
- unsigned IOCTL_TIOCSERSWILD = TIOCSERSWILD;
- unsigned IOCTL_TIOCSLCKTRMIOS = TIOCSLCKTRMIOS;
- unsigned IOCTL_TIOCSSOFTCAR = TIOCSSOFTCAR;
unsigned IOCTL_VT_ACTIVATE = VT_ACTIVATE;
- unsigned IOCTL_VT_DISALLOCATE = VT_DISALLOCATE;
unsigned IOCTL_VT_GETMODE = VT_GETMODE;
- unsigned IOCTL_VT_GETSTATE = VT_GETSTATE;
unsigned IOCTL_VT_OPENQRY = VT_OPENQRY;
unsigned IOCTL_VT_RELDISP = VT_RELDISP;
- unsigned IOCTL_VT_RESIZE = VT_RESIZE;
- unsigned IOCTL_VT_RESIZEX = VT_RESIZEX;
- unsigned IOCTL_VT_SENDSIG = VT_SENDSIG;
unsigned IOCTL_VT_SETMODE = VT_SETMODE;
unsigned IOCTL_VT_WAITACTIVE = VT_WAITACTIVE;
-#endif
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
+
#if SANITIZER_LINUX && !SANITIZER_ANDROID
unsigned IOCTL_CYGETDEFTHRESH = CYGETDEFTHRESH;
unsigned IOCTL_CYGETDEFTIMEOUT = CYGETDEFTIMEOUT;
@@ -685,37 +774,25 @@ namespace __sanitizer {
unsigned IOCTL_FS_IOC_SETVERSION = FS_IOC_SETVERSION;
unsigned IOCTL_GIO_CMAP = GIO_CMAP;
unsigned IOCTL_GIO_FONT = GIO_FONT;
- unsigned IOCTL_GIO_SCRNMAP = GIO_SCRNMAP;
unsigned IOCTL_GIO_UNIMAP = GIO_UNIMAP;
unsigned IOCTL_GIO_UNISCRNMAP = GIO_UNISCRNMAP;
unsigned IOCTL_KDADDIO = KDADDIO;
unsigned IOCTL_KDDELIO = KDDELIO;
- unsigned IOCTL_KDDISABIO = KDDISABIO;
- unsigned IOCTL_KDENABIO = KDENABIO;
unsigned IOCTL_KDGETKEYCODE = KDGETKEYCODE;
- unsigned IOCTL_KDGETLED = KDGETLED;
- unsigned IOCTL_KDGETMODE = KDGETMODE;
unsigned IOCTL_KDGKBDIACR = KDGKBDIACR;
unsigned IOCTL_KDGKBENT = KDGKBENT;
unsigned IOCTL_KDGKBLED = KDGKBLED;
unsigned IOCTL_KDGKBMETA = KDGKBMETA;
- unsigned IOCTL_KDGKBMODE = KDGKBMODE;
unsigned IOCTL_KDGKBSENT = KDGKBSENT;
- unsigned IOCTL_KDGKBTYPE = KDGKBTYPE;
unsigned IOCTL_KDMAPDISP = KDMAPDISP;
- unsigned IOCTL_KDMKTONE = KDMKTONE;
unsigned IOCTL_KDSETKEYCODE = KDSETKEYCODE;
- unsigned IOCTL_KDSETLED = KDSETLED;
- unsigned IOCTL_KDSETMODE = KDSETMODE;
unsigned IOCTL_KDSIGACCEPT = KDSIGACCEPT;
unsigned IOCTL_KDSKBDIACR = KDSKBDIACR;
unsigned IOCTL_KDSKBENT = KDSKBENT;
unsigned IOCTL_KDSKBLED = KDSKBLED;
unsigned IOCTL_KDSKBMETA = KDSKBMETA;
- unsigned IOCTL_KDSKBMODE = KDSKBMODE;
unsigned IOCTL_KDSKBSENT = KDSKBSENT;
unsigned IOCTL_KDUNMAPDISP = KDUNMAPDISP;
- unsigned IOCTL_KIOCSOUND = KIOCSOUND;
unsigned IOCTL_LPABORT = LPABORT;
unsigned IOCTL_LPABORTOPEN = LPABORTOPEN;
unsigned IOCTL_LPCAREFUL = LPCAREFUL;
@@ -730,7 +807,6 @@ namespace __sanitizer {
unsigned IOCTL_MTIOCSETCONFIG = MTIOCSETCONFIG;
unsigned IOCTL_PIO_CMAP = PIO_CMAP;
unsigned IOCTL_PIO_FONT = PIO_FONT;
- unsigned IOCTL_PIO_SCRNMAP = PIO_SCRNMAP;
unsigned IOCTL_PIO_UNIMAP = PIO_UNIMAP;
unsigned IOCTL_PIO_UNIMAPCLR = PIO_UNIMAPCLR;
unsigned IOCTL_PIO_UNISCRNMAP = PIO_UNISCRNMAP;
@@ -752,20 +828,40 @@ namespace __sanitizer {
unsigned IOCTL_SIOCNRGETPARMS = SIOCNRGETPARMS;
unsigned IOCTL_SIOCNRRTCTL = SIOCNRRTCTL;
unsigned IOCTL_SIOCNRSETPARMS = SIOCNRSETPARMS;
- unsigned IOCTL_SNDCTL_DSP_GETISPACE = SNDCTL_DSP_GETISPACE;
- unsigned IOCTL_SNDCTL_DSP_GETOSPACE = SNDCTL_DSP_GETOSPACE;
unsigned IOCTL_TIOCGSERIAL = TIOCGSERIAL;
unsigned IOCTL_TIOCSERGETMULTI = TIOCSERGETMULTI;
unsigned IOCTL_TIOCSERSETMULTI = TIOCSERSETMULTI;
unsigned IOCTL_TIOCSSERIAL = TIOCSSERIAL;
-#endif
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+ unsigned IOCTL_GIO_SCRNMAP = GIO_SCRNMAP;
+ unsigned IOCTL_KDDISABIO = KDDISABIO;
+ unsigned IOCTL_KDENABIO = KDENABIO;
+ unsigned IOCTL_KDGETLED = KDGETLED;
+ unsigned IOCTL_KDGETMODE = KDGETMODE;
+ unsigned IOCTL_KDGKBMODE = KDGKBMODE;
+ unsigned IOCTL_KDGKBTYPE = KDGKBTYPE;
+ unsigned IOCTL_KDMKTONE = KDMKTONE;
+ unsigned IOCTL_KDSETLED = KDSETLED;
+ unsigned IOCTL_KDSETMODE = KDSETMODE;
+ unsigned IOCTL_KDSKBMODE = KDSKBMODE;
+ unsigned IOCTL_KIOCSOUND = KIOCSOUND;
+ unsigned IOCTL_PIO_SCRNMAP = PIO_SCRNMAP;
+ unsigned IOCTL_SNDCTL_DSP_GETISPACE = SNDCTL_DSP_GETISPACE;
+ unsigned IOCTL_SNDCTL_DSP_GETOSPACE = SNDCTL_DSP_GETOSPACE;
+#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+
+ const int errno_EINVAL = EINVAL;
// EOWNERDEAD is not present in some older platforms.
#if defined(EOWNERDEAD)
- extern const int errno_EOWNERDEAD = EOWNERDEAD;
+ const int errno_EOWNERDEAD = EOWNERDEAD;
#else
- extern const int errno_EOWNERDEAD = -1;
+ const int errno_EOWNERDEAD = -1;
#endif
+
+ const int si_SEGV_MAPERR = SEGV_MAPERR;
+ const int si_SEGV_ACCERR = SEGV_ACCERR;
} // namespace __sanitizer
COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
@@ -774,6 +870,31 @@ COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));
CHECK_TYPE_SIZE(pthread_key_t);
#if SANITIZER_LINUX
+// FIXME: We define those on Linux and Mac, but only check on Linux.
+COMPILER_CHECK(IOC_NRBITS == _IOC_NRBITS);
+COMPILER_CHECK(IOC_TYPEBITS == _IOC_TYPEBITS);
+COMPILER_CHECK(IOC_SIZEBITS == _IOC_SIZEBITS);
+COMPILER_CHECK(IOC_DIRBITS == _IOC_DIRBITS);
+COMPILER_CHECK(IOC_NRMASK == _IOC_NRMASK);
+COMPILER_CHECK(IOC_TYPEMASK == _IOC_TYPEMASK);
+COMPILER_CHECK(IOC_SIZEMASK == _IOC_SIZEMASK);
+COMPILER_CHECK(IOC_DIRMASK == _IOC_DIRMASK);
+COMPILER_CHECK(IOC_NRSHIFT == _IOC_NRSHIFT);
+COMPILER_CHECK(IOC_TYPESHIFT == _IOC_TYPESHIFT);
+COMPILER_CHECK(IOC_SIZESHIFT == _IOC_SIZESHIFT);
+COMPILER_CHECK(IOC_DIRSHIFT == _IOC_DIRSHIFT);
+COMPILER_CHECK(IOC_NONE == _IOC_NONE);
+COMPILER_CHECK(IOC_WRITE == _IOC_WRITE);
+COMPILER_CHECK(IOC_READ == _IOC_READ);
+COMPILER_CHECK(EVIOC_ABS_MAX == ABS_MAX);
+COMPILER_CHECK(EVIOC_EV_MAX == EV_MAX);
+COMPILER_CHECK(IOC_SIZE(0x12345678) == _IOC_SIZE(0x12345678));
+COMPILER_CHECK(IOC_DIR(0x12345678) == _IOC_DIR(0x12345678));
+COMPILER_CHECK(IOC_NR(0x12345678) == _IOC_NR(0x12345678));
+COMPILER_CHECK(IOC_TYPE(0x12345678) == _IOC_TYPE(0x12345678));
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
// There are more undocumented fields in dl_phdr_info that we are not interested
// in.
COMPILER_CHECK(sizeof(__sanitizer_dl_phdr_info) <= sizeof(dl_phdr_info));
@@ -781,11 +902,9 @@ CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr);
CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);
CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);
CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
-COMPILER_CHECK(IOC_SIZE(0x12345678) == _IOC_SIZE(0x12345678));
-#endif
-
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
CHECK_TYPE_SIZE(glob_t);
CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
@@ -837,6 +956,8 @@ COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));
CHECK_SIZE_AND_OFFSET(dirent, d_ino);
#if SANITIZER_MAC
CHECK_SIZE_AND_OFFSET(dirent, d_seekoff);
+#elif SANITIZER_FREEBSD
+// There is no 'd_off' field on FreeBSD.
#else
CHECK_SIZE_AND_OFFSET(dirent, d_off);
#endif
@@ -882,8 +1003,12 @@ CHECK_SIZE_AND_OFFSET(__sysctl_args, newlen);
CHECK_TYPE_SIZE(__kernel_uid_t);
CHECK_TYPE_SIZE(__kernel_gid_t);
+
+#if SANITIZER_USES_UID16_SYSCALLS
CHECK_TYPE_SIZE(__kernel_old_uid_t);
CHECK_TYPE_SIZE(__kernel_old_gid_t);
+#endif
+
CHECK_TYPE_SIZE(__kernel_off_t);
CHECK_TYPE_SIZE(__kernel_loff_t);
CHECK_TYPE_SIZE(__kernel_fd_set);
@@ -921,15 +1046,26 @@ CHECK_SIZE_AND_OFFSET(mntent, mnt_passno);
CHECK_TYPE_SIZE(ether_addr);
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
CHECK_TYPE_SIZE(ipc_perm);
+# if SANITIZER_FREEBSD
+CHECK_SIZE_AND_OFFSET(ipc_perm, key);
+CHECK_SIZE_AND_OFFSET(ipc_perm, seq);
+# else
CHECK_SIZE_AND_OFFSET(ipc_perm, __key);
+CHECK_SIZE_AND_OFFSET(ipc_perm, __seq);
+# endif
CHECK_SIZE_AND_OFFSET(ipc_perm, uid);
CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
+#ifndef __GLIBC_PREREQ
+#define __GLIBC_PREREQ(x, y) 0
+#endif
+#if !defined(__aarch64__) || !SANITIZER_LINUX || __GLIBC_PREREQ (2, 21)
+/* On aarch64, glibc 2.20 and earlier provided an incorrect mode field. */
CHECK_SIZE_AND_OFFSET(ipc_perm, mode);
-CHECK_SIZE_AND_OFFSET(ipc_perm, __seq);
+#endif
CHECK_TYPE_SIZE(shmid_ds);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);
@@ -944,4 +1080,114 @@ CHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch);
CHECK_TYPE_SIZE(clock_t);
-#endif // SANITIZER_LINUX || SANITIZER_MAC
+#if SANITIZER_LINUX
+CHECK_TYPE_SIZE(clockid_t);
+#endif
+
+#if !SANITIZER_ANDROID
+CHECK_TYPE_SIZE(ifaddrs);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask);
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+// Compare against the union, because we can't reach into the union in a
+// compliant way.
+#ifdef ifa_dstaddr
+#undef ifa_dstaddr
+#endif
+# if SANITIZER_FREEBSD
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);
+# else
+COMPILER_CHECK(sizeof(((__sanitizer_ifaddrs *)NULL)->ifa_dstaddr) ==
+ sizeof(((ifaddrs *)NULL)->ifa_ifu));
+COMPILER_CHECK(offsetof(__sanitizer_ifaddrs, ifa_dstaddr) ==
+ offsetof(ifaddrs, ifa_ifu));
+# endif // SANITIZER_FREEBSD
+#else
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);
+#endif // SANITIZER_LINUX
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);
+#endif
+
+#if SANITIZER_LINUX
+COMPILER_CHECK(sizeof(__sanitizer_mallinfo) == sizeof(struct mallinfo));
+#endif
+
+#if !SANITIZER_ANDROID
+CHECK_TYPE_SIZE(timeb);
+CHECK_SIZE_AND_OFFSET(timeb, time);
+CHECK_SIZE_AND_OFFSET(timeb, millitm);
+CHECK_SIZE_AND_OFFSET(timeb, timezone);
+CHECK_SIZE_AND_OFFSET(timeb, dstflag);
+#endif
+
+CHECK_TYPE_SIZE(passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_name);
+CHECK_SIZE_AND_OFFSET(passwd, pw_passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_uid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_gid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_dir);
+CHECK_SIZE_AND_OFFSET(passwd, pw_shell);
+
+#if !SANITIZER_ANDROID
+CHECK_SIZE_AND_OFFSET(passwd, pw_gecos);
+#endif
+
+#if SANITIZER_MAC
+CHECK_SIZE_AND_OFFSET(passwd, pw_change);
+CHECK_SIZE_AND_OFFSET(passwd, pw_expire);
+CHECK_SIZE_AND_OFFSET(passwd, pw_class);
+#endif
+
+
+CHECK_TYPE_SIZE(group);
+CHECK_SIZE_AND_OFFSET(group, gr_name);
+CHECK_SIZE_AND_OFFSET(group, gr_passwd);
+CHECK_SIZE_AND_OFFSET(group, gr_gid);
+CHECK_SIZE_AND_OFFSET(group, gr_mem);
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+CHECK_TYPE_SIZE(XDR);
+CHECK_SIZE_AND_OFFSET(XDR, x_op);
+CHECK_SIZE_AND_OFFSET(XDR, x_ops);
+CHECK_SIZE_AND_OFFSET(XDR, x_public);
+CHECK_SIZE_AND_OFFSET(XDR, x_private);
+CHECK_SIZE_AND_OFFSET(XDR, x_base);
+CHECK_SIZE_AND_OFFSET(XDR, x_handy);
+COMPILER_CHECK(__sanitizer_XDR_ENCODE == XDR_ENCODE);
+COMPILER_CHECK(__sanitizer_XDR_DECODE == XDR_DECODE);
+COMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE);
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+COMPILER_CHECK(sizeof(__sanitizer_FILE) <= sizeof(FILE));
+CHECK_SIZE_AND_OFFSET(FILE, _flags);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_read_ptr);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_read_end);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_read_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_write_ptr);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_write_end);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_write_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_buf_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_buf_end);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_save_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_backup_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_save_end);
+CHECK_SIZE_AND_OFFSET(FILE, _markers);
+CHECK_SIZE_AND_OFFSET(FILE, _chain);
+CHECK_SIZE_AND_OFFSET(FILE, _fileno);
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+COMPILER_CHECK(sizeof(__sanitizer__obstack_chunk) <= sizeof(_obstack_chunk));
+CHECK_SIZE_AND_OFFSET(_obstack_chunk, limit);
+CHECK_SIZE_AND_OFFSET(_obstack_chunk, prev);
+CHECK_TYPE_SIZE(obstack);
+CHECK_SIZE_AND_OFFSET(obstack, chunk_size);
+CHECK_SIZE_AND_OFFSET(obstack, chunk);
+CHECK_SIZE_AND_OFFSET(obstack, object_base);
+CHECK_SIZE_AND_OFFSET(obstack, next_free);
+#endif
+
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC
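The bulk of the .cc file above records sizeof() values into exported globals and then cross-checks the sanitizers' hand-written mirror structs against the real libc layouts via CHECK_TYPE_SIZE and CHECK_SIZE_AND_OFFSET. A small sketch of that validation idea follows, written with C++11 static_assert for brevity; the real macros expand to COMPILER_CHECK-style assertions, and struct passwd with the my_* names is used here only as a convenient illustration.

// Sketch: validating a hand-written mirror of a libc struct against the real
// one, in the spirit of CHECK_TYPE_SIZE / CHECK_SIZE_AND_OFFSET.
#include <cstddef>   // offsetof
#include <pwd.h>     // struct passwd

struct my_passwd {   // simplified mirror; the real mirrors track every field
  char *pw_name;
  char *pw_passwd;
};

// The mirror must be no larger than the real struct, and shared fields must
// sit at the same offsets, or the interceptors would touch the wrong bytes.
static_assert(sizeof(my_passwd) <= sizeof(passwd), "mirror too large");
static_assert(offsetof(my_passwd, pw_name) == offsetof(passwd, pw_name),
              "pw_name offset mismatch");
static_assert(offsetof(my_passwd, pw_passwd) == offsetof(passwd, pw_passwd),
              "pw_passwd offset mismatch");

int main() { return 0; }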
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
index aec950454b..657cd85264 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -19,12 +19,10 @@
namespace __sanitizer {
extern unsigned struct_utsname_sz;
extern unsigned struct_stat_sz;
-#if !SANITIZER_IOS
+#if !SANITIZER_FREEBSD && !SANITIZER_IOS
extern unsigned struct_stat64_sz;
#endif
extern unsigned struct_rusage_sz;
- extern unsigned struct_passwd_sz;
- extern unsigned struct_group_sz;
extern unsigned siginfo_t_sz;
extern unsigned struct_itimerval_sz;
extern unsigned pthread_t_sz;
@@ -32,41 +30,48 @@ namespace __sanitizer {
extern unsigned pid_t_sz;
extern unsigned timeval_sz;
extern unsigned uid_t_sz;
+ extern unsigned gid_t_sz;
extern unsigned mbstate_t_sz;
extern unsigned struct_timezone_sz;
extern unsigned struct_tms_sz;
extern unsigned struct_itimerspec_sz;
extern unsigned struct_sigevent_sz;
extern unsigned struct_sched_param_sz;
- extern unsigned struct_statfs_sz;
extern unsigned struct_statfs64_sz;
#if !SANITIZER_ANDROID
+ extern unsigned struct_statfs_sz;
+ extern unsigned struct_sockaddr_sz;
extern unsigned ucontext_t_sz;
#endif // !SANITIZER_ANDROID
#if SANITIZER_LINUX
#if defined(__x86_64__)
- const unsigned struct___old_kernel_stat_sz = 32;
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 0;
#elif defined(__i386__)
- const unsigned struct___old_kernel_stat_sz = 32;
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct_kernel_stat64_sz = 96;
#elif defined(__arm__)
- const unsigned struct___old_kernel_stat_sz = 32;
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__aarch64__)
+ const unsigned struct_kernel_stat_sz = 128;
+ const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__powerpc__) && !defined(__powerpc64__)
- const unsigned struct___old_kernel_stat_sz = 32;
const unsigned struct_kernel_stat_sz = 72;
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__powerpc64__)
- const unsigned struct___old_kernel_stat_sz = 0;
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__mips__)
+ #if SANITIZER_WORDSIZE == 64
+ const unsigned struct_kernel_stat_sz = 216;
+ #else
+ const unsigned struct_kernel_stat_sz = 144;
+ #endif
+ const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__sparc__) && defined(__arch64__)
const unsigned struct___old_kernel_stat_sz = 0;
const unsigned struct_kernel_stat_sz = 104;
@@ -82,23 +87,28 @@ namespace __sanitizer {
// More fields that vary with the kernel version.
};
- extern unsigned struct_utimbuf_sz;
+ extern unsigned struct_epoll_event_sz;
+ extern unsigned struct_sysinfo_sz;
+ extern unsigned __user_cap_header_struct_sz;
+ extern unsigned __user_cap_data_struct_sz;
extern unsigned struct_new_utsname_sz;
extern unsigned struct_old_utsname_sz;
extern unsigned struct_oldold_utsname_sz;
- extern unsigned struct_msqid_ds_sz;
- extern unsigned struct_mq_attr_sz;
- extern unsigned struct_timex_sz;
- extern unsigned struct_ustat_sz;
+
+ const unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long);
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+
+#if defined(__powerpc64__)
+ const unsigned struct___old_kernel_stat_sz = 0;
+#elif !defined(__sparc__)
+ const unsigned struct___old_kernel_stat_sz = 32;
+#endif
extern unsigned struct_rlimit_sz;
- extern unsigned struct_epoll_event_sz;
- extern unsigned struct_sysinfo_sz;
+ extern unsigned struct_utimbuf_sz;
extern unsigned struct_timespec_sz;
- extern unsigned __user_cap_header_struct_sz;
- extern unsigned __user_cap_data_struct_sz;
- const unsigned old_sigset_t_sz = sizeof(unsigned long);
- const unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long);
struct __sanitizer_iocb {
u64 aio_data;
@@ -135,11 +145,23 @@ namespace __sanitizer {
uptr newlen;
unsigned long ___unused[4];
};
-#endif // SANITIZER_LINUX
+
+ const unsigned old_sigset_t_sz = sizeof(unsigned long);
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
+
+#if SANITIZER_ANDROID
+ struct __sanitizer_mallinfo {
+ uptr v[10];
+ };
+#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ struct __sanitizer_mallinfo {
+ int v[10];
+ };
+
+ extern unsigned struct_ustat_sz;
extern unsigned struct_rlimit64_sz;
- extern unsigned struct_statvfs_sz;
extern unsigned struct_statvfs64_sz;
struct __sanitizer_ipc_perm {
@@ -153,6 +175,12 @@ namespace __sanitizer {
unsigned __seq;
u64 __unused1;
u64 __unused2;
+#elif defined(__mips__) || defined(__aarch64__)
+ unsigned int mode;
+ unsigned short __seq;
+ unsigned short __pad1;
+ unsigned long __unused1;
+ unsigned long __unused2;
#elif defined(__sparc__)
# if defined(__arch64__)
unsigned mode;
@@ -213,15 +241,15 @@ namespace __sanitizer {
u64 shm_ctime;
#else
uptr shm_atime;
- #ifndef _LP64
+ #if !defined(_LP64) && !defined(__mips__)
uptr __unused1;
#endif
uptr shm_dtime;
- #ifndef _LP64
+ #if !defined(_LP64) && !defined(__mips__)
uptr __unused2;
#endif
uptr shm_ctime;
- #ifndef _LP64
+ #if !defined(_LP64) && !defined(__mips__)
uptr __unused3;
#endif
#endif
@@ -241,19 +269,121 @@ namespace __sanitizer {
#endif
#endif
};
- #endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+#elif SANITIZER_FREEBSD
+ struct __sanitizer_ipc_perm {
+ unsigned int cuid;
+ unsigned int cgid;
+ unsigned int uid;
+ unsigned int gid;
+ unsigned short mode;
+ unsigned short seq;
+ long key;
+ };
+
+ struct __sanitizer_shmid_ds {
+ __sanitizer_ipc_perm shm_perm;
+ unsigned long shm_segsz;
+ unsigned int shm_lpid;
+ unsigned int shm_cpid;
+ int shm_nattch;
+ unsigned long shm_atime;
+ unsigned long shm_dtime;
+ unsigned long shm_ctime;
+ };
+#endif
+
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+ extern unsigned struct_msqid_ds_sz;
+ extern unsigned struct_mq_attr_sz;
+ extern unsigned struct_timex_sz;
+ extern unsigned struct_statvfs_sz;
+#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
struct __sanitizer_iovec {
- void *iov_base;
+ void *iov_base;
uptr iov_len;
};
+#if !SANITIZER_ANDROID
+ struct __sanitizer_ifaddrs {
+ struct __sanitizer_ifaddrs *ifa_next;
+ char *ifa_name;
+ unsigned int ifa_flags;
+ void *ifa_addr; // (struct sockaddr *)
+ void *ifa_netmask; // (struct sockaddr *)
+ // This is a union on Linux.
+# ifdef ifa_dstaddr
+# undef ifa_dstaddr
+# endif
+ void *ifa_dstaddr; // (struct sockaddr *)
+ void *ifa_data;
+ };
+#endif // !SANITIZER_ANDROID
+
#if SANITIZER_MAC
typedef unsigned long __sanitizer_pthread_key_t;
#else
typedef unsigned __sanitizer_pthread_key_t;
#endif
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+
+ struct __sanitizer_XDR {
+ int x_op;
+ void *x_ops;
+ uptr x_public;
+ uptr x_private;
+ uptr x_base;
+ unsigned x_handy;
+ };
+
+ const int __sanitizer_XDR_ENCODE = 0;
+ const int __sanitizer_XDR_DECODE = 1;
+ const int __sanitizer_XDR_FREE = 2;
+#endif
+
+ struct __sanitizer_passwd {
+ char *pw_name;
+ char *pw_passwd;
+ int pw_uid;
+ int pw_gid;
+#if SANITIZER_MAC || SANITIZER_FREEBSD
+ long pw_change;
+ char *pw_class;
+#endif
+#if !SANITIZER_ANDROID
+ char *pw_gecos;
+#endif
+ char *pw_dir;
+ char *pw_shell;
+#if SANITIZER_MAC || SANITIZER_FREEBSD
+ long pw_expire;
+#endif
+#if SANITIZER_FREEBSD
+ int pw_fields;
+#endif
+ };
+
+ struct __sanitizer_group {
+ char *gr_name;
+ char *gr_passwd;
+ int gr_gid;
+ char **gr_mem;
+ };
+
+#if defined(__x86_64__) && !defined(_LP64)
+ typedef long long __sanitizer_time_t;
+#else
+ typedef long __sanitizer_time_t;
+#endif
+
+ struct __sanitizer_timeb {
+ __sanitizer_time_t time;
+ unsigned short millitm;
+ short timezone;
+ short dstflag;
+ };
+
struct __sanitizer_ether_addr {
u8 octet[6];
};
@@ -283,7 +413,7 @@ namespace __sanitizer {
};
#endif
-#if SANITIZER_ANDROID || SANITIZER_MAC
+#if SANITIZER_ANDROID || SANITIZER_MAC || SANITIZER_FREEBSD
struct __sanitizer_msghdr {
void *msg_name;
unsigned msg_namelen;
@@ -322,6 +452,12 @@ namespace __sanitizer {
unsigned short d_reclen;
// more fields that we don't care about
};
+#elif SANITIZER_FREEBSD
+ struct __sanitizer_dirent {
+ unsigned int d_fileno;
+ unsigned short d_reclen;
+ // more fields that we don't care about
+ };
#elif SANITIZER_ANDROID || defined(__x86_64__)
struct __sanitizer_dirent {
unsigned long long d_ino;
@@ -347,14 +483,22 @@ namespace __sanitizer {
};
#endif
-#if defined(__x86_64__) && !defined(_LP64)
+// 'clock_t' is 32 bits wide on x64 FreeBSD
+#if SANITIZER_FREEBSD
+ typedef int __sanitizer_clock_t;
+#elif defined(__x86_64__) && !defined(_LP64)
typedef long long __sanitizer_clock_t;
#else
typedef long __sanitizer_clock_t;
#endif
#if SANITIZER_LINUX
-#if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__)
+ typedef int __sanitizer_clockid_t;
+#endif
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+#if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__)\
+ || defined(__mips__)
typedef unsigned __sanitizer___kernel_uid_t;
typedef unsigned __sanitizer___kernel_gid_t;
#else
@@ -367,7 +511,7 @@ namespace __sanitizer {
typedef long __sanitizer___kernel_off_t;
#endif
-#if defined(__powerpc__)
+#if defined(__powerpc__) || defined(__mips__)
typedef unsigned int __sanitizer___kernel_old_uid_t;
typedef unsigned int __sanitizer___kernel_old_gid_t;
#else
@@ -398,32 +542,56 @@ namespace __sanitizer {
// The size is determined by looking at sizeof of real sigset_t on linux.
uptr val[128 / sizeof(uptr)];
};
+#elif SANITIZER_FREEBSD
+ struct __sanitizer_sigset_t {
+ // uint32_t * 4
+ unsigned int __bits[4];
+ };
#endif
+ // Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.
struct __sanitizer_sigaction {
+#if defined(__mips__) && !SANITIZER_FREEBSD
+ unsigned int sa_flags;
+#endif
union {
- void (*sa_handler)(int sig);
- void (*sa_sigaction)(int sig, void *siginfo, void *uctx);
+ void (*sigaction)(int sig, void *siginfo, void *uctx);
+ void (*handler)(int sig);
};
+#if SANITIZER_FREEBSD
+ int sa_flags;
+ __sanitizer_sigset_t sa_mask;
+#else
__sanitizer_sigset_t sa_mask;
+#ifndef __mips__
#if defined(__sparc__)
unsigned long sa_flags;
#else
int sa_flags;
#endif
+#endif
+#endif
#if SANITIZER_LINUX
void (*sa_restorer)();
#endif
+#if defined(__mips__) && (SANITIZER_WORDSIZE == 32)
+ int sa_resv[1];
+#endif
};
+#if SANITIZER_FREEBSD
+ typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t;
+#else
struct __sanitizer_kernel_sigset_t {
u8 sig[8];
};
+#endif
+ // Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.
struct __sanitizer_kernel_sigaction_t {
union {
- void (*sigaction)(int signo, void *info, void *ctx);
void (*handler)(int signo);
+ void (*sigaction)(int signo, void *info, void *ctx);
};
unsigned long sa_flags;
void (*sa_restorer)(void);
@@ -442,7 +610,7 @@ namespace __sanitizer {
extern int af_inet6;
uptr __sanitizer_in_addr_sz(int af);
-#if SANITIZER_LINUX
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
struct __sanitizer_dl_phdr_info {
uptr dlpi_addr;
const char *dlpi_name;
@@ -456,7 +624,7 @@ namespace __sanitizer {
int ai_family;
int ai_socktype;
int ai_protocol;
-#if SANITIZER_ANDROID || SANITIZER_MAC
+#if SANITIZER_ANDROID || SANITIZER_MAC || SANITIZER_FREEBSD
unsigned ai_addrlen;
char *ai_canonname;
void *ai_addr;
@@ -482,13 +650,14 @@ namespace __sanitizer {
short revents;
};
-#if SANITIZER_ANDROID || SANITIZER_MAC
+#if SANITIZER_ANDROID || SANITIZER_MAC || SANITIZER_FREEBSD
typedef unsigned __sanitizer_nfds_t;
#else
typedef unsigned long __sanitizer_nfds_t;
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if !SANITIZER_ANDROID
+# if SANITIZER_LINUX
struct __sanitizer_glob_t {
uptr gl_pathc;
char **gl_pathv;
@@ -501,10 +670,27 @@ namespace __sanitizer {
int (*gl_lstat)(const char *, void *);
int (*gl_stat)(const char *, void *);
};
+# elif SANITIZER_FREEBSD
+ struct __sanitizer_glob_t {
+ uptr gl_pathc;
+ uptr gl_matchc;
+ uptr gl_offs;
+ int gl_flags;
+ char **gl_pathv;
+ int (*gl_errfunc)(const char*, int);
+ void (*gl_closedir)(void *dirp);
+ struct dirent *(*gl_readdir)(void *dirp);
+ void *(*gl_opendir)(const char*);
+ int (*gl_lstat)(const char*, void* /* struct stat* */);
+ int (*gl_stat)(const char*, void* /* struct stat* */);
+ };
+# endif // SANITIZER_FREEBSD
+# if SANITIZER_LINUX || SANITIZER_FREEBSD
extern int glob_nomatch;
extern int glob_altdirfunc;
-#endif
+# endif
+#endif // !SANITIZER_ANDROID
extern unsigned path_max;
@@ -512,10 +698,38 @@ namespace __sanitizer {
uptr we_wordc;
char **we_wordv;
uptr we_offs;
+#if SANITIZER_FREEBSD
+ char *we_strings;
+ uptr we_nbytes;
+#endif
};
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ struct __sanitizer_FILE {
+ int _flags;
+ char *_IO_read_ptr;
+ char *_IO_read_end;
+ char *_IO_read_base;
+ char *_IO_write_base;
+ char *_IO_write_ptr;
+ char *_IO_write_end;
+ char *_IO_buf_base;
+ char *_IO_buf_end;
+ char *_IO_save_base;
+ char *_IO_backup_base;
+ char *_IO_save_end;
+ void *_markers;
+ __sanitizer_FILE *_chain;
+ int _fileno;
+ };
+# define SANITIZER_HAS_STRUCT_FILE 1
+#else
+ typedef void __sanitizer_FILE;
+# define SANITIZER_HAS_STRUCT_FILE 0
+#endif
+
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
- (defined(__i386) || defined (__x86_64)) // NOLINT
+ (defined(__i386) || defined(__x86_64))
extern unsigned struct_user_regs_struct_sz;
extern unsigned struct_user_fpregs_struct_sz;
extern unsigned struct_user_fpxregs_struct_sz;
@@ -533,9 +747,10 @@ namespace __sanitizer {
extern int ptrace_setsiginfo;
extern int ptrace_getregset;
extern int ptrace_setregset;
+ extern int ptrace_geteventmsg;
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
extern unsigned struct_shminfo_sz;
extern unsigned struct_shm_info_sz;
extern int shmctl_ipc_stat;
@@ -544,6 +759,8 @@ namespace __sanitizer {
extern int shmctl_shm_stat;
#endif
+ extern int map_fixed;
+
// ioctl arguments
struct __sanitizer_ifconf {
int ifc_len;
@@ -556,6 +773,54 @@ namespace __sanitizer {
};
#endif
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+struct __sanitizer__obstack_chunk {
+ char *limit;
+ struct __sanitizer__obstack_chunk *prev;
+};
+
+struct __sanitizer_obstack {
+ long chunk_size;
+ struct __sanitizer__obstack_chunk *chunk;
+ char *object_base;
+ char *next_free;
+ uptr more_fields[7];
+};
+#endif
+
+#define IOC_NRBITS 8
+#define IOC_TYPEBITS 8
+#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__) || defined(__sparc__)
+#define IOC_SIZEBITS 13
+#define IOC_DIRBITS 3
+#define IOC_NONE 1U
+#define IOC_WRITE 4U
+#define IOC_READ 2U
+#else
+#define IOC_SIZEBITS 14
+#define IOC_DIRBITS 2
+#define IOC_NONE 0U
+#define IOC_WRITE 1U
+#define IOC_READ 2U
+#endif
+#define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
+#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
+#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
+#if defined(IOC_DIRMASK)
+#undef IOC_DIRMASK
+#endif
+#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
+#define IOC_NRSHIFT 0
+#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
+#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
+#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
+#define EVIOC_EV_MAX 0x1f
+#define EVIOC_ABS_MAX 0x3f
+
+#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
+#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
+#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
+
#if defined(__sparc__)
// In sparc the 14 bits SIZE field overlaps with the
// least significant bit of DIR, so either IOC_READ or
@@ -564,7 +829,7 @@ namespace __sanitizer {
((((((nr) >> 29) & 0x7) & (4U|2U)) == 0)? \
0 : (((nr) >> 16) & 0x3fff))
#else
-# define IOC_SIZE(nr) (((nr) >> 16) & 0x3fff)
+#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
#endif
extern unsigned struct_arpreq_sz;
@@ -581,9 +846,6 @@ namespace __sanitizer {
extern unsigned struct_cdrom_tocentry_sz;
extern unsigned struct_cdrom_tochdr_sz;
extern unsigned struct_cdrom_volctrl_sz;
- extern unsigned struct_copr_buffer_sz;
- extern unsigned struct_copr_debug_buf_sz;
- extern unsigned struct_copr_msg_sz;
extern unsigned struct_ff_effect_sz;
extern unsigned struct_floppy_drive_params_sz;
extern unsigned struct_floppy_drive_struct_sz;
@@ -597,23 +859,28 @@ namespace __sanitizer {
extern unsigned struct_hd_geometry_sz;
extern unsigned struct_input_absinfo_sz;
extern unsigned struct_input_id_sz;
+ extern unsigned struct_mtpos_sz;
+ extern unsigned struct_termio_sz;
+ extern unsigned struct_vt_consize_sz;
+ extern unsigned struct_vt_sizes_sz;
+ extern unsigned struct_vt_stat_sz;
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+ extern unsigned struct_copr_buffer_sz;
+ extern unsigned struct_copr_debug_buf_sz;
+ extern unsigned struct_copr_msg_sz;
extern unsigned struct_midi_info_sz;
extern unsigned struct_mtget_sz;
extern unsigned struct_mtop_sz;
- extern unsigned struct_mtpos_sz;
extern unsigned struct_rtentry_sz;
extern unsigned struct_sbi_instrument_sz;
extern unsigned struct_seq_event_rec_sz;
extern unsigned struct_synth_info_sz;
- extern unsigned struct_termio_sz;
- extern unsigned struct_vt_consize_sz;
extern unsigned struct_vt_mode_sz;
- extern unsigned struct_vt_sizes_sz;
- extern unsigned struct_vt_stat_sz;
-#endif
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
#if SANITIZER_LINUX && !SANITIZER_ANDROID
- extern unsigned struct_audio_buf_info_sz;
extern unsigned struct_ax25_parms_struct_sz;
extern unsigned struct_cyclades_monitor_sz;
extern unsigned struct_input_keymap_entry_sz;
@@ -624,7 +891,6 @@ namespace __sanitizer {
extern unsigned struct_kbsentry_sz;
extern unsigned struct_mtconfiginfo_sz;
extern unsigned struct_nr_parms_struct_sz;
- extern unsigned struct_ppp_stats_sz;
extern unsigned struct_scc_modem_sz;
extern unsigned struct_scc_stat_sz;
extern unsigned struct_serial_multiport_struct_sz;
@@ -632,7 +898,12 @@ namespace __sanitizer {
extern unsigned struct_sockaddr_ax25_sz;
extern unsigned struct_unimapdesc_sz;
extern unsigned struct_unimapinit_sz;
-#endif
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+ extern unsigned struct_audio_buf_info_sz;
+ extern unsigned struct_ppp_stats_sz;
+#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
#if !SANITIZER_ANDROID && !SANITIZER_MAC
extern unsigned struct_sioc_sg_req_sz;
@@ -643,7 +914,7 @@ namespace __sanitizer {
// A special value to mark ioctls that are not present on the target platform,
// when it can not be determined without including any system headers.
- extern unsigned IOCTL_NOT_PRESENT;
+ extern const unsigned IOCTL_NOT_PRESENT;
extern unsigned IOCTL_FIOASYNC;
extern unsigned IOCTL_FIOCLEX;
@@ -689,7 +960,7 @@ namespace __sanitizer {
extern unsigned IOCTL_TIOCSPGRP;
extern unsigned IOCTL_TIOCSTI;
extern unsigned IOCTL_TIOCSWINSZ;
-#if (SANITIZER_LINUX && !SANITIZER_ANDROID)
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
extern unsigned IOCTL_SIOCGETSGCNT;
extern unsigned IOCTL_SIOCGETVIFCNT;
#endif
@@ -783,9 +1054,7 @@ namespace __sanitizer {
extern unsigned IOCTL_HDIO_SET_MULTCOUNT;
extern unsigned IOCTL_HDIO_SET_NOWERR;
extern unsigned IOCTL_HDIO_SET_UNMASKINTR;
- extern unsigned IOCTL_MTIOCGET;
extern unsigned IOCTL_MTIOCPOS;
- extern unsigned IOCTL_MTIOCTOP;
extern unsigned IOCTL_PPPIOCGASYNCMAP;
extern unsigned IOCTL_PPPIOCGDEBUG;
extern unsigned IOCTL_PPPIOCGFLAGS;
@@ -797,9 +1066,7 @@ namespace __sanitizer {
extern unsigned IOCTL_PPPIOCSMAXCID;
extern unsigned IOCTL_PPPIOCSMRU;
extern unsigned IOCTL_PPPIOCSXASYNCMAP;
- extern unsigned IOCTL_SIOCADDRT;
extern unsigned IOCTL_SIOCDARP;
- extern unsigned IOCTL_SIOCDELRT;
extern unsigned IOCTL_SIOCDRARP;
extern unsigned IOCTL_SIOCGARP;
extern unsigned IOCTL_SIOCGIFENCAP;
@@ -828,6 +1095,39 @@ namespace __sanitizer {
extern unsigned IOCTL_SNDCTL_COPR_SENDMSG;
extern unsigned IOCTL_SNDCTL_COPR_WCODE;
extern unsigned IOCTL_SNDCTL_COPR_WDATA;
+ extern unsigned IOCTL_TCFLSH;
+ extern unsigned IOCTL_TCGETA;
+ extern unsigned IOCTL_TCGETS;
+ extern unsigned IOCTL_TCSBRK;
+ extern unsigned IOCTL_TCSBRKP;
+ extern unsigned IOCTL_TCSETA;
+ extern unsigned IOCTL_TCSETAF;
+ extern unsigned IOCTL_TCSETAW;
+ extern unsigned IOCTL_TCSETS;
+ extern unsigned IOCTL_TCSETSF;
+ extern unsigned IOCTL_TCSETSW;
+ extern unsigned IOCTL_TCXONC;
+ extern unsigned IOCTL_TIOCGLCKTRMIOS;
+ extern unsigned IOCTL_TIOCGSOFTCAR;
+ extern unsigned IOCTL_TIOCINQ;
+ extern unsigned IOCTL_TIOCLINUX;
+ extern unsigned IOCTL_TIOCSERCONFIG;
+ extern unsigned IOCTL_TIOCSERGETLSR;
+ extern unsigned IOCTL_TIOCSERGWILD;
+ extern unsigned IOCTL_TIOCSERSWILD;
+ extern unsigned IOCTL_TIOCSLCKTRMIOS;
+ extern unsigned IOCTL_TIOCSSOFTCAR;
+ extern unsigned IOCTL_VT_DISALLOCATE;
+ extern unsigned IOCTL_VT_GETSTATE;
+ extern unsigned IOCTL_VT_RESIZE;
+ extern unsigned IOCTL_VT_RESIZEX;
+ extern unsigned IOCTL_VT_SENDSIG;
+#endif // SANITIZER_LINUX
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+ extern unsigned IOCTL_MTIOCGET;
+ extern unsigned IOCTL_MTIOCTOP;
+ extern unsigned IOCTL_SIOCADDRT;
+ extern unsigned IOCTL_SIOCDELRT;
extern unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE;
extern unsigned IOCTL_SNDCTL_DSP_GETFMTS;
extern unsigned IOCTL_SNDCTL_DSP_NONBLOCK;
@@ -918,40 +1218,14 @@ namespace __sanitizer {
extern unsigned IOCTL_SOUND_PCM_READ_RATE;
extern unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS;
extern unsigned IOCTL_SOUND_PCM_WRITE_FILTER;
- extern unsigned IOCTL_TCFLSH;
- extern unsigned IOCTL_TCGETA;
- extern unsigned IOCTL_TCGETS;
- extern unsigned IOCTL_TCSBRK;
- extern unsigned IOCTL_TCSBRKP;
- extern unsigned IOCTL_TCSETA;
- extern unsigned IOCTL_TCSETAF;
- extern unsigned IOCTL_TCSETAW;
- extern unsigned IOCTL_TCSETS;
- extern unsigned IOCTL_TCSETSF;
- extern unsigned IOCTL_TCSETSW;
- extern unsigned IOCTL_TCXONC;
- extern unsigned IOCTL_TIOCGLCKTRMIOS;
- extern unsigned IOCTL_TIOCGSOFTCAR;
- extern unsigned IOCTL_TIOCINQ;
- extern unsigned IOCTL_TIOCLINUX;
- extern unsigned IOCTL_TIOCSERCONFIG;
- extern unsigned IOCTL_TIOCSERGETLSR;
- extern unsigned IOCTL_TIOCSERGWILD;
- extern unsigned IOCTL_TIOCSERSWILD;
- extern unsigned IOCTL_TIOCSLCKTRMIOS;
- extern unsigned IOCTL_TIOCSSOFTCAR;
extern unsigned IOCTL_VT_ACTIVATE;
- extern unsigned IOCTL_VT_DISALLOCATE;
extern unsigned IOCTL_VT_GETMODE;
- extern unsigned IOCTL_VT_GETSTATE;
extern unsigned IOCTL_VT_OPENQRY;
extern unsigned IOCTL_VT_RELDISP;
- extern unsigned IOCTL_VT_RESIZE;
- extern unsigned IOCTL_VT_RESIZEX;
- extern unsigned IOCTL_VT_SENDSIG;
extern unsigned IOCTL_VT_SETMODE;
extern unsigned IOCTL_VT_WAITACTIVE;
-#endif
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
+
#if SANITIZER_LINUX && !SANITIZER_ANDROID
extern unsigned IOCTL_CYGETDEFTHRESH;
extern unsigned IOCTL_CYGETDEFTIMEOUT;
@@ -977,37 +1251,25 @@ namespace __sanitizer {
extern unsigned IOCTL_FS_IOC_SETVERSION;
extern unsigned IOCTL_GIO_CMAP;
extern unsigned IOCTL_GIO_FONT;
- extern unsigned IOCTL_GIO_SCRNMAP;
extern unsigned IOCTL_GIO_UNIMAP;
extern unsigned IOCTL_GIO_UNISCRNMAP;
extern unsigned IOCTL_KDADDIO;
extern unsigned IOCTL_KDDELIO;
- extern unsigned IOCTL_KDDISABIO;
- extern unsigned IOCTL_KDENABIO;
extern unsigned IOCTL_KDGETKEYCODE;
- extern unsigned IOCTL_KDGETLED;
- extern unsigned IOCTL_KDGETMODE;
extern unsigned IOCTL_KDGKBDIACR;
extern unsigned IOCTL_KDGKBENT;
extern unsigned IOCTL_KDGKBLED;
extern unsigned IOCTL_KDGKBMETA;
- extern unsigned IOCTL_KDGKBMODE;
extern unsigned IOCTL_KDGKBSENT;
- extern unsigned IOCTL_KDGKBTYPE;
extern unsigned IOCTL_KDMAPDISP;
- extern unsigned IOCTL_KDMKTONE;
extern unsigned IOCTL_KDSETKEYCODE;
- extern unsigned IOCTL_KDSETLED;
- extern unsigned IOCTL_KDSETMODE;
extern unsigned IOCTL_KDSIGACCEPT;
extern unsigned IOCTL_KDSKBDIACR;
extern unsigned IOCTL_KDSKBENT;
extern unsigned IOCTL_KDSKBLED;
extern unsigned IOCTL_KDSKBMETA;
- extern unsigned IOCTL_KDSKBMODE;
extern unsigned IOCTL_KDSKBSENT;
extern unsigned IOCTL_KDUNMAPDISP;
- extern unsigned IOCTL_KIOCSOUND;
extern unsigned IOCTL_LPABORT;
extern unsigned IOCTL_LPABORTOPEN;
extern unsigned IOCTL_LPCAREFUL;
@@ -1022,7 +1284,6 @@ namespace __sanitizer {
extern unsigned IOCTL_MTIOCSETCONFIG;
extern unsigned IOCTL_PIO_CMAP;
extern unsigned IOCTL_PIO_FONT;
- extern unsigned IOCTL_PIO_SCRNMAP;
extern unsigned IOCTL_PIO_UNIMAP;
extern unsigned IOCTL_PIO_UNIMAPCLR;
extern unsigned IOCTL_PIO_UNISCRNMAP;
@@ -1050,9 +1311,29 @@ namespace __sanitizer {
extern unsigned IOCTL_TIOCSERGETMULTI;
extern unsigned IOCTL_TIOCSERSETMULTI;
extern unsigned IOCTL_TIOCSSERIAL;
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+ extern unsigned IOCTL_GIO_SCRNMAP;
+ extern unsigned IOCTL_KDDISABIO;
+ extern unsigned IOCTL_KDENABIO;
+ extern unsigned IOCTL_KDGETLED;
+ extern unsigned IOCTL_KDGETMODE;
+ extern unsigned IOCTL_KDGKBMODE;
+ extern unsigned IOCTL_KDGKBTYPE;
+ extern unsigned IOCTL_KDMKTONE;
+ extern unsigned IOCTL_KDSETLED;
+ extern unsigned IOCTL_KDSETMODE;
+ extern unsigned IOCTL_KDSKBMODE;
+ extern unsigned IOCTL_KIOCSOUND;
+ extern unsigned IOCTL_PIO_SCRNMAP;
#endif
+ extern const int errno_EINVAL;
extern const int errno_EOWNERDEAD;
+
+ extern const int si_SEGV_MAPERR;
+ extern const int si_SEGV_ACCERR;
} // namespace __sanitizer
#define CHECK_TYPE_SIZE(TYPE) \
diff --git a/libsanitizer/sanitizer_common/sanitizer_posix.cc b/libsanitizer/sanitizer_common/sanitizer_posix.cc
index ef5cb0b03b..adb1b2a9d6 100644
--- a/libsanitizer/sanitizer_common/sanitizer_posix.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_posix.cc
@@ -11,7 +11,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX || SANITIZER_MAC
+#if SANITIZER_POSIX
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
@@ -20,6 +20,14 @@
#include <sys/mman.h>
+#if SANITIZER_LINUX
+#include <sys/utsname.h>
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#include <sys/personality.h>
+#endif
+
namespace __sanitizer {
// ------------- sanitizer_common.h
@@ -27,21 +35,67 @@ uptr GetMmapGranularity() {
return GetPageSize();
}
+#if SANITIZER_WORDSIZE == 32
+// Take care of the unusable kernel area in the top gigabyte.
+static uptr GetKernelAreaSize() {
+#if SANITIZER_LINUX
+ const uptr gbyte = 1UL << 30;
+
+  // First, check whether any writable segments are
+  // mapped into the top gigabyte (e.g. the stack).
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ uptr end, prot;
+ while (proc_maps.Next(/*start*/0, &end,
+ /*offset*/0, /*filename*/0,
+ /*filename_size*/0, &prot)) {
+ if ((end >= 3 * gbyte)
+ && (prot & MemoryMappingLayout::kProtectionWrite) != 0)
+ return 0;
+ }
+
+#if !SANITIZER_ANDROID
+  // Even if nothing is mapped, the top gigabyte may still be accessible
+  // if we are running on a 64-bit kernel.
+  // uname() may report misleading results if the personality type
+  // is modified (e.g. under schroot), so check this as well.
+ struct utsname uname_info;
+ int pers = personality(0xffffffffUL);
+ if (!(pers & PER_MASK)
+ && uname(&uname_info) == 0
+ && internal_strstr(uname_info.machine, "64"))
+ return 0;
+#endif // SANITIZER_ANDROID
+
+ // Top gigabyte is reserved for kernel.
+ return gbyte;
+#else
+ return 0;
+#endif // SANITIZER_LINUX
+}
+#endif // SANITIZER_WORDSIZE == 32
+
uptr GetMaxVirtualAddress() {
#if SANITIZER_WORDSIZE == 64
-# if defined(__powerpc64__)
+# if defined(__powerpc64__) || defined(__aarch64__)
// On PowerPC64 we have two different address space layouts: 44- and 46-bit.
- // We somehow need to figure our which one we are using now and choose
+ // We somehow need to figure out which one we are using now and choose
// one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
// Note that with 'ulimit -s unlimited' the stack is moved away from the top
// of the address space, so simply checking the stack address is not enough.
- return (1ULL << 44) - 1; // 0x00000fffffffffffUL
+  // This works for both PowerPC64 endian modes.
+ // Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit.
+ return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
+# elif defined(__mips64)
+ return (1ULL << 40) - 1;
# else
return (1ULL << 47) - 1; // 0x00007fffffffffffUL;
# endif
#else // SANITIZER_WORDSIZE == 32
- // FIXME: We can probably lower this on Android?
- return (1ULL << 32) - 1; // 0xffffffff;
+ uptr res = (1ULL << 32) - 1; // 0xffffffff;
+ if (!common_flags()->full_address_space)
+ res -= GetKernelAreaSize();
+ CHECK_LT(reinterpret_cast<uptr>(&res), res);
+ return res;
#endif // SANITIZER_WORDSIZE
}
@@ -60,11 +114,13 @@ void *MmapOrDie(uptr size, const char *mem_type) {
Die();
}
recursion_count++;
- Report("ERROR: %s failed to allocate 0x%zx (%zd) bytes of %s: %d\n",
+ Report("ERROR: %s failed to "
+ "allocate 0x%zx (%zd) bytes of %s (errno: %d)\n",
SanitizerToolName, size, size, mem_type, reserrno);
DumpProcessMap();
CHECK("unable to mmap" && 0);
}
+ IncreaseTotalMmap(size);
return (void *)res;
}
@@ -76,6 +132,25 @@ void UnmapOrDie(void *addr, uptr size) {
SanitizerToolName, size, size, addr);
CHECK("unable to unmap" && 0);
}
+ DecreaseTotalMmap(size);
+}
+
+void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
+ uptr PageSize = GetPageSizeCached();
+ uptr p = internal_mmap(0,
+ RoundUpTo(size, PageSize),
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ -1, 0);
+ int reserrno;
+ if (internal_iserror(p, &reserrno)) {
+ Report("ERROR: %s failed to "
+ "allocate noreserve 0x%zx (%zd) bytes for '%s' (errno: %d)\n",
+ SanitizerToolName, size, size, mem_type, reserrno);
+ CHECK("unable to mmap" && 0);
+ }
+ IncreaseTotalMmap(size);
+ return (void *)p;
}
void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
@@ -87,9 +162,10 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
-1, 0);
int reserrno;
if (internal_iserror(p, &reserrno))
- Report("ERROR: "
- "%s failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n",
+ Report("ERROR: %s failed to "
+ "allocate 0x%zx (%zd) bytes at address %zx (errno: %d)\n",
SanitizerToolName, size, size, fixed_addr, reserrno);
+ IncreaseTotalMmap(size);
return (void *)p;
}
@@ -102,11 +178,12 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
-1, 0);
int reserrno;
if (internal_iserror(p, &reserrno)) {
- Report("ERROR:"
- " %s failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n",
+ Report("ERROR: %s failed to "
+ "allocate 0x%zx (%zd) bytes at address %zx (errno: %d)\n",
SanitizerToolName, size, size, fixed_addr, reserrno);
CHECK("unable to mmap" && 0);
}
+ IncreaseTotalMmap(size);
return (void *)p;
}
@@ -129,6 +206,17 @@ void *MapFileToMemory(const char *file_name, uptr *buff_size) {
return internal_iserror(map) ? 0 : (void *)map;
}
+void *MapWritableFileToMemory(void *addr, uptr size, uptr fd, uptr offset) {
+ uptr flags = MAP_SHARED;
+ if (addr) flags |= MAP_FIXED;
+ uptr p = internal_mmap(addr, size, PROT_READ | PROT_WRITE, flags, fd, offset);
+ if (internal_iserror(p)) {
+ Printf("could not map writable file (%zd, %zu, %zu): %zd\n", fd, offset,
+ size, p);
+ return 0;
+ }
+ return (void *)p;
+}
static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
uptr start2, uptr end2) {
@@ -157,7 +245,7 @@ void DumpProcessMap() {
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr start, end;
const sptr kBufSize = 4095;
- char *filename = (char*)MmapOrDie(kBufSize, __FUNCTION__);
+ char *filename = (char*)MmapOrDie(kBufSize, __func__);
Report("Process memory map follows:\n");
while (proc_maps.Next(&start, &end, /* file_offset */0,
filename, kBufSize, /* protection */0)) {
@@ -204,7 +292,7 @@ void MaybeOpenReportFile() {
if (report_fd_pid == pid) return;
InternalScopedBuffer<char> report_path_full(4096);
internal_snprintf(report_path_full.data(), report_path_full.size(),
- "%s.%d", report_path_prefix, pid);
+ "%s.%zu", report_path_prefix, pid);
uptr openrv = OpenFile(report_path_full.data(), true);
if (internal_iserror(openrv)) {
report_fd = kStderrFd;
@@ -248,4 +336,4 @@ bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
} // namespace __sanitizer
-#endif // SANITIZER_LINUX || SANITIZER_MAC
+#endif // SANITIZER_POSIX
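
A note on the 32-bit path above: GetMaxVirtualAddress() now subtracts the
kernel-reserved top gigabyte reported by GetKernelAreaSize() unless the
full_address_space flag is set. A standalone sketch of that arithmetic (not
sanitizer code; values assumed for a stock 32-bit Linux kernel):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t four_gb_minus_one = (1ULL << 32) - 1;       // 0xffffffff
      const uint64_t kernel_area = 1ULL << 30;                   // top gigabyte
      uint64_t max_user_addr = four_gb_minus_one - kernel_area;  // 0xbfffffff
      std::printf("32-bit user-space ceiling: 0x%llx\n",
                  (unsigned long long)max_user_addr);
      return 0;
    }
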
diff --git a/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc
index ae782ac39c..b4e42c7246 100644
--- a/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc
@@ -12,12 +12,15 @@
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX || SANITIZER_MAC
+#if SANITIZER_POSIX
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_stacktrace.h"
#include <errno.h>
#include <pthread.h>
+#include <signal.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/resource.h>
@@ -39,30 +42,49 @@ void FlushUnneededShadowMemory(uptr addr, uptr size) {
madvise((void*)addr, size, MADV_DONTNEED);
}
-void DisableCoreDumper() {
- struct rlimit nocore;
- nocore.rlim_cur = 0;
- nocore.rlim_max = 0;
- setrlimit(RLIMIT_CORE, &nocore);
+static rlim_t getlim(int res) {
+ rlimit rlim;
+ CHECK_EQ(0, getrlimit(res, &rlim));
+ return rlim.rlim_cur;
+}
+
+static void setlim(int res, rlim_t lim) {
+ // The following magic is to prevent clang from replacing it with memset.
+ volatile struct rlimit rlim;
+ rlim.rlim_cur = lim;
+ rlim.rlim_max = lim;
+ if (setrlimit(res, (struct rlimit*)&rlim)) {
+ Report("ERROR: %s setrlimit() failed %d\n", SanitizerToolName, errno);
+ Die();
+ }
+}
+
+void DisableCoreDumperIfNecessary() {
+ if (common_flags()->disable_coredump) {
+ setlim(RLIMIT_CORE, 0);
+ }
}
bool StackSizeIsUnlimited() {
- struct rlimit rlim;
- CHECK_EQ(0, getrlimit(RLIMIT_STACK, &rlim));
- return (rlim.rlim_cur == (uptr)-1);
+ rlim_t stack_size = getlim(RLIMIT_STACK);
+ return (stack_size == RLIM_INFINITY);
}
void SetStackSizeLimitInBytes(uptr limit) {
- struct rlimit rlim;
- rlim.rlim_cur = limit;
- rlim.rlim_max = limit;
- if (setrlimit(RLIMIT_STACK, &rlim)) {
- Report("ERROR: %s setrlimit() failed %d\n", SanitizerToolName, errno);
- Die();
- }
+ setlim(RLIMIT_STACK, (rlim_t)limit);
CHECK(!StackSizeIsUnlimited());
}
+bool AddressSpaceIsUnlimited() {
+ rlim_t as_size = getlim(RLIMIT_AS);
+ return (as_size == RLIM_INFINITY);
+}
+
+void SetAddressSpaceUnlimited() {
+ setlim(RLIMIT_AS, RLIM_INFINITY);
+ CHECK(AddressSpaceIsUnlimited());
+}
+
void SleepForSeconds(int seconds) {
sleep(seconds);
}
@@ -87,6 +109,83 @@ int internal_isatty(fd_t fd) {
return isatty(fd);
}
+#ifndef SANITIZER_GO
+// TODO(glider): different tools may require different altstack size.
+static const uptr kAltStackSize = SIGSTKSZ * 4; // SIGSTKSZ is not enough.
+
+void SetAlternateSignalStack() {
+ stack_t altstack, oldstack;
+ CHECK_EQ(0, sigaltstack(0, &oldstack));
+ // If the alternate stack is already in place, do nothing.
+ // Android always sets an alternate stack, but it's too small for us.
+ if (!SANITIZER_ANDROID && !(oldstack.ss_flags & SS_DISABLE)) return;
+ // TODO(glider): the mapped stack should have the MAP_STACK flag in the
+ // future. It is not required by man 2 sigaltstack now (they're using
+ // malloc()).
+ void* base = MmapOrDie(kAltStackSize, __func__);
+ altstack.ss_sp = (char*) base;
+ altstack.ss_flags = 0;
+ altstack.ss_size = kAltStackSize;
+ CHECK_EQ(0, sigaltstack(&altstack, 0));
+}
+
+void UnsetAlternateSignalStack() {
+ stack_t altstack, oldstack;
+ altstack.ss_sp = 0;
+ altstack.ss_flags = SS_DISABLE;
+ altstack.ss_size = kAltStackSize; // Some sane value required on Darwin.
+ CHECK_EQ(0, sigaltstack(&altstack, &oldstack));
+ UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
+}
+
+typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
+static void MaybeInstallSigaction(int signum,
+ SignalHandlerType handler) {
+ if (!IsDeadlySignal(signum))
+ return;
+ struct sigaction sigact;
+ internal_memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_sigaction = (sa_sigaction_t)handler;
+ // Do not block the signal from being received in that signal's handler.
+ // Clients are responsible for handling this correctly.
+ sigact.sa_flags = SA_SIGINFO | SA_NODEFER;
+ if (common_flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK;
+ CHECK_EQ(0, internal_sigaction(signum, &sigact, 0));
+ VReport(1, "Installed the sigaction for signal %d\n", signum);
+}
+
+void InstallDeadlySignalHandlers(SignalHandlerType handler) {
+ // Set the alternate signal stack for the main thread.
+ // This will cause SetAlternateSignalStack to be called twice, but the stack
+ // will be actually set only once.
+ if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
+ MaybeInstallSigaction(SIGSEGV, handler);
+ MaybeInstallSigaction(SIGBUS, handler);
+}
+#endif // SANITIZER_GO
+
+bool IsAccessibleMemoryRange(uptr beg, uptr size) {
+ uptr page_size = GetPageSizeCached();
+  // Checking very large memory ranges is slow.
+ CHECK_LT(size, page_size * 10);
+ int sock_pair[2];
+ if (pipe(sock_pair))
+ return false;
+ uptr bytes_written =
+ internal_write(sock_pair[1], reinterpret_cast<void *>(beg), size);
+ int write_errno;
+ bool result;
+ if (internal_iserror(bytes_written, &write_errno)) {
+ CHECK_EQ(EFAULT, write_errno);
+ result = false;
+ } else {
+ result = (bytes_written == size);
+ }
+ internal_close(sock_pair[0]);
+ internal_close(sock_pair[1]);
+ return result;
+}
+
} // namespace __sanitizer
-#endif
+#endif // SANITIZER_POSIX
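
The new IsAccessibleMemoryRange() above probes memory by writing it into a
pipe: an unreadable range makes write() fail with EFAULT instead of crashing
the caller. A minimal standalone sketch of the same idea (names here are
illustrative, not part of the sanitizer API; only small ranges are probed, so
the write cannot block on the pipe buffer):

    #include <cerrno>
    #include <cstdio>
    #include <unistd.h>

    static bool ProbeReadable(const void *p, size_t n) {
      int fds[2];
      if (pipe(fds)) return false;
      ssize_t written = write(fds[1], p, n);
      bool ok = (written == (ssize_t)n);
      if (written < 0 && errno == EFAULT) ok = false;
      close(fds[0]);
      close(fds[1]);
      return ok;
    }

    int main() {
      int x = 42;
      std::printf("stack int readable: %d\n", ProbeReadable(&x, sizeof(x)));
      return 0;
    }
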
diff --git a/libsanitizer/sanitizer_common/sanitizer_printf.cc b/libsanitizer/sanitizer_common/sanitizer_printf.cc
index 08951c7e24..599f2c5d7c 100644
--- a/libsanitizer/sanitizer_common/sanitizer_printf.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_printf.cc
@@ -14,12 +14,14 @@
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include <stdio.h>
#include <stdarg.h>
-#if SANITIZER_WINDOWS && !defined(va_copy)
+#if SANITIZER_WINDOWS && defined(_MSC_VER) && _MSC_VER < 1800 && \
+ !defined(va_copy)
# define va_copy(dst, src) ((dst) = (src))
#endif
@@ -92,11 +94,14 @@ static int AppendSignedDecimal(char **buff, const char *buff_end, s64 num,
minimal_num_length, pad_with_zero, negative);
}
-static int AppendString(char **buff, const char *buff_end, const char *s) {
+static int AppendString(char **buff, const char *buff_end, int precision,
+ const char *s) {
if (s == 0)
s = "<null>";
int result = 0;
for (; *s; s++) {
+ if (precision >= 0 && result >= precision)
+ break;
result += AppendChar(buff, buff_end, *s);
}
return result;
@@ -104,16 +109,16 @@ static int AppendString(char **buff, const char *buff_end, const char *s) {
static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {
int result = 0;
- result += AppendString(buff, buff_end, "0x");
+ result += AppendString(buff, buff_end, -1, "0x");
result += AppendUnsigned(buff, buff_end, ptr_value, 16,
- (SANITIZER_WORDSIZE == 64) ? 12 : 8, true);
+ SANITIZER_POINTER_FORMAT_LENGTH, true);
return result;
}
int VSNPrintf(char *buff, int buff_length,
const char *format, va_list args) {
static const char *kPrintfFormatsHelp =
- "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x}; %p; %s; %c\n";
+ "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x}; %p; %(\\.\\*)?s; %c\n";
RAW_CHECK(format);
RAW_CHECK(buff_length > 0);
const char *buff_end = &buff[buff_length - 1];
@@ -133,6 +138,12 @@ int VSNPrintf(char *buff, int buff_length,
width = width * 10 + *cur++ - '0';
}
}
+ bool have_precision = (cur[0] == '.' && cur[1] == '*');
+ int precision = -1;
+ if (have_precision) {
+ cur += 2;
+ precision = va_arg(args, int);
+ }
bool have_z = (*cur == 'z');
cur += have_z;
bool have_ll = !have_z && (cur[0] == 'l' && cur[1] == 'l');
@@ -140,6 +151,8 @@ int VSNPrintf(char *buff, int buff_length,
s64 dval;
u64 uval;
bool have_flags = have_width | have_z | have_ll;
+ // Only %s supports precision for now
+ CHECK(!(precision >= 0 && *cur != 's'));
switch (*cur) {
case 'd': {
dval = have_ll ? va_arg(args, s64)
@@ -165,7 +178,7 @@ int VSNPrintf(char *buff, int buff_length,
}
case 's': {
RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
- result += AppendString(&buff, buff_end, va_arg(args, char*));
+ result += AppendString(&buff, buff_end, precision, va_arg(args, char*));
break;
}
case 'c': {
@@ -258,6 +271,7 @@ static void SharedPrintfCode(bool append_pid, const char *format,
break;
}
RawWrite(buffer);
+ AndroidLogWrite(buffer);
CallPrintfAndReportCallback(buffer);
// If we had mapped any memory, clean up.
if (buffer != local_buffer)
@@ -265,6 +279,7 @@ static void SharedPrintfCode(bool append_pid, const char *format,
va_end(args2);
}
+FORMAT(1, 2)
void Printf(const char *format, ...) {
va_list args;
va_start(args, format);
@@ -273,6 +288,7 @@ void Printf(const char *format, ...) {
}
// Like Printf, but prints the current PID before the output string.
+FORMAT(1, 2)
void Report(const char *format, ...) {
va_list args;
va_start(args, format);
@@ -284,6 +300,7 @@ void Report(const char *format, ...) {
// Returns the number of symbols that should have been written to buffer
// (not including trailing '\0'). Thus, the string is truncated
// iff return value is not less than "length".
+FORMAT(3, 4)
int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
va_list args;
va_start(args, format);
@@ -292,6 +309,7 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
return needed_length;
}
+FORMAT(2, 3)
void InternalScopedString::append(const char *format, ...) {
CHECK_LT(length_, size());
va_list args;
@@ -299,6 +317,7 @@ void InternalScopedString::append(const char *format, ...) {
VSNPrintf(data() + length_, size() - length_, format, args);
va_end(args);
length_ += internal_strlen(data() + length_);
+ CHECK_LT(length_, size());
}
} // namespace __sanitizer
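
The precision support added to VSNPrintf above gives the sanitizer printf a
"%.*s" form with the same meaning as in libc: the extra int argument caps how
many characters of the string are emitted. A standalone sketch using libc
snprintf to show the semantics:

    #include <cstdio>

    int main() {
      char buf[32];
      std::snprintf(buf, sizeof(buf), "%.*s", 4, "sanitizer");
      std::printf("%s\n", buf);  // prints "sani"
      return 0;
    }
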
diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps.h b/libsanitizer/sanitizer_common/sanitizer_procmaps.h
index 87887f6b74..7477abf30b 100644
--- a/libsanitizer/sanitizer_common/sanitizer_procmaps.h
+++ b/libsanitizer/sanitizer_common/sanitizer_procmaps.h
@@ -12,49 +12,38 @@
#ifndef SANITIZER_PROCMAPS_H
#define SANITIZER_PROCMAPS_H
+#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
namespace __sanitizer {
-#if SANITIZER_WINDOWS
-class MemoryMappingLayout {
- public:
- explicit MemoryMappingLayout(bool cache_enabled) {
- (void)cache_enabled;
- }
- bool GetObjectNameAndOffset(uptr addr, uptr *offset,
- char filename[], uptr filename_size,
- uptr *protection) {
- UNIMPLEMENTED();
- }
-};
-
-#else // SANITIZER_WINDOWS
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
struct ProcSelfMapsBuff {
char *data;
uptr mmaped_size;
uptr len;
};
-#endif // SANITIZER_LINUX
+
+// Reads process memory map in an OS-specific way.
+void ReadProcMaps(ProcSelfMapsBuff *proc_maps);
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
class MemoryMappingLayout {
public:
explicit MemoryMappingLayout(bool cache_enabled);
+ ~MemoryMappingLayout();
bool Next(uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size, uptr *protection);
void Reset();
- // Gets the object file name and the offset in that object for a given
- // address 'addr'. Returns true on success.
- bool GetObjectNameAndOffset(uptr addr, uptr *offset,
- char filename[], uptr filename_size,
- uptr *protection);
// In some cases, e.g. when running under a sandbox on Linux, ASan is unable
// to obtain the memory mappings. It should fall back to pre-cached data
// instead of aborting.
static void CacheMemoryMappings();
- ~MemoryMappingLayout();
+
+ // Stores the list of mapped objects into an array.
+ uptr DumpListOfModules(LoadedModule *modules, uptr max_modules,
+ string_predicate_t filter);
// Memory protection masks.
static const uptr kProtectionRead = 1;
@@ -64,41 +53,12 @@ class MemoryMappingLayout {
private:
void LoadFromCache();
- // Default implementation of GetObjectNameAndOffset.
- // Quite slow, because it iterates through the whole process map for each
- // lookup.
- bool IterateForObjectNameAndOffset(uptr addr, uptr *offset,
- char filename[], uptr filename_size,
- uptr *protection) {
- Reset();
- uptr start, end, file_offset;
- for (int i = 0; Next(&start, &end, &file_offset, filename, filename_size,
- protection);
- i++) {
- if (addr >= start && addr < end) {
- // Don't subtract 'start' for the first entry:
- // * If a binary is compiled w/o -pie, then the first entry in
- // process maps is likely the binary itself (all dynamic libs
- // are mapped higher in address space). For such a binary,
- // instruction offset in binary coincides with the actual
- // instruction address in virtual memory (as code section
- // is mapped to a fixed memory range).
- // * If a binary is compiled with -pie, all the modules are
- // mapped high at address space (in particular, higher than
- // shadow memory of the tool), so the module can't be the
- // first entry.
- *offset = (addr - (i ? start : 0)) + file_offset;
- return true;
- }
- }
- if (filename_size)
- filename[0] = '\0';
- return false;
- }
-
-# if SANITIZER_LINUX
+
+ // FIXME: Hide implementation details for different platforms in
+ // platform-specific files.
+# if SANITIZER_FREEBSD || SANITIZER_LINUX
ProcSelfMapsBuff proc_self_maps_;
- char *current_;
+ const char *current_;
// Static mappings cache.
static ProcSelfMapsBuff cached_proc_self_maps_;
@@ -127,7 +87,10 @@ void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);
// Returns code range for the specified module.
bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end);
-#endif // SANITIZER_WINDOWS
+bool IsDecimal(char c);
+uptr ParseDecimal(const char **p);
+bool IsHex(char c);
+uptr ParseHex(const char **p);
} // namespace __sanitizer
diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_common.cc b/libsanitizer/sanitizer_common/sanitizer_procmaps_common.cc
new file mode 100644
index 0000000000..ca9900fe9e
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_procmaps_common.cc
@@ -0,0 +1,176 @@
+//===-- sanitizer_procmaps_common.cc --------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (common parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
+#include "sanitizer_common.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+
+namespace __sanitizer {
+
+// Linker initialized.
+ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
+StaticSpinMutex MemoryMappingLayout::cache_lock_; // Linker initialized.
+
+static int TranslateDigit(char c) {
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ if (c >= 'a' && c <= 'f')
+ return c - 'a' + 10;
+ if (c >= 'A' && c <= 'F')
+ return c - 'A' + 10;
+ return -1;
+}
+
+// Parse a number and advance 'p' to the first non-digit character.
+static uptr ParseNumber(const char **p, int base) {
+ uptr n = 0;
+ int d;
+ CHECK(base >= 2 && base <= 16);
+ while ((d = TranslateDigit(**p)) >= 0 && d < base) {
+ n = n * base + d;
+ (*p)++;
+ }
+ return n;
+}
+
+bool IsDecimal(char c) {
+ int d = TranslateDigit(c);
+ return d >= 0 && d < 10;
+}
+
+uptr ParseDecimal(const char **p) {
+ return ParseNumber(p, 10);
+}
+
+bool IsHex(char c) {
+ int d = TranslateDigit(c);
+ return d >= 0 && d < 16;
+}
+
+uptr ParseHex(const char **p) {
+ return ParseNumber(p, 16);
+}
+
+MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
+ ReadProcMaps(&proc_self_maps_);
+ if (cache_enabled) {
+ if (proc_self_maps_.mmaped_size == 0) {
+ LoadFromCache();
+ CHECK_GT(proc_self_maps_.len, 0);
+ }
+ } else {
+ CHECK_GT(proc_self_maps_.mmaped_size, 0);
+ }
+ Reset();
+ // FIXME: in the future we may want to cache the mappings on demand only.
+ if (cache_enabled)
+ CacheMemoryMappings();
+}
+
+MemoryMappingLayout::~MemoryMappingLayout() {
+ // Only unmap the buffer if it is different from the cached one. Otherwise
+ // it will be unmapped when the cache is refreshed.
+ if (proc_self_maps_.data != cached_proc_self_maps_.data) {
+ UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size);
+ }
+}
+
+void MemoryMappingLayout::Reset() {
+ current_ = proc_self_maps_.data;
+}
+
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+ SpinMutexLock l(&cache_lock_);
+ // Don't invalidate the cache if the mappings are unavailable.
+ ProcSelfMapsBuff old_proc_self_maps;
+ old_proc_self_maps = cached_proc_self_maps_;
+ ReadProcMaps(&cached_proc_self_maps_);
+ if (cached_proc_self_maps_.mmaped_size == 0) {
+ cached_proc_self_maps_ = old_proc_self_maps;
+ } else {
+ if (old_proc_self_maps.mmaped_size) {
+ UnmapOrDie(old_proc_self_maps.data,
+ old_proc_self_maps.mmaped_size);
+ }
+ }
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+ SpinMutexLock l(&cache_lock_);
+ if (cached_proc_self_maps_.data) {
+ proc_self_maps_ = cached_proc_self_maps_;
+ }
+}
+
+uptr MemoryMappingLayout::DumpListOfModules(LoadedModule *modules,
+ uptr max_modules,
+ string_predicate_t filter) {
+ Reset();
+ uptr cur_beg, cur_end, cur_offset, prot;
+ InternalScopedBuffer<char> module_name(kMaxPathLength);
+ uptr n_modules = 0;
+ for (uptr i = 0; n_modules < max_modules &&
+ Next(&cur_beg, &cur_end, &cur_offset, module_name.data(),
+ module_name.size(), &prot);
+ i++) {
+ const char *cur_name = module_name.data();
+ if (cur_name[0] == '\0')
+ continue;
+ if (filter && !filter(cur_name))
+ continue;
+ void *mem = &modules[n_modules];
+ // Don't subtract 'cur_beg' from the first entry:
+ // * If a binary is compiled w/o -pie, then the first entry in
+ // process maps is likely the binary itself (all dynamic libs
+ // are mapped higher in address space). For such a binary,
+ // instruction offset in binary coincides with the actual
+ // instruction address in virtual memory (as code section
+ // is mapped to a fixed memory range).
+ // * If a binary is compiled with -pie, all the modules are
+ // mapped high at address space (in particular, higher than
+ // shadow memory of the tool), so the module can't be the
+ // first entry.
+ uptr base_address = (i ? cur_beg : 0) - cur_offset;
+ LoadedModule *cur_module = new(mem) LoadedModule(cur_name, base_address);
+ cur_module->addAddressRange(cur_beg, cur_end, prot & kProtectionExecute);
+ n_modules++;
+ }
+ return n_modules;
+}
+
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {
+ char *smaps = 0;
+ uptr smaps_cap = 0;
+ uptr smaps_len = ReadFileToBuffer("/proc/self/smaps",
+ &smaps, &smaps_cap, 64<<20);
+ uptr start = 0;
+ bool file = false;
+ const char *pos = smaps;
+ while (pos < smaps + smaps_len) {
+ if (IsHex(pos[0])) {
+ start = ParseHex(&pos);
+ for (; *pos != '/' && *pos > '\n'; pos++) {}
+ file = *pos == '/';
+ } else if (internal_strncmp(pos, "Rss:", 4) == 0) {
+ while (!IsDecimal(*pos)) pos++;
+ uptr rss = ParseDecimal(&pos) * 1024;
+ cb(start, rss, file, stats, stats_size);
+ }
+ while (*pos++ != '\n') {}
+ }
+ UnmapOrDie(smaps, smaps_cap);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
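
The IsHex/ParseHex/IsDecimal/ParseDecimal helpers introduced above are the
building blocks for parsing /proc/self/maps-style lines. A standalone sketch
that re-implements the hex parser (so the snippet compiles outside the
runtime) and pulls the address range out of one such line:

    #include <cstdint>
    #include <cstdio>

    static uint64_t ParseHex(const char **p) {
      uint64_t n = 0;
      while (true) {
        char c = **p;
        int d;
        if (c >= '0' && c <= '9') d = c - '0';
        else if (c >= 'a' && c <= 'f') d = c - 'a' + 10;
        else if (c >= 'A' && c <= 'F') d = c - 'A' + 10;
        else break;
        n = n * 16 + d;
        ++*p;  // advance past the digit
      }
      return n;
    }

    int main() {
      const char *p = "08048000-08056000 r-xp 00000000 03:0c 64593 /foo/bar";
      uint64_t start = ParseHex(&p);
      ++p;  // skip '-'
      uint64_t end = ParseHex(&p);
      std::printf("start=0x%llx end=0x%llx\n",
                  (unsigned long long)start, (unsigned long long)end);
      return 0;
    }
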
diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_freebsd.cc b/libsanitizer/sanitizer_common/sanitizer_procmaps_freebsd.cc
new file mode 100644
index 0000000000..fbc55203ab
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_procmaps_freebsd.cc
@@ -0,0 +1,86 @@
+//===-- sanitizer_procmaps_freebsd.cc -------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (FreeBSD-specific parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FREEBSD
+#include "sanitizer_common.h"
+#include "sanitizer_freebsd.h"
+#include "sanitizer_procmaps.h"
+
+#include <unistd.h>
+#include <sys/sysctl.h>
+#include <sys/user.h>
+
+// Fix 'kinfo_vmentry' definition on FreeBSD prior to v9.2 in 32-bit mode.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
+# include <osreldate.h>
+# if __FreeBSD_version <= 902001 // v9.2
+# define kinfo_vmentry xkinfo_vmentry
+# endif
+#endif
+
+namespace __sanitizer {
+
+void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
+ const int Mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid() };
+ size_t Size = 0;
+ int Err = sysctl(Mib, 4, NULL, &Size, NULL, 0);
+ CHECK_EQ(Err, 0);
+ CHECK_GT(Size, 0);
+
+ size_t MmapedSize = Size * 4 / 3;
+ void *VmMap = MmapOrDie(MmapedSize, "ReadProcMaps()");
+ Size = MmapedSize;
+ Err = sysctl(Mib, 4, VmMap, &Size, NULL, 0);
+ CHECK_EQ(Err, 0);
+
+ proc_maps->data = (char*)VmMap;
+ proc_maps->mmaped_size = MmapedSize;
+ proc_maps->len = Size;
+}
+
+bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size,
+ uptr *protection) {
+ char *last = proc_self_maps_.data + proc_self_maps_.len;
+ if (current_ >= last) return false;
+ uptr dummy;
+ if (!start) start = &dummy;
+ if (!end) end = &dummy;
+ if (!offset) offset = &dummy;
+ if (!protection) protection = &dummy;
+ struct kinfo_vmentry *VmEntry = (struct kinfo_vmentry*)current_;
+
+ *start = (uptr)VmEntry->kve_start;
+ *end = (uptr)VmEntry->kve_end;
+ *offset = (uptr)VmEntry->kve_offset;
+
+ *protection = 0;
+ if ((VmEntry->kve_protection & KVME_PROT_READ) != 0)
+ *protection |= kProtectionRead;
+ if ((VmEntry->kve_protection & KVME_PROT_WRITE) != 0)
+ *protection |= kProtectionWrite;
+ if ((VmEntry->kve_protection & KVME_PROT_EXEC) != 0)
+ *protection |= kProtectionExecute;
+
+ if (filename != NULL && filename_size > 0) {
+ internal_snprintf(filename,
+ Min(filename_size, (uptr)PATH_MAX),
+ "%s", VmEntry->kve_path);
+ }
+
+ current_ += VmEntry->kve_structsize;
+
+ return true;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FREEBSD
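
ReadProcMaps() above uses the classic two-step sysctl pattern: query the size
of the KERN_PROC_VMMAP blob first, then fetch it into a buffer allocated with
some slack, since the map can grow between the two calls. A FreeBSD-only
standalone sketch of the same pattern (illustrative, not sanitizer code):

    #include <cstdio>
    #include <cstdlib>
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <sys/user.h>

    int main() {
      int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid() };
      size_t size = 0;
      if (sysctl(mib, 4, NULL, &size, NULL, 0) != 0) return 1;  // size query
      size_t cap = size * 4 / 3;            // slack factor used in the patch
      void *buf = malloc(cap);
      size = cap;
      if (sysctl(mib, 4, buf, &size, NULL, 0) != 0) return 1;   // real fetch
      std::printf("vm map blob: %zu bytes\n", size);
      free(buf);
      return 0;
    }
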
diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc b/libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc
new file mode 100644
index 0000000000..43babf8931
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc
@@ -0,0 +1,88 @@
+//===-- sanitizer_procmaps_linux.cc ---------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (Linux-specific parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_LINUX
+#include "sanitizer_common.h"
+#include "sanitizer_procmaps.h"
+
+namespace __sanitizer {
+
+void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
+ proc_maps->len = ReadFileToBuffer("/proc/self/maps", &proc_maps->data,
+ &proc_maps->mmaped_size, 1 << 26);
+}
+
+static bool IsOneOf(char c, char c1, char c2) {
+ return c == c1 || c == c2;
+}
+
+bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size,
+ uptr *protection) {
+ char *last = proc_self_maps_.data + proc_self_maps_.len;
+ if (current_ >= last) return false;
+ uptr dummy;
+ if (!start) start = &dummy;
+ if (!end) end = &dummy;
+ if (!offset) offset = &dummy;
+ if (!protection) protection = &dummy;
+ char *next_line = (char*)internal_memchr(current_, '\n', last - current_);
+ if (next_line == 0)
+ next_line = last;
+ // Example: 08048000-08056000 r-xp 00000000 03:0c 64593 /foo/bar
+ *start = ParseHex(&current_);
+ CHECK_EQ(*current_++, '-');
+ *end = ParseHex(&current_);
+ CHECK_EQ(*current_++, ' ');
+ CHECK(IsOneOf(*current_, '-', 'r'));
+ *protection = 0;
+ if (*current_++ == 'r')
+ *protection |= kProtectionRead;
+ CHECK(IsOneOf(*current_, '-', 'w'));
+ if (*current_++ == 'w')
+ *protection |= kProtectionWrite;
+ CHECK(IsOneOf(*current_, '-', 'x'));
+ if (*current_++ == 'x')
+ *protection |= kProtectionExecute;
+ CHECK(IsOneOf(*current_, 's', 'p'));
+ if (*current_++ == 's')
+ *protection |= kProtectionShared;
+ CHECK_EQ(*current_++, ' ');
+ *offset = ParseHex(&current_);
+ CHECK_EQ(*current_++, ' ');
+ ParseHex(&current_);
+ CHECK_EQ(*current_++, ':');
+ ParseHex(&current_);
+ CHECK_EQ(*current_++, ' ');
+ while (IsDecimal(*current_))
+ current_++;
+ // Qemu may lack the trailing space.
+ // http://code.google.com/p/address-sanitizer/issues/detail?id=160
+ // CHECK_EQ(*current_++, ' ');
+ // Skip spaces.
+ while (current_ < next_line && *current_ == ' ')
+ current_++;
+ // Fill in the filename.
+ uptr i = 0;
+ while (current_ < next_line) {
+ if (filename && i < filename_size - 1)
+ filename[i++] = *current_;
+ current_++;
+ }
+ if (filename && i < filename_size)
+ filename[i] = 0;
+ current_ = next_line + 1;
+ return true;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LINUX
diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc b/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc
new file mode 100644
index 0000000000..81874c21b1
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc
@@ -0,0 +1,188 @@
+//===-- sanitizer_procmaps_mac.cc -----------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (Mac-specific parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+#include "sanitizer_common.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+
+#include <mach-o/dyld.h>
+#include <mach-o/loader.h>
+
+namespace __sanitizer {
+
+MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
+ Reset();
+}
+
+MemoryMappingLayout::~MemoryMappingLayout() {
+}
+
+// More information about Mach-O headers can be found in mach-o/loader.h
+// Each Mach-O image has a header (mach_header or mach_header_64) starting with
+// a magic number, and a list of linker load commands directly following the
+// header.
+// A load command is at least two 32-bit words: the command type and the
+// command size in bytes. We're interested only in segment load commands
+// (LC_SEGMENT and LC_SEGMENT_64), which tell that a part of the file is mapped
+// into the task's address space.
+// The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or
+// segment_command_64 correspond to the memory address, memory size and the
+// file offset of the current memory segment.
+// Because these fields are taken from the images as is, one needs to add
+// _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.
+
+void MemoryMappingLayout::Reset() {
+ // Count down from the top.
+ // TODO(glider): as per man 3 dyld, iterating over the headers with
+ // _dyld_image_count is thread-unsafe. We need to register callbacks for
+ // adding and removing images which will invalidate the MemoryMappingLayout
+ // state.
+ current_image_ = _dyld_image_count();
+ current_load_cmd_count_ = -1;
+ current_load_cmd_addr_ = 0;
+ current_magic_ = 0;
+ current_filetype_ = 0;
+}
+
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+ // No-op on Mac for now.
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+ // No-op on Mac for now.
+}
+
+// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
+// Google Perftools, http://code.google.com/p/google-perftools.
+
+// NextSegmentLoad scans the current image for the next segment load command
+// and returns the start and end addresses and file offset of the corresponding
+// segment.
+// Note that the segment addresses are not necessarily sorted.
+template<u32 kLCSegment, typename SegmentCommand>
+bool MemoryMappingLayout::NextSegmentLoad(
+ uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size, uptr *protection) {
+ const char* lc = current_load_cmd_addr_;
+ current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
+ if (((const load_command *)lc)->cmd == kLCSegment) {
+ const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
+ const SegmentCommand* sc = (const SegmentCommand *)lc;
+ if (start) *start = sc->vmaddr + dlloff;
+ if (protection) {
+ // Return the initial protection.
+ *protection = sc->initprot;
+ }
+ if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
+ if (offset) {
+ if (current_filetype_ == /*MH_EXECUTE*/ 0x2) {
+ *offset = sc->vmaddr;
+ } else {
+ *offset = sc->fileoff;
+ }
+ }
+ if (filename) {
+ internal_strncpy(filename, _dyld_get_image_name(current_image_),
+ filename_size);
+ }
+ return true;
+ }
+ return false;
+}
+
+bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size,
+ uptr *protection) {
+ for (; current_image_ >= 0; current_image_--) {
+ const mach_header* hdr = _dyld_get_image_header(current_image_);
+ if (!hdr) continue;
+ if (current_load_cmd_count_ < 0) {
+      // Set up for this image.
+ current_load_cmd_count_ = hdr->ncmds;
+ current_magic_ = hdr->magic;
+ current_filetype_ = hdr->filetype;
+ switch (current_magic_) {
+#ifdef MH_MAGIC_64
+ case MH_MAGIC_64: {
+ current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header_64);
+ break;
+ }
+#endif
+ case MH_MAGIC: {
+ current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header);
+ break;
+ }
+ default: {
+ continue;
+ }
+ }
+ }
+
+ for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
+ switch (current_magic_) {
+ // current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
+#ifdef MH_MAGIC_64
+ case MH_MAGIC_64: {
+ if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
+ start, end, offset, filename, filename_size, protection))
+ return true;
+ break;
+ }
+#endif
+ case MH_MAGIC: {
+ if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
+ start, end, offset, filename, filename_size, protection))
+ return true;
+ break;
+ }
+ }
+ }
+ // If we get here, no more load_cmd's in this image talk about
+ // segments. Go on to the next image.
+ }
+ return false;
+}
+
+uptr MemoryMappingLayout::DumpListOfModules(LoadedModule *modules,
+ uptr max_modules,
+ string_predicate_t filter) {
+ Reset();
+ uptr cur_beg, cur_end, prot;
+ InternalScopedBuffer<char> module_name(kMaxPathLength);
+ uptr n_modules = 0;
+ for (uptr i = 0; n_modules < max_modules &&
+ Next(&cur_beg, &cur_end, 0, module_name.data(),
+ module_name.size(), &prot);
+ i++) {
+ const char *cur_name = module_name.data();
+ if (cur_name[0] == '\0')
+ continue;
+ if (filter && !filter(cur_name))
+ continue;
+ LoadedModule *cur_module = 0;
+ if (n_modules > 0 &&
+ 0 == internal_strcmp(cur_name, modules[n_modules - 1].full_name())) {
+ cur_module = &modules[n_modules - 1];
+ } else {
+ void *mem = &modules[n_modules];
+ cur_module = new(mem) LoadedModule(cur_name, cur_beg);
+ n_modules++;
+ }
+ cur_module->addAddressRange(cur_beg, cur_end, prot & kProtectionExecute);
+ }
+ return n_modules;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC
diff --git a/libsanitizer/sanitizer_common/sanitizer_report_decorator.h b/libsanitizer/sanitizer_common/sanitizer_report_decorator.h
index c6e79ad0c7..e9be29fb3d 100644
--- a/libsanitizer/sanitizer_common/sanitizer_report_decorator.h
+++ b/libsanitizer/sanitizer_common/sanitizer_report_decorator.h
@@ -15,13 +15,19 @@
#ifndef SANITIZER_REPORT_DECORATOR_H
#define SANITIZER_REPORT_DECORATOR_H
+#include "sanitizer_common.h"
+
namespace __sanitizer {
-class AnsiColorDecorator {
+class SanitizerCommonDecorator {
// FIXME: This is not portable. It assumes the special strings are printed to
// stdout, which is not the case on Windows (see SetConsoleTextAttribute()).
public:
- explicit AnsiColorDecorator(bool use_ansi_colors) : ansi_(use_ansi_colors) { }
+ SanitizerCommonDecorator() : ansi_(ColorizeReports()) {}
const char *Bold() const { return ansi_ ? "\033[1m" : ""; }
+ const char *Default() const { return ansi_ ? "\033[1m\033[0m" : ""; }
+ const char *Warning() { return Red(); }
+ const char *EndWarning() { return Default(); }
+ protected:
const char *Black() const { return ansi_ ? "\033[1m\033[30m" : ""; }
const char *Red() const { return ansi_ ? "\033[1m\033[31m" : ""; }
const char *Green() const { return ansi_ ? "\033[1m\033[32m" : ""; }
@@ -30,10 +36,10 @@ class AnsiColorDecorator {
const char *Magenta() const { return ansi_ ? "\033[1m\033[35m" : ""; }
const char *Cyan() const { return ansi_ ? "\033[1m\033[36m" : ""; }
const char *White() const { return ansi_ ? "\033[1m\033[37m" : ""; }
- const char *Default() const { return ansi_ ? "\033[1m\033[0m" : ""; }
private:
bool ansi_;
};
+
} // namespace __sanitizer
#endif // SANITIZER_REPORT_DECORATOR_H
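
The renamed SanitizerCommonDecorator above no longer takes a constructor flag;
it asks ColorizeReports() itself and wraps report text in bold ANSI escapes. A
standalone sketch of the escape scheme it emits (Warning() is bold red,
Default()/EndWarning() reset):

    #include <cstdio>

    int main() {
      const bool ansi = true;  // stands in for ColorizeReports()
      const char *warn = ansi ? "\033[1m\033[31m" : "";
      const char *end  = ansi ? "\033[1m\033[0m"  : "";
      std::printf("%sERROR: something went wrong%s\n", warn, end);
      return 0;
    }
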
diff --git a/libsanitizer/sanitizer_common/sanitizer_stackdepot.cc b/libsanitizer/sanitizer_common/sanitizer_stackdepot.cc
index e8d9f01e7f..a3c9c0677e 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stackdepot.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_stackdepot.cc
@@ -10,193 +10,118 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_stackdepot.h"
+
#include "sanitizer_common.h"
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_mutex.h"
-#include "sanitizer_atomic.h"
+#include "sanitizer_stackdepotbase.h"
namespace __sanitizer {
-const int kTabSize = 1024 * 1024; // Hash table size.
-const int kPartBits = 8;
-const int kPartShift = sizeof(u32) * 8 - kPartBits - 1;
-const int kPartCount = 1 << kPartBits; // Number of subparts in the table.
-const int kPartSize = kTabSize / kPartCount;
-const int kMaxId = 1 << kPartShift;
-
-struct StackDesc {
- StackDesc *link;
+struct StackDepotNode {
+ StackDepotNode *link;
u32 id;
- u32 hash;
+ atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
uptr size;
uptr stack[1]; // [size]
-};
-static struct {
- StaticSpinMutex mtx; // Protects alloc of new blocks for region allocator.
- atomic_uintptr_t region_pos; // Region allocator for StackDesc's.
- atomic_uintptr_t region_end;
- atomic_uintptr_t tab[kTabSize]; // Hash table of StackDesc's.
- atomic_uint32_t seq[kPartCount]; // Unique id generators.
-} depot;
+ static const u32 kTabSizeLog = 20;
+ // Lower kTabSizeLog bits are equal for all items in one bucket.
+ // We use these bits to store the per-stack use counter.
+ static const u32 kUseCountBits = kTabSizeLog;
+ static const u32 kMaxUseCount = 1 << kUseCountBits;
+ static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
+ static const u32 kHashMask = ~kUseCountMask;
+
+ typedef StackTrace args_type;
+ bool eq(u32 hash, const args_type &args) const {
+ u32 hash_bits =
+ atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
+ if ((hash & kHashMask) != hash_bits || args.size != size) return false;
+ uptr i = 0;
+ for (; i < size; i++) {
+ if (stack[i] != args.trace[i]) return false;
+ }
+ return true;
+ }
+ static uptr storage_size(const args_type &args) {
+ return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
+ }
+ static u32 hash(const args_type &args) {
+ // murmur2
+ const u32 m = 0x5bd1e995;
+ const u32 seed = 0x9747b28c;
+ const u32 r = 24;
+ u32 h = seed ^ (args.size * sizeof(uptr));
+ for (uptr i = 0; i < args.size; i++) {
+ u32 k = args.trace[i];
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h *= m;
+ h ^= k;
+ }
+ h ^= h >> 13;
+ h *= m;
+ h ^= h >> 15;
+ return h;
+ }
+ static bool is_valid(const args_type &args) {
+ return args.size > 0 && args.trace;
+ }
+ void store(const args_type &args, u32 hash) {
+ atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
+ size = args.size;
+ internal_memcpy(stack, args.trace, size * sizeof(uptr));
+ }
+ args_type load() const {
+ return args_type(&stack[0], size);
+ }
+ StackDepotHandle get_handle() { return StackDepotHandle(this); }
-static StackDepotStats stats;
+ typedef StackDepotHandle handle_type;
+};
-StackDepotStats *StackDepotGetStats() {
- return &stats;
-}
+COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);
-static u32 hash(const uptr *stack, uptr size) {
- // murmur2
- const u32 m = 0x5bd1e995;
- const u32 seed = 0x9747b28c;
- const u32 r = 24;
- u32 h = seed ^ (size * sizeof(uptr));
- for (uptr i = 0; i < size; i++) {
- u32 k = stack[i];
- k *= m;
- k ^= k >> r;
- k *= m;
- h *= m;
- h ^= k;
- }
- h ^= h >> 13;
- h *= m;
- h ^= h >> 15;
- return h;
+u32 StackDepotHandle::id() { return node_->id; }
+int StackDepotHandle::use_count() {
+ return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
+ StackDepotNode::kUseCountMask;
}
-
-static StackDesc *tryallocDesc(uptr memsz) {
-  // Optimistic lock-free allocation, essentially try to bump the region ptr.
- for (;;) {
- uptr cmp = atomic_load(&depot.region_pos, memory_order_acquire);
- uptr end = atomic_load(&depot.region_end, memory_order_acquire);
- if (cmp == 0 || cmp + memsz > end)
- return 0;
- if (atomic_compare_exchange_weak(
- &depot.region_pos, &cmp, cmp + memsz,
- memory_order_acquire))
- return (StackDesc*)cmp;
- }
+void StackDepotHandle::inc_use_count_unsafe() {
+ u32 prev =
+ atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
+ StackDepotNode::kUseCountMask;
+ CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
}
-static StackDesc *allocDesc(uptr size) {
-  // First, try to allocate optimistically.
- uptr memsz = sizeof(StackDesc) + (size - 1) * sizeof(uptr);
- StackDesc *s = tryallocDesc(memsz);
- if (s)
- return s;
- // If failed, lock, retry and alloc new superblock.
- SpinMutexLock l(&depot.mtx);
- for (;;) {
- s = tryallocDesc(memsz);
- if (s)
- return s;
- atomic_store(&depot.region_pos, 0, memory_order_relaxed);
- uptr allocsz = 64 * 1024;
- if (allocsz < memsz)
- allocsz = memsz;
- uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
- stats.mapped += allocsz;
- atomic_store(&depot.region_end, mem + allocsz, memory_order_release);
- atomic_store(&depot.region_pos, mem, memory_order_release);
- }
+// FIXME(dvyukov): this single reserved bit is used in TSan.
+typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
+ StackDepot;
+static StackDepot theDepot;
+
+StackDepotStats *StackDepotGetStats() {
+ return theDepot.GetStats();
}
-static u32 find(StackDesc *s, const uptr *stack, uptr size, u32 hash) {
- // Searches linked list s for the stack, returns its id.
- for (; s; s = s->link) {
- if (s->hash == hash && s->size == size) {
- uptr i = 0;
- for (; i < size; i++) {
- if (stack[i] != s->stack[i])
- break;
- }
- if (i == size)
- return s->id;
- }
- }
- return 0;
+u32 StackDepotPut(StackTrace stack) {
+ StackDepotHandle h = theDepot.Put(stack);
+ return h.valid() ? h.id() : 0;
}
-static StackDesc *lock(atomic_uintptr_t *p) {
- // Uses the pointer lsb as mutex.
- for (int i = 0;; i++) {
- uptr cmp = atomic_load(p, memory_order_relaxed);
- if ((cmp & 1) == 0
- && atomic_compare_exchange_weak(p, &cmp, cmp | 1,
- memory_order_acquire))
- return (StackDesc*)cmp;
- if (i < 10)
- proc_yield(10);
- else
- internal_sched_yield();
- }
+StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
+ return theDepot.Put(stack);
}
-static void unlock(atomic_uintptr_t *p, StackDesc *s) {
- DCHECK_EQ((uptr)s & 1, 0);
- atomic_store(p, (uptr)s, memory_order_release);
+StackTrace StackDepotGet(u32 id) {
+ return theDepot.Get(id);
}
-u32 StackDepotPut(const uptr *stack, uptr size) {
- if (stack == 0 || size == 0)
- return 0;
- uptr h = hash(stack, size);
- atomic_uintptr_t *p = &depot.tab[h % kTabSize];
- uptr v = atomic_load(p, memory_order_consume);
- StackDesc *s = (StackDesc*)(v & ~1);
- // First, try to find the existing stack.
- u32 id = find(s, stack, size, h);
- if (id)
- return id;
- // If failed, lock, retry and insert new.
- StackDesc *s2 = lock(p);
- if (s2 != s) {
- id = find(s2, stack, size, h);
- if (id) {
- unlock(p, s2);
- return id;
- }
- }
- uptr part = (h % kTabSize) / kPartSize;
- id = atomic_fetch_add(&depot.seq[part], 1, memory_order_relaxed) + 1;
- stats.n_uniq_ids++;
- CHECK_LT(id, kMaxId);
- id |= part << kPartShift;
- CHECK_NE(id, 0);
- CHECK_EQ(id & (1u << 31), 0);
- s = allocDesc(size);
- s->id = id;
- s->hash = h;
- s->size = size;
- internal_memcpy(s->stack, stack, size * sizeof(uptr));
- s->link = s2;
- unlock(p, s);
- return id;
+void StackDepotLockAll() {
+ theDepot.LockAll();
}
-const uptr *StackDepotGet(u32 id, uptr *size) {
- if (id == 0)
- return 0;
- CHECK_EQ(id & (1u << 31), 0);
- // High kPartBits contain part id, so we need to scan at most kPartSize lists.
- uptr part = id >> kPartShift;
- for (int i = 0; i != kPartSize; i++) {
- uptr idx = part * kPartSize + i;
- CHECK_LT(idx, kTabSize);
- atomic_uintptr_t *p = &depot.tab[idx];
- uptr v = atomic_load(p, memory_order_consume);
- StackDesc *s = (StackDesc*)(v & ~1);
- for (; s; s = s->link) {
- if (s->id == id) {
- *size = s->size;
- return s->stack;
- }
- }
- }
- *size = 0;
- return 0;
+void StackDepotUnlockAll() {
+ theDepot.UnlockAll();
}
bool StackDepotReverseMap::IdDescPair::IdComparator(
@@ -207,10 +132,10 @@ bool StackDepotReverseMap::IdDescPair::IdComparator(
StackDepotReverseMap::StackDepotReverseMap()
: map_(StackDepotGetStats()->n_uniq_ids + 100) {
- for (int idx = 0; idx < kTabSize; idx++) {
- atomic_uintptr_t *p = &depot.tab[idx];
+ for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
+ atomic_uintptr_t *p = &theDepot.tab[idx];
uptr v = atomic_load(p, memory_order_consume);
- StackDesc *s = (StackDesc*)(v & ~1);
+ StackDepotNode *s = (StackDepotNode*)(v & ~1);
for (; s; s = s->link) {
IdDescPair pair = {s->id, s};
map_.push_back(pair);
@@ -219,18 +144,15 @@ StackDepotReverseMap::StackDepotReverseMap()
InternalSort(&map_, map_.size(), IdDescPair::IdComparator);
}
-const uptr *StackDepotReverseMap::Get(u32 id, uptr *size) {
- if (!map_.size()) return 0;
+StackTrace StackDepotReverseMap::Get(u32 id) {
+ if (!map_.size())
+ return StackTrace();
IdDescPair pair = {id, 0};
uptr idx = InternalBinarySearch(map_, 0, map_.size(), pair,
IdDescPair::IdComparator);
- if (idx > map_.size()) {
- *size = 0;
- return 0;
- }
- StackDesc *desc = map_[idx].desc;
- *size = desc->size;
- return desc->stack;
+ if (idx > map_.size())
+ return StackTrace();
+ return map_[idx].desc->load();
}
} // namespace __sanitizer
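
StackDepotNode above packs a 12-bit slice of the hash and a 20-bit use counter
into one atomic u32 (items in a bucket already share the low kTabSizeLog hash
bits, so those bits are free for the counter). A standalone sketch of that
packing with the constants from this patch:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kUseCountBits = 20;                  // == kTabSizeLog
      const uint32_t kUseCountMask = (1u << kUseCountBits) - 1;
      const uint32_t kHashMask = ~kUseCountMask;

      uint32_t hash = 0xdeadbeef;
      uint32_t word = hash & kHashMask;  // store(): keep only high hash bits
      word += 3;                         // three inc_use_count_unsafe() calls
      std::printf("use_count=%u hash_bits=0x%08x\n",
                  word & kUseCountMask, word & kHashMask);
      return 0;
    }
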
diff --git a/libsanitizer/sanitizer_common/sanitizer_stackdepot.h b/libsanitizer/sanitizer_common/sanitizer_stackdepot.h
index c2c04ef9d6..aad9bfb8d3 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stackdepot.h
+++ b/libsanitizer/sanitizer_common/sanitizer_stackdepot.h
@@ -13,24 +13,32 @@
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
+#include "sanitizer_stacktrace.h"
namespace __sanitizer {
// StackDepot efficiently stores huge amounts of stack traces.
-
-// Maps stack trace to an unique id.
-u32 StackDepotPut(const uptr *stack, uptr size);
-// Retrieves a stored stack trace by the id.
-const uptr *StackDepotGet(u32 id, uptr *size);
-
-struct StackDepotStats {
- uptr n_uniq_ids;
- uptr mapped;
+struct StackDepotNode;
+struct StackDepotHandle {
+ StackDepotNode *node_;
+ StackDepotHandle() : node_(0) {}
+ explicit StackDepotHandle(StackDepotNode *node) : node_(node) {}
+ bool valid() { return node_; }
+ u32 id();
+ int use_count();
+ void inc_use_count_unsafe();
};
+const int kStackDepotMaxUseCount = 1U << 20;
+
StackDepotStats *StackDepotGetStats();
+u32 StackDepotPut(StackTrace stack);
+StackDepotHandle StackDepotPut_WithHandle(StackTrace stack);
+// Retrieves a stored stack trace by the id.
+StackTrace StackDepotGet(u32 id);
-struct StackDesc;
+void StackDepotLockAll();
+void StackDepotUnlockAll();
// Instantiating this class creates a snapshot of StackDepot which can be
// efficiently queried with StackDepotGet(). You can use it concurrently with
@@ -39,12 +47,12 @@ struct StackDesc;
class StackDepotReverseMap {
public:
StackDepotReverseMap();
- const uptr *Get(u32 id, uptr *size);
+ StackTrace Get(u32 id);
private:
struct IdDescPair {
u32 id;
- StackDesc *desc;
+ StackDepotNode *desc;
static bool IdComparator(const IdDescPair &a, const IdDescPair &b);
};
@@ -55,6 +63,7 @@ class StackDepotReverseMap {
StackDepotReverseMap(const StackDepotReverseMap&);
void operator=(const StackDepotReverseMap&);
};
+
} // namespace __sanitizer
#endif // SANITIZER_STACKDEPOT_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h b/libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h
new file mode 100644
index 0000000000..05b63092b8
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h
@@ -0,0 +1,174 @@
+//===-- sanitizer_stackdepotbase.h ------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of a mapping from arbitrary values to unique 32-bit
+// identifiers.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_STACKDEPOTBASE_H
+#define SANITIZER_STACKDEPOTBASE_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_persistent_allocator.h"
+
+namespace __sanitizer {
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+class StackDepotBase {
+ public:
+ typedef typename Node::args_type args_type;
+ typedef typename Node::handle_type handle_type;
+  // Maps a stack trace to a unique id.
+ handle_type Put(args_type args, bool *inserted = 0);
+ // Retrieves a stored stack trace by the id.
+ args_type Get(u32 id);
+
+ StackDepotStats *GetStats() { return &stats; }
+
+ void LockAll();
+ void UnlockAll();
+
+ private:
+ static Node *find(Node *s, args_type args, u32 hash);
+ static Node *lock(atomic_uintptr_t *p);
+ static void unlock(atomic_uintptr_t *p, Node *s);
+
+ static const int kTabSize = 1 << kTabSizeLog; // Hash table size.
+ static const int kPartBits = 8;
+ static const int kPartShift = sizeof(u32) * 8 - kPartBits - kReservedBits;
+ static const int kPartCount =
+ 1 << kPartBits; // Number of subparts in the table.
+ static const int kPartSize = kTabSize / kPartCount;
+ static const int kMaxId = 1 << kPartShift;
+
+ atomic_uintptr_t tab[kTabSize]; // Hash table of Node's.
+ atomic_uint32_t seq[kPartCount]; // Unique id generators.
+
+ StackDepotStats stats;
+
+ friend class StackDepotReverseMap;
+};
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(Node *s,
+ args_type args,
+ u32 hash) {
+  // Searches linked list s for the stack, returns the matching node or null.
+ for (; s; s = s->link) {
+ if (s->eq(hash, args)) {
+ return s;
+ }
+ }
+ return 0;
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(
+ atomic_uintptr_t *p) {
+ // Uses the pointer lsb as mutex.
+ for (int i = 0;; i++) {
+ uptr cmp = atomic_load(p, memory_order_relaxed);
+ if ((cmp & 1) == 0 &&
+ atomic_compare_exchange_weak(p, &cmp, cmp | 1, memory_order_acquire))
+ return (Node *)cmp;
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ }
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+void StackDepotBase<Node, kReservedBits, kTabSizeLog>::unlock(
+ atomic_uintptr_t *p, Node *s) {
+ DCHECK_EQ((uptr)s & 1, 0);
+ atomic_store(p, (uptr)s, memory_order_release);
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::handle_type
+StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
+ bool *inserted) {
+ if (inserted) *inserted = false;
+ if (!Node::is_valid(args)) return handle_type();
+ uptr h = Node::hash(args);
+ atomic_uintptr_t *p = &tab[h % kTabSize];
+ uptr v = atomic_load(p, memory_order_consume);
+ Node *s = (Node *)(v & ~1);
+ // First, try to find the existing stack.
+ Node *node = find(s, args, h);
+ if (node) return node->get_handle();
+ // If failed, lock, retry and insert new.
+ Node *s2 = lock(p);
+ if (s2 != s) {
+ node = find(s2, args, h);
+ if (node) {
+ unlock(p, s2);
+ return node->get_handle();
+ }
+ }
+ uptr part = (h % kTabSize) / kPartSize;
+ u32 id = atomic_fetch_add(&seq[part], 1, memory_order_relaxed) + 1;
+ stats.n_uniq_ids++;
+ CHECK_LT(id, kMaxId);
+ id |= part << kPartShift;
+ CHECK_NE(id, 0);
+ CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
+ uptr memsz = Node::storage_size(args);
+ s = (Node *)PersistentAlloc(memsz);
+ stats.allocated += memsz;
+ s->id = id;
+ s->store(args, h);
+ s->link = s2;
+ unlock(p, s);
+ if (inserted) *inserted = true;
+ return s->get_handle();
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::args_type
+StackDepotBase<Node, kReservedBits, kTabSizeLog>::Get(u32 id) {
+ if (id == 0) {
+ return args_type();
+ }
+ CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
+  // The high kPartBits bits contain the part id, so we need to scan at most
+  // kPartSize lists.
+ uptr part = id >> kPartShift;
+ for (int i = 0; i != kPartSize; i++) {
+ uptr idx = part * kPartSize + i;
+ CHECK_LT(idx, kTabSize);
+ atomic_uintptr_t *p = &tab[idx];
+ uptr v = atomic_load(p, memory_order_consume);
+ Node *s = (Node *)(v & ~1);
+ for (; s; s = s->link) {
+ if (s->id == id) {
+ return s->load();
+ }
+ }
+ }
+ return args_type();
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+void StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockAll() {
+ for (int i = 0; i < kTabSize; ++i) {
+ lock(&tab[i]);
+ }
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAll() {
+ for (int i = 0; i < kTabSize; ++i) {
+ atomic_uintptr_t *p = &tab[i];
+ uptr s = atomic_load(p, memory_order_relaxed);
+ unlock(p, (Node *)(s & ~1UL));
+ }
+}
+
+} // namespace __sanitizer
+#endif // SANITIZER_STACKDEPOTBASE_H
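The Put() path above relies on tagging the low bit of each hash-bucket pointer as a spinlock: lookups walk the list lock-free after masking the bit off, while insertions serialize per bucket. A minimal standalone sketch of that tagging trick follows (it illustrates only the locking pattern, not the depot itself; the names are illustrative):

#include <atomic>
#include <cstdint>
#include <sched.h>

// Sketch of the "pointer LSB as mutex" pattern used by StackDepotBase.
// Assumes nodes are at least 2-byte aligned so bit 0 is free for the lock.
struct Node { Node *link; };

static Node *LockBucket(std::atomic<uintptr_t> *p) {
  for (int i = 0;; i++) {
    uintptr_t cmp = p->load(std::memory_order_relaxed);
    // Try to set the lock bit only if it is currently clear.
    if ((cmp & 1) == 0 &&
        p->compare_exchange_weak(cmp, cmp | 1, std::memory_order_acquire))
      return reinterpret_cast<Node *>(cmp);  // list head, lock now held
    if (i >= 10)
      sched_yield();  // back off after a few failed attempts
  }
}

static void UnlockBucket(std::atomic<uintptr_t> *p, Node *new_head) {
  // Storing the untagged head both releases the lock and publishes any
  // newly linked node to lock-free readers.
  p->store(reinterpret_cast<uintptr_t>(new_head), std::memory_order_release);
}

Readers on the fast path simply load the bucket and mask off bit 0, exactly as Put() does before calling find().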
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
index 3a9e902537..9b99b5bb20 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
@@ -11,144 +11,87 @@
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
-#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
-#include "sanitizer_symbolizer.h"
namespace __sanitizer {
uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
-#ifdef __arm__
+#if defined(__arm__)
// Cancel Thumb bit.
pc = pc & (~1);
#endif
#if defined(__powerpc__) || defined(__powerpc64__)
// PCs are always 4 byte aligned.
return pc - 4;
-#elif defined(__sparc__)
+#elif defined(__sparc__) || defined(__mips__)
return pc - 8;
#else
return pc - 1;
#endif
}
-static void PrintStackFramePrefix(InternalScopedString *buffer, uptr frame_num,
- uptr pc) {
- buffer->append(" #%zu 0x%zx", frame_num, pc);
+uptr StackTrace::GetCurrentPc() {
+ return GET_CALLER_PC();
}
-void StackTrace::PrintStack(const uptr *addr, uptr size,
- SymbolizeCallback symbolize_callback) {
- if (addr == 0 || size == 0) {
- Printf(" <empty stack>\n\n");
- return;
- }
- MemoryMappingLayout proc_maps(/*cache_enabled*/true);
- InternalScopedBuffer<char> buff(GetPageSizeCached() * 2);
- InternalScopedBuffer<AddressInfo> addr_frames(64);
- InternalScopedString frame_desc(GetPageSizeCached() * 2);
- uptr frame_num = 0;
- for (uptr i = 0; i < size && addr[i]; i++) {
- // PCs in stack traces are actually the return addresses, that is,
- // addresses of the next instructions after the call.
- uptr pc = GetPreviousInstructionPc(addr[i]);
- uptr addr_frames_num = 0; // The number of stack frames for current
- // instruction address.
- if (symbolize_callback) {
- if (symbolize_callback((void*)pc, buff.data(), buff.size())) {
- addr_frames_num = 1;
- frame_desc.clear();
- PrintStackFramePrefix(&frame_desc, frame_num, pc);
- // We can't know anything about the string returned by external
- // symbolizer, but if it starts with filename, try to strip path prefix
- // from it.
- frame_desc.append(
- " %s",
- StripPathPrefix(buff.data(), common_flags()->strip_path_prefix));
- Printf("%s\n", frame_desc.data());
- frame_num++;
- }
- }
- if (common_flags()->symbolize && addr_frames_num == 0) {
- // Use our own (online) symbolizer, if necessary.
- if (Symbolizer *sym = Symbolizer::GetOrNull())
- addr_frames_num =
- sym->SymbolizeCode(pc, addr_frames.data(), addr_frames.size());
- for (uptr j = 0; j < addr_frames_num; j++) {
- AddressInfo &info = addr_frames[j];
- frame_desc.clear();
- PrintStackFramePrefix(&frame_desc, frame_num, pc);
- if (info.function) {
- frame_desc.append(" in %s", info.function);
- }
- if (info.file) {
- frame_desc.append(" ");
- PrintSourceLocation(&frame_desc, info.file, info.line, info.column);
- } else if (info.module) {
- frame_desc.append(" ");
- PrintModuleAndOffset(&frame_desc, info.module, info.module_offset);
- }
- Printf("%s\n", frame_desc.data());
- frame_num++;
- info.Clear();
- }
- }
- if (addr_frames_num == 0) {
- // If online symbolization failed, try to output at least module and
- // offset for instruction.
- frame_desc.clear();
- PrintStackFramePrefix(&frame_desc, frame_num, pc);
- uptr offset;
- if (proc_maps.GetObjectNameAndOffset(pc, &offset,
- buff.data(), buff.size(),
- /* protection */0)) {
- frame_desc.append(" ");
- PrintModuleAndOffset(&frame_desc, buff.data(), offset);
- }
- Printf("%s\n", frame_desc.data());
- frame_num++;
- }
- }
- // Always print a trailing empty line after stack trace.
- Printf("\n");
+void BufferedStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
+ size = cnt + !!extra_top_pc;
+ CHECK_LE(size, kStackTraceMax);
+ internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
+ if (extra_top_pc)
+ trace_buffer[cnt] = extra_top_pc;
+ top_frame_bp = 0;
}
-uptr StackTrace::GetCurrentPc() {
- return GET_CALLER_PC();
+// Check whether the given pointer points into the allocated stack area.
+static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
+ return frame > stack_bottom && frame < stack_top - 2 * sizeof (uhwptr);
}
-void StackTrace::FastUnwindStack(uptr pc, uptr bp,
- uptr stack_top, uptr stack_bottom,
- uptr max_depth) {
- if (max_depth == 0) {
- size = 0;
- return;
- }
- trace[0] = pc;
+// With GCC on ARM, bp points to the saved lr rather than the fp, so we should
+// check whether the next cell on the stack holds a saved frame pointer.
+// GetCanonicFrame returns a pointer to the saved frame pointer in either case.
+static inline uhwptr *GetCanonicFrame(uptr bp,
+ uptr stack_top,
+ uptr stack_bottom) {
+#ifdef __arm__
+ if (!IsValidFrame(bp, stack_top, stack_bottom)) return 0;
+ uhwptr *bp_prev = (uhwptr *)bp;
+ if (IsValidFrame((uptr)bp_prev[0], stack_top, stack_bottom)) return bp_prev;
+  // The next frame pointer does not look right. This could be a GCC frame;
+  // step back by one word and try again.
+ if (IsValidFrame((uptr)bp_prev[-1], stack_top, stack_bottom))
+ return bp_prev - 1;
+ // Nope, this does not look right either. This means the frame after next does
+ // not have a valid frame pointer, but we can still extract the caller PC.
+ // Unfortunately, there is no way to decide between GCC and LLVM frame
+ // layouts. Assume LLVM.
+ return bp_prev;
+#else
+ return (uhwptr*)bp;
+#endif
+}
+
+void BufferedStackTrace::FastUnwindStack(uptr pc, uptr bp, uptr stack_top,
+ uptr stack_bottom, uptr max_depth) {
+ CHECK_GE(max_depth, 2);
+ trace_buffer[0] = pc;
size = 1;
- uhwptr *frame = (uhwptr *)bp;
- uhwptr *prev_frame = frame - 1;
if (stack_top < 4096) return; // Sanity check for stack top.
+ uhwptr *frame = GetCanonicFrame(bp, stack_top, stack_bottom);
+ // Lowest possible address that makes sense as the next frame pointer.
+ // Goes up as we walk the stack.
+ uptr bottom = stack_bottom;
// Avoid infinite loop when frame == frame[0] by using frame > prev_frame.
- while (frame > prev_frame &&
- frame < (uhwptr *)stack_top - 2 &&
- frame > (uhwptr *)stack_bottom &&
+ while (IsValidFrame((uptr)frame, stack_top, bottom) &&
IsAligned((uptr)frame, sizeof(*frame)) &&
size < max_depth) {
uhwptr pc1 = frame[1];
if (pc1 != pc) {
- trace[size++] = (uptr) pc1;
+ trace_buffer[size++] = (uptr) pc1;
}
- prev_frame = frame;
- frame = (uhwptr *)frame[0];
- }
-}
-
-void StackTrace::PopStackFrames(uptr count) {
- CHECK(size >= count);
- size -= count;
- for (uptr i = 0; i < size; i++) {
- trace[i] = trace[i + count];
+ bottom = (uptr)frame;
+ frame = GetCanonicFrame((uptr)frame[0], stack_top, bottom);
}
}
@@ -156,10 +99,18 @@ static bool MatchPc(uptr cur_pc, uptr trace_pc, uptr threshold) {
return cur_pc - trace_pc <= threshold || trace_pc - cur_pc <= threshold;
}
-uptr StackTrace::LocatePcInTrace(uptr pc) {
+void BufferedStackTrace::PopStackFrames(uptr count) {
+ CHECK_LT(count, size);
+ size -= count;
+ for (uptr i = 0; i < size; ++i) {
+ trace_buffer[i] = trace_buffer[i + count];
+ }
+}
+
+uptr BufferedStackTrace::LocatePcInTrace(uptr pc) {
   // Use a threshold to find the PC in the stack trace, as the PC we want to
   // unwind from may differ slightly from the return address in the actual
   // unwound stack trace.
- const int kPcThreshold = 192;
+ const int kPcThreshold = 288;
for (uptr i = 0; i < size; ++i) {
if (MatchPc(pc, trace[i], kPcThreshold))
return i;
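The rewritten FastUnwindStack above walks the saved-frame-pointer chain, treating frame[0] as the next frame pointer and frame[1] as the return address, and raising the lower bound as it goes so the walk always terminates. A hedged, self-contained sketch of the same walk (plain x86-64-style layout, names are illustrative and not the runtime's API):

#include <cstddef>
#include <cstdint>

// Sketch of a frame-pointer walk in the spirit of FastUnwindStack.
// frame[0] = caller's frame pointer, frame[1] = return address; the lower
// bound "bottom" moves up with each step to guarantee forward progress.
static size_t WalkFramePointers(uintptr_t pc, uintptr_t bp,
                                uintptr_t stack_top, uintptr_t stack_bottom,
                                uintptr_t *out, size_t max_depth) {
  if (max_depth == 0) return 0;
  out[0] = pc;
  size_t size = 1;
  uintptr_t bottom = stack_bottom;
  uintptr_t *frame = reinterpret_cast<uintptr_t *>(bp);
  while (reinterpret_cast<uintptr_t>(frame) > bottom &&
         reinterpret_cast<uintptr_t>(frame) <
             stack_top - 2 * sizeof(uintptr_t) &&
         reinterpret_cast<uintptr_t>(frame) % sizeof(*frame) == 0 &&
         size < max_depth) {
    uintptr_t ret = frame[1];
    if (ret != pc) out[size++] = ret;   // skip a duplicate of the top PC
    bottom = reinterpret_cast<uintptr_t>(frame);        // frames must move up
    frame = reinterpret_cast<uintptr_t *>(frame[0]);     // follow the chain
  }
  return size;
}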
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
index d06db5ffa7..31495cfc56 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
@@ -17,10 +17,9 @@ namespace __sanitizer {
static const uptr kStackTraceMax = 256;
-#if SANITIZER_LINUX && (defined(__arm__) || \
- defined(__powerpc__) || defined(__powerpc64__) || \
- defined(__sparc__) || \
- defined(__mips__))
+#if SANITIZER_LINUX && (defined(__aarch64__) || defined(__powerpc__) || \
+ defined(__powerpc64__) || defined(__sparc__) || \
+ defined(__mips__))
# define SANITIZER_CAN_FAST_UNWIND 0
#elif SANITIZER_WINDOWS
# define SANITIZER_CAN_FAST_UNWIND 0
@@ -28,46 +27,61 @@ static const uptr kStackTraceMax = 256;
# define SANITIZER_CAN_FAST_UNWIND 1
#endif
+// Fast unwind is the only option on Mac for now; we will need to
+// revisit this macro when slow unwind works on Mac, see
+// https://code.google.com/p/address-sanitizer/issues/detail?id=137
+#if SANITIZER_MAC
+# define SANITIZER_CAN_SLOW_UNWIND 0
+#else
+# define SANITIZER_CAN_SLOW_UNWIND 1
+#endif
+
struct StackTrace {
- typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer,
- int out_size);
- uptr top_frame_bp;
+ const uptr *trace;
uptr size;
- uptr trace[kStackTraceMax];
+
+ StackTrace() : trace(nullptr), size(0) {}
+ StackTrace(const uptr *trace, uptr size) : trace(trace), size(size) {}
// Prints a symbolized stacktrace, followed by an empty line.
- static void PrintStack(const uptr *addr, uptr size,
- SymbolizeCallback symbolize_callback = 0);
-
- void CopyFrom(const uptr *src, uptr src_size) {
- top_frame_bp = 0;
- size = src_size;
- if (size > kStackTraceMax) size = kStackTraceMax;
- for (uptr i = 0; i < size; i++)
- trace[i] = src[i];
- }
+ void Print() const;
static bool WillUseFastUnwind(bool request_fast_unwind) {
- // Check if fast unwind is available. Fast unwind is the only option on Mac.
if (!SANITIZER_CAN_FAST_UNWIND)
return false;
- else if (SANITIZER_MAC)
+ else if (!SANITIZER_CAN_SLOW_UNWIND)
return true;
return request_fast_unwind;
}
- void Unwind(uptr max_depth, uptr pc, uptr bp, uptr stack_top,
- uptr stack_bottom, bool request_fast_unwind);
-
static uptr GetCurrentPc();
static uptr GetPreviousInstructionPc(uptr pc);
+ typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer,
+ int out_size);
+};
+
+// StackTrace that owns the buffer used to store the addresses.
+struct BufferedStackTrace : public StackTrace {
+ uptr trace_buffer[kStackTraceMax];
+ uptr top_frame_bp; // Optional bp of a top frame.
+
+ BufferedStackTrace() : StackTrace(trace_buffer, 0), top_frame_bp(0) {}
+
+ void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
+ void Unwind(uptr max_depth, uptr pc, uptr bp, void *context, uptr stack_top,
+ uptr stack_bottom, bool request_fast_unwind);
private:
void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom,
uptr max_depth);
void SlowUnwindStack(uptr pc, uptr max_depth);
+ void SlowUnwindStackWithContext(uptr pc, void *context,
+ uptr max_depth);
void PopStackFrames(uptr count);
uptr LocatePcInTrace(uptr pc);
+
+ BufferedStackTrace(const BufferedStackTrace &);
+ void operator=(const BufferedStackTrace &);
};
} // namespace __sanitizer
@@ -80,6 +94,10 @@ struct StackTrace {
uptr local_stack; \
uptr sp = (uptr)&local_stack
+#define GET_CALLER_PC_BP \
+ uptr bp = GET_CURRENT_FRAME(); \
+ uptr pc = GET_CALLER_PC();
+
// Use this macro if you want to print stack trace with the current
// function in the top frame.
#define GET_CURRENT_PC_BP_SP \
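The header split above separates a non-owning view (StackTrace: just a pointer and a size) from the storage-owning BufferedStackTrace, whose copy constructor and assignment operator are deliberately private. A brief usage sketch of the intended pattern, assuming the caller already has pc/bp and the stack bounds and is inside namespace __sanitizer:

// Sketch (not compiled against the runtime): unwind into the owning object,
// then pass around the cheap non-owning view.
void CaptureAndPrint(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom) {
  BufferedStackTrace stack;            // owns the kStackTraceMax-entry buffer
  stack.Unwind(kStackTraceMax, pc, bp, /*context=*/0,
               stack_top, stack_bottom, /*request_fast_unwind=*/true);
  StackTrace view(stack.trace, stack.size);  // pointer + size, no copy
  view.Print();
}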
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc
index ea2f9d0777..2d55b73f03 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc
@@ -9,18 +9,70 @@
// run-time libraries.
//===----------------------------------------------------------------------===//
+#include "sanitizer_common.h"
+#include "sanitizer_placement_new.h"
#include "sanitizer_stacktrace.h"
+#include "sanitizer_stacktrace_printer.h"
+#include "sanitizer_symbolizer.h"
namespace __sanitizer {
-void StackTrace::Unwind(uptr max_depth, uptr pc, uptr bp, uptr stack_top,
- uptr stack_bottom, bool request_fast_unwind) {
- if (!WillUseFastUnwind(request_fast_unwind))
- SlowUnwindStack(pc, max_depth);
- else
- FastUnwindStack(pc, bp, stack_top, stack_bottom, max_depth);
+void StackTrace::Print() const {
+ if (trace == nullptr || size == 0) {
+ Printf(" <empty stack>\n\n");
+ return;
+ }
+ const int kMaxAddrFrames = 64;
+ InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
+ for (uptr i = 0; i < kMaxAddrFrames; i++)
+ new(&addr_frames[i]) AddressInfo();
+ InternalScopedString frame_desc(GetPageSizeCached() * 2);
+ uptr frame_num = 0;
+ for (uptr i = 0; i < size && trace[i]; i++) {
+ // PCs in stack traces are actually the return addresses, that is,
+ // addresses of the next instructions after the call.
+ uptr pc = GetPreviousInstructionPc(trace[i]);
+ uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC(
+ pc, addr_frames.data(), kMaxAddrFrames);
+ if (addr_frames_num == 0) {
+ addr_frames[0].address = pc;
+ addr_frames_num = 1;
+ }
+ for (uptr j = 0; j < addr_frames_num; j++) {
+ AddressInfo &info = addr_frames[j];
+ frame_desc.clear();
+ RenderFrame(&frame_desc, common_flags()->stack_trace_format, frame_num++,
+ info, common_flags()->strip_path_prefix);
+ Printf("%s\n", frame_desc.data());
+ info.Clear();
+ }
+ }
+ // Always print a trailing empty line after stack trace.
+ Printf("\n");
+}
- top_frame_bp = size ? bp : 0;
+void BufferedStackTrace::Unwind(uptr max_depth, uptr pc, uptr bp, void *context,
+ uptr stack_top, uptr stack_bottom,
+ bool request_fast_unwind) {
+ top_frame_bp = (max_depth > 0) ? bp : 0;
+ // Avoid doing any work for small max_depth.
+ if (max_depth == 0) {
+ size = 0;
+ return;
+ }
+ if (max_depth == 1) {
+ size = 1;
+ trace_buffer[0] = pc;
+ return;
+ }
+ if (!WillUseFastUnwind(request_fast_unwind)) {
+ if (context)
+ SlowUnwindStackWithContext(pc, context, max_depth);
+ else
+ SlowUnwindStack(pc, max_depth);
+ } else {
+ FastUnwindStack(pc, bp, stack_top, stack_bottom, max_depth);
+ }
}
} // namespace __sanitizer
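The new Unwind() above short-circuits max_depth values of 0 and 1 before choosing between the slow (context-aware) and fast unwinders. The choice itself mirrors StackTrace::WillUseFastUnwind; a minimal sketch of that policy, using the two capability macros from the header:

// Sketch of the dispatch policy: the macros are compile-time capabilities,
// the argument is the caller's request.
static bool UseFastUnwind(bool request_fast_unwind) {
  if (!SANITIZER_CAN_FAST_UNWIND)
    return false;                 // fast unwind not implemented here
  if (!SANITIZER_CAN_SLOW_UNWIND)
    return true;                  // e.g. Mac: fast unwind is the only option
  return request_fast_unwind;     // both available: honor the request
}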
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cc b/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cc
new file mode 100644
index 0000000000..300b4904c5
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cc
@@ -0,0 +1,130 @@
+//===-- sanitizer_stacktrace_printer.cc -------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizers' run-time libraries.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_stacktrace_printer.h"
+
+namespace __sanitizer {
+
+static const char *StripFunctionName(const char *function, const char *prefix) {
+ if (function == 0) return 0;
+ if (prefix == 0) return function;
+ uptr prefix_len = internal_strlen(prefix);
+ if (0 == internal_strncmp(function, prefix, prefix_len))
+ return function + prefix_len;
+ return function;
+}
+
+static const char kDefaultFormat[] = " #%n %p %F %L";
+
+void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
+ const AddressInfo &info, const char *strip_path_prefix,
+ const char *strip_func_prefix) {
+ if (0 == internal_strcmp(format, "DEFAULT"))
+ format = kDefaultFormat;
+ for (const char *p = format; *p != '\0'; p++) {
+ if (*p != '%') {
+ buffer->append("%c", *p);
+ continue;
+ }
+ p++;
+ switch (*p) {
+ case '%':
+ buffer->append("%%");
+ break;
+ // Frame number and all fields of AddressInfo structure.
+ case 'n':
+ buffer->append("%zu", frame_no);
+ break;
+ case 'p':
+ buffer->append("0x%zx", info.address);
+ break;
+ case 'm':
+ buffer->append("%s", StripPathPrefix(info.module, strip_path_prefix));
+ break;
+ case 'o':
+ buffer->append("0x%zx", info.module_offset);
+ break;
+ case 'f':
+ buffer->append("%s", StripFunctionName(info.function, strip_func_prefix));
+ break;
+ case 'q':
+ buffer->append("0x%zx", info.function_offset != AddressInfo::kUnknown
+ ? info.function_offset
+ : 0x0);
+ break;
+ case 's':
+ buffer->append("%s", StripPathPrefix(info.file, strip_path_prefix));
+ break;
+ case 'l':
+ buffer->append("%d", info.line);
+ break;
+ case 'c':
+ buffer->append("%d", info.column);
+ break;
+ // Smarter special cases.
+ case 'F':
+ // Function name and offset, if file is unknown.
+ if (info.function) {
+ buffer->append("in %s",
+ StripFunctionName(info.function, strip_func_prefix));
+ if (!info.file && info.function_offset != AddressInfo::kUnknown)
+ buffer->append("+0x%zx", info.function_offset);
+ }
+ break;
+ case 'S':
+ // File/line information.
+ RenderSourceLocation(buffer, info.file, info.line, info.column,
+ strip_path_prefix);
+ break;
+ case 'L':
+ // Source location, or module location.
+ if (info.file) {
+ RenderSourceLocation(buffer, info.file, info.line, info.column,
+ strip_path_prefix);
+ } else if (info.module) {
+ RenderModuleLocation(buffer, info.module, info.module_offset,
+ strip_path_prefix);
+ } else {
+ buffer->append("(<unknown module>)");
+ }
+ break;
+ case 'M':
+ // Module basename and offset, or PC.
+ if (info.module)
+ buffer->append("(%s+%p)", StripModuleName(info.module),
+ (void *)info.module_offset);
+ else
+ buffer->append("(%p)", (void *)info.address);
+ break;
+ default:
+ Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n",
+ *p, *p);
+ Die();
+ }
+ }
+}
+
+void RenderSourceLocation(InternalScopedString *buffer, const char *file,
+ int line, int column, const char *strip_path_prefix) {
+ buffer->append("%s", StripPathPrefix(file, strip_path_prefix));
+ if (line > 0) {
+ buffer->append(":%d", line);
+ if (column > 0)
+ buffer->append(":%d", column);
+ }
+}
+
+void RenderModuleLocation(InternalScopedString *buffer, const char *module,
+ uptr offset, const char *strip_path_prefix) {
+ buffer->append("(%s+0x%zx)", StripPathPrefix(module, strip_path_prefix),
+ offset);
+}
+
+} // namespace __sanitizer
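RenderFrame above is a small printf-style interpreter over the %-placeholders. A toy re-implementation of a subset of them (%n, %p, %f, %l) illustrates the format-string contract without the runtime's string types; it is illustrative only, not the runtime's code:

#include <cstdint>
#include <cstdio>
#include <string>

// Toy subset of RenderFrame's placeholder expansion.
static std::string RenderToy(const char *format, int frame_no, uintptr_t pc,
                             const char *function, int line) {
  std::string out;
  char tmp[64];
  for (const char *p = format; *p; p++) {
    if (*p != '%') { out += *p; continue; }
    ++p;
    if (!*p) break;                      // stray trailing '%'
    switch (*p) {
      case 'n': std::snprintf(tmp, sizeof(tmp), "%d", frame_no); out += tmp; break;
      case 'p': std::snprintf(tmp, sizeof(tmp), "0x%zx", (size_t)pc); out += tmp; break;
      case 'f': out += function ? function : "<unknown>"; break;
      case 'l': std::snprintf(tmp, sizeof(tmp), "%d", line); out += tmp; break;
      default:  out += '%'; out += *p; break;   // unknown: copy through
    }
  }
  return out;
}

// RenderToy(" #%n %p %f:%l", 0, 0x4005d0, "main", 42)
//   -> " #0 0x4005d0 main:42"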
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.h b/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.h
new file mode 100644
index 0000000000..54e2fb024d
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.h
@@ -0,0 +1,60 @@
+//===-- sanitizer_stacktrace_printer.h --------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizers' run-time libraries.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_STACKTRACE_PRINTER_H
+#define SANITIZER_STACKTRACE_PRINTER_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_symbolizer.h"
+
+namespace __sanitizer {
+
+// Renders the contents of the "info" structure, which describes stack frame
+// "frame_no", and appends the result to "buffer". "format" is a string with
+// placeholders, which is copied to the output with
+// placeholders substituted with the contents of "info". For example,
+// format string
+// " frame %n: function %F at %S"
+// will be turned into
+// " frame 10: function foo::bar() at my/file.cc:10"
+// You may additionally pass "strip_path_prefix" to strip prefixes of paths to
+// source files and modules, and "strip_func_prefix" to strip prefixes of
+// function names.
+// Here's the full list of available placeholders:
+// %% - represents a '%' character;
+// %n - frame number (copy of frame_no);
+// %p - PC in hex format;
+// %m - path to module (binary or shared object);
+// %o - offset in the module in hex format;
+// %f - function name;
+// %q - offset in the function in hex format (*if available*);
+// %s - path to source file;
+// %l - line in the source file;
+// %c - column in the source file;
+// %F - if function is known to be <foo>, prints "in <foo>", possibly
+// followed by the offset in this function, but only if source file
+// is unknown;
+// %S - prints file/line/column information;
+// %L - prints location information: file/line/column, if it is known, or
+// module+offset if it is known, or (<unknown module>) string.
+// %M - prints module basename and offset, if it is known, or PC.
+void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
+ const AddressInfo &info, const char *strip_path_prefix = "",
+ const char *strip_func_prefix = "");
+
+void RenderSourceLocation(InternalScopedString *buffer, const char *file,
+ int line, int column, const char *strip_path_prefix);
+
+void RenderModuleLocation(InternalScopedString *buffer, const char *module,
+ uptr offset, const char *strip_path_prefix);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_STACKTRACE_PRINTER_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
index 6ee63ec316..5881202367 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
@@ -98,12 +98,11 @@ bool ThreadSuspender::SuspendThread(SuspendedThreadID thread_id) {
&pterrno)) {
// Either the thread is dead, or something prevented us from attaching.
// Log this event and move on.
- if (common_flags()->verbosity)
- Report("Could not attach to thread %d (errno %d).\n", thread_id, pterrno);
+ VReport(1, "Could not attach to thread %d (errno %d).\n", thread_id,
+ pterrno);
return false;
} else {
- if (common_flags()->verbosity)
- Report("Attached to thread %d.\n", thread_id);
+ VReport(1, "Attached to thread %d.\n", thread_id);
// The thread is not guaranteed to stop before ptrace returns, so we must
// wait on it.
uptr waitpid_status;
@@ -112,9 +111,8 @@ bool ThreadSuspender::SuspendThread(SuspendedThreadID thread_id) {
if (internal_iserror(waitpid_status, &wperrno)) {
     // Got an ECHILD error. I don't think this situation is possible, but it
// doesn't hurt to report it.
- if (common_flags()->verbosity)
- Report("Waiting on thread %d failed, detaching (errno %d).\n",
- thread_id, wperrno);
+ VReport(1, "Waiting on thread %d failed, detaching (errno %d).\n",
+ thread_id, wperrno);
internal_ptrace(PTRACE_DETACH, thread_id, NULL, NULL);
return false;
}
@@ -129,14 +127,12 @@ void ThreadSuspender::ResumeAllThreads() {
int pterrno;
if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, NULL, NULL),
&pterrno)) {
- if (common_flags()->verbosity)
- Report("Detached from thread %d.\n", tid);
+ VReport(1, "Detached from thread %d.\n", tid);
} else {
// Either the thread is dead, or we are already detached.
// The latter case is possible, for instance, if this function was called
// from a signal handler.
- if (common_flags()->verbosity)
- Report("Could not detach from thread %d (errno %d).\n", tid, pterrno);
+ VReport(1, "Could not detach from thread %d (errno %d).\n", tid, pterrno);
}
}
}
@@ -248,18 +244,18 @@ static int TracerThread(void* argument) {
// the mask we inherited from the caller thread.
for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
signal_index++) {
- __sanitizer_kernel_sigaction_t new_sigaction;
+ __sanitizer_sigaction new_sigaction;
internal_memset(&new_sigaction, 0, sizeof(new_sigaction));
new_sigaction.sigaction = TracerThreadSignalHandler;
new_sigaction.sa_flags = SA_ONSTACK | SA_SIGINFO;
internal_sigfillset(&new_sigaction.sa_mask);
- internal_sigaction(kUnblockedSignals[signal_index], &new_sigaction, NULL);
+ internal_sigaction_norestorer(kUnblockedSignals[signal_index],
+ &new_sigaction, NULL);
}
int exit_code = 0;
if (!thread_suspender.SuspendAllThreads()) {
- if (common_flags()->verbosity)
- Report("Failed suspending threads.\n");
+ VReport(1, "Failed suspending threads.\n");
exit_code = 3;
} else {
tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
@@ -299,9 +295,9 @@ class ScopedStackSpaceWithGuard {
// We have a limitation on the stack frame size, so some stuff had to be moved
// into globals.
-static __sanitizer_kernel_sigset_t blocked_sigset;
-static __sanitizer_kernel_sigset_t old_sigset;
-static __sanitizer_kernel_sigaction_t old_sigactions
+static __sanitizer_sigset_t blocked_sigset;
+static __sanitizer_sigset_t old_sigset;
+static __sanitizer_sigaction old_sigactions
[ARRAY_SIZE(kUnblockedSignals)];
class StopTheWorldScope {
@@ -318,12 +314,12 @@ class StopTheWorldScope {
// Remove the signal from the set of blocked signals.
internal_sigdelset(&blocked_sigset, kUnblockedSignals[signal_index]);
// Install the default handler.
- __sanitizer_kernel_sigaction_t new_sigaction;
+ __sanitizer_sigaction new_sigaction;
internal_memset(&new_sigaction, 0, sizeof(new_sigaction));
new_sigaction.handler = SIG_DFL;
internal_sigfillset(&new_sigaction.sa_mask);
- internal_sigaction(kUnblockedSignals[signal_index], &new_sigaction,
- &old_sigactions[signal_index]);
+ internal_sigaction_norestorer(kUnblockedSignals[signal_index],
+ &new_sigaction, &old_sigactions[signal_index]);
}
int sigprocmask_status =
internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
@@ -344,8 +340,8 @@ class StopTheWorldScope {
// Restore the signal handlers.
for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
signal_index++) {
- internal_sigaction(kUnblockedSignals[signal_index],
- &old_sigactions[signal_index], NULL);
+ internal_sigaction_norestorer(kUnblockedSignals[signal_index],
+ &old_sigactions[signal_index], NULL);
}
internal_sigprocmask(SIG_SETMASK, &old_sigset, &old_sigset);
}
@@ -387,8 +383,7 @@ void StopTheWorld(StopTheWorldCallback callback, void *argument) {
/* child_tidptr */);
int local_errno = 0;
if (internal_iserror(tracer_pid, &local_errno)) {
- if (common_flags()->verbosity)
- Report("Failed spawning a tracer thread (errno %d).\n", local_errno);
+ VReport(1, "Failed spawning a tracer thread (errno %d).\n", local_errno);
tracer_thread_argument.mutex.Unlock();
} else {
ScopedSetTracerPID scoped_set_tracer_pid(tracer_pid);
@@ -404,11 +399,9 @@ void StopTheWorld(StopTheWorldCallback callback, void *argument) {
// At this point, any signal will either be blocked or kill us, so waitpid
// should never return (and set errno) while the tracer thread is alive.
uptr waitpid_status = internal_waitpid(tracer_pid, NULL, __WALL);
- if (internal_iserror(waitpid_status, &local_errno)) {
- if (common_flags()->verbosity)
- Report("Waiting on the tracer thread failed (errno %d).\n",
- local_errno);
- }
+ if (internal_iserror(waitpid_status, &local_errno))
+ VReport(1, "Waiting on the tracer thread failed (errno %d).\n",
+ local_errno);
}
}
@@ -449,9 +442,8 @@ int SuspendedThreadsList::GetRegistersAndSP(uptr index,
int pterrno;
if (internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, NULL, &regs),
&pterrno)) {
- if (common_flags()->verbosity)
- Report("Could not get registers from thread %d (errno %d).\n",
- tid, pterrno);
+ VReport(1, "Could not get registers from thread %d (errno %d).\n", tid,
+ pterrno);
return -1;
}
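The stop-the-world changes above replace the repeated "if (common_flags()->verbosity) Report(...)" pattern with VReport(level, ...), which folds the verbosity check into the call site. A minimal sketch of that macro shape; the real VReport lives in sanitizer_common.h and uses the runtime's Report(), and verbosity_level here stands in for the parsed flag:

#include <cstdio>

static int verbosity_level = 0;  // assumed stand-in for common_flags()->verbosity

#define VREPORT_SKETCH(level, ...)            \
  do {                                        \
    if (verbosity_level >= (level))           \
      std::fprintf(stderr, __VA_ARGS__);      \
  } while (0)

// VREPORT_SKETCH(1, "Attached to thread %d.\n", tid);  // prints only at level >= 1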
diff --git a/libsanitizer/sanitizer_common/sanitizer_suppressions.cc b/libsanitizer/sanitizer_common/sanitizer_suppressions.cc
index 14f13e6208..ab40598ae8 100644
--- a/libsanitizer/sanitizer_common/sanitizer_suppressions.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_suppressions.cc
@@ -13,13 +13,15 @@
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
namespace __sanitizer {
static const char *const kTypeStrings[SuppressionTypeCount] = {
- "none", "race", "mutex", "thread", "signal", "leak", "called_from_lib"
-};
+ "none", "race", "mutex", "thread", "signal",
+ "leak", "called_from_lib", "deadlock", "vptr_check"};
bool TemplateMatch(char *templ, const char *str) {
if (str == 0 || str[0] == 0)
@@ -63,6 +65,33 @@ bool TemplateMatch(char *templ, const char *str) {
return true;
}
+ALIGNED(64) static char placeholder[sizeof(SuppressionContext)];
+static SuppressionContext *suppression_ctx = 0;
+
+SuppressionContext *SuppressionContext::Get() {
+ CHECK(suppression_ctx);
+ return suppression_ctx;
+}
+
+void SuppressionContext::InitIfNecessary() {
+ if (suppression_ctx)
+ return;
+ suppression_ctx = new(placeholder) SuppressionContext;
+ if (common_flags()->suppressions[0] == '\0')
+ return;
+ char *suppressions_from_file;
+ uptr buffer_size;
+ uptr contents_size =
+ ReadFileToBuffer(common_flags()->suppressions, &suppressions_from_file,
+ &buffer_size, 1 << 26 /* max_len */);
+ if (contents_size == 0) {
+ Printf("%s: failed to read suppressions file '%s'\n", SanitizerToolName,
+ common_flags()->suppressions);
+ Die();
+ }
+ suppression_ctx->Parse(suppressions_from_file);
+}
+
bool SuppressionContext::Match(const char *str, SuppressionType type,
Suppression **s) {
can_parse_ = false;
diff --git a/libsanitizer/sanitizer_common/sanitizer_suppressions.h b/libsanitizer/sanitizer_common/sanitizer_suppressions.h
index b4c719cb18..84ee834a9f 100644
--- a/libsanitizer/sanitizer_common/sanitizer_suppressions.h
+++ b/libsanitizer/sanitizer_common/sanitizer_suppressions.h
@@ -24,6 +24,8 @@ enum SuppressionType {
SuppressionSignal,
SuppressionLeak,
SuppressionLib,
+ SuppressionDeadlock,
+ SuppressionVptrCheck,
SuppressionTypeCount
};
@@ -36,14 +38,21 @@ struct Suppression {
class SuppressionContext {
public:
- SuppressionContext() : suppressions_(1), can_parse_(true) {}
void Parse(const char *str);
bool Match(const char* str, SuppressionType type, Suppression **s);
uptr SuppressionCount() const;
const Suppression *SuppressionAt(uptr i) const;
void GetMatched(InternalMmapVector<Suppression *> *matched);
+ // Create a SuppressionContext singleton if it hasn't been created earlier.
+ // Not thread safe. Must be called early during initialization (but after
+ // runtime flags are parsed).
+ static void InitIfNecessary();
+ // Returns a SuppressionContext singleton.
+ static SuppressionContext *Get();
+
private:
+ SuppressionContext() : suppressions_(1), can_parse_(true) {}
InternalMmapVector<Suppression> suppressions_;
bool can_parse_;
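InitIfNecessary() above builds the singleton with placement new into a static, cache-line-aligned buffer, so no heap allocation is involved and no destructor ever runs at exit. A hedged standalone sketch of that construction pattern (illustrative class, not the runtime's):

#include <new>

// Sketch of the placement-new singleton used for SuppressionContext.
// Not thread safe; meant to run once during early initialization.
class Ctx {
 public:
  static Ctx *Get() { return instance_; }
  static void InitIfNecessary();
 private:
  Ctx() {}
  static Ctx *instance_;
};

alignas(64) static char placeholder[sizeof(Ctx)];  // static storage, no heap
Ctx *Ctx::instance_ = nullptr;

void Ctx::InitIfNecessary() {
  if (instance_) return;
  instance_ = new (placeholder) Ctx();  // construct in place; never destroyed
}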
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc
index f417b087ae..6821e5a326 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc
@@ -20,17 +20,6 @@ Symbolizer *Symbolizer::symbolizer_;
StaticSpinMutex Symbolizer::init_mu_;
LowLevelAllocator Symbolizer::symbolizer_allocator_;
-Symbolizer *Symbolizer::GetOrNull() {
- SpinMutexLock l(&init_mu_);
- return symbolizer_;
-}
-
-Symbolizer *Symbolizer::Get() {
- SpinMutexLock l(&init_mu_);
- RAW_CHECK_MSG(symbolizer_ != 0, "Using uninitialized symbolizer!");
- return symbolizer_;
-}
-
Symbolizer *Symbolizer::Disable() {
CHECK_EQ(0, symbolizer_);
// Initialize a dummy symbolizer.
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
index af93de7508..2ff99372da 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
@@ -25,23 +25,30 @@ namespace __sanitizer {
struct AddressInfo {
uptr address;
+
char *module;
uptr module_offset;
+
+ static const uptr kUnknown = ~(uptr)0;
char *function;
+ uptr function_offset;
+
char *file;
int line;
int column;
AddressInfo() {
internal_memset(this, 0, sizeof(AddressInfo));
+ function_offset = kUnknown;
}
- // Deletes all strings and sets all fields to zero.
+ // Deletes all strings and resets all fields.
void Clear() {
InternalFree(module);
InternalFree(function);
InternalFree(file);
internal_memset(this, 0, sizeof(AddressInfo));
+ function_offset = kUnknown;
}
void FillAddressAndModuleInfo(uptr addr, const char *mod_name,
@@ -52,47 +59,44 @@ struct AddressInfo {
}
};
+// For now, DataInfo is used to describe a global variable.
struct DataInfo {
- uptr address;
char *module;
uptr module_offset;
char *name;
uptr start;
uptr size;
+
+ DataInfo() {
+ internal_memset(this, 0, sizeof(DataInfo));
+ }
+
+ void Clear() {
+ InternalFree(module);
+ InternalFree(name);
+ internal_memset(this, 0, sizeof(DataInfo));
+ }
};
class Symbolizer {
public:
- /// Returns platform-specific implementation of Symbolizer. The symbolizer
- /// must be initialized (with init or disable) before calling this function.
- static Symbolizer *Get();
- /// Returns platform-specific implementation of Symbolizer, or null if not
- /// initialized.
- static Symbolizer *GetOrNull();
- /// Returns platform-specific implementation of Symbolizer. Will
- /// automatically initialize symbolizer as if by calling Init(0) if needed.
+ /// Initialize and return platform-specific implementation of symbolizer
+ /// (if it wasn't already initialized).
static Symbolizer *GetOrInit();
- /// Initialize and return the symbolizer, given an optional path to an
- /// external symbolizer. The path argument is only required for legacy
- /// reasons as this function will check $PATH for an external symbolizer. Not
- /// thread safe.
- static Symbolizer *Init(const char* path_to_external = 0);
- /// Initialize the symbolizer in a disabled state. Not thread safe.
- static Symbolizer *Disable();
// Fills at most "max_frames" elements of "frames" with descriptions
// for a given address (in all inlined functions). Returns the number
// of descriptions actually filled.
- virtual uptr SymbolizeCode(uptr address, AddressInfo *frames,
- uptr max_frames) {
+ virtual uptr SymbolizePC(uptr address, AddressInfo *frames, uptr max_frames) {
return 0;
}
virtual bool SymbolizeData(uptr address, DataInfo *info) {
return false;
}
- virtual bool IsAvailable() {
+ virtual bool GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
+ uptr *module_address) {
return false;
}
- virtual bool IsExternalAvailable() {
+ virtual bool CanReturnFileLineInfo() {
return false;
}
// Release internal caches (if any).
@@ -115,10 +119,9 @@ class Symbolizer {
private:
/// Platform-specific function for creating a Symbolizer object.
- static Symbolizer *PlatformInit(const char *path_to_external);
- /// Create a symbolizer and store it to symbolizer_ without checking if one
- /// already exists. Not thread safe.
- static Symbolizer *CreateAndStore(const char *path_to_external);
+ static Symbolizer *PlatformInit();
+ /// Initialize the symbolizer in a disabled state. Not thread safe.
+ static Symbolizer *Disable();
static Symbolizer *symbolizer_;
static StaticSpinMutex init_mu_;
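The AddressInfo change above memsets the whole struct and then restores function_offset to the kUnknown sentinel, because zero is a legitimate offset. A short sketch of that reset idiom on an illustrative trivially copyable struct (not the runtime's type):

#include <cstdint>
#include <cstring>

struct Info {
  static constexpr uintptr_t kUnknown = ~(uintptr_t)0;  // 0 is a valid offset
  char *function;
  uintptr_t function_offset;

  Info() { Reset(); }
  void Reset() {
    std::memset(this, 0, sizeof(*this));  // zero everything (POD, so OK)...
    function_offset = kUnknown;           // ...except the "not known" sentinel
  }
};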
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc
index 3023002af4..8bae57bfe6 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc
@@ -35,7 +35,7 @@ namespace __sanitizer {
namespace {
-#if SANITIZER_CP_DEMANGLE
+# if SANITIZER_CP_DEMANGLE
struct CplusV3DemangleData {
char *buf;
uptr size, allocated;
@@ -62,13 +62,13 @@ static void CplusV3DemangleCallback(const char *s, size_t l, void *vdata) {
}
} // extern "C"
-char *CplusV3Demangle(const char *name, bool always_alloc) {
+char *CplusV3Demangle(const char *name) {
CplusV3DemangleData data;
data.buf = 0;
data.size = 0;
data.allocated = 0;
if (cplus_demangle_v3_callback(name, DMGL_PARAMS | DMGL_ANSI,
- CplusV3DemangleCallback, &data)) {
+ CplusV3DemangleCallback, &data)) {
if (data.size + 64 > data.allocated)
return data.buf;
char *buf = internal_strdup(data.buf);
@@ -77,17 +77,9 @@ char *CplusV3Demangle(const char *name, bool always_alloc) {
}
if (data.buf)
InternalFree(data.buf);
- if (always_alloc)
- return internal_strdup(name);
- return 0;
-}
-#else
-const char *CplusV3Demangle(const char *name, bool always_alloc) {
- if (always_alloc)
- return internal_strdup(name);
return 0;
}
-#endif
+# endif // SANITIZER_CP_DEMANGLE
struct SymbolizeCodeData {
AddressInfo *frames;
@@ -107,7 +99,7 @@ static int SymbolizeCodePCInfoCallback(void *vdata, uintptr_t addr,
info->Clear();
info->FillAddressAndModuleInfo(addr, cdata->module_name,
cdata->module_offset);
- info->function = CplusV3Demangle(function, true);
+ info->function = LibbacktraceSymbolizer::Demangle(function, true);
if (filename)
info->file = internal_strdup(filename);
info->line = lineno;
@@ -125,7 +117,7 @@ static void SymbolizeCodeCallback(void *vdata, uintptr_t addr,
info->Clear();
info->FillAddressAndModuleInfo(addr, cdata->module_name,
cdata->module_offset);
- info->function = CplusV3Demangle(symname, true);
+ info->function = LibbacktraceSymbolizer::Demangle(symname, true);
cdata->n_frames = 1;
}
}
@@ -134,7 +126,7 @@ static void SymbolizeDataCallback(void *vdata, uintptr_t, const char *symname,
uintptr_t symval, uintptr_t symsize) {
DataInfo *info = (DataInfo *)vdata;
if (symname && symval) {
- info->name = CplusV3Demangle(symname, true);
+ info->name = LibbacktraceSymbolizer::Demangle(symname, true);
info->start = symval;
info->size = symsize;
}
@@ -173,23 +165,12 @@ uptr LibbacktraceSymbolizer::SymbolizeCode(uptr addr, AddressInfo *frames,
return data.n_frames;
}
-bool LibbacktraceSymbolizer::SymbolizeData(DataInfo *info) {
- backtrace_syminfo((backtrace_state *)state_, info->address,
- SymbolizeDataCallback, ErrorCallback, info);
+bool LibbacktraceSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {
+ backtrace_syminfo((backtrace_state *)state_, addr, SymbolizeDataCallback,
+ ErrorCallback, info);
return true;
}
-const char *LibbacktraceSymbolizer::Demangle(const char *name) {
-#if SANITIZER_CP_DEMANGLE
- const char *demangled = CplusV3Demangle(name, false);
- if (demangled)
- return demangled;
- return name;
-#else
- return 0;
-#endif
-}
-
#else // SANITIZER_LIBBACKTRACE
LibbacktraceSymbolizer *LibbacktraceSymbolizer::get(LowLevelAllocator *alloc) {
@@ -204,14 +185,20 @@ uptr LibbacktraceSymbolizer::SymbolizeCode(uptr addr, AddressInfo *frames,
return 0;
}
-bool LibbacktraceSymbolizer::SymbolizeData(DataInfo *info) {
+bool LibbacktraceSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {
return false;
}
-const char *LibbacktraceSymbolizer::Demangle(const char *name) {
+#endif // SANITIZER_LIBBACKTRACE
+
+char *LibbacktraceSymbolizer::Demangle(const char *name, bool always_alloc) {
+#if SANITIZER_LIBBACKTRACE && SANITIZER_CP_DEMANGLE
+ if (char *demangled = CplusV3Demangle(name))
+ return demangled;
+#endif
+ if (always_alloc)
+ return internal_strdup(name);
return 0;
}
-#endif // SANITIZER_LIBBACKTRACE
-
} // namespace __sanitizer
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.h
index 05f0558c3d..aa75a7907d 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.h
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.h
@@ -1,4 +1,4 @@
-//===-- sanitizer_symbolizer_libbacktrace.h -------------------------------===//
+//===-- sanitizer_symbolizer_libbacktrace.h ---------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
@@ -9,6 +9,8 @@
// run-time libraries.
// Header for libbacktrace symbolizer.
//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SYMBOLIZER_LIBBACKTRACE_H
+#define SANITIZER_SYMBOLIZER_LIBBACKTRACE_H
#include "sanitizer_platform.h"
#include "sanitizer_common.h"
@@ -18,6 +20,10 @@
# define SANITIZER_LIBBACKTRACE 0
#endif
+#ifndef SANITIZER_CP_DEMANGLE
+# define SANITIZER_CP_DEMANGLE 0
+#endif
+
namespace __sanitizer {
class LibbacktraceSymbolizer {
@@ -27,9 +33,10 @@ class LibbacktraceSymbolizer {
uptr SymbolizeCode(uptr addr, AddressInfo *frames, uptr max_frames,
const char *module_name, uptr module_offset);
- bool SymbolizeData(DataInfo *info);
+ bool SymbolizeData(uptr addr, DataInfo *info);
- const char *Demangle(const char *name);
+ // May return NULL if demangling failed.
+ static char *Demangle(const char *name, bool always_alloc = false);
private:
explicit LibbacktraceSymbolizer(void *state) : state_(state) {}
@@ -38,3 +45,4 @@ class LibbacktraceSymbolizer {
};
} // namespace __sanitizer
+#endif // SANITIZER_SYMBOLIZER_LIBBACKTRACE_H
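The reshuffled Demangle above is now a single static entry point: try the libiberty C++ V3 demangler when it is compiled in, otherwise optionally fall back to a plain strdup so callers always receive an owned string. A hedged sketch of that fallback shape; try_demangle_v3 is a stand-in for the callback-based demangler, not a real API:

#include <cstring>

// Sketch of the Demangle(name, always_alloc) contract: the result is either
// a freshly allocated demangled name, a freshly allocated copy of the input
// (when always_alloc is set), or null.
char *DemangleSketch(const char *name, bool always_alloc,
                     char *(*try_demangle_v3)(const char *)) {
  if (char *demangled = try_demangle_v3(name))
    return demangled;        // demangler succeeded; caller owns the string
  if (always_alloc)
    return strdup(name);     // guarantee an owned copy even on failure
  return nullptr;            // let the caller decide what to do
}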
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cc
index 2d9caaf4e8..9afd805a82 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cc
@@ -14,24 +14,13 @@
namespace __sanitizer {
-Symbolizer *Symbolizer::CreateAndStore(const char *path_to_external) {
- Symbolizer *platform_symbolizer = PlatformInit(path_to_external);
- if (!platform_symbolizer)
- return Disable();
- symbolizer_ = platform_symbolizer;
- return platform_symbolizer;
-}
-
-Symbolizer *Symbolizer::Init(const char *path_to_external) {
- CHECK_EQ(0, symbolizer_);
- return CreateAndStore(path_to_external);
-}
-
Symbolizer *Symbolizer::GetOrInit() {
SpinMutexLock l(&init_mu_);
- if (symbolizer_ == 0)
- return CreateAndStore(0);
- return symbolizer_;
+ if (symbolizer_)
+ return symbolizer_;
+ if ((symbolizer_ = PlatformInit()))
+ return symbolizer_;
+ return Disable();
}
} // namespace __sanitizer
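GetOrInit() above now performs the whole lazy initialization under one spin lock: return the cached pointer if it exists, otherwise try the platform factory, and fall back to a disabled stub so callers never see null. A minimal sketch of that shape; PlatformInit() and Disable() are stand-ins for the runtime's factories:

#include <mutex>

class Sym {
 public:
  static Sym *GetOrInit() {
    std::lock_guard<std::mutex> l(mu_);
    if (instance_) return instance_;                 // already initialized
    if ((instance_ = PlatformInit())) return instance_;
    return instance_ = Disable();                    // never hand out null
  }
 private:
  static Sym *PlatformInit();   // may return null if unsupported
  static Sym *Disable();        // dummy object that answers "unknown"
  static Sym *instance_;
  static std::mutex mu_;
};

Sym *Sym::instance_ = nullptr;
std::mutex Sym::mu_;
Sym *Sym::PlatformInit() { return nullptr; }              // pretend: unsupported
Sym *Sym::Disable() { static Sym dummy; return &dummy; }  // always-available stub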
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
index 7aead97038..5cc21d3f2b 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
@@ -14,6 +14,7 @@
#if SANITIZER_POSIX
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_linux.h"
#include "sanitizer_placement_new.h"
@@ -52,107 +53,6 @@ static const char *DemangleCXXABI(const char *name) {
return name;
}
-#if defined(__x86_64__)
-static const char* const kSymbolizerArch = "--default-arch=x86_64";
-#elif defined(__i386__)
-static const char* const kSymbolizerArch = "--default-arch=i386";
-#elif defined(__powerpc64__)
-static const char* const kSymbolizerArch = "--default-arch=powerpc64";
-#else
-static const char* const kSymbolizerArch = "--default-arch=unknown";
-#endif
-
-static const int kSymbolizerStartupTimeMillis = 10;
-
-// Creates external symbolizer connected via pipe, user should write
-// to output_fd and read from input_fd.
-static bool StartSymbolizerSubprocess(const char *path_to_symbolizer,
- int *input_fd, int *output_fd) {
- if (!FileExists(path_to_symbolizer)) {
- Report("WARNING: invalid path to external symbolizer!\n");
- return false;
- }
-
- int *infd = NULL;
- int *outfd = NULL;
- // The client program may close its stdin and/or stdout and/or stderr
- // thus allowing socketpair to reuse file descriptors 0, 1 or 2.
- // In this case the communication between the forked processes may be
- // broken if either the parent or the child tries to close or duplicate
- // these descriptors. The loop below produces two pairs of file
- // descriptors, each greater than 2 (stderr).
- int sock_pair[5][2];
- for (int i = 0; i < 5; i++) {
- if (pipe(sock_pair[i]) == -1) {
- for (int j = 0; j < i; j++) {
- internal_close(sock_pair[j][0]);
- internal_close(sock_pair[j][1]);
- }
- Report("WARNING: Can't create a socket pair to start "
- "external symbolizer (errno: %d)\n", errno);
- return false;
- } else if (sock_pair[i][0] > 2 && sock_pair[i][1] > 2) {
- if (infd == NULL) {
- infd = sock_pair[i];
- } else {
- outfd = sock_pair[i];
- for (int j = 0; j < i; j++) {
- if (sock_pair[j] == infd) continue;
- internal_close(sock_pair[j][0]);
- internal_close(sock_pair[j][1]);
- }
- break;
- }
- }
- }
- CHECK(infd);
- CHECK(outfd);
-
- int pid = fork();
- if (pid == -1) {
- // Fork() failed.
- internal_close(infd[0]);
- internal_close(infd[1]);
- internal_close(outfd[0]);
- internal_close(outfd[1]);
- Report("WARNING: failed to fork external symbolizer "
- " (errno: %d)\n", errno);
- return false;
- } else if (pid == 0) {
- // Child subprocess.
- internal_close(STDOUT_FILENO);
- internal_close(STDIN_FILENO);
- internal_dup2(outfd[0], STDIN_FILENO);
- internal_dup2(infd[1], STDOUT_FILENO);
- internal_close(outfd[0]);
- internal_close(outfd[1]);
- internal_close(infd[0]);
- internal_close(infd[1]);
- for (int fd = getdtablesize(); fd > 2; fd--)
- internal_close(fd);
- execl(path_to_symbolizer, path_to_symbolizer, kSymbolizerArch, (char*)0);
- internal__exit(1);
- }
-
- // Continue execution in parent process.
- internal_close(outfd[0]);
- internal_close(infd[1]);
- *input_fd = infd[0];
- *output_fd = outfd[1];
-
- // Check that symbolizer subprocess started successfully.
- int pid_status;
- SleepForMillis(kSymbolizerStartupTimeMillis);
- int exited_pid = waitpid(pid, &pid_status, WNOHANG);
- if (exited_pid != 0) {
- // Either waitpid failed, or child has already exited.
- Report("WARNING: external symbolizer didn't start up correctly!\n");
- return false;
- }
-
- return true;
-}
-
// Extracts the prefix of "str" that consists of any characters not
// present in "delims" string, and copies this prefix to "result", allocating
// space for it.
@@ -192,29 +92,30 @@ static const char *ExtractUptr(const char *str, const char *delims,
return ret;
}
-// ExternalSymbolizer encapsulates communication between the tool and
-// external symbolizer program, running in a different subprocess,
-// For now we assume the following protocol:
-// For each request of the form
-// <module_name> <module_offset>
-// passed to STDIN, external symbolizer prints to STDOUT response:
-// <function_name>
-// <file_name>:<line_number>:<column_number>
-// <function_name>
-// <file_name>:<line_number>:<column_number>
-// ...
-// <empty line>
-// ExternalSymbolizer may not be used from two threads simultaneously.
-class ExternalSymbolizer {
+class ExternalSymbolizerInterface {
+ public:
+ // Can't declare pure virtual functions in sanitizer runtimes:
+ // __cxa_pure_virtual might be unavailable.
+ virtual char *SendCommand(bool is_data, const char *module_name,
+ uptr module_offset) {
+ UNIMPLEMENTED();
+ }
+};
+
+// SymbolizerProcess encapsulates communication between the tool and
+// external symbolizer program, running in a different subprocess.
+// SymbolizerProcess may not be used from two threads simultaneously.
+class SymbolizerProcess : public ExternalSymbolizerInterface {
public:
- explicit ExternalSymbolizer(const char *path)
+ explicit SymbolizerProcess(const char *path)
: path_(path),
input_fd_(kInvalidFd),
output_fd_(kInvalidFd),
times_restarted_(0),
- failed_to_start_(false) {
+ failed_to_start_(false),
+ reported_invalid_path_(false) {
CHECK(path_);
- CHECK_NE(path[0], '\0');
+ CHECK_NE(path_[0], '\0');
}
char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
@@ -231,16 +132,13 @@ class ExternalSymbolizer {
return 0;
}
- void Flush() {
- }
-
private:
bool Restart() {
if (input_fd_ != kInvalidFd)
internal_close(input_fd_);
if (output_fd_ != kInvalidFd)
internal_close(output_fd_);
- return StartSymbolizerSubprocess(path_, &input_fd_, &output_fd_);
+ return StartSymbolizerSubprocess();
}
char *SendCommandImpl(bool is_data, const char *module_name,
@@ -248,8 +146,9 @@ class ExternalSymbolizer {
if (input_fd_ == kInvalidFd || output_fd_ == kInvalidFd)
return 0;
CHECK(module_name);
- internal_snprintf(buffer_, kBufferSize, "%s\"%s\" 0x%zx\n",
- is_data ? "DATA " : "", module_name, module_offset);
+ if (!RenderInputCommand(buffer_, kBufferSize, is_data, module_name,
+ module_offset))
+ return 0;
if (!writeToSymbolizer(buffer_, internal_strlen(buffer_)))
return 0;
if (!readFromSymbolizer(buffer_, kBufferSize))
@@ -263,7 +162,7 @@ class ExternalSymbolizer {
uptr read_len = 0;
while (true) {
uptr just_read = internal_read(input_fd_, buffer + read_len,
- max_length - read_len);
+ max_length - read_len - 1);
// We can't read 0 bytes, as we don't expect external symbolizer to close
// its stdout.
if (just_read == 0 || just_read == (uptr)-1) {
@@ -271,12 +170,10 @@ class ExternalSymbolizer {
return false;
}
read_len += just_read;
- // Empty line marks the end of symbolizer output.
- if (read_len >= 2 && buffer[read_len - 1] == '\n' &&
- buffer[read_len - 2] == '\n') {
+ if (ReachedEndOfOutput(buffer, read_len))
break;
- }
}
+ buffer[read_len] = '\0';
return true;
}
@@ -291,6 +188,110 @@ class ExternalSymbolizer {
return true;
}
+ bool StartSymbolizerSubprocess() {
+ if (!FileExists(path_)) {
+ if (!reported_invalid_path_) {
+ Report("WARNING: invalid path to external symbolizer!\n");
+ reported_invalid_path_ = true;
+ }
+ return false;
+ }
+
+ int *infd = NULL;
+ int *outfd = NULL;
+ // The client program may close its stdin and/or stdout and/or stderr
+ // thus allowing socketpair to reuse file descriptors 0, 1 or 2.
+ // In this case the communication between the forked processes may be
+ // broken if either the parent or the child tries to close or duplicate
+ // these descriptors. The loop below produces two pairs of file
+ // descriptors, each greater than 2 (stderr).
+ int sock_pair[5][2];
+ for (int i = 0; i < 5; i++) {
+ if (pipe(sock_pair[i]) == -1) {
+ for (int j = 0; j < i; j++) {
+ internal_close(sock_pair[j][0]);
+ internal_close(sock_pair[j][1]);
+ }
+ Report("WARNING: Can't create a socket pair to start "
+ "external symbolizer (errno: %d)\n", errno);
+ return false;
+ } else if (sock_pair[i][0] > 2 && sock_pair[i][1] > 2) {
+ if (infd == NULL) {
+ infd = sock_pair[i];
+ } else {
+ outfd = sock_pair[i];
+ for (int j = 0; j < i; j++) {
+ if (sock_pair[j] == infd) continue;
+ internal_close(sock_pair[j][0]);
+ internal_close(sock_pair[j][1]);
+ }
+ break;
+ }
+ }
+ }
+ CHECK(infd);
+ CHECK(outfd);
+
+ // Real fork() may call user callbacks registered with pthread_atfork().
+ int pid = internal_fork();
+ if (pid == -1) {
+ // Fork() failed.
+ internal_close(infd[0]);
+ internal_close(infd[1]);
+ internal_close(outfd[0]);
+ internal_close(outfd[1]);
+ Report("WARNING: failed to fork external symbolizer "
+ " (errno: %d)\n", errno);
+ return false;
+ } else if (pid == 0) {
+ // Child subprocess.
+ internal_close(STDOUT_FILENO);
+ internal_close(STDIN_FILENO);
+ internal_dup2(outfd[0], STDIN_FILENO);
+ internal_dup2(infd[1], STDOUT_FILENO);
+ internal_close(outfd[0]);
+ internal_close(outfd[1]);
+ internal_close(infd[0]);
+ internal_close(infd[1]);
+ for (int fd = sysconf(_SC_OPEN_MAX); fd > 2; fd--)
+ internal_close(fd);
+ ExecuteWithDefaultArgs(path_);
+ internal__exit(1);
+ }
+
+ // Continue execution in parent process.
+ internal_close(outfd[0]);
+ internal_close(infd[1]);
+ input_fd_ = infd[0];
+ output_fd_ = outfd[1];
+
+ // Check that symbolizer subprocess started successfully.
+ int pid_status;
+ SleepForMillis(kSymbolizerStartupTimeMillis);
+ int exited_pid = waitpid(pid, &pid_status, WNOHANG);
+ if (exited_pid != 0) {
+ // Either waitpid failed, or child has already exited.
+ Report("WARNING: external symbolizer didn't start up correctly!\n");
+ return false;
+ }
+
+ return true;
+ }
+
+ virtual bool RenderInputCommand(char *buffer, uptr max_length, bool is_data,
+ const char *module_name,
+ uptr module_offset) const {
+ UNIMPLEMENTED();
+ }
+
+ virtual bool ReachedEndOfOutput(const char *buffer, uptr length) const {
+ UNIMPLEMENTED();
+ }
+
+ virtual void ExecuteWithDefaultArgs(const char *path_to_binary) const {
+ UNIMPLEMENTED();
+ }
+
const char *path_;
int input_fd_;
int output_fd_;
@@ -299,8 +300,127 @@ class ExternalSymbolizer {
char buffer_[kBufferSize];
static const uptr kMaxTimesRestarted = 5;
+ static const int kSymbolizerStartupTimeMillis = 10;
uptr times_restarted_;
bool failed_to_start_;
+ bool reported_invalid_path_;
+};
+
+// For now we assume the following protocol:
+// For each request of the form
+// <module_name> <module_offset>
+// passed to STDIN, external symbolizer prints to STDOUT response:
+// <function_name>
+// <file_name>:<line_number>:<column_number>
+// <function_name>
+// <file_name>:<line_number>:<column_number>
+// ...
+// <empty line>
+class LLVMSymbolizerProcess : public SymbolizerProcess {
+ public:
+ explicit LLVMSymbolizerProcess(const char *path) : SymbolizerProcess(path) {}
+
+ private:
+ bool RenderInputCommand(char *buffer, uptr max_length, bool is_data,
+ const char *module_name, uptr module_offset) const {
+ internal_snprintf(buffer, max_length, "%s\"%s\" 0x%zx\n",
+ is_data ? "DATA " : "", module_name, module_offset);
+ return true;
+ }
+
+ bool ReachedEndOfOutput(const char *buffer, uptr length) const {
+ // Empty line marks the end of llvm-symbolizer output.
+ return length >= 2 && buffer[length - 1] == '\n' &&
+ buffer[length - 2] == '\n';
+ }
+
+ void ExecuteWithDefaultArgs(const char *path_to_binary) const {
+#if defined(__x86_64__)
+ const char* const kSymbolizerArch = "--default-arch=x86_64";
+#elif defined(__i386__)
+ const char* const kSymbolizerArch = "--default-arch=i386";
+#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__)
+ const char* const kSymbolizerArch = "--default-arch=powerpc64";
+#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)
+ const char* const kSymbolizerArch = "--default-arch=powerpc64le";
+#else
+ const char* const kSymbolizerArch = "--default-arch=unknown";
+#endif
+
+ const char *const inline_flag = common_flags()->symbolize_inline_frames
+ ? "--inlining=true"
+ : "--inlining=false";
+ execl(path_to_binary, path_to_binary, inline_flag, kSymbolizerArch,
+ (char *)0);
+ }
+};
+
+class Addr2LineProcess : public SymbolizerProcess {
+ public:
+ Addr2LineProcess(const char *path, const char *module_name)
+ : SymbolizerProcess(path), module_name_(internal_strdup(module_name)) {}
+
+ const char *module_name() const { return module_name_; }
+
+ private:
+ bool RenderInputCommand(char *buffer, uptr max_length, bool is_data,
+ const char *module_name, uptr module_offset) const {
+ if (is_data)
+ return false;
+ CHECK_EQ(0, internal_strcmp(module_name, module_name_));
+ internal_snprintf(buffer, max_length, "0x%zx\n", module_offset);
+ return true;
+ }
+
+ bool ReachedEndOfOutput(const char *buffer, uptr length) const {
+ // Output should consist of two lines.
+ int num_lines = 0;
+ for (uptr i = 0; i < length; ++i) {
+ if (buffer[i] == '\n')
+ num_lines++;
+ if (num_lines >= 2)
+ return true;
+ }
+ return false;
+ }
+
+ void ExecuteWithDefaultArgs(const char *path_to_binary) const {
+ execl(path_to_binary, path_to_binary, "-Cfe", module_name_, (char *)0);
+ }
+
+ const char *module_name_; // Owned, leaked.
+};
+
+class Addr2LinePool : public ExternalSymbolizerInterface {
+ public:
+ explicit Addr2LinePool(const char *addr2line_path,
+ LowLevelAllocator *allocator)
+ : addr2line_path_(addr2line_path), allocator_(allocator),
+ addr2line_pool_(16) {}
+
+ char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
+ if (is_data)
+ return 0;
+ Addr2LineProcess *addr2line = 0;
+ for (uptr i = 0; i < addr2line_pool_.size(); ++i) {
+ if (0 ==
+ internal_strcmp(module_name, addr2line_pool_[i]->module_name())) {
+ addr2line = addr2line_pool_[i];
+ break;
+ }
+ }
+ if (!addr2line) {
+ addr2line =
+ new(*allocator_) Addr2LineProcess(addr2line_path_, module_name);
+ addr2line_pool_.push_back(addr2line);
+ }
+ return addr2line->SendCommand(is_data, module_name, module_offset);
+ }
+
+ private:
+ const char *addr2line_path_;
+ LowLevelAllocator *allocator_;
+ InternalMmapVector<Addr2LineProcess*> addr2line_pool_;
};
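
In the addr2line path above, one process is started per module as addr2line -Cfe <module> and then fed raw offsets on stdin; each answer is exactly two lines (function name, then file:line), which is what ReachedEndOfOutput checks for. A hypothetical exchange (paths and names are illustrative only):

    // started as: addr2line -Cfe /usr/lib/libfoo.so
    // stdin  -> 0x4a2f
    // stdout <- LogMessage
    //           /src/foo/logging.cc:87
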
#if SANITIZER_SUPPORTS_WEAK_HOOKS
@@ -384,7 +504,7 @@ class InternalSymbolizer {
class POSIXSymbolizer : public Symbolizer {
public:
- POSIXSymbolizer(ExternalSymbolizer *external_symbolizer,
+ POSIXSymbolizer(ExternalSymbolizerInterface *external_symbolizer,
InternalSymbolizer *internal_symbolizer,
LibbacktraceSymbolizer *libbacktrace_symbolizer)
: Symbolizer(),
@@ -392,15 +512,14 @@ class POSIXSymbolizer : public Symbolizer {
internal_symbolizer_(internal_symbolizer),
libbacktrace_symbolizer_(libbacktrace_symbolizer) {}
- uptr SymbolizeCode(uptr addr, AddressInfo *frames, uptr max_frames) {
+ uptr SymbolizePC(uptr addr, AddressInfo *frames, uptr max_frames) {
BlockingMutexLock l(&mu_);
if (max_frames == 0)
return 0;
- LoadedModule *module = FindModuleForAddress(addr);
- if (module == 0)
+ const char *module_name;
+ uptr module_offset;
+ if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset))
return 0;
- const char *module_name = module->full_name();
- uptr module_offset = addr - module->base_address();
// First, try to use libbacktrace symbolizer (if it's available).
if (libbacktrace_symbolizer_ != 0) {
mu_.CheckLocked();
@@ -468,13 +587,13 @@ class POSIXSymbolizer : public Symbolizer {
return false;
const char *module_name = module->full_name();
uptr module_offset = addr - module->base_address();
- internal_memset(info, 0, sizeof(*info));
- info->address = addr;
+ info->Clear();
info->module = internal_strdup(module_name);
info->module_offset = module_offset;
+ // First, try to use libbacktrace symbolizer (if it's available).
if (libbacktrace_symbolizer_ != 0) {
mu_.CheckLocked();
- if (libbacktrace_symbolizer_->SymbolizeData(info))
+ if (libbacktrace_symbolizer_->SymbolizeData(addr, info))
return true;
}
const char *str = SendCommand(true, module_name, module_offset);
@@ -487,13 +606,15 @@ class POSIXSymbolizer : public Symbolizer {
return true;
}
- bool IsAvailable() {
- return internal_symbolizer_ != 0 || external_symbolizer_ != 0 ||
- libbacktrace_symbolizer_ != 0;
+ bool GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
+ uptr *module_address) {
+ BlockingMutexLock l(&mu_);
+ return FindModuleNameAndOffsetForAddress(pc, module_name, module_address);
}
- bool IsExternalAvailable() {
- return external_symbolizer_ != 0;
+ bool CanReturnFileLineInfo() {
+ return internal_symbolizer_ != 0 || external_symbolizer_ != 0 ||
+ libbacktrace_symbolizer_ != 0;
}
void Flush() {
@@ -502,8 +623,6 @@ class POSIXSymbolizer : public Symbolizer {
SymbolizerScope sym_scope(this);
internal_symbolizer_->Flush();
}
- if (external_symbolizer_ != 0)
- external_symbolizer_->Flush();
}
const char *Demangle(const char *name) {
@@ -511,13 +630,13 @@ class POSIXSymbolizer : public Symbolizer {
// Run hooks even if we don't use internal symbolizer, as cxxabi
// demangle may call system functions.
SymbolizerScope sym_scope(this);
- if (internal_symbolizer_ != 0)
- return internal_symbolizer_->Demangle(name);
+ // Try to use libbacktrace demangler (if available).
if (libbacktrace_symbolizer_ != 0) {
- const char *demangled = libbacktrace_symbolizer_->Demangle(name);
- if (demangled)
- return demangled;
+ if (const char *demangled = libbacktrace_symbolizer_->Demangle(name))
+ return demangled;
}
+ if (internal_symbolizer_ != 0)
+ return internal_symbolizer_->Demangle(name);
return DemangleCXXABI(name);
}
@@ -540,6 +659,7 @@ class POSIXSymbolizer : public Symbolizer {
}
// Otherwise, fall back to external symbolizer.
if (external_symbolizer_) {
+ SymbolizerScope sym_scope(this);
return external_symbolizer_->SendCommand(is_data, module_name,
module_offset);
}
@@ -555,8 +675,7 @@ class POSIXSymbolizer : public Symbolizer {
CHECK(modules_);
n_modules_ = GetListOfModules(modules_, kMaxNumberOfModuleContexts,
/* filter */ 0);
- // FIXME: Return this check when GetListOfModules is implemented on Mac.
- // CHECK_GT(n_modules_, 0);
+ CHECK_GT(n_modules_, 0);
CHECK_LT(n_modules_, kMaxNumberOfModuleContexts);
modules_fresh_ = true;
modules_were_reloaded = true;
@@ -577,6 +696,17 @@ class POSIXSymbolizer : public Symbolizer {
return 0;
}
+ bool FindModuleNameAndOffsetForAddress(uptr address, const char **module_name,
+ uptr *module_offset) {
+ mu_.CheckLocked();
+ LoadedModule *module = FindModuleForAddress(address);
+ if (module == 0)
+ return false;
+ *module_name = module->full_name();
+ *module_offset = address - module->base_address();
+ return true;
+ }
+
// 16K loaded modules should be enough for everyone.
static const uptr kMaxNumberOfModuleContexts = 1 << 14;
LoadedModule *modules_; // Array of module descriptions is leaked.
@@ -585,27 +715,42 @@ class POSIXSymbolizer : public Symbolizer {
bool modules_fresh_;
BlockingMutex mu_;
- ExternalSymbolizer *external_symbolizer_; // Leaked.
- InternalSymbolizer *const internal_symbolizer_; // Leaked.
- LibbacktraceSymbolizer *libbacktrace_symbolizer_; // Leaked.
+ ExternalSymbolizerInterface *external_symbolizer_; // Leaked.
+ InternalSymbolizer *const internal_symbolizer_; // Leaked.
+ LibbacktraceSymbolizer *libbacktrace_symbolizer_; // Leaked.
};
-Symbolizer *Symbolizer::PlatformInit(const char *path_to_external) {
+Symbolizer *Symbolizer::PlatformInit() {
+ if (!common_flags()->symbolize) {
+ return new(symbolizer_allocator_) POSIXSymbolizer(0, 0, 0);
+ }
InternalSymbolizer* internal_symbolizer =
InternalSymbolizer::get(&symbolizer_allocator_);
- ExternalSymbolizer *external_symbolizer = 0;
+ ExternalSymbolizerInterface *external_symbolizer = 0;
LibbacktraceSymbolizer *libbacktrace_symbolizer = 0;
if (!internal_symbolizer) {
libbacktrace_symbolizer =
LibbacktraceSymbolizer::get(&symbolizer_allocator_);
if (!libbacktrace_symbolizer) {
- // Find path to llvm-symbolizer if it's not provided.
- if (!path_to_external)
- path_to_external = FindPathToBinary("llvm-symbolizer");
- if (path_to_external && path_to_external[0] != '\0')
- external_symbolizer = new(symbolizer_allocator_)
- ExternalSymbolizer(path_to_external);
+ const char *path_to_external = common_flags()->external_symbolizer_path;
+ if (path_to_external && path_to_external[0] == '\0') {
+ // External symbolizer is explicitly disabled. Do nothing.
+ } else {
+ // Find path to llvm-symbolizer if it's not provided.
+ if (!path_to_external)
+ path_to_external = FindPathToBinary("llvm-symbolizer");
+ if (path_to_external) {
+ external_symbolizer = new(symbolizer_allocator_)
+ LLVMSymbolizerProcess(path_to_external);
+ } else if (common_flags()->allow_addr2line) {
+ // If llvm-symbolizer is not found, try to use addr2line.
+ if (const char *addr2line_path = FindPathToBinary("addr2line")) {
+ external_symbolizer = new(symbolizer_allocator_)
+ Addr2LinePool(addr2line_path, &symbolizer_allocator_);
+ }
+ }
+ }
}
}
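
As the selection logic above shows, the external symbolizer is driven entirely by flags: an empty external_symbolizer_path disables it, a non-empty path selects llvm-symbolizer at that location, and an unset path falls back to a PATH search, with addr2line as a last resort when allow_addr2line is set. A minimal summary of the fall-back order (the flag-passing mechanism, e.g. the tool's runtime options string, is assumed here and not shown in this hunk):

    // external_symbolizer_path=""      -> external symbolization disabled
    // external_symbolizer_path=<path>  -> LLVMSymbolizerProcess(<path>)
    // <unset>                          -> FindPathToBinary("llvm-symbolizer")
    // <unset> + allow_addr2line=1      -> Addr2LinePool(FindPathToBinary("addr2line"), ...)
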
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc
index 446de8af29..a1ed4e9a7b 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc
@@ -12,11 +12,113 @@
#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS
+#include <windows.h>
+#include <dbghelp.h>
+#pragma comment(lib, "dbghelp.lib")
+
#include "sanitizer_symbolizer.h"
namespace __sanitizer {
-Symbolizer *Symbolizer::PlatformInit(const char *path_to_external) { return 0; }
+class WinSymbolizer : public Symbolizer {
+ public:
+ WinSymbolizer() : initialized_(false) {}
+
+ uptr SymbolizePC(uptr addr, AddressInfo *frames, uptr max_frames) {
+ if (max_frames == 0)
+ return 0;
+
+ BlockingMutexLock l(&dbghelp_mu_);
+ if (!initialized_) {
+ if (!TrySymInitialize()) {
+ // OK, maybe the client app has called SymInitialize already.
+ // That's a bit unfortunate for us as all the DbgHelp functions are
+ // single-threaded and we can't coordinate with the app.
+ // FIXME: Can we stop the other threads at this point?
+ // Anyway, we have to reconfigure things to make sure that SymInitialize
+ // has all the appropriate options set.
+ // Cross our fingers and reinitialize DbgHelp.
+ Report("*** WARNING: Failed to initialize DbgHelp! ***\n");
+ Report("*** Most likely this means that the app is already ***\n");
+ Report("*** using DbgHelp, possibly with incompatible flags. ***\n");
+ Report("*** Due to technical reasons, symbolization might crash ***\n");
+ Report("*** or produce wrong results. ***\n");
+ SymCleanup(GetCurrentProcess());
+ TrySymInitialize();
+ }
+ initialized_ = true;
+ }
+
+ // See http://msdn.microsoft.com/en-us/library/ms680578(VS.85).aspx
+ char buffer[sizeof(SYMBOL_INFO) + MAX_SYM_NAME * sizeof(CHAR)];
+ PSYMBOL_INFO symbol = (PSYMBOL_INFO)buffer;
+ symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
+ symbol->MaxNameLen = MAX_SYM_NAME;
+ DWORD64 offset = 0;
+ BOOL got_objname = SymFromAddr(GetCurrentProcess(),
+ (DWORD64)addr, &offset, symbol);
+ if (!got_objname)
+ return 0;
+
+ DWORD unused;
+ IMAGEHLP_LINE64 line_info;
+ line_info.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
+ BOOL got_fileline = SymGetLineFromAddr64(GetCurrentProcess(), (DWORD64)addr,
+ &unused, &line_info);
+ AddressInfo *info = &frames[0];
+ info->Clear();
+ info->function = internal_strdup(symbol->Name);
+ info->function_offset = (uptr)offset;
+ if (got_fileline) {
+ info->file = internal_strdup(line_info.FileName);
+ info->line = line_info.LineNumber;
+ }
+
+ IMAGEHLP_MODULE64 mod_info;
+ internal_memset(&mod_info, 0, sizeof(mod_info));
+ mod_info.SizeOfStruct = sizeof(mod_info);
+ if (SymGetModuleInfo64(GetCurrentProcess(), addr, &mod_info))
+ info->FillAddressAndModuleInfo(addr, mod_info.ImageName,
+ addr - (uptr)mod_info.BaseOfImage);
+ return 1;
+ }
+
+ bool CanReturnFileLineInfo() {
+ return true;
+ }
+
+ const char *Demangle(const char *name) {
+ CHECK(initialized_);
+ static char demangle_buffer[1000];
+ if (name[0] == '\01' &&
+ UnDecorateSymbolName(name + 1, demangle_buffer, sizeof(demangle_buffer),
+ UNDNAME_NAME_ONLY))
+ return demangle_buffer;
+ else
+ return name;
+ }
+
+ // FIXME: Implement GetModuleNameAndOffsetForPC().
+
+ private:
+ bool TrySymInitialize() {
+ SymSetOptions(SYMOPT_DEFERRED_LOADS | SYMOPT_UNDNAME | SYMOPT_LOAD_LINES);
+ return SymInitialize(GetCurrentProcess(), 0, TRUE);
+ // FIXME: We don't call SymCleanup() on exit yet - should we?
+ }
+
+ // All DbgHelp functions are single threaded, so we should use a mutex to
+ // serialize accesses.
+ BlockingMutex dbghelp_mu_;
+ bool initialized_;
+};
+
+Symbolizer *Symbolizer::PlatformInit() {
+ static bool called_once = false;
+ CHECK(!called_once && "Shouldn't create more than one symbolizer");
+ called_once = true;
+ return new(symbolizer_allocator_) WinSymbolizer();
+}
} // namespace __sanitizer
diff --git a/libsanitizer/sanitizer_common/sanitizer_syscall_generic.inc b/libsanitizer/sanitizer_common/sanitizer_syscall_generic.inc
index 6b2c915a3b..7667b753a8 100644
--- a/libsanitizer/sanitizer_common/sanitizer_syscall_generic.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_syscall_generic.inc
@@ -9,7 +9,17 @@
//
//===----------------------------------------------------------------------===//
-#define internal_syscall syscall
+#if SANITIZER_FREEBSD
+# define SYSCALL(name) SYS_ ## name
+#else
+# define SYSCALL(name) __NR_ ## name
+#endif
+
+#if SANITIZER_FREEBSD && defined(__x86_64__)
+# define internal_syscall __syscall
+# else
+# define internal_syscall syscall
+#endif
bool internal_iserror(uptr retval, int *rverrno) {
if (retval == (uptr)-1) {
diff --git a/libsanitizer/sanitizer_common/sanitizer_syscall_linux_x86_64.inc b/libsanitizer/sanitizer_common/sanitizer_syscall_linux_x86_64.inc
index 8810c7faa4..b610d66be4 100644
--- a/libsanitizer/sanitizer_common/sanitizer_syscall_linux_x86_64.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_syscall_linux_x86_64.inc
@@ -9,6 +9,8 @@
//
//===----------------------------------------------------------------------===//
+#define SYSCALL(name) __NR_ ## name
+
static uptr internal_syscall(u64 nr) {
u64 retval;
asm volatile("syscall" : "=a"(retval) : "a"(nr) : "rcx", "r11",
diff --git a/libsanitizer/sanitizer_common/sanitizer_thread_registry.cc b/libsanitizer/sanitizer_common/sanitizer_thread_registry.cc
index 666955f6c9..bfa610443c 100644
--- a/libsanitizer/sanitizer_common/sanitizer_thread_registry.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_thread_registry.cc
@@ -15,8 +15,9 @@
namespace __sanitizer {
ThreadContextBase::ThreadContextBase(u32 tid)
- : tid(tid), unique_id(0), os_id(0), user_id(0), status(ThreadStatusInvalid),
- detached(false), reuse_count(0), parent_tid(0), next(0) {
+ : tid(tid), unique_id(0), reuse_count(), os_id(0), user_id(0),
+ status(ThreadStatusInvalid),
+ detached(false), parent_tid(0), next(0) {
name[0] = '\0';
}
@@ -76,7 +77,6 @@ void ThreadContextBase::SetCreated(uptr _user_id, u64 _unique_id,
void ThreadContextBase::Reset() {
status = ThreadStatusInvalid;
- reuse_count++;
SetName(0);
OnReset();
}
@@ -86,10 +86,11 @@ void ThreadContextBase::Reset() {
const u32 ThreadRegistry::kUnknownTid = ~0U;
ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
- u32 thread_quarantine_size)
+ u32 thread_quarantine_size, u32 max_reuse)
: context_factory_(factory),
max_threads_(max_threads),
thread_quarantine_size_(thread_quarantine_size),
+ max_reuse_(max_reuse),
mtx_(),
n_contexts_(0),
total_threads_(0),
@@ -128,8 +129,13 @@ u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
tctx = context_factory_(tid);
threads_[tid] = tctx;
} else {
+#ifndef SANITIZER_GO
Report("%s: Thread limit (%u threads) exceeded. Dying.\n",
SanitizerToolName, max_threads_);
+#else
+ Printf("race: limit on %u simultaneously alive goroutines is exceeded,"
+ " dying\n", max_threads_);
+#endif
Die();
}
CHECK_NE(tctx, 0);
@@ -210,7 +216,7 @@ void ThreadRegistry::SetThreadNameByUserId(uptr user_id, const char *name) {
}
}
-void ThreadRegistry::DetachThread(u32 tid) {
+void ThreadRegistry::DetachThread(u32 tid, void *arg) {
BlockingMutexLock l(&mtx_);
CHECK_LT(tid, n_contexts_);
ThreadContextBase *tctx = threads_[tid];
@@ -219,6 +225,7 @@ void ThreadRegistry::DetachThread(u32 tid) {
Report("%s: Detach of non-existent thread\n", SanitizerToolName);
return;
}
+ tctx->OnDetached(arg);
if (tctx->status == ThreadStatusFinished) {
tctx->SetDead();
QuarantinePush(tctx);
@@ -275,6 +282,9 @@ void ThreadRegistry::QuarantinePush(ThreadContextBase *tctx) {
dead_threads_.pop_front();
CHECK_EQ(tctx->status, ThreadStatusDead);
tctx->Reset();
+ tctx->reuse_count++;
+ if (max_reuse_ > 0 && tctx->reuse_count >= max_reuse_)
+ return;
invalid_threads_.push_back(tctx);
}
diff --git a/libsanitizer/sanitizer_common/sanitizer_thread_registry.h b/libsanitizer/sanitizer_common/sanitizer_thread_registry.h
index 81c270945d..d5a741bbfc 100644
--- a/libsanitizer/sanitizer_common/sanitizer_thread_registry.h
+++ b/libsanitizer/sanitizer_common/sanitizer_thread_registry.h
@@ -36,13 +36,13 @@ class ThreadContextBase {
const u32 tid; // Thread ID. Main thread should have tid = 0.
u64 unique_id; // Unique thread ID.
+ u32 reuse_count; // Number of times this tid was reused.
uptr os_id; // PID (used for reporting).
uptr user_id; // Some opaque user thread id (e.g. pthread_t).
char name[64]; // As annotated by user.
ThreadStatus status;
bool detached;
- int reuse_count;
u32 parent_tid;
ThreadContextBase *next; // For storing thread contexts in a list.
@@ -66,6 +66,7 @@ class ThreadContextBase {
virtual void OnStarted(void *arg) {}
virtual void OnCreated(void *arg) {}
virtual void OnReset() {}
+ virtual void OnDetached(void *arg) {}
};
typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid);
@@ -75,7 +76,7 @@ class ThreadRegistry {
static const u32 kUnknownTid;
ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
- u32 thread_quarantine_size);
+ u32 thread_quarantine_size, u32 max_reuse = 0);
void GetNumberOfThreads(uptr *total = 0, uptr *running = 0, uptr *alive = 0);
uptr GetMaxAliveThreads();
@@ -108,7 +109,7 @@ class ThreadRegistry {
void SetThreadName(u32 tid, const char *name);
void SetThreadNameByUserId(uptr user_id, const char *name);
- void DetachThread(u32 tid);
+ void DetachThread(u32 tid, void *arg);
void JoinThread(u32 tid, void *arg);
void FinishThread(u32 tid);
void StartThread(u32 tid, uptr os_id, void *arg);
@@ -117,6 +118,7 @@ class ThreadRegistry {
const ThreadContextFactory context_factory_;
const u32 max_threads_;
const u32 thread_quarantine_size_;
+ const u32 max_reuse_;
BlockingMutex mtx_;
diff --git a/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc
new file mode 100644
index 0000000000..af828045b5
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc
@@ -0,0 +1,129 @@
+//===-- sanitizer_tls_get_addr.cc -----------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Handle the __tls_get_addr call.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_tls_get_addr.h"
+
+#include "sanitizer_flags.h"
+#include "sanitizer_platform_interceptors.h"
+
+namespace __sanitizer {
+#if SANITIZER_INTERCEPT_TLS_GET_ADDR
+
+// The actual parameter that comes to __tls_get_addr
+// is a pointer to a struct with two words in it:
+struct TlsGetAddrParam {
+ uptr dso_id;
+ uptr offset;
+};
+
+// Glibc starting from 2.19 allocates TLS using __signal_safe_memalign,
+// which has such a header.
+struct Glibc_2_19_tls_header {
+ uptr size;
+ uptr start;
+};
+
+// This must be static TLS
+__attribute__((tls_model("initial-exec")))
+static __thread DTLS dtls;
+
+// Make sure we properly destroy the DTLS objects:
+// this counter should never get too large.
+static atomic_uintptr_t number_of_live_dtls;
+
+static const uptr kDestroyedThread = -1;
+
+static inline void DTLS_Deallocate(DTLS::DTV *dtv, uptr size) {
+ if (!size) return;
+ VPrintf(2, "__tls_get_addr: DTLS_Deallocate %p %zd\n", dtv, size);
+ UnmapOrDie(dtv, size * sizeof(DTLS::DTV));
+ atomic_fetch_sub(&number_of_live_dtls, 1, memory_order_relaxed);
+}
+
+static inline void DTLS_Resize(uptr new_size) {
+ if (dtls.dtv_size >= new_size) return;
+ new_size = RoundUpToPowerOfTwo(new_size);
+ new_size = Max(new_size, 4096UL / sizeof(DTLS::DTV));
+ DTLS::DTV *new_dtv =
+ (DTLS::DTV *)MmapOrDie(new_size * sizeof(DTLS::DTV), "DTLS_Resize");
+ uptr num_live_dtls =
+ atomic_fetch_add(&number_of_live_dtls, 1, memory_order_relaxed);
+ VPrintf(2, "__tls_get_addr: DTLS_Resize %p %zd\n", &dtls, num_live_dtls);
+ CHECK_LT(num_live_dtls, 1 << 20);
+ uptr old_dtv_size = dtls.dtv_size;
+ DTLS::DTV *old_dtv = dtls.dtv;
+ if (old_dtv_size)
+ internal_memcpy(new_dtv, dtls.dtv, dtls.dtv_size * sizeof(DTLS::DTV));
+ dtls.dtv = new_dtv;
+ dtls.dtv_size = new_size;
+ if (old_dtv_size)
+ DTLS_Deallocate(old_dtv, old_dtv_size);
+}
+
+void DTLS_Destroy() {
+ if (!common_flags()->intercept_tls_get_addr) return;
+ VPrintf(2, "__tls_get_addr: DTLS_Destroy %p %zd\n", &dtls, dtls.dtv_size);
+ uptr s = dtls.dtv_size;
+ dtls.dtv_size = kDestroyedThread; // Do this before unmap for AS-safety.
+ DTLS_Deallocate(dtls.dtv, s);
+}
+
+DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res) {
+ if (!common_flags()->intercept_tls_get_addr) return 0;
+ TlsGetAddrParam *arg = reinterpret_cast<TlsGetAddrParam *>(arg_void);
+ uptr dso_id = arg->dso_id;
+ if (dtls.dtv_size == kDestroyedThread) return 0;
+ DTLS_Resize(dso_id + 1);
+ if (dtls.dtv[dso_id].beg) return 0;
+ uptr tls_size = 0;
+ uptr tls_beg = reinterpret_cast<uptr>(res) - arg->offset;
+ VPrintf(2, "__tls_get_addr: %p {%p,%p} => %p; tls_beg: %p; sp: %p "
+ "num_live_dtls %zd\n",
+ arg, arg->dso_id, arg->offset, res, tls_beg, &tls_beg,
+ atomic_load(&number_of_live_dtls, memory_order_relaxed));
+ if (dtls.last_memalign_ptr == tls_beg) {
+ tls_size = dtls.last_memalign_size;
+ VPrintf(2, "__tls_get_addr: glibc <=2.18 suspected; tls={%p,%p}\n",
+ tls_beg, tls_size);
+ } else if ((tls_beg % 4096) == sizeof(Glibc_2_19_tls_header)) {
+ // We may want to check gnu_get_libc_version().
+ Glibc_2_19_tls_header *header = (Glibc_2_19_tls_header *)tls_beg - 1;
+ tls_size = header->size;
+ tls_beg = header->start;
+ VPrintf(2, "__tls_get_addr: glibc >=2.19 suspected; tls={%p %p}\n",
+ tls_beg, tls_size);
+ } else {
+ VPrintf(2, "__tls_get_addr: Can't guess glibc version\n");
+ // This may happen inside the DTOR of main thread, so just ignore it.
+ tls_size = 0;
+ }
+ dtls.dtv[dso_id].beg = tls_beg;
+ dtls.dtv[dso_id].size = tls_size;
+ return dtls.dtv + dso_id;
+}
+
+void DTLS_on_libc_memalign(void *ptr, uptr size) {
+ if (!common_flags()->intercept_tls_get_addr) return;
+ VPrintf(2, "DTLS_on_libc_memalign: %p %p\n", ptr, size);
+ dtls.last_memalign_ptr = reinterpret_cast<uptr>(ptr);
+ dtls.last_memalign_size = size;
+}
+
+DTLS *DTLS_Get() { return &dtls; }
+
+#else
+void DTLS_on_libc_memalign(void *ptr, uptr size) {}
+DTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res) { return 0; }
+DTLS *DTLS_Get() { return 0; }
+void DTLS_Destroy() {}
+#endif // SANITIZER_INTERCEPT_TLS_GET_ADDR
+
+} // namespace __sanitizer
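
For context, here is a minimal sketch of how a tool might wire this machinery up through the interception layer; it is not the actual interceptor from any particular tool, and the registration step is only indicated by a comment:

    INTERCEPTOR(void *, __tls_get_addr, void *arg) {
      void *res = REAL(__tls_get_addr)(arg);
      // DTLS_on_tls_get_addr returns a DTV entry the first time a dynamic TLS
      // block for this DSO is observed in the current thread, and 0 otherwise.
      DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res);
      if (dtv) {
        // dtv->beg / dtv->size describe the newly observed TLS block; a tool
        // would typically register or unpoison this range here.
      }
      return res;
    }
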
diff --git a/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.h b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.h
new file mode 100644
index 0000000000..8dc629f630
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.h
@@ -0,0 +1,58 @@
+//===-- sanitizer_tls_get_addr.h --------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Handle the __tls_get_addr call.
+//
+// All this magic is specific to glibc and is required to work around
+// the lack of interface that would tell us about the Dynamic TLS (DTLS).
+// https://sourceware.org/bugzilla/show_bug.cgi?id=16291
+//
+// Matters get worse because the glibc implementation changed between
+// 2.18 and 2.19:
+// https://groups.google.com/forum/#!topic/address-sanitizer/BfwYD8HMxTM
+//
+// Before 2.19, every DTLS chunk is allocated with __libc_memalign,
+// which we intercept and thus know where the DTLS is.
+// Since 2.19, DTLS chunks are allocated with __signal_safe_memalign,
+// which is an internal function that wraps a mmap call, neither of which
+// we can intercept. Luckily, __signal_safe_memalign has a simple parseable
+// header which we can use.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_TLS_GET_ADDR_H
+#define SANITIZER_TLS_GET_ADDR_H
+
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+struct DTLS {
+ // Array of DTLS chunks for the current Thread.
+ // If beg == 0, the chunk is unused.
+ struct DTV {
+ uptr beg, size;
+ };
+
+ uptr dtv_size;
+ DTV *dtv; // dtv_size elements, allocated by MmapOrDie.
+
+ // Auxiliary fields, don't access them outside sanitizer_tls_get_addr.cc
+ uptr last_memalign_size;
+ uptr last_memalign_ptr;
+};
+
+// Returns pointer and size of a linker-allocated TLS block.
+// Each block is returned exactly once.
+DTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res);
+void DTLS_on_libc_memalign(void *ptr, uptr size);
+DTLS *DTLS_Get();
+void DTLS_Destroy(); // Make sure to call this before the thread is destroyed.
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_TLS_GET_ADDR_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_unwind_posix_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_unwind_posix_libcdep.cc
new file mode 100644
index 0000000000..b2ca931e9d
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_unwind_posix_libcdep.cc
@@ -0,0 +1,151 @@
+//===-- sanitizer_unwind_posix.cc ----------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the unwind.h-based (aka "slow") stack unwinding routines
+// available to the tools on Linux, Android, FreeBSD and OS X.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_POSIX
+#include "sanitizer_common.h"
+#include "sanitizer_stacktrace.h"
+
+#if SANITIZER_ANDROID
+#include <dlfcn.h> // for dlopen()
+#endif
+
+#if SANITIZER_FREEBSD
+#define _GNU_SOURCE // to declare _Unwind_Backtrace() from <unwind.h>
+#endif
+#include <unwind.h>
+
+namespace __sanitizer {
+
+//------------------------- SlowUnwindStack -----------------------------------
+
+typedef struct {
+ uptr absolute_pc;
+ uptr stack_top;
+ uptr stack_size;
+} backtrace_frame_t;
+
+extern "C" {
+typedef void *(*acquire_my_map_info_list_func)();
+typedef void (*release_my_map_info_list_func)(void *map);
+typedef sptr (*unwind_backtrace_signal_arch_func)(
+ void *siginfo, void *sigcontext, void *map_info_list,
+ backtrace_frame_t *backtrace, uptr ignore_depth, uptr max_depth);
+acquire_my_map_info_list_func acquire_my_map_info_list;
+release_my_map_info_list_func release_my_map_info_list;
+unwind_backtrace_signal_arch_func unwind_backtrace_signal_arch;
+} // extern "C"
+
+#if SANITIZER_ANDROID
+void SanitizerInitializeUnwinder() {
+ void *p = dlopen("libcorkscrew.so", RTLD_LAZY);
+ if (!p) {
+ VReport(1,
+ "Failed to open libcorkscrew.so. You may see broken stack traces "
+ "in SEGV reports.");
+ return;
+ }
+ acquire_my_map_info_list =
+ (acquire_my_map_info_list_func)(uptr)dlsym(p, "acquire_my_map_info_list");
+ release_my_map_info_list =
+ (release_my_map_info_list_func)(uptr)dlsym(p, "release_my_map_info_list");
+ unwind_backtrace_signal_arch = (unwind_backtrace_signal_arch_func)(uptr)dlsym(
+ p, "unwind_backtrace_signal_arch");
+ if (!acquire_my_map_info_list || !release_my_map_info_list ||
+ !unwind_backtrace_signal_arch) {
+ VReport(1,
+ "Failed to find one of the required symbols in libcorkscrew.so. "
+ "You may see broken stack traces in SEGV reports.");
+ acquire_my_map_info_list = 0;
+ unwind_backtrace_signal_arch = 0;
+ release_my_map_info_list = 0;
+ }
+}
+#endif
+
+#ifdef __arm__
+#define UNWIND_STOP _URC_END_OF_STACK
+#define UNWIND_CONTINUE _URC_NO_REASON
+#else
+#define UNWIND_STOP _URC_NORMAL_STOP
+#define UNWIND_CONTINUE _URC_NO_REASON
+#endif
+
+uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
+#ifdef __arm__
+ uptr val;
+ _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
+ 15 /* r15 = PC */, _UVRSD_UINT32, &val);
+ CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
+ // Clear the Thumb bit.
+ return val & ~(uptr)1;
+#else
+ return _Unwind_GetIP(ctx);
+#endif
+}
+
+struct UnwindTraceArg {
+ BufferedStackTrace *stack;
+ uptr max_depth;
+};
+
+_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
+ UnwindTraceArg *arg = (UnwindTraceArg*)param;
+ CHECK_LT(arg->stack->size, arg->max_depth);
+ uptr pc = Unwind_GetIP(ctx);
+ arg->stack->trace_buffer[arg->stack->size++] = pc;
+ if (arg->stack->size == arg->max_depth) return UNWIND_STOP;
+ return UNWIND_CONTINUE;
+}
+
+void BufferedStackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
+ CHECK_GE(max_depth, 2);
+ size = 0;
+ UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
+ _Unwind_Backtrace(Unwind_Trace, &arg);
+ // We need to pop a few frames so that pc is on top.
+ uptr to_pop = LocatePcInTrace(pc);
+ // trace_buffer[0] belongs to the current function so we always pop it.
+ if (to_pop == 0 && size > 1)
+ to_pop = 1;
+ PopStackFrames(to_pop);
+ trace_buffer[0] = pc;
+}
+
+void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
+ uptr max_depth) {
+ CHECK_GE(max_depth, 2);
+ if (!unwind_backtrace_signal_arch) {
+ SlowUnwindStack(pc, max_depth);
+ return;
+ }
+
+ void *map = acquire_my_map_info_list();
+ CHECK(map);
+ InternalScopedBuffer<backtrace_frame_t> frames(kStackTraceMax);
+ // siginfo argument appears to be unused.
+ sptr res = unwind_backtrace_signal_arch(/* siginfo */ 0, context, map,
+ frames.data(),
+ /* ignore_depth */ 0, max_depth);
+ release_my_map_info_list(map);
+ if (res < 0) return;
+ CHECK_LE((uptr)res, kStackTraceMax);
+
+ size = 0;
+ // The +2 compensates for the libcorkscrew unwinder returning addresses of
+ // call instructions instead of raw return addresses.
+ for (sptr i = 0; i < res; ++i)
+ trace_buffer[size++] = frames[i].absolute_pc + 2;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_POSIX
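
To make the frame-popping step in SlowUnwindStack above concrete, here is a hypothetical trace before and after the adjustment (the function names are illustrative only):

    // raw _Unwind_Backtrace result:
    //   [0] SlowUnwindStack      <- the unwinder's own frame
    //   [1] GetStackTrace        <- runtime helper
    //   [2] faulting_function    <- this is 'pc'
    //   [3] caller, ...
    // LocatePcInTrace(pc) returns 2, PopStackFrames(2) drops the runtime
    // frames, and trace_buffer[0] is then overwritten with 'pc' itself.
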
diff --git a/libsanitizer/sanitizer_common/sanitizer_win.cc b/libsanitizer/sanitizer_common/sanitizer_win.cc
index c48274e364..fbccef19cb 100644
--- a/libsanitizer/sanitizer_common/sanitizer_win.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_win.cc
@@ -15,9 +15,10 @@
#define WIN32_LEAN_AND_MEAN
#define NOGDI
-#include <stdlib.h>
-#include <io.h>
#include <windows.h>
+#include <dbghelp.h>
+#include <io.h>
+#include <stdlib.h>
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
@@ -62,6 +63,7 @@ uptr GetThreadSelf() {
return GetTid();
}
+#if !SANITIZER_GO
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
uptr *stack_bottom) {
CHECK(stack_top);
@@ -74,12 +76,14 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
*stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
*stack_bottom = (uptr)mbi.AllocationBase;
}
+#endif // #if !SANITIZER_GO
void *MmapOrDie(uptr size, const char *mem_type) {
void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
if (rv == 0) {
- Report("ERROR: Failed to allocate 0x%zx (%zd) bytes of %s\n",
- size, size, mem_type);
+ Report("ERROR: %s failed to "
+ "allocate 0x%zx (%zd) bytes of %s (error code: %d)\n",
+ SanitizerToolName, size, size, mem_type, GetLastError());
CHECK("unable to mmap" && 0);
}
return rv;
@@ -87,8 +91,9 @@ void *MmapOrDie(uptr size, const char *mem_type) {
void UnmapOrDie(void *addr, uptr size) {
if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
- Report("ERROR: Failed to deallocate 0x%zx (%zd) bytes at address %p\n",
- size, size, addr);
+ Report("ERROR: %s failed to "
+ "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n",
+ SanitizerToolName, size, size, addr, GetLastError());
CHECK("unable to unmap" && 0);
}
}
@@ -99,8 +104,9 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
void *p = VirtualAlloc((LPVOID)fixed_addr, size,
MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
if (p == 0)
- Report("ERROR: Failed to allocate 0x%zx (%zd) bytes at %p (%d)\n",
- size, size, fixed_addr, GetLastError());
+ Report("ERROR: %s failed to "
+ "allocate %p (%zd) bytes at %p (error code: %d)\n",
+ SanitizerToolName, size, size, fixed_addr, GetLastError());
return p;
}
@@ -108,6 +114,11 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
return MmapFixedNoReserve(fixed_addr, size);
}
+void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
+ // FIXME: make this really NoReserve?
+ return MmapOrDie(size, mem_type);
+}
+
void *Mprotect(uptr fixed_addr, uptr size) {
return VirtualAlloc((LPVOID)fixed_addr, size,
MEM_RESERVE | MEM_COMMIT, PAGE_NOACCESS);
@@ -127,6 +138,10 @@ void *MapFileToMemory(const char *file_name, uptr *buff_size) {
UNIMPLEMENTED();
}
+void *MapWritableFileToMemory(void *addr, uptr size, uptr fd, uptr offset) {
+ UNIMPLEMENTED();
+}
+
static const int kMaxEnvNameLength = 128;
static const DWORD kMaxEnvValueLength = 32767;
@@ -174,15 +189,16 @@ void DumpProcessMap() {
UNIMPLEMENTED();
}
-void DisableCoreDumper() {
- UNIMPLEMENTED();
+void DisableCoreDumperIfNecessary() {
+ // Do nothing.
}
void ReExec() {
UNIMPLEMENTED();
}
-void PrepareForSandboxing() {
+void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
+ (void)args;
// Nothing here for now.
}
@@ -194,6 +210,14 @@ void SetStackSizeLimitInBytes(uptr limit) {
UNIMPLEMENTED();
}
+bool AddressSpaceIsUnlimited() {
+ UNIMPLEMENTED();
+}
+
+void SetAddressSpaceUnlimited() {
+ UNIMPLEMENTED();
+}
+
char *FindPathToBinary(const char *name) {
// Nothing here for now.
return 0;
@@ -264,13 +288,48 @@ uptr internal_read(fd_t fd, void *buf, uptr count) {
uptr internal_write(fd_t fd, const void *buf, uptr count) {
if (fd != kStderrFd)
UNIMPLEMENTED();
- HANDLE err = GetStdHandle(STD_ERROR_HANDLE);
- if (err == 0)
- return 0; // FIXME: this might not work on some apps.
- DWORD ret;
- if (!WriteFile(err, buf, count, &ret, 0))
+
+ static HANDLE output_stream = 0;
+ // Abort immediately if we know printing is not possible.
+ if (output_stream == INVALID_HANDLE_VALUE)
return 0;
- return ret;
+
+ // If called for the first time, try to use stderr to output stuff,
+ // falling back to stdout if anything goes wrong.
+ bool fallback_to_stdout = false;
+ if (output_stream == 0) {
+ output_stream = GetStdHandle(STD_ERROR_HANDLE);
+ // We don't distinguish "no such handle" from error.
+ if (output_stream == 0)
+ output_stream = INVALID_HANDLE_VALUE;
+
+ if (output_stream == INVALID_HANDLE_VALUE) {
+ // Retry with stdout?
+ output_stream = GetStdHandle(STD_OUTPUT_HANDLE);
+ if (output_stream == 0)
+ output_stream = INVALID_HANDLE_VALUE;
+ if (output_stream == INVALID_HANDLE_VALUE)
+ return 0;
+ } else {
+ // Successfully got a stderr handle. However, if WriteFile() fails,
+ // we can still try to fallback to stdout.
+ fallback_to_stdout = true;
+ }
+ }
+
+ DWORD ret;
+ if (WriteFile(output_stream, buf, count, &ret, 0))
+ return ret;
+
+ // Re-try with stdout if using a valid stderr handle fails.
+ if (fallback_to_stdout) {
+ output_stream = GetStdHandle(STD_OUTPUT_HANDLE);
+ if (output_stream == 0)
+ output_stream = INVALID_HANDLE_VALUE;
+ if (output_stream != INVALID_HANDLE_VALUE)
+ return internal_write(fd, buf, count);
+ }
+ return 0;
}
uptr internal_stat(const char *path, void *buf) {
@@ -306,6 +365,14 @@ void internal__exit(int exitcode) {
ExitProcess(exitcode);
}
+uptr internal_ftruncate(fd_t fd, uptr size) {
+ UNIMPLEMENTED();
+}
+
+uptr internal_rename(const char *oldpath, const char *newpath) {
+ UNIMPLEMENTED();
+}
+
// ---------------------- BlockingMutex ---------------- {{{1
const uptr LOCK_UNINITIALIZED = 0;
const uptr LOCK_READY = (uptr)-1;
@@ -374,17 +441,51 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
#endif
}
-void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
+#if !SANITIZER_GO
+void BufferedStackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
+ CHECK_GE(max_depth, 2);
// FIXME: CaptureStackBackTrace might be too slow for us.
// FIXME: Compare with StackWalk64.
// FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc
size = CaptureStackBackTrace(2, Min(max_depth, kStackTraceMax),
(void**)trace, 0);
+ if (size == 0)
+ return;
+
// Skip the RTL frames by searching for the PC in the stacktrace.
uptr pc_location = LocatePcInTrace(pc);
PopStackFrames(pc_location);
}
+void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
+ uptr max_depth) {
+ CONTEXT ctx = *(CONTEXT *)context;
+ STACKFRAME64 stack_frame;
+ memset(&stack_frame, 0, sizeof(stack_frame));
+ size = 0;
+#if defined(_WIN64)
+ int machine_type = IMAGE_FILE_MACHINE_AMD64;
+ stack_frame.AddrPC.Offset = ctx.Rip;
+ stack_frame.AddrFrame.Offset = ctx.Rbp;
+ stack_frame.AddrStack.Offset = ctx.Rsp;
+#else
+ int machine_type = IMAGE_FILE_MACHINE_I386;
+ stack_frame.AddrPC.Offset = ctx.Eip;
+ stack_frame.AddrFrame.Offset = ctx.Ebp;
+ stack_frame.AddrStack.Offset = ctx.Esp;
+#endif
+ stack_frame.AddrPC.Mode = AddrModeFlat;
+ stack_frame.AddrFrame.Mode = AddrModeFlat;
+ stack_frame.AddrStack.Mode = AddrModeFlat;
+ while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(),
+ &stack_frame, &ctx, NULL, &SymFunctionTableAccess64,
+ &SymGetModuleBase64, NULL) &&
+ size < Min(max_depth, kStackTraceMax)) {
+ trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset;
+ }
+}
+#endif // #if !SANITIZER_GO
+
void MaybeOpenReportFile() {
// Windows doesn't have native fork, and we don't support Cygwin or other
// environments that try to fake it, so the initial report_fd will always be
@@ -392,8 +493,6 @@ void MaybeOpenReportFile() {
}
void RawWrite(const char *buffer) {
- static const char *kRawWriteError =
- "RawWrite can't output requested buffer!\n";
uptr length = (uptr)internal_strlen(buffer);
if (length != internal_write(report_fd, buffer, length)) {
// stderr may be closed, but we may be able to print to the debugger
@@ -403,6 +502,29 @@ void RawWrite(const char *buffer) {
}
}
+void SetAlternateSignalStack() {
+ // FIXME: Decide what to do on Windows.
+}
+
+void UnsetAlternateSignalStack() {
+ // FIXME: Decide what to do on Windows.
+}
+
+void InstallDeadlySignalHandlers(SignalHandlerType handler) {
+ (void)handler;
+ // FIXME: Decide what to do on Windows.
+}
+
+bool IsDeadlySignal(int signum) {
+ // FIXME: Decide what to do on Windows.
+ return false;
+}
+
+bool IsAccessibleMemoryRange(uptr beg, uptr size) {
+ // FIXME: Actually implement this function.
+ return true;
+}
+
} // namespace __sanitizer
#endif // _WIN32
diff --git a/libsanitizer/tsan/Makefile.am b/libsanitizer/tsan/Makefile.am
index 39ed2529c2..abfafb783f 100644
--- a/libsanitizer/tsan/Makefile.am
+++ b/libsanitizer/tsan/Makefile.am
@@ -6,42 +6,46 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+AM_CXXFLAGS += -std=gnu++11
ACLOCAL_AMFLAGS = -I m4
toolexeclib_LTLIBRARIES = libtsan.la
tsan_files = \
- tsan_clock.cc \
- tsan_interface_atomic.cc \
- tsan_mutex.cc \
- tsan_report.cc \
- tsan_rtl_thread.cc \
- tsan_symbolize.cc \
- tsan_flags.cc \
- tsan_interface.cc \
- tsan_platform_linux.cc \
- tsan_rtl.cc \
- tsan_stat.cc \
- tsan_sync.cc \
- tsan_ignoreset.cc \
- tsan_interceptors.cc \
- tsan_md5.cc \
- tsan_platform_mac.cc \
- tsan_rtl_mutex.cc \
- tsan_suppressions.cc \
- tsan_interface_ann.cc \
- tsan_mman.cc \
- tsan_rtl_report.cc \
+ tsan_clock.cc \
tsan_fd.cc \
- tsan_interface_java.cc \
- tsan_mutexset.cc \
- tsan_symbolize_addr2line_linux.cc \
- tsan_rtl_amd64.S
+ tsan_flags.cc \
+ tsan_ignoreset.cc \
+ tsan_interceptors.cc \
+ tsan_interface_ann.cc \
+ tsan_interface_atomic.cc \
+ tsan_interface.cc \
+ tsan_interface_java.cc \
+ tsan_md5.cc \
+ tsan_mman.cc \
+ tsan_mutex.cc \
+ tsan_mutexset.cc \
+ tsan_platform_linux.cc \
+ tsan_platform_mac.cc \
+ tsan_platform_windows.cc \
+ tsan_report.cc \
+ tsan_rtl.cc \
+ tsan_rtl_mutex.cc \
+ tsan_rtl_report.cc \
+ tsan_rtl_thread.cc \
+ tsan_stack_trace.cc \
+ tsan_stat.cc \
+ tsan_suppressions.cc \
+ tsan_symbolize.cc \
+ tsan_sync.cc
-libtsan_la_SOURCES = $(tsan_files)
-libtsan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/interception/libinterception.la
+libtsan_la_SOURCES = $(tsan_files)
+EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S
+libtsan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/interception/libinterception.la $(TSAN_TARGET_DEPENDENT_OBJECTS)
+libtsan_la_DEPENDENCIES = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/interception/libinterception.la $(TSAN_TARGET_DEPENDENT_OBJECTS)
if LIBBACKTRACE_SUPPORTED
libtsan_la_LIBADD += $(top_builddir)/libbacktrace/libsanitizer_libbacktrace.la
+libtsan_la_DEPENDENCIES +=$(top_builddir)/libbacktrace/libsanitizer_libbacktrace.la
endif
libtsan_la_LIBADD += $(LIBSTDCXX_RAW_CXX_LDFLAGS)
libtsan_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` $(link_libtsan)
diff --git a/libsanitizer/tsan/Makefile.in b/libsanitizer/tsan/Makefile.in
index 01c27b9b7d..c4e649008e 100644
--- a/libsanitizer/tsan/Makefile.in
+++ b/libsanitizer/tsan/Makefile.in
@@ -36,6 +36,7 @@ build_triplet = @build@
host_triplet = @host@
target_triplet = @target@
@LIBBACKTRACE_SUPPORTED_TRUE@am__append_1 = $(top_builddir)/libbacktrace/libsanitizer_libbacktrace.la
+@LIBBACKTRACE_SUPPORTED_TRUE@am__append_2 = $(top_builddir)/libbacktrace/libsanitizer_libbacktrace.la
subdir = tsan
DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
@@ -80,19 +81,15 @@ am__base_list = \
am__installdirs = "$(DESTDIR)$(toolexeclibdir)"
LTLIBRARIES = $(toolexeclib_LTLIBRARIES)
am__DEPENDENCIES_1 =
-libtsan_la_DEPENDENCIES = \
- $(top_builddir)/sanitizer_common/libsanitizer_common.la \
- $(top_builddir)/interception/libinterception.la \
- $(am__append_1) $(am__DEPENDENCIES_1)
-am__objects_1 = tsan_clock.lo tsan_interface_atomic.lo tsan_mutex.lo \
- tsan_report.lo tsan_rtl_thread.lo tsan_symbolize.lo \
- tsan_flags.lo tsan_interface.lo tsan_platform_linux.lo \
- tsan_rtl.lo tsan_stat.lo tsan_sync.lo tsan_ignoreset.lo \
- tsan_interceptors.lo tsan_md5.lo tsan_platform_mac.lo \
- tsan_rtl_mutex.lo tsan_suppressions.lo tsan_interface_ann.lo \
- tsan_mman.lo tsan_rtl_report.lo tsan_fd.lo \
- tsan_interface_java.lo tsan_mutexset.lo \
- tsan_symbolize_addr2line_linux.lo tsan_rtl_amd64.lo
+am__objects_1 = tsan_clock.lo tsan_fd.lo tsan_flags.lo \
+ tsan_ignoreset.lo tsan_interceptors.lo tsan_interface_ann.lo \
+ tsan_interface_atomic.lo tsan_interface.lo \
+ tsan_interface_java.lo tsan_md5.lo tsan_mman.lo tsan_mutex.lo \
+ tsan_mutexset.lo tsan_platform_linux.lo tsan_platform_mac.lo \
+ tsan_platform_windows.lo tsan_report.lo tsan_rtl.lo \
+ tsan_rtl_mutex.lo tsan_rtl_report.lo tsan_rtl_thread.lo \
+ tsan_stack_trace.lo tsan_stat.lo tsan_suppressions.lo \
+ tsan_symbolize.lo tsan_sync.lo
am_libtsan_la_OBJECTS = $(am__objects_1)
libtsan_la_OBJECTS = $(am_libtsan_la_OBJECTS)
libtsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
@@ -125,7 +122,7 @@ CCLD = $(CC)
LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
$(LDFLAGS) -o $@
-SOURCES = $(libtsan_la_SOURCES)
+SOURCES = $(libtsan_la_SOURCES) $(EXTRA_libtsan_la_SOURCES)
ETAGS = etags
CTAGS = ctags
ACLOCAL = @ACLOCAL@
@@ -201,6 +198,7 @@ SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
+TSAN_TARGET_DEPENDENT_OBJECTS = @TSAN_TARGET_DEPENDENT_OBJECTS@
VERSION = @VERSION@
VIEW_FILE = @VIEW_FILE@
abs_builddir = @abs_builddir@
@@ -276,42 +274,49 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
-Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
-fomit-frame-pointer -funwind-tables -fvisibility=hidden \
- -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+ -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) \
+ -std=gnu++11
ACLOCAL_AMFLAGS = -I m4
toolexeclib_LTLIBRARIES = libtsan.la
tsan_files = \
- tsan_clock.cc \
- tsan_interface_atomic.cc \
- tsan_mutex.cc \
- tsan_report.cc \
- tsan_rtl_thread.cc \
- tsan_symbolize.cc \
- tsan_flags.cc \
- tsan_interface.cc \
- tsan_platform_linux.cc \
- tsan_rtl.cc \
- tsan_stat.cc \
- tsan_sync.cc \
- tsan_ignoreset.cc \
- tsan_interceptors.cc \
- tsan_md5.cc \
- tsan_platform_mac.cc \
- tsan_rtl_mutex.cc \
- tsan_suppressions.cc \
- tsan_interface_ann.cc \
- tsan_mman.cc \
- tsan_rtl_report.cc \
+ tsan_clock.cc \
tsan_fd.cc \
- tsan_interface_java.cc \
- tsan_mutexset.cc \
- tsan_symbolize_addr2line_linux.cc \
- tsan_rtl_amd64.S
-
-libtsan_la_SOURCES = $(tsan_files)
+ tsan_flags.cc \
+ tsan_ignoreset.cc \
+ tsan_interceptors.cc \
+ tsan_interface_ann.cc \
+ tsan_interface_atomic.cc \
+ tsan_interface.cc \
+ tsan_interface_java.cc \
+ tsan_md5.cc \
+ tsan_mman.cc \
+ tsan_mutex.cc \
+ tsan_mutexset.cc \
+ tsan_platform_linux.cc \
+ tsan_platform_mac.cc \
+ tsan_platform_windows.cc \
+ tsan_report.cc \
+ tsan_rtl.cc \
+ tsan_rtl_mutex.cc \
+ tsan_rtl_report.cc \
+ tsan_rtl_thread.cc \
+ tsan_stack_trace.cc \
+ tsan_stat.cc \
+ tsan_suppressions.cc \
+ tsan_symbolize.cc \
+ tsan_sync.cc
+
+libtsan_la_SOURCES = $(tsan_files)
+EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S
libtsan_la_LIBADD = \
$(top_builddir)/sanitizer_common/libsanitizer_common.la \
$(top_builddir)/interception/libinterception.la \
- $(am__append_1) $(LIBSTDCXX_RAW_CXX_LDFLAGS)
+ $(TSAN_TARGET_DEPENDENT_OBJECTS) $(am__append_1) \
+ $(LIBSTDCXX_RAW_CXX_LDFLAGS)
+libtsan_la_DEPENDENCIES = \
+ $(top_builddir)/sanitizer_common/libsanitizer_common.la \
+ $(top_builddir)/interception/libinterception.la \
+ $(TSAN_TARGET_DEPENDENT_OBJECTS) $(am__append_2)
libtsan_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` $(link_libtsan)
# Work around what appears to be a GNU make bug handling MAKEFLAGS
@@ -442,16 +447,17 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_mutexset.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_platform_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_platform_mac.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_platform_windows.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_report.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_amd64.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_mutex.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_report.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_thread.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_stack_trace.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_stat.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_suppressions.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_symbolize.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_symbolize_addr2line_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_sync.Plo@am__quote@
.S.o:
diff --git a/libsanitizer/tsan/tsan_clock.cc b/libsanitizer/tsan/tsan_clock.cc
index 5d45a5d15f..a84caa952a 100644
--- a/libsanitizer/tsan/tsan_clock.cc
+++ b/libsanitizer/tsan/tsan_clock.cc
@@ -10,100 +10,418 @@
//===----------------------------------------------------------------------===//
#include "tsan_clock.h"
#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
-// It's possible to optimize clock operations for some important cases
-// so that they are O(1). The cases include singletons, once's, local mutexes.
-// First, SyncClock must be re-implemented to allow indexing by tid.
-// It must not necessarily be a full vector clock, though. For example it may
-// be a multi-level table.
-// Then, each slot in SyncClock must contain a dirty bit (it's united with
-// the clock value, so no space increase). The acquire algorithm looks
-// as follows:
-// void acquire(thr, tid, thr_clock, sync_clock) {
-// if (!sync_clock[tid].dirty)
-// return; // No new info to acquire.
-// // This handles constant reads of singleton pointers and
-// // stop-flags.
-// acquire_impl(thr_clock, sync_clock); // As usual, O(N).
-// sync_clock[tid].dirty = false;
-// sync_clock.dirty_count--;
-// }
-// The release operation looks as follows:
-// void release(thr, tid, thr_clock, sync_clock) {
-// // thr->sync_cache is a simple fixed-size hash-based cache that holds
-// // several previous sync_clock's.
-// if (thr->sync_cache[sync_clock] >= thr->last_acquire_epoch) {
-// // The thread did no acquire operations since last release on this clock.
-// // So update only the thread's slot (other slots can't possibly change).
-// sync_clock[tid].clock = thr->epoch;
-// if (sync_clock.dirty_count == sync_clock.cnt
-// || (sync_clock.dirty_count == sync_clock.cnt - 1
-// && sync_clock[tid].dirty == false))
-// // All dirty flags are set, bail out.
-// return;
-// set all dirty bits, but preserve the thread's bit. // O(N)
-// update sync_clock.dirty_count;
-// return;
+// SyncClock and ThreadClock implement vector clocks for sync variables
+// (mutexes, atomic variables, file descriptors, etc.) and threads, respectively.
+// ThreadClock contains a fixed-size vector clock for the maximum number of threads.
+// SyncClock contains a growable vector clock for the currently necessary number
+// of threads.
+// Together they implement a very simple model of operations, namely:
+//
+// void ThreadClock::acquire(const SyncClock *src) {
+// for (int i = 0; i < kMaxThreads; i++)
+// clock[i] = max(clock[i], src->clock[i]);
+// }
+//
+// void ThreadClock::release(SyncClock *dst) const {
+// for (int i = 0; i < kMaxThreads; i++)
+// dst->clock[i] = max(dst->clock[i], clock[i]);
+// }
+//
+// void ThreadClock::ReleaseStore(SyncClock *dst) const {
+// for (int i = 0; i < kMaxThreads; i++)
+// dst->clock[i] = clock[i];
// }
-// release_impl(thr_clock, sync_clock); // As usual, O(N).
-// set all dirty bits, but preserve the thread's bit.
-// // The previous step is combined with release_impl(), so that
-// // we scan the arrays only once.
-// update sync_clock.dirty_count;
-// }
+//
+// void ThreadClock::acq_rel(SyncClock *dst) {
+// acquire(dst);
+// release(dst);
+// }
+//
+// Conformance to this model is extensively verified in tsan_clock_test.cc.
+// However, the implementation is significantly more complex. The added
+// complexity makes it possible to handle important classes of use cases in
+// O(1) instead of O(N).
+//
+// The use cases are:
+// 1. Singleton/once atomic that has a single release-store operation followed
+// by zillions of acquire-loads (the acquire-load is O(1)).
+// 2. Thread-local mutex (both lock and unlock can be O(1)).
+// 3. Leaf mutex (unlock is O(1)).
+// 4. A mutex shared by 2 threads (both lock and unlock can be O(1)).
+// 5. An atomic with a single writer (writes can be O(1)).
+// The implementation dynamically adapts to the workload. So if an atomic is in
+// a read-only phase, these reads will be O(1); if it later switches to a
+// read/write phase, the implementation correctly handles that by switching to O(N).
+//
+// Thread-safety note: all const operations on SyncClock's are conducted under
+// a shared lock; all non-const operations on SyncClock's are conducted under
+// an exclusive lock; ThreadClock's are private to respective threads and so
+// do not need any protection.
+//
+// Description of ThreadClock state:
+// clk_ - fixed size vector clock.
+// nclk_ - effective size of the vector clock (the rest is zeros).
+// tid_ - index of the thread associated with the clock ("current thread").
+// last_acquire_ - current thread time when it acquired something from
+// other threads.
+//
+// Description of SyncClock state:
+// clk_ - variable-size vector clock; the low kClkBits hold the timestamp,
+// the remaining bits hold the "acquired" flag (the actual value is the
+// thread's reused counter);
+// if acquired == thr->reused_, then the respective thread has already
+// acquired this clock (except possibly dirty_tids_).
+// dirty_tids_ - holds up to two indices in the vector clock that other threads
+// need to acquire regardless of the "acquired" flag value;
+// release_store_tid_ - denotes that the clock state is the result of a
+// release-store operation by the thread with index release_store_tid_.
+// release_store_reused_ - reuse count of release_store_tid_.
+
+// We don't have ThreadState in these methods, so this is an ugly hack that
+// works only in C++.
+#ifndef TSAN_GO
+# define CPP_STAT_INC(typ) StatInc(cur_thread(), typ)
+#else
+# define CPP_STAT_INC(typ) (void)0
+#endif
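
As a concrete illustration of the simple acquire/release model described above (ignoring the O(1) fast paths and the "acquired"/dirty bookkeeping), consider two threads and one sync variable; the epoch values are made up:

    // T0.clk = {5, 0}   T1.clk = {0, 3}   S.clk = {0, 0}
    // T0 releases S (e.g. mutex unlock):  S.clk  = max(S.clk, T0.clk) = {5, 0}
    // T1 acquires S (e.g. mutex lock):    T1.clk = max(T1.clk, S.clk) = {5, 3}
    // After the acquire, T1's clock covers everything T0 did up to epoch 5,
    // which is exactly the happens-before edge the race detector relies on.
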
namespace __tsan {
-ThreadClock::ThreadClock() {
- nclk_ = 0;
- for (uptr i = 0; i < (uptr)kMaxTidInClock; i++)
- clk_[i] = 0;
+const unsigned kInvalidTid = (unsigned)-1;
+
+ThreadClock::ThreadClock(unsigned tid, unsigned reused)
+ : tid_(tid)
+ , reused_(reused + 1) { // 0 has special meaning
+ CHECK_LT(tid, kMaxTidInClock);
+ CHECK_EQ(reused_, ((u64)reused_ << kClkBits) >> kClkBits);
+ nclk_ = tid_ + 1;
+ last_acquire_ = 0;
+ internal_memset(clk_, 0, sizeof(clk_));
+ clk_[tid_].reused = reused_;
}
-void ThreadClock::acquire(const SyncClock *src) {
+void ThreadClock::acquire(ClockCache *c, const SyncClock *src) {
DCHECK(nclk_ <= kMaxTid);
- DCHECK(src->clk_.Size() <= kMaxTid);
+ DCHECK(src->size_ <= kMaxTid);
+ CPP_STAT_INC(StatClockAcquire);
- const uptr nclk = src->clk_.Size();
- if (nclk == 0)
+ // Check if it's empty -> no need to do anything.
+ const uptr nclk = src->size_;
+ if (nclk == 0) {
+ CPP_STAT_INC(StatClockAcquireEmpty);
return;
+ }
+
+ // Check if we've already acquired src after the last release operation on src
+ bool acquired = false;
+ if (nclk > tid_) {
+ CPP_STAT_INC(StatClockAcquireLarge);
+ if (src->elem(tid_).reused == reused_) {
+ CPP_STAT_INC(StatClockAcquireRepeat);
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ unsigned tid = src->dirty_tids_[i];
+ if (tid != kInvalidTid) {
+ u64 epoch = src->elem(tid).epoch;
+ if (clk_[tid].epoch < epoch) {
+ clk_[tid].epoch = epoch;
+ acquired = true;
+ }
+ }
+ }
+ if (acquired) {
+ CPP_STAT_INC(StatClockAcquiredSomething);
+ last_acquire_ = clk_[tid_].epoch;
+ }
+ return;
+ }
+ }
+
+ // O(N) acquire.
+ CPP_STAT_INC(StatClockAcquireFull);
nclk_ = max(nclk_, nclk);
for (uptr i = 0; i < nclk; i++) {
- if (clk_[i] < src->clk_[i])
- clk_[i] = src->clk_[i];
+ u64 epoch = src->elem(i).epoch;
+ if (clk_[i].epoch < epoch) {
+ clk_[i].epoch = epoch;
+ acquired = true;
+ }
+ }
+
+ // Remember that this thread has acquired this clock.
+ if (nclk > tid_)
+ src->elem(tid_).reused = reused_;
+
+ if (acquired) {
+ CPP_STAT_INC(StatClockAcquiredSomething);
+ last_acquire_ = clk_[tid_].epoch;
}
}
-void ThreadClock::release(SyncClock *dst) const {
- DCHECK(nclk_ <= kMaxTid);
- DCHECK(dst->clk_.Size() <= kMaxTid);
+void ThreadClock::release(ClockCache *c, SyncClock *dst) const {
+ DCHECK_LE(nclk_, kMaxTid);
+ DCHECK_LE(dst->size_, kMaxTid);
+
+ if (dst->size_ == 0) {
+ // ReleaseStore will correctly set release_store_tid_,
+ // which can be important for future operations.
+ ReleaseStore(c, dst);
+ return;
+ }
+
+ CPP_STAT_INC(StatClockRelease);
+ // Check if we need to resize dst.
+ if (dst->size_ < nclk_)
+ dst->Resize(c, nclk_);
- if (dst->clk_.Size() < nclk_)
- dst->clk_.Resize(nclk_);
+ // Check if we have not acquired anything from other threads
+ // since the last release on dst. If so, we only need to update
+ // dst->elem(tid_).
+ if (dst->elem(tid_).epoch > last_acquire_) {
+ UpdateCurrentThread(dst);
+ if (dst->release_store_tid_ != tid_ ||
+ dst->release_store_reused_ != reused_)
+ dst->release_store_tid_ = kInvalidTid;
+ return;
+ }
+
+ // O(N) release.
+ CPP_STAT_INC(StatClockReleaseFull);
+ // First, remember whether we've acquired dst.
+ bool acquired = IsAlreadyAcquired(dst);
+ if (acquired)
+ CPP_STAT_INC(StatClockReleaseAcquired);
+ // Update dst->clk_.
for (uptr i = 0; i < nclk_; i++) {
- if (dst->clk_[i] < clk_[i])
- dst->clk_[i] = clk_[i];
+ ClockElem &ce = dst->elem(i);
+ ce.epoch = max(ce.epoch, clk_[i].epoch);
+ ce.reused = 0;
}
+ // Clear 'acquired' flag in the remaining elements.
+ if (nclk_ < dst->size_)
+ CPP_STAT_INC(StatClockReleaseClearTail);
+ for (uptr i = nclk_; i < dst->size_; i++)
+ dst->elem(i).reused = 0;
+ for (unsigned i = 0; i < kDirtyTids; i++)
+ dst->dirty_tids_[i] = kInvalidTid;
+ dst->release_store_tid_ = kInvalidTid;
+ dst->release_store_reused_ = 0;
+ // If we've acquired dst, remember this fact,
+ // so that we don't need to acquire it on next acquire.
+ if (acquired)
+ dst->elem(tid_).reused = reused_;
}
-void ThreadClock::ReleaseStore(SyncClock *dst) const {
+void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) const {
DCHECK(nclk_ <= kMaxTid);
- DCHECK(dst->clk_.Size() <= kMaxTid);
+ DCHECK(dst->size_ <= kMaxTid);
+ CPP_STAT_INC(StatClockStore);
- if (dst->clk_.Size() < nclk_)
- dst->clk_.Resize(nclk_);
- for (uptr i = 0; i < nclk_; i++)
- dst->clk_[i] = clk_[i];
- for (uptr i = nclk_; i < dst->clk_.Size(); i++)
- dst->clk_[i] = 0;
+ // Check if we need to resize dst.
+ if (dst->size_ < nclk_)
+ dst->Resize(c, nclk_);
+
+ if (dst->release_store_tid_ == tid_ &&
+ dst->release_store_reused_ == reused_ &&
+ dst->elem(tid_).epoch > last_acquire_) {
+ CPP_STAT_INC(StatClockStoreFast);
+ UpdateCurrentThread(dst);
+ return;
+ }
+
+ // O(N) release-store.
+ CPP_STAT_INC(StatClockStoreFull);
+ for (uptr i = 0; i < nclk_; i++) {
+ ClockElem &ce = dst->elem(i);
+ ce.epoch = clk_[i].epoch;
+ ce.reused = 0;
+ }
+ // Clear the tail of dst->clk_.
+ if (nclk_ < dst->size_) {
+ for (uptr i = nclk_; i < dst->size_; i++) {
+ ClockElem &ce = dst->elem(i);
+ ce.epoch = 0;
+ ce.reused = 0;
+ }
+ CPP_STAT_INC(StatClockStoreTail);
+ }
+ for (unsigned i = 0; i < kDirtyTids; i++)
+ dst->dirty_tids_[i] = kInvalidTid;
+ dst->release_store_tid_ = tid_;
+ dst->release_store_reused_ = reused_;
+ // Remember that we don't need to acquire it in the future.
+ dst->elem(tid_).reused = reused_;
}
-void ThreadClock::acq_rel(SyncClock *dst) {
- acquire(dst);
- release(dst);
+void ThreadClock::acq_rel(ClockCache *c, SyncClock *dst) {
+ CPP_STAT_INC(StatClockAcquireRelease);
+ acquire(c, dst);
+ ReleaseStore(c, dst);
+}
+
+// Updates only the single element related to the current thread in dst->clk_.
+void ThreadClock::UpdateCurrentThread(SyncClock *dst) const {
+ // Update the thread's time, but preserve the 'acquired' flag.
+ dst->elem(tid_).epoch = clk_[tid_].epoch;
+
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ if (dst->dirty_tids_[i] == tid_) {
+ CPP_STAT_INC(StatClockReleaseFast1);
+ return;
+ }
+ if (dst->dirty_tids_[i] == kInvalidTid) {
+ CPP_STAT_INC(StatClockReleaseFast2);
+ dst->dirty_tids_[i] = tid_;
+ return;
+ }
+ }
+ // Reset all 'acquired' flags, O(N).
+ CPP_STAT_INC(StatClockReleaseSlow);
+ for (uptr i = 0; i < dst->size_; i++)
+ dst->elem(i).reused = 0;
+ for (unsigned i = 0; i < kDirtyTids; i++)
+ dst->dirty_tids_[i] = kInvalidTid;
+}
+
+// Checks whether the current thread has already acquired src.
+bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const {
+ if (src->elem(tid_).reused != reused_)
+ return false;
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ unsigned tid = src->dirty_tids_[i];
+ if (tid != kInvalidTid) {
+ if (clk_[tid].epoch < src->elem(tid).epoch)
+ return false;
+ }
+ }
+ return true;
+}
+
+void SyncClock::Resize(ClockCache *c, uptr nclk) {
+ CPP_STAT_INC(StatClockReleaseResize);
+ if (RoundUpTo(nclk, ClockBlock::kClockCount) <=
+ RoundUpTo(size_, ClockBlock::kClockCount)) {
+ // Growing within the same block.
+ // Memory is already allocated, just increase the size.
+ size_ = nclk;
+ return;
+ }
+ if (nclk <= ClockBlock::kClockCount) {
+ // Grow from 0 to one-level table.
+ CHECK_EQ(size_, 0);
+ CHECK_EQ(tab_, 0);
+ CHECK_EQ(tab_idx_, 0);
+ size_ = nclk;
+ tab_idx_ = ctx->clock_alloc.Alloc(c);
+ tab_ = ctx->clock_alloc.Map(tab_idx_);
+ internal_memset(tab_, 0, sizeof(*tab_));
+ return;
+ }
+ // Growing two-level table.
+ if (size_ == 0) {
+ // Allocate first level table.
+ tab_idx_ = ctx->clock_alloc.Alloc(c);
+ tab_ = ctx->clock_alloc.Map(tab_idx_);
+ internal_memset(tab_, 0, sizeof(*tab_));
+ } else if (size_ <= ClockBlock::kClockCount) {
+ // Transform one-level table to two-level table.
+ u32 old = tab_idx_;
+ tab_idx_ = ctx->clock_alloc.Alloc(c);
+ tab_ = ctx->clock_alloc.Map(tab_idx_);
+ internal_memset(tab_, 0, sizeof(*tab_));
+ tab_->table[0] = old;
+ }
+ // At this point we have the first-level table allocated.
+ // Add second-level tables as necessary.
+ for (uptr i = RoundUpTo(size_, ClockBlock::kClockCount);
+ i < nclk; i += ClockBlock::kClockCount) {
+ u32 idx = ctx->clock_alloc.Alloc(c);
+ ClockBlock *cb = ctx->clock_alloc.Map(idx);
+ internal_memset(cb, 0, sizeof(*cb));
+ CHECK_EQ(tab_->table[i/ClockBlock::kClockCount], 0);
+ tab_->table[i/ClockBlock::kClockCount] = idx;
+ }
+ size_ = nclk;
+}
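A worked example of the growth path above (illustrative only, not part of the patch): a clock resized from 0 to 10 elements takes the one-level branch and gets a single 512-byte block with size_ = 10. Resized again to 200, that block becomes table[0] of a fresh first-level block, and the final loop (starting at RoundUpTo(10, 64) = 64) allocates second-level blocks for i = 64, 128 and 192, so the table covers 4 * 64 = 256 >= 200 elements before size_ is set to 200.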
+
+// Sets a single element in the vector clock.
+// This function is called only from weird places like AcquireGlobal.
+void ThreadClock::set(unsigned tid, u64 v) {
+ DCHECK_LT(tid, kMaxTid);
+ DCHECK_GE(v, clk_[tid].epoch);
+ clk_[tid].epoch = v;
+ if (nclk_ <= tid)
+ nclk_ = tid + 1;
+ last_acquire_ = clk_[tid_].epoch;
+}
+
+void ThreadClock::DebugDump(int(*printf)(const char *s, ...)) {
+ printf("clock=[");
+ for (uptr i = 0; i < nclk_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", clk_[i].epoch);
+ printf("] reused=[");
+ for (uptr i = 0; i < nclk_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", clk_[i].reused);
+ printf("] tid=%u/%u last_acq=%llu",
+ tid_, reused_, last_acquire_);
}
SyncClock::SyncClock()
- : clk_(MBlockClock) {
+ : release_store_tid_(kInvalidTid)
+ , release_store_reused_()
+ , tab_()
+ , tab_idx_()
+ , size_() {
+ for (uptr i = 0; i < kDirtyTids; i++)
+ dirty_tids_[i] = kInvalidTid;
+}
+
+SyncClock::~SyncClock() {
+ // Reset must be called before dtor.
+ CHECK_EQ(size_, 0);
+ CHECK_EQ(tab_, 0);
+ CHECK_EQ(tab_idx_, 0);
+}
+
+void SyncClock::Reset(ClockCache *c) {
+ if (size_ == 0) {
+ // nothing
+ } else if (size_ <= ClockBlock::kClockCount) {
+ // One-level table.
+ ctx->clock_alloc.Free(c, tab_idx_);
+ } else {
+ // Two-level table.
+ for (uptr i = 0; i < size_; i += ClockBlock::kClockCount)
+ ctx->clock_alloc.Free(c, tab_->table[i / ClockBlock::kClockCount]);
+ ctx->clock_alloc.Free(c, tab_idx_);
+ }
+ tab_ = 0;
+ tab_idx_ = 0;
+ size_ = 0;
+ release_store_tid_ = kInvalidTid;
+ release_store_reused_ = 0;
+ for (uptr i = 0; i < kDirtyTids; i++)
+ dirty_tids_[i] = kInvalidTid;
+}
+
+ClockElem &SyncClock::elem(unsigned tid) const {
+ DCHECK_LT(tid, size_);
+ if (size_ <= ClockBlock::kClockCount)
+ return tab_->clock[tid];
+ u32 idx = tab_->table[tid / ClockBlock::kClockCount];
+ ClockBlock *cb = ctx->clock_alloc.Map(idx);
+ return cb->clock[tid % ClockBlock::kClockCount];
+}
+
+void SyncClock::DebugDump(int(*printf)(const char *s, ...)) {
+ printf("clock=[");
+ for (uptr i = 0; i < size_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", elem(i).epoch);
+ printf("] reused=[");
+ for (uptr i = 0; i < size_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", elem(i).reused);
+ printf("] release_store_tid=%d/%d dirty_tids=%d/%d",
+ release_store_tid_, release_store_reused_,
+ dirty_tids_[0], dirty_tids_[1]);
}
} // namespace __tsan
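For orientation, the dirty_tids_/last_acquire_/reused_ machinery above only short-circuits the classic O(N) vector-clock operations. A minimal sketch of the unoptimized semantics, using hypothetical SimpleThreadClock/SimpleSyncClock types that are not part of this patch:

#include <algorithm>
#include <vector>

struct SimpleSyncClock {
  std::vector<unsigned long long> clk;  // per-thread epochs stored in the sync object
};

struct SimpleThreadClock {
  std::vector<unsigned long long> clk;  // this thread's view of all threads

  // acquire: raise every component to at least the sync clock's value.
  void acquire(const SimpleSyncClock &src) {
    if (clk.size() < src.clk.size())
      clk.resize(src.clk.size());
    for (size_t i = 0; i < src.clk.size(); i++)
      clk[i] = std::max(clk[i], src.clk[i]);
  }

  // release: merge this thread's clock into the sync clock.
  void release(SimpleSyncClock *dst) const {
    if (dst->clk.size() < clk.size())
      dst->clk.resize(clk.size());
    for (size_t i = 0; i < clk.size(); i++)
      dst->clk[i] = std::max(dst->clk[i], clk[i]);
  }

  // release-store: overwrite the sync clock with this thread's clock.
  void ReleaseStore(SimpleSyncClock *dst) const {
    dst->clk.assign(clk.begin(), clk.end());
  }
};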
diff --git a/libsanitizer/tsan/tsan_clock.h b/libsanitizer/tsan/tsan_clock.h
index 8e4bf99ca8..3deb7f5198 100644
--- a/libsanitizer/tsan/tsan_clock.h
+++ b/libsanitizer/tsan/tsan_clock.h
@@ -12,65 +12,114 @@
#define TSAN_CLOCK_H
#include "tsan_defs.h"
-#include "tsan_vector.h"
+#include "tsan_dense_alloc.h"
namespace __tsan {
+struct ClockElem {
+ u64 epoch : kClkBits;
+ u64 reused : 64 - kClkBits;
+};
+
+struct ClockBlock {
+ static const uptr kSize = 512;
+ static const uptr kTableSize = kSize / sizeof(u32);
+ static const uptr kClockCount = kSize / sizeof(ClockElem);
+
+ union {
+ u32 table[kTableSize];
+ ClockElem clock[kClockCount];
+ };
+
+ ClockBlock() {
+ }
+};
+
+typedef DenseSlabAlloc<ClockBlock, 1<<16, 1<<10> ClockAlloc;
+typedef DenseSlabAllocCache ClockCache;
+
// The clock that lives in sync variables (mutexes, atomics, etc).
class SyncClock {
public:
SyncClock();
+ ~SyncClock();
uptr size() const {
- return clk_.Size();
+ return size_;
}
- void Reset() {
- clk_.Reset();
+ u64 get(unsigned tid) const {
+ return elem(tid).epoch;
}
+ void Resize(ClockCache *c, uptr nclk);
+ void Reset(ClockCache *c);
+
+ void DebugDump(int(*printf)(const char *s, ...));
+
private:
- Vector<u64> clk_;
friend struct ThreadClock;
+ static const uptr kDirtyTids = 2;
+
+ unsigned release_store_tid_;
+ unsigned release_store_reused_;
+ unsigned dirty_tids_[kDirtyTids];
+ // tab_ contains an indirect pointer to a 512-byte block allocated via DenseSlabAlloc.
+ // If size_ <= 64, then tab_ points to an array with 64 ClockElem's.
+ // Otherwise, tab_ points to an array with 128 u32 elements,
+ // each pointing to the second-level 512b block with 64 ClockElem's.
+ ClockBlock *tab_;
+ u32 tab_idx_;
+ u32 size_;
+
+ ClockElem &elem(unsigned tid) const;
};
// The clock that lives in threads.
struct ThreadClock {
public:
- ThreadClock();
+ typedef DenseSlabAllocCache Cache;
+
+ explicit ThreadClock(unsigned tid, unsigned reused = 0);
u64 get(unsigned tid) const {
DCHECK_LT(tid, kMaxTidInClock);
- return clk_[tid];
+ return clk_[tid].epoch;
}
- void set(unsigned tid, u64 v) {
- DCHECK_LT(tid, kMaxTid);
- DCHECK_GE(v, clk_[tid]);
- clk_[tid] = v;
- if (nclk_ <= tid)
- nclk_ = tid + 1;
+ void set(unsigned tid, u64 v);
+
+ void set(u64 v) {
+ DCHECK_GE(v, clk_[tid_].epoch);
+ clk_[tid_].epoch = v;
}
- void tick(unsigned tid) {
- DCHECK_LT(tid, kMaxTid);
- clk_[tid]++;
- if (nclk_ <= tid)
- nclk_ = tid + 1;
+ void tick() {
+ clk_[tid_].epoch++;
}
uptr size() const {
return nclk_;
}
- void acquire(const SyncClock *src);
- void release(SyncClock *dst) const;
- void acq_rel(SyncClock *dst);
- void ReleaseStore(SyncClock *dst) const;
+ void acquire(ClockCache *c, const SyncClock *src);
+ void release(ClockCache *c, SyncClock *dst) const;
+ void acq_rel(ClockCache *c, SyncClock *dst);
+ void ReleaseStore(ClockCache *c, SyncClock *dst) const;
+
+ void DebugReset();
+ void DebugDump(int(*printf)(const char *s, ...));
private:
+ static const uptr kDirtyTids = SyncClock::kDirtyTids;
+ const unsigned tid_;
+ const unsigned reused_;
+ u64 last_acquire_;
uptr nclk_;
- u64 clk_[kMaxTidInClock];
+ ClockElem clk_[kMaxTidInClock];
+
+ bool IsAlreadyAcquired(const SyncClock *src) const;
+ void UpdateCurrentThread(SyncClock *dst) const;
};
} // namespace __tsan
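The comment on tab_ follows directly from the constants above: with kClkBits = 42 a ClockElem packs into a single u64 on typical ABIs, so a 512-byte ClockBlock holds 64 ClockElem's or 128 u32 indices, and a full two-level table covers 128 * 64 = 8192 entries, exactly kMaxTid. For example, elem(100) on a large clock reads table[100 / 64] = table[1] and then clock[100 % 64] = clock[36] in that second-level block. A hedged compile-time restatement of this arithmetic (assumes C++11 static_assert is available; the runtime itself would use COMPILER_CHECK):

#include "tsan_clock.h"  // ClockElem, ClockBlock (as defined above)
#include "tsan_defs.h"   // kMaxTid

using namespace __tsan;

static_assert(sizeof(ClockElem) == 8,
              "epoch:42 + reused:22 bits pack into one u64 on typical ABIs");
static_assert(ClockBlock::kClockCount == 64, "512 / sizeof(ClockElem)");
static_assert(ClockBlock::kTableSize == 128, "512 / sizeof(u32)");
static_assert(ClockBlock::kTableSize * ClockBlock::kClockCount == kMaxTid,
              "a full two-level table addresses all 8192 possible tids");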
diff --git a/libsanitizer/tsan/tsan_defs.h b/libsanitizer/tsan/tsan_defs.h
index 3f20797ff4..076de73c9b 100644
--- a/libsanitizer/tsan/tsan_defs.h
+++ b/libsanitizer/tsan/tsan_defs.h
@@ -39,8 +39,8 @@ const int kTidBits = 13;
const unsigned kMaxTid = 1 << kTidBits;
const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
const int kClkBits = 42;
+const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
const uptr kShadowStackSize = 64 * 1024;
-const uptr kTraceStackSize = 256;
#ifdef TSAN_SHADOW_COUNT
# if TSAN_SHADOW_COUNT == 2 \
@@ -51,6 +51,7 @@ const uptr kShadowCnt = TSAN_SHADOW_COUNT;
# endif
#else
// Count of shadow values in a shadow cell.
+#define TSAN_SHADOW_COUNT 4
const uptr kShadowCnt = 4;
#endif
@@ -63,6 +64,19 @@ const uptr kShadowSize = 8;
// Shadow memory is kShadowMultiplier times larger than user memory.
const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell;
+// That many user bytes are mapped onto a single meta shadow cell.
+// Must be less than or equal to the minimal memory allocator alignment.
+const uptr kMetaShadowCell = 8;
+
+// Size of a single meta shadow value (u32).
+const uptr kMetaShadowSize = 4;
+
+#if defined(TSAN_NO_HISTORY) && TSAN_NO_HISTORY
+const bool kCollectHistory = false;
+#else
+const bool kCollectHistory = true;
+#endif
+
#if defined(TSAN_COLLECT_STATS) && TSAN_COLLECT_STATS
const bool kCollectStats = true;
#else
@@ -157,8 +171,15 @@ struct Context;
struct ReportStack;
class ReportDesc;
class RegionAlloc;
-class StackTrace;
-struct MBlock;
+
+// Descriptor of user's memory block.
+struct MBlock {
+ u64 siz;
+ u32 stk;
+ u16 tid;
+};
+
+COMPILER_CHECK(sizeof(MBlock) == 16);
} // namespace __tsan
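Two of the constants above are worth spelling out (illustrative arithmetic only): kClkBits = 42 leaves 64 - 42 = 22 bits for the reuse counter, so kMaxTidReuse = (1 << 22) - 1 = 4194303 reuses of a thread slot before the counter wraps; and MBlock's fields occupy 8 + 4 + 2 = 14 bytes, which the u64 member's 8-byte alignment pads to 16, exactly what the COMPILER_CHECK asserts.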
diff --git a/libsanitizer/tsan/tsan_dense_alloc.h b/libsanitizer/tsan/tsan_dense_alloc.h
new file mode 100644
index 0000000000..651c112c78
--- /dev/null
+++ b/libsanitizer/tsan/tsan_dense_alloc.h
@@ -0,0 +1,135 @@
+//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
+// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
+// The only difference from traditional slab allocators is that DenseSlabAlloc
+// allocates/frees indices of objects and provides functionality to map
+// an index onto the real pointer. The index is a u32, that is, half the size
+// of uptr (hence the Dense prefix).
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_DENSE_ALLOC_H
+#define TSAN_DENSE_ALLOC_H
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "tsan_defs.h"
+#include "tsan_mutex.h"
+
+namespace __tsan {
+
+class DenseSlabAllocCache {
+ static const uptr kSize = 128;
+ typedef u32 IndexT;
+ uptr pos;
+ IndexT cache[kSize];
+ template<typename T, uptr kL1Size, uptr kL2Size> friend class DenseSlabAlloc;
+};
+
+template<typename T, uptr kL1Size, uptr kL2Size>
+class DenseSlabAlloc {
+ public:
+ typedef DenseSlabAllocCache Cache;
+ typedef typename Cache::IndexT IndexT;
+
+ DenseSlabAlloc() {
+ // Check that kL1Size and kL2Size are sane.
+ CHECK_EQ(kL1Size & (kL1Size - 1), 0);
+ CHECK_EQ(kL2Size & (kL2Size - 1), 0);
+ CHECK_GE(1ull << (sizeof(IndexT) * 8), kL1Size * kL2Size);
+ // Check that it makes sense to use the dense alloc.
+ CHECK_GE(sizeof(T), sizeof(IndexT));
+ internal_memset(map_, 0, sizeof(map_));
+ freelist_ = 0;
+ fillpos_ = 0;
+ }
+
+ ~DenseSlabAlloc() {
+ for (uptr i = 0; i < kL1Size; i++) {
+ if (map_[i] != 0)
+ UnmapOrDie(map_[i], kL2Size * sizeof(T));
+ }
+ }
+
+ IndexT Alloc(Cache *c) {
+ if (c->pos == 0)
+ Refill(c);
+ return c->cache[--c->pos];
+ }
+
+ void Free(Cache *c, IndexT idx) {
+ DCHECK_NE(idx, 0);
+ if (c->pos == Cache::kSize)
+ Drain(c);
+ c->cache[c->pos++] = idx;
+ }
+
+ T *Map(IndexT idx) {
+ DCHECK_NE(idx, 0);
+ DCHECK_LE(idx, kL1Size * kL2Size);
+ return &map_[idx / kL2Size][idx % kL2Size];
+ }
+
+ void FlushCache(Cache *c) {
+ SpinMutexLock lock(&mtx_);
+ while (c->pos) {
+ IndexT idx = c->cache[--c->pos];
+ *(IndexT*)Map(idx) = freelist_;
+ freelist_ = idx;
+ }
+ }
+
+ void InitCache(Cache *c) {
+ c->pos = 0;
+ internal_memset(c->cache, 0, sizeof(c->cache));
+ }
+
+ private:
+ T *map_[kL1Size];
+ SpinMutex mtx_;
+ IndexT freelist_;
+ uptr fillpos_;
+
+ void Refill(Cache *c) {
+ SpinMutexLock lock(&mtx_);
+ if (freelist_ == 0) {
+ if (fillpos_ == kL1Size) {
+ Printf("ThreadSanitizer: DenseSlabAllocator overflow. Dying.\n");
+ Die();
+ }
+ T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), "DenseSlabAllocator");
+ // Reserve 0 as invalid index.
+ IndexT start = fillpos_ == 0 ? 1 : 0;
+ for (IndexT i = start; i < kL2Size; i++) {
+ new(batch + i) T();
+ *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
+ }
+ *(IndexT*)(batch + kL2Size - 1) = 0;
+ freelist_ = fillpos_ * kL2Size + start;
+ map_[fillpos_++] = batch;
+ }
+ for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
+ IndexT idx = freelist_;
+ c->cache[c->pos++] = idx;
+ freelist_ = *(IndexT*)Map(idx);
+ }
+ }
+
+ void Drain(Cache *c) {
+ SpinMutexLock lock(&mtx_);
+ for (uptr i = 0; i < Cache::kSize / 2; i++) {
+ IndexT idx = c->cache[--c->pos];
+ *(IndexT*)Map(idx) = freelist_;
+ freelist_ = idx;
+ }
+ }
+};
+
+} // namespace __tsan
+
+#endif // TSAN_DENSE_ALLOC_H
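A hedged usage sketch of the allocator above (the DenseAllocExample function is hypothetical; ClockAlloc is the instantiation introduced in tsan_clock.h): callers pass around compact u32 indices plus a per-thread cache, and only Map() dereferences into the mapped pages.

#include "tsan_clock.h"        // ClockBlock and the ClockAlloc instantiation
#include "tsan_dense_alloc.h"

namespace __tsan {

void DenseAllocExample(ClockAlloc *alloc) {
  DenseSlabAllocCache cache;
  alloc->InitCache(&cache);

  u32 idx = alloc->Alloc(&cache);       // usually served from the local cache
  ClockBlock *cb = alloc->Map(idx);     // translate the 32-bit index to a pointer
  internal_memset(cb, 0, sizeof(*cb));  // use the object as usual

  alloc->Free(&cache, idx);             // return the index to the local cache
  alloc->FlushCache(&cache);            // hand cached indices back to the shared freelist
}

}  // namespace __tsan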
diff --git a/libsanitizer/tsan/tsan_fd.cc b/libsanitizer/tsan/tsan_fd.cc
index b7ac3111c8..10582035f2 100644
--- a/libsanitizer/tsan/tsan_fd.cc
+++ b/libsanitizer/tsan/tsan_fd.cc
@@ -45,8 +45,9 @@ static bool bogusfd(int fd) {
return fd < 0 || fd >= kTableSize;
}
-static FdSync *allocsync() {
- FdSync *s = (FdSync*)internal_alloc(MBlockFD, sizeof(FdSync));
+static FdSync *allocsync(ThreadState *thr, uptr pc) {
+ FdSync *s = (FdSync*)user_alloc(thr, pc, sizeof(FdSync), kDefaultAlignment,
+ false);
atomic_store(&s->rc, 1, memory_order_relaxed);
return s;
}
@@ -63,10 +64,7 @@ static void unref(ThreadState *thr, uptr pc, FdSync *s) {
CHECK_NE(s, &fdctx.globsync);
CHECK_NE(s, &fdctx.filesync);
CHECK_NE(s, &fdctx.socksync);
- SyncVar *v = CTX()->synctab.GetAndRemove(thr, pc, (uptr)s);
- if (v)
- DestroyAndFree(v);
- internal_free(s);
+ user_free(thr, pc, s, false);
}
}
}
@@ -79,13 +77,13 @@ static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
if (l1 == 0) {
uptr size = kTableSizeL2 * sizeof(FdDesc);
// We need this to reside in user memory to properly catch races on it.
- void *p = user_alloc(thr, pc, size);
+ void *p = user_alloc(thr, pc, size, kDefaultAlignment, false);
internal_memset(p, 0, size);
MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
l1 = (uptr)p;
else
- user_free(thr, pc, p);
+ user_free(thr, pc, p, false);
}
return &((FdDesc*)l1)[fd % kTableSizeL2]; // NOLINT
}
@@ -217,7 +215,7 @@ void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd) {
void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd);
- FdSync *s = allocsync();
+ FdSync *s = allocsync(thr, pc);
init(thr, pc, rfd, ref(s));
init(thr, pc, wfd, ref(s));
unref(thr, pc, s);
@@ -227,7 +225,7 @@ void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
- init(thr, pc, fd, allocsync());
+ init(thr, pc, fd, allocsync(thr, pc));
}
void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
@@ -248,7 +246,7 @@ void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
- init(thr, pc, fd, allocsync());
+ init(thr, pc, fd, allocsync(thr, pc));
}
void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
@@ -283,13 +281,13 @@ void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
init(thr, pc, fd, &fdctx.socksync);
}
-uptr File2addr(char *path) {
+uptr File2addr(const char *path) {
(void)path;
static u64 addr;
return (uptr)&addr;
}
-uptr Dir2addr(char *path) {
+uptr Dir2addr(const char *path) {
(void)path;
static u64 addr;
return (uptr)&addr;
diff --git a/libsanitizer/tsan/tsan_fd.h b/libsanitizer/tsan/tsan_fd.h
index 3306873223..57ae62cfe8 100644
--- a/libsanitizer/tsan/tsan_fd.h
+++ b/libsanitizer/tsan/tsan_fd.h
@@ -55,8 +55,8 @@ void FdSocketConnect(ThreadState *thr, uptr pc, int fd);
bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack);
void FdOnFork(ThreadState *thr, uptr pc);
-uptr File2addr(char *path);
-uptr Dir2addr(char *path);
+uptr File2addr(const char *path);
+uptr Dir2addr(const char *path);
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_flags.cc b/libsanitizer/tsan/tsan_flags.cc
index 158e24f824..6059f2771b 100644
--- a/libsanitizer/tsan/tsan_flags.cc
+++ b/libsanitizer/tsan/tsan_flags.cc
@@ -18,17 +18,13 @@
namespace __tsan {
Flags *flags() {
- return &CTX()->flags;
+ return &ctx->flags;
}
// Can be overriden in frontend.
#ifdef TSAN_EXTERNAL_HOOKS
-void OverrideFlags(Flags *f);
extern "C" const char* __tsan_default_options();
#else
-void WEAK OverrideFlags(Flags *f) {
- (void)f;
-}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
const char *WEAK __tsan_default_options() {
return "";
@@ -36,30 +32,32 @@ const char *WEAK __tsan_default_options() {
#endif
static void ParseFlags(Flags *f, const char *env) {
- ParseFlag(env, &f->enable_annotations, "enable_annotations");
- ParseFlag(env, &f->suppress_equal_stacks, "suppress_equal_stacks");
- ParseFlag(env, &f->suppress_equal_addresses, "suppress_equal_addresses");
- ParseFlag(env, &f->suppress_java, "suppress_java");
- ParseFlag(env, &f->report_bugs, "report_bugs");
- ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks");
- ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked");
- ParseFlag(env, &f->report_signal_unsafe, "report_signal_unsafe");
- ParseFlag(env, &f->report_atomic_races, "report_atomic_races");
- ParseFlag(env, &f->force_seq_cst_atomics, "force_seq_cst_atomics");
- ParseFlag(env, &f->suppressions, "suppressions");
- ParseFlag(env, &f->print_suppressions, "print_suppressions");
- ParseFlag(env, &f->print_benign, "print_benign");
- ParseFlag(env, &f->exitcode, "exitcode");
- ParseFlag(env, &f->halt_on_error, "halt_on_error");
- ParseFlag(env, &f->atexit_sleep_ms, "atexit_sleep_ms");
- ParseFlag(env, &f->profile_memory, "profile_memory");
- ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms");
- ParseFlag(env, &f->flush_symbolizer_ms, "flush_symbolizer_ms");
- ParseFlag(env, &f->memory_limit_mb, "memory_limit_mb");
- ParseFlag(env, &f->stop_on_start, "stop_on_start");
- ParseFlag(env, &f->running_on_valgrind, "running_on_valgrind");
- ParseFlag(env, &f->history_size, "history_size");
- ParseFlag(env, &f->io_sync, "io_sync");
+ ParseFlag(env, &f->enable_annotations, "enable_annotations", "");
+ ParseFlag(env, &f->suppress_equal_stacks, "suppress_equal_stacks", "");
+ ParseFlag(env, &f->suppress_equal_addresses, "suppress_equal_addresses", "");
+ ParseFlag(env, &f->report_bugs, "report_bugs", "");
+ ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks", "");
+ ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked", "");
+ ParseFlag(env, &f->report_mutex_bugs, "report_mutex_bugs", "");
+ ParseFlag(env, &f->report_signal_unsafe, "report_signal_unsafe", "");
+ ParseFlag(env, &f->report_atomic_races, "report_atomic_races", "");
+ ParseFlag(env, &f->force_seq_cst_atomics, "force_seq_cst_atomics", "");
+ ParseFlag(env, &f->print_benign, "print_benign", "");
+ ParseFlag(env, &f->exitcode, "exitcode", "");
+ ParseFlag(env, &f->halt_on_error, "halt_on_error", "");
+ ParseFlag(env, &f->atexit_sleep_ms, "atexit_sleep_ms", "");
+ ParseFlag(env, &f->profile_memory, "profile_memory", "");
+ ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms", "");
+ ParseFlag(env, &f->flush_symbolizer_ms, "flush_symbolizer_ms", "");
+ ParseFlag(env, &f->memory_limit_mb, "memory_limit_mb", "");
+ ParseFlag(env, &f->stop_on_start, "stop_on_start", "");
+ ParseFlag(env, &f->running_on_valgrind, "running_on_valgrind", "");
+ ParseFlag(env, &f->history_size, "history_size", "");
+ ParseFlag(env, &f->io_sync, "io_sync", "");
+ ParseFlag(env, &f->die_after_fork, "die_after_fork", "");
+
+ // DDFlags
+ ParseFlag(env, &f->second_deadlock_stack, "second_deadlock_stack", "");
}
void InitializeFlags(Flags *f, const char *env) {
@@ -69,15 +67,13 @@ void InitializeFlags(Flags *f, const char *env) {
f->enable_annotations = true;
f->suppress_equal_stacks = true;
f->suppress_equal_addresses = true;
- f->suppress_java = false;
f->report_bugs = true;
f->report_thread_leaks = true;
f->report_destroy_locked = true;
+ f->report_mutex_bugs = true;
f->report_signal_unsafe = true;
f->report_atomic_races = true;
f->force_seq_cst_atomics = false;
- f->suppressions = "";
- f->print_suppressions = false;
f->print_benign = false;
f->exitcode = 66;
f->halt_on_error = false;
@@ -90,19 +86,25 @@ void InitializeFlags(Flags *f, const char *env) {
f->running_on_valgrind = false;
f->history_size = kGoMode ? 1 : 2; // There are a lot of goroutines in Go.
f->io_sync = 1;
+ f->die_after_fork = true;
+
+ // DDFlags
+ f->second_deadlock_stack = false;
- SetCommonFlagsDefaults(f);
+ CommonFlags *cf = common_flags();
+ SetCommonFlagsDefaults(cf);
+ // Override some common flags defaults.
+ cf->allow_addr2line = true;
+ cf->detect_deadlocks = true;
+ cf->print_suppressions = false;
+ cf->stack_trace_format = " #%n %f %S %M";
// Let a frontend override.
- OverrideFlags(f);
ParseFlags(f, __tsan_default_options());
- ParseCommonFlagsFromString(f, __tsan_default_options());
+ ParseCommonFlagsFromString(cf, __tsan_default_options());
// Override from command line.
ParseFlags(f, env);
- ParseCommonFlagsFromString(f, env);
-
- // Copy back to common flags.
- *common_flags() = *f;
+ ParseCommonFlagsFromString(cf, env);
// Sanity check.
if (!f->report_bugs) {
@@ -111,6 +113,8 @@ void InitializeFlags(Flags *f, const char *env) {
f->report_signal_unsafe = false;
}
+ if (cf->help) PrintFlagDescriptions();
+
if (f->history_size < 0 || f->history_size > 7) {
Printf("ThreadSanitizer: incorrect value for history_size"
" (must be [0..7])\n");
diff --git a/libsanitizer/tsan/tsan_flags.h b/libsanitizer/tsan/tsan_flags.h
index 05d11a451c..182d9b298a 100644
--- a/libsanitizer/tsan/tsan_flags.h
+++ b/libsanitizer/tsan/tsan_flags.h
@@ -12,34 +12,28 @@
#ifndef TSAN_FLAGS_H
#define TSAN_FLAGS_H
-// ----------- ATTENTION -------------
-// ThreadSanitizer user may provide its implementation of weak
-// symbol __tsan::OverrideFlags(__tsan::Flags). Therefore, this
-// header may be included in the user code, and shouldn't include
-// other headers from TSan or common sanitizer runtime.
-
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
namespace __tsan {
-struct Flags : CommonFlags {
+struct Flags : DDFlags {
// Enable dynamic annotations, otherwise they are no-ops.
bool enable_annotations;
- // Supress a race report if we've already output another race report
+ // Suppress a race report if we've already output another race report
// with the same stack.
bool suppress_equal_stacks;
- // Supress a race report if we've already output another race report
+ // Suppress a race report if we've already output another race report
// on the same address.
bool suppress_equal_addresses;
- // Suppress weird race reports that can be seen if JVM is embed
- // into the process.
- bool suppress_java;
// Turns off bug reporting entirely (useful for benchmarking).
bool report_bugs;
// Report thread leaks at exit?
bool report_thread_leaks;
// Report destruction of a locked mutex?
bool report_destroy_locked;
+ // Report incorrect usages of mutexes and mutex annotations?
+ bool report_mutex_bugs;
// Report violations of async signal-safety
// (e.g. malloc() call from a signal handler).
bool report_signal_unsafe;
@@ -48,10 +42,6 @@ struct Flags : CommonFlags {
// If set, all atomics are effectively sequentially consistent (seq_cst),
// regardless of what user actually specified.
bool force_seq_cst_atomics;
- // Suppressions filename.
- const char *suppressions;
- // Print matched suppressions at exit.
- bool print_suppressions;
// Print matched "benign" races at exit.
bool print_benign;
// Override exit status if something was reported.
@@ -85,6 +75,8 @@ struct Flags : CommonFlags {
// 1 - reasonable level of synchronization (write->read)
// 2 - global synchronization of all IO operations
int io_sync;
+ // Die after multi-threaded fork if the child creates new threads.
+ bool die_after_fork;
};
Flags *flags();
diff --git a/libsanitizer/tsan/tsan_interceptors.cc b/libsanitizer/tsan/tsan_interceptors.cc
index 0574beb91d..7f1b6e45a8 100644
--- a/libsanitizer/tsan/tsan_interceptors.cc
+++ b/libsanitizer/tsan/tsan_interceptors.cc
@@ -27,7 +27,17 @@
using namespace __tsan; // NOLINT
-const int kSigCount = 64;
+#if SANITIZER_FREEBSD
+#define __errno_location __error
+#define __libc_malloc __malloc
+#define __libc_realloc __realloc
+#define __libc_calloc __calloc
+#define __libc_free __free
+#define stdout __stdoutp
+#define stderr __stderrp
+#endif
+
+const int kSigCount = 65;
struct my_siginfo_t {
// The size is determined by looking at sizeof of real siginfo_t on linux.
@@ -45,12 +55,13 @@ DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
extern "C" int pthread_setspecific(unsigned key, const void *v);
-extern "C" int pthread_mutexattr_gettype(void *a, int *type);
+DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
extern "C" int pthread_yield();
extern "C" int pthread_sigmask(int how, const __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset);
// REAL(sigfillset) defined in common interceptors.
DECLARE_REAL(int, sigfillset, __sanitizer_sigset_t *set)
+DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
extern "C" void *pthread_self();
extern "C" void _exit(int status);
extern "C" int *__errno_location();
@@ -59,7 +70,10 @@ extern "C" void *__libc_malloc(uptr size);
extern "C" void *__libc_calloc(uptr size, uptr n);
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
+#if !SANITIZER_FREEBSD
extern "C" int mallopt(int param, int value);
+#endif
+extern __sanitizer_FILE *stdout, *stderr;
const int PTHREAD_MUTEX_RECURSIVE = 1;
const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
const int EINVAL = 22;
@@ -71,6 +85,7 @@ const int SIGABRT = 6;
const int SIGFPE = 8;
const int SIGSEGV = 11;
const int SIGPIPE = 13;
+const int SIGTERM = 15;
const int SIGBUS = 7;
const int SIGSYS = 31;
void *const MAP_FAILED = (void*)-1;
@@ -93,9 +108,14 @@ struct sigaction_t {
sighandler_t sa_handler;
void (*sa_sigaction)(int sig, my_siginfo_t *siginfo, void *uctx);
};
+#if SANITIZER_FREEBSD
+ int sa_flags;
+ __sanitizer_sigset_t sa_mask;
+#else
__sanitizer_sigset_t sa_mask;
int sa_flags;
void (*sa_restorer)();
+#endif
};
const sighandler_t SIG_DFL = (sighandler_t)0;
@@ -119,9 +139,9 @@ struct SignalDesc {
};
struct SignalContext {
- int in_blocking_func;
int int_signal_send;
- int pending_signal_count;
+ atomic_uintptr_t in_blocking_func;
+ atomic_uintptr_t have_pending_signals;
SignalDesc pending_signals[kSigCount];
};
@@ -133,7 +153,7 @@ static LibIgnore *libignore() {
}
void InitializeLibIgnore() {
- libignore()->Init(*GetSuppressionContext());
+ libignore()->Init(*SuppressionContext::Get());
libignore()->OnLibraryLoaded(0);
}
@@ -141,8 +161,7 @@ void InitializeLibIgnore() {
static SignalContext *SigCtx(ThreadState *thr) {
SignalContext *ctx = (SignalContext*)thr->signal_ctx;
- if (ctx == 0 && thr->is_alive) {
- ScopedInRtl in_rtl;
+ if (ctx == 0 && !thr->is_dead) {
ctx = (SignalContext*)MmapOrDie(sizeof(*ctx), "SignalContext");
MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
thr->signal_ctx = ctx;
@@ -159,7 +178,6 @@ class ScopedInterceptor {
private:
ThreadState *const thr_;
const uptr pc_;
- const int in_rtl_;
bool in_ignored_lib_;
};
@@ -167,16 +185,12 @@ ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
uptr pc)
: thr_(thr)
, pc_(pc)
- , in_rtl_(thr->in_rtl)
, in_ignored_lib_(false) {
- if (thr_->in_rtl == 0) {
+ if (!thr_->ignore_interceptors) {
Initialize(thr);
FuncEntry(thr, pc);
- thr_->in_rtl++;
- DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
- } else {
- thr_->in_rtl++;
}
+ DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
if (!thr_->in_ignored_lib && libignore()->IsIgnored(pc)) {
in_ignored_lib_ = true;
thr_->in_ignored_lib = true;
@@ -189,49 +203,66 @@ ScopedInterceptor::~ScopedInterceptor() {
thr_->in_ignored_lib = false;
ThreadIgnoreEnd(thr_, pc_);
}
- thr_->in_rtl--;
- if (thr_->in_rtl == 0) {
- FuncExit(thr_);
+ if (!thr_->ignore_interceptors) {
ProcessPendingSignals(thr_);
+ FuncExit(thr_);
+ CheckNoLocks(thr_);
}
- CHECK_EQ(in_rtl_, thr_->in_rtl);
}
#define SCOPED_INTERCEPTOR_RAW(func, ...) \
ThreadState *thr = cur_thread(); \
- StatInc(thr, StatInterceptor); \
- StatInc(thr, StatInt_##func); \
const uptr caller_pc = GET_CALLER_PC(); \
ScopedInterceptor si(thr, #func, caller_pc); \
- const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
(void)pc; \
/**/
#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
if (REAL(func) == 0) { \
- Printf("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
+ Report("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
Die(); \
- } \
- if (thr->in_rtl > 1 || thr->in_ignored_lib) \
+ } \
+ if (thr->ignore_interceptors || thr->in_ignored_lib) \
return REAL(func)(__VA_ARGS__); \
/**/
#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
+#if SANITIZER_FREEBSD
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
+#else
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
+#endif
#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
struct BlockingCall {
explicit BlockingCall(ThreadState *thr)
- : ctx(SigCtx(thr)) {
- ctx->in_blocking_func++;
+ : thr(thr)
+ , ctx(SigCtx(thr)) {
+ for (;;) {
+ atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
+ if (atomic_load(&ctx->have_pending_signals, memory_order_relaxed) == 0)
+ break;
+ atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+ ProcessPendingSignals(thr);
+ }
+ // When we are in a "blocking call", we process signals asynchronously
+ // (right when they arrive). In this context we do not expect to be
+ // executing any user/runtime code. The known interceptor sequence when
+ // this is not true is: pthread_join -> munmap(stack). It's fine
+ // to ignore munmap in this case -- we handle stack shadow separately.
+ thr->ignore_interceptors++;
}
~BlockingCall() {
- ctx->in_blocking_func--;
+ thr->ignore_interceptors--;
+ atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
}
+ ThreadState *thr;
SignalContext *ctx;
};
@@ -256,116 +287,78 @@ TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
return res;
}
-TSAN_INTERCEPTOR(void*, dlopen, const char *filename, int flag) {
- SCOPED_INTERCEPTOR_RAW(dlopen, filename, flag);
- // dlopen will execute global constructors, so it must be not in rtl.
- CHECK_EQ(thr->in_rtl, 1);
- thr->in_rtl = 0;
- void *res = REAL(dlopen)(filename, flag);
- thr->in_rtl = 1;
- libignore()->OnLibraryLoaded(filename);
- return res;
-}
+// The sole reason tsan wraps atexit callbacks is to establish synchronization
+// between callback setup and callback execution.
+struct AtExitCtx {
+ void (*f)();
+ void *arg;
+};
-TSAN_INTERCEPTOR(int, dlclose, void *handle) {
- SCOPED_INTERCEPTOR_RAW(dlclose, handle);
- // dlclose will execute global destructors, so it must be not in rtl.
- CHECK_EQ(thr->in_rtl, 1);
- thr->in_rtl = 0;
- int res = REAL(dlclose)(handle);
- thr->in_rtl = 1;
- libignore()->OnLibraryUnloaded();
- return res;
+static void at_exit_wrapper(void *arg) {
+ ThreadState *thr = cur_thread();
+ uptr pc = 0;
+ Acquire(thr, pc, (uptr)arg);
+ AtExitCtx *ctx = (AtExitCtx*)arg;
+ ((void(*)(void *arg))ctx->f)(ctx->arg);
+ __libc_free(ctx);
}
-class AtExitContext {
- public:
- AtExitContext()
- : mtx_(MutexTypeAtExit, StatMtxAtExit)
- , pos_() {
- }
-
- typedef void(*atexit_t)();
-
- int atexit(ThreadState *thr, uptr pc, bool is_on_exit,
- atexit_t f, void *arg) {
- Lock l(&mtx_);
- if (pos_ == kMaxAtExit)
- return 1;
- Release(thr, pc, (uptr)this);
- stack_[pos_] = f;
- args_[pos_] = arg;
- is_on_exits_[pos_] = is_on_exit;
- pos_++;
- return 0;
- }
-
- void exit(ThreadState *thr, uptr pc) {
- CHECK_EQ(thr->in_rtl, 0);
- for (;;) {
- atexit_t f = 0;
- void *arg = 0;
- bool is_on_exit = false;
- {
- Lock l(&mtx_);
- if (pos_) {
- pos_--;
- f = stack_[pos_];
- arg = args_[pos_];
- is_on_exit = is_on_exits_[pos_];
- ScopedInRtl in_rtl;
- Acquire(thr, pc, (uptr)this);
- }
- }
- if (f == 0)
- break;
- DPrintf("#%d: executing atexit func %p\n", thr->tid, f);
- CHECK_EQ(thr->in_rtl, 0);
- if (is_on_exit)
- ((void(*)(int status, void *arg))f)(0, arg);
- else
- ((void(*)(void *arg, void *dso))f)(arg, 0);
- }
- }
-
- private:
- static const int kMaxAtExit = 128;
- Mutex mtx_;
- atexit_t stack_[kMaxAtExit];
- void *args_[kMaxAtExit];
- bool is_on_exits_[kMaxAtExit];
- int pos_;
-};
-
-static AtExitContext *atexit_ctx;
+static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
+ void *arg, void *dso);
TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
if (cur_thread()->in_symbolizer)
return 0;
- SCOPED_TSAN_INTERCEPTOR(atexit, f);
- return atexit_ctx->atexit(thr, pc, false, (void(*)())f, 0);
+ // We want to set up the atexit callback even if we are in an ignored lib
+ // or after fork.
+ SCOPED_INTERCEPTOR_RAW(atexit, f);
+ return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0);
}
-TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
+TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
if (cur_thread()->in_symbolizer)
return 0;
- SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
- return atexit_ctx->atexit(thr, pc, true, (void(*)())f, arg);
+ SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
+ return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso);
}
-TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
+static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
+ void *arg, void *dso) {
+ AtExitCtx *ctx = (AtExitCtx*)__libc_malloc(sizeof(AtExitCtx));
+ ctx->f = f;
+ ctx->arg = arg;
+ Release(thr, pc, (uptr)ctx);
+ // Memory allocation in __cxa_atexit will race with free during exit,
+ // because we do not see synchronization around atexit callback list.
+ ThreadIgnoreBegin(thr, pc);
+ int res = REAL(__cxa_atexit)(at_exit_wrapper, ctx, dso);
+ ThreadIgnoreEnd(thr, pc);
+ return res;
+}
+
+static void on_exit_wrapper(int status, void *arg) {
+ ThreadState *thr = cur_thread();
+ uptr pc = 0;
+ Acquire(thr, pc, (uptr)arg);
+ AtExitCtx *ctx = (AtExitCtx*)arg;
+ ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
+ __libc_free(ctx);
+}
+
+TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
if (cur_thread()->in_symbolizer)
return 0;
- SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
- if (dso) {
- // Memory allocation in __cxa_atexit will race with free during exit,
- // because we do not see synchronization around atexit callback list.
- ThreadIgnoreBegin(thr, pc);
- int res = REAL(__cxa_atexit)(f, arg, dso);
- ThreadIgnoreEnd(thr, pc);
- return res;
- }
- return atexit_ctx->atexit(thr, pc, false, (void(*)())f, arg);
+ SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
+ AtExitCtx *ctx = (AtExitCtx*)__libc_malloc(sizeof(AtExitCtx));
+ ctx->f = (void(*)())f;
+ ctx->arg = arg;
+ Release(thr, pc, (uptr)ctx);
+ // Memory allocation in __cxa_atexit will race with free during exit,
+ // because we do not see synchronization around atexit callback list.
+ ThreadIgnoreBegin(thr, pc);
+ int res = REAL(on_exit)(on_exit_wrapper, ctx);
+ ThreadIgnoreEnd(thr, pc);
+ return res;
}
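The Release on the freshly allocated AtExitCtx in the interceptors and the matching Acquire in the wrappers are what give user code the expected ordering. A hypothetical user-level scenario (not from the patch) that this pair synchronizes:

#include <cstdlib>

int g_data;                    // hypothetical global

static void cb() {             // invoked via at_exit_wrapper during exit
  (void)g_data;                // observes 42: the wrapper Acquires (uptr)ctx before calling cb
}

void setup() {
  g_data = 42;                 // ordered before the Release((uptr)ctx) in the interceptor
  atexit(cb);                  // registration releases; callback execution acquires
}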
// Cleanup old bufs.
@@ -391,10 +384,21 @@ static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) {
buf->sp = sp;
buf->mangled_sp = mangled_sp;
buf->shadow_stack_pos = thr->shadow_stack_pos;
+ SignalContext *sctx = SigCtx(thr);
+ buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
+ buf->in_blocking_func = sctx ?
+ atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
+ false;
+ buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
+ memory_order_relaxed);
}
static void LongJmp(ThreadState *thr, uptr *env) {
+#if SANITIZER_FREEBSD
+ uptr mangled_sp = env[2];
+#else
uptr mangled_sp = env[6];
+#endif // SANITIZER_FREEBSD
// Find the saved buf by mangled_sp.
for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
JmpBuf *buf = &thr->jmp_bufs[i];
@@ -403,6 +407,14 @@ static void LongJmp(ThreadState *thr, uptr *env) {
// Unwind the stack.
while (thr->shadow_stack_pos > buf->shadow_stack_pos)
FuncExit(thr);
+ SignalContext *sctx = SigCtx(thr);
+ if (sctx) {
+ sctx->int_signal_send = buf->int_signal_send;
+ atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
+ memory_order_relaxed);
+ }
+ atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
+ memory_order_relaxed);
JmpBufGarbageCollect(thr, buf->sp - 1); // do not collect buf->sp
return;
}
@@ -413,7 +425,6 @@ static void LongJmp(ThreadState *thr, uptr *env) {
// FIXME: put everything below into a common extern "C" block?
extern "C" void __tsan_setjmp(uptr sp, uptr mangled_sp) {
- ScopedInRtl in_rtl;
SetJmp(cur_thread(), sp, mangled_sp);
}
@@ -540,7 +551,7 @@ TSAN_INTERCEPTOR(void, cfree, void *p) {
TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
- return user_alloc_usable_size(thr, pc, p);
+ return user_alloc_usable_size(p);
}
#define OPERATOR_NEW_BODY(mangled_name) \
@@ -587,21 +598,21 @@ void *operator new[](__sanitizer::uptr size, std::nothrow_t const&) {
user_free(thr, pc, ptr);
SANITIZER_INTERFACE_ATTRIBUTE
-void operator delete(void *ptr);
-void operator delete(void *ptr) {
+void operator delete(void *ptr) throw();
+void operator delete(void *ptr) throw() {
OPERATOR_DELETE_BODY(_ZdlPv);
}
SANITIZER_INTERFACE_ATTRIBUTE
-void operator delete[](void *ptr);
-void operator delete[](void *ptr) {
- OPERATOR_DELETE_BODY(_ZdlPvRKSt9nothrow_t);
+void operator delete[](void *ptr) throw();
+void operator delete[](void *ptr) throw() {
+ OPERATOR_DELETE_BODY(_ZdaPv);
}
SANITIZER_INTERFACE_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&);
void operator delete(void *ptr, std::nothrow_t const&) {
- OPERATOR_DELETE_BODY(_ZdaPv);
+ OPERATOR_DELETE_BODY(_ZdlPvRKSt9nothrow_t);
}
SANITIZER_INTERFACE_ATTRIBUTE
@@ -643,20 +654,6 @@ TSAN_INTERCEPTOR(int, memcmp, const void *s1, const void *s2, uptr n) {
return res;
}
-TSAN_INTERCEPTOR(void*, memchr, void *s, int c, uptr n) {
- SCOPED_TSAN_INTERCEPTOR(memchr, s, c, n);
- void *res = REAL(memchr)(s, c, n);
- uptr len = res ? (char*)res - (char*)s + 1 : n;
- MemoryAccessRange(thr, pc, (uptr)s, len, false);
- return res;
-}
-
-TSAN_INTERCEPTOR(void*, memrchr, char *s, int c, uptr n) {
- SCOPED_TSAN_INTERCEPTOR(memrchr, s, c, n);
- MemoryAccessRange(thr, pc, (uptr)s, n, false);
- return REAL(memrchr)(s, c, n);
-}
-
TSAN_INTERCEPTOR(void*, memmove, void *dst, void *src, uptr n) {
SCOPED_TSAN_INTERCEPTOR(memmove, dst, src, n);
MemoryAccessRange(thr, pc, (uptr)dst, n, true);
@@ -746,6 +743,7 @@ TSAN_INTERCEPTOR(void*, mmap, void *addr, long_t sz, int prot,
return res;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(void*, mmap64, void *addr, long_t sz, int prot,
int flags, int fd, u64 off) {
SCOPED_TSAN_INTERCEPTOR(mmap64, addr, sz, prot, flags, fd, off);
@@ -759,6 +757,10 @@ TSAN_INTERCEPTOR(void*, mmap64, void *addr, long_t sz, int prot,
}
return res;
}
+#define TSAN_MAYBE_INTERCEPT_MMAP64 TSAN_INTERCEPT(mmap64)
+#else
+#define TSAN_MAYBE_INTERCEPT_MMAP64
+#endif
TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
@@ -767,21 +769,36 @@ TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
return res;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
return user_alloc(thr, pc, sz, align);
}
+#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
+#else
+#define TSAN_MAYBE_INTERCEPT_MEMALIGN
+#endif
+
+TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
+ SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
+ return user_alloc(thr, pc, sz, align);
+}
TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
SCOPED_INTERCEPTOR_RAW(valloc, sz);
return user_alloc(thr, pc, sz, GetPageSizeCached());
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
sz = RoundUp(sz, GetPageSizeCached());
return user_alloc(thr, pc, sz, GetPageSizeCached());
}
+#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
+#else
+#define TSAN_MAYBE_INTERCEPT_PVALLOC
+#endif
TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
@@ -827,7 +844,6 @@ static void thread_finalize(void *v) {
return;
}
{
- ScopedInRtl in_rtl;
ThreadState *thr = cur_thread();
ThreadFinish(thr);
SignalContext *sctx = thr->signal_ctx;
@@ -852,17 +868,19 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
int tid = 0;
{
ThreadState *thr = cur_thread();
- ScopedInRtl in_rtl;
+ // Thread-local state is not initialized yet.
+ ScopedIgnoreInterceptors ignore;
+ ThreadIgnoreBegin(thr, 0);
if (pthread_setspecific(g_thread_finalize_key,
(void *)kPthreadDestructorIterations)) {
Printf("ThreadSanitizer: failed to set thread key\n");
Die();
}
+ ThreadIgnoreEnd(thr, 0);
while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
pthread_yield();
atomic_store(&p->tid, 0, memory_order_release);
ThreadStart(thr, tid, GetTid());
- CHECK_EQ(thr->in_rtl, 1);
}
void *res = callback(param);
// Prevent the callback from being tail called,
@@ -875,6 +893,17 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
TSAN_INTERCEPTOR(int, pthread_create,
void *th, void *attr, void *(*callback)(void*), void * param) {
SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
+ if (ctx->after_multithreaded_fork) {
+ if (flags()->die_after_fork) {
+ Report("ThreadSanitizer: starting new threads after multi-threaded "
+ "fork is not supported. Dying (set die_after_fork=0 to override)\n");
+ Die();
+ } else {
+ VPrintf(1, "ThreadSanitizer: starting new threads after multi-threaded "
+ "fork is not supported (pid %d). Continuing because of "
+ "die_after_fork=0, but you are on your own\n", internal_getpid());
+ }
+ }
__sanitizer_pthread_attr_t myattr;
if (attr == 0) {
pthread_attr_init(&myattr);
@@ -882,13 +911,20 @@ TSAN_INTERCEPTOR(int, pthread_create,
}
int detached = 0;
REAL(pthread_attr_getdetachstate)(attr, &detached);
- AdjustStackSizeLinux(attr);
+ AdjustStackSize(attr);
ThreadParam p;
p.callback = callback;
p.param = param;
atomic_store(&p.tid, 0, memory_order_relaxed);
- int res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
+ int res = -1;
+ {
+ // Otherwise we see false positives in pthread stack manipulation.
+ ScopedIgnoreInterceptors ignore;
+ ThreadIgnoreBegin(thr, pc);
+ res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
+ ThreadIgnoreEnd(thr, pc);
+ }
if (res == 0) {
int tid = ThreadCreate(thr, pc, *(uptr*)th, detached);
CHECK_NE(tid, 0);
@@ -904,7 +940,9 @@ TSAN_INTERCEPTOR(int, pthread_create,
TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
int tid = ThreadTid(thr, pc, (uptr)th);
+ ThreadIgnoreBegin(thr, pc);
int res = BLOCK_REAL(pthread_join)(th, ret);
+ ThreadIgnoreEnd(thr, pc);
if (res == 0) {
ThreadJoin(thr, pc, tid);
}
@@ -921,6 +959,122 @@ TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
return res;
}
+// Problem:
+// NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
+// pthread_cond_t has a different size in the different versions.
+// If we call new REAL functions for an old pthread_cond_t, they will corrupt
+// memory after the pthread_cond_t (the old cond is smaller).
+// If we call old REAL functions for a new pthread_cond_t, we will lose some
+// functionality (e.g. old functions do not support waiting against
+// CLOCK_REALTIME).
+// Proper handling would require having 2 versions of the interceptors as well,
+// but this is messy; in particular, it requires linker scripts when the
+// sanitizer runtime is linked into a shared library.
+// Instead we assume we don't have dynamic libraries built against the old
+// pthread (2.2.5 dates back to 2002) and provide a legacy_pthread_cond flag
+// that allows working with old libraries (but this mode does not support
+// some features, e.g. pthread_condattr_getpshared).
+static void *init_cond(void *c, bool force = false) {
+ // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
+ // So we allocate additional memory on the side large enough to hold
+ // any pthread_cond_t object. Always call new REAL functions, but pass
+ // the aux object to them.
+ // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
+ // first word of pthread_cond_t to zero.
+ // the first word of pthread_cond_t to zero.
+ // All of this is relevant only for Linux.
+ return c;
+ atomic_uintptr_t *p = (atomic_uintptr_t*)c;
+ uptr cond = atomic_load(p, memory_order_acquire);
+ if (!force && cond != 0)
+ return (void*)cond;
+ void *newcond = WRAP(malloc)(pthread_cond_t_sz);
+ internal_memset(newcond, 0, pthread_cond_t_sz);
+ if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
+ memory_order_acq_rel))
+ return newcond;
+ WRAP(free)(newcond);
+ return (void*)cond;
+}
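The aux-cond allocation above is an instance of lazy publication via compare-and-swap. In isolation the pattern looks like the following sketch (hypothetical lazy_publish helper, assuming the same sanitizer atomics, WRAP macro and internal_memset that the file already uses):

static void *lazy_publish(atomic_uintptr_t *slot, uptr size) {
  uptr cur = atomic_load(slot, memory_order_acquire);
  if (cur != 0)
    return (void*)cur;               // already published by another thread
  void *fresh = WRAP(malloc)(size);
  internal_memset(fresh, 0, size);
  if (atomic_compare_exchange_strong(slot, &cur, (uptr)fresh,
                                     memory_order_acq_rel))
    return fresh;                    // we won the race and published our object
  WRAP(free)(fresh);                 // we lost; the failed CAS stored the winner in cur
  return (void*)cur;
}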
+
+struct CondMutexUnlockCtx {
+ ThreadState *thr;
+ uptr pc;
+ void *m;
+};
+
+static void cond_mutex_unlock(CondMutexUnlockCtx *arg) {
+ MutexLock(arg->thr, arg->pc, (uptr)arg->m);
+}
+
+INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
+ void *cond = init_cond(c, true);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
+ return REAL(pthread_cond_init)(cond, a);
+}
+
+INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
+ MutexUnlock(thr, pc, (uptr)m);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ CondMutexUnlockCtx arg = {thr, pc, m};
+ // This ensures that we handle mutex lock even in case of pthread_cancel.
+ // See test/tsan/cond_cancel.cc.
+ int res = call_pthread_cancel_with_cleanup(
+ (int(*)(void *c, void *m, void *abstime))REAL(pthread_cond_wait),
+ cond, m, 0, (void(*)(void *arg))cond_mutex_unlock, &arg);
+ if (res == errno_EOWNERDEAD)
+ MutexRepair(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
+ MutexUnlock(thr, pc, (uptr)m);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ CondMutexUnlockCtx arg = {thr, pc, m};
+ // This ensures that we handle mutex lock even in case of pthread_cancel.
+ // See test/tsan/cond_cancel.cc.
+ int res = call_pthread_cancel_with_cleanup(
+ REAL(pthread_cond_timedwait), cond, m, abstime,
+ (void(*)(void *arg))cond_mutex_unlock, &arg);
+ if (res == errno_EOWNERDEAD)
+ MutexRepair(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_cond_signal, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ return REAL(pthread_cond_signal)(cond);
+}
+
+INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ return REAL(pthread_cond_broadcast)(cond);
+}
+
+INTERCEPTOR(int, pthread_cond_destroy, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
+ int res = REAL(pthread_cond_destroy)(cond);
+ if (common_flags()->legacy_pthread_cond) {
+ // Free our aux cond and zero the pointer to not leave dangling pointers.
+ WRAP(free)(cond);
+ atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
+ }
+ return res;
+}
+
TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
int res = REAL(pthread_mutex_init)(m, a);
@@ -928,7 +1082,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
bool recursive = false;
if (a) {
int type = 0;
- if (pthread_mutexattr_gettype(a, &type) == 0)
+ if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
recursive = (type == PTHREAD_MUTEX_RECURSIVE
|| type == PTHREAD_MUTEX_RECURSIVE_NP);
}
@@ -952,7 +1106,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
if (res == EOWNERDEAD)
MutexRepair(thr, pc, (uptr)m);
if (res == 0 || res == EOWNERDEAD)
- MutexLock(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
return res;
}
@@ -996,7 +1150,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
int res = REAL(pthread_spin_trylock)(m);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
}
return res;
}
@@ -1039,7 +1193,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
int res = REAL(pthread_rwlock_tryrdlock)(m);
if (res == 0) {
- MutexReadLock(thr, pc, (uptr)m);
+ MutexReadLock(thr, pc, (uptr)m, /*try_lock=*/true);
}
return res;
}
@@ -1066,7 +1220,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
int res = REAL(pthread_rwlock_trywrlock)(m);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
}
return res;
}
@@ -1087,23 +1241,6 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
return res;
}
-TSAN_INTERCEPTOR(int, pthread_cond_destroy, void *c) {
- SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, c);
- MemoryWrite(thr, pc, (uptr)c, kSizeLog1);
- int res = REAL(pthread_cond_destroy)(c);
- return res;
-}
-
-TSAN_INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m,
- void *abstime) {
- SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, c, m, abstime);
- MutexUnlock(thr, pc, (uptr)m);
- MemoryRead(thr, pc, (uptr)c, kSizeLog1);
- int res = REAL(pthread_cond_timedwait)(c, m, abstime);
- MutexLock(thr, pc, (uptr)m);
- return res;
-}
-
TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
@@ -1132,19 +1269,13 @@ TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
- // Using SCOPED_INTERCEPTOR_RAW, because if we are called from an ignored lib,
- // the user callback must be executed with thr->in_rtl == 0.
if (o == 0 || f == 0)
return EINVAL;
atomic_uint32_t *a = static_cast<atomic_uint32_t*>(o);
u32 v = atomic_load(a, memory_order_acquire);
if (v == 0 && atomic_compare_exchange_strong(a, &v, 1,
memory_order_relaxed)) {
- const int old_in_rtl = thr->in_rtl;
- thr->in_rtl = 0;
(*f)();
- CHECK_EQ(thr->in_rtl, 0);
- thr->in_rtl = old_in_rtl;
if (!thr->in_ignored_lib)
Release(thr, pc, (uptr)o);
atomic_store(a, 2, memory_order_release);
@@ -1214,73 +1345,135 @@ TSAN_INTERCEPTOR(int, sem_getvalue, void *s, int *sval) {
return res;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__xstat, version, path, buf);
return REAL(__xstat)(version, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT___XSTAT TSAN_INTERCEPT(__xstat)
+#else
+#define TSAN_MAYBE_INTERCEPT___XSTAT
+#endif
TSAN_INTERCEPTOR(int, stat, const char *path, void *buf) {
+#if SANITIZER_FREEBSD
+ SCOPED_TSAN_INTERCEPTOR(stat, path, buf);
+ return REAL(stat)(path, buf);
+#else
SCOPED_TSAN_INTERCEPTOR(__xstat, 0, path, buf);
return REAL(__xstat)(0, path, buf);
+#endif
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __xstat64, int version, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__xstat64, version, path, buf);
return REAL(__xstat64)(version, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT___XSTAT64 TSAN_INTERCEPT(__xstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT___XSTAT64
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, stat64, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__xstat64, 0, path, buf);
return REAL(__xstat64)(0, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT_STAT64 TSAN_INTERCEPT(stat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_STAT64
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __lxstat, int version, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__lxstat, version, path, buf);
return REAL(__lxstat)(version, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT___LXSTAT TSAN_INTERCEPT(__lxstat)
+#else
+#define TSAN_MAYBE_INTERCEPT___LXSTAT
+#endif
TSAN_INTERCEPTOR(int, lstat, const char *path, void *buf) {
+#if SANITIZER_FREEBSD
+ SCOPED_TSAN_INTERCEPTOR(lstat, path, buf);
+ return REAL(lstat)(path, buf);
+#else
SCOPED_TSAN_INTERCEPTOR(__lxstat, 0, path, buf);
return REAL(__lxstat)(0, path, buf);
+#endif
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__lxstat64, version, path, buf);
return REAL(__lxstat64)(version, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT___LXSTAT64 TSAN_INTERCEPT(__lxstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT___LXSTAT64
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, lstat64, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__lxstat64, 0, path, buf);
return REAL(__lxstat64)(0, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT_LSTAT64 TSAN_INTERCEPT(lstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_LSTAT64
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
return REAL(__fxstat)(version, fd, buf);
}
+#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
+#else
+#define TSAN_MAYBE_INTERCEPT___FXSTAT
+#endif
TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
+#if SANITIZER_FREEBSD
+ SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(fstat)(fd, buf);
+#else
SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
return REAL(__fxstat)(0, fd, buf);
+#endif
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
return REAL(__fxstat64)(version, fd, buf);
}
+#define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT___FXSTAT64
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
return REAL(__fxstat64)(0, fd, buf);
}
+#define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_FSTAT64
+#endif
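The TSAN_MAYBE_INTERCEPT_* blocks above all follow one pattern: on platforms where the glibc-only symbol exists the macro expands to the interception call, elsewhere it expands to nothing, so InitializeInterceptors() can list every symbol unconditionally. A minimal standalone sketch of the same pattern, with hypothetical names (foo64, intercept):

#include <cstdio>

// Hypothetical stand-ins: foo64 is any symbol present on glibc but not on
// FreeBSD; intercept() is a placeholder for TSAN_INTERCEPT.
static void intercept(const char *name) { std::printf("intercepting %s\n", name); }

#if !defined(__FreeBSD__)
# define MAYBE_INTERCEPT_FOO64 intercept("foo64")
#else
# define MAYBE_INTERCEPT_FOO64   // expands to nothing, leaving only a ';'
#endif

void InitializeAll() {
  MAYBE_INTERCEPT_FOO64;  // safe to list on every platform
}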
TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode);
@@ -1290,6 +1483,7 @@ TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
return fd;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode);
int fd = REAL(open64)(name, flags, mode);
@@ -1297,6 +1491,10 @@ TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
FdFileCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
+#else
+#define TSAN_MAYBE_INTERCEPT_OPEN64
+#endif
TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
@@ -1306,6 +1504,7 @@ TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
return fd;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
int fd = REAL(creat64)(name, mode);
@@ -1313,6 +1512,10 @@ TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
FdFileCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_CREAT64
+#endif
TSAN_INTERCEPTOR(int, dup, int oldfd) {
SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
@@ -1338,6 +1541,7 @@ TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
return newfd2;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
int fd = REAL(eventfd)(initval, flags);
@@ -1345,7 +1549,12 @@ TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
FdEventCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
+#else
+#define TSAN_MAYBE_INTERCEPT_EVENTFD
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
if (fd >= 0)
@@ -1355,7 +1564,12 @@ TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
FdSignalCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
+#else
+#define TSAN_MAYBE_INTERCEPT_SIGNALFD
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, inotify_init, int fake) {
SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
int fd = REAL(inotify_init)(fake);
@@ -1363,7 +1577,12 @@ TSAN_INTERCEPTOR(int, inotify_init, int fake) {
FdInotifyCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
+#else
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
int fd = REAL(inotify_init1)(flags);
@@ -1371,6 +1590,10 @@ TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
FdInotifyCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
+#else
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
+#endif
TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
@@ -1413,6 +1636,7 @@ TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
return res;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, epoll_create, int size) {
SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
int fd = REAL(epoll_create)(size);
@@ -1420,7 +1644,12 @@ TSAN_INTERCEPTOR(int, epoll_create, int size) {
FdPollCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE TSAN_INTERCEPT(epoll_create)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
int fd = REAL(epoll_create1)(flags);
@@ -1428,6 +1657,10 @@ TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
FdPollCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1 TSAN_INTERCEPT(epoll_create1)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1
+#endif
TSAN_INTERCEPTOR(int, close, int fd) {
SCOPED_TSAN_INTERCEPTOR(close, fd);
@@ -1436,14 +1669,20 @@ TSAN_INTERCEPTOR(int, close, int fd) {
return REAL(close)(fd);
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __close, int fd) {
SCOPED_TSAN_INTERCEPTOR(__close, fd);
if (fd >= 0)
FdClose(thr, pc, fd);
return REAL(__close)(fd);
}
+#define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
+#else
+#define TSAN_MAYBE_INTERCEPT___CLOSE
+#endif
// glibc guts
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
int fds[64];
@@ -1454,6 +1693,10 @@ TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
}
REAL(__res_iclose)(state, free_addr);
}
+#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
+#else
+#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
+#endif
TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
@@ -1509,10 +1752,9 @@ TSAN_INTERCEPTOR(int, unlink, char *path) {
return res;
}
-TSAN_INTERCEPTOR(void*, fopen, char *path, char *mode) {
- SCOPED_TSAN_INTERCEPTOR(fopen, path, mode);
- void *res = REAL(fopen)(path, mode);
- Acquire(thr, pc, File2addr(path));
+TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
+ void *res = REAL(tmpfile)(fake);
if (res) {
int fd = fileno_unlocked(res);
if (fd >= 0)
@@ -1521,15 +1763,10 @@ TSAN_INTERCEPTOR(void*, fopen, char *path, char *mode) {
return res;
}
-TSAN_INTERCEPTOR(void*, freopen, char *path, char *mode, void *stream) {
- SCOPED_TSAN_INTERCEPTOR(freopen, path, mode, stream);
- if (stream) {
- int fd = fileno_unlocked(stream);
- if (fd >= 0)
- FdClose(thr, pc, fd);
- }
- void *res = REAL(freopen)(path, mode, stream);
- Acquire(thr, pc, File2addr(path));
+#if !SANITIZER_FREEBSD
+TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
+ void *res = REAL(tmpfile64)(fake);
if (res) {
int fd = fileno_unlocked(res);
if (fd >= 0)
@@ -1537,19 +1774,10 @@ TSAN_INTERCEPTOR(void*, freopen, char *path, char *mode, void *stream) {
}
return res;
}
-
-TSAN_INTERCEPTOR(int, fclose, void *stream) {
- // libc file streams can call user-supplied functions, see fopencookie.
- {
- SCOPED_TSAN_INTERCEPTOR(fclose, stream);
- if (stream) {
- int fd = fileno_unlocked(stream);
- if (fd >= 0)
- FdClose(thr, pc, fd);
- }
- }
- return REAL(fclose)(stream);
-}
+#define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
+#else
+#define TSAN_MAYBE_INTERCEPT_TMPFILE64
+#endif
TSAN_INTERCEPTOR(uptr, fread, void *ptr, uptr size, uptr nmemb, void *f) {
// libc file streams can call user-supplied functions, see fopencookie.
@@ -1569,14 +1797,6 @@ TSAN_INTERCEPTOR(uptr, fwrite, const void *p, uptr size, uptr nmemb, void *f) {
return REAL(fwrite)(p, size, nmemb, f);
}
-TSAN_INTERCEPTOR(int, fflush, void *stream) {
- // libc file streams can call user-supplied functions, see fopencookie.
- {
- SCOPED_TSAN_INTERCEPTOR(fflush, stream);
- }
- return REAL(fflush)(stream);
-}
-
TSAN_INTERCEPTOR(void, abort, int fake) {
SCOPED_TSAN_INTERCEPTOR(abort, fake);
REAL(fflush)(0);
@@ -1604,6 +1824,7 @@ TSAN_INTERCEPTOR(void*, opendir, char *path) {
return res;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
if (epfd >= 0)
@@ -1615,7 +1836,12 @@ TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
int res = REAL(epoll_ctl)(epfd, op, fd, ev);
return res;
}
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CTL TSAN_INTERCEPT(epoll_ctl)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CTL
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
if (epfd >= 0)
@@ -1625,31 +1851,119 @@ TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
FdAcquire(thr, pc, epfd);
return res;
}
+#define TSAN_MAYBE_INTERCEPT_EPOLL_WAIT TSAN_INTERCEPT(epoll_wait)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL_WAIT
+#endif
+
+namespace __tsan {
+
+static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
+ bool sigact, int sig, my_siginfo_t *info, void *uctx) {
+ if (acquire)
+ Acquire(thr, 0, (uptr)&sigactions[sig]);
+ // Ensure that the handler does not spoil errno.
+ const int saved_errno = errno;
+ errno = 99;
+ // Need to remember pc before the call, because the handler can reset it.
+ uptr pc = sigact ?
+ (uptr)sigactions[sig].sa_sigaction :
+ (uptr)sigactions[sig].sa_handler;
+ pc += 1; // return address is expected, OutputReport() will undo this
+ if (sigact)
+ sigactions[sig].sa_sigaction(sig, info, uctx);
+ else
+ sigactions[sig].sa_handler(sig);
+  // We do not detect errno spoiling for SIGTERM, because some SIGTERM
+  // handlers do spoil errno but reraise SIGTERM; tsan would report a false
+  // positive in such a case.
+  // It's difficult to properly detect this situation (reraise), because in
+  // the async signal processing case (when the handler is called directly
+  // from rtl_generic_sighandler) we have not yet received the reraised
+  // signal, and it looks too fragile to intercept all ways to reraise a signal.
+ if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
+ VarSizeStackTrace stack;
+ ObtainCurrentStack(thr, pc, &stack);
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(ReportTypeErrnoInSignal);
+ if (!IsFiredSuppression(ctx, rep, stack)) {
+ rep.AddStack(stack, true);
+ OutputReport(thr, rep);
+ }
+ }
+ errno = saved_errno;
+}
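The errno bookkeeping above (set errno to a sentinel, call the handler, report if the sentinel changed) catches handlers that clobber errno without restoring it; such a handler can corrupt the errno of whatever call it interrupted. A minimal standalone example of the bug class being detected, independent of tsan:

#include <cerrno>
#include <csignal>
#include <cstdio>
#include <fcntl.h>

// Buggy handler: open() overwrites errno with ENOENT and the handler does not
// restore it, so code interrupted by the signal sees a bogus errno afterwards.
static void handler(int) {
  // A correct handler would save errno on entry and restore it before returning.
  (void)open("/nonexistent", O_RDONLY);
}

int main() {
  std::signal(SIGUSR1, handler);
  errno = 0;
  std::raise(SIGUSR1);
  std::printf("errno after handler: %d\n", errno);  // typically ENOENT, not 0
}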
+
+void ProcessPendingSignals(ThreadState *thr) {
+ SignalContext *sctx = SigCtx(thr);
+ if (sctx == 0 ||
+ atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0)
+ return;
+ atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
+ atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
+ // These are too big for stack.
+ static THREADLOCAL __sanitizer_sigset_t emptyset, oldset;
+ REAL(sigfillset)(&emptyset);
+ pthread_sigmask(SIG_SETMASK, &emptyset, &oldset);
+ for (int sig = 0; sig < kSigCount; sig++) {
+ SignalDesc *signal = &sctx->pending_signals[sig];
+ if (signal->armed) {
+ signal->armed = false;
+ if (sigactions[sig].sa_handler != SIG_DFL
+ && sigactions[sig].sa_handler != SIG_IGN) {
+ CallUserSignalHandler(thr, false, true, signal->sigaction,
+ sig, &signal->siginfo, &signal->ctx);
+ }
+ }
+ }
+ pthread_sigmask(SIG_SETMASK, &oldset, 0);
+ atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
+}
+
+} // namespace __tsan
+
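ProcessPendingSignals above replays deferred handlers with every signal blocked, so a handler cannot be interrupted by yet another asynchronous signal while tsan is delivering it. The same masking pattern in isolation, assuming plain POSIX:

#include <csignal>
#include <pthread.h>

// Run fn with all signals blocked on the calling thread, then restore the
// previous mask -- the dance ProcessPendingSignals() performs around its loop.
static void run_with_signals_blocked(void (*fn)()) {
  sigset_t all, old;
  sigfillset(&all);
  pthread_sigmask(SIG_SETMASK, &all, &old);
  fn();
  pthread_sigmask(SIG_SETMASK, &old, nullptr);
}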
+static bool is_sync_signal(SignalContext *sctx, int sig) {
+ return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
+ sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
+ // If we are sending signal to ourselves, we must process it now.
+ (sctx && sig == sctx->int_signal_send);
+}
void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
my_siginfo_t *info, void *ctx) {
ThreadState *thr = cur_thread();
SignalContext *sctx = SigCtx(thr);
+ if (sig < 0 || sig >= kSigCount) {
+ VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
+ return;
+ }
// Don't mess with synchronous signals.
- if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
- sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
- // If we are sending signal to ourselves, we must process it now.
- (sctx && sig == sctx->int_signal_send) ||
+ const bool sync = is_sync_signal(sctx, sig);
+ if (sync ||
// If we are in blocking function, we can safely process it now
// (but check if we are in a recursive interceptor,
// i.e. pthread_join()->munmap()).
- (sctx && sctx->in_blocking_func == 1 && thr->in_rtl == 1)) {
- int in_rtl = thr->in_rtl;
- thr->in_rtl = 0;
- CHECK_EQ(thr->in_signal_handler, false);
- thr->in_signal_handler = true;
- if (sigact)
- sigactions[sig].sa_sigaction(sig, info, ctx);
- else
- sigactions[sig].sa_handler(sig);
- CHECK_EQ(thr->in_signal_handler, true);
- thr->in_signal_handler = false;
- thr->in_rtl = in_rtl;
+ (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
+ atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
+ if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
+      // We ignore interceptors in blocking functions, so temporarily
+      // enable them again while we are calling the user function.
+ int const i = thr->ignore_interceptors;
+ thr->ignore_interceptors = 0;
+ atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
+ CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx);
+ thr->ignore_interceptors = i;
+ atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
+ } else {
+      // Be very conservative about when we do acquire in this case.
+      // It's unsafe to do acquire in async handlers, because ThreadState
+      // can be in an inconsistent state.
+      // SIGSYS looks relatively safe -- it's synchronous and can actually
+      // need some global state.
+ bool acq = (sig == SIGSYS);
+ CallUserSignalHandler(thr, sync, acq, sigact, sig, info, ctx);
+ }
+ atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
return;
}
@@ -1663,7 +1977,7 @@ void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
internal_memcpy(&signal->siginfo, info, sizeof(*info));
if (ctx)
internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
- sctx->pending_signal_count++;
+ atomic_store(&sctx->have_pending_signals, 1, memory_order_relaxed);
}
}
@@ -1691,6 +2005,7 @@ TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) {
else
newact.sa_handler = rtl_sighandler;
}
+ ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
int res = REAL(sigaction)(sig, &newact, 0);
return res;
}
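The added ReleaseStore on &sigactions[sig] pairs with the Acquire on the same address in CallUserSignalHandler, so writes performed before installing a handler are visible inside the handler even when it later runs on another thread. A rough standalone analogy with C++ atomics (names are illustrative):

#include <atomic>
#include <cassert>

static int payload;                          // data the "handler" will read
static std::atomic<bool> installed{false};

void install() {
  payload = 42;                                      // happens before installation
  installed.store(true, std::memory_order_release);  // ~ ReleaseStore(&sigactions[sig])
}

void invoke() {
  if (installed.load(std::memory_order_acquire))     // ~ Acquire(&sigactions[sig])
    assert(payload == 42);                           // guaranteed to be visible
}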
@@ -1769,57 +2084,48 @@ TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
// and can report false race between malloc and free
// inside of getaddrinfo. So ignore memory accesses.
ThreadIgnoreBegin(thr, pc);
- // getaddrinfo calls fopen, which can be intercepted by user.
- thr->in_rtl--;
- CHECK_EQ(thr->in_rtl, 0);
int res = REAL(getaddrinfo)(node, service, hints, rv);
- thr->in_rtl++;
ThreadIgnoreEnd(thr, pc);
return res;
}
-// Linux kernel has a bug that leads to kernel deadlock if a process
-// maps TBs of memory and then calls mlock().
-static void MlockIsUnsupported() {
- static atomic_uint8_t printed;
- if (atomic_exchange(&printed, 1, memory_order_relaxed))
- return;
- if (flags()->verbosity > 0)
- Printf("INFO: ThreadSanitizer ignores mlock/mlockall/munlock/munlockall\n");
-}
-
-TSAN_INTERCEPTOR(int, mlock, const void *addr, uptr len) {
- MlockIsUnsupported();
- return 0;
-}
-
-TSAN_INTERCEPTOR(int, munlock, const void *addr, uptr len) {
- MlockIsUnsupported();
- return 0;
-}
-
-TSAN_INTERCEPTOR(int, mlockall, int flags) {
- MlockIsUnsupported();
- return 0;
-}
-
-TSAN_INTERCEPTOR(int, munlockall, void) {
- MlockIsUnsupported();
- return 0;
-}
-
TSAN_INTERCEPTOR(int, fork, int fake) {
+ if (cur_thread()->in_symbolizer)
+ return REAL(fork)(fake);
SCOPED_INTERCEPTOR_RAW(fork, fake);
+ ForkBefore(thr, pc);
int pid = REAL(fork)(fake);
if (pid == 0) {
// child
+ ForkChildAfter(thr, pc);
FdOnFork(thr, pc);
} else if (pid > 0) {
// parent
+ ForkParentAfter(thr, pc);
+ } else {
+ // error
+ ForkParentAfter(thr, pc);
}
return pid;
}
+TSAN_INTERCEPTOR(int, vfork, int fake) {
+  // Some programs (e.g. openjdk) call close for all file descriptors
+  // in the child process. Under tsan this leads to false positives, because
+  // the address space is shared, so the parent process also thinks that
+  // the descriptors are closed (while they are actually not), and misses
+  // the corresponding synchronization.
+  // Strictly speaking this is undefined behavior, because the vfork child is
+  // not allowed to call any functions other than exec/exit. But this is what
+  // openjdk does, so we want to handle it.
+  // We could disable interceptors in the child process. But it's not possible
+  // to simply intercept and wrap vfork, because the vfork child is not allowed
+  // to return from the function that calls vfork, and that's exactly what
+  // we would do. So this would require some assembly trickery as well.
+  // Instead we simply turn vfork into fork.
+ return WRAP(fork)(fake);
+}
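A standalone sketch (not part of this patch) of the same trick: an LD_PRELOAD interposer that forwards vfork() to fork(), so the child gets its own address space and descriptor table and the parent's bookkeeping stays consistent. As the comment above notes, this is only safe for children that limit themselves to the exec/exit pattern.

// Build:  g++ -shared -fPIC vfork_to_fork.cc -o vfork_to_fork.so
// Use:    LD_PRELOAD=./vfork_to_fork.so ./app
#include <unistd.h>

extern "C" pid_t vfork(void) {
  // fork() is slower, but for well-behaved callers it is a semantic superset
  // and avoids the shared-address-space hazards described above.
  return fork();
}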
+
static int OnExit(ThreadState *thr) {
int status = Finalize(thr);
REAL(fflush)(0);
@@ -1841,19 +2147,22 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
}
#include "sanitizer_common/sanitizer_platform_interceptors.h"
-// Causes interceptor recursion (getpwuid_r() calls fopen())
-#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
-#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
// Causes interceptor recursion (getaddrinfo() and fopen())
#undef SANITIZER_INTERCEPT_GETADDRINFO
-#undef SANITIZER_INTERCEPT_GETNAMEINFO
-// Causes interceptor recursion (glob64() calls lstat64())
-#undef SANITIZER_INTERCEPT_GLOB
+// These interceptors do not seem to be strictly necessary for tsan.
+// But we see cases where the interceptors consume 70% of execution time.
+// Memory blocks passed to fgetgrent_r are "written to" by tsan several times.
+// First, there is some recursion (getgrnam_r calls fgetgrent_r), and each
+// function "writes to" the buffer. Then, the same memory is "written to"
+// twice, first as buf and then as pwbufp (both of them refer to the same
+// addresses).
+#undef SANITIZER_INTERCEPT_GETPWENT
+#undef SANITIZER_INTERCEPT_GETPWENT_R
+#undef SANITIZER_INTERCEPT_FGETPWENT
+#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
+#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
-#define COMMON_INTERCEPTOR_UNPOISON_PARAM(ctx, count) \
- do { \
- } while (false)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
@@ -1871,6 +2180,31 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
ctx = (void *)&_ctx; \
(void) ctx;
+#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
+ SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
+ TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
+ ctx = (void *)&_ctx; \
+ (void) ctx;
+
+#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
+ Acquire(thr, pc, File2addr(path)); \
+ if (file) { \
+ int fd = fileno_unlocked(file); \
+ if (fd >= 0) FdFileCreate(thr, pc, fd); \
+ }
+
+#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
+ if (file) { \
+ int fd = fileno_unlocked(file); \
+ if (fd >= 0) FdClose(thr, pc, fd); \
+ }
+
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res) \
+ libignore()->OnLibraryLoaded(filename)
+
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
+ libignore()->OnLibraryUnloaded()
+
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
@@ -1887,7 +2221,7 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
- CTX()->thread_registry->SetThreadNameByUserId(thread, name)
+ __tsan::ctx->thread_registry->SetThreadNameByUserId(thread, name)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
@@ -1914,6 +2248,8 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
#define TSAN_SYSCALL() \
ThreadState *thr = cur_thread(); \
+ if (thr->ignore_interceptors) \
+ return; \
ScopedSyscall scoped_syscall(thr) \
/**/
@@ -1922,15 +2258,11 @@ struct ScopedSyscall {
explicit ScopedSyscall(ThreadState *thr)
: thr(thr) {
- if (thr->in_rtl == 0)
- Initialize(thr);
- thr->in_rtl++;
+ Initialize(thr);
}
~ScopedSyscall() {
- thr->in_rtl--;
- if (thr->in_rtl == 0)
- ProcessPendingSignals(thr);
+ ProcessPendingSignals(thr);
}
};
@@ -1942,12 +2274,12 @@ static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
static void syscall_acquire(uptr pc, uptr addr) {
TSAN_SYSCALL();
Acquire(thr, pc, addr);
- Printf("syscall_acquire(%p)\n", addr);
+ DPrintf("syscall_acquire(%p)\n", addr);
}
static void syscall_release(uptr pc, uptr addr) {
TSAN_SYSCALL();
- Printf("syscall_release(%p)\n", addr);
+ DPrintf("syscall_release(%p)\n", addr);
Release(thr, pc, addr);
}
@@ -1959,26 +2291,32 @@ static void syscall_fd_close(uptr pc, int fd) {
static USED void syscall_fd_acquire(uptr pc, int fd) {
TSAN_SYSCALL();
FdAcquire(thr, pc, fd);
- Printf("syscall_fd_acquire(%p)\n", fd);
+ DPrintf("syscall_fd_acquire(%p)\n", fd);
}
static USED void syscall_fd_release(uptr pc, int fd) {
TSAN_SYSCALL();
- Printf("syscall_fd_release(%p)\n", fd);
+ DPrintf("syscall_fd_release(%p)\n", fd);
FdRelease(thr, pc, fd);
}
static void syscall_pre_fork(uptr pc) {
TSAN_SYSCALL();
+ ForkBefore(thr, pc);
}
-static void syscall_post_fork(uptr pc, int res) {
+static void syscall_post_fork(uptr pc, int pid) {
TSAN_SYSCALL();
- if (res == 0) {
+ if (pid == 0) {
// child
+ ForkChildAfter(thr, pc);
FdOnFork(thr, pc);
- } else if (res > 0) {
+ } else if (pid > 0) {
// parent
+ ForkParentAfter(thr, pc);
+ } else {
+ // error
+ ForkParentAfter(thr, pc);
}
}
@@ -2024,81 +2362,34 @@ namespace __tsan {
static void finalize(void *arg) {
ThreadState *thr = cur_thread();
- uptr pc = 0;
- atexit_ctx->exit(thr, pc);
int status = Finalize(thr);
- REAL(fflush)(0);
+ // Make sure the output is not lost.
+ // Flushing all the streams here may freeze the process if a child thread is
+ // performing file stream operations at the same time.
+ REAL(fflush)(stdout);
+ REAL(fflush)(stderr);
if (status)
REAL(_exit)(status);
}
-void ProcessPendingSignals(ThreadState *thr) {
- CHECK_EQ(thr->in_rtl, 0);
- SignalContext *sctx = SigCtx(thr);
- if (sctx == 0 || sctx->pending_signal_count == 0 || thr->in_signal_handler)
- return;
- Context *ctx = CTX();
- thr->in_signal_handler = true;
- sctx->pending_signal_count = 0;
- // These are too big for stack.
- static THREADLOCAL __sanitizer_sigset_t emptyset, oldset;
- REAL(sigfillset)(&emptyset);
- pthread_sigmask(SIG_SETMASK, &emptyset, &oldset);
- for (int sig = 0; sig < kSigCount; sig++) {
- SignalDesc *signal = &sctx->pending_signals[sig];
- if (signal->armed) {
- signal->armed = false;
- if (sigactions[sig].sa_handler != SIG_DFL
- && sigactions[sig].sa_handler != SIG_IGN) {
- // Insure that the handler does not spoil errno.
- const int saved_errno = errno;
- errno = 0;
- if (signal->sigaction)
- sigactions[sig].sa_sigaction(sig, &signal->siginfo, &signal->ctx);
- else
- sigactions[sig].sa_handler(sig);
- if (flags()->report_bugs && errno != 0) {
- ScopedInRtl in_rtl;
- __tsan::StackTrace stack;
- uptr pc = signal->sigaction ?
- (uptr)sigactions[sig].sa_sigaction :
- (uptr)sigactions[sig].sa_handler;
- pc += 1; // return address is expected, OutputReport() will undo this
- stack.Init(&pc, 1);
- ThreadRegistryLock l(ctx->thread_registry);
- ScopedReport rep(ReportTypeErrnoInSignal);
- if (!IsFiredSuppression(ctx, rep, stack)) {
- rep.AddStack(&stack);
- OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
- }
- }
- errno = saved_errno;
- }
- }
- }
- pthread_sigmask(SIG_SETMASK, &oldset, 0);
- CHECK_EQ(thr->in_signal_handler, true);
- thr->in_signal_handler = false;
-}
-
static void unreachable() {
- Printf("FATAL: ThreadSanitizer: unreachable called\n");
+ Report("FATAL: ThreadSanitizer: unreachable called\n");
Die();
}
void InitializeInterceptors() {
- CHECK_GT(cur_thread()->in_rtl, 0);
-
// We need to setup it early, because functions like dlsym() can call it.
REAL(memset) = internal_memset;
REAL(memcpy) = internal_memcpy;
REAL(memcmp) = internal_memcmp;
// Instruct libc malloc to consume less memory.
+#if !SANITIZER_FREEBSD
mallopt(1, 0); // M_MXFAST
mallopt(-3, 32*1024); // M_MMAP_THRESHOLD
+#endif
- SANITIZER_COMMON_INTERCEPTORS_INIT;
+ InitializeCommonInterceptors();
// We can not use TSAN_INTERCEPT to get setjmp addr,
// because it does &setjmp and setjmp is not present in some versions of libc.
@@ -2118,18 +2409,16 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(free);
TSAN_INTERCEPT(cfree);
TSAN_INTERCEPT(mmap);
- TSAN_INTERCEPT(mmap64);
+ TSAN_MAYBE_INTERCEPT_MMAP64;
TSAN_INTERCEPT(munmap);
- TSAN_INTERCEPT(memalign);
+ TSAN_MAYBE_INTERCEPT_MEMALIGN;
TSAN_INTERCEPT(valloc);
- TSAN_INTERCEPT(pvalloc);
+ TSAN_MAYBE_INTERCEPT_PVALLOC;
TSAN_INTERCEPT(posix_memalign);
TSAN_INTERCEPT(strlen);
TSAN_INTERCEPT(memset);
TSAN_INTERCEPT(memcpy);
- TSAN_INTERCEPT(memchr);
- TSAN_INTERCEPT(memrchr);
TSAN_INTERCEPT(memmove);
TSAN_INTERCEPT(memcmp);
TSAN_INTERCEPT(strchr);
@@ -2144,6 +2433,13 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(pthread_join);
TSAN_INTERCEPT(pthread_detach);
+ TSAN_INTERCEPT_VER(pthread_cond_init, "GLIBC_2.3.2");
+ TSAN_INTERCEPT_VER(pthread_cond_signal, "GLIBC_2.3.2");
+ TSAN_INTERCEPT_VER(pthread_cond_broadcast, "GLIBC_2.3.2");
+ TSAN_INTERCEPT_VER(pthread_cond_wait, "GLIBC_2.3.2");
+ TSAN_INTERCEPT_VER(pthread_cond_timedwait, "GLIBC_2.3.2");
+ TSAN_INTERCEPT_VER(pthread_cond_destroy, "GLIBC_2.3.2");
+
TSAN_INTERCEPT(pthread_mutex_init);
TSAN_INTERCEPT(pthread_mutex_destroy);
TSAN_INTERCEPT(pthread_mutex_trylock);
@@ -2165,9 +2461,6 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
TSAN_INTERCEPT(pthread_rwlock_unlock);
- INTERCEPT_FUNCTION_VER(pthread_cond_destroy, "GLIBC_2.3.2");
- INTERCEPT_FUNCTION_VER(pthread_cond_timedwait, "GLIBC_2.3.2");
-
TSAN_INTERCEPT(pthread_barrier_init);
TSAN_INTERCEPT(pthread_barrier_destroy);
TSAN_INTERCEPT(pthread_barrier_wait);
@@ -2183,38 +2476,38 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(sem_getvalue);
TSAN_INTERCEPT(stat);
- TSAN_INTERCEPT(__xstat);
- TSAN_INTERCEPT(stat64);
- TSAN_INTERCEPT(__xstat64);
+ TSAN_MAYBE_INTERCEPT___XSTAT;
+ TSAN_MAYBE_INTERCEPT_STAT64;
+ TSAN_MAYBE_INTERCEPT___XSTAT64;
TSAN_INTERCEPT(lstat);
- TSAN_INTERCEPT(__lxstat);
- TSAN_INTERCEPT(lstat64);
- TSAN_INTERCEPT(__lxstat64);
+ TSAN_MAYBE_INTERCEPT___LXSTAT;
+ TSAN_MAYBE_INTERCEPT_LSTAT64;
+ TSAN_MAYBE_INTERCEPT___LXSTAT64;
TSAN_INTERCEPT(fstat);
- TSAN_INTERCEPT(__fxstat);
- TSAN_INTERCEPT(fstat64);
- TSAN_INTERCEPT(__fxstat64);
+ TSAN_MAYBE_INTERCEPT___FXSTAT;
+ TSAN_MAYBE_INTERCEPT_FSTAT64;
+ TSAN_MAYBE_INTERCEPT___FXSTAT64;
TSAN_INTERCEPT(open);
- TSAN_INTERCEPT(open64);
+ TSAN_MAYBE_INTERCEPT_OPEN64;
TSAN_INTERCEPT(creat);
- TSAN_INTERCEPT(creat64);
+ TSAN_MAYBE_INTERCEPT_CREAT64;
TSAN_INTERCEPT(dup);
TSAN_INTERCEPT(dup2);
TSAN_INTERCEPT(dup3);
- TSAN_INTERCEPT(eventfd);
- TSAN_INTERCEPT(signalfd);
- TSAN_INTERCEPT(inotify_init);
- TSAN_INTERCEPT(inotify_init1);
+ TSAN_MAYBE_INTERCEPT_EVENTFD;
+ TSAN_MAYBE_INTERCEPT_SIGNALFD;
+ TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
+ TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
TSAN_INTERCEPT(socket);
TSAN_INTERCEPT(socketpair);
TSAN_INTERCEPT(connect);
TSAN_INTERCEPT(bind);
TSAN_INTERCEPT(listen);
- TSAN_INTERCEPT(epoll_create);
- TSAN_INTERCEPT(epoll_create1);
+ TSAN_MAYBE_INTERCEPT_EPOLL_CREATE;
+ TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1;
TSAN_INTERCEPT(close);
- TSAN_INTERCEPT(__close);
- TSAN_INTERCEPT(__res_iclose);
+ TSAN_MAYBE_INTERCEPT___CLOSE;
+ TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
TSAN_INTERCEPT(pipe);
TSAN_INTERCEPT(pipe2);
@@ -2223,19 +2516,17 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(recv);
TSAN_INTERCEPT(unlink);
- TSAN_INTERCEPT(fopen);
- TSAN_INTERCEPT(freopen);
- TSAN_INTERCEPT(fclose);
+ TSAN_INTERCEPT(tmpfile);
+ TSAN_MAYBE_INTERCEPT_TMPFILE64;
TSAN_INTERCEPT(fread);
TSAN_INTERCEPT(fwrite);
- TSAN_INTERCEPT(fflush);
TSAN_INTERCEPT(abort);
TSAN_INTERCEPT(puts);
TSAN_INTERCEPT(rmdir);
TSAN_INTERCEPT(opendir);
- TSAN_INTERCEPT(epoll_ctl);
- TSAN_INTERCEPT(epoll_wait);
+ TSAN_MAYBE_INTERCEPT_EPOLL_CTL;
+ TSAN_MAYBE_INTERCEPT_EPOLL_WAIT;
TSAN_INTERCEPT(sigaction);
TSAN_INTERCEPT(signal);
@@ -2249,14 +2540,8 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(gettimeofday);
TSAN_INTERCEPT(getaddrinfo);
- TSAN_INTERCEPT(mlock);
- TSAN_INTERCEPT(munlock);
- TSAN_INTERCEPT(mlockall);
- TSAN_INTERCEPT(munlockall);
-
TSAN_INTERCEPT(fork);
- TSAN_INTERCEPT(dlopen);
- TSAN_INTERCEPT(dlclose);
+ TSAN_INTERCEPT(vfork);
TSAN_INTERCEPT(on_exit);
TSAN_INTERCEPT(__cxa_atexit);
TSAN_INTERCEPT(_exit);
@@ -2264,9 +2549,6 @@ void InitializeInterceptors() {
// Need to setup it, because interceptors check that the function is resolved.
// But atexit is emitted directly into the module, so can't be resolved.
REAL(atexit) = (int(*)(void(*)()))unreachable;
- atexit_ctx = new(internal_alloc(MBlockAtExit, sizeof(AtExitContext)))
- AtExitContext();
-
if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
Printf("ThreadSanitizer: failed to setup atexit callback\n");
Die();
@@ -2280,16 +2562,19 @@ void InitializeInterceptors() {
FdInit();
}
-void internal_start_thread(void(*func)(void *arg), void *arg) {
- // Start the thread with signals blocked, otherwise it can steal users
- // signals.
- __sanitizer_kernel_sigset_t set, old;
+void *internal_start_thread(void(*func)(void *arg), void *arg) {
+ // Start the thread with signals blocked, otherwise it can steal user signals.
+ __sanitizer_sigset_t set, old;
internal_sigfillset(&set);
internal_sigprocmask(SIG_SETMASK, &set, &old);
void *th;
REAL(pthread_create)(&th, 0, (void*(*)(void *arg))func, arg);
- REAL(pthread_detach)(th);
internal_sigprocmask(SIG_SETMASK, &old, 0);
+ return th;
+}
+
+void internal_join_thread(void *th) {
+ REAL(pthread_join)(th, 0);
}
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_interface_ann.cc b/libsanitizer/tsan/tsan_interface_ann.cc
index 38224f429c..b44e56898d 100644
--- a/libsanitizer/tsan/tsan_interface_ann.cc
+++ b/libsanitizer/tsan/tsan_interface_ann.cc
@@ -31,22 +31,17 @@ class ScopedAnnotation {
public:
ScopedAnnotation(ThreadState *thr, const char *aname, const char *f, int l,
uptr pc)
- : thr_(thr)
- , in_rtl_(thr->in_rtl) {
- CHECK_EQ(thr_->in_rtl, 0);
+ : thr_(thr) {
FuncEntry(thr_, pc);
- thr_->in_rtl++;
DPrintf("#%d: annotation %s() %s:%d\n", thr_->tid, aname, f, l);
}
~ScopedAnnotation() {
- thr_->in_rtl--;
- CHECK_EQ(in_rtl_, thr_->in_rtl);
FuncExit(thr_);
+ CheckNoLocks(thr_);
}
private:
ThreadState *const thr_;
- const int in_rtl_;
};
#define SCOPED_ANNOTATION(typ) \
@@ -56,8 +51,8 @@ class ScopedAnnotation {
const uptr caller_pc = (uptr)__builtin_return_address(0); \
StatInc(thr, StatAnnotation); \
StatInc(thr, Stat##typ); \
- ScopedAnnotation sa(thr, __FUNCTION__, f, l, caller_pc); \
- const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+ ScopedAnnotation sa(thr, __func__, f, l, caller_pc); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
(void)pc; \
/**/
@@ -129,8 +124,6 @@ static ExpectRace *FindRace(ExpectRace *list, uptr addr, uptr size) {
static bool CheckContains(ExpectRace *list, uptr addr, uptr size) {
ExpectRace *race = FindRace(list, addr, size);
- if (race == 0 && AlternativeAddress(addr))
- race = FindRace(list, AlternativeAddress(addr), size);
if (race == 0)
return false;
DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
@@ -309,7 +302,7 @@ void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
ExpectRace *race = dyn_ann_ctx->expect.next;
if (race->hitcount == 0) {
- CTX()->nmissed_expected++;
+ ctx->nmissed_expected++;
ReportMissedExpectedRace(race);
}
race->prev->next = race->next;
@@ -459,4 +452,6 @@ const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) {
void INTERFACE_ATTRIBUTE
AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {}
+void INTERFACE_ATTRIBUTE
+AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {}
} // extern "C"
diff --git a/libsanitizer/tsan/tsan_interface_atomic.cc b/libsanitizer/tsan/tsan_interface_atomic.cc
index 180d87b799..15fc21c462 100644
--- a/libsanitizer/tsan/tsan_interface_atomic.cc
+++ b/libsanitizer/tsan/tsan_interface_atomic.cc
@@ -10,7 +10,7 @@
//===----------------------------------------------------------------------===//
// ThreadSanitizer atomic operations are based on C++11/C1x standards.
-// For background see C++11 standard. A slightly older, publically
+// For background see C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
@@ -19,72 +19,40 @@
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
-#include "tsan_interface_atomic.h"
+#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"
using namespace __tsan; // NOLINT
-#define SCOPED_ATOMIC(func, ...) \
- const uptr callpc = (uptr)__builtin_return_address(0); \
- uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
- mo = ConvertOrder(mo); \
- mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
- ThreadState *const thr = cur_thread(); \
- AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
- ScopedAtomic sa(thr, callpc, a, mo, __FUNCTION__); \
- return Atomic##func(thr, pc, __VA_ARGS__); \
-/**/
-
-// Some shortcuts.
-typedef __tsan_memory_order morder;
-typedef __tsan_atomic8 a8;
-typedef __tsan_atomic16 a16;
-typedef __tsan_atomic32 a32;
-typedef __tsan_atomic64 a64;
-typedef __tsan_atomic128 a128;
-const morder mo_relaxed = __tsan_memory_order_relaxed;
-const morder mo_consume = __tsan_memory_order_consume;
-const morder mo_acquire = __tsan_memory_order_acquire;
-const morder mo_release = __tsan_memory_order_release;
-const morder mo_acq_rel = __tsan_memory_order_acq_rel;
-const morder mo_seq_cst = __tsan_memory_order_seq_cst;
+// These should match declarations from public tsan_interface_atomic.h header.
+typedef unsigned char a8;
+typedef unsigned short a16; // NOLINT
+typedef unsigned int a32;
+typedef unsigned long long a64; // NOLINT
+#if !defined(TSAN_GO) && (defined(__SIZEOF_INT128__) \
+ || (__clang_major__ * 100 + __clang_minor__ >= 302))
+__extension__ typedef __int128 a128;
+# define __TSAN_HAS_INT128 1
+#else
+# define __TSAN_HAS_INT128 0
+#endif
-class ScopedAtomic {
- public:
- ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
- morder mo, const char *func)
- : thr_(thr) {
- CHECK_EQ(thr_->in_rtl, 0);
- ProcessPendingSignals(thr);
- FuncEntry(thr_, pc);
- DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
- thr_->in_rtl++;
- }
- ~ScopedAtomic() {
- thr_->in_rtl--;
- CHECK_EQ(thr_->in_rtl, 0);
- FuncExit(thr_);
- }
- private:
- ThreadState *thr_;
-};
+#ifndef TSAN_GO
+// Protects emulation of 128-bit atomic operations.
+static StaticSpinMutex mutex128;
+#endif
-static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
- StatInc(thr, StatAtomic);
- StatInc(thr, t);
- StatInc(thr, size == 1 ? StatAtomic1
- : size == 2 ? StatAtomic2
- : size == 4 ? StatAtomic4
- : size == 8 ? StatAtomic8
- : StatAtomic16);
- StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
- : mo == mo_consume ? StatAtomicConsume
- : mo == mo_acquire ? StatAtomicAcquire
- : mo == mo_release ? StatAtomicRelease
- : mo == mo_acq_rel ? StatAtomicAcq_Rel
- : StatAtomicSeq_Cst);
-}
+// Part of ABI, do not change.
+// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
+typedef enum {
+ mo_relaxed,
+ mo_consume,
+ mo_acquire,
+ mo_release,
+ mo_acq_rel,
+ mo_seq_cst
+} morder;
static bool IsLoadOrder(morder mo) {
return mo == mo_relaxed || mo == mo_consume
@@ -108,27 +76,6 @@ static bool IsAcqRelOrder(morder mo) {
return mo == mo_acq_rel || mo == mo_seq_cst;
}
-static morder ConvertOrder(morder mo) {
- if (mo > (morder)100500) {
- mo = morder(mo - 100500);
- if (mo == morder(1 << 0))
- mo = mo_relaxed;
- else if (mo == morder(1 << 1))
- mo = mo_consume;
- else if (mo == morder(1 << 2))
- mo = mo_acquire;
- else if (mo == morder(1 << 3))
- mo = mo_release;
- else if (mo == morder(1 << 4))
- mo = mo_acq_rel;
- else if (mo == morder(1 << 5))
- mo = mo_seq_cst;
- }
- CHECK_GE(mo, mo_relaxed);
- CHECK_LE(mo, mo_seq_cst);
- return mo;
-}
-
template<typename T> T func_xchg(volatile T *v, T op) {
T res = __sync_lock_test_and_set(v, op);
// __sync_lock_test_and_set does not contain full barrier.
@@ -176,50 +123,58 @@ template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
-#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
+#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(TSAN_GO)
a128 func_xchg(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = op;
return cmp;
}
a128 func_add(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = cmp + op;
return cmp;
}
a128 func_sub(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = cmp - op;
return cmp;
}
a128 func_and(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = cmp & op;
return cmp;
}
a128 func_or(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = cmp | op;
return cmp;
}
a128 func_xor(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = cmp ^ op;
return cmp;
}
a128 func_nand(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = ~(cmp & op);
return cmp;
}
a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
+ SpinMutexLock lock(&mutex128);
a128 cur = *v;
if (cur == cmp)
*v = xch;
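On targets without native 16-byte atomics, the helpers above serialize every 128-bit operation through the single mutex128 spin lock; they stay mutually atomic as long as no code touches the variable without taking the lock. The same strategy in standalone form, using a std::atomic_flag spinlock:

#include <atomic>

__extension__ typedef __int128 a128;   // GCC/Clang built-in 128-bit integer

static std::atomic_flag lock128 = ATOMIC_FLAG_INIT;

// Emulated 128-bit fetch-add: atomic only with respect to other callers that
// also take lock128, mirroring the mutex128-guarded helpers in the patch.
static a128 emulated_fetch_add(volatile a128 *v, a128 op) {
  while (lock128.test_and_set(std::memory_order_acquire)) { /* spin */ }
  a128 old = *v;
  *v = old + op;
  lock128.clear(std::memory_order_release);
  return old;
}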
@@ -241,26 +196,80 @@ static int SizeLog() {
// this leads to false negatives only in very obscure cases.
}
+#ifndef TSAN_GO
+static atomic_uint8_t *to_atomic(const volatile a8 *a) {
+ return (atomic_uint8_t*)a;
+}
+
+static atomic_uint16_t *to_atomic(const volatile a16 *a) {
+ return (atomic_uint16_t*)a;
+}
+#endif
+
+static atomic_uint32_t *to_atomic(const volatile a32 *a) {
+ return (atomic_uint32_t*)a;
+}
+
+static atomic_uint64_t *to_atomic(const volatile a64 *a) {
+ return (atomic_uint64_t*)a;
+}
+
+static memory_order to_mo(morder mo) {
+ switch (mo) {
+ case mo_relaxed: return memory_order_relaxed;
+ case mo_consume: return memory_order_consume;
+ case mo_acquire: return memory_order_acquire;
+ case mo_release: return memory_order_release;
+ case mo_acq_rel: return memory_order_acq_rel;
+ case mo_seq_cst: return memory_order_seq_cst;
+ }
+ CHECK(0);
+ return memory_order_seq_cst;
+}
+
+template<typename T>
+static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
+ return atomic_load(to_atomic(a), to_mo(mo));
+}
+
+#if __TSAN_HAS_INT128 && !defined(TSAN_GO)
+static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
+ SpinMutexLock lock(&mutex128);
+ return *a;
+}
+#endif
+
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
morder mo) {
CHECK(IsLoadOrder(mo));
// This fast-path is critical for performance.
// Assume the access is atomic.
- if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
+ if (!IsAcquireOrder(mo)) {
MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
- return *a; // as if atomic
+ return NoTsanAtomicLoad(a, mo);
}
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
AcquireImpl(thr, pc, &s->clock);
- T v = *a;
+ T v = NoTsanAtomicLoad(a, mo);
s->mtx.ReadUnlock();
- __sync_synchronize();
MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
return v;
}
template<typename T>
+static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
+ atomic_store(to_atomic(a), v, to_mo(mo));
+}
+
+#if __TSAN_HAS_INT128 && !defined(TSAN_GO)
+static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
+ SpinMutexLock lock(&mutex128);
+ *a = v;
+}
+#endif
+
+template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
CHECK(IsStoreOrder(mo));
@@ -269,21 +278,18 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
// Assume the access is atomic.
// Strictly saying even relaxed store cuts off release sequence,
// so must reset the clock.
- if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
- *a = v; // as if atomic
+ if (!IsReleaseOrder(mo)) {
+ NoTsanAtomicStore(a, v, mo);
return;
}
__sync_synchronize();
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
ReleaseImpl(thr, pc, &s->clock);
- *a = v;
+ NoTsanAtomicStore(a, v, mo);
s->mtx.Unlock();
- // Trainling memory barrier to provide sequential consistency
- // for Dekker-like store-load synchronization.
- __sync_synchronize();
}
template<typename T, T (*F)(volatile T *v, T op)>
@@ -291,7 +297,7 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
SyncVar *s = 0;
if (mo != mo_relaxed) {
- s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -309,6 +315,41 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
}
template<typename T>
+static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
+ return func_xchg(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
+ return func_add(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
+ return func_sub(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
+ return func_and(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
+ return func_or(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
+ return func_xor(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
+ return func_nand(a, v);
+}
+
+template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
@@ -351,13 +392,37 @@ static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
}
template<typename T>
+static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
+ return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
+}
+
+#if __TSAN_HAS_INT128
+static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo) {
+ a128 old = *c;
+ a128 cur = func_cas(a, old, v);
+ if (cur == old)
+ return true;
+ *c = cur;
+ return false;
+}
+#endif
+
+template<typename T>
+static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
+ NoTsanAtomicCAS(a, &c, v, mo, fmo);
+ return c;
+}
+
+template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
volatile T *a, T *c, T v, morder mo, morder fmo) {
(void)fmo; // Unused because llvm does not pass it yet.
MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
SyncVar *s = 0;
+ bool write_lock = mo != mo_acquire && mo != mo_consume;
if (mo != mo_relaxed) {
- s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -370,8 +435,12 @@ static bool AtomicCAS(ThreadState *thr, uptr pc,
}
T cc = *c;
T pr = func_cas(a, cc, v);
- if (s)
- s->mtx.Unlock();
+ if (s) {
+ if (write_lock)
+ s->mtx.Unlock();
+ else
+ s->mtx.ReadUnlock();
+ }
if (pr == cc)
return true;
*c = pr;
@@ -385,293 +454,498 @@ static T AtomicCAS(ThreadState *thr, uptr pc,
return c;
}
+#ifndef TSAN_GO
+static void NoTsanAtomicFence(morder mo) {
+ __sync_synchronize();
+}
+
static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
// FIXME(dvyukov): not implemented.
__sync_synchronize();
}
+#endif
+
+// Interface functions follow.
+#ifndef TSAN_GO
+
+// C/C++
+
+#define SCOPED_ATOMIC(func, ...) \
+ const uptr callpc = (uptr)__builtin_return_address(0); \
+ uptr pc = StackTrace::GetCurrentPc(); \
+ mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
+ ThreadState *const thr = cur_thread(); \
+ if (thr->ignore_interceptors) \
+ return NoTsanAtomic##func(__VA_ARGS__); \
+ AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
+ ScopedAtomic sa(thr, callpc, a, mo, __func__); \
+ return Atomic##func(thr, pc, __VA_ARGS__); \
+/**/
+
+class ScopedAtomic {
+ public:
+ ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
+ morder mo, const char *func)
+ : thr_(thr) {
+ FuncEntry(thr_, pc);
+ DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
+ }
+ ~ScopedAtomic() {
+ ProcessPendingSignals(thr_);
+ FuncExit(thr_);
+ }
+ private:
+ ThreadState *thr_;
+};
+
+static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
+ StatInc(thr, StatAtomic);
+ StatInc(thr, t);
+ StatInc(thr, size == 1 ? StatAtomic1
+ : size == 2 ? StatAtomic2
+ : size == 4 ? StatAtomic4
+ : size == 8 ? StatAtomic8
+ : StatAtomic16);
+ StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
+ : mo == mo_consume ? StatAtomicConsume
+ : mo == mo_acquire ? StatAtomicAcquire
+ : mo == mo_release ? StatAtomicRelease
+ : mo == mo_acq_rel ? StatAtomicAcq_Rel
+ : StatAtomicSeq_Cst);
+}
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchNand, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchNand, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchNand, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
char* a = 0;
SCOPED_ATOMIC(Fence, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
+} // extern "C"
+
+#else // #ifndef TSAN_GO
+
+// Go
+
+#define ATOMIC(func, ...) \
+ if (thr->ignore_sync) { \
+ NoTsanAtomic##func(__VA_ARGS__); \
+ } else { \
+ FuncEntry(thr, cpc); \
+ Atomic##func(thr, pc, __VA_ARGS__); \
+ FuncExit(thr); \
+ } \
+/**/
+
+#define ATOMIC_RET(func, ret, ...) \
+ if (thr->ignore_sync) { \
+ (ret) = NoTsanAtomic##func(__VA_ARGS__); \
+ } else { \
+ FuncEntry(thr, cpc); \
+ (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
+ FuncExit(thr); \
+ } \
+/**/
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_compare_exchange(
+ ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ a32 cur = 0;
+ a32 cmp = *(a32*)(a+8);
+ ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
+ *(bool*)(a+16) = (cur == cmp);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_compare_exchange(
+ ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ a64 cur = 0;
+ a64 cmp = *(a64*)(a+8);
+ ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
+ *(bool*)(a+24) = (cur == cmp);
+}
+} // extern "C"
+#endif // #ifndef TSAN_GO
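A minimal sketch of how a caller would pack the argument buffer consumed by the __tsan_go_atomic* entry points above, inferred only from the offsets used in the ATOMIC/ATOMIC_RET invocations (the struct and variable names below are hypothetical and not part of tsan; a32/u8 are tsan's own typedefs):

  // 32-bit fetch_add: address at a+0, addend at a+8, old value returned at a+16.
  struct Go32FetchAddArgs {
    volatile a32 *addr;  // a+0: the atomic variable
    a32 add;             // a+8: value to add
    a32 pad_;            // a+12: padding up to the result slot
    a32 old;             // a+16: previous value, written back by the callback
  };
  // Conceptual usage:
  //   Go32FetchAddArgs args = {&x, 1, 0, 0};
  //   __tsan_go_atomic32_fetch_add(thr, cpc, pc, (u8*)&args);
  //   // args.old now holds the value of x before the addition.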
diff --git a/libsanitizer/tsan/tsan_interface_java.cc b/libsanitizer/tsan/tsan_interface_java.cc
index 70b5e5fcc6..6e1ec6de35 100644
--- a/libsanitizer/tsan/tsan_interface_java.cc
+++ b/libsanitizer/tsan/tsan_interface_java.cc
@@ -20,54 +20,17 @@
using namespace __tsan; // NOLINT
-namespace __tsan {
-
-const uptr kHeapShadow = 0x300000000000ull;
-const uptr kHeapAlignment = 8;
+const jptr kHeapAlignment = 8;
-struct BlockDesc {
- bool begin;
- Mutex mtx;
- SyncVar *head;
-
- BlockDesc()
- : mtx(MutexTypeJavaMBlock, StatMtxJavaMBlock)
- , head() {
- CHECK_EQ(begin, false);
- begin = true;
- }
-
- ~BlockDesc() {
- CHECK_EQ(begin, true);
- begin = false;
- ThreadState *thr = cur_thread();
- SyncVar *s = head;
- while (s) {
- SyncVar *s1 = s->next;
- StatInc(thr, StatSyncDestroyed);
- s->mtx.Lock();
- s->mtx.Unlock();
- thr->mset.Remove(s->GetId());
- DestroyAndFree(s);
- s = s1;
- }
- }
-};
+namespace __tsan {
struct JavaContext {
const uptr heap_begin;
const uptr heap_size;
- BlockDesc *heap_shadow;
JavaContext(jptr heap_begin, jptr heap_size)
: heap_begin(heap_begin)
, heap_size(heap_size) {
- uptr size = heap_size / kHeapAlignment * sizeof(BlockDesc);
- heap_shadow = (BlockDesc*)MmapFixedNoReserve(kHeapShadow, size);
- if ((uptr)heap_shadow != kHeapShadow) {
- Printf("ThreadSanitizer: failed to mmap Java heap shadow\n");
- Die();
- }
}
};
@@ -77,13 +40,9 @@ class ScopedJavaFunc {
: thr_(thr) {
Initialize(thr_);
FuncEntry(thr, pc);
- CHECK_EQ(thr_->in_rtl, 0);
- thr_->in_rtl++;
}
~ScopedJavaFunc() {
- thr_->in_rtl--;
- CHECK_EQ(thr_->in_rtl, 0);
FuncExit(thr_);
// FIXME(dvyukov): process pending signals.
}
@@ -95,69 +54,12 @@ class ScopedJavaFunc {
static u64 jctx_buf[sizeof(JavaContext) / sizeof(u64) + 1];
static JavaContext *jctx;
-static BlockDesc *getblock(uptr addr) {
- uptr i = (addr - jctx->heap_begin) / kHeapAlignment;
- return &jctx->heap_shadow[i];
-}
-
-static uptr USED getmem(BlockDesc *b) {
- uptr i = b - jctx->heap_shadow;
- uptr p = jctx->heap_begin + i * kHeapAlignment;
- CHECK_GE(p, jctx->heap_begin);
- CHECK_LT(p, jctx->heap_begin + jctx->heap_size);
- return p;
-}
-
-static BlockDesc *getblockbegin(uptr addr) {
- for (BlockDesc *b = getblock(addr);; b--) {
- CHECK_GE(b, jctx->heap_shadow);
- if (b->begin)
- return b;
- }
- return 0;
-}
-
-SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
- bool write_lock, bool create) {
- if (jctx == 0 || addr < jctx->heap_begin
- || addr >= jctx->heap_begin + jctx->heap_size)
- return 0;
- BlockDesc *b = getblockbegin(addr);
- DPrintf("#%d: GetJavaSync %p->%p\n", thr->tid, addr, b);
- Lock l(&b->mtx);
- SyncVar *s = b->head;
- for (; s; s = s->next) {
- if (s->addr == addr) {
- DPrintf("#%d: found existing sync for %p\n", thr->tid, addr);
- break;
- }
- }
- if (s == 0 && create) {
- DPrintf("#%d: creating new sync for %p\n", thr->tid, addr);
- s = CTX()->synctab.Create(thr, pc, addr);
- s->next = b->head;
- b->head = s;
- }
- if (s) {
- if (write_lock)
- s->mtx.Lock();
- else
- s->mtx.ReadLock();
- }
- return s;
-}
-
-SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr) {
- // We do not destroy Java mutexes other than in __tsan_java_free().
- return 0;
-}
-
} // namespace __tsan
#define SCOPED_JAVA_FUNC(func) \
ThreadState *thr = cur_thread(); \
const uptr caller_pc = GET_CALLER_PC(); \
- const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
(void)pc; \
ScopedJavaFunc scoped(thr, caller_pc); \
/**/
@@ -194,8 +96,7 @@ void __tsan_java_alloc(jptr ptr, jptr size) {
CHECK_GE(ptr, jctx->heap_begin);
CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
- BlockDesc *b = getblock(ptr);
- new(b) BlockDesc();
+ OnUserAlloc(thr, pc, ptr, size, false);
}
void __tsan_java_free(jptr ptr, jptr size) {
@@ -208,12 +109,7 @@ void __tsan_java_free(jptr ptr, jptr size) {
CHECK_GE(ptr, jctx->heap_begin);
CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
- BlockDesc *beg = getblock(ptr);
- BlockDesc *end = getblock(ptr + size);
- for (BlockDesc *b = beg; b != end; b++) {
- if (b->begin)
- b->~BlockDesc();
- }
+ ctx->metamap.FreeRange(thr, pc, ptr, size);
}
void __tsan_java_move(jptr src, jptr dst, jptr size) {
@@ -228,42 +124,36 @@ void __tsan_java_move(jptr src, jptr dst, jptr size) {
CHECK_LE(src + size, jctx->heap_begin + jctx->heap_size);
CHECK_GE(dst, jctx->heap_begin);
CHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
- CHECK(dst >= src + size || src >= dst + size);
+ CHECK_NE(dst, src);
+ CHECK_NE(size, 0);
// Assuming it's not running concurrently with threads that do
// memory accesses and mutex operations (stop-the-world phase).
- { // NOLINT
- BlockDesc *s = getblock(src);
- BlockDesc *d = getblock(dst);
- BlockDesc *send = getblock(src + size);
- for (; s != send; s++, d++) {
- CHECK_EQ(d->begin, false);
- if (s->begin) {
- DPrintf("#%d: moving block %p->%p\n", thr->tid, getmem(s), getmem(d));
- new(d) BlockDesc;
- d->head = s->head;
- for (SyncVar *sync = d->head; sync; sync = sync->next) {
- uptr newaddr = sync->addr - src + dst;
- DPrintf("#%d: moving sync %p->%p\n", thr->tid, sync->addr, newaddr);
- sync->addr = newaddr;
- }
- s->head = 0;
- s->~BlockDesc();
- }
- }
+ ctx->metamap.MoveMemory(src, dst, size);
+
+ // Move shadow.
+ u64 *s = (u64*)MemToShadow(src);
+ u64 *d = (u64*)MemToShadow(dst);
+ u64 *send = (u64*)MemToShadow(src + size);
+ uptr inc = 1;
+ if (dst > src) {
+ s = (u64*)MemToShadow(src + size) - 1;
+ d = (u64*)MemToShadow(dst + size) - 1;
+ send = (u64*)MemToShadow(src) - 1;
+ inc = -1;
}
-
- { // NOLINT
- u64 *s = (u64*)MemToShadow(src);
- u64 *d = (u64*)MemToShadow(dst);
- u64 *send = (u64*)MemToShadow(src + size);
- for (; s != send; s++, d++) {
- *d = *s;
- *s = 0;
- }
+ for (; s != send; s += inc, d += inc) {
+ *d = *s;
+ *s = 0;
}
}
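Since the ranges may now overlap (see the header change below, which relaxes the old non-overlap requirement), the shadow copy above picks its direction the way memmove does: forward when dst < src, backward when dst > src, so not-yet-copied source words are never clobbered. A minimal model of that rule, using only standard C++ and not taken from the patch itself:

  #include <stddef.h>
  typedef unsigned long long u64_t;  // stand-in for tsan's u64
  static void move_and_clear(u64_t *dst, u64_t *src, size_t n) {
    if (dst < src) {
      for (size_t i = 0; i < n; i++) { dst[i] = src[i]; src[i] = 0; }
    } else {
      for (size_t i = n; i > 0; i--) { dst[i-1] = src[i-1]; src[i-1] = 0; }
    }
  }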
+void __tsan_java_finalize() {
+ SCOPED_JAVA_FUNC(__tsan_java_finalize);
+ DPrintf("#%d: java_mutex_finalize()\n", thr->tid);
+ AcquireGlobal(thr, 0);
+}
+
void __tsan_java_mutex_lock(jptr addr) {
SCOPED_JAVA_FUNC(__tsan_java_mutex_lock);
DPrintf("#%d: java_mutex_lock(%p)\n", thr->tid, addr);
diff --git a/libsanitizer/tsan/tsan_interface_java.h b/libsanitizer/tsan/tsan_interface_java.h
index 885ff28975..9af1f3fe21 100644
--- a/libsanitizer/tsan/tsan_interface_java.h
+++ b/libsanitizer/tsan/tsan_interface_java.h
@@ -48,8 +48,13 @@ void __tsan_java_alloc(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
void __tsan_java_free(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
// Callback for memory move by GC.
// Can be aggregated for several objects (preferably).
-// The ranges must not overlap.
+// The ranges can overlap.
void __tsan_java_move(jptr src, jptr dst, jptr size) INTERFACE_ATTRIBUTE;
+// This function must be called on the finalizer thread
+// before executing a batch of finalizers.
+// It ensures necessary synchronization between
+// java object creation and finalization.
+void __tsan_java_finalize() INTERFACE_ATTRIBUTE;
// Mutex lock.
// Addr is any unique address associated with the mutex.
diff --git a/libsanitizer/tsan/tsan_mman.cc b/libsanitizer/tsan/tsan_mman.cc
index 832374becf..d89610a2cc 100644
--- a/libsanitizer/tsan/tsan_mman.cc
+++ b/libsanitizer/tsan/tsan_mman.cc
@@ -8,6 +8,7 @@
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
@@ -16,43 +17,17 @@
#include "tsan_flags.h"
// May be overridden by the front-end.
-extern "C" void WEAK __tsan_malloc_hook(void *ptr, uptr size) {
+extern "C" void WEAK __sanitizer_malloc_hook(void *ptr, uptr size) {
(void)ptr;
(void)size;
}
-extern "C" void WEAK __tsan_free_hook(void *ptr) {
+extern "C" void WEAK __sanitizer_free_hook(void *ptr) {
(void)ptr;
}
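Because both hooks above are declared WEAK, an instrumented program can supply its own definitions to observe allocations. A minimal user-side override, sketched against only the signatures shown here (unsigned long stands in for tsan's uptr on LP64; printing from the hook is purely for illustration):

  #include <stdio.h>
  extern "C" void __sanitizer_malloc_hook(void *ptr, unsigned long size) {
    // called after each user allocation
    fprintf(stderr, "alloc %p (%lu bytes)\n", ptr, size);
  }
  extern "C" void __sanitizer_free_hook(void *ptr) {
    // called before each user deallocation
    fprintf(stderr, "free  %p\n", ptr);
  }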
namespace __tsan {
-COMPILER_CHECK(sizeof(MBlock) == 16);
-
-void MBlock::Lock() {
- atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
- uptr v = atomic_load(a, memory_order_relaxed);
- for (int iter = 0;; iter++) {
- if (v & 1) {
- if (iter < 10)
- proc_yield(20);
- else
- internal_sched_yield();
- v = atomic_load(a, memory_order_relaxed);
- continue;
- }
- if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire))
- break;
- }
-}
-
-void MBlock::Unlock() {
- atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
- uptr v = atomic_load(a, memory_order_relaxed);
- DCHECK(v & 1);
- atomic_store(a, v & ~1, memory_order_relaxed);
-}
-
struct MapUnmapCallback {
void OnMap(uptr p, uptr size) const { }
void OnUnmap(uptr p, uptr size) const {
@@ -86,66 +61,58 @@ void AllocatorPrintStats() {
}
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
- if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
+ if (atomic_load(&thr->in_signal_handler, memory_order_relaxed) == 0 ||
+ !flags()->report_signal_unsafe)
return;
- Context *ctx = CTX();
- StackTrace stack;
- stack.ObtainCurrent(thr, pc);
+ VarSizeStackTrace stack;
+ ObtainCurrentStack(thr, pc, &stack);
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeSignalUnsafe);
if (!IsFiredSuppression(ctx, rep, stack)) {
- rep.AddStack(&stack);
- OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
+ rep.AddStack(stack, true);
+ OutputReport(thr, rep);
}
}
-void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
- CHECK_GT(thr->in_rtl, 0);
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
return AllocatorReturnNull();
void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
if (p == 0)
return 0;
- MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
- b->Init(sz, thr->tid, CurrentStackId(thr, pc));
- if (CTX() && CTX()->initialized) {
- if (thr->ignore_reads_and_writes == 0)
- MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
- else
- MemoryResetRange(thr, pc, (uptr)p, sz);
- }
- DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
- SignalUnsafeCall(thr, pc);
+ if (ctx && ctx->initialized)
+ OnUserAlloc(thr, pc, (uptr)p, sz, true);
+ if (signal)
+ SignalUnsafeCall(thr, pc);
return p;
}
-void user_free(ThreadState *thr, uptr pc, void *p) {
- CHECK_GT(thr->in_rtl, 0);
- CHECK_NE(p, (void*)0);
- DPrintf("#%d: free(%p)\n", thr->tid, p);
- MBlock *b = (MBlock*)allocator()->GetMetaData(p);
- if (b->ListHead()) {
- MBlock::ScopedLock l(b);
- for (SyncVar *s = b->ListHead(); s;) {
- SyncVar *res = s;
- s = s->next;
- StatInc(thr, StatSyncDestroyed);
- res->mtx.Lock();
- res->mtx.Unlock();
- DestroyAndFree(res);
- }
- b->ListReset();
- }
- if (CTX() && CTX()->initialized && thr->in_rtl == 1) {
- if (thr->ignore_reads_and_writes == 0)
- MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
- }
+void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
+ if (ctx && ctx->initialized)
+ OnUserFree(thr, pc, (uptr)p, true);
allocator()->Deallocate(&thr->alloc_cache, p);
- SignalUnsafeCall(thr, pc);
+ if (signal)
+ SignalUnsafeCall(thr, pc);
+}
+
+void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
+ DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
+ ctx->metamap.AllocBlock(thr, pc, p, sz);
+ if (write && thr->ignore_reads_and_writes == 0)
+ MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
+ else
+ MemoryResetRange(thr, pc, (uptr)p, sz);
+}
+
+void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
+ CHECK_NE(p, (void*)0);
+ uptr sz = ctx->metamap.FreeBlock(thr, pc, p);
+ DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
+ if (write && thr->ignore_reads_and_writes == 0)
+ MemoryRangeFreed(thr, pc, (uptr)p, sz);
}
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
- CHECK_GT(thr->in_rtl, 0);
void *p2 = 0;
// FIXME: Handle "shrinking" more efficiently,
// it seems that some software actually does this.
@@ -154,9 +121,8 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
if (p2 == 0)
return 0;
if (p) {
- MBlock *b = user_mblock(thr, p);
- CHECK_NE(b, 0);
- internal_memcpy(p2, p, min(b->Size(), sz));
+ uptr oldsz = user_alloc_usable_size(p);
+ internal_memcpy(p2, p, min(oldsz, sz));
}
}
if (p)
@@ -164,43 +130,29 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
return p2;
}
-uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) {
- CHECK_GT(thr->in_rtl, 0);
+uptr user_alloc_usable_size(const void *p) {
if (p == 0)
return 0;
- MBlock *b = (MBlock*)allocator()->GetMetaData(p);
- return b ? b->Size() : 0;
-}
-
-MBlock *user_mblock(ThreadState *thr, void *p) {
- CHECK_NE(p, 0);
- Allocator *a = allocator();
- void *b = a->GetBlockBegin(p);
- if (b == 0)
- return 0;
- return (MBlock*)a->GetMetaData(b);
+ MBlock *b = ctx->metamap.GetBlock((uptr)p);
+ return b ? b->siz : 0;
}
void invoke_malloc_hook(void *ptr, uptr size) {
- Context *ctx = CTX();
ThreadState *thr = cur_thread();
- if (ctx == 0 || !ctx->initialized || thr->in_rtl)
+ if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
return;
- __tsan_malloc_hook(ptr, size);
+ __sanitizer_malloc_hook(ptr, size);
}
void invoke_free_hook(void *ptr) {
- Context *ctx = CTX();
ThreadState *thr = cur_thread();
- if (ctx == 0 || !ctx->initialized || thr->in_rtl)
+ if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
return;
- __tsan_free_hook(ptr);
+ __sanitizer_free_hook(ptr);
}
void *internal_alloc(MBlockType typ, uptr sz) {
ThreadState *thr = cur_thread();
- CHECK_GT(thr->in_rtl, 0);
- CHECK_LE(sz, InternalSizeClassMap::kMaxSize);
if (thr->nomalloc) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
@@ -210,7 +162,6 @@ void *internal_alloc(MBlockType typ, uptr sz) {
void internal_free(void *p) {
ThreadState *thr = cur_thread();
- CHECK_GT(thr->in_rtl, 0);
if (thr->nomalloc) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
@@ -223,51 +174,42 @@ void internal_free(void *p) {
using namespace __tsan;
extern "C" {
-uptr __tsan_get_current_allocated_bytes() {
- u64 stats[AllocatorStatCount];
+uptr __sanitizer_get_current_allocated_bytes() {
+ uptr stats[AllocatorStatCount];
allocator()->GetStats(stats);
- u64 m = stats[AllocatorStatMalloced];
- u64 f = stats[AllocatorStatFreed];
- return m >= f ? m - f : 1;
+ return stats[AllocatorStatAllocated];
}
-uptr __tsan_get_heap_size() {
- u64 stats[AllocatorStatCount];
+uptr __sanitizer_get_heap_size() {
+ uptr stats[AllocatorStatCount];
allocator()->GetStats(stats);
- u64 m = stats[AllocatorStatMmapped];
- u64 f = stats[AllocatorStatUnmapped];
- return m >= f ? m - f : 1;
+ return stats[AllocatorStatMapped];
}
-uptr __tsan_get_free_bytes() {
+uptr __sanitizer_get_free_bytes() {
return 1;
}
-uptr __tsan_get_unmapped_bytes() {
+uptr __sanitizer_get_unmapped_bytes() {
return 1;
}
-uptr __tsan_get_estimated_allocated_size(uptr size) {
+uptr __sanitizer_get_estimated_allocated_size(uptr size) {
return size;
}
-bool __tsan_get_ownership(void *p) {
+int __sanitizer_get_ownership(const void *p) {
return allocator()->GetBlockBegin(p) != 0;
}
-uptr __tsan_get_allocated_size(void *p) {
- if (p == 0)
- return 0;
- p = allocator()->GetBlockBegin(p);
- if (p == 0)
- return 0;
- MBlock *b = (MBlock*)allocator()->GetMetaData(p);
- return b->Size();
+uptr __sanitizer_get_allocated_size(const void *p) {
+ return user_alloc_usable_size(p);
}
void __tsan_on_thread_idle() {
ThreadState *thr = cur_thread();
allocator()->SwallowCache(&thr->alloc_cache);
internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
+ ctx->metamap.OnThreadIdle(thr);
}
} // extern "C"
diff --git a/libsanitizer/tsan/tsan_mman.h b/libsanitizer/tsan/tsan_mman.h
index 90faaffa1f..fe020af9d2 100644
--- a/libsanitizer/tsan/tsan_mman.h
+++ b/libsanitizer/tsan/tsan_mman.h
@@ -24,15 +24,12 @@ void AllocatorPrintStats();
// For user allocations.
void *user_alloc(ThreadState *thr, uptr pc, uptr sz,
- uptr align = kDefaultAlignment);
+ uptr align = kDefaultAlignment, bool signal = true);
// Does not accept NULL.
-void user_free(ThreadState *thr, uptr pc, void *p);
+void user_free(ThreadState *thr, uptr pc, void *p, bool signal = true);
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align);
-uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p);
-// Given the pointer p into a valid allocated block,
-// returns the descriptor of the block.
-MBlock *user_mblock(ThreadState *thr, void *p);
+uptr user_alloc_usable_size(const void *p);
// Invoking malloc/free hooks that may be installed by the user.
void invoke_malloc_hook(void *ptr, uptr size);
@@ -60,7 +57,6 @@ enum MBlockType {
MBlockSuppression,
MBlockExpectRace,
MBlockSignal,
- MBlockFD,
MBlockJmpBuf,
// This must be the last.
diff --git a/libsanitizer/tsan/tsan_mutex.cc b/libsanitizer/tsan/tsan_mutex.cc
index e7846c53e4..2e49b9d2de 100644
--- a/libsanitizer/tsan/tsan_mutex.cc
+++ b/libsanitizer/tsan/tsan_mutex.cc
@@ -29,15 +29,16 @@ static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
/*0 MutexTypeInvalid*/ {},
/*1 MutexTypeTrace*/ {MutexTypeLeaf},
/*2 MutexTypeThreads*/ {MutexTypeReport},
- /*3 MutexTypeReport*/ {MutexTypeSyncTab, MutexTypeSyncVar,
+ /*3 MutexTypeReport*/ {MutexTypeSyncVar,
MutexTypeMBlock, MutexTypeJavaMBlock},
- /*4 MutexTypeSyncVar*/ {},
- /*5 MutexTypeSyncTab*/ {MutexTypeSyncVar},
+ /*4 MutexTypeSyncVar*/ {MutexTypeDDetector},
+ /*5 MutexTypeSyncTab*/ {}, // unused
/*6 MutexTypeSlab*/ {MutexTypeLeaf},
/*7 MutexTypeAnnotations*/ {},
- /*8 MutexTypeAtExit*/ {MutexTypeSyncTab},
+ /*8 MutexTypeAtExit*/ {MutexTypeSyncVar},
/*9 MutexTypeMBlock*/ {MutexTypeSyncVar},
/*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar},
+ /*11 MutexTypeDDetector*/ {},
};
static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
@@ -121,12 +122,12 @@ void InitializeMutex() {
#endif
}
-DeadlockDetector::DeadlockDetector() {
+InternalDeadlockDetector::InternalDeadlockDetector() {
// Rely on zero initialization because some mutexes can be locked before ctor.
}
#if TSAN_DEBUG && !TSAN_GO
-void DeadlockDetector::Lock(MutexType t) {
+void InternalDeadlockDetector::Lock(MutexType t) {
// Printf("LOCK %d @%zu\n", t, seq_ + 1);
CHECK_GT(t, MutexTypeInvalid);
CHECK_LT(t, MutexTypeCount);
@@ -153,12 +154,24 @@ void DeadlockDetector::Lock(MutexType t) {
}
}
-void DeadlockDetector::Unlock(MutexType t) {
+void InternalDeadlockDetector::Unlock(MutexType t) {
// Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
CHECK(locked_[t]);
locked_[t] = 0;
}
+
+void InternalDeadlockDetector::CheckNoLocks() {
+ for (int i = 0; i != MutexTypeCount; i++) {
+ CHECK_EQ(locked_[i], 0);
+ }
+}
+#endif
+
+void CheckNoLocks(ThreadState *thr) {
+#if TSAN_DEBUG && !TSAN_GO
+ thr->internal_deadlock_detector.CheckNoLocks();
#endif
+}
const uptr kUnlocked = 0;
const uptr kWriteLock = 1;
@@ -208,7 +221,7 @@ Mutex::~Mutex() {
void Mutex::Lock() {
#if TSAN_DEBUG && !TSAN_GO
- cur_thread()->deadlock_detector.Lock(type_);
+ cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
uptr cmp = kUnlocked;
if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
@@ -219,7 +232,7 @@ void Mutex::Lock() {
cmp = kUnlocked;
if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
memory_order_acquire)) {
-#if TSAN_COLLECT_STATS
+#if TSAN_COLLECT_STATS && !TSAN_GO
StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
return;
@@ -233,13 +246,13 @@ void Mutex::Unlock() {
(void)prev;
DCHECK_NE(prev & kWriteLock, 0);
#if TSAN_DEBUG && !TSAN_GO
- cur_thread()->deadlock_detector.Unlock(type_);
+ cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}
void Mutex::ReadLock() {
#if TSAN_DEBUG && !TSAN_GO
- cur_thread()->deadlock_detector.Lock(type_);
+ cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
if ((prev & kWriteLock) == 0)
@@ -247,7 +260,7 @@ void Mutex::ReadLock() {
for (Backoff backoff; backoff.Do();) {
prev = atomic_load(&state_, memory_order_acquire);
if ((prev & kWriteLock) == 0) {
-#if TSAN_COLLECT_STATS
+#if TSAN_COLLECT_STATS && !TSAN_GO
StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
return;
@@ -261,7 +274,7 @@ void Mutex::ReadUnlock() {
DCHECK_EQ(prev & kWriteLock, 0);
DCHECK_GT(prev & ~kWriteLock, 0);
#if TSAN_DEBUG && !TSAN_GO
- cur_thread()->deadlock_detector.Unlock(type_);
+ cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}
diff --git a/libsanitizer/tsan/tsan_mutex.h b/libsanitizer/tsan/tsan_mutex.h
index 6d14505933..c27bc2b445 100644
--- a/libsanitizer/tsan/tsan_mutex.h
+++ b/libsanitizer/tsan/tsan_mutex.h
@@ -29,6 +29,7 @@ enum MutexType {
MutexTypeAtExit,
MutexTypeMBlock,
MutexTypeJavaMBlock,
+ MutexTypeDDetector,
// This must be the last.
MutexTypeCount
@@ -63,11 +64,12 @@ class Mutex {
typedef GenericScopedLock<Mutex> Lock;
typedef GenericScopedReadLock<Mutex> ReadLock;
-class DeadlockDetector {
+class InternalDeadlockDetector {
public:
- DeadlockDetector();
+ InternalDeadlockDetector();
void Lock(MutexType t);
void Unlock(MutexType t);
+ void CheckNoLocks();
private:
u64 seq_;
u64 locked_[MutexTypeCount];
@@ -75,6 +77,10 @@ class DeadlockDetector {
void InitializeMutex();
+// Checks that the current thread does not hold any runtime locks
+// (e.g. when returning from an interceptor).
+void CheckNoLocks(ThreadState *thr);
+
} // namespace __tsan
#endif // TSAN_MUTEX_H
diff --git a/libsanitizer/tsan/tsan_mutexset.h b/libsanitizer/tsan/tsan_mutexset.h
index df36b462c6..7fdc194109 100644
--- a/libsanitizer/tsan/tsan_mutexset.h
+++ b/libsanitizer/tsan/tsan_mutexset.h
@@ -36,6 +36,10 @@ class MutexSet {
uptr Size() const;
Desc Get(uptr i) const;
+ void operator=(const MutexSet &other) {
+ internal_memcpy(this, &other, sizeof(*this));
+ }
+
private:
#ifndef TSAN_GO
uptr size_;
@@ -43,6 +47,7 @@ class MutexSet {
#endif
void RemovePos(uptr i);
+ MutexSet(const MutexSet&);
};
// Go does not have mutexes, so do not spend memory and time.
diff --git a/libsanitizer/tsan/tsan_platform.h b/libsanitizer/tsan/tsan_platform.h
index 164ee45d6c..35837d2132 100644
--- a/libsanitizer/tsan/tsan_platform.h
+++ b/libsanitizer/tsan/tsan_platform.h
@@ -10,151 +10,249 @@
// Platform-specific code.
//===----------------------------------------------------------------------===//
+#ifndef TSAN_PLATFORM_H
+#define TSAN_PLATFORM_H
+
+#if !defined(__LP64__) && !defined(_WIN64)
+# error "Only 64-bit is supported"
+#endif
+
+#include "tsan_defs.h"
+#include "tsan_trace.h"
+
+namespace __tsan {
+
+#if !defined(TSAN_GO)
+
/*
-C++ linux memory layout:
-0000 0000 0000 - 03c0 0000 0000: protected
-03c0 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 6000 0000 0000: protected
+C/C++ on linux and freebsd
+0000 0000 1000 - 0100 0000 0000: main binary and/or MAP_32BIT mappings
+0100 0000 0000 - 0200 0000 0000: -
+0200 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
6000 0000 0000 - 6200 0000 0000: traces
6200 0000 0000 - 7d00 0000 0000: -
7d00 0000 0000 - 7e00 0000 0000: heap
-7e00 0000 0000 - 7fff ffff ffff: modules and main thread stack
-
-C++ COMPAT linux memory layout:
-0000 0000 0000 - 0400 0000 0000: protected
-0400 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 2900 0000 0000: protected
-2900 0000 0000 - 2c00 0000 0000: modules
-2c00 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 7d00 0000 0000: -
-7d00 0000 0000 - 7e00 0000 0000: heap
-7e00 0000 0000 - 7f00 0000 0000: -
-7f00 0000 0000 - 7fff ffff ffff: main thread stack
+7e00 0000 0000 - 7e80 0000 0000: -
+7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
+*/
-Go linux and darwin memory layout:
-0000 0000 0000 - 0000 1000 0000: executable
-0000 1000 0000 - 00f8 0000 0000: -
-00c0 0000 0000 - 00e0 0000 0000: heap
-00e0 0000 0000 - 1000 0000 0000: -
-1000 0000 0000 - 1380 0000 0000: shadow
-1460 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 7fff ffff ffff: -
+const uptr kMetaShadowBeg = 0x300000000000ull;
+const uptr kMetaShadowEnd = 0x400000000000ull;
+const uptr kTraceMemBeg = 0x600000000000ull;
+const uptr kTraceMemEnd = 0x620000000000ull;
+const uptr kShadowBeg = 0x020000000000ull;
+const uptr kShadowEnd = 0x100000000000ull;
+const uptr kHeapMemBeg = 0x7d0000000000ull;
+const uptr kHeapMemEnd = 0x7e0000000000ull;
+const uptr kLoAppMemBeg = 0x000000001000ull;
+const uptr kLoAppMemEnd = 0x010000000000ull;
+const uptr kHiAppMemBeg = 0x7e8000000000ull;
+const uptr kHiAppMemEnd = 0x800000000000ull;
+const uptr kAppMemMsk = 0x7c0000000000ull;
+const uptr kAppMemXor = 0x020000000000ull;
-Go windows memory layout:
-0000 0000 0000 - 0000 1000 0000: executable
-0000 1000 0000 - 00f8 0000 0000: -
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) {
+ return (mem >= kHeapMemBeg && mem < kHeapMemEnd) ||
+ (mem >= kLoAppMemBeg && mem < kLoAppMemEnd) ||
+ (mem >= kHiAppMemBeg && mem < kHiAppMemEnd);
+}
+
+ALWAYS_INLINE
+bool IsShadowMem(uptr mem) {
+ return mem >= kShadowBeg && mem <= kShadowEnd;
+}
+
+ALWAYS_INLINE
+bool IsMetaMem(uptr mem) {
+ return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
+}
+
+ALWAYS_INLINE
+uptr MemToShadow(uptr x) {
+ DCHECK(IsAppMem(x));
+ return (((x) & ~(kAppMemMsk | (kShadowCell - 1)))
+ ^ kAppMemXor) * kShadowCnt;
+}
+
+ALWAYS_INLINE
+u32 *MemToMeta(uptr x) {
+ DCHECK(IsAppMem(x));
+ return (u32*)(((((x) & ~(kAppMemMsk | (kMetaShadowCell - 1)))
+ ^ kAppMemXor) / kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
+}
+
+ALWAYS_INLINE
+uptr ShadowToMem(uptr s) {
+ CHECK(IsShadowMem(s));
+ if (s >= MemToShadow(kLoAppMemBeg) && s <= MemToShadow(kLoAppMemEnd - 1))
+ return (s / kShadowCnt) ^ kAppMemXor;
+ else
+ return ((s / kShadowCnt) ^ kAppMemXor) | kAppMemMsk;
+}
+
+static USED uptr UserRegions[] = {
+ kLoAppMemBeg, kLoAppMemEnd,
+ kHiAppMemBeg, kHiAppMemEnd,
+ kHeapMemBeg, kHeapMemEnd,
+};
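A worked example of the MemToShadow mapping above, assuming the usual kShadowCell == 8 and kShadowCnt == 4 from tsan_defs.h (an editorial illustration, not part of the patch):

  // x                      = 0x7d0000001234   (a heap address)
  // x & ~(kAppMemMsk | 7)  = 0x010000001230   (strip high bits and cell offset)
  // ... ^ kAppMemXor       = 0x030000001230
  // ... * kShadowCnt       = 0x0c00000048c0   -> inside [kShadowBeg, kShadowEnd)
  //
  // i.e. every 8-byte application cell maps to 4 consecutive 8-byte shadow words.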
+
+#elif defined(TSAN_GO) && !SANITIZER_WINDOWS
+
+/* Go on linux, darwin and freebsd
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
-00e0 0000 0000 - 0100 0000 0000: -
-0100 0000 0000 - 0560 0000 0000: shadow
-0560 0000 0000 - 0760 0000 0000: traces
-0760 0000 0000 - 07ff ffff ffff: -
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2380 0000 0000: shadow
+2380 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 8000 0000 0000: -
*/
-#ifndef TSAN_PLATFORM_H
-#define TSAN_PLATFORM_H
+const uptr kMetaShadowBeg = 0x300000000000ull;
+const uptr kMetaShadowEnd = 0x400000000000ull;
+const uptr kTraceMemBeg = 0x600000000000ull;
+const uptr kTraceMemEnd = 0x620000000000ull;
+const uptr kShadowBeg = 0x200000000000ull;
+const uptr kShadowEnd = 0x238000000000ull;
+const uptr kAppMemBeg = 0x000000001000ull;
+const uptr kAppMemEnd = 0x00e000000000ull;
-#include "tsan_defs.h"
-#include "tsan_trace.h"
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) {
+ return mem >= kAppMemBeg && mem < kAppMemEnd;
+}
-#if defined(__LP64__) || defined(_WIN64)
-namespace __tsan {
+ALWAYS_INLINE
+bool IsShadowMem(uptr mem) {
+ return mem >= kShadowBeg && mem <= kShadowEnd;
+}
-#if defined(TSAN_GO)
-static const uptr kLinuxAppMemBeg = 0x000000000000ULL;
-static const uptr kLinuxAppMemEnd = 0x04dfffffffffULL;
-# if SANITIZER_WINDOWS
-static const uptr kLinuxShadowMsk = 0x010000000000ULL;
-# else
-static const uptr kLinuxShadowMsk = 0x200000000000ULL;
-# endif
-// TSAN_COMPAT_SHADOW is intended for COMPAT virtual memory layout,
-// when memory addresses are of the 0x2axxxxxxxxxx form.
-// The option is enabled with 'setarch x86_64 -L'.
-#elif defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
-static const uptr kLinuxAppMemBeg = 0x290000000000ULL;
-static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
-#else
-static const uptr kLinuxAppMemBeg = 0x7cf000000000ULL;
-static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
-#endif
+ALWAYS_INLINE
+bool IsMetaMem(uptr mem) {
+ return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
+}
-static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL;
+ALWAYS_INLINE
+uptr MemToShadow(uptr x) {
+ DCHECK(IsAppMem(x));
+ return ((x & ~(kShadowCell - 1)) * kShadowCnt) | kShadowBeg;
+}
-#if SANITIZER_WINDOWS
-const uptr kTraceMemBegin = 0x056000000000ULL;
-#else
-const uptr kTraceMemBegin = 0x600000000000ULL;
-#endif
-const uptr kTraceMemSize = 0x020000000000ULL;
+ALWAYS_INLINE
+u32 *MemToMeta(uptr x) {
+ DCHECK(IsAppMem(x));
+ return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
+ kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
+}
-// This has to be a macro to allow constant initialization of constants below.
-#ifndef TSAN_GO
-#define MemToShadow(addr) \
- (((addr) & ~(kLinuxAppMemMsk | (kShadowCell - 1))) * kShadowCnt)
-#else
-#define MemToShadow(addr) \
- ((((addr) & ~(kShadowCell - 1)) * kShadowCnt) | kLinuxShadowMsk)
-#endif
+ALWAYS_INLINE
+uptr ShadowToMem(uptr s) {
+ CHECK(IsShadowMem(s));
+ return (s & ~kShadowBeg) / kShadowCnt;
+}
+
+static USED uptr UserRegions[] = {
+ kAppMemBeg, kAppMemEnd,
+};
+
+#elif defined(TSAN_GO) && SANITIZER_WINDOWS
+
+/* Go on windows
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00f8 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 0380 0000 0000: shadow
+0380 0000 0000 - 0560 0000 0000: -
+0560 0000 0000 - 0760 0000 0000: traces
+0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects)
+07d0 0000 0000 - 8000 0000 0000: -
+*/
-static const uptr kLinuxShadowBeg = MemToShadow(kLinuxAppMemBeg);
-static const uptr kLinuxShadowEnd =
- MemToShadow(kLinuxAppMemEnd) | 0xff;
+const uptr kMetaShadowBeg = 0x076000000000ull;
+const uptr kMetaShadowEnd = 0x07d000000000ull;
+const uptr kTraceMemBeg = 0x056000000000ull;
+const uptr kTraceMemEnd = 0x076000000000ull;
+const uptr kShadowBeg = 0x010000000000ull;
+const uptr kShadowEnd = 0x038000000000ull;
+const uptr kAppMemBeg = 0x000000001000ull;
+const uptr kAppMemEnd = 0x00e000000000ull;
-static inline bool IsAppMem(uptr mem) {
- return mem >= kLinuxAppMemBeg && mem <= kLinuxAppMemEnd;
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) {
+ return mem >= kAppMemBeg && mem < kAppMemEnd;
}
-static inline bool IsShadowMem(uptr mem) {
- return mem >= kLinuxShadowBeg && mem <= kLinuxShadowEnd;
+ALWAYS_INLINE
+bool IsShadowMem(uptr mem) {
+ return mem >= kShadowBeg && mem <= kShadowEnd;
}
-static inline uptr ShadowToMem(uptr shadow) {
- CHECK(IsShadowMem(shadow));
-#ifdef TSAN_GO
- return (shadow & ~kLinuxShadowMsk) / kShadowCnt;
-#else
- return (shadow / kShadowCnt) | kLinuxAppMemMsk;
-#endif
+ALWAYS_INLINE
+bool IsMetaMem(uptr mem) {
+ return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
}
-// For COMPAT mapping returns an alternative address
-// that mapped to the same shadow address.
-// COMPAT mapping is not quite one-to-one.
-static inline uptr AlternativeAddress(uptr addr) {
-#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
- return (addr & ~kLinuxAppMemMsk) | 0x280000000000ULL;
-#else
- return 0;
-#endif
+ALWAYS_INLINE
+uptr MemToShadow(uptr x) {
+ DCHECK(IsAppMem(x));
+ return ((x & ~(kShadowCell - 1)) * kShadowCnt) | kShadowBeg;
}
-void FlushShadowMemory();
-void WriteMemoryProfile(char *buf, uptr buf_size);
-uptr GetRSS();
+ALWAYS_INLINE
+u32 *MemToMeta(uptr x) {
+ DCHECK(IsAppMem(x));
+ return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
+ kMetaShadowCell * kMetaShadowSize) | kMetaShadowEnd);
+}
+
+ALWAYS_INLINE
+uptr ShadowToMem(uptr s) {
+ CHECK(IsShadowMem(s));
+ // FIXME(dvyukov): this is most likely wrong as the mapping is not a bijection.
+ return (s & ~kShadowBeg) / kShadowCnt;
+}
-const char *InitializePlatform();
-void FinalizePlatform();
+static USED uptr UserRegions[] = {
+ kAppMemBeg, kAppMemEnd,
+};
+
+#else
+# error "Unknown platform"
+#endif
// The additional page is to catch shadow stack overflow as a page fault.
-const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace) + 4096
- + 4095) & ~4095;
+// Windows wants 64K alignment for mmaps.
+const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace)
+ + (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1);
uptr ALWAYS_INLINE GetThreadTrace(int tid) {
- uptr p = kTraceMemBegin + (uptr)tid * kTotalTraceSize;
- DCHECK_LT(p, kTraceMemBegin + kTraceMemSize);
+ uptr p = kTraceMemBeg + (uptr)tid * kTotalTraceSize;
+ DCHECK_LT(p, kTraceMemEnd);
return p;
}
uptr ALWAYS_INLINE GetThreadTraceHeader(int tid) {
- uptr p = kTraceMemBegin + (uptr)tid * kTotalTraceSize
+ uptr p = kTraceMemBeg + (uptr)tid * kTotalTraceSize
+ kTraceSize * sizeof(Event);
- DCHECK_LT(p, kTraceMemBegin + kTraceMemSize);
+ DCHECK_LT(p, kTraceMemEnd);
return p;
}
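The size computation above is the standard round-up-to-alignment idiom, (size + align - 1) & ~(align - 1), applied after first adding one extra 64 KiB guard region for shadow-stack overflow. A minimal model (editorial, not from the patch):

  // align must be a power of two
  static inline uptr RoundUp(uptr size, uptr align) {
    return (size + align - 1) & ~(align - 1);
  }
  // kTotalTraceSize is then
  //   RoundUp(kTraceSize * sizeof(Event) + sizeof(Trace) + (64 << 10), 64 << 10)
  // and thread tid's slot starts at kTraceMemBeg + tid * kTotalTraceSize, with
  // its Trace header following the event buffer at offset kTraceSize * sizeof(Event).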
-void internal_start_thread(void(*func)(void*), void *arg);
+void InitializePlatform();
+void FlushShadowMemory();
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive);
+uptr GetRSS();
+
+void *internal_start_thread(void(*func)(void*), void *arg);
+void internal_join_thread(void *th);
// Says whether the addr relates to a global var.
// Guesses with high probability, may yield both false positives and negatives.
@@ -162,10 +260,10 @@ bool IsGlobalVar(uptr addr);
int ExtractResolvFDs(void *state, int *fds, int nfd);
int ExtractRecvmsgFDs(void *msg, int *fds, int nfd);
-} // namespace __tsan
+int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
+ void *abstime), void *c, void *m, void *abstime,
+ void(*cleanup)(void *arg), void *arg);
-#else // defined(__LP64__) || defined(_WIN64)
-# error "Only 64-bit is supported"
-#endif
+} // namespace __tsan
#endif // TSAN_PLATFORM_H
diff --git a/libsanitizer/tsan/tsan_platform_linux.cc b/libsanitizer/tsan/tsan_platform_linux.cc
index fe69430b71..32591316ea 100644
--- a/libsanitizer/tsan/tsan_platform_linux.cc
+++ b/libsanitizer/tsan/tsan_platform_linux.cc
@@ -7,17 +7,18 @@
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
-// Linux-specific code.
+// Linux- and FreeBSD-specific code.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_flags.h"
@@ -30,7 +31,6 @@
#include <string.h>
#include <stdarg.h>
#include <sys/mman.h>
-#include <sys/prctl.h>
#include <sys/syscall.h>
#include <sys/socket.h>
#include <sys/time.h>
@@ -41,9 +41,10 @@
#include <errno.h>
#include <sched.h>
#include <dlfcn.h>
+#if SANITIZER_LINUX
#define __need_res_state
#include <resolv.h>
-#include <malloc.h>
+#endif
#ifdef sa_handler
# undef sa_handler
@@ -53,87 +54,111 @@
# undef sa_sigaction
#endif
-extern "C" struct mallinfo __libc_mallinfo();
+#if SANITIZER_FREEBSD
+extern "C" void *__libc_stack_end;
+void *__libc_stack_end = 0;
+#endif
namespace __tsan {
+static uptr g_data_start;
+static uptr g_data_end;
+
const uptr kPageSize = 4096;
+enum {
+ MemTotal = 0,
+ MemShadow = 1,
+ MemMeta = 2,
+ MemFile = 3,
+ MemMmap = 4,
+ MemTrace = 5,
+ MemHeap = 6,
+ MemOther = 7,
+ MemCount = 8,
+};
+
+void FillProfileCallback(uptr p, uptr rss, bool file,
+ uptr *mem, uptr stats_size) {
+ mem[MemTotal] += rss;
+ if (p >= kShadowBeg && p < kShadowEnd)
+ mem[MemShadow] += rss;
+ else if (p >= kMetaShadowBeg && p < kMetaShadowEnd)
+ mem[MemMeta] += rss;
#ifndef TSAN_GO
-ScopedInRtl::ScopedInRtl()
- : thr_(cur_thread()) {
- in_rtl_ = thr_->in_rtl;
- thr_->in_rtl++;
- errno_ = errno;
-}
-
-ScopedInRtl::~ScopedInRtl() {
- thr_->in_rtl--;
- errno = errno_;
- CHECK_EQ(in_rtl_, thr_->in_rtl);
-}
+ else if (p >= kHeapMemBeg && p < kHeapMemEnd)
+ mem[MemHeap] += rss;
+ else if (p >= kLoAppMemBeg && p < kLoAppMemEnd)
+ mem[file ? MemFile : MemMmap] += rss;
+ else if (p >= kHiAppMemBeg && p < kHiAppMemEnd)
+ mem[file ? MemFile : MemMmap] += rss;
#else
-ScopedInRtl::ScopedInRtl() {
-}
-
-ScopedInRtl::~ScopedInRtl() {
-}
+ else if (p >= kAppMemBeg && p < kAppMemEnd)
+ mem[file ? MemFile : MemMmap] += rss;
#endif
-
-void FillProfileCallback(uptr start, uptr rss, bool file,
- uptr *mem, uptr stats_size) {
- CHECK_EQ(7, stats_size);
- mem[6] += rss; // total
- start >>= 40;
- if (start < 0x10) // shadow
- mem[0] += rss;
- else if (start >= 0x20 && start < 0x30) // compat modules
- mem[file ? 1 : 2] += rss;
- else if (start >= 0x7e) // modules
- mem[file ? 1 : 2] += rss;
- else if (start >= 0x60 && start < 0x62) // traces
- mem[3] += rss;
- else if (start >= 0x7d && start < 0x7e) // heap
- mem[4] += rss;
- else // other
- mem[5] += rss;
+ else if (p >= kTraceMemBeg && p < kTraceMemEnd)
+ mem[MemTrace] += rss;
+ else
+ mem[MemOther] += rss;
}
-void WriteMemoryProfile(char *buf, uptr buf_size) {
- uptr mem[7] = {};
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
+ uptr mem[MemCount] = {};
__sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
- char *buf_pos = buf;
- char *buf_end = buf + buf_size;
- buf_pos += internal_snprintf(buf_pos, buf_end - buf_pos,
- "RSS %zd MB: shadow:%zd file:%zd mmap:%zd trace:%zd heap:%zd other:%zd\n",
- mem[6] >> 20, mem[0] >> 20, mem[1] >> 20, mem[2] >> 20,
- mem[3] >> 20, mem[4] >> 20, mem[5] >> 20);
- struct mallinfo mi = __libc_mallinfo();
- buf_pos += internal_snprintf(buf_pos, buf_end - buf_pos,
- "mallinfo: arena=%d mmap=%d fordblks=%d keepcost=%d\n",
- mi.arena >> 20, mi.hblkhd >> 20, mi.fordblks >> 20, mi.keepcost >> 20);
+ StackDepotStats *stacks = StackDepotGetStats();
+ internal_snprintf(buf, buf_size,
+ "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
+ " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n",
+ mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
+ mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
+ mem[MemHeap] >> 20, mem[MemOther] >> 20,
+ stacks->allocated >> 20, stacks->n_uniq_ids,
+ nlive, nthread);
}
uptr GetRSS() {
- uptr mem[7] = {};
- __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
- return mem[6];
+ uptr fd = OpenFile("/proc/self/statm", false);
+ if ((sptr)fd < 0)
+ return 0;
+ char buf[64];
+ uptr len = internal_read(fd, buf, sizeof(buf) - 1);
+ internal_close(fd);
+ if ((sptr)len <= 0)
+ return 0;
+ buf[len] = 0;
+ // The format of the file is:
+ // 1084 89 69 11 0 79 0
+ // We need the second number which is RSS in 4K units.
+ char *pos = buf;
+ // Skip the first number.
+ while (*pos >= '0' && *pos <= '9')
+ pos++;
+ // Skip whitespaces.
+ while (!(*pos >= '0' && *pos <= '9') && *pos != 0)
+ pos++;
+ // Read the number.
+ uptr rss = 0;
+ while (*pos >= '0' && *pos <= '9')
+ rss = rss * 10 + *pos++ - '0';
+ return rss * 4096;
}
-
+#if SANITIZER_LINUX
void FlushShadowMemoryCallback(
const SuspendedThreadsList &suspended_threads_list,
void *argument) {
- FlushUnneededShadowMemory(kLinuxShadowBeg, kLinuxShadowEnd - kLinuxShadowBeg);
+ FlushUnneededShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg);
}
+#endif
void FlushShadowMemory() {
+#if SANITIZER_LINUX
StopTheWorld(FlushShadowMemoryCallback, 0);
+#endif
}
#ifndef TSAN_GO
static void ProtectRange(uptr beg, uptr end) {
- ScopedInRtl in_rtl;
CHECK_LE(beg, end);
if (beg == end)
return;
@@ -143,9 +168,7 @@ static void ProtectRange(uptr beg, uptr end) {
Die();
}
}
-#endif
-#ifndef TSAN_GO
// Mark shadow for .rodata sections with the special kShadowRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
static void MapRodata() {
@@ -159,17 +182,19 @@ static void MapRodata() {
#endif
if (tmpdir == 0)
return;
- char filename[256];
- internal_snprintf(filename, sizeof(filename), "%s/tsan.rodata.%d",
+ char name[256];
+ internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d",
tmpdir, (int)internal_getpid());
- uptr openrv = internal_open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
+ uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
if (internal_iserror(openrv))
return;
+ internal_unlink(name); // Unlink it now, so that we can reuse the buffer.
fd_t fd = openrv;
// Fill the file with kShadowRodata.
const uptr kMarkerSize = 512 * 1024 / sizeof(u64);
InternalScopedBuffer<u64> marker(kMarkerSize);
- for (u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
+ // volatile to prevent insertion of memset
+ for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
*p = kShadowRodata;
internal_write(fd, marker.data(), marker.size());
// Map the file into memory.
@@ -177,13 +202,12 @@ static void MapRodata() {
MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
if (internal_iserror(page)) {
internal_close(fd);
- internal_unlink(filename);
return;
}
// Map the file into shadow of .rodata sections.
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr start, end, offset, prot;
- char name[128];
+ // Reusing the buffer 'name'.
while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), &prot)) {
if (name[0] != 0 && name[0] != '['
&& (prot & MemoryMappingLayout::kProtectionRead)
@@ -200,66 +224,63 @@ static void MapRodata() {
}
}
internal_close(fd);
- internal_unlink(filename);
}
void InitializeShadowMemory() {
- uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg,
- kLinuxShadowEnd - kLinuxShadowBeg);
- if (shadow != kLinuxShadowBeg) {
+ // Map memory shadow.
+ uptr shadow = (uptr)MmapFixedNoReserve(kShadowBeg,
+ kShadowEnd - kShadowBeg);
+ if (shadow != kShadowBeg) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and "
- "to link with -pie (%p, %p).\n", shadow, kLinuxShadowBeg);
+ "to link with -pie (%p, %p).\n", shadow, kShadowBeg);
Die();
}
- const uptr kClosedLowBeg = 0x200000;
- const uptr kClosedLowEnd = kLinuxShadowBeg - 1;
- const uptr kClosedMidBeg = kLinuxShadowEnd + 1;
- const uptr kClosedMidEnd = min(kLinuxAppMemBeg, kTraceMemBegin);
- ProtectRange(kClosedLowBeg, kClosedLowEnd);
- ProtectRange(kClosedMidBeg, kClosedMidEnd);
- DPrintf("kClosedLow %zx-%zx (%zuGB)\n",
- kClosedLowBeg, kClosedLowEnd, (kClosedLowEnd - kClosedLowBeg) >> 30);
- DPrintf("kLinuxShadow %zx-%zx (%zuGB)\n",
- kLinuxShadowBeg, kLinuxShadowEnd,
- (kLinuxShadowEnd - kLinuxShadowBeg) >> 30);
- DPrintf("kClosedMid %zx-%zx (%zuGB)\n",
- kClosedMidBeg, kClosedMidEnd, (kClosedMidEnd - kClosedMidBeg) >> 30);
- DPrintf("kLinuxAppMem %zx-%zx (%zuGB)\n",
- kLinuxAppMemBeg, kLinuxAppMemEnd,
- (kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30);
- DPrintf("stack %zx\n", (uptr)&shadow);
-
- MapRodata();
-}
+ // This memory range is used for thread stacks and large user mmaps.
+ // Frequently a thread uses only a small part of its stack, and similarly
+ // a program uses only a small part of a large mmap. On some programs
+ // we see a 20% memory usage reduction without huge pages for this range.
+#ifdef MADV_NOHUGEPAGE
+ madvise((void*)MemToShadow(0x7f0000000000ULL),
+ 0x10000000000ULL * kShadowMultiplier, MADV_NOHUGEPAGE);
#endif
-
-static uptr g_data_start;
-static uptr g_data_end;
-
-#ifndef TSAN_GO
-static void CheckPIE() {
- // Ensure that the binary is indeed compiled with -pie.
- MemoryMappingLayout proc_maps(true);
- uptr start, end;
- if (proc_maps.Next(&start, &end,
- /*offset*/0, /*filename*/0, /*filename_size*/0,
- /*protection*/0)) {
- if ((u64)start < kLinuxAppMemBeg) {
- Printf("FATAL: ThreadSanitizer can not mmap the shadow memory ("
- "something is mapped at 0x%zx < 0x%zx)\n",
- start, kLinuxAppMemBeg);
- Printf("FATAL: Make sure to compile with -fPIE"
- " and to link with -pie.\n");
- Die();
- }
+ DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
+ kShadowBeg, kShadowEnd,
+ (kShadowEnd - kShadowBeg) >> 30);
+
+ // Map meta shadow.
+ uptr meta_size = kMetaShadowEnd - kMetaShadowBeg;
+ uptr meta = (uptr)MmapFixedNoReserve(kMetaShadowBeg, meta_size);
+ if (meta != kMetaShadowBeg) {
+ Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
+ Printf("FATAL: Make sure to compile with -fPIE and "
+ "to link with -pie (%p, %p).\n", meta, kMetaShadowBeg);
+ Die();
}
+ DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
+ meta, meta + meta_size, meta_size >> 30);
+
+ MapRodata();
}
static void InitDataSeg() {
MemoryMappingLayout proc_maps(true);
uptr start, end, offset;
char name[128];
+#if SANITIZER_FREEBSD
+ // On FreeBSD BSS is usually the last block allocated within the
+ // low range and heap is the last block allocated within the range
+ // 0x800000000-0x8ffffffff.
+ while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
+ /*protection*/ 0)) {
+ DPrintf("%p-%p %p %s\n", start, end, offset, name);
+ if ((start & 0xffff00000000ULL) == 0 && (end & 0xffff00000000ULL) == 0 &&
+ name[0] == '\0') {
+ g_data_start = start;
+ g_data_end = end;
+ }
+ }
+#else
bool prev_is_data = false;
while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
/*protection*/ 0)) {
@@ -275,34 +296,39 @@ static void InitDataSeg() {
g_data_end = end;
prev_is_data = is_data;
}
+#endif
DPrintf("guessed data_start=%p data_end=%p\n", g_data_start, g_data_end);
CHECK_LT(g_data_start, g_data_end);
CHECK_GE((uptr)&g_data_start, g_data_start);
CHECK_LT((uptr)&g_data_start, g_data_end);
}
-#endif // #ifndef TSAN_GO
-
-static rlim_t getlim(int res) {
- rlimit rlim;
- CHECK_EQ(0, getrlimit(res, &rlim));
- return rlim.rlim_cur;
-}
+static void CheckAndProtect() {
+ // Ensure that the binary is indeed compiled with -pie.
+ MemoryMappingLayout proc_maps(true);
+ uptr p, end;
+ while (proc_maps.Next(&p, &end, 0, 0, 0, 0)) {
+ if (IsAppMem(p))
+ continue;
+ if (p >= kHeapMemEnd &&
+ p < kHeapMemEnd + PrimaryAllocator::AdditionalSize())
+ continue;
+ if (p >= 0xf000000000000000ull) // vdso
+ break;
+ Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end);
+ Die();
+ }
-static void setlim(int res, rlim_t lim) {
- // The following magic is to prevent clang from replacing it with memset.
- volatile rlimit rlim;
- rlim.rlim_cur = lim;
- rlim.rlim_max = lim;
- setrlimit(res, (rlimit*)&rlim);
+ ProtectRange(kLoAppMemEnd, kShadowBeg);
+ ProtectRange(kShadowEnd, kMetaShadowBeg);
+ ProtectRange(kMetaShadowEnd, kTraceMemBeg);
+ ProtectRange(kTraceMemEnd, kHeapMemBeg);
+ ProtectRange(kHeapMemEnd + PrimaryAllocator::AdditionalSize(), kHiAppMemBeg);
}
+#endif // #ifndef TSAN_GO
-const char *InitializePlatform() {
- void *p = 0;
- if (sizeof(p) == 8) {
- // Disable core dumps, dumping of 16TB usually takes a bit long.
- setlim(RLIMIT_CORE, 0);
- }
+void InitializePlatform() {
+ DisableCoreDumperIfNecessary();
// Go maps shadow memory lazily and works fine with limited address space.
// Unlimited stack is not a problem as well, because the executable
@@ -312,21 +338,21 @@ const char *InitializePlatform() {
// TSan doesn't play well with unlimited stack size (as stack
// overlaps with shadow memory). If we detect unlimited stack size,
// we re-exec the program with limited stack size as a best effort.
- if (getlim(RLIMIT_STACK) == (rlim_t)-1) {
+ if (StackSizeIsUnlimited()) {
const uptr kMaxStackSize = 32 * 1024 * 1024;
- Report("WARNING: Program is run with unlimited stack size, which "
- "wouldn't work with ThreadSanitizer.\n");
- Report("Re-execing with stack size limited to %zd bytes.\n",
- kMaxStackSize);
+ VReport(1, "Program is run with unlimited stack size, which wouldn't "
+ "work with ThreadSanitizer.\n"
+ "Re-execing with stack size limited to %zd bytes.\n",
+ kMaxStackSize);
SetStackSizeLimitInBytes(kMaxStackSize);
reexec = true;
}
- if (getlim(RLIMIT_AS) != (rlim_t)-1) {
+ if (!AddressSpaceIsUnlimited()) {
Report("WARNING: Program is run with limited virtual address space,"
" which wouldn't work with ThreadSanitizer.\n");
Report("Re-execing with unlimited virtual address space.\n");
- setlim(RLIMIT_AS, -1);
+ SetAddressSpaceUnlimited();
reexec = true;
}
if (reexec)
@@ -334,11 +360,10 @@ const char *InitializePlatform() {
}
#ifndef TSAN_GO
- CheckPIE();
+ CheckAndProtect();
InitTlsSize();
InitDataSeg();
#endif
- return GetEnv(kTsanOptionsEnv);
}
bool IsGlobalVar(uptr addr) {
@@ -350,6 +375,7 @@ bool IsGlobalVar(uptr addr) {
// This is required to properly "close" the fds, because we do not see internal
// closes within glibc. The code is a pure hack.
int ExtractResolvFDs(void *state, int *fds, int nfd) {
+#if SANITIZER_LINUX
int cnt = 0;
__res_state *statp = (__res_state*)state;
for (int i = 0; i < MAXNS && cnt < nfd; i++) {
@@ -357,6 +383,9 @@ int ExtractResolvFDs(void *state, int *fds, int nfd) {
fds[cnt++] = statp->_u._ext.nssocks[i];
}
return cnt;
+#else
+ return 0;
+#endif
}
// Extract file descriptors passed via UNIX domain sockets.
@@ -378,9 +407,20 @@ int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
}
return res;
}
-#endif
+int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
+ void *abstime), void *c, void *m, void *abstime,
+ void(*cleanup)(void *arg), void *arg) {
+ // pthread_cleanup_push/pop are a hardcore macro mess.
+ // We can neither intercept nor call them without including pthread.h.
+ int res;
+ pthread_cleanup_push(cleanup, arg);
+ res = fn(c, m, abstime);
+ pthread_cleanup_pop(0);
+ return res;
+}
+#endif
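A sketch of how an interceptor for a blocking call could use the helper above so that a user-registered cleanup still runs if the thread is cancelled while blocked. Everything below except call_pthread_cancel_with_cleanup itself is hypothetical and only illustrates the calling convention:

  // static void on_cancel(void *arg) {
  //   // undo tsan-side bookkeeping for the mutex passed in 'arg'
  // }
  // ...
  // int res = call_pthread_cancel_with_cleanup(
  //     (int (*)(void*, void*, void*))real_pthread_cond_timedwait,
  //     cond, mutex, abstime,   // forwarded to fn(c, m, abstime)
  //     on_cancel, mutex);      // pushed via pthread_cleanup_push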
} // namespace __tsan
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
diff --git a/libsanitizer/tsan/tsan_platform_mac.cc b/libsanitizer/tsan/tsan_platform_mac.cc
index 3dca611dc9..2091318a67 100644
--- a/libsanitizer/tsan/tsan_platform_mac.cc
+++ b/libsanitizer/tsan/tsan_platform_mac.cc
@@ -38,55 +38,56 @@
namespace __tsan {
-ScopedInRtl::ScopedInRtl() {
+uptr GetShadowMemoryConsumption() {
+ return 0;
}
-ScopedInRtl::~ScopedInRtl() {
+void FlushShadowMemory() {
}
-uptr GetShadowMemoryConsumption() {
- return 0;
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
}
-void FlushShadowMemory() {
+uptr GetRSS() {
+ return 0;
}
#ifndef TSAN_GO
void InitializeShadowMemory() {
- uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg,
- kLinuxShadowEnd - kLinuxShadowBeg);
- if (shadow != kLinuxShadowBeg) {
+ uptr shadow = (uptr)MmapFixedNoReserve(kShadowBeg,
+ kShadowEnd - kShadowBeg);
+ if (shadow != kShadowBeg) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and "
"to link with -pie.\n");
Die();
}
- DPrintf("kLinuxShadow %zx-%zx (%zuGB)\n",
- kLinuxShadowBeg, kLinuxShadowEnd,
- (kLinuxShadowEnd - kLinuxShadowBeg) >> 30);
- DPrintf("kLinuxAppMem %zx-%zx (%zuGB)\n",
- kLinuxAppMemBeg, kLinuxAppMemEnd,
- (kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30);
+ DPrintf("kShadow %zx-%zx (%zuGB)\n",
+ kShadowBeg, kShadowEnd,
+ (kShadowEnd - kShadowBeg) >> 30);
+ DPrintf("kAppMem %zx-%zx (%zuGB)\n",
+ kAppMemBeg, kAppMemEnd,
+ (kAppMemEnd - kAppMemBeg) >> 30);
}
#endif
-const char *InitializePlatform() {
- void *p = 0;
- if (sizeof(p) == 8) {
- // Disable core dumps, dumping of 16TB usually takes a bit long.
- // The following magic is to prevent clang from replacing it with memset.
- volatile rlimit lim;
- lim.rlim_cur = 0;
- lim.rlim_max = 0;
- setrlimit(RLIMIT_CORE, (rlimit*)&lim);
- }
-
- return GetEnv(kTsanOptionsEnv);
+void InitializePlatform() {
+ DisableCoreDumperIfNecessary();
}
-void FinalizePlatform() {
- fflush(0);
+#ifndef TSAN_GO
+int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
+ void *abstime), void *c, void *m, void *abstime,
+ void(*cleanup)(void *arg), void *arg) {
+ // pthread_cleanup_push/pop are a hardcore macro mess.
+ // We can neither intercept nor call them without including pthread.h.
+ int res;
+ pthread_cleanup_push(cleanup, arg);
+ res = fn(c, m, abstime);
+ pthread_cleanup_pop(0);
+ return res;
}
+#endif
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_platform_windows.cc b/libsanitizer/tsan/tsan_platform_windows.cc
index 6e49ef42f0..3f81c54e3e 100644
--- a/libsanitizer/tsan/tsan_platform_windows.cc
+++ b/libsanitizer/tsan/tsan_platform_windows.cc
@@ -19,12 +19,6 @@
namespace __tsan {
-ScopedInRtl::ScopedInRtl() {
-}
-
-ScopedInRtl::~ScopedInRtl() {
-}
-
uptr GetShadowMemoryConsumption() {
return 0;
}
@@ -32,12 +26,14 @@ uptr GetShadowMemoryConsumption() {
void FlushShadowMemory() {
}
-const char *InitializePlatform() {
- return GetEnv(kTsanOptionsEnv);
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
+}
+
+uptr GetRSS() {
+ return 0;
}
-void FinalizePlatform() {
- fflush(0);
+void InitializePlatform() {
}
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_report.cc b/libsanitizer/tsan/tsan_report.cc
index f2484166e8..6feab81383 100644
--- a/libsanitizer/tsan/tsan_report.cc
+++ b/libsanitizer/tsan/tsan_report.cc
@@ -11,13 +11,33 @@
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_stacktrace_printer.h"
namespace __tsan {
-class Decorator: private __sanitizer::AnsiColorDecorator {
+ReportStack::ReportStack() : next(nullptr), info(), suppressable(false) {}
+
+ReportStack *ReportStack::New(uptr addr) {
+ void *mem = internal_alloc(MBlockReportStack, sizeof(ReportStack));
+ ReportStack *res = new(mem) ReportStack();
+ res->info.address = addr;
+ return res;
+}
+
+ReportLocation::ReportLocation(ReportLocationType type)
+ : type(type), global(), heap_chunk_start(0), heap_chunk_size(0), tid(0),
+ fd(0), suppressable(false), stack(nullptr) {}
+
+ReportLocation *ReportLocation::New(ReportLocationType type) {
+ void *mem = internal_alloc(MBlockReportStack, sizeof(ReportLocation));
+ return new(mem) ReportLocation(type);
+}
+
+class Decorator: public __sanitizer::SanitizerCommonDecorator {
public:
- Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { }
+ Decorator() : SanitizerCommonDecorator() { }
const char *Warning() { return Red(); }
const char *EndWarning() { return Default(); }
const char *Access() { return Blue(); }
@@ -38,6 +58,7 @@ ReportDesc::ReportDesc()
, locs(MBlockReportLoc)
, mutexes(MBlockReportMutex)
, threads(MBlockReportThread)
+ , unique_tids(MBlockReportThread)
, sleep()
, count() {
}
@@ -67,14 +88,26 @@ static const char *ReportTypeString(ReportType typ) {
return "data race on vptr (ctor/dtor vs virtual call)";
if (typ == ReportTypeUseAfterFree)
return "heap-use-after-free";
+ if (typ == ReportTypeVptrUseAfterFree)
+ return "heap-use-after-free (virtual call vs free)";
if (typ == ReportTypeThreadLeak)
return "thread leak";
if (typ == ReportTypeMutexDestroyLocked)
return "destroy of a locked mutex";
+ if (typ == ReportTypeMutexDoubleLock)
+ return "double lock of a mutex";
+ if (typ == ReportTypeMutexBadUnlock)
+ return "unlock of an unlocked mutex (or by a wrong thread)";
+ if (typ == ReportTypeMutexBadReadLock)
+ return "read lock of a write locked mutex";
+ if (typ == ReportTypeMutexBadReadUnlock)
+ return "read unlock of a write locked mutex";
if (typ == ReportTypeSignalUnsafe)
return "signal-unsafe call inside of a signal";
if (typ == ReportTypeErrnoInSignal)
return "signal handler spoils errno";
+ if (typ == ReportTypeDeadlock)
+ return "lock-order-inversion (potential deadlock)";
return "";
}
@@ -83,14 +116,11 @@ void PrintStack(const ReportStack *ent) {
Printf(" [failed to restore the stack]\n\n");
return;
}
- for (int i = 0; ent; ent = ent->next, i++) {
- Printf(" #%d %s %s:%d", i, ent->func, ent->file, ent->line);
- if (ent->col)
- Printf(":%d", ent->col);
- if (ent->module && ent->offset)
- Printf(" (%s+%p)\n", ent->module, (void*)ent->offset);
- else
- Printf(" (%p)\n", (void*)ent->pc);
+ for (int i = 0; ent && ent->info.address; ent = ent->next, i++) {
+ InternalScopedString res(2 * GetPageSizeCached());
+ RenderFrame(&res, common_flags()->stack_trace_format, i, ent->info,
+ common_flags()->strip_path_prefix, "__interceptor_");
+ Printf("%s\n", res.data());
}
Printf("\n");
}
@@ -132,12 +162,15 @@ static void PrintLocation(const ReportLocation *loc) {
bool print_stack = false;
Printf("%s", d.Location());
if (loc->type == ReportLocationGlobal) {
+ const DataInfo &global = loc->global;
Printf(" Location is global '%s' of size %zu at %p (%s+%p)\n\n",
- loc->name, loc->size, loc->addr, loc->module, loc->offset);
+ global.name, global.size, global.start,
+ StripModuleName(global.module), global.module_offset);
} else if (loc->type == ReportLocationHeap) {
char thrbuf[kThreadBufSize];
Printf(" Location is heap block of size %zu at %p allocated by %s:\n",
- loc->size, loc->addr, thread_name(thrbuf, loc->tid));
+ loc->heap_chunk_size, loc->heap_chunk_start,
+ thread_name(thrbuf, loc->tid));
print_stack = true;
} else if (loc->type == ReportLocationStack) {
Printf(" Location is stack of %s.\n\n", thread_name(thrbuf, loc->tid));
@@ -153,6 +186,17 @@ static void PrintLocation(const ReportLocation *loc) {
PrintStack(loc->stack);
}
+static void PrintMutexShort(const ReportMutex *rm, const char *after) {
+ Decorator d;
+ Printf("%sM%zd%s%s", d.Mutex(), rm->id, d.EndMutex(), after);
+}
+
+static void PrintMutexShortWithAddress(const ReportMutex *rm,
+ const char *after) {
+ Decorator d;
+ Printf("%sM%zd (%p)%s%s", d.Mutex(), rm->id, rm->addr, d.EndMutex(), after);
+}
+
static void PrintMutex(const ReportMutex *rm) {
Decorator d;
if (rm->destroyed) {
@@ -161,7 +205,7 @@ static void PrintMutex(const ReportMutex *rm) {
Printf("%s", d.EndMutex());
} else {
Printf("%s", d.Mutex());
- Printf(" Mutex M%llu created at:\n", rm->id);
+ Printf(" Mutex M%llu (%p) created at:\n", rm->id, rm->addr);
Printf("%s", d.EndMutex());
PrintStack(rm->stack);
}
@@ -221,10 +265,42 @@ void PrintReport(const ReportDesc *rep) {
(int)internal_getpid());
Printf("%s", d.EndWarning());
- for (uptr i = 0; i < rep->stacks.Size(); i++) {
- if (i)
- Printf(" and:\n");
- PrintStack(rep->stacks[i]);
+ if (rep->typ == ReportTypeDeadlock) {
+ char thrbuf[kThreadBufSize];
+ Printf(" Cycle in lock order graph: ");
+ for (uptr i = 0; i < rep->mutexes.Size(); i++)
+ PrintMutexShortWithAddress(rep->mutexes[i], " => ");
+ PrintMutexShort(rep->mutexes[0], "\n\n");
+ CHECK_GT(rep->mutexes.Size(), 0U);
+ CHECK_EQ(rep->mutexes.Size() * (flags()->second_deadlock_stack ? 2 : 1),
+ rep->stacks.Size());
+ for (uptr i = 0; i < rep->mutexes.Size(); i++) {
+ Printf(" Mutex ");
+ PrintMutexShort(rep->mutexes[(i + 1) % rep->mutexes.Size()],
+ " acquired here while holding mutex ");
+ PrintMutexShort(rep->mutexes[i], " in ");
+ Printf("%s", d.ThreadDescription());
+ Printf("%s:\n", thread_name(thrbuf, rep->unique_tids[i]));
+ Printf("%s", d.EndThreadDescription());
+ if (flags()->second_deadlock_stack) {
+ PrintStack(rep->stacks[2*i]);
+ Printf(" Mutex ");
+ PrintMutexShort(rep->mutexes[i],
+ " previously acquired by the same thread here:\n");
+ PrintStack(rep->stacks[2*i+1]);
+ } else {
+ PrintStack(rep->stacks[i]);
+ if (i == 0)
+ Printf(" Hint: use TSAN_OPTIONS=second_deadlock_stack=1 "
+                 "to get a more informative warning message\n\n");
+ }
+ }
+ } else {
+ for (uptr i = 0; i < rep->stacks.Size(); i++) {
+ if (i)
+ Printf(" and:\n");
+ PrintStack(rep->stacks[i]);
+ }
}
for (uptr i = 0; i < rep->mops.Size(); i++)
@@ -236,8 +312,10 @@ void PrintReport(const ReportDesc *rep) {
for (uptr i = 0; i < rep->locs.Size(); i++)
PrintLocation(rep->locs[i]);
- for (uptr i = 0; i < rep->mutexes.Size(); i++)
- PrintMutex(rep->mutexes[i]);
+ if (rep->typ != ReportTypeDeadlock) {
+ for (uptr i = 0; i < rep->mutexes.Size(); i++)
+ PrintMutex(rep->mutexes[i]);
+ }
for (uptr i = 0; i < rep->threads.Size(); i++)
PrintThread(rep->threads[i]);
@@ -245,8 +323,10 @@ void PrintReport(const ReportDesc *rep) {
if (rep->typ == ReportTypeThreadLeak && rep->count > 1)
Printf(" And %d more similar thread leaks.\n\n", rep->count - 1);
- if (ReportStack *ent = SkipTsanInternalFrames(ChooseSummaryStack(rep)))
- ReportErrorSummary(rep_typ_str, ent->file, ent->line, ent->func);
+ if (ReportStack *ent = SkipTsanInternalFrames(ChooseSummaryStack(rep))) {
+ const AddressInfo &info = ent->info;
+ ReportErrorSummary(rep_typ_str, info.file, info.line, info.function);
+ }
Printf("==================\n");
}
@@ -261,8 +341,9 @@ void PrintStack(const ReportStack *ent) {
return;
}
for (int i = 0; ent; ent = ent->next, i++) {
- Printf(" %s()\n %s:%d +0x%zx\n",
- ent->func, ent->file, ent->line, (void*)ent->offset);
+ const AddressInfo &info = ent->info;
+ Printf(" %s()\n %s:%d +0x%zx\n", info.function, info.file, info.line,
+ (void *)info.module_offset);
}
}
@@ -289,11 +370,26 @@ static void PrintThread(const ReportThread *rt) {
void PrintReport(const ReportDesc *rep) {
Printf("==================\n");
- Printf("WARNING: DATA RACE");
- for (uptr i = 0; i < rep->mops.Size(); i++)
- PrintMop(rep->mops[i], i == 0);
- for (uptr i = 0; i < rep->threads.Size(); i++)
- PrintThread(rep->threads[i]);
+ if (rep->typ == ReportTypeRace) {
+ Printf("WARNING: DATA RACE");
+ for (uptr i = 0; i < rep->mops.Size(); i++)
+ PrintMop(rep->mops[i], i == 0);
+ for (uptr i = 0; i < rep->threads.Size(); i++)
+ PrintThread(rep->threads[i]);
+ } else if (rep->typ == ReportTypeDeadlock) {
+ Printf("WARNING: DEADLOCK\n");
+ for (uptr i = 0; i < rep->mutexes.Size(); i++) {
+      Printf("Goroutine %d locks mutex %d while holding mutex %d:\n",
+ 999, rep->mutexes[i]->id,
+ rep->mutexes[(i+1) % rep->mutexes.Size()]->id);
+ PrintStack(rep->stacks[2*i]);
+ Printf("\n");
+ Printf("Mutex %d was previously locked here:\n",
+ rep->mutexes[(i+1) % rep->mutexes.Size()]->id);
+ PrintStack(rep->stacks[2*i + 1]);
+ Printf("\n");
+ }
+ }
Printf("==================\n");
}
diff --git a/libsanitizer/tsan/tsan_report.h b/libsanitizer/tsan/tsan_report.h
index c0eef9eb02..3d99c7a7c2 100644
--- a/libsanitizer/tsan/tsan_report.h
+++ b/libsanitizer/tsan/tsan_report.h
@@ -11,6 +11,7 @@
#ifndef TSAN_REPORT_H
#define TSAN_REPORT_H
+#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_vector.h"
@@ -20,21 +21,26 @@ enum ReportType {
ReportTypeRace,
ReportTypeVptrRace,
ReportTypeUseAfterFree,
+ ReportTypeVptrUseAfterFree,
ReportTypeThreadLeak,
ReportTypeMutexDestroyLocked,
+ ReportTypeMutexDoubleLock,
+ ReportTypeMutexBadUnlock,
+ ReportTypeMutexBadReadLock,
+ ReportTypeMutexBadReadUnlock,
ReportTypeSignalUnsafe,
- ReportTypeErrnoInSignal
+ ReportTypeErrnoInSignal,
+ ReportTypeDeadlock
};
struct ReportStack {
ReportStack *next;
- char *module;
- uptr offset;
- uptr pc;
- char *func;
- char *file;
- int line;
- int col;
+ AddressInfo info;
+ bool suppressable;
+ static ReportStack *New(uptr addr);
+
+ private:
+ ReportStack();
};
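ReportStack now wraps a sanitizer_common AddressInfo and is constructed only through New(). A minimal sketch of how frames are chained under these declarations (the PC values are purely illustrative):

// Two-frame stack, innermost frame first, linked through 'next'.
ReportStack *top = ReportStack::New(0x4005d0);     // illustrative PC
ReportStack *caller = ReportStack::New(0x400720);  // illustrative PC
top->next = caller;
top->suppressable = true;  // frame may later be matched against suppressions
// top->info (function, file, line, ...) is filled in by the symbolizer.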
struct ReportMopMutex {
@@ -64,16 +70,17 @@ enum ReportLocationType {
struct ReportLocation {
ReportLocationType type;
- uptr addr;
- uptr size;
- char *module;
- uptr offset;
+ DataInfo global;
+ uptr heap_chunk_start;
+ uptr heap_chunk_size;
int tid;
int fd;
- char *name;
- char *file;
- int line;
+ bool suppressable;
ReportStack *stack;
+
+ static ReportLocation *New(ReportLocationType type);
+ private:
+ explicit ReportLocation(ReportLocationType type);
};
struct ReportThread {
@@ -87,6 +94,7 @@ struct ReportThread {
struct ReportMutex {
u64 id;
+ uptr addr;
bool destroyed;
ReportStack *stack;
};
@@ -99,6 +107,7 @@ class ReportDesc {
Vector<ReportLocation*> locs;
Vector<ReportMutex*> mutexes;
Vector<ReportThread*> threads;
+ Vector<int> unique_tids;
ReportStack *sleep;
int count;
diff --git a/libsanitizer/tsan/tsan_rtl.cc b/libsanitizer/tsan/tsan_rtl.cc
index 573eeb8a91..bf971b62da 100644
--- a/libsanitizer/tsan/tsan_rtl.cc
+++ b/libsanitizer/tsan/tsan_rtl.cc
@@ -23,6 +23,16 @@
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
+#ifdef __SSE3__
+// <emmintrin.h> transitively includes <stdlib.h>,
+// and it's prohibited to include std headers into tsan runtime.
+// So we do this dirty trick.
+#define _MM_MALLOC_H_INCLUDED
+#define __MM_MALLOC_H
+#include <emmintrin.h>
+typedef __m128i m128;
+#endif
+
volatile int __tsan_resumed = 0;
extern "C" void __tsan_resume() {
@@ -35,22 +45,21 @@ namespace __tsan {
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
+Context *ctx;
// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
+void OnInitialize();
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnFinalize(bool failed) {
return failed;
}
+SANITIZER_INTERFACE_ATTRIBUTE
+void WEAK OnInitialize() {}
#endif
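A front-end embedding the runtime can replace these weak hooks. A minimal sketch of what such an override might look like (hypothetical code, not part of this patch; it assumes the definitions live in namespace __tsan so they take precedence over the WEAK defaults at link time):

namespace __tsan {
SANITIZER_INTERFACE_ATTRIBUTE
void OnInitialize() {
  // Runs once at the end of Initialize(), e.g. to set up embedder state.
}
SANITIZER_INTERFACE_ATTRIBUTE
bool OnFinalize(bool failed) {
  // The returned value decides whether Finalize() reports failure.
  return failed;
}
}  // namespace __tsan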
-static Context *ctx;
-Context *CTX() {
- return ctx;
-}
-
static char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadContextBase *CreateThreadContext(u32 tid) {
@@ -74,7 +83,7 @@ Context::Context()
, nreported()
, nmissed_expected()
, thread_registry(new(thread_registry_placeholder) ThreadRegistry(
- CreateThreadContext, kMaxTid, kThreadQuarantineSize))
+ CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
, racy_stacks(MBlockRacyStacks)
, racy_addresses(MBlockRacyAddresses)
, fired_suppressions(8) {
@@ -82,13 +91,15 @@ Context::Context()
// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
+ unsigned reuse_count,
uptr stk_addr, uptr stk_size,
uptr tls_addr, uptr tls_size)
: fast_state(tid, epoch)
// Do not touch these, rely on zero initialization,
// they may be accessed before the ctor.
// , ignore_reads_and_writes()
- // , in_rtl()
+ // , ignore_interceptors()
+ , clock(tid, reuse_count)
#ifndef TSAN_GO
, jmp_bufs(MBlockJmpBuf)
#endif
@@ -97,7 +108,11 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
, stk_addr(stk_addr)
, stk_size(stk_size)
, tls_addr(tls_addr)
- , tls_size(tls_size) {
+ , tls_size(tls_size)
+#ifndef TSAN_GO
+ , last_sleep_clock(tid)
+#endif
+{
}
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
@@ -105,62 +120,68 @@ static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
uptr n_running_threads;
ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
InternalScopedBuffer<char> buf(4096);
- internal_snprintf(buf.data(), buf.size(), "%d: nthr=%d nlive=%d\n",
- i, n_threads, n_running_threads);
- internal_write(fd, buf.data(), internal_strlen(buf.data()));
- WriteMemoryProfile(buf.data(), buf.size());
+ WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
internal_write(fd, buf.data(), internal_strlen(buf.data()));
}
static void BackgroundThread(void *arg) {
- ScopedInRtl in_rtl;
- Context *ctx = CTX();
+#ifndef TSAN_GO
+ // This is a non-initialized non-user thread, nothing to see here.
+ // We don't use ScopedIgnoreInterceptors, because we want ignores to be
+ // enabled even when the thread function exits (e.g. during pthread thread
+ // shutdown code).
+ cur_thread()->ignore_interceptors++;
+#endif
const u64 kMs2Ns = 1000 * 1000;
fd_t mprof_fd = kInvalidFd;
if (flags()->profile_memory && flags()->profile_memory[0]) {
- InternalScopedBuffer<char> filename(4096);
- internal_snprintf(filename.data(), filename.size(), "%s.%d",
- flags()->profile_memory, (int)internal_getpid());
- uptr openrv = OpenFile(filename.data(), true);
- if (internal_iserror(openrv)) {
- Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
- &filename[0]);
+ if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
+ mprof_fd = 1;
+ } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
+ mprof_fd = 2;
} else {
- mprof_fd = openrv;
+ InternalScopedBuffer<char> filename(4096);
+ internal_snprintf(filename.data(), filename.size(), "%s.%d",
+ flags()->profile_memory, (int)internal_getpid());
+ uptr openrv = OpenFile(filename.data(), true);
+ if (internal_iserror(openrv)) {
+ Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
+ &filename[0]);
+ } else {
+ mprof_fd = openrv;
+ }
}
}
u64 last_flush = NanoTime();
uptr last_rss = 0;
- for (int i = 0; ; i++) {
- SleepForSeconds(1);
+ for (int i = 0;
+ atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
+ i++) {
+ SleepForMillis(100);
u64 now = NanoTime();
// Flush memory if requested.
if (flags()->flush_memory_ms > 0) {
if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
- if (flags()->verbosity > 0)
- Printf("ThreadSanitizer: periodic memory flush\n");
+ VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
FlushShadowMemory();
last_flush = NanoTime();
}
}
+ // GetRSS can be expensive on huge programs, so don't do it every 100ms.
if (flags()->memory_limit_mb > 0) {
uptr rss = GetRSS();
uptr limit = uptr(flags()->memory_limit_mb) << 20;
- if (flags()->verbosity > 0) {
- Printf("ThreadSanitizer: memory flush check"
- " RSS=%llu LAST=%llu LIMIT=%llu\n",
- (u64)rss>>20, (u64)last_rss>>20, (u64)limit>>20);
- }
+ VPrintf(1, "ThreadSanitizer: memory flush check"
+ " RSS=%llu LAST=%llu LIMIT=%llu\n",
+ (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
if (2 * rss > limit + last_rss) {
- if (flags()->verbosity > 0)
- Printf("ThreadSanitizer: flushing memory due to RSS\n");
+ VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
FlushShadowMemory();
rss = GetRSS();
- if (flags()->verbosity > 0)
- Printf("ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
+ VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
}
last_rss = rss;
}
@@ -185,6 +206,18 @@ static void BackgroundThread(void *arg) {
}
}
+static void StartBackgroundThread() {
+ ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
+}
+
+#ifndef TSAN_GO
+static void StopBackgroundThread() {
+ atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
+ internal_join_thread(ctx->background_thread);
+ ctx->background_thread = 0;
+}
+#endif
+
void DontNeedShadowFor(uptr addr, uptr size) {
uptr shadow_beg = MemToShadow(addr);
uptr shadow_end = MemToShadow(addr + size);
@@ -192,13 +225,43 @@ void DontNeedShadowFor(uptr addr, uptr size) {
}
void MapShadow(uptr addr, uptr size) {
+ // Global data is not 64K aligned, but there are no adjacent mappings,
+ // so we can get away with unaligned mapping.
+ // CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
+
+ // Meta shadow is 2:1, so tread carefully.
+ static bool data_mapped = false;
+ static uptr mapped_meta_end = 0;
+ uptr meta_begin = (uptr)MemToMeta(addr);
+ uptr meta_end = (uptr)MemToMeta(addr + size);
+ meta_begin = RoundDownTo(meta_begin, 64 << 10);
+ meta_end = RoundUpTo(meta_end, 64 << 10);
+ if (!data_mapped) {
+ // First call maps data+bss.
+ data_mapped = true;
+ MmapFixedNoReserve(meta_begin, meta_end - meta_begin);
+ } else {
+    // Mapping continuous heap.
+ // Windows wants 64K alignment.
+ meta_begin = RoundDownTo(meta_begin, 64 << 10);
+ meta_end = RoundUpTo(meta_end, 64 << 10);
+ if (meta_end <= mapped_meta_end)
+ return;
+ if (meta_begin < mapped_meta_end)
+ meta_begin = mapped_meta_end;
+ MmapFixedNoReserve(meta_begin, meta_end - meta_begin);
+ mapped_meta_end = meta_end;
+ }
+ VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
+ addr, addr+size, meta_begin, meta_end);
}
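The meta shadow covers application memory at a 2:1 ratio (hence MemToMeta), and Windows requires mappings to start on 64K boundaries, so both ends of the meta range are rounded to 64K before MmapFixedNoReserve. A small worked example of that rounding, with illustrative addresses (not real TSan constants):

uptr meta_begin = 0x1200012340;                  // illustrative
uptr meta_end   = 0x120001a000;                  // illustrative
meta_begin = RoundDownTo(meta_begin, 64 << 10);  // -> 0x1200010000
meta_end   = RoundUpTo(meta_end, 64 << 10);      // -> 0x1200020000
// Later heap growth only maps the part above mapped_meta_end,
// so already-mapped 64K chunks are never re-mapped.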
void MapThreadTrace(uptr addr, uptr size) {
DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
- CHECK_GE(addr, kTraceMemBegin);
- CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
+ CHECK_GE(addr, kTraceMemBeg);
+ CHECK_LE(addr + size, kTraceMemEnd);
+ CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
uptr addr1 = (uptr)MmapFixedNoReserve(addr, size);
if (addr1 != addr) {
Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p->%p)\n",
@@ -207,57 +270,75 @@ void MapThreadTrace(uptr addr, uptr size) {
}
}
+static void CheckShadowMapping() {
+ for (uptr i = 0; i < ARRAY_SIZE(UserRegions); i += 2) {
+ const uptr beg = UserRegions[i];
+ const uptr end = UserRegions[i + 1];
+ VPrintf(3, "checking shadow region %p-%p\n", beg, end);
+ for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
+ for (int x = -1; x <= 1; x++) {
+ const uptr p = p0 + x;
+ if (p < beg || p >= end)
+ continue;
+ const uptr s = MemToShadow(p);
+ VPrintf(3, " checking pointer %p -> %p\n", p, s);
+ CHECK(IsAppMem(p));
+ CHECK(IsShadowMem(s));
+ CHECK_EQ(p & ~(kShadowCell - 1), ShadowToMem(s));
+ const uptr m = (uptr)MemToMeta(p);
+ CHECK(IsMetaMem(m));
+ }
+ }
+ }
+}
+
void Initialize(ThreadState *thr) {
// Thread safe because done before all threads exist.
static bool is_initialized = false;
if (is_initialized)
return;
is_initialized = true;
+ // We are not ready to handle interceptors yet.
+ ScopedIgnoreInterceptors ignore;
SanitizerToolName = "ThreadSanitizer";
// Install tool-specific callbacks in sanitizer_common.
SetCheckFailedCallback(TsanCheckFailed);
- ScopedInRtl in_rtl;
+ ctx = new(ctx_placeholder) Context;
+ const char *options = GetEnv(kTsanOptionsEnv);
+ InitializeFlags(&ctx->flags, options);
#ifndef TSAN_GO
InitializeAllocator();
#endif
InitializeInterceptors();
- const char *env = InitializePlatform();
+ CheckShadowMapping();
+ InitializePlatform();
InitializeMutex();
InitializeDynamicAnnotations();
- ctx = new(ctx_placeholder) Context;
#ifndef TSAN_GO
InitializeShadowMemory();
#endif
- InitializeFlags(&ctx->flags, env);
// Setup correct file descriptor for error reports.
- __sanitizer_set_report_path(flags()->log_path);
+ __sanitizer_set_report_path(common_flags()->log_path);
InitializeSuppressions();
#ifndef TSAN_GO
InitializeLibIgnore();
- // Initialize external symbolizer before internal threads are started.
- const char *external_symbolizer = flags()->external_symbolizer_path;
- bool external_symbolizer_started =
- Symbolizer::Init(external_symbolizer)->IsExternalAvailable();
- if (external_symbolizer != 0 && external_symbolizer[0] != '\0' &&
- !external_symbolizer_started) {
- Printf("Failed to start external symbolizer: '%s'\n",
- external_symbolizer);
- Die();
- }
- Symbolizer::Get()->AddHooks(EnterSymbolizer, ExitSymbolizer);
+ Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif
- internal_start_thread(&BackgroundThread, 0);
+ StartBackgroundThread();
+#ifndef TSAN_GO
+ SetSandboxingCallback(StopBackgroundThread);
+#endif
+ if (common_flags()->detect_deadlocks)
+ ctx->dd = DDetector::Create(flags());
- if (ctx->flags.verbosity)
- Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
- (int)internal_getpid());
+ VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
+ (int)internal_getpid());
// Initialize thread 0.
int tid = ThreadCreate(thr, 0, 0, true);
CHECK_EQ(tid, 0);
ThreadStart(thr, tid, internal_getpid());
- CHECK_EQ(thr->in_rtl, 1);
ctx->initialized = true;
if (flags()->stop_on_start) {
@@ -266,11 +347,11 @@ void Initialize(ThreadState *thr) {
(int)internal_getpid());
while (__tsan_resumed == 0) {}
}
+
+ OnInitialize();
}
int Finalize(ThreadState *thr) {
- ScopedInRtl in_rtl;
- Context *ctx = __tsan::ctx;
bool failed = false;
if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
@@ -283,7 +364,7 @@ int Finalize(ThreadState *thr) {
ctx->report_mtx.Unlock();
#ifndef TSAN_GO
- if (ctx->flags.verbosity)
+ if (common_flags()->verbosity)
AllocatorPrintStats();
#endif
@@ -304,7 +385,7 @@ int Finalize(ThreadState *thr) {
ctx->nmissed_expected);
}
- if (flags()->print_suppressions)
+ if (common_flags()->print_suppressions)
PrintMatchedSuppressions();
#ifndef TSAN_GO
if (flags()->print_benign)
@@ -319,30 +400,81 @@ int Finalize(ThreadState *thr) {
}
#ifndef TSAN_GO
+void ForkBefore(ThreadState *thr, uptr pc) {
+ ctx->thread_registry->Lock();
+ ctx->report_mtx.Lock();
+}
+
+void ForkParentAfter(ThreadState *thr, uptr pc) {
+ ctx->report_mtx.Unlock();
+ ctx->thread_registry->Unlock();
+}
+
+void ForkChildAfter(ThreadState *thr, uptr pc) {
+ ctx->report_mtx.Unlock();
+ ctx->thread_registry->Unlock();
+
+ uptr nthread = 0;
+ ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
+ VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
+ " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
+ if (nthread == 1) {
+ internal_start_thread(&BackgroundThread, 0);
+ } else {
+ // We've just forked a multi-threaded process. We cannot reasonably function
+ // after that (some mutexes may be locked before fork). So just enable
+ // ignores for everything in the hope that we will exec soon.
+ ctx->after_multithreaded_fork = true;
+ thr->ignore_interceptors++;
+ ThreadIgnoreBegin(thr, pc);
+ ThreadIgnoreSyncBegin(thr, pc);
+ }
+}
+#endif
+
+#ifdef TSAN_GO
+NOINLINE
+void GrowShadowStack(ThreadState *thr) {
+ const int sz = thr->shadow_stack_end - thr->shadow_stack;
+ const int newsz = 2 * sz;
+ uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
+ newsz * sizeof(uptr));
+ internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
+ internal_free(thr->shadow_stack);
+ thr->shadow_stack = newstack;
+ thr->shadow_stack_pos = newstack + sz;
+ thr->shadow_stack_end = newstack + newsz;
+}
+#endif
+
u32 CurrentStackId(ThreadState *thr, uptr pc) {
if (thr->shadow_stack_pos == 0) // May happen during bootstrap.
return 0;
- if (pc) {
+ if (pc != 0) {
+#ifndef TSAN_GO
+ DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
+#else
+ if (thr->shadow_stack_pos == thr->shadow_stack_end)
+ GrowShadowStack(thr);
+#endif
thr->shadow_stack_pos[0] = pc;
thr->shadow_stack_pos++;
}
- u32 id = StackDepotPut(thr->shadow_stack,
- thr->shadow_stack_pos - thr->shadow_stack);
- if (pc)
+ u32 id = StackDepotPut(
+ StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
+ if (pc != 0)
thr->shadow_stack_pos--;
return id;
}
-#endif
void TraceSwitch(ThreadState *thr) {
thr->nomalloc++;
- ScopedInRtl in_rtl;
Trace *thr_trace = ThreadTrace(thr->tid);
Lock l(&thr_trace->mtx);
unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
TraceHeader *hdr = &thr_trace->headers[trace];
hdr->epoch0 = thr->fast_state.epoch();
- hdr->stack0.ObtainCurrent(thr, 0);
+ ObtainCurrentStack(thr, 0, &hdr->stack0);
hdr->mset0 = thr->mset;
thr->nomalloc--;
}
@@ -392,7 +524,8 @@ void StoreIfNotYetStored(u64 *sp, u64 *s) {
*s = 0;
}
-static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
+ALWAYS_INLINE
+void HandleRace(ThreadState *thr, u64 *shadow_mem,
Shadow cur, Shadow old) {
thr->racy_state[0] = cur.raw();
thr->racy_state[1] = old.raw();
@@ -404,16 +537,12 @@ static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
#endif
}
-static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
- return old.epoch() >= thr->fast_synch_epoch;
-}
-
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}
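HappensBefore compares the old access's epoch against the current thread's vector-clock entry for the thread that made the old access. A worked example with small illustrative values:

// Old shadow cell (illustrative): written by tid 3 at epoch 17.
// thr->clock.get(3) == 20  =>  20 >= 17, HappensBefore() is true: the current
//                              thread has synchronized past that access, no race.
// thr->clock.get(3) == 15  =>  15 <  17, the accesses are concurrent and remain
//                              race candidates.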
-ALWAYS_INLINE USED
-void MemoryAccessImpl(ThreadState *thr, uptr addr,
+ALWAYS_INLINE
+void MemoryAccessImpl1(ThreadState *thr, uptr addr,
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
u64 *shadow_mem, Shadow cur) {
StatInc(thr, StatMop);
@@ -491,13 +620,13 @@ void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
while (size) {
int size1 = 1;
int kAccessSizeLog = kSizeLog1;
- if (size >= 8 && (addr & ~7) == ((addr + 8) & ~7)) {
+ if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
size1 = 8;
kAccessSizeLog = kSizeLog8;
- } else if (size >= 4 && (addr & ~7) == ((addr + 4) & ~7)) {
+ } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
size1 = 4;
kAccessSizeLog = kSizeLog4;
- } else if (size >= 2 && (addr & ~7) == ((addr + 2) & ~7)) {
+ } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
size1 = 2;
kAccessSizeLog = kSizeLog2;
}
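The size1 selection now tests whether the last byte of the candidate sub-access (addr + size1 - 1) still falls in the same 8-byte shadow cell as the first byte; the old code used addr + size1 and therefore rejected accesses that end exactly on a cell boundary. A worked example, assuming addr = 0x1004 and size = 4 (bytes 0x1004..0x1007, all inside the cell 0x1000..0x1007):

// old test: (0x1004 & ~7) == ((0x1004 + 4) & ~7)  ->  0x1000 == 0x1008  -> false,
//           so the access was needlessly split into two 2-byte accesses.
// new test: (0x1004 & ~7) == ((0x1004 + 3) & ~7)  ->  0x1000 == 0x1000  -> true,
//           so it is handled as a single 4-byte access.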
@@ -507,6 +636,92 @@ void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
}
}
+ALWAYS_INLINE
+bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+ Shadow cur(a);
+ for (uptr i = 0; i < kShadowCnt; i++) {
+ Shadow old(LoadShadow(&s[i]));
+ if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
+ old.TidWithIgnore() == cur.TidWithIgnore() &&
+ old.epoch() > sync_epoch &&
+ old.IsAtomic() == cur.IsAtomic() &&
+ old.IsRead() <= cur.IsRead())
+ return true;
+ }
+ return false;
+}
+
+#if defined(__SSE3__) && TSAN_SHADOW_COUNT == 4
+#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
+ _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
+ (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
+ALWAYS_INLINE
+bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+ // This is an optimized version of ContainsSameAccessSlow.
+ // load current access into access[0:63]
+ const m128 access = _mm_cvtsi64_si128(a);
+ // duplicate high part of access in addr0:
+ // addr0[0:31] = access[32:63]
+ // addr0[32:63] = access[32:63]
+ // addr0[64:95] = access[32:63]
+ // addr0[96:127] = access[32:63]
+ const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
+ // load 4 shadow slots
+ const m128 shadow0 = _mm_load_si128((__m128i*)s);
+ const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
+ // load high parts of 4 shadow slots into addr_vect:
+ // addr_vect[0:31] = shadow0[32:63]
+ // addr_vect[32:63] = shadow0[96:127]
+ // addr_vect[64:95] = shadow1[32:63]
+ // addr_vect[96:127] = shadow1[96:127]
+ m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
+ if (!is_write) {
+ // set IsRead bit in addr_vect
+ const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
+ const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
+ addr_vect = _mm_or_si128(addr_vect, rw_mask);
+ }
+ // addr0 == addr_vect?
+ const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
+ // epoch1[0:63] = sync_epoch
+ const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
+ // epoch[0:31] = sync_epoch[0:31]
+ // epoch[32:63] = sync_epoch[0:31]
+ // epoch[64:95] = sync_epoch[0:31]
+ // epoch[96:127] = sync_epoch[0:31]
+ const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
+ // load low parts of shadow cell epochs into epoch_vect:
+ // epoch_vect[0:31] = shadow0[0:31]
+ // epoch_vect[32:63] = shadow0[64:95]
+ // epoch_vect[64:95] = shadow1[0:31]
+ // epoch_vect[96:127] = shadow1[64:95]
+ const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
+ // epoch_vect >= sync_epoch?
+ const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
+ // addr_res & epoch_res
+ const m128 res = _mm_and_si128(addr_res, epoch_res);
+ // mask[0] = res[7]
+ // mask[1] = res[15]
+ // ...
+ // mask[15] = res[127]
+ const int mask = _mm_movemask_epi8(res);
+ return mask != 0;
+}
+#endif
+
+ALWAYS_INLINE
+bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+#if defined(__SSE3__) && TSAN_SHADOW_COUNT == 4
+ bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
+ // NOTE: this check can fail if the shadow is concurrently mutated
+ // by other threads.
+ DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
+ return res;
+#else
+ return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
+#endif
+}
+
ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
@@ -528,7 +743,7 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
}
#endif
- if (*shadow_mem == kShadowRodata) {
+ if (kCppMode && *shadow_mem == kShadowRodata) {
// Access to .rodata section, no races here.
// Measurements show that it can be 10-20% of all memory accesses.
StatInc(thr, StatMop);
@@ -539,20 +754,54 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
}
FastState fast_state = thr->fast_state;
- if (fast_state.GetIgnoreBit())
+ if (fast_state.GetIgnoreBit()) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopIgnored);
return;
- fast_state.IncrementEpoch();
- thr->fast_state = fast_state;
+ }
+
Shadow cur(fast_state);
cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
cur.SetWrite(kAccessIsWrite);
cur.SetAtomic(kIsAtomic);
- // We must not store to the trace if we do not store to the shadow.
- // That is, this call must be moved somewhere below.
- TraceAddEvent(thr, fast_state, EventTypeMop, pc);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
+ thr->fast_synch_epoch, kAccessIsWrite))) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopSame);
+ return;
+ }
+
+ if (kCollectHistory) {
+ fast_state.IncrementEpoch();
+ thr->fast_state = fast_state;
+ TraceAddEvent(thr, fast_state, EventTypeMop, pc);
+ cur.IncrementEpoch();
+ }
+
+ MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
+ shadow_mem, cur);
+}
+
+// Called by MemoryAccessRange in tsan_rtl_thread.cc
+ALWAYS_INLINE USED
+void MemoryAccessImpl(ThreadState *thr, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
+ u64 *shadow_mem, Shadow cur) {
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
+ thr->fast_synch_epoch, kAccessIsWrite))) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopSame);
+ return;
+ }
- MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
+ MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
shadow_mem, cur);
}
@@ -581,7 +830,7 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
// UnmapOrDie/MmapFixedNoReserve does not work on Windows,
// so we do it only for C/C++.
- if (kGoMode || size < 64*1024) {
+ if (kGoMode || size < common_flags()->clear_shadow_mmap_threshold) {
u64 *p = (u64*)MemToShadow(addr);
CHECK(IsShadowMem((uptr)p));
CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
@@ -631,8 +880,10 @@ void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
thr->is_freeing = true;
MemoryAccessRange(thr, pc, addr, size, true);
thr->is_freeing = false;
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ }
Shadow s(thr->fast_state);
s.ClearIgnoreBit();
s.MarkAsFreed();
@@ -642,8 +893,10 @@ void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
}
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ }
Shadow s(thr->fast_state);
s.ClearIgnoreBit();
s.SetWrite(true);
@@ -653,11 +906,12 @@ void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
- DCHECK_EQ(thr->in_rtl, 0);
StatInc(thr, StatFuncEnter);
DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
+ }
// Shadow stack maintenance can be replaced with
// stack unwinding during trace switch (which presumably must be faster).
@@ -665,17 +919,8 @@ void FuncEntry(ThreadState *thr, uptr pc) {
#ifndef TSAN_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
- if (thr->shadow_stack_pos == thr->shadow_stack_end) {
- const int sz = thr->shadow_stack_end - thr->shadow_stack;
- const int newsz = 2 * sz;
- uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
- newsz * sizeof(uptr));
- internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
- internal_free(thr->shadow_stack);
- thr->shadow_stack = newstack;
- thr->shadow_stack_pos = newstack + sz;
- thr->shadow_stack_end = newstack + newsz;
- }
+ if (thr->shadow_stack_pos == thr->shadow_stack_end)
+ GrowShadowStack(thr);
#endif
thr->shadow_stack_pos[0] = pc;
thr->shadow_stack_pos++;
@@ -683,11 +928,12 @@ void FuncEntry(ThreadState *thr, uptr pc) {
ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
- DCHECK_EQ(thr->in_rtl, 0);
StatInc(thr, StatFuncExit);
DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
+ }
DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#ifndef TSAN_GO
@@ -702,7 +948,8 @@ void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
CHECK_GT(thr->ignore_reads_and_writes, 0);
thr->fast_state.SetIgnoreBit();
#ifndef TSAN_GO
- thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
+ if (!ctx->after_multithreaded_fork)
+ thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}
@@ -723,7 +970,8 @@ void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
thr->ignore_sync++;
CHECK_GT(thr->ignore_sync, 0);
#ifndef TSAN_GO
- thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
+ if (!ctx->after_multithreaded_fork)
+ thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}
@@ -733,7 +981,7 @@ void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
CHECK_GE(thr->ignore_sync, 0);
#ifndef TSAN_GO
if (thr->ignore_sync == 0)
- thr->mop_ignore_set.Reset();
+ thr->sync_ignore_set.Reset();
#endif
}
diff --git a/libsanitizer/tsan/tsan_rtl.h b/libsanitizer/tsan/tsan_rtl.h
index 20493ea4d3..a5b88e0c35 100644
--- a/libsanitizer/tsan/tsan_rtl.h
+++ b/libsanitizer/tsan/tsan_rtl.h
@@ -28,6 +28,7 @@
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
@@ -41,6 +42,7 @@
#include "tsan_platform.h"
#include "tsan_mutexset.h"
#include "tsan_ignoreset.h"
+#include "tsan_stack_trace.h"
#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
@@ -48,87 +50,9 @@
namespace __tsan {
-// Descriptor of user's memory block.
-struct MBlock {
- /*
- u64 mtx : 1; // must be first
- u64 lst : 44;
- u64 stk : 31; // on word boundary
- u64 tid : kTidBits;
- u64 siz : 128 - 1 - 31 - 44 - kTidBits; // 39
- */
- u64 raw[2];
-
- void Init(uptr siz, u32 tid, u32 stk) {
- raw[0] = raw[1] = 0;
- raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64);
- raw[1] |= (u64)tid << ((1 + 44 + 31) % 64);
- raw[0] |= (u64)stk << (1 + 44);
- raw[1] |= (u64)stk >> (64 - 44 - 1);
- DCHECK_EQ(Size(), siz);
- DCHECK_EQ(Tid(), tid);
- DCHECK_EQ(StackId(), stk);
- }
-
- u32 Tid() const {
- return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits);
- }
-
- uptr Size() const {
- return raw[1] >> ((1 + 31 + 44 + kTidBits) % 64);
- }
-
- u32 StackId() const {
- return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31);
- }
-
- SyncVar *ListHead() const {
- return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3);
- }
-
- void ListPush(SyncVar *v) {
- SyncVar *lst = ListHead();
- v->next = lst;
- u64 x = (u64)v ^ (u64)lst;
- x = (x >> 3) << 1;
- raw[0] ^= x;
- DCHECK_EQ(ListHead(), v);
- }
-
- SyncVar *ListPop() {
- SyncVar *lst = ListHead();
- SyncVar *nxt = lst->next;
- lst->next = 0;
- u64 x = (u64)lst ^ (u64)nxt;
- x = (x >> 3) << 1;
- raw[0] ^= x;
- DCHECK_EQ(ListHead(), nxt);
- return lst;
- }
-
- void ListReset() {
- SyncVar *lst = ListHead();
- u64 x = (u64)lst;
- x = (x >> 3) << 1;
- raw[0] ^= x;
- DCHECK_EQ(ListHead(), 0);
- }
-
- void Lock();
- void Unlock();
- typedef GenericScopedLock<MBlock> ScopedLock;
-};
-
#ifndef TSAN_GO
-#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
-const uptr kAllocatorSpace = 0x7d0000000000ULL;
-#else
-const uptr kAllocatorSpace = 0x7d0000000000ULL;
-#endif
-const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
-
struct MapUnmapCallback;
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
+typedef SizeClassAllocator64<kHeapMemBeg, kHeapMemEnd - kHeapMemBeg, 0,
DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
@@ -145,14 +69,14 @@ const u64 kShadowRodata = (u64)-1; // .rodata shadow marker
// FastState (from most significant bit):
// ignore : 1
// tid : kTidBits
-// epoch : kClkBits
// unused : -
// history_size : 3
+// epoch : kClkBits
class FastState {
public:
FastState(u64 tid, u64 epoch) {
x_ = tid << kTidShift;
- x_ |= epoch << kClkShift;
+ x_ |= epoch;
DCHECK_EQ(tid, this->tid());
DCHECK_EQ(epoch, this->epoch());
DCHECK_EQ(GetIgnoreBit(), false);
@@ -177,13 +101,13 @@ class FastState {
}
u64 epoch() const {
- u64 res = (x_ << (kTidBits + 1)) >> (64 - kClkBits);
+ u64 res = x_ & ((1ull << kClkBits) - 1);
return res;
}
void IncrementEpoch() {
u64 old_epoch = epoch();
- x_ += 1 << kClkShift;
+ x_ += 1;
DCHECK_EQ(old_epoch + 1, epoch());
(void)old_epoch;
}
@@ -195,17 +119,19 @@ class FastState {
void SetHistorySize(int hs) {
CHECK_GE(hs, 0);
CHECK_LE(hs, 7);
- x_ = (x_ & ~7) | hs;
+ x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
}
+ ALWAYS_INLINE
int GetHistorySize() const {
- return (int)(x_ & 7);
+ return (int)((x_ >> kHistoryShift) & kHistoryMask);
}
void ClearHistorySize() {
- x_ &= ~7;
+ SetHistorySize(0);
}
+ ALWAYS_INLINE
u64 GetTracePos() const {
const int hs = GetHistorySize();
// When hs == 0, the trace consists of 2 parts.
@@ -216,20 +142,21 @@ class FastState {
private:
friend class Shadow;
static const int kTidShift = 64 - kTidBits - 1;
- static const int kClkShift = kTidShift - kClkBits;
static const u64 kIgnoreBit = 1ull << 63;
static const u64 kFreedBit = 1ull << 63;
+ static const u64 kHistoryShift = kClkBits;
+ static const u64 kHistoryMask = 7;
u64 x_;
};
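With this patch the epoch moves to the low kClkBits of the packed word, the 3-bit history size sits directly above it, and the tid stays just below the ignore bit, which is what the shift and mask changes in this class implement. A compact packing sketch consistent with the accessors above (kTidBits and kClkBits are the existing tsan_defs.h constants; this is illustrative, not a copy of the constructor):

u64 PackFastState(u64 tid, u64 epoch, u64 history_size) {
  u64 x = 0;
  x |= tid << (64 - kTidBits - 1);  // kTidShift: directly below the ignore bit
  x |= history_size << kClkBits;    // kHistoryShift == kClkBits, 3 bits wide
  x |= epoch;                       // epoch now occupies the low kClkBits
  return x;
}
// epoch()          is x & ((1ull << kClkBits) - 1)
// IncrementEpoch() is simply x += 1, no shift needed any more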
// Shadow (from most significant bit):
// freed : 1
// tid : kTidBits
-// epoch : kClkBits
// is_atomic : 1
// is_read : 1
// size_log : 2
// addr0 : 3
+// epoch : kClkBits
class Shadow : public FastState {
public:
explicit Shadow(u64 x)
@@ -242,10 +169,10 @@ class Shadow : public FastState {
}
void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
- DCHECK_EQ(x_ & 31, 0);
+ DCHECK_EQ((x_ >> kClkBits) & 31, 0);
DCHECK_LE(addr0, 7);
DCHECK_LE(kAccessSizeLog, 3);
- x_ |= (kAccessSizeLog << 3) | addr0;
+ x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
DCHECK_EQ(kAccessSizeLog, size_log());
DCHECK_EQ(addr0, this->addr0());
}
@@ -278,47 +205,34 @@ class Shadow : public FastState {
return shifted_xor == 0;
}
- static inline bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
- u64 masked_xor = (s1.x_ ^ s2.x_) & 31;
+ static ALWAYS_INLINE
+ bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
+ u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
return masked_xor == 0;
}
- static inline bool TwoRangesIntersect(Shadow s1, Shadow s2,
+ static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
unsigned kS2AccessSize) {
bool res = false;
u64 diff = s1.addr0() - s2.addr0();
if ((s64)diff < 0) { // s1.addr0 < s2.addr0 // NOLINT
// if (s1.addr0() + size1) > s2.addr0()) return true;
- if (s1.size() > -diff) res = true;
+ if (s1.size() > -diff)
+ res = true;
} else {
// if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
- if (kS2AccessSize > diff) res = true;
+ if (kS2AccessSize > diff)
+ res = true;
}
- DCHECK_EQ(res, TwoRangesIntersectSLOW(s1, s2));
- DCHECK_EQ(res, TwoRangesIntersectSLOW(s2, s1));
+ DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
+ DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
return res;
}
- // The idea behind the offset is as follows.
- // Consider that we have 8 bool's contained within a single 8-byte block
- // (mapped to a single shadow "cell"). Now consider that we write to the bools
- // from a single thread (which we consider the common case).
- // W/o offsetting each access will have to scan 4 shadow values at average
- // to find the corresponding shadow value for the bool.
- // With offsetting we start scanning shadow with the offset so that
- // each access hits necessary shadow straight off (at least in an expected
- // optimistic case).
- // This logic works seamlessly for any layout of user data. For example,
- // if user data is {int, short, char, char}, then accesses to the int are
- // offsetted to 0, short - 4, 1st char - 6, 2nd char - 7. Hopefully, accesses
- // from a single thread won't need to scan all 8 shadow values.
- unsigned ComputeSearchOffset() {
- return x_ & 7;
- }
- u64 addr0() const { return x_ & 7; }
- u64 size() const { return 1ull << size_log(); }
- bool IsWrite() const { return !IsRead(); }
- bool IsRead() const { return x_ & kReadBit; }
+ u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
+ u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
+ bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
+ bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }
// The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise inaccessible) we write to the shadow
@@ -343,15 +257,14 @@ class Shadow : public FastState {
return res;
}
- bool IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
- // analyzes 5-th bit (is_read) and 6-th bit (is_atomic)
- bool v = x_ & u64(((kIsWrite ^ 1) << kReadShift)
- | (kIsAtomic << kAtomicShift));
+ bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
+ bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
+ | (u64(kIsAtomic) << kAtomicShift));
DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
return v;
}
- bool IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
+ bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
bool v = ((x_ >> kReadShift) & 3)
<= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
@@ -359,7 +272,7 @@ class Shadow : public FastState {
return v;
}
- bool IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
+ bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
bool v = ((x_ >> kReadShift) & 3)
>= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
@@ -368,14 +281,14 @@ class Shadow : public FastState {
}
private:
- static const u64 kReadShift = 5;
+ static const u64 kReadShift = 5 + kClkBits;
static const u64 kReadBit = 1ull << kReadShift;
- static const u64 kAtomicShift = 6;
+ static const u64 kAtomicShift = 6 + kClkBits;
static const u64 kAtomicBit = 1ull << kAtomicShift;
- u64 size_log() const { return (x_ >> 3) & 3; }
+ u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }
- static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
+ static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
if (s1.addr0() == s2.addr0()) return true;
if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
return true;
@@ -390,6 +303,9 @@ struct SignalContext;
struct JmpBuf {
uptr sp;
uptr mangled_sp;
+ int int_signal_send;
+ bool in_blocking_func;
+ uptr in_signal_handler;
uptr *shadow_stack_pos;
};
@@ -431,14 +347,14 @@ struct ThreadState {
AllocatorCache alloc_cache;
InternalAllocatorCache internal_alloc_cache;
Vector<JmpBuf> jmp_bufs;
+ int ignore_interceptors;
#endif
u64 stat[StatCnt];
const int tid;
const int unique_id;
- int in_rtl;
bool in_symbolizer;
bool in_ignored_lib;
- bool is_alive;
+ bool is_dead;
bool is_freeing;
bool is_vptr_access;
const uptr stk_addr;
@@ -447,11 +363,17 @@ struct ThreadState {
const uptr tls_size;
ThreadContext *tctx;
- DeadlockDetector deadlock_detector;
+ InternalDeadlockDetector internal_deadlock_detector;
+ DDPhysicalThread *dd_pt;
+ DDLogicalThread *dd_lt;
- bool in_signal_handler;
+ atomic_uintptr_t in_signal_handler;
SignalContext *signal_ctx;
+ DenseSlabAllocCache block_cache;
+ DenseSlabAllocCache sync_cache;
+ DenseSlabAllocCache clock_cache;
+
#ifndef TSAN_GO
u32 last_sleep_stack_id;
ThreadClock last_sleep_clock;
@@ -462,13 +384,13 @@ struct ThreadState {
int nomalloc;
explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
+ unsigned reuse_count,
uptr stk_addr, uptr stk_size,
uptr tls_addr, uptr tls_size);
};
-Context *CTX();
-
#ifndef TSAN_GO
+__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
@@ -480,11 +402,7 @@ class ThreadContext : public ThreadContextBase {
explicit ThreadContext(int tid);
~ThreadContext();
ThreadState *thr;
-#ifdef TSAN_GO
- StackTrace creation_stack;
-#else
u32 creation_stack_id;
-#endif
SyncClock sync;
// Epoch at which the thread had started.
// If we see an event from the thread stamped by an older epoch,
@@ -499,6 +417,7 @@ class ThreadContext : public ThreadContextBase {
void OnStarted(void *arg);
void OnCreated(void *arg);
void OnReset();
+ void OnDetached(void *arg);
};
struct RacyStacks {
@@ -527,20 +446,27 @@ struct Context {
Context();
bool initialized;
+ bool after_multithreaded_fork;
- SyncTab synctab;
+ MetaMap metamap;
Mutex report_mtx;
int nreported;
int nmissed_expected;
atomic_uint64_t last_symbolize_time_ns;
+ void *background_thread;
+ atomic_uint32_t stop_background_thread;
+
ThreadRegistry *thread_registry;
Vector<RacyStacks> racy_stacks;
Vector<RacyAddress> racy_addresses;
// Number of fired suppressions may be large enough.
InternalMmapVector<FiredSuppression> fired_suppressions;
+ DDetector *dd;
+
+ ClockAlloc clock_alloc;
Flags flags;
@@ -549,14 +475,20 @@ struct Context {
u64 int_alloc_siz[MBlockTypeCount];
};
-class ScopedInRtl {
- public:
- ScopedInRtl();
- ~ScopedInRtl();
- private:
- ThreadState*thr_;
- int in_rtl_;
- int errno_;
+extern Context *ctx; // The one and the only global runtime context.
+
+struct ScopedIgnoreInterceptors {
+ ScopedIgnoreInterceptors() {
+#ifndef TSAN_GO
+ cur_thread()->ignore_interceptors++;
+#endif
+ }
+
+ ~ScopedIgnoreInterceptors() {
+#ifndef TSAN_GO
+ cur_thread()->ignore_interceptors--;
+#endif
+ }
};
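ScopedIgnoreInterceptors replaces the old ScopedInRtl: while an instance is alive, libc calls made by the runtime itself (for example during symbolization) go straight to the real functions instead of re-entering TSan. A minimal usage sketch inside a runtime-internal helper (the helper itself is hypothetical):

void SomeRuntimeInternalWork() {   // hypothetical helper, not part of the patch
  ScopedIgnoreInterceptors ignore; // bumps cur_thread()->ignore_interceptors
  // ... call functions that may hit intercepted libc entry points ...
}                                  // destructor restores the counter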
class ScopedReport {
@@ -564,11 +496,14 @@ class ScopedReport {
explicit ScopedReport(ReportType typ);
~ScopedReport();
- void AddStack(const StackTrace *stack);
- void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack,
+ void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
const MutexSet *mset);
- void AddThread(const ThreadContext *tctx);
+ void AddStack(StackTrace stack, bool suppressable = false);
+ void AddThread(const ThreadContext *tctx, bool suppressable = false);
+ void AddThread(int unique_tid, bool suppressable = false);
+ void AddUniqueTid(int unique_tid);
void AddMutex(const SyncVar *s);
+ u64 AddMutex(u64 id);
void AddLocation(uptr addr, uptr size);
void AddSleep(u32 stack_id);
void SetCount(int count);
@@ -576,16 +511,31 @@ class ScopedReport {
const ReportDesc *GetReport() const;
private:
- Context *ctx_;
ReportDesc *rep_;
+ // Symbolizer makes lots of intercepted calls. If we try to process them,
+ // at best it will cause deadlocks on internal mutexes.
+ ScopedIgnoreInterceptors ignore_interceptors_;
- void AddMutex(u64 id);
+ void AddDeadMutex(u64 id);
ScopedReport(const ScopedReport&);
void operator = (const ScopedReport&);
};
-void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset);
+void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
+ MutexSet *mset);
+
+template<typename StackTraceTy>
+void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) {
+ uptr size = thr->shadow_stack_pos - thr->shadow_stack;
+ uptr start = 0;
+ if (size + !!toppc > kStackTraceMax) {
+ start = size + !!toppc - kStackTraceMax;
+ size = kStackTraceMax - !!toppc;
+ }
+ stack->Init(&thr->shadow_stack[start], size, toppc);
+}
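ObtainCurrentStack copies the thread's shadow stack into the caller-provided trace, appending toppc when it is non-zero and dropping the oldest frames when the total would exceed kStackTraceMax. A worked example with illustrative numbers (assuming kStackTraceMax == 256, the usual sanitizer_common value):

// Shadow stack holds 300 frames and toppc != 0:
//   size + !!toppc = 301 > 256
//   start = 301 - 256 = 45      // drop the 45 oldest frames
//   size  = 256 - 1   = 255     // keep the 255 newest frames
// stack->Init(&shadow_stack[45], 255, toppc)  ->  256 entries total.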
+
void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
@@ -606,15 +556,14 @@ void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();
+void ForkBefore(ThreadState *thr, uptr pc);
+void ForkParentAfter(ThreadState *thr, uptr pc);
+void ForkChildAfter(ThreadState *thr, uptr pc);
+
void ReportRace(ThreadState *thr);
-bool OutputReport(Context *ctx,
- const ScopedReport &srep,
- const ReportStack *suppress_stack1 = 0,
- const ReportStack *suppress_stack2 = 0,
- const ReportLocation *suppress_loc = 0);
-bool IsFiredSuppression(Context *ctx,
- const ScopedReport &srep,
- const StackTrace &trace);
+bool OutputReport(ThreadState *thr, const ScopedReport &srep);
+bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
+ StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();
bool FrameIsInternal(const ReportStack *frame);
@@ -635,14 +584,13 @@ ReportStack *SkipTsanInternalFrames(ReportStack *ent);
u32 CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
-void PrintCurrentStackSlow(); // uses libunwind
+void PrintCurrentStackSlow(uptr pc); // uses libunwind
void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);
-SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
- bool write_lock, bool create);
-SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr);
+void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
+void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
@@ -707,9 +655,10 @@ void ProcessPendingSignals(ThreadState *thr);
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
-void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1);
+void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1,
+ bool try_lock = false);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
-void MutexReadLock(ThreadState *thr, uptr pc, uptr addr);
+void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr); // call on EOWNERDEAD
@@ -730,7 +679,7 @@ void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use normal call.
-#if TSAN_DEBUG == 0
+#if TSAN_DEBUG == 0 && defined(__x86_64__)
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024b must be enough).
#define HACKY_CALL(f) \
@@ -754,6 +703,8 @@ Trace *ThreadTrace(int tid);
extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
EventType typ, u64 addr) {
+ if (!kCollectHistory)
+ return;
DCHECK_GE((int)typ, 0);
DCHECK_LE((int)typ, 7);
DCHECK_EQ(GetLsb(addr, 61), addr);
diff --git a/libsanitizer/tsan/tsan_rtl_amd64.S b/libsanitizer/tsan/tsan_rtl_amd64.S
index 71a2ecda9d..6df36a500a 100644
--- a/libsanitizer/tsan/tsan_rtl_amd64.S
+++ b/libsanitizer/tsan/tsan_rtl_amd64.S
@@ -170,10 +170,15 @@ setjmp:
CFI_ADJUST_CFA_OFFSET(8)
CFI_REL_OFFSET(%rdi, 0)
// obtain %rsp
+#if defined(__FreeBSD__)
+ lea 8(%rsp), %rdi
+ mov %rdi, %rsi
+#else
lea 16(%rsp), %rdi
mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi
+#endif
// call tsan interceptor
call __tsan_setjmp
// restore env parameter
@@ -197,10 +202,15 @@ _setjmp:
CFI_ADJUST_CFA_OFFSET(8)
CFI_REL_OFFSET(%rdi, 0)
// obtain %rsp
+#if defined(__FreeBSD__)
+ lea 8(%rsp), %rdi
+ mov %rdi, %rsi
+#else
lea 16(%rsp), %rdi
mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi
+#endif
// call tsan interceptor
call __tsan_setjmp
// restore env parameter
@@ -231,10 +241,15 @@ sigsetjmp:
sub $8, %rsp
CFI_ADJUST_CFA_OFFSET(8)
// obtain %rsp
+#if defined(__FreeBSD__)
+ lea 24(%rsp), %rdi
+ mov %rdi, %rsi
+#else
lea 32(%rsp), %rdi
mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi
+#endif
// call tsan interceptor
call __tsan_setjmp
// unalign stack frame
@@ -272,10 +287,15 @@ __sigsetjmp:
sub $8, %rsp
CFI_ADJUST_CFA_OFFSET(8)
// obtain %rsp
+#if defined(__FreeBSD__)
+ lea 24(%rsp), %rdi
+ mov %rdi, %rsi
+#else
lea 32(%rsp), %rdi
mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi
+#endif
// call tsan interceptor
call __tsan_setjmp
// unalign stack frame
@@ -296,7 +316,7 @@ __sigsetjmp:
CFI_ENDPROC
.size __sigsetjmp, .-__sigsetjmp
-#ifdef __linux__
+#if defined(__FreeBSD__) || defined(__linux__)
/* We do not need executable stack. */
.section .note.GNU-stack,"",@progbits
#endif
diff --git a/libsanitizer/tsan/tsan_rtl_mutex.cc b/libsanitizer/tsan/tsan_rtl_mutex.cc
index d9a3a3baa3..d731b4bfb5 100644
--- a/libsanitizer/tsan/tsan_rtl_mutex.cc
+++ b/libsanitizer/tsan/tsan_rtl_mutex.cc
@@ -9,6 +9,9 @@
//
//===----------------------------------------------------------------------===//
+#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
+#include <sanitizer_common/sanitizer_stackdepot.h>
+
#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
@@ -18,10 +21,51 @@
namespace __tsan {
+void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
+
+struct Callback : DDCallback {
+ ThreadState *thr;
+ uptr pc;
+
+ Callback(ThreadState *thr, uptr pc)
+ : thr(thr)
+ , pc(pc) {
+ DDCallback::pt = thr->dd_pt;
+ DDCallback::lt = thr->dd_lt;
+ }
+
+ virtual u32 Unwind() {
+ return CurrentStackId(thr, pc);
+ }
+ virtual int UniqueTid() {
+ return thr->unique_id;
+ }
+};
+
+void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexInit(&cb, &s->dd);
+ s->dd.ctx = s->GetId();
+}
+
+static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
+ uptr addr, u64 mid) {
+ // In Go, these misuses are either impossible, or detected by std lib,
+ // or false positives (e.g. unlock in a different thread).
+ if (kGoMode)
+ return;
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(typ);
+ rep.AddMutex(mid);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace, true);
+ rep.AddLocation(addr, 1);
+ OutputReport(thr, rep);
+}
+
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
bool rw, bool recursive, bool linker_init) {
- Context *ctx = CTX();
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
StatInc(thr, StatMutexCreate);
if (!linker_init && IsAppMem(addr)) {
@@ -30,16 +74,16 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
MemoryWrite(thr, pc, addr, kSizeLog1);
thr->is_freeing = false;
}
- SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
s->is_rw = rw;
s->is_recursive = recursive;
s->is_linker_init = linker_init;
+ if (kCppMode && s->creation_stack_id == 0)
+ s->creation_stack_id = CurrentStackId(thr, pc);
s->mtx.Unlock();
}
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
- Context *ctx = CTX();
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
StatInc(thr, StatMutexDestroy);
#ifndef TSAN_GO
@@ -48,53 +92,74 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
if (IsGlobalVar(addr))
return;
#endif
- SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
- if (s == 0)
- return;
if (IsAppMem(addr)) {
CHECK(!thr->is_freeing);
thr->is_freeing = true;
MemoryWrite(thr, pc, addr, kSizeLog1);
thr->is_freeing = false;
}
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+ if (s == 0)
+ return;
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexDestroy(&cb, &s->dd);
+ ctx->dd->MutexInit(&cb, &s->dd);
+ }
+ bool unlock_locked = false;
if (flags()->report_destroy_locked
&& s->owner_tid != SyncVar::kInvalidTid
&& !s->is_broken) {
s->is_broken = true;
+ unlock_locked = true;
+ }
+ u64 mid = s->GetId();
+ u32 last_lock = s->last_lock;
+ if (!unlock_locked)
+ s->Reset(thr); // must not reset it before the report is printed
+ s->mtx.Unlock();
+ if (unlock_locked) {
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeMutexDestroyLocked);
- rep.AddMutex(s);
- StackTrace trace;
- trace.ObtainCurrent(thr, pc);
- rep.AddStack(&trace);
- FastState last(s->last_lock);
+ rep.AddMutex(mid);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace);
+ FastState last(last_lock);
RestoreStack(last.tid(), last.epoch(), &trace, 0);
- rep.AddStack(&trace);
- rep.AddLocation(s->addr, 1);
- OutputReport(ctx, rep);
+ rep.AddStack(trace, true);
+ rep.AddLocation(addr, 1);
+ OutputReport(thr, rep);
+ }
+ if (unlock_locked) {
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+ if (s != 0) {
+ s->Reset(thr);
+ s->mtx.Unlock();
+ }
}
- thr->mset.Remove(s->GetId());
- DestroyAndFree(s);
+ thr->mset.Remove(mid);
+ // s will be destroyed and freed in MetaMap::FreeBlock.
}
-void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
- CHECK_GT(thr->in_rtl, 0);
+void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
CHECK_GT(rec, 0);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
+ bool report_double_lock = false;
if (s->owner_tid == SyncVar::kInvalidTid) {
CHECK_EQ(s->recursion, 0);
s->owner_tid = thr->tid;
s->last_lock = thr->fast_state.raw();
} else if (s->owner_tid == thr->tid) {
CHECK_GT(s->recursion, 0);
- } else {
- Printf("ThreadSanitizer WARNING: double lock of mutex %p\n", addr);
- PrintCurrentStack(thr, pc);
+ } else if (flags()->report_mutex_bugs && !s->is_broken) {
+ s->is_broken = true;
+ report_double_lock = true;
}
if (s->recursion == 0) {
StatInc(thr, StatMutexLock);
@@ -105,30 +170,36 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
}
s->recursion += rec;
thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
+ if (common_flags()->detect_deadlocks && (s->recursion - rec) == 0) {
+ Callback cb(thr, pc);
+ if (!try_lock)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
+ }
+ u64 mid = s->GetId();
s->mtx.Unlock();
+ // Can't touch s after this point.
+ if (report_double_lock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
}
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
int rec = 0;
- if (s->recursion == 0) {
- if (!s->is_broken) {
+ bool report_bad_unlock = false;
+ if (kCppMode && (s->recursion == 0 || s->owner_tid != thr->tid)) {
+ if (flags()->report_mutex_bugs && !s->is_broken) {
s->is_broken = true;
- Printf("ThreadSanitizer WARNING: unlock of unlocked mutex %p\n", addr);
- PrintCurrentStack(thr, pc);
- }
- } else if (s->owner_tid != thr->tid) {
- if (!s->is_broken) {
- s->is_broken = true;
- Printf("ThreadSanitizer WARNING: mutex %p is unlocked by wrong thread\n",
- addr);
- PrintCurrentStack(thr, pc);
+ report_bad_unlock = true;
}
} else {
rec = all ? s->recursion : 1;
@@ -142,56 +213,97 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
}
}
thr->mset.Del(s->GetId(), true);
+ if (common_flags()->detect_deadlocks && s->recursion == 0 &&
+ !report_bad_unlock) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
+ }
+ u64 mid = s->GetId();
s->mtx.Unlock();
+ // Can't touch s after this point.
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+ if (common_flags()->detect_deadlocks && !report_bad_unlock) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
return rec;
}
-void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
+void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
StatInc(thr, StatMutexReadLock);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
+ bool report_bad_lock = false;
if (s->owner_tid != SyncVar::kInvalidTid) {
- Printf("ThreadSanitizer WARNING: read lock of a write locked mutex %p\n",
- addr);
- PrintCurrentStack(thr, pc);
+ if (flags()->report_mutex_bugs && !s->is_broken) {
+ s->is_broken = true;
+ report_bad_lock = true;
+ }
}
AcquireImpl(thr, pc, &s->clock);
s->last_lock = thr->fast_state.raw();
thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ if (!trylock)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
+ }
+ u64 mid = s->GetId();
s->mtx.ReadUnlock();
+ // Can't touch s after this point.
+ if (report_bad_lock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
}
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
StatInc(thr, StatMutexReadUnlock);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+ bool report_bad_unlock = false;
if (s->owner_tid != SyncVar::kInvalidTid) {
- Printf("ThreadSanitizer WARNING: read unlock of a write locked mutex %p\n",
- addr);
- PrintCurrentStack(thr, pc);
+ if (flags()->report_mutex_bugs && !s->is_broken) {
+ s->is_broken = true;
+ report_bad_unlock = true;
+ }
}
ReleaseImpl(thr, pc, &s->read_clock);
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
+ }
+ u64 mid = s->GetId();
s->mtx.Unlock();
- thr->mset.Del(s->GetId(), false);
+ // Can't touch s after this point.
+ thr->mset.Del(mid, false);
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
}
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
bool write = true;
+ bool report_bad_unlock = false;
if (s->owner_tid == SyncVar::kInvalidTid) {
// Seems to be read unlock.
write = false;
@@ -214,30 +326,37 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
}
} else if (!s->is_broken) {
s->is_broken = true;
- Printf("ThreadSanitizer WARNING: mutex %p is unlock by wrong thread\n",
- addr);
- PrintCurrentStack(thr, pc);
+ report_bad_unlock = true;
}
thr->mset.Del(s->GetId(), write);
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
+ }
+ u64 mid = s->GetId();
s->mtx.Unlock();
+ // Can't touch s after this point.
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
}
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
- Context *ctx = CTX();
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
- SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
s->owner_tid = SyncVar::kInvalidTid;
s->recursion = 0;
s->mtx.Unlock();
}
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
AcquireImpl(thr, pc, &s->clock);
s->mtx.ReadUnlock();
}
@@ -255,17 +374,16 @@ void AcquireGlobal(ThreadState *thr, uptr pc) {
DPrintf("#%d: AcquireGlobal\n", thr->tid);
if (thr->ignore_sync)
return;
- ThreadRegistryLock l(CTX()->thread_registry);
- CTX()->thread_registry->RunCallbackForEachThreadLocked(
+ ThreadRegistryLock l(ctx->thread_registry);
+ ctx->thread_registry->RunCallbackForEachThreadLocked(
UpdateClockCallback, thr);
}
void Release(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: Release %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -274,11 +392,10 @@ void Release(ThreadState *thr, uptr pc, uptr addr) {
}
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -301,8 +418,8 @@ void AfterSleep(ThreadState *thr, uptr pc) {
if (thr->ignore_sync)
return;
thr->last_sleep_stack_id = CurrentStackId(thr, pc);
- ThreadRegistryLock l(CTX()->thread_registry);
- CTX()->thread_registry->RunCallbackForEachThreadLocked(
+ ThreadRegistryLock l(ctx->thread_registry);
+ ctx->thread_registry->RunCallbackForEachThreadLocked(
UpdateSleepClockCallback, thr);
}
#endif
@@ -310,37 +427,63 @@ void AfterSleep(ThreadState *thr, uptr pc) {
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
if (thr->ignore_sync)
return;
- thr->clock.set(thr->tid, thr->fast_state.epoch());
- thr->clock.acquire(c);
+ thr->clock.set(thr->fast_state.epoch());
+ thr->clock.acquire(&thr->clock_cache, c);
StatInc(thr, StatSyncAcquire);
}
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
if (thr->ignore_sync)
return;
- thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.release(c);
+ thr->clock.release(&thr->clock_cache, c);
StatInc(thr, StatSyncRelease);
}
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
if (thr->ignore_sync)
return;
- thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.ReleaseStore(c);
+ thr->clock.ReleaseStore(&thr->clock_cache, c);
StatInc(thr, StatSyncRelease);
}
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
if (thr->ignore_sync)
return;
- thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.acq_rel(c);
+ thr->clock.acq_rel(&thr->clock_cache, c);
StatInc(thr, StatSyncAcquire);
StatInc(thr, StatSyncRelease);
}
+void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
+ if (r == 0)
+ return;
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(ReportTypeDeadlock);
+ for (int i = 0; i < r->n; i++) {
+ rep.AddMutex(r->loop[i].mtx_ctx0);
+ rep.AddUniqueTid((int)r->loop[i].thr_ctx);
+ rep.AddThread((int)r->loop[i].thr_ctx);
+ }
+ uptr dummy_pc = 0x42;
+ for (int i = 0; i < r->n; i++) {
+ for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
+ u32 stk = r->loop[i].stk[j];
+ if (stk) {
+ rep.AddStack(StackDepotGet(stk), true);
+ } else {
+ // Sometimes we fail to extract the stack trace (FIXME: investigate),
+ // but we should still produce some stack trace in the report.
+ rep.AddStack(StackTrace(&dummy_pc, 1), true);
+ }
+ }
+ }
+ OutputReport(thr, rep);
+}
+
} // namespace __tsan
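
Editor's note (not part of the patch): the mutex hunks above repeatedly converge on one ordering pattern: everything a report needs (notably the mutex id from `s->GetId()`) is captured while the SyncVar lock is still held, the lock is released, and only then is the report emitted, hence the recurring "Can't touch s after this point" comments. The following is a hedged, self-contained sketch of that pattern with stand-in types (`SyncVarSketch`, `ReportMisuseSketch` are hypothetical names, not TSan symbols):

```cpp
// Minimal illustrative sketch, assuming simplified stand-ins for TSan types.
#include <cstdint>
#include <cstdio>
#include <mutex>

struct SyncVarSketch {            // stand-in for __tsan::SyncVar
  std::mutex mtx;
  uint64_t id = 0;
  bool is_broken = false;
};

static void ReportMisuseSketch(uint64_t mid) {  // stand-in for ReportMutexMisuse
  std::printf("mutex misuse, mid=%llu\n", (unsigned long long)mid);
}

void UnlockThenReport(SyncVarSketch *s, bool misuse_detected) {
  s->mtx.lock();
  uint64_t mid = s->id;           // capture the id while the lock is held
  bool report = misuse_detected && !s->is_broken;
  if (report)
    s->is_broken = true;          // report each broken mutex only once
  s->mtx.unlock();                // "can't touch s after this point"
  if (report)
    ReportMisuseSketch(mid);      // reporting runs outside the SyncVar lock
}
```

This mirrors the design choice visible in MutexLock/MutexUnlock above: report generation takes the thread registry and report mutexes, so it must never run while a SyncVar is still locked.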
diff --git a/libsanitizer/tsan/tsan_rtl_report.cc b/libsanitizer/tsan/tsan_rtl_report.cc
index f2248afeab..f86cfd4681 100644
--- a/libsanitizer/tsan/tsan_rtl_report.cc
+++ b/libsanitizer/tsan/tsan_rtl_report.cc
@@ -28,15 +28,18 @@ namespace __tsan {
using namespace __sanitizer; // NOLINT
-static ReportStack *SymbolizeStack(const StackTrace& trace);
+static ReportStack *SymbolizeStack(StackTrace trace);
void TsanCheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2) {
- ScopedInRtl in_rtl;
+ // There is high probability that interceptors will check-fail as well,
+ // on the other hand there is no sense in processing interceptors
+ // since we are going to die soon.
+ ScopedIgnoreInterceptors ignore;
Printf("FATAL: ThreadSanitizer CHECK failed: "
"%s:%d \"%s\" (0x%zx, 0x%zx)\n",
file, line, cond, (uptr)v1, (uptr)v2);
- PrintCurrentStackSlow();
+ PrintCurrentStackSlow(StackTrace::GetCurrentPc());
Die();
}
@@ -54,27 +57,16 @@ bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
static void StackStripMain(ReportStack *stack) {
ReportStack *last_frame = 0;
ReportStack *last_frame2 = 0;
- const char *prefix = "__interceptor_";
- uptr prefix_len = internal_strlen(prefix);
- const char *path_prefix = flags()->strip_path_prefix;
- uptr path_prefix_len = internal_strlen(path_prefix);
- char *pos;
for (ReportStack *ent = stack; ent; ent = ent->next) {
- if (ent->func && 0 == internal_strncmp(ent->func, prefix, prefix_len))
- ent->func += prefix_len;
- if (ent->file && (pos = internal_strstr(ent->file, path_prefix)))
- ent->file = pos + path_prefix_len;
- if (ent->file && ent->file[0] == '.' && ent->file[1] == '/')
- ent->file += 2;
last_frame2 = last_frame;
last_frame = ent;
}
if (last_frame2 == 0)
return;
- const char *last = last_frame->func;
+ const char *last = last_frame->info.function;
#ifndef TSAN_GO
- const char *last2 = last_frame2->func;
+ const char *last2 = last_frame2->info.function;
// Strip frame above 'main'
if (last2 && 0 == internal_strcmp(last2, "main")) {
last_frame2->next = 0;
@@ -99,42 +91,39 @@ static void StackStripMain(ReportStack *stack) {
#endif
}
-#ifndef TSAN_GO
ReportStack *SymbolizeStackId(u32 stack_id) {
- uptr ssz = 0;
- const uptr *stack = StackDepotGet(stack_id, &ssz);
- if (stack == 0)
+ if (stack_id == 0)
return 0;
- StackTrace trace;
- trace.Init(stack, ssz);
- return SymbolizeStack(trace);
+ StackTrace stack = StackDepotGet(stack_id);
+ if (stack.trace == nullptr)
+ return nullptr;
+ return SymbolizeStack(stack);
}
-#endif
-static ReportStack *SymbolizeStack(const StackTrace& trace) {
- if (trace.IsEmpty())
+static ReportStack *SymbolizeStack(StackTrace trace) {
+ if (trace.size == 0)
return 0;
ReportStack *stack = 0;
- for (uptr si = 0; si < trace.Size(); si++) {
- const uptr pc = trace.Get(si);
+ for (uptr si = 0; si < trace.size; si++) {
+ const uptr pc = trace.trace[si];
#ifndef TSAN_GO
// We obtain the return address, that is, address of the next instruction,
// so offset it by 1 byte.
- const uptr pc1 = __sanitizer::StackTrace::GetPreviousInstructionPc(pc);
+ const uptr pc1 = StackTrace::GetPreviousInstructionPc(pc);
#else
// FIXME(dvyukov): Go sometimes uses address of a function as top pc.
uptr pc1 = pc;
- if (si != trace.Size() - 1)
+ if (si != trace.size - 1)
pc1 -= 1;
#endif
ReportStack *ent = SymbolizeCode(pc1);
CHECK_NE(ent, 0);
ReportStack *last = ent;
while (last->next) {
- last->pc = pc; // restore original pc for report
+ last->info.address = pc; // restore original pc for report
last = last->next;
}
- last->pc = pc; // restore original pc for report
+ last->info.address = pc; // restore original pc for report
last->next = stack;
stack = ent;
}
@@ -143,28 +132,28 @@ static ReportStack *SymbolizeStack(const StackTrace& trace) {
}
ScopedReport::ScopedReport(ReportType typ) {
- ctx_ = CTX();
- ctx_->thread_registry->CheckLocked();
+ ctx->thread_registry->CheckLocked();
void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
rep_ = new(mem) ReportDesc;
rep_->typ = typ;
- ctx_->report_mtx.Lock();
+ ctx->report_mtx.Lock();
CommonSanitizerReportMutex.Lock();
}
ScopedReport::~ScopedReport() {
CommonSanitizerReportMutex.Unlock();
- ctx_->report_mtx.Unlock();
+ ctx->report_mtx.Unlock();
DestroyAndFree(rep_);
}
-void ScopedReport::AddStack(const StackTrace *stack) {
+void ScopedReport::AddStack(StackTrace stack, bool suppressable) {
ReportStack **rs = rep_->stacks.PushBack();
- *rs = SymbolizeStack(*stack);
+ *rs = SymbolizeStack(stack);
+ (*rs)->suppressable = suppressable;
}
-void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
- const StackTrace *stack, const MutexSet *mset) {
+void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
+ const MutexSet *mset) {
void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
ReportMop *mop = new(mem) ReportMop;
rep_->mops.PushBack(mop);
@@ -173,30 +162,22 @@ void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
mop->size = s.size();
mop->write = s.IsWrite();
mop->atomic = s.IsAtomic();
- mop->stack = SymbolizeStack(*stack);
+ mop->stack = SymbolizeStack(stack);
+ if (mop->stack)
+ mop->stack->suppressable = true;
for (uptr i = 0; i < mset->Size(); i++) {
MutexSet::Desc d = mset->Get(i);
- u64 uid = 0;
- uptr addr = SyncVar::SplitId(d.id, &uid);
- SyncVar *s = ctx_->synctab.GetIfExistsAndLock(addr, false);
- // Check that the mutex is still alive.
- // Another mutex can be created at the same address,
- // so check uid as well.
- if (s && s->CheckId(uid)) {
- ReportMopMutex mtx = {s->uid, d.write};
- mop->mset.PushBack(mtx);
- AddMutex(s);
- } else {
- ReportMopMutex mtx = {d.id, d.write};
- mop->mset.PushBack(mtx);
- AddMutex(d.id);
- }
- if (s)
- s->mtx.ReadUnlock();
+ u64 mid = this->AddMutex(d.id);
+ ReportMopMutex mtx = {mid, d.write};
+ mop->mset.PushBack(mtx);
}
}
-void ScopedReport::AddThread(const ThreadContext *tctx) {
+void ScopedReport::AddUniqueTid(int unique_tid) {
+ rep_->unique_tids.PushBack(unique_tid);
+}
+
+void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
for (uptr i = 0; i < rep_->threads.Size(); i++) {
if ((u32)rep_->threads[i]->id == tctx->tid)
return;
@@ -207,19 +188,16 @@ void ScopedReport::AddThread(const ThreadContext *tctx) {
rt->id = tctx->tid;
rt->pid = tctx->os_id;
rt->running = (tctx->status == ThreadStatusRunning);
- rt->name = tctx->name ? internal_strdup(tctx->name) : 0;
+ rt->name = internal_strdup(tctx->name);
rt->parent_tid = tctx->parent_tid;
rt->stack = 0;
-#ifdef TSAN_GO
- rt->stack = SymbolizeStack(tctx->creation_stack);
-#else
rt->stack = SymbolizeStackId(tctx->creation_stack_id);
-#endif
+ if (rt->stack)
+ rt->stack->suppressable = suppressable;
}
#ifndef TSAN_GO
static ThreadContext *FindThreadByUidLocked(int unique_id) {
- Context *ctx = CTX();
ctx->thread_registry->CheckLocked();
for (unsigned i = 0; i < kMaxTid; i++) {
ThreadContext *tctx = static_cast<ThreadContext*>(
@@ -232,7 +210,6 @@ static ThreadContext *FindThreadByUidLocked(int unique_id) {
}
static ThreadContext *FindThreadByTidLocked(int tid) {
- Context *ctx = CTX();
ctx->thread_registry->CheckLocked();
return static_cast<ThreadContext*>(
ctx->thread_registry->GetThreadLocked(tid));
@@ -250,7 +227,6 @@ static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
}
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
- Context *ctx = CTX();
ctx->thread_registry->CheckLocked();
ThreadContext *tctx = static_cast<ThreadContext*>(
ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
@@ -264,6 +240,13 @@ ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
}
#endif
+void ScopedReport::AddThread(int unique_tid, bool suppressable) {
+#ifndef TSAN_GO
+ if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
+ AddThread(tctx, suppressable);
+#endif
+}
+
void ScopedReport::AddMutex(const SyncVar *s) {
for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
if (rep_->mutexes[i]->id == s->uid)
@@ -273,14 +256,31 @@ void ScopedReport::AddMutex(const SyncVar *s) {
ReportMutex *rm = new(mem) ReportMutex();
rep_->mutexes.PushBack(rm);
rm->id = s->uid;
+ rm->addr = s->addr;
rm->destroyed = false;
- rm->stack = 0;
-#ifndef TSAN_GO
rm->stack = SymbolizeStackId(s->creation_stack_id);
-#endif
}
-void ScopedReport::AddMutex(u64 id) {
+u64 ScopedReport::AddMutex(u64 id) {
+ u64 uid = 0;
+ u64 mid = id;
+ uptr addr = SyncVar::SplitId(id, &uid);
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+ // Check that the mutex is still alive.
+ // Another mutex can be created at the same address,
+ // so check uid as well.
+ if (s && s->CheckId(uid)) {
+ mid = s->uid;
+ AddMutex(s);
+ } else {
+ AddDeadMutex(id);
+ }
+ if (s)
+ s->mtx.Unlock();
+ return mid;
+}
+
+void ScopedReport::AddDeadMutex(u64 id) {
for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
if (rep_->mutexes[i]->id == id)
return;
@@ -289,6 +289,7 @@ void ScopedReport::AddMutex(u64 id) {
ReportMutex *rm = new(mem) ReportMutex();
rep_->mutexes.PushBack(rm);
rm->id = id;
+ rm->addr = 0;
rm->destroyed = true;
rm->stack = 0;
}
@@ -300,51 +301,46 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
int fd = -1;
int creat_tid = -1;
u32 creat_stack = 0;
- if (FdLocation(addr, &fd, &creat_tid, &creat_stack)
- || FdLocation(AlternativeAddress(addr), &fd, &creat_tid, &creat_stack)) {
- void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
- ReportLocation *loc = new(mem) ReportLocation();
- rep_->locs.PushBack(loc);
- loc->type = ReportLocationFD;
+ if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
+ ReportLocation *loc = ReportLocation::New(ReportLocationFD);
loc->fd = fd;
loc->tid = creat_tid;
loc->stack = SymbolizeStackId(creat_stack);
+ rep_->locs.PushBack(loc);
ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
if (tctx)
AddThread(tctx);
return;
}
MBlock *b = 0;
- if (allocator()->PointerIsMine((void*)addr)
- && (b = user_mblock(0, (void*)addr))) {
- ThreadContext *tctx = FindThreadByTidLocked(b->Tid());
- void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
- ReportLocation *loc = new(mem) ReportLocation();
+ Allocator *a = allocator();
+ if (a->PointerIsMine((void*)addr)) {
+ void *block_begin = a->GetBlockBegin((void*)addr);
+ if (block_begin)
+ b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+ if (b != 0) {
+ ThreadContext *tctx = FindThreadByTidLocked(b->tid);
+ ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
+ loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
+ loc->heap_chunk_size = b->siz;
+ loc->tid = tctx ? tctx->tid : b->tid;
+ loc->stack = SymbolizeStackId(b->stk);
rep_->locs.PushBack(loc);
- loc->type = ReportLocationHeap;
- loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
- loc->size = b->Size();
- loc->tid = tctx ? tctx->tid : b->Tid();
- loc->name = 0;
- loc->file = 0;
- loc->line = 0;
- loc->stack = 0;
- loc->stack = SymbolizeStackId(b->StackId());
if (tctx)
AddThread(tctx);
return;
}
bool is_stack = false;
if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
- void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
- ReportLocation *loc = new(mem) ReportLocation();
- rep_->locs.PushBack(loc);
- loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
+ ReportLocation *loc =
+ ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
loc->tid = tctx->tid;
+ rep_->locs.PushBack(loc);
AddThread(tctx);
}
- ReportLocation *loc = SymbolizeData(addr);
- if (loc) {
+ if (ReportLocation *loc = SymbolizeData(addr)) {
+ loc->suppressable = true;
rep_->locs.PushBack(loc);
return;
}
@@ -365,11 +361,11 @@ const ReportDesc *ScopedReport::GetReport() const {
return rep_;
}
-void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
+void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
+ MutexSet *mset) {
// This function restores stack trace and mutex set for the thread/epoch.
// It does so by getting stack trace and mutex set at the beginning of
// trace part, and then replaying the trace till the given epoch.
- Context *ctx = CTX();
ctx->thread_registry->CheckLocked();
ThreadContext *tctx = static_cast<ThreadContext*>(
ctx->thread_registry->GetThreadLocked(tid));
@@ -391,13 +387,13 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
InternalScopedBuffer<uptr> stack(kShadowStackSize);
- for (uptr i = 0; i < hdr->stack0.Size(); i++) {
- stack[i] = hdr->stack0.Get(i);
+ for (uptr i = 0; i < hdr->stack0.size; i++) {
+ stack[i] = hdr->stack0.trace[i];
DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]);
}
if (mset)
*mset = hdr->mset0;
- uptr pos = hdr->stack0.Size();
+ uptr pos = hdr->stack0.size;
Event *events = (Event*)GetThreadTrace(tid);
for (uptr i = ebegin; i <= eend; i++) {
Event ev = events[i];
@@ -432,14 +428,13 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
stk->Init(stack.data(), pos);
}
-static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
- uptr addr_min, uptr addr_max) {
- Context *ctx = CTX();
+static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
+ uptr addr_min, uptr addr_max) {
bool equal_stack = false;
RacyStacks hash;
if (flags()->suppress_equal_stacks) {
- hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
- hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
+ hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
+ hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
if (hash == ctx->racy_stacks[i]) {
DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
@@ -472,13 +467,12 @@ static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
return false;
}
-static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
- uptr addr_min, uptr addr_max) {
- Context *ctx = CTX();
+static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
+ uptr addr_min, uptr addr_max) {
if (flags()->suppress_equal_stacks) {
RacyStacks hash;
- hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
- hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
+ hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
+ hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
ctx->racy_stacks.PushBack(hash);
}
if (flags()->suppress_equal_addresses) {
@@ -487,25 +481,31 @@ static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
}
}
-bool OutputReport(Context *ctx,
- const ScopedReport &srep,
- const ReportStack *suppress_stack1,
- const ReportStack *suppress_stack2,
- const ReportLocation *suppress_loc) {
+bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
const ReportDesc *rep = srep.GetReport();
Suppression *supp = 0;
- uptr suppress_pc = IsSuppressed(rep->typ, suppress_stack1, &supp);
- if (suppress_pc == 0)
- suppress_pc = IsSuppressed(rep->typ, suppress_stack2, &supp);
- if (suppress_pc == 0)
- suppress_pc = IsSuppressed(rep->typ, suppress_loc, &supp);
+ uptr suppress_pc = 0;
+ for (uptr i = 0; suppress_pc == 0 && i < rep->mops.Size(); i++)
+ suppress_pc = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
+ for (uptr i = 0; suppress_pc == 0 && i < rep->stacks.Size(); i++)
+ suppress_pc = IsSuppressed(rep->typ, rep->stacks[i], &supp);
+ for (uptr i = 0; suppress_pc == 0 && i < rep->threads.Size(); i++)
+ suppress_pc = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
+ for (uptr i = 0; suppress_pc == 0 && i < rep->locs.Size(); i++)
+ suppress_pc = IsSuppressed(rep->typ, rep->locs[i], &supp);
if (suppress_pc != 0) {
FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
ctx->fired_suppressions.push_back(s);
}
- if (OnReport(rep, suppress_pc != 0))
- return false;
+ {
+ bool old_is_freeing = thr->is_freeing;
+ thr->is_freeing = false;
+ bool suppressed = OnReport(rep, suppress_pc != 0);
+ thr->is_freeing = old_is_freeing;
+ if (suppressed)
+ return false;
+ }
PrintReport(rep);
ctx->nreported++;
if (flags()->halt_on_error)
@@ -513,15 +513,14 @@ bool OutputReport(Context *ctx,
return true;
}
-bool IsFiredSuppression(Context *ctx,
- const ScopedReport &srep,
- const StackTrace &trace) {
+bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
+ StackTrace trace) {
for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
continue;
- for (uptr j = 0; j < trace.Size(); j++) {
+ for (uptr j = 0; j < trace.size; j++) {
FiredSuppression *s = &ctx->fired_suppressions[k];
- if (trace.Get(j) == s->pc) {
+ if (trace.trace[j] == s->pc) {
if (s->supp)
s->supp->hit_count++;
return true;
@@ -548,45 +547,13 @@ static bool IsFiredSuppression(Context *ctx,
}
bool FrameIsInternal(const ReportStack *frame) {
- return frame != 0 && frame->file != 0
- && (internal_strstr(frame->file, "tsan_interceptors.cc") ||
- internal_strstr(frame->file, "sanitizer_common_interceptors.inc") ||
- internal_strstr(frame->file, "tsan_interface_"));
-}
-
-// On programs that use Java we see weird reports like:
-// WARNING: ThreadSanitizer: data race (pid=22512)
-// Read of size 8 at 0x7d2b00084318 by thread 100:
-// #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3)
-// #1 <null> <null>:0 (0x7f7ad9b40193)
-// Previous write of size 8 at 0x7d2b00084318 by thread 105:
-// #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
-// #1 <null> <null>:0 (0x7f7ad9b42707)
-static bool IsJavaNonsense(const ReportDesc *rep) {
-#ifndef TSAN_GO
- for (uptr i = 0; i < rep->mops.Size(); i++) {
- ReportMop *mop = rep->mops[i];
- ReportStack *frame = mop->stack;
- if (frame == 0
- || (frame->func == 0 && frame->file == 0 && frame->line == 0
- && frame->module == 0)) {
- return true;
- }
- if (FrameIsInternal(frame)) {
- frame = frame->next;
- if (frame == 0
- || (frame->func == 0 && frame->file == 0 && frame->line == 0
- && frame->module == 0)) {
- if (frame) {
- FiredSuppression supp = {rep->typ, frame->pc, 0};
- CTX()->fired_suppressions.push_back(supp);
- }
- return true;
- }
- }
- }
-#endif
- return false;
+ if (frame == 0)
+ return false;
+ const char *file = frame->info.file;
+ return file != 0 &&
+ (internal_strstr(file, "tsan_interceptors.cc") ||
+ internal_strstr(file, "sanitizer_common_interceptors.inc") ||
+ internal_strstr(file, "tsan_interface_"));
}
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
@@ -603,10 +570,14 @@ static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
}
void ReportRace(ThreadState *thr) {
+ CheckNoLocks(thr);
+
+ // Symbolizer makes lots of intercepted calls. If we try to process them,
+ // at best it will cause deadlocks on internal mutexes.
+ ScopedIgnoreInterceptors ignore;
+
if (!flags()->report_bugs)
return;
- ScopedInRtl in_rtl;
-
if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
return;
@@ -631,11 +602,12 @@ void ReportRace(ThreadState *thr) {
return;
}
- Context *ctx = CTX();
ThreadRegistryLock l0(ctx->thread_registry);
ReportType typ = ReportTypeRace;
- if (thr->is_vptr_access)
+ if (thr->is_vptr_access && freed)
+ typ = ReportTypeVptrUseAfterFree;
+ else if (thr->is_vptr_access)
typ = ReportTypeVptrRace;
else if (freed)
typ = ReportTypeUseAfterFree;
@@ -643,9 +615,9 @@ void ReportRace(ThreadState *thr) {
if (IsFiredSuppression(ctx, rep, addr))
return;
const uptr kMop = 2;
- StackTrace traces[kMop];
+ VarSizeStackTrace traces[kMop];
const uptr toppc = TraceTopPC(thr);
- traces[0].ObtainCurrent(thr, toppc);
+ ObtainCurrentStack(thr, toppc, &traces[0]);
if (IsFiredSuppression(ctx, rep, traces[0]))
return;
InternalScopedBuffer<MutexSet> mset2(1);
@@ -660,13 +632,10 @@ void ReportRace(ThreadState *thr) {
for (uptr i = 0; i < kMop; i++) {
Shadow s(thr->racy_state[i]);
- rep.AddMemoryAccess(addr, s, &traces[i],
+ rep.AddMemoryAccess(addr, s, traces[i],
i == 0 ? &thr->mset : mset2.data());
}
- if (flags()->suppress_java && IsJavaNonsense(rep.GetReport()))
- return;
-
for (uptr i = 0; i < kMop; i++) {
FastState s(thr->racy_state[i]);
ThreadContext *tctx = static_cast<ThreadContext*>(
@@ -686,37 +655,40 @@ void ReportRace(ThreadState *thr) {
}
#endif
- ReportLocation *suppress_loc = rep.GetReport()->locs.Size() ?
- rep.GetReport()->locs[0] : 0;
- if (!OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack,
- rep.GetReport()->mops[1]->stack,
- suppress_loc))
+ if (!OutputReport(thr, rep))
return;
AddRacyStacks(thr, traces, addr_min, addr_max);
}
void PrintCurrentStack(ThreadState *thr, uptr pc) {
- StackTrace trace;
- trace.ObtainCurrent(thr, pc);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
PrintStack(SymbolizeStack(trace));
}
-void PrintCurrentStackSlow() {
+void PrintCurrentStackSlow(uptr pc) {
#ifndef TSAN_GO
- __sanitizer::StackTrace *ptrace = new(internal_alloc(MBlockStackTrace,
- sizeof(__sanitizer::StackTrace))) __sanitizer::StackTrace;
- ptrace->Unwind(kStackTraceMax, __sanitizer::StackTrace::GetCurrentPc(),
- 0, 0, 0, false);
+ BufferedStackTrace *ptrace =
+ new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
+ BufferedStackTrace();
+ ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false);
for (uptr i = 0; i < ptrace->size / 2; i++) {
- uptr tmp = ptrace->trace[i];
- ptrace->trace[i] = ptrace->trace[ptrace->size - i - 1];
- ptrace->trace[ptrace->size - i - 1] = tmp;
+ uptr tmp = ptrace->trace_buffer[i];
+ ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
+ ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
}
- StackTrace trace;
- trace.Init(ptrace->trace, ptrace->size);
- PrintStack(SymbolizeStack(trace));
+ PrintStack(SymbolizeStack(*ptrace));
#endif
}
} // namespace __tsan
+
+using namespace __tsan;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ PrintCurrentStackSlow(StackTrace::GetCurrentPc());
+}
+} // extern "C"
diff --git a/libsanitizer/tsan/tsan_rtl_thread.cc b/libsanitizer/tsan/tsan_rtl_thread.cc
index dea6698353..d75445aa8b 100644
--- a/libsanitizer/tsan/tsan_rtl_thread.cc
+++ b/libsanitizer/tsan/tsan_rtl_thread.cc
@@ -34,13 +34,13 @@ ThreadContext::~ThreadContext() {
#endif
void ThreadContext::OnDead() {
- sync.Reset();
+ CHECK_EQ(sync.size(), 0);
}
void ThreadContext::OnJoined(void *arg) {
ThreadState *caller_thr = static_cast<ThreadState *>(arg);
AcquireImpl(caller_thr, 0, &sync);
- sync.Reset();
+ sync.Reset(&caller_thr->clock_cache);
}
struct OnCreatedArgs {
@@ -57,21 +57,22 @@ void ThreadContext::OnCreated(void *arg) {
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
ReleaseImpl(args->thr, 0, &sync);
-#ifdef TSAN_GO
- creation_stack.ObtainCurrent(args->thr, args->pc);
-#else
creation_stack_id = CurrentStackId(args->thr, args->pc);
-#endif
if (reuse_count == 0)
StatInc(args->thr, StatThreadMaxTid);
}
void ThreadContext::OnReset() {
- sync.Reset();
+ CHECK_EQ(sync.size(), 0);
FlushUnneededShadowMemory(GetThreadTrace(tid), TraceSize() * sizeof(Event));
//!!! FlushUnneededShadowMemory(GetThreadTraceHeader(tid), sizeof(Trace));
}
+void ThreadContext::OnDetached(void *arg) {
+ ThreadState *thr1 = static_cast<ThreadState*>(arg);
+ sync.Reset(&thr1->clock_cache);
+}
+
struct OnStartedArgs {
ThreadState *thr;
uptr stk_addr;
@@ -87,8 +88,8 @@ void ThreadContext::OnStarted(void *arg) {
// from different threads.
epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
epoch1 = (u64)-1;
- new(thr) ThreadState(CTX(), tid, unique_id,
- epoch0, args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
+ new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
+ args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
#ifndef TSAN_GO
thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
thr->shadow_stack_pos = thr->shadow_stack;
@@ -104,19 +105,23 @@ void ThreadContext::OnStarted(void *arg) {
#ifndef TSAN_GO
AllocatorThreadStart(thr);
#endif
+ if (common_flags()->detect_deadlocks) {
+ thr->dd_pt = ctx->dd->CreatePhysicalThread();
+ thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
+ }
+ thr->fast_state.SetHistorySize(flags()->history_size);
+ // Commit switch to the new part of the trace.
+ // TraceAddEvent will reset stack0/mset0 in the new part for us.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+
thr->fast_synch_epoch = epoch0;
AcquireImpl(thr, 0, &sync);
- thr->fast_state.SetHistorySize(flags()->history_size);
- const uptr trace = (epoch0 / kTracePartSize) % TraceParts();
- Trace *thr_trace = ThreadTrace(thr->tid);
- thr_trace->headers[trace].epoch0 = epoch0;
StatInc(thr, StatSyncAcquire);
- sync.Reset();
+ sync.Reset(&thr->clock_cache);
DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
"tls_addr=%zx tls_size=%zx\n",
tid, (uptr)epoch0, args->stk_addr, args->stk_size,
args->tls_addr, args->tls_size);
- thr->is_alive = true;
}
void ThreadContext::OnFinished() {
@@ -128,11 +133,17 @@ void ThreadContext::OnFinished() {
}
epoch1 = thr->fast_state.epoch();
+ if (common_flags()->detect_deadlocks) {
+ ctx->dd->DestroyPhysicalThread(thr->dd_pt);
+ ctx->dd->DestroyLogicalThread(thr->dd_lt);
+ }
+ ctx->clock_alloc.FlushCache(&thr->clock_cache);
+ ctx->metamap.OnThreadIdle(thr);
#ifndef TSAN_GO
AllocatorThreadFinish(thr);
#endif
thr->~ThreadState();
- StatAggregate(CTX()->stat, thr->stat);
+ StatAggregate(ctx->stat, thr->stat);
thr = 0;
}
@@ -177,6 +188,8 @@ static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
}
static void ThreadCheckIgnore(ThreadState *thr) {
+ if (ctx->after_multithreaded_fork)
+ return;
if (thr->ignore_reads_and_writes)
ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
if (thr->ignore_sync)
@@ -187,36 +200,31 @@ static void ThreadCheckIgnore(ThreadState *thr) {}
#endif
void ThreadFinalize(ThreadState *thr) {
- CHECK_GT(thr->in_rtl, 0);
ThreadCheckIgnore(thr);
#ifndef TSAN_GO
if (!flags()->report_thread_leaks)
return;
- ThreadRegistryLock l(CTX()->thread_registry);
+ ThreadRegistryLock l(ctx->thread_registry);
Vector<ThreadLeak> leaks(MBlockScopedBuf);
- CTX()->thread_registry->RunCallbackForEachThreadLocked(
+ ctx->thread_registry->RunCallbackForEachThreadLocked(
MaybeReportThreadLeak, &leaks);
for (uptr i = 0; i < leaks.Size(); i++) {
ScopedReport rep(ReportTypeThreadLeak);
- rep.AddThread(leaks[i].tctx);
+ rep.AddThread(leaks[i].tctx, true);
rep.SetCount(leaks[i].count);
- OutputReport(CTX(), rep);
+ OutputReport(thr, rep);
}
#endif
}
int ThreadCount(ThreadState *thr) {
- CHECK_GT(thr->in_rtl, 0);
- Context *ctx = CTX();
uptr result;
ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
return (int)result;
}
int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
- CHECK_GT(thr->in_rtl, 0);
StatInc(thr, StatThreadCreate);
- Context *ctx = CTX();
OnCreatedArgs args = { thr, pc };
int tid = ctx->thread_registry->CreateThread(uid, detached, thr->tid, &args);
DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid);
@@ -225,8 +233,6 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
}
void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
- Context *ctx = CTX();
- CHECK_GT(thr->in_rtl, 0);
uptr stk_addr = 0;
uptr stk_size = 0;
uptr tls_addr = 0;
@@ -259,18 +265,24 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
tr->Lock();
thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
tr->Unlock();
+
+#ifndef TSAN_GO
+ if (ctx->after_multithreaded_fork) {
+ thr->ignore_interceptors++;
+ ThreadIgnoreBegin(thr, 0);
+ ThreadIgnoreSyncBegin(thr, 0);
+ }
+#endif
}
void ThreadFinish(ThreadState *thr) {
- CHECK_GT(thr->in_rtl, 0);
ThreadCheckIgnore(thr);
StatInc(thr, StatThreadFinish);
if (thr->stk_addr && thr->stk_size)
DontNeedShadowFor(thr->stk_addr, thr->stk_size);
if (thr->tls_addr && thr->tls_size)
DontNeedShadowFor(thr->tls_addr, thr->tls_size);
- thr->is_alive = false;
- Context *ctx = CTX();
+ thr->is_dead = true;
ctx->thread_registry->FinishThread(thr->tid);
}
@@ -284,33 +296,26 @@ static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
}
int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
- CHECK_GT(thr->in_rtl, 0);
- Context *ctx = CTX();
int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
return res;
}
void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
- CHECK_GT(thr->in_rtl, 0);
CHECK_GT(tid, 0);
CHECK_LT(tid, kMaxTid);
DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
- Context *ctx = CTX();
ctx->thread_registry->JoinThread(tid, thr);
}
void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
- CHECK_GT(thr->in_rtl, 0);
CHECK_GT(tid, 0);
CHECK_LT(tid, kMaxTid);
- Context *ctx = CTX();
- ctx->thread_registry->DetachThread(tid);
+ ctx->thread_registry->DetachThread(tid, thr);
}
void ThreadSetName(ThreadState *thr, const char *name) {
- CHECK_GT(thr->in_rtl, 0);
- CTX()->thread_registry->SetThreadName(thr->tid, name);
+ ctx->thread_registry->SetThreadName(thr->tid, name);
}
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
diff --git a/libsanitizer/tsan/tsan_stack_trace.cc b/libsanitizer/tsan/tsan_stack_trace.cc
new file mode 100644
index 0000000000..3734e0e497
--- /dev/null
+++ b/libsanitizer/tsan/tsan_stack_trace.cc
@@ -0,0 +1,44 @@
+//===-- tsan_stack_trace.cc -----------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_stack_trace.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+VarSizeStackTrace::VarSizeStackTrace()
+ : StackTrace(nullptr, 0), trace_buffer(nullptr) {}
+
+VarSizeStackTrace::~VarSizeStackTrace() {
+ ResizeBuffer(0);
+}
+
+void VarSizeStackTrace::ResizeBuffer(uptr new_size) {
+ if (trace_buffer) {
+ internal_free(trace_buffer);
+ }
+ trace_buffer =
+ (new_size > 0)
+ ? (uptr *)internal_alloc(MBlockStackTrace,
+ new_size * sizeof(trace_buffer[0]))
+ : nullptr;
+ trace = trace_buffer;
+ size = new_size;
+}
+
+void VarSizeStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
+ ResizeBuffer(cnt + !!extra_top_pc);
+ internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
+ if (extra_top_pc)
+ trace_buffer[cnt] = extra_top_pc;
+}
+
+} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_stack_trace.h b/libsanitizer/tsan/tsan_stack_trace.h
new file mode 100644
index 0000000000..b097a9b269
--- /dev/null
+++ b/libsanitizer/tsan/tsan_stack_trace.h
@@ -0,0 +1,37 @@
+//===-- tsan_stack_trace.h --------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_STACK_TRACE_H
+#define TSAN_STACK_TRACE_H
+
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+// StackTrace which calls malloc/free to allocate the buffer for
+// addresses in stack traces.
+struct VarSizeStackTrace : public StackTrace {
+ uptr *trace_buffer; // Owned.
+
+ VarSizeStackTrace();
+ ~VarSizeStackTrace();
+ void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
+
+ private:
+ void ResizeBuffer(uptr new_size);
+
+ VarSizeStackTrace(const VarSizeStackTrace &);
+ void operator=(const VarSizeStackTrace &);
+};
+
+} // namespace __tsan
+
+#endif // TSAN_STACK_TRACE_H
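
Editor's note (not part of the patch): a hedged sketch of how the `VarSizeStackTrace` declared above is filled from a plain array of PCs, with an optional synthetic top frame, before being handed to `SymbolizeStack`/`AddStack`. The function name `ExampleFillTrace` and the PC values are hypothetical, and the snippet only compiles inside the TSan source tree:

```cpp
// Usage sketch, assuming the TSan include paths of this tree.
#include "tsan_stack_trace.h"

namespace __tsan {

void ExampleFillTrace() {
  uptr pcs[3] = {0x1000, 0x2000, 0x3000};   // hypothetical return addresses
  VarSizeStackTrace trace;
  trace.Init(pcs, 3, /*extra_top_pc=*/0x4000);
  // trace.trace now points at an owned buffer of 4 entries
  // {0x1000, 0x2000, 0x3000, 0x4000}; the destructor frees it via
  // internal_free, as defined in tsan_stack_trace.cc above.
  (void)trace;
}

}  // namespace __tsan
```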
diff --git a/libsanitizer/tsan/tsan_stat.cc b/libsanitizer/tsan/tsan_stat.cc
index 6bc345397a..cdb4835855 100644
--- a/libsanitizer/tsan/tsan_stat.cc
+++ b/libsanitizer/tsan/tsan_stat.cc
@@ -35,6 +35,7 @@ void StatOutput(u64 *stat) {
name[StatMop4] = " size 4 ";
name[StatMop8] = " size 8 ";
name[StatMopSame] = " Including same ";
+ name[StatMopIgnored] = " Including ignored ";
name[StatMopRange] = " Including range ";
name[StatMopRodata] = " Including .rodata ";
name[StatMopRangeRodata] = " Including .rodata range ";
@@ -72,6 +73,28 @@ void StatOutput(u64 *stat) {
name[StatSyncAcquire] = " acquired ";
name[StatSyncRelease] = " released ";
+ name[StatClockAcquire] = "Clock acquire ";
+ name[StatClockAcquireEmpty] = " empty clock ";
+ name[StatClockAcquireFastRelease] = " fast from release-store ";
+ name[StatClockAcquireLarge] = " contains my tid ";
+ name[StatClockAcquireRepeat] = " repeated (fast) ";
+ name[StatClockAcquireFull] = " full (slow) ";
+ name[StatClockAcquiredSomething] = " acquired something ";
+ name[StatClockRelease] = "Clock release ";
+ name[StatClockReleaseResize] = " resize ";
+ name[StatClockReleaseFast1] = " fast1 ";
+ name[StatClockReleaseFast2] = " fast2 ";
+ name[StatClockReleaseSlow] = " dirty overflow (slow) ";
+ name[StatClockReleaseFull] = " full (slow) ";
+ name[StatClockReleaseAcquired] = " was acquired ";
+ name[StatClockReleaseClearTail] = " clear tail ";
+ name[StatClockStore] = "Clock release store ";
+ name[StatClockStoreResize] = " resize ";
+ name[StatClockStoreFast] = " fast ";
+ name[StatClockStoreFull] = " slow ";
+ name[StatClockStoreTail] = " clear tail ";
+ name[StatClockAcquireRelease] = "Clock acquire-release ";
+
name[StatAtomic] = "Atomic operations ";
name[StatAtomicLoad] = " Including load ";
name[StatAtomicStore] = " store ";
@@ -96,338 +119,6 @@ void StatOutput(u64 *stat) {
name[StatAtomic8] = " size 8 ";
name[StatAtomic16] = " size 16 ";
- name[StatInterceptor] = "Interceptors ";
- name[StatInt_longjmp] = " longjmp ";
- name[StatInt_siglongjmp] = " siglongjmp ";
- name[StatInt_malloc] = " malloc ";
- name[StatInt___libc_memalign] = " __libc_memalign ";
- name[StatInt_calloc] = " calloc ";
- name[StatInt_realloc] = " realloc ";
- name[StatInt_free] = " free ";
- name[StatInt_cfree] = " cfree ";
- name[StatInt_malloc_usable_size] = " malloc_usable_size ";
- name[StatInt_mmap] = " mmap ";
- name[StatInt_mmap64] = " mmap64 ";
- name[StatInt_munmap] = " munmap ";
- name[StatInt_memalign] = " memalign ";
- name[StatInt_valloc] = " valloc ";
- name[StatInt_pvalloc] = " pvalloc ";
- name[StatInt_posix_memalign] = " posix_memalign ";
- name[StatInt__Znwm] = " _Znwm ";
- name[StatInt__ZnwmRKSt9nothrow_t] = " _ZnwmRKSt9nothrow_t ";
- name[StatInt__Znam] = " _Znam ";
- name[StatInt__ZnamRKSt9nothrow_t] = " _ZnamRKSt9nothrow_t ";
- name[StatInt__ZdlPv] = " _ZdlPv ";
- name[StatInt__ZdlPvRKSt9nothrow_t] = " _ZdlPvRKSt9nothrow_t ";
- name[StatInt__ZdaPv] = " _ZdaPv ";
- name[StatInt__ZdaPvRKSt9nothrow_t] = " _ZdaPvRKSt9nothrow_t ";
- name[StatInt_strlen] = " strlen ";
- name[StatInt_memset] = " memset ";
- name[StatInt_memcpy] = " memcpy ";
- name[StatInt_textdomain] = " textdomain ";
- name[StatInt_strcmp] = " strcmp ";
- name[StatInt_memchr] = " memchr ";
- name[StatInt_memrchr] = " memrchr ";
- name[StatInt_memmove] = " memmove ";
- name[StatInt_memcmp] = " memcmp ";
- name[StatInt_strchr] = " strchr ";
- name[StatInt_strchrnul] = " strchrnul ";
- name[StatInt_strrchr] = " strrchr ";
- name[StatInt_strncmp] = " strncmp ";
- name[StatInt_strcpy] = " strcpy ";
- name[StatInt_strncpy] = " strncpy ";
- name[StatInt_strstr] = " strstr ";
- name[StatInt_strdup] = " strdup ";
- name[StatInt_strcasecmp] = " strcasecmp ";
- name[StatInt_strncasecmp] = " strncasecmp ";
- name[StatInt_atexit] = " atexit ";
- name[StatInt__exit] = " _exit ";
- name[StatInt___cxa_guard_acquire] = " __cxa_guard_acquire ";
- name[StatInt___cxa_guard_release] = " __cxa_guard_release ";
- name[StatInt___cxa_guard_abort] = " __cxa_guard_abort ";
- name[StatInt_pthread_create] = " pthread_create ";
- name[StatInt_pthread_join] = " pthread_join ";
- name[StatInt_pthread_detach] = " pthread_detach ";
- name[StatInt_pthread_mutex_init] = " pthread_mutex_init ";
- name[StatInt_pthread_mutex_destroy] = " pthread_mutex_destroy ";
- name[StatInt_pthread_mutex_lock] = " pthread_mutex_lock ";
- name[StatInt_pthread_mutex_trylock] = " pthread_mutex_trylock ";
- name[StatInt_pthread_mutex_timedlock] = " pthread_mutex_timedlock ";
- name[StatInt_pthread_mutex_unlock] = " pthread_mutex_unlock ";
- name[StatInt_pthread_spin_init] = " pthread_spin_init ";
- name[StatInt_pthread_spin_destroy] = " pthread_spin_destroy ";
- name[StatInt_pthread_spin_lock] = " pthread_spin_lock ";
- name[StatInt_pthread_spin_trylock] = " pthread_spin_trylock ";
- name[StatInt_pthread_spin_unlock] = " pthread_spin_unlock ";
- name[StatInt_pthread_rwlock_init] = " pthread_rwlock_init ";
- name[StatInt_pthread_rwlock_destroy] = " pthread_rwlock_destroy ";
- name[StatInt_pthread_rwlock_rdlock] = " pthread_rwlock_rdlock ";
- name[StatInt_pthread_rwlock_tryrdlock] = " pthread_rwlock_tryrdlock ";
- name[StatInt_pthread_rwlock_timedrdlock]
- = " pthread_rwlock_timedrdlock ";
- name[StatInt_pthread_rwlock_wrlock] = " pthread_rwlock_wrlock ";
- name[StatInt_pthread_rwlock_trywrlock] = " pthread_rwlock_trywrlock ";
- name[StatInt_pthread_rwlock_timedwrlock]
- = " pthread_rwlock_timedwrlock ";
- name[StatInt_pthread_rwlock_unlock] = " pthread_rwlock_unlock ";
- name[StatInt_pthread_cond_init] = " pthread_cond_init ";
- name[StatInt_pthread_cond_destroy] = " pthread_cond_destroy ";
- name[StatInt_pthread_cond_signal] = " pthread_cond_signal ";
- name[StatInt_pthread_cond_broadcast] = " pthread_cond_broadcast ";
- name[StatInt_pthread_cond_wait] = " pthread_cond_wait ";
- name[StatInt_pthread_cond_timedwait] = " pthread_cond_timedwait ";
- name[StatInt_pthread_barrier_init] = " pthread_barrier_init ";
- name[StatInt_pthread_barrier_destroy] = " pthread_barrier_destroy ";
- name[StatInt_pthread_barrier_wait] = " pthread_barrier_wait ";
- name[StatInt_pthread_once] = " pthread_once ";
- name[StatInt_pthread_getschedparam] = " pthread_getschedparam ";
- name[StatInt_pthread_setname_np] = " pthread_setname_np ";
- name[StatInt_sem_init] = " sem_init ";
- name[StatInt_sem_destroy] = " sem_destroy ";
- name[StatInt_sem_wait] = " sem_wait ";
- name[StatInt_sem_trywait] = " sem_trywait ";
- name[StatInt_sem_timedwait] = " sem_timedwait ";
- name[StatInt_sem_post] = " sem_post ";
- name[StatInt_sem_getvalue] = " sem_getvalue ";
- name[StatInt_stat] = " stat ";
- name[StatInt___xstat] = " __xstat ";
- name[StatInt_stat64] = " stat64 ";
- name[StatInt___xstat64] = " __xstat64 ";
- name[StatInt_lstat] = " lstat ";
- name[StatInt___lxstat] = " __lxstat ";
- name[StatInt_lstat64] = " lstat64 ";
- name[StatInt___lxstat64] = " __lxstat64 ";
- name[StatInt_fstat] = " fstat ";
- name[StatInt___fxstat] = " __fxstat ";
- name[StatInt_fstat64] = " fstat64 ";
- name[StatInt___fxstat64] = " __fxstat64 ";
- name[StatInt_open] = " open ";
- name[StatInt_open64] = " open64 ";
- name[StatInt_creat] = " creat ";
- name[StatInt_creat64] = " creat64 ";
- name[StatInt_dup] = " dup ";
- name[StatInt_dup2] = " dup2 ";
- name[StatInt_dup3] = " dup3 ";
- name[StatInt_eventfd] = " eventfd ";
- name[StatInt_signalfd] = " signalfd ";
- name[StatInt_inotify_init] = " inotify_init ";
- name[StatInt_inotify_init1] = " inotify_init1 ";
- name[StatInt_socket] = " socket ";
- name[StatInt_socketpair] = " socketpair ";
- name[StatInt_connect] = " connect ";
- name[StatInt_bind] = " bind ";
- name[StatInt_listen] = " listen ";
- name[StatInt_accept] = " accept ";
- name[StatInt_accept4] = " accept4 ";
- name[StatInt_epoll_create] = " epoll_create ";
- name[StatInt_epoll_create1] = " epoll_create1 ";
- name[StatInt_close] = " close ";
- name[StatInt___close] = " __close ";
- name[StatInt___res_iclose] = " __res_iclose ";
- name[StatInt_pipe] = " pipe ";
- name[StatInt_pipe2] = " pipe2 ";
- name[StatInt_read] = " read ";
- name[StatInt_prctl] = " prctl ";
- name[StatInt_pread] = " pread ";
- name[StatInt_pread64] = " pread64 ";
- name[StatInt_readv] = " readv ";
- name[StatInt_preadv] = " preadv ";
- name[StatInt_preadv64] = " preadv64 ";
- name[StatInt_write] = " write ";
- name[StatInt_pwrite] = " pwrite ";
- name[StatInt_pwrite64] = " pwrite64 ";
- name[StatInt_writev] = " writev ";
- name[StatInt_pwritev] = " pwritev ";
- name[StatInt_pwritev64] = " pwritev64 ";
- name[StatInt_send] = " send ";
- name[StatInt_sendmsg] = " sendmsg ";
- name[StatInt_recv] = " recv ";
- name[StatInt_recvmsg] = " recvmsg ";
- name[StatInt_unlink] = " unlink ";
- name[StatInt_fopen] = " fopen ";
- name[StatInt_freopen] = " freopen ";
- name[StatInt_fclose] = " fclose ";
- name[StatInt_fread] = " fread ";
- name[StatInt_fwrite] = " fwrite ";
- name[StatInt_fflush] = " fflush ";
- name[StatInt_abort] = " abort ";
- name[StatInt_puts] = " puts ";
- name[StatInt_rmdir] = " rmdir ";
- name[StatInt_opendir] = " opendir ";
- name[StatInt_epoll_ctl] = " epoll_ctl ";
- name[StatInt_epoll_wait] = " epoll_wait ";
- name[StatInt_poll] = " poll ";
- name[StatInt_ppoll] = " ppoll ";
- name[StatInt_sigaction] = " sigaction ";
- name[StatInt_signal] = " signal ";
- name[StatInt_sigsuspend] = " sigsuspend ";
- name[StatInt_raise] = " raise ";
- name[StatInt_kill] = " kill ";
- name[StatInt_pthread_kill] = " pthread_kill ";
- name[StatInt_sleep] = " sleep ";
- name[StatInt_usleep] = " usleep ";
- name[StatInt_nanosleep] = " nanosleep ";
- name[StatInt_gettimeofday] = " gettimeofday ";
- name[StatInt_fork] = " fork ";
- name[StatInt_vscanf] = " vscanf ";
- name[StatInt_vsscanf] = " vsscanf ";
- name[StatInt_vfscanf] = " vfscanf ";
- name[StatInt_scanf] = " scanf ";
- name[StatInt_sscanf] = " sscanf ";
- name[StatInt_fscanf] = " fscanf ";
- name[StatInt___isoc99_vscanf] = " vscanf ";
- name[StatInt___isoc99_vsscanf] = " vsscanf ";
- name[StatInt___isoc99_vfscanf] = " vfscanf ";
- name[StatInt___isoc99_scanf] = " scanf ";
- name[StatInt___isoc99_sscanf] = " sscanf ";
- name[StatInt___isoc99_fscanf] = " fscanf ";
- name[StatInt_on_exit] = " on_exit ";
- name[StatInt___cxa_atexit] = " __cxa_atexit ";
- name[StatInt_localtime] = " localtime ";
- name[StatInt_localtime_r] = " localtime_r ";
- name[StatInt_gmtime] = " gmtime ";
- name[StatInt_gmtime_r] = " gmtime_r ";
- name[StatInt_ctime] = " ctime ";
- name[StatInt_ctime_r] = " ctime_r ";
- name[StatInt_asctime] = " asctime ";
- name[StatInt_asctime_r] = " asctime_r ";
- name[StatInt_strptime] = " strptime ";
- name[StatInt_frexp] = " frexp ";
- name[StatInt_frexpf] = " frexpf ";
- name[StatInt_frexpl] = " frexpl ";
- name[StatInt_getpwnam] = " getpwnam ";
- name[StatInt_getpwuid] = " getpwuid ";
- name[StatInt_getgrnam] = " getgrnam ";
- name[StatInt_getgrgid] = " getgrgid ";
- name[StatInt_getpwnam_r] = " getpwnam_r ";
- name[StatInt_getpwuid_r] = " getpwuid_r ";
- name[StatInt_getgrnam_r] = " getgrnam_r ";
- name[StatInt_getgrgid_r] = " getgrgid_r ";
- name[StatInt_clock_getres] = " clock_getres ";
- name[StatInt_clock_gettime] = " clock_gettime ";
- name[StatInt_clock_settime] = " clock_settime ";
- name[StatInt_getitimer] = " getitimer ";
- name[StatInt_setitimer] = " setitimer ";
- name[StatInt_time] = " time ";
- name[StatInt_glob] = " glob ";
- name[StatInt_glob64] = " glob64 ";
- name[StatInt_wait] = " wait ";
- name[StatInt_waitid] = " waitid ";
- name[StatInt_waitpid] = " waitpid ";
- name[StatInt_wait3] = " wait3 ";
- name[StatInt_wait4] = " wait4 ";
- name[StatInt_inet_ntop] = " inet_ntop ";
- name[StatInt_inet_pton] = " inet_pton ";
- name[StatInt_inet_aton] = " inet_aton ";
- name[StatInt_getaddrinfo] = " getaddrinfo ";
- name[StatInt_getnameinfo] = " getnameinfo ";
- name[StatInt_getsockname] = " getsockname ";
- name[StatInt_gethostent] = " gethostent ";
- name[StatInt_gethostbyname] = " gethostbyname ";
- name[StatInt_gethostbyname2] = " gethostbyname2 ";
- name[StatInt_gethostbyaddr] = " gethostbyaddr ";
- name[StatInt_gethostent_r] = " gethostent_r ";
- name[StatInt_gethostbyname_r] = " gethostbyname_r ";
- name[StatInt_gethostbyname2_r] = " gethostbyname2_r ";
- name[StatInt_gethostbyaddr_r] = " gethostbyaddr_r ";
- name[StatInt_getsockopt] = " getsockopt ";
- name[StatInt_modf] = " modf ";
- name[StatInt_modff] = " modff ";
- name[StatInt_modfl] = " modfl ";
- name[StatInt_getpeername] = " getpeername ";
- name[StatInt_ioctl] = " ioctl ";
- name[StatInt_sysinfo] = " sysinfo ";
- name[StatInt_readdir] = " readdir ";
- name[StatInt_readdir64] = " readdir64 ";
- name[StatInt_readdir_r] = " readdir_r ";
- name[StatInt_readdir64_r] = " readdir64_r ";
- name[StatInt_ptrace] = " ptrace ";
- name[StatInt_setlocale] = " setlocale ";
- name[StatInt_getcwd] = " getcwd ";
- name[StatInt_get_current_dir_name] = " get_current_dir_name ";
- name[StatInt_strtoimax] = " strtoimax ";
- name[StatInt_strtoumax] = " strtoumax ";
- name[StatInt_mbstowcs] = " mbstowcs ";
- name[StatInt_mbsrtowcs] = " mbsrtowcs ";
- name[StatInt_mbsnrtowcs] = " mbsnrtowcs ";
- name[StatInt_wcstombs] = " wcstombs ";
- name[StatInt_wcsrtombs] = " wcsrtombs ";
- name[StatInt_wcsnrtombs] = " wcsnrtombs ";
- name[StatInt_tcgetattr] = " tcgetattr ";
- name[StatInt_realpath] = " realpath ";
- name[StatInt_canonicalize_file_name] = " canonicalize_file_name ";
- name[StatInt_confstr] = " confstr ";
- name[StatInt_sched_getaffinity] = " sched_getaffinity ";
- name[StatInt_strerror] = " strerror ";
- name[StatInt_strerror_r] = " strerror_r ";
- name[StatInt___xpg_strerror_r] = " __xpg_strerror_r ";
- name[StatInt_scandir] = " scandir ";
- name[StatInt_scandir64] = " scandir64 ";
- name[StatInt_getgroups] = " getgroups ";
- name[StatInt_wordexp] = " wordexp ";
- name[StatInt_sigwait] = " sigwait ";
- name[StatInt_sigwaitinfo] = " sigwaitinfo ";
- name[StatInt_sigtimedwait] = " sigtimedwait ";
- name[StatInt_sigemptyset] = " sigemptyset ";
- name[StatInt_sigfillset] = " sigfillset ";
- name[StatInt_sigpending] = " sigpending ";
- name[StatInt_sigprocmask] = " sigprocmask ";
- name[StatInt_backtrace] = " backtrace ";
- name[StatInt_backtrace_symbols] = " backtrace_symbols ";
- name[StatInt_dlopen] = " dlopen ";
- name[StatInt_dlclose] = " dlclose ";
- name[StatInt_getmntent] = " getmntent ";
- name[StatInt_getmntent_r] = " getmntent_r ";
- name[StatInt_statfs] = " statfs ";
- name[StatInt_statfs64] = " statfs64 ";
- name[StatInt_fstatfs] = " fstatfs ";
- name[StatInt_fstatfs64] = " fstatfs64 ";
- name[StatInt_statvfs] = " statvfs ";
- name[StatInt_statvfs64] = " statvfs64 ";
- name[StatInt_fstatvfs] = " fstatvfs ";
- name[StatInt_fstatvfs64] = " fstatvfs64 ";
- name[StatInt_initgroups] = " initgroups ";
- name[StatInt_ether_ntoa] = " ether_ntoa ";
- name[StatInt_ether_aton] = " ether_aton ";
- name[StatInt_ether_ntoa_r] = " ether_ntoa_r ";
- name[StatInt_ether_aton_r] = " ether_aton_r ";
- name[StatInt_ether_ntohost] = " ether_ntohost ";
- name[StatInt_ether_hostton] = " ether_hostton ";
- name[StatInt_ether_line] = " ether_line ";
- name[StatInt_shmctl] = " shmctl ";
- name[StatInt_random_r] = " random_r ";
- name[StatInt_tmpnam] = " tmpnam ";
- name[StatInt_tmpnam_r] = " tmpnam_r ";
- name[StatInt_tempnam] = " tempnam ";
- name[StatInt_sincos] = " sincos ";
- name[StatInt_sincosf] = " sincosf ";
- name[StatInt_sincosl] = " sincosl ";
- name[StatInt_remquo] = " remquo ";
- name[StatInt_remquof] = " remquof ";
- name[StatInt_remquol] = " remquol ";
- name[StatInt_lgamma] = " lgamma ";
- name[StatInt_lgammaf] = " lgammaf ";
- name[StatInt_lgammal] = " lgammal ";
- name[StatInt_lgamma_r] = " lgamma_r ";
- name[StatInt_lgammaf_r] = " lgammaf_r ";
- name[StatInt_lgammal_r] = " lgammal_r ";
- name[StatInt_drand48_r] = " drand48_r ";
- name[StatInt_lrand48_r] = " lrand48_r ";
- name[StatInt_getline] = " getline ";
- name[StatInt_getdelim] = " getdelim ";
- name[StatInt_iconv] = " iconv ";
- name[StatInt_times] = " times ";
-
- name[StatInt_pthread_attr_getdetachstate] = " pthread_addr_getdetachstate "; // NOLINT
- name[StatInt_pthread_attr_getguardsize] = " pthread_addr_getguardsize "; // NOLINT
- name[StatInt_pthread_attr_getschedparam] = " pthread_addr_getschedparam "; // NOLINT
- name[StatInt_pthread_attr_getschedpolicy] = " pthread_addr_getschedpolicy "; // NOLINT
- name[StatInt_pthread_attr_getinheritsched] = " pthread_addr_getinheritsched "; // NOLINT
- name[StatInt_pthread_attr_getscope] = " pthread_addr_getscope "; // NOLINT
- name[StatInt_pthread_attr_getstacksize] = " pthread_addr_getstacksize "; // NOLINT
- name[StatInt_pthread_attr_getstack] = " pthread_addr_getstack "; // NOLINT
- name[StatInt_pthread_attr_getaffinity_np] = " pthread_addr_getaffinity_np "; // NOLINT
-
name[StatAnnotation] = "Dynamic annotations ";
name[StatAnnotateHappensBefore] = " HappensBefore ";
name[StatAnnotateHappensAfter] = " HappensAfter ";
@@ -475,11 +166,12 @@ void StatOutput(u64 *stat) {
name[StatMtxAnnotations] = " Annotations ";
name[StatMtxMBlock] = " MBlock ";
name[StatMtxJavaMBlock] = " JavaMBlock ";
+ name[StatMtxDeadlockDetector] = " DeadlockDetector ";
name[StatMtxFD] = " FD ";
Printf("Statistics:\n");
for (int i = 0; i < StatCnt; i++)
- Printf("%s: %zu\n", name[i], (uptr)stat[i]);
+ Printf("%s: %16zu\n", name[i], (uptr)stat[i]);
}
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_stat.h b/libsanitizer/tsan/tsan_stat.h
index 3e08313d1a..132656f035 100644
--- a/libsanitizer/tsan/tsan_stat.h
+++ b/libsanitizer/tsan/tsan_stat.h
@@ -24,6 +24,7 @@ enum StatType {
StatMop4,
StatMop8,
StatMopSame,
+ StatMopIgnored,
StatMopRange,
StatMopRodata,
StatMopRangeRodata,
@@ -67,6 +68,32 @@ enum StatType {
StatSyncAcquire,
StatSyncRelease,
+ // Clocks - acquire.
+ StatClockAcquire,
+ StatClockAcquireEmpty,
+ StatClockAcquireFastRelease,
+ StatClockAcquireLarge,
+ StatClockAcquireRepeat,
+ StatClockAcquireFull,
+ StatClockAcquiredSomething,
+ // Clocks - release.
+ StatClockRelease,
+ StatClockReleaseResize,
+ StatClockReleaseFast1,
+ StatClockReleaseFast2,
+ StatClockReleaseSlow,
+ StatClockReleaseFull,
+ StatClockReleaseAcquired,
+ StatClockReleaseClearTail,
+ // Clocks - release store.
+ StatClockStore,
+ StatClockStoreResize,
+ StatClockStoreFast,
+ StatClockStoreFull,
+ StatClockStoreTail,
+ // Clocks - acquire-release.
+ StatClockAcquireRelease,
+
// Atomics.
StatAtomic,
StatAtomicLoad,
@@ -92,337 +119,6 @@ enum StatType {
StatAtomic8,
StatAtomic16,
- // Interceptors.
- StatInterceptor,
- StatInt_longjmp,
- StatInt_siglongjmp,
- StatInt_malloc,
- StatInt___libc_memalign,
- StatInt_calloc,
- StatInt_realloc,
- StatInt_free,
- StatInt_cfree,
- StatInt_malloc_usable_size,
- StatInt_mmap,
- StatInt_mmap64,
- StatInt_munmap,
- StatInt_memalign,
- StatInt_valloc,
- StatInt_pvalloc,
- StatInt_posix_memalign,
- StatInt__Znwm,
- StatInt__ZnwmRKSt9nothrow_t,
- StatInt__Znam,
- StatInt__ZnamRKSt9nothrow_t,
- StatInt__ZdlPv,
- StatInt__ZdlPvRKSt9nothrow_t,
- StatInt__ZdaPv,
- StatInt__ZdaPvRKSt9nothrow_t,
- StatInt_strlen,
- StatInt_memset,
- StatInt_memcpy,
- StatInt_textdomain,
- StatInt_strcmp,
- StatInt_memchr,
- StatInt_memrchr,
- StatInt_memmove,
- StatInt_memcmp,
- StatInt_strchr,
- StatInt_strchrnul,
- StatInt_strrchr,
- StatInt_strncmp,
- StatInt_strcpy,
- StatInt_strncpy,
- StatInt_strcasecmp,
- StatInt_strncasecmp,
- StatInt_strstr,
- StatInt_strdup,
- StatInt_atexit,
- StatInt__exit,
- StatInt___cxa_guard_acquire,
- StatInt___cxa_guard_release,
- StatInt___cxa_guard_abort,
- StatInt_pthread_create,
- StatInt_pthread_join,
- StatInt_pthread_detach,
- StatInt_pthread_mutex_init,
- StatInt_pthread_mutex_destroy,
- StatInt_pthread_mutex_lock,
- StatInt_pthread_mutex_trylock,
- StatInt_pthread_mutex_timedlock,
- StatInt_pthread_mutex_unlock,
- StatInt_pthread_spin_init,
- StatInt_pthread_spin_destroy,
- StatInt_pthread_spin_lock,
- StatInt_pthread_spin_trylock,
- StatInt_pthread_spin_unlock,
- StatInt_pthread_rwlock_init,
- StatInt_pthread_rwlock_destroy,
- StatInt_pthread_rwlock_rdlock,
- StatInt_pthread_rwlock_tryrdlock,
- StatInt_pthread_rwlock_timedrdlock,
- StatInt_pthread_rwlock_wrlock,
- StatInt_pthread_rwlock_trywrlock,
- StatInt_pthread_rwlock_timedwrlock,
- StatInt_pthread_rwlock_unlock,
- StatInt_pthread_cond_init,
- StatInt_pthread_cond_destroy,
- StatInt_pthread_cond_signal,
- StatInt_pthread_cond_broadcast,
- StatInt_pthread_cond_wait,
- StatInt_pthread_cond_timedwait,
- StatInt_pthread_barrier_init,
- StatInt_pthread_barrier_destroy,
- StatInt_pthread_barrier_wait,
- StatInt_pthread_once,
- StatInt_pthread_getschedparam,
- StatInt_pthread_setname_np,
- StatInt_sem_init,
- StatInt_sem_destroy,
- StatInt_sem_wait,
- StatInt_sem_trywait,
- StatInt_sem_timedwait,
- StatInt_sem_post,
- StatInt_sem_getvalue,
- StatInt_stat,
- StatInt___xstat,
- StatInt_stat64,
- StatInt___xstat64,
- StatInt_lstat,
- StatInt___lxstat,
- StatInt_lstat64,
- StatInt___lxstat64,
- StatInt_fstat,
- StatInt___fxstat,
- StatInt_fstat64,
- StatInt___fxstat64,
- StatInt_open,
- StatInt_open64,
- StatInt_creat,
- StatInt_creat64,
- StatInt_dup,
- StatInt_dup2,
- StatInt_dup3,
- StatInt_eventfd,
- StatInt_signalfd,
- StatInt_inotify_init,
- StatInt_inotify_init1,
- StatInt_socket,
- StatInt_socketpair,
- StatInt_connect,
- StatInt_bind,
- StatInt_listen,
- StatInt_accept,
- StatInt_accept4,
- StatInt_epoll_create,
- StatInt_epoll_create1,
- StatInt_close,
- StatInt___close,
- StatInt___res_iclose,
- StatInt_pipe,
- StatInt_pipe2,
- StatInt_read,
- StatInt_prctl,
- StatInt_pread,
- StatInt_pread64,
- StatInt_readv,
- StatInt_preadv,
- StatInt_preadv64,
- StatInt_write,
- StatInt_pwrite,
- StatInt_pwrite64,
- StatInt_writev,
- StatInt_pwritev,
- StatInt_pwritev64,
- StatInt_send,
- StatInt_sendmsg,
- StatInt_recv,
- StatInt_recvmsg,
- StatInt_unlink,
- StatInt_fopen,
- StatInt_freopen,
- StatInt_fclose,
- StatInt_fread,
- StatInt_fwrite,
- StatInt_fflush,
- StatInt_abort,
- StatInt_puts,
- StatInt_rmdir,
- StatInt_opendir,
- StatInt_epoll_ctl,
- StatInt_epoll_wait,
- StatInt_poll,
- StatInt_ppoll,
- StatInt_sigaction,
- StatInt_signal,
- StatInt_sigsuspend,
- StatInt_raise,
- StatInt_kill,
- StatInt_pthread_kill,
- StatInt_sleep,
- StatInt_usleep,
- StatInt_nanosleep,
- StatInt_gettimeofday,
- StatInt_fork,
- StatInt_vscanf,
- StatInt_vsscanf,
- StatInt_vfscanf,
- StatInt_scanf,
- StatInt_sscanf,
- StatInt_fscanf,
- StatInt___isoc99_vscanf,
- StatInt___isoc99_vsscanf,
- StatInt___isoc99_vfscanf,
- StatInt___isoc99_scanf,
- StatInt___isoc99_sscanf,
- StatInt___isoc99_fscanf,
- StatInt_on_exit,
- StatInt___cxa_atexit,
- StatInt_localtime,
- StatInt_localtime_r,
- StatInt_gmtime,
- StatInt_gmtime_r,
- StatInt_ctime,
- StatInt_ctime_r,
- StatInt_asctime,
- StatInt_asctime_r,
- StatInt_strptime,
- StatInt_frexp,
- StatInt_frexpf,
- StatInt_frexpl,
- StatInt_getpwnam,
- StatInt_getpwuid,
- StatInt_getgrnam,
- StatInt_getgrgid,
- StatInt_getpwnam_r,
- StatInt_getpwuid_r,
- StatInt_getgrnam_r,
- StatInt_getgrgid_r,
- StatInt_clock_getres,
- StatInt_clock_gettime,
- StatInt_clock_settime,
- StatInt_getitimer,
- StatInt_setitimer,
- StatInt_time,
- StatInt_glob,
- StatInt_glob64,
- StatInt_wait,
- StatInt_waitid,
- StatInt_waitpid,
- StatInt_wait3,
- StatInt_wait4,
- StatInt_inet_ntop,
- StatInt_inet_pton,
- StatInt_inet_aton,
- StatInt_getaddrinfo,
- StatInt_getnameinfo,
- StatInt_getsockname,
- StatInt_gethostent,
- StatInt_gethostbyname,
- StatInt_gethostbyname2,
- StatInt_gethostbyaddr,
- StatInt_gethostent_r,
- StatInt_gethostbyname_r,
- StatInt_gethostbyname2_r,
- StatInt_gethostbyaddr_r,
- StatInt_getsockopt,
- StatInt_modf,
- StatInt_modff,
- StatInt_modfl,
- StatInt_getpeername,
- StatInt_ioctl,
- StatInt_sysinfo,
- StatInt_readdir,
- StatInt_readdir64,
- StatInt_readdir_r,
- StatInt_readdir64_r,
- StatInt_ptrace,
- StatInt_setlocale,
- StatInt_getcwd,
- StatInt_get_current_dir_name,
- StatInt_strtoimax,
- StatInt_strtoumax,
- StatInt_mbstowcs,
- StatInt_mbsrtowcs,
- StatInt_mbsnrtowcs,
- StatInt_wcstombs,
- StatInt_wcsrtombs,
- StatInt_wcsnrtombs,
- StatInt_tcgetattr,
- StatInt_realpath,
- StatInt_canonicalize_file_name,
- StatInt_confstr,
- StatInt_sched_getaffinity,
- StatInt_strerror,
- StatInt_strerror_r,
- StatInt___xpg_strerror_r,
- StatInt_scandir,
- StatInt_scandir64,
- StatInt_getgroups,
- StatInt_wordexp,
- StatInt_sigwait,
- StatInt_sigwaitinfo,
- StatInt_sigtimedwait,
- StatInt_sigemptyset,
- StatInt_sigfillset,
- StatInt_sigpending,
- StatInt_sigprocmask,
- StatInt_backtrace,
- StatInt_backtrace_symbols,
- StatInt_dlopen,
- StatInt_dlclose,
- StatInt_getmntent,
- StatInt_getmntent_r,
- StatInt_statfs,
- StatInt_statfs64,
- StatInt_fstatfs,
- StatInt_fstatfs64,
- StatInt_statvfs,
- StatInt_statvfs64,
- StatInt_fstatvfs,
- StatInt_fstatvfs64,
- StatInt_initgroups,
- StatInt_ether_ntoa,
- StatInt_ether_aton,
- StatInt_ether_ntoa_r,
- StatInt_ether_aton_r,
- StatInt_ether_ntohost,
- StatInt_ether_hostton,
- StatInt_ether_line,
- StatInt_shmctl,
- StatInt_random_r,
- StatInt_tmpnam,
- StatInt_tmpnam_r,
- StatInt_tempnam,
- StatInt_sincos,
- StatInt_sincosf,
- StatInt_sincosl,
- StatInt_remquo,
- StatInt_remquof,
- StatInt_remquol,
- StatInt_lgamma,
- StatInt_lgammaf,
- StatInt_lgammal,
- StatInt_lgamma_r,
- StatInt_lgammaf_r,
- StatInt_lgammal_r,
- StatInt_drand48_r,
- StatInt_lrand48_r,
- StatInt_getline,
- StatInt_getdelim,
- StatInt_iconv,
- StatInt_times,
-
- StatInt_pthread_attr_getdetachstate,
- StatInt_pthread_attr_getguardsize,
- StatInt_pthread_attr_getschedparam,
- StatInt_pthread_attr_getschedpolicy,
- StatInt_pthread_attr_getinheritsched,
- StatInt_pthread_attr_getscope,
- StatInt_pthread_attr_getstacksize,
- StatInt_pthread_attr_getstack,
- StatInt_pthread_attr_getaffinity_np,
-
// Dynamic annotations.
StatAnnotation,
StatAnnotateHappensBefore,
@@ -472,6 +168,7 @@ enum StatType {
StatMtxAtExit,
StatMtxMBlock,
StatMtxJavaMBlock,
+ StatMtxDeadlockDetector,
StatMtxFD,
// This must be the last.
diff --git a/libsanitizer/tsan/tsan_suppressions.cc b/libsanitizer/tsan/tsan_suppressions.cc
index fa0c30dc28..76460d90d9 100644
--- a/libsanitizer/tsan/tsan_suppressions.cc
+++ b/libsanitizer/tsan/tsan_suppressions.cc
@@ -39,55 +39,16 @@ extern "C" const char *WEAK __tsan_default_suppressions() {
namespace __tsan {
-static SuppressionContext* g_ctx;
-
-static char *ReadFile(const char *filename) {
- if (filename == 0 || filename[0] == 0)
- return 0;
- InternalScopedBuffer<char> tmp(4*1024);
- if (filename[0] == '/' || GetPwd() == 0)
- internal_snprintf(tmp.data(), tmp.size(), "%s", filename);
- else
- internal_snprintf(tmp.data(), tmp.size(), "%s/%s", GetPwd(), filename);
- uptr openrv = OpenFile(tmp.data(), false);
- if (internal_iserror(openrv)) {
- Printf("ThreadSanitizer: failed to open suppressions file '%s'\n",
- tmp.data());
- Die();
- }
- fd_t fd = openrv;
- const uptr fsize = internal_filesize(fd);
- if (fsize == (uptr)-1) {
- Printf("ThreadSanitizer: failed to stat suppressions file '%s'\n",
- tmp.data());
- Die();
- }
- char *buf = (char*)internal_alloc(MBlockSuppression, fsize + 1);
- if (fsize != internal_read(fd, buf, fsize)) {
- Printf("ThreadSanitizer: failed to read suppressions file '%s'\n",
- tmp.data());
- Die();
- }
- internal_close(fd);
- buf[fsize] = 0;
- return buf;
-}
+static bool suppressions_inited = false;
void InitializeSuppressions() {
- ALIGNED(64) static char placeholder_[sizeof(SuppressionContext)];
- g_ctx = new(placeholder_) SuppressionContext;
- const char *supp = ReadFile(flags()->suppressions);
- g_ctx->Parse(supp);
+ CHECK(!suppressions_inited);
+ SuppressionContext::InitIfNecessary();
#ifndef TSAN_GO
- supp = __tsan_default_suppressions();
- g_ctx->Parse(supp);
- g_ctx->Parse(std_suppressions);
+ SuppressionContext::Get()->Parse(__tsan_default_suppressions());
+ SuppressionContext::Get()->Parse(std_suppressions);
#endif
-}
-
-SuppressionContext *GetSuppressionContext() {
- CHECK_NE(g_ctx, 0);
- return g_ctx;
+ suppressions_inited = true;
}
SuppressionType conv(ReportType typ) {
@@ -97,62 +58,74 @@ SuppressionType conv(ReportType typ) {
return SuppressionRace;
else if (typ == ReportTypeUseAfterFree)
return SuppressionRace;
+ else if (typ == ReportTypeVptrUseAfterFree)
+ return SuppressionRace;
else if (typ == ReportTypeThreadLeak)
return SuppressionThread;
else if (typ == ReportTypeMutexDestroyLocked)
return SuppressionMutex;
+ else if (typ == ReportTypeMutexDoubleLock)
+ return SuppressionMutex;
+ else if (typ == ReportTypeMutexBadUnlock)
+ return SuppressionMutex;
+ else if (typ == ReportTypeMutexBadReadLock)
+ return SuppressionMutex;
+ else if (typ == ReportTypeMutexBadReadUnlock)
+ return SuppressionMutex;
else if (typ == ReportTypeSignalUnsafe)
return SuppressionSignal;
else if (typ == ReportTypeErrnoInSignal)
return SuppressionNone;
+ else if (typ == ReportTypeDeadlock)
+ return SuppressionDeadlock;
Printf("ThreadSanitizer: unknown report type %d\n", typ),
Die();
}
uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp) {
- CHECK(g_ctx);
- if (!g_ctx->SuppressionCount() || stack == 0) return 0;
+ if (!SuppressionContext::Get()->SuppressionCount() || stack == 0 ||
+ !stack->suppressable)
+ return 0;
SuppressionType stype = conv(typ);
if (stype == SuppressionNone)
return 0;
Suppression *s;
for (const ReportStack *frame = stack; frame; frame = frame->next) {
- if (g_ctx->Match(frame->func, stype, &s) ||
- g_ctx->Match(frame->file, stype, &s) ||
- g_ctx->Match(frame->module, stype, &s)) {
+ const AddressInfo &info = frame->info;
+ if (SuppressionContext::Get()->Match(info.function, stype, &s) ||
+ SuppressionContext::Get()->Match(info.file, stype, &s) ||
+ SuppressionContext::Get()->Match(info.module, stype, &s)) {
DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ);
s->hit_count++;
*sp = s;
- return frame->pc;
+ return info.address;
}
}
return 0;
}
uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp) {
- CHECK(g_ctx);
- if (!g_ctx->SuppressionCount() || loc == 0 ||
- loc->type != ReportLocationGlobal)
+ if (!SuppressionContext::Get()->SuppressionCount() || loc == 0 ||
+ loc->type != ReportLocationGlobal || !loc->suppressable)
return 0;
SuppressionType stype = conv(typ);
if (stype == SuppressionNone)
return 0;
Suppression *s;
- if (g_ctx->Match(loc->name, stype, &s) ||
- g_ctx->Match(loc->file, stype, &s) ||
- g_ctx->Match(loc->module, stype, &s)) {
+ const DataInfo &global = loc->global;
+ if (SuppressionContext::Get()->Match(global.name, stype, &s) ||
+ SuppressionContext::Get()->Match(global.module, stype, &s)) {
DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ);
s->hit_count++;
*sp = s;
- return loc->addr;
+ return global.start;
}
return 0;
}
void PrintMatchedSuppressions() {
- CHECK(g_ctx);
InternalMmapVector<Suppression *> matched(1);
- g_ctx->GetMatched(&matched);
+ SuppressionContext::Get()->GetMatched(&matched);
if (!matched.size())
return;
int hit_count = 0;
diff --git a/libsanitizer/tsan/tsan_suppressions.h b/libsanitizer/tsan/tsan_suppressions.h
index 2939e9a8b9..e38d81ece8 100644
--- a/libsanitizer/tsan/tsan_suppressions.h
+++ b/libsanitizer/tsan/tsan_suppressions.h
@@ -20,7 +20,6 @@ void InitializeSuppressions();
void PrintMatchedSuppressions();
uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp);
uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp);
-SuppressionContext *GetSuppressionContext();
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_symbolize.cc b/libsanitizer/tsan/tsan_symbolize.cc
index c0e794be2d..795f838991 100644
--- a/libsanitizer/tsan/tsan_symbolize.cc
+++ b/libsanitizer/tsan/tsan_symbolize.cc
@@ -24,46 +24,16 @@ void EnterSymbolizer() {
ThreadState *thr = cur_thread();
CHECK(!thr->in_symbolizer);
thr->in_symbolizer = true;
+ thr->ignore_interceptors++;
}
void ExitSymbolizer() {
ThreadState *thr = cur_thread();
CHECK(thr->in_symbolizer);
thr->in_symbolizer = false;
+ thr->ignore_interceptors--;
}
-ReportStack *NewReportStackEntry(uptr addr) {
- ReportStack *ent = (ReportStack*)internal_alloc(MBlockReportStack,
- sizeof(ReportStack));
- internal_memset(ent, 0, sizeof(*ent));
- ent->pc = addr;
- return ent;
-}
-
-static ReportStack *NewReportStackEntry(const AddressInfo &info) {
- ReportStack *ent = NewReportStackEntry(info.address);
- ent->module = StripModuleName(info.module);
- ent->offset = info.module_offset;
- if (info.function)
- ent->func = internal_strdup(info.function);
- if (info.file)
- ent->file = internal_strdup(info.file);
- ent->line = info.line;
- ent->col = info.column;
- return ent;
-}
-
-
-
-
// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external() will be called.
const uptr kExternalPCBit = 1ULL << 60;
@@ -91,34 +61,29 @@ ReportStack *SymbolizeCode(uptr addr) {
static char func_buf[1024];
static char file_buf[1024];
int line, col;
+ ReportStack *ent = ReportStack::New(addr);
if (!__tsan_symbolize_external(addr, func_buf, sizeof(func_buf),
file_buf, sizeof(file_buf), &line, &col))
- return NewReportStackEntry(addr);
- ReportStack *ent = NewReportStackEntry(addr);
- ent->module = 0;
- ent->offset = 0;
- ent->func = internal_strdup(func_buf);
- ent->file = internal_strdup(file_buf);
- ent->line = line;
- ent->col = col;
+ return ent;
+ ent->info.function = internal_strdup(func_buf);
+ ent->info.file = internal_strdup(file_buf);
+ ent->info.line = line;
+ ent->info.column = col;
return ent;
}
- if (!Symbolizer::Get()->IsAvailable())
- return SymbolizeCodeAddr2Line(addr);
static const uptr kMaxAddrFrames = 16;
InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
for (uptr i = 0; i < kMaxAddrFrames; i++)
new(&addr_frames[i]) AddressInfo();
- uptr addr_frames_num = Symbolizer::Get()->SymbolizeCode(
+ uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC(
addr, addr_frames.data(), kMaxAddrFrames);
if (addr_frames_num == 0)
- return NewReportStackEntry(addr);
+ return ReportStack::New(addr);
ReportStack *top = 0;
ReportStack *bottom = 0;
for (uptr i = 0; i < addr_frames_num; i++) {
- ReportStack *cur_entry = NewReportStackEntry(addr_frames[i]);
- CHECK(cur_entry);
- addr_frames[i].Clear();
+ ReportStack *cur_entry = ReportStack::New(addr);
+ cur_entry->info = addr_frames[i];
if (i == 0)
top = cur_entry;
else
@@ -129,28 +94,16 @@ ReportStack *SymbolizeCode(uptr addr) {
}
ReportLocation *SymbolizeData(uptr addr) {
- if (!Symbolizer::Get()->IsAvailable())
- return 0;
DataInfo info;
- if (!Symbolizer::Get()->SymbolizeData(addr, &info))
+ if (!Symbolizer::GetOrInit()->SymbolizeData(addr, &info))
return 0;
- ReportLocation *ent = (ReportLocation*)internal_alloc(MBlockReportStack,
- sizeof(ReportLocation));
- internal_memset(ent, 0, sizeof(*ent));
- ent->type = ReportLocationGlobal;
- ent->module = StripModuleName(info.module);
- ent->offset = info.module_offset;
- if (info.name)
- ent->name = internal_strdup(info.name);
- ent->addr = info.start;
- ent->size = info.size;
+ ReportLocation *ent = ReportLocation::New(ReportLocationGlobal);
+ ent->global = info;
return ent;
}
void SymbolizeFlush() {
- if (!Symbolizer::Get()->IsAvailable())
- return;
- Symbolizer::Get()->Flush();
+ Symbolizer::GetOrInit()->Flush();
}
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_symbolize.h b/libsanitizer/tsan/tsan_symbolize.h
index 892c11c066..0d9077ed37 100644
--- a/libsanitizer/tsan/tsan_symbolize.h
+++ b/libsanitizer/tsan/tsan_symbolize.h
@@ -22,8 +22,6 @@ ReportStack *SymbolizeCode(uptr addr);
ReportLocation *SymbolizeData(uptr addr);
void SymbolizeFlush();
-ReportStack *SymbolizeCodeAddr2Line(uptr addr);
-
ReportStack *NewReportStackEntry(uptr addr);
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_symbolize_addr2line_linux.cc b/libsanitizer/tsan/tsan_symbolize_addr2line_linux.cc
deleted file mode 100644
index c278a42f31..0000000000
--- a/libsanitizer/tsan/tsan_symbolize_addr2line_linux.cc
+++ /dev/null
@@ -1,191 +0,0 @@
-//===-- tsan_symbolize_addr2line.cc ---------------------------------------===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_libc.h"
-#include "tsan_symbolize.h"
-#include "tsan_mman.h"
-#include "tsan_rtl.h"
-#include "tsan_platform.h"
-
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <errno.h>
-#include <link.h>
-#include <linux/limits.h>
-#include <sys/types.h>
-
-namespace __tsan {
-
-struct ModuleDesc {
- const char *fullname;
- const char *name;
- uptr base;
- int inp_fd;
- int out_fd;
-};
-
-struct SectionDesc {
- SectionDesc *next;
- ModuleDesc *module;
- uptr base;
- uptr end;
-};
-
-struct DlIteratePhdrCtx {
- SectionDesc *sections;
- bool is_first;
-};
-
-static void NOINLINE InitModule(ModuleDesc *m) {
- int outfd[2] = {};
- if (pipe(&outfd[0])) {
- Printf("ThreadSanitizer: outfd pipe() failed (%d)\n", errno);
- Die();
- }
- int infd[2] = {};
- if (pipe(&infd[0])) {
- Printf("ThreadSanitizer: infd pipe() failed (%d)\n", errno);
- Die();
- }
- int pid = fork();
- if (pid == 0) {
- internal_close(STDOUT_FILENO);
- internal_close(STDIN_FILENO);
- internal_dup2(outfd[0], STDIN_FILENO);
- internal_dup2(infd[1], STDOUT_FILENO);
- internal_close(outfd[0]);
- internal_close(outfd[1]);
- internal_close(infd[0]);
- internal_close(infd[1]);
- for (int fd = getdtablesize(); fd > 2; fd--)
- internal_close(fd);
- execl("/usr/bin/addr2line", "/usr/bin/addr2line", "-Cfe", m->fullname, 0);
- _exit(0);
- } else if (pid < 0) {
- Printf("ThreadSanitizer: failed to fork symbolizer\n");
- Die();
- }
- internal_close(outfd[0]);
- internal_close(infd[1]);
- m->inp_fd = infd[0];
- m->out_fd = outfd[1];
-}
-
-static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
- DlIteratePhdrCtx *ctx = (DlIteratePhdrCtx*)arg;
- InternalScopedBuffer<char> tmp(128);
- if (ctx->is_first) {
- internal_snprintf(tmp.data(), tmp.size(), "/proc/%d/exe",
- (int)internal_getpid());
- info->dlpi_name = tmp.data();
- }
- ctx->is_first = false;
- if (info->dlpi_name == 0 || info->dlpi_name[0] == 0)
- return 0;
- ModuleDesc *m = (ModuleDesc*)internal_alloc(MBlockReportStack,
- sizeof(ModuleDesc));
- m->fullname = internal_strdup(info->dlpi_name);
- m->name = internal_strrchr(m->fullname, '/');
- if (m->name)
- m->name += 1;
- else
- m->name = m->fullname;
- m->base = (uptr)info->dlpi_addr;
- m->inp_fd = -1;
- m->out_fd = -1;
- DPrintf2("Module %s %zx\n", m->name, m->base);
- for (int i = 0; i < info->dlpi_phnum; i++) {
- const Elf64_Phdr *s = &info->dlpi_phdr[i];
- DPrintf2(" Section p_type=%zx p_offset=%zx p_vaddr=%zx p_paddr=%zx"
- " p_filesz=%zx p_memsz=%zx p_flags=%zx p_align=%zx\n",
- (uptr)s->p_type, (uptr)s->p_offset, (uptr)s->p_vaddr,
- (uptr)s->p_paddr, (uptr)s->p_filesz, (uptr)s->p_memsz,
- (uptr)s->p_flags, (uptr)s->p_align);
- if (s->p_type != PT_LOAD)
- continue;
- SectionDesc *sec = (SectionDesc*)internal_alloc(MBlockReportStack,
- sizeof(SectionDesc));
- sec->module = m;
- sec->base = info->dlpi_addr + s->p_vaddr;
- sec->end = sec->base + s->p_memsz;
- sec->next = ctx->sections;
- ctx->sections = sec;
- DPrintf2(" Section %zx-%zx\n", sec->base, sec->end);
- }
- return 0;
-}
-
-static SectionDesc *InitSections() {
- DlIteratePhdrCtx ctx = {0, true};
- dl_iterate_phdr(dl_iterate_phdr_cb, &ctx);
- return ctx.sections;
-}
-
-static SectionDesc *GetSectionDesc(uptr addr) {
- static SectionDesc *sections = 0;
- if (sections == 0)
- sections = InitSections();
- for (SectionDesc *s = sections; s; s = s->next) {
- if (addr >= s->base && addr < s->end) {
- if (s->module->inp_fd == -1)
- InitModule(s->module);
- return s;
- }
- }
- return 0;
-}
-
-ReportStack *SymbolizeCodeAddr2Line(uptr addr) {
- SectionDesc *s = GetSectionDesc(addr);
- if (s == 0)
- return NewReportStackEntry(addr);
- ModuleDesc *m = s->module;
- uptr offset = addr - m->base;
- char addrstr[32];
- internal_snprintf(addrstr, sizeof(addrstr), "%p\n", (void*)offset);
- if (0 >= internal_write(m->out_fd, addrstr, internal_strlen(addrstr))) {
- Printf("ThreadSanitizer: can't write from symbolizer (%d, %d)\n",
- m->out_fd, errno);
- Die();
- }
- InternalScopedBuffer<char> func(1024);
- ssize_t len = internal_read(m->inp_fd, func.data(), func.size() - 1);
- if (len <= 0) {
- Printf("ThreadSanitizer: can't read from symbolizer (%d, %d)\n",
- m->inp_fd, errno);
- Die();
- }
- func.data()[len] = 0;
- ReportStack *res = NewReportStackEntry(addr);
- res->module = internal_strdup(m->name);
- res->offset = offset;
- char *pos = (char*)internal_strchr(func.data(), '\n');
- if (pos && func[0] != '?') {
- res->func = (char*)internal_alloc(MBlockReportStack, pos - func.data() + 1);
- internal_memcpy(res->func, func.data(), pos - func.data());
- res->func[pos - func.data()] = 0;
- char *pos2 = (char*)internal_strchr(pos, ':');
- if (pos2) {
- res->file = (char*)internal_alloc(MBlockReportStack, pos2 - pos - 1 + 1);
- internal_memcpy(res->file, pos + 1, pos2 - pos - 1);
- res->file[pos2 - pos - 1] = 0;
- res->line = atoi(pos2 + 1);
- }
- }
- return res;
-}
-
-ReportStack *SymbolizeDataAddr2Line(uptr addr) {
- return 0;
-}
-
-} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_sync.cc b/libsanitizer/tsan/tsan_sync.cc
index 0c5be105f6..2209199ac4 100644
--- a/libsanitizer/tsan/tsan_sync.cc
+++ b/libsanitizer/tsan/tsan_sync.cc
@@ -15,290 +15,209 @@
namespace __tsan {
-SyncVar::SyncVar(uptr addr, u64 uid)
- : mtx(MutexTypeSyncVar, StatMtxSyncVar)
- , addr(addr)
- , uid(uid)
- , owner_tid(kInvalidTid)
- , last_lock()
- , recursion()
- , is_rw()
- , is_recursive()
- , is_broken()
- , is_linker_init() {
+void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
+
+SyncVar::SyncVar()
+ : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
+ Reset(0);
+}
+
+void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
+ this->addr = addr;
+ this->uid = uid;
+ this->next = 0;
+
+ creation_stack_id = 0;
+ if (kCppMode) // Go does not use them
+ creation_stack_id = CurrentStackId(thr, pc);
+ if (common_flags()->detect_deadlocks)
+ DDMutexInit(thr, pc, this);
+}
+
+void SyncVar::Reset(ThreadState *thr) {
+ uid = 0;
+ creation_stack_id = 0;
+ owner_tid = kInvalidTid;
+ last_lock = 0;
+ recursion = 0;
+ is_rw = 0;
+ is_recursive = 0;
+ is_broken = 0;
+ is_linker_init = 0;
+
+ if (thr == 0) {
+ CHECK_EQ(clock.size(), 0);
+ CHECK_EQ(read_clock.size(), 0);
+ } else {
+ clock.Reset(&thr->clock_cache);
+ read_clock.Reset(&thr->clock_cache);
+ }
}
-SyncTab::Part::Part()
- : mtx(MutexTypeSyncTab, StatMtxSyncTab)
- , val() {
+MetaMap::MetaMap() {
+ atomic_store(&uid_gen_, 0, memory_order_relaxed);
}
-SyncTab::SyncTab() {
+void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+ u32 idx = block_alloc_.Alloc(&thr->block_cache);
+ MBlock *b = block_alloc_.Map(idx);
+ b->siz = sz;
+ b->tid = thr->tid;
+ b->stk = CurrentStackId(thr, pc);
+ u32 *meta = MemToMeta(p);
+ DCHECK_EQ(*meta, 0);
+ *meta = idx | kFlagBlock;
}
-SyncTab::~SyncTab() {
- for (int i = 0; i < kPartCount; i++) {
- while (tab_[i].val) {
- SyncVar *tmp = tab_[i].val;
- tab_[i].val = tmp->next;
- DestroyAndFree(tmp);
+uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
+ MBlock* b = GetBlock(p);
+ if (b == 0)
+ return 0;
+ uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
+ FreeRange(thr, pc, p, sz);
+ return sz;
+}
+
+void MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+ u32 *meta = MemToMeta(p);
+ u32 *end = MemToMeta(p + sz);
+ if (end == meta)
+ end++;
+ for (; meta < end; meta++) {
+ u32 idx = *meta;
+ *meta = 0;
+ for (;;) {
+ if (idx == 0)
+ break;
+ if (idx & kFlagBlock) {
+ block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
+ break;
+ } else if (idx & kFlagSync) {
+ DCHECK(idx & kFlagSync);
+ SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
+ u32 next = s->next;
+ s->Reset(thr);
+ sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
+ idx = next;
+ } else {
+ CHECK(0);
+ }
}
}
}
-SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
- uptr addr, bool write_lock) {
- return GetAndLock(thr, pc, addr, write_lock, true);
+MBlock* MetaMap::GetBlock(uptr p) {
+ u32 *meta = MemToMeta(p);
+ u32 idx = *meta;
+ for (;;) {
+ if (idx == 0)
+ return 0;
+ if (idx & kFlagBlock)
+ return block_alloc_.Map(idx & ~kFlagMask);
+ DCHECK(idx & kFlagSync);
+ SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
+ idx = s->next;
+ }
}
-SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
- return GetAndLock(0, 0, addr, write_lock, false);
+SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
+ uptr addr, bool write_lock) {
+ return GetAndLock(thr, pc, addr, write_lock, true);
}
-SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
- StatInc(thr, StatSyncCreated);
- void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
- const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
- SyncVar *res = new(mem) SyncVar(addr, uid);
-#ifndef TSAN_GO
- res->creation_stack_id = CurrentStackId(thr, pc);
-#endif
- return res;
+SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) {
+ return GetAndLock(0, 0, addr, true, false);
}
-SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
+SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
uptr addr, bool write_lock, bool create) {
-#ifndef TSAN_GO
- { // NOLINT
- SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create);
- if (res)
- return res;
- }
-
- // Here we ask only PrimaryAllocator, because
- // SecondaryAllocator::PointerIsMine() is slow and we have fallback on
- // the hashmap anyway.
- if (PrimaryAllocator::PointerIsMine((void*)addr)) {
- MBlock *b = user_mblock(thr, (void*)addr);
- CHECK_NE(b, 0);
- MBlock::ScopedLock l(b);
- SyncVar *res = 0;
- for (res = b->ListHead(); res; res = res->next) {
- if (res->addr == addr)
+ u32 *meta = MemToMeta(addr);
+ u32 idx0 = *meta;
+ u32 myidx = 0;
+ SyncVar *mys = 0;
+ for (;;) {
+ u32 idx = idx0;
+ for (;;) {
+ if (idx == 0)
break;
- }
- if (res == 0) {
- if (!create)
- return 0;
- res = Create(thr, pc, addr);
- b->ListPush(res);
- }
- if (write_lock)
- res->mtx.Lock();
- else
- res->mtx.ReadLock();
- return res;
- }
-#endif
-
- Part *p = &tab_[PartIdx(addr)];
- {
- ReadLock l(&p->mtx);
- for (SyncVar *res = p->val; res; res = res->next) {
- if (res->addr == addr) {
+ if (idx & kFlagBlock)
+ break;
+ DCHECK(idx & kFlagSync);
+ SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
+ if (s->addr == addr) {
+ if (myidx != 0) {
+ mys->Reset(thr);
+ sync_alloc_.Free(&thr->sync_cache, myidx);
+ }
if (write_lock)
- res->mtx.Lock();
+ s->mtx.Lock();
else
- res->mtx.ReadLock();
- return res;
+ s->mtx.ReadLock();
+ return s;
}
+ idx = s->next;
}
- }
- if (!create)
- return 0;
- {
- Lock l(&p->mtx);
- SyncVar *res = p->val;
- for (; res; res = res->next) {
- if (res->addr == addr)
- break;
- }
- if (res == 0) {
- res = Create(thr, pc, addr);
- res->next = p->val;
- p->val = res;
+ if (!create)
+ return 0;
+ if (*meta != idx0) {
+ idx0 = *meta;
+ continue;
}
- if (write_lock)
- res->mtx.Lock();
- else
- res->mtx.ReadLock();
- return res;
- }
-}
-SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
-#ifndef TSAN_GO
- { // NOLINT
- SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr);
- if (res)
- return res;
- }
- if (PrimaryAllocator::PointerIsMine((void*)addr)) {
- MBlock *b = user_mblock(thr, (void*)addr);
- CHECK_NE(b, 0);
- SyncVar *res = 0;
- {
- MBlock::ScopedLock l(b);
- res = b->ListHead();
- if (res) {
- if (res->addr == addr) {
- if (res->is_linker_init)
- return 0;
- b->ListPop();
- } else {
- SyncVar **prev = &res->next;
- res = *prev;
- while (res) {
- if (res->addr == addr) {
- if (res->is_linker_init)
- return 0;
- *prev = res->next;
- break;
- }
- prev = &res->next;
- res = *prev;
- }
- }
- if (res) {
- StatInc(thr, StatSyncDestroyed);
- res->mtx.Lock();
- res->mtx.Unlock();
- }
- }
+ if (myidx == 0) {
+ const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
+ myidx = sync_alloc_.Alloc(&thr->sync_cache);
+ mys = sync_alloc_.Map(myidx);
+ mys->Init(thr, pc, addr, uid);
}
- return res;
- }
-#endif
-
- Part *p = &tab_[PartIdx(addr)];
- SyncVar *res = 0;
- {
- Lock l(&p->mtx);
- SyncVar **prev = &p->val;
- res = *prev;
- while (res) {
- if (res->addr == addr) {
- if (res->is_linker_init)
- return 0;
- *prev = res->next;
- break;
- }
- prev = &res->next;
- res = *prev;
+ mys->next = idx0;
+ if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
+ myidx | kFlagSync, memory_order_release)) {
+ if (write_lock)
+ mys->mtx.Lock();
+ else
+ mys->mtx.ReadLock();
+ return mys;
}
}
- if (res) {
- StatInc(thr, StatSyncDestroyed);
- res->mtx.Lock();
- res->mtx.Unlock();
- }
- return res;
-}
-
-int SyncTab::PartIdx(uptr addr) {
- return (addr >> 3) % kPartCount;
-}
-
-StackTrace::StackTrace()
- : n_()
- , s_()
- , c_() {
}
-StackTrace::StackTrace(uptr *buf, uptr cnt)
- : n_()
- , s_(buf)
- , c_(cnt) {
- CHECK_NE(buf, 0);
- CHECK_NE(cnt, 0);
-}
-
-StackTrace::~StackTrace() {
- Reset();
-}
-
-void StackTrace::Reset() {
- if (s_ && !c_) {
- CHECK_NE(n_, 0);
- internal_free(s_);
- s_ = 0;
- }
- n_ = 0;
-}
-
-void StackTrace::Init(const uptr *pcs, uptr cnt) {
- Reset();
- if (cnt == 0)
- return;
- if (c_) {
- CHECK_NE(s_, 0);
- CHECK_LE(cnt, c_);
- } else {
- s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
+void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
+ // src and dst can overlap,
+ // there are no concurrent accesses to the regions (e.g. stop-the-world).
+ CHECK_NE(src, dst);
+ CHECK_NE(sz, 0);
+ uptr diff = dst - src;
+ u32 *src_meta = MemToMeta(src);
+ u32 *dst_meta = MemToMeta(dst);
+ u32 *src_meta_end = MemToMeta(src + sz);
+ uptr inc = 1;
+ if (dst > src) {
+ src_meta = MemToMeta(src + sz) - 1;
+ dst_meta = MemToMeta(dst + sz) - 1;
+ src_meta_end = MemToMeta(src) - 1;
+ inc = -1;
}
- n_ = cnt;
- internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
-}
-
-void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
- Reset();
- n_ = thr->shadow_stack_pos - thr->shadow_stack;
- if (n_ + !!toppc == 0)
- return;
- uptr start = 0;
- if (c_) {
- CHECK_NE(s_, 0);
- if (n_ + !!toppc > c_) {
- start = n_ - c_ + !!toppc;
- n_ = c_ - !!toppc;
- }
- } else {
- // Cap potentially huge stacks.
- if (n_ + !!toppc > kTraceStackSize) {
- start = n_ - kTraceStackSize + !!toppc;
- n_ = kTraceStackSize - !!toppc;
+ for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
+ CHECK_EQ(*dst_meta, 0);
+ u32 idx = *src_meta;
+ *src_meta = 0;
+ *dst_meta = idx;
+ // Patch the addresses in sync objects.
+ while (idx != 0) {
+ if (idx & kFlagBlock)
+ break;
+ CHECK(idx & kFlagSync);
+ SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
+ s->addr += diff;
+ idx = s->next;
}
- s_ = (uptr*)internal_alloc(MBlockStackTrace,
- (n_ + !!toppc) * sizeof(s_[0]));
- }
- for (uptr i = 0; i < n_; i++)
- s_[i] = thr->shadow_stack[start + i];
- if (toppc) {
- s_[n_] = toppc;
- n_++;
}
}
-void StackTrace::CopyFrom(const StackTrace& other) {
- Reset();
- Init(other.Begin(), other.Size());
-}
-
-bool StackTrace::IsEmpty() const {
- return n_ == 0;
-}
-
-uptr StackTrace::Size() const {
- return n_;
-}
-
-uptr StackTrace::Get(uptr i) const {
- CHECK_LT(i, n_);
- return s_[i];
-}
-
-const uptr *StackTrace::Begin() const {
- return s_;
+void MetaMap::OnThreadIdle(ThreadState *thr) {
+ block_alloc_.FlushCache(&thr->block_cache);
+ sync_alloc_.FlushCache(&thr->sync_cache);
}
} // namespace __tsan
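[Aside, not part of the patch: the MoveMemory comment above notes that src and dst may overlap and that the copy direction is flipped when dst lies above src. The standalone C++ sketch below illustrates only that direction trick; MoveCells and the fixed-size array are invented for the example.]

    #include <cstdio>

    // Copy n cells from src to dst inside the same array, clearing the source.
    // When dst > src the regions may overlap from the high end, so copy
    // back-to-front; otherwise copy front-to-back.
    void MoveCells(unsigned *cells, unsigned src, unsigned dst, unsigned n) {
      if (dst > src) {
        for (unsigned i = n; i-- > 0;) { cells[dst + i] = cells[src + i]; cells[src + i] = 0; }
      } else {
        for (unsigned i = 0; i < n; i++) { cells[dst + i] = cells[src + i]; cells[src + i] = 0; }
      }
    }

    int main() {
      unsigned cells[8] = {1, 2, 3, 4, 0, 0, 0, 0};
      MoveCells(cells, 0, 2, 4);                       // overlapping move to a higher address
      for (unsigned c : cells) std::printf("%u ", c);  // prints: 0 0 1 2 3 4 0 0
      std::printf("\n");
    }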
diff --git a/libsanitizer/tsan/tsan_sync.h b/libsanitizer/tsan/tsan_sync.h
index 2867a8ac79..6ed3715ee0 100644
--- a/libsanitizer/tsan/tsan_sync.h
+++ b/libsanitizer/tsan/tsan_sync.h
@@ -13,50 +13,22 @@
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
-#include "tsan_clock.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "tsan_defs.h"
+#include "tsan_clock.h"
#include "tsan_mutex.h"
+#include "tsan_dense_alloc.h"
namespace __tsan {
-class SlabCache;
-
-class StackTrace {
- public:
- StackTrace();
- // Initialized the object in "static mode",
- // in this mode it never calls malloc/free but uses the provided buffer.
- StackTrace(uptr *buf, uptr cnt);
- ~StackTrace();
- void Reset();
-
- void Init(const uptr *pcs, uptr cnt);
- void ObtainCurrent(ThreadState *thr, uptr toppc);
- bool IsEmpty() const;
- uptr Size() const;
- uptr Get(uptr i) const;
- const uptr *Begin() const;
- void CopyFrom(const StackTrace& other);
-
- private:
- uptr n_;
- uptr *s_;
- const uptr c_;
-
- StackTrace(const StackTrace&);
- void operator = (const StackTrace&);
-};
-
struct SyncVar {
- explicit SyncVar(uptr addr, u64 uid);
+ SyncVar();
static const int kInvalidTid = -1;
+ uptr addr; // overwritten by DenseSlabAlloc freelist
Mutex mtx;
- uptr addr;
- const u64 uid; // Globally unique id.
- SyncClock clock;
- SyncClock read_clock; // Used for rw mutexes only.
+ u64 uid; // Globally unique id.
u32 creation_stack_id;
int owner_tid; // Set only by exclusive owners.
u64 last_lock;
@@ -65,9 +37,16 @@ struct SyncVar {
bool is_recursive;
bool is_broken;
bool is_linker_init;
- SyncVar *next; // In SyncTab hashtable.
+ u32 next; // in MetaMap
+ DDMutex dd;
+ SyncClock read_clock; // Used for rw mutexes only.
+ // The clock is placed last, so that it is situated on a different cache line
+ // with the mtx. This reduces contention for hot sync objects.
+ SyncClock clock;
+
+ void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid);
+ void Reset(ThreadState *thr);
- uptr GetMemoryConsumption();
u64 GetId() const {
// 47 lsb is addr, then 14 bits is low part of uid, then 3 zero bits.
return GetLsb((u64)addr | (uid << 47), 61);
@@ -82,42 +61,39 @@ struct SyncVar {
}
};
-class SyncTab {
+/* MetaMap allows to map arbitrary user pointers onto various descriptors.
+ Currently it maps pointers to heap block descriptors and sync var descs.
+ It uses 1/2 direct shadow, see tsan_platform.h.
+*/
+class MetaMap {
public:
- SyncTab();
- ~SyncTab();
+ MetaMap();
+
+ void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
+ uptr FreeBlock(ThreadState *thr, uptr pc, uptr p);
+ void FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
+ MBlock* GetBlock(uptr p);
SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
uptr addr, bool write_lock);
- SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock);
-
- // If the SyncVar does not exist, returns 0.
- SyncVar* GetAndRemove(ThreadState *thr, uptr pc, uptr addr);
+ SyncVar* GetIfExistsAndLock(uptr addr);
- SyncVar* Create(ThreadState *thr, uptr pc, uptr addr);
+ void MoveMemory(uptr src, uptr dst, uptr sz);
- uptr GetMemoryConsumption(uptr *nsync);
+ void OnThreadIdle(ThreadState *thr);
private:
- struct Part {
- Mutex mtx;
- SyncVar *val;
- char pad[kCacheLineSize - sizeof(Mutex) - sizeof(SyncVar*)]; // NOLINT
- Part();
- };
-
- // FIXME: Implement something more sane.
- static const int kPartCount = 1009;
- Part tab_[kPartCount];
+ static const u32 kFlagMask = 3 << 30;
+ static const u32 kFlagBlock = 1 << 30;
+ static const u32 kFlagSync = 2 << 30;
+ typedef DenseSlabAlloc<MBlock, 1<<16, 1<<12> BlockAlloc;
+ typedef DenseSlabAlloc<SyncVar, 1<<16, 1<<10> SyncAlloc;
+ BlockAlloc block_alloc_;
+ SyncAlloc sync_alloc_;
atomic_uint64_t uid_gen_;
- int PartIdx(uptr addr);
-
- SyncVar* GetAndLock(ThreadState *thr, uptr pc,
- uptr addr, bool write_lock, bool create);
-
- SyncTab(const SyncTab&); // Not implemented.
- void operator = (const SyncTab&); // Not implemented.
+ SyncVar* GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock,
+ bool create);
};
} // namespace __tsan
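[Aside, not part of the patch: the new MetaMap packs a 30-bit DenseSlabAlloc index together with a two-bit tag into each 32-bit meta-shadow cell, using the kFlagBlock/kFlagSync/kFlagMask constants shown above. The standalone sketch below mirrors that encoding; EncodeBlock, EncodeSync and Decode are invented names for illustration.]

    #include <cstdint>
    #include <cstdio>

    using u32 = std::uint32_t;

    const u32 kFlagMask  = 3u << 30;  // top two bits carry the tag
    const u32 kFlagBlock = 1u << 30;  // cell points at a heap-block descriptor
    const u32 kFlagSync  = 2u << 30;  // cell points at the head of a SyncVar chain

    u32 EncodeBlock(u32 slab_idx) { return slab_idx | kFlagBlock; }
    u32 EncodeSync(u32 slab_idx)  { return slab_idx | kFlagSync; }

    void Decode(u32 meta) {
      if (meta == 0)
        std::printf("empty cell\n");
      else if (meta & kFlagBlock)
        std::printf("heap block, slab index %u\n", (unsigned)(meta & ~kFlagMask));
      else if (meta & kFlagSync)
        std::printf("sync var chain head, slab index %u\n", (unsigned)(meta & ~kFlagMask));
    }

    int main() {
      Decode(EncodeBlock(42));  // heap block, slab index 42
      Decode(EncodeSync(7));    // sync var chain head, slab index 7
    }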
diff --git a/libsanitizer/tsan/tsan_trace.h b/libsanitizer/tsan/tsan_trace.h
index 93ed8d907e..8eceb634a5 100644
--- a/libsanitizer/tsan/tsan_trace.h
+++ b/libsanitizer/tsan/tsan_trace.h
@@ -13,7 +13,7 @@
#include "tsan_defs.h"
#include "tsan_mutex.h"
-#include "tsan_sync.h"
+#include "tsan_stack_trace.h"
#include "tsan_mutexset.h"
namespace __tsan {
@@ -40,21 +40,15 @@ enum EventType {
typedef u64 Event;
struct TraceHeader {
- StackTrace stack0; // Start stack for the trace.
- u64 epoch0; // Start epoch for the trace.
- MutexSet mset0;
-#ifndef TSAN_GO
- uptr stack0buf[kTraceStackSize];
-#endif
-
- TraceHeader()
#ifndef TSAN_GO
- : stack0(stack0buf, kTraceStackSize)
+ BufferedStackTrace stack0; // Start stack for the trace.
#else
- : stack0()
+ VarSizeStackTrace stack0;
#endif
- , epoch0() {
- }
+ u64 epoch0; // Start epoch for the trace.
+ MutexSet mset0;
+
+ TraceHeader() : stack0(), epoch0() {}
};
struct Trace {
diff --git a/libsanitizer/tsan/tsan_update_shadow_word_inl.h b/libsanitizer/tsan/tsan_update_shadow_word_inl.h
index 42caf80d34..91def7bb1e 100644
--- a/libsanitizer/tsan/tsan_update_shadow_word_inl.h
+++ b/libsanitizer/tsan/tsan_update_shadow_word_inl.h
@@ -14,8 +14,7 @@
do {
StatInc(thr, StatShadowProcessed);
const unsigned kAccessSize = 1 << kAccessSizeLog;
- unsigned off = cur.ComputeSearchOffset();
- u64 *sp = &shadow_mem[(idx + off) % kShadowCnt];
+ u64 *sp = &shadow_mem[idx];
old = LoadShadow(sp);
if (old.IsZero()) {
StatInc(thr, StatShadowZero);
@@ -31,16 +30,6 @@ do {
// same thread?
if (Shadow::TidsAreEqual(old, cur)) {
StatInc(thr, StatShadowSameThread);
- if (OldIsInSameSynchEpoch(old, thr)) {
- if (old.IsRWNotWeaker(kAccessIsWrite, kIsAtomic)) {
- // found a slot that holds effectively the same info
- // (that is, same tid, same sync epoch and same size)
- StatInc(thr, StatMopSame);
- return;
- }
- StoreIfNotYetStored(sp, &store_word);
- break;
- }
if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic))
StoreIfNotYetStored(sp, &store_word);
break;
diff --git a/libsanitizer/tsan/tsan_vector.h b/libsanitizer/tsan/tsan_vector.h
index 4da8b83d5c..c0485513ee 100644
--- a/libsanitizer/tsan/tsan_vector.h
+++ b/libsanitizer/tsan/tsan_vector.h
@@ -56,10 +56,18 @@ class Vector {
return begin_[i];
}
- T *PushBack(T v = T()) {
+ T *PushBack() {
EnsureSize(Size() + 1);
- end_[-1] = v;
- return &end_[-1];
+ T *p = &end_[-1];
+ internal_memset(p, 0, sizeof(*p));
+ return p;
+ }
+
+ T *PushBack(const T& v) {
+ EnsureSize(Size() + 1);
+ T *p = &end_[-1];
+ internal_memcpy(p, &v, sizeof(*p));
+ return p;
}
void PopBack() {
@@ -68,11 +76,15 @@ class Vector {
}
void Resize(uptr size) {
+ if (size == 0) {
+ end_ = begin_;
+ return;
+ }
uptr old_size = Size();
EnsureSize(size);
if (old_size < size) {
for (uptr i = old_size; i < size; i++)
- begin_[i] = T();
+ internal_memset(&begin_[i], 0, sizeof(begin_[i]));
}
}
@@ -90,7 +102,7 @@ class Vector {
return;
}
uptr cap0 = last_ - begin_;
- uptr cap = 2 * cap0;
+ uptr cap = cap0 * 5 / 4; // 25% growth
if (cap == 0)
cap = 16;
if (cap < size)
diff --git a/libsanitizer/ubsan/Makefile.am b/libsanitizer/ubsan/Makefile.am
index 80b79e1cb8..12d1ffac8f 100644
--- a/libsanitizer/ubsan/Makefile.am
+++ b/libsanitizer/ubsan/Makefile.am
@@ -3,17 +3,20 @@ AM_CPPFLAGS = -I $(top_srcdir) -I $(top_srcdir)/include
# May be used by toolexeclibdir.
gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
-DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
+DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DPIC
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+AM_CXXFLAGS += -std=gnu++11
ACLOCAL_AMFLAGS = -I m4
toolexeclib_LTLIBRARIES = libubsan.la
ubsan_files = \
ubsan_diag.cc \
+ ubsan_flags.cc \
ubsan_handlers.cc \
ubsan_handlers_cxx.cc \
+ ubsan_init.cc \
ubsan_type_hash.cc \
ubsan_value.cc
diff --git a/libsanitizer/ubsan/Makefile.in b/libsanitizer/ubsan/Makefile.in
index c38740be66..e0f8914894 100644
--- a/libsanitizer/ubsan/Makefile.in
+++ b/libsanitizer/ubsan/Makefile.in
@@ -84,8 +84,9 @@ am__DEPENDENCIES_1 =
libubsan_la_DEPENDENCIES = \
$(top_builddir)/sanitizer_common/libsanitizer_common.la \
$(am__append_1) $(am__append_2) $(am__DEPENDENCIES_1)
-am__objects_1 = ubsan_diag.lo ubsan_handlers.lo ubsan_handlers_cxx.lo \
- ubsan_type_hash.lo ubsan_value.lo
+am__objects_1 = ubsan_diag.lo ubsan_flags.lo ubsan_handlers.lo \
+ ubsan_handlers_cxx.lo ubsan_init.lo ubsan_type_hash.lo \
+ ubsan_value.lo
am_libubsan_la_OBJECTS = $(am__objects_1)
libubsan_la_OBJECTS = $(am_libubsan_la_OBJECTS)
libubsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
@@ -131,7 +132,7 @@ CXXCPP = @CXXCPP@
CXXDEPMODE = @CXXDEPMODE@
CXXFLAGS = @CXXFLAGS@
CYGPATH_W = @CYGPATH_W@
-DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
+DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DPIC
DEPDIR = @DEPDIR@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
@@ -180,6 +181,7 @@ SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
+TSAN_TARGET_DEPENDENT_OBJECTS = @TSAN_TARGET_DEPENDENT_OBJECTS@
VERSION = @VERSION@
VIEW_FILE = @VIEW_FILE@
abs_builddir = @abs_builddir@
@@ -255,13 +257,16 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
-Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
-fomit-frame-pointer -funwind-tables -fvisibility=hidden \
- -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+ -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) \
+ -std=gnu++11
ACLOCAL_AMFLAGS = -I m4
toolexeclib_LTLIBRARIES = libubsan.la
ubsan_files = \
ubsan_diag.cc \
+ ubsan_flags.cc \
ubsan_handlers.cc \
ubsan_handlers_cxx.cc \
+ ubsan_init.cc \
ubsan_type_hash.cc \
ubsan_value.cc
@@ -385,8 +390,10 @@ distclean-compile:
-rm -f *.tab.c
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_diag.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_handlers.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_handlers_cxx.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_init.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_type_hash.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_value.Plo@am__quote@
diff --git a/libsanitizer/ubsan/ubsan_diag.cc b/libsanitizer/ubsan/ubsan_diag.cc
index 786ffa7254..028ac1a9b9 100644
--- a/libsanitizer/ubsan/ubsan_diag.cc
+++ b/libsanitizer/ubsan/ubsan_diag.cc
@@ -10,15 +10,60 @@
//===----------------------------------------------------------------------===//
#include "ubsan_diag.h"
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_libc.h"
+#include "ubsan_init.h"
+#include "ubsan_flags.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_stacktrace_printer.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include <stdio.h>
using namespace __ubsan;
+static void MaybePrintStackTrace(uptr pc, uptr bp) {
+ // We assume that flags are already parsed: InitIfNecessary
+ // will definitely be called when we print the first diagnostics message.
+ if (!flags()->print_stacktrace)
+ return;
+ // We can only use slow unwind, as we don't have any information about stack
+ // top/bottom.
+ // FIXME: It's better to respect "fast_unwind_on_fatal" runtime flag and
+ // fetch stack top/bottom information if we have it (e.g. if we're running
+ // under ASan).
+ if (StackTrace::WillUseFastUnwind(false))
+ return;
+ BufferedStackTrace stack;
+ stack.Unwind(kStackTraceMax, pc, bp, 0, 0, 0, false);
+ stack.Print();
+}
+
+static void MaybeReportErrorSummary(Location Loc) {
+ if (!common_flags()->print_summary)
+ return;
+ // Don't try to unwind the stack trace in UBSan summaries: just use the
+ // provided location.
+ if (Loc.isSourceLocation()) {
+ SourceLocation SLoc = Loc.getSourceLocation();
+ if (!SLoc.isInvalid()) {
+ ReportErrorSummary("undefined-behavior", SLoc.getFilename(),
+ SLoc.getLine(), "");
+ return;
+ }
+ }
+ ReportErrorSummary("undefined-behavior");
+}
+
+namespace {
+class Decorator : public SanitizerCommonDecorator {
+ public:
+ Decorator() : SanitizerCommonDecorator() {}
+ const char *Highlight() const { return Green(); }
+ const char *EndHighlight() const { return Default(); }
+ const char *Note() const { return Black(); }
+ const char *EndNote() const { return Default(); }
+};
+}
+
Location __ubsan::getCallerLocation(uptr CallerLoc) {
if (!CallerLoc)
return Location();
@@ -30,10 +75,11 @@ Location __ubsan::getCallerLocation(uptr CallerLoc) {
Location __ubsan::getFunctionLocation(uptr Loc, const char **FName) {
if (!Loc)
return Location();
+ InitIfNecessary();
AddressInfo Info;
- if (!Symbolizer::GetOrInit()->SymbolizeCode(Loc, &Info, 1) ||
- !Info.module || !*Info.module)
+ if (!Symbolizer::GetOrInit()->SymbolizePC(Loc, &Info, 1) || !Info.module ||
+ !*Info.module)
return Location(Loc);
if (FName && Info.function)
@@ -82,14 +128,16 @@ static void renderLocation(Location Loc) {
if (SLoc.isInvalid())
LocBuffer.append("<unknown>");
else
- PrintSourceLocation(&LocBuffer, SLoc.getFilename(), SLoc.getLine(),
- SLoc.getColumn());
+ RenderSourceLocation(&LocBuffer, SLoc.getFilename(), SLoc.getLine(),
+ SLoc.getColumn(), common_flags()->strip_path_prefix);
break;
}
- case Location::LK_Module:
- PrintModuleAndOffset(&LocBuffer, Loc.getModuleLocation().getModuleName(),
- Loc.getModuleLocation().getOffset());
+ case Location::LK_Module: {
+ ModuleLocation MLoc = Loc.getModuleLocation();
+ RenderModuleLocation(&LocBuffer, MLoc.getModuleName(), MLoc.getOffset(),
+ common_flags()->strip_path_prefix);
break;
+ }
case Location::LK_Memory:
LocBuffer.append("%p", Loc.getMemoryLocation());
break;
@@ -162,36 +210,49 @@ static Range *upperBound(MemoryLocation Loc, Range *Ranges,
return Best;
}
+static inline uptr subtractNoOverflow(uptr LHS, uptr RHS) {
+ return (LHS < RHS) ? 0 : LHS - RHS;
+}
+
+static inline uptr addNoOverflow(uptr LHS, uptr RHS) {
+ const uptr Limit = (uptr)-1;
+ return (LHS > Limit - RHS) ? Limit : LHS + RHS;
+}
+
/// Render a snippet of the address space near a location.
-static void renderMemorySnippet(const __sanitizer::AnsiColorDecorator &Decor,
- MemoryLocation Loc,
+static void renderMemorySnippet(const Decorator &Decor, MemoryLocation Loc,
Range *Ranges, unsigned NumRanges,
const Diag::Arg *Args) {
- const unsigned BytesToShow = 32;
- const unsigned MinBytesNearLoc = 4;
-
// Show at least the 8 bytes surrounding Loc.
- MemoryLocation Min = Loc - MinBytesNearLoc, Max = Loc + MinBytesNearLoc;
+ const unsigned MinBytesNearLoc = 4;
+ MemoryLocation Min = subtractNoOverflow(Loc, MinBytesNearLoc);
+ MemoryLocation Max = addNoOverflow(Loc, MinBytesNearLoc);
+ MemoryLocation OrigMin = Min;
for (unsigned I = 0; I < NumRanges; ++I) {
Min = __sanitizer::Min(Ranges[I].getStart().getMemoryLocation(), Min);
Max = __sanitizer::Max(Ranges[I].getEnd().getMemoryLocation(), Max);
}
// If we have too many interesting bytes, prefer to show bytes after Loc.
+ const unsigned BytesToShow = 32;
if (Max - Min > BytesToShow)
- Min = __sanitizer::Min(Max - BytesToShow, Loc - MinBytesNearLoc);
- Max = Min + BytesToShow;
+ Min = __sanitizer::Min(Max - BytesToShow, OrigMin);
+ Max = addNoOverflow(Min, BytesToShow);
+
+ if (!IsAccessibleMemoryRange(Min, Max - Min)) {
+ Printf("<memory cannot be printed>\n");
+ return;
+ }
// Emit data.
for (uptr P = Min; P != Max; ++P) {
- // FIXME: Check that the address is readable before printing it.
unsigned char C = *reinterpret_cast<const unsigned char*>(P);
Printf("%s%02x", (P % 8 == 0) ? " " : " ", C);
}
Printf("\n");
// Emit highlights.
- Printf(Decor.Green());
+ Printf(Decor.Highlight());
Range *InRange = upperBound(Min, Ranges, NumRanges);
for (uptr P = Min; P != Max; ++P) {
char Pad = ' ', Byte = ' ';
@@ -206,7 +267,7 @@ static void renderMemorySnippet(const __sanitizer::AnsiColorDecorator &Decor,
char Buffer[] = { Pad, Pad, P == Loc ? '^' : Byte, Byte, 0 };
Printf((P % 8 == 0) ? Buffer : &Buffer[1]);
}
- Printf("%s\n", Decor.Default());
+ Printf("%s\n", Decor.EndHighlight());
// Go over the line again, and print names for the ranges.
InRange = 0;
@@ -244,8 +305,9 @@ static void renderMemorySnippet(const __sanitizer::AnsiColorDecorator &Decor,
}
Diag::~Diag() {
- __sanitizer::AnsiColorDecorator Decor(PrintsToTty());
- SpinMutexLock l(&CommonSanitizerReportMutex);
+ // All diagnostics should be printed under report mutex.
+ CommonSanitizerReportMutex.CheckLocked();
+ Decorator Decor;
Printf(Decor.Bold());
renderLocation(Loc);
@@ -253,11 +315,11 @@ Diag::~Diag() {
switch (Level) {
case DL_Error:
Printf("%s runtime error: %s%s",
- Decor.Red(), Decor.Default(), Decor.Bold());
+ Decor.Warning(), Decor.EndWarning(), Decor.Bold());
break;
case DL_Note:
- Printf("%s note: %s", Decor.Black(), Decor.Default());
+ Printf("%s note: %s", Decor.Note(), Decor.EndNote());
break;
}
@@ -269,3 +331,26 @@ Diag::~Diag() {
renderMemorySnippet(Decor, Loc.getMemoryLocation(), Ranges,
NumRanges, Args);
}
+
+ScopedReport::ScopedReport(ReportOptions Opts, Location SummaryLoc)
+ : Opts(Opts), SummaryLoc(SummaryLoc) {
+ InitIfNecessary();
+ CommonSanitizerReportMutex.Lock();
+}
+
+ScopedReport::~ScopedReport() {
+ MaybePrintStackTrace(Opts.pc, Opts.bp);
+ MaybeReportErrorSummary(SummaryLoc);
+ CommonSanitizerReportMutex.Unlock();
+ if (Opts.DieAfterReport || flags()->halt_on_error)
+ Die();
+}
+
+bool __ubsan::MatchSuppression(const char *Str, SuppressionType Type) {
+ Suppression *s;
+ // If .preinit_array is not used, it is possible that the UBSan runtime is not
+ // initialized.
+ if (!SANITIZER_CAN_USE_PREINIT_ARRAY)
+ InitIfNecessary();
+ return SuppressionContext::Get()->Match(Str, Type, &s);
+}
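
A minimal sketch of the clamping behaviour introduced in renderMemorySnippet above
(uintptr_t stands in for the sanitizer's uptr; illustration only, not part of the patch):

#include <cassert>
#include <cstdint>

static inline std::uintptr_t subtractNoOverflow(std::uintptr_t LHS, std::uintptr_t RHS) {
  return (LHS < RHS) ? 0 : LHS - RHS;
}

static inline std::uintptr_t addNoOverflow(std::uintptr_t LHS, std::uintptr_t RHS) {
  const std::uintptr_t Limit = (std::uintptr_t)-1;
  return (LHS > Limit - RHS) ? Limit : LHS + RHS;
}

int main() {
  // An address near the bottom of the address space clamps to 0 instead of
  // wrapping when the 4-byte context window is subtracted...
  assert(subtractNoOverflow(2, 4) == 0);
  // ...and an address near the top saturates at the limit instead of
  // overflowing when the window is added.
  assert(addNoOverflow((std::uintptr_t)-2, 4) == (std::uintptr_t)-1);
  return 0;
}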
diff --git a/libsanitizer/ubsan/ubsan_diag.h b/libsanitizer/ubsan/ubsan_diag.h
index 0450368756..fbb3f1ff9d 100644
--- a/libsanitizer/ubsan/ubsan_diag.h
+++ b/libsanitizer/ubsan/ubsan_diag.h
@@ -12,6 +12,8 @@
#define UBSAN_DIAG_H
#include "ubsan_value.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
namespace __ubsan {
@@ -201,6 +203,33 @@ public:
Diag &operator<<(const Range &R) { return AddRange(R); }
};
+struct ReportOptions {
+ /// If DieAfterReport is specified, UBSan will terminate the program after the
+ /// report is printed.
+ bool DieAfterReport;
+ /// pc/bp are used to unwind the stack trace.
+ uptr pc;
+ uptr bp;
+};
+
+#define GET_REPORT_OPTIONS(die_after_report) \
+ GET_CALLER_PC_BP; \
+ ReportOptions Opts = {die_after_report, pc, bp}
+
+/// \brief Instantiate this class before printing diagnostics in the error
+/// report. This class ensures that reports from different threads and from
+/// different sanitizers won't be mixed.
+class ScopedReport {
+ ReportOptions Opts;
+ Location SummaryLoc;
+
+public:
+ ScopedReport(ReportOptions Opts, Location SummaryLoc);
+ ~ScopedReport();
+};
+
+bool MatchSuppression(const char *Str, SuppressionType Type);
+
} // namespace __ubsan
#endif // UBSAN_DIAG_H
diff --git a/libsanitizer/ubsan/ubsan_flags.cc b/libsanitizer/ubsan/ubsan_flags.cc
new file mode 100644
index 0000000000..9645a85726
--- /dev/null
+++ b/libsanitizer/ubsan/ubsan_flags.cc
@@ -0,0 +1,61 @@
+//===-- ubsan_flags.cc ----------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Runtime flags for UndefinedBehaviorSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_flags.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+
+namespace __ubsan {
+
+static const char *MaybeCallUbsanDefaultOptions() {
+ return (&__ubsan_default_options) ? __ubsan_default_options() : "";
+}
+
+void InitializeCommonFlags() {
+ CommonFlags *cf = common_flags();
+ SetCommonFlagsDefaults(cf);
+ cf->print_summary = false;
+ // Override from user-specified string.
+ ParseCommonFlagsFromString(cf, MaybeCallUbsanDefaultOptions());
+ // Override from environment variable.
+ ParseCommonFlagsFromString(cf, GetEnv("UBSAN_OPTIONS"));
+}
+
+Flags ubsan_flags;
+
+static void ParseFlagsFromString(Flags *f, const char *str) {
+ if (!str)
+ return;
+ ParseFlag(str, &f->halt_on_error, "halt_on_error",
+ "Crash the program after printing the first error report");
+ ParseFlag(str, &f->print_stacktrace, "print_stacktrace",
+ "Include full stacktrace into an error report");
+}
+
+void InitializeFlags() {
+ Flags *f = flags();
+ // Default values.
+ f->halt_on_error = false;
+ f->print_stacktrace = false;
+ // Override from user-specified string.
+ ParseFlagsFromString(f, MaybeCallUbsanDefaultOptions());
+ // Override from environment variable.
+ ParseFlagsFromString(f, GetEnv("UBSAN_OPTIONS"));
+}
+
+} // namespace __ubsan
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__ubsan_default_options() { return ""; }
+} // extern "C"
+#endif
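
The weak __ubsan_default_options hook defined above lets a program bake in default
flag values; a strong definition in user code overrides the empty default, and
UBSAN_OPTIONS still wins because InitializeFlags parses the environment last. A
hedged sketch of such a user translation unit (flag names taken from
ParseFlagsFromString above):

// User code linked against the UBSan runtime, not part of the runtime itself.
extern "C" const char *__ubsan_default_options() {
  // name=value pairs; these are the two UBSan-specific flags parsed above.
  return "print_stacktrace=1:halt_on_error=1";
}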
diff --git a/libsanitizer/ubsan/ubsan_flags.h b/libsanitizer/ubsan/ubsan_flags.h
new file mode 100644
index 0000000000..00be9b004f
--- /dev/null
+++ b/libsanitizer/ubsan/ubsan_flags.h
@@ -0,0 +1,38 @@
+//===-- ubsan_flags.h -------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Runtime flags for UndefinedBehaviorSanitizer.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_FLAGS_H
+#define UBSAN_FLAGS_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __ubsan {
+
+struct Flags {
+ bool halt_on_error;
+ bool print_stacktrace;
+};
+
+extern Flags ubsan_flags;
+inline Flags *flags() { return &ubsan_flags; }
+
+void InitializeCommonFlags();
+void InitializeFlags();
+
+} // namespace __ubsan
+
+extern "C" {
+// Users may provide their own implementation of __ubsan_default_options to
+// override the default flag values.
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__ubsan_default_options();
+} // extern "C"
+
+#endif // UBSAN_FLAGS_H
diff --git a/libsanitizer/ubsan/ubsan_handlers.cc b/libsanitizer/ubsan/ubsan_handlers.cc
index dd2e7bbf3e..770d3c28a4 100644
--- a/libsanitizer/ubsan/ubsan_handlers.cc
+++ b/libsanitizer/ubsan/ubsan_handlers.cc
@@ -17,23 +17,35 @@
using namespace __sanitizer;
using namespace __ubsan;
+static bool ignoreReport(SourceLocation SLoc, ReportOptions Opts) {
+ // If the source location is already acquired, we don't need to print an
+ // error report a second time. However, if we're in an unrecoverable
+ // handler, it's possible that the location was acquired by a concurrently
+ // running thread. In this case, we should continue execution to ensure
+ // that one of the threads grabs the report mutex and prints the report
+ // before the program crashes.
+ return SLoc.isDisabled() && !Opts.DieAfterReport;
+}
+
namespace __ubsan {
- const char *TypeCheckKinds[] = {
+const char *TypeCheckKinds[] = {
"load of", "store to", "reference binding to", "member access within",
- "member call on", "constructor call on", "downcast of", "downcast of"
- };
+ "member call on", "constructor call on", "downcast of", "downcast of",
+ "upcast of", "cast to virtual base of"};
}
static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer,
- Location FallbackLoc) {
+ Location FallbackLoc, ReportOptions Opts) {
Location Loc = Data->Loc.acquire();
-
// Use the SourceLocation from Data to track deduplication, even if 'invalid'
- if (Loc.getSourceLocation().isDisabled())
+ if (ignoreReport(Loc.getSourceLocation(), Opts))
return;
+
if (Data->Loc.isInvalid())
Loc = FallbackLoc;
+ ScopedReport R(Opts, Loc);
+
if (!Pointer)
Diag(Loc, DL_Error, "%0 null pointer of type %1")
<< TypeCheckKinds[Data->TypeCheckKind] << Data->Type;
@@ -49,70 +61,59 @@ static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer,
if (Pointer)
Diag(Pointer, DL_Note, "pointer points here");
}
+
void __ubsan::__ubsan_handle_type_mismatch(TypeMismatchData *Data,
ValueHandle Pointer) {
- handleTypeMismatchImpl(Data, Pointer, getCallerLocation());
+ GET_REPORT_OPTIONS(false);
+ handleTypeMismatchImpl(Data, Pointer, getCallerLocation(), Opts);
}
void __ubsan::__ubsan_handle_type_mismatch_abort(TypeMismatchData *Data,
ValueHandle Pointer) {
- handleTypeMismatchImpl(Data, Pointer, getCallerLocation());
+ GET_REPORT_OPTIONS(true);
+ handleTypeMismatchImpl(Data, Pointer, getCallerLocation(), Opts);
Die();
}
/// \brief Common diagnostic emission for various forms of integer overflow.
-template<typename T> static void HandleIntegerOverflow(OverflowData *Data,
- ValueHandle LHS,
- const char *Operator,
- T RHS) {
+template <typename T>
+static void handleIntegerOverflowImpl(OverflowData *Data, ValueHandle LHS,
+ const char *Operator, T RHS,
+ ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ ScopedReport R(Opts, Loc);
+
Diag(Loc, DL_Error, "%0 integer overflow: "
"%1 %2 %3 cannot be represented in type %4")
<< (Data->Type.isSignedIntegerTy() ? "signed" : "unsigned")
<< Value(Data->Type, LHS) << Operator << RHS << Data->Type;
}
-void __ubsan::__ubsan_handle_add_overflow(OverflowData *Data,
- ValueHandle LHS, ValueHandle RHS) {
- HandleIntegerOverflow(Data, LHS, "+", Value(Data->Type, RHS));
-}
-void __ubsan::__ubsan_handle_add_overflow_abort(OverflowData *Data,
- ValueHandle LHS,
- ValueHandle RHS) {
- __ubsan_handle_add_overflow(Data, LHS, RHS);
- Die();
-}
-
-void __ubsan::__ubsan_handle_sub_overflow(OverflowData *Data,
- ValueHandle LHS, ValueHandle RHS) {
- HandleIntegerOverflow(Data, LHS, "-", Value(Data->Type, RHS));
-}
-void __ubsan::__ubsan_handle_sub_overflow_abort(OverflowData *Data,
- ValueHandle LHS,
- ValueHandle RHS) {
- __ubsan_handle_sub_overflow(Data, LHS, RHS);
- Die();
-}
-
-void __ubsan::__ubsan_handle_mul_overflow(OverflowData *Data,
- ValueHandle LHS, ValueHandle RHS) {
- HandleIntegerOverflow(Data, LHS, "*", Value(Data->Type, RHS));
-}
-void __ubsan::__ubsan_handle_mul_overflow_abort(OverflowData *Data,
- ValueHandle LHS,
- ValueHandle RHS) {
- __ubsan_handle_mul_overflow(Data, LHS, RHS);
- Die();
-}
-
-void __ubsan::__ubsan_handle_negate_overflow(OverflowData *Data,
- ValueHandle OldVal) {
+#define UBSAN_OVERFLOW_HANDLER(handler_name, op, abort) \
+ void __ubsan::handler_name(OverflowData *Data, ValueHandle LHS, \
+ ValueHandle RHS) { \
+ GET_REPORT_OPTIONS(abort); \
+ handleIntegerOverflowImpl(Data, LHS, op, Value(Data->Type, RHS), Opts); \
+ if (abort) Die(); \
+ }
+
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_add_overflow, "+", false)
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_add_overflow_abort, "+", true)
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_sub_overflow, "-", false)
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_sub_overflow_abort, "-", true)
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_mul_overflow, "*", false)
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_mul_overflow_abort, "*", true)
+
+static void handleNegateOverflowImpl(OverflowData *Data, ValueHandle OldVal,
+ ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ ScopedReport R(Opts, Loc);
+
if (Data->Type.isSignedIntegerTy())
Diag(Loc, DL_Error,
"negation of %0 cannot be represented in type %1; "
@@ -123,18 +124,27 @@ void __ubsan::__ubsan_handle_negate_overflow(OverflowData *Data,
"negation of %0 cannot be represented in type %1")
<< Value(Data->Type, OldVal) << Data->Type;
}
+
+void __ubsan::__ubsan_handle_negate_overflow(OverflowData *Data,
+ ValueHandle OldVal) {
+ GET_REPORT_OPTIONS(false);
+ handleNegateOverflowImpl(Data, OldVal, Opts);
+}
void __ubsan::__ubsan_handle_negate_overflow_abort(OverflowData *Data,
ValueHandle OldVal) {
- __ubsan_handle_negate_overflow(Data, OldVal);
+ GET_REPORT_OPTIONS(true);
+ handleNegateOverflowImpl(Data, OldVal, Opts);
Die();
}
-void __ubsan::__ubsan_handle_divrem_overflow(OverflowData *Data,
- ValueHandle LHS, ValueHandle RHS) {
+static void handleDivremOverflowImpl(OverflowData *Data, ValueHandle LHS,
+ ValueHandle RHS, ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ ScopedReport R(Opts, Loc);
+
Value LHSVal(Data->Type, LHS);
Value RHSVal(Data->Type, RHS);
if (RHSVal.isMinusOne())
@@ -144,20 +154,29 @@ void __ubsan::__ubsan_handle_divrem_overflow(OverflowData *Data,
else
Diag(Loc, DL_Error, "division by zero");
}
+
+void __ubsan::__ubsan_handle_divrem_overflow(OverflowData *Data,
+ ValueHandle LHS, ValueHandle RHS) {
+ GET_REPORT_OPTIONS(false);
+ handleDivremOverflowImpl(Data, LHS, RHS, Opts);
+}
void __ubsan::__ubsan_handle_divrem_overflow_abort(OverflowData *Data,
ValueHandle LHS,
ValueHandle RHS) {
- __ubsan_handle_divrem_overflow(Data, LHS, RHS);
+ GET_REPORT_OPTIONS(true);
+ handleDivremOverflowImpl(Data, LHS, RHS, Opts);
Die();
}
-void __ubsan::__ubsan_handle_shift_out_of_bounds(ShiftOutOfBoundsData *Data,
- ValueHandle LHS,
- ValueHandle RHS) {
+static void handleShiftOutOfBoundsImpl(ShiftOutOfBoundsData *Data,
+ ValueHandle LHS, ValueHandle RHS,
+ ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ ScopedReport R(Opts, Loc);
+
Value LHSVal(Data->LHSType, LHS);
Value RHSVal(Data->RHSType, RHS);
if (RHSVal.isNegative())
@@ -173,107 +192,219 @@ void __ubsan::__ubsan_handle_shift_out_of_bounds(ShiftOutOfBoundsData *Data,
"left shift of %0 by %1 places cannot be represented in type %2")
<< LHSVal << RHSVal << Data->LHSType;
}
+
+void __ubsan::__ubsan_handle_shift_out_of_bounds(ShiftOutOfBoundsData *Data,
+ ValueHandle LHS,
+ ValueHandle RHS) {
+ GET_REPORT_OPTIONS(false);
+ handleShiftOutOfBoundsImpl(Data, LHS, RHS, Opts);
+}
void __ubsan::__ubsan_handle_shift_out_of_bounds_abort(
ShiftOutOfBoundsData *Data,
ValueHandle LHS,
ValueHandle RHS) {
- __ubsan_handle_shift_out_of_bounds(Data, LHS, RHS);
+ GET_REPORT_OPTIONS(true);
+ handleShiftOutOfBoundsImpl(Data, LHS, RHS, Opts);
Die();
}
-void __ubsan::__ubsan_handle_out_of_bounds(OutOfBoundsData *Data,
- ValueHandle Index) {
+static void handleOutOfBoundsImpl(OutOfBoundsData *Data, ValueHandle Index,
+ ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ ScopedReport R(Opts, Loc);
+
Value IndexVal(Data->IndexType, Index);
Diag(Loc, DL_Error, "index %0 out of bounds for type %1")
<< IndexVal << Data->ArrayType;
}
+
+void __ubsan::__ubsan_handle_out_of_bounds(OutOfBoundsData *Data,
+ ValueHandle Index) {
+ GET_REPORT_OPTIONS(false);
+ handleOutOfBoundsImpl(Data, Index, Opts);
+}
void __ubsan::__ubsan_handle_out_of_bounds_abort(OutOfBoundsData *Data,
ValueHandle Index) {
- __ubsan_handle_out_of_bounds(Data, Index);
+ GET_REPORT_OPTIONS(true);
+ handleOutOfBoundsImpl(Data, Index, Opts);
Die();
}
-void __ubsan::__ubsan_handle_builtin_unreachable(UnreachableData *Data) {
+static void handleBuiltinUnreachableImpl(UnreachableData *Data,
+ ReportOptions Opts) {
+ ScopedReport R(Opts, Data->Loc);
Diag(Data->Loc, DL_Error, "execution reached a __builtin_unreachable() call");
+}
+
+void __ubsan::__ubsan_handle_builtin_unreachable(UnreachableData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleBuiltinUnreachableImpl(Data, Opts);
Die();
}
-void __ubsan::__ubsan_handle_missing_return(UnreachableData *Data) {
+static void handleMissingReturnImpl(UnreachableData *Data, ReportOptions Opts) {
+ ScopedReport R(Opts, Data->Loc);
Diag(Data->Loc, DL_Error,
"execution reached the end of a value-returning function "
"without returning a value");
+}
+
+void __ubsan::__ubsan_handle_missing_return(UnreachableData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleMissingReturnImpl(Data, Opts);
Die();
}
-void __ubsan::__ubsan_handle_vla_bound_not_positive(VLABoundData *Data,
- ValueHandle Bound) {
+static void handleVLABoundNotPositive(VLABoundData *Data, ValueHandle Bound,
+ ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ ScopedReport R(Opts, Loc);
+
Diag(Loc, DL_Error, "variable length array bound evaluates to "
"non-positive value %0")
<< Value(Data->Type, Bound);
}
+
+void __ubsan::__ubsan_handle_vla_bound_not_positive(VLABoundData *Data,
+ ValueHandle Bound) {
+ GET_REPORT_OPTIONS(false);
+ handleVLABoundNotPositive(Data, Bound, Opts);
+}
void __ubsan::__ubsan_handle_vla_bound_not_positive_abort(VLABoundData *Data,
- ValueHandle Bound) {
- __ubsan_handle_vla_bound_not_positive(Data, Bound);
+ ValueHandle Bound) {
+ GET_REPORT_OPTIONS(true);
+ handleVLABoundNotPositive(Data, Bound, Opts);
Die();
}
+static void handleFloatCastOverflow(FloatCastOverflowData *Data,
+ ValueHandle From, ReportOptions Opts) {
+ // TODO: Add deduplication once a SourceLocation is generated for this check.
+ Location Loc = getCallerLocation();
+ ScopedReport R(Opts, Loc);
+
+ Diag(Loc, DL_Error,
+ "value %0 is outside the range of representable values of type %2")
+ << Value(Data->FromType, From) << Data->FromType << Data->ToType;
+}
void __ubsan::__ubsan_handle_float_cast_overflow(FloatCastOverflowData *Data,
ValueHandle From) {
- // TODO: Add deduplication once a SourceLocation is generated for this check.
- Diag(getCallerLocation(), DL_Error,
- "value %0 is outside the range of representable values of type %2")
- << Value(Data->FromType, From) << Data->FromType << Data->ToType;
+ GET_REPORT_OPTIONS(false);
+ handleFloatCastOverflow(Data, From, Opts);
}
-void __ubsan::__ubsan_handle_float_cast_overflow_abort(
- FloatCastOverflowData *Data,
- ValueHandle From) {
- Diag(getCallerLocation(), DL_Error,
- "value %0 is outside the range of representable values of type %2")
- << Value(Data->FromType, From) << Data->FromType << Data->ToType;
+void
+__ubsan::__ubsan_handle_float_cast_overflow_abort(FloatCastOverflowData *Data,
+ ValueHandle From) {
+ GET_REPORT_OPTIONS(true);
+ handleFloatCastOverflow(Data, From, Opts);
Die();
}
-void __ubsan::__ubsan_handle_load_invalid_value(InvalidValueData *Data,
- ValueHandle Val) {
+static void handleLoadInvalidValue(InvalidValueData *Data, ValueHandle Val,
+ ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ ScopedReport R(Opts, Loc);
+
Diag(Loc, DL_Error,
"load of value %0, which is not a valid value for type %1")
<< Value(Data->Type, Val) << Data->Type;
}
+
+void __ubsan::__ubsan_handle_load_invalid_value(InvalidValueData *Data,
+ ValueHandle Val) {
+ GET_REPORT_OPTIONS(false);
+ handleLoadInvalidValue(Data, Val, Opts);
+}
void __ubsan::__ubsan_handle_load_invalid_value_abort(InvalidValueData *Data,
ValueHandle Val) {
- __ubsan_handle_load_invalid_value(Data, Val);
+ GET_REPORT_OPTIONS(true);
+ handleLoadInvalidValue(Data, Val, Opts);
Die();
}
-void __ubsan::__ubsan_handle_function_type_mismatch(
- FunctionTypeMismatchData *Data,
- ValueHandle Function) {
+static void handleFunctionTypeMismatch(FunctionTypeMismatchData *Data,
+ ValueHandle Function,
+ ReportOptions Opts) {
const char *FName = "(unknown)";
Location Loc = getFunctionLocation(Function, &FName);
+ ScopedReport R(Opts, Loc);
+
Diag(Data->Loc, DL_Error,
"call to function %0 through pointer to incorrect function type %1")
<< FName << Data->Type;
Diag(Loc, DL_Note, "%0 defined here") << FName;
}
+void
+__ubsan::__ubsan_handle_function_type_mismatch(FunctionTypeMismatchData *Data,
+ ValueHandle Function) {
+ GET_REPORT_OPTIONS(false);
+ handleFunctionTypeMismatch(Data, Function, Opts);
+}
+
void __ubsan::__ubsan_handle_function_type_mismatch_abort(
- FunctionTypeMismatchData *Data,
- ValueHandle Function) {
- __ubsan_handle_function_type_mismatch(Data, Function);
+ FunctionTypeMismatchData *Data, ValueHandle Function) {
+ GET_REPORT_OPTIONS(true);
+ handleFunctionTypeMismatch(Data, Function, Opts);
+ Die();
+}
+
+static void handleNonNullReturn(NonNullReturnData *Data, ReportOptions Opts) {
+ SourceLocation Loc = Data->Loc.acquire();
+ if (ignoreReport(Loc, Opts))
+ return;
+
+ ScopedReport R(Opts, Loc);
+
+ Diag(Loc, DL_Error, "null pointer returned from function declared to never "
+ "return null");
+ if (!Data->AttrLoc.isInvalid())
+ Diag(Data->AttrLoc, DL_Note, "returns_nonnull attribute specified here");
+}
+
+void __ubsan::__ubsan_handle_nonnull_return(NonNullReturnData *Data) {
+ GET_REPORT_OPTIONS(false);
+ handleNonNullReturn(Data, Opts);
+}
+
+void __ubsan::__ubsan_handle_nonnull_return_abort(NonNullReturnData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleNonNullReturn(Data, Opts);
+ Die();
+}
+
+static void handleNonNullArg(NonNullArgData *Data, ReportOptions Opts) {
+ SourceLocation Loc = Data->Loc.acquire();
+ if (ignoreReport(Loc, Opts))
+ return;
+
+ ScopedReport R(Opts, Loc);
+
+ Diag(Loc, DL_Error, "null pointer passed as argument %0, which is declared to "
+ "never be null") << Data->ArgIndex;
+ if (!Data->AttrLoc.isInvalid())
+ Diag(Data->AttrLoc, DL_Note, "nonnull attribute specified here");
+}
+
+void __ubsan::__ubsan_handle_nonnull_arg(NonNullArgData *Data) {
+ GET_REPORT_OPTIONS(false);
+ handleNonNullArg(Data, Opts);
+}
+
+void __ubsan::__ubsan_handle_nonnull_arg_abort(NonNullArgData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleNonNullArg(Data, Opts);
Die();
}
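
For reference, the UBSAN_OVERFLOW_HANDLER macro above expands each entry into the
same shape as the hand-written handlers; expanding the first instantiation by hand
gives roughly (illustration only):

void __ubsan::__ubsan_handle_add_overflow(OverflowData *Data, ValueHandle LHS,
                                          ValueHandle RHS) {
  GET_REPORT_OPTIONS(false);  // capture pc/bp, DieAfterReport = false
  handleIntegerOverflowImpl(Data, LHS, "+", Value(Data->Type, RHS), Opts);
  if (false) Die();           // the recoverable variant never aborts
}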
diff --git a/libsanitizer/ubsan/ubsan_handlers.h b/libsanitizer/ubsan/ubsan_handlers.h
index 226faadc28..92365d8189 100644
--- a/libsanitizer/ubsan/ubsan_handlers.h
+++ b/libsanitizer/ubsan/ubsan_handlers.h
@@ -22,10 +22,14 @@ struct TypeMismatchData {
unsigned char TypeCheckKind;
};
+#define UNRECOVERABLE(checkname, ...) \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE NORETURN \
+ void __ubsan_handle_ ## checkname( __VA_ARGS__ );
+
#define RECOVERABLE(checkname, ...) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE \
void __ubsan_handle_ ## checkname( __VA_ARGS__ ); \
- extern "C" SANITIZER_INTERFACE_ATTRIBUTE \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE NORETURN \
void __ubsan_handle_ ## checkname ## _abort( __VA_ARGS__ );
/// \brief Handle a runtime type check failure, caused by either a misaligned
@@ -79,11 +83,9 @@ struct UnreachableData {
};
/// \brief Handle a __builtin_unreachable which is reached.
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-void __ubsan_handle_builtin_unreachable(UnreachableData *Data);
+UNRECOVERABLE(builtin_unreachable, UnreachableData *Data)
/// \brief Handle reaching the end of a value-returning function.
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-void __ubsan_handle_missing_return(UnreachableData *Data);
+UNRECOVERABLE(missing_return, UnreachableData *Data)
struct VLABoundData {
SourceLocation Loc;
@@ -119,6 +121,23 @@ RECOVERABLE(function_type_mismatch,
FunctionTypeMismatchData *Data,
ValueHandle Val)
+struct NonNullReturnData {
+ SourceLocation Loc;
+ SourceLocation AttrLoc;
+};
+
+/// \brief Handle returning null from function with returns_nonnull attribute.
+RECOVERABLE(nonnull_return, NonNullReturnData *Data)
+
+struct NonNullArgData {
+ SourceLocation Loc;
+ SourceLocation AttrLoc;
+ int ArgIndex;
+};
+
+/// \brief Handle passing null pointer to function with nonnull attribute.
+RECOVERABLE(nonnull_arg, NonNullArgData *Data)
+
}
#endif // UBSAN_HANDLERS_H
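
The new UNRECOVERABLE macro and the NORETURN-annotated _abort declarations expand
mechanically; for the nonnull_arg entry added above, the expansion is roughly (hand
expansion, for illustration only):

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __ubsan_handle_nonnull_arg(NonNullArgData *Data);
extern "C" SANITIZER_INTERFACE_ATTRIBUTE NORETURN
void __ubsan_handle_nonnull_arg_abort(NonNullArgData *Data);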
diff --git a/libsanitizer/ubsan/ubsan_handlers_cxx.cc b/libsanitizer/ubsan/ubsan_handlers_cxx.cc
index bb43cc75cf..86f3e57a8b 100644
--- a/libsanitizer/ubsan/ubsan_handlers_cxx.cc
+++ b/libsanitizer/ubsan/ubsan_handlers_cxx.cc
@@ -16,6 +16,7 @@
#include "ubsan_type_hash.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
using namespace __sanitizer;
using namespace __ubsan;
@@ -26,47 +27,54 @@ namespace __ubsan {
static void HandleDynamicTypeCacheMiss(
DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash,
- bool Abort) {
+ ReportOptions Opts) {
if (checkDynamicType((void*)Pointer, Data->TypeInfo, Hash))
// Just a cache miss. The type matches after all.
return;
+ // Check whether the error report should be suppressed.
+ DynamicTypeInfo DTI = getDynamicTypeInfo((void*)Pointer);
+ if (DTI.isValid() &&
+ MatchSuppression(DTI.getMostDerivedTypeName(), SuppressionVptrCheck))
+ return;
+
SourceLocation Loc = Data->Loc.acquire();
if (Loc.isDisabled())
return;
+ ScopedReport R(Opts, Loc);
+
Diag(Loc, DL_Error,
"%0 address %1 which does not point to an object of type %2")
<< TypeCheckKinds[Data->TypeCheckKind] << (void*)Pointer << Data->Type;
// If possible, say what type it actually points to.
- DynamicTypeInfo DTI = getDynamicTypeInfo((void*)Pointer);
if (!DTI.isValid())
Diag(Pointer, DL_Note, "object has invalid vptr")
- << MangledName(DTI.getMostDerivedTypeName())
- << Range(Pointer, Pointer + sizeof(uptr), "invalid vptr");
+ << MangledName(DTI.getMostDerivedTypeName())
+ << Range(Pointer, Pointer + sizeof(uptr), "invalid vptr");
else if (!DTI.getOffset())
Diag(Pointer, DL_Note, "object is of type %0")
- << MangledName(DTI.getMostDerivedTypeName())
- << Range(Pointer, Pointer + sizeof(uptr), "vptr for %0");
+ << MangledName(DTI.getMostDerivedTypeName())
+ << Range(Pointer, Pointer + sizeof(uptr), "vptr for %0");
else
// FIXME: Find the type at the specified offset, and include that
// in the note.
Diag(Pointer - DTI.getOffset(), DL_Note,
"object is base class subobject at offset %0 within object of type %1")
- << DTI.getOffset() << MangledName(DTI.getMostDerivedTypeName())
- << MangledName(DTI.getSubobjectTypeName())
- << Range(Pointer, Pointer + sizeof(uptr), "vptr for %2 base class of %1");
-
- if (Abort)
- Die();
+ << DTI.getOffset() << MangledName(DTI.getMostDerivedTypeName())
+ << MangledName(DTI.getSubobjectTypeName())
+ << Range(Pointer, Pointer + sizeof(uptr),
+ "vptr for %2 base class of %1");
}
void __ubsan::__ubsan_handle_dynamic_type_cache_miss(
DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash) {
- HandleDynamicTypeCacheMiss(Data, Pointer, Hash, false);
+ GET_REPORT_OPTIONS(false);
+ HandleDynamicTypeCacheMiss(Data, Pointer, Hash, Opts);
}
void __ubsan::__ubsan_handle_dynamic_type_cache_miss_abort(
DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash) {
- HandleDynamicTypeCacheMiss(Data, Pointer, Hash, true);
+ GET_REPORT_OPTIONS(true);
+ HandleDynamicTypeCacheMiss(Data, Pointer, Hash, Opts);
}
diff --git a/libsanitizer/ubsan/ubsan_init.cc b/libsanitizer/ubsan/ubsan_init.cc
new file mode 100644
index 0000000000..f28fc811f5
--- /dev/null
+++ b/libsanitizer/ubsan/ubsan_init.cc
@@ -0,0 +1,59 @@
+//===-- ubsan_init.cc -----------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Initialization of UBSan runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_init.h"
+#include "ubsan_flags.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+
+using namespace __ubsan;
+
+static bool ubsan_inited;
+
+void __ubsan::InitIfNecessary() {
+#if !SANITIZER_CAN_USE_PREINIT_ARRAY
+ // No need to lock mutex if we're initializing from preinit array.
+ static StaticSpinMutex init_mu;
+ SpinMutexLock l(&init_mu);
+#endif
+ if (LIKELY(ubsan_inited))
+ return;
+ if (0 == internal_strcmp(SanitizerToolName, "SanitizerTool")) {
+ // WARNING: If this condition holds, then either UBSan runs in standalone
+ // mode, or the initializer for another sanitizer hasn't run yet. In the
+ // latter case, another sanitizer will overwrite "SanitizerToolName" and
+ // reparse common flags, so we are not allowed to *use* common flags
+ // in this function.
+ SanitizerToolName = "UndefinedBehaviorSanitizer";
+ InitializeCommonFlags();
+ }
+ // Initialize UBSan-specific flags.
+ InitializeFlags();
+ SuppressionContext::InitIfNecessary();
+ ubsan_inited = true;
+}
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
+__attribute__((section(".preinit_array"), used))
+void (*__local_ubsan_preinit)(void) = __ubsan::InitIfNecessary;
+#else
+// Use a dynamic initializer.
+class UbsanInitializer {
+ public:
+ UbsanInitializer() {
+ InitIfNecessary();
+ }
+};
+static UbsanInitializer ubsan_initializer;
+#endif // SANITIZER_CAN_USE_PREINIT_ARRAY
diff --git a/libsanitizer/ubsan/ubsan_init.h b/libsanitizer/ubsan/ubsan_init.h
new file mode 100644
index 0000000000..b76bbfe7bb
--- /dev/null
+++ b/libsanitizer/ubsan/ubsan_init.h
@@ -0,0 +1,22 @@
+//===-- ubsan_init.h --------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Initialization function for UBSan runtime.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_INIT_H
+#define UBSAN_INIT_H
+
+namespace __ubsan {
+
+// NOTE: This function might take a lock (if .preinit_array initialization is
+// not used). It's generally a bad idea to call it on a fast path.
+void InitIfNecessary();
+
+} // namespace __ubsan
+
+#endif // UBSAN_INIT_H
diff --git a/libsanitizer/ubsan/ubsan_value.cc b/libsanitizer/ubsan/ubsan_value.cc
index 141e8b5350..e2f664d3b2 100644
--- a/libsanitizer/ubsan/ubsan_value.cc
+++ b/libsanitizer/ubsan/ubsan_value.cc
@@ -92,6 +92,7 @@ FloatMax Value::getFloatValue() const {
switch (getType().getFloatBitWidth()) {
case 64: return *reinterpret_cast<double*>(Val);
case 80: return *reinterpret_cast<long double*>(Val);
+ case 96: return *reinterpret_cast<long double*>(Val);
case 128: return *reinterpret_cast<long double*>(Val);
}
}
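
The new case 96 presumably covers targets such as 32-bit x86, where the psABI stores
the 80-bit x87 extended type in 12 bytes, so the front end reports a 96-bit width
while the value is still read as an ordinary long double. A small standalone check of
that layout (illustration only):

#include <cstdio>
int main() {
  // On i386 Linux this typically prints 12 bytes (96 bits of storage for an
  // 80-bit value); on x86_64 it is usually 16 bytes (128 bits of storage).
  std::printf("sizeof(long double) = %zu bytes = %zu bits of storage\n",
              sizeof(long double), sizeof(long double) * 8);
  return 0;
}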
diff --git a/libsanitizer/ubsan/ubsan_value.h b/libsanitizer/ubsan/ubsan_value.h
index 6ca0f56c99..abfd31fbd9 100644
--- a/libsanitizer/ubsan/ubsan_value.h
+++ b/libsanitizer/ubsan/ubsan_value.h
@@ -12,9 +12,9 @@
#ifndef UBSAN_VALUE_H
#define UBSAN_VALUE_H
-// For now, only support linux and darwin. Other platforms should be easy to
-// add, and probably work as-is.
-#if !defined(__linux__) && !defined(__APPLE__)
+// For now, only support Linux, FreeBSD and Darwin. Other platforms should
+// be easy to add, and probably work as-is.
+#if !defined(__linux__) && !defined(__FreeBSD__) && !defined(__APPLE__)
#error "UBSan not supported for this platform!"
#endif