diff options
Diffstat (limited to 'libsanitizer/lsan')
-rw-r--r-- | libsanitizer/lsan/Makefile.am | 60 | ||||
-rw-r--r-- | libsanitizer/lsan/Makefile.in | 514 | ||||
-rw-r--r-- | libsanitizer/lsan/libtool-version | 6 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan.cc | 63 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan.h | 21 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan_allocator.cc | 191 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan_allocator.h | 37 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan_common.cc | 577 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan_common.h | 174 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan_common_linux.cc | 139 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan_interceptors.cc | 279 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan_thread.cc | 154 | ||||
-rw-r--r-- | libsanitizer/lsan/lsan_thread.h | 51 |
13 files changed, 2266 insertions, 0 deletions
diff --git a/libsanitizer/lsan/Makefile.am b/libsanitizer/lsan/Makefile.am new file mode 100644 index 00000000000..5c8726fb35b --- /dev/null +++ b/libsanitizer/lsan/Makefile.am @@ -0,0 +1,60 @@ +AM_CPPFLAGS = -I $(top_srcdir)/include -I $(top_srcdir) + +# May be used by toolexeclibdir. +gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER) + +DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS +AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros +AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS) +ACLOCAL_AMFLAGS = -I m4 + +noinst_LTLIBRARIES = libsanitizer_lsan.la + +sanitizer_lsan_files = \ + lsan_common.cc \ + lsan_common_linux.cc + +libsanitizer_lsan_la_SOURCES = $(sanitizer_lsan_files) + +# Work around what appears to be a GNU make bug handling MAKEFLAGS +# values defined in terms of make variables, as is the case for CC and +# friends when we are called from the top level Makefile. 
+AM_MAKEFLAGS = \ + "AR_FLAGS=$(AR_FLAGS)" \ + "CC_FOR_BUILD=$(CC_FOR_BUILD)" \ + "CFLAGS=$(CFLAGS)" \ + "CXXFLAGS=$(CXXFLAGS)" \ + "CFLAGS_FOR_BUILD=$(CFLAGS_FOR_BUILD)" \ + "CFLAGS_FOR_TARGET=$(CFLAGS_FOR_TARGET)" \ + "INSTALL=$(INSTALL)" \ + "INSTALL_DATA=$(INSTALL_DATA)" \ + "INSTALL_PROGRAM=$(INSTALL_PROGRAM)" \ + "INSTALL_SCRIPT=$(INSTALL_SCRIPT)" \ + "JC1FLAGS=$(JC1FLAGS)" \ + "LDFLAGS=$(LDFLAGS)" \ + "LIBCFLAGS=$(LIBCFLAGS)" \ + "LIBCFLAGS_FOR_TARGET=$(LIBCFLAGS_FOR_TARGET)" \ + "MAKE=$(MAKE)" \ + "MAKEINFO=$(MAKEINFO) $(MAKEINFOFLAGS)" \ + "PICFLAG=$(PICFLAG)" \ + "PICFLAG_FOR_TARGET=$(PICFLAG_FOR_TARGET)" \ + "SHELL=$(SHELL)" \ + "RUNTESTFLAGS=$(RUNTESTFLAGS)" \ + "exec_prefix=$(exec_prefix)" \ + "infodir=$(infodir)" \ + "libdir=$(libdir)" \ + "prefix=$(prefix)" \ + "includedir=$(includedir)" \ + "AR=$(AR)" \ + "AS=$(AS)" \ + "LD=$(LD)" \ + "LIBCFLAGS=$(LIBCFLAGS)" \ + "NM=$(NM)" \ + "PICFLAG=$(PICFLAG)" \ + "RANLIB=$(RANLIB)" \ + "DESTDIR=$(DESTDIR)" + +MAKEOVERRIDES= + +## ################################################################ + diff --git a/libsanitizer/lsan/Makefile.in b/libsanitizer/lsan/Makefile.in new file mode 100644 index 00000000000..e01f65b8663 --- /dev/null +++ b/libsanitizer/lsan/Makefile.in @@ -0,0 +1,514 @@ +# Makefile.in generated by automake 1.11.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, +# Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
+ +@SET_MAKE@ + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = lsan +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/../config/acx.m4 \ + $(top_srcdir)/../config/depstand.m4 \ + $(top_srcdir)/../config/lead-dot.m4 \ + $(top_srcdir)/../config/libstdc++-raw-cxx.m4 \ + $(top_srcdir)/../config/multi.m4 \ + $(top_srcdir)/../config/override.m4 \ + $(top_srcdir)/../ltoptions.m4 $(top_srcdir)/../ltsugar.m4 \ + $(top_srcdir)/../ltversion.m4 $(top_srcdir)/../lt~obsolete.m4 \ + $(top_srcdir)/acinclude.m4 $(top_srcdir)/../libtool.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(SHELL) $(top_srcdir)/../mkinstalldirs +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +LTLIBRARIES = $(noinst_LTLIBRARIES) +libsanitizer_lsan_la_LIBADD = +am__objects_1 = lsan_common.lo lsan_common_linux.lo +am_libsanitizer_lsan_la_OBJECTS = $(am__objects_1) +libsanitizer_lsan_la_OBJECTS = $(am_libsanitizer_lsan_la_OBJECTS) +DEFAULT_INCLUDES = -I.@am__isrc@ +depcomp = $(SHELL) $(top_srcdir)/../depcomp +am__depfiles_maybe = depfiles +am__mv = mv -f +CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) +LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CXX) 
$(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) +CXXLD = $(CXX) +CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(libsanitizer_lsan_la_SOURCES) +ETAGS = etags +CTAGS = ctags +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCAS = @CCAS@ +CCASDEPMODE = @CCASDEPMODE@ +CCASFLAGS = @CCASFLAGS@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBSTDCXX_RAW_CXX_CXXFLAGS = @LIBSTDCXX_RAW_CXX_CXXFLAGS@ +LIBSTDCXX_RAW_CXX_LDFLAGS = @LIBSTDCXX_RAW_CXX_LDFLAGS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = 
@VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +enable_shared = @enable_shared@ +enable_static = @enable_static@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +multi_basedir = @multi_basedir@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_noncanonical = @target_noncanonical@ +target_os = @target_os@ +target_vendor = @target_vendor@ +toolexecdir = @toolexecdir@ +toolexeclibdir = @toolexeclibdir@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AM_CPPFLAGS = -I $(top_srcdir)/include -I $(top_srcdir) + +# May be used by toolexeclibdir. 
+gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER) +AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \ + -Wno-long-long -fPIC -fno-builtin -fno-exceptions \ + -fomit-frame-pointer -funwind-tables -fvisibility=hidden \ + -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) +ACLOCAL_AMFLAGS = -I m4 +noinst_LTLIBRARIES = libsanitizer_lsan.la +sanitizer_lsan_files = \ + lsan_common.cc \ + lsan_common_linux.cc + +libsanitizer_lsan_la_SOURCES = $(sanitizer_lsan_files) + +# Work around what appears to be a GNU make bug handling MAKEFLAGS +# values defined in terms of make variables, as is the case for CC and +# friends when we are called from the top level Makefile. +AM_MAKEFLAGS = \ + "AR_FLAGS=$(AR_FLAGS)" \ + "CC_FOR_BUILD=$(CC_FOR_BUILD)" \ + "CFLAGS=$(CFLAGS)" \ + "CXXFLAGS=$(CXXFLAGS)" \ + "CFLAGS_FOR_BUILD=$(CFLAGS_FOR_BUILD)" \ + "CFLAGS_FOR_TARGET=$(CFLAGS_FOR_TARGET)" \ + "INSTALL=$(INSTALL)" \ + "INSTALL_DATA=$(INSTALL_DATA)" \ + "INSTALL_PROGRAM=$(INSTALL_PROGRAM)" \ + "INSTALL_SCRIPT=$(INSTALL_SCRIPT)" \ + "JC1FLAGS=$(JC1FLAGS)" \ + "LDFLAGS=$(LDFLAGS)" \ + "LIBCFLAGS=$(LIBCFLAGS)" \ + "LIBCFLAGS_FOR_TARGET=$(LIBCFLAGS_FOR_TARGET)" \ + "MAKE=$(MAKE)" \ + "MAKEINFO=$(MAKEINFO) $(MAKEINFOFLAGS)" \ + "PICFLAG=$(PICFLAG)" \ + "PICFLAG_FOR_TARGET=$(PICFLAG_FOR_TARGET)" \ + "SHELL=$(SHELL)" \ + "RUNTESTFLAGS=$(RUNTESTFLAGS)" \ + "exec_prefix=$(exec_prefix)" \ + "infodir=$(infodir)" \ + "libdir=$(libdir)" \ + "prefix=$(prefix)" \ + "includedir=$(includedir)" \ + "AR=$(AR)" \ + "AS=$(AS)" \ + "LD=$(LD)" \ + "LIBCFLAGS=$(LIBCFLAGS)" \ + "NM=$(NM)" \ + "PICFLAG=$(PICFLAG)" \ + "RANLIB=$(RANLIB)" \ + "DESTDIR=$(DESTDIR)" + +MAKEOVERRIDES = +all: all-am + +.SUFFIXES: +.SUFFIXES: .cc .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; 
then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign lsan/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign lsan/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +clean-noinstLTLIBRARIES: + -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) + @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +libsanitizer_lsan.la: $(libsanitizer_lsan_la_OBJECTS) $(libsanitizer_lsan_la_DEPENDENCIES) + $(CXXLINK) $(libsanitizer_lsan_la_OBJECTS) $(libsanitizer_lsan_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_common.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_common_linux.Plo@am__quote@ + +.cc.o: +@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no 
@AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< + +.cc.obj: +@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.cc.lo: +@am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + set x; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) 
\ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libtool clean-noinstLTLIBRARIES ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags dvi dvi-am html html-am info info-am install \ + install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ + pdf pdf-am ps ps-am tags uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
+.NOEXPORT: diff --git a/libsanitizer/lsan/libtool-version b/libsanitizer/lsan/libtool-version new file mode 100644 index 00000000000..204fdd2d8e5 --- /dev/null +++ b/libsanitizer/lsan/libtool-version @@ -0,0 +1,6 @@ +# This file is used to maintain libtool version info for libmudflap. See +# the libtool manual to understand the meaning of the fields. This is +# a separate file so that version updates don't involve re-running +# automake. +# CURRENT:REVISION:AGE +0:0:0 diff --git a/libsanitizer/lsan/lsan.cc b/libsanitizer/lsan/lsan.cc new file mode 100644 index 00000000000..500da50622c --- /dev/null +++ b/libsanitizer/lsan/lsan.cc @@ -0,0 +1,63 @@ +//=-- lsan.cc -------------------------------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Standalone LSan RTL. 
+// +//===----------------------------------------------------------------------===// + +#include "lsan.h" + +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "lsan_allocator.h" +#include "lsan_common.h" +#include "lsan_thread.h" + +namespace __lsan { + +static void InitializeCommonFlags() { + CommonFlags *cf = common_flags(); + cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH"); + cf->symbolize = true; + cf->strip_path_prefix = ""; + cf->fast_unwind_on_malloc = true; + cf->malloc_context_size = 30; + cf->detect_leaks = true; + cf->leak_check_at_exit = true; + + ParseCommonFlagsFromString(GetEnv("LSAN_OPTIONS")); +} + +void Init() { + static bool inited; + if (inited) + return; + inited = true; + SanitizerToolName = "LeakSanitizer"; + InitializeCommonFlags(); + InitializeAllocator(); + InitTlsSize(); + InitializeInterceptors(); + InitializeThreadRegistry(); + u32 tid = ThreadCreate(0, 0, true); + CHECK_EQ(tid, 0); + ThreadStart(tid, GetTid()); + SetCurrentThread(tid); + + // Start symbolizer process if necessary. + if (common_flags()->symbolize) { + getSymbolizer() + ->InitializeExternal(common_flags()->external_symbolizer_path); + } + + InitCommonLsan(); + if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) + Atexit(DoLeakCheck); +} + +} // namespace __lsan diff --git a/libsanitizer/lsan/lsan.h b/libsanitizer/lsan/lsan.h new file mode 100644 index 00000000000..18ff5da6281 --- /dev/null +++ b/libsanitizer/lsan/lsan.h @@ -0,0 +1,21 @@ +//=-- lsan.h --------------------------------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Private header for standalone LSan RTL. 
+// +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_stacktrace.h" + +namespace __lsan { + +void Init(); +void InitializeInterceptors(); + +} // namespace __lsan diff --git a/libsanitizer/lsan/lsan_allocator.cc b/libsanitizer/lsan/lsan_allocator.cc new file mode 100644 index 00000000000..66af603e656 --- /dev/null +++ b/libsanitizer/lsan/lsan_allocator.cc @@ -0,0 +1,191 @@ +//=-- lsan_allocator.cc ---------------------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// See lsan_allocator.h for details. +// +//===----------------------------------------------------------------------===// + +#include "lsan_allocator.h" + +#include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "lsan_common.h" + +namespace __lsan { + +static const uptr kMaxAllowedMallocSize = 8UL << 30; +static const uptr kAllocatorSpace = 0x600000000000ULL; +static const uptr kAllocatorSize = 0x40000000000ULL; // 4T. + +struct ChunkMetadata { + bool allocated : 8; // Must be first. 
+ ChunkTag tag : 2; + uptr requested_size : 54; + u32 stack_trace_id; +}; + +typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, + sizeof(ChunkMetadata), CompactSizeClassMap> PrimaryAllocator; +typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache; +typedef LargeMmapAllocator<> SecondaryAllocator; +typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, + SecondaryAllocator> Allocator; + +static Allocator allocator; +static THREADLOCAL AllocatorCache cache; + +void InitializeAllocator() { + allocator.Init(); +} + +void AllocatorThreadFinish() { + allocator.SwallowCache(&cache); +} + +static ChunkMetadata *Metadata(void *p) { + return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p)); +} + +static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) { + if (!p) return; + ChunkMetadata *m = Metadata(p); + CHECK(m); + m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked; + m->stack_trace_id = StackDepotPut(stack.trace, stack.size); + m->requested_size = size; + atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed); +} + +static void RegisterDeallocation(void *p) { + if (!p) return; + ChunkMetadata *m = Metadata(p); + CHECK(m); + atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed); +} + +void *Allocate(const StackTrace &stack, uptr size, uptr alignment, + bool cleared) { + if (size == 0) + size = 1; + if (size > kMaxAllowedMallocSize) { + Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size); + return 0; + } + void *p = allocator.Allocate(&cache, size, alignment, cleared); + RegisterAllocation(stack, p, size); + return p; +} + +void Deallocate(void *p) { + RegisterDeallocation(p); + allocator.Deallocate(&cache, p); +} + +void *Reallocate(const StackTrace &stack, void *p, uptr new_size, + uptr alignment) { + RegisterDeallocation(p); + if (new_size > kMaxAllowedMallocSize) { + Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", 
new_size); + allocator.Deallocate(&cache, p); + return 0; + } + p = allocator.Reallocate(&cache, p, new_size, alignment); + RegisterAllocation(stack, p, new_size); + return p; +} + +void GetAllocatorCacheRange(uptr *begin, uptr *end) { + *begin = (uptr)&cache; + *end = *begin + sizeof(cache); +} + +uptr GetMallocUsableSize(void *p) { + ChunkMetadata *m = Metadata(p); + if (!m) return 0; + return m->requested_size; +} + +///// Interface to the common LSan module. ///// + +void LockAllocator() { + allocator.ForceLock(); +} + +void UnlockAllocator() { + allocator.ForceUnlock(); +} + +void GetAllocatorGlobalRange(uptr *begin, uptr *end) { + *begin = (uptr)&allocator; + *end = *begin + sizeof(allocator); +} + +uptr PointsIntoChunk(void* p) { + uptr addr = reinterpret_cast<uptr>(p); + uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p)); + if (!chunk) return 0; + // LargeMmapAllocator considers pointers to the meta-region of a chunk to be + // valid, but we don't want that. 
+ if (addr < chunk) return 0; + ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk)); + CHECK(m); + if (m->allocated && addr < chunk + m->requested_size) + return chunk; + return 0; +} + +uptr GetUserBegin(uptr chunk) { + return chunk; +} + +LsanMetadata::LsanMetadata(uptr chunk) { + metadata_ = Metadata(reinterpret_cast<void *>(chunk)); + CHECK(metadata_); +} + +bool LsanMetadata::allocated() const { + return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated; +} + +ChunkTag LsanMetadata::tag() const { + return reinterpret_cast<ChunkMetadata *>(metadata_)->tag; +} + +void LsanMetadata::set_tag(ChunkTag value) { + reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value; +} + +uptr LsanMetadata::requested_size() const { + return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size; +} + +u32 LsanMetadata::stack_trace_id() const { + return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id; +} + +void ForEachChunk(ForEachChunkCallback callback, void *arg) { + allocator.ForEachChunk(callback, arg); +} + +IgnoreObjectResult IgnoreObjectLocked(const void *p) { + void *chunk = allocator.GetBlockBegin(p); + if (!chunk || p < chunk) return kIgnoreObjectInvalid; + ChunkMetadata *m = Metadata(chunk); + CHECK(m); + if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) { + if (m->tag == kIgnored) + return kIgnoreObjectAlreadyIgnored; + m->tag = kIgnored; + return kIgnoreObjectSuccess; + } else { + return kIgnoreObjectInvalid; + } +} +} // namespace __lsan diff --git a/libsanitizer/lsan/lsan_allocator.h b/libsanitizer/lsan/lsan_allocator.h new file mode 100644 index 00000000000..61ea86572ef --- /dev/null +++ b/libsanitizer/lsan/lsan_allocator.h @@ -0,0 +1,37 @@ +//=-- lsan_allocator.h ----------------------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Allocator for standalone LSan. +// +//===----------------------------------------------------------------------===// + +#ifndef LSAN_ALLOCATOR_H +#define LSAN_ALLOCATOR_H + +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_internal_defs.h" + +namespace __lsan { + +void *Allocate(const StackTrace &stack, uptr size, uptr alignment, + bool cleared); +void Deallocate(void *p); +void *Reallocate(const StackTrace &stack, void *p, uptr new_size, + uptr alignment); +uptr GetMallocUsableSize(void *p); + +template<typename Callable> +void ForEachChunk(const Callable &callback); + +void GetAllocatorCacheRange(uptr *begin, uptr *end); +void AllocatorThreadFinish(); +void InitializeAllocator(); + +} // namespace __lsan + +#endif // LSAN_ALLOCATOR_H diff --git a/libsanitizer/lsan/lsan_common.cc b/libsanitizer/lsan/lsan_common.cc new file mode 100644 index 00000000000..ce82430f48b --- /dev/null +++ b/libsanitizer/lsan/lsan_common.cc @@ -0,0 +1,577 @@ +//=-- lsan_common.cc ------------------------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Implementation of common leak checking functionality. 
+// +//===----------------------------------------------------------------------===// + +#include "lsan_common.h" + +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "sanitizer_common/sanitizer_stoptheworld.h" +#include "sanitizer_common/sanitizer_suppressions.h" +#include "sanitizer_common/sanitizer_report_decorator.h" + +#if CAN_SANITIZE_LEAKS +namespace __lsan { + +// This mutex is used to prevent races between DoLeakCheck and IgnoreObject. +BlockingMutex global_mutex(LINKER_INITIALIZED); + +THREADLOCAL int disable_counter; +bool DisabledInThisThread() { return disable_counter > 0; } + +Flags lsan_flags; + +static void InitializeFlags() { + Flags *f = flags(); + // Default values. + f->report_objects = false; + f->resolution = 0; + f->max_leaks = 0; + f->exitcode = 23; + f->suppressions=""; + f->use_registers = true; + f->use_globals = true; + f->use_stacks = true; + f->use_tls = true; + f->use_unaligned = false; + f->verbosity = 0; + f->log_pointers = false; + f->log_threads = false; + + const char *options = GetEnv("LSAN_OPTIONS"); + if (options) { + ParseFlag(options, &f->use_registers, "use_registers"); + ParseFlag(options, &f->use_globals, "use_globals"); + ParseFlag(options, &f->use_stacks, "use_stacks"); + ParseFlag(options, &f->use_tls, "use_tls"); + ParseFlag(options, &f->use_unaligned, "use_unaligned"); + ParseFlag(options, &f->report_objects, "report_objects"); + ParseFlag(options, &f->resolution, "resolution"); + CHECK_GE(&f->resolution, 0); + ParseFlag(options, &f->max_leaks, "max_leaks"); + CHECK_GE(&f->max_leaks, 0); + ParseFlag(options, &f->verbosity, "verbosity"); + ParseFlag(options, &f->log_pointers, "log_pointers"); + ParseFlag(options, &f->log_threads, "log_threads"); + ParseFlag(options, &f->exitcode, "exitcode"); + 
ParseFlag(options, &f->suppressions, "suppressions"); + } +} + +SuppressionContext *suppression_ctx; + +void InitializeSuppressions() { + CHECK(!suppression_ctx); + ALIGNED(64) static char placeholder_[sizeof(SuppressionContext)]; + suppression_ctx = new(placeholder_) SuppressionContext; + char *suppressions_from_file; + uptr buffer_size; + if (ReadFileToBuffer(flags()->suppressions, &suppressions_from_file, + &buffer_size, 1 << 26 /* max_len */)) + suppression_ctx->Parse(suppressions_from_file); + if (flags()->suppressions[0] && !buffer_size) { + Printf("LeakSanitizer: failed to read suppressions file '%s'\n", + flags()->suppressions); + Die(); + } + if (&__lsan_default_suppressions) + suppression_ctx->Parse(__lsan_default_suppressions()); +} + +void InitCommonLsan() { + InitializeFlags(); + InitializeSuppressions(); + InitializePlatformSpecificModules(); +} + +class Decorator: private __sanitizer::AnsiColorDecorator { + public: + Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { } + const char *Error() { return Red(); } + const char *Leak() { return Blue(); } + const char *End() { return Default(); } +}; + +static inline bool CanBeAHeapPointer(uptr p) { + // Since our heap is located in mmap-ed memory, we can assume a sensible lower + // bound on heap addresses. + const uptr kMinAddress = 4 * 4096; + if (p < kMinAddress) return false; +#ifdef __x86_64__ + // Accept only canonical form user-space addresses. + return ((p >> 47) == 0); +#else + return true; +#endif +} + +// Scans the memory range, looking for byte patterns that point into allocator +// chunks. Marks those chunks with |tag| and adds them to |frontier|. +// There are two usage modes for this function: finding reachable or ignored +// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks +// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill, +// so |frontier| = 0. 
+void ScanRangeForPointers(uptr begin, uptr end, + Frontier *frontier, + const char *region_type, ChunkTag tag) { + const uptr alignment = flags()->pointer_alignment(); + if (flags()->log_pointers) + Report("Scanning %s range %p-%p.\n", region_type, begin, end); + uptr pp = begin; + if (pp % alignment) + pp = pp + alignment - pp % alignment; + for (; pp + sizeof(void *) <= end; pp += alignment) { // NOLINT + void *p = *reinterpret_cast<void **>(pp); + if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue; + uptr chunk = PointsIntoChunk(p); + if (!chunk) continue; + LsanMetadata m(chunk); + // Reachable beats ignored beats leaked. + if (m.tag() == kReachable) continue; + if (m.tag() == kIgnored && tag != kReachable) continue; + m.set_tag(tag); + if (flags()->log_pointers) + Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p, + chunk, chunk + m.requested_size(), m.requested_size()); + if (frontier) + frontier->push_back(chunk); + } +} + +// Scans thread data (stacks and TLS) for heap pointers. +static void ProcessThreads(SuspendedThreadsList const &suspended_threads, + Frontier *frontier) { + InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount()); + uptr registers_begin = reinterpret_cast<uptr>(registers.data()); + uptr registers_end = registers_begin + registers.size(); + for (uptr i = 0; i < suspended_threads.thread_count(); i++) { + uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i)); + if (flags()->log_threads) Report("Processing thread %d.\n", os_id); + uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end; + bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end, + &tls_begin, &tls_end, + &cache_begin, &cache_end); + if (!thread_found) { + // If a thread can't be found in the thread registry, it's probably in the + // process of destruction. Log this event and move on. 
+      if (flags()->log_threads)
+        Report("Thread %d not found in registry.\n", os_id);
+      continue;
+    }
+    uptr sp;
+    bool have_registers =
+        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
+    if (!have_registers) {
+      // Pass os_id: the original call supplied no argument for %d, which is
+      // undefined behavior and printed garbage.
+      Report("Unable to get registers from thread %d.\n", os_id);
+      // If unable to get SP, consider the entire stack to be reachable.
+      sp = stack_begin;
+    }
+
+    if (flags()->use_registers && have_registers)
+      ScanRangeForPointers(registers_begin, registers_end, frontier,
+                           "REGISTERS", kReachable);
+
+    if (flags()->use_stacks) {
+      if (flags()->log_threads)
+        Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
+      if (sp < stack_begin || sp >= stack_end) {
+        // SP is outside the recorded stack range (e.g. the thread is running a
+        // signal handler on alternate stack). Again, consider the entire stack
+        // range to be reachable.
+        if (flags()->log_threads)
+          Report("WARNING: stack pointer not in stack range.\n");
+      } else {
+        // Shrink the stack range to ignore out-of-scope values.
+        stack_begin = sp;
+      }
+      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
+                           kReachable);
+    }
+
+    if (flags()->use_tls) {
+      if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
+      if (cache_begin == cache_end) {
+        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
+      } else {
+        // Because LSan should not be loaded with dlopen(), we can assume
+        // that allocator cache will be part of static TLS image.
+ CHECK_LE(tls_begin, cache_begin); + CHECK_GE(tls_end, cache_end); + if (tls_begin < cache_begin) + ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS", + kReachable); + if (tls_end > cache_end) + ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable); + } + } + } +} + +static void FloodFillTag(Frontier *frontier, ChunkTag tag) { + while (frontier->size()) { + uptr next_chunk = frontier->back(); + frontier->pop_back(); + LsanMetadata m(next_chunk); + ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier, + "HEAP", tag); + } +} + +// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks +// which are reachable from it as indirectly leaked. +static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) { + chunk = GetUserBegin(chunk); + LsanMetadata m(chunk); + if (m.allocated() && m.tag() != kReachable) { + ScanRangeForPointers(chunk, chunk + m.requested_size(), + /* frontier */ 0, "HEAP", kIndirectlyLeaked); + } +} + +// ForEachChunk callback. If chunk is marked as ignored, adds its address to +// frontier. +static void CollectIgnoredCb(uptr chunk, void *arg) { + CHECK(arg); + chunk = GetUserBegin(chunk); + LsanMetadata m(chunk); + if (m.allocated() && m.tag() == kIgnored) + reinterpret_cast<Frontier *>(arg)->push_back(chunk); +} + +// Sets the appropriate tag on each chunk. +static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) { + // Holds the flood fill frontier. + Frontier frontier(GetPageSizeCached()); + + if (flags()->use_globals) + ProcessGlobalRegions(&frontier); + ProcessThreads(suspended_threads, &frontier); + FloodFillTag(&frontier, kReachable); + // The check here is relatively expensive, so we do this in a separate flood + // fill. That way we can skip the check for chunks that are reachable + // otherwise. 
+ ProcessPlatformSpecificAllocations(&frontier); + FloodFillTag(&frontier, kReachable); + + if (flags()->log_pointers) + Report("Scanning ignored chunks.\n"); + CHECK_EQ(0, frontier.size()); + ForEachChunk(CollectIgnoredCb, &frontier); + FloodFillTag(&frontier, kIgnored); + + // Iterate over leaked chunks and mark those that are reachable from other + // leaked chunks. + if (flags()->log_pointers) + Report("Scanning leaked chunks.\n"); + ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */); +} + +static void PrintStackTraceById(u32 stack_trace_id) { + CHECK(stack_trace_id); + uptr size = 0; + const uptr *trace = StackDepotGet(stack_trace_id, &size); + StackTrace::PrintStack(trace, size, common_flags()->symbolize, + common_flags()->strip_path_prefix, 0); +} + +// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport. +static void CollectLeaksCb(uptr chunk, void *arg) { + CHECK(arg); + LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg); + chunk = GetUserBegin(chunk); + LsanMetadata m(chunk); + if (!m.allocated()) return; + if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) { + uptr resolution = flags()->resolution; + if (resolution > 0) { + uptr size = 0; + const uptr *trace = StackDepotGet(m.stack_trace_id(), &size); + size = Min(size, resolution); + leak_report->Add(StackDepotPut(trace, size), m.requested_size(), m.tag()); + } else { + leak_report->Add(m.stack_trace_id(), m.requested_size(), m.tag()); + } + } +} + +// ForEachChunkCallback. Prints addresses of unreachable chunks. +static void PrintLeakedCb(uptr chunk, void *arg) { + chunk = GetUserBegin(chunk); + LsanMetadata m(chunk); + if (!m.allocated()) return; + if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) { + Printf("%s leaked %zu byte object at %p.\n", + m.tag() == kDirectlyLeaked ? 
"Directly" : "Indirectly", + m.requested_size(), chunk); + } +} + +static void PrintMatchedSuppressions() { + InternalMmapVector<Suppression *> matched(1); + suppression_ctx->GetMatched(&matched); + if (!matched.size()) + return; + const char *line = "-----------------------------------------------------"; + Printf("%s\n", line); + Printf("Suppressions used:\n"); + Printf(" count bytes template\n"); + for (uptr i = 0; i < matched.size(); i++) + Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count), + matched[i]->weight, matched[i]->templ); + Printf("%s\n\n", line); +} + +static void PrintLeaked() { + Printf("\n"); + Printf("Reporting individual objects:\n"); + ForEachChunk(PrintLeakedCb, 0 /* arg */); +} + +struct DoLeakCheckParam { + bool success; + LeakReport leak_report; +}; + +static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads, + void *arg) { + DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg); + CHECK(param); + CHECK(!param->success); + CHECK(param->leak_report.IsEmpty()); + ClassifyAllChunks(suspended_threads); + ForEachChunk(CollectLeaksCb, ¶m->leak_report); + if (!param->leak_report.IsEmpty() && flags()->report_objects) + PrintLeaked(); + param->success = true; +} + +void DoLeakCheck() { + EnsureMainThreadIDIsCorrect(); + BlockingMutexLock l(&global_mutex); + static bool already_done; + if (already_done) return; + already_done = true; + if (&__lsan_is_turned_off && __lsan_is_turned_off()) + return; + + DoLeakCheckParam param; + param.success = false; + LockThreadRegistry(); + LockAllocator(); + StopTheWorld(DoLeakCheckCallback, ¶m); + UnlockAllocator(); + UnlockThreadRegistry(); + + if (!param.success) { + Report("LeakSanitizer has encountered a fatal error.\n"); + Die(); + } + uptr have_unsuppressed = param.leak_report.ApplySuppressions(); + if (have_unsuppressed) { + Decorator d; + Printf("\n" + "=================================================================" + "\n"); + Printf("%s", d.Error()); 
+ Report("ERROR: LeakSanitizer: detected memory leaks\n"); + Printf("%s", d.End()); + param.leak_report.PrintLargest(flags()->max_leaks); + } + if (have_unsuppressed || (flags()->verbosity >= 1)) { + PrintMatchedSuppressions(); + param.leak_report.PrintSummary(); + } + if (have_unsuppressed && flags()->exitcode) + internal__exit(flags()->exitcode); +} + +static Suppression *GetSuppressionForAddr(uptr addr) { + static const uptr kMaxAddrFrames = 16; + InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames); + for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo(); + uptr addr_frames_num = + getSymbolizer()->SymbolizeCode(addr, addr_frames.data(), kMaxAddrFrames); + for (uptr i = 0; i < addr_frames_num; i++) { + Suppression* s; + if (suppression_ctx->Match(addr_frames[i].function, SuppressionLeak, &s) || + suppression_ctx->Match(addr_frames[i].file, SuppressionLeak, &s) || + suppression_ctx->Match(addr_frames[i].module, SuppressionLeak, &s)) + return s; + } + return 0; +} + +static Suppression *GetSuppressionForStack(u32 stack_trace_id) { + uptr size = 0; + const uptr *trace = StackDepotGet(stack_trace_id, &size); + for (uptr i = 0; i < size; i++) { + Suppression *s = + GetSuppressionForAddr(StackTrace::GetPreviousInstructionPc(trace[i])); + if (s) return s; + } + return 0; +} + +///// LeakReport implementation. ///// + +// A hard limit on the number of distinct leaks, to avoid quadratic complexity +// in LeakReport::Add(). We don't expect to ever see this many leaks in +// real-world applications. +// FIXME: Get rid of this limit by changing the implementation of LeakReport to +// use a hash table. 
+const uptr kMaxLeaksConsidered = 5000; + +void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) { + CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked); + bool is_directly_leaked = (tag == kDirectlyLeaked); + for (uptr i = 0; i < leaks_.size(); i++) + if (leaks_[i].stack_trace_id == stack_trace_id && + leaks_[i].is_directly_leaked == is_directly_leaked) { + leaks_[i].hit_count++; + leaks_[i].total_size += leaked_size; + return; + } + if (leaks_.size() == kMaxLeaksConsidered) return; + Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id, + is_directly_leaked, /* is_suppressed */ false }; + leaks_.push_back(leak); +} + +static bool LeakComparator(const Leak &leak1, const Leak &leak2) { + if (leak1.is_directly_leaked == leak2.is_directly_leaked) + return leak1.total_size > leak2.total_size; + else + return leak1.is_directly_leaked; +} + +void LeakReport::PrintLargest(uptr num_leaks_to_print) { + CHECK(leaks_.size() <= kMaxLeaksConsidered); + Printf("\n"); + if (leaks_.size() == kMaxLeaksConsidered) + Printf("Too many leaks! Only the first %zu leaks encountered will be " + "reported.\n", + kMaxLeaksConsidered); + + uptr unsuppressed_count = 0; + for (uptr i = 0; i < leaks_.size(); i++) + if (!leaks_[i].is_suppressed) unsuppressed_count++; + if (num_leaks_to_print > 0 && num_leaks_to_print < unsuppressed_count) + Printf("The %zu largest leak(s):\n", num_leaks_to_print); + InternalSort(&leaks_, leaks_.size(), LeakComparator); + uptr leaks_printed = 0; + Decorator d; + for (uptr i = 0; i < leaks_.size(); i++) { + if (leaks_[i].is_suppressed) continue; + Printf("%s", d.Leak()); + Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n", + leaks_[i].is_directly_leaked ? 
"Direct" : "Indirect", + leaks_[i].total_size, leaks_[i].hit_count); + Printf("%s", d.End()); + PrintStackTraceById(leaks_[i].stack_trace_id); + Printf("\n"); + leaks_printed++; + if (leaks_printed == num_leaks_to_print) break; + } + if (leaks_printed < unsuppressed_count) { + uptr remaining = unsuppressed_count - leaks_printed; + Printf("Omitting %zu more leak(s).\n", remaining); + } +} + +void LeakReport::PrintSummary() { + CHECK(leaks_.size() <= kMaxLeaksConsidered); + uptr bytes = 0, allocations = 0; + for (uptr i = 0; i < leaks_.size(); i++) { + if (leaks_[i].is_suppressed) continue; + bytes += leaks_[i].total_size; + allocations += leaks_[i].hit_count; + } + const int kMaxSummaryLength = 128; + InternalScopedBuffer<char> summary(kMaxSummaryLength); + internal_snprintf(summary.data(), kMaxSummaryLength, + "LeakSanitizer: %zu byte(s) leaked in %zu allocation(s).", + bytes, allocations); + __sanitizer_report_error_summary(summary.data()); +} + +uptr LeakReport::ApplySuppressions() { + uptr unsuppressed_count = 0; + for (uptr i = 0; i < leaks_.size(); i++) { + Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id); + if (s) { + s->weight += leaks_[i].total_size; + s->hit_count += leaks_[i].hit_count; + leaks_[i].is_suppressed = true; + } else { + unsuppressed_count++; + } + } + return unsuppressed_count; +} +} // namespace __lsan +#endif // CAN_SANITIZE_LEAKS + +using namespace __lsan; // NOLINT + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +void __lsan_ignore_object(const void *p) { +#if CAN_SANITIZE_LEAKS + // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not + // locked. 
+ BlockingMutexLock l(&global_mutex); + IgnoreObjectResult res = IgnoreObjectLocked(p); + if (res == kIgnoreObjectInvalid && flags()->verbosity >= 2) + Report("__lsan_ignore_object(): no heap object found at %p", p); + if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 2) + Report("__lsan_ignore_object(): " + "heap object at %p is already being ignored\n", p); + if (res == kIgnoreObjectSuccess && flags()->verbosity >= 3) + Report("__lsan_ignore_object(): ignoring heap object at %p\n", p); +#endif // CAN_SANITIZE_LEAKS +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __lsan_disable() { +#if CAN_SANITIZE_LEAKS + __lsan::disable_counter++; +#endif +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __lsan_enable() { +#if CAN_SANITIZE_LEAKS + if (!__lsan::disable_counter) { + Report("Unmatched call to __lsan_enable().\n"); + Die(); + } + __lsan::disable_counter--; +#endif +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __lsan_do_leak_check() { +#if CAN_SANITIZE_LEAKS + if (common_flags()->detect_leaks) + __lsan::DoLeakCheck(); +#endif // CAN_SANITIZE_LEAKS +} + +#if !SANITIZER_SUPPORTS_WEAK_HOOKS +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE +int __lsan_is_turned_off() { + return 0; +} +#endif +} // extern "C" diff --git a/libsanitizer/lsan/lsan_common.h b/libsanitizer/lsan/lsan_common.h new file mode 100644 index 00000000000..7906ecb9177 --- /dev/null +++ b/libsanitizer/lsan/lsan_common.h @@ -0,0 +1,174 @@ +//=-- lsan_common.h -------------------------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Private LSan header. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LSAN_COMMON_H +#define LSAN_COMMON_H + +#include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_platform.h" +#include "sanitizer_common/sanitizer_symbolizer.h" + +#if SANITIZER_LINUX && defined(__x86_64__) +#define CAN_SANITIZE_LEAKS 1 +#else +#define CAN_SANITIZE_LEAKS 0 +#endif + +namespace __lsan { + +// Chunk tags. +enum ChunkTag { + kDirectlyLeaked = 0, // default + kIndirectlyLeaked = 1, + kReachable = 2, + kIgnored = 3 +}; + +struct Flags { + uptr pointer_alignment() const { + return use_unaligned ? 1 : sizeof(uptr); + } + + // Print addresses of leaked objects after main leak report. + bool report_objects; + // Aggregate two objects into one leak if this many stack frames match. If + // zero, the entire stack trace must match. + int resolution; + // The number of leaks reported. + int max_leaks; + // If nonzero kill the process with this exit code upon finding leaks. + int exitcode; + // Suppressions file name. + const char* suppressions; + + // Flags controlling the root set of reachable memory. + // Global variables (.data and .bss). + bool use_globals; + // Thread stacks. + bool use_stacks; + // Thread registers. + bool use_registers; + // TLS and thread-specific storage. + bool use_tls; + + // Consider unaligned pointers valid. + bool use_unaligned; + + // User-visible verbosity. + int verbosity; + + // Debug logging. + bool log_pointers; + bool log_threads; +}; + +extern Flags lsan_flags; +inline Flags *flags() { return &lsan_flags; } + +struct Leak { + uptr hit_count; + uptr total_size; + u32 stack_trace_id; + bool is_directly_leaked; + bool is_suppressed; +}; + +// Aggregates leaks by stack trace prefix. 
+class LeakReport { + public: + LeakReport() : leaks_(1) {} + void Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag); + void PrintLargest(uptr max_leaks); + void PrintSummary(); + bool IsEmpty() { return leaks_.size() == 0; } + uptr ApplySuppressions(); + private: + InternalMmapVector<Leak> leaks_; +}; + +typedef InternalMmapVector<uptr> Frontier; + +// Platform-specific functions. +void InitializePlatformSpecificModules(); +void ProcessGlobalRegions(Frontier *frontier); +void ProcessPlatformSpecificAllocations(Frontier *frontier); + +void ScanRangeForPointers(uptr begin, uptr end, + Frontier *frontier, + const char *region_type, ChunkTag tag); + +enum IgnoreObjectResult { + kIgnoreObjectSuccess, + kIgnoreObjectAlreadyIgnored, + kIgnoreObjectInvalid +}; + +// Functions called from the parent tool. +void InitCommonLsan(); +void DoLeakCheck(); +bool DisabledInThisThread(); + +// The following must be implemented in the parent tool. + +void ForEachChunk(ForEachChunkCallback callback, void *arg); +// Returns the address range occupied by the global allocator object. +void GetAllocatorGlobalRange(uptr *begin, uptr *end); +// Wrappers for allocator's ForceLock()/ForceUnlock(). +void LockAllocator(); +void UnlockAllocator(); +// Wrappers for ThreadRegistry access. +void LockThreadRegistry(); +void UnlockThreadRegistry(); +bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end, + uptr *tls_begin, uptr *tls_end, + uptr *cache_begin, uptr *cache_end); +// If called from the main thread, updates the main thread's TID in the thread +// registry. We need this to handle processes that fork() without a subsequent +// exec(), which invalidates the recorded TID. To update it, we must call +// gettid() from the main thread. Our solution is to call this function before +// leak checking and also before every call to pthread_create() (to handle cases +// where leak checking is initiated from a non-main thread). 
+void EnsureMainThreadIDIsCorrect(); +// If p points into a chunk that has been allocated to the user, returns its +// user-visible address. Otherwise, returns 0. +uptr PointsIntoChunk(void *p); +// Returns address of user-visible chunk contained in this allocator chunk. +uptr GetUserBegin(uptr chunk); +// Helper for __lsan_ignore_object(). +IgnoreObjectResult IgnoreObjectLocked(const void *p); +// Wrapper for chunk metadata operations. +class LsanMetadata { + public: + // Constructor accepts address of user-visible chunk. + explicit LsanMetadata(uptr chunk); + bool allocated() const; + ChunkTag tag() const; + void set_tag(ChunkTag value); + uptr requested_size() const; + u32 stack_trace_id() const; + private: + void *metadata_; +}; + +} // namespace __lsan + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE +int __lsan_is_turned_off(); + +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE +const char *__lsan_default_suppressions(); +} // extern "C" + +#endif // LSAN_COMMON_H diff --git a/libsanitizer/lsan/lsan_common_linux.cc b/libsanitizer/lsan/lsan_common_linux.cc new file mode 100644 index 00000000000..80d2459a9ad --- /dev/null +++ b/libsanitizer/lsan/lsan_common_linux.cc @@ -0,0 +1,139 @@ +//=-- lsan_common_linux.cc ------------------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Implementation of common leak checking functionality. Linux-specific code. 
+// +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#include "lsan_common.h" + +#if CAN_SANITIZE_LEAKS && SANITIZER_LINUX +#include <link.h> + +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_linux.h" +#include "sanitizer_common/sanitizer_stackdepot.h" + +namespace __lsan { + +static const char kLinkerName[] = "ld"; +// We request 2 modules matching "ld", so we can print a warning if there's more +// than one match. But only the first one is actually used. +static char linker_placeholder[2 * sizeof(LoadedModule)] ALIGNED(64); +static LoadedModule *linker = 0; + +static bool IsLinker(const char* full_name) { + return LibraryNameIs(full_name, kLinkerName); +} + +void InitializePlatformSpecificModules() { + internal_memset(linker_placeholder, 0, sizeof(linker_placeholder)); + uptr num_matches = GetListOfModules( + reinterpret_cast<LoadedModule *>(linker_placeholder), 2, IsLinker); + if (num_matches == 1) { + linker = reinterpret_cast<LoadedModule *>(linker_placeholder); + return; + } + if (num_matches == 0) + Report("LeakSanitizer: Dynamic linker not found. " + "TLS will not be handled correctly.\n"); + else if (num_matches > 1) + Report("LeakSanitizer: Multiple modules match \"%s\". " + "TLS will not be handled correctly.\n", kLinkerName); + linker = 0; +} + +static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size, + void *data) { + Frontier *frontier = reinterpret_cast<Frontier *>(data); + for (uptr j = 0; j < info->dlpi_phnum; j++) { + const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]); + // We're looking for .data and .bss sections, which reside in writeable, + // loadable segments. 
+ if (!(phdr->p_flags & PF_W) || (phdr->p_type != PT_LOAD) || + (phdr->p_memsz == 0)) + continue; + uptr begin = info->dlpi_addr + phdr->p_vaddr; + uptr end = begin + phdr->p_memsz; + uptr allocator_begin = 0, allocator_end = 0; + GetAllocatorGlobalRange(&allocator_begin, &allocator_end); + if (begin <= allocator_begin && allocator_begin < end) { + CHECK_LE(allocator_begin, allocator_end); + CHECK_LT(allocator_end, end); + if (begin < allocator_begin) + ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL", + kReachable); + if (allocator_end < end) + ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", + kReachable); + } else { + ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable); + } + } + return 0; +} + +// Scans global variables for heap pointers. +void ProcessGlobalRegions(Frontier *frontier) { + // FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of + // deadlocking by running this under StopTheWorld. However, the lock is + // reentrant, so we should be able to fix this by acquiring the lock before + // suspending threads. + dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier); +} + +static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) { + CHECK(stack_id); + uptr size = 0; + const uptr *trace = map->Get(stack_id, &size); + // The top frame is our malloc/calloc/etc. The next frame is the caller. + if (size >= 2) + return trace[1]; + return 0; +} + +struct ProcessPlatformAllocParam { + Frontier *frontier; + StackDepotReverseMap *stack_depot_reverse_map; +}; + +// ForEachChunk callback. Identifies unreachable chunks which must be treated as +// reachable. Marks them as reachable and adds them to the frontier. 
+static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) { + CHECK(arg); + ProcessPlatformAllocParam *param = + reinterpret_cast<ProcessPlatformAllocParam *>(arg); + chunk = GetUserBegin(chunk); + LsanMetadata m(chunk); + if (m.allocated() && m.tag() != kReachable) { + u32 stack_id = m.stack_trace_id(); + uptr caller_pc = 0; + if (stack_id > 0) + caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map); + // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark + // it as reachable, as we can't properly report its allocation stack anyway. + if (caller_pc == 0 || linker->containsAddress(caller_pc)) { + m.set_tag(kReachable); + param->frontier->push_back(chunk); + } + } +} + +// Handles dynamically allocated TLS blocks by treating all chunks allocated +// from ld-linux.so as reachable. +void ProcessPlatformSpecificAllocations(Frontier *frontier) { + if (!flags()->use_tls) return; + if (!linker) return; + StackDepotReverseMap stack_depot_reverse_map; + ProcessPlatformAllocParam arg = {frontier, &stack_depot_reverse_map}; + ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg); +} + +} // namespace __lsan +#endif // CAN_SANITIZE_LEAKS && SANITIZER_LINUX diff --git a/libsanitizer/lsan/lsan_interceptors.cc b/libsanitizer/lsan/lsan_interceptors.cc new file mode 100644 index 00000000000..40ddc7773e2 --- /dev/null +++ b/libsanitizer/lsan/lsan_interceptors.cc @@ -0,0 +1,279 @@ +//=-- lsan_interceptors.cc ------------------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Interceptors for standalone LSan. 
+// +//===----------------------------------------------------------------------===// + +#include "interception/interception.h" +#include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_linux.h" +#include "sanitizer_common/sanitizer_platform_limits_posix.h" +#include "lsan.h" +#include "lsan_allocator.h" +#include "lsan_thread.h" + +using namespace __lsan; + +extern "C" { +int pthread_attr_init(void *attr); +int pthread_attr_destroy(void *attr); +int pthread_attr_getdetachstate(void *attr, int *v); +int pthread_key_create(unsigned *key, void (*destructor)(void* v)); +int pthread_setspecific(unsigned key, const void *v); +} + +#define GET_STACK_TRACE \ + StackTrace stack; \ + { \ + uptr stack_top = 0, stack_bottom = 0; \ + ThreadContext *t; \ + bool fast = common_flags()->fast_unwind_on_malloc; \ + if (fast && (t = CurrentThreadContext())) { \ + stack_top = t->stack_end(); \ + stack_bottom = t->stack_begin(); \ + } \ + GetStackTrace(&stack, __sanitizer::common_flags()->malloc_context_size, \ + StackTrace::GetCurrentPc(), \ + GET_CURRENT_FRAME(), stack_top, stack_bottom, fast); \ + } + +///// Malloc/free interceptors. 
/////
+
+// Every allocation is zero-initialized (passed as Allocate's `cleared` flag).
+const bool kAlwaysClearMemory = true;
+
+// Forward-declare std::nothrow_t so the nothrow operator new/delete overloads
+// below do not require <new>.
+namespace std {
+  struct nothrow_t;
+}
+
+INTERCEPTOR(void*, malloc, uptr size) {
+  Init();
+  GET_STACK_TRACE;
+  return Allocate(stack, size, 1, kAlwaysClearMemory);
+}
+
+INTERCEPTOR(void, free, void *p) {
+  Init();
+  Deallocate(p);
+}
+
+INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
+  // Reject nmemb * size overflow before performing the multiplication below.
+  if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
+  Init();
+  GET_STACK_TRACE;
+  size *= nmemb;
+  return Allocate(stack, size, 1, true);
+}
+
+INTERCEPTOR(void*, realloc, void *q, uptr size) {
+  Init();
+  GET_STACK_TRACE;
+  return Reallocate(stack, q, size, 1);
+}
+
+INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
+  Init();
+  GET_STACK_TRACE;
+  return Allocate(stack, size, alignment, kAlwaysClearMemory);
+}
+
+INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
+  Init();
+  GET_STACK_TRACE;
+  *memptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
+  // FIXME: Return ENOMEM if user requested more than max alloc size.
+  return 0;
+}
+
+// valloc returns page-aligned memory; valloc(0) is treated as one page.
+INTERCEPTOR(void*, valloc, uptr size) {
+  Init();
+  GET_STACK_TRACE;
+  if (size == 0)
+    size = GetPageSizeCached();
+  return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
+}
+
+INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
+  Init();
+  return GetMallocUsableSize(ptr);
+}
+
+// Stand-in with the same size as glibc's struct mallinfo (ten ints) --
+// presumably; confirm the layout matches the libc being targeted.
+struct fake_mallinfo {
+  int x[10];
+};
+
+// mallinfo: report all-zero statistics; LSan does not track them.
+INTERCEPTOR(struct fake_mallinfo, mallinfo, void) {
+  struct fake_mallinfo res;
+  internal_memset(&res, 0, sizeof(res));
+  return res;
+}
+
+// NOTE(review): glibc's mallopt returns 1 on success and 0 on error; -1 is
+// out of spec -- confirm this is intentional "always fail" behavior.
+INTERCEPTOR(int, mallopt, int cmd, int value) {
+  return -1;
+}
+
+INTERCEPTOR(void*, pvalloc, uptr size) {
+  Init();
+  GET_STACK_TRACE;
+  uptr PageSize = GetPageSizeCached();
+  size = RoundUpTo(size, PageSize);
+  if (size == 0) {
+    // pvalloc(0) should allocate one page.
+    size = PageSize;
+  }
+  // The alignment argument re-reads GetPageSizeCached(); same cached value as
+  // the PageSize local above.
+  return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
+}
+
+INTERCEPTOR(void, cfree, void *p) ALIAS("free");
+
+#define OPERATOR_NEW_BODY \
+  Init(); \
+  GET_STACK_TRACE; \
+  return Allocate(stack, size, 1, kAlwaysClearMemory);
+
+INTERCEPTOR_ATTRIBUTE
+void *operator new(uptr size) { OPERATOR_NEW_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](uptr size) { OPERATOR_NEW_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
+
+#define OPERATOR_DELETE_BODY \
+  Init(); \
+  Deallocate(ptr);
+
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr) { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr) { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const &) {
+  OPERATOR_DELETE_BODY;
+}
+
+// We need this to intercept the __libc_memalign calls that are used to
+// allocate dynamic TLS space in ld-linux.so.
+INTERCEPTOR(void *, __libc_memalign, uptr align, uptr s) ALIAS("memalign");
+
+///// Thread initialization and finalization. /////
+
+static unsigned g_thread_finalize_key;
+
+// TSD destructor: re-arms itself until the stored iteration count reaches 1,
+// so that ThreadFinish runs as late as possible among TSD destructors.
+static void thread_finalize(void *v) {
+  uptr iter = (uptr)v;
+  if (iter > 1) {
+    if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) {
+      Report("LeakSanitizer: failed to set thread key.\n");
+      Die();
+    }
+    return;
+  }
+  ThreadFinish();
+}
+
+// Handshake data passed from the pthread_create interceptor to the new
+// thread; `tid` doubles as the synchronization flag (0 = not yet published).
+struct ThreadParam {
+  void *(*callback)(void *arg);
+  void *param;
+  atomic_uintptr_t tid;
+};
+
+// PTHREAD_DESTRUCTOR_ITERATIONS from glibc. 
+// NOTE(review): hard-coded to glibc's value; other libcs may use a different
+// destructor-iteration count -- confirm for non-glibc targets.
+const uptr kPthreadDestructorIterations = 4;
+
+// Thread entry wrapper. Waits for the parent to publish our registry tid,
+// acknowledges by resetting it to 0 (releasing the parent's stack-allocated
+// ThreadParam), registers this thread, then runs the user callback.
+extern "C" void *__lsan_thread_start_func(void *arg) {
+  ThreadParam *p = (ThreadParam*)arg;
+  void* (*callback)(void *arg) = p->callback;
+  void *param = p->param;
+  // Wait until the last iteration to maximize the chance that we are the last
+  // destructor to run.
+  if (pthread_setspecific(g_thread_finalize_key,
+                          (void*)kPthreadDestructorIterations)) {
+    Report("LeakSanitizer: failed to set thread key.\n");
+    Die();
+  }
+  // NOTE(review): tid is narrowed from uptr (atomic_uintptr_t) to int and
+  // then passed as u32 -- benign for realistic tid values, but the mixed
+  // signedness is worth cleaning up.
+  int tid = 0;
+  while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
+    internal_sched_yield();
+  atomic_store(&p->tid, 0, memory_order_release);
+  SetCurrentThread(tid);
+  ThreadStart(tid, GetTid());
+  return callback(param);
+}
+
+// Intercepts thread creation so the new thread is registered before the user
+// callback runs. A two-way handshake on p.tid keeps `p` (allocated on this
+// stack frame) alive until the child has read everything it needs.
+INTERCEPTOR(int, pthread_create, void *th, void *attr,
+            void *(*callback)(void *), void *param) {
+  Init();
+  EnsureMainThreadIDIsCorrect();
+  __sanitizer_pthread_attr_t myattr;
+  if (attr == 0) {
+    pthread_attr_init(&myattr);
+    attr = &myattr;
+  }
+  AdjustStackSizeLinux(attr, 0);
+  int detached = 0;
+  pthread_attr_getdetachstate(attr, &detached);
+  ThreadParam p;
+  p.callback = callback;
+  p.param = param;
+  atomic_store(&p.tid, 0, memory_order_relaxed);
+  int res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
+  if (res == 0) {
+    // NOTE(review): *(uptr *)th assumes pthread_t is word-sized -- confirm on
+    // all supported targets.
+    int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th, detached);
+    CHECK_NE(tid, 0);
+    atomic_store(&p.tid, tid, memory_order_release);
+    // Wait for the child to acknowledge (reset tid to 0) before `p` goes out
+    // of scope.
+    while (atomic_load(&p.tid, memory_order_acquire) != 0)
+      internal_sched_yield();
+  }
+  if (attr == &myattr)
+    pthread_attr_destroy(&myattr);
+  return res;
+}
+
+INTERCEPTOR(int, pthread_join, void *th, void **ret) {
+  Init();
+  // The registry tid is looked up before the real join, presumably while the
+  // thread's registry entry is still guaranteed alive -- confirm ThreadJoin's
+  // ordering requirements.
+  int tid = ThreadTid((uptr)th);
+  int res = REAL(pthread_join)(th, ret);
+  if (res == 0)
+    ThreadJoin(tid);
+  return res;
+}
+
+namespace __lsan {
+
+// Wires up all malloc/thread interceptors and creates the TSD key whose
+// destructor (thread_finalize) runs ThreadFinish at thread exit.
+void InitializeInterceptors() {
+  INTERCEPT_FUNCTION(malloc);
+  INTERCEPT_FUNCTION(free);
+  INTERCEPT_FUNCTION(cfree);
+  INTERCEPT_FUNCTION(calloc);
+  INTERCEPT_FUNCTION(realloc);
+  INTERCEPT_FUNCTION(memalign);
+  INTERCEPT_FUNCTION(posix_memalign);
+  INTERCEPT_FUNCTION(__libc_memalign);
+  INTERCEPT_FUNCTION(valloc);
+  INTERCEPT_FUNCTION(pvalloc);
+  INTERCEPT_FUNCTION(malloc_usable_size);
+  INTERCEPT_FUNCTION(mallinfo);
+  INTERCEPT_FUNCTION(mallopt);
+  INTERCEPT_FUNCTION(pthread_create);
+  INTERCEPT_FUNCTION(pthread_join);
+
+  if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
+    Report("LeakSanitizer: failed to create thread key.\n");
+    Die();
+  }
+}
+
+} // namespace __lsan
diff --git a/libsanitizer/lsan/lsan_thread.cc b/libsanitizer/lsan/lsan_thread.cc
new file mode 100644
index 00000000000..c260972cb47
--- /dev/null
+++ b/libsanitizer/lsan/lsan_thread.cc
@@ -0,0 +1,154 @@
+//=-- lsan_thread.cc ------------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// See lsan_thread.h for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan_thread.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "lsan_allocator.h"
+
+namespace __lsan {
+
+// Sentinel tid for "no thread set yet" (all-ones u32).
+const u32 kInvalidTid = (u32) -1;
+
+static ThreadRegistry *thread_registry;
+// Per-thread registry tid of the running thread.
+static THREADLOCAL u32 current_thread_tid = kInvalidTid;
+
+// Contexts are mmap'ed directly rather than heap-allocated, so the registry
+// does not recurse into the intercepted allocator.
+static ThreadContextBase *CreateThreadContext(u32 tid) {
+  void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext");
+  return new(mem) ThreadContext(tid);
+}
+
+static const uptr kMaxThreads = 1 << 13;
+static const uptr kThreadQuarantineSize = 64;
+
+// Placement-new the registry into static storage: no heap use during startup.
+void InitializeThreadRegistry() {
+  static char thread_registry_placeholder[sizeof(ThreadRegistry)] ALIGNED(64);
+  thread_registry = new(thread_registry_placeholder)
+      ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
+}
+
+u32 GetCurrentThread() {
+  return current_thread_tid;
+}
+
+void SetCurrentThread(u32 tid) {
+  current_thread_tid = tid;
+}
+
+ThreadContext::ThreadContext(int tid)
+    : ThreadContextBase(tid),
+      stack_begin_(0),
+      stack_end_(0),
+      cache_begin_(0),
+      cache_end_(0),
+      tls_begin_(0),
+      tls_end_(0) {}
+
+// Address ranges handed from ThreadStart to ThreadContext::OnStarted via the
+// registry's opaque `arg` pointer.
+struct OnStartedArgs {
+  uptr stack_begin, stack_end,
+       cache_begin, cache_end,
+       tls_begin, tls_end;
+};
+
+void ThreadContext::OnStarted(void *arg) {
+  OnStartedArgs *args = reinterpret_cast<OnStartedArgs *>(arg);
+  stack_begin_ = args->stack_begin;
+  stack_end_ = args->stack_end;
+  tls_begin_ = args->tls_begin;
+  tls_end_ = args->tls_end;
+  cache_begin_ = args->cache_begin;
+  cache_end_ = args->cache_end;
+}
+
+// Flush the thread's allocator cache when it is unregistered.
+void ThreadContext::OnFinished() {
+  AllocatorThreadFinish();
+}
+
+u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) {
+  return thread_registry->CreateThread(user_id, detached, parent_tid,
+                                       /* arg */ 0);
+}
+
+// Records the thread's stack, TLS and allocator-cache ranges in its context.
+// tid == 0 selects the main-thread path in GetThreadStackAndTls.
+void ThreadStart(u32 tid, uptr os_id) {
+  OnStartedArgs args;
+  uptr stack_size = 0;
+  uptr tls_size = 0;
+  GetThreadStackAndTls(tid == 0, &args.stack_begin, &stack_size,
+                       &args.tls_begin, &tls_size);
+  args.stack_end = args.stack_begin + stack_size;
+  args.tls_end = args.tls_begin + tls_size;
+  GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
+  thread_registry->StartThread(tid, os_id, &args);
+}
+
+void ThreadFinish() {
+  thread_registry->FinishThread(GetCurrentThread());
+}
+
+ThreadContext *CurrentThreadContext() {
+  if (!thread_registry) return 0;
+  if (GetCurrentThread() == kInvalidTid)
+    return 0;
+  // No lock needed when getting current thread.
+  return (ThreadContext *)thread_registry->GetThreadLocked(GetCurrentThread());
+}
+
+// Registry callback: matches a live context by its user id (pthread_t).
+static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
+  uptr uid = (uptr)arg;
+  if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
+    return true;
+  }
+  return false;
+}
+
+u32 ThreadTid(uptr uid) {
+  return thread_registry->FindThread(FindThreadByUid, (void*)uid);
+}
+
+void ThreadJoin(u32 tid) {
+  CHECK_NE(tid, kInvalidTid);
+  thread_registry->JoinThread(tid, /* arg */0);
+}
+
+// NOTE(review): CurrentThreadContext() can return null (registry not yet
+// initialized); it is dereferenced unconditionally here -- confirm every
+// caller runs after InitializeThreadRegistry.
+void EnsureMainThreadIDIsCorrect() {
+  if (GetCurrentThread() == 0)
+    CurrentThreadContext()->os_id = GetTid();
+}
+
+///// Interface to the common LSan module. 
///// + +bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end, + uptr *tls_begin, uptr *tls_end, + uptr *cache_begin, uptr *cache_end) { + ThreadContext *context = static_cast<ThreadContext *>( + thread_registry->FindThreadContextByOsIDLocked(os_id)); + if (!context) return false; + *stack_begin = context->stack_begin(); + *stack_end = context->stack_end(); + *tls_begin = context->tls_begin(); + *tls_end = context->tls_end(); + *cache_begin = context->cache_begin(); + *cache_end = context->cache_end(); + return true; +} + +void LockThreadRegistry() { + thread_registry->Lock(); +} + +void UnlockThreadRegistry() { + thread_registry->Unlock(); +} + +} // namespace __lsan diff --git a/libsanitizer/lsan/lsan_thread.h b/libsanitizer/lsan/lsan_thread.h new file mode 100644 index 00000000000..cd13fdb5c52 --- /dev/null +++ b/libsanitizer/lsan/lsan_thread.h @@ -0,0 +1,51 @@ +//=-- lsan_thread.h -------------------------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Thread registry for standalone LSan. 
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LSAN_THREAD_H
+#define LSAN_THREAD_H
+
+#include "sanitizer_common/sanitizer_thread_registry.h"
+
+namespace __lsan {
+
+// Per-thread bookkeeping for standalone LSan: the thread's stack, TLS and
+// allocator-cache address ranges, used by the leak scanner as root regions.
+// Ranges are filled in by OnStarted (see lsan_thread.cc).
+class ThreadContext : public ThreadContextBase {
+ public:
+  explicit ThreadContext(int tid);
+  void OnStarted(void *arg);
+  void OnFinished();
+  uptr stack_begin() { return stack_begin_; }
+  uptr stack_end() { return stack_end_; }
+  uptr tls_begin() { return tls_begin_; }
+  uptr tls_end() { return tls_end_; }
+  uptr cache_begin() { return cache_begin_; }
+  uptr cache_end() { return cache_end_; }
+ private:
+  uptr stack_begin_, stack_end_,
+       cache_begin_, cache_end_,
+       tls_begin_, tls_end_;
+};
+
+void InitializeThreadRegistry();
+
+// Thread lifecycle hooks driven by the pthread interceptors.
+void ThreadStart(u32 tid, uptr os_id);
+void ThreadFinish();
+u32 ThreadCreate(u32 tid, uptr uid, bool detached);
+void ThreadJoin(u32 tid);
+// Maps a user id (pthread_t) back to a registry tid.
+u32 ThreadTid(uptr uid);
+
+u32 GetCurrentThread();
+void SetCurrentThread(u32 tid);
+ThreadContext *CurrentThreadContext();
+void EnsureMainThreadIDIsCorrect();
+} // namespace __lsan
+
+#endif // LSAN_THREAD_H |