author    kcc <kcc@138bc75d-0d04-0410-961f-82ee72b054a4>  2013-01-10 12:44:08 +0000
committer kcc <kcc@138bc75d-0d04-0410-961f-82ee72b054a4>  2013-01-10 12:44:08 +0000
commit    f5ed54288a2a1d1f8d99490f2529fc36b3d2c150 (patch)
tree      23cebf7ab15836f70e055aee309f853c0c377de6 /libsanitizer/asan
parent    efc3a86d56685d9e49ef92d2bfb175c1e67f0476 (diff)
download  gcc-f5ed54288a2a1d1f8d99490f2529fc36b3d2c150.tar.gz
libsanitizer merge from upstream r171973
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@195083 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libsanitizer/asan')
-rw-r--r--  libsanitizer/asan/Makefile.am                  |   2
-rw-r--r--  libsanitizer/asan/Makefile.in                  |  31
-rw-r--r--  libsanitizer/asan/asan_allocator.cc            | 331
-rw-r--r--  libsanitizer/asan/asan_allocator.h             | 105
-rw-r--r--  libsanitizer/asan/asan_allocator2.cc           | 714
-rw-r--r--  libsanitizer/asan/asan_fake_stack.cc           | 180
-rw-r--r--  libsanitizer/asan/asan_flags.h                 |  13
-rw-r--r--  libsanitizer/asan/asan_intercepted_functions.h |  20
-rw-r--r--  libsanitizer/asan/asan_interceptors.cc         |  78
-rw-r--r--  libsanitizer/asan/asan_internal.h              |  13
-rw-r--r--  libsanitizer/asan/asan_linux.cc                |  48
-rw-r--r--  libsanitizer/asan/asan_mac.cc                  |  21
-rw-r--r--  libsanitizer/asan/asan_malloc_linux.cc         |  32
-rw-r--r--  libsanitizer/asan/asan_malloc_mac.cc           |  38
-rw-r--r--  libsanitizer/asan/asan_malloc_win.cc           |  12
-rw-r--r--  libsanitizer/asan/asan_mapping.h               |   4
-rw-r--r--  libsanitizer/asan/asan_new_delete.cc           |  30
-rw-r--r--  libsanitizer/asan/asan_poisoning.cc            |  32
-rw-r--r--  libsanitizer/asan/asan_report.cc               | 236
-rw-r--r--  libsanitizer/asan/asan_report.h                |   4
-rw-r--r--  libsanitizer/asan/asan_rtl.cc                  |  50
-rw-r--r--  libsanitizer/asan/asan_stack.cc                |  10
-rw-r--r--  libsanitizer/asan/asan_stack.h                 |  32
-rw-r--r--  libsanitizer/asan/asan_stats.cc                |  10
-rw-r--r--  libsanitizer/asan/asan_stats.h                 |   2
-rw-r--r--  libsanitizer/asan/asan_thread.h                |   9
-rw-r--r--  libsanitizer/asan/asan_thread_registry.cc      |   3
-rw-r--r--  libsanitizer/asan/asan_win.cc                  |   3
28 files changed, 1551 insertions(+), 512 deletions(-)
diff --git a/libsanitizer/asan/Makefile.am b/libsanitizer/asan/Makefile.am
index 04a621fd0fe..7d675001a37 100644
--- a/libsanitizer/asan/Makefile.am
+++ b/libsanitizer/asan/Makefile.am
@@ -15,6 +15,7 @@ toolexeclib_LTLIBRARIES = libasan.la
asan_files = \
asan_allocator.cc \
+ asan_allocator2.cc \
asan_interceptors.cc \
asan_mac.cc \
asan_malloc_mac.cc \
@@ -23,6 +24,7 @@ asan_files = \
asan_rtl.cc \
asan_stats.cc \
asan_thread_registry.cc \
+ asan_fake_stack.cc \
asan_globals.cc \
asan_linux.cc \
asan_malloc_linux.cc \
diff --git a/libsanitizer/asan/Makefile.in b/libsanitizer/asan/Makefile.in
index 7c8e321a1cd..4578391b3c0 100644
--- a/libsanitizer/asan/Makefile.in
+++ b/libsanitizer/asan/Makefile.in
@@ -84,19 +84,20 @@ am__DEPENDENCIES_1 =
@USING_MAC_INTERPOSE_FALSE@ $(am__DEPENDENCIES_1)
@USING_MAC_INTERPOSE_TRUE@libasan_la_DEPENDENCIES = $(top_builddir)/sanitizer_common/libsanitizer_common.la \
@USING_MAC_INTERPOSE_TRUE@ $(am__DEPENDENCIES_1)
-am__libasan_la_SOURCES_DIST = asan_allocator.cc asan_interceptors.cc \
- asan_mac.cc asan_malloc_mac.cc asan_new_delete.cc \
- asan_posix.cc asan_rtl.cc asan_stats.cc \
- asan_thread_registry.cc asan_globals.cc asan_linux.cc \
- asan_malloc_linux.cc asan_malloc_win.cc asan_poisoning.cc \
- asan_report.cc asan_stack.cc asan_thread.cc asan_win.cc \
- dynamic/asan_interceptors_dynamic.cc
-am__objects_1 = asan_allocator.lo asan_interceptors.lo asan_mac.lo \
- asan_malloc_mac.lo asan_new_delete.lo asan_posix.lo \
- asan_rtl.lo asan_stats.lo asan_thread_registry.lo \
- asan_globals.lo asan_linux.lo asan_malloc_linux.lo \
- asan_malloc_win.lo asan_poisoning.lo asan_report.lo \
- asan_stack.lo asan_thread.lo asan_win.lo
+am__libasan_la_SOURCES_DIST = asan_allocator.cc asan_allocator2.cc \
+ asan_interceptors.cc asan_mac.cc asan_malloc_mac.cc \
+ asan_new_delete.cc asan_posix.cc asan_rtl.cc asan_stats.cc \
+ asan_thread_registry.cc asan_fake_stack.cc asan_globals.cc \
+ asan_linux.cc asan_malloc_linux.cc asan_malloc_win.cc \
+ asan_poisoning.cc asan_report.cc asan_stack.cc asan_thread.cc \
+ asan_win.cc dynamic/asan_interceptors_dynamic.cc
+am__objects_1 = asan_allocator.lo asan_allocator2.lo \
+ asan_interceptors.lo asan_mac.lo asan_malloc_mac.lo \
+ asan_new_delete.lo asan_posix.lo asan_rtl.lo asan_stats.lo \
+ asan_thread_registry.lo asan_fake_stack.lo asan_globals.lo \
+ asan_linux.lo asan_malloc_linux.lo asan_malloc_win.lo \
+ asan_poisoning.lo asan_report.lo asan_stack.lo asan_thread.lo \
+ asan_win.lo
@USING_MAC_INTERPOSE_TRUE@am__objects_2 = \
@USING_MAC_INTERPOSE_TRUE@ asan_interceptors_dynamic.lo
am_libasan_la_OBJECTS = $(am__objects_1) $(am__objects_2)
@@ -269,6 +270,7 @@ ACLOCAL_AMFLAGS = -I $(top_srcdir) -I $(top_srcdir)/config
toolexeclib_LTLIBRARIES = libasan.la
asan_files = \
asan_allocator.cc \
+ asan_allocator2.cc \
asan_interceptors.cc \
asan_mac.cc \
asan_malloc_mac.cc \
@@ -277,6 +279,7 @@ asan_files = \
asan_rtl.cc \
asan_stats.cc \
asan_thread_registry.cc \
+ asan_fake_stack.cc \
asan_globals.cc \
asan_linux.cc \
asan_malloc_linux.cc \
@@ -409,6 +412,8 @@ distclean-compile:
-rm -f *.tab.c
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator2.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_fake_stack.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_globals.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_interceptors.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_interceptors_dynamic.Plo@am__quote@
diff --git a/libsanitizer/asan/asan_allocator.cc b/libsanitizer/asan/asan_allocator.cc
index 03d5bbd3eb5..b170fe723d2 100644
--- a/libsanitizer/asan/asan_allocator.cc
+++ b/libsanitizer/asan/asan_allocator.cc
@@ -22,8 +22,9 @@
// Once freed, the body of the chunk contains the stack trace of the free call.
//
//===----------------------------------------------------------------------===//
-
#include "asan_allocator.h"
+
+#if ASAN_ALLOCATOR_VERSION == 1
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_lock.h"
@@ -35,10 +36,6 @@
#include "sanitizer/asan_interface.h"
#include "sanitizer_common/sanitizer_atomic.h"
-#if defined(_WIN32) && !defined(__clang__)
-#include <intrin.h>
-#endif
-
namespace __asan {
#define REDZONE ((uptr)(flags()->redzone))
@@ -58,42 +55,6 @@ static const uptr kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;
static const uptr kMaxAllowedMallocSize =
(SANITIZER_WORDSIZE == 32) ? 3UL << 30 : 8UL << 30;
-static inline bool IsAligned(uptr a, uptr alignment) {
- return (a & (alignment - 1)) == 0;
-}
-
-static inline uptr Log2(uptr x) {
- CHECK(IsPowerOfTwo(x));
-#if !defined(_WIN32) || defined(__clang__)
- return __builtin_ctzl(x);
-#elif defined(_WIN64)
- unsigned long ret; // NOLINT
- _BitScanForward64(&ret, x);
- return ret;
-#else
- unsigned long ret; // NOLINT
- _BitScanForward(&ret, x);
- return ret;
-#endif
-}
-
-static inline uptr RoundUpToPowerOfTwo(uptr size) {
- CHECK(size);
- if (IsPowerOfTwo(size)) return size;
-
- unsigned long up; // NOLINT
-#if !defined(_WIN32) || defined(__clang__)
- up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(size);
-#elif defined(_WIN64)
- _BitScanReverse64(&up, size);
-#else
- _BitScanReverse(&up, size);
-#endif
- CHECK(size < (1ULL << (up + 1)));
- CHECK(size > (1ULL << up));
- return 1UL << (up + 1);
-}
-
static inline uptr SizeClassToSize(u8 size_class) {
CHECK(size_class < kNumberOfSizeClasses);
if (size_class <= kMallocSizeClassStepLog) {
@@ -165,7 +126,8 @@ struct ChunkBase {
// Second 8 bytes.
uptr alignment_log : 8;
- uptr used_size : FIRST_32_SECOND_64(32, 56); // Size requested by the user.
+ uptr alloc_type : 2;
+ uptr used_size : FIRST_32_SECOND_64(32, 54); // Size requested by the user.
// This field may overlap with the user area and thus should not
// be used while the chunk is in CHUNK_ALLOCATED state.
@@ -215,33 +177,6 @@ void AsanChunkView::GetFreeStack(StackTrace *stack) {
chunk_->compressed_free_stack_size());
}
-bool AsanChunkView::AddrIsInside(uptr addr, uptr access_size, uptr *offset) {
- if (addr >= Beg() && (addr + access_size) <= End()) {
- *offset = addr - Beg();
- return true;
- }
- return false;
-}
-
-bool AsanChunkView::AddrIsAtLeft(uptr addr, uptr access_size, uptr *offset) {
- if (addr < Beg()) {
- *offset = Beg() - addr;
- return true;
- }
- return false;
-}
-
-bool AsanChunkView::AddrIsAtRight(uptr addr, uptr access_size, uptr *offset) {
- if (addr + access_size >= End()) {
- if (addr <= End())
- *offset = 0;
- else
- *offset = addr - End();
- return true;
- }
- return false;
-}
-
static AsanChunk *PtrToChunk(uptr ptr) {
AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
if (m->chunk_state == CHUNK_MEMALIGN) {
@@ -252,34 +187,13 @@ static AsanChunk *PtrToChunk(uptr ptr) {
void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
CHECK(q->size() > 0);
- if (last_) {
- CHECK(first_);
- CHECK(!last_->next);
- last_->next = q->first_;
- last_ = q->last_;
- } else {
- CHECK(!first_);
- last_ = q->last_;
- first_ = q->first_;
- CHECK(first_);
- }
- CHECK(last_);
- CHECK(!last_->next);
size_ += q->size();
+ append_back(q);
q->clear();
}
void AsanChunkFifoList::Push(AsanChunk *n) {
- CHECK(n->next == 0);
- if (last_) {
- CHECK(first_);
- CHECK(!last_->next);
- last_->next = n;
- last_ = n;
- } else {
- CHECK(!first_);
- last_ = first_ = n;
- }
+ push_back(n);
size_ += n->Size();
}
@@ -288,15 +202,9 @@ void AsanChunkFifoList::Push(AsanChunk *n) {
// ago. Not sure if we can or want to do anything with this.
AsanChunk *AsanChunkFifoList::Pop() {
CHECK(first_);
- AsanChunk *res = first_;
- first_ = first_->next;
- if (first_ == 0)
- last_ = 0;
- CHECK(size_ >= res->Size());
+ AsanChunk *res = front();
size_ -= res->Size();
- if (last_) {
- CHECK(!last_->next);
- }
+ pop_front();
return res;
}
@@ -588,7 +496,8 @@ AsanChunkView FindHeapChunkByAddress(uptr address) {
return AsanChunkView(malloc_info.FindChunkByAddr(address));
}
-static u8 *Allocate(uptr alignment, uptr size, StackTrace *stack) {
+static u8 *Allocate(uptr alignment, uptr size, StackTrace *stack,
+ AllocType alloc_type) {
__asan_init();
CHECK(stack);
if (size == 0) {
@@ -645,6 +554,7 @@ static u8 *Allocate(uptr alignment, uptr size, StackTrace *stack) {
CHECK(m);
CHECK(m->chunk_state == CHUNK_AVAILABLE);
m->chunk_state = CHUNK_ALLOCATED;
+ m->alloc_type = alloc_type;
m->next = 0;
CHECK(m->Size() == size_to_allocate);
uptr addr = (uptr)m + REDZONE;
@@ -679,7 +589,7 @@ static u8 *Allocate(uptr alignment, uptr size, StackTrace *stack) {
return (u8*)addr;
}
-static void Deallocate(u8 *ptr, StackTrace *stack) {
+static void Deallocate(u8 *ptr, StackTrace *stack, AllocType alloc_type) {
if (!ptr) return;
CHECK(stack);
@@ -700,6 +610,9 @@ static void Deallocate(u8 *ptr, StackTrace *stack) {
ReportFreeNotMalloced((uptr)ptr, stack);
}
CHECK(old_chunk_state == CHUNK_ALLOCATED);
+ if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
+ ReportAllocTypeMismatch((uptr)ptr, stack,
+ (AllocType)m->alloc_type, (AllocType)alloc_type);
// With REDZONE==16 m->next is in the user area, otherwise it should be 0.
CHECK(REDZONE <= 16 || !m->next);
CHECK(m->free_tid == kInvalidTid);
@@ -744,18 +657,19 @@ static u8 *Reallocate(u8 *old_ptr, uptr new_size,
CHECK(m->chunk_state == CHUNK_ALLOCATED);
uptr old_size = m->used_size;
uptr memcpy_size = Min(new_size, old_size);
- u8 *new_ptr = Allocate(0, new_size, stack);
+ u8 *new_ptr = Allocate(0, new_size, stack, FROM_MALLOC);
if (new_ptr) {
CHECK(REAL(memcpy) != 0);
REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
- Deallocate(old_ptr, stack);
+ Deallocate(old_ptr, stack, FROM_MALLOC);
}
return new_ptr;
}
} // namespace __asan
-// Default (no-op) implementation of malloc hooks.
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
@@ -767,53 +681,58 @@ void __asan_free_hook(void *ptr) {
(void)ptr;
}
} // extern "C"
+#endif
namespace __asan {
+void PrintInternalAllocatorStats() {
+}
+
SANITIZER_INTERFACE_ATTRIBUTE
-void *asan_memalign(uptr alignment, uptr size, StackTrace *stack) {
- void *ptr = (void*)Allocate(alignment, size, stack);
- __asan_malloc_hook(ptr, size);
+void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
+ AllocType alloc_type) {
+ void *ptr = (void*)Allocate(alignment, size, stack, alloc_type);
+ ASAN_MALLOC_HOOK(ptr, size);
return ptr;
}
SANITIZER_INTERFACE_ATTRIBUTE
-void asan_free(void *ptr, StackTrace *stack) {
- __asan_free_hook(ptr);
- Deallocate((u8*)ptr, stack);
+void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
+ ASAN_FREE_HOOK(ptr);
+ Deallocate((u8*)ptr, stack, alloc_type);
}
SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
- void *ptr = (void*)Allocate(0, size, stack);
- __asan_malloc_hook(ptr, size);
+ void *ptr = (void*)Allocate(0, size, stack, FROM_MALLOC);
+ ASAN_MALLOC_HOOK(ptr, size);
return ptr;
}
void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
- void *ptr = (void*)Allocate(0, nmemb * size, stack);
+ void *ptr = (void*)Allocate(0, nmemb * size, stack, FROM_MALLOC);
if (ptr)
REAL(memset)(ptr, 0, nmemb * size);
- __asan_malloc_hook(ptr, nmemb * size);
+ ASAN_MALLOC_HOOK(ptr, nmemb * size);
return ptr;
}
void *asan_realloc(void *p, uptr size, StackTrace *stack) {
if (p == 0) {
- void *ptr = (void*)Allocate(0, size, stack);
- __asan_malloc_hook(ptr, size);
+ void *ptr = (void*)Allocate(0, size, stack, FROM_MALLOC);
+ ASAN_MALLOC_HOOK(ptr, size);
return ptr;
} else if (size == 0) {
- __asan_free_hook(p);
- Deallocate((u8*)p, stack);
+ ASAN_FREE_HOOK(p);
+ Deallocate((u8*)p, stack, FROM_MALLOC);
return 0;
}
return Reallocate((u8*)p, size, stack);
}
void *asan_valloc(uptr size, StackTrace *stack) {
- void *ptr = (void*)Allocate(GetPageSizeCached(), size, stack);
- __asan_malloc_hook(ptr, size);
+ void *ptr = (void*)Allocate(GetPageSizeCached(), size, stack, FROM_MALLOC);
+ ASAN_MALLOC_HOOK(ptr, size);
return ptr;
}
@@ -824,16 +743,16 @@ void *asan_pvalloc(uptr size, StackTrace *stack) {
// pvalloc(0) should allocate one page.
size = PageSize;
}
- void *ptr = (void*)Allocate(PageSize, size, stack);
- __asan_malloc_hook(ptr, size);
+ void *ptr = (void*)Allocate(PageSize, size, stack, FROM_MALLOC);
+ ASAN_MALLOC_HOOK(ptr, size);
return ptr;
}
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
StackTrace *stack) {
- void *ptr = Allocate(alignment, size, stack);
+ void *ptr = Allocate(alignment, size, stack, FROM_MALLOC);
CHECK(IsAligned((uptr)ptr, alignment));
- __asan_malloc_hook(ptr, size);
+ ASAN_MALLOC_HOOK(ptr, size);
*memptr = ptr;
return 0;
}
@@ -860,170 +779,11 @@ void asan_mz_force_unlock() {
malloc_info.ForceUnlock();
}
-// ---------------------- Fake stack-------------------- {{{1
-FakeStack::FakeStack() {
- CHECK(REAL(memset) != 0);
- REAL(memset)(this, 0, sizeof(*this));
-}
-
-bool FakeStack::AddrIsInSizeClass(uptr addr, uptr size_class) {
- uptr mem = allocated_size_classes_[size_class];
- uptr size = ClassMmapSize(size_class);
- bool res = mem && addr >= mem && addr < mem + size;
- return res;
-}
-
-uptr FakeStack::AddrIsInFakeStack(uptr addr) {
- for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
- if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
- }
- return 0;
-}
-
-// We may want to compute this during compilation.
-inline uptr FakeStack::ComputeSizeClass(uptr alloc_size) {
- uptr rounded_size = RoundUpToPowerOfTwo(alloc_size);
- uptr log = Log2(rounded_size);
- CHECK(alloc_size <= (1UL << log));
- if (!(alloc_size > (1UL << (log-1)))) {
- Printf("alloc_size %zu log %zu\n", alloc_size, log);
- }
- CHECK(alloc_size > (1UL << (log-1)));
- uptr res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
- CHECK(res < kNumberOfSizeClasses);
- CHECK(ClassSize(res) >= rounded_size);
- return res;
-}
-
-void FakeFrameFifo::FifoPush(FakeFrame *node) {
- CHECK(node);
- node->next = 0;
- if (first_ == 0 && last_ == 0) {
- first_ = last_ = node;
- } else {
- CHECK(first_);
- CHECK(last_);
- last_->next = node;
- last_ = node;
- }
-}
-
-FakeFrame *FakeFrameFifo::FifoPop() {
- CHECK(first_ && last_ && "Exhausted fake stack");
- FakeFrame *res = 0;
- if (first_ == last_) {
- res = first_;
- first_ = last_ = 0;
- } else {
- res = first_;
- first_ = first_->next;
- }
- return res;
-}
-
-void FakeStack::Init(uptr stack_size) {
- stack_size_ = stack_size;
- alive_ = true;
-}
-
-void FakeStack::Cleanup() {
- alive_ = false;
- for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
- uptr mem = allocated_size_classes_[i];
- if (mem) {
- PoisonShadow(mem, ClassMmapSize(i), 0);
- allocated_size_classes_[i] = 0;
- UnmapOrDie((void*)mem, ClassMmapSize(i));
- }
- }
-}
-
-uptr FakeStack::ClassMmapSize(uptr size_class) {
- return RoundUpToPowerOfTwo(stack_size_);
-}
-
-void FakeStack::AllocateOneSizeClass(uptr size_class) {
- CHECK(ClassMmapSize(size_class) >= GetPageSizeCached());
- uptr new_mem = (uptr)MmapOrDie(
- ClassMmapSize(size_class), __FUNCTION__);
- // Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
- // asanThreadRegistry().GetCurrent()->tid(),
- // size_class, new_mem, new_mem + ClassMmapSize(size_class),
- // ClassMmapSize(size_class));
- uptr i;
- for (i = 0; i < ClassMmapSize(size_class);
- i += ClassSize(size_class)) {
- size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
- }
- CHECK(i == ClassMmapSize(size_class));
- allocated_size_classes_[size_class] = new_mem;
-}
-
-uptr FakeStack::AllocateStack(uptr size, uptr real_stack) {
- if (!alive_) return real_stack;
- CHECK(size <= kMaxStackMallocSize && size > 1);
- uptr size_class = ComputeSizeClass(size);
- if (!allocated_size_classes_[size_class]) {
- AllocateOneSizeClass(size_class);
- }
- FakeFrame *fake_frame = size_classes_[size_class].FifoPop();
- CHECK(fake_frame);
- fake_frame->size_minus_one = size - 1;
- fake_frame->real_stack = real_stack;
- while (FakeFrame *top = call_stack_.top()) {
- if (top->real_stack > real_stack) break;
- call_stack_.LifoPop();
- DeallocateFrame(top);
- }
- call_stack_.LifoPush(fake_frame);
- uptr ptr = (uptr)fake_frame;
- PoisonShadow(ptr, size, 0);
- return ptr;
-}
-
-void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
- CHECK(alive_);
- uptr size = fake_frame->size_minus_one + 1;
- uptr size_class = ComputeSizeClass(size);
- CHECK(allocated_size_classes_[size_class]);
- uptr ptr = (uptr)fake_frame;
- CHECK(AddrIsInSizeClass(ptr, size_class));
- CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
- size_classes_[size_class].FifoPush(fake_frame);
-}
-
-void FakeStack::OnFree(uptr ptr, uptr size, uptr real_stack) {
- FakeFrame *fake_frame = (FakeFrame*)ptr;
- CHECK(fake_frame->magic = kRetiredStackFrameMagic);
- CHECK(fake_frame->descr != 0);
- CHECK(fake_frame->size_minus_one == size - 1);
- PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
-}
-
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
-uptr __asan_stack_malloc(uptr size, uptr real_stack) {
- if (!flags()->use_fake_stack) return real_stack;
- AsanThread *t = asanThreadRegistry().GetCurrent();
- if (!t) {
- // TSD is gone, use the real stack.
- return real_stack;
- }
- uptr ptr = t->fake_stack().AllocateStack(size, real_stack);
- // Printf("__asan_stack_malloc %p %zu %p\n", ptr, size, real_stack);
- return ptr;
-}
-
-void __asan_stack_free(uptr ptr, uptr size, uptr real_stack) {
- if (!flags()->use_fake_stack) return;
- if (ptr != real_stack) {
- FakeStack::OnFree(ptr, size, real_stack);
- }
-}
-
// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size".
uptr __asan_get_estimated_allocated_size(uptr size) {
@@ -1040,8 +800,9 @@ uptr __asan_get_allocated_size(const void *p) {
uptr allocated_size = malloc_info.AllocationSize((uptr)p);
// Die if p is not malloced or if it is already freed.
if (allocated_size == 0) {
- GET_STACK_TRACE_HERE(kStackTraceMax);
+ GET_STACK_TRACE_FATAL_HERE;
ReportAsanGetAllocatedSizeNotOwned((uptr)p, &stack);
}
return allocated_size;
}
+#endif // ASAN_ALLOCATOR_VERSION
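
The alloc_type bookkeeping added above is what powers the new alloc_dealloc_mismatch check: Deallocate() compares the origin recorded at allocation time against the deallocation family and reports on disagreement. A minimal, self-contained sketch of that comparison (illustrative names only, not the real ASan structures):

    #include <cstdio>

    enum AllocType { FROM_MALLOC = 1, FROM_NEW = 2, FROM_NEW_BR = 3 };

    struct Chunk { unsigned alloc_type : 2; };  // toy stand-in for the 2-bit header field

    static void CheckDeallocType(const Chunk *m, AllocType dealloc_type) {
      // Mirrors the new check in Deallocate(): report when the allocation and
      // deallocation families disagree (e.g. operator new paired with free()).
      if ((unsigned)m->alloc_type != (unsigned)dealloc_type)
        std::printf("alloc/dealloc mismatch: allocated as %u, freed as %u\n",
                    (unsigned)m->alloc_type, (unsigned)dealloc_type);
    }

    int main() {
      Chunk c;
      c.alloc_type = FROM_NEW;            // chunk came from operator new
      CheckDeallocType(&c, FROM_MALLOC);  // but is released with free() -> reported
      return 0;
    }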
diff --git a/libsanitizer/asan/asan_allocator.h b/libsanitizer/asan/asan_allocator.h
index 372ca0635ed..4ade352a3e5 100644
--- a/libsanitizer/asan/asan_allocator.h
+++ b/libsanitizer/asan/asan_allocator.h
@@ -15,9 +15,22 @@
#include "asan_internal.h"
#include "asan_interceptors.h"
+#include "sanitizer_common/sanitizer_list.h"
+
+// We are in the process of transitioning from the old allocator (version 1)
+// to a new one (version 2). The change is quite intrusive so both allocators
+// will co-exist in the source base for a while. The actual allocator is chosen
+// at build time by redefining this macro.
+#define ASAN_ALLOCATOR_VERSION 1
namespace __asan {
+enum AllocType {
+ FROM_MALLOC = 1, // Memory block came from malloc, calloc, realloc, etc.
+ FROM_NEW = 2, // Memory block came from operator new.
+ FROM_NEW_BR = 3 // Memory block came from operator new[].
+};
+
static const uptr kNumberOfSizeClasses = 255;
struct AsanChunk;
@@ -32,16 +45,40 @@ class AsanChunkView {
uptr FreeTid();
void GetAllocStack(StackTrace *stack);
void GetFreeStack(StackTrace *stack);
- bool AddrIsInside(uptr addr, uptr access_size, uptr *offset);
- bool AddrIsAtLeft(uptr addr, uptr access_size, uptr *offset);
- bool AddrIsAtRight(uptr addr, uptr access_size, uptr *offset);
+ bool AddrIsInside(uptr addr, uptr access_size, uptr *offset) {
+ if (addr >= Beg() && (addr + access_size) <= End()) {
+ *offset = addr - Beg();
+ return true;
+ }
+ return false;
+ }
+ bool AddrIsAtLeft(uptr addr, uptr access_size, uptr *offset) {
+ (void)access_size;
+ if (addr < Beg()) {
+ *offset = Beg() - addr;
+ return true;
+ }
+ return false;
+ }
+ bool AddrIsAtRight(uptr addr, uptr access_size, uptr *offset) {
+ if (addr + access_size >= End()) {
+ if (addr <= End())
+ *offset = 0;
+ else
+ *offset = addr - End();
+ return true;
+ }
+ return false;
+ }
+
private:
AsanChunk *const chunk_;
};
AsanChunkView FindHeapChunkByAddress(uptr address);
-class AsanChunkFifoList {
+// List of AsanChunks with total size.
+class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
public:
explicit AsanChunkFifoList(LinkerInitialized) { }
AsanChunkFifoList() { clear(); }
@@ -50,12 +87,10 @@ class AsanChunkFifoList {
AsanChunk *Pop();
uptr size() { return size_; }
void clear() {
- first_ = last_ = 0;
+ IntrusiveList<AsanChunk>::clear();
size_ = 0;
}
private:
- AsanChunk *first_;
- AsanChunk *last_;
uptr size_;
};
@@ -68,7 +103,11 @@ struct AsanThreadLocalMallocStorage {
}
AsanChunkFifoList quarantine_;
+#if ASAN_ALLOCATOR_VERSION == 1
AsanChunk *free_lists_[kNumberOfSizeClasses];
+#else
+ uptr allocator2_cache[1024]; // Opaque.
+#endif
void CommitBack();
};
@@ -156,8 +195,9 @@ class FakeStack {
FakeFrameLifo call_stack_;
};
-void *asan_memalign(uptr alignment, uptr size, StackTrace *stack);
-void asan_free(void *ptr, StackTrace *stack);
+void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
+ AllocType alloc_type);
+void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type);
void *asan_malloc(uptr size, StackTrace *stack);
void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack);
@@ -173,5 +213,52 @@ uptr asan_mz_size(const void *ptr);
void asan_mz_force_lock();
void asan_mz_force_unlock();
+void PrintInternalAllocatorStats();
+
+// Log2 and RoundUpToPowerOfTwo should be inlined for performance.
+#if defined(_WIN32) && !defined(__clang__)
+extern "C" {
+unsigned char _BitScanForward(unsigned long *index, unsigned long mask); // NOLINT
+unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); // NOLINT
+#if defined(_WIN64)
+unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask); // NOLINT
+unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask); // NOLINT
+#endif
+}
+#endif
+
+static inline uptr Log2(uptr x) {
+ CHECK(IsPowerOfTwo(x));
+#if !defined(_WIN32) || defined(__clang__)
+ return __builtin_ctzl(x);
+#elif defined(_WIN64)
+ unsigned long ret; // NOLINT
+ _BitScanForward64(&ret, x);
+ return ret;
+#else
+ unsigned long ret; // NOLINT
+ _BitScanForward(&ret, x);
+ return ret;
+#endif
+}
+
+static inline uptr RoundUpToPowerOfTwo(uptr size) {
+ CHECK(size);
+ if (IsPowerOfTwo(size)) return size;
+
+ unsigned long up; // NOLINT
+#if !defined(_WIN32) || defined(__clang__)
+ up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(size);
+#elif defined(_WIN64)
+ _BitScanReverse64(&up, size);
+#else
+ _BitScanReverse(&up, size);
+#endif
+ CHECK(size < (1ULL << (up + 1)));
+ CHECK(size > (1ULL << up));
+ return 1UL << (up + 1);
+}
+
+
} // namespace __asan
#endif // ASAN_ALLOCATOR_H
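
The two helpers moved into this header rely on compiler intrinsics (__builtin_ctzl/_BitScanForward and friends) for speed. For checking expected results by hand, here is a portable, intrinsic-free equivalent — a sketch only, not the code above:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Slow but portable versions of the same two operations.
    static uint64_t Log2(uint64_t x) {   // precondition: x is a power of two
      uint64_t log = 0;
      while (x >>= 1) log++;
      return log;
    }

    static uint64_t RoundUpToPowerOfTwo(uint64_t size) {
      assert(size != 0);
      uint64_t p = 1;
      while (p < size) p <<= 1;
      return p;
    }

    int main() {
      std::printf("Log2(64) = %llu\n", (unsigned long long)Log2(64));  // 6
      std::printf("RoundUpToPowerOfTwo(100) = %llu\n",
                  (unsigned long long)RoundUpToPowerOfTwo(100));       // 128
      return 0;
    }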
diff --git a/libsanitizer/asan/asan_allocator2.cc b/libsanitizer/asan/asan_allocator2.cc
new file mode 100644
index 00000000000..d12ccb7f23b
--- /dev/null
+++ b/libsanitizer/asan/asan_allocator2.cc
@@ -0,0 +1,714 @@
+//===-- asan_allocator2.cc ------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Implementation of ASan's memory allocator, second version.
+// This variant uses the allocator from sanitizer_common, i.e. the one shared
+// with ThreadSanitizer and MemorySanitizer.
+//
+// Status: under development, not enabled by default yet.
+//===----------------------------------------------------------------------===//
+#include "asan_allocator.h"
+#if ASAN_ALLOCATOR_VERSION == 2
+
+#include "asan_mapping.h"
+#include "asan_report.h"
+#include "asan_thread.h"
+#include "asan_thread_registry.h"
+#include "sanitizer/asan_interface.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_list.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+namespace __asan {
+
+struct AsanMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const {
+ PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
+ // Statistics.
+ AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
+ thread_stats.mmaps++;
+ thread_stats.mmaped += size;
+ }
+ void OnUnmap(uptr p, uptr size) const {
+ PoisonShadow(p, size, 0);
+ // We are about to unmap a chunk of user memory.
+ // Mark the corresponding shadow memory as not needed.
+ // Since asan's mapping is compacting, the shadow chunk may be
+ // not page-aligned, so we only flush the page-aligned portion.
+ uptr page_size = GetPageSizeCached();
+ uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
+ uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
+ FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
+ // Statistics.
+ AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
+ thread_stats.munmaps++;
+ thread_stats.munmaped += size;
+ }
+};
+
+#if SANITIZER_WORDSIZE == 64
+const uptr kAllocatorSpace = 0x600000000000ULL;
+const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
+typedef DefaultSizeClassMap SizeClassMap;
+typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
+ SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
+#elif SANITIZER_WORDSIZE == 32
+static const u64 kAddressSpaceSize = 1ULL << 32;
+typedef CompactSizeClassMap SizeClassMap;
+typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
+ SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
+#endif
+
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
+ SecondaryAllocator> Allocator;
+
+// We can not use THREADLOCAL because it is not supported on some of the
+// platforms we care about (OSX 10.6, Android).
+// static THREADLOCAL AllocatorCache cache;
+AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
+ CHECK(ms);
+ CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
+ return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
+}
+
+static Allocator allocator;
+
+static const uptr kMaxAllowedMallocSize =
+ FIRST_32_SECOND_64(3UL << 30, 8UL << 30);
+
+static const uptr kMaxThreadLocalQuarantine =
+ FIRST_32_SECOND_64(1 << 18, 1 << 20);
+
+static const uptr kReturnOnZeroMalloc = 2048; // Zero page is protected.
+
+static int inited = 0;
+
+static void Init() {
+ if (inited) return;
+ __asan_init();
+ inited = true; // this must happen before any threads are created.
+ allocator.Init();
+}
+
+// Every chunk of memory allocated by this allocator can be in one of 3 states:
+// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
+// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
+// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
+enum {
+ CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
+ CHUNK_ALLOCATED = 2,
+ CHUNK_QUARANTINE = 3
+};
+
+// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
+// We use adaptive redzones: for larger allocation larger redzones are used.
+static u32 RZLog2Size(u32 rz_log) {
+ CHECK_LT(rz_log, 8);
+ return 16 << rz_log;
+}
+
+static u32 RZSize2Log(u32 rz_size) {
+ CHECK_GE(rz_size, 16);
+ CHECK_LE(rz_size, 2048);
+ CHECK(IsPowerOfTwo(rz_size));
+ u32 res = __builtin_ctz(rz_size) - 4;
+ CHECK_EQ(rz_size, RZLog2Size(res));
+ return res;
+}
+
+static uptr ComputeRZLog(uptr user_requested_size) {
+ u32 rz_log =
+ user_requested_size <= 64 - 16 ? 0 :
+ user_requested_size <= 128 - 32 ? 1 :
+ user_requested_size <= 512 - 64 ? 2 :
+ user_requested_size <= 4096 - 128 ? 3 :
+ user_requested_size <= (1 << 14) - 256 ? 4 :
+ user_requested_size <= (1 << 15) - 512 ? 5 :
+ user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
+ return Max(rz_log, RZSize2Log(flags()->redzone));
+}
+
+// The memory chunk allocated from the underlying allocator looks like this:
+// L L L L L L H H U U U U U U R R
+// L -- left redzone words (0 or more bytes)
+// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
+// U -- user memory.
+// R -- right redzone (0 or more bytes)
+// ChunkBase consists of ChunkHeader and other bytes that overlap with user
+// memory.
+
+// If a memory chunk is allocated by memalign and we had to increase the
+// allocation size to achieve the proper alignment, then we store this magic
+// value in the first uptr word of the memory block and store the address of
+// ChunkBase in the next uptr.
+// M B ? ? ? L L L L L L H H U U U U U U
+// M -- magic value kMemalignMagic
+// B -- address of ChunkHeader pointing to the first 'H'
+static const uptr kMemalignMagic = 0xCC6E96B9;
+
+struct ChunkHeader {
+ // First 8 bytes.
+ u32 chunk_state : 8; // Must be first.
+ u32 alloc_tid : 24;
+
+ u32 free_tid : 24;
+ u32 from_memalign : 1;
+ u32 alloc_type : 2;
+ u32 rz_log : 3;
+ // Second 8 bytes
+ // This field is used for small sizes. For large sizes it is equal to
+ // SizeClassMap::kMaxSize and the actual size is stored in the
+ // SecondaryAllocator's metadata.
+ u32 user_requested_size;
+ u32 alloc_context_id;
+};
+
+struct ChunkBase : ChunkHeader {
+ // Header2, intersects with user memory.
+ AsanChunk *next;
+ u32 free_context_id;
+};
+
+static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
+static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
+COMPILER_CHECK(kChunkHeaderSize == 16);
+COMPILER_CHECK(kChunkHeader2Size <= 16);
+
+struct AsanChunk: ChunkBase {
+ uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
+ uptr UsedSize() {
+ if (user_requested_size != SizeClassMap::kMaxSize)
+ return user_requested_size;
+ return *reinterpret_cast<uptr *>(allocator.GetMetaData(AllocBeg()));
+ }
+ void *AllocBeg() {
+ if (from_memalign)
+ return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
+ return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
+ }
+ // We store the alloc/free stack traces in the chunk itself.
+ u32 *AllocStackBeg() {
+ return (u32*)(Beg() - RZLog2Size(rz_log));
+ }
+ uptr AllocStackSize() {
+ CHECK_LE(RZLog2Size(rz_log), kChunkHeaderSize);
+ return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
+ }
+ u32 *FreeStackBeg() {
+ return (u32*)(Beg() + kChunkHeader2Size);
+ }
+ uptr FreeStackSize() {
+ if (user_requested_size < kChunkHeader2Size) return 0;
+ uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
+ return (available - kChunkHeader2Size) / sizeof(u32);
+ }
+};
+
+uptr AsanChunkView::Beg() { return chunk_->Beg(); }
+uptr AsanChunkView::End() { return Beg() + UsedSize(); }
+uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
+uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
+uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
+
+static void GetStackTraceFromId(u32 id, StackTrace *stack) {
+ CHECK(id);
+ uptr size = 0;
+ const uptr *trace = StackDepotGet(id, &size);
+ CHECK_LT(size, kStackTraceMax);
+ internal_memcpy(stack->trace, trace, sizeof(uptr) * size);
+ stack->size = size;
+}
+
+void AsanChunkView::GetAllocStack(StackTrace *stack) {
+ if (flags()->use_stack_depot)
+ GetStackTraceFromId(chunk_->alloc_context_id, stack);
+ else
+ StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
+ chunk_->AllocStackSize());
+}
+
+void AsanChunkView::GetFreeStack(StackTrace *stack) {
+ if (flags()->use_stack_depot)
+ GetStackTraceFromId(chunk_->free_context_id, stack);
+ else
+ StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
+ chunk_->FreeStackSize());
+}
+
+class Quarantine: public AsanChunkFifoList {
+ public:
+ void SwallowThreadLocalQuarantine(AsanThreadLocalMallocStorage *ms) {
+ AsanChunkFifoList *q = &ms->quarantine_;
+ if (!q->size()) return;
+ SpinMutexLock l(&mutex_);
+ PushList(q);
+ PopAndDeallocateLoop(ms);
+ }
+
+ void BypassThreadLocalQuarantine(AsanChunk *m) {
+ SpinMutexLock l(&mutex_);
+ Push(m);
+ }
+
+ private:
+ void PopAndDeallocateLoop(AsanThreadLocalMallocStorage *ms) {
+ while (size() > (uptr)flags()->quarantine_size) {
+ PopAndDeallocate(ms);
+ }
+ }
+ void PopAndDeallocate(AsanThreadLocalMallocStorage *ms) {
+ CHECK_GT(size(), 0);
+ AsanChunk *m = Pop();
+ CHECK(m);
+ CHECK(m->chunk_state == CHUNK_QUARANTINE);
+ m->chunk_state = CHUNK_AVAILABLE;
+ CHECK_NE(m->alloc_tid, kInvalidTid);
+ CHECK_NE(m->free_tid, kInvalidTid);
+ PoisonShadow(m->Beg(),
+ RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
+ kAsanHeapLeftRedzoneMagic);
+ void *p = reinterpret_cast<void *>(m->AllocBeg());
+ if (m->from_memalign) {
+ uptr *memalign_magic = reinterpret_cast<uptr *>(p);
+ CHECK_EQ(memalign_magic[0], kMemalignMagic);
+ CHECK_EQ(memalign_magic[1], reinterpret_cast<uptr>(m));
+ }
+
+ // Statistics.
+ AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
+ thread_stats.real_frees++;
+ thread_stats.really_freed += m->UsedSize();
+
+ allocator.Deallocate(GetAllocatorCache(ms), p);
+ }
+ SpinMutex mutex_;
+};
+
+static Quarantine quarantine;
+
+void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
+ CHECK(q->size() > 0);
+ size_ += q->size();
+ append_back(q);
+ q->clear();
+}
+
+void AsanChunkFifoList::Push(AsanChunk *n) {
+ push_back(n);
+ size_ += n->UsedSize();
+}
+
+// Interesting performance observation: this function takes up to 15% of overall
+// allocator time. That's because *first_ has been evicted from cache a long
+// time ago. Not sure if we can or want to do anything with this.
+AsanChunk *AsanChunkFifoList::Pop() {
+ CHECK(first_);
+ AsanChunk *res = front();
+ size_ -= res->UsedSize();
+ pop_front();
+ return res;
+}
+
+static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
+ AllocType alloc_type) {
+ Init();
+ CHECK(stack);
+ const uptr min_alignment = SHADOW_GRANULARITY;
+ if (alignment < min_alignment)
+ alignment = min_alignment;
+ if (size == 0) {
+ if (alignment <= kReturnOnZeroMalloc)
+ return reinterpret_cast<void *>(kReturnOnZeroMalloc);
+ else
+ return 0; // 0 bytes with large alignment requested. Just return 0.
+ }
+ CHECK(IsPowerOfTwo(alignment));
+ uptr rz_log = ComputeRZLog(size);
+ uptr rz_size = RZLog2Size(rz_log);
+ uptr rounded_size = RoundUpTo(size, alignment);
+ if (rounded_size < kChunkHeader2Size)
+ rounded_size = kChunkHeader2Size;
+ uptr needed_size = rounded_size + rz_size;
+ if (alignment > min_alignment)
+ needed_size += alignment;
+ bool using_primary_allocator = true;
+ // If we are allocating from the secondary allocator, there will be no
+ // automatic right redzone, so add the right redzone manually.
+ if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
+ needed_size += rz_size;
+ using_primary_allocator = false;
+ }
+ CHECK(IsAligned(needed_size, min_alignment));
+ if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
+ Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
+ (void*)size);
+ return 0;
+ }
+
+ AsanThread *t = asanThreadRegistry().GetCurrent();
+ AllocatorCache *cache = t ? GetAllocatorCache(&t->malloc_storage()) : 0;
+ void *allocated = allocator.Allocate(cache, needed_size, 8, false);
+ uptr alloc_beg = reinterpret_cast<uptr>(allocated);
+ uptr alloc_end = alloc_beg + needed_size;
+ uptr beg_plus_redzone = alloc_beg + rz_size;
+ uptr user_beg = beg_plus_redzone;
+ if (!IsAligned(user_beg, alignment))
+ user_beg = RoundUpTo(user_beg, alignment);
+ uptr user_end = user_beg + size;
+ CHECK_LE(user_end, alloc_end);
+ uptr chunk_beg = user_beg - kChunkHeaderSize;
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+ m->chunk_state = CHUNK_ALLOCATED;
+ m->alloc_type = alloc_type;
+ m->rz_log = rz_log;
+ u32 alloc_tid = t ? t->tid() : 0;
+ m->alloc_tid = alloc_tid;
+ CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield?
+ m->free_tid = kInvalidTid;
+ m->from_memalign = user_beg != beg_plus_redzone;
+ if (m->from_memalign) {
+ CHECK_LE(beg_plus_redzone + 2 * sizeof(uptr), user_beg);
+ uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
+ memalign_magic[0] = kMemalignMagic;
+ memalign_magic[1] = chunk_beg;
+ }
+ if (using_primary_allocator) {
+ CHECK(size);
+ m->user_requested_size = size;
+ CHECK(allocator.FromPrimary(allocated));
+ } else {
+ CHECK(!allocator.FromPrimary(allocated));
+ m->user_requested_size = SizeClassMap::kMaxSize;
+ uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
+ meta[0] = size;
+ meta[1] = chunk_beg;
+ }
+
+ if (flags()->use_stack_depot) {
+ m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
+ } else {
+ m->alloc_context_id = 0;
+ StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
+ }
+
+ uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
+ // Unpoison the bulk of the memory region.
+ if (size_rounded_down_to_granularity)
+ PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
+ // Deal with the end of the region if size is not aligned to granularity.
+ if (size != size_rounded_down_to_granularity && flags()->poison_heap) {
+ u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
+ *shadow = size & (SHADOW_GRANULARITY - 1);
+ }
+
+ AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
+ thread_stats.mallocs++;
+ thread_stats.malloced += size;
+ thread_stats.malloced_redzones += needed_size - size;
+ uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
+ thread_stats.malloced_by_size[class_id]++;
+ if (needed_size > SizeClassMap::kMaxSize)
+ thread_stats.malloc_large++;
+
+ void *res = reinterpret_cast<void *>(user_beg);
+ ASAN_MALLOC_HOOK(res, size);
+ return res;
+}
+
+static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
+ uptr p = reinterpret_cast<uptr>(ptr);
+ if (p == 0 || p == kReturnOnZeroMalloc) return;
+ uptr chunk_beg = p - kChunkHeaderSize;
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+
+ // Flip the chunk_state atomically to avoid race on double-free.
+ u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
+ memory_order_acq_rel);
+
+ if (old_chunk_state == CHUNK_QUARANTINE)
+ ReportDoubleFree((uptr)ptr, stack);
+ else if (old_chunk_state != CHUNK_ALLOCATED)
+ ReportFreeNotMalloced((uptr)ptr, stack);
+ CHECK(old_chunk_state == CHUNK_ALLOCATED);
+ if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
+ ReportAllocTypeMismatch((uptr)ptr, stack,
+ (AllocType)m->alloc_type, (AllocType)alloc_type);
+
+ CHECK_GE(m->alloc_tid, 0);
+ if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
+ CHECK_EQ(m->free_tid, kInvalidTid);
+ AsanThread *t = asanThreadRegistry().GetCurrent();
+ m->free_tid = t ? t->tid() : 0;
+ if (flags()->use_stack_depot) {
+ m->free_context_id = StackDepotPut(stack->trace, stack->size);
+ } else {
+ m->free_context_id = 0;
+ StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
+ }
+ CHECK(m->chunk_state == CHUNK_QUARANTINE);
+ // Poison the region.
+ PoisonShadow(m->Beg(),
+ RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
+ kAsanHeapFreeMagic);
+
+ AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
+ thread_stats.frees++;
+ thread_stats.freed += m->UsedSize();
+
+ // Push into quarantine.
+ if (t) {
+ AsanChunkFifoList &q = t->malloc_storage().quarantine_;
+ q.Push(m);
+
+ if (q.size() > kMaxThreadLocalQuarantine)
+ quarantine.SwallowThreadLocalQuarantine(&t->malloc_storage());
+ } else {
+ quarantine.BypassThreadLocalQuarantine(m);
+ }
+
+ ASAN_FREE_HOOK(ptr);
+}
+
+static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
+ CHECK(old_ptr && new_size);
+ uptr p = reinterpret_cast<uptr>(old_ptr);
+ uptr chunk_beg = p - kChunkHeaderSize;
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+
+ AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
+ thread_stats.reallocs++;
+ thread_stats.realloced += new_size;
+
+ CHECK(m->chunk_state == CHUNK_ALLOCATED);
+ uptr old_size = m->UsedSize();
+ uptr memcpy_size = Min(new_size, old_size);
+ void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC);
+ if (new_ptr) {
+ CHECK(REAL(memcpy) != 0);
+ REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
+ Deallocate(old_ptr, stack, FROM_MALLOC);
+ }
+ return new_ptr;
+}
+
+static AsanChunk *GetAsanChunkByAddr(uptr p) {
+ void *ptr = reinterpret_cast<void *>(p);
+ uptr alloc_beg = reinterpret_cast<uptr>(allocator.GetBlockBegin(ptr));
+ if (!alloc_beg) return 0;
+ uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
+ if (memalign_magic[0] == kMemalignMagic) {
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(memalign_magic[1]);
+ CHECK(m->from_memalign);
+ return m;
+ }
+ if (!allocator.FromPrimary(ptr)) {
+ uptr *meta = reinterpret_cast<uptr *>(
+ allocator.GetMetaData(reinterpret_cast<void *>(alloc_beg)));
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
+ return m;
+ }
+ uptr actual_size = allocator.GetActuallyAllocatedSize(ptr);
+ CHECK_LE(actual_size, SizeClassMap::kMaxSize);
+ // We know the actually allocated size, but we don't know the redzone size.
+ // Just try all possible redzone sizes.
+ for (u32 rz_log = 0; rz_log < 8; rz_log++) {
+ u32 rz_size = RZLog2Size(rz_log);
+ uptr max_possible_size = actual_size - rz_size;
+ if (ComputeRZLog(max_possible_size) != rz_log)
+ continue;
+ return reinterpret_cast<AsanChunk *>(
+ alloc_beg + rz_size - kChunkHeaderSize);
+ }
+ return 0;
+}
+
+static uptr AllocationSize(uptr p) {
+ AsanChunk *m = GetAsanChunkByAddr(p);
+ if (!m) return 0;
+ if (m->chunk_state != CHUNK_ALLOCATED) return 0;
+ if (m->Beg() != p) return 0;
+ return m->UsedSize();
+}
+
+// We have an address between two chunks, and we want to report just one.
+AsanChunk *ChooseChunk(uptr addr,
+ AsanChunk *left_chunk, AsanChunk *right_chunk) {
+ // Prefer an allocated chunk over freed chunk and freed chunk
+ // over available chunk.
+ if (left_chunk->chunk_state != right_chunk->chunk_state) {
+ if (left_chunk->chunk_state == CHUNK_ALLOCATED)
+ return left_chunk;
+ if (right_chunk->chunk_state == CHUNK_ALLOCATED)
+ return right_chunk;
+ if (left_chunk->chunk_state == CHUNK_QUARANTINE)
+ return left_chunk;
+ if (right_chunk->chunk_state == CHUNK_QUARANTINE)
+ return right_chunk;
+ }
+ // Same chunk_state: choose based on offset.
+ uptr l_offset = 0, r_offset = 0;
+ CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
+ CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
+ if (l_offset < r_offset)
+ return left_chunk;
+ return right_chunk;
+}
+
+AsanChunkView FindHeapChunkByAddress(uptr addr) {
+ AsanChunk *m1 = GetAsanChunkByAddr(addr);
+ if (!m1) return AsanChunkView(m1);
+ uptr offset = 0;
+ if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
+ // The address is in the chunk's left redzone, so maybe it is actually
+ // a right buffer overflow from the other chunk to the left.
+ // Search a bit to the left to see if there is another chunk.
+ AsanChunk *m2 = 0;
+ for (uptr l = 1; l < GetPageSizeCached(); l++) {
+ m2 = GetAsanChunkByAddr(addr - l);
+ if (m2 == m1) continue; // Still the same chunk.
+ break;
+ }
+ if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
+ m1 = ChooseChunk(addr, m2, m1);
+ }
+ return AsanChunkView(m1);
+}
+
+void AsanThreadLocalMallocStorage::CommitBack() {
+ quarantine.SwallowThreadLocalQuarantine(this);
+ allocator.SwallowCache(GetAllocatorCache(this));
+}
+
+void PrintInternalAllocatorStats() {
+ allocator.PrintStats();
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
+ AllocType alloc_type) {
+ return Allocate(size, alignment, stack, alloc_type);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
+ Deallocate(ptr, stack, alloc_type);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *asan_malloc(uptr size, StackTrace *stack) {
+ return Allocate(size, 8, stack, FROM_MALLOC);
+}
+
+void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
+ void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC);
+ if (ptr)
+ REAL(memset)(ptr, 0, nmemb * size);
+ return ptr;
+}
+
+void *asan_realloc(void *p, uptr size, StackTrace *stack) {
+ if (p == 0)
+ return Allocate(size, 8, stack, FROM_MALLOC);
+ if (size == 0) {
+ Deallocate(p, stack, FROM_MALLOC);
+ return 0;
+ }
+ return Reallocate(p, size, stack);
+}
+
+void *asan_valloc(uptr size, StackTrace *stack) {
+ return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC);
+}
+
+void *asan_pvalloc(uptr size, StackTrace *stack) {
+ uptr PageSize = GetPageSizeCached();
+ size = RoundUpTo(size, PageSize);
+ if (size == 0) {
+ // pvalloc(0) should allocate one page.
+ size = PageSize;
+ }
+ return Allocate(size, PageSize, stack, FROM_MALLOC);
+}
+
+int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ StackTrace *stack) {
+ void *ptr = Allocate(size, alignment, stack, FROM_MALLOC);
+ CHECK(IsAligned((uptr)ptr, alignment));
+ *memptr = ptr;
+ return 0;
+}
+
+uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
+ CHECK(stack);
+ if (ptr == 0) return 0;
+ uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
+ if (flags()->check_malloc_usable_size && (usable_size == 0))
+ ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
+ return usable_size;
+}
+
+uptr asan_mz_size(const void *ptr) {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+void asan_mz_force_lock() {
+ UNIMPLEMENTED();
+}
+
+void asan_mz_force_unlock() {
+ UNIMPLEMENTED();
+}
+
+} // namespace __asan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan; // NOLINT
+
+// ASan allocator doesn't reserve extra bytes, so normally we would
+// just return "size". We don't want to expose our redzone sizes, etc here.
+uptr __asan_get_estimated_allocated_size(uptr size) {
+ return size;
+}
+
+bool __asan_get_ownership(const void *p) {
+ return AllocationSize(reinterpret_cast<uptr>(p)) > 0;
+}
+
+uptr __asan_get_allocated_size(const void *p) {
+ if (p == 0) return 0;
+ uptr allocated_size = AllocationSize(reinterpret_cast<uptr>(p));
+ // Die if p is not malloced or if it is already freed.
+ if (allocated_size == 0) {
+ GET_STACK_TRACE_FATAL_HERE;
+ ReportAsanGetAllocatedSizeNotOwned(reinterpret_cast<uptr>(p), &stack);
+ }
+ return allocated_size;
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+// Provide default (no-op) implementation of malloc hooks.
+extern "C" {
+SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_malloc_hook(void *ptr, uptr size) {
+ (void)ptr;
+ (void)size;
+}
+SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_free_hook(void *ptr) {
+ (void)ptr;
+}
+} // extern "C"
+#endif
+
+
+#endif // ASAN_ALLOCATOR_VERSION
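
The adaptive-redzone table in ComputeRZLog() above maps request sizes to redzone sizes between 16 and 2048 bytes (the real function additionally clamps against flags()->redzone). A standalone sketch of just the mapping, for seeing the thresholds in action:

    #include <cstdio>
    #include <initializer_list>

    static unsigned RZLog2Size(unsigned rz_log) { return 16u << rz_log; }  // 16..2048

    // Same size thresholds as ComputeRZLog() above, minus the flags() clamp.
    static unsigned ComputeRZLog(unsigned long n) {
      return n <= 64 - 16          ? 0 :
             n <= 128 - 32         ? 1 :
             n <= 512 - 64         ? 2 :
             n <= 4096 - 128       ? 3 :
             n <= (1 << 14) - 256  ? 4 :
             n <= (1 << 15) - 512  ? 5 :
             n <= (1 << 16) - 1024 ? 6 : 7;
    }

    int main() {
      for (unsigned long s : {16UL, 100UL, 5000UL, 100000UL})
        std::printf("request %6lu bytes -> %4u-byte redzones\n",
                    s, RZLog2Size(ComputeRZLog(s)));
      return 0;
    }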
diff --git a/libsanitizer/asan/asan_fake_stack.cc b/libsanitizer/asan/asan_fake_stack.cc
new file mode 100644
index 00000000000..e8d1e78488b
--- /dev/null
+++ b/libsanitizer/asan/asan_fake_stack.cc
@@ -0,0 +1,180 @@
+//===-- asan_fake_stack.cc ------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// FakeStack is used to detect use-after-return bugs.
+//===----------------------------------------------------------------------===//
+#include "asan_allocator.h"
+#include "asan_thread.h"
+#include "asan_thread_registry.h"
+#include "sanitizer/asan_interface.h"
+
+namespace __asan {
+
+FakeStack::FakeStack() {
+ CHECK(REAL(memset) != 0);
+ REAL(memset)(this, 0, sizeof(*this));
+}
+
+bool FakeStack::AddrIsInSizeClass(uptr addr, uptr size_class) {
+ uptr mem = allocated_size_classes_[size_class];
+ uptr size = ClassMmapSize(size_class);
+ bool res = mem && addr >= mem && addr < mem + size;
+ return res;
+}
+
+uptr FakeStack::AddrIsInFakeStack(uptr addr) {
+ for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
+ if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
+ }
+ return 0;
+}
+
+// We may want to compute this during compilation.
+inline uptr FakeStack::ComputeSizeClass(uptr alloc_size) {
+ uptr rounded_size = RoundUpToPowerOfTwo(alloc_size);
+ uptr log = Log2(rounded_size);
+ CHECK(alloc_size <= (1UL << log));
+ if (!(alloc_size > (1UL << (log-1)))) {
+ Printf("alloc_size %zu log %zu\n", alloc_size, log);
+ }
+ CHECK(alloc_size > (1UL << (log-1)));
+ uptr res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
+ CHECK(res < kNumberOfSizeClasses);
+ CHECK(ClassSize(res) >= rounded_size);
+ return res;
+}
+
+void FakeFrameFifo::FifoPush(FakeFrame *node) {
+ CHECK(node);
+ node->next = 0;
+ if (first_ == 0 && last_ == 0) {
+ first_ = last_ = node;
+ } else {
+ CHECK(first_);
+ CHECK(last_);
+ last_->next = node;
+ last_ = node;
+ }
+}
+
+FakeFrame *FakeFrameFifo::FifoPop() {
+ CHECK(first_ && last_ && "Exhausted fake stack");
+ FakeFrame *res = 0;
+ if (first_ == last_) {
+ res = first_;
+ first_ = last_ = 0;
+ } else {
+ res = first_;
+ first_ = first_->next;
+ }
+ return res;
+}
+
+void FakeStack::Init(uptr stack_size) {
+ stack_size_ = stack_size;
+ alive_ = true;
+}
+
+void FakeStack::Cleanup() {
+ alive_ = false;
+ for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
+ uptr mem = allocated_size_classes_[i];
+ if (mem) {
+ PoisonShadow(mem, ClassMmapSize(i), 0);
+ allocated_size_classes_[i] = 0;
+ UnmapOrDie((void*)mem, ClassMmapSize(i));
+ }
+ }
+}
+
+uptr FakeStack::ClassMmapSize(uptr size_class) {
+ return RoundUpToPowerOfTwo(stack_size_);
+}
+
+void FakeStack::AllocateOneSizeClass(uptr size_class) {
+ CHECK(ClassMmapSize(size_class) >= GetPageSizeCached());
+ uptr new_mem = (uptr)MmapOrDie(
+ ClassMmapSize(size_class), __FUNCTION__);
+ // Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
+ // asanThreadRegistry().GetCurrent()->tid(),
+ // size_class, new_mem, new_mem + ClassMmapSize(size_class),
+ // ClassMmapSize(size_class));
+ uptr i;
+ for (i = 0; i < ClassMmapSize(size_class);
+ i += ClassSize(size_class)) {
+ size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
+ }
+ CHECK(i == ClassMmapSize(size_class));
+ allocated_size_classes_[size_class] = new_mem;
+}
+
+uptr FakeStack::AllocateStack(uptr size, uptr real_stack) {
+ if (!alive_) return real_stack;
+ CHECK(size <= kMaxStackMallocSize && size > 1);
+ uptr size_class = ComputeSizeClass(size);
+ if (!allocated_size_classes_[size_class]) {
+ AllocateOneSizeClass(size_class);
+ }
+ FakeFrame *fake_frame = size_classes_[size_class].FifoPop();
+ CHECK(fake_frame);
+ fake_frame->size_minus_one = size - 1;
+ fake_frame->real_stack = real_stack;
+ while (FakeFrame *top = call_stack_.top()) {
+ if (top->real_stack > real_stack) break;
+ call_stack_.LifoPop();
+ DeallocateFrame(top);
+ }
+ call_stack_.LifoPush(fake_frame);
+ uptr ptr = (uptr)fake_frame;
+ PoisonShadow(ptr, size, 0);
+ return ptr;
+}
+
+void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
+ CHECK(alive_);
+ uptr size = fake_frame->size_minus_one + 1;
+ uptr size_class = ComputeSizeClass(size);
+ CHECK(allocated_size_classes_[size_class]);
+ uptr ptr = (uptr)fake_frame;
+ CHECK(AddrIsInSizeClass(ptr, size_class));
+ CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
+ size_classes_[size_class].FifoPush(fake_frame);
+}
+
+void FakeStack::OnFree(uptr ptr, uptr size, uptr real_stack) {
+ FakeFrame *fake_frame = (FakeFrame*)ptr;
+ CHECK(fake_frame->magic == kRetiredStackFrameMagic);
+ CHECK(fake_frame->descr != 0);
+ CHECK(fake_frame->size_minus_one == size - 1);
+ PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
+}
+
+} // namespace __asan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan; // NOLINT
+
+uptr __asan_stack_malloc(uptr size, uptr real_stack) {
+ if (!flags()->use_fake_stack) return real_stack;
+ AsanThread *t = asanThreadRegistry().GetCurrent();
+ if (!t) {
+ // TSD is gone, use the real stack.
+ return real_stack;
+ }
+ uptr ptr = t->fake_stack().AllocateStack(size, real_stack);
+ // Printf("__asan_stack_malloc %p %zu %p\n", ptr, size, real_stack);
+ return ptr;
+}
+
+void __asan_stack_free(uptr ptr, uptr size, uptr real_stack) {
+ if (!flags()->use_fake_stack) return;
+ if (ptr != real_stack) {
+ FakeStack::OnFree(ptr, size, real_stack);
+ }
+}
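
To see how the relocated fake-stack code is driven, here is the call protocol that ASan-instrumented functions follow around their locals — a hand-written illustration of what the compiler emits, not actual compiler output. It compiles as a translation unit but only does real work when linked against an ASan runtime carrying this patch:

    typedef unsigned long uptr;  // stand-in for __sanitizer::uptr

    extern "C" uptr __asan_stack_malloc(uptr size, uptr real_stack);
    extern "C" void __asan_stack_free(uptr ptr, uptr size, uptr real_stack);

    void instrumented_function() {
      char real_frame[128];  // the real stack frame
      uptr real = (uptr)&real_frame;
      // Prologue: request a fake frame; falls back to the real stack when
      // use_fake_stack is off or the thread's TSD is gone.
      uptr frame = __asan_stack_malloc(sizeof(real_frame), real);
      // ... body addresses its locals relative to `frame` ...
      // Epilogue: retire the fake frame (no-op if the real stack was used).
      __asan_stack_free(frame, sizeof(real_frame), real);
    }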
diff --git a/libsanitizer/asan/asan_flags.h b/libsanitizer/asan/asan_flags.h
index a0dcf3e8a57..49a90730084 100644
--- a/libsanitizer/asan/asan_flags.h
+++ b/libsanitizer/asan/asan_flags.h
@@ -43,7 +43,7 @@ struct Flags {
int report_globals;
// If set, attempts to catch initialization order issues.
bool check_initialization_order;
- // Max number of stack frames kept for each allocation.
+ // Max number of stack frames kept for each allocation/deallocation.
int malloc_context_size;
// If set, uses custom wrappers and replacements for libc string functions
// to find more errors.
@@ -93,6 +93,17 @@ struct Flags {
bool print_full_thread_history;
// ASan will write logs to "log_path.pid" instead of stderr.
const char *log_path;
+ // Use fast (frame-pointer-based) unwinder on fatal errors (if available).
+ bool fast_unwind_on_fatal;
+ // Use fast (frame-pointer-based) unwinder on malloc/free (if available).
+ bool fast_unwind_on_malloc;
+ // Poison (or not) the heap memory on [de]allocation. Zero value is useful
+ // for benchmarking the allocator or instrumentator.
+ bool poison_heap;
+ // Report errors on malloc/delete, new/free, new/delete[], etc.
+ bool alloc_dealloc_mismatch;
+ // Use stack depot instead of storing stacks in the redzones.
+ bool use_stack_depot;
};
Flags *flags();
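
These are runtime options, so the new checks can be toggled without rebuilding. A hypothetical session exercising alloc_dealloc_mismatch, assuming a toolchain that carries this patch (flag name taken from the struct above):

    // demo.cc -- build:  g++ -fsanitize=address demo.cc
    //            run:    ASAN_OPTIONS=alloc_dealloc_mismatch=1 ./a.out
    int main() {
      int *p = new int[4];
      delete p;  // new[] paired with scalar delete: reported when the flag is on
      return 0;
    }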
diff --git a/libsanitizer/asan/asan_intercepted_functions.h b/libsanitizer/asan/asan_intercepted_functions.h
index 89f91ad6a31..8341bc65745 100644
--- a/libsanitizer/asan/asan_intercepted_functions.h
+++ b/libsanitizer/asan/asan_intercepted_functions.h
@@ -14,6 +14,7 @@
#include "asan_internal.h"
#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
using __sanitizer::uptr;
@@ -39,8 +40,10 @@ using __sanitizer::uptr;
#if defined(__linux__)
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
+# define ASAN_INTERCEPT_PRCTL 1
#else
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
+# define ASAN_INTERCEPT_PRCTL 0
#endif
#if !defined(__APPLE__)
@@ -149,10 +152,23 @@ DECLARE_FUNCTION_AND_WRAPPER(long long, atoll, const char *nptr); // NOLINT
DECLARE_FUNCTION_AND_WRAPPER(long long, strtoll, const char *nptr, char **endptr, int base); // NOLINT
# endif
+// unistd.h
+# if SANITIZER_INTERCEPT_READ
+DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, read, int fd, void *buf, SIZE_T count);
+# endif
+# if SANITIZER_INTERCEPT_PREAD
+DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, pread, int fd, void *buf,
+ SIZE_T count, OFF_T offset);
+# endif
+# if SANITIZER_INTERCEPT_PREAD64
+DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, pread64, int fd, void *buf,
+ SIZE_T count, OFF64_T offset);
+# endif
+
# if ASAN_INTERCEPT_MLOCKX
// mlock/munlock
-DECLARE_FUNCTION_AND_WRAPPER(int, mlock, const void *addr, size_t len);
-DECLARE_FUNCTION_AND_WRAPPER(int, munlock, const void *addr, size_t len);
+DECLARE_FUNCTION_AND_WRAPPER(int, mlock, const void *addr, SIZE_T len);
+DECLARE_FUNCTION_AND_WRAPPER(int, munlock, const void *addr, SIZE_T len);
DECLARE_FUNCTION_AND_WRAPPER(int, mlockall, int flags);
DECLARE_FUNCTION_AND_WRAPPER(int, munlockall, void);
# endif
diff --git a/libsanitizer/asan/asan_interceptors.cc b/libsanitizer/asan/asan_interceptors.cc
index 5b544c87fcb..26daee1727c 100644
--- a/libsanitizer/asan/asan_interceptors.cc
+++ b/libsanitizer/asan/asan_interceptors.cc
@@ -25,38 +25,20 @@
namespace __asan {
-// Instruments read/write access to a single byte in memory.
-// On error calls __asan_report_error, which aborts the program.
-#define ACCESS_ADDRESS(address, isWrite) do { \
- if (!AddrIsInMem(address) || AddressIsPoisoned(address)) { \
- GET_CURRENT_PC_BP_SP; \
- __asan_report_error(pc, bp, sp, address, isWrite, /* access_size */ 1); \
- } \
-} while (0)
-
// We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE,
 // and ASAN_WRITE_RANGE as macros instead of functions, so
 // that no extra frames are created and the stack trace contains
 // only the relevant information.
-
-// Instruments read/write access to a memory range.
-// More complex implementation is possible, for now just
-// checking the first and the last byte of a range.
-#define ACCESS_MEMORY_RANGE(offset, size, isWrite) do { \
- if (size > 0) { \
- uptr ptr = (uptr)(offset); \
- ACCESS_ADDRESS(ptr, isWrite); \
- ACCESS_ADDRESS(ptr + (size) - 1, isWrite); \
- } \
+// We check all shadow bytes.
+#define ACCESS_MEMORY_RANGE(offset, size, isWrite) do { \
+ if (uptr __ptr = __asan_region_is_poisoned((uptr)(offset), size)) { \
+ GET_CURRENT_PC_BP_SP; \
+ __asan_report_error(pc, bp, sp, __ptr, isWrite, /* access_size */1); \
+ } \
} while (0)
-#define ASAN_READ_RANGE(offset, size) do { \
- ACCESS_MEMORY_RANGE(offset, size, false); \
-} while (0)
-
-#define ASAN_WRITE_RANGE(offset, size) do { \
- ACCESS_MEMORY_RANGE(offset, size, true); \
-} while (0)
+#define ASAN_READ_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, false)
+#define ASAN_WRITE_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, true)
// Behavior of functions like "memcpy" or "strcpy" is undefined
// if memory intervals overlap. We report error in this case.
@@ -69,7 +51,7 @@ static inline bool RangesOverlap(const char *offset1, uptr length1,
const char *offset1 = (const char*)_offset1; \
const char *offset2 = (const char*)_offset2; \
if (RangesOverlap(offset1, length1, offset2, length2)) { \
- GET_STACK_TRACE_HERE(kStackTraceMax); \
+ GET_STACK_TRACE_FATAL_HERE; \
ReportStringFunctionMemoryRangesOverlap(name, offset1, length1, \
offset2, length2, &stack); \
} \
@@ -96,6 +78,11 @@ static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
// ---------------------- Wrappers ---------------- {{{1
using namespace __asan; // NOLINT
+#define COMMON_INTERCEPTOR_WRITE_RANGE(ptr, size) ASAN_WRITE_RANGE(ptr, size)
+#define COMMON_INTERCEPTOR_READ_RANGE(ptr, size) ASAN_READ_RANGE(ptr, size)
+#define COMMON_INTERCEPTOR_ENTER(func, ...) ENSURE_ASAN_INITED()
+#include "sanitizer_common/sanitizer_common_interceptors.h"
+
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
AsanThread *t = (AsanThread*)arg;
asanThreadRegistry().SetCurrent(t);
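
The COMMON_INTERCEPTOR_* definitions above are how the shared interceptors
from sanitizer_common_interceptors.h (read, pread, pread64, ...) get wired
to ASan's range checks. A hedged sketch of the shape such a wrapper takes
after macro expansion; the real read() interceptor lives in the included
header, so this is only illustrative:

    // Sketch: approximate shape of a common interceptor for read().
    INTERCEPTOR(SSIZE_T, read, int fd, void *buf, SIZE_T count) {
      ENSURE_ASAN_INITED();             // via COMMON_INTERCEPTOR_ENTER
      SSIZE_T res = REAL(read)(fd, buf, count);
      if (res > 0)
        ASAN_WRITE_RANGE(buf, res);     // read() writes into buf
      return res;
    }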
@@ -105,7 +92,7 @@ static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
#if ASAN_INTERCEPT_PTHREAD_CREATE
INTERCEPTOR(int, pthread_create, void *thread,
void *attr, void *(*start_routine)(void*), void *arg) {
- GET_STACK_TRACE_HERE(kStackTraceMax);
+ GET_STACK_TRACE_THREAD;
u32 current_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
AsanThread *t = AsanThread::Create(current_tid, start_routine, arg, &stack);
asanThreadRegistry().RegisterThread(t);
@@ -175,6 +162,25 @@ INTERCEPTOR(void, siglongjmp, void *env, int val) {
}
#endif
+#if ASAN_INTERCEPT_PRCTL
+#define PR_SET_NAME 15
+INTERCEPTOR(int, prctl, int option,
+ unsigned long arg2, unsigned long arg3, // NOLINT
+ unsigned long arg4, unsigned long arg5) { // NOLINT
+  int res = REAL(prctl)(option, arg2, arg3, arg4, arg5);
+ if (option == PR_SET_NAME) {
+ AsanThread *t = asanThreadRegistry().GetCurrent();
+ if (t) {
+ char buff[17];
+ internal_strncpy(buff, (char*)arg2, 16);
+ buff[16] = 0;
+ t->summary()->set_name(buff);
+ }
+ }
+ return res;
+}
+#endif
+
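
With this interceptor, a thread that names itself is remembered in its
AsanThreadSummary, and reports print the name next to the tid (see
ThreadNameWithParenthesis in asan_report.cc below). A hedged usage sketch:

    // Sketch: naming a thread so reports can show e.g. "T1 (worker)".
    #include <sys/prctl.h>
    void *worker_main(void *arg) {
      // Intercepted; the runtime keeps at most 16 bytes of the name.
      prctl(PR_SET_NAME, (unsigned long)"worker", 0, 0, 0);
      // ... thread body ...
      return 0;
    }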
#if ASAN_INTERCEPT___CXA_THROW
INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
CHECK(REAL(__cxa_throw));
@@ -256,8 +262,8 @@ INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
// See http://llvm.org/bugs/show_bug.cgi?id=11763.
CHECK_RANGES_OVERLAP("memcpy", to, size, from, size);
}
- ASAN_WRITE_RANGE(from, size);
- ASAN_READ_RANGE(to, size);
+ ASAN_READ_RANGE(from, size);
+ ASAN_WRITE_RANGE(to, size);
}
#if MAC_INTERPOSE_FUNCTIONS
// Interposing of resolver functions is broken on Mac OS 10.7 and 10.8.
@@ -275,8 +281,8 @@ INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) {
}
ENSURE_ASAN_INITED();
if (flags()->replace_intrin) {
- ASAN_WRITE_RANGE(from, size);
- ASAN_READ_RANGE(to, size);
+ ASAN_READ_RANGE(from, size);
+ ASAN_WRITE_RANGE(to, size);
}
#if MAC_INTERPOSE_FUNCTIONS
// Interposing of resolver functions is broken on Mac OS 10.7 and 10.8.
@@ -621,7 +627,7 @@ INTERCEPTOR_WINAPI(DWORD, CreateThread,
void* security, uptr stack_size,
DWORD (__stdcall *start_routine)(void*), void* arg,
DWORD flags, void* tid) {
- GET_STACK_TRACE_HERE(kStackTraceMax);
+ GET_STACK_TRACE_THREAD;
u32 current_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
AsanThread *t = AsanThread::Create(current_tid, start_routine, arg, &stack);
asanThreadRegistry().RegisterThread(t);
@@ -646,6 +652,9 @@ void InitializeAsanInterceptors() {
#if MAC_INTERPOSE_FUNCTIONS
return;
#endif
+
+ SANITIZER_COMMON_INTERCEPTORS_INIT;
+
// Intercept mem* functions.
ASAN_INTERCEPT_FUNC(memcmp);
ASAN_INTERCEPT_FUNC(memmove);
@@ -718,6 +727,9 @@ void InitializeAsanInterceptors() {
#if ASAN_INTERCEPT_SIGLONGJMP
ASAN_INTERCEPT_FUNC(siglongjmp);
#endif
+#if ASAN_INTERCEPT_PRCTL
+ ASAN_INTERCEPT_FUNC(prctl);
+#endif
// Intercept exception handling functions.
#if ASAN_INTERCEPT___CXA_THROW
diff --git a/libsanitizer/asan/asan_internal.h b/libsanitizer/asan/asan_internal.h
index ac1ee7de700..1717fce66fb 100644
--- a/libsanitizer/asan/asan_internal.h
+++ b/libsanitizer/asan/asan_internal.h
@@ -81,9 +81,9 @@
// If set, values like allocator chunk size, as well as defaults for some flags
// will be changed towards less memory overhead.
#ifndef ASAN_LOW_MEMORY
-# ifdef ASAN_ANDROID
+#if SANITIZER_WORDSIZE == 32
# define ASAN_LOW_MEMORY 1
-# else
+#else
# define ASAN_LOW_MEMORY 0
# endif
#endif
@@ -143,6 +143,15 @@ bool PlatformHasDifferentMemcpyAndMemmove();
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
#endif // __APPLE__
+// Convenience macros for interface functions that may be provided as
+// weak hooks.
+#define ASAN_MALLOC_HOOK(ptr, size) \
+ if (&__asan_malloc_hook) __asan_malloc_hook(ptr, size)
+#define ASAN_FREE_HOOK(ptr) \
+ if (&__asan_free_hook) __asan_free_hook(ptr)
+#define ASAN_ON_ERROR() \
+ if (&__asan_on_error) __asan_on_error()
+
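
Because these are weak interface functions, a client can observe every
allocation without rebuilding the runtime, and the address-of test above
makes each call site a cheap no-op when no definition is present. A hedged
sketch of client-side hooks; the parameter types follow
sanitizer/asan_interface.h and are assumptions here:

    // Sketch: client-defined hooks reached via ASAN_MALLOC_HOOK/FREE_HOOK.
    extern "C" void __asan_malloc_hook(void *ptr, size_t size) {
      // Called after each allocation; avoid allocating here to
      // prevent recursion into the allocator.
    }
    extern "C" void __asan_free_hook(void *ptr) {
      // Called before each deallocation.
    }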
extern int asan_inited;
// Used to avoid infinite recursion in __asan_init().
extern bool asan_init_is_running;
diff --git a/libsanitizer/asan/asan_linux.cc b/libsanitizer/asan/asan_linux.cc
index 447999446c8..0e6c6280f0b 100644
--- a/libsanitizer/asan/asan_linux.cc
+++ b/libsanitizer/asan/asan_linux.cc
@@ -120,53 +120,21 @@ void AsanLock::Unlock() {
pthread_mutex_unlock((pthread_mutex_t*)&opaque_storage_);
}
-#ifdef __arm__
-#define UNWIND_STOP _URC_END_OF_STACK
-#define UNWIND_CONTINUE _URC_NO_REASON
-#else
-#define UNWIND_STOP _URC_NORMAL_STOP
-#define UNWIND_CONTINUE _URC_NO_REASON
-#endif
-
-uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
-#ifdef __arm__
- uptr val;
- _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
- 15 /* r15 = PC */, _UVRSD_UINT32, &val);
- CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
- // Clear the Thumb bit.
- return val & ~(uptr)1;
-#else
- return _Unwind_GetIP(ctx);
+void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
+#if defined(__arm__) || \
+ defined(__powerpc__) || defined(__powerpc64__) || \
+ defined(__sparc__)
+ fast = false;
#endif
-}
-
-_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx,
- void *param) {
- StackTrace *b = (StackTrace*)param;
- CHECK(b->size < b->max_size);
- uptr pc = Unwind_GetIP(ctx);
- b->trace[b->size++] = pc;
- if (b->size == b->max_size) return UNWIND_STOP;
- return UNWIND_CONTINUE;
-}
-
-void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp) {
+ if (!fast)
+ return stack->SlowUnwindStack(pc, max_s);
stack->size = 0;
stack->trace[0] = pc;
- if ((max_s) > 1) {
+ if (max_s > 1) {
stack->max_size = max_s;
-#if defined(__arm__) || \
- defined(__powerpc__) || defined(__powerpc64__) || \
- defined(__sparc__)
- _Unwind_Backtrace(Unwind_Trace, stack);
- // Pop off the two ASAN functions from the backtrace.
- stack->PopStackFrames(2);
-#else
if (!asan_inited) return;
if (AsanThread *t = asanThreadRegistry().GetCurrent())
stack->FastUnwindStack(pc, bp, t->stack_top(), t->stack_bottom());
-#endif
}
}
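
The fast path depends on an intact frame-pointer chain, where each frame
stores the caller's frame pointer and the return address at fixed offsets;
that is why the targets listed above, which lack a reliable chain, are
forced onto the slow _Unwind-based path. A minimal sketch of the walk that
FastUnwindStack performs, assuming an x86-style layout (saved bp at
frame[0], return address at frame[1]) and with alignment checks omitted:

    // Sketch: frame-pointer unwinding under an x86-style frame layout.
    void FastUnwindSketch(uptr *trace, uptr max_size, uptr pc, uptr bp,
                          uptr stack_top, uptr stack_bottom) {
      uptr size = 0;
      trace[size++] = pc;
      uptr *frame = (uptr *)bp;
      while (size < max_size &&
             (uptr)frame >= stack_bottom &&
             (uptr)frame + 2 * sizeof(uptr) <= stack_top) {
        trace[size++] = frame[1];        // saved return address
        uptr *next = (uptr *)frame[0];   // saved frame pointer
        if (next <= frame) break;        // chain must grow monotonically
        frame = next;
      }
    }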
diff --git a/libsanitizer/asan/asan_mac.cc b/libsanitizer/asan/asan_mac.cc
index db0b6d30a8b..094c69ff6a2 100644
--- a/libsanitizer/asan/asan_mac.cc
+++ b/libsanitizer/asan/asan_mac.cc
@@ -158,7 +158,8 @@ void AsanLock::Unlock() {
OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}
-void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp) {
+void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
+ (void)fast;
stack->size = 0;
stack->trace[0] = pc;
if ((max_s) > 1) {
@@ -306,7 +307,7 @@ void asan_register_worker_thread(int parent_tid, StackTrace *stack) {
// alloc_asan_context().
extern "C"
void asan_dispatch_call_block_and_release(void *block) {
- GET_STACK_TRACE_HERE(kStackTraceMax);
+ GET_STACK_TRACE_THREAD;
asan_block_context_t *context = (asan_block_context_t*)block;
if (flags()->verbosity >= 2) {
Report("asan_dispatch_call_block_and_release(): "
@@ -316,7 +317,7 @@ void asan_dispatch_call_block_and_release(void *block) {
asan_register_worker_thread(context->parent_tid, &stack);
// Call the original dispatcher for the block.
context->func(context->block);
- asan_free(context, &stack);
+ asan_free(context, &stack, FROM_MALLOC);
}
} // namespace __asan
@@ -341,7 +342,7 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
#define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \
INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \
dispatch_function_t func) { \
- GET_STACK_TRACE_HERE(kStackTraceMax); \
+ GET_STACK_TRACE_THREAD; \
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \
if (flags()->verbosity >= 2) { \
Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
@@ -359,7 +360,7 @@ INTERCEPT_DISPATCH_X_F_3(dispatch_barrier_async_f)
INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
dispatch_queue_t dq, void *ctxt,
dispatch_function_t func) {
- GET_STACK_TRACE_HERE(kStackTraceMax);
+ GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
if (flags()->verbosity >= 2) {
Report("dispatch_after_f: %p\n", asan_ctxt);
@@ -372,7 +373,7 @@ INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
dispatch_queue_t dq, void *ctxt,
dispatch_function_t func) {
- GET_STACK_TRACE_HERE(kStackTraceMax);
+ GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
if (flags()->verbosity >= 2) {
Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
@@ -407,7 +408,7 @@ void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void));
void (^asan_block)(void); \
int parent_tid = asanThreadRegistry().GetCurrentTidOrInvalid(); \
asan_block = ^(void) { \
- GET_STACK_TRACE_HERE(kStackTraceMax); \
+ GET_STACK_TRACE_THREAD; \
asan_register_worker_thread(parent_tid, &stack); \
work(); \
}
@@ -457,15 +458,15 @@ void *wrap_workitem_func(void *arg) {
asan_block_context_t *ctxt = (asan_block_context_t*)arg;
worker_t fn = (worker_t)(ctxt->func);
void *result = fn(ctxt->block);
- GET_STACK_TRACE_HERE(kStackTraceMax);
- asan_free(arg, &stack);
+ GET_STACK_TRACE_THREAD;
+ asan_free(arg, &stack, FROM_MALLOC);
return result;
}
INTERCEPTOR(int, pthread_workqueue_additem_np, pthread_workqueue_t workq,
void *(*workitem_func)(void *), void * workitem_arg,
pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp) {
- GET_STACK_TRACE_HERE(kStackTraceMax);
+ GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt =
(asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), &stack);
asan_ctxt->block = workitem_arg;
diff --git a/libsanitizer/asan/asan_malloc_linux.cc b/libsanitizer/asan/asan_malloc_linux.cc
index b00bbe5deca..e33d0c0d4d3 100644
--- a/libsanitizer/asan/asan_malloc_linux.cc
+++ b/libsanitizer/asan/asan_malloc_linux.cc
@@ -17,6 +17,8 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_stack.h"
+#include "asan_thread_registry.h"
+#include "sanitizer/asan_interface.h"
#if ASAN_ANDROID
DECLARE_REAL_AND_INTERCEPTOR(void*, malloc, uptr size)
@@ -57,17 +59,17 @@ void ReplaceSystemMalloc() {
using namespace __asan; // NOLINT
INTERCEPTOR(void, free, void *ptr) {
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);
- asan_free(ptr, &stack);
+ GET_STACK_TRACE_FREE;
+ asan_free(ptr, &stack, FROM_MALLOC);
}
INTERCEPTOR(void, cfree, void *ptr) {
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);
- asan_free(ptr, &stack);
+ GET_STACK_TRACE_FREE;
+ asan_free(ptr, &stack, FROM_MALLOC);
}
INTERCEPTOR(void*, malloc, uptr size) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
}
@@ -83,25 +85,25 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
CHECK(allocated < kCallocPoolSize);
return mem;
}
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}
INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
}
INTERCEPTOR(void*, memalign, uptr boundary, uptr size) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
- return asan_memalign(boundary, size, &stack);
+ GET_STACK_TRACE_MALLOC;
+ return asan_memalign(boundary, size, &stack, FROM_MALLOC);
}
INTERCEPTOR(void*, __libc_memalign, uptr align, uptr s)
ALIAS("memalign");
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_malloc_usable_size(ptr, &stack);
}
@@ -124,19 +126,23 @@ INTERCEPTOR(int, mallopt, int cmd, int value) {
}
INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
// Printf("posix_memalign: %zx %zu\n", alignment, size);
return asan_posix_memalign(memptr, alignment, size, &stack);
}
INTERCEPTOR(void*, valloc, uptr size) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_valloc(size, &stack);
}
INTERCEPTOR(void*, pvalloc, uptr size) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_pvalloc(size, &stack);
}
+INTERCEPTOR(void, malloc_stats, void) {
+ __asan_print_accumulated_stats();
+}
+
#endif // __linux__
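
The new malloc_stats interceptor routes glibc's diagnostic entry point to
ASan's own statistics printer, so existing diagnostics keep producing
output under ASan. A hedged sketch:

    // Sketch: a glibc-style stats call now lands in ASan's printer.
    #include <malloc.h>
    #include <stdlib.h>
    int main() {
      void *p = malloc(1 << 20);
      malloc_stats();  // intercepted: prints ASan's accumulated stats
      free(p);
      return 0;
    }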
diff --git a/libsanitizer/asan/asan_malloc_mac.cc b/libsanitizer/asan/asan_malloc_mac.cc
index b56b620a1e3..97aa4424d33 100644
--- a/libsanitizer/asan/asan_malloc_mac.cc
+++ b/libsanitizer/asan/asan_malloc_mac.cc
@@ -90,8 +90,8 @@ INTERCEPTOR(void, free, void *ptr) {
#endif
} else {
if (!asan_mz_size(ptr)) ptr = get_saved_cfallocator_ref(ptr);
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);
- asan_free(ptr, &stack);
+ GET_STACK_TRACE_FREE;
+ asan_free(ptr, &stack, FROM_MALLOC);
}
}
@@ -128,7 +128,7 @@ void *mz_malloc(malloc_zone_t *zone, size_t size) {
CHECK(system_malloc_zone);
return malloc_zone_malloc(system_malloc_zone, size);
}
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
}
@@ -137,7 +137,7 @@ void *cf_malloc(CFIndex size, CFOptionFlags hint, void *info) {
CHECK(system_malloc_zone);
return malloc_zone_malloc(system_malloc_zone, size);
}
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
}
@@ -153,7 +153,7 @@ void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
CHECK(allocated < kCallocPoolSize);
return mem;
}
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}
@@ -162,8 +162,8 @@ void *mz_valloc(malloc_zone_t *zone, size_t size) {
CHECK(system_malloc_zone);
return malloc_zone_valloc(system_malloc_zone, size);
}
- GET_STACK_TRACE_HERE_FOR_MALLOC;
- return asan_memalign(GetPageSizeCached(), size, &stack);
+ GET_STACK_TRACE_MALLOC;
+ return asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
}
#define GET_ZONE_FOR_PTR(ptr) \
@@ -173,8 +173,8 @@ void *mz_valloc(malloc_zone_t *zone, size_t size) {
void ALWAYS_INLINE free_common(void *context, void *ptr) {
if (!ptr) return;
if (asan_mz_size(ptr)) {
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);
- asan_free(ptr, &stack);
+ GET_STACK_TRACE_FREE;
+ asan_free(ptr, &stack, FROM_MALLOC);
} else {
// If the pointer does not belong to any of the zones, use one of the
// fallback methods to free memory.
@@ -188,9 +188,9 @@ void ALWAYS_INLINE free_common(void *context, void *ptr) {
// If the memory chunk pointer was moved to store additional
// CFAllocatorRef, fix it back.
ptr = get_saved_cfallocator_ref(ptr);
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);
+ GET_STACK_TRACE_FREE;
if (!flags()->mac_ignore_invalid_free) {
- asan_free(ptr, &stack);
+ asan_free(ptr, &stack, FROM_MALLOC);
} else {
GET_ZONE_FOR_PTR(ptr);
WarnMacFreeUnallocated((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
@@ -211,17 +211,17 @@ void cf_free(void *ptr, void *info) {
void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
if (!ptr) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
} else {
if (asan_mz_size(ptr)) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
} else {
// We can't recover from reallocating an unknown address, because
// this would require reading at most |size| bytes from
       // potentially inaccessible memory.
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);
+ GET_STACK_TRACE_FREE;
GET_ZONE_FOR_PTR(ptr);
ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
}
@@ -230,17 +230,17 @@ void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
void *cf_realloc(void *ptr, CFIndex size, CFOptionFlags hint, void *info) {
if (!ptr) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
} else {
if (asan_mz_size(ptr)) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
} else {
// We can't recover from reallocating an unknown address, because
// this would require reading at most |size| bytes from
       // potentially inaccessible memory.
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);
+ GET_STACK_TRACE_FREE;
GET_ZONE_FOR_PTR(ptr);
ReportMacCfReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
}
@@ -259,8 +259,8 @@ void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
CHECK(system_malloc_zone);
return malloc_zone_memalign(system_malloc_zone, align, size);
}
- GET_STACK_TRACE_HERE_FOR_MALLOC;
- return asan_memalign(align, size, &stack);
+ GET_STACK_TRACE_MALLOC;
+ return asan_memalign(align, size, &stack, FROM_MALLOC);
}
// This function is currently unused, and we build with -Werror.
diff --git a/libsanitizer/asan/asan_malloc_win.cc b/libsanitizer/asan/asan_malloc_win.cc
index 7389a257248..437079f5d1d 100644
--- a/libsanitizer/asan/asan_malloc_win.cc
+++ b/libsanitizer/asan/asan_malloc_win.cc
@@ -29,8 +29,8 @@ using namespace __asan; // NOLINT
extern "C" {
void free(void *ptr) {
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);
- return asan_free(ptr, &stack);
+ GET_STACK_TRACE_FREE;
+ return asan_free(ptr, &stack, FROM_MALLOC);
}
void _free_dbg(void* ptr, int) {
@@ -42,7 +42,7 @@ void cfree(void *ptr) {
}
void *malloc(size_t size) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
}
@@ -51,7 +51,7 @@ void* _malloc_dbg(size_t size, int , const char*, int) {
}
void *calloc(size_t nmemb, size_t size) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}
@@ -64,7 +64,7 @@ void *_calloc_impl(size_t nmemb, size_t size, int *errno_tmp) {
}
void *realloc(void *ptr, size_t size) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
}
@@ -83,7 +83,7 @@ void* _recalloc(void* p, size_t n, size_t elem_size) {
}
size_t _msize(void *ptr) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_malloc_usable_size(ptr, &stack);
}
diff --git a/libsanitizer/asan/asan_mapping.h b/libsanitizer/asan/asan_mapping.h
index 85d1129dea8..54e21b79678 100644
--- a/libsanitizer/asan/asan_mapping.h
+++ b/libsanitizer/asan/asan_mapping.h
@@ -18,8 +18,8 @@
// http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm
#if ASAN_FLEXIBLE_MAPPING_AND_OFFSET == 1
-extern __attribute__((visibility("default"))) uptr __asan_mapping_scale;
-extern __attribute__((visibility("default"))) uptr __asan_mapping_offset;
+extern SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_scale;
+extern SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_offset;
# define SHADOW_SCALE (__asan_mapping_scale)
# define SHADOW_OFFSET (__asan_mapping_offset)
#else
diff --git a/libsanitizer/asan/asan_new_delete.cc b/libsanitizer/asan/asan_new_delete.cc
index 8132e58b6e2..6597b93366f 100644
--- a/libsanitizer/asan/asan_new_delete.cc
+++ b/libsanitizer/asan/asan_new_delete.cc
@@ -33,32 +33,34 @@ namespace std {
struct nothrow_t {};
} // namespace std
-#define OPERATOR_NEW_BODY \
- GET_STACK_TRACE_HERE_FOR_MALLOC;\
- return asan_memalign(0, size, &stack);
+#define OPERATOR_NEW_BODY(type) \
+ GET_STACK_TRACE_MALLOC;\
+ return asan_memalign(0, size, &stack, type);
INTERCEPTOR_ATTRIBUTE
-void *operator new(size_t size) { OPERATOR_NEW_BODY; }
+void *operator new(size_t size) { OPERATOR_NEW_BODY(FROM_NEW); }
INTERCEPTOR_ATTRIBUTE
-void *operator new[](size_t size) { OPERATOR_NEW_BODY; }
+void *operator new[](size_t size) { OPERATOR_NEW_BODY(FROM_NEW_BR); }
INTERCEPTOR_ATTRIBUTE
-void *operator new(size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
+void *operator new(size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(FROM_NEW); }
INTERCEPTOR_ATTRIBUTE
-void *operator new[](size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
+void *operator new[](size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(FROM_NEW_BR); }
-#define OPERATOR_DELETE_BODY \
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);\
- asan_free(ptr, &stack);
+#define OPERATOR_DELETE_BODY(type) \
+ GET_STACK_TRACE_FREE;\
+ asan_free(ptr, &stack, type);
INTERCEPTOR_ATTRIBUTE
-void operator delete(void *ptr) { OPERATOR_DELETE_BODY; }
+void operator delete(void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW); }
INTERCEPTOR_ATTRIBUTE
-void operator delete[](void *ptr) { OPERATOR_DELETE_BODY; }
+void operator delete[](void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW_BR); }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY; }
+{ OPERATOR_DELETE_BODY(FROM_NEW); }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY; }
+{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
#endif
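
Tagging each new/delete flavor with an AllocType is what lets the allocator
pair allocation and deallocation kinds and call ReportAllocTypeMismatch
(see asan_report.cc below) when they disagree, gated by the new
alloc_dealloc_mismatch flag. A hedged example of code that now triggers a
report:

    // Sketch: mismatched pairs caught by the FROM_NEW/FROM_NEW_BR tags
    // when alloc_dealloc_mismatch=1 (the default set in this patch).
    #include <cstdlib>
    int main() {
      int *a = new int[10];  // allocated as FROM_NEW_BR
      delete a;              // deallocated as FROM_NEW: mismatch
      char *b = new char;    // allocated as FROM_NEW
      std::free(b);          // deallocated as FROM_MALLOC: mismatch
      return 0;
    }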
diff --git a/libsanitizer/asan/asan_poisoning.cc b/libsanitizer/asan/asan_poisoning.cc
index 449299a5836..a00bafffae0 100644
--- a/libsanitizer/asan/asan_poisoning.cc
+++ b/libsanitizer/asan/asan_poisoning.cc
@@ -14,10 +14,12 @@
#include "asan_internal.h"
#include "asan_mapping.h"
#include "sanitizer/asan_interface.h"
+#include "sanitizer_common/sanitizer_libc.h"
namespace __asan {
void PoisonShadow(uptr addr, uptr size, u8 value) {
+ if (!flags()->poison_heap) return;
CHECK(AddrIsAlignedByGranularity(addr));
CHECK(AddrIsAlignedByGranularity(addr + size));
uptr shadow_beg = MemToShadow(addr);
@@ -30,6 +32,7 @@ void PoisonShadowPartialRightRedzone(uptr addr,
uptr size,
uptr redzone_size,
u8 value) {
+ if (!flags()->poison_heap) return;
CHECK(AddrIsAlignedByGranularity(addr));
u8 *shadow = (u8*)MemToShadow(addr);
for (uptr i = 0; i < redzone_size;
@@ -150,6 +153,33 @@ bool __asan_address_is_poisoned(void const volatile *addr) {
return __asan::AddressIsPoisoned((uptr)addr);
}
+uptr __asan_region_is_poisoned(uptr beg, uptr size) {
+ if (!size) return 0;
+ uptr end = beg + size;
+ if (!AddrIsInMem(beg)) return beg;
+ if (!AddrIsInMem(end)) return end;
+ uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
+ uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
+ uptr shadow_beg = MemToShadow(aligned_b);
+ uptr shadow_end = MemToShadow(aligned_e);
+ // First check the first and the last application bytes,
+ // then check the SHADOW_GRANULARITY-aligned region by calling
+ // mem_is_zero on the corresponding shadow.
+ if (!__asan::AddressIsPoisoned(beg) &&
+ !__asan::AddressIsPoisoned(end - 1) &&
+ (shadow_end <= shadow_beg ||
+ __sanitizer::mem_is_zero((const char *)shadow_beg,
+ shadow_end - shadow_beg)))
+ return 0;
+ // The fast check failed, so we have a poisoned byte somewhere.
+ // Find it slowly.
+ for (; beg < end; beg++)
+ if (__asan::AddressIsPoisoned(beg))
+ return beg;
+ UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found");
+ return 0;
+}
+
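
The contract mirrors what ACCESS_MEMORY_RANGE needs: the address of the
first poisoned byte, or 0 when the whole range is addressable. Since the
function is part of the public interface, clients can pre-check a buffer
as well; a hedged sketch:

    // Sketch: client-side use of the new interface function.
    void CheckBuffer(const void *p, uptr n) {
      if (uptr bad = __asan_region_is_poisoned((uptr)p, n)) {
        // 'bad' is the first poisoned address in [p, p+n).
        __asan_describe_address(bad);  // also added by this patch
      }
    }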
// This is a simplified version of __asan_(un)poison_memory_region, which
// assumes that left border of region to be poisoned is properly aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
@@ -166,7 +196,7 @@ static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
// If possible, mark all the bytes mapping to last shadow byte as
// unaddressable.
if (end_value > 0 && end_value <= end_offset)
- *shadow_end = kAsanStackUseAfterScopeMagic;
+ *shadow_end = (s8)kAsanStackUseAfterScopeMagic;
} else {
     // If necessary, mark the first few bytes mapping to the last shadow
     // byte as addressable.
diff --git a/libsanitizer/asan/asan_report.cc b/libsanitizer/asan/asan_report.cc
index 9804ddd1850..ed18ab25098 100644
--- a/libsanitizer/asan/asan_report.cc
+++ b/libsanitizer/asan/asan_report.cc
@@ -16,6 +16,9 @@
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
namespace __asan {
@@ -38,14 +41,79 @@ void AppendToErrorMessageBuffer(const char *buffer) {
}
}
+// ---------------------- Decorator ------------------------------ {{{1
+bool PrintsToTtyCached() {
+ static int cached = 0;
+ static bool prints_to_tty;
+ if (!cached) { // Ok wrt threads since we are printing only from one thread.
+ prints_to_tty = PrintsToTty();
+ cached = 1;
+ }
+ return prints_to_tty;
+}
+class Decorator: private __sanitizer::AnsiColorDecorator {
+ public:
+ Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { }
+ const char *Warning() { return Red(); }
+ const char *EndWarning() { return Default(); }
+ const char *Access() { return Blue(); }
+ const char *EndAccess() { return Default(); }
+ const char *Location() { return Green(); }
+ const char *EndLocation() { return Default(); }
+ const char *Allocation() { return Magenta(); }
+ const char *EndAllocation() { return Default(); }
+
+ const char *ShadowByte(u8 byte) {
+ switch (byte) {
+ case kAsanHeapLeftRedzoneMagic:
+ case kAsanHeapRightRedzoneMagic:
+ return Red();
+ case kAsanHeapFreeMagic:
+ return Magenta();
+ case kAsanStackLeftRedzoneMagic:
+ case kAsanStackMidRedzoneMagic:
+ case kAsanStackRightRedzoneMagic:
+ case kAsanStackPartialRedzoneMagic:
+ return Red();
+ case kAsanStackAfterReturnMagic:
+ return Magenta();
+ case kAsanInitializationOrderMagic:
+ return Cyan();
+ case kAsanUserPoisonedMemoryMagic:
+ return Blue();
+ case kAsanStackUseAfterScopeMagic:
+ return Magenta();
+ case kAsanGlobalRedzoneMagic:
+ return Red();
+ case kAsanInternalHeapMagic:
+ return Yellow();
+ default:
+ return Default();
+ }
+ }
+ const char *EndShadowByte() { return Default(); }
+};
+
// ---------------------- Helper functions ----------------------- {{{1
-static void PrintBytes(const char *before, uptr *a) {
- u8 *bytes = (u8*)a;
- uptr byte_num = (SANITIZER_WORDSIZE) / 8;
- Printf("%s%p:", before, (void*)a);
- for (uptr i = 0; i < byte_num; i++) {
- Printf(" %x%x", bytes[i] >> 4, bytes[i] & 15);
+static void PrintShadowByte(const char *before, u8 byte,
+ const char *after = "\n") {
+ Decorator d;
+ Printf("%s%s%x%x%s%s", before,
+ d.ShadowByte(byte), byte >> 4, byte & 15, d.EndShadowByte(), after);
+}
+
+static void PrintShadowBytes(const char *before, u8 *bytes,
+ u8 *guilty, uptr n) {
+ Decorator d;
+ if (before)
+ Printf("%s%p:", before, bytes);
+ for (uptr i = 0; i < n; i++) {
+ u8 *p = bytes + i;
+ const char *before = p == guilty ? "[" :
+ p - 1 == guilty ? "" : " ";
+ const char *after = p == guilty ? "]" : "";
+ PrintShadowByte(before, *p, after);
}
Printf("\n");
}
@@ -54,15 +122,35 @@ static void PrintShadowMemoryForAddress(uptr addr) {
if (!AddrIsInMem(addr))
return;
uptr shadow_addr = MemToShadow(addr);
- Printf("Shadow byte and word:\n");
- Printf(" %p: %x\n", (void*)shadow_addr, *(unsigned char*)shadow_addr);
- uptr aligned_shadow = shadow_addr & ~(kWordSize - 1);
- PrintBytes(" ", (uptr*)(aligned_shadow));
- Printf("More shadow bytes:\n");
- for (int i = -4; i <= 4; i++) {
+ const uptr n_bytes_per_row = 16;
+ uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
+ Printf("Shadow bytes around the buggy address:\n");
+ for (int i = -5; i <= 5; i++) {
const char *prefix = (i == 0) ? "=>" : " ";
- PrintBytes(prefix, (uptr*)(aligned_shadow + i * kWordSize));
+ PrintShadowBytes(prefix,
+ (u8*)(aligned_shadow + i * n_bytes_per_row),
+ (u8*)shadow_addr, n_bytes_per_row);
}
+ Printf("Shadow byte legend (one shadow byte represents %d "
+ "application bytes):\n", (int)SHADOW_GRANULARITY);
+ PrintShadowByte(" Addressable: ", 0);
+ Printf(" Partially addressable: ");
+ for (uptr i = 1; i < SHADOW_GRANULARITY; i++)
+ PrintShadowByte("", i, " ");
+ Printf("\n");
+ PrintShadowByte(" Heap left redzone: ", kAsanHeapLeftRedzoneMagic);
+ PrintShadowByte(" Heap righ redzone: ", kAsanHeapRightRedzoneMagic);
+ PrintShadowByte(" Freed Heap region: ", kAsanHeapFreeMagic);
+ PrintShadowByte(" Stack left redzone: ", kAsanStackLeftRedzoneMagic);
+ PrintShadowByte(" Stack mid redzone: ", kAsanStackMidRedzoneMagic);
+ PrintShadowByte(" Stack right redzone: ", kAsanStackRightRedzoneMagic);
+ PrintShadowByte(" Stack partial redzone: ", kAsanStackPartialRedzoneMagic);
+ PrintShadowByte(" Stack after return: ", kAsanStackAfterReturnMagic);
+ PrintShadowByte(" Stack use after scope: ", kAsanStackUseAfterScopeMagic);
+ PrintShadowByte(" Global redzone: ", kAsanGlobalRedzoneMagic);
+ PrintShadowByte(" Global init order: ", kAsanInitializationOrderMagic);
+ PrintShadowByte(" Poisoned by user: ", kAsanUserPoisonedMemoryMagic);
+ PrintShadowByte(" ASan internal: ", kAsanInternalHeapMagic);
}
static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
@@ -98,6 +186,8 @@ static void PrintGlobalNameIfASCII(const __asan_global &g) {
bool DescribeAddressRelativeToGlobal(uptr addr, const __asan_global &g) {
if (addr < g.beg - kGlobalAndStackRedzone) return false;
if (addr >= g.beg + g.size_with_redzone) return false;
+ Decorator d;
+ Printf("%s", d.Location());
Printf("%p is located ", (void*)addr);
if (addr < g.beg) {
Printf("%zd bytes to the left", g.beg - addr);
@@ -108,6 +198,7 @@ bool DescribeAddressRelativeToGlobal(uptr addr, const __asan_global &g) {
}
Printf(" of global variable '%s' (0x%zx) of size %zu\n",
g.name, g.beg, g.size);
+ Printf("%s", d.EndLocation());
PrintGlobalNameIfASCII(g);
return true;
}
@@ -151,9 +242,12 @@ bool DescribeAddressIfStack(uptr addr, uptr access_size) {
internal_strncat(buf, frame_descr,
Min(kBufSize,
static_cast<sptr>(name_end - frame_descr)));
+ Decorator d;
+ Printf("%s", d.Location());
Printf("Address %p is located at offset %zu "
"in frame <%s> of T%d's stack:\n",
- (void*)addr, offset, buf, t->tid());
+ (void*)addr, offset, Demangle(buf), t->tid());
+ Printf("%s", d.EndLocation());
// Report the number of stack objects.
char *p;
uptr n_objects = internal_simple_strtoll(name_end, &p, 10);
@@ -187,6 +281,8 @@ bool DescribeAddressIfStack(uptr addr, uptr access_size) {
static void DescribeAccessToHeapChunk(AsanChunkView chunk, uptr addr,
uptr access_size) {
uptr offset;
+ Decorator d;
+ Printf("%s", d.Location());
Printf("%p is located ", (void*)addr);
if (chunk.AddrIsInside(addr, access_size, &offset)) {
Printf("%zu bytes inside of", offset);
@@ -199,6 +295,26 @@ static void DescribeAccessToHeapChunk(AsanChunkView chunk, uptr addr,
}
Printf(" %zu-byte region [%p,%p)\n", chunk.UsedSize(),
(void*)(chunk.Beg()), (void*)(chunk.End()));
+ Printf("%s", d.EndLocation());
+}
+
+// Return " (thread_name) " or an empty string if the name is empty.
+const char *ThreadNameWithParenthesis(AsanThreadSummary *t, char buff[],
+ uptr buff_len) {
+ const char *name = t->name();
+ if (*name == 0) return "";
+ buff[0] = 0;
+ internal_strncat(buff, " (", 3);
+ internal_strncat(buff, name, buff_len - 4);
+ internal_strncat(buff, ")", 2);
+ return buff;
+}
+
+const char *ThreadNameWithParenthesis(u32 tid, char buff[],
+ uptr buff_len) {
+ if (tid == kInvalidTid) return "";
+ AsanThreadSummary *t = asanThreadRegistry().FindByTid(tid);
+ return ThreadNameWithParenthesis(t, buff, buff_len);
}
void DescribeHeapAddress(uptr addr, uptr access_size) {
@@ -212,20 +328,31 @@ void DescribeHeapAddress(uptr addr, uptr access_size) {
chunk.GetAllocStack(&alloc_stack);
AsanThread *t = asanThreadRegistry().GetCurrent();
CHECK(t);
+ char tname[128];
+ Decorator d;
if (chunk.FreeTid() != kInvalidTid) {
AsanThreadSummary *free_thread =
asanThreadRegistry().FindByTid(chunk.FreeTid());
- Printf("freed by thread T%d here:\n", free_thread->tid());
+ Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(),
+ free_thread->tid(),
+ ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)),
+ d.EndAllocation());
StackTrace free_stack;
chunk.GetFreeStack(&free_stack);
PrintStack(&free_stack);
- Printf("previously allocated by thread T%d here:\n", alloc_thread->tid());
+ Printf("%spreviously allocated by thread T%d%s here:%s\n",
+ d.Allocation(), alloc_thread->tid(),
+ ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
+ d.EndAllocation());
PrintStack(&alloc_stack);
DescribeThread(t->summary());
DescribeThread(free_thread);
DescribeThread(alloc_thread);
} else {
- Printf("allocated by thread T%d here:\n", alloc_thread->tid());
+ Printf("%sallocated by thread T%d%s here:%s\n", d.Allocation(),
+ alloc_thread->tid(),
+ ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
+ d.EndAllocation());
PrintStack(&alloc_stack);
DescribeThread(t->summary());
DescribeThread(alloc_thread);
@@ -254,8 +381,13 @@ void DescribeThread(AsanThreadSummary *summary) {
return;
}
summary->set_announced(true);
- Printf("Thread T%d created by T%d here:\n",
- summary->tid(), summary->parent_tid());
+ char tname[128];
+ Printf("Thread T%d%s", summary->tid(),
+ ThreadNameWithParenthesis(summary->tid(), tname, sizeof(tname)));
+ Printf(" created by T%d%s here:\n",
+ summary->parent_tid(),
+ ThreadNameWithParenthesis(summary->parent_tid(),
+ tname, sizeof(tname)));
PrintStack(summary->stack());
   // Recursively describe the parent thread if needed.
if (flags()->print_full_thread_history) {
@@ -291,7 +423,7 @@ class ScopedInErrorReport {
// Die() to bypass any additional checks.
Exit(flags()->exitcode);
}
- __asan_on_error();
+ ASAN_ON_ERROR();
reporting_thread_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
Printf("===================================================="
"=============\n");
@@ -322,44 +454,79 @@ class ScopedInErrorReport {
void ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr) {
ScopedInErrorReport in_report;
+ Decorator d;
+ Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: SEGV on unknown address %p"
" (pc %p sp %p bp %p T%d)\n",
(void*)addr, (void*)pc, (void*)sp, (void*)bp,
asanThreadRegistry().GetCurrentTidOrInvalid());
+ Printf("%s", d.EndWarning());
Printf("AddressSanitizer can not provide additional info.\n");
- GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp);
+ GET_STACK_TRACE_FATAL(pc, bp);
PrintStack(&stack);
}
void ReportDoubleFree(uptr addr, StackTrace *stack) {
ScopedInErrorReport in_report;
+ Decorator d;
+ Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: attempting double-free on %p:\n", addr);
+ Printf("%s", d.EndWarning());
PrintStack(stack);
DescribeHeapAddress(addr, 1);
}
void ReportFreeNotMalloced(uptr addr, StackTrace *stack) {
ScopedInErrorReport in_report;
+ Decorator d;
+ Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: attempting free on address "
"which was not malloc()-ed: %p\n", addr);
+ Printf("%s", d.EndWarning());
PrintStack(stack);
DescribeHeapAddress(addr, 1);
}
+void ReportAllocTypeMismatch(uptr addr, StackTrace *stack,
+ AllocType alloc_type,
+ AllocType dealloc_type) {
+ static const char *alloc_names[] =
+ {"INVALID", "malloc", "operator new", "operator new []"};
+ static const char *dealloc_names[] =
+ {"INVALID", "free", "operator delete", "operator delete []"};
+ CHECK_NE(alloc_type, dealloc_type);
+ ScopedInErrorReport in_report;
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: alloc-dealloc-mismatch (%s vs %s) on %p\n",
+ alloc_names[alloc_type], dealloc_names[dealloc_type], addr);
+ Printf("%s", d.EndWarning());
+ PrintStack(stack);
+ DescribeHeapAddress(addr, 1);
+ Report("HINT: if you don't care about these warnings you may set "
+ "ASAN_OPTIONS=alloc_dealloc_mismatch=0\n");
+}
+
void ReportMallocUsableSizeNotOwned(uptr addr, StackTrace *stack) {
ScopedInErrorReport in_report;
+ Decorator d;
+ Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: attempting to call "
"malloc_usable_size() for pointer which is "
"not owned: %p\n", addr);
+ Printf("%s", d.EndWarning());
PrintStack(stack);
DescribeHeapAddress(addr, 1);
}
void ReportAsanGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack) {
ScopedInErrorReport in_report;
+ Decorator d;
+ Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: attempting to call "
"__asan_get_allocated_size() for pointer which is "
"not owned: %p\n", addr);
+ Printf("%s", d.EndWarning());
PrintStack(stack);
DescribeHeapAddress(addr, 1);
}
@@ -368,9 +535,12 @@ void ReportStringFunctionMemoryRangesOverlap(
const char *function, const char *offset1, uptr length1,
const char *offset2, uptr length2, StackTrace *stack) {
ScopedInErrorReport in_report;
+ Decorator d;
+ Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s-param-overlap: "
"memory ranges [%p,%p) and [%p, %p) overlap\n", \
function, offset1, offset1 + length1, offset2, offset2 + length2);
+ Printf("%s", d.EndWarning());
PrintStack(stack);
DescribeAddress((uptr)offset1, length1);
DescribeAddress((uptr)offset2, length2);
@@ -463,17 +633,23 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp,
break;
}
}
-
+ Decorator d;
+ Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s on address "
"%p at pc 0x%zx bp 0x%zx sp 0x%zx\n",
bug_descr, (void*)addr, pc, bp, sp);
+ Printf("%s", d.EndWarning());
u32 curr_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
- Printf("%s of size %zu at %p thread T%d\n",
- access_size ? (is_write ? "WRITE" : "READ") : "ACCESS",
- access_size, (void*)addr, curr_tid);
-
- GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp);
+ char tname[128];
+ Printf("%s%s of size %zu at %p thread T%d%s%s\n",
+ d.Access(),
+ access_size ? (is_write ? "WRITE" : "READ") : "ACCESS",
+ access_size, (void*)addr, curr_tid,
+ ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)),
+ d.EndAccess());
+
+ GET_STACK_TRACE_FATAL(pc, bp);
PrintStack(&stack);
DescribeAddress(addr, access_size);
@@ -491,7 +667,13 @@ void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
}
}
+void __asan_describe_address(uptr addr) {
+ DescribeAddress(addr, 1);
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default implementation of __asan_on_error that does nothing
 // and may be overridden by the user.
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE NOINLINE
void __asan_on_error() {}
+#endif
diff --git a/libsanitizer/asan/asan_report.h b/libsanitizer/asan/asan_report.h
index 9710bd7968e..a7e0e5816b7 100644
--- a/libsanitizer/asan/asan_report.h
+++ b/libsanitizer/asan/asan_report.h
@@ -10,6 +10,7 @@
// ASan-private header for error reporting functions.
//===----------------------------------------------------------------------===//
+#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_thread.h"
#include "sanitizer/asan_interface.h"
@@ -32,6 +33,9 @@ void DescribeThread(AsanThreadSummary *summary);
void NORETURN ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr);
void NORETURN ReportDoubleFree(uptr addr, StackTrace *stack);
void NORETURN ReportFreeNotMalloced(uptr addr, StackTrace *stack);
+void NORETURN ReportAllocTypeMismatch(uptr addr, StackTrace *stack,
+ AllocType alloc_type,
+ AllocType dealloc_type);
void NORETURN ReportMallocUsableSizeNotOwned(uptr addr,
StackTrace *stack);
void NORETURN ReportAsanGetAllocatedSizeNotOwned(uptr addr,
diff --git a/libsanitizer/asan/asan_rtl.cc b/libsanitizer/asan/asan_rtl.cc
index cf0025a70ef..6480bf4cc35 100644
--- a/libsanitizer/asan/asan_rtl.cc
+++ b/libsanitizer/asan/asan_rtl.cc
@@ -52,7 +52,7 @@ static void AsanCheckFailed(const char *file, int line, const char *cond,
file, line, cond, (uptr)v1, (uptr)v2);
// FIXME: check for infinite recursion without a thread-local counter here.
PRINT_CURRENT_STACK();
- ShowStatsAndAbort();
+ Die();
}
// -------------------------- Flags ------------------------- {{{1
@@ -64,6 +64,10 @@ Flags *flags() {
return &asan_flags;
}
+static const char *MaybeCallAsanDefaultOptions() {
+ return (&__asan_default_options) ? __asan_default_options() : "";
+}
+
static void ParseFlagsFromString(Flags *f, const char *str) {
ParseFlag(str, &f->quarantine_size, "quarantine_size");
ParseFlag(str, &f->symbolize, "symbolize");
@@ -98,21 +102,20 @@ static void ParseFlagsFromString(Flags *f, const char *str) {
ParseFlag(str, &f->allow_reexec, "allow_reexec");
ParseFlag(str, &f->print_full_thread_history, "print_full_thread_history");
ParseFlag(str, &f->log_path, "log_path");
+ ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal");
+ ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc");
+ ParseFlag(str, &f->poison_heap, "poison_heap");
+ ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch");
+ ParseFlag(str, &f->use_stack_depot, "use_stack_depot");
}
-extern "C" {
-SANITIZER_WEAK_ATTRIBUTE
-SANITIZER_INTERFACE_ATTRIBUTE
-const char* __asan_default_options() { return ""; }
-} // extern "C"
-
void InitializeFlags(Flags *f, const char *env) {
internal_memset(f, 0, sizeof(*f));
f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28;
f->symbolize = false;
f->verbosity = 0;
- f->redzone = (ASAN_LOW_MEMORY) ? 64 : 128;
+ f->redzone = ASAN_ALLOCATOR_VERSION == 2 ? 16 : (ASAN_LOW_MEMORY) ? 64 : 128;
f->debug = false;
f->report_globals = 1;
f->check_initialization_order = true;
@@ -137,12 +140,17 @@ void InitializeFlags(Flags *f, const char *env) {
f->allow_reexec = true;
f->print_full_thread_history = true;
f->log_path = 0;
+ f->fast_unwind_on_fatal = true;
+ f->fast_unwind_on_malloc = true;
+ f->poison_heap = true;
+ f->alloc_dealloc_mismatch = true;
+ f->use_stack_depot = true; // Only affects allocator2.
// Override from user-specified string.
- ParseFlagsFromString(f, __asan_default_options());
+ ParseFlagsFromString(f, MaybeCallAsanDefaultOptions());
if (flags()->verbosity) {
Report("Using the defaults from __asan_default_options: %s\n",
- __asan_default_options());
+ MaybeCallAsanDefaultOptions());
}
// Override from command line.
@@ -239,15 +247,12 @@ static NOINLINE void force_interface_symbols() {
case 27: __asan_set_error_exit_code(0); break;
case 28: __asan_stack_free(0, 0, 0); break;
case 29: __asan_stack_malloc(0, 0); break;
- case 30: __asan_on_error(); break;
- case 31: __asan_default_options(); break;
- case 32: __asan_before_dynamic_init(0, 0); break;
- case 33: __asan_after_dynamic_init(); break;
- case 34: __asan_malloc_hook(0, 0); break;
- case 35: __asan_free_hook(0); break;
- case 36: __asan_symbolize(0, 0, 0); break;
- case 37: __asan_poison_stack_memory(0, 0); break;
- case 38: __asan_unpoison_stack_memory(0, 0); break;
+ case 30: __asan_before_dynamic_init(0, 0); break;
+ case 31: __asan_after_dynamic_init(); break;
+ case 32: __asan_poison_stack_memory(0, 0); break;
+ case 33: __asan_unpoison_stack_memory(0, 0); break;
+ case 34: __asan_region_is_poisoned(0, 0); break;
+ case 35: __asan_describe_address(0); break;
}
}
@@ -261,6 +266,13 @@ static void asan_atexit() {
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+extern "C" {
+SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
+const char* __asan_default_options() { return ""; }
+} // extern "C"
+#endif
+
int NOINLINE __asan_set_error_exit_code(int exit_code) {
int old = flags()->exitcode;
flags()->exitcode = exit_code;
diff --git a/libsanitizer/asan/asan_stack.cc b/libsanitizer/asan/asan_stack.cc
index 2531a7f9df8..9b6a28e8082 100644
--- a/libsanitizer/asan/asan_stack.cc
+++ b/libsanitizer/asan/asan_stack.cc
@@ -15,9 +15,15 @@
namespace __asan {
+static bool MaybeCallAsanSymbolize(const void *pc, char *out_buffer,
+ int out_size) {
+ return (&__asan_symbolize) ? __asan_symbolize(pc, out_buffer, out_size)
+ : false;
+}
+
void PrintStack(StackTrace *stack) {
stack->PrintStack(stack->trace, stack->size, flags()->symbolize,
- flags()->strip_path_prefix, __asan_symbolize);
+ flags()->strip_path_prefix, MaybeCallAsanSymbolize);
}
} // namespace __asan
@@ -27,7 +33,7 @@ void PrintStack(StackTrace *stack) {
// Provide default implementation of __asan_symbolize that does nothing
 // and may be overridden by users who want to use their own symbolization.
// ASan on Windows has its own implementation of this.
-#ifndef _WIN32
+#if !defined(_WIN32) && !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE NOINLINE
bool __asan_symbolize(const void *pc, char *out_buffer, int out_size) {
return false;
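
PrintStack now invokes the hook only when a definition actually exists
(MaybeCallAsanSymbolize above), so platforms with weak-hook support need
no default definition at all. A client can still supply its own
symbolizer; a hedged sketch against the signature shown here:

    // Sketch: a client-supplied symbolizer.  Fill out_buffer and return
    // true to claim the frame; return false to fall back to the default
    // module+offset output.
    extern "C" bool __asan_symbolize(const void *pc, char *out_buffer,
                                     int out_size) {
      // e.g. look up 'pc' in a custom symbol table ...
      return false;  // not handled here
    }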
diff --git a/libsanitizer/asan/asan_stack.h b/libsanitizer/asan/asan_stack.h
index da622ed8eec..6a5ffc934cc 100644
--- a/libsanitizer/asan/asan_stack.h
+++ b/libsanitizer/asan/asan_stack.h
@@ -13,10 +13,11 @@
#define ASAN_STACK_H
#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "asan_flags.h"
namespace __asan {
-void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp);
+void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast);
void PrintStack(StackTrace *stack);
} // namespace __asan
@@ -25,27 +26,38 @@ void PrintStack(StackTrace *stack);
// The pc will be in the position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame.
 // The fast flag selects the frame-pointer-based unwinder where available.
-#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp) \
+#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
StackTrace stack; \
- GetStackTrace(&stack, max_s, pc, bp)
+ GetStackTrace(&stack, max_s, pc, bp, fast)
 // NOTE: A rule of thumb is to retrieve the stack trace in the interceptors
 // as early as possible (in functions exposed to the user), as we generally
 // don't want the stack trace to contain functions from ASan internals.
-#define GET_STACK_TRACE_HERE(max_size) \
+#define GET_STACK_TRACE(max_size, fast) \
GET_STACK_TRACE_WITH_PC_AND_BP(max_size, \
- StackTrace::GetCurrentPc(), GET_CURRENT_FRAME())
+ StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), fast)
-#define GET_STACK_TRACE_HERE_FOR_MALLOC \
- GET_STACK_TRACE_HERE(flags()->malloc_context_size)
+#define GET_STACK_TRACE_FATAL(pc, bp) \
+ GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp, \
+ flags()->fast_unwind_on_fatal)
-#define GET_STACK_TRACE_HERE_FOR_FREE(ptr) \
- GET_STACK_TRACE_HERE(flags()->malloc_context_size)
+#define GET_STACK_TRACE_FATAL_HERE \
+ GET_STACK_TRACE(kStackTraceMax, flags()->fast_unwind_on_fatal)
+
+#define GET_STACK_TRACE_THREAD \
+ GET_STACK_TRACE(kStackTraceMax, true)
+
+#define GET_STACK_TRACE_MALLOC \
+ GET_STACK_TRACE(flags()->malloc_context_size, \
+ flags()->fast_unwind_on_malloc)
+
+#define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC
#define PRINT_CURRENT_STACK() \
{ \
- GET_STACK_TRACE_HERE(kStackTraceMax); \
+ GET_STACK_TRACE(kStackTraceMax, \
+ flags()->fast_unwind_on_fatal); \
PrintStack(&stack); \
}
diff --git a/libsanitizer/asan/asan_stats.cc b/libsanitizer/asan/asan_stats.cc
index 566e0103e41..94dd741d325 100644
--- a/libsanitizer/asan/asan_stats.cc
+++ b/libsanitizer/asan/asan_stats.cc
@@ -15,6 +15,7 @@
#include "asan_stats.h"
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __asan {
@@ -40,8 +41,9 @@ void AsanStats::Print() {
Printf("Stats: %zuM freed by %zu calls\n", freed>>20, frees);
Printf("Stats: %zuM really freed by %zu calls\n",
really_freed>>20, real_frees);
- Printf("Stats: %zuM (%zu full pages) mmaped in %zu calls\n",
- mmaped>>20, mmaped / GetPageSizeCached(), mmaps);
+ Printf("Stats: %zuM (%zuM-%zuM) mmaped; %zu maps, %zu unmaps\n",
+ (mmaped-munmaped)>>20, mmaped>>20, munmaped>>20,
+ mmaps, munmaps);
PrintMallocStatsArray(" mmaps by size class: ", mmaped_by_size);
PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size);
@@ -59,6 +61,10 @@ static void PrintAccumulatedStats() {
// Use lock to keep reports from mixing up.
ScopedLock lock(&print_lock);
stats.Print();
+ StackDepotStats *stack_depot_stats = StackDepotGetStats();
+ Printf("Stats: StackDepot: %zd ids; %zdM mapped\n",
+ stack_depot_stats->n_uniq_ids, stack_depot_stats->mapped >> 20);
+ PrintInternalAllocatorStats();
}
} // namespace __asan
diff --git a/libsanitizer/asan/asan_stats.h b/libsanitizer/asan/asan_stats.h
index c2b3298266b..fd27451aef2 100644
--- a/libsanitizer/asan/asan_stats.h
+++ b/libsanitizer/asan/asan_stats.h
@@ -35,6 +35,8 @@ struct AsanStats {
uptr realloced;
uptr mmaps;
uptr mmaped;
+ uptr munmaps;
+ uptr munmaped;
uptr mmaped_by_size[kNumberOfSizeClasses];
uptr malloced_by_size[kNumberOfSizeClasses];
uptr freed_by_size[kNumberOfSizeClasses];
diff --git a/libsanitizer/asan/asan_thread.h b/libsanitizer/asan/asan_thread.h
index dff8c88528a..f385ec35fcd 100644
--- a/libsanitizer/asan/asan_thread.h
+++ b/libsanitizer/asan/asan_thread.h
@@ -37,6 +37,7 @@ class AsanThreadSummary {
internal_memcpy(&stack_, stack, sizeof(*stack));
}
thread_ = 0;
+ name_[0] = 0;
}
u32 tid() { return tid_; }
void set_tid(u32 tid) { tid_ = tid; }
@@ -47,6 +48,10 @@ class AsanThreadSummary {
AsanThread *thread() { return thread_; }
void set_thread(AsanThread *thread) { thread_ = thread; }
static void TSDDtor(void *tsd);
+ void set_name(const char *name) {
+ internal_strncpy(name_, name, sizeof(name_) - 1);
+ }
+ const char *name() { return name_; }
private:
u32 tid_;
@@ -54,8 +59,12 @@ class AsanThreadSummary {
bool announced_;
StackTrace stack_;
AsanThread *thread_;
+ char name_[128];
};
+// AsanThreadSummary objects are never freed and may be numerous,
+// so keep the class small.
+COMPILER_CHECK(sizeof(AsanThreadSummary) <= 4094);
+
// AsanThread are stored in TSD and destroyed when the thread dies.
class AsanThread {
public:
diff --git a/libsanitizer/asan/asan_thread_registry.cc b/libsanitizer/asan/asan_thread_registry.cc
index 9858cce22b0..8db6a57cdff 100644
--- a/libsanitizer/asan/asan_thread_registry.cc
+++ b/libsanitizer/asan/asan_thread_registry.cc
@@ -121,13 +121,14 @@ uptr AsanThreadRegistry::GetCurrentAllocatedBytes() {
uptr AsanThreadRegistry::GetHeapSize() {
ScopedLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
- return accumulated_stats_.mmaped;
+ return accumulated_stats_.mmaped - accumulated_stats_.munmaped;
}
uptr AsanThreadRegistry::GetFreeBytes() {
ScopedLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
uptr total_free = accumulated_stats_.mmaped
+ - accumulated_stats_.munmaped
+ accumulated_stats_.really_freed
+ accumulated_stats_.really_freed_redzones;
uptr total_used = accumulated_stats_.malloced
diff --git a/libsanitizer/asan/asan_win.cc b/libsanitizer/asan/asan_win.cc
index 8b7f9ef0e38..02a2e08868e 100644
--- a/libsanitizer/asan/asan_win.cc
+++ b/libsanitizer/asan/asan_win.cc
@@ -30,7 +30,8 @@ static AsanLock dbghelp_lock(LINKER_INITIALIZED);
static bool dbghelp_initialized = false;
#pragma comment(lib, "dbghelp.lib")
-void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp) {
+void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
+ (void)fast;
stack->max_size = max_s;
void *tmp[kStackTraceMax];