Diffstat (limited to 'libsanitizer/asan')
-rw-r--r--  libsanitizer/asan/asan_allocator.cc                     |  18
-rw-r--r--  libsanitizer/asan/asan_allocator.h                      |  20
-rw-r--r--  libsanitizer/asan/asan_allocator2.cc                    | 130
-rw-r--r--  libsanitizer/asan/asan_globals.cc                       |  32
-rw-r--r--  libsanitizer/asan/asan_intercepted_functions.h          |  70
-rw-r--r--  libsanitizer/asan/asan_interceptors.cc                  |  65
-rw-r--r--  libsanitizer/asan/asan_internal.h                       |   2
-rw-r--r--  libsanitizer/asan/asan_linux.cc                         |  35
-rw-r--r--  libsanitizer/asan/asan_lock.h                           |  39
-rw-r--r--  libsanitizer/asan/asan_mac.cc                           | 116
-rw-r--r--  libsanitizer/asan/asan_malloc_mac.cc                    | 280
-rw-r--r--  libsanitizer/asan/asan_mapping.h                        |   4
-rw-r--r--  libsanitizer/asan/asan_new_delete.cc                    |   5
-rw-r--r--  libsanitizer/asan/asan_poisoning.cc                     |   2
-rw-r--r--  libsanitizer/asan/asan_rtl.cc                           |   8
-rw-r--r--  libsanitizer/asan/asan_stats.cc                         |   5
-rw-r--r--  libsanitizer/asan/asan_thread.cc                        |   2
-rw-r--r--  libsanitizer/asan/asan_thread_registry.cc               |  16
-rw-r--r--  libsanitizer/asan/asan_thread_registry.h                |   4
-rw-r--r--  libsanitizer/asan/asan_win.cc                           |  46
-rw-r--r--  libsanitizer/asan/dynamic/asan_interceptors_dynamic.cc  |  14
21 files changed, 334 insertions, 579 deletions
diff --git a/libsanitizer/asan/asan_allocator.cc b/libsanitizer/asan/asan_allocator.cc
index b170fe723d2..f01d2db9dbc 100644
--- a/libsanitizer/asan/asan_allocator.cc
+++ b/libsanitizer/asan/asan_allocator.cc
@@ -27,7 +27,6 @@
#if ASAN_ALLOCATOR_VERSION == 1
#include "asan_interceptors.h"
#include "asan_internal.h"
-#include "asan_lock.h"
#include "asan_mapping.h"
#include "asan_stats.h"
#include "asan_report.h"
@@ -35,6 +34,7 @@
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_mutex.h"
namespace __asan {
@@ -227,7 +227,7 @@ class MallocInfo {
AsanChunk *m = 0;
AsanChunk **fl = &free_lists_[size_class];
{
- ScopedLock lock(&mu_);
+ BlockingMutexLock lock(&mu_);
for (uptr i = 0; i < n_chunks; i++) {
if (!(*fl)) {
*fl = GetNewChunks(size_class);
@@ -245,7 +245,7 @@ class MallocInfo {
void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
bool eat_free_lists) {
CHECK(flags()->quarantine_size > 0);
- ScopedLock lock(&mu_);
+ BlockingMutexLock lock(&mu_);
AsanChunkFifoList *q = &x->quarantine_;
if (q->size() > 0) {
quarantine_.PushList(q);
@@ -269,18 +269,18 @@ class MallocInfo {
}
void BypassThreadLocalQuarantine(AsanChunk *chunk) {
- ScopedLock lock(&mu_);
+ BlockingMutexLock lock(&mu_);
quarantine_.Push(chunk);
}
AsanChunk *FindChunkByAddr(uptr addr) {
- ScopedLock lock(&mu_);
+ BlockingMutexLock lock(&mu_);
return FindChunkByAddrUnlocked(addr);
}
uptr AllocationSize(uptr ptr) {
if (!ptr) return 0;
- ScopedLock lock(&mu_);
+ BlockingMutexLock lock(&mu_);
// Make sure this is our chunk and |ptr| actually points to the beginning
// of the allocated memory.
@@ -303,7 +303,7 @@ class MallocInfo {
}
void PrintStatus() {
- ScopedLock lock(&mu_);
+ BlockingMutexLock lock(&mu_);
uptr malloced = 0;
Printf(" MallocInfo: in quarantine: %zu malloced: %zu; ",
@@ -321,7 +321,7 @@ class MallocInfo {
}
PageGroup *FindPageGroup(uptr addr) {
- ScopedLock lock(&mu_);
+ BlockingMutexLock lock(&mu_);
return FindPageGroupUnlocked(addr);
}
@@ -479,7 +479,7 @@ class MallocInfo {
AsanChunk *free_lists_[kNumberOfSizeClasses];
AsanChunkFifoList quarantine_;
- AsanLock mu_;
+ BlockingMutex mu_;
PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
atomic_uint32_t n_page_groups_;
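
The ScopedLock/AsanLock pairs in this file (and in the other files touched below) are swapped for BlockingMutex/BlockingMutexLock from sanitizer_common. The guard itself is the ordinary RAII idiom; the sketch below shows that shape with hypothetical names and std::mutex underneath, not the actual sanitizer_common implementation:

// guard_sketch.cc -- minimal RAII lock-guard sketch illustrating the idiom
// behind "BlockingMutexLock lock(&mu_);". Names here are hypothetical.
#include <cstdio>
#include <mutex>

class BlockingMutexSketch {
 public:
  void Lock()   { mu_.lock(); }    // blocks the caller instead of spinning
  void Unlock() { mu_.unlock(); }
 private:
  std::mutex mu_;
};

class ScopedLockSketch {
 public:
  explicit ScopedLockSketch(BlockingMutexSketch *mu) : mu_(mu) { mu_->Lock(); }
  ~ScopedLockSketch() { mu_->Unlock(); }   // released on every exit path
  ScopedLockSketch(const ScopedLockSketch &) = delete;
  ScopedLockSketch &operator=(const ScopedLockSketch &) = delete;
 private:
  BlockingMutexSketch *mu_;
};

static BlockingMutexSketch g_mu;
static int g_counter;

int main() {
  {
    ScopedLockSketch lock(&g_mu);   // same shape as BlockingMutexLock lock(&mu_);
    g_counter++;
  }                                 // unlocked here even on an early return
  std::printf("counter = %d\n", g_counter);
  return 0;
}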
diff --git a/libsanitizer/asan/asan_allocator.h b/libsanitizer/asan/asan_allocator.h
index 4ade352a3e5..cc16ce85497 100644
--- a/libsanitizer/asan/asan_allocator.h
+++ b/libsanitizer/asan/asan_allocator.h
@@ -20,8 +20,14 @@
// We are in the process of transitioning from the old allocator (version 1)
// to a new one (version 2). The change is quite intrusive so both allocators
// will co-exist in the source base for a while. The actual allocator is chosen
-// at build time by redefining this macrozz.
-#define ASAN_ALLOCATOR_VERSION 1
+// at build time by redefining this macro.
+#ifndef ASAN_ALLOCATOR_VERSION
+# if ASAN_LINUX && !ASAN_ANDROID
+# define ASAN_ALLOCATOR_VERSION 2
+# else
+# define ASAN_ALLOCATOR_VERSION 1
+# endif
+#endif // ASAN_ALLOCATOR_VERSION
namespace __asan {
@@ -96,17 +102,21 @@ class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
struct AsanThreadLocalMallocStorage {
explicit AsanThreadLocalMallocStorage(LinkerInitialized x)
- : quarantine_(x) { }
+#if ASAN_ALLOCATOR_VERSION == 1
+ : quarantine_(x)
+#endif
+ { }
AsanThreadLocalMallocStorage() {
CHECK(REAL(memset));
REAL(memset)(this, 0, sizeof(AsanThreadLocalMallocStorage));
}
- AsanChunkFifoList quarantine_;
#if ASAN_ALLOCATOR_VERSION == 1
+ AsanChunkFifoList quarantine_;
AsanChunk *free_lists_[kNumberOfSizeClasses];
#else
- uptr allocator2_cache[1024]; // Opaque.
+ uptr quarantine_cache[16];
+ uptr allocator2_cache[96 * (512 * 8 + 16)]; // Opaque.
#endif
void CommitBack();
};
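
The allocator version now defaults per platform but stays overridable from the build because the #define is wrapped in #ifndef. A small sketch of the same selection pattern, using the standard __linux__/__ANDROID__ macros in place of the ASAN_LINUX/ASAN_ANDROID macros from the real headers (the file name and -D spelling are just illustrative):

// version_select.cc -- build-time selection sketch.
// Override the default with e.g.:  g++ -DASAN_ALLOCATOR_VERSION=1 version_select.cc
#include <cstdio>

#ifndef ASAN_ALLOCATOR_VERSION
# if defined(__linux__) && !defined(__ANDROID__)
#  define ASAN_ALLOCATOR_VERSION 2   // Linux (non-Android) defaults to allocator2
# else
#  define ASAN_ALLOCATOR_VERSION 1   // other platforms keep the old allocator
# endif
#endif

int main() {
  std::printf("built against allocator version %d\n", ASAN_ALLOCATOR_VERSION);
  return 0;
}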
diff --git a/libsanitizer/asan/asan_allocator2.cc b/libsanitizer/asan/asan_allocator2.cc
index d12ccb7f23b..4aa5141f2a7 100644
--- a/libsanitizer/asan/asan_allocator2.cc
+++ b/libsanitizer/asan/asan_allocator2.cc
@@ -25,6 +25,7 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_quarantine.h"
namespace __asan {
@@ -90,15 +91,6 @@ static const uptr kMaxThreadLocalQuarantine =
static const uptr kReturnOnZeroMalloc = 2048; // Zero page is protected.
-static int inited = 0;
-
-static void Init() {
- if (inited) return;
- __asan_init();
- inited = true; // this must happen before any threads are created.
- allocator.Init();
-}
-
// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
@@ -244,31 +236,26 @@ void AsanChunkView::GetFreeStack(StackTrace *stack) {
chunk_->FreeStackSize());
}
-class Quarantine: public AsanChunkFifoList {
- public:
- void SwallowThreadLocalQuarantine(AsanThreadLocalMallocStorage *ms) {
- AsanChunkFifoList *q = &ms->quarantine_;
- if (!q->size()) return;
- SpinMutexLock l(&mutex_);
- PushList(q);
- PopAndDeallocateLoop(ms);
- }
+struct QuarantineCallback;
+typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
+typedef AsanQuarantine::Cache QuarantineCache;
+static AsanQuarantine quarantine(LINKER_INITIALIZED);
+static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
+static AllocatorCache fallback_allocator_cache;
+static SpinMutex fallback_mutex;
- void BypassThreadLocalQuarantine(AsanChunk *m) {
- SpinMutexLock l(&mutex_);
- Push(m);
- }
+QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
+ CHECK(ms);
+ CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
+ return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
+}
- private:
- void PopAndDeallocateLoop(AsanThreadLocalMallocStorage *ms) {
- while (size() > (uptr)flags()->quarantine_size) {
- PopAndDeallocate(ms);
- }
+struct QuarantineCallback {
+ explicit QuarantineCallback(AllocatorCache *cache)
+ : cache_(cache) {
}
- void PopAndDeallocate(AsanThreadLocalMallocStorage *ms) {
- CHECK_GT(size(), 0);
- AsanChunk *m = Pop();
- CHECK(m);
+
+ void Recycle(AsanChunk *m) {
CHECK(m->chunk_state == CHUNK_QUARANTINE);
m->chunk_state = CHUNK_AVAILABLE;
CHECK_NE(m->alloc_tid, kInvalidTid);
@@ -288,34 +275,27 @@ class Quarantine: public AsanChunkFifoList {
thread_stats.real_frees++;
thread_stats.really_freed += m->UsedSize();
- allocator.Deallocate(GetAllocatorCache(ms), p);
+ allocator.Deallocate(cache_, p);
}
- SpinMutex mutex_;
-};
-static Quarantine quarantine;
+ void *Allocate(uptr size) {
+ return allocator.Allocate(cache_, size, 1, false);
+ }
-void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
- CHECK(q->size() > 0);
- size_ += q->size();
- append_back(q);
- q->clear();
-}
+ void Deallocate(void *p) {
+ allocator.Deallocate(cache_, p);
+ }
-void AsanChunkFifoList::Push(AsanChunk *n) {
- push_back(n);
- size_ += n->UsedSize();
-}
+ AllocatorCache *cache_;
+};
-// Interesting performance observation: this function takes up to 15% of overal
-// allocator time. That's because *first_ has been evicted from cache long time
-// ago. Not sure if we can or want to do anything with this.
-AsanChunk *AsanChunkFifoList::Pop() {
- CHECK(first_);
- AsanChunk *res = front();
- size_ -= res->UsedSize();
- pop_front();
- return res;
+static void Init() {
+ static int inited = 0;
+ if (inited) return;
+ __asan_init();
+ inited = true; // this must happen before any threads are created.
+ allocator.Init();
+ quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}
static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
@@ -355,9 +335,18 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
}
AsanThread *t = asanThreadRegistry().GetCurrent();
- AllocatorCache *cache = t ? GetAllocatorCache(&t->malloc_storage()) : 0;
- void *allocated = allocator.Allocate(cache, needed_size, 8, false);
+ void *allocated;
+ if (t) {
+ AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+ allocated = allocator.Allocate(cache, needed_size, 8, false);
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *cache = &fallback_allocator_cache;
+ allocated = allocator.Allocate(cache, needed_size, 8, false);
+ }
uptr alloc_beg = reinterpret_cast<uptr>(allocated);
+ // Clear the first allocated word (an old kMemalignMagic may still be there).
+ reinterpret_cast<uptr *>(alloc_beg)[0] = 0;
uptr alloc_end = alloc_beg + needed_size;
uptr beg_plus_redzone = alloc_beg + rz_size;
uptr user_beg = beg_plus_redzone;
@@ -432,7 +421,7 @@ static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
// Flip the chunk_state atomically to avoid race on double-free.
u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
- memory_order_acq_rel);
+ memory_order_relaxed);
if (old_chunk_state == CHUNK_QUARANTINE)
ReportDoubleFree((uptr)ptr, stack);
@@ -466,13 +455,15 @@ static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
// Push into quarantine.
if (t) {
- AsanChunkFifoList &q = t->malloc_storage().quarantine_;
- q.Push(m);
-
- if (q.size() > kMaxThreadLocalQuarantine)
- quarantine.SwallowThreadLocalQuarantine(&t->malloc_storage());
+ AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
+ AllocatorCache *ac = GetAllocatorCache(ms);
+ quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
+ m, m->UsedSize());
} else {
- quarantine.BypassThreadLocalQuarantine(m);
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *ac = &fallback_allocator_cache;
+ quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
+ m, m->UsedSize());
}
ASAN_FREE_HOOK(ptr);
@@ -584,7 +575,8 @@ AsanChunkView FindHeapChunkByAddress(uptr addr) {
}
void AsanThreadLocalMallocStorage::CommitBack() {
- quarantine.SwallowThreadLocalQuarantine(this);
+ AllocatorCache *ac = GetAllocatorCache(this);
+ quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
allocator.SwallowCache(GetAllocatorCache(this));
}
@@ -681,16 +673,18 @@ uptr __asan_get_estimated_allocated_size(uptr size) {
}
bool __asan_get_ownership(const void *p) {
- return AllocationSize(reinterpret_cast<uptr>(p)) > 0;
+ uptr ptr = reinterpret_cast<uptr>(p);
+ return (ptr == kReturnOnZeroMalloc) || (AllocationSize(ptr) > 0);
}
uptr __asan_get_allocated_size(const void *p) {
if (p == 0) return 0;
- uptr allocated_size = AllocationSize(reinterpret_cast<uptr>(p));
+ uptr ptr = reinterpret_cast<uptr>(p);
+ uptr allocated_size = AllocationSize(ptr);
// Die if p is not malloced or if it is already freed.
- if (allocated_size == 0) {
+ if (allocated_size == 0 && ptr != kReturnOnZeroMalloc) {
GET_STACK_TRACE_FATAL_HERE;
- ReportAsanGetAllocatedSizeNotOwned(reinterpret_cast<uptr>(p), &stack);
+ ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
}
return allocated_size;
}
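
In allocator2 the hand-written quarantine list is replaced by the generic Quarantine<Callback, Node> template: Deallocate() calls Put() with a per-thread quarantine cache (or the fallback cache under fallback_mutex when no AsanThread is registered yet), and the callback's Recycle() hands chunks back to the allocator once the quarantine outgrows flags()->quarantine_size. The sketch below shows only that Put/Drain/Recycle shape; the types are simplified stand-ins, and the real template additionally threads a per-thread cache through Put() and Drain():

// quarantine_sketch.cc -- illustrates the recycle-callback shape used above.
#include <cstddef>
#include <cstdio>
#include <deque>

struct Chunk { size_t size; };

// Stand-in for Quarantine<QuarantineCallback, AsanChunk>.
template <class Callback, class Node>
class QuarantineSketch {
 public:
  explicit QuarantineSketch(size_t max_size) : max_size_(max_size), size_(0) {}

  // Queue a freed node; recycle the oldest nodes once the cap is exceeded.
  void Put(Callback cb, Node *n, size_t n_size) {
    q_.push_back(n);
    size_ += n_size;
    while (size_ > max_size_) {
      Node *old = q_.front();
      q_.pop_front();
      size_ -= old->size;
      cb.Recycle(old);          // the real callback calls allocator.Deallocate()
    }
  }

  // Flush everything -- the CommitBack()/Drain() path when a thread exits.
  void Drain(Callback cb) {
    while (!q_.empty()) {
      Node *old = q_.front();
      q_.pop_front();
      size_ -= old->size;
      cb.Recycle(old);
    }
  }

 private:
  size_t max_size_;
  size_t size_;
  std::deque<Node *> q_;
};

struct RecycleToHeap {
  void Recycle(Chunk *c) {
    std::printf("recycling chunk of %zu bytes\n", c->size);
    delete c;
  }
};

int main() {
  QuarantineSketch<RecycleToHeap, Chunk> quarantine(/*max_size=*/64);
  for (int i = 0; i < 8; i++)
    quarantine.Put(RecycleToHeap(), new Chunk{16}, 16);  // frees stay quarantined
  quarantine.Drain(RecycleToHeap());                     // thread teardown
  return 0;
}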
diff --git a/libsanitizer/asan/asan_globals.cc b/libsanitizer/asan/asan_globals.cc
index b195a9091b3..88aeefa8fe5 100644
--- a/libsanitizer/asan/asan_globals.cc
+++ b/libsanitizer/asan/asan_globals.cc
@@ -11,13 +11,13 @@
//===----------------------------------------------------------------------===//
#include "asan_interceptors.h"
#include "asan_internal.h"
-#include "asan_lock.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_thread.h"
#include "sanitizer/asan_interface.h"
+#include "sanitizer_common/sanitizer_mutex.h"
namespace __asan {
@@ -28,7 +28,7 @@ struct ListOfGlobals {
ListOfGlobals *next;
};
-static AsanLock mu_for_globals(LINKER_INITIALIZED);
+static BlockingMutex mu_for_globals(LINKER_INITIALIZED);
static LowLevelAllocator allocator_for_globals;
static ListOfGlobals *list_of_all_globals;
static ListOfGlobals *list_of_dynamic_init_globals;
@@ -53,14 +53,9 @@ void PoisonRedZones(const Global &g) {
}
}
-static uptr GetAlignedSize(uptr size) {
- return ((size + kGlobalAndStackRedzone - 1) / kGlobalAndStackRedzone)
- * kGlobalAndStackRedzone;
-}
-
bool DescribeAddressIfGlobal(uptr addr) {
if (!flags()->report_globals) return false;
- ScopedLock lock(&mu_for_globals);
+ BlockingMutexLock lock(&mu_for_globals);
bool res = false;
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
const Global &g = *l->g;
@@ -140,23 +135,10 @@ static void UnpoisonGlobal(const Global *g) {
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
-// Register one global with a default redzone.
-void __asan_register_global(uptr addr, uptr size,
- const char *name) {
- if (!flags()->report_globals) return;
- ScopedLock lock(&mu_for_globals);
- Global *g = (Global *)allocator_for_globals.Allocate(sizeof(Global));
- g->beg = addr;
- g->size = size;
- g->size_with_redzone = GetAlignedSize(size) + kGlobalAndStackRedzone;
- g->name = name;
- RegisterGlobal(g);
-}
-
// Register an array of globals.
void __asan_register_globals(__asan_global *globals, uptr n) {
if (!flags()->report_globals) return;
- ScopedLock lock(&mu_for_globals);
+ BlockingMutexLock lock(&mu_for_globals);
for (uptr i = 0; i < n; i++) {
RegisterGlobal(&globals[i]);
}
@@ -166,7 +148,7 @@ void __asan_register_globals(__asan_global *globals, uptr n) {
// We must do this when a shared objects gets dlclosed.
void __asan_unregister_globals(__asan_global *globals, uptr n) {
if (!flags()->report_globals) return;
- ScopedLock lock(&mu_for_globals);
+ BlockingMutexLock lock(&mu_for_globals);
for (uptr i = 0; i < n; i++) {
UnregisterGlobal(&globals[i]);
}
@@ -179,7 +161,7 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
void __asan_before_dynamic_init(uptr first_addr, uptr last_addr) {
if (!flags()->check_initialization_order) return;
CHECK(list_of_dynamic_init_globals);
- ScopedLock lock(&mu_for_globals);
+ BlockingMutexLock lock(&mu_for_globals);
bool from_current_tu = false;
// The list looks like:
// a => ... => b => last_addr => ... => first_addr => c => ...
@@ -200,7 +182,7 @@ void __asan_before_dynamic_init(uptr first_addr, uptr last_addr) {
// TU are poisoned. It simply unpoisons all dynamically initialized globals.
void __asan_after_dynamic_init() {
if (!flags()->check_initialization_order) return;
- ScopedLock lock(&mu_for_globals);
+ BlockingMutexLock lock(&mu_for_globals);
for (ListOfGlobals *l = list_of_dynamic_init_globals; l; l = l->next)
UnpoisonGlobal(l->g);
}
diff --git a/libsanitizer/asan/asan_intercepted_functions.h b/libsanitizer/asan/asan_intercepted_functions.h
index 8341bc65745..2d678ab7000 100644
--- a/libsanitizer/asan/asan_intercepted_functions.h
+++ b/libsanitizer/asan/asan_intercepted_functions.h
@@ -16,6 +16,8 @@
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
+#include <stdarg.h>
+
using __sanitizer::uptr;
// Use macro to describe if specific function should be
@@ -40,10 +42,8 @@ using __sanitizer::uptr;
#if defined(__linux__)
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
-# define ASAN_INTERCEPT_PRCTL 1
#else
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
-# define ASAN_INTERCEPT_PRCTL 0
#endif
#if !defined(__APPLE__)
@@ -105,7 +105,7 @@ DECLARE_FUNCTION_AND_WRAPPER(void, siglongjmp, void *env, int value);
# endif
# if ASAN_INTERCEPT___CXA_THROW
DECLARE_FUNCTION_AND_WRAPPER(void, __cxa_throw, void *a, void *b, void *c);
-#endif
+# endif
// string.h / strings.h
DECLARE_FUNCTION_AND_WRAPPER(int, memcmp,
@@ -139,9 +139,9 @@ DECLARE_FUNCTION_AND_WRAPPER(char*, strdup, const char *s);
# if ASAN_INTERCEPT_STRNLEN
DECLARE_FUNCTION_AND_WRAPPER(uptr, strnlen, const char *s, uptr maxlen);
# endif
-#if ASAN_INTERCEPT_INDEX
+# if ASAN_INTERCEPT_INDEX
DECLARE_FUNCTION_AND_WRAPPER(char*, index, const char *string, int c);
-#endif
+# endif
// stdlib.h
DECLARE_FUNCTION_AND_WRAPPER(int, atoi, const char *nptr);
@@ -165,6 +165,13 @@ DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, pread64, int fd, void *buf,
SIZE_T count, OFF64_T offset);
# endif
+# if SANITIZER_INTERCEPT_WRITE
+DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, write, int fd, void *ptr, SIZE_T count);
+# endif
+# if SANITIZER_INTERCEPT_PWRITE
+DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count);
+# endif
+
# if ASAN_INTERCEPT_MLOCKX
// mlock/munlock
DECLARE_FUNCTION_AND_WRAPPER(int, mlock, const void *addr, SIZE_T len);
@@ -186,7 +193,18 @@ DECLARE_FUNCTION_AND_WRAPPER(int, pthread_create,
void *(*start_routine)(void*), void *arg);
# endif
-#if defined(__APPLE__)
+DECLARE_FUNCTION_AND_WRAPPER(int, vscanf, const char *format, va_list ap);
+DECLARE_FUNCTION_AND_WRAPPER(int, vsscanf, const char *str, const char *format,
+ va_list ap);
+DECLARE_FUNCTION_AND_WRAPPER(int, vfscanf, void *stream, const char *format,
+ va_list ap);
+DECLARE_FUNCTION_AND_WRAPPER(int, scanf, const char *format, ...);
+DECLARE_FUNCTION_AND_WRAPPER(int, fscanf,
+ void* stream, const char *format, ...);
+DECLARE_FUNCTION_AND_WRAPPER(int, sscanf, // NOLINT
+ const char *str, const char *format, ...);
+
+# if defined(__APPLE__)
typedef void* pthread_workqueue_t;
typedef void* pthread_workitem_handle_t;
@@ -196,8 +214,6 @@ typedef void* dispatch_source_t;
typedef u64 dispatch_time_t;
typedef void (*dispatch_function_t)(void *block);
typedef void* (*worker_t)(void *block);
-typedef void* CFStringRef;
-typedef void* CFAllocatorRef;
DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_async_f,
dispatch_queue_t dq,
@@ -215,11 +231,7 @@ DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_group_async_f,
dispatch_group_t group, dispatch_queue_t dq,
void *ctxt, dispatch_function_t func);
-DECLARE_FUNCTION_AND_WRAPPER(void, __CFInitialize, void);
-DECLARE_FUNCTION_AND_WRAPPER(CFStringRef, CFStringCreateCopy,
- CFAllocatorRef alloc, CFStringRef str);
-DECLARE_FUNCTION_AND_WRAPPER(void, free, void* ptr);
-#if MAC_INTERPOSE_FUNCTIONS && !defined(MISSING_BLOCKS_SUPPORT)
+# if MAC_INTERPOSE_FUNCTIONS && !defined(MISSING_BLOCKS_SUPPORT)
DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_group_async,
dispatch_group_t dg,
dispatch_queue_t dq, void (^work)(void));
@@ -231,9 +243,35 @@ DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_source_set_event_handler,
dispatch_source_t ds, void (^work)(void));
DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_source_set_cancel_handler,
dispatch_source_t ds, void (^work)(void));
-#endif // MAC_INTERPOSE_FUNCTIONS
-#endif // __APPLE__
+# endif // MAC_INTERPOSE_FUNCTIONS
+
+typedef void malloc_zone_t;
+typedef size_t vm_size_t;
+DECLARE_FUNCTION_AND_WRAPPER(malloc_zone_t *, malloc_create_zone,
+ vm_size_t start_size, unsigned flags);
+DECLARE_FUNCTION_AND_WRAPPER(malloc_zone_t *, malloc_default_zone, void);
+DECLARE_FUNCTION_AND_WRAPPER(
+ malloc_zone_t *, malloc_default_purgeable_zone, void);
+DECLARE_FUNCTION_AND_WRAPPER(void, malloc_make_purgeable, void *ptr);
+DECLARE_FUNCTION_AND_WRAPPER(int, malloc_make_nonpurgeable, void *ptr);
+DECLARE_FUNCTION_AND_WRAPPER(void, malloc_set_zone_name,
+ malloc_zone_t *zone, const char *name);
+DECLARE_FUNCTION_AND_WRAPPER(void *, malloc, size_t size);
+DECLARE_FUNCTION_AND_WRAPPER(void, free, void *ptr);
+DECLARE_FUNCTION_AND_WRAPPER(void *, realloc, void *ptr, size_t size);
+DECLARE_FUNCTION_AND_WRAPPER(void *, calloc, size_t nmemb, size_t size);
+DECLARE_FUNCTION_AND_WRAPPER(void *, valloc, size_t size);
+DECLARE_FUNCTION_AND_WRAPPER(size_t, malloc_good_size, size_t size);
+DECLARE_FUNCTION_AND_WRAPPER(int, posix_memalign,
+ void **memptr, size_t alignment, size_t size);
+DECLARE_FUNCTION_AND_WRAPPER(void, _malloc_fork_prepare, void);
+DECLARE_FUNCTION_AND_WRAPPER(void, _malloc_fork_parent, void);
+DECLARE_FUNCTION_AND_WRAPPER(void, _malloc_fork_child, void);
+
+
+
+# endif // __APPLE__
} // extern "C"
-#endif
+#endif // defined(__APPLE__) || (defined(_WIN32) && !defined(_DLL))
#endif // ASAN_INTERCEPTED_FUNCTIONS_H
diff --git a/libsanitizer/asan/asan_interceptors.cc b/libsanitizer/asan/asan_interceptors.cc
index 26daee1727c..98329f38e61 100644
--- a/libsanitizer/asan/asan_interceptors.cc
+++ b/libsanitizer/asan/asan_interceptors.cc
@@ -73,15 +73,30 @@ static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
return internal_strnlen(s, maxlen);
}
+void SetThreadName(const char *name) {
+ AsanThread *t = asanThreadRegistry().GetCurrent();
+ if (t)
+ t->summary()->set_name(name);
+}
+
} // namespace __asan
// ---------------------- Wrappers ---------------- {{{1
using namespace __asan; // NOLINT
-#define COMMON_INTERCEPTOR_WRITE_RANGE(ptr, size) ASAN_WRITE_RANGE(ptr, size)
-#define COMMON_INTERCEPTOR_READ_RANGE(ptr, size) ASAN_READ_RANGE(ptr, size)
-#define COMMON_INTERCEPTOR_ENTER(func, ...) ENSURE_ASAN_INITED()
-#include "sanitizer_common/sanitizer_common_interceptors.h"
+#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
+ ASAN_WRITE_RANGE(ptr, size)
+#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) ASAN_READ_RANGE(ptr, size)
+#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ do { \
+ ctx = 0; \
+ (void)ctx; \
+ ENSURE_ASAN_INITED(); \
+ } while (false)
+#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) do { } while (false)
+#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) do { } while (false)
+#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) SetThreadName(name)
+#include "sanitizer_common/sanitizer_common_interceptors.inc"
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
AsanThread *t = (AsanThread*)arg;
@@ -122,6 +137,18 @@ DEFINE_REAL(int, sigaction, int signum, const struct sigaction *act,
#endif // ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
#if ASAN_INTERCEPT_SWAPCONTEXT
+static void ClearShadowMemoryForContextStack(uptr stack, uptr ssize) {
+ // Align to page size.
+ uptr PageSize = GetPageSizeCached();
+ uptr bottom = stack & ~(PageSize - 1);
+ ssize += stack - bottom;
+ ssize = RoundUpTo(ssize, PageSize);
+ static const uptr kMaxSaneContextStackSize = 1 << 22; // 4 Mb
+ if (ssize && ssize <= kMaxSaneContextStackSize) {
+ PoisonShadow(bottom, ssize, 0);
+ }
+}
+
INTERCEPTOR(int, swapcontext, struct ucontext_t *oucp,
struct ucontext_t *ucp) {
static bool reported_warning = false;
@@ -132,16 +159,18 @@ INTERCEPTOR(int, swapcontext, struct ucontext_t *oucp,
}
// Clear shadow memory for new context (it may share stack
// with current context).
- ClearShadowMemoryForContext(ucp);
+ uptr stack, ssize;
+ ReadContextStack(ucp, &stack, &ssize);
+ ClearShadowMemoryForContextStack(stack, ssize);
int res = REAL(swapcontext)(oucp, ucp);
// swapcontext technically does not return, but program may swap context to
// "oucp" later, that would look as if swapcontext() returned 0.
// We need to clear shadow for ucp once again, as it may be in arbitrary
// state.
- ClearShadowMemoryForContext(ucp);
+ ClearShadowMemoryForContextStack(stack, ssize);
return res;
}
-#endif
+#endif // ASAN_INTERCEPT_SWAPCONTEXT
INTERCEPTOR(void, longjmp, void *env, int val) {
__asan_handle_no_return();
@@ -162,25 +191,6 @@ INTERCEPTOR(void, siglongjmp, void *env, int val) {
}
#endif
-#if ASAN_INTERCEPT_PRCTL
-#define PR_SET_NAME 15
-INTERCEPTOR(int, prctl, int option,
- unsigned long arg2, unsigned long arg3, // NOLINT
- unsigned long arg4, unsigned long arg5) { // NOLINT
- int res = REAL(prctl(option, arg2, arg3, arg4, arg5));
- if (option == PR_SET_NAME) {
- AsanThread *t = asanThreadRegistry().GetCurrent();
- if (t) {
- char buff[17];
- internal_strncpy(buff, (char*)arg2, 16);
- buff[16] = 0;
- t->summary()->set_name(buff);
- }
- }
- return res;
-}
-#endif
-
#if ASAN_INTERCEPT___CXA_THROW
INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
CHECK(REAL(__cxa_throw));
@@ -727,9 +737,6 @@ void InitializeAsanInterceptors() {
#if ASAN_INTERCEPT_SIGLONGJMP
ASAN_INTERCEPT_FUNC(siglongjmp);
#endif
-#if ASAN_INTERCEPT_PRCTL
- ASAN_INTERCEPT_FUNC(prctl);
-#endif
// Intercept exception handling functions.
#if ASAN_INTERCEPT___CXA_THROW
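
The shared interceptor bodies now come from sanitizer_common_interceptors.inc, which expands against whatever COMMON_INTERCEPTOR_* macros the including tool defined just before the #include (here: ASan's range checks, no-op fd hooks, and SetThreadName). A toy sketch of that include-with-macros pattern, with hypothetical names and the shared part inlined instead of pulled from a real .inc file:

// inc_pattern_sketch.cc -- toy version of "define the hook macros, then include
// the shared body". Everything here is hypothetical; the real .inc defines many
// interceptors and the ASan macros expand to shadow-memory range checks.
#include <cstddef>
#include <cstdio>
#include <cstring>

// Tool-specific hooks, analogous to the COMMON_INTERCEPTOR_* macros above.
#define TOOL_ENTER(ctx, func, ...) \
  do { ctx = 0; (void)ctx; } while (false)
#define TOOL_READ_RANGE(ctx, ptr, size) \
  std::printf("read %zu bytes at %p\n", (size_t)(size), (const void *)(ptr))

// ---- what a shared .inc could expand to, inlined here for the sketch ----
static size_t tool_strlen(const char *s) {
  int ctx;
  TOOL_ENTER(ctx, strlen, s);        // common prologue supplied by the tool
  size_t n = std::strlen(s);         // forward to the "real" function
  TOOL_READ_RANGE(ctx, s, n + 1);    // common access check supplied by the tool
  return n;
}
// --------------------------------------------------------------------------

int main() {
  std::printf("len = %zu\n", tool_strlen("hello"));
  return 0;
}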
diff --git a/libsanitizer/asan/asan_internal.h b/libsanitizer/asan/asan_internal.h
index 1717fce66fb..a9c6c0f9022 100644
--- a/libsanitizer/asan/asan_internal.h
+++ b/libsanitizer/asan/asan_internal.h
@@ -114,7 +114,7 @@ bool AsanInterceptsSignal(int signum);
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();
void InstallSignalHandlers();
-void ClearShadowMemoryForContext(void *context);
+void ReadContextStack(void *context, uptr *stack, uptr *ssize);
void AsanPlatformThreadInit();
// Wrapper for TLS/TSD.
diff --git a/libsanitizer/asan/asan_linux.cc b/libsanitizer/asan/asan_linux.cc
index 0e6c6280f0b..a030fcd3972 100644
--- a/libsanitizer/asan/asan_linux.cc
+++ b/libsanitizer/asan/asan_linux.cc
@@ -13,7 +13,6 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
-#include "asan_lock.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer_common/sanitizer_libc.h"
@@ -100,26 +99,6 @@ void AsanPlatformThreadInit() {
// Nothing here for now.
}
-AsanLock::AsanLock(LinkerInitialized) {
- // We assume that pthread_mutex_t initialized to all zeroes is a valid
- // unlocked mutex. We can not use PTHREAD_MUTEX_INITIALIZER as it triggers
- // a gcc warning:
- // extended initializer lists only available with -std=c++0x or -std=gnu++0x
-}
-
-void AsanLock::Lock() {
- CHECK(sizeof(pthread_mutex_t) <= sizeof(opaque_storage_));
- pthread_mutex_lock((pthread_mutex_t*)&opaque_storage_);
- CHECK(!owner_);
- owner_ = (uptr)pthread_self();
-}
-
-void AsanLock::Unlock() {
- CHECK(owner_ == (uptr)pthread_self());
- owner_ = 0;
- pthread_mutex_unlock((pthread_mutex_t*)&opaque_storage_);
-}
-
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
#if defined(__arm__) || \
defined(__powerpc__) || defined(__powerpc64__) || \
@@ -139,19 +118,13 @@ void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
}
#if !ASAN_ANDROID
-void ClearShadowMemoryForContext(void *context) {
+void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
ucontext_t *ucp = (ucontext_t*)context;
- uptr sp = (uptr)ucp->uc_stack.ss_sp;
- uptr size = ucp->uc_stack.ss_size;
- // Align to page size.
- uptr PageSize = GetPageSizeCached();
- uptr bottom = sp & ~(PageSize - 1);
- size += sp - bottom;
- size = RoundUpTo(size, PageSize);
- PoisonShadow(bottom, size, 0);
+ *stack = (uptr)ucp->uc_stack.ss_sp;
+ *ssize = ucp->uc_stack.ss_size;
}
#else
-void ClearShadowMemoryForContext(void *context) {
+void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED();
}
#endif
diff --git a/libsanitizer/asan/asan_lock.h b/libsanitizer/asan/asan_lock.h
index 2392e3c0e7b..8b137891791 100644
--- a/libsanitizer/asan/asan_lock.h
+++ b/libsanitizer/asan/asan_lock.h
@@ -1,40 +1 @@
-//===-- asan_lock.h ---------------------------------------------*- C++ -*-===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// A wrapper for a simple lock.
-//===----------------------------------------------------------------------===//
-#ifndef ASAN_LOCK_H
-#define ASAN_LOCK_H
-#include "sanitizer_common/sanitizer_mutex.h"
-#include "asan_internal.h"
-
-// The locks in ASan are global objects and they are never destroyed to avoid
-// at-exit races (that is, a lock is being used by other threads while the main
-// thread is doing atexit destructors).
-// We define the class using opaque storage to avoid including system headers.
-
-namespace __asan {
-
-class AsanLock {
- public:
- explicit AsanLock(LinkerInitialized);
- void Lock();
- void Unlock();
- bool IsLocked() { return owner_ != 0; }
- private:
- uptr opaque_storage_[10];
- uptr owner_; // for debugging and for malloc_introspection_t interface
-};
-
-typedef GenericScopedLock<AsanLock> ScopedLock;
-
-} // namespace __asan
-
-#endif // ASAN_LOCK_H
diff --git a/libsanitizer/asan/asan_mac.cc b/libsanitizer/asan/asan_mac.cc
index 094c69ff6a2..a49d5c5ab8f 100644
--- a/libsanitizer/asan/asan_mac.cc
+++ b/libsanitizer/asan/asan_mac.cc
@@ -34,7 +34,6 @@
#include <stdlib.h> // for free()
#include <unistd.h>
#include <libkern/OSAtomic.h>
-#include <CoreFoundation/CFString.h>
namespace __asan {
@@ -129,33 +128,6 @@ bool AsanInterceptsSignal(int signum) {
}
void AsanPlatformThreadInit() {
- // For the first program thread, we can't replace the allocator before
- // __CFInitialize() has been called. If it hasn't, we'll call
- // MaybeReplaceCFAllocator() later on this thread.
- // For other threads __CFInitialize() has been called before their creation.
- // See also asan_malloc_mac.cc.
- if (((CFRuntimeBase*)kCFAllocatorSystemDefault)->_cfisa) {
- MaybeReplaceCFAllocator();
- }
-}
-
-AsanLock::AsanLock(LinkerInitialized) {
- // We assume that OS_SPINLOCK_INIT is zero
-}
-
-void AsanLock::Lock() {
- CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
- CHECK(OS_SPINLOCK_INIT == 0);
- CHECK(owner_ != (uptr)pthread_self());
- OSSpinLockLock((OSSpinLock*)&opaque_storage_);
- CHECK(!owner_);
- owner_ = (uptr)pthread_self();
-}
-
-void AsanLock::Unlock() {
- CHECK(owner_ == (uptr)pthread_self());
- owner_ = 0;
- OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
@@ -170,7 +142,7 @@ void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
}
}
-void ClearShadowMemoryForContext(void *context) {
+void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED();
}
@@ -254,9 +226,6 @@ mach_error_t __interception_deallocate_island(void *ptr) {
// The implementation details are at
// http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c
-typedef void* pthread_workqueue_t;
-typedef void* pthread_workitem_handle_t;
-
typedef void* dispatch_group_t;
typedef void* dispatch_queue_t;
typedef void* dispatch_source_t;
@@ -287,9 +256,6 @@ void dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func);
void dispatch_group_async_f(dispatch_group_t group, dispatch_queue_t dq,
void *ctxt, dispatch_function_t func);
-int pthread_workqueue_additem_np(pthread_workqueue_t workq,
- void *(*workitem_func)(void *), void * workitem_arg,
- pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp);
} // extern "C"
static ALWAYS_INLINE
@@ -444,66 +410,6 @@ INTERCEPTOR(void, dispatch_source_set_event_handler,
}
#endif
-// The following stuff has been extremely helpful while looking for the
-// unhandled functions that spawned jobs on Chromium shutdown. If the verbosity
-// level is 2 or greater, we wrap pthread_workqueue_additem_np() in order to
-// find the points of worker thread creation (each of such threads may be used
-// to run several tasks, that's why this is not enough to support the whole
-// libdispatch API.
-extern "C"
-void *wrap_workitem_func(void *arg) {
- if (flags()->verbosity >= 2) {
- Report("wrap_workitem_func: %p, pthread_self: %p\n", arg, pthread_self());
- }
- asan_block_context_t *ctxt = (asan_block_context_t*)arg;
- worker_t fn = (worker_t)(ctxt->func);
- void *result = fn(ctxt->block);
- GET_STACK_TRACE_THREAD;
- asan_free(arg, &stack, FROM_MALLOC);
- return result;
-}
-
-INTERCEPTOR(int, pthread_workqueue_additem_np, pthread_workqueue_t workq,
- void *(*workitem_func)(void *), void * workitem_arg,
- pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp) {
- GET_STACK_TRACE_THREAD;
- asan_block_context_t *asan_ctxt =
- (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), &stack);
- asan_ctxt->block = workitem_arg;
- asan_ctxt->func = (dispatch_function_t)workitem_func;
- asan_ctxt->parent_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
- if (flags()->verbosity >= 2) {
- Report("pthread_workqueue_additem_np: %p\n", asan_ctxt);
- PRINT_CURRENT_STACK();
- }
- return REAL(pthread_workqueue_additem_np)(workq, wrap_workitem_func,
- asan_ctxt, itemhandlep,
- gencountp);
-}
-
-// See http://opensource.apple.com/source/CF/CF-635.15/CFString.c
-int __CFStrIsConstant(CFStringRef str) {
- CFRuntimeBase *base = (CFRuntimeBase*)str;
-#if __LP64__
- return base->_rc == 0;
-#else
- return (base->_cfinfo[CF_RC_BITS]) == 0;
-#endif
-}
-
-INTERCEPTOR(CFStringRef, CFStringCreateCopy, CFAllocatorRef alloc,
- CFStringRef str) {
- if (__CFStrIsConstant(str)) {
- return str;
- } else {
- return REAL(CFStringCreateCopy)(alloc, str);
- }
-}
-
-DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
-
-DECLARE_REAL_AND_INTERCEPTOR(void, __CFInitialize, void)
-
namespace __asan {
void InitializeMacInterceptors() {
@@ -512,26 +418,6 @@ void InitializeMacInterceptors() {
CHECK(INTERCEPT_FUNCTION(dispatch_after_f));
CHECK(INTERCEPT_FUNCTION(dispatch_barrier_async_f));
CHECK(INTERCEPT_FUNCTION(dispatch_group_async_f));
- // We don't need to intercept pthread_workqueue_additem_np() to support the
- // libdispatch API, but it helps us to debug the unsupported functions. Let's
- // intercept it only during verbose runs.
- if (flags()->verbosity >= 2) {
- CHECK(INTERCEPT_FUNCTION(pthread_workqueue_additem_np));
- }
- // Normally CFStringCreateCopy should not copy constant CF strings.
- // Replacing the default CFAllocator causes constant strings to be copied
- // rather than just returned, which leads to bugs in big applications like
- // Chromium and WebKit, see
- // http://code.google.com/p/address-sanitizer/issues/detail?id=10
- // Until this problem is fixed we need to check that the string is
- // non-constant before calling CFStringCreateCopy.
- CHECK(INTERCEPT_FUNCTION(CFStringCreateCopy));
- // Some of the library functions call free() directly, so we have to
- // intercept it.
- CHECK(INTERCEPT_FUNCTION(free));
- if (flags()->replace_cfallocator) {
- CHECK(INTERCEPT_FUNCTION(__CFInitialize));
- }
}
} // namespace __asan
diff --git a/libsanitizer/asan/asan_malloc_mac.cc b/libsanitizer/asan/asan_malloc_mac.cc
index 97aa4424d33..3ae6c594650 100644
--- a/libsanitizer/asan/asan_malloc_mac.cc
+++ b/libsanitizer/asan/asan_malloc_mac.cc
@@ -34,85 +34,108 @@ using namespace __asan; // NOLINT
// TODO(glider): do we need both zones?
static malloc_zone_t *system_malloc_zone = 0;
-static malloc_zone_t *system_purgeable_zone = 0;
static malloc_zone_t asan_zone;
-CFAllocatorRef cf_asan = 0;
-
-// _CFRuntimeCreateInstance() checks whether the supplied allocator is
-// kCFAllocatorSystemDefault and, if it is not, stores the allocator reference
-// at the beginning of the allocated memory and returns the pointer to the
-// allocated memory plus sizeof(CFAllocatorRef). See
-// http://www.opensource.apple.com/source/CF/CF-635.21/CFRuntime.c
-// Pointers returned by _CFRuntimeCreateInstance() can then be passed directly
-// to free() or CFAllocatorDeallocate(), which leads to false invalid free
-// reports.
-// The corresponding rdar bug is http://openradar.appspot.com/radar?id=1796404.
-void* ALWAYS_INLINE get_saved_cfallocator_ref(void *ptr) {
- if (flags()->replace_cfallocator) {
- // Make sure we're not hitting the previous page. This may be incorrect
- // if ASan's malloc returns an address ending with 0xFF8, which will be
- // then padded to a page boundary with a CFAllocatorRef.
- uptr arith_ptr = (uptr)ptr;
- if ((arith_ptr & 0xFFF) > sizeof(CFAllocatorRef)) {
- CFAllocatorRef *saved =
- (CFAllocatorRef*)(arith_ptr - sizeof(CFAllocatorRef));
- if ((*saved == cf_asan) && asan_mz_size(saved)) ptr = (void*)saved;
- }
+
+INTERCEPTOR(malloc_zone_t *, malloc_create_zone,
+ vm_size_t start_size, unsigned zone_flags) {
+ if (!asan_inited) __asan_init();
+ GET_STACK_TRACE_MALLOC;
+ malloc_zone_t *new_zone =
+ (malloc_zone_t*)asan_malloc(sizeof(asan_zone), &stack);
+ internal_memcpy(new_zone, &asan_zone, sizeof(asan_zone));
+ new_zone->zone_name = NULL; // The name will be changed anyway.
+ return new_zone;
+}
+
+INTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) {
+ if (!asan_inited) __asan_init();
+ return &asan_zone;
+}
+
+INTERCEPTOR(malloc_zone_t *, malloc_default_purgeable_zone, void) {
+ // FIXME: ASan should support purgeable allocations.
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=139
+ if (!asan_inited) __asan_init();
+ return &asan_zone;
+}
+
+INTERCEPTOR(void, malloc_make_purgeable, void *ptr) {
+ // FIXME: ASan should support purgeable allocations. Ignoring them is fine
+ // for now.
+ if (!asan_inited) __asan_init();
+}
+
+INTERCEPTOR(int, malloc_make_nonpurgeable, void *ptr) {
+ // FIXME: ASan should support purgeable allocations. Ignoring them is fine
+ // for now.
+ if (!asan_inited) __asan_init();
+ // Must return 0 if the contents were not purged since the last call to
+ // malloc_make_purgeable().
+ return 0;
+}
+
+INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
+ if (!asan_inited) __asan_init();
+ // Allocate |strlen("asan-") + 1 + internal_strlen(name)| bytes.
+ size_t buflen = 6 + (name ? internal_strlen(name) : 0);
+ InternalScopedBuffer<char> new_name(buflen);
+ if (name && zone->introspect == asan_zone.introspect) {
+ internal_snprintf(new_name.data(), buflen, "asan-%s", name);
+ name = new_name.data();
}
- return ptr;
+
+ // Call the system malloc's implementation for both external and our zones,
+ // since that appropriately changes VM region protections on the zone.
+ REAL(malloc_set_zone_name)(zone, name);
+}
+
+INTERCEPTOR(void *, malloc, size_t size) {
+ if (!asan_inited) __asan_init();
+ GET_STACK_TRACE_MALLOC;
+ void *res = asan_malloc(size, &stack);
+ return res;
}
-// The free() implementation provided by OS X calls malloc_zone_from_ptr()
-// to find the owner of |ptr|. If the result is 0, an invalid free() is
-// reported. Our implementation falls back to asan_free() in this case
-// in order to print an ASan-style report.
-//
-// For the objects created by _CFRuntimeCreateInstance a CFAllocatorRef is
-// placed at the beginning of the allocated chunk and the pointer returned by
-// our allocator is off by sizeof(CFAllocatorRef). This pointer can be then
-// passed directly to free(), which will lead to errors.
-// To overcome this we're checking whether |ptr-sizeof(CFAllocatorRef)|
-// contains a pointer to our CFAllocator (assuming no other allocator is used).
-// See http://code.google.com/p/address-sanitizer/issues/detail?id=70 for more
-// info.
INTERCEPTOR(void, free, void *ptr) {
- malloc_zone_t *zone = malloc_zone_from_ptr(ptr);
- if (zone) {
-#if defined(MAC_OS_X_VERSION_10_6) && \
- MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
- if ((zone->version >= 6) && (zone->free_definite_size)) {
- zone->free_definite_size(zone, ptr, malloc_size(ptr));
- } else {
- malloc_zone_free(zone, ptr);
- }
-#else
- malloc_zone_free(zone, ptr);
-#endif
- } else {
- if (!asan_mz_size(ptr)) ptr = get_saved_cfallocator_ref(ptr);
- GET_STACK_TRACE_FREE;
- asan_free(ptr, &stack, FROM_MALLOC);
- }
+ if (!asan_inited) __asan_init();
+ if (!ptr) return;
+ GET_STACK_TRACE_FREE;
+ asan_free(ptr, &stack, FROM_MALLOC);
}
-// We can't always replace the default CFAllocator with cf_asan right in
-// ReplaceSystemMalloc(), because it is sometimes called before
-// __CFInitialize(), when the default allocator is invalid and replacing it may
-// crash the program. Instead we wait for the allocator to initialize and jump
-// in just after __CFInitialize(). Nobody is going to allocate memory using
-// CFAllocators before that, so we won't miss anything.
-//
-// See http://code.google.com/p/address-sanitizer/issues/detail?id=87
-// and http://opensource.apple.com/source/CF/CF-550.43/CFRuntime.c
-INTERCEPTOR(void, __CFInitialize, void) {
- // If the runtime is built as dynamic library, __CFInitialize wrapper may be
- // called before __asan_init.
-#if !MAC_INTERPOSE_FUNCTIONS
- CHECK(flags()->replace_cfallocator);
- CHECK(asan_inited);
-#endif
- REAL(__CFInitialize)();
- if (!cf_asan && asan_inited) MaybeReplaceCFAllocator();
+INTERCEPTOR(void *, realloc, void *ptr, size_t size) {
+ if (!asan_inited) __asan_init();
+ GET_STACK_TRACE_MALLOC;
+ return asan_realloc(ptr, size, &stack);
+}
+
+INTERCEPTOR(void *, calloc, size_t nmemb, size_t size) {
+ if (!asan_inited) __asan_init();
+ GET_STACK_TRACE_MALLOC;
+ return asan_calloc(nmemb, size, &stack);
+}
+
+INTERCEPTOR(void *, valloc, size_t size) {
+ if (!asan_inited) __asan_init();
+ GET_STACK_TRACE_MALLOC;
+ return asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
+}
+
+INTERCEPTOR(size_t, malloc_good_size, size_t size) {
+ if (!asan_inited) __asan_init();
+ return asan_zone.introspect->good_size(&asan_zone, size);
+}
+
+INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) {
+ if (!asan_inited) __asan_init();
+ CHECK(memptr);
+ GET_STACK_TRACE_MALLOC;
+ void *result = asan_memalign(alignment, size, &stack, FROM_MALLOC);
+ if (result) {
+ *memptr = result;
+ return 0;
+ }
+ return -1;
}
namespace {
@@ -132,15 +155,6 @@ void *mz_malloc(malloc_zone_t *zone, size_t size) {
return asan_malloc(size, &stack);
}
-void *cf_malloc(CFIndex size, CFOptionFlags hint, void *info) {
- if (!asan_inited) {
- CHECK(system_malloc_zone);
- return malloc_zone_malloc(system_malloc_zone, size);
- }
- GET_STACK_TRACE_MALLOC;
- return asan_malloc(size, &stack);
-}
-
void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
if (!asan_inited) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
@@ -172,31 +186,14 @@ void *mz_valloc(malloc_zone_t *zone, size_t size) {
void ALWAYS_INLINE free_common(void *context, void *ptr) {
if (!ptr) return;
- if (asan_mz_size(ptr)) {
- GET_STACK_TRACE_FREE;
+ GET_STACK_TRACE_FREE;
+ // FIXME: need to retire this flag.
+ if (!flags()->mac_ignore_invalid_free) {
asan_free(ptr, &stack, FROM_MALLOC);
} else {
- // If the pointer does not belong to any of the zones, use one of the
- // fallback methods to free memory.
- malloc_zone_t *zone_ptr = malloc_zone_from_ptr(ptr);
- if (zone_ptr == system_purgeable_zone) {
- // allocations from malloc_default_purgeable_zone() done before
- // __asan_init() may be occasionally freed via free_common().
- // see http://code.google.com/p/address-sanitizer/issues/detail?id=99.
- malloc_zone_free(zone_ptr, ptr);
- } else {
- // If the memory chunk pointer was moved to store additional
- // CFAllocatorRef, fix it back.
- ptr = get_saved_cfallocator_ref(ptr);
- GET_STACK_TRACE_FREE;
- if (!flags()->mac_ignore_invalid_free) {
- asan_free(ptr, &stack, FROM_MALLOC);
- } else {
- GET_ZONE_FOR_PTR(ptr);
- WarnMacFreeUnallocated((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
- return;
- }
- }
+ GET_ZONE_FOR_PTR(ptr);
+ WarnMacFreeUnallocated((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
+ return;
}
}
@@ -205,10 +202,6 @@ void mz_free(malloc_zone_t *zone, void *ptr) {
free_common(zone, ptr);
}
-void cf_free(void *ptr, void *info) {
- free_common(info, ptr);
-}
-
void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
if (!ptr) {
GET_STACK_TRACE_MALLOC;
@@ -228,29 +221,11 @@ void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
}
}
-void *cf_realloc(void *ptr, CFIndex size, CFOptionFlags hint, void *info) {
- if (!ptr) {
- GET_STACK_TRACE_MALLOC;
- return asan_malloc(size, &stack);
- } else {
- if (asan_mz_size(ptr)) {
- GET_STACK_TRACE_MALLOC;
- return asan_realloc(ptr, size, &stack);
- } else {
- // We can't recover from reallocating an unknown address, because
- // this would require reading at most |size| bytes from
- // potentially unaccessible memory.
- GET_STACK_TRACE_FREE;
- GET_ZONE_FOR_PTR(ptr);
- ReportMacCfReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
- }
- }
-}
-
void mz_destroy(malloc_zone_t* zone) {
// A no-op -- we will not be destroyed!
- Printf("mz_destroy() called -- ignoring\n");
+ Report("mz_destroy() called -- ignoring\n");
}
+
// from AvailabilityMacros.h
#if defined(MAC_OS_X_VERSION_10_6) && \
MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
@@ -322,23 +297,7 @@ boolean_t mi_zone_locked(malloc_zone_t *zone) {
} // unnamed namespace
-extern int __CFRuntimeClassTableSize;
-
namespace __asan {
-void MaybeReplaceCFAllocator() {
- static CFAllocatorContext asan_context = {
- /*version*/ 0, /*info*/ &asan_zone,
- /*retain*/ 0, /*release*/ 0,
- /*copyDescription*/0,
- /*allocate*/ &cf_malloc,
- /*reallocate*/ &cf_realloc,
- /*deallocate*/ &cf_free,
- /*preferredSize*/ 0 };
- if (!cf_asan)
- cf_asan = CFAllocatorCreate(kCFAllocatorUseContext, &asan_context);
- if (flags()->replace_cfallocator && CFAllocatorGetDefault() != cf_asan)
- CFAllocatorSetDefault(cf_asan);
-}
void ReplaceSystemMalloc() {
static malloc_introspection_t asan_introspection;
@@ -378,41 +337,10 @@ void ReplaceSystemMalloc() {
asan_zone.free_definite_size = 0;
asan_zone.memalign = &mz_memalign;
asan_introspection.zone_locked = &mi_zone_locked;
-
- // Request the default purgable zone to force its creation. The
- // current default zone is registered with the purgable zone for
- // doing tiny and small allocs. Sadly, it assumes that the default
- // zone is the szone implementation from OS X and will crash if it
- // isn't. By creating the zone now, this will be true and changing
- // the default zone won't cause a problem. (OS X 10.6 and higher.)
- system_purgeable_zone = malloc_default_purgeable_zone();
#endif
- // Register the ASan zone. At this point, it will not be the
- // default zone.
+ // Register the ASan zone.
malloc_zone_register(&asan_zone);
-
- // Unregister and reregister the default zone. Unregistering swaps
- // the specified zone with the last one registered which for the
- // default zone makes the more recently registered zone the default
- // zone. The default zone is then re-registered to ensure that
- // allocations made from it earlier will be handled correctly.
- // Things are not guaranteed to work that way, but it's how they work now.
- system_malloc_zone = malloc_default_zone();
- malloc_zone_unregister(system_malloc_zone);
- malloc_zone_register(system_malloc_zone);
- // Make sure the default allocator was replaced.
- CHECK(malloc_default_zone() == &asan_zone);
-
- // If __CFInitialize() hasn't been called yet, cf_asan will be created and
- // installed as the default allocator after __CFInitialize() finishes (see
- // the interceptor for __CFInitialize() above). Otherwise install cf_asan
- // right now. On both Snow Leopard and Lion __CFInitialize() calls
- // __CFAllocatorInitialize(), which initializes the _base._cfisa field of
- // the default allocators we check here.
- if (((CFRuntimeBase*)kCFAllocatorSystemDefault)->_cfisa) {
- MaybeReplaceCFAllocator();
- }
}
} // namespace __asan
diff --git a/libsanitizer/asan/asan_mapping.h b/libsanitizer/asan/asan_mapping.h
index 54e21b79678..be645c4afcd 100644
--- a/libsanitizer/asan/asan_mapping.h
+++ b/libsanitizer/asan/asan_mapping.h
@@ -109,6 +109,10 @@ static inline bool AddrIsInShadow(uptr a) {
}
static inline bool AddrIsInShadowGap(uptr a) {
+ // In zero-based shadow mode we treat addresses near zero as addresses
+ // in shadow gap as well.
+ if (SHADOW_OFFSET == 0)
+ return a <= kShadowGapEnd;
return a >= kShadowGapBeg && a <= kShadowGapEnd;
}
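
In zero-based shadow mode the region near address zero is treated as part of the gap as well, so the check collapses to a <= kShadowGapEnd. A tiny sketch of the predicate with made-up bounds (the constants below are illustrative, not ASan's real layout):

// shadow_gap_sketch.cc -- the gap predicate with illustrative constants.
#include <cstdint>
#include <cstdio>

static const uint64_t kShadowOffset = 0;                  // zero-based shadow
static const uint64_t kShadowGapBeg = 0x000100000000ull;  // made-up bounds
static const uint64_t kShadowGapEnd = 0x1fffffffffffull;

static bool AddrIsInShadowGap(uint64_t a) {
  if (kShadowOffset == 0)
    return a <= kShadowGapEnd;       // addresses near zero count as gap too
  return a >= kShadowGapBeg && a <= kShadowGapEnd;
}

int main() {
  std::printf("0x10 in gap: %d, high addr in gap: %d\n",
              (int)AddrIsInShadowGap(0x10),
              (int)AddrIsInShadowGap(0x7f0000000000ull));
  return 0;
}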
diff --git a/libsanitizer/asan/asan_new_delete.cc b/libsanitizer/asan/asan_new_delete.cc
index 6597b93366f..e41135d0e51 100644
--- a/libsanitizer/asan/asan_new_delete.cc
+++ b/libsanitizer/asan/asan_new_delete.cc
@@ -25,8 +25,9 @@ void ReplaceOperatorsNewAndDelete() { }
using namespace __asan; // NOLINT
-// On Android new() goes through malloc interceptors.
-#if !ASAN_ANDROID
+// On Mac and Android new() goes through malloc interceptors.
+// See also https://code.google.com/p/address-sanitizer/issues/detail?id=131.
+#if !ASAN_ANDROID && !ASAN_MAC
// Fake std::nothrow_t to avoid including <new>.
namespace std {
diff --git a/libsanitizer/asan/asan_poisoning.cc b/libsanitizer/asan/asan_poisoning.cc
index a00bafffae0..11edca57ae0 100644
--- a/libsanitizer/asan/asan_poisoning.cc
+++ b/libsanitizer/asan/asan_poisoning.cc
@@ -23,7 +23,7 @@ void PoisonShadow(uptr addr, uptr size, u8 value) {
CHECK(AddrIsAlignedByGranularity(addr));
CHECK(AddrIsAlignedByGranularity(addr + size));
uptr shadow_beg = MemToShadow(addr);
- uptr shadow_end = MemToShadow(addr + size);
+ uptr shadow_end = MemToShadow(addr + size - SHADOW_GRANULARITY) + 1;
CHECK(REAL(memset) != 0);
REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
}
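
For a granularity-aligned region the new shadow_end is the same value as before, but it is now derived from the last granule inside the region rather than from the first address past it, so the mapping is never evaluated at an address just beyond the region (which matters when the region ends exactly at the top of the application range; compare the stack_top_ - 1 change in asan_thread.cc below). A small check of that identity under the usual scaled mapping, with illustrative parameters:

// shadow_end_sketch.cc -- checks that, for granularity-aligned regions,
//   MemToShadow(addr + size - G) + 1 == MemToShadow(addr + size).
// The scale and offset below are examples, not ASan's real constants.
#include <cassert>
#include <cstdint>
#include <cstdio>

static const uint64_t kShadowScale  = 3;              // granularity G = 8 bytes
static const uint64_t kShadowOffset = 0x7fff8000ull;  // example offset only

static uint64_t MemToShadow(uint64_t a) {
  return (a >> kShadowScale) + kShadowOffset;
}

int main() {
  const uint64_t G = 1ull << kShadowScale;
  const uint64_t addr = 0x600000000000ull, size = 4096;
  uint64_t old_end = MemToShadow(addr + size);          // previous formula
  uint64_t new_end = MemToShadow(addr + size - G) + 1;  // formula after this patch
  assert(old_end == new_end);   // same shadow range; only the argument changed
  std::printf("shadow_end = 0x%llx\n", (unsigned long long)new_end);
  return 0;
}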
diff --git a/libsanitizer/asan/asan_rtl.cc b/libsanitizer/asan/asan_rtl.cc
index 6480bf4cc35..04d2d7af63f 100644
--- a/libsanitizer/asan/asan_rtl.cc
+++ b/libsanitizer/asan/asan_rtl.cc
@@ -12,7 +12,6 @@
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
-#include "asan_lock.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_stack.h"
@@ -140,10 +139,12 @@ void InitializeFlags(Flags *f, const char *env) {
f->allow_reexec = true;
f->print_full_thread_history = true;
f->log_path = 0;
- f->fast_unwind_on_fatal = true;
+ f->fast_unwind_on_fatal = false;
f->fast_unwind_on_malloc = true;
f->poison_heap = true;
- f->alloc_dealloc_mismatch = true;
+ // Turn off alloc/dealloc mismatch checker on Mac for now.
+ // TODO(glider): Fix known issues and enable this back.
+ f->alloc_dealloc_mismatch = (ASAN_MAC == 0);
f->use_stack_depot = true; // Only affects allocator2.
// Override from user-specified string.
@@ -228,7 +229,6 @@ static NOINLINE void force_interface_symbols() {
case 8: __asan_report_store4(0); break;
case 9: __asan_report_store8(0); break;
case 10: __asan_report_store16(0); break;
- case 11: __asan_register_global(0, 0, 0); break;
case 12: __asan_register_globals(0, 0); break;
case 13: __asan_unregister_globals(0, 0); break;
case 14: __asan_set_death_callback(0); break;
diff --git a/libsanitizer/asan/asan_stats.cc b/libsanitizer/asan/asan_stats.cc
index 94dd741d325..1187bd92f97 100644
--- a/libsanitizer/asan/asan_stats.cc
+++ b/libsanitizer/asan/asan_stats.cc
@@ -11,7 +11,6 @@
//===----------------------------------------------------------------------===//
#include "asan_interceptors.h"
#include "asan_internal.h"
-#include "asan_lock.h"
#include "asan_stats.h"
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
@@ -53,13 +52,13 @@ void AsanStats::Print() {
malloc_large, malloc_small_slow);
}
-static AsanLock print_lock(LINKER_INITIALIZED);
+static BlockingMutex print_lock(LINKER_INITIALIZED);
static void PrintAccumulatedStats() {
AsanStats stats;
asanThreadRegistry().GetAccumulatedStats(&stats);
// Use lock to keep reports from mixing up.
- ScopedLock lock(&print_lock);
+ BlockingMutexLock lock(&print_lock);
stats.Print();
StackDepotStats *stack_depot_stats = StackDepotGetStats();
Printf("Stats: StackDepot: %zd ids; %zdM mapped\n",
diff --git a/libsanitizer/asan/asan_thread.cc b/libsanitizer/asan/asan_thread.cc
index cc2e777a977..02f49dd59ef 100644
--- a/libsanitizer/asan/asan_thread.cc
+++ b/libsanitizer/asan/asan_thread.cc
@@ -72,7 +72,7 @@ void AsanThread::Destroy() {
void AsanThread::Init() {
SetThreadStackTopAndBottom();
CHECK(AddrIsInMem(stack_bottom_));
- CHECK(AddrIsInMem(stack_top_));
+ CHECK(AddrIsInMem(stack_top_ - 1));
ClearShadowForThreadStack();
if (flags()->verbosity >= 1) {
int local = 0;
diff --git a/libsanitizer/asan/asan_thread_registry.cc b/libsanitizer/asan/asan_thread_registry.cc
index 8db6a57cdff..8fda9b6ea0a 100644
--- a/libsanitizer/asan/asan_thread_registry.cc
+++ b/libsanitizer/asan/asan_thread_registry.cc
@@ -42,7 +42,7 @@ void AsanThreadRegistry::Init() {
}
void AsanThreadRegistry::RegisterThread(AsanThread *thread) {
- ScopedLock lock(&mu_);
+ BlockingMutexLock lock(&mu_);
u32 tid = n_threads_;
n_threads_++;
CHECK(n_threads_ < kMaxNumberOfThreads);
@@ -54,7 +54,7 @@ void AsanThreadRegistry::RegisterThread(AsanThread *thread) {
}
void AsanThreadRegistry::UnregisterThread(AsanThread *thread) {
- ScopedLock lock(&mu_);
+ BlockingMutexLock lock(&mu_);
FlushToAccumulatedStatsUnlocked(&thread->stats());
AsanThreadSummary *summary = thread->summary();
CHECK(summary);
@@ -103,13 +103,13 @@ AsanStats &AsanThreadRegistry::GetCurrentThreadStats() {
}
void AsanThreadRegistry::GetAccumulatedStats(AsanStats *stats) {
- ScopedLock lock(&mu_);
+ BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
internal_memcpy(stats, &accumulated_stats_, sizeof(accumulated_stats_));
}
uptr AsanThreadRegistry::GetCurrentAllocatedBytes() {
- ScopedLock lock(&mu_);
+ BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
uptr malloced = accumulated_stats_.malloced;
uptr freed = accumulated_stats_.freed;
@@ -119,13 +119,13 @@ uptr AsanThreadRegistry::GetCurrentAllocatedBytes() {
}
uptr AsanThreadRegistry::GetHeapSize() {
- ScopedLock lock(&mu_);
+ BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
return accumulated_stats_.mmaped - accumulated_stats_.munmaped;
}
uptr AsanThreadRegistry::GetFreeBytes() {
- ScopedLock lock(&mu_);
+ BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
uptr total_free = accumulated_stats_.mmaped
- accumulated_stats_.munmaped
@@ -141,7 +141,7 @@ uptr AsanThreadRegistry::GetFreeBytes() {
// Return several stats counters with a single call to
// UpdateAccumulatedStatsUnlocked().
void AsanThreadRegistry::FillMallocStatistics(AsanMallocStats *malloc_stats) {
- ScopedLock lock(&mu_);
+ BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
malloc_stats->blocks_in_use = accumulated_stats_.mallocs;
malloc_stats->size_in_use = accumulated_stats_.malloced;
@@ -156,7 +156,7 @@ AsanThreadSummary *AsanThreadRegistry::FindByTid(u32 tid) {
}
AsanThread *AsanThreadRegistry::FindThreadByStackAddress(uptr addr) {
- ScopedLock lock(&mu_);
+ BlockingMutexLock lock(&mu_);
for (u32 tid = 0; tid < n_threads_; tid++) {
AsanThread *t = thread_summaries_[tid]->thread();
if (!t || !(t->fake_stack().StackSize())) continue;
diff --git a/libsanitizer/asan/asan_thread_registry.h b/libsanitizer/asan/asan_thread_registry.h
index bb6b2678faa..8c3d0c886e0 100644
--- a/libsanitizer/asan/asan_thread_registry.h
+++ b/libsanitizer/asan/asan_thread_registry.h
@@ -13,10 +13,10 @@
#ifndef ASAN_THREAD_REGISTRY_H
#define ASAN_THREAD_REGISTRY_H
-#include "asan_lock.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_mutex.h"
namespace __asan {
@@ -71,7 +71,7 @@ class AsanThreadRegistry {
// per-thread AsanStats.
uptr max_malloced_memory_;
u32 n_threads_;
- AsanLock mu_;
+ BlockingMutex mu_;
bool inited_;
};
diff --git a/libsanitizer/asan/asan_win.cc b/libsanitizer/asan/asan_win.cc
index 02a2e08868e..6acfeebc8bf 100644
--- a/libsanitizer/asan/asan_win.cc
+++ b/libsanitizer/asan/asan_win.cc
@@ -15,18 +15,16 @@
#include <dbghelp.h>
#include <stdlib.h>
-#include <new> // FIXME: temporarily needed for placement new in AsanLock.
-
#include "asan_interceptors.h"
#include "asan_internal.h"
-#include "asan_lock.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_mutex.h"
namespace __asan {
// ---------------------- Stacktraces, symbols, etc. ---------------- {{{1
-static AsanLock dbghelp_lock(LINKER_INITIALIZED);
+static BlockingMutex dbghelp_lock(LINKER_INITIALIZED);
static bool dbghelp_initialized = false;
#pragma comment(lib, "dbghelp.lib")
@@ -54,42 +52,6 @@ void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
stack->trace[i] = (uptr)tmp[i + offset];
}
-// ---------------------- AsanLock ---------------- {{{1
-enum LockState {
- LOCK_UNINITIALIZED = 0,
- LOCK_READY = -1,
-};
-
-AsanLock::AsanLock(LinkerInitialized li) {
- // FIXME: see comments in AsanLock::Lock() for the details.
- CHECK(li == LINKER_INITIALIZED || owner_ == LOCK_UNINITIALIZED);
-
- CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
- InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
- owner_ = LOCK_READY;
-}
-
-void AsanLock::Lock() {
- if (owner_ == LOCK_UNINITIALIZED) {
- // FIXME: hm, global AsanLock objects are not initialized?!?
- // This might be a side effect of the clang+cl+link Frankenbuild...
- new(this) AsanLock((LinkerInitialized)(LINKER_INITIALIZED + 1));
-
- // FIXME: If it turns out the linker doesn't invoke our
- // constructors, we should probably manually Lock/Unlock all the global
- // locks while we're starting in one thread to avoid double-init races.
- }
- EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
- CHECK(owner_ == LOCK_READY);
- owner_ = GetThreadSelf();
-}
-
-void AsanLock::Unlock() {
- CHECK(owner_ == GetThreadSelf());
- owner_ = LOCK_READY;
- LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
-}
-
// ---------------------- TSD ---------------- {{{1
static bool tsd_key_inited = false;
@@ -138,7 +100,7 @@ void AsanPlatformThreadInit() {
// Nothing here for now.
}
-void ClearShadowMemoryForContext(void *context) {
+void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED();
}
@@ -150,7 +112,7 @@ using namespace __asan; // NOLINT
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE NOINLINE
bool __asan_symbolize(const void *addr, char *out_buffer, int buffer_size) {
- ScopedLock lock(&dbghelp_lock);
+ BlockingMutexLock lock(&dbghelp_lock);
if (!dbghelp_initialized) {
SymSetOptions(SYMOPT_DEFERRED_LOADS |
SYMOPT_UNDNAME |
diff --git a/libsanitizer/asan/dynamic/asan_interceptors_dynamic.cc b/libsanitizer/asan/dynamic/asan_interceptors_dynamic.cc
index 3f6f06a5118..757c704bd55 100644
--- a/libsanitizer/asan/dynamic/asan_interceptors_dynamic.cc
+++ b/libsanitizer/asan/dynamic/asan_interceptors_dynamic.cc
@@ -99,9 +99,19 @@ const interpose_substitution substitutions[]
INTERPOSE_FUNCTION(signal),
INTERPOSE_FUNCTION(sigaction),
- INTERPOSE_FUNCTION(__CFInitialize),
- INTERPOSE_FUNCTION(CFStringCreateCopy),
+ INTERPOSE_FUNCTION(malloc_create_zone),
+ INTERPOSE_FUNCTION(malloc_default_zone),
+ INTERPOSE_FUNCTION(malloc_default_purgeable_zone),
+ INTERPOSE_FUNCTION(malloc_make_purgeable),
+ INTERPOSE_FUNCTION(malloc_make_nonpurgeable),
+ INTERPOSE_FUNCTION(malloc_set_zone_name),
+ INTERPOSE_FUNCTION(malloc),
INTERPOSE_FUNCTION(free),
+ INTERPOSE_FUNCTION(realloc),
+ INTERPOSE_FUNCTION(calloc),
+ INTERPOSE_FUNCTION(valloc),
+ INTERPOSE_FUNCTION(malloc_good_size),
+ INTERPOSE_FUNCTION(posix_memalign),
};
} // namespace __asan
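
The substitution table above relies on dyld's __DATA,__interpose mechanism: each entry pairs a wrap_* replacement with the function it replaces, and dyld rebinds callers in other images at load time. A minimal standalone sketch of such an interpose entry (placeholder wrapper, not the ASan one; the build and run commands are illustrative):

// interpose_sketch.cc -- minimal dyld interposing example for a Mac dylib.
// Build: clang++ -dynamiclib interpose_sketch.cc -o libwrap.dylib
// Run:   DYLD_INSERT_LIBRARIES=./libwrap.dylib ./some_program
#include <stddef.h>
#include <stdlib.h>

static size_t g_mallocs;  // crude call counter updated by the wrapper

extern "C" void *wrap_malloc(size_t size) {
  ++g_mallocs;            // bookkeeping only; no printing here to avoid re-entry
  return malloc(size);    // the wrapper's own call still reaches the real malloc
}

struct interpose_substitution {
  const void *replacement;  // our wrapper
  const void *original;     // the function being replaced
};

// dyld reads this section at load time and rebinds references to the original.
__attribute__((used)) static const interpose_substitution substitutions[]
    __attribute__((section("__DATA,__interpose"))) = {
  { (const void *)&wrap_malloc, (const void *)&malloc },
};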