author     kcc <kcc@138bc75d-0d04-0410-961f-82ee72b054a4>  2013-11-04 21:33:31 +0000
committer  kcc <kcc@138bc75d-0d04-0410-961f-82ee72b054a4>  2013-11-04 21:33:31 +0000
commit     1e80ce4111e28463d870335befe7d99066b5971e (patch)
tree       7cfc103c9b6b4ce7ca19d39f91509a1b68819a63 /libsanitizer/sanitizer_common/sanitizer_allocator.cc
parent     482026b63e8a488d6b7f0eab53fcbfe12c3309ae (diff)
download   gcc-1e80ce4111e28463d870335befe7d99066b5971e.tar.gz
libsanitizer merge from upstream r191666
This may break gcc-asan on Mac, will follow up separately.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@204368 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libsanitizer/sanitizer_common/sanitizer_allocator.cc')
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator.cc  105
1 file changed, 87 insertions(+), 18 deletions(-)
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator.cc b/libsanitizer/sanitizer_common/sanitizer_allocator.cc
index a54de9d6f9a..9d38e9429ac 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator.cc
@@ -7,44 +7,103 @@
 //
 // This file is shared between AddressSanitizer and ThreadSanitizer
 // run-time libraries.
-// This allocator that is used inside run-times.
+// This allocator is used inside run-times.
 //===----------------------------------------------------------------------===//
+#include "sanitizer_allocator.h"
+#include "sanitizer_allocator_internal.h"
 #include "sanitizer_common.h"
+#include "sanitizer_flags.h"
 
-// FIXME: We should probably use more low-level allocator that would
-// mmap some pages and split them into chunks to fulfill requests.
-#if defined(__linux__) && !defined(__ANDROID__)
-extern "C" void *__libc_malloc(__sanitizer::uptr size);
+namespace __sanitizer {
+
+// ThreadSanitizer for Go uses libc malloc/free.
+#if defined(SANITIZER_GO)
+# if SANITIZER_LINUX && !SANITIZER_ANDROID
+extern "C" void *__libc_malloc(uptr size);
 extern "C" void __libc_free(void *ptr);
-# define LIBC_MALLOC __libc_malloc
-# define LIBC_FREE __libc_free
-#else  // __linux__ && !ANDROID
-# include <stdlib.h>
-# define LIBC_MALLOC malloc
-# define LIBC_FREE free
-#endif  // __linux__ && !ANDROID
+#  define LIBC_MALLOC __libc_malloc
+#  define LIBC_FREE __libc_free
+# else
+#  include <stdlib.h>
+#  define LIBC_MALLOC malloc
+#  define LIBC_FREE free
+# endif
 
-namespace __sanitizer {
+static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
+  (void)cache;
+  return LIBC_MALLOC(size);
+}
+
+static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
+  (void)cache;
+  LIBC_FREE(ptr);
+}
+
+InternalAllocator *internal_allocator() {
+  return 0;
+}
+
+#else  // SANITIZER_GO
+
+static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
+static atomic_uint8_t internal_allocator_initialized;
+static StaticSpinMutex internal_alloc_init_mu;
+
+static InternalAllocatorCache internal_allocator_cache;
+static StaticSpinMutex internal_allocator_cache_mu;
+
+InternalAllocator *internal_allocator() {
+  InternalAllocator *internal_allocator_instance =
+      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
+  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
+    SpinMutexLock l(&internal_alloc_init_mu);
+    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
+        0) {
+      internal_allocator_instance->Init();
+      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
+    }
+  }
+  return internal_allocator_instance;
+}
+
+static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
+  if (cache == 0) {
+    SpinMutexLock l(&internal_allocator_cache_mu);
+    return internal_allocator()->Allocate(&internal_allocator_cache, size, 8,
+                                          false);
+  }
+  return internal_allocator()->Allocate(cache, size, 8, false);
+}
+
+static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
+  if (cache == 0) {
+    SpinMutexLock l(&internal_allocator_cache_mu);
+    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
+  }
+  internal_allocator()->Deallocate(cache, ptr);
+}
+
+#endif  // SANITIZER_GO
 
 const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
 
-void *InternalAlloc(uptr size) {
+void *InternalAlloc(uptr size, InternalAllocatorCache *cache) {
   if (size + sizeof(u64) < size)
     return 0;
-  void *p = LIBC_MALLOC(size + sizeof(u64));
+  void *p = RawInternalAlloc(size + sizeof(u64), cache);
   if (p == 0)
     return 0;
   ((u64*)p)[0] = kBlockMagic;
   return (char*)p + sizeof(u64);
 }
 
-void InternalFree(void *addr) {
+void InternalFree(void *addr, InternalAllocatorCache *cache) {
   if (addr == 0)
     return;
   addr = (char*)addr - sizeof(u64);
-  CHECK_EQ(((u64*)addr)[0], kBlockMagic);
+  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
   ((u64*)addr)[0] = 0;
-  LIBC_FREE(addr);
+  RawInternalFree(addr, cache);
 }
 
 // LowLevelAllocator
@@ -79,4 +138,14 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
   return (max / size) < n;
 }
 
+void *AllocatorReturnNull() {
+  if (common_flags()->allocator_may_return_null)
+    return 0;
+  Report("%s's allocator is terminating the process instead of returning 0\n",
+         SanitizerToolName);
+  Report("If you don't like this behavior set allocator_may_return_null=1\n");
+  CHECK(0);
+  return 0;
+}
+
 }  // namespace __sanitizer
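A note on the initialization pattern: the new internal_allocator() lazily constructs the allocator in static placeholder storage using double-checked locking, with an acquire load on the fast path and a release store after Init(). Below is a minimal standalone sketch of the same pattern, using std::atomic and std::mutex as stand-ins for the sanitizer's own atomics and StaticSpinMutex; every name in it is illustrative, not from the patch.

#include <atomic>
#include <mutex>

struct Allocator { void Init() {} };  // stand-in for InternalAllocator

static Allocator instance_storage;            // like internal_alloc_placeholder
static std::atomic<bool> initialized{false};  // like internal_allocator_initialized
static std::mutex init_mu;                    // like internal_alloc_init_mu

Allocator *get_allocator() {
  // Fast path: the acquire load pairs with the release store below, so a
  // thread that observes 'true' also observes the effects of Init().
  if (!initialized.load(std::memory_order_acquire)) {
    std::lock_guard<std::mutex> l(init_mu);
    // Re-check under the lock: another thread may have won the race.
    if (!initialized.load(std::memory_order_relaxed)) {
      instance_storage.Init();
      initialized.store(true, std::memory_order_release);
    }
  }
  return &instance_storage;
}

The relaxed re-check is safe because the mutex already orders the two critical sections; only the publishing store needs release semantics.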
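The InternalAlloc/InternalFree pair guards every block with an 8-byte magic header: the magic is written before the returned pointer and verified on free, which catches frees of pointers that never came from this allocator, and clearing it makes a double free trip the check. Roughly the same scheme in isolation, with malloc/free standing in for RawInternalAlloc/RawInternalFree and hypothetical Demo* names:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

static const uint64_t kMagic = 0x6A6CB03ABCEBC041ull;

void *DemoAlloc(size_t size) {
  if (size + sizeof(uint64_t) < size)  // overflow guard, as in the patch
    return 0;
  void *p = malloc(size + sizeof(uint64_t));
  if (p == 0)
    return 0;
  ((uint64_t *)p)[0] = kMagic;           // stamp the header
  return (char *)p + sizeof(uint64_t);   // hand out the block past it
}

void DemoFree(void *addr) {
  if (addr == 0)
    return;
  addr = (char *)addr - sizeof(uint64_t);   // step back to the header
  assert(((uint64_t *)addr)[0] == kMagic);  // foreign or corrupted pointer?
  ((uint64_t *)addr)[0] = 0;                // clear so a double free is caught
  free(addr);
}

Note the patch also swaps the CHECK_EQ argument order to CHECK_EQ(kBlockMagic, ...), putting the expected value first, which is the conventional order for such macros.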
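The second hunk's context line shows CallocShouldReturnNullDueToOverflow returning (max / size) < n. The surrounding lines (the definition of max and the zero-size case) are not part of this hunk, so the following is only a guess at a self-contained version of the same division trick, which tests whether n * size would overflow without performing the overflowing multiplication:

#include <stddef.h>

// n * size > max  <=>  n > max / size (for size != 0), and the division
// itself cannot overflow, so this is a safe way to detect the wrap.
bool MulWouldOverflow(size_t size, size_t n) {
  if (size == 0)
    return false;  // 0 * n never overflows
  size_t max = (size_t)-1;
  return (max / size) < n;
}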