diff options
author | Alex Shlyapnikov <alekseys@google.com> | 2017-01-11 22:10:35 +0000 |
---|---|---|
committer | Alex Shlyapnikov <alekseys@google.com> | 2017-01-11 22:10:35 +0000 |
commit | 2aa605e2b83d6028107557fe0cb834546d9c28d4 (patch) | |
tree | 63193aab531743cd3ae6d9fdf6ea570ee0ac2681 | |
parent | 2d5a51da2c5ce01ab0f43556388de96efc70d3f1 (diff) | |
download | compiler-rt-2aa605e2b83d6028107557fe0cb834546d9c28d4.tar.gz |
Repoison the entire ASan chunk if it is not in active use.
Summary:
Repoisoning just the minimal redzones might leave an unpoisoned
gap of the size of the actual redzone minus the minimal redzone size.
After ASan activation the actual redzone might be bigger than the minimal
size and the ASan allocator assumes that the chunk returned by the common
allocator is either entirely poisoned or entirely not poisoned (it's too
expensive to check the entire chunk or always poison one).
Reviewers: eugenis
Subscribers: kubabrecka, llvm-commits
Differential Revision: https://reviews.llvm.org/D28577
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@291714 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r-- | lib/asan/asan_allocator.cc | 12 | ||||
-rw-r--r-- | test/asan/TestCases/Posix/start-deactivated.cc | 37 |
2 files changed, 38 insertions, 11 deletions
diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc index 2cf9d08d4..ee9b1a6a0 100644 --- a/lib/asan/asan_allocator.cc +++ b/lib/asan/asan_allocator.cc @@ -269,24 +269,24 @@ struct Allocator { } void RePoisonChunk(uptr chunk) { - // This could a user-facing chunk (with redzones), or some internal + // This could be a user-facing chunk (with redzones), or some internal // housekeeping chunk, like TransferBatch. Start by assuming the former. AsanChunk *ac = GetAsanChunk((void *)chunk); uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac); uptr beg = ac->Beg(); uptr end = ac->Beg() + ac->UsedSize(true); uptr chunk_end = chunk + allocated_size; - if (chunk < beg && beg < end && end <= chunk_end) { - // Looks like a valid AsanChunk. Or maybe not. Be conservative and only - // poison the redzones. + if (chunk < beg && beg < end && end <= chunk_end && + ac->chunk_state == CHUNK_ALLOCATED) { + // Looks like a valid AsanChunk in use, poison redzones only. PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic); uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY); FastPoisonShadowPartialRightRedzone( end_aligned_down, end - end_aligned_down, chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic); } else { - // This can not be an AsanChunk. Poison everything. It may be reused as - // AsanChunk later. + // This is either not an AsanChunk or freed or quarantined AsanChunk. + // In either case, poison everything. 
PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic); } } diff --git a/test/asan/TestCases/Posix/start-deactivated.cc b/test/asan/TestCases/Posix/start-deactivated.cc index 9691404eb..b223f04e4 100644 --- a/test/asan/TestCases/Posix/start-deactivated.cc +++ b/test/asan/TestCases/Posix/start-deactivated.cc @@ -21,6 +21,7 @@ // XFAIL: arm-linux-gnueabi #if !defined(SHARED_LIB) + #include <assert.h> #include <dlfcn.h> #include <stdio.h> @@ -32,13 +33,13 @@ #include "sanitizer/asan_interface.h" -constexpr unsigned nPtrs = 200; -char *ptrs[nPtrs]; - void test_malloc_shadow(char *p, size_t sz, bool expect_redzones) { + // Last byte of the left redzone, if present. assert((char *)__asan_region_is_poisoned(p - 1, sz + 1) == (expect_redzones ? p - 1 : nullptr)); + // The user memory. assert((char *)__asan_region_is_poisoned(p, sz) == nullptr); + // First byte of the right redzone, if present. assert((char *)__asan_region_is_poisoned(p, sz + 1) == (expect_redzones ? p + sz : nullptr)); } @@ -46,12 +47,29 @@ void test_malloc_shadow(char *p, size_t sz, bool expect_redzones) { typedef void (*Fn)(); int main(int argc, char *argv[]) { + constexpr unsigned nPtrs = 200; + char *ptrs[nPtrs]; + // Before activation: no redzones. for (size_t sz = 1; sz < nPtrs; ++sz) { ptrs[sz] = (char *)malloc(sz); test_malloc_shadow(ptrs[sz], sz, false); } + // Create a honey pot for the future, instrumented, allocations. Since the + // quarantine is disabled, chunks are going to be recycled right away and + // reused for the new allocations. New allocations must get the proper + // redzones anyway, whether it's a fresh or reused allocation. 
+ constexpr size_t HoneyPotBlockSize = 4096; + constexpr int HoneyPotSize = 200; + char *honeyPot[HoneyPotSize]; + for (int i = 1; i < HoneyPotSize; ++i) { + honeyPot[i] = (char *)malloc(HoneyPotBlockSize); + test_malloc_shadow(honeyPot[i], HoneyPotBlockSize, false); + } + for (int i = 1; i < HoneyPotSize; ++i) + free(honeyPot[i]); + std::string path = std::string(argv[0]) + "-so.so"; void *dso = dlopen(path.c_str(), RTLD_NOW); if (!dso) { @@ -67,11 +85,17 @@ int main(int argc, char *argv[]) { } // After activation: redzones. + for (int i = 1; i < HoneyPotSize; ++i) { + honeyPot[i] = (char *)malloc(HoneyPotBlockSize); + test_malloc_shadow(honeyPot[i], HoneyPotBlockSize, true); + } { - char *p = (char *)malloc(100); - test_malloc_shadow(p, 100, true); + char *p = (char *)malloc(HoneyPotBlockSize); + test_malloc_shadow(p, HoneyPotBlockSize, true); free(p); } + for (int i = 1; i < HoneyPotSize; ++i) + free(honeyPot[i]); // Pre-existing allocations got redzones, too. for (size_t sz = 1; sz < nPtrs; ++sz) { @@ -93,7 +117,9 @@ int main(int argc, char *argv[]) { return 0; } + #else // SHARED_LIB + #include <stdio.h> #include <stdlib.h> @@ -101,6 +127,7 @@ extern "C" void do_another_bad_thing() { char *volatile p = (char *)malloc(100); printf("%hhx\n", p[105]); } + #endif // SHARED_LIB // help=1 in activation flags lists only flags are are supported at activation |