Diffstat (limited to 'libsanitizer/asan/asan_poisoning.cc')
-rw-r--r--  libsanitizer/asan/asan_poisoning.cc   44
1 file changed, 34 insertions(+), 10 deletions(-)
diff --git a/libsanitizer/asan/asan_poisoning.cc b/libsanitizer/asan/asan_poisoning.cc
index 39f74879b38..8fe2bd42bdb 100644
--- a/libsanitizer/asan/asan_poisoning.cc
+++ b/libsanitizer/asan/asan_poisoning.cc
@@ -67,7 +67,7 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) {
uptr page_size = GetPageSizeCached();
uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
- FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
+ ReleaseMemoryToOS(shadow_beg, shadow_end - shadow_beg);
}
void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
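The asymmetric rounding above is what keeps the release safe: the shadow start is rounded up and the shadow end is rounded down, so only pages that lie entirely inside the shadow of [p, p + size) are handed back to the OS. A standalone sketch of that arithmetic, with stand-in helpers mirroring the sanitizer_common RoundUpTo/RoundDownTo and purely illustrative addresses:

#include <cstddef>
#include <cstdint>
#include <cstdio>

typedef uintptr_t uptr;

// Stand-ins for the sanitizer_common helpers; alignment must be a power of two.
static uptr RoundUpTo(uptr x, uptr align)   { return (x + align - 1) & ~(align - 1); }
static uptr RoundDownTo(uptr x, uptr align) { return x & ~(align - 1); }

int main() {
  const uptr kPageSize = 4096;                           // example page size
  uptr shadow_lo = 0x10000123, shadow_hi = 0x10003f00;   // hypothetical shadow range
  uptr shadow_beg = RoundUpTo(shadow_lo, kPageSize);     // 0x10001000: first whole page
  uptr shadow_end = RoundDownTo(shadow_hi, kPageSize);   // 0x10003000: end of last whole page
  if (shadow_beg < shadow_end)
    printf("can release %zu shadow bytes\n", (size_t)(shadow_end - shadow_beg));
  return 0;
}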
@@ -100,7 +100,7 @@ using namespace __asan; // NOLINT
// that user program (un)poisons the memory it owns. It poisons memory
// conservatively, and unpoisons progressively to make sure asan shadow
// mapping invariant is preserved (see detailed mapping description here:
-// http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm).
+// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm).
//
// * if user asks to poison region [left, right), the program poisons
// at least [left, AlignDown(right)).
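In user code this contract is reached through the public interface header. A minimal usage sketch, assuming an ASan-instrumented build and the usual 8-byte shadow granularity; the buffer size and offsets are only illustrative:

#include <sanitizer/asan_interface.h>
#include <cstdlib>

int main() {
  char *buf = static_cast<char *>(malloc(64));
  // Ask ASan to poison [buf + 8, buf + 29); at least [buf + 8, AlignDown(buf + 29))
  // becomes unaddressable, so buf + 16 is guaranteed to be caught.
  __asan_poison_memory_region(buf + 8, 21);
  int poisoned = __asan_address_is_poisoned(buf + 16);  // expected: 1
  // Unpoison before the region is used (or freed) again.
  __asan_unpoison_memory_region(buf + 8, 21);
  free(buf);
  return poisoned ? 0 : 1;
}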
@@ -115,9 +115,9 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
- CHECK(beg.offset < end.offset);
+ CHECK_LT(beg.offset, end.offset);
s8 value = beg.value;
- CHECK(value == end.value);
+ CHECK_EQ(value, end.value);
// We can only poison memory if the byte in end.offset is unaddressable.
// No need to re-poison memory if it is poisoned already.
if (value > 0 && value <= end.offset) {
@@ -129,7 +129,7 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
}
return;
}
- CHECK(beg.chunk < end.chunk);
+ CHECK_LT(beg.chunk, end.chunk);
if (beg.offset > 0) {
// Mark bytes from beg.offset as unaddressable.
if (beg.value == 0) {
@@ -155,9 +155,9 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
- CHECK(beg.offset < end.offset);
+ CHECK_LT(beg.offset, end.offset);
s8 value = beg.value;
- CHECK(value == end.value);
+ CHECK_EQ(value, end.value);
// We unpoison memory bytes up to end.offset if it is not unpoisoned
// already.
if (value != 0) {
@@ -165,7 +165,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
}
return;
}
- CHECK(beg.chunk < end.chunk);
+ CHECK_LT(beg.chunk, end.chunk);
if (beg.offset > 0) {
*beg.chunk = 0;
beg.chunk++;
@@ -312,6 +312,30 @@ static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
}
}
+void __asan_set_shadow_00(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0, size);
+}
+
+void __asan_set_shadow_f1(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf1, size);
+}
+
+void __asan_set_shadow_f2(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf2, size);
+}
+
+void __asan_set_shadow_f3(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf3, size);
+}
+
+void __asan_set_shadow_f5(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf5, size);
+}
+
+void __asan_set_shadow_f8(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf8, size);
+}
+
void __asan_poison_stack_memory(uptr addr, uptr size) {
VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
PoisonAlignedStackMemory(addr, size, true);
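The __asan_set_shadow_* entry points added above are intended to let compiler-generated code fill a frame's shadow with ASan's stack magic bytes in one call instead of a run of inline stores; the values written correspond to the usual shadow encodings (0x00 addressable, 0xf1/0xf2/0xf3 stack left/mid/right redzone, 0xf5 stack-after-return, 0xf8 stack-use-after-scope). A standalone sketch that just maps the bytes back to those meanings; the helper name is made up:

#include <cstdio>

// Hypothetical decoding helper; the byte values match ASan's stack shadow magics.
static const char *DescribeShadowByte(unsigned char v) {
  switch (v) {
    case 0x00: return "addressable";
    case 0xf1: return "stack left redzone";
    case 0xf2: return "stack mid redzone";
    case 0xf3: return "stack right redzone";
    case 0xf5: return "stack after return";
    case 0xf8: return "stack use after scope";
    default:   return "other";
  }
}

int main() {
  const unsigned char bytes[] = {0x00, 0xf1, 0xf2, 0xf3, 0xf5, 0xf8};
  for (unsigned char b : bytes)
    printf("0x%02x  %s\n", b, DescribeShadowByte(b));
  return 0;
}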
@@ -341,7 +365,7 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
&stack);
}
CHECK_LE(end - beg,
- FIRST_32_SECOND_64(1UL << 30, 1UL << 34)); // Sanity check.
+ FIRST_32_SECOND_64(1UL << 30, 1ULL << 34)); // Sanity check.
uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
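__sanitizer_annotate_contiguous_container is the hook container implementations use to keep shadow state in sync with their size(): old_mid is the previous end of the live elements, new_mid the new one, and [new_mid, end) stays poisoned. A minimal usage sketch, assuming an ASan-instrumented build; the capacity and element counts are illustrative:

#include <sanitizer/common_interface_defs.h>
#include <cstdlib>

int main() {
  const size_t capacity = 32;
  char *storage = static_cast<char *>(malloc(capacity));
  char *beg = storage, *end = storage + capacity;

  // Container starts empty: poison everything past beg.
  __sanitizer_annotate_contiguous_container(beg, end, end, beg);

  // Grow to 10 elements: unpoison [beg, beg + 10).
  __sanitizer_annotate_contiguous_container(beg, end, beg, beg + 10);
  // storage[3] is a valid access now; storage[20] would be reported by ASan.

  // Shrink back to 4 elements: poison [beg + 4, end) again.
  __sanitizer_annotate_contiguous_container(beg, end, beg + 10, beg + 4);

  // Unpoison everything before returning the buffer to the allocator.
  __sanitizer_annotate_contiguous_container(beg, end, beg + 4, end);
  free(storage);
  return 0;
}

Each call only rewrites the shadow between old_mid and new_mid, which is why the code above rounds that window to granule boundaries (a and c) before touching it.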
@@ -352,7 +376,7 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
// Make a quick sanity check that we are indeed in this state.
//
// FIXME: Two of these three checks are disabled until we fix
- // https://code.google.com/p/address-sanitizer/issues/detail?id=258.
+ // https://github.com/google/sanitizers/issues/258.
// if (d1 != d2)
// CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
if (a + granularity <= d1)