summaryrefslogtreecommitdiff
path: root/chromium/base/memory
diff options
context:
space:
mode:
Diffstat (limited to 'chromium/base/memory')
-rw-r--r--chromium/base/memory/aligned_memory.cc18
-rw-r--r--chromium/base/memory/aligned_memory.h21
-rw-r--r--chromium/base/memory/aligned_memory_unittest.cc4
-rw-r--r--chromium/base/memory/checked_ptr.cc29
-rw-r--r--chromium/base/memory/checked_ptr.h441
-rw-r--r--chromium/base/memory/checked_ptr_unittest.cc387
-rw-r--r--chromium/base/memory/discardable_shared_memory.cc11
-rw-r--r--chromium/base/memory/discardable_shared_memory.h2
-rw-r--r--chromium/base/memory/discardable_shared_memory_unittest.cc10
-rw-r--r--chromium/base/memory/madv_free_discardable_memory_allocator_posix.cc12
-rw-r--r--chromium/base/memory/madv_free_discardable_memory_allocator_posix.h3
-rw-r--r--chromium/base/memory/madv_free_discardable_memory_allocator_posix_unittest.cc20
-rw-r--r--chromium/base/memory/madv_free_discardable_memory_posix.cc10
-rw-r--r--chromium/base/memory/madv_free_discardable_memory_posix.h2
-rw-r--r--chromium/base/memory/madv_free_discardable_memory_posix_unittest.cc2
-rw-r--r--chromium/base/memory/memory_pressure_listener.cc12
-rw-r--r--chromium/base/memory/memory_pressure_listener.h7
-rw-r--r--chromium/base/memory/memory_pressure_listener_unittest.cc5
-rw-r--r--chromium/base/memory/platform_shared_memory_region.cc10
-rw-r--r--chromium/base/memory/platform_shared_memory_region_android.cc1
-rw-r--r--chromium/base/memory/platform_shared_memory_region_unittest.cc1
-rw-r--r--chromium/base/memory/ref_counted.h2
-rw-r--r--chromium/base/memory/ref_counted_delete_on_sequence.h2
-rw-r--r--chromium/base/memory/scoped_refptr.h2
-rw-r--r--chromium/base/memory/shared_memory_tracker.cc15
-rw-r--r--chromium/base/memory/shared_memory_tracker.h2
-rw-r--r--chromium/base/memory/singleton.h2
-rw-r--r--chromium/base/memory/singleton_unittest.cc21
-rw-r--r--chromium/base/memory/weak_ptr.h2
29 files changed, 875 insertions, 181 deletions
diff --git a/chromium/base/memory/aligned_memory.cc b/chromium/base/memory/aligned_memory.cc
index 97b49248373..7017e316af2 100644
--- a/chromium/base/memory/aligned_memory.cc
+++ b/chromium/base/memory/aligned_memory.cc
@@ -15,24 +15,26 @@ namespace base {
void* AlignedAlloc(size_t size, size_t alignment) {
DCHECK_GT(size, 0U);
- DCHECK_EQ(alignment & (alignment - 1), 0U);
+ DCHECK(bits::IsPowerOfTwo(alignment));
DCHECK_EQ(alignment % sizeof(void*), 0U);
void* ptr = nullptr;
#if defined(COMPILER_MSVC)
ptr = _aligned_malloc(size, alignment);
-// Android technically supports posix_memalign(), but does not expose it in
-// the current version of the library headers used by Chrome. Luckily,
-// memalign() on Android returns pointers which can safely be used with
-// free(), so we can use it instead. Issue filed to document this:
-// http://code.google.com/p/android/issues/detail?id=35391
#elif defined(OS_ANDROID)
+ // Android technically supports posix_memalign(), but does not expose it in
+ // the current version of the library headers used by Chromium. Luckily,
+ // memalign() on Android returns pointers which can safely be used with
+ // free(), so we can use it instead. Issue filed to document this:
+ // http://code.google.com/p/android/issues/detail?id=35391
ptr = memalign(alignment, size);
#else
- if (int ret = posix_memalign(&ptr, alignment, size)) {
+ int ret = posix_memalign(&ptr, alignment, size);
+ if (ret != 0) {
DLOG(ERROR) << "posix_memalign() returned with error " << ret;
ptr = nullptr;
}
#endif
+
// Since aligned allocations may fail for non-memory related reasons, force a
// crash if we encounter a failed allocation; maintaining consistent behavior
// with a normal allocation failure in Chrome.
@@ -42,7 +44,7 @@ void* AlignedAlloc(size_t size, size_t alignment) {
CHECK(false);
}
// Sanity check alignment just to be safe.
- DCHECK_EQ(reinterpret_cast<uintptr_t>(ptr) & (alignment - 1), 0U);
+ DCHECK(IsAligned(ptr, alignment));
return ptr;
}
diff --git a/chromium/base/memory/aligned_memory.h b/chromium/base/memory/aligned_memory.h
index d1cba0c7bb1..39a823a7d14 100644
--- a/chromium/base/memory/aligned_memory.h
+++ b/chromium/base/memory/aligned_memory.h
@@ -11,8 +11,8 @@
#include <type_traits>
#include "base/base_export.h"
-#include "base/compiler_specific.h"
-#include "base/logging.h"
+#include "base/bits.h"
+#include "base/check.h"
#include "base/process/process_metrics.h"
#include "build/build_config.h"
@@ -57,22 +57,25 @@ struct AlignedFreeDeleter {
}
};
-#ifndef __has_builtin
-#define __has_builtin(x) 0 // Compatibility with non-clang compilers.
+#ifdef __has_builtin
+#define SUPPORTS_BUILTIN_IS_ALIGNED (__has_builtin(__builtin_is_aligned))
+#else
+#define SUPPORTS_BUILTIN_IS_ALIGNED 0
#endif
inline bool IsAligned(uintptr_t val, size_t alignment) {
// If the compiler supports builtin alignment checks prefer them.
-#if __has_builtin(__builtin_is_aligned)
+#if SUPPORTS_BUILTIN_IS_ALIGNED
return __builtin_is_aligned(val, alignment);
#else
- DCHECK(!((alignment - 1) & alignment))
- << alignment << " is not a power of two";
+ DCHECK(bits::IsPowerOfTwo(alignment)) << alignment << " is not a power of 2";
return (val & (alignment - 1)) == 0;
#endif
}
-inline bool IsAligned(void* val, size_t alignment) {
+#undef SUPPORTS_BUILTIN_IS_ALIGNED
+
+inline bool IsAligned(const void* val, size_t alignment) {
return IsAligned(reinterpret_cast<uintptr_t>(val), alignment);
}
@@ -80,7 +83,7 @@ template <typename Type>
inline bool IsPageAligned(Type val) {
static_assert(std::is_integral<Type>::value || std::is_pointer<Type>::value,
"Integral or pointer type required");
- return base::IsAligned(val, base::GetPageSize());
+ return IsAligned(val, GetPageSize());
}
} // namespace base
diff --git a/chromium/base/memory/aligned_memory_unittest.cc b/chromium/base/memory/aligned_memory_unittest.cc
index e067b4cbbc2..810f2e46ca7 100644
--- a/chromium/base/memory/aligned_memory_unittest.cc
+++ b/chromium/base/memory/aligned_memory_unittest.cc
@@ -38,6 +38,10 @@ TEST(AlignedMemoryTest, ScopedDynamicAllocation) {
static_cast<float*>(AlignedAlloc(8, 8)));
EXPECT_TRUE(p.get());
EXPECT_TRUE(IsAligned(p.get(), 8));
+
+ // Make sure IsAligned() can check const pointers as well.
+ const float* const_p = p.get();
+ EXPECT_TRUE(IsAligned(const_p, 8));
}
TEST(AlignedMemoryTest, IsAligned) {
diff --git a/chromium/base/memory/checked_ptr.cc b/chromium/base/memory/checked_ptr.cc
new file mode 100644
index 00000000000..99f12a34b06
--- /dev/null
+++ b/chromium/base/memory/checked_ptr.cc
@@ -0,0 +1,29 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/checked_ptr.h"
+
+#include "base/allocator/partition_allocator/partition_alloc.h"
+
+namespace base {
+namespace internal {
+
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+BASE_EXPORT bool CheckedPtr2ImplPartitionAllocSupport::EnabledForPtr(
+ void* ptr) {
+ // CheckedPtr2Impl works only when memory is allocated by PartitionAlloc and
+  // only if the pointer points to the beginning of the allocated slot.
+ //
+ // TODO(bartekn): Add |&& PartitionAllocGetSlotOffset(ptr) == 0|
+ // CheckedPtr2Impl uses a fake implementation at the moment, which happens to
+ // work even for non-0 offsets, so skip this check for now to get a better
+ // coverage.
+ return IsManagedByPartitionAlloc(ptr);
+}
+
+#endif
+
+} // namespace internal
+} // namespace base
diff --git a/chromium/base/memory/checked_ptr.h b/chromium/base/memory/checked_ptr.h
index dc81e98130f..58cd9ba0528 100644
--- a/chromium/base/memory/checked_ptr.h
+++ b/chromium/base/memory/checked_ptr.h
@@ -5,11 +5,28 @@
#ifndef BASE_MEMORY_CHECKED_PTR_H_
#define BASE_MEMORY_CHECKED_PTR_H_
-#include <cstddef>
-#include <cstdint>
+#include <stddef.h>
+#include <stdint.h>
+
#include <utility>
+#include "base/check_op.h"
#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+// TEST: We can't use protection in the real code (yet) because it may lead to
+// crashes in absence of PartitionAlloc support. Setting it to 0 will disable
+// the protection, while preserving all calculations.
+#define CHECKED_PTR2_PROTECTION_ENABLED 0
+
+#define CHECKED_PTR2_USE_NO_OP_WRAPPER 0
+
+// Set it to 1 to avoid branches when checking if per-pointer protection is
+// enabled.
+#define CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED 0
+// Set it to 1 to avoid branches when dereferencing the pointer.
+// Must be 1 if the above is 1.
+#define CHECKED_PTR2_AVOID_BRANCH_WHEN_DEREFERENCING 0
namespace base {
@@ -22,8 +39,10 @@ namespace internal {
struct CheckedPtrNoOpImpl {
// Wraps a pointer, and returns its uintptr_t representation.
- static ALWAYS_INLINE uintptr_t WrapRawPtr(const void* const_ptr) {
- return reinterpret_cast<uintptr_t>(const_ptr);
+ // Use |const volatile| to prevent compiler error. These will be dropped
+ // anyway when casting to uintptr_t and brought back upon pointer extraction.
+ static ALWAYS_INLINE uintptr_t WrapRawPtr(const volatile void* cv_ptr) {
+ return reinterpret_cast<uintptr_t>(cv_ptr);
}
// Returns equivalent of |WrapRawPtr(nullptr)|. Separated out to make it a
@@ -64,6 +83,272 @@ struct CheckedPtrNoOpImpl {
static ALWAYS_INLINE void IncrementSwapCountForTest() {}
};
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+constexpr int kValidAddressBits = 48;
+constexpr uintptr_t kAddressMask = (1ull << kValidAddressBits) - 1;
+constexpr int kGenerationBits = sizeof(uintptr_t) * 8 - kValidAddressBits;
+constexpr uintptr_t kGenerationMask = ~kAddressMask;
+constexpr int kTopBitShift = 63;
+constexpr uintptr_t kTopBit = 1ull << kTopBitShift;
+static_assert(kTopBit << 1 == 0, "kTopBit should really be the top bit");
+static_assert((kTopBit & kGenerationMask) > 0,
+ "kTopBit bit must be inside the generation region");
+
+// This functionality is outside of CheckedPtr2Impl, so that it can be
+// overridden by tests. The implementation is in the .cc file, because including
+// partition_alloc.h here could lead to cyclic includes.
+struct CheckedPtr2ImplPartitionAllocSupport {
+ // Checks if CheckedPtr2 support is enabled in PartitionAlloc for |ptr|.
+ // TODO(bartekn): Check if this function gets inlined.
+ BASE_EXPORT static bool EnabledForPtr(void* ptr);
+};
+
+template <typename PartitionAllocSupport = CheckedPtr2ImplPartitionAllocSupport>
+struct CheckedPtr2Impl {
+ // This implementation assumes that pointers are 64 bits long and at least 16
+ // top bits are unused. The latter is harder to verify statically, but this is
+ // true for all currently supported 64-bit architectures (DCHECK when wrapping
+ // will verify that).
+ static_assert(sizeof(void*) >= 8, "Need 64-bit pointers");
+
+ // Wraps a pointer, and returns its uintptr_t representation.
+ static ALWAYS_INLINE uintptr_t WrapRawPtr(const volatile void* cv_ptr) {
+ void* ptr = const_cast<void*>(cv_ptr);
+ uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
+#if CHECKED_PTR2_USE_NO_OP_WRAPPER
+ static_assert(!CHECKED_PTR2_PROTECTION_ENABLED, "");
+#else
+ // Make sure that the address bits that will be used for generation are 0.
+ // If they aren't, they'd fool the unwrapper into thinking that the
+ // protection is enabled, making it try to read and compare the generation.
+ DCHECK_EQ(ExtractGeneration(addr), 0ull);
+
+ // Return a not-wrapped |addr|, if it's either nullptr or if the protection
+ // for this pointer is disabled.
+ if (!PartitionAllocSupport::EnabledForPtr(ptr)) {
+ return addr;
+ }
+
+ // Read the generation from 16 bits before the allocation. Then place it in
+ // the top bits of the address.
+ static_assert(sizeof(uint16_t) * 8 == kGenerationBits, "");
+#if CHECKED_PTR2_PROTECTION_ENABLED
+ uintptr_t generation = *(static_cast<volatile uint16_t*>(ptr) - 1);
+#else
+ // TEST: Reading from offset -1 may crash without full PA support.
+ // Just read from offset 0 to attain the same perf characteristics as the
+ // expected production solution.
+ // This generation will be ignored anyway either when unwrapping or below
+ // (depending on the algorithm variant), on the
+ // !CHECKED_PTR2_PROTECTION_ENABLED path.
+ uintptr_t generation = *(static_cast<volatile uint16_t*>(ptr));
+#endif // CHECKED_PTR2_PROTECTION_ENABLED
+ generation <<= kValidAddressBits;
+ addr |= generation;
+#if CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
+    // Always set top bit to 1, to indicate that the protection is enabled.
+ addr |= kTopBit;
+#if !CHECKED_PTR2_PROTECTION_ENABLED
+ // TEST: Clear the generation, or else it could crash without PA support.
+    // If the top bit was set, the unwrapper would read from before the
+    // address, but with it cleared, it'll read from the address itself.
+ addr &= kAddressMask;
+#endif // !CHECKED_PTR2_PROTECTION_ENABLED
+#endif // CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
+#endif // CHECKED_PTR2_USE_NO_OP_WRAPPER
+ return addr;
+ }
+
+ // Returns equivalent of |WrapRawPtr(nullptr)|. Separated out to make it a
+ // constexpr.
+ static constexpr ALWAYS_INLINE uintptr_t GetWrappedNullPtr() {
+ return kWrappedNullPtr;
+ }
+
+ static ALWAYS_INLINE uintptr_t
+ SafelyUnwrapPtrInternal(uintptr_t wrapped_ptr) {
+#if CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
+ // Top bit tells if the protection is enabled. Use it to decide whether to
+ // read the word before the allocation, which exists only if the protection
+ // is enabled. Otherwise it may crash, in which case read the data from the
+ // beginning of the allocation instead and ignore it later. All this magic
+ // is to avoid a branch, for performance reasons.
+ //
+ // A couple examples, assuming 64-bit system (continued below):
+ // Ex.1: wrapped_ptr=0x8442000012345678
+ // => enabled=0x8000000000000000
+ // => offset=1
+ // Ex.2: wrapped_ptr=0x0000000012345678
+ // => enabled=0x0000000000000000
+ // => offset=0
+ uintptr_t enabled = wrapped_ptr & kTopBit;
+ // We can't have protection disabled and generation set in the same time.
+ DCHECK(!(enabled == 0 && (ExtractGeneration(wrapped_ptr)) != 0));
+ uintptr_t offset = enabled >> kTopBitShift; // 0 or 1
+ // Use offset to decide if the generation should be read at the beginning or
+ // before the allocation.
+ // TODO(bartekn): Do something about 1-byte allocations. Reading 2-byte
+ // generation at the allocation could crash. This case is executed
+ // specifically for non-PartitionAlloc pointers, so we can't make
+ // assumptions about alignment.
+ //
+ // Cast to volatile to ensure memory is read. E.g. in a tight loop, the
+ // compiler could cache the value in a register and thus could miss that
+ // another thread freed memory and cleared generation.
+ //
+ // Examples (continued):
+ // Ex.1: generation_ptr=0x0000000012345676
+ // a) if pointee wasn't freed, read e.g. generation=0x0442 (could be
+ // also 0x8442, the top bit is overwritten later)
+ // b) if pointee was freed, read e.g. generation=0x1234 (could be
+ // anything)
+ // Ex.2: generation_ptr=0x0000000012345678, read e.g. 0x2345 (doesn't
+ // matter what we read, as long as this read doesn't crash)
+ volatile uint16_t* generation_ptr =
+ reinterpret_cast<volatile uint16_t*>(ExtractAddress(wrapped_ptr)) -
+ offset;
+ uintptr_t generation = *generation_ptr;
+ // Shift generation into the right place and add back the enabled bit.
+ //
+ // Examples (continued):
+ // Ex.1:
+ // a) generation=0x8442000000000000
+    //     b) generation=0x9234000000000000
+ // Ex.2: generation=0x2345000000000000
+ generation <<= kValidAddressBits;
+ generation |= enabled;
+
+ // If the protection isn't enabled, clear top bits. Casting to a signed
+ // type makes >> sign extend the last bit.
+ //
+ // Examples (continued):
+ // Ex.1: mask=0xffff000000000000
+ // a) generation=0x8442000000000000
+ // b) generation=0x9234000000000000
+ // Ex.2: mask=0x0000000000000000 => generation=0x0000000000000000
+ uintptr_t mask = static_cast<intptr_t>(enabled) >> (kGenerationBits - 1);
+ generation &= mask;
+
+ // Use hardware to detect generation mismatch. CPU will crash if top bits
+ // aren't all 0 (technically it won't if all bits are 1, but that's a kernel
+ // mode address, which isn't allowed either... also, top bit will be always
+ // zeroed out).
+ //
+ // Examples (continued):
+ // Ex.1:
+ // a) returning 0x0000000012345678
+ // b) returning 0x1676000012345678 (this will generate a desired crash)
+ // Ex.2: returning 0x0000000012345678
+ static_assert(CHECKED_PTR2_AVOID_BRANCH_WHEN_DEREFERENCING, "");
+ return generation ^ wrapped_ptr;
+#else // CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
+ uintptr_t ptr_generation = wrapped_ptr >> kValidAddressBits;
+ if (ptr_generation > 0) {
+ // Read generation from before the allocation.
+ //
+ // Cast to volatile to ensure memory is read. E.g. in a tight loop, the
+ // compiler could cache the value in a register and thus could miss that
+ // another thread freed memory and cleared generation.
+#if CHECKED_PTR2_PROTECTION_ENABLED
+ uintptr_t read_generation =
+ *(reinterpret_cast<volatile uint16_t*>(ExtractAddress(wrapped_ptr)) -
+ 1);
+#else
+ // TEST: Reading from before the pointer may crash. See more above...
+ uintptr_t read_generation =
+ *(reinterpret_cast<volatile uint16_t*>(ExtractAddress(wrapped_ptr)));
+#endif
+#if CHECKED_PTR2_AVOID_BRANCH_WHEN_DEREFERENCING
+ // Use hardware to detect generation mismatch. CPU will crash if top bits
+ // aren't all 0 (technically it won't if all bits are 1, but that's a
+ // kernel mode address, which isn't allowed either).
+ read_generation <<= kValidAddressBits;
+ return read_generation ^ wrapped_ptr;
+#else
+#if CHECKED_PTR2_PROTECTION_ENABLED
+ if (UNLIKELY(ptr_generation != read_generation))
+ IMMEDIATE_CRASH();
+#else
+ // TEST: Use volatile to prevent optimizing out the calculations leading
+ // to this point.
+ volatile bool x = false;
+ if (ptr_generation != read_generation)
+ x = true;
+#endif // CHECKED_PTR2_PROTECTION_ENABLED
+ return wrapped_ptr & kAddressMask;
+#endif // CHECKED_PTR2_AVOID_BRANCH_WHEN_DEREFERENCING
+ }
+ return wrapped_ptr;
+#endif // CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
+ }
+
+ // Unwraps the pointer's uintptr_t representation, while asserting that memory
+ // hasn't been freed. The function is allowed to crash on nullptr.
+ static ALWAYS_INLINE void* SafelyUnwrapPtrForDereference(
+ uintptr_t wrapped_ptr) {
+#if CHECKED_PTR2_PROTECTION_ENABLED
+ return reinterpret_cast<void*>(SafelyUnwrapPtrInternal(wrapped_ptr));
+#else
+ // TEST: Use volatile to prevent optimizing out the calculations leading to
+ // this point.
+ // |SafelyUnwrapPtrInternal| was separated out solely for this purpose.
+ volatile uintptr_t addr = SafelyUnwrapPtrInternal(wrapped_ptr);
+ return reinterpret_cast<void*>(addr);
+#endif
+ }
+
+ // Unwraps the pointer's uintptr_t representation, while asserting that memory
+ // hasn't been freed. The function must handle nullptr gracefully.
+ static ALWAYS_INLINE void* SafelyUnwrapPtrForExtraction(
+ uintptr_t wrapped_ptr) {
+#if CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
+ // In this implementation SafelyUnwrapPtrForDereference doesn't tolerate
+ // nullptr, because it reads unconditionally to avoid branches. Handle the
+ // nullptr case here.
+ if (wrapped_ptr == kWrappedNullPtr)
+ return nullptr;
+ return reinterpret_cast<void*>(SafelyUnwrapPtrForDereference(wrapped_ptr));
+#else
+ // In this implementation SafelyUnwrapPtrForDereference handles nullptr case
+ // well.
+ return reinterpret_cast<void*>(SafelyUnwrapPtrForDereference(wrapped_ptr));
+#endif
+ }
+
+ // Unwraps the pointer's uintptr_t representation, without making an assertion
+ // on whether memory was freed or not.
+ static ALWAYS_INLINE void* UnsafelyUnwrapPtrForComparison(
+ uintptr_t wrapped_ptr) {
+ return reinterpret_cast<void*>(ExtractAddress(wrapped_ptr));
+ }
+
+ // Advance the wrapped pointer by |delta| bytes.
+ static ALWAYS_INLINE uintptr_t Advance(uintptr_t wrapped_ptr, size_t delta) {
+ // Mask out the generation to disable the protection. It's not supported for
+ // pointers inside an allocation.
+ return ExtractAddress(wrapped_ptr) + delta;
+ }
+
+ // This is for accounting only, used by unit tests.
+ static ALWAYS_INLINE void IncrementSwapCountForTest() {}
+
+ private:
+ static ALWAYS_INLINE uintptr_t ExtractAddress(uintptr_t wrapped_ptr) {
+ return wrapped_ptr & kAddressMask;
+ }
+
+ static ALWAYS_INLINE uintptr_t ExtractGeneration(uintptr_t wrapped_ptr) {
+ return wrapped_ptr & kGenerationMask;
+ }
+
+ // This relies on nullptr and 0 being equal in the eyes of reinterpret_cast,
+ // which apparently isn't true in some rare environments.
+ static constexpr uintptr_t kWrappedNullPtr = 0;
+};
+
+#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
template <typename T>
struct DereferencedPointerType {
using Type = decltype(*std::declval<T*>());
@@ -89,7 +374,12 @@ struct DereferencedPointerType<void> {};
// 2. Keep this class as small as possible, while still satisfying goal #1 (i.e.
// we aren't striving to maximize compatibility with raw pointers, merely
// adding support for cases encountered so far).
-template <typename T, typename Impl = internal::CheckedPtrNoOpImpl>
+template <typename T,
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ typename Impl = internal::CheckedPtr2Impl<>>
+#else
+ typename Impl = internal::CheckedPtrNoOpImpl>
+#endif
class CheckedPtr {
public:
// CheckedPtr can be trivially default constructed (leaving |wrapped_ptr_|
@@ -122,6 +412,10 @@ class CheckedPtr {
wrapped_ptr_ = Impl::WrapRawPtr(p);
return *this;
}
+ ALWAYS_INLINE CheckedPtr& operator=(std::nullptr_t) noexcept {
+ wrapped_ptr_ = Impl::GetWrappedNullPtr();
+ return *this;
+ }
~CheckedPtr() = default;
@@ -153,64 +447,103 @@ class CheckedPtr {
wrapped_ptr_ = Impl::Advance(wrapped_ptr_, sizeof(T));
return *this;
}
-
ALWAYS_INLINE CheckedPtr& operator--() {
wrapped_ptr_ = Impl::Advance(wrapped_ptr_, -sizeof(T));
return *this;
}
-
+ ALWAYS_INLINE CheckedPtr operator++(int /* post_increment */) {
+ CheckedPtr result = *this;
+ ++(*this);
+ return result;
+ }
+ ALWAYS_INLINE CheckedPtr operator--(int /* post_decrement */) {
+ CheckedPtr result = *this;
+ --(*this);
+ return result;
+ }
ALWAYS_INLINE CheckedPtr& operator+=(ptrdiff_t delta_elems) {
wrapped_ptr_ = Impl::Advance(wrapped_ptr_, delta_elems * sizeof(T));
return *this;
}
-
ALWAYS_INLINE CheckedPtr& operator-=(ptrdiff_t delta_elems) {
return *this += -delta_elems;
}
- ALWAYS_INLINE bool operator==(T* p) const { return GetForComparison() == p; }
- ALWAYS_INLINE bool operator!=(T* p) const { return !operator==(p); }
-
- // Useful for cases like this:
- // class Base {};
- // class Derived : public Base {};
- // Derived d;
- // CheckedPtr<Derived> derived_ptr = &d;
- // Base* base_ptr = &d;
- // if (derived_ptr == base_ptr) {...}
- // Without these, such comparisons would end up calling |operator T*()|.
+ // Be careful to cover all cases with CheckedPtr being on both sides, left
+ // side only and right side only. If any case is missed, a more costly
+ // |operator T*()| will get called, instead of |operator==|.
+ friend ALWAYS_INLINE bool operator==(const CheckedPtr& lhs,
+ const CheckedPtr& rhs) {
+ return lhs.GetForComparison() == rhs.GetForComparison();
+ }
+ friend ALWAYS_INLINE bool operator!=(const CheckedPtr& lhs,
+ const CheckedPtr& rhs) {
+ return !(lhs == rhs);
+ }
+ friend ALWAYS_INLINE bool operator==(const CheckedPtr& lhs, T* rhs) {
+ return lhs.GetForComparison() == rhs;
+ }
+ friend ALWAYS_INLINE bool operator!=(const CheckedPtr& lhs, T* rhs) {
+ return !(lhs == rhs);
+ }
+ friend ALWAYS_INLINE bool operator==(T* lhs, const CheckedPtr& rhs) {
+ return rhs == lhs; // Reverse order to call the operator above.
+ }
+ friend ALWAYS_INLINE bool operator!=(T* lhs, const CheckedPtr& rhs) {
+ return rhs != lhs; // Reverse order to call the operator above.
+ }
+ // Needed for cases like |derived_ptr == base_ptr|. Without these, a more
+ // costly |operator T*()| will get called, instead of |operator==|.
template <typename U>
- ALWAYS_INLINE bool operator==(U* p) const {
- // Add |const| when casting, because |U| may have |const| in it. Even if |T|
- // doesn't, comparison between |T*| and |const T*| is fine.
- return GetForComparison() == static_cast<std::add_const_t<T>*>(p);
+ friend ALWAYS_INLINE bool operator==(const CheckedPtr& lhs,
+ const CheckedPtr<U, Impl>& rhs) {
+ // Add |const volatile| when casting, in case |U| has any. Even if |T|
+ // doesn't, comparison between |T*| and |const volatile T*| is fine.
+ return lhs.GetForComparison() ==
+ static_cast<std::add_cv_t<T>*>(rhs.GetForComparison());
}
template <typename U>
- ALWAYS_INLINE bool operator!=(U* p) const {
- return !operator==(p);
+ friend ALWAYS_INLINE bool operator!=(const CheckedPtr& lhs,
+ const CheckedPtr<U, Impl>& rhs) {
+ return !(lhs == rhs);
}
-
- ALWAYS_INLINE bool operator==(const CheckedPtr& other) const {
- return GetForComparison() == other.GetForComparison();
+ template <typename U>
+ friend ALWAYS_INLINE bool operator==(const CheckedPtr& lhs, U* rhs) {
+ // Add |const volatile| when casting, in case |U| has any. Even if |T|
+ // doesn't, comparison between |T*| and |const volatile T*| is fine.
+ return lhs.GetForComparison() == static_cast<std::add_cv_t<T>*>(rhs);
}
- ALWAYS_INLINE bool operator!=(const CheckedPtr& other) const {
- return !operator==(other);
+ template <typename U>
+ friend ALWAYS_INLINE bool operator!=(const CheckedPtr& lhs, U* rhs) {
+ return !(lhs == rhs);
}
- template <typename U, typename I>
- ALWAYS_INLINE bool operator==(const CheckedPtr<U, I>& other) const {
- // Add |const| when casting, because |U| may have |const| in it. Even if |T|
- // doesn't, comparison between |T*| and |const T*| is fine.
- return GetForComparison() ==
- static_cast<std::add_const_t<T>*>(other.GetForComparison());
+ template <typename U>
+ friend ALWAYS_INLINE bool operator==(U* lhs, const CheckedPtr& rhs) {
+ return rhs == lhs; // Reverse order to call the operator above.
+ }
+ template <typename U>
+ friend ALWAYS_INLINE bool operator!=(U* lhs, const CheckedPtr& rhs) {
+ return rhs != lhs; // Reverse order to call the operator above.
}
- template <typename U, typename I>
- ALWAYS_INLINE bool operator!=(const CheckedPtr<U, I>& other) const {
- return !operator==(other);
+ // Needed for comparisons against nullptr. Without these, a slightly more
+ // costly version would be called that extracts wrapped pointer, as opposed
+ // to plain comparison against 0.
+ friend ALWAYS_INLINE bool operator==(const CheckedPtr& lhs, std::nullptr_t) {
+ return !lhs;
+ }
+ friend ALWAYS_INLINE bool operator!=(const CheckedPtr& lhs, std::nullptr_t) {
+ return !!lhs; // Use !! otherwise the costly implicit cast will be used.
+ }
+ friend ALWAYS_INLINE bool operator==(std::nullptr_t, const CheckedPtr& rhs) {
+ return !rhs;
+ }
+ friend ALWAYS_INLINE bool operator!=(std::nullptr_t, const CheckedPtr& rhs) {
+ return !!rhs; // Use !! otherwise the costly implicit cast will be used.
}
- ALWAYS_INLINE void swap(CheckedPtr& other) noexcept {
+ friend ALWAYS_INLINE void swap(CheckedPtr& lhs, CheckedPtr& rhs) noexcept {
Impl::IncrementSwapCountForTest();
- std::swap(wrapped_ptr_, other.wrapped_ptr_);
+ std::swap(lhs.wrapped_ptr_, rhs.wrapped_ptr_);
}
private:
@@ -241,32 +574,6 @@ class CheckedPtr {
friend class CheckedPtr;
};
-// These are for cases where a raw pointer is on the left hand side. Reverse
-// order, so that |CheckedPtr::operator==()| kicks in, which will compare more
-// efficiently. Otherwise the CheckedPtr operand would have to be cast to raw
-// pointer, which may be more costly.
-template <typename T, typename I>
-ALWAYS_INLINE bool operator==(T* lhs, const CheckedPtr<T, I>& rhs) {
- return rhs == lhs;
-}
-template <typename T, typename I>
-ALWAYS_INLINE bool operator!=(T* lhs, const CheckedPtr<T, I>& rhs) {
- return !operator==(lhs, rhs);
-}
-template <typename T, typename I, typename U>
-ALWAYS_INLINE bool operator==(U* lhs, const CheckedPtr<T, I>& rhs) {
- return rhs == lhs;
-}
-template <typename T, typename I, typename U>
-ALWAYS_INLINE bool operator!=(U* lhs, const CheckedPtr<T, I>& rhs) {
- return !operator==(lhs, rhs);
-}
-
-template <typename T, typename I>
-ALWAYS_INLINE void swap(CheckedPtr<T, I>& lhs, CheckedPtr<T, I>& rhs) noexcept {
- lhs.swap(rhs);
-}
-
} // namespace base
using base::CheckedPtr;
diff --git a/chromium/base/memory/checked_ptr_unittest.cc b/chromium/base/memory/checked_ptr_unittest.cc
index 32fa63964ec..e1eedb2ff06 100644
--- a/chromium/base/memory/checked_ptr_unittest.cc
+++ b/chromium/base/memory/checked_ptr_unittest.cc
@@ -4,13 +4,17 @@
#include "base/memory/checked_ptr.h"
+#include <climits>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
+#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
+using testing::Test;
+
static_assert(sizeof(CheckedPtr<void>) == sizeof(void*),
"CheckedPtr shouldn't add memory overhead");
static_assert(sizeof(CheckedPtr<int>) == sizeof(int*),
@@ -49,12 +53,14 @@ static_assert(
namespace {
+static int g_wrap_raw_ptr_cnt = INT_MIN;
static int g_get_for_dereference_cnt = INT_MIN;
static int g_get_for_extraction_cnt = INT_MIN;
static int g_get_for_comparison_cnt = INT_MIN;
static int g_checked_ptr_swap_cnt = INT_MIN;
static void ClearCounters() {
+ g_wrap_raw_ptr_cnt = 0;
g_get_for_dereference_cnt = 0;
g_get_for_extraction_cnt = 0;
g_get_for_comparison_cnt = 0;
@@ -64,6 +70,11 @@ static void ClearCounters() {
struct CheckedPtrCountingNoOpImpl : base::internal::CheckedPtrNoOpImpl {
using Super = base::internal::CheckedPtrNoOpImpl;
+ static ALWAYS_INLINE uintptr_t WrapRawPtr(const volatile void* cv_ptr) {
+ ++g_wrap_raw_ptr_cnt;
+ return Super::WrapRawPtr(cv_ptr);
+ }
+
static ALWAYS_INLINE void* SafelyUnwrapPtrForDereference(
uintptr_t wrapped_ptr) {
++g_get_for_dereference_cnt;
@@ -109,55 +120,156 @@ struct Derived : Base1, Base2 {
int d;
};
-TEST(CheckedPtr, NullStarDereference) {
+class CheckedPtrTest : public Test {
+ protected:
+ void SetUp() override { ClearCounters(); }
+};
+
+TEST_F(CheckedPtrTest, NullStarDereference) {
CheckedPtr<int> ptr = nullptr;
EXPECT_DEATH_IF_SUPPORTED(if (*ptr == 42) return, "");
}
-TEST(CheckedPtr, NullArrowDereference) {
+TEST_F(CheckedPtrTest, NullArrowDereference) {
CheckedPtr<MyStruct> ptr = nullptr;
EXPECT_DEATH_IF_SUPPORTED(if (ptr->x == 42) return, "");
}
-TEST(CheckedPtr, NullExtractNoDereference) {
- CheckedPtr<int> ptr = nullptr;
+TEST_F(CheckedPtrTest, NullExtractNoDereference) {
+ CountingCheckedPtr<int> ptr = nullptr;
+ // No dereference hence shouldn't crash.
int* raw = ptr;
std::ignore = raw;
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 1);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
+}
+
+TEST_F(CheckedPtrTest, NullCmpExplicit) {
+ CountingCheckedPtr<int> ptr = nullptr;
+ EXPECT_TRUE(ptr == nullptr);
+ EXPECT_TRUE(nullptr == ptr);
+ EXPECT_FALSE(ptr != nullptr);
+ EXPECT_FALSE(nullptr != ptr);
+ // No need to unwrap pointer, just compare against 0.
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
+}
+
+TEST_F(CheckedPtrTest, NullCmpBool) {
+ CountingCheckedPtr<int> ptr = nullptr;
+ EXPECT_FALSE(ptr);
+ EXPECT_TRUE(!ptr);
+ // No need to unwrap pointer, just compare against 0.
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
+}
+
+void FuncThatAcceptsBool(bool b) {}
+
+bool IsValidNoCast(CountingCheckedPtr<int> ptr) {
+ return !!ptr; // !! to avoid implicit cast
+}
+bool IsValidNoCast2(CountingCheckedPtr<int> ptr) {
+ return ptr && true;
+}
+
+TEST_F(CheckedPtrTest, BoolOpNotCast) {
+ CountingCheckedPtr<int> ptr = nullptr;
+ volatile bool is_valid = !!ptr; // !! to avoid implicit cast
+ is_valid = ptr || is_valid; // volatile, so won't be optimized
+ if (ptr)
+ is_valid = true;
+ bool is_not_valid = !ptr;
+ if (!ptr)
+ is_not_valid = true;
+ std::ignore = IsValidNoCast(ptr);
+ std::ignore = IsValidNoCast2(ptr);
+ FuncThatAcceptsBool(!ptr);
+ // No need to unwrap pointer, just compare against 0.
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
}
-TEST(CheckedPtr, StarDereference) {
+bool IsValidWithCast(CountingCheckedPtr<int> ptr) {
+ return ptr;
+}
+
+// This test is mostly for documentation purposes. It demonstrates cases where
+// |operator T*| is called first and then the pointer is converted to bool,
+// as opposed to calling |operator bool| directly. The former may be more
+// costly, so the caller has to be careful not to trigger this path.
+TEST_F(CheckedPtrTest, CastNotBoolOp) {
+ CountingCheckedPtr<int> ptr = nullptr;
+ bool is_valid = ptr;
+ is_valid = IsValidWithCast(ptr);
+ FuncThatAcceptsBool(ptr);
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 3);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
+}
+
+TEST_F(CheckedPtrTest, StarDereference) {
int foo = 42;
- CheckedPtr<int> ptr = &foo;
+ CountingCheckedPtr<int> ptr = &foo;
EXPECT_EQ(*ptr, 42);
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 1);
}
-TEST(CheckedPtr, ArrowDereference) {
+TEST_F(CheckedPtrTest, ArrowDereference) {
MyStruct foo = {42};
- CheckedPtr<MyStruct> ptr = &foo;
+ CountingCheckedPtr<MyStruct> ptr = &foo;
EXPECT_EQ(ptr->x, 42);
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 1);
}
-TEST(CheckedPtr, ConstVoidPtr) {
+TEST_F(CheckedPtrTest, Delete) {
+ CountingCheckedPtr<int> ptr = new int(42);
+ delete ptr;
+ // The pointer was extracted using implicit cast before passing to |delete|.
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 1);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
+}
+
+TEST_F(CheckedPtrTest, ConstVolatileVoidPtr) {
int32_t foo[] = {1234567890};
- CheckedPtr<const void> ptr = foo;
- EXPECT_EQ(*static_cast<const int32_t*>(ptr), 1234567890);
+ CountingCheckedPtr<const volatile void> ptr = foo;
+ EXPECT_EQ(*static_cast<const volatile int32_t*>(ptr), 1234567890);
+ // Because we're using a cast, the extraction API kicks in, which doesn't
+ // know if the extracted pointer will be dereferenced or not.
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 1);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
}
-TEST(CheckedPtr, VoidPtr) {
+TEST_F(CheckedPtrTest, VoidPtr) {
int32_t foo[] = {1234567890};
- CheckedPtr<void> ptr = foo;
+ CountingCheckedPtr<void> ptr = foo;
EXPECT_EQ(*static_cast<int32_t*>(ptr), 1234567890);
+ // Because we're using a cast, the extraction API kicks in, which doesn't
+ // know if the extracted pointer will be dereferenced or not.
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 1);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
}
-TEST(CheckedPtr, OperatorEQ) {
+TEST_F(CheckedPtrTest, OperatorEQ) {
int foo;
- CheckedPtr<int> ptr1 = nullptr;
+ CountingCheckedPtr<int> ptr1 = nullptr;
EXPECT_TRUE(ptr1 == ptr1);
- CheckedPtr<int> ptr2 = nullptr;
+ CountingCheckedPtr<int> ptr2 = nullptr;
EXPECT_TRUE(ptr1 == ptr2);
- CheckedPtr<int> ptr3 = &foo;
+ CountingCheckedPtr<int> ptr3 = &foo;
EXPECT_TRUE(&foo == ptr3);
EXPECT_TRUE(ptr3 == &foo);
EXPECT_FALSE(ptr1 == ptr3);
@@ -165,17 +277,21 @@ TEST(CheckedPtr, OperatorEQ) {
ptr1 = &foo;
EXPECT_TRUE(ptr1 == ptr3);
EXPECT_TRUE(ptr3 == ptr1);
+
+ EXPECT_EQ(g_get_for_comparison_cnt, 12);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
}
-TEST(CheckedPtr, OperatorNE) {
+TEST_F(CheckedPtrTest, OperatorNE) {
int foo;
- CheckedPtr<int> ptr1 = nullptr;
+ CountingCheckedPtr<int> ptr1 = nullptr;
EXPECT_FALSE(ptr1 != ptr1);
- CheckedPtr<int> ptr2 = nullptr;
+ CountingCheckedPtr<int> ptr2 = nullptr;
EXPECT_FALSE(ptr1 != ptr2);
- CheckedPtr<int> ptr3 = &foo;
+ CountingCheckedPtr<int> ptr3 = &foo;
EXPECT_FALSE(&foo != ptr3);
EXPECT_FALSE(ptr3 != &foo);
EXPECT_TRUE(ptr1 != ptr3);
@@ -183,14 +299,17 @@ TEST(CheckedPtr, OperatorNE) {
ptr1 = &foo;
EXPECT_FALSE(ptr1 != ptr3);
EXPECT_FALSE(ptr3 != ptr1);
+
+ EXPECT_EQ(g_get_for_comparison_cnt, 12);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
}
-TEST(CheckedPtr, OperatorEQCast) {
- ClearCounters();
+TEST_F(CheckedPtrTest, OperatorEQCast) {
int foo = 42;
const int* raw_int_ptr = &foo;
- void* raw_void_ptr = &foo;
- CountingCheckedPtr<int> checked_int_ptr = &foo;
+ volatile void* raw_void_ptr = &foo;
+ CountingCheckedPtr<volatile int> checked_int_ptr = &foo;
CountingCheckedPtr<const void> checked_void_ptr = &foo;
EXPECT_TRUE(checked_int_ptr == checked_int_ptr);
EXPECT_TRUE(checked_int_ptr == raw_int_ptr);
@@ -209,14 +328,15 @@ TEST(CheckedPtr, OperatorEQCast) {
EXPECT_EQ(g_get_for_comparison_cnt, 16);
EXPECT_EQ(g_get_for_extraction_cnt, 0);
EXPECT_EQ(g_get_for_dereference_cnt, 0);
+}
- ClearCounters();
+TEST_F(CheckedPtrTest, OperatorEQCastHierarchy) {
Derived derived_val(42, 84, 1024);
Derived* raw_derived_ptr = &derived_val;
const Base1* raw_base1_ptr = &derived_val;
- Base2* raw_base2_ptr = &derived_val;
- CountingCheckedPtr<const Derived> checked_derived_ptr = &derived_val;
- CountingCheckedPtr<Base1> checked_base1_ptr = &derived_val;
+ volatile Base2* raw_base2_ptr = &derived_val;
+ CountingCheckedPtr<const volatile Derived> checked_derived_ptr = &derived_val;
+ CountingCheckedPtr<volatile Base1> checked_base1_ptr = &derived_val;
CountingCheckedPtr<const Base2> checked_base2_ptr = &derived_val;
EXPECT_TRUE(checked_derived_ptr == checked_derived_ptr);
EXPECT_TRUE(checked_derived_ptr == raw_derived_ptr);
@@ -251,13 +371,12 @@ TEST(CheckedPtr, OperatorEQCast) {
EXPECT_EQ(g_get_for_dereference_cnt, 0);
}
-TEST(CheckedPtr, OperatorNECast) {
- ClearCounters();
+TEST_F(CheckedPtrTest, OperatorNECast) {
int foo = 42;
- int* raw_int_ptr = &foo;
+ volatile int* raw_int_ptr = &foo;
const void* raw_void_ptr = &foo;
CountingCheckedPtr<const int> checked_int_ptr = &foo;
- CountingCheckedPtr<void> checked_void_ptr = &foo;
+ CountingCheckedPtr<volatile void> checked_void_ptr = &foo;
EXPECT_FALSE(checked_int_ptr != checked_int_ptr);
EXPECT_FALSE(checked_int_ptr != raw_int_ptr);
EXPECT_FALSE(raw_int_ptr != checked_int_ptr);
@@ -275,15 +394,16 @@ TEST(CheckedPtr, OperatorNECast) {
EXPECT_EQ(g_get_for_comparison_cnt, 16);
EXPECT_EQ(g_get_for_extraction_cnt, 0);
EXPECT_EQ(g_get_for_dereference_cnt, 0);
+}
- ClearCounters();
+TEST_F(CheckedPtrTest, OperatorNECastHierarchy) {
Derived derived_val(42, 84, 1024);
const Derived* raw_derived_ptr = &derived_val;
- Base1* raw_base1_ptr = &derived_val;
+ volatile Base1* raw_base1_ptr = &derived_val;
const Base2* raw_base2_ptr = &derived_val;
- CountingCheckedPtr<Derived> checked_derived_ptr = &derived_val;
+ CountingCheckedPtr<volatile Derived> checked_derived_ptr = &derived_val;
CountingCheckedPtr<const Base1> checked_base1_ptr = &derived_val;
- CountingCheckedPtr<Base2> checked_base2_ptr = &derived_val;
+ CountingCheckedPtr<const volatile Base2> checked_base2_ptr = &derived_val;
EXPECT_FALSE(checked_derived_ptr != checked_derived_ptr);
EXPECT_FALSE(checked_derived_ptr != raw_derived_ptr);
EXPECT_FALSE(raw_derived_ptr != checked_derived_ptr);
@@ -317,7 +437,7 @@ TEST(CheckedPtr, OperatorNECast) {
EXPECT_EQ(g_get_for_dereference_cnt, 0);
}
-TEST(CheckedPtr, Cast) {
+TEST_F(CheckedPtrTest, Cast) {
Derived derived_val(42, 84, 1024);
CheckedPtr<Derived> checked_derived_ptr = &derived_val;
Base1* raw_base1_ptr = checked_derived_ptr;
@@ -359,6 +479,17 @@ TEST(CheckedPtr, Cast) {
EXPECT_EQ(checked_const_derived_ptr->b2, 84);
EXPECT_EQ(checked_const_derived_ptr->d, 1024);
+ volatile Derived* raw_volatile_derived_ptr = checked_derived_ptr2;
+ EXPECT_EQ(raw_volatile_derived_ptr->b1, 42);
+ EXPECT_EQ(raw_volatile_derived_ptr->b2, 84);
+ EXPECT_EQ(raw_volatile_derived_ptr->d, 1024);
+
+ CheckedPtr<volatile Derived> checked_volatile_derived_ptr =
+ raw_volatile_derived_ptr;
+ EXPECT_EQ(checked_volatile_derived_ptr->b1, 42);
+ EXPECT_EQ(checked_volatile_derived_ptr->b2, 84);
+ EXPECT_EQ(checked_volatile_derived_ptr->d, 1024);
+
void* raw_void_ptr = checked_derived_ptr;
CheckedPtr<void> checked_void_ptr = raw_derived_ptr;
CheckedPtr<Derived> checked_derived_ptr3 =
@@ -373,8 +504,7 @@ TEST(CheckedPtr, Cast) {
EXPECT_EQ(checked_derived_ptr4->d, 1024);
}
-TEST(CheckedPtr, CustomSwap) {
- ClearCounters();
+TEST_F(CheckedPtrTest, CustomSwap) {
int foo1, foo2;
CountingCheckedPtr<int> ptr1(&foo1);
CountingCheckedPtr<int> ptr2(&foo2);
@@ -386,8 +516,7 @@ TEST(CheckedPtr, CustomSwap) {
EXPECT_EQ(g_checked_ptr_swap_cnt, 1);
}
-TEST(CheckedPtr, StdSwap) {
- ClearCounters();
+TEST_F(CheckedPtrTest, StdSwap) {
int foo1, foo2;
CountingCheckedPtr<int> ptr1(&foo1);
CountingCheckedPtr<int> ptr2(&foo2);
@@ -397,44 +526,188 @@ TEST(CheckedPtr, StdSwap) {
EXPECT_EQ(g_checked_ptr_swap_cnt, 0);
}
-TEST(CheckedPtr, AdvanceIntArray) {
- // operator++
+TEST_F(CheckedPtrTest, PostIncrementOperator) {
int foo[] = {42, 43, 44, 45};
- CheckedPtr<int> ptr = foo;
- for (int i = 0; i < 4; ++i, ++ptr) {
- ASSERT_EQ(*ptr, 42 + i);
+ CountingCheckedPtr<int> ptr = foo;
+ for (int i = 0; i < 4; ++i) {
+ ASSERT_EQ(*ptr++, 42 + i);
}
- ptr = &foo[1];
- for (int i = 1; i < 4; ++i, ++ptr) {
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 4);
+}
+
+TEST_F(CheckedPtrTest, PostDecrementOperator) {
+ int foo[] = {42, 43, 44, 45};
+ CountingCheckedPtr<int> ptr = &foo[3];
+ for (int i = 3; i >= 0; --i) {
+ ASSERT_EQ(*ptr--, 42 + i);
+ }
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 4);
+}
+
+TEST_F(CheckedPtrTest, PreIncrementOperator) {
+ int foo[] = {42, 43, 44, 45};
+ CountingCheckedPtr<int> ptr = foo;
+ for (int i = 0; i < 4; ++i, ++ptr) {
ASSERT_EQ(*ptr, 42 + i);
}
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 4);
+}
- // operator--
- ptr = &foo[3];
+TEST_F(CheckedPtrTest, PreDecrementOperator) {
+ int foo[] = {42, 43, 44, 45};
+ CountingCheckedPtr<int> ptr = &foo[3];
for (int i = 3; i >= 0; --i, --ptr) {
ASSERT_EQ(*ptr, 42 + i);
}
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 4);
+}
- // operator+=
- ptr = foo;
+TEST_F(CheckedPtrTest, PlusEqualOperator) {
+ int foo[] = {42, 43, 44, 45};
+ CountingCheckedPtr<int> ptr = foo;
for (int i = 0; i < 4; i += 2, ptr += 2) {
ASSERT_EQ(*ptr, 42 + i);
}
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 2);
+}
- // operator-=
- ptr = &foo[3];
+TEST_F(CheckedPtrTest, MinusEqualOperator) {
+ int foo[] = {42, 43, 44, 45};
+ CountingCheckedPtr<int> ptr = &foo[3];
for (int i = 3; i >= 0; i -= 2, ptr -= 2) {
ASSERT_EQ(*ptr, 42 + i);
}
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 2);
}
-TEST(CheckedPtr, AdvanceString) {
+TEST_F(CheckedPtrTest, AdvanceString) {
const char kChars[] = "Hello";
std::string str = kChars;
- CheckedPtr<const char> ptr = str.c_str();
+ CountingCheckedPtr<const char> ptr = str.c_str();
for (size_t i = 0; i < str.size(); ++i, ++ptr) {
ASSERT_EQ(*ptr, kChars[i]);
}
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 5);
+}
+
+TEST_F(CheckedPtrTest, AssignmentFromNullptr) {
+ CountingCheckedPtr<int> checked_ptr;
+ checked_ptr = nullptr;
+ EXPECT_EQ(g_wrap_raw_ptr_cnt, 0);
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
+}
+
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+namespace {
+
+struct CheckedPtr2ImplPartitionAllocSupportEnabled
+ : base::internal::CheckedPtr2ImplPartitionAllocSupport {
+ static bool EnabledForPtr(void* ptr) { return true; }
+};
+
+using CheckedPtr2ImplEnabled = base::internal::CheckedPtr2Impl<
+ CheckedPtr2ImplPartitionAllocSupportEnabled>;
+
+} // namespace
+
+TEST(CheckedPtr2Impl, WrapNull) {
+ ASSERT_EQ(base::internal::CheckedPtr2Impl<>::GetWrappedNullPtr(), 0u);
+ ASSERT_EQ(base::internal::CheckedPtr2Impl<>::WrapRawPtr(nullptr), 0u);
+}
+
+TEST(CheckedPtr2Impl, SafelyUnwrapNull) {
+ ASSERT_EQ(CheckedPtr2ImplEnabled::SafelyUnwrapPtrForExtraction(0), nullptr);
}
+TEST(CheckedPtr2Impl, WrapAndSafelyUnwrap) {
+ char bytes[] = {0x12, 0x23, 0x34, 0x45, 0x56, 0x67, 0xBA, 0x42, 0x78, 0x89};
+#if !CHECKED_PTR2_PROTECTION_ENABLED
+ // If protection is disabled, wrap & unwrap will read at the pointer, not
+ // before it.
+ bytes[8] = bytes[6];
+ bytes[9] = bytes[7];
+#endif
+ void* ptr = bytes + sizeof(uintptr_t);
+ uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
+
+ uintptr_t set_top_bit = 0x0000000000000000;
+ uintptr_t mask = 0xFFFFFFFFFFFFFFFF;
+#if CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
+ set_top_bit = 0x8000000000000000;
+#if !CHECKED_PTR2_PROTECTION_ENABLED
+ mask = 0x0000FFFFFFFFFFFF;
+#endif
+#endif
+
+ uintptr_t wrapped = CheckedPtr2ImplEnabled::WrapRawPtr(ptr);
+ // First 2 bytes in the preceding word will be used as generation (in reverse
+ // order due to little-endianness).
+#if CHECKED_PTR2_USE_NO_OP_WRAPPER
+ ASSERT_EQ(wrapped, addr);
+ std::ignore = set_top_bit;
+ std::ignore = mask;
+#else
+ ASSERT_EQ(wrapped, (addr | 0x42BA000000000000 | set_top_bit) & mask);
+#endif
+ ASSERT_EQ(CheckedPtr2ImplEnabled::SafelyUnwrapPtrInternal(wrapped), addr);
+
+ bytes[7] |= 0x80;
+#if !CHECKED_PTR2_PROTECTION_ENABLED
+ bytes[9] = bytes[7];
+#endif
+ wrapped = CheckedPtr2ImplEnabled::WrapRawPtr(ptr);
+#if CHECKED_PTR2_USE_NO_OP_WRAPPER
+ ASSERT_EQ(wrapped, addr);
+#else
+ ASSERT_EQ(wrapped, (addr | 0xC2BA000000000000 | set_top_bit) & mask);
+#endif
+ ASSERT_EQ(CheckedPtr2ImplEnabled::SafelyUnwrapPtrInternal(wrapped), addr);
+
+#if CHECKED_PTR2_AVOID_BRANCH_WHEN_DEREFERENCING
+ bytes[6] = 0;
+ bytes[7] = 0;
+#if !CHECKED_PTR2_PROTECTION_ENABLED
+ bytes[8] = bytes[6];
+ bytes[9] = bytes[7];
+#endif
+ mask = 0xFFFFFFFFFFFFFFFF;
+#if CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
+ mask = 0x7FFFFFFFFFFFFFFF;
+#if !CHECKED_PTR2_PROTECTION_ENABLED
+ mask = 0x0000FFFFFFFFFFFF;
+#endif
+#endif
+
+ // Mask out the top bit, because in some cases (not all), it may differ.
+ ASSERT_EQ(CheckedPtr2ImplEnabled::SafelyUnwrapPtrInternal(wrapped) & mask,
+ wrapped & mask);
+#endif
+}
+
+TEST(CheckedPtr2Impl, SafelyUnwrapDisabled) {
+ char bytes[] = {0x12, 0x23, 0x34, 0x45, 0x56, 0x67, 0xBA, 0x42, 0x78, 0x89};
+ void* ptr = bytes + sizeof(uintptr_t);
+ uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
+ ASSERT_EQ(CheckedPtr2ImplEnabled::SafelyUnwrapPtrInternal(addr), addr);
+}
+
+#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
} // namespace
diff --git a/chromium/base/memory/discardable_shared_memory.cc b/chromium/base/memory/discardable_shared_memory.cc
index bee394a6e63..7214a801a61 100644
--- a/chromium/base/memory/discardable_shared_memory.cc
+++ b/chromium/base/memory/discardable_shared_memory.cc
@@ -17,8 +17,7 @@
#include "base/memory/shared_memory_tracker.h"
#include "base/numerics/safe_math.h"
#include "base/process/process_metrics.h"
-#include "base/trace_event/memory_allocator_dump.h"
-#include "base/trace_event/process_memory_dump.h"
+#include "base/tracing_buildflags.h"
#include "build/build_config.h"
#if defined(OS_POSIX) && !defined(OS_NACL)
@@ -41,6 +40,11 @@
#include "base/fuchsia/fuchsia_logging.h"
#endif
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/process_memory_dump.h"
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+
namespace base {
namespace {
@@ -483,6 +487,8 @@ void DiscardableSharedMemory::CreateSharedMemoryOwnershipEdge(
trace_event::MemoryAllocatorDump* local_segment_dump,
trace_event::ProcessMemoryDump* pmd,
bool is_owned) const {
+// Memory dumps are only supported when tracing support is enabled.
+#if BUILDFLAG(ENABLE_BASE_TRACING)
auto* shared_memory_dump = SharedMemoryTracker::GetOrCreateSharedMemoryDump(
shared_memory_mapping_, pmd);
// TODO(ssid): Clean this by a new api to inherit size of parent dump once the
@@ -512,6 +518,7 @@ void DiscardableSharedMemory::CreateSharedMemoryOwnershipEdge(
pmd->CreateSharedMemoryOwnershipEdge(local_segment_dump->guid(),
shared_memory_guid, kImportance);
}
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
// static
diff --git a/chromium/base/memory/discardable_shared_memory.h b/chromium/base/memory/discardable_shared_memory.h
index 44d4cab02a5..af06d5d5190 100644
--- a/chromium/base/memory/discardable_shared_memory.h
+++ b/chromium/base/memory/discardable_shared_memory.h
@@ -8,7 +8,7 @@
#include <stddef.h>
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/memory/unsafe_shared_memory_region.h"
diff --git a/chromium/base/memory/discardable_shared_memory_unittest.cc b/chromium/base/memory/discardable_shared_memory_unittest.cc
index 817e105f8de..d567ee973e1 100644
--- a/chromium/base/memory/discardable_shared_memory_unittest.cc
+++ b/chromium/base/memory/discardable_shared_memory_unittest.cc
@@ -9,11 +9,15 @@
#include "base/memory/discardable_shared_memory.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/process/process_metrics.h"
-#include "base/trace_event/memory_allocator_dump.h"
-#include "base/trace_event/process_memory_dump.h"
+#include "base/tracing_buildflags.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/process_memory_dump.h"
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+
namespace base {
class TestDiscardableSharedMemory : public DiscardableSharedMemory {
@@ -450,6 +454,7 @@ TEST(DiscardableSharedMemoryTest, ZeroFilledPagesAfterPurge) {
}
#endif
+#if BUILDFLAG(ENABLE_BASE_TRACING)
TEST(DiscardableSharedMemoryTest, TracingOwnershipEdges) {
const uint32_t kDataSize = 1024;
TestDiscardableSharedMemory memory1;
@@ -474,5 +479,6 @@ TEST(DiscardableSharedMemoryTest, TracingOwnershipEdges) {
// TODO(ssid): test for weak global dump once the
// CreateWeakSharedMemoryOwnershipEdge() is fixed, crbug.com/661257.
}
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
} // namespace base
diff --git a/chromium/base/memory/madv_free_discardable_memory_allocator_posix.cc b/chromium/base/memory/madv_free_discardable_memory_allocator_posix.cc
index 3587db93627..a98057cf2d4 100644
--- a/chromium/base/memory/madv_free_discardable_memory_allocator_posix.cc
+++ b/chromium/base/memory/madv_free_discardable_memory_allocator_posix.cc
@@ -10,12 +10,17 @@
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_task_runner_handle.h"
+#include "base/tracing_buildflags.h"
+
+#if BUILDFLAG(ENABLE_BASE_TRACING)
#include "base/trace_event/memory_dump_manager.h"
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
namespace base {
MadvFreeDiscardableMemoryAllocatorPosix::
MadvFreeDiscardableMemoryAllocatorPosix() {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
// Don't register dump provider if ThreadTaskRunnerHandle is not set, such as
// in tests and Android Webview.
if (base::ThreadTaskRunnerHandle::IsSet()) {
@@ -23,11 +28,14 @@ MadvFreeDiscardableMemoryAllocatorPosix::
this, "MadvFreeDiscardableMemoryAllocator",
ThreadTaskRunnerHandle::Get());
}
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
MadvFreeDiscardableMemoryAllocatorPosix::
~MadvFreeDiscardableMemoryAllocatorPosix() {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
std::unique_ptr<DiscardableMemory>
@@ -44,6 +52,7 @@ size_t MadvFreeDiscardableMemoryAllocatorPosix::GetBytesAllocated() const {
bool MadvFreeDiscardableMemoryAllocatorPosix::OnMemoryDump(
const trace_event::MemoryDumpArgs& args,
trace_event::ProcessMemoryDump* pmd) {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
if (args.level_of_detail !=
base::trace_event::MemoryDumpLevelOfDetail::BACKGROUND) {
return true;
@@ -55,6 +64,9 @@ bool MadvFreeDiscardableMemoryAllocatorPosix::OnMemoryDump(
base::trace_event::MemoryAllocatorDump::kUnitsBytes,
GetBytesAllocated());
return true;
+#else // BUILDFLAG(ENABLE_BASE_TRACING)
+ return false;
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
} // namespace base
diff --git a/chromium/base/memory/madv_free_discardable_memory_allocator_posix.h b/chromium/base/memory/madv_free_discardable_memory_allocator_posix.h
index f4c33c59270..c569ca7a835 100644
--- a/chromium/base/memory/madv_free_discardable_memory_allocator_posix.h
+++ b/chromium/base/memory/madv_free_discardable_memory_allocator_posix.h
@@ -13,12 +13,11 @@
#include "base/base_export.h"
#include "base/bind.h"
#include "base/callback.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_allocator.h"
#include "base/memory/madv_free_discardable_memory_posix.h"
-#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
namespace base {
diff --git a/chromium/base/memory/madv_free_discardable_memory_allocator_posix_unittest.cc b/chromium/base/memory/madv_free_discardable_memory_allocator_posix_unittest.cc
index 50c30e67bee..0c7a53e1760 100644
--- a/chromium/base/memory/madv_free_discardable_memory_allocator_posix_unittest.cc
+++ b/chromium/base/memory/madv_free_discardable_memory_allocator_posix_unittest.cc
@@ -11,11 +11,15 @@
#include "base/memory/madv_free_discardable_memory_allocator_posix.h"
#include "base/memory/madv_free_discardable_memory_posix.h"
#include "base/process/process_metrics.h"
-#include "base/trace_event/memory_allocator_dump.h"
-#include "base/trace_event/process_memory_dump.h"
+#include "base/tracing_buildflags.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/process_memory_dump.h"
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+
#define SUCCEED_IF_MADV_FREE_UNSUPPORTED() \
do { \
if (GetMadvFreeSupport() != base::MadvFreeSupport::kSupported) { \
@@ -31,9 +35,11 @@ namespace base {
class MadvFreeDiscardableMemoryAllocatorPosixTest : public ::testing::Test {
protected:
MadvFreeDiscardableMemoryAllocatorPosixTest() {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
base::trace_event::MemoryDumpArgs dump_args = {
base::trace_event::MemoryDumpLevelOfDetail::DETAILED};
pmd_ = std::make_unique<base::trace_event::ProcessMemoryDump>(dump_args);
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
std::unique_ptr<MadvFreeDiscardableMemoryPosix>
@@ -43,15 +49,19 @@ class MadvFreeDiscardableMemoryAllocatorPosixTest : public ::testing::Test {
allocator_.AllocateLockedDiscardableMemory(size).release()));
}
+#if BUILDFLAG(ENABLE_BASE_TRACING)
size_t GetDiscardableMemorySizeFromDump(const DiscardableMemory& mem,
const std::string& dump_id) {
return mem.CreateMemoryAllocatorDump(dump_id.c_str(), pmd_.get())
->GetSizeInternal();
}
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
MadvFreeDiscardableMemoryAllocatorPosix allocator_;
- std::unique_ptr<base::trace_event::ProcessMemoryDump> pmd_;
const size_t kPageSize = base::GetPageSize();
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+ std::unique_ptr<base::trace_event::ProcessMemoryDump> pmd_;
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
};
TEST_F(MadvFreeDiscardableMemoryAllocatorPosixTest, AllocateAndUseMemory) {
@@ -61,8 +71,10 @@ TEST_F(MadvFreeDiscardableMemoryAllocatorPosixTest, AllocateAndUseMemory) {
auto mem1 = AllocateLockedMadvFreeDiscardableMemory(kPageSize * 3 + 1);
EXPECT_TRUE(mem1->IsLockedForTesting());
+#if BUILDFLAG(ENABLE_BASE_TRACING)
EXPECT_EQ(GetDiscardableMemorySizeFromDump(*mem1, "dummy_dump_1"),
kPageSize * 3 + 1);
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
EXPECT_EQ(allocator_.GetBytesAllocated(), kPageSize * 3 + 1);
// Allocate 3 pages of discardable memory, and free the previously allocated
@@ -70,8 +82,10 @@ TEST_F(MadvFreeDiscardableMemoryAllocatorPosixTest, AllocateAndUseMemory) {
auto mem2 = AllocateLockedMadvFreeDiscardableMemory(kPageSize * 3);
EXPECT_TRUE(mem2->IsLockedForTesting());
+#if BUILDFLAG(ENABLE_BASE_TRACING)
EXPECT_EQ(GetDiscardableMemorySizeFromDump(*mem2, "dummy_dump_2"),
kPageSize * 3);
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
EXPECT_EQ(allocator_.GetBytesAllocated(), kPageSize * 6 + 1);
mem1.reset();
diff --git a/chromium/base/memory/madv_free_discardable_memory_posix.cc b/chromium/base/memory/madv_free_discardable_memory_posix.cc
index 0950964ed1a..ed89d6ea773 100644
--- a/chromium/base/memory/madv_free_discardable_memory_posix.cc
+++ b/chromium/base/memory/madv_free_discardable_memory_posix.cc
@@ -13,13 +13,18 @@
#include "base/atomicops.h"
#include "base/bits.h"
#include "base/callback.h"
+#include "base/logging.h"
#include "base/memory/madv_free_discardable_memory_allocator_posix.h"
#include "base/memory/madv_free_discardable_memory_posix.h"
#include "base/process/process_metrics.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
+#include "base/tracing_buildflags.h"
+
+#if BUILDFLAG(ENABLE_BASE_TRACING)
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/memory_dump_manager.h"
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
#if defined(ADDRESS_SANITIZER)
#include <sanitizer/asan_interface.h>
@@ -223,6 +228,7 @@ trace_event::MemoryAllocatorDump*
MadvFreeDiscardableMemoryPosix::CreateMemoryAllocatorDump(
const char* name,
trace_event::ProcessMemoryDump* pmd) const {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
DFAKE_SCOPED_LOCK(thread_collision_warner_);
using base::trace_event::MemoryAllocatorDump;
@@ -267,6 +273,10 @@ MadvFreeDiscardableMemoryPosix::CreateMemoryAllocatorDump(
pmd->AddSuballocation(dump->guid(), allocator_dump_name);
return dump;
+#else // BUILDFLAG(ENABLE_BASE_TRACING)
+ NOTREACHED();
+ return nullptr;
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
bool MadvFreeDiscardableMemoryPosix::IsValid() const {
diff --git a/chromium/base/memory/madv_free_discardable_memory_posix.h b/chromium/base/memory/madv_free_discardable_memory_posix.h
index c482a9866b6..e7875188822 100644
--- a/chromium/base/memory/madv_free_discardable_memory_posix.h
+++ b/chromium/base/memory/madv_free_discardable_memory_posix.h
@@ -12,7 +12,7 @@
#include "base/base_export.h"
#include "base/callback.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "base/memory/discardable_memory.h"
#include "base/sequence_checker.h"
diff --git a/chromium/base/memory/madv_free_discardable_memory_posix_unittest.cc b/chromium/base/memory/madv_free_discardable_memory_posix_unittest.cc
index 731411d4980..a5507b86fc9 100644
--- a/chromium/base/memory/madv_free_discardable_memory_posix_unittest.cc
+++ b/chromium/base/memory/madv_free_discardable_memory_posix_unittest.cc
@@ -15,8 +15,6 @@
#include "base/memory/madv_free_discardable_memory_allocator_posix.h"
#include "base/memory/madv_free_discardable_memory_posix.h"
#include "base/process/process_metrics.h"
-#include "base/trace_event/memory_allocator_dump.h"
-#include "base/trace_event/process_memory_dump.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/chromium/base/memory/memory_pressure_listener.cc b/chromium/base/memory/memory_pressure_listener.cc
index 1b97fd8cac6..87cdd8c962a 100644
--- a/chromium/base/memory/memory_pressure_listener.cc
+++ b/chromium/base/memory/memory_pressure_listener.cc
@@ -5,7 +5,7 @@
#include "base/memory/memory_pressure_listener.h"
#include "base/observer_list_threadsafe.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
namespace base {
@@ -60,17 +60,20 @@ subtle::Atomic32 g_notifications_suppressed = 0;
} // namespace
MemoryPressureListener::MemoryPressureListener(
+ const base::Location& creation_location,
const MemoryPressureListener::MemoryPressureCallback& callback)
- : callback_(callback) {
+ : callback_(callback), creation_location_(creation_location) {
GetMemoryPressureObserver()->AddObserver(this, false);
}
MemoryPressureListener::MemoryPressureListener(
+ const base::Location& creation_location,
const MemoryPressureListener::MemoryPressureCallback& callback,
const MemoryPressureListener::SyncMemoryPressureCallback&
sync_memory_pressure_callback)
: callback_(callback),
- sync_memory_pressure_callback_(sync_memory_pressure_callback) {
+ sync_memory_pressure_callback_(sync_memory_pressure_callback),
+ creation_location_(creation_location) {
GetMemoryPressureObserver()->AddObserver(this, true);
}
@@ -79,6 +82,9 @@ MemoryPressureListener::~MemoryPressureListener() {
}
void MemoryPressureListener::Notify(MemoryPressureLevel memory_pressure_level) {
+ TRACE_EVENT2("base", "MemoryPressureListener::Notify",
+ "listener_creation_info", creation_location_.ToString(), "level",
+ memory_pressure_level);
callback_.Run(memory_pressure_level);
}
diff --git a/chromium/base/memory/memory_pressure_listener.h b/chromium/base/memory/memory_pressure_listener.h
index 084ddd54208..bfa374719f2 100644
--- a/chromium/base/memory/memory_pressure_listener.h
+++ b/chromium/base/memory/memory_pressure_listener.h
@@ -12,6 +12,7 @@
#include "base/base_export.h"
#include "base/callback.h"
+#include "base/location.h"
#include "base/macros.h"
namespace base {
@@ -67,9 +68,11 @@ class BASE_EXPORT MemoryPressureListener {
using SyncMemoryPressureCallback =
RepeatingCallback<void(MemoryPressureLevel)>;
- explicit MemoryPressureListener(
+ MemoryPressureListener(
+ const base::Location& creation_location,
const MemoryPressureCallback& memory_pressure_callback);
MemoryPressureListener(
+ const base::Location& creation_location,
const MemoryPressureCallback& memory_pressure_callback,
const SyncMemoryPressureCallback& sync_memory_pressure_callback);
@@ -95,6 +98,8 @@ class BASE_EXPORT MemoryPressureListener {
MemoryPressureCallback callback_;
SyncMemoryPressureCallback sync_memory_pressure_callback_;
+ const base::Location creation_location_;
+
DISALLOW_COPY_AND_ASSIGN(MemoryPressureListener);
};
diff --git a/chromium/base/memory/memory_pressure_listener_unittest.cc b/chromium/base/memory/memory_pressure_listener_unittest.cc
index f1c0006ab40..3df98487a52 100644
--- a/chromium/base/memory/memory_pressure_listener_unittest.cc
+++ b/chromium/base/memory/memory_pressure_listener_unittest.cc
@@ -19,8 +19,9 @@ class MemoryPressureListenerTest : public testing::Test {
: task_environment_(test::TaskEnvironment::MainThreadType::UI) {}
void SetUp() override {
- listener_ = std::make_unique<MemoryPressureListener>(BindRepeating(
- &MemoryPressureListenerTest::OnMemoryPressure, Unretained(this)));
+ listener_ = std::make_unique<MemoryPressureListener>(
+ FROM_HERE, BindRepeating(&MemoryPressureListenerTest::OnMemoryPressure,
+ Unretained(this)));
}
void TearDown() override {
diff --git a/chromium/base/memory/platform_shared_memory_region.cc b/chromium/base/memory/platform_shared_memory_region.cc
index 944b12cb297..964844adff6 100644
--- a/chromium/base/memory/platform_shared_memory_region.cc
+++ b/chromium/base/memory/platform_shared_memory_region.cc
@@ -4,6 +4,7 @@
#include "base/memory/platform_shared_memory_region.h"
+#include "base/memory/aligned_memory.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/memory/shared_memory_security_policy.h"
#include "base/metrics/histogram_functions.h"
@@ -15,7 +16,7 @@ namespace subtle {
namespace {
void RecordMappingWasBlockedHistogram(bool blocked) {
- base::UmaHistogramBoolean("SharedMemory.MapBlockedForSecurity", blocked);
+ UmaHistogramBoolean("SharedMemory.MapBlockedForSecurity", blocked);
}
} // namespace
@@ -62,14 +63,13 @@ bool PlatformSharedMemoryRegion::MapAt(off_t offset,
if (!SharedMemorySecurityPolicy::AcquireReservationForMapping(size)) {
RecordMappingWasBlockedHistogram(/*blocked=*/true);
return false;
- } else {
- RecordMappingWasBlockedHistogram(/*blocked=*/false);
}
+ RecordMappingWasBlockedHistogram(/*blocked=*/false);
+
bool success = MapAtInternal(offset, size, memory, mapped_size);
if (success) {
- DCHECK_EQ(
- 0U, reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
+ DCHECK(IsAligned(*memory, kMapMinimumAlignment));
} else {
SharedMemorySecurityPolicy::ReleaseReservationForMapping(size);
}
diff --git a/chromium/base/memory/platform_shared_memory_region_android.cc b/chromium/base/memory/platform_shared_memory_region_android.cc
index 812ad67c85f..b862a115bc1 100644
--- a/chromium/base/memory/platform_shared_memory_region_android.cc
+++ b/chromium/base/memory/platform_shared_memory_region_android.cc
@@ -7,6 +7,7 @@
#include <sys/mman.h>
#include "base/bits.h"
+#include "base/logging.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/metrics/histogram_macros.h"
#include "base/posix/eintr_wrapper.h"
diff --git a/chromium/base/memory/platform_shared_memory_region_unittest.cc b/chromium/base/memory/platform_shared_memory_region_unittest.cc
index 10e8fe0db65..6f099f6d522 100644
--- a/chromium/base/memory/platform_shared_memory_region_unittest.cc
+++ b/chromium/base/memory/platform_shared_memory_region_unittest.cc
@@ -21,6 +21,7 @@
#include "base/debug/proc_maps_linux.h"
#elif defined(OS_WIN)
#include <windows.h>
+#include "base/logging.h"
#elif defined(OS_FUCHSIA)
#include <lib/zx/object.h>
#include <lib/zx/process.h>
diff --git a/chromium/base/memory/ref_counted.h b/chromium/base/memory/ref_counted.h
index 4ef63e85794..c9cad910f49 100644
--- a/chromium/base/memory/ref_counted.h
+++ b/chromium/base/memory/ref_counted.h
@@ -11,9 +11,9 @@
#include "base/atomic_ref_count.h"
#include "base/base_export.h"
+#include "base/check_op.h"
#include "base/compiler_specific.h"
#include "base/gtest_prod_util.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/scoped_refptr.h"
#include "base/sequence_checker.h"
diff --git a/chromium/base/memory/ref_counted_delete_on_sequence.h b/chromium/base/memory/ref_counted_delete_on_sequence.h
index 4a8ac744a48..f5f53c022b9 100644
--- a/chromium/base/memory/ref_counted_delete_on_sequence.h
+++ b/chromium/base/memory/ref_counted_delete_on_sequence.h
@@ -7,8 +7,8 @@
#include <utility>
+#include "base/check.h"
#include "base/location.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/sequenced_task_runner.h"
diff --git a/chromium/base/memory/scoped_refptr.h b/chromium/base/memory/scoped_refptr.h
index 238b61a7363..a746f95c010 100644
--- a/chromium/base/memory/scoped_refptr.h
+++ b/chromium/base/memory/scoped_refptr.h
@@ -11,8 +11,8 @@
#include <type_traits>
#include <utility>
+#include "base/check.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
#include "base/macros.h"
template <class T>
diff --git a/chromium/base/memory/shared_memory_tracker.cc b/chromium/base/memory/shared_memory_tracker.cc
index 8d1ac504a74..79383838d80 100644
--- a/chromium/base/memory/shared_memory_tracker.cc
+++ b/chromium/base/memory/shared_memory_tracker.cc
@@ -4,10 +4,16 @@
#include "base/memory/shared_memory_tracker.h"
+#include "base/check.h"
+#include "base/notreached.h"
#include "base/strings/string_number_conversions.h"
-#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/trace_event/base_tracing.h"
+#include "base/tracing_buildflags.h"
+
+#if BUILDFLAG(ENABLE_BASE_TRACING)
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/process_memory_dump.h"
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
namespace base {
@@ -58,8 +64,10 @@ void SharedMemoryTracker::DecrementMemoryUsage(
}
SharedMemoryTracker::SharedMemoryTracker() {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
this, "SharedMemoryTracker", nullptr);
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
SharedMemoryTracker::~SharedMemoryTracker() = default;
@@ -83,6 +91,7 @@ SharedMemoryTracker::GetOrCreateSharedMemoryDumpInternal(
size_t mapped_size,
const UnguessableToken& mapped_id,
trace_event::ProcessMemoryDump* pmd) {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
const std::string dump_name = GetDumpNameForTracing(mapped_id);
trace_event::MemoryAllocatorDump* local_dump =
pmd->GetAllocatorDump(dump_name);
@@ -116,6 +125,10 @@ SharedMemoryTracker::GetOrCreateSharedMemoryDumpInternal(
pmd->AddOverridableOwnershipEdge(local_dump->guid(), global_dump->guid(),
0 /* importance */);
return local_dump;
+#else // BUILDFLAG(ENABLE_BASE_TRACING)
+ NOTREACHED();
+ return nullptr;
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
} // namespace
diff --git a/chromium/base/memory/shared_memory_tracker.h b/chromium/base/memory/shared_memory_tracker.h
index 9df089edfbd..e0ae3a9b13d 100644
--- a/chromium/base/memory/shared_memory_tracker.h
+++ b/chromium/base/memory/shared_memory_tracker.h
@@ -10,7 +10,7 @@
#include "base/memory/shared_memory_mapping.h"
#include "base/synchronization/lock.h"
-#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/base_tracing.h"
namespace base {
diff --git a/chromium/base/memory/singleton.h b/chromium/base/memory/singleton.h
index 87b57919c07..cd39b21f243 100644
--- a/chromium/base/memory/singleton.h
+++ b/chromium/base/memory/singleton.h
@@ -30,8 +30,8 @@
#include "base/at_exit.h"
#include "base/atomicops.h"
#include "base/base_export.h"
+#include "base/check_op.h"
#include "base/lazy_instance_helpers.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/threading/thread_restrictions.h"
diff --git a/chromium/base/memory/singleton_unittest.cc b/chromium/base/memory/singleton_unittest.cc
index 06e53b24cd8..be2253f27f0 100644
--- a/chromium/base/memory/singleton_unittest.cc
+++ b/chromium/base/memory/singleton_unittest.cc
@@ -5,6 +5,7 @@
#include <stdint.h>
#include "base/at_exit.h"
+#include "base/memory/aligned_memory.h"
#include "base/memory/singleton.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -156,10 +157,15 @@ void SingletonStatic(CallbackFunc CallOnQuit) {
}
CallbackFunc* GetStaticSingleton() {
- return &CallbackSingletonWithStaticTrait::GetInstance()->callback_;
+ CallbackSingletonWithStaticTrait* instance =
+ CallbackSingletonWithStaticTrait::GetInstance();
+ if (instance == nullptr) {
+ return nullptr;
+ } else {
+ return &instance->callback_;
+ }
}
-
class SingletonTest : public testing::Test {
public:
SingletonTest() = default;
@@ -273,9 +279,6 @@ TEST_F(SingletonTest, Basic) {
VerifiesCallbacksNotCalled();
}
-#define EXPECT_ALIGNED(ptr, align) \
- EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
-
TEST_F(SingletonTest, Alignment) {
// Create some static singletons with increasing sizes and alignment
// requirements. By ordering this way, the linker will need to do some work to
@@ -289,10 +292,10 @@ TEST_F(SingletonTest, Alignment) {
AlignedTestSingleton<AlignedData<4096>>* align4096 =
AlignedTestSingleton<AlignedData<4096>>::GetInstance();
- EXPECT_ALIGNED(align4, 4);
- EXPECT_ALIGNED(align32, 32);
- EXPECT_ALIGNED(align128, 128);
- EXPECT_ALIGNED(align4096, 4096);
+ EXPECT_TRUE(IsAligned(align4, 4));
+ EXPECT_TRUE(IsAligned(align32, 32));
+ EXPECT_TRUE(IsAligned(align128, 128));
+ EXPECT_TRUE(IsAligned(align4096, 4096));
}
} // namespace
diff --git a/chromium/base/memory/weak_ptr.h b/chromium/base/memory/weak_ptr.h
index d2749871681..42aa3412c5e 100644
--- a/chromium/base/memory/weak_ptr.h
+++ b/chromium/base/memory/weak_ptr.h
@@ -73,7 +73,7 @@
#include <type_traits>
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/check.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/sequence_checker.h"