Diffstat (limited to 'chromium/base/allocator')
-rw-r--r--  chromium/base/allocator/BUILD.gn | 14
-rw-r--r--  chromium/base/allocator/allocator.gni | 6
-rw-r--r--  chromium/base/allocator/allocator_shim.cc | 10
-rw-r--r--  chromium/base/allocator/allocator_shim_default_dispatch_to_partition_alloc.cc | 110
-rw-r--r--  chromium/base/allocator/malloc_zone_functions_mac.h | 2
-rw-r--r--  chromium/base/allocator/partition_allocator/PartitionAlloc.md | 39
-rw-r--r--  chromium/base/allocator/partition_allocator/address_pool_manager.cc | 223
-rw-r--r--  chromium/base/allocator/partition_allocator/address_pool_manager.h | 100
-rw-r--r--  chromium/base/allocator/partition_allocator/address_pool_manager_unittest.cc | 150
-rw-r--r--  chromium/base/allocator/partition_allocator/address_space_randomization.cc | 3
-rw-r--r--  chromium/base/allocator/partition_allocator/memory_reclaimer.cc | 79
-rw-r--r--  chromium/base/allocator/partition_allocator/memory_reclaimer.h | 29
-rw-r--r--  chromium/base/allocator/partition_allocator/memory_reclaimer_unittest.cc | 52
-rw-r--r--  chromium/base/allocator/partition_allocator/oom.h | 1
-rw-r--r--  chromium/base/allocator/partition_allocator/oom_callback.cc | 3
-rw-r--r--  chromium/base/allocator/partition_allocator/page_allocator.cc | 71
-rw-r--r--  chromium/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h | 7
-rw-r--r--  chromium/base/allocator/partition_allocator/page_allocator_internals_posix.h | 22
-rw-r--r--  chromium/base/allocator/partition_allocator/page_allocator_internals_win.h | 11
-rw-r--r--  chromium/base/allocator/partition_allocator/page_allocator_unittest.cc | 23
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_address_space.cc | 69
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_address_space.h | 151
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_alloc.cc | 325
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_alloc.h | 750
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_alloc_check.h | 36
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_alloc_constants.h | 77
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_alloc_features.cc | 16
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_alloc_features.h | 34
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_alloc_forward.h | 6
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_alloc_perftest.cc | 8
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc | 1103
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_bucket.cc | 161
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_bucket.h | 11
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_cookie.h | 57
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_direct_map_extent.h | 5
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_page.cc | 76
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_page.h | 96
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_root_base.cc | 50
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_root_base.h | 381
39 files changed, 2635 insertions, 1732 deletions
diff --git a/chromium/base/allocator/BUILD.gn b/chromium/base/allocator/BUILD.gn
index eb808ba556c..c6649c53e82 100644
--- a/chromium/base/allocator/BUILD.gn
+++ b/chromium/base/allocator/BUILD.gn
@@ -276,12 +276,14 @@ if (use_allocator == "tcmalloc") {
buildflag_header("buildflags") {
header = "buildflags.h"
- flags = [ "USE_ALLOCATOR_SHIM=$use_allocator_shim" ]
- if (use_allocator == "tcmalloc") {
- flags += [ "USE_TCMALLOC=1" ]
- } else {
- flags += [ "USE_TCMALLOC=0" ]
- }
+ _use_partition_alloc = use_allocator == "partition"
+ _use_tcmalloc = use_allocator == "tcmalloc"
+
+ flags = [
+ "USE_ALLOCATOR_SHIM=$use_allocator_shim",
+ "USE_TCMALLOC=$_use_tcmalloc",
+ "USE_PARTITION_ALLOC_AS_MALLOC=$_use_partition_alloc",
+ ]
}
# Used to shim malloc symbols on Android. see //base/allocator/README.md.
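A quick illustration of how the flags generated above are consumed (not part of this commit; the include path and BUILDFLAG() macro follow Chromium's standard buildflag_header() convention):

  #include "base/allocator/buildflags.h"

  #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // malloc()/free() are routed to PartitionAlloc through the allocator shim.
  #elif BUILDFLAG(USE_TCMALLOC)
  // tcmalloc remains the default allocator.
  #endif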
diff --git a/chromium/base/allocator/allocator.gni b/chromium/base/allocator/allocator.gni
index 148e37d9106..8e23e49c162 100644
--- a/chromium/base/allocator/allocator.gni
+++ b/chromium/base/allocator/allocator.gni
@@ -33,7 +33,11 @@ declare_args() {
use_allocator_shim = _default_use_allocator_shim
}
-assert(use_allocator == "none" || use_allocator == "tcmalloc")
+assert(use_allocator == "none" || use_allocator == "tcmalloc" ||
+ use_allocator == "partition")
+
+# Don't ship this configuration; it is not ready yet.
+assert(!(use_allocator == "partition" && is_official_build))
assert(!is_win || use_allocator == "none", "Tcmalloc doesn't work on Windows.")
assert(!is_mac || use_allocator == "none", "Tcmalloc doesn't work on macOS.")
diff --git a/chromium/base/allocator/allocator_shim.cc b/chromium/base/allocator/allocator_shim.cc
index 320bca7e168..f19ad6f94d5 100644
--- a/chromium/base/allocator/allocator_shim.cc
+++ b/chromium/base/allocator/allocator_shim.cc
@@ -67,16 +67,8 @@ bool CallNewHandler(size_t size) {
}
inline const base::allocator::AllocatorDispatch* GetChainHead() {
- // TODO(primiano): Just use NoBarrier_Load once crbug.com/593344 is fixed.
- // Unfortunately due to that bug NoBarrier_Load() is mistakenly fully
- // barriered on Linux+Clang, and that causes visible perf regressons.
return reinterpret_cast<const base::allocator::AllocatorDispatch*>(
-#if defined(OS_LINUX) && defined(__clang__)
- *static_cast<const volatile base::subtle::AtomicWord*>(&g_chain_head)
-#else
- base::subtle::NoBarrier_Load(&g_chain_head)
-#endif
- );
+ base::subtle::NoBarrier_Load(&g_chain_head));
}
} // namespace
diff --git a/chromium/base/allocator/allocator_shim_default_dispatch_to_partition_alloc.cc b/chromium/base/allocator/allocator_shim_default_dispatch_to_partition_alloc.cc
new file mode 100644
index 00000000000..07f27d8fd4e
--- /dev/null
+++ b/chromium/base/allocator/allocator_shim_default_dispatch_to_partition_alloc.cc
@@ -0,0 +1,110 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/allocator_shim.h"
+#include "base/allocator/allocator_shim_internals.h"
+#include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/bits.h"
+#include "base/no_destructor.h"
+
+namespace {
+
+base::ThreadSafePartitionRoot& Allocator() {
+ static base::NoDestructor<base::ThreadSafePartitionRoot> allocator;
+ allocator->Init();
+ return *allocator;
+}
+
+using base::allocator::AllocatorDispatch;
+
+void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) {
+ return Allocator().Alloc(size, "");
+}
+
+void* PartitionCalloc(const AllocatorDispatch*,
+ size_t n,
+ size_t size,
+ void* context) {
+ return Allocator().AllocFlags(base::PartitionAllocZeroFill, n * size, "");
+}
+
+void* PartitionMemalign(const AllocatorDispatch*,
+ size_t alignment,
+ size_t size,
+ void* context) {
+ // This is mandated by |posix_memalign()|, so should never fire.
+ //
+ // Note: CHECK() is fine here since we are not called from malloc(), but from
+ // posix_memalign(), so there is no recursion. It is also fine to make aligned
+ // allocations slower, as they are rare.
+ CHECK(base::bits::IsPowerOfTwo(alignment));
+
+ // PartitionAlloc only guarantees alignment for power-of-two sized
+ // allocations. To make sure this applies here, round up the allocation size.
+ size_t size_rounded_up =
+ static_cast<size_t>(1)
+ << (sizeof(size_t) * 8 - base::bits::CountLeadingZeroBits(size - 1));
+
+ void* ptr = Allocator().Alloc(size_rounded_up, "");
+ CHECK_EQ(reinterpret_cast<uintptr_t>(ptr) % alignment, 0ull);
+
+ return ptr;
+}
+
+void* PartitionRealloc(const AllocatorDispatch*,
+ void* address,
+ size_t size,
+ void* context) {
+ return Allocator().Realloc(address, size, "");
+}
+
+void PartitionFree(const AllocatorDispatch*, void* address, void* context) {
+ Allocator().Free(address);
+}
+
+size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
+ void* address,
+ void* context) {
+ // TODO(lizeb): Returns incorrect values for aligned allocations.
+ return base::PartitionAllocGetSize<base::internal::ThreadSafe>(address);
+}
+
+} // namespace
+
+constexpr AllocatorDispatch AllocatorDispatch::default_dispatch = {
+ &PartitionMalloc, /* alloc_function */
+ &PartitionCalloc, /* alloc_zero_initialized_function */
+ &PartitionMemalign, /* alloc_aligned_function */
+ &PartitionRealloc, /* realloc_function */
+ &PartitionFree, /* free_function */
+ &PartitionGetSizeEstimate, /* get_size_estimate_function */
+ nullptr, /* batch_malloc_function */
+ nullptr, /* batch_free_function */
+ nullptr, /* free_definite_size_function */
+ nullptr, /* aligned_malloc_function */
+ nullptr, /* aligned_realloc_function */
+ nullptr, /* aligned_free_function */
+ nullptr, /* next */
+};
+
+// Intercept diagnostics symbols as well, even though they are not part of the
+// unified shim layer.
+//
+// TODO(lizeb): Implement the ones that are doable.
+
+extern "C" {
+
+SHIM_ALWAYS_EXPORT void malloc_stats(void) __THROW {}
+
+SHIM_ALWAYS_EXPORT int mallopt(int cmd, int value) __THROW {
+ return 0;
+}
+
+#ifdef HAVE_STRUCT_MALLINFO
+SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
+ return {};
+}
+#endif
+
+} // extern "C"
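As a sketch of how the dispatch table above is used (illustrative, not part of this commit): other layers can observe or alter allocations by inserting their own AllocatorDispatch ahead of the default one and forwarding to |next|, e.g. via InsertAllocatorDispatch() from base/allocator/allocator_shim.h:

  // Hypothetical pass-through hook; forwards to the next dispatch in the
  // chain, which here would be the PartitionAlloc-backed default dispatch.
  void* LoggingMalloc(const AllocatorDispatch* self, size_t size, void* context) {
    return self->next->alloc_function(self->next, size, context);
  }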
diff --git a/chromium/base/allocator/malloc_zone_functions_mac.h b/chromium/base/allocator/malloc_zone_functions_mac.h
index a7f55433785..1f2d990e4e1 100644
--- a/chromium/base/allocator/malloc_zone_functions_mac.h
+++ b/chromium/base/allocator/malloc_zone_functions_mac.h
@@ -9,7 +9,7 @@
#include <stddef.h>
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/immediate_crash.h"
#include "third_party/apple_apsl/malloc.h"
namespace base {
diff --git a/chromium/base/allocator/partition_allocator/PartitionAlloc.md b/chromium/base/allocator/partition_allocator/PartitionAlloc.md
index d7c283c7465..8f025b91d24 100644
--- a/chromium/base/allocator/partition_allocator/PartitionAlloc.md
+++ b/chromium/base/allocator/partition_allocator/PartitionAlloc.md
@@ -27,12 +27,6 @@ example, if a partition has 3 buckets for 64 bytes, 256 bytes, and 1024 bytes,
then PartitionAlloc will satisfy an allocation request for 128 bytes by rounding
it up to 256 bytes and allocating from the second bucket.
-The special allocator class `template <size_t N> class
-SizeSpecificPartitionAllocator` will satisfy allocations only of size
-`kMaxAllocation = N - kAllocationGranularity` or less, and contains buckets for
-all `n * kAllocationGranularity` (n = 1, 2, ..., `kMaxAllocation`). Attempts to
-allocate more than `kMaxAllocation` will fail.
-
## Performance
The current implementation is optimized for the main thread use-case. For
@@ -52,16 +46,16 @@ bucket size is 1MB = 0x100000 which is greater than 1/2 the available space in
a SuperPage meaning it would not be possible to pack even 2 sequential
allocations in a SuperPage.
-`PartitionRootGeneric::Alloc()` acquires a lock for thread safety. (The current
-implementation uses a spin lock on the assumption that thread contention will be
-rare in its callers. The original caller was Blink, where this is generally
-true. Spin locks also have the benefit of simplicity.)
+`PartitionRoot<internal::ThreadSafe>::Alloc()` acquires a lock for thread
+safety. (The current implementation uses a spin lock on the assumption that
+thread contention will be rare in its callers. The original caller was Blink,
+where this is generally true. Spin locks also have the benefit of simplicity.)
Callers can get thread-unsafe performance using a
-`SizeSpecificPartitionAllocator` or otherwise using `PartitionAlloc` (instead of
-`PartitionRootGeneric::Alloc()`). Callers can also arrange for low contention,
-such as by using a dedicated partition for single-threaded, latency-critical
-allocations.
+`PartitionRoot<internal::NotThreadSafe>::Alloc()` or otherwise using
+`PartitionAlloc<internal::NotThreadSafe>`. Callers can also arrange for low
+contention, such as by using a dedicated partition for single-threaded,
+latency-critical allocations.
Because PartitionAlloc guarantees that address space regions used for one
partition are never reused for other partitions, partitions can eat a large
@@ -100,3 +94,20 @@ hence at different addresses. One page can contain only similar-sized objects.
* Partial pointer overwrite of freelist pointer should fault.
* Large allocations have guard pages at the beginning and end.
+
+## Alignment
+
+PartitionAlloc doesn't have explicit support for a `posix_memalign()`-style call;
+however, it provides some guarantees on the alignment of returned pointers.
+
+All pointers are aligned on the smallest allocation granularity, namely
+`sizeof(void*)`. Additionally, for power-of-two sized allocations, the behavior
+depends on the compilation flags:
+
+* With `DCHECK_IS_ON()`, returned pointers are never guaranteed to be aligned on
+ more than 16 bytes.
+
+* Otherwise, the returned pointer is guaranteed to be aligned on
+ `min(allocation_size, system page size)`.
+
+See the tests in `partition_alloc_unittest.cc` for more details.
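A minimal sketch of the rounding described above, mirroring the logic in PartitionMemalign() from the shim earlier in this change (the helper name is illustrative, not part of the diff):

  #include "base/bits.h"

  // Smallest power of two >= |size|, for size >= 1.
  size_t RoundUpToPowerOfTwo(size_t size) {
    return static_cast<size_t>(1)
           << (sizeof(size_t) * 8 - base::bits::CountLeadingZeroBits(size - 1));
  }

  // RoundUpToPowerOfTwo(48) == 64: a 48-byte posix_memalign()-style request is
  // allocated as 64 bytes, which (with DCHECKs off) is aligned on 64 bytes.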
diff --git a/chromium/base/allocator/partition_allocator/address_pool_manager.cc b/chromium/base/allocator/partition_allocator/address_pool_manager.cc
new file mode 100644
index 00000000000..4c2a3d0fad9
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/address_pool_manager.cc
@@ -0,0 +1,223 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/address_pool_manager.h"
+
+#if defined(OS_MACOSX)
+#include <sys/mman.h>
+#endif
+
+#include <algorithm>
+#include <limits>
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/page_allocator_internal.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
+#include "base/bits.h"
+#include "base/notreached.h"
+#include "base/stl_util.h"
+
+namespace base {
+namespace internal {
+
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+namespace {
+
+void DecommitPages(void* address, size_t size) {
+#if defined(OS_MACOSX)
+ // MAP_FIXED replaces an existing mapping with a new one, when the address is
+ // already part of a mapping. Since newly-created mappings are guaranteed to
+ // be zero-filled, this has the desired effect. It is only required on macOS,
+ // as on other operating systems, |DecommitSystemPages()| provides the same
+ // behavior.
+ void* ptr = mmap(address, size, PROT_NONE,
+ MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ PA_CHECK(ptr == address);
+#else
+ SetSystemPagesAccess(address, size, PageInaccessible);
+ DecommitSystemPages(address, size);
+#endif
+}
+
+bool WARN_UNUSED_RESULT CommitPages(void* address, size_t size) {
+#if defined(OS_MACOSX)
+ SetSystemPagesAccess(address, size, PageReadWrite);
+#else
+ if (!RecommitSystemPages(address, size, PageReadWrite))
+ return false;
+ SetSystemPagesAccess(address, size, PageReadWrite);
+#endif
+
+ return true;
+}
+
+} // namespace
+
+constexpr size_t AddressPoolManager::Pool::kMaxBits;
+
+// static
+AddressPoolManager* AddressPoolManager::GetInstance() {
+ static NoDestructor<AddressPoolManager> instance;
+ return instance.get();
+}
+
+pool_handle AddressPoolManager::Add(uintptr_t ptr, size_t length) {
+ PA_DCHECK(!(ptr & kSuperPageOffsetMask));
+ PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask));
+
+ for (pool_handle i = 0; i < base::size(pools_); ++i) {
+ if (!pools_[i].IsInitialized()) {
+ pools_[i].Initialize(ptr, length);
+ return i + 1;
+ }
+ }
+ NOTREACHED();
+ return 0;
+}
+
+void AddressPoolManager::ResetForTesting() {
+ for (pool_handle i = 0; i < base::size(pools_); ++i)
+ pools_[i].Reset();
+}
+
+void AddressPoolManager::Remove(pool_handle handle) {
+ Pool* pool = GetPool(handle);
+ PA_DCHECK(pool->IsInitialized());
+ pool->Reset();
+}
+
+char* AddressPoolManager::Alloc(pool_handle handle, size_t length) {
+ Pool* pool = GetPool(handle);
+ char* ptr = reinterpret_cast<char*>(pool->FindChunk(length));
+
+ if (UNLIKELY(!ptr) || !CommitPages(ptr, length))
+ return nullptr;
+ return ptr;
+}
+
+void AddressPoolManager::Free(pool_handle handle, void* ptr, size_t length) {
+ PA_DCHECK(0 < handle && handle <= kNumPools);
+ Pool* pool = GetPool(handle);
+ PA_DCHECK(pool->IsInitialized());
+ DecommitPages(ptr, length);
+ pool->FreeChunk(reinterpret_cast<uintptr_t>(ptr), length);
+}
+
+void AddressPoolManager::Pool::Initialize(uintptr_t ptr, size_t length) {
+ PA_CHECK(ptr != 0);
+ PA_CHECK(!(ptr & kSuperPageOffsetMask));
+ PA_CHECK(!(length & kSuperPageOffsetMask));
+ address_begin_ = ptr;
+#if DCHECK_IS_ON()
+ address_end_ = ptr + length;
+ PA_DCHECK(address_begin_ < address_end_);
+#endif
+
+ total_bits_ = length / kSuperPageSize;
+ PA_CHECK(total_bits_ <= kMaxBits);
+
+ base::AutoLock scoped_lock(lock_);
+ alloc_bitset_.reset();
+ bit_hint_ = 0;
+}
+
+bool AddressPoolManager::Pool::IsInitialized() {
+ return address_begin_ != 0;
+}
+
+void AddressPoolManager::Pool::Reset() {
+ address_begin_ = 0;
+}
+
+uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
+ base::AutoLock scoped_lock(lock_);
+
+ const size_t required_size = bits::Align(requested_size, kSuperPageSize);
+ const size_t need_bits = required_size >> kSuperPageShift;
+
+ // Use first-fit policy to find an available chunk from free chunks. Start
+ // from |bit_hint_|, because we know there are no free chunks before.
+ size_t beg_bit = bit_hint_;
+ size_t curr_bit = bit_hint_;
+ while (true) {
+ // |end_bit| points 1 past the last bit that needs to be 0. If it goes past
+ // |total_bits_|, return 0 to signal that no free chunk was found.
+ size_t end_bit = beg_bit + need_bits;
+ if (end_bit > total_bits_)
+ return 0;
+
+ bool found = true;
+ for (; curr_bit < end_bit; ++curr_bit) {
+ if (alloc_bitset_.test(curr_bit)) {
+ // The bit was set, so this chunk isn't entirely free. Set |found=false|
+ // to ensure the outer loop continues. However, continue the inner loop
+ // to set |beg_bit| just past the last set bit in the investigated
+ // chunk. |curr_bit| is advanced all the way to |end_bit| to prevent the
+ // next outer loop pass from checking the same bits.
+ beg_bit = curr_bit + 1;
+ found = false;
+ if (bit_hint_ == curr_bit)
+ ++bit_hint_;
+ }
+ }
+
+ // An entire [beg_bit;end_bit) region of 0s was found. Fill them with 1s (to
+ // mark as allocated) and return the allocated address.
+ if (found) {
+ for (size_t i = beg_bit; i < end_bit; ++i) {
+ PA_DCHECK(!alloc_bitset_.test(i));
+ alloc_bitset_.set(i);
+ }
+ if (bit_hint_ == beg_bit) {
+ bit_hint_ = end_bit;
+ }
+ uintptr_t address = address_begin_ + beg_bit * kSuperPageSize;
+#if DCHECK_IS_ON()
+ PA_DCHECK(address + required_size <= address_end_);
+#endif
+ return address;
+ }
+ }
+
+ NOTREACHED();
+ return 0;
+}
+
+void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
+ base::AutoLock scoped_lock(lock_);
+
+ PA_DCHECK(!(address & kSuperPageOffsetMask));
+
+ const size_t size = bits::Align(free_size, kSuperPageSize);
+ DCHECK_LE(address_begin_, address);
+#if DCHECK_IS_ON()
+ PA_DCHECK(address + size <= address_end_);
+#endif
+
+ const size_t beg_bit = (address - address_begin_) / kSuperPageSize;
+ const size_t end_bit = beg_bit + size / kSuperPageSize;
+ for (size_t i = beg_bit; i < end_bit; ++i) {
+ PA_DCHECK(alloc_bitset_.test(i));
+ alloc_bitset_.reset(i);
+ }
+ bit_hint_ = std::min(bit_hint_, beg_bit);
+}
+
+AddressPoolManager::Pool::Pool() = default;
+AddressPoolManager::Pool::~Pool() = default;
+
+AddressPoolManager::AddressPoolManager() = default;
+AddressPoolManager::~AddressPoolManager() = default;
+
+ALWAYS_INLINE AddressPoolManager::Pool* AddressPoolManager::GetPool(
+ pool_handle handle) {
+ PA_DCHECK(0 < handle && handle <= kNumPools);
+ return &pools_[handle - 1];
+}
+
+#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+} // namespace internal
+} // namespace base
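A simplified, standalone illustration of the first-fit bitmap scan that Pool::FindChunk() implements above (names are illustrative; the real code additionally maintains |bit_hint_| and converts bit indices to addresses):

  #include <bitset>
  #include <cstddef>

  // Finds |need_bits| consecutive clear bits, marks them set, and returns the
  // index of the first one, or -1 if no such run exists.
  template <size_t N>
  ptrdiff_t FindFirstFit(std::bitset<N>& bits, size_t need_bits) {
    size_t beg = 0;
    while (beg + need_bits <= N) {
      size_t i = beg;
      while (i < beg + need_bits && !bits.test(i))
        ++i;
      if (i == beg + need_bits) {  // Found a run of clear bits at |beg|.
        for (size_t j = beg; j < beg + need_bits; ++j)
          bits.set(j);
        return static_cast<ptrdiff_t>(beg);
      }
      beg = i + 1;  // Restart just past the set bit that broke the run.
    }
    return -1;
  }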
diff --git a/chromium/base/allocator/partition_allocator/address_pool_manager.h b/chromium/base/allocator/partition_allocator/address_pool_manager.h
new file mode 100644
index 00000000000..cb46cb63c5a
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/address_pool_manager.h
@@ -0,0 +1,100 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
+
+#include <bitset>
+
+#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/atomicops.h"
+#include "base/no_destructor.h"
+#include "base/synchronization/lock.h"
+#include "base/thread_annotations.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace internal {
+
+using pool_handle = unsigned;
+
+// The feature is not applicable to 32-bit address space.
+// ARCH_CPU_64_BITS implies 64-bit instruction set, but not necessarily 64-bit
+// address space. The only known case where address space is 32-bit is NaCl, so
+// eliminate it explicitly. static_assert below ensures that others won't slip
+// through.
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+static_assert(sizeof(size_t) >= 8, "Need more than 32-bit address space");
+
+// AddressPoolManager takes a reserved virtual address space and manages address
+// space allocation.
+//
+// AddressPoolManager (currently) supports up to 2 pools. Each pool manages a
+// contiguous reserved address space. Alloc() takes a pool_handle and returns
+// address regions from the specified pool. Free() also takes a pool_handle and
+// returns the address region back to the manager.
+class BASE_EXPORT AddressPoolManager {
+ public:
+ static AddressPoolManager* GetInstance();
+
+ pool_handle Add(uintptr_t address, size_t length);
+ void Remove(pool_handle handle);
+ char* Alloc(pool_handle handle, size_t length);
+ void Free(pool_handle handle, void* ptr, size_t length);
+ void ResetForTesting();
+
+ private:
+ AddressPoolManager();
+ ~AddressPoolManager();
+
+ class Pool {
+ public:
+ Pool();
+ ~Pool();
+
+ void Initialize(uintptr_t ptr, size_t length);
+ bool IsInitialized();
+ void Reset();
+
+ uintptr_t FindChunk(size_t size);
+ void FreeChunk(uintptr_t address, size_t size);
+
+ private:
+ // The bitset stores the allocation state of the address pool. 1 bit per
+ // super-page: 1 = allocated, 0 = free.
+ static constexpr size_t kGiB = 1024 * 1024 * 1024;
+ static constexpr size_t kMaxSupportedSize = 16 * kGiB;
+ static constexpr size_t kMaxBits = kMaxSupportedSize / kSuperPageSize;
+ base::Lock lock_;
+ std::bitset<kMaxBits> alloc_bitset_ GUARDED_BY(lock_);
+ // An index of a bit in the bitset before which we know for sure all bits are
+ // 1. This is a best-effort hint in the sense that there still may be lots
+ // of 1s after this index, but at least we know there is no point in
+ // starting the search before it.
+ size_t bit_hint_ GUARDED_BY(lock_);
+
+ size_t total_bits_ = 0;
+ uintptr_t address_begin_ = 0;
+#if DCHECK_IS_ON()
+ uintptr_t address_end_ = 0;
+#endif
+ };
+
+ ALWAYS_INLINE Pool* GetPool(pool_handle handle);
+
+ static constexpr size_t kNumPools = 2;
+ Pool pools_[kNumPools];
+
+ friend class NoDestructor<AddressPoolManager>;
+ DISALLOW_COPY_AND_ASSIGN(AddressPoolManager);
+};
+
+#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
diff --git a/chromium/base/allocator/partition_allocator/address_pool_manager_unittest.cc b/chromium/base/allocator/partition_allocator/address_pool_manager_unittest.cc
new file mode 100644
index 00000000000..02ca8874ccf
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/address_pool_manager_unittest.cc
@@ -0,0 +1,150 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/address_pool_manager.h"
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/page_allocator_internal.h"
+#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+class AddressPoolManagerTest : public testing::Test {
+ protected:
+ AddressPoolManagerTest() = default;
+ ~AddressPoolManagerTest() override = default;
+
+ void SetUp() override {
+ AddressPoolManager::GetInstance()->ResetForTesting();
+ base_address_ =
+ AllocPages(nullptr, kPoolSize, kSuperPageSize, base::PageInaccessible,
+ PageTag::kPartitionAlloc, false);
+ ASSERT_TRUE(base_address_);
+ pool_ = AddressPoolManager::GetInstance()->Add(
+ reinterpret_cast<uintptr_t>(base_address_), kPoolSize);
+ }
+
+ void TearDown() override { FreePages(base_address_, kPoolSize); }
+
+ static constexpr size_t kPageCnt = 8192;
+ static constexpr size_t kPoolSize = kSuperPageSize * kPageCnt;
+
+ void* base_address_;
+ pool_handle pool_;
+};
+
+TEST_F(AddressPoolManagerTest, TooLargePool) {
+ uintptr_t base_addr = 0x4200000;
+
+ constexpr size_t kSize = 16ull * 1024 * 1024 * 1024;
+ EXPECT_DEATH_IF_SUPPORTED(
+ AddressPoolManager::GetInstance()->Add(base_addr, kSize + kSuperPageSize),
+ "");
+}
+
+TEST_F(AddressPoolManagerTest, ManyPages) {
+ char* base_ptr = reinterpret_cast<char*>(base_address_);
+
+ EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool_,
+ kPageCnt * kSuperPageSize),
+ base_ptr);
+ EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize),
+ nullptr);
+ AddressPoolManager::GetInstance()->Free(pool_, base_ptr,
+ kPageCnt * kSuperPageSize);
+ EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool_,
+ kPageCnt * kSuperPageSize),
+ base_ptr);
+}
+
+TEST_F(AddressPoolManagerTest, PagesFragmented) {
+ char* base_ptr = reinterpret_cast<char*>(base_address_);
+ void* addrs[kPageCnt];
+ for (size_t i = 0; i < kPageCnt; ++i) {
+ addrs[i] = AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize);
+ EXPECT_EQ(addrs[i], base_ptr + i * kSuperPageSize);
+ }
+ EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize),
+ nullptr);
+ for (size_t i = 1; i < kPageCnt; i += 2) {
+ AddressPoolManager::GetInstance()->Free(pool_, addrs[i], kSuperPageSize);
+ }
+ EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool_, 2 * kSuperPageSize),
+ nullptr);
+ for (size_t i = 1; i < kPageCnt; i += 2) {
+ addrs[i] = AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize);
+ EXPECT_EQ(addrs[i], base_ptr + i * kSuperPageSize);
+ }
+ EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize),
+ nullptr);
+}
+
+TEST_F(AddressPoolManagerTest, IrregularPattern) {
+ char* base_ptr = reinterpret_cast<char*>(base_address_);
+
+ void* a1 = AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize);
+ EXPECT_EQ(a1, base_ptr);
+ void* a2 =
+ AddressPoolManager::GetInstance()->Alloc(pool_, 2 * kSuperPageSize);
+ EXPECT_EQ(a2, base_ptr + 1 * kSuperPageSize);
+ void* a3 =
+ AddressPoolManager::GetInstance()->Alloc(pool_, 3 * kSuperPageSize);
+ EXPECT_EQ(a3, base_ptr + 3 * kSuperPageSize);
+ void* a4 =
+ AddressPoolManager::GetInstance()->Alloc(pool_, 4 * kSuperPageSize);
+ EXPECT_EQ(a4, base_ptr + 6 * kSuperPageSize);
+ void* a5 =
+ AddressPoolManager::GetInstance()->Alloc(pool_, 5 * kSuperPageSize);
+ EXPECT_EQ(a5, base_ptr + 10 * kSuperPageSize);
+
+ AddressPoolManager::GetInstance()->Free(pool_, a4, 4 * kSuperPageSize);
+ void* a6 =
+ AddressPoolManager::GetInstance()->Alloc(pool_, 6 * kSuperPageSize);
+ EXPECT_EQ(a6, base_ptr + 15 * kSuperPageSize);
+
+ AddressPoolManager::GetInstance()->Free(pool_, a5, 5 * kSuperPageSize);
+ void* a7 =
+ AddressPoolManager::GetInstance()->Alloc(pool_, 7 * kSuperPageSize);
+ EXPECT_EQ(a7, base_ptr + 6 * kSuperPageSize);
+ void* a8 =
+ AddressPoolManager::GetInstance()->Alloc(pool_, 3 * kSuperPageSize);
+ EXPECT_EQ(a8, base_ptr + 21 * kSuperPageSize);
+ void* a9 =
+ AddressPoolManager::GetInstance()->Alloc(pool_, 2 * kSuperPageSize);
+ EXPECT_EQ(a9, base_ptr + 13 * kSuperPageSize);
+
+ AddressPoolManager::GetInstance()->Free(pool_, a7, 7 * kSuperPageSize);
+ AddressPoolManager::GetInstance()->Free(pool_, a9, 2 * kSuperPageSize);
+ AddressPoolManager::GetInstance()->Free(pool_, a6, 6 * kSuperPageSize);
+ void* a10 =
+ AddressPoolManager::GetInstance()->Alloc(pool_, 15 * kSuperPageSize);
+ EXPECT_EQ(a10, base_ptr + 6 * kSuperPageSize);
+}
+
+TEST_F(AddressPoolManagerTest, DecommittedDataIsErased) {
+ void* data = AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize);
+ ASSERT_TRUE(data);
+
+ memset(data, 42, kSuperPageSize);
+ AddressPoolManager::GetInstance()->Free(pool_, data, kSuperPageSize);
+
+ void* data2 = AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize);
+ ASSERT_EQ(data, data2);
+
+ uint32_t sum = 0;
+ for (size_t i = 0; i < kSuperPageSize; i++) {
+ sum += reinterpret_cast<uint8_t*>(data2)[i];
+ }
+
+ EXPECT_EQ(0u, sum) << sum / 42 << " bytes were not zeroed";
+}
+#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+} // namespace internal
+} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/address_space_randomization.cc b/chromium/base/allocator/partition_allocator/address_space_randomization.cc
index 72078fdaa50..b168f996148 100644
--- a/chromium/base/allocator/partition_allocator/address_space_randomization.cc
+++ b/chromium/base/allocator/partition_allocator/address_space_randomization.cc
@@ -5,6 +5,7 @@
#include "base/allocator/partition_allocator/address_space_randomization.h"
#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/random.h"
#include "base/allocator/partition_allocator/spin_lock.h"
#include "base/check_op.h"
@@ -61,7 +62,7 @@ void* GetRandomPageBase() {
random += internal::kASLROffset;
#endif // defined(ARCH_CPU_32_BITS)
- DCHECK_EQ(0ULL, (random & kPageAllocationGranularityOffsetMask));
+ PA_DCHECK(!(random & kPageAllocationGranularityOffsetMask));
return reinterpret_cast<void*>(random);
}
diff --git a/chromium/base/allocator/partition_allocator/memory_reclaimer.cc b/chromium/base/allocator/partition_allocator/memory_reclaimer.cc
index 0d515b11463..3e4203abdbc 100644
--- a/chromium/base/allocator/partition_allocator/memory_reclaimer.cc
+++ b/chromium/base/allocator/partition_allocator/memory_reclaimer.cc
@@ -5,36 +5,34 @@
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/bind.h"
#include "base/location.h"
#include "base/metrics/histogram_functions.h"
-#include "base/timer/elapsed_timer.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
namespace base {
namespace {
template <bool thread_safe>
-void Insert(std::set<internal::PartitionRootBase<thread_safe>*>* partitions,
- internal::PartitionRootBase<thread_safe>* partition) {
- DCHECK(partition);
+void Insert(std::set<PartitionRoot<thread_safe>*>* partitions,
+ PartitionRoot<thread_safe>* partition) {
+ PA_DCHECK(partition);
auto it_and_whether_inserted = partitions->insert(partition);
- DCHECK(it_and_whether_inserted.second);
+ PA_DCHECK(it_and_whether_inserted.second);
}
template <bool thread_safe>
-void Remove(std::set<internal::PartitionRootBase<thread_safe>*>* partitions,
- internal::PartitionRootBase<thread_safe>* partition) {
- DCHECK(partition);
+void Remove(std::set<PartitionRoot<thread_safe>*>* partitions,
+ PartitionRoot<thread_safe>* partition) {
+ PA_DCHECK(partition);
size_t erased_count = partitions->erase(partition);
- DCHECK_EQ(1u, erased_count);
+ PA_DCHECK(erased_count == 1u);
}
} // namespace
-constexpr TimeDelta PartitionAllocMemoryReclaimer::kStatsRecordingTimeDelta;
-
// static
PartitionAllocMemoryReclaimer* PartitionAllocMemoryReclaimer::Instance() {
static NoDestructor<PartitionAllocMemoryReclaimer> instance;
@@ -42,37 +40,37 @@ PartitionAllocMemoryReclaimer* PartitionAllocMemoryReclaimer::Instance() {
}
void PartitionAllocMemoryReclaimer::RegisterPartition(
- internal::PartitionRootBase<internal::ThreadSafe>* partition) {
+ PartitionRoot<internal::ThreadSafe>* partition) {
AutoLock lock(lock_);
Insert(&thread_safe_partitions_, partition);
}
void PartitionAllocMemoryReclaimer::RegisterPartition(
- internal::PartitionRootBase<internal::NotThreadSafe>* partition) {
+ PartitionRoot<internal::NotThreadSafe>* partition) {
AutoLock lock(lock_);
Insert(&thread_unsafe_partitions_, partition);
}
void PartitionAllocMemoryReclaimer::UnregisterPartition(
- internal::PartitionRootBase<internal::ThreadSafe>* partition) {
+ PartitionRoot<internal::ThreadSafe>* partition) {
AutoLock lock(lock_);
Remove(&thread_safe_partitions_, partition);
}
void PartitionAllocMemoryReclaimer::UnregisterPartition(
- internal::PartitionRootBase<internal::NotThreadSafe>* partition) {
+ PartitionRoot<internal::NotThreadSafe>* partition) {
AutoLock lock(lock_);
Remove(&thread_unsafe_partitions_, partition);
}
void PartitionAllocMemoryReclaimer::Start(
scoped_refptr<SequencedTaskRunner> task_runner) {
- DCHECK(!timer_);
- DCHECK(task_runner);
+ PA_DCHECK(!timer_);
+ PA_DCHECK(task_runner);
{
AutoLock lock(lock_);
- DCHECK(!thread_safe_partitions_.empty());
+ PA_DCHECK(!thread_safe_partitions_.empty());
}
// This does not need to run on the main thread, however there are a few
@@ -98,58 +96,27 @@ void PartitionAllocMemoryReclaimer::Start(
timer_->Start(
FROM_HERE, kInterval,
BindRepeating(&PartitionAllocMemoryReclaimer::Reclaim, Unretained(this)));
-
- task_runner->PostDelayedTask(
- FROM_HERE,
- BindOnce(&PartitionAllocMemoryReclaimer::RecordStatistics,
- Unretained(this)),
- kStatsRecordingTimeDelta);
}
PartitionAllocMemoryReclaimer::PartitionAllocMemoryReclaimer() = default;
PartitionAllocMemoryReclaimer::~PartitionAllocMemoryReclaimer() = default;
void PartitionAllocMemoryReclaimer::Reclaim() {
+ AutoLock lock(lock_); // Has to protect from concurrent (Un)Register calls.
TRACE_EVENT0("base", "PartitionAllocMemoryReclaimer::Reclaim()");
- // Reclaim will almost always call into the kernel, so tail latency of this
- // task would likely be affected by descheduling.
- //
- // On Linux (and Android) at least, ThreadTicks also includes kernel time, so
- // this is a good measure of the true cost of decommit.
- ElapsedThreadTimer timer;
+
constexpr int kFlags =
PartitionPurgeDecommitEmptyPages | PartitionPurgeDiscardUnusedSystemPages;
- {
- AutoLock lock(lock_); // Has to protect from concurrent (Un)Register calls.
- for (auto* partition : thread_safe_partitions_)
- partition->PurgeMemory(kFlags);
- for (auto* partition : thread_unsafe_partitions_)
- partition->PurgeMemory(kFlags);
- }
-
- has_called_reclaim_ = true;
- if (timer.is_supported())
- total_reclaim_thread_time_ += timer.Elapsed();
-}
-
-void PartitionAllocMemoryReclaimer::RecordStatistics() {
- if (!ElapsedThreadTimer().is_supported())
- return;
- if (!has_called_reclaim_)
- return;
-
- UmaHistogramTimes("Memory.PartitionAlloc.MainThreadTime.5min",
- total_reclaim_thread_time_);
- has_called_reclaim_ = false;
- total_reclaim_thread_time_ = TimeDelta();
+ for (auto* partition : thread_safe_partitions_)
+ partition->PurgeMemory(kFlags);
+ for (auto* partition : thread_unsafe_partitions_)
+ partition->PurgeMemory(kFlags);
}
void PartitionAllocMemoryReclaimer::ResetForTesting() {
AutoLock lock(lock_);
- has_called_reclaim_ = false;
- total_reclaim_thread_time_ = TimeDelta();
timer_ = nullptr;
thread_safe_partitions_.clear();
thread_unsafe_partitions_.clear();
diff --git a/chromium/base/allocator/partition_allocator/memory_reclaimer.h b/chromium/base/allocator/partition_allocator/memory_reclaimer.h
index 4e51332dca6..d7594aa3a83 100644
--- a/chromium/base/allocator/partition_allocator/memory_reclaimer.h
+++ b/chromium/base/allocator/partition_allocator/memory_reclaimer.h
@@ -15,8 +15,6 @@
#include "base/no_destructor.h"
#include "base/single_thread_task_runner.h"
#include "base/thread_annotations.h"
-#include "base/time/time.h"
-#include "base/timer/elapsed_timer.h"
#include "base/timer/timer.h"
namespace base {
@@ -36,42 +34,31 @@ class BASE_EXPORT PartitionAllocMemoryReclaimer {
// Internal. Do not use.
// Registers a partition to be tracked by the reclaimer.
- void RegisterPartition(
- internal::PartitionRootBase<internal::ThreadSafe>* partition);
- void RegisterPartition(
- internal::PartitionRootBase<internal::NotThreadSafe>* partition);
+ void RegisterPartition(PartitionRoot<internal::ThreadSafe>* partition);
+ void RegisterPartition(PartitionRoot<internal::NotThreadSafe>* partition);
// Internal. Do not use.
// Unregisters a partition to be tracked by the reclaimer.
- void UnregisterPartition(
- internal::PartitionRootBase<internal::ThreadSafe>* partition);
- void UnregisterPartition(
- internal::PartitionRootBase<internal::NotThreadSafe>* partition);
+ void UnregisterPartition(PartitionRoot<internal::ThreadSafe>* partition);
+ void UnregisterPartition(PartitionRoot<internal::NotThreadSafe>* partition);
// Starts the periodic reclaim. Should be called once.
void Start(scoped_refptr<SequencedTaskRunner> task_runner);
// Triggers an explicit reclaim now.
void Reclaim();
- static constexpr TimeDelta kStatsRecordingTimeDelta =
- TimeDelta::FromMinutes(5);
-
private:
PartitionAllocMemoryReclaimer();
~PartitionAllocMemoryReclaimer();
void ReclaimAndReschedule();
- void RecordStatistics();
void ResetForTesting();
- // Total time spent in |Reclaim()|.
- bool has_called_reclaim_ = false;
- TimeDelta total_reclaim_thread_time_;
// Schedules periodic |Reclaim()|.
std::unique_ptr<RepeatingTimer> timer_;
Lock lock_;
- std::set<internal::PartitionRootBase<internal::ThreadSafe>*>
- thread_safe_partitions_ GUARDED_BY(lock_);
- std::set<internal::PartitionRootBase<internal::NotThreadSafe>*>
- thread_unsafe_partitions_ GUARDED_BY(lock_);
+ std::set<PartitionRoot<internal::ThreadSafe>*> thread_safe_partitions_
+ GUARDED_BY(lock_);
+ std::set<PartitionRoot<internal::NotThreadSafe>*> thread_unsafe_partitions_
+ GUARDED_BY(lock_);
friend class NoDestructor<PartitionAllocMemoryReclaimer>;
friend class PartitionAllocMemoryReclaimerTest;
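Usage sketch (an assumption, not shown in this diff): after the relevant partitions are registered, a process starts the periodic reclaim once on a sequence it owns:

  // SequencedTaskRunnerHandle::Get() is assumed from
  // base/threading/sequenced_task_runner_handle.h.
  base::PartitionAllocMemoryReclaimer::Instance()->Start(
      base::SequencedTaskRunnerHandle::Get());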
diff --git a/chromium/base/allocator/partition_allocator/memory_reclaimer_unittest.cc b/chromium/base/allocator/partition_allocator/memory_reclaimer_unittest.cc
index 72c72011b69..c41a39b3911 100644
--- a/chromium/base/allocator/partition_allocator/memory_reclaimer_unittest.cc
+++ b/chromium/base/allocator/partition_allocator/memory_reclaimer_unittest.cc
@@ -8,7 +8,6 @@
#include <utility>
#include "base/allocator/partition_allocator/partition_alloc.h"
-#include "base/test/metrics/histogram_tester.h"
#include "base/test/task_environment.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -19,6 +18,14 @@
namespace base {
+namespace {
+
+void HandleOOM(size_t unused_size) {
+ LOG(FATAL) << "Out of memory";
+}
+
+} // namespace
+
class PartitionAllocMemoryReclaimerTest : public ::testing::Test {
public:
PartitionAllocMemoryReclaimerTest()
@@ -28,8 +35,9 @@ class PartitionAllocMemoryReclaimerTest : public ::testing::Test {
protected:
void SetUp() override {
+ PartitionAllocGlobalInit(HandleOOM);
PartitionAllocMemoryReclaimer::Instance()->ResetForTesting();
- allocator_ = std::make_unique<PartitionAllocatorGeneric>();
+ allocator_ = std::make_unique<PartitionAllocator>();
allocator_->init();
}
@@ -37,6 +45,7 @@ class PartitionAllocMemoryReclaimerTest : public ::testing::Test {
allocator_ = nullptr;
PartitionAllocMemoryReclaimer::Instance()->ResetForTesting();
task_environment_.FastForwardUntilNoTasksRemain();
+ PartitionAllocGlobalUninitForTesting();
}
void StartReclaimer() {
@@ -49,32 +58,19 @@ class PartitionAllocMemoryReclaimerTest : public ::testing::Test {
allocator_->root()->Free(data);
}
- size_t GetExpectedTasksCount() const {
- // Includes the stats recording task.
- if (ElapsedThreadTimer().is_supported())
- return 2;
- return 1;
- }
-
test::TaskEnvironment task_environment_;
- std::unique_ptr<PartitionAllocatorGeneric> allocator_;
+ std::unique_ptr<PartitionAllocator> allocator_;
};
TEST_F(PartitionAllocMemoryReclaimerTest, Simple) {
StartReclaimer();
- EXPECT_EQ(GetExpectedTasksCount(),
- task_environment_.GetPendingMainThreadTaskCount());
+ EXPECT_EQ(1u, task_environment_.GetPendingMainThreadTaskCount());
EXPECT_TRUE(task_environment_.NextTaskIsDelayed());
}
-TEST_F(PartitionAllocMemoryReclaimerTest, IsEnabledByDefault) {
- StartReclaimer();
- EXPECT_EQ(2u, task_environment_.GetPendingMainThreadTaskCount());
-}
-
TEST_F(PartitionAllocMemoryReclaimerTest, FreesMemory) {
- PartitionRootGeneric* root = allocator_->root();
+ PartitionRoot<internal::ThreadSafe>* root = allocator_->root();
size_t committed_initially = root->total_size_of_committed_pages;
AllocateAndFree();
@@ -91,7 +87,7 @@ TEST_F(PartitionAllocMemoryReclaimerTest, FreesMemory) {
}
TEST_F(PartitionAllocMemoryReclaimerTest, Reclaim) {
- PartitionRootGeneric* root = allocator_->root();
+ PartitionRoot<internal::ThreadSafe>* root = allocator_->root();
size_t committed_initially = root->total_size_of_committed_pages;
{
@@ -107,23 +103,5 @@ TEST_F(PartitionAllocMemoryReclaimerTest, Reclaim) {
}
}
-TEST_F(PartitionAllocMemoryReclaimerTest, StatsRecording) {
- // No stats reported if the timer is not.
- if (!ElapsedThreadTimer().is_supported())
- return;
-
- HistogramTester histogram_tester;
- StartReclaimer();
- EXPECT_EQ(GetExpectedTasksCount(),
- task_environment_.GetPendingMainThreadTaskCount());
-
- task_environment_.FastForwardBy(
- PartitionAllocMemoryReclaimer::kStatsRecordingTimeDelta);
- // Hard to make sure that the total time is >1ms, so cannot assert that the
- // value is not 0.
- histogram_tester.ExpectTotalCount("Memory.PartitionAlloc.MainThreadTime.5min",
- 1);
-}
-
} // namespace base
#endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/chromium/base/allocator/partition_allocator/oom.h b/chromium/base/allocator/partition_allocator/oom.h
index c3a2d1b03cf..916ef125c87 100644
--- a/chromium/base/allocator/partition_allocator/oom.h
+++ b/chromium/base/allocator/partition_allocator/oom.h
@@ -6,7 +6,6 @@
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
#include "base/allocator/partition_allocator/oom_callback.h"
-#include "base/logging.h"
#include "base/process/memory.h"
#include "build/build_config.h"
diff --git a/chromium/base/allocator/partition_allocator/oom_callback.cc b/chromium/base/allocator/partition_allocator/oom_callback.cc
index c734458acbb..b6efc31da02 100644
--- a/chromium/base/allocator/partition_allocator/oom_callback.cc
+++ b/chromium/base/allocator/partition_allocator/oom_callback.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "base/allocator/partition_allocator/oom_callback.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/check.h"
@@ -13,7 +14,7 @@ PartitionAllocOomCallback g_oom_callback;
} // namespace
void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback) {
- DCHECK(!g_oom_callback);
+ PA_DCHECK(!g_oom_callback);
g_oom_callback = callback;
}
diff --git a/chromium/base/allocator/partition_allocator/page_allocator.cc b/chromium/base/allocator/partition_allocator/page_allocator.cc
index b7785505efc..25ab8d72875 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator.cc
+++ b/chromium/base/allocator/partition_allocator/page_allocator.cc
@@ -10,11 +10,12 @@
#include "base/allocator/partition_allocator/address_space_randomization.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
-#include "base/allocator/partition_allocator/spin_lock.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/bits.h"
#include "base/check_op.h"
#include "base/no_destructor.h"
#include "base/numerics/checked_math.h"
+#include "base/synchronization/lock.h"
#include "build/build_config.h"
#if defined(OS_WIN)
@@ -36,14 +37,14 @@ namespace base {
namespace {
// We may reserve/release address space on different threads.
-subtle::SpinLock& GetReserveLock() {
- static NoDestructor<subtle::SpinLock> s_reserveLock;
- return *s_reserveLock;
+Lock& GetReserveLock() {
+ static NoDestructor<Lock> lock;
+ return *lock;
}
// We only support a single block of reserved address space.
-void* s_reservation_address = nullptr;
-size_t s_reservation_size = 0;
+void* s_reservation_address GUARDED_BY(GetReserveLock()) = nullptr;
+size_t s_reservation_size GUARDED_BY(GetReserveLock()) = 0;
void* AllocPagesIncludingReserved(void* address,
size_t length,
@@ -78,9 +79,9 @@ void* TrimMapping(void* base,
pre_slack = alignment - pre_slack;
}
size_t post_slack = base_length - pre_slack - trim_length;
- DCHECK(base_length >= trim_length || pre_slack || post_slack);
- DCHECK(pre_slack < base_length);
- DCHECK(post_slack < base_length);
+ PA_DCHECK(base_length >= trim_length || pre_slack || post_slack);
+ PA_DCHECK(pre_slack < base_length);
+ PA_DCHECK(post_slack < base_length);
return TrimMappingInternal(base, base_length, trim_length, accessibility,
commit, pre_slack, post_slack);
}
@@ -92,10 +93,10 @@ void* SystemAllocPages(void* hint,
PageAccessibilityConfiguration accessibility,
PageTag page_tag,
bool commit) {
- DCHECK(!(length & kPageAllocationGranularityOffsetMask));
- DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
- kPageAllocationGranularityOffsetMask));
- DCHECK(commit || accessibility == PageInaccessible);
+ PA_DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+ PA_DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
+ kPageAllocationGranularityOffsetMask));
+ PA_DCHECK(commit || accessibility == PageInaccessible);
return SystemAllocPagesInternal(hint, length, accessibility, page_tag,
commit);
}
@@ -106,16 +107,16 @@ void* AllocPages(void* address,
PageAccessibilityConfiguration accessibility,
PageTag page_tag,
bool commit) {
- DCHECK(length >= kPageAllocationGranularity);
- DCHECK(!(length & kPageAllocationGranularityOffsetMask));
- DCHECK(align >= kPageAllocationGranularity);
+ PA_DCHECK(length >= kPageAllocationGranularity);
+ PA_DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+ PA_DCHECK(align >= kPageAllocationGranularity);
// Alignment must be power of 2 for masking math to work.
- DCHECK(base::bits::IsPowerOfTwo(align));
- DCHECK(!(reinterpret_cast<uintptr_t>(address) &
- kPageAllocationGranularityOffsetMask));
+ PA_DCHECK(base::bits::IsPowerOfTwo(align));
+ PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) &
+ kPageAllocationGranularityOffsetMask));
uintptr_t align_offset_mask = align - 1;
uintptr_t align_base_mask = ~align_offset_mask;
- DCHECK(!(reinterpret_cast<uintptr_t>(address) & align_offset_mask));
+ PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) & align_offset_mask));
// If the client passed null as the address, choose a good one.
if (address == nullptr) {
@@ -165,7 +166,7 @@ void* AllocPages(void* address,
// Make a larger allocation so we can force alignment.
size_t try_length = length + (align - kPageAllocationGranularity);
- CHECK(try_length >= length);
+ PA_CHECK(try_length >= length);
void* ret;
do {
@@ -183,54 +184,54 @@ void* AllocPages(void* address,
}
void FreePages(void* address, size_t length) {
- DCHECK(!(reinterpret_cast<uintptr_t>(address) &
- kPageAllocationGranularityOffsetMask));
- DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+ PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) &
+ kPageAllocationGranularityOffsetMask));
+ PA_DCHECK(!(length & kPageAllocationGranularityOffsetMask));
FreePagesInternal(address, length);
}
bool TrySetSystemPagesAccess(void* address,
size_t length,
PageAccessibilityConfiguration accessibility) {
- DCHECK(!(length & kSystemPageOffsetMask));
+ PA_DCHECK(!(length & kSystemPageOffsetMask));
return TrySetSystemPagesAccessInternal(address, length, accessibility);
}
void SetSystemPagesAccess(void* address,
size_t length,
PageAccessibilityConfiguration accessibility) {
- DCHECK(!(length & kSystemPageOffsetMask));
+ PA_DCHECK(!(length & kSystemPageOffsetMask));
SetSystemPagesAccessInternal(address, length, accessibility);
}
void DecommitSystemPages(void* address, size_t length) {
- DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
+ PA_DCHECK(!(length & kSystemPageOffsetMask));
DecommitSystemPagesInternal(address, length);
}
bool RecommitSystemPages(void* address,
size_t length,
PageAccessibilityConfiguration accessibility) {
- DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
- DCHECK_NE(PageInaccessible, accessibility);
+ PA_DCHECK(!(length & kSystemPageOffsetMask));
+ PA_DCHECK(accessibility != PageInaccessible);
return RecommitSystemPagesInternal(address, length, accessibility);
}
void DiscardSystemPages(void* address, size_t length) {
- DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
+ PA_DCHECK(!(length & kSystemPageOffsetMask));
DiscardSystemPagesInternal(address, length);
}
bool ReserveAddressSpace(size_t size) {
// To avoid deadlock, call only SystemAllocPages.
- subtle::SpinLock::Guard guard(GetReserveLock());
+ AutoLock guard(GetReserveLock());
if (s_reservation_address == nullptr) {
void* mem = SystemAllocPages(nullptr, size, PageInaccessible,
PageTag::kChromium, false);
if (mem != nullptr) {
// We guarantee this alignment when reserving address space.
- DCHECK(!(reinterpret_cast<uintptr_t>(mem) &
- kPageAllocationGranularityOffsetMask));
+ PA_DCHECK(!(reinterpret_cast<uintptr_t>(mem) &
+ kPageAllocationGranularityOffsetMask));
s_reservation_address = mem;
s_reservation_size = size;
return true;
@@ -241,7 +242,7 @@ bool ReserveAddressSpace(size_t size) {
bool ReleaseReservation() {
// To avoid deadlock, call only FreePages.
- subtle::SpinLock::Guard guard(GetReserveLock());
+ AutoLock guard(GetReserveLock());
if (!s_reservation_address)
return false;
@@ -252,7 +253,7 @@ bool ReleaseReservation() {
}
bool HasReservationForTesting() {
- subtle::SpinLock::Guard guard(GetReserveLock());
+ AutoLock guard(GetReserveLock());
return s_reservation_address != nullptr;
}
diff --git a/chromium/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h b/chromium/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h
index 7e1bff18525..e2b99f18e70 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h
+++ b/chromium/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h
@@ -16,8 +16,9 @@
#include <lib/zx/vmo.h>
#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/fuchsia/fuchsia_logging.h"
-#include "base/logging.h"
+#include "base/notreached.h"
namespace base {
@@ -35,7 +36,7 @@ const char* PageTagToName(PageTag tag) {
case PageTag::kV8:
return "cr_v8";
default:
- DCHECK(false);
+ PA_DCHECK(false);
return "";
}
}
@@ -126,7 +127,7 @@ void* TrimMappingInternal(void* base,
bool commit,
size_t pre_slack,
size_t post_slack) {
- DCHECK_EQ(base_length, trim_length + pre_slack + post_slack);
+ PA_DCHECK(base_length == trim_length + pre_slack + post_slack);
uint64_t base_address = reinterpret_cast<uint64_t>(base);
diff --git a/chromium/base/allocator/partition_allocator/page_allocator_internals_posix.h b/chromium/base/allocator/partition_allocator/page_allocator_internals_posix.h
index 27fe5a9a1df..ebf2bcafcae 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator_internals_posix.h
+++ b/chromium/base/allocator/partition_allocator/page_allocator_internals_posix.h
@@ -8,7 +8,9 @@
#include <errno.h>
#include <sys/mman.h>
-#include "base/logging.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
+#include "base/check_op.h"
+#include "base/notreached.h"
#include "build/build_config.h"
#if defined(OS_MACOSX)
@@ -56,7 +58,7 @@ const char* PageTagToName(PageTag tag) {
case PageTag::kV8:
return "v8";
default:
- DCHECK(false);
+ PA_DCHECK(false);
return "";
}
}
@@ -112,8 +114,8 @@ void* SystemAllocPagesInternal(void* hint,
#if defined(OS_MACOSX)
// Use a custom tag to make it easier to distinguish Partition Alloc regions
// in vmmap(1). Tags between 240-255 are supported.
- DCHECK_LE(PageTag::kFirst, page_tag);
- DCHECK_GE(PageTag::kLast, page_tag);
+ PA_DCHECK(PageTag::kFirst <= page_tag);
+ PA_DCHECK(PageTag::kLast >= page_tag);
int fd = VM_MAKE_TAG(static_cast<int>(page_tag));
#else
int fd = -1;
@@ -165,12 +167,12 @@ void* TrimMappingInternal(void* base,
// the aligned range.
if (pre_slack) {
int res = munmap(base, pre_slack);
- CHECK(!res);
+ PCHECK(!res);
ret = reinterpret_cast<char*>(base) + pre_slack;
}
if (post_slack) {
int res = munmap(reinterpret_cast<char*>(ret) + trim_length, post_slack);
- CHECK(!res);
+ PCHECK(!res);
}
return ret;
}
@@ -186,11 +188,11 @@ void SetSystemPagesAccessInternal(
void* address,
size_t length,
PageAccessibilityConfiguration accessibility) {
- CHECK_EQ(0, mprotect(address, length, GetAccessFlags(accessibility)));
+ PCHECK(!mprotect(address, length, GetAccessFlags(accessibility)));
}
void FreePagesInternal(void* address, size_t length) {
- CHECK(!munmap(address, length));
+ PCHECK(!munmap(address, length));
}
void DecommitSystemPagesInternal(void* address, size_t length) {
@@ -227,7 +229,7 @@ void DiscardSystemPagesInternal(void* address, size_t length) {
// MADV_FREE_REUSABLE sometimes fails, so fall back to MADV_DONTNEED.
ret = madvise(address, length, MADV_DONTNEED);
}
- CHECK(0 == ret);
+ PCHECK(0 == ret);
#else
// We have experimented with other flags, but with suboptimal results.
//
@@ -235,7 +237,7 @@ void DiscardSystemPagesInternal(void* address, size_t length) {
// performance benefits unclear.
//
// Therefore, we just do the simple thing: MADV_DONTNEED.
- CHECK(!madvise(address, length, MADV_DONTNEED));
+ PCHECK(!madvise(address, length, MADV_DONTNEED));
#endif
}
diff --git a/chromium/base/allocator/partition_allocator/page_allocator_internals_win.h b/chromium/base/allocator/partition_allocator/page_allocator_internals_win.h
index 60a3472f976..9f0fc8cedfa 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator_internals_win.h
+++ b/chromium/base/allocator/partition_allocator/page_allocator_internals_win.h
@@ -7,7 +7,8 @@
#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
-#include "base/logging.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
+#include "base/notreached.h"
namespace base {
@@ -84,7 +85,7 @@ void SetSystemPagesAccessInternal(
if (!VirtualFree(address, length, MEM_DECOMMIT)) {
// We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
// report we get the error number.
- CHECK_EQ(static_cast<uint32_t>(ERROR_SUCCESS), GetLastError());
+ PA_CHECK(static_cast<uint32_t>(ERROR_SUCCESS) == GetLastError());
}
} else {
if (!VirtualAlloc(address, length, MEM_COMMIT,
@@ -94,13 +95,13 @@ void SetSystemPagesAccessInternal(
OOM_CRASH(length);
// We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
// report we get the error number.
- CHECK_EQ(ERROR_SUCCESS, error);
+ PA_CHECK(ERROR_SUCCESS == error);
}
}
}
void FreePagesInternal(void* address, size_t length) {
- CHECK(VirtualFree(address, 0, MEM_RELEASE));
+ PA_CHECK(VirtualFree(address, 0, MEM_RELEASE));
}
void DecommitSystemPagesInternal(void* address, size_t length) {
@@ -135,7 +136,7 @@ void DiscardSystemPagesInternal(void* address, size_t length) {
// failure.
if (ret) {
void* ptr = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
- CHECK(ptr);
+ PA_CHECK(ptr);
}
}
diff --git a/chromium/base/allocator/partition_allocator/page_allocator_unittest.cc b/chromium/base/allocator/partition_allocator/page_allocator_unittest.cc
index 7612ad26977..2067a8d6222 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator_unittest.cc
+++ b/chromium/base/allocator/partition_allocator/page_allocator_unittest.cc
@@ -251,6 +251,29 @@ TEST(PageAllocatorTest, PageTagging) {
}
#endif // defined(OS_ANDROID)
+#if !defined(OS_MACOSX)
+
+TEST(PageAllocatorTest, DecommitErasesMemory) {
+ size_t size = kPageAllocationGranularity;
+ void* buffer = AllocPages(nullptr, size, kPageAllocationGranularity,
+ PageReadWrite, PageTag::kChromium, true);
+ ASSERT_TRUE(buffer);
+
+ memset(buffer, 42, size);
+
+ DecommitSystemPages(buffer, size);
+ EXPECT_TRUE(RecommitSystemPages(buffer, size, PageReadWrite));
+
+ uint8_t* recommitted_buffer = reinterpret_cast<uint8_t*>(buffer);
+ uint32_t sum = 0;
+ for (size_t i = 0; i < size; i++) {
+ sum += recommitted_buffer[i];
+ }
+ EXPECT_EQ(0u, sum) << "Data was not erased";
+}
+
+#endif  // !defined(OS_MACOSX)
+
} // namespace base
#endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/chromium/base/allocator/partition_allocator/partition_address_space.cc b/chromium/base/allocator/partition_allocator/partition_address_space.cc
new file mode 100644
index 00000000000..d72db634785
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/partition_address_space.cc
@@ -0,0 +1,69 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/partition_address_space.h"
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/page_allocator_internal.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/bits.h"
+
+namespace base {
+
+namespace internal {
+
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+// Before PartitionAddressSpace::Init(), no allocations have been made from
+// the reserved address space, so initialize reserved_base_address_ to
+// kReservedAddressSpaceOffsetMask so that PartitionAddressSpace::Contains()
+// always returns false.
+// Do something similar for normal_bucket_pool_base_address_.
+uintptr_t PartitionAddressSpace::reserved_base_address_ =
+ kReservedAddressSpaceOffsetMask;
+uintptr_t PartitionAddressSpace::normal_bucket_pool_base_address_ =
+ kNormalBucketPoolOffsetMask;
+
+pool_handle PartitionAddressSpace::direct_map_pool_ = 0;
+pool_handle PartitionAddressSpace::normal_bucket_pool_ = 0;
+
+void PartitionAddressSpace::Init() {
+ PA_DCHECK(kReservedAddressSpaceOffsetMask == reserved_base_address_);
+ reserved_base_address_ = reinterpret_cast<uintptr_t>(AllocPages(
+ nullptr, kDesiredAddressSpaceSize, kReservedAddressSpaceAlignment,
+ base::PageInaccessible, PageTag::kPartitionAlloc, false));
+ PA_CHECK(reserved_base_address_);
+ PA_DCHECK(!(reserved_base_address_ & kReservedAddressSpaceOffsetMask));
+
+ uintptr_t current = reserved_base_address_;
+
+ direct_map_pool_ = internal::AddressPoolManager::GetInstance()->Add(
+ current, kDirectMapPoolSize);
+ PA_DCHECK(direct_map_pool_);
+ current += kDirectMapPoolSize;
+
+ normal_bucket_pool_base_address_ = current;
+ normal_bucket_pool_ = internal::AddressPoolManager::GetInstance()->Add(
+ current, kNormalBucketPoolSize);
+ PA_DCHECK(normal_bucket_pool_);
+ current += kNormalBucketPoolSize;
+ PA_DCHECK(reserved_base_address_ + kDesiredAddressSpaceSize == current);
+}
+
+void PartitionAddressSpace::UninitForTesting() {
+ PA_DCHECK(kReservedAddressSpaceOffsetMask != reserved_base_address_);
+ FreePages(reinterpret_cast<void*>(reserved_base_address_),
+ kReservedAddressSpaceAlignment);
+ reserved_base_address_ = kReservedAddressSpaceOffsetMask;
+ direct_map_pool_ = 0;
+ normal_bucket_pool_ = 0;
+ internal::AddressPoolManager::GetInstance()->ResetForTesting();
+}
+
+#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+} // namespace internal
+
+} // namespace base
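
A minimal standalone C++ sketch of the sentinel trick used in this file: before Init(), reserved_base_address_ holds kReservedAddressSpaceOffsetMask, which can never equal a 32GiB-aligned base, so a mask-and-compare containment test fails for every pointer. The constants and names below are illustrative only (assuming a 64-bit uintptr_t), not the patch's actual values:

#include <cassert>
#include <cstdint>

namespace {

constexpr uintptr_t kAlignment = uintptr_t{32} * 1024 * 1024 * 1024;  // 32GiB.
constexpr uintptr_t kOffsetMask = kAlignment - 1;
constexpr uintptr_t kBaseMask = ~kOffsetMask;

// Sentinel: all offset bits set, so (ptr & kBaseMask), whose offset bits are
// all zero, can never compare equal to it.
uintptr_t g_reserved_base = kOffsetMask;

bool Contains(const void* address) {
  return (reinterpret_cast<uintptr_t>(address) & kBaseMask) == g_reserved_base;
}

}  // namespace

int main() {
  int local = 0;
  assert(!Contains(&local));  // Always false before the equivalent of Init().

  // Pretend Init() reserved a 32GiB-aligned region at 128GiB.
  g_reserved_base = kAlignment * 4;
  assert(Contains(reinterpret_cast<void*>(g_reserved_base + 42)));
  assert(!Contains(reinterpret_cast<void*>(g_reserved_base + kAlignment)));
  return 0;
}
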
diff --git a/chromium/base/allocator/partition_allocator/partition_address_space.h b/chromium/base/allocator/partition_allocator/partition_address_space.h
new file mode 100644
index 00000000000..7297a2110db
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/partition_address_space.h
@@ -0,0 +1,151 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
+
+#include "base/allocator/partition_allocator/address_pool_manager.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/partition_alloc_features.h"
+#include "base/base_export.h"
+#include "base/bits.h"
+#include "base/feature_list.h"
+#include "base/notreached.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace internal {
+
+// The feature is not applicable to 32-bit address spaces.
+// ARCH_CPU_64_BITS implies a 64-bit instruction set, but not necessarily a
+// 64-bit address space. The only known case where the address space is 32-bit
+// is NaCl, so exclude it explicitly. The static_assert below ensures that
+// others won't slip through.
+// TODO(tasak): define ADDRESS_SPACE_64_BITS as "defined(ARCH_CPU_64_BITS) &&
+// !defined(OS_NACL)" and use it.
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+static_assert(sizeof(size_t) >= 8, "Need more than 32-bit address space");
+
+// Reserves address space for PartitionAllocator.
+class BASE_EXPORT PartitionAddressSpace {
+ public:
+ static ALWAYS_INLINE internal::pool_handle GetDirectMapPool() {
+ return direct_map_pool_;
+ }
+ static ALWAYS_INLINE internal::pool_handle GetNormalBucketPool() {
+ return normal_bucket_pool_;
+ }
+
+ static void Init();
+ static void UninitForTesting();
+
+ static ALWAYS_INLINE bool Contains(const void* address) {
+ return (reinterpret_cast<uintptr_t>(address) &
+ kReservedAddressSpaceBaseMask) == reserved_base_address_;
+ }
+
+ static ALWAYS_INLINE bool IsInNormalBucketPool(const void* address) {
+ return (reinterpret_cast<uintptr_t>(address) & kNormalBucketPoolBaseMask) ==
+ normal_bucket_pool_base_address_;
+ }
+
+  // PartitionAddressSpace is a static-only class.
+ PartitionAddressSpace() = delete;
+ PartitionAddressSpace(const PartitionAddressSpace&) = delete;
+ void* operator new(size_t) = delete;
+ void* operator new(size_t, void*) = delete;
+
+ private:
+  // Partition Alloc Address Space
+  // Reserves 32GiB of address space: one direct map space (16GiB) and one
+  // normal bucket space (16GiB).
+  // TODO(bartekn): Look into devices with 39-bit address space that have 256GiB
+  // user-mode space. Libraries loaded at random addresses may stand in the way
+  // of reserving a contiguous 64GiB region (even though we're requesting only
+  // 32GiB, AllocPages may under the covers reserve 64GiB to satisfy the
+  // alignment requirements).
+ //
+ // +----------------+ reserved_base_address_(32GiB aligned)
+ // | direct map |
+ // | space |
+ // +----------------+ reserved_base_address_ + 16GiB
+ // | normal buckets |
+ // | space |
+ // +----------------+ reserved_base_address_ + 32GiB
+
+ static constexpr size_t kGigaBytes = 1024 * 1024 * 1024;
+ static constexpr size_t kDirectMapPoolSize = 16 * kGigaBytes;
+ static constexpr size_t kNormalBucketPoolSize = 16 * kGigaBytes;
+ static constexpr uintptr_t kNormalBucketPoolOffsetMask =
+ static_cast<uintptr_t>(kNormalBucketPoolSize) - 1;
+ static constexpr uintptr_t kNormalBucketPoolBaseMask =
+ ~kNormalBucketPoolOffsetMask;
+
+ // Reserves 32GiB aligned address space.
+ // We align on 32GiB as well, and since it's a power of two we can check a
+ // pointer with a single bitmask operation.
+ static constexpr size_t kDesiredAddressSpaceSize =
+ kDirectMapPoolSize + kNormalBucketPoolSize;
+ static constexpr size_t kReservedAddressSpaceAlignment =
+ kDesiredAddressSpaceSize;
+ static constexpr uintptr_t kReservedAddressSpaceOffsetMask =
+ static_cast<uintptr_t>(kReservedAddressSpaceAlignment) - 1;
+ static constexpr uintptr_t kReservedAddressSpaceBaseMask =
+ ~kReservedAddressSpaceOffsetMask;
+
+ static_assert(
+ bits::IsPowerOfTwo(PartitionAddressSpace::kReservedAddressSpaceAlignment),
+ "kReservedAddressSpaceALignment should be a power of two.");
+ static_assert(PartitionAddressSpace::kReservedAddressSpaceAlignment >=
+ PartitionAddressSpace::kDesiredAddressSpaceSize,
+ "kReservedAddressSpaceAlignment should be larger or equal to "
+ "kDesiredAddressSpaceSize.");
+ static_assert(
+ PartitionAddressSpace::kReservedAddressSpaceAlignment / 2 <
+ PartitionAddressSpace::kDesiredAddressSpaceSize,
+ "kReservedAddressSpaceAlignment should be the smallest power of "
+ "two greater or equal to kDesiredAddressSpaceSize. So a half of "
+ "the alignment should be smaller than the desired size.");
+
+ // See the comment describing the address layout above.
+ static uintptr_t reserved_base_address_;
+
+ static uintptr_t normal_bucket_pool_base_address_;
+
+ static internal::pool_handle direct_map_pool_;
+ static internal::pool_handle normal_bucket_pool_;
+};
+
+ALWAYS_INLINE internal::pool_handle GetDirectMapPool() {
+ PA_DCHECK(IsPartitionAllocGigaCageEnabled());
+ return PartitionAddressSpace::GetDirectMapPool();
+}
+
+ALWAYS_INLINE internal::pool_handle GetNormalBucketPool() {
+ PA_DCHECK(IsPartitionAllocGigaCageEnabled());
+ return PartitionAddressSpace::GetNormalBucketPool();
+}
+
+#else // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+ALWAYS_INLINE internal::pool_handle GetDirectMapPool() {
+ NOTREACHED();
+ return 0;
+}
+
+ALWAYS_INLINE internal::pool_handle GetNormalBucketPool() {
+ NOTREACHED();
+ return 0;
+}
+
+#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+} // namespace internal
+
+} // namespace base
+
+#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
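
To illustrate the layout comment in this header: within the 32GiB reservation, the normal bucket pool begins at a 16GiB-aligned offset, so membership in that pool can also be tested with a single mask-and-compare. A rough, self-contained C++ sketch; the constants and addresses below are illustrative, not the header's guaranteed values on every platform:

#include <cassert>
#include <cstdint>

constexpr uintptr_t kGiB = uintptr_t{1} << 30;
constexpr uintptr_t kDirectMapPoolSize = 16 * kGiB;
constexpr uintptr_t kNormalBucketPoolSize = 16 * kGiB;
constexpr uintptr_t kNormalBucketPoolBaseMask = ~(kNormalBucketPoolSize - 1);

int main() {
  // Pretend AllocPages() returned a 32GiB-aligned reservation at 128GiB.
  uintptr_t reserved_base = uintptr_t{128} * kGiB;
  uintptr_t direct_map_base = reserved_base;                          // First 16GiB.
  uintptr_t normal_bucket_base = reserved_base + kDirectMapPoolSize;  // Next 16GiB.

  // normal_bucket_base is 16GiB-aligned, so one mask identifies the pool.
  assert((normal_bucket_base & kNormalBucketPoolBaseMask) == normal_bucket_base);

  uintptr_t in_pool = normal_bucket_base + 12345;
  uintptr_t in_direct_map = direct_map_base + 12345;
  assert((in_pool & kNormalBucketPoolBaseMask) == normal_bucket_base);
  assert((in_direct_map & kNormalBucketPoolBaseMask) != normal_bucket_base);
  return 0;
}
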
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.cc b/chromium/base/allocator/partition_allocator/partition_alloc.cc
index a186840ffe3..836daf90f1c 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc.cc
@@ -7,8 +7,10 @@
#include <string.h>
#include <memory>
-#include <type_traits>
+#include "base/allocator/partition_allocator/oom.h"
+#include "base/allocator/partition_allocator/page_allocator_internal.h"
+#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
@@ -19,20 +21,30 @@
namespace base {
-namespace {
-
template <bool thread_safe>
-bool InitializeOnce() {
- // We mark the sentinel bucket/page as free to make sure it is skipped by
- // our logic to find a new active page.
- internal::PartitionBucket<thread_safe>::get_sentinel_bucket()
- ->active_pages_head =
- internal::PartitionPage<thread_safe>::get_sentinel_page();
-
- return true;
+NOINLINE void PartitionRoot<thread_safe>::OutOfMemory(size_t size) {
+#if !defined(ARCH_CPU_64_BITS)
+ // Check whether this OOM is due to a lot of super pages that are allocated
+ // but not committed, probably due to http://crbug.com/421387.
+ if (total_size_of_super_pages + total_size_of_direct_mapped_pages -
+ total_size_of_committed_pages >
+ kReasonableSizeOfUnusedPages) {
+ internal::PartitionOutOfMemoryWithLotsOfUncommitedPages(size);
+ }
+#endif
+ if (internal::g_oom_handling_function)
+ (*internal::g_oom_handling_function)(size);
+ OOM_CRASH(size);
}
-} // namespace
+template <bool thread_safe>
+void PartitionRoot<thread_safe>::DecommitEmptyPages() {
+ for (Page*& page : global_empty_page_ring) {
+ if (page)
+ page->DecommitIfPossible(this);
+ page = nullptr;
+ }
+}
// Two partition pages are used as guard / metadata page so make sure the super
// page size is bigger.
@@ -50,10 +62,6 @@ static_assert(sizeof(internal::PartitionPage<internal::ThreadSafe>) <=
static_assert(sizeof(internal::PartitionBucket<internal::ThreadSafe>) <=
kPageMetadataSize,
"PartitionBucket should not be too big");
-static_assert(
- sizeof(internal::PartitionSuperPageExtentEntry<internal::ThreadSafe>) <=
- kPageMetadataSize,
- "PartitionSuperPageExtentEntry should not be too big");
static_assert(kPageMetadataSize * kNumPartitionPagesPerSuperPage <=
kSystemPageSize,
"page metadata fits in hole");
@@ -62,17 +70,12 @@ static_assert(kGenericMaxDirectMapped <=
(1UL << 31) + kPageAllocationGranularity,
"maximum direct mapped allocation");
// Check that some of our zanier calculations worked out as expected.
-static_assert(kGenericSmallestBucket == 8, "generic smallest bucket");
+static_assert(kGenericSmallestBucket == alignof(std::max_align_t),
+ "generic smallest bucket");
static_assert(kGenericMaxBucketed == 983040, "generic max bucketed");
static_assert(kMaxSystemPagesPerSlotSpan < (1 << 8),
"System pages per slot span must be less than 128.");
-PartitionRoot::PartitionRoot() = default;
-PartitionRoot::~PartitionRoot() = default;
-PartitionRootGeneric::PartitionRootGeneric() = default;
-PartitionRootGeneric::~PartitionRootGeneric() = default;
-PartitionAllocatorGeneric::PartitionAllocatorGeneric() = default;
-
Lock& GetHooksLock() {
static NoDestructor<Lock> lock;
return *lock;
@@ -97,8 +100,8 @@ void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook,
// Chained hooks are not supported. Registering a non-null hook when a
// non-null hook is already registered indicates somebody is trying to
// overwrite a hook.
- CHECK((!allocation_observer_hook_ && !free_observer_hook_) ||
- (!alloc_hook && !free_hook))
+ PA_CHECK((!allocation_observer_hook_ && !free_observer_hook_) ||
+ (!alloc_hook && !free_hook))
<< "Overwriting already set observer hooks";
allocation_observer_hook_ = alloc_hook;
free_observer_hook_ = free_hook;
@@ -111,9 +114,9 @@ void PartitionAllocHooks::SetOverrideHooks(AllocationOverrideHook* alloc_hook,
ReallocOverrideHook realloc_hook) {
AutoLock guard(GetHooksLock());
- CHECK((!allocation_override_hook_ && !free_override_hook_ &&
- !realloc_override_hook_) ||
- (!alloc_hook && !free_hook && !realloc_hook))
+ PA_CHECK((!allocation_override_hook_ && !free_override_hook_ &&
+ !realloc_override_hook_) ||
+ (!alloc_hook && !free_hook && !realloc_hook))
<< "Overwriting already set override hooks";
allocation_override_hook_ = alloc_hook;
free_override_hook_ = free_hook;
@@ -126,10 +129,8 @@ void PartitionAllocHooks::AllocationObserverHookIfEnabled(
void* address,
size_t size,
const char* type_name) {
- if (AllocationObserverHook* hook =
- allocation_observer_hook_.load(std::memory_order_relaxed)) {
+ if (auto* hook = allocation_observer_hook_.load(std::memory_order_relaxed))
hook(address, size, type_name);
- }
}
bool PartitionAllocHooks::AllocationOverrideHookIfEnabled(
@@ -137,25 +138,19 @@ bool PartitionAllocHooks::AllocationOverrideHookIfEnabled(
int flags,
size_t size,
const char* type_name) {
- if (AllocationOverrideHook* hook =
- allocation_override_hook_.load(std::memory_order_relaxed)) {
+ if (auto* hook = allocation_override_hook_.load(std::memory_order_relaxed))
return hook(out, flags, size, type_name);
- }
return false;
}
void PartitionAllocHooks::FreeObserverHookIfEnabled(void* address) {
- if (FreeObserverHook* hook =
- free_observer_hook_.load(std::memory_order_relaxed)) {
+ if (auto* hook = free_observer_hook_.load(std::memory_order_relaxed))
hook(address);
- }
}
bool PartitionAllocHooks::FreeOverrideHookIfEnabled(void* address) {
- if (FreeOverrideHook* hook =
- free_override_hook_.load(std::memory_order_relaxed)) {
+ if (auto* hook = free_override_hook_.load(std::memory_order_relaxed))
return hook(address);
- }
return false;
}
@@ -173,6 +168,7 @@ void PartitionAllocHooks::ReallocObserverHookIfEnabled(void* old_address,
allocation_hook(new_address, size, type_name);
}
}
+
bool PartitionAllocHooks::ReallocOverrideHookIfEnabled(size_t* out,
void* address) {
if (ReallocOverrideHook* hook =
@@ -182,39 +178,41 @@ bool PartitionAllocHooks::ReallocOverrideHookIfEnabled(size_t* out,
return false;
}
-template <bool thread_safe>
-static void PartitionAllocBaseInit(
- internal::PartitionRootBase<thread_safe>* root) {
- DCHECK(!root->initialized);
-
- static bool intialized = InitializeOnce<thread_safe>();
- static_cast<void>(intialized);
-
- // This is a "magic" value so we can test if a root pointer is valid.
- root->inverted_self = ~reinterpret_cast<uintptr_t>(root);
- root->initialized = true;
-}
-
void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
- DCHECK(on_out_of_memory);
+ PA_DCHECK(on_out_of_memory);
internal::g_oom_handling_function = on_out_of_memory;
-}
-void PartitionRoot::Init(size_t bucket_count, size_t maximum_allocation) {
- PartitionAllocBaseInit(this);
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ // Reserve address space for partition alloc.
+ if (IsPartitionAllocGigaCageEnabled())
+ internal::PartitionAddressSpace::Init();
+#endif
+}
- num_buckets = bucket_count;
- max_allocation = maximum_allocation;
- for (size_t i = 0; i < num_buckets; ++i) {
- Bucket& bucket = buckets()[i];
- bucket.Init(i == 0 ? kAllocationGranularity : (i << kBucketShift));
- }
+void PartitionAllocGlobalUninitForTesting() {
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ if (IsPartitionAllocGigaCageEnabled())
+ internal::PartitionAddressSpace::UninitForTesting();
+#endif
+ internal::g_oom_handling_function = nullptr;
}
-void PartitionRootGeneric::Init() {
+template <bool thread_safe>
+void PartitionRoot<thread_safe>::InitSlowPath() {
ScopedGuard guard{lock_};
- PartitionAllocBaseInit(this);
+ if (initialized.load(std::memory_order_relaxed))
+ return;
+
+ // We mark the sentinel bucket/page as free to make sure it is skipped by our
+ // logic to find a new active page.
+ //
+ // This may be executed several times, once per PartitionRoot. This is not an
+ // issue, as the operation is atomic and idempotent.
+ Bucket::get_sentinel_bucket()->active_pages_head = Page::get_sentinel_page();
+
+ // This is a "magic" value so we can test if a root pointer is valid.
+ inverted_self = ~reinterpret_cast<uintptr_t>(this);
// Precalculate some shift and mask constants used in the hot path.
// Example: malloc(41) == 101001 binary.
@@ -265,8 +263,8 @@ void PartitionRootGeneric::Init() {
}
current_increment <<= 1;
}
- DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
- DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
+ PA_DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
+ PA_DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
// Then set up the fast size -> bucket lookup table.
bucket = &buckets[0];
@@ -288,38 +286,37 @@ void PartitionRootGeneric::Init() {
}
}
}
- DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
- DCHECK(bucket_ptr == &bucket_lookups[0] +
- ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
+ PA_DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
+ PA_DCHECK(bucket_ptr == &bucket_lookups[0] + ((kBitsPerSizeT + 1) *
+ kGenericNumBucketsPerOrder));
// And there's one last bucket lookup that will be hit for e.g. malloc(-1),
// which tries to overflow to a non-existent order.
*bucket_ptr = Bucket::get_sentinel_bucket();
+
+ initialized = true;
}
-bool PartitionReallocDirectMappedInPlace(
- PartitionRootGeneric* root,
- internal::PartitionPage<internal::ThreadSafe>* page,
- size_t raw_size) EXCLUSIVE_LOCKS_REQUIRED(root->lock_) {
- DCHECK(page->bucket->is_direct_mapped());
+template <bool thread_safe>
+bool PartitionRoot<thread_safe>::ReallocDirectMappedInPlace(
+ internal::PartitionPage<thread_safe>* page,
+ size_t raw_size) {
+ PA_DCHECK(page->bucket->is_direct_mapped());
raw_size = internal::PartitionCookieSizeAdjustAdd(raw_size);
// Note that the new size might be a bucketed size; this function is called
// whenever we're reallocating a direct mapped allocation.
- size_t new_size = PartitionRootGeneric::Bucket::get_direct_map_size(raw_size);
+ size_t new_size = Bucket::get_direct_map_size(raw_size);
if (new_size < kGenericMinDirectMappedDownsize)
return false;
// bucket->slot_size is the current size of the allocation.
size_t current_size = page->bucket->slot_size;
- char* char_ptr =
- static_cast<char*>(PartitionRootGeneric::Page::ToPointer(page));
+ char* char_ptr = static_cast<char*>(Page::ToPointer(page));
if (new_size == current_size) {
// No need to move any memory around, but update size and cookie below.
} else if (new_size < current_size) {
- size_t map_size =
- internal::PartitionDirectMapExtent<internal::ThreadSafe>::FromPage(page)
- ->map_size;
+ size_t map_size = DirectMapExtent::FromPage(page)->map_size;
// Don't reallocate in-place if new size is less than 80 % of the full
// map size, to avoid holding on to too much unused address space.
@@ -328,17 +325,14 @@ bool PartitionReallocDirectMappedInPlace(
// Shrink by decommitting unneeded pages and making them inaccessible.
size_t decommit_size = current_size - new_size;
- root->DecommitSystemPages(char_ptr + new_size, decommit_size);
+ DecommitSystemPages(char_ptr + new_size, decommit_size);
SetSystemPagesAccess(char_ptr + new_size, decommit_size, PageInaccessible);
- } else if (new_size <=
- internal::PartitionDirectMapExtent<internal::ThreadSafe>::FromPage(
- page)
- ->map_size) {
+ } else if (new_size <= DirectMapExtent::FromPage(page)->map_size) {
// Grow within the actually allocated memory. Just need to make the
// pages accessible again.
size_t recommit_size = new_size - current_size;
SetSystemPagesAccess(char_ptr + current_size, recommit_size, PageReadWrite);
- root->RecommitSystemPages(char_ptr + current_size, recommit_size);
+ RecommitSystemPages(char_ptr + current_size, recommit_size);
#if DCHECK_IS_ON()
memset(char_ptr + current_size, kUninitializedByte, recommit_size);
@@ -356,27 +350,27 @@ bool PartitionReallocDirectMappedInPlace(
#endif
page->set_raw_size(raw_size);
- DCHECK(page->get_raw_size() == raw_size);
+ PA_DCHECK(page->get_raw_size() == raw_size);
page->bucket->slot_size = new_size;
return true;
}
-void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
- int flags,
- void* ptr,
- size_t new_size,
- const char* type_name) {
+template <bool thread_safe>
+void* PartitionRoot<thread_safe>::ReallocFlags(int flags,
+ void* ptr,
+ size_t new_size,
+ const char* type_name) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
CHECK_MAX_SIZE_OR_RETURN_NULLPTR(new_size, flags);
void* result = realloc(ptr, new_size);
- CHECK(result || flags & PartitionAllocReturnNull);
+ PA_CHECK(result || flags & PartitionAllocReturnNull);
return result;
#else
if (UNLIKELY(!ptr))
- return PartitionAllocGenericFlags(root, flags, new_size, type_name);
+ return AllocFlags(flags, new_size, type_name);
if (UNLIKELY(!new_size)) {
- root->Free(ptr);
+ Free(ptr);
return nullptr;
}
@@ -394,19 +388,19 @@ void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
&actual_old_size, ptr);
}
if (LIKELY(!overridden)) {
- PartitionRootGeneric::Page* page = PartitionRootGeneric::Page::FromPointer(
- internal::PartitionCookieFreePointerAdjust(ptr));
+ auto* page =
+ Page::FromPointer(internal::PartitionCookieFreePointerAdjust(ptr));
bool success = false;
{
- PartitionRootGeneric::ScopedGuard guard{root->lock_};
+ internal::ScopedGuard<thread_safe> guard{lock_};
// TODO(palmer): See if we can afford to make this a CHECK.
- DCHECK(root->IsValidPage(page));
+ PA_DCHECK(IsValidPage(page));
if (UNLIKELY(page->bucket->is_direct_mapped())) {
// We may be able to perform the realloc in place by changing the
// accessibility of memory pages and, if reducing the size, decommitting
// them.
- success = PartitionReallocDirectMappedInPlace(root, page, new_size);
+ success = ReallocDirectMappedInPlace(page, new_size);
}
}
if (success) {
@@ -417,8 +411,8 @@ void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
return ptr;
}
- const size_t actual_new_size = root->ActualSize(new_size);
- actual_old_size = PartitionAllocGetSize(ptr);
+ const size_t actual_new_size = ActualSize(new_size);
+ actual_old_size = PartitionAllocGetSize<thread_safe>(ptr);
// TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
// new size is a significant percentage smaller. We could do the same if we
@@ -439,7 +433,7 @@ void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
}
// This realloc cannot be resized in-place. Sadness.
- void* ret = PartitionAllocGenericFlags(root, flags, new_size, type_name);
+ void* ret = AllocFlags(flags, new_size, type_name);
if (!ret) {
if (flags & PartitionAllocReturnNull)
return nullptr;
@@ -451,22 +445,9 @@ void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
copy_size = new_size;
memcpy(ret, ptr, copy_size);
- root->Free(ptr);
+ Free(ptr);
return ret;
#endif
-} // namespace base
-
-void* PartitionRootGeneric::Realloc(void* ptr,
- size_t new_size,
- const char* type_name) {
- return PartitionReallocGenericFlags(this, 0, ptr, new_size, type_name);
-}
-
-void* PartitionRootGeneric::TryRealloc(void* ptr,
- size_t new_size,
- const char* type_name) {
- return PartitionReallocGenericFlags(this, PartitionAllocReturnNull, ptr,
- new_size, type_name);
}
template <bool thread_safe>
@@ -495,8 +476,8 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
constexpr size_t kMaxSlotCount =
(kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize;
- DCHECK(bucket_num_slots <= kMaxSlotCount);
- DCHECK(page->num_unprovisioned_slots < bucket_num_slots);
+ PA_DCHECK(bucket_num_slots <= kMaxSlotCount);
+ PA_DCHECK(page->num_unprovisioned_slots < bucket_num_slots);
size_t num_slots = bucket_num_slots - page->num_unprovisioned_slots;
char slot_usage[kMaxSlotCount];
#if !defined(OS_WIN)
@@ -512,7 +493,7 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
for (internal::PartitionFreelistEntry* entry = page->freelist_head; entry;
/**/) {
size_t slot_index = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
- DCHECK(slot_index < num_slots);
+ PA_DCHECK(slot_index < num_slots);
slot_usage[slot_index] = 0;
entry = internal::EncodedPartitionFreelistEntry::Decode(entry->next);
#if !defined(OS_WIN)
@@ -532,7 +513,7 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
while (!slot_usage[num_slots - 1]) {
truncated_slots++;
num_slots--;
- DCHECK(num_slots);
+ PA_DCHECK(num_slots);
}
// First, do the work of calculating the discardable bytes. Don't actually
// discard anything unless the discard flag was passed in.
@@ -546,13 +527,13 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
// a slot span, so we "own" all the way up the page boundary.
end_ptr = reinterpret_cast<char*>(
RoundUpToSystemPage(reinterpret_cast<size_t>(end_ptr)));
- DCHECK(end_ptr <= ptr + bucket->get_bytes_per_span());
+ PA_DCHECK(end_ptr <= ptr + bucket->get_bytes_per_span());
if (begin_ptr < end_ptr) {
unprovisioned_bytes = end_ptr - begin_ptr;
discardable_bytes += unprovisioned_bytes;
}
if (unprovisioned_bytes && discard) {
- DCHECK(truncated_slots > 0);
+ PA_DCHECK(truncated_slots > 0);
size_t num_new_entries = 0;
page->num_unprovisioned_slots += static_cast<uint16_t>(truncated_slots);
@@ -582,7 +563,7 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
if (back)
back->next = internal::PartitionFreelistEntry::Encode(nullptr);
- DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
+ PA_DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
// Discard the memory.
DiscardSystemPages(begin_ptr, unprovisioned_bytes);
}
@@ -626,23 +607,15 @@ static void PartitionPurgeBucket(
internal::PartitionPage<thread_safe>::get_sentinel_page()) {
for (internal::PartitionPage<thread_safe>* page = bucket->active_pages_head;
page; page = page->next_page) {
- DCHECK(page != internal::PartitionPage<thread_safe>::get_sentinel_page());
+ PA_DCHECK(page !=
+ internal::PartitionPage<thread_safe>::get_sentinel_page());
PartitionPurgePage(page, true);
}
}
}
-void PartitionRoot::PurgeMemory(int flags) {
- ScopedGuard guard{lock_};
- if (flags & PartitionPurgeDecommitEmptyPages)
- DecommitEmptyPages();
- // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages
- // here because that flag is only useful for allocations >= system page size.
- // We only have allocations that large inside generic partitions at the
- // moment.
-}
-
-void PartitionRootGeneric::PurgeMemory(int flags) {
+template <bool thread_safe>
+void PartitionRoot<thread_safe>::PurgeMemory(int flags) {
ScopedGuard guard{lock_};
if (flags & PartitionPurgeDecommitEmptyPages)
DecommitEmptyPages();
@@ -685,7 +658,7 @@ static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out,
} else if (page->is_full()) {
++stats_out->num_full_pages;
} else {
- DCHECK(page->is_active());
+ PA_DCHECK(page->is_active());
++stats_out->num_active_pages;
}
}
@@ -694,7 +667,7 @@ template <bool thread_safe>
static void PartitionDumpBucketStats(
PartitionBucketMemoryStats* stats_out,
const internal::PartitionBucket<thread_safe>* bucket) {
- DCHECK(!bucket->is_direct_mapped());
+ PA_DCHECK(!bucket->is_direct_mapped());
stats_out->is_valid = false;
// If the active page list is empty (==
// internal::PartitionPage::get_sentinel_page()), the bucket might still need
@@ -719,13 +692,13 @@ static void PartitionDumpBucketStats(
for (internal::PartitionPage<thread_safe>* page = bucket->empty_pages_head;
page; page = page->next_page) {
- DCHECK(page->is_empty() || page->is_decommitted());
+ PA_DCHECK(page->is_empty() || page->is_decommitted());
PartitionDumpPageStats(stats_out, page);
}
for (internal::PartitionPage<thread_safe>* page =
bucket->decommitted_pages_head;
page; page = page->next_page) {
- DCHECK(page->is_decommitted());
+ PA_DCHECK(page->is_decommitted());
PartitionDumpPageStats(stats_out, page);
}
@@ -733,15 +706,17 @@ static void PartitionDumpBucketStats(
internal::PartitionPage<thread_safe>::get_sentinel_page()) {
for (internal::PartitionPage<thread_safe>* page = bucket->active_pages_head;
page; page = page->next_page) {
- DCHECK(page != internal::PartitionPage<thread_safe>::get_sentinel_page());
+ PA_DCHECK(page !=
+ internal::PartitionPage<thread_safe>::get_sentinel_page());
PartitionDumpPageStats(stats_out, page);
}
}
}
-void PartitionRootGeneric::DumpStats(const char* partition_name,
- bool is_light_dump,
- PartitionStatsDumper* dumper) {
+template <bool thread_safe>
+void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
+ bool is_light_dump,
+ PartitionStatsDumper* dumper) {
ScopedGuard guard{lock_};
PartitionMemoryStats stats = {0};
stats.total_mmapped_bytes =
@@ -767,7 +742,7 @@ void PartitionRootGeneric::DumpStats(const char* partition_name,
const Bucket* bucket = &buckets[i];
// Don't report the pseudo buckets that the generic allocator sets up in
// order to preserve a fast size->bucket map (see
- // PartitionRootGeneric::Init() for details).
+ // PartitionRoot::Init() for details).
if (!bucket->active_pages_head)
bucket_stats[i].is_valid = false;
else
@@ -780,12 +755,11 @@ void PartitionRootGeneric::DumpStats(const char* partition_name,
}
}
- for (internal::PartitionDirectMapExtent<internal::ThreadSafe>* extent =
- direct_map_list;
+ for (DirectMapExtent* extent = direct_map_list;
extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
extent = extent->next_extent, ++num_direct_mapped_allocations) {
- DCHECK(!extent->next_extent ||
- extent->next_extent->prev_extent == extent);
+ PA_DCHECK(!extent->next_extent ||
+ extent->next_extent->prev_extent == extent);
size_t slot_size = extent->bucket->slot_size;
direct_mapped_allocations_total_size += slot_size;
if (is_light_dump)
@@ -796,7 +770,7 @@ void PartitionRootGeneric::DumpStats(const char* partition_name,
if (!is_light_dump) {
// Call |PartitionsDumpBucketStats| after collecting stats because it can
- // try to allocate using |PartitionRootGeneric::Alloc()| and it can't
+ // try to allocate using |PartitionRoot::Alloc()| and it can't
// obtain the lock.
for (size_t i = 0; i < kGenericNumBuckets; ++i) {
if (bucket_stats[i].is_valid)
@@ -823,52 +797,7 @@ void PartitionRootGeneric::DumpStats(const char* partition_name,
dumper->PartitionDumpTotals(partition_name, &stats);
}
-void PartitionRoot::DumpStats(const char* partition_name,
- bool is_light_dump,
- PartitionStatsDumper* dumper) {
- ScopedGuard guard{lock_};
-
- PartitionMemoryStats stats = {0};
- stats.total_mmapped_bytes = total_size_of_super_pages;
- stats.total_committed_bytes = total_size_of_committed_pages;
- DCHECK(!total_size_of_direct_mapped_pages);
-
- static constexpr size_t kMaxReportableBuckets = 4096 / sizeof(void*);
- std::unique_ptr<PartitionBucketMemoryStats[]> memory_stats;
- if (!is_light_dump) {
- memory_stats =
- std::make_unique<PartitionBucketMemoryStats[]>(kMaxReportableBuckets);
- }
-
- const size_t partition_num_buckets = num_buckets;
- DCHECK(partition_num_buckets <= kMaxReportableBuckets);
-
- for (size_t i = 0; i < partition_num_buckets; ++i) {
- PartitionBucketMemoryStats bucket_stats = {0};
- PartitionDumpBucketStats(&bucket_stats, &buckets()[i]);
- if (bucket_stats.is_valid) {
- stats.total_resident_bytes += bucket_stats.resident_bytes;
- stats.total_active_bytes += bucket_stats.active_bytes;
- stats.total_decommittable_bytes += bucket_stats.decommittable_bytes;
- stats.total_discardable_bytes += bucket_stats.discardable_bytes;
- }
- if (!is_light_dump) {
- if (bucket_stats.is_valid)
- memory_stats[i] = bucket_stats;
- else
- memory_stats[i].is_valid = false;
- }
- }
- if (!is_light_dump) {
- // PartitionsDumpBucketStats is called after collecting stats because it
- // can use PartitionRoot::Alloc() to allocate and this can affect the
- // statistics.
- for (size_t i = 0; i < partition_num_buckets; ++i) {
- if (memory_stats[i].is_valid)
- dumper->PartitionsDumpBucketStats(partition_name, &memory_stats[i]);
- }
- }
- dumper->PartitionDumpTotals(partition_name, &stats);
-}
+template struct BASE_EXPORT PartitionRoot<internal::ThreadSafe>;
+template struct BASE_EXPORT PartitionRoot<internal::NotThreadSafe>;
} // namespace base
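
The in-place realloc policy for direct-mapped allocations in ReallocDirectMappedInPlace() boils down to three outcomes. A rough sketch of just the decision logic follows (the real code also adjusts cookies, raw size, and page permissions); the 80% threshold mirrors the comment in the patch, while the enum and function names are illustrative:

#include <cstddef>

enum class DirectMapReallocAction {
  kKeepInPlace,        // Same bucketed size; only metadata/cookies change.
  kShrinkInPlace,      // Decommit the now-unneeded tail pages.
  kGrowInPlace,        // Recommit pages within the existing mapping.
  kNeedsNewAllocation  // Fall back to alloc + memcpy + free.
};

DirectMapReallocAction ChooseDirectMapRealloc(size_t current_size,
                                              size_t map_size,
                                              size_t new_size) {
  if (new_size == current_size)
    return DirectMapReallocAction::kKeepInPlace;
  if (new_size < current_size) {
    // Give up the mapping if the new size is below 80% of the full map size,
    // to avoid holding on to too much unused address space.
    if (new_size < map_size / 5 * 4)
      return DirectMapReallocAction::kNeedsNewAllocation;
    return DirectMapReallocAction::kShrinkInPlace;
  }
  if (new_size <= map_size)
    return DirectMapReallocAction::kGrowInPlace;
  return DirectMapReallocAction::kNeedsNewAllocation;
}
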
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.h b/chromium/base/allocator/partition_allocator/partition_alloc.h
index ef635e78291..662063ddde7 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc.h
+++ b/chromium/base/allocator/partition_allocator/partition_alloc.h
@@ -6,38 +6,29 @@
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
// DESCRIPTION
-// PartitionRoot::Alloc() / PartitionRootGeneric::Alloc() and PartitionFree() /
-// PartitionRootGeneric::Free() are approximately analagous to malloc() and
-// free().
+// PartitionRoot::Alloc() and PartitionRoot::Free() are approximately analogous
+// to malloc() and free().
//
-// The main difference is that a PartitionRoot / PartitionRootGeneric object
-// must be supplied to these functions, representing a specific "heap partition"
-// that will be used to satisfy the allocation. Different partitions are
-// guaranteed to exist in separate address spaces, including being separate from
-// the main system heap. If the contained objects are all freed, physical memory
-// is returned to the system but the address space remains reserved.
-// See PartitionAlloc.md for other security properties PartitionAlloc provides.
+// The main difference is that a PartitionRoot object must be supplied to these
+// functions, representing a specific "heap partition" that will be used to
+// satisfy the allocation. Different partitions are guaranteed to exist in
+// separate address spaces, including being separate from the main system
+// heap. If the contained objects are all freed, physical memory is returned to
+// the system but the address space remains reserved. See PartitionAlloc.md for
+// other security properties PartitionAlloc provides.
//
// THE ONLY LEGITIMATE WAY TO OBTAIN A PartitionRoot IS THROUGH THE
-// SizeSpecificPartitionAllocator / PartitionAllocatorGeneric classes. To
-// minimize the instruction count to the fullest extent possible, the
-// PartitionRoot is really just a header adjacent to other data areas provided
-// by the allocator class.
+// PartitionAllocator classes. To minimize the instruction count to the fullest
+// extent possible, the PartitionRoot is really just a header adjacent to other
+// data areas provided by the allocator class.
//
-// The PartitionRoot::Alloc() variant of the API has the following caveats:
-// - Allocations and frees against a single partition must be single threaded.
-// - Allocations must not exceed a max size, chosen at compile-time via a
-// templated parameter to PartitionAllocator.
-// - Allocation sizes must be aligned to the system pointer size.
-// - Allocations are bucketed exactly according to size.
-//
-// And for PartitionRootGeneric::Alloc():
+// The constraints for PartitionRoot::Alloc() are:
// - Multi-threaded use against a single partition is ok; locking is handled.
// - Allocations of any arbitrary size can be handled (subject to a limit of
-// INT_MAX bytes for security reasons).
+// INT_MAX bytes for security reasons).
// - Bucketing is by approximate size, for example an allocation of 4000 bytes
-// might be placed into a 4096-byte bucket. Bucket sizes are chosen to try and
-// keep worst-case waste to ~10%.
+// might be placed into a 4096-byte bucket. Bucket sizes are chosen to try and
+// keep worst-case waste to ~10%.
//
// The allocators are designed to be extremely fast, thanks to the following
// properties and design:
@@ -65,19 +56,25 @@
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/partition_address_space.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_cookie.h"
+#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_page.h"
-#include "base/allocator/partition_allocator/partition_root_base.h"
#include "base/allocator/partition_allocator/spin_lock.h"
#include "base/base_export.h"
#include "base/bits.h"
+#include "base/check_op.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
+#include "base/notreached.h"
+#include "base/partition_alloc_buildflags.h"
#include "base/stl_util.h"
#include "base/sys_byteorder.h"
#include "build/build_config.h"
+#include "build/buildflag.h"
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#include <stdlib.h>
@@ -90,86 +87,171 @@
if (flags & PartitionAllocReturnNull) { \
return nullptr; \
} \
- CHECK(false); \
+ PA_CHECK(false); \
}
namespace base {
-class PartitionStatsDumper;
+typedef void (*OomFunction)(size_t);
-enum PartitionPurgeFlags {
- // Decommitting the ring list of empty pages is reasonably fast.
- PartitionPurgeDecommitEmptyPages = 1 << 0,
- // Discarding unused system pages is slower, because it involves walking all
- // freelists in all active partition pages of all buckets >= system page
- // size. It often frees a similar amount of memory to decommitting the empty
- // pages, though.
- PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
+// PartitionAlloc supports setting hooks to observe allocations/frees as they
+// occur as well as 'override' hooks that allow overriding those operations.
+class BASE_EXPORT PartitionAllocHooks {
+ public:
+ // Log allocation and free events.
+ typedef void AllocationObserverHook(void* address,
+ size_t size,
+ const char* type_name);
+ typedef void FreeObserverHook(void* address);
+
+ // If it returns true, the allocation has been overridden with the pointer in
+ // *out.
+ typedef bool AllocationOverrideHook(void** out,
+ int flags,
+ size_t size,
+ const char* type_name);
+ // If it returns true, then the allocation was overridden and has been freed.
+ typedef bool FreeOverrideHook(void* address);
+ // If it returns true, the underlying allocation is overridden and *out holds
+ // the size of the underlying allocation.
+ typedef bool ReallocOverrideHook(size_t* out, void* address);
+
+ // To unhook, call Set*Hooks with nullptrs.
+ static void SetObserverHooks(AllocationObserverHook* alloc_hook,
+ FreeObserverHook* free_hook);
+ static void SetOverrideHooks(AllocationOverrideHook* alloc_hook,
+ FreeOverrideHook* free_hook,
+ ReallocOverrideHook realloc_hook);
+
+ // Helper method to check whether hooks are enabled. This is an optimization
+ // so that if a function needs to call observer and override hooks in two
+ // different places this value can be cached and only loaded once.
+ static bool AreHooksEnabled() {
+ return hooks_enabled_.load(std::memory_order_relaxed);
+ }
+
+ static void AllocationObserverHookIfEnabled(void* address,
+ size_t size,
+ const char* type_name);
+ static bool AllocationOverrideHookIfEnabled(void** out,
+ int flags,
+ size_t size,
+ const char* type_name);
+
+ static void FreeObserverHookIfEnabled(void* address);
+ static bool FreeOverrideHookIfEnabled(void* address);
+
+ static void ReallocObserverHookIfEnabled(void* old_address,
+ void* new_address,
+ size_t size,
+ const char* type_name);
+ static bool ReallocOverrideHookIfEnabled(size_t* out, void* address);
+
+ private:
+  // Single bool used to indicate whether observer or allocation hooks are
+  // set, to reduce the number of loads required to check whether hooking is
+  // enabled.
+ static std::atomic<bool> hooks_enabled_;
+
+ // Lock used to synchronize Set*Hooks calls.
+ static std::atomic<AllocationObserverHook*> allocation_observer_hook_;
+ static std::atomic<FreeObserverHook*> free_observer_hook_;
+
+ static std::atomic<AllocationOverrideHook*> allocation_override_hook_;
+ static std::atomic<FreeOverrideHook*> free_override_hook_;
+ static std::atomic<ReallocOverrideHook*> realloc_override_hook_;
};
-// Never instantiate a PartitionRoot directly, instead use PartitionAlloc.
-struct BASE_EXPORT PartitionRoot
- : public internal::PartitionRootBase<internal::NotThreadSafe> {
- PartitionRoot();
- ~PartitionRoot() override;
- // This references the buckets OFF the edge of this struct. All uses of
- // PartitionRoot must have the bucket array come right after.
- //
- // The PartitionAlloc templated class ensures the following is correct.
- ALWAYS_INLINE Bucket* buckets() {
- return reinterpret_cast<Bucket*>(this + 1);
- }
- ALWAYS_INLINE const Bucket* buckets() const {
- return reinterpret_cast<const Bucket*>(this + 1);
- }
+namespace internal {
- void Init(size_t bucket_count, size_t maximum_allocation);
+template <bool thread_safe>
+class LOCKABLE MaybeSpinLock {
+ public:
+ void Lock() EXCLUSIVE_LOCK_FUNCTION() {}
+ void Unlock() UNLOCK_FUNCTION() {}
+ void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {}
+};
- ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
- ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name);
+template <bool thread_safe>
+class SCOPED_LOCKABLE ScopedGuard {
+ public:
+ explicit ScopedGuard(MaybeSpinLock<thread_safe>& lock)
+ EXCLUSIVE_LOCK_FUNCTION(lock)
+ : lock_(lock) {
+ lock_.Lock();
+ }
+ ~ScopedGuard() UNLOCK_FUNCTION() { lock_.Unlock(); }
- void PurgeMemory(int flags) override;
+ private:
+ MaybeSpinLock<thread_safe>& lock_;
+};
- void DumpStats(const char* partition_name,
- bool is_light_dump,
- PartitionStatsDumper* dumper);
+#if DCHECK_IS_ON()
+template <>
+class LOCKABLE MaybeSpinLock<ThreadSafe> {
+ public:
+ MaybeSpinLock() : lock_() {}
+ void Lock() EXCLUSIVE_LOCK_FUNCTION() { lock_->Acquire(); }
+ void Unlock() UNLOCK_FUNCTION() { lock_->Release(); }
+ void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {
+ lock_->AssertAcquired();
+ }
+
+ private:
+ // NoDestructor to avoid issues with the "static destruction order fiasco".
+ //
+ // This also means that for DCHECK_IS_ON() builds we leak a lock when a
+ // partition is destructed. This will in practice only show in some tests, as
+  // partitions are not destructed in regular use. In addition, on most
+ // platforms, base::Lock doesn't allocate memory and neither does the OS
+ // library, and the destructor is a no-op.
+ base::NoDestructor<base::Lock> lock_;
};
-// Never instantiate a PartitionRootGeneric directly, instead use
-// PartitionAllocatorGeneric.
-struct BASE_EXPORT PartitionRootGeneric
- : public internal::PartitionRootBase<internal::ThreadSafe> {
- PartitionRootGeneric();
- ~PartitionRootGeneric() override;
- // Some pre-computed constants.
- size_t order_index_shifts[kBitsPerSizeT + 1] = {};
- size_t order_sub_index_masks[kBitsPerSizeT + 1] = {};
- // The bucket lookup table lets us map a size_t to a bucket quickly.
- // The trailing +1 caters for the overflow case for very large allocation
- // sizes. It is one flat array instead of a 2D array because in the 2D
- // world, we'd need to index array[blah][max+1] which risks undefined
- // behavior.
- Bucket* bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) +
- 1] = {};
- Bucket buckets[kGenericNumBuckets] = {};
+#else
+template <>
+class LOCKABLE MaybeSpinLock<ThreadSafe> {
+ public:
+ void Lock() EXCLUSIVE_LOCK_FUNCTION() { lock_.lock(); }
+ void Unlock() UNLOCK_FUNCTION() { lock_.unlock(); }
+ void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {
+ // Not supported by subtle::SpinLock.
+ }
- // Public API.
- void Init();
+ private:
+ subtle::SpinLock lock_;
+};
+#endif // DCHECK_IS_ON()
+
+// An "extent" is a span of consecutive superpages. We link to the partition's
+// next extent (if there is one) to the very start of a superpage's metadata
+// area.
+template <bool thread_safe>
+struct PartitionSuperPageExtentEntry {
+ PartitionRoot<thread_safe>* root;
+ char* super_page_base;
+ char* super_pages_end;
+ PartitionSuperPageExtentEntry<thread_safe>* next;
+};
+static_assert(
+ sizeof(PartitionSuperPageExtentEntry<ThreadSafe>) <= kPageMetadataSize,
+ "PartitionSuperPageExtentEntry must be able to fit in a metadata slot");
- ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
- ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name);
- NOINLINE void* Realloc(void* ptr, size_t new_size, const char* type_name);
- // Overload that may return nullptr if reallocation isn't possible. In this
- // case, |ptr| remains valid.
- NOINLINE void* TryRealloc(void* ptr, size_t new_size, const char* type_name);
+// g_oom_handling_function is invoked when PartitionAlloc hits OutOfMemory.
+static OomFunction g_oom_handling_function = nullptr;
- ALWAYS_INLINE size_t ActualSize(size_t size);
+} // namespace internal
- void PurgeMemory(int flags) override;
+class PartitionStatsDumper;
- void DumpStats(const char* partition_name,
- bool is_light_dump,
- PartitionStatsDumper* partition_stats_dumper);
+enum PartitionPurgeFlags {
+ // Decommitting the ring list of empty pages is reasonably fast.
+ PartitionPurgeDecommitEmptyPages = 1 << 0,
+ // Discarding unused system pages is slower, because it involves walking all
+ // freelists in all active partition pages of all buckets >= system page
+ // size. It often frees a similar amount of memory to decommitting the empty
+ // pages, though.
+ PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
};
// Struct used to retrieve total memory usage of a partition. Used by
@@ -218,49 +300,286 @@ class BASE_EXPORT PartitionStatsDumper {
const PartitionBucketMemoryStats*) = 0;
};
-BASE_EXPORT void PartitionAllocGlobalInit(OomFunction on_out_of_memory);
+// Never instantiate a PartitionRoot directly, instead use
+// PartitionAllocator.
+template <bool thread_safe>
+struct BASE_EXPORT PartitionRoot {
+ using Page = internal::PartitionPage<thread_safe>;
+ using Bucket = internal::PartitionBucket<thread_safe>;
+ using SuperPageExtentEntry =
+ internal::PartitionSuperPageExtentEntry<thread_safe>;
+ using DirectMapExtent = internal::PartitionDirectMapExtent<thread_safe>;
+ using ScopedGuard = internal::ScopedGuard<thread_safe>;
+
+ internal::MaybeSpinLock<thread_safe> lock_;
+ size_t total_size_of_committed_pages = 0;
+ size_t total_size_of_super_pages = 0;
+ size_t total_size_of_direct_mapped_pages = 0;
+ // Invariant: total_size_of_committed_pages <=
+ // total_size_of_super_pages +
+ // total_size_of_direct_mapped_pages.
+ unsigned num_buckets = 0;
+ unsigned max_allocation = 0;
+ // Atomic as initialization can be concurrent.
+ std::atomic<bool> initialized = {};
+ char* next_super_page = nullptr;
+ char* next_partition_page = nullptr;
+ char* next_partition_page_end = nullptr;
+ SuperPageExtentEntry* current_extent = nullptr;
+ SuperPageExtentEntry* first_extent = nullptr;
+ DirectMapExtent* direct_map_list = nullptr;
+ Page* global_empty_page_ring[kMaxFreeableSpans] = {};
+ int16_t global_empty_page_ring_index = 0;
+ uintptr_t inverted_self = 0;
-ALWAYS_INLINE void* PartitionRoot::Alloc(size_t size, const char* type_name) {
- return AllocFlags(0, size, type_name);
+ // Some pre-computed constants.
+ size_t order_index_shifts[kBitsPerSizeT + 1] = {};
+ size_t order_sub_index_masks[kBitsPerSizeT + 1] = {};
+ // The bucket lookup table lets us map a size_t to a bucket quickly.
+ // The trailing +1 caters for the overflow case for very large allocation
+ // sizes. It is one flat array instead of a 2D array because in the 2D
+ // world, we'd need to index array[blah][max+1] which risks undefined
+ // behavior.
+ Bucket* bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) +
+ 1] = {};
+ Bucket buckets[kGenericNumBuckets] = {};
+
+ PartitionRoot() = default;
+ ~PartitionRoot() = default;
+
+ // Public API
+ //
+ // Allocates out of the given bucket. Properly, this function should probably
+ // be in PartitionBucket, but because the implementation needs to be inlined
+ // for performance, and because it needs to inspect PartitionPage,
+ // it becomes impossible to have it in PartitionBucket as this causes a
+ // cyclical dependency on PartitionPage function implementations.
+ //
+ // Moving it a layer lower couples PartitionRoot and PartitionBucket, but
+ // preserves the layering of the includes.
+ ALWAYS_INLINE void Init() {
+ if (LIKELY(initialized.load(std::memory_order_relaxed)))
+ return;
+
+ InitSlowPath();
+ }
+
+ ALWAYS_INLINE static bool IsValidPage(Page* page);
+ ALWAYS_INLINE static PartitionRoot* FromPage(Page* page);
+
+ ALWAYS_INLINE void IncreaseCommittedPages(size_t len);
+ ALWAYS_INLINE void DecreaseCommittedPages(size_t len);
+ ALWAYS_INLINE void DecommitSystemPages(void* address, size_t length)
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ ALWAYS_INLINE void RecommitSystemPages(void* address, size_t length)
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ NOINLINE void OutOfMemory(size_t size);
+
+ ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
+ ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name);
+
+ ALWAYS_INLINE void* Realloc(void* ptr,
+ size_t new_size,
+ const char* type_name);
+ // Overload that may return nullptr if reallocation isn't possible. In this
+ // case, |ptr| remains valid.
+ ALWAYS_INLINE void* TryRealloc(void* ptr,
+ size_t new_size,
+ const char* type_name);
+ NOINLINE void* ReallocFlags(int flags,
+ void* ptr,
+ size_t new_size,
+ const char* type_name);
+ ALWAYS_INLINE void Free(void* ptr);
+
+ ALWAYS_INLINE size_t ActualSize(size_t size);
+
+ // Frees memory from this partition, if possible, by decommitting pages.
+ // |flags| is an OR of base::PartitionPurgeFlags.
+ void PurgeMemory(int flags);
+
+ void DumpStats(const char* partition_name,
+ bool is_light_dump,
+ PartitionStatsDumper* partition_stats_dumper);
+
+ internal::PartitionBucket<thread_safe>* SizeToBucket(size_t size) const;
+
+ private:
+ void InitSlowPath();
+ ALWAYS_INLINE void* AllocFromBucket(Bucket* bucket, int flags, size_t size)
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ bool ReallocDirectMappedInPlace(internal::PartitionPage<thread_safe>* page,
+ size_t raw_size)
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void DecommitEmptyPages() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+};
+
+template <bool thread_safe>
+ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFromBucket(Bucket* bucket,
+ int flags,
+ size_t size) {
+ bool zero_fill = flags & PartitionAllocZeroFill;
+ bool is_already_zeroed = false;
+
+ Page* page = bucket->active_pages_head;
+ // Check that this page is neither full nor freed.
+ PA_DCHECK(page);
+ PA_DCHECK(page->num_allocated_slots >= 0);
+ void* ret = page->freelist_head;
+ if (LIKELY(ret)) {
+ // If these DCHECKs fire, you probably corrupted memory. TODO(palmer): See
+ // if we can afford to make these CHECKs.
+ PA_DCHECK(IsValidPage(page));
+
+ // All large allocations must go through the slow path to correctly update
+ // the size metadata.
+ PA_DCHECK(page->get_raw_size() == 0);
+ internal::PartitionFreelistEntry* new_head =
+ internal::EncodedPartitionFreelistEntry::Decode(
+ page->freelist_head->next);
+ page->freelist_head = new_head;
+ page->num_allocated_slots++;
+ } else {
+ ret = bucket->SlowPathAlloc(this, flags, size, &is_already_zeroed);
+ // TODO(palmer): See if we can afford to make this a CHECK.
+ PA_DCHECK(!ret || IsValidPage(Page::FromPointer(ret)));
+ }
+
+#if DCHECK_IS_ON() && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+ if (!ret) {
+ return nullptr;
+ }
+
+ page = Page::FromPointer(ret);
+ // TODO(ajwong): Can |page->bucket| ever not be |bucket|? If not, can this
+ // just be bucket->slot_size?
+ size_t new_slot_size = page->bucket->slot_size;
+ size_t raw_size = page->get_raw_size();
+ if (raw_size) {
+ PA_DCHECK(raw_size == size);
+ new_slot_size = raw_size;
+ }
+ size_t no_cookie_size =
+ internal::PartitionCookieSizeAdjustSubtract(new_slot_size);
+ char* char_ret = static_cast<char*>(ret);
+ // The value given to the application is actually just after the cookie.
+ ret = char_ret + internal::kCookieSize;
+
+ // Fill the region kUninitializedByte or 0, and surround it with 2 cookies.
+ internal::PartitionCookieWriteValue(char_ret);
+ if (!zero_fill) {
+ memset(ret, kUninitializedByte, no_cookie_size);
+ } else if (!is_already_zeroed) {
+ memset(ret, 0, no_cookie_size);
+ }
+ internal::PartitionCookieWriteValue(char_ret + internal::kCookieSize +
+ no_cookie_size);
+#else
+ if (ret && zero_fill && !is_already_zeroed) {
+ memset(ret, 0, size);
+ }
+#endif
+
+ return ret;
}
-ALWAYS_INLINE void* PartitionRoot::AllocFlags(int flags,
- size_t size,
- const char* type_name) {
+template <bool thread_safe>
+ALWAYS_INLINE void PartitionRoot<thread_safe>::Free(void* ptr) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
- CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags);
- void* result = malloc(size);
- CHECK(result);
- return result;
+ free(ptr);
#else
- DCHECK(max_allocation == 0 || size <= max_allocation);
- void* result;
- const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
- if (UNLIKELY(hooks_enabled)) {
- if (PartitionAllocHooks::AllocationOverrideHookIfEnabled(&result, flags,
- size, type_name)) {
- PartitionAllocHooks::AllocationObserverHookIfEnabled(result, size,
- type_name);
- return result;
- }
+ PA_DCHECK(initialized);
+
+ if (UNLIKELY(!ptr))
+ return;
+
+ if (PartitionAllocHooks::AreHooksEnabled()) {
+ PartitionAllocHooks::FreeObserverHookIfEnabled(ptr);
+ if (PartitionAllocHooks::FreeOverrideHookIfEnabled(ptr))
+ return;
}
- size_t requested_size = size;
- size = internal::PartitionCookieSizeAdjustAdd(size);
- DCHECK(initialized);
- size_t index = size >> kBucketShift;
- DCHECK(index < num_buckets);
- DCHECK(size == index << kBucketShift);
+
+ ptr = internal::PartitionCookieFreePointerAdjust(ptr);
+ Page* page = Page::FromPointer(ptr);
+ // TODO(palmer): See if we can afford to make this a CHECK.
+ PA_DCHECK(IsValidPage(page));
+ internal::DeferredUnmap deferred_unmap;
{
ScopedGuard guard{lock_};
- Bucket* bucket = &buckets()[index];
- result = AllocFromBucket(bucket, flags, size);
- }
- if (UNLIKELY(hooks_enabled)) {
- PartitionAllocHooks::AllocationObserverHookIfEnabled(result, requested_size,
- type_name);
+ deferred_unmap = page->Free(ptr);
}
- return result;
-#endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+ deferred_unmap.Run();
+#endif
+}
+
+// static
+template <bool thread_safe>
+ALWAYS_INLINE bool PartitionRoot<thread_safe>::IsValidPage(Page* page) {
+ PartitionRoot* root = FromPage(page);
+ return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
+}
+
+template <bool thread_safe>
+ALWAYS_INLINE PartitionRoot<thread_safe>* PartitionRoot<thread_safe>::FromPage(
+ Page* page) {
+ auto* extent_entry = reinterpret_cast<SuperPageExtentEntry*>(
+ reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
+ return extent_entry->root;
+}
+
+template <bool thread_safe>
+ALWAYS_INLINE void PartitionRoot<thread_safe>::IncreaseCommittedPages(
+ size_t len) {
+ total_size_of_committed_pages += len;
+ PA_DCHECK(total_size_of_committed_pages <=
+ total_size_of_super_pages + total_size_of_direct_mapped_pages);
+}
+
+template <bool thread_safe>
+ALWAYS_INLINE void PartitionRoot<thread_safe>::DecreaseCommittedPages(
+ size_t len) {
+ total_size_of_committed_pages -= len;
+ PA_DCHECK(total_size_of_committed_pages <=
+ total_size_of_super_pages + total_size_of_direct_mapped_pages);
+}
+
+template <bool thread_safe>
+ALWAYS_INLINE void PartitionRoot<thread_safe>::DecommitSystemPages(
+ void* address,
+ size_t length) {
+ ::base::DecommitSystemPages(address, length);
+ DecreaseCommittedPages(length);
+}
+
+template <bool thread_safe>
+ALWAYS_INLINE void PartitionRoot<thread_safe>::RecommitSystemPages(
+ void* address,
+ size_t length) {
+ PA_CHECK(::base::RecommitSystemPages(address, length, PageReadWrite));
+ IncreaseCommittedPages(length);
+}
+
+BASE_EXPORT void PartitionAllocGlobalInit(OomFunction on_out_of_memory);
+BASE_EXPORT void PartitionAllocGlobalUninitForTesting();
+
+ALWAYS_INLINE bool IsManagedByPartitionAlloc(const void* address) {
+#if BUILDFLAG(USE_PARTITION_ALLOC) && defined(ARCH_CPU_64_BITS) && \
+ !defined(OS_NACL)
+ return internal::PartitionAddressSpace::Contains(address);
+#else
+ return false;
+#endif
+}
+
+ALWAYS_INLINE bool IsManagedByPartitionAllocAndNotDirectMapped(
+ const void* address) {
+#if BUILDFLAG(USE_PARTITION_ALLOC) && defined(ARCH_CPU_64_BITS) && \
+ !defined(OS_NACL)
+ return internal::PartitionAddressSpace::IsInNormalBucketPool(address);
+#else
+ return false;
+#endif
}
ALWAYS_INLINE bool PartitionAllocSupportsGetSize() {
@@ -271,52 +590,91 @@ ALWAYS_INLINE bool PartitionAllocSupportsGetSize() {
#endif
}
-ALWAYS_INLINE size_t PartitionAllocGetSize(void* ptr) {
+namespace internal {
+// Gets the PartitionPage object for the first partition page of the slot span
+// that contains |ptr|. It's primarily used to obtain the slot size.
+// CAUTION! It works well for normal buckets, but for direct-mapped allocations
+// it'll only work if |ptr| is in the first partition page of the allocation.
+template <bool thread_safe>
+ALWAYS_INLINE internal::PartitionPage<thread_safe>*
+PartitionAllocGetPageForSize(void* ptr) {
// No need to lock here. Only |ptr| being freed by another thread could
// cause trouble, and the caller is responsible for that not happening.
- DCHECK(PartitionAllocSupportsGetSize());
- ptr = internal::PartitionCookieFreePointerAdjust(ptr);
- internal::PartitionPage<internal::ThreadSafe>* page =
- internal::PartitionPage<internal::ThreadSafe>::FromPointer(ptr);
+ PA_DCHECK(PartitionAllocSupportsGetSize());
+ auto* page =
+ internal::PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(ptr);
// TODO(palmer): See if we can afford to make this a CHECK.
- DCHECK(internal::PartitionRootBase<internal::ThreadSafe>::IsValidPage(page));
- size_t size = page->bucket->slot_size;
- return internal::PartitionCookieSizeAdjustSubtract(size);
+ PA_DCHECK(PartitionRoot<thread_safe>::IsValidPage(page));
+ return page;
+}
+} // namespace internal
+
+// Gets the size of the allocated slot that contains |ptr|, adjusted for cookie
+// (if any).
+// CAUTION! For direct-mapped allocation, |ptr| has to be within the first
+// partition page.
+template <bool thread_safe>
+ALWAYS_INLINE size_t PartitionAllocGetSize(void* ptr) {
+ ptr = internal::PartitionCookieFreePointerAdjust(ptr);
+ auto* page = internal::PartitionAllocGetPageForSize<thread_safe>(ptr);
+ return internal::PartitionCookieSizeAdjustSubtract(page->bucket->slot_size);
}
-ALWAYS_INLINE internal::PartitionBucket<internal::ThreadSafe>*
-PartitionGenericSizeToBucket(PartitionRootGeneric* root, size_t size) {
+// Gets the offset from the beginning of the allocated slot, adjusted for cookie
+// (if any).
+// CAUTION! Use only with normal buckets. Using it on direct-mapped allocations
+// may lead to undefined behavior.
+template <bool thread_safe>
+ALWAYS_INLINE size_t PartitionAllocGetSlotOffset(void* ptr) {
+ PA_DCHECK(IsManagedByPartitionAllocAndNotDirectMapped(ptr));
+ ptr = internal::PartitionCookieFreePointerAdjust(ptr);
+ auto* page = internal::PartitionAllocGetPageForSize<thread_safe>(ptr);
+ size_t slot_size = page->bucket->slot_size;
+
+ // Get the offset from the beginning of the slot span.
+ uintptr_t ptr_addr = reinterpret_cast<uintptr_t>(ptr);
+ uintptr_t slot_span_start = reinterpret_cast<uintptr_t>(
+ internal::PartitionPage<thread_safe>::ToPointer(page));
+ size_t offset_in_slot_span = ptr_addr - slot_span_start;
+ // Knowing that slots are tightly packed in a slot span, calculate an offset
+  // within a slot using a simple % operation.
+ // TODO(bartekn): Try to replace % with multiplication&shift magic.
+ size_t offset_in_slot = offset_in_slot_span % slot_size;
+ return offset_in_slot;
+}
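A minimal numeric sketch of the slot-offset arithmetic above, with a hypothetical 96-byte slot size (illustration only, not part of the patch):

  #include <cstddef>

  // |ptr| sits 10 bytes into the fourth slot of a span; one modulo reduces the
  // span-relative offset to a slot-relative one, as in the function above.
  constexpr size_t kSlotSize = 96;  // assumed bucket slot size
  constexpr size_t kOffsetInSlotSpan = 3 * kSlotSize + 10;
  static_assert(kOffsetInSlotSpan % kSlotSize == 10, "offset within the slot");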
+
+template <bool thread_safe>
+ALWAYS_INLINE internal::PartitionBucket<thread_safe>*
+PartitionRoot<thread_safe>::SizeToBucket(size_t size) const {
size_t order = kBitsPerSizeT - bits::CountLeadingZeroBitsSizeT(size);
// The order index is simply the next few bits after the most significant bit.
- size_t order_index = (size >> root->order_index_shifts[order]) &
- (kGenericNumBucketsPerOrder - 1);
+ size_t order_index =
+ (size >> order_index_shifts[order]) & (kGenericNumBucketsPerOrder - 1);
// And if the remaining bits are non-zero we must bump the bucket up.
- size_t sub_order_index = size & root->order_sub_index_masks[order];
- internal::PartitionBucket<internal::ThreadSafe>* bucket =
- root->bucket_lookups[(order << kGenericNumBucketsPerOrderBits) +
- order_index + !!sub_order_index];
- CHECK(bucket);
- DCHECK(!bucket->slot_size || bucket->slot_size >= size);
- DCHECK(!(bucket->slot_size % kGenericSmallestBucket));
+ size_t sub_order_index = size & order_sub_index_masks[order];
+ Bucket* bucket = bucket_lookups[(order << kGenericNumBucketsPerOrderBits) +
+ order_index + !!sub_order_index];
+ PA_CHECK(bucket);
+ PA_DCHECK(!bucket->slot_size || bucket->slot_size >= size);
+ PA_DCHECK(!(bucket->slot_size % kGenericSmallestBucket));
return bucket;
}
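A standalone sketch of the order computation above, assuming an 8-buckets-per-order scheme (the helper and the constant are hypothetical, not the real ones):

  #include <cstddef>

  // The order is the position of the most significant bit plus one, mirroring
  // kBitsPerSizeT - CountLeadingZeroBitsSizeT(size); the next few bits select
  // the sub-bucket, and any remaining low bits round the request up one bucket.
  constexpr size_t OrderOf(size_t size) {
    size_t order = 0;
    while (size >> order)
      ++order;
    return order;
  }
  static_assert(OrderOf(96) == 7, "96 == 0b1100000, MSB at bit 6");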
-ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root,
- int flags,
- size_t size,
- const char* type_name) {
- DCHECK_LT(flags, PartitionAllocLastFlag << 1);
+template <bool thread_safe>
+ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlags(
+ int flags,
+ size_t size,
+ const char* type_name) {
+ PA_DCHECK(flags < PartitionAllocLastFlag << 1);
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags);
const bool zero_fill = flags & PartitionAllocZeroFill;
void* result = zero_fill ? calloc(1, size) : malloc(size);
- CHECK(result || flags & PartitionAllocReturnNull);
+ PA_CHECK(result || flags & PartitionAllocReturnNull);
return result;
#else
- DCHECK(root->initialized);
- // Only SizeSpecificPartitionAllocator should use max_allocation.
- DCHECK(root->max_allocation == 0);
+ PA_DCHECK(initialized);
void* result;
const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
if (UNLIKELY(hooks_enabled)) {
@@ -329,12 +687,11 @@ ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root,
}
size_t requested_size = size;
size = internal::PartitionCookieSizeAdjustAdd(size);
- internal::PartitionBucket<internal::ThreadSafe>* bucket =
- PartitionGenericSizeToBucket(root, size);
- DCHECK(bucket);
+ auto* bucket = SizeToBucket(size);
+ PA_DCHECK(bucket);
{
- PartitionRootGeneric::ScopedGuard guard{root->lock_};
- result = root->AllocFromBucket(bucket, flags, size);
+ internal::ScopedGuard<thread_safe> guard{lock_};
+ result = AllocFromBucket(bucket, flags, size);
}
if (UNLIKELY(hooks_enabled)) {
PartitionAllocHooks::AllocationObserverHookIfEnabled(result, requested_size,
@@ -345,30 +702,35 @@ ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root,
#endif
}
-ALWAYS_INLINE void* PartitionRootGeneric::Alloc(size_t size,
- const char* type_name) {
- return PartitionAllocGenericFlags(this, 0, size, type_name);
+template <bool thread_safe>
+ALWAYS_INLINE void* PartitionRoot<thread_safe>::Alloc(size_t size,
+ const char* type_name) {
+ return AllocFlags(0, size, type_name);
}
-ALWAYS_INLINE void* PartitionRootGeneric::AllocFlags(int flags,
- size_t size,
- const char* type_name) {
- return PartitionAllocGenericFlags(this, flags, size, type_name);
+template <bool thread_safe>
+ALWAYS_INLINE void* PartitionRoot<thread_safe>::Realloc(void* ptr,
+ size_t new_size,
+ const char* type_name) {
+ return ReallocFlags(0, ptr, new_size, type_name);
}
-BASE_EXPORT void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
- int flags,
- void* ptr,
- size_t new_size,
- const char* type_name);
+template <bool thread_safe>
+ALWAYS_INLINE void* PartitionRoot<thread_safe>::TryRealloc(
+ void* ptr,
+ size_t new_size,
+ const char* type_name) {
+ return ReallocFlags(PartitionAllocReturnNull, ptr, new_size, type_name);
+}
-ALWAYS_INLINE size_t PartitionRootGeneric::ActualSize(size_t size) {
+template <bool thread_safe>
+ALWAYS_INLINE size_t PartitionRoot<thread_safe>::ActualSize(size_t size) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
return size;
#else
- DCHECK(initialized);
+ PA_DCHECK(PartitionRoot<thread_safe>::initialized);
size = internal::PartitionCookieSizeAdjustAdd(size);
- Bucket* bucket = PartitionGenericSizeToBucket(this, size);
+ auto* bucket = SizeToBucket(size);
if (LIKELY(!bucket->is_direct_mapped())) {
size = bucket->slot_size;
} else if (size > kGenericMaxDirectMapped) {
@@ -380,35 +742,11 @@ ALWAYS_INLINE size_t PartitionRootGeneric::ActualSize(size_t size) {
#endif
}
-template <size_t N>
-class SizeSpecificPartitionAllocator {
- public:
- SizeSpecificPartitionAllocator() {
- memset(actual_buckets_, 0,
- sizeof(PartitionRoot::Bucket) * base::size(actual_buckets_));
- }
- ~SizeSpecificPartitionAllocator() {
- PartitionAllocMemoryReclaimer::Instance()->UnregisterPartition(
- &partition_root_);
- }
- static const size_t kMaxAllocation = N - kAllocationGranularity;
- static const size_t kNumBuckets = N / kAllocationGranularity;
- void init() {
- partition_root_.Init(kNumBuckets, kMaxAllocation);
- PartitionAllocMemoryReclaimer::Instance()->RegisterPartition(
- &partition_root_);
- }
- ALWAYS_INLINE PartitionRoot* root() { return &partition_root_; }
-
- private:
- PartitionRoot partition_root_;
- PartitionRoot::Bucket actual_buckets_[kNumBuckets];
-};
-
-class BASE_EXPORT PartitionAllocatorGeneric {
- public:
- PartitionAllocatorGeneric();
- ~PartitionAllocatorGeneric() {
+namespace internal {
+template <bool thread_safe>
+struct BASE_EXPORT PartitionAllocator {
+ PartitionAllocator() = default;
+ ~PartitionAllocator() {
PartitionAllocMemoryReclaimer::Instance()->UnregisterPartition(
&partition_root_);
}
@@ -418,11 +756,19 @@ class BASE_EXPORT PartitionAllocatorGeneric {
PartitionAllocMemoryReclaimer::Instance()->RegisterPartition(
&partition_root_);
}
- ALWAYS_INLINE PartitionRootGeneric* root() { return &partition_root_; }
+ ALWAYS_INLINE PartitionRoot<thread_safe>* root() { return &partition_root_; }
private:
- PartitionRootGeneric partition_root_;
+ PartitionRoot<thread_safe> partition_root_;
};
+} // namespace internal
+
+using PartitionAllocator = internal::PartitionAllocator<internal::ThreadSafe>;
+using ThreadUnsafePartitionAllocator =
+ internal::PartitionAllocator<internal::NotThreadSafe>;
+
+using ThreadSafePartitionRoot = PartitionRoot<internal::ThreadSafe>;
+using ThreadUnsafePartitionRoot = PartitionRoot<internal::NotThreadSafe>;
} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_check.h b/chromium/base/allocator/partition_allocator/partition_alloc_check.h
new file mode 100644
index 00000000000..e8003498489
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_check.h
@@ -0,0 +1,36 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
+
+#include "base/allocator/buildflags.h"
+#include "base/check.h"
+
+// When PartitionAlloc is used as the default allocator, we cannot use the
+// regular (D)CHECK() macros, as they allocate internally. When an assertion is
+// triggered, they format strings, leading to reentrancy in the code, which
+// PartitionAlloc is not designed to support (and especially not on error
+// paths).
+//
+// As a consequence:
+// - When PartitionAlloc is not malloc(), use the regular macros
+// - Otherwise, crash immediately. This provides worse error messages though.
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+// See base/check.h for implementation details.
+#define PA_CHECK(condition) \
+ UNLIKELY(!(condition)) ? IMMEDIATE_CRASH() : EAT_CHECK_STREAM_PARAMS()
+
+#if DCHECK_IS_ON()
+#define PA_DCHECK(condition) PA_CHECK(condition)
+#else
+#define PA_DCHECK(condition) EAT_CHECK_STREAM_PARAMS(!(condition))
+#endif // DCHECK_IS_ON()
+
+#else
+#define PA_CHECK(condition) CHECK(condition)
+#define PA_DCHECK(condition) DCHECK(condition)
+#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
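A usage sketch for the macros above (hypothetical caller, not part of the patch); both take a bare condition and never format a message string, which is what keeps them safe when PartitionAlloc is the malloc() implementation:

  #include <cstddef>
  #include <cstdint>
  #include "base/allocator/partition_allocator/partition_alloc_check.h"

  // Hypothetical helper illustrating intended use.
  void* CheckedAlign(void* address, size_t length) {
    PA_CHECK(address);                                       // always on
    PA_DCHECK(length > 0);                                   // debug builds only
    PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) & 7));  // 8-byte aligned
    return address;
  }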
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_constants.h b/chromium/base/allocator/partition_allocator/partition_alloc_constants.h
index fbc851c15f9..ab001f168c8 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc_constants.h
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_constants.h
@@ -6,19 +6,14 @@
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
#include <limits.h>
+#include <cstddef>
#include "base/allocator/partition_allocator/page_allocator_constants.h"
-#include "base/logging.h"
#include "build/build_config.h"
namespace base {
-// Allocation granularity of sizeof(void*) bytes.
-static const size_t kAllocationGranularity = sizeof(void*);
-static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;
-static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
-
// Underlying partition storage pages (`PartitionPage`s) are a power-of-2 size.
// It is typical for a `PartitionPage` to be based on multiple system pages.
// Most references to "page" refer to `PartitionPage`s.
@@ -63,9 +58,12 @@ static const size_t kMaxSystemPagesPerSlotSpan =
// We reserve virtual address space in 2 MiB chunks (aligned to 2 MiB as well).
// These chunks are called *super pages*. We do this so that we can store
// metadata in the first few pages of each 2 MiB-aligned section. This makes
-// freeing memory very fast. We specifically choose 2 MiB because this virtual
-// address block represents a full but single PTE allocation on ARM, ia32 and
-// x64.
+// freeing memory very fast. The 2 MiB size and alignment were chosen because
+// this virtual address block represents a full, single page-table allocation
+// on ARM, ia32 and x64, which may be slightly more performance- and
+// memory-efficient.
+// (Note, these super pages are backed by 4 KiB system pages and have nothing to
+// do with OS concept of "huge pages"/"large pages", even though the size
+// coincides.)
//
// The layout of the super page is as follows. The sizes below are the same for
// 32- and 64-bit platforms.
@@ -78,16 +76,20 @@ static const size_t kMaxSystemPagesPerSlotSpan =
// | Slot span |
// | ... |
// | Slot span |
-// | Guard page (4 KiB) |
+// | Guard pages (16 KiB) |
// +-----------------------+
//
-// Each slot span is a contiguous range of one or more `PartitionPage`s.
+// Each slot span is a contiguous range of one or more `PartitionPage`s. Note
+// that slot spans of different sizes may co-exist within one super page. Even
+// slot spans of the same span size may serve different slot sizes. However,
+// all slots within a single span have to be of the same size.
//
// The metadata page has the following format. Note that the `PartitionPage`
-// that is not at the head of a slot span is "unused". In other words, the
-// metadata for the slot span is stored only in the first `PartitionPage` of the
-// slot span. Metadata accesses to other `PartitionPage`s are redirected to the
-// first `PartitionPage`.
+// that is not at the head of a slot span is "unused" (for the most part, it only
+// stores the offset from the head page). In other words, the metadata for the
+// slot span is stored only in the first `PartitionPage` of the slot span.
+// Metadata accesses to other `PartitionPage`s are redirected to the first
+// `PartitionPage`.
//
// +---------------------------------------------+
// | SuperPageExtentEntry (32 B) |
@@ -97,28 +99,30 @@ static const size_t kMaxSystemPagesPerSlotSpan =
// | PartitionPage of slot span 2 (32 B, used) |
// | PartitionPage of slot span 3 (32 B, used) |
// | ... |
+// | PartitionPage of slot span N (32 B, used) |
+// | PartitionPage of slot span N (32 B, unused) |
// | PartitionPage of slot span N (32 B, unused) |
// +---------------------------------------------+
//
-// A direct-mapped page has a similar layout to fake it looking like a super
-// page:
+// A direct-mapped page has an identical layout at the beginning, to make it
+// look like a super page:
//
-// +-----------------------+
-// | Guard page (4 KiB) |
-// | Metadata page (4 KiB) |
-// | Guard pages (8 KiB) |
-// | Direct mapped object |
-// | Guard page (4 KiB) |
-// +-----------------------+
+// +---------------------------------+
+// | Guard page (4 KiB) |
+// | Metadata page (4 KiB) |
+// | Guard pages (8 KiB) |
+// | Direct mapped object |
+// | Guard page (4 KiB, 32-bit only) |
+// +---------------------------------+
//
// A direct-mapped page's metadata page has the following layout:
//
-// +--------------------------------+
-// | SuperPageExtentEntry (32 B) |
-// | PartitionPage (32 B) |
-// | PartitionBucket (32 B) |
-// | PartitionDirectMapExtent (8 B) |
-// +--------------------------------+
+// +---------------------------------+
+// | SuperPageExtentEntry (32 B) |
+// | PartitionPage (32 B) |
+// | PartitionBucket (32 B) |
+// | PartitionDirectMapExtent (32 B) |
+// +---------------------------------+
static const size_t kSuperPageShift = 21; // 2 MiB
static const size_t kSuperPageSize = 1 << kSuperPageShift;
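The 2 MiB alignment described above is what makes metadata lookup on free() a matter of pointer masking; a hedged sketch of that arithmetic (helper names hypothetical, and the guard-page offset simplified to one system page):

  #include <cstdint>

  // Mask any pointer inside a super page down to the 2 MiB boundary, then step
  // past the leading guard page to reach the metadata page.
  uintptr_t SuperPageBase(uintptr_t ptr) {
    return ptr & ~(static_cast<uintptr_t>(kSuperPageSize) - 1);  // kSuperPageBaseMask
  }
  uintptr_t MetadataPage(uintptr_t ptr) {
    return SuperPageBase(ptr) + kSystemPageSize;  // guard page, then metadata
  }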
@@ -136,7 +140,18 @@ static const size_t kNumPartitionPagesPerSuperPage =
// In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
// covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
-static const size_t kGenericMinBucketedOrder = 4; // 8 bytes.
+static_assert(alignof(std::max_align_t) <= 16,
+ "PartitionAlloc doesn't support a fundamental alignment larger "
+ "than 16 bytes.");
+// PartitionAlloc should return memory properly aligned for any type, to behave
+// properly as a generic allocator. This is not strictly required as long as
+// types are explicitly allocated with PartitionAlloc, but it is required to use
+// it as a malloc() implementation, and generally to match malloc()'s behavior.
+//
+// In practice, this means 8-byte alignment on 32-bit architectures, and 16-byte
+// alignment on 64-bit ones.
+static const size_t kGenericMinBucketedOrder =
+ alignof(std::max_align_t) == 16 ? 5 : 4; // 2^(order - 1), that is 16 or 8.
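A one-line sanity sketch of the relation above, assuming a typical 64-bit target where max_align_t is 16-byte aligned and the minimum order is therefore 5:

  static_assert((size_t{1} << (5 - 1)) == 16,
                "order 5 gives a smallest bucket of 16 bytes, matching max_align_t");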
// The largest bucketed order is 1 << (20 - 1), storing [512 KiB, 1 MiB):
static const size_t kGenericMaxBucketedOrder = 20;
static const size_t kGenericNumBucketedOrders =
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_features.cc b/chromium/base/allocator/partition_allocator/partition_alloc_features.cc
new file mode 100644
index 00000000000..7f8facf979a
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_features.cc
@@ -0,0 +1,16 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/partition_alloc_features.h"
+
+#include "base/feature_list.h"
+
+namespace base {
+
+// If enabled, PartitionAllocator reserves an address space (named GigaCage) up
+// front and serves each allocation from a part of that address space.
+const Feature kPartitionAllocGigaCage{"PartitionAllocGigaCage",
+ FEATURE_DISABLED_BY_DEFAULT};
+
+} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_features.h b/chromium/base/allocator/partition_allocator/partition_alloc_features.h
new file mode 100644
index 00000000000..7cf1f547afc
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_features.h
@@ -0,0 +1,34 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
+
+#include "base/allocator/buildflags.h"
+#include "base/base_export.h"
+#include "base/feature_list.h"
+#include "base/metrics/field_trial_params.h"
+#include "build/build_config.h"
+
+namespace base {
+
+struct Feature;
+
+extern const BASE_EXPORT Feature kPartitionAllocGigaCage;
+
+ALWAYS_INLINE bool IsPartitionAllocGigaCageEnabled() {
+ // The feature is not applicable to 32 bit architectures (not enough address
+ // space). It is also incompatible with PartitionAlloc as malloc(), as the
+ // base::Feature code allocates, leading to reentrancy in PartitionAlloc.
+#if !(defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)) || \
+ BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+ return false;
+#else
+ return FeatureList::IsEnabled(kPartitionAllocGigaCage);
+#endif
+}
+
+} // namespace base
+
+#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_forward.h b/chromium/base/allocator/partition_allocator/partition_alloc_forward.h
index c2019e511bf..0737d282400 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc_forward.h
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_forward.h
@@ -13,10 +13,12 @@ struct PartitionPage;
constexpr bool ThreadSafe = true;
constexpr bool NotThreadSafe = false;
-template <bool thread_safe>
-struct PartitionRootBase;
} // namespace internal
+
+template <bool thread_safe>
+struct PartitionRoot;
+
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FORWARD_H_
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_perftest.cc b/chromium/base/allocator/partition_allocator/partition_alloc_perftest.cc
index e6afbd949b3..d3ab0b36edb 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc_perftest.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_perftest.cc
@@ -6,6 +6,7 @@
#include <vector>
#include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/strings/stringprintf.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
@@ -63,8 +64,7 @@ class SystemAllocator : public Allocator {
class PartitionAllocator : public Allocator {
public:
- PartitionAllocator()
- : alloc_(std::make_unique<PartitionAllocatorGeneric>()) {}
+ PartitionAllocator() : alloc_(std::make_unique<base::PartitionAllocator>()) {}
~PartitionAllocator() override = default;
void Init() override { alloc_->init(); }
@@ -72,14 +72,14 @@ class PartitionAllocator : public Allocator {
void Free(void* data) override { return alloc_->root()->Free(data); }
private:
- std::unique_ptr<PartitionAllocatorGeneric> alloc_;
+ std::unique_ptr<base::PartitionAllocator> alloc_;
};
class TestLoopThread : public PlatformThread::Delegate {
public:
explicit TestLoopThread(OnceCallback<float()> test_fn)
: test_fn_(std::move(test_fn)) {
- CHECK(PlatformThread::Create(0, this, &thread_handle_));
+ PA_CHECK(PlatformThread::Create(0, this, &thread_handle_));
}
float Run() {
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
index e6fae94cfcc..99219a8d559 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
@@ -7,15 +7,22 @@
#include <stdlib.h>
#include <string.h>
+#include <algorithm>
+#include <cstddef>
#include <limits>
#include <memory>
#include <vector>
#include "base/allocator/partition_allocator/address_space_randomization.h"
+#include "base/allocator/partition_allocator/page_allocator_constants.h"
+#include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/logging.h"
#include "base/rand_util.h"
#include "base/stl_util.h"
#include "base/system/sys_info.h"
+#include "base/test/scoped_feature_list.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -29,8 +36,6 @@
namespace {
-constexpr size_t kTestMaxAllocation = base::kSystemPageSize;
-
bool IsLargeMemoryDevice() {
// Treat any device with 2GiB or more of physical memory as a "large memory
// device". We check for slightly less than 2GiB so that devices with a small
@@ -86,19 +91,20 @@ const size_t kTestSizes[] = {
100,
base::kSystemPageSize,
base::kSystemPageSize + 1,
- base::PartitionRootGeneric::Bucket::get_direct_map_size(100),
+ base::PartitionRoot<
+ base::internal::ThreadSafe>::Bucket::get_direct_map_size(100),
1 << 20,
1 << 21,
};
constexpr size_t kTestSizesCount = base::size(kTestSizes);
-void AllocateRandomly(base::PartitionRootGeneric* root,
+void AllocateRandomly(base::PartitionRoot<base::internal::ThreadSafe>* root,
size_t count,
int flags) {
std::vector<void*> allocations(count, nullptr);
for (size_t i = 0; i < count; ++i) {
const size_t size = kTestSizes[base::RandGenerator(kTestSizesCount)];
- allocations[i] = PartitionAllocGenericFlags(root, flags, size, nullptr);
+ allocations[i] = root->AllocFlags(flags, size, nullptr);
EXPECT_NE(nullptr, allocations[i]) << " size: " << size << " i: " << i;
}
@@ -108,6 +114,10 @@ void AllocateRandomly(base::PartitionRootGeneric* root,
}
}
+void HandleOOM(size_t unused_size) {
+ LOG(FATAL) << "Out of memory";
+}
+
} // namespace
namespace base {
@@ -129,7 +139,6 @@ const size_t kPointerOffset = kCookieSize;
const size_t kExtraAllocSize = kCookieSize * 2;
#endif
const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize;
-const size_t kTestBucketIndex = kRealAllocSize >> kBucketShift;
const char* type_name = nullptr;
@@ -140,16 +149,30 @@ class PartitionAllocTest : public testing::Test {
~PartitionAllocTest() override = default;
void SetUp() override {
+ scoped_feature_list.InitWithFeatures({kPartitionAllocGigaCage}, {});
+ PartitionAllocGlobalInit(HandleOOM);
allocator.init();
- generic_allocator.init();
+ test_bucket_index_ = SizeToIndex(kRealAllocSize);
+ }
+
+ size_t SizeToIndex(size_t size) {
+ return allocator.root()->SizeToBucket(size) - allocator.root()->buckets;
+ }
+
+ void TearDown() override {
+ allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages |
+ PartitionPurgeDiscardUnusedSystemPages);
+ PartitionAllocGlobalUninitForTesting();
}
- PartitionRoot::Page* GetFullPage(size_t size) {
+ PartitionRoot<ThreadSafe>::Page* GetFullPage(size_t size) {
size_t real_size = size + kExtraAllocSize;
- size_t bucket_index = real_size >> kBucketShift;
- PartitionRoot::Bucket* bucket = &allocator.root()->buckets()[bucket_index];
+ size_t bucket_index = SizeToIndex(real_size);
+ PartitionRoot<ThreadSafe>::Bucket* bucket =
+ &allocator.root()->buckets[bucket_index];
size_t num_slots =
- (bucket->num_system_pages_per_slot_span * kSystemPageSize) / real_size;
+ (bucket->num_system_pages_per_slot_span * kSystemPageSize) /
+ bucket->slot_size;
void* first = nullptr;
void* last = nullptr;
size_t i;
@@ -161,8 +184,8 @@ class PartitionAllocTest : public testing::Test {
else if (i == num_slots - 1)
last = PartitionCookieFreePointerAdjust(ptr);
}
- EXPECT_EQ(PartitionRoot::Page::FromPointer(first),
- PartitionRoot::Page::FromPointer(last));
+ EXPECT_EQ(PartitionRoot<ThreadSafe>::Page::FromPointer(first),
+ PartitionRoot<ThreadSafe>::Page::FromPointer(last));
if (bucket->num_system_pages_per_slot_span ==
kNumSystemPagesPerPartitionPage)
EXPECT_EQ(reinterpret_cast<size_t>(first) & kPartitionPageBaseMask,
@@ -172,18 +195,16 @@ class PartitionAllocTest : public testing::Test {
EXPECT_EQ(nullptr, bucket->active_pages_head->freelist_head);
EXPECT_TRUE(bucket->active_pages_head);
EXPECT_TRUE(bucket->active_pages_head !=
- PartitionRoot::Page::get_sentinel_page());
+ PartitionRoot<ThreadSafe>::Page::get_sentinel_page());
return bucket->active_pages_head;
}
void CycleFreeCache(size_t size) {
- size_t real_size = size + kExtraAllocSize;
- size_t bucket_index = real_size >> kBucketShift;
- PartitionRoot::Bucket* bucket = &allocator.root()->buckets()[bucket_index];
- DCHECK(!bucket->active_pages_head->num_allocated_slots);
-
for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
void* ptr = allocator.root()->Alloc(size, type_name);
+ auto* page = PartitionRoot<base::internal::ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
+ auto* bucket = page->bucket;
EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots);
allocator.root()->Free(ptr);
EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots);
@@ -191,20 +212,6 @@ class PartitionAllocTest : public testing::Test {
}
}
- void CycleGenericFreeCache(size_t size) {
- for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
- void* ptr = generic_allocator.root()->Alloc(size, type_name);
- PartitionRootGeneric::Page* page =
- PartitionRootGeneric::Page::FromPointer(
- PartitionCookieFreePointerAdjust(ptr));
- PartitionRootGeneric::Bucket* bucket = page->bucket;
- EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots);
- generic_allocator.root()->Free(ptr);
- EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots);
- EXPECT_NE(-1, bucket->active_pages_head->empty_cache_index);
- }
- }
-
enum ReturnNullTestMode {
kPartitionAllocGenericFlags,
kPartitionReallocGenericFlags,
@@ -226,40 +233,37 @@ class PartitionAllocTest : public testing::Test {
// Work out the number of allocations for 6 GB of memory.
const int num_allocations = (6 * 1024 * 1024) / (alloc_size / 1024);
- void** ptrs = reinterpret_cast<void**>(generic_allocator.root()->Alloc(
- num_allocations * sizeof(void*), type_name));
+ void** ptrs = reinterpret_cast<void**>(
+ allocator.root()->Alloc(num_allocations * sizeof(void*), type_name));
int i;
for (i = 0; i < num_allocations; ++i) {
switch (mode) {
case kPartitionAllocGenericFlags: {
- ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(),
- PartitionAllocReturnNull,
- alloc_size, type_name);
+ ptrs[i] = allocator.root()->AllocFlags(PartitionAllocReturnNull,
+ alloc_size, type_name);
break;
}
case kPartitionReallocGenericFlags: {
- ptrs[i] = PartitionAllocGenericFlags(
- generic_allocator.root(), PartitionAllocReturnNull, 1, type_name);
- ptrs[i] = PartitionReallocGenericFlags(
- generic_allocator.root(), PartitionAllocReturnNull, ptrs[i],
- alloc_size, type_name);
+ ptrs[i] = allocator.root()->AllocFlags(PartitionAllocReturnNull, 1,
+ type_name);
+ ptrs[i] = allocator.root()->ReallocFlags(
+ PartitionAllocReturnNull, ptrs[i], alloc_size, type_name);
break;
}
case kPartitionRootGenericTryRealloc: {
- ptrs[i] = PartitionAllocGenericFlags(
- generic_allocator.root(), PartitionAllocReturnNull, 1, type_name);
- ptrs[i] = generic_allocator.root()->TryRealloc(ptrs[i], alloc_size,
- type_name);
+ ptrs[i] = allocator.root()->AllocFlags(PartitionAllocReturnNull, 1,
+ type_name);
+ ptrs[i] =
+ allocator.root()->TryRealloc(ptrs[i], alloc_size, type_name);
}
}
if (!i)
EXPECT_TRUE(ptrs[0]);
if (!ptrs[i]) {
- ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(),
- PartitionAllocReturnNull,
- alloc_size, type_name);
+ ptrs[i] = allocator.root()->AllocFlags(PartitionAllocReturnNull,
+ alloc_size, type_name);
EXPECT_FALSE(ptrs[i]);
break;
}
@@ -272,34 +276,36 @@ class PartitionAllocTest : public testing::Test {
// Free, reallocate and free again each block we allocated. We do this to
// check that freeing memory also works correctly after a failed allocation.
for (--i; i >= 0; --i) {
- generic_allocator.root()->Free(ptrs[i]);
- ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(),
- PartitionAllocReturnNull, alloc_size,
- type_name);
+ allocator.root()->Free(ptrs[i]);
+ ptrs[i] = allocator.root()->AllocFlags(PartitionAllocReturnNull,
+ alloc_size, type_name);
EXPECT_TRUE(ptrs[i]);
- generic_allocator.root()->Free(ptrs[i]);
+ allocator.root()->Free(ptrs[i]);
}
- generic_allocator.root()->Free(ptrs);
+ allocator.root()->Free(ptrs);
EXPECT_TRUE(ClearAddressSpaceLimit());
LOG(FATAL) << "DoReturnNullTest";
}
- SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator;
- PartitionAllocatorGeneric generic_allocator;
+ base::test::ScopedFeatureList scoped_feature_list;
+ PartitionAllocator<base::internal::ThreadSafe> allocator;
+ size_t test_bucket_index_;
};
class PartitionAllocDeathTest : public PartitionAllocTest {};
namespace {
-void FreeFullPage(PartitionRoot* root, PartitionRoot::Page* page) {
+void FreeFullPage(PartitionRoot<base::internal::ThreadSafe>* root,
+ PartitionRoot<base::internal::ThreadSafe>::Page* page) {
size_t size = page->bucket->slot_size;
size_t num_slots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / size;
EXPECT_EQ(num_slots, static_cast<size_t>(abs(page->num_allocated_slots)));
- char* ptr = reinterpret_cast<char*>(PartitionRoot::Page::ToPointer(page));
+ char* ptr = reinterpret_cast<char*>(
+ PartitionRoot<base::internal::ThreadSafe>::Page::ToPointer(page));
size_t i;
for (i = 0; i < num_slots; ++i) {
root->Free(ptr + kPointerOffset);
@@ -342,7 +348,7 @@ class MockPartitionStatsDumper : public PartitionStatsDumper {
const PartitionBucketMemoryStats* stats) override {
(void)partition_name;
EXPECT_TRUE(stats->is_valid);
- EXPECT_EQ(0u, stats->bucket_slot_size & kAllocationGranularityMask);
+    EXPECT_EQ(0u, stats->bucket_slot_size & (sizeof(void*) - 1));
bucket_stats.push_back(*stats);
total_resident_bytes += stats->resident_bytes;
total_active_bytes += stats->active_bytes;
@@ -375,9 +381,10 @@ class MockPartitionStatsDumper : public PartitionStatsDumper {
// Check that the most basic of allocate / free pairs work.
TEST_F(PartitionAllocTest, Basic) {
- PartitionRoot::Bucket* bucket =
- &allocator.root()->buckets()[kTestBucketIndex];
- PartitionRoot::Page* seed_page = PartitionRoot::Page::get_sentinel_page();
+ PartitionRoot<ThreadSafe>::Bucket* bucket =
+ &allocator.root()->buckets[test_bucket_index_];
+ PartitionRoot<ThreadSafe>::Page* seed_page =
+ PartitionRoot<ThreadSafe>::Page::get_sentinel_page();
EXPECT_FALSE(bucket->empty_pages_head);
EXPECT_FALSE(bucket->decommitted_pages_head);
@@ -437,25 +444,27 @@ TEST_F(PartitionAllocTest, MultiAlloc) {
// Test a bucket with multiple pages.
TEST_F(PartitionAllocTest, MultiPages) {
- PartitionRoot::Bucket* bucket =
- &allocator.root()->buckets()[kTestBucketIndex];
+ PartitionRoot<ThreadSafe>::Bucket* bucket =
+ &allocator.root()->buckets[test_bucket_index_];
- PartitionRoot::Page* page = GetFullPage(kTestAllocSize);
+ PartitionRoot<ThreadSafe>::Page* page = GetFullPage(kTestAllocSize);
FreeFullPage(allocator.root(), page);
EXPECT_TRUE(bucket->empty_pages_head);
- EXPECT_EQ(PartitionRoot::Page::get_sentinel_page(),
+ EXPECT_EQ(PartitionRoot<ThreadSafe>::Page::get_sentinel_page(),
bucket->active_pages_head);
EXPECT_EQ(nullptr, page->next_page);
EXPECT_EQ(0, page->num_allocated_slots);
page = GetFullPage(kTestAllocSize);
- PartitionRoot::Page* page2 = GetFullPage(kTestAllocSize);
+ PartitionRoot<ThreadSafe>::Page* page2 = GetFullPage(kTestAllocSize);
EXPECT_EQ(page2, bucket->active_pages_head);
EXPECT_EQ(nullptr, page2->next_page);
- EXPECT_EQ(reinterpret_cast<uintptr_t>(PartitionRoot::Page::ToPointer(page)) &
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(
+ PartitionRoot<ThreadSafe>::Page::ToPointer(page)) &
kSuperPageBaseMask,
- reinterpret_cast<uintptr_t>(PartitionRoot::Page::ToPointer(page2)) &
+ reinterpret_cast<uintptr_t>(
+ PartitionRoot<ThreadSafe>::Page::ToPointer(page2)) &
kSuperPageBaseMask);
// Fully free the non-current page. This will leave us with no current
@@ -463,7 +472,7 @@ TEST_F(PartitionAllocTest, MultiPages) {
FreeFullPage(allocator.root(), page);
EXPECT_EQ(0, page->num_allocated_slots);
EXPECT_TRUE(bucket->empty_pages_head);
- EXPECT_EQ(PartitionPage<base::internal::NotThreadSafe>::get_sentinel_page(),
+ EXPECT_EQ(PartitionPage<ThreadSafe>::get_sentinel_page(),
bucket->active_pages_head);
// Allocate a new page, it should pull from the freelist.
@@ -481,18 +490,19 @@ TEST_F(PartitionAllocTest, MultiPages) {
// Test some finer aspects of internal page transitions.
TEST_F(PartitionAllocTest, PageTransitions) {
- PartitionRoot::Bucket* bucket =
- &allocator.root()->buckets()[kTestBucketIndex];
+ PartitionRoot<ThreadSafe>::Bucket* bucket =
+ &allocator.root()->buckets[test_bucket_index_];
- PartitionRoot::Page* page1 = GetFullPage(kTestAllocSize);
+ PartitionRoot<ThreadSafe>::Page* page1 = GetFullPage(kTestAllocSize);
EXPECT_EQ(page1, bucket->active_pages_head);
EXPECT_EQ(nullptr, page1->next_page);
- PartitionRoot::Page* page2 = GetFullPage(kTestAllocSize);
+ PartitionRoot<ThreadSafe>::Page* page2 = GetFullPage(kTestAllocSize);
EXPECT_EQ(page2, bucket->active_pages_head);
EXPECT_EQ(nullptr, page2->next_page);
// Bounce page1 back into the non-full list then fill it up again.
- char* ptr = reinterpret_cast<char*>(PartitionRoot::Page::ToPointer(page1)) +
+ char* ptr = reinterpret_cast<char*>(
+ PartitionRoot<ThreadSafe>::Page::ToPointer(page1)) +
kPointerOffset;
allocator.root()->Free(ptr);
EXPECT_EQ(page1, bucket->active_pages_head);
@@ -503,12 +513,13 @@ TEST_F(PartitionAllocTest, PageTransitions) {
// Allocating another page at this point should cause us to scan over page1
// (which is both full and NOT our current page), and evict it from the
// freelist. Older code had a O(n^2) condition due to failure to do this.
- PartitionRoot::Page* page3 = GetFullPage(kTestAllocSize);
+ PartitionRoot<ThreadSafe>::Page* page3 = GetFullPage(kTestAllocSize);
EXPECT_EQ(page3, bucket->active_pages_head);
EXPECT_EQ(nullptr, page3->next_page);
// Work out a pointer into page2 and free it.
- ptr = reinterpret_cast<char*>(PartitionRoot::Page::ToPointer(page2)) +
+ ptr = reinterpret_cast<char*>(
+ PartitionRoot<ThreadSafe>::Page::ToPointer(page2)) +
kPointerOffset;
allocator.root()->Free(ptr);
// Trying to allocate at this time should cause us to cycle around to page2
@@ -521,7 +532,8 @@ TEST_F(PartitionAllocTest, PageTransitions) {
// Work out a pointer into page1 and free it. This should pull the page
// back into the list of available pages.
- ptr = reinterpret_cast<char*>(PartitionRoot::Page::ToPointer(page1)) +
+ ptr = reinterpret_cast<char*>(
+ PartitionRoot<ThreadSafe>::Page::ToPointer(page1)) +
kPointerOffset;
allocator.root()->Free(ptr);
// This allocation should be satisfied by page1.
@@ -544,16 +556,17 @@ TEST_F(PartitionAllocTest, PageTransitions) {
// Test some corner cases relating to page transitions in the internal
// free page list metadata bucket.
TEST_F(PartitionAllocTest, FreePageListPageTransitions) {
- PartitionRoot::Bucket* bucket =
- &allocator.root()->buckets()[kTestBucketIndex];
+ PartitionRoot<ThreadSafe>::Bucket* bucket =
+ &allocator.root()->buckets[test_bucket_index_];
size_t num_to_fill_free_list_page =
- kPartitionPageSize / (sizeof(PartitionRoot::Page) + kExtraAllocSize);
+ kPartitionPageSize /
+ (sizeof(PartitionRoot<ThreadSafe>::Page) + kExtraAllocSize);
// The +1 is because we need to account for the fact that the current page
// never gets thrown on the freelist.
++num_to_fill_free_list_page;
- auto pages =
- std::make_unique<PartitionRoot::Page*[]>(num_to_fill_free_list_page);
+ auto pages = std::make_unique<PartitionRoot<ThreadSafe>::Page*[]>(
+ num_to_fill_free_list_page);
size_t i;
for (i = 0; i < num_to_fill_free_list_page; ++i) {
@@ -562,15 +575,15 @@ TEST_F(PartitionAllocTest, FreePageListPageTransitions) {
EXPECT_EQ(pages[num_to_fill_free_list_page - 1], bucket->active_pages_head);
for (i = 0; i < num_to_fill_free_list_page; ++i)
FreeFullPage(allocator.root(), pages[i]);
- EXPECT_EQ(PartitionRoot::Page::get_sentinel_page(),
+ EXPECT_EQ(PartitionRoot<ThreadSafe>::Page::get_sentinel_page(),
bucket->active_pages_head);
EXPECT_TRUE(bucket->empty_pages_head);
// Allocate / free in a different bucket size so we get control of a
// different free page list. We need two pages because one will be the last
// active page and not get freed.
- PartitionRoot::Page* page1 = GetFullPage(kTestAllocSize * 2);
- PartitionRoot::Page* page2 = GetFullPage(kTestAllocSize * 2);
+ PartitionRoot<ThreadSafe>::Page* page1 = GetFullPage(kTestAllocSize * 2);
+ PartitionRoot<ThreadSafe>::Page* page2 = GetFullPage(kTestAllocSize * 2);
FreeFullPage(allocator.root(), page1);
FreeFullPage(allocator.root(), page2);
@@ -581,7 +594,7 @@ TEST_F(PartitionAllocTest, FreePageListPageTransitions) {
for (i = 0; i < num_to_fill_free_list_page; ++i)
FreeFullPage(allocator.root(), pages[i]);
- EXPECT_EQ(PartitionRoot::Page::get_sentinel_page(),
+ EXPECT_EQ(PartitionRoot<ThreadSafe>::Page::get_sentinel_page(),
bucket->active_pages_head);
EXPECT_TRUE(bucket->empty_pages_head);
}
@@ -597,12 +610,13 @@ TEST_F(PartitionAllocTest, MultiPageAllocs) {
--num_pages_needed;
EXPECT_GT(num_pages_needed, 1u);
- auto pages = std::make_unique<PartitionRoot::Page*[]>(num_pages_needed);
+ auto pages =
+ std::make_unique<PartitionRoot<ThreadSafe>::Page*[]>(num_pages_needed);
uintptr_t first_super_page_base = 0;
size_t i;
for (i = 0; i < num_pages_needed; ++i) {
pages[i] = GetFullPage(kTestAllocSize);
- void* storage_ptr = PartitionRoot::Page::ToPointer(pages[i]);
+ void* storage_ptr = PartitionRoot<ThreadSafe>::Page::ToPointer(pages[i]);
if (!i)
first_super_page_base =
reinterpret_cast<uintptr_t>(storage_ptr) & kSuperPageBaseMask;
@@ -623,31 +637,30 @@ TEST_F(PartitionAllocTest, MultiPageAllocs) {
// Test the generic allocation functions that can handle arbitrary sizes and
// reallocing etc.
TEST_F(PartitionAllocTest, GenericAlloc) {
- void* ptr = generic_allocator.root()->Alloc(1, type_name);
+ void* ptr = allocator.root()->Alloc(1, type_name);
EXPECT_TRUE(ptr);
- generic_allocator.root()->Free(ptr);
- ptr = generic_allocator.root()->Alloc(kGenericMaxBucketed + 1, type_name);
+ allocator.root()->Free(ptr);
+ ptr = allocator.root()->Alloc(kGenericMaxBucketed + 1, type_name);
EXPECT_TRUE(ptr);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
- ptr = generic_allocator.root()->Alloc(1, type_name);
+ ptr = allocator.root()->Alloc(1, type_name);
EXPECT_TRUE(ptr);
void* orig_ptr = ptr;
char* char_ptr = static_cast<char*>(ptr);
*char_ptr = 'A';
// Change the size of the realloc, remaining inside the same bucket.
- void* new_ptr = generic_allocator.root()->Realloc(ptr, 2, type_name);
+ void* new_ptr = allocator.root()->Realloc(ptr, 2, type_name);
EXPECT_EQ(ptr, new_ptr);
- new_ptr = generic_allocator.root()->Realloc(ptr, 1, type_name);
+ new_ptr = allocator.root()->Realloc(ptr, 1, type_name);
EXPECT_EQ(ptr, new_ptr);
- new_ptr =
- generic_allocator.root()->Realloc(ptr, kGenericSmallestBucket, type_name);
+ new_ptr = allocator.root()->Realloc(ptr, kGenericSmallestBucket, type_name);
EXPECT_EQ(ptr, new_ptr);
// Change the size of the realloc, switching buckets.
- new_ptr = generic_allocator.root()->Realloc(ptr, kGenericSmallestBucket + 1,
- type_name);
+ new_ptr =
+ allocator.root()->Realloc(ptr, kGenericSmallestBucket + 1, type_name);
EXPECT_NE(new_ptr, ptr);
// Check that the realloc copied correctly.
char* new_char_ptr = static_cast<char*>(new_ptr);
@@ -663,13 +676,13 @@ TEST_F(PartitionAllocTest, GenericAlloc) {
// The realloc moved. To check that the old allocation was freed, we can
// do an alloc of the old allocation size and check that the old allocation
// address is at the head of the freelist and reused.
- void* reused_ptr = generic_allocator.root()->Alloc(1, type_name);
+ void* reused_ptr = allocator.root()->Alloc(1, type_name);
EXPECT_EQ(reused_ptr, orig_ptr);
- generic_allocator.root()->Free(reused_ptr);
+ allocator.root()->Free(reused_ptr);
// Downsize the realloc.
ptr = new_ptr;
- new_ptr = generic_allocator.root()->Realloc(ptr, 1, type_name);
+ new_ptr = allocator.root()->Realloc(ptr, 1, type_name);
EXPECT_EQ(new_ptr, orig_ptr);
new_char_ptr = static_cast<char*>(new_ptr);
EXPECT_EQ(*new_char_ptr, 'B');
@@ -677,8 +690,7 @@ TEST_F(PartitionAllocTest, GenericAlloc) {
// Upsize the realloc to outside the partition.
ptr = new_ptr;
- new_ptr = generic_allocator.root()->Realloc(ptr, kGenericMaxBucketed + 1,
- type_name);
+ new_ptr = allocator.root()->Realloc(ptr, kGenericMaxBucketed + 1, type_name);
EXPECT_NE(new_ptr, ptr);
new_char_ptr = static_cast<char*>(new_ptr);
EXPECT_EQ(*new_char_ptr, 'C');
@@ -686,85 +698,85 @@ TEST_F(PartitionAllocTest, GenericAlloc) {
// Upsize and downsize the realloc, remaining outside the partition.
ptr = new_ptr;
- new_ptr = generic_allocator.root()->Realloc(ptr, kGenericMaxBucketed * 10,
- type_name);
+ new_ptr = allocator.root()->Realloc(ptr, kGenericMaxBucketed * 10, type_name);
new_char_ptr = static_cast<char*>(new_ptr);
EXPECT_EQ(*new_char_ptr, 'D');
*new_char_ptr = 'E';
ptr = new_ptr;
- new_ptr = generic_allocator.root()->Realloc(ptr, kGenericMaxBucketed * 2,
- type_name);
+ new_ptr = allocator.root()->Realloc(ptr, kGenericMaxBucketed * 2, type_name);
new_char_ptr = static_cast<char*>(new_ptr);
EXPECT_EQ(*new_char_ptr, 'E');
*new_char_ptr = 'F';
// Downsize the realloc to inside the partition.
ptr = new_ptr;
- new_ptr = generic_allocator.root()->Realloc(ptr, 1, type_name);
+ new_ptr = allocator.root()->Realloc(ptr, 1, type_name);
EXPECT_NE(new_ptr, ptr);
EXPECT_EQ(new_ptr, orig_ptr);
new_char_ptr = static_cast<char*>(new_ptr);
EXPECT_EQ(*new_char_ptr, 'F');
- generic_allocator.root()->Free(new_ptr);
+ allocator.root()->Free(new_ptr);
}
// Test the generic allocation functions can handle some specific sizes of
// interest.
TEST_F(PartitionAllocTest, GenericAllocSizes) {
- void* ptr = generic_allocator.root()->Alloc(0, type_name);
+ void* ptr = allocator.root()->Alloc(0, type_name);
EXPECT_TRUE(ptr);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
// kPartitionPageSize is interesting because it results in just one
// allocation per page, which tripped up some corner cases.
size_t size = kPartitionPageSize - kExtraAllocSize;
- ptr = generic_allocator.root()->Alloc(size, type_name);
+ ptr = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
- void* ptr2 = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr2 = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr2);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
// Should be freeable at this point.
- PartitionRootGeneric::Page* page = PartitionRootGeneric::Page::FromPointer(
- PartitionCookieFreePointerAdjust(ptr));
+ PartitionRoot<ThreadSafe>::Page* page =
+ PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
EXPECT_NE(-1, page->empty_cache_index);
- generic_allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr2);
size = (((kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) -
kSystemPageSize) /
2) -
kExtraAllocSize;
- ptr = generic_allocator.root()->Alloc(size, type_name);
+ ptr = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
memset(ptr, 'A', size);
- ptr2 = generic_allocator.root()->Alloc(size, type_name);
+ ptr2 = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr2);
- void* ptr3 = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr3 = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr3);
- void* ptr4 = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr4 = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr4);
page = PartitionPage<base::internal::ThreadSafe>::FromPointer(
PartitionCookieFreePointerAdjust(ptr));
- PartitionRootGeneric::Page* page2 = PartitionRootGeneric::Page::FromPointer(
- PartitionCookieFreePointerAdjust(ptr3));
+ PartitionRoot<ThreadSafe>::Page* page2 =
+ PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr3));
EXPECT_NE(page, page2);
- generic_allocator.root()->Free(ptr);
- generic_allocator.root()->Free(ptr3);
- generic_allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr3);
+ allocator.root()->Free(ptr2);
// Should be freeable at this point.
EXPECT_NE(-1, page->empty_cache_index);
EXPECT_EQ(0, page->num_allocated_slots);
EXPECT_EQ(0, page->num_unprovisioned_slots);
- void* new_ptr = generic_allocator.root()->Alloc(size, type_name);
+ void* new_ptr = allocator.root()->Alloc(size, type_name);
EXPECT_EQ(ptr3, new_ptr);
- new_ptr = generic_allocator.root()->Alloc(size, type_name);
+ new_ptr = allocator.root()->Alloc(size, type_name);
EXPECT_EQ(ptr2, new_ptr);
- generic_allocator.root()->Free(new_ptr);
- generic_allocator.root()->Free(ptr3);
- generic_allocator.root()->Free(ptr4);
+ allocator.root()->Free(new_ptr);
+ allocator.root()->Free(ptr3);
+ allocator.root()->Free(ptr4);
#if DCHECK_IS_ON()
// |PartitionPage::Free| must poison the slot's contents with |kFreedByte|.
@@ -777,8 +789,8 @@ TEST_F(PartitionAllocTest, GenericAllocSizes) {
// Test this only if the device has enough memory or it might fail due
// to OOM.
if (IsLargeMemoryDevice()) {
- ptr = generic_allocator.root()->Alloc(512 * 1024 * 1024 + 1, type_name);
- generic_allocator.root()->Free(ptr);
+ ptr = allocator.root()->Alloc(512 * 1024 * 1024 + 1, type_name);
+ allocator.root()->Free(ptr);
}
// Check a more reasonable, but still direct mapped, size.
@@ -786,22 +798,22 @@ TEST_F(PartitionAllocTest, GenericAllocSizes) {
size = 20 * 1024 * 1024;
size -= kSystemPageSize;
size -= 1;
- ptr = generic_allocator.root()->Alloc(size, type_name);
+ ptr = allocator.root()->Alloc(size, type_name);
char* char_ptr = reinterpret_cast<char*>(ptr);
*(char_ptr + (size - 1)) = 'A';
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
// Can we free null?
- generic_allocator.root()->Free(nullptr);
+ allocator.root()->Free(nullptr);
// Do we correctly get a null for a failed allocation?
- EXPECT_EQ(nullptr, PartitionAllocGenericFlags(
- generic_allocator.root(), PartitionAllocReturnNull,
- 3u * 1024 * 1024 * 1024, type_name));
+ EXPECT_EQ(nullptr,
+ allocator.root()->AllocFlags(PartitionAllocReturnNull,
+ 3u * 1024 * 1024 * 1024, type_name));
}
// Test that we can fetch the real allocated size after an allocation.
-TEST_F(PartitionAllocTest, GenericAllocGetSize) {
+TEST_F(PartitionAllocTest, GenericAllocGetSizeAndOffset) {
void* ptr;
size_t requested_size, actual_size, predicted_size;
@@ -809,24 +821,38 @@ TEST_F(PartitionAllocTest, GenericAllocGetSize) {
// Allocate something small.
requested_size = 511 - kExtraAllocSize;
- predicted_size = generic_allocator.root()->ActualSize(requested_size);
- ptr = generic_allocator.root()->Alloc(requested_size, type_name);
+ predicted_size = allocator.root()->ActualSize(requested_size);
+ ptr = allocator.root()->Alloc(requested_size, type_name);
EXPECT_TRUE(ptr);
- actual_size = PartitionAllocGetSize(ptr);
+ actual_size = PartitionAllocGetSize<ThreadSafe>(ptr);
EXPECT_EQ(predicted_size, actual_size);
EXPECT_LT(requested_size, actual_size);
- generic_allocator.root()->Free(ptr);
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ for (size_t offset = 0; offset < requested_size; ++offset) {
+ size_t actual_offset = PartitionAllocGetSlotOffset<ThreadSafe>(
+ static_cast<char*>(ptr) + offset);
+ EXPECT_EQ(actual_offset, offset);
+ }
+#endif
+ allocator.root()->Free(ptr);
// Allocate a size that should be a perfect match for a bucket, because it
// is an exact power of 2.
requested_size = (256 * 1024) - kExtraAllocSize;
- predicted_size = generic_allocator.root()->ActualSize(requested_size);
- ptr = generic_allocator.root()->Alloc(requested_size, type_name);
+ predicted_size = allocator.root()->ActualSize(requested_size);
+ ptr = allocator.root()->Alloc(requested_size, type_name);
EXPECT_TRUE(ptr);
- actual_size = PartitionAllocGetSize(ptr);
+ actual_size = PartitionAllocGetSize<ThreadSafe>(ptr);
EXPECT_EQ(predicted_size, actual_size);
EXPECT_EQ(requested_size, actual_size);
- generic_allocator.root()->Free(ptr);
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ for (size_t offset = 0; offset < requested_size; offset += 877) {
+ size_t actual_offset = PartitionAllocGetSlotOffset<ThreadSafe>(
+ static_cast<char*>(ptr) + offset);
+ EXPECT_EQ(actual_offset, offset);
+ }
+#endif
+ allocator.root()->Free(ptr);
// Allocate a size that is a system page smaller than a bucket. GetSize()
// should return a larger size than we asked for now.
@@ -835,55 +861,93 @@ TEST_F(PartitionAllocTest, GenericAllocGetSize) {
num /= 2;
}
requested_size = num * kSystemPageSize - kSystemPageSize - kExtraAllocSize;
- predicted_size = generic_allocator.root()->ActualSize(requested_size);
- ptr = generic_allocator.root()->Alloc(requested_size, type_name);
+ predicted_size = allocator.root()->ActualSize(requested_size);
+ ptr = allocator.root()->Alloc(requested_size, type_name);
EXPECT_TRUE(ptr);
- actual_size = PartitionAllocGetSize(ptr);
+ actual_size = PartitionAllocGetSize<ThreadSafe>(ptr);
EXPECT_EQ(predicted_size, actual_size);
EXPECT_EQ(requested_size + kSystemPageSize, actual_size);
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ for (size_t offset = 0; offset < requested_size; offset += 4999) {
+ size_t actual_offset = PartitionAllocGetSlotOffset<ThreadSafe>(
+ static_cast<char*>(ptr) + offset);
+ EXPECT_EQ(actual_offset, offset);
+ }
+#endif
// Check that we can write at the end of the reported size too.
char* char_ptr = reinterpret_cast<char*>(ptr);
*(char_ptr + (actual_size - 1)) = 'A';
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
// Allocate something very large, and uneven.
if (IsLargeMemoryDevice()) {
requested_size = 512 * 1024 * 1024 - 1;
- predicted_size = generic_allocator.root()->ActualSize(requested_size);
- ptr = generic_allocator.root()->Alloc(requested_size, type_name);
+ predicted_size = allocator.root()->ActualSize(requested_size);
+ ptr = allocator.root()->Alloc(requested_size, type_name);
EXPECT_TRUE(ptr);
- actual_size = PartitionAllocGetSize(ptr);
+ actual_size = PartitionAllocGetSize<ThreadSafe>(ptr);
EXPECT_EQ(predicted_size, actual_size);
EXPECT_LT(requested_size, actual_size);
- generic_allocator.root()->Free(ptr);
+ // Unlike above, don't test for PartitionAllocGetSlotOffset. Such large
+ // allocations are direct-mapped, for which one can't easily obtain the
+ // offset.
+ allocator.root()->Free(ptr);
}
// Too large allocation.
requested_size = kGenericMaxDirectMapped + 1;
- predicted_size = generic_allocator.root()->ActualSize(requested_size);
+ predicted_size = allocator.root()->ActualSize(requested_size);
EXPECT_EQ(requested_size, predicted_size);
}
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+TEST_F(PartitionAllocTest, GetOffsetMultiplePages) {
+ size_t size = 48;
+ size_t real_size = size + kExtraAllocSize;
+ PartitionBucket<ThreadSafe>* bucket =
+ allocator.root()->SizeToBucket(real_size);
+  // Make sure the test is exercising the multiple-partition-pages case.
+ EXPECT_GT(bucket->num_system_pages_per_slot_span,
+ kPartitionPageSize / kSystemPageSize);
+ size_t num_slots =
+ (bucket->num_system_pages_per_slot_span * kSystemPageSize) / real_size;
+ std::vector<void*> ptrs;
+ for (size_t i = 0; i < num_slots; ++i) {
+ ptrs.push_back(allocator.root()->Alloc(size, type_name));
+ }
+ for (size_t i = 0; i < num_slots; ++i) {
+ char* ptr = static_cast<char*>(ptrs[i]);
+ for (size_t offset = 0; offset < size; offset += 13) {
+ EXPECT_EQ(PartitionAllocGetSize<ThreadSafe>(ptr), size);
+ size_t actual_offset =
+ PartitionAllocGetSlotOffset<ThreadSafe>(ptr + offset);
+ EXPECT_EQ(actual_offset, offset);
+ }
+ allocator.root()->Free(ptr);
+ }
+}
+#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
// Test the realloc() contract.
TEST_F(PartitionAllocTest, Realloc) {
// realloc(0, size) should be equivalent to malloc().
- void* ptr =
- generic_allocator.root()->Realloc(nullptr, kTestAllocSize, type_name);
+ void* ptr = allocator.root()->Realloc(nullptr, kTestAllocSize, type_name);
memset(ptr, 'A', kTestAllocSize);
- PartitionRootGeneric::Page* page = PartitionRootGeneric::Page::FromPointer(
- PartitionCookieFreePointerAdjust(ptr));
+ PartitionRoot<ThreadSafe>::Page* page =
+ PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
// realloc(ptr, 0) should be equivalent to free().
- void* ptr2 = generic_allocator.root()->Realloc(ptr, 0, type_name);
+ void* ptr2 = allocator.root()->Realloc(ptr, 0, type_name);
EXPECT_EQ(nullptr, ptr2);
EXPECT_EQ(PartitionCookieFreePointerAdjust(ptr), page->freelist_head);
// Test that growing an allocation with realloc() copies everything from the
// old allocation.
size_t size = kSystemPageSize - kExtraAllocSize;
- EXPECT_EQ(size, generic_allocator.root()->ActualSize(size));
- ptr = generic_allocator.root()->Alloc(size, type_name);
+ EXPECT_EQ(size, allocator.root()->ActualSize(size));
+ ptr = allocator.root()->Alloc(size, type_name);
memset(ptr, 'A', size);
- ptr2 = generic_allocator.root()->Realloc(ptr, size + 1, type_name);
+ ptr2 = allocator.root()->Realloc(ptr, size + 1, type_name);
EXPECT_NE(ptr, ptr2);
char* char_ptr2 = static_cast<char*>(ptr2);
EXPECT_EQ('A', char_ptr2[0]);
@@ -894,7 +958,7 @@ TEST_F(PartitionAllocTest, Realloc) {
// Test that shrinking an allocation with realloc() also copies everything
// from the old allocation.
- ptr = generic_allocator.root()->Realloc(ptr2, size - 1, type_name);
+ ptr = allocator.root()->Realloc(ptr2, size - 1, type_name);
EXPECT_NE(ptr2, ptr);
char* char_ptr = static_cast<char*>(ptr);
EXPECT_EQ('A', char_ptr[0]);
@@ -903,47 +967,47 @@ TEST_F(PartitionAllocTest, Realloc) {
EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(char_ptr[size - 1]));
#endif
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
// Test that shrinking a direct mapped allocation happens in-place.
size = kGenericMaxBucketed + 16 * kSystemPageSize;
- ptr = generic_allocator.root()->Alloc(size, type_name);
- size_t actual_size = PartitionAllocGetSize(ptr);
- ptr2 = generic_allocator.root()->Realloc(
+ ptr = allocator.root()->Alloc(size, type_name);
+ size_t actual_size = PartitionAllocGetSize<ThreadSafe>(ptr);
+ ptr2 = allocator.root()->Realloc(
ptr, kGenericMaxBucketed + 8 * kSystemPageSize, type_name);
EXPECT_EQ(ptr, ptr2);
- EXPECT_EQ(actual_size - 8 * kSystemPageSize, PartitionAllocGetSize(ptr2));
+ EXPECT_EQ(actual_size - 8 * kSystemPageSize,
+ PartitionAllocGetSize<ThreadSafe>(ptr2));
// Test that a previously in-place shrunk direct mapped allocation can be
// expanded up again within its original size.
- ptr = generic_allocator.root()->Realloc(ptr2, size - kSystemPageSize,
- type_name);
+ ptr = allocator.root()->Realloc(ptr2, size - kSystemPageSize, type_name);
EXPECT_EQ(ptr2, ptr);
- EXPECT_EQ(actual_size - kSystemPageSize, PartitionAllocGetSize(ptr));
+ EXPECT_EQ(actual_size - kSystemPageSize,
+ PartitionAllocGetSize<ThreadSafe>(ptr));
// Test that a direct mapped allocation is performed not in-place when the
// new size is small enough.
- ptr2 = generic_allocator.root()->Realloc(ptr, kSystemPageSize, type_name);
+ ptr2 = allocator.root()->Realloc(ptr, kSystemPageSize, type_name);
EXPECT_NE(ptr, ptr2);
- generic_allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr2);
}
// Tests the handing out of freelists for partial pages.
TEST_F(PartitionAllocTest, PartialPageFreelists) {
- size_t big_size = allocator.root()->max_allocation - kExtraAllocSize;
- EXPECT_EQ(kSystemPageSize - kAllocationGranularity,
- big_size + kExtraAllocSize);
- size_t bucket_index = (big_size + kExtraAllocSize) >> kBucketShift;
- PartitionBucket<base::internal::NotThreadSafe>* bucket =
- &allocator.root()->buckets()[bucket_index];
+ size_t big_size = kSystemPageSize - kExtraAllocSize;
+ size_t bucket_index = SizeToIndex(big_size + kExtraAllocSize);
+ PartitionRoot<ThreadSafe>::Bucket* bucket =
+ &allocator.root()->buckets[bucket_index];
EXPECT_EQ(nullptr, bucket->empty_pages_head);
void* ptr = allocator.root()->Alloc(big_size, type_name);
EXPECT_TRUE(ptr);
- PartitionRoot::Page* page =
- PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+ PartitionRoot<ThreadSafe>::Page* page =
+ PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
size_t total_slots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
(big_size + kExtraAllocSize);
@@ -951,9 +1015,9 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) {
-  // The freelist should have one entry, because we were able to exactly fit
-  // one object slot and one freelist pointer (the null that the head points
-  // to) into a system page.
+  // The freelist should be empty: at this size the slot fills the whole
+  // system page, so provisioning the first page yields exactly one slot and
+  // leaves nothing over for a freelist entry.
- EXPECT_TRUE(page->freelist_head);
+ EXPECT_FALSE(page->freelist_head);
EXPECT_EQ(1, page->num_allocated_slots);
- EXPECT_EQ(2, page->num_unprovisioned_slots);
+ EXPECT_EQ(3, page->num_unprovisioned_slots);
void* ptr2 = allocator.root()->Alloc(big_size, type_name);
EXPECT_TRUE(ptr2);
@@ -963,9 +1027,9 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) {
void* ptr3 = allocator.root()->Alloc(big_size, type_name);
EXPECT_TRUE(ptr3);
- EXPECT_TRUE(page->freelist_head);
+ EXPECT_FALSE(page->freelist_head);
EXPECT_EQ(3, page->num_allocated_slots);
- EXPECT_EQ(0, page->num_unprovisioned_slots);
+ EXPECT_EQ(1, page->num_unprovisioned_slots);
void* ptr4 = allocator.root()->Alloc(big_size, type_name);
EXPECT_TRUE(ptr4);
@@ -976,8 +1040,9 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) {
void* ptr5 = allocator.root()->Alloc(big_size, type_name);
EXPECT_TRUE(ptr5);
- PartitionRoot::Page* page2 =
- PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr5));
+ PartitionRoot<ThreadSafe>::Page* page2 =
+ PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr5));
EXPECT_EQ(1, page2->num_allocated_slots);
// Churn things a little whilst there's a partial page freelist.
@@ -998,59 +1063,59 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) {
// And test a couple of sizes that do not cross kSystemPageSize with a single
// allocation.
- size_t mediumSize = (kSystemPageSize / 2) - kExtraAllocSize;
- bucket_index = (mediumSize + kExtraAllocSize) >> kBucketShift;
- bucket = &allocator.root()->buckets()[bucket_index];
+ size_t medium_size = (kSystemPageSize / 2) - kExtraAllocSize;
+ bucket_index = SizeToIndex(medium_size + kExtraAllocSize);
+ bucket = &allocator.root()->buckets[bucket_index];
EXPECT_EQ(nullptr, bucket->empty_pages_head);
- ptr = allocator.root()->Alloc(mediumSize, type_name);
+ ptr = allocator.root()->Alloc(medium_size, type_name);
EXPECT_TRUE(ptr);
- page =
- PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+ page = PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(1, page->num_allocated_slots);
total_slots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
- (mediumSize + kExtraAllocSize);
- size_t first_page_slots = kSystemPageSize / (mediumSize + kExtraAllocSize);
+ (medium_size + kExtraAllocSize);
+ size_t first_page_slots = kSystemPageSize / (medium_size + kExtraAllocSize);
EXPECT_EQ(2u, first_page_slots);
EXPECT_EQ(total_slots - first_page_slots, page->num_unprovisioned_slots);
allocator.root()->Free(ptr);
- size_t smallSize = (kSystemPageSize / 4) - kExtraAllocSize;
- bucket_index = (smallSize + kExtraAllocSize) >> kBucketShift;
- bucket = &allocator.root()->buckets()[bucket_index];
+ size_t small_size = (kSystemPageSize / 4) - kExtraAllocSize;
+ bucket_index = SizeToIndex(small_size + kExtraAllocSize);
+ bucket = &allocator.root()->buckets[bucket_index];
EXPECT_EQ(nullptr, bucket->empty_pages_head);
- ptr = allocator.root()->Alloc(smallSize, type_name);
+ ptr = allocator.root()->Alloc(small_size, type_name);
EXPECT_TRUE(ptr);
- page =
- PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+ page = PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(1, page->num_allocated_slots);
total_slots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
- (smallSize + kExtraAllocSize);
- first_page_slots = kSystemPageSize / (smallSize + kExtraAllocSize);
+ (small_size + kExtraAllocSize);
+ first_page_slots = kSystemPageSize / (small_size + kExtraAllocSize);
EXPECT_EQ(total_slots - first_page_slots, page->num_unprovisioned_slots);
allocator.root()->Free(ptr);
EXPECT_TRUE(page->freelist_head);
EXPECT_EQ(0, page->num_allocated_slots);
- size_t verySmallSize = 32 - kExtraAllocSize;
- bucket_index = (verySmallSize + kExtraAllocSize) >> kBucketShift;
- bucket = &allocator.root()->buckets()[bucket_index];
+ size_t very_small_size = 32 - kExtraAllocSize;
+ bucket_index = SizeToIndex(very_small_size + kExtraAllocSize);
+ bucket = &allocator.root()->buckets[bucket_index];
EXPECT_EQ(nullptr, bucket->empty_pages_head);
- ptr = allocator.root()->Alloc(verySmallSize, type_name);
+ ptr = allocator.root()->Alloc(very_small_size, type_name);
EXPECT_TRUE(ptr);
- page =
- PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+ page = PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(1, page->num_allocated_slots);
total_slots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
- (verySmallSize + kExtraAllocSize);
- first_page_slots = kSystemPageSize / (verySmallSize + kExtraAllocSize);
+ (very_small_size + kExtraAllocSize);
+ first_page_slots = kSystemPageSize / (very_small_size + kExtraAllocSize);
EXPECT_EQ(total_slots - first_page_slots, page->num_unprovisioned_slots);
allocator.root()->Free(ptr);
@@ -1061,54 +1126,57 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) {
// larger than a system page.
size_t page_and_a_half_size =
(kSystemPageSize + (kSystemPageSize / 2)) - kExtraAllocSize;
- ptr = generic_allocator.root()->Alloc(page_and_a_half_size, type_name);
+ ptr = allocator.root()->Alloc(page_and_a_half_size, type_name);
EXPECT_TRUE(ptr);
- page =
- PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+ page = PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(1, page->num_allocated_slots);
EXPECT_TRUE(page->freelist_head);
total_slots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
(page_and_a_half_size + kExtraAllocSize);
EXPECT_EQ(total_slots - 2, page->num_unprovisioned_slots);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
  // And then make sure that exactly the page size only faults one page.
size_t pageSize = kSystemPageSize - kExtraAllocSize;
- ptr = generic_allocator.root()->Alloc(pageSize, type_name);
+ ptr = allocator.root()->Alloc(pageSize, type_name);
EXPECT_TRUE(ptr);
- page =
- PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+ page = PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(1, page->num_allocated_slots);
- EXPECT_FALSE(page->freelist_head);
+ EXPECT_TRUE(page->freelist_head);
total_slots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
(pageSize + kExtraAllocSize);
- EXPECT_EQ(total_slots - 1, page->num_unprovisioned_slots);
- generic_allocator.root()->Free(ptr);
+ EXPECT_EQ(total_slots - 2, page->num_unprovisioned_slots);
+ allocator.root()->Free(ptr);
}
// Test some of the fragmentation-resistant properties of the allocator.
TEST_F(PartitionAllocTest, PageRefilling) {
- PartitionRoot::Bucket* bucket =
- &allocator.root()->buckets()[kTestBucketIndex];
+ PartitionRoot<ThreadSafe>::Bucket* bucket =
+ &allocator.root()->buckets[test_bucket_index_];
// Grab two full pages and a non-full page.
- PartitionRoot::Page* page1 = GetFullPage(kTestAllocSize);
- PartitionRoot::Page* page2 = GetFullPage(kTestAllocSize);
+ PartitionRoot<ThreadSafe>::Page* page1 = GetFullPage(kTestAllocSize);
+ PartitionRoot<ThreadSafe>::Page* page2 = GetFullPage(kTestAllocSize);
void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
EXPECT_TRUE(ptr);
EXPECT_NE(page1, bucket->active_pages_head);
EXPECT_NE(page2, bucket->active_pages_head);
- PartitionRoot::Page* page =
- PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+ PartitionRoot<ThreadSafe>::Page* page =
+ PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(1, page->num_allocated_slots);
// Work out a pointer into page2 and free it; and then page1 and free it.
- char* ptr2 = reinterpret_cast<char*>(PartitionRoot::Page::ToPointer(page1)) +
+ char* ptr2 = reinterpret_cast<char*>(
+ PartitionRoot<ThreadSafe>::Page::ToPointer(page1)) +
kPointerOffset;
allocator.root()->Free(ptr2);
- ptr2 = reinterpret_cast<char*>(PartitionRoot::Page::ToPointer(page2)) +
+ ptr2 = reinterpret_cast<char*>(
+ PartitionRoot<ThreadSafe>::Page::ToPointer(page2)) +
kPointerOffset;
allocator.root()->Free(ptr2);
@@ -1127,18 +1195,21 @@ TEST_F(PartitionAllocTest, PageRefilling) {
TEST_F(PartitionAllocTest, PartialPages) {
// Find a size that is backed by a partial partition page.
size_t size = sizeof(void*);
- PartitionRoot::Bucket* bucket = nullptr;
- while (size < kTestMaxAllocation) {
- bucket = &allocator.root()->buckets()[size >> kBucketShift];
+ size_t bucket_index;
+
+ PartitionRoot<ThreadSafe>::Bucket* bucket = nullptr;
+ while (size < 1000u) {
+ bucket_index = SizeToIndex(size + kExtraAllocSize);
+ bucket = &allocator.root()->buckets[bucket_index];
if (bucket->num_system_pages_per_slot_span %
kNumSystemPagesPerPartitionPage)
break;
size += sizeof(void*);
}
- EXPECT_LT(size, kTestMaxAllocation);
+ EXPECT_LT(size, 1000u);
- PartitionRoot::Page* page1 = GetFullPage(size);
- PartitionRoot::Page* page2 = GetFullPage(size);
+ PartitionRoot<ThreadSafe>::Page* page1 = GetFullPage(size);
+ PartitionRoot<ThreadSafe>::Page* page2 = GetFullPage(size);
FreeFullPage(allocator.root(), page2);
FreeFullPage(allocator.root(), page1);
}
@@ -1149,16 +1220,18 @@ TEST_F(PartitionAllocTest, MappingCollision) {
// guard pages.
size_t num_partition_pages_needed = kNumPartitionPagesPerSuperPage - 2;
auto first_super_page_pages =
- std::make_unique<PartitionRoot::Page*[]>(num_partition_pages_needed);
+ std::make_unique<PartitionRoot<ThreadSafe>::Page*[]>(
+ num_partition_pages_needed);
auto second_super_page_pages =
- std::make_unique<PartitionRoot::Page*[]>(num_partition_pages_needed);
+ std::make_unique<PartitionRoot<ThreadSafe>::Page*[]>(
+ num_partition_pages_needed);
size_t i;
for (i = 0; i < num_partition_pages_needed; ++i)
first_super_page_pages[i] = GetFullPage(kTestAllocSize);
char* page_base = reinterpret_cast<char*>(
- PartitionRoot::Page::ToPointer(first_super_page_pages[0]));
+ PartitionRoot<ThreadSafe>::Page::ToPointer(first_super_page_pages[0]));
EXPECT_EQ(kPartitionPageSize,
reinterpret_cast<uintptr_t>(page_base) & kSuperPageOffsetMask);
page_base -= kPartitionPageSize;
@@ -1180,7 +1253,7 @@ TEST_F(PartitionAllocTest, MappingCollision) {
FreePages(map2, kPageAllocationGranularity);
page_base = reinterpret_cast<char*>(
- PartitionRoot::Page::ToPointer(second_super_page_pages[0]));
+ PartitionRoot<ThreadSafe>::Page::ToPointer(second_super_page_pages[0]));
EXPECT_EQ(kPartitionPageSize,
reinterpret_cast<uintptr_t>(page_base) & kSuperPageOffsetMask);
page_base -= kPartitionPageSize;
@@ -1199,27 +1272,31 @@ TEST_F(PartitionAllocTest, MappingCollision) {
EXPECT_TRUE(TrySetSystemPagesAccess(map2, kPageAllocationGranularity,
PageInaccessible));
- PartitionRoot::Page* page_in_third_super_page = GetFullPage(kTestAllocSize);
+ PartitionRoot<ThreadSafe>::Page* page_in_third_super_page =
+ GetFullPage(kTestAllocSize);
FreePages(map1, kPageAllocationGranularity);
FreePages(map2, kPageAllocationGranularity);
EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(
- PartitionRoot::Page::ToPointer(page_in_third_super_page)) &
+ PartitionRoot<ThreadSafe>::Page::ToPointer(
+ page_in_third_super_page)) &
kPartitionPageOffsetMask);
// And make sure we really did get a page in a new superpage.
- EXPECT_NE(reinterpret_cast<uintptr_t>(
- PartitionRoot::Page::ToPointer(first_super_page_pages[0])) &
- kSuperPageBaseMask,
- reinterpret_cast<uintptr_t>(
- PartitionRoot::Page::ToPointer(page_in_third_super_page)) &
- kSuperPageBaseMask);
- EXPECT_NE(reinterpret_cast<uintptr_t>(
- PartitionRoot::Page::ToPointer(second_super_page_pages[0])) &
- kSuperPageBaseMask,
- reinterpret_cast<uintptr_t>(
- PartitionRoot::Page::ToPointer(page_in_third_super_page)) &
- kSuperPageBaseMask);
+ EXPECT_NE(
+ reinterpret_cast<uintptr_t>(PartitionRoot<ThreadSafe>::Page::ToPointer(
+ first_super_page_pages[0])) &
+ kSuperPageBaseMask,
+ reinterpret_cast<uintptr_t>(PartitionRoot<ThreadSafe>::Page::ToPointer(
+ page_in_third_super_page)) &
+ kSuperPageBaseMask);
+ EXPECT_NE(
+ reinterpret_cast<uintptr_t>(PartitionRoot<ThreadSafe>::Page::ToPointer(
+ second_super_page_pages[0])) &
+ kSuperPageBaseMask,
+ reinterpret_cast<uintptr_t>(PartitionRoot<ThreadSafe>::Page::ToPointer(
+ page_in_third_super_page)) &
+ kSuperPageBaseMask);
FreeFullPage(allocator.root(), page_in_third_super_page);
for (i = 0; i < num_partition_pages_needed; ++i) {
@@ -1232,18 +1309,20 @@ TEST_F(PartitionAllocTest, MappingCollision) {
TEST_F(PartitionAllocTest, FreeCache) {
EXPECT_EQ(0U, allocator.root()->total_size_of_committed_pages);
- size_t big_size = allocator.root()->max_allocation - kExtraAllocSize;
- size_t bucket_index = (big_size + kExtraAllocSize) >> kBucketShift;
- PartitionBucket<base::internal::NotThreadSafe>* bucket =
- &allocator.root()->buckets()[bucket_index];
+ size_t big_size = 1000 - kExtraAllocSize;
+ size_t bucket_index = SizeToIndex(big_size + kExtraAllocSize);
+ PartitionBucket<base::internal::ThreadSafe>* bucket =
+ &allocator.root()->buckets[bucket_index];
void* ptr = allocator.root()->Alloc(big_size, type_name);
EXPECT_TRUE(ptr);
- PartitionRoot::Page* page =
- PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+ PartitionRoot<ThreadSafe>::Page* page =
+ PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(nullptr, bucket->empty_pages_head);
EXPECT_EQ(1, page->num_allocated_slots);
- EXPECT_EQ(kPartitionPageSize,
+ size_t expected_committed_size = kPartitionPageSize;
+ EXPECT_EQ(expected_committed_size,
allocator.root()->total_size_of_committed_pages);
allocator.root()->Free(ptr);
EXPECT_EQ(0, page->num_allocated_slots);
@@ -1256,11 +1335,11 @@ TEST_F(PartitionAllocTest, FreeCache) {
EXPECT_FALSE(page->freelist_head);
EXPECT_EQ(-1, page->empty_cache_index);
EXPECT_EQ(0, page->num_allocated_slots);
- PartitionBucket<base::internal::NotThreadSafe>* cycle_free_cache_bucket =
- &allocator.root()->buckets()[kTestBucketIndex];
- EXPECT_EQ(
- cycle_free_cache_bucket->num_system_pages_per_slot_span * kSystemPageSize,
- allocator.root()->total_size_of_committed_pages);
+ PartitionBucket<base::internal::ThreadSafe>* cycle_free_cache_bucket =
+ &allocator.root()->buckets[test_bucket_index_];
+ size_t expected_size =
+ cycle_free_cache_bucket->num_system_pages_per_slot_span * kSystemPageSize;
+ EXPECT_EQ(expected_size, allocator.root()->total_size_of_committed_pages);
// Check that an allocation works ok whilst in this state (a free'd page
// as the active pages head).
@@ -1276,7 +1355,7 @@ TEST_F(PartitionAllocTest, FreeCache) {
allocator.root()->Free(ptr);
EXPECT_TRUE(page->freelist_head);
}
- EXPECT_EQ(kPartitionPageSize,
+ EXPECT_EQ(expected_committed_size,
allocator.root()->total_size_of_committed_pages);
}
@@ -1284,9 +1363,9 @@ TEST_F(PartitionAllocTest, FreeCache) {
TEST_F(PartitionAllocTest, LostFreePagesBug) {
size_t size = kPartitionPageSize - kExtraAllocSize;
- void* ptr = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
- void* ptr2 = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr2 = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr2);
PartitionPage<base::internal::ThreadSafe>* page =
@@ -1301,8 +1380,8 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) {
EXPECT_EQ(-1, page->num_allocated_slots);
EXPECT_EQ(1, page2->num_allocated_slots);
- generic_allocator.root()->Free(ptr);
- generic_allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr2);
EXPECT_TRUE(bucket->empty_pages_head);
EXPECT_TRUE(bucket->empty_pages_head->next_page);
@@ -1311,7 +1390,7 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) {
EXPECT_TRUE(page->freelist_head);
EXPECT_TRUE(page2->freelist_head);
- CycleGenericFreeCache(kTestAllocSize);
+ CycleFreeCache(kTestAllocSize);
EXPECT_FALSE(page->freelist_head);
EXPECT_FALSE(page2->freelist_head);
@@ -1322,23 +1401,23 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) {
bucket->active_pages_head);
// At this moment, we have two decommitted pages, on the empty list.
- ptr = generic_allocator.root()->Alloc(size, type_name);
+ ptr = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
EXPECT_EQ(PartitionPage<base::internal::ThreadSafe>::get_sentinel_page(),
bucket->active_pages_head);
EXPECT_TRUE(bucket->empty_pages_head);
EXPECT_TRUE(bucket->decommitted_pages_head);
- CycleGenericFreeCache(kTestAllocSize);
+ CycleFreeCache(kTestAllocSize);
// We're now set up to trigger a historical bug by scanning over the active
// pages list. The current code gets into a different state, but we'll keep
  // the test as an interesting corner case.
- ptr = generic_allocator.root()->Alloc(size, type_name);
+ ptr = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
EXPECT_TRUE(bucket->active_pages_head);
EXPECT_TRUE(bucket->empty_pages_head);
@@ -1421,35 +1500,33 @@ TEST_F(PartitionAllocDeathTest, RepeatedTryReallocReturnNull) {
// malloc(0), which is not good.
TEST_F(PartitionAllocDeathTest, LargeAllocs) {
// Largest alloc.
- EXPECT_DEATH(
- generic_allocator.root()->Alloc(static_cast<size_t>(-1), type_name), "");
+ EXPECT_DEATH(allocator.root()->Alloc(static_cast<size_t>(-1), type_name), "");
// And the smallest allocation we expect to die.
- EXPECT_DEATH(
- generic_allocator.root()->Alloc(kGenericMaxDirectMapped + 1, type_name),
- "");
+ EXPECT_DEATH(allocator.root()->Alloc(kGenericMaxDirectMapped + 1, type_name),
+ "");
}
// Check that our immediate double-free detection works.
TEST_F(PartitionAllocDeathTest, ImmediateDoubleFree) {
- void* ptr = generic_allocator.root()->Alloc(kTestAllocSize, type_name);
+ void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
EXPECT_TRUE(ptr);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
- EXPECT_DEATH(generic_allocator.root()->Free(ptr), "");
+ EXPECT_DEATH(allocator.root()->Free(ptr), "");
}
// Check that our refcount-based double-free detection works.
TEST_F(PartitionAllocDeathTest, RefcountDoubleFree) {
- void* ptr = generic_allocator.root()->Alloc(kTestAllocSize, type_name);
+ void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
EXPECT_TRUE(ptr);
- void* ptr2 = generic_allocator.root()->Alloc(kTestAllocSize, type_name);
+ void* ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
EXPECT_TRUE(ptr2);
- generic_allocator.root()->Free(ptr);
- generic_allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr2);
// This is not an immediate double-free so our immediate detection won't
// fire. However, it does take the "refcount" of the partition page to -1,
// which is illegal and should be trapped.
- EXPECT_DEATH(generic_allocator.root()->Free(ptr), "");
+ EXPECT_DEATH(allocator.root()->Free(ptr), "");
}
// Check that guard pages are present where expected.
@@ -1474,7 +1551,7 @@ TEST_F(PartitionAllocDeathTest, GuardPages) {
static_assert(kSize > kGenericMaxBucketed,
"allocation not large enough for direct allocation");
size_t size = kSize - kExtraAllocSize;
- void* ptr = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
char* char_ptr = reinterpret_cast<char*>(ptr) - kPointerOffset;
@@ -1482,22 +1559,7 @@ TEST_F(PartitionAllocDeathTest, GuardPages) {
EXPECT_DEATH(*(char_ptr - 1) = 'A', "");
EXPECT_DEATH(*(char_ptr + size + kExtraAllocSize) = 'A', "");
- generic_allocator.root()->Free(ptr);
-}
-
-// Check that a bad free() is caught where the free() refers to an unused
-// partition page of a large allocation.
-TEST_F(PartitionAllocDeathTest, FreeWrongPartitionPage) {
- // This large size will result in a direct mapped allocation with guard
- // pages at either end.
- void* ptr =
- generic_allocator.root()->Alloc(kPartitionPageSize * 2, type_name);
- EXPECT_TRUE(ptr);
- char* badPtr = reinterpret_cast<char*>(ptr) + kPartitionPageSize;
-
- EXPECT_DEATH(generic_allocator.root()->Free(badPtr), "");
-
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
}
#endif // !defined(OS_ANDROID) && !defined(OS_IOS)
@@ -1517,11 +1579,10 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
// This series of tests checks the active -> empty -> decommitted states.
{
{
- void* ptr =
- generic_allocator.root()->Alloc(2048 - kExtraAllocSize, type_name);
+ void* ptr = allocator.root()->Alloc(2048 - kExtraAllocSize, type_name);
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
@@ -1536,13 +1597,13 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(1u, stats->num_active_pages);
EXPECT_EQ(0u, stats->num_empty_pages);
EXPECT_EQ(0u, stats->num_decommitted_pages);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
}
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
@@ -1562,12 +1623,12 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
// TODO(crbug.com/722911): Commenting this out causes this test to fail when
// run singly (--gtest_filter=PartitionAllocTest.DumpMemoryStats), but not
// when run with the others (--gtest_filter=PartitionAllocTest.*).
- CycleGenericFreeCache(kTestAllocSize);
+ CycleFreeCache(kTestAllocSize);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
@@ -1588,19 +1649,19 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
// This test checks for correct empty page list accounting.
{
size_t size = kPartitionPageSize - kExtraAllocSize;
- void* ptr1 = generic_allocator.root()->Alloc(size, type_name);
- void* ptr2 = generic_allocator.root()->Alloc(size, type_name);
- generic_allocator.root()->Free(ptr1);
- generic_allocator.root()->Free(ptr2);
+ void* ptr1 = allocator.root()->Alloc(size, type_name);
+ void* ptr2 = allocator.root()->Alloc(size, type_name);
+ allocator.root()->Free(ptr1);
+ allocator.root()->Free(ptr2);
- CycleGenericFreeCache(kTestAllocSize);
+ CycleFreeCache(kTestAllocSize);
- ptr1 = generic_allocator.root()->Alloc(size, type_name);
+ ptr1 = allocator.root()->Alloc(size, type_name);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -1617,7 +1678,7 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_empty_pages);
EXPECT_EQ(1u, stats->num_decommitted_pages);
}
- generic_allocator.root()->Free(ptr1);
+ allocator.root()->Free(ptr1);
}
// This test checks for correct direct mapped accounting.
@@ -1628,13 +1689,13 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
(size_smaller + kSystemPageOffsetMask) & kSystemPageBaseMask;
size_t real_size_bigger =
(size_bigger + kSystemPageOffsetMask) & kSystemPageBaseMask;
- void* ptr = generic_allocator.root()->Alloc(size_smaller, type_name);
- void* ptr2 = generic_allocator.root()->Alloc(size_bigger, type_name);
+ void* ptr = allocator.root()->Alloc(size_smaller, type_name);
+ void* ptr2 = allocator.root()->Alloc(size_bigger, type_name);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -1667,26 +1728,26 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_decommitted_pages);
}
- generic_allocator.root()->Free(ptr2);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr);
// Whilst we're here, allocate again and free with different ordering to
// give a workout to our linked list code.
- ptr = generic_allocator.root()->Alloc(size_smaller, type_name);
- ptr2 = generic_allocator.root()->Alloc(size_bigger, type_name);
- generic_allocator.root()->Free(ptr);
- generic_allocator.root()->Free(ptr2);
+ ptr = allocator.root()->Alloc(size_smaller, type_name);
+ ptr2 = allocator.root()->Alloc(size_bigger, type_name);
+ allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr2);
}
// This test checks large-but-not-quite-direct allocations.
{
constexpr size_t requested_size = 16 * kSystemPageSize;
- void* ptr = generic_allocator.root()->Alloc(requested_size + 1, type_name);
+ void* ptr = allocator.root()->Alloc(requested_size + 1, type_name);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
size_t slot_size =
@@ -1707,12 +1768,12 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_decommitted_pages);
}
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
size_t slot_size =
@@ -1732,14 +1793,14 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_decommitted_pages);
}
- void* ptr2 = generic_allocator.root()->Alloc(
- requested_size + kSystemPageSize + 1, type_name);
+ void* ptr2 = allocator.root()->Alloc(requested_size + kSystemPageSize + 1,
+ type_name);
EXPECT_EQ(ptr, ptr2);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
size_t slot_size =
@@ -1761,19 +1822,19 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_decommitted_pages);
}
- generic_allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr2);
}
}
// Tests the API to purge freeable memory.
TEST_F(PartitionAllocTest, Purge) {
char* ptr = reinterpret_cast<char*>(
- generic_allocator.root()->Alloc(2048 - kExtraAllocSize, type_name));
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Alloc(2048 - kExtraAllocSize, type_name));
+ allocator.root()->Free(ptr);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
@@ -1782,11 +1843,11 @@ TEST_F(PartitionAllocTest, Purge) {
EXPECT_EQ(kSystemPageSize, stats->decommittable_bytes);
EXPECT_EQ(kSystemPageSize, stats->resident_bytes);
}
- generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
@@ -1797,12 +1858,12 @@ TEST_F(PartitionAllocTest, Purge) {
}
// Calling purge again here is a good way of testing we didn't mess up the
// state of the free cache ring.
- generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
- char* big_ptr = reinterpret_cast<char*>(
- generic_allocator.root()->Alloc(256 * 1024, type_name));
- generic_allocator.root()->Free(big_ptr);
- generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
+ char* big_ptr =
+ reinterpret_cast<char*>(allocator.root()->Alloc(256 * 1024, type_name));
+ allocator.root()->Free(big_ptr);
+ allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
CHECK_PAGE_IN_CORE(ptr - kPointerOffset, false);
CHECK_PAGE_IN_CORE(big_ptr - kPointerOffset, false);
@@ -1816,12 +1877,12 @@ TEST_F(PartitionAllocTest, PreferActiveOverEmpty) {
// Allocate 3 full slot spans worth of 8192-byte allocations.
// Each slot span for this size is 16384 bytes, or 1 partition page and 2
// slots.
- void* ptr1 = generic_allocator.root()->Alloc(size, type_name);
- void* ptr2 = generic_allocator.root()->Alloc(size, type_name);
- void* ptr3 = generic_allocator.root()->Alloc(size, type_name);
- void* ptr4 = generic_allocator.root()->Alloc(size, type_name);
- void* ptr5 = generic_allocator.root()->Alloc(size, type_name);
- void* ptr6 = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr1 = allocator.root()->Alloc(size, type_name);
+ void* ptr2 = allocator.root()->Alloc(size, type_name);
+ void* ptr3 = allocator.root()->Alloc(size, type_name);
+ void* ptr4 = allocator.root()->Alloc(size, type_name);
+ void* ptr5 = allocator.root()->Alloc(size, type_name);
+ void* ptr6 = allocator.root()->Alloc(size, type_name);
PartitionPage<base::internal::ThreadSafe>* page1 =
PartitionPage<base::internal::ThreadSafe>::FromPointer(
@@ -1840,45 +1901,45 @@ TEST_F(PartitionAllocTest, PreferActiveOverEmpty) {
// Free up the 2nd slot in each slot span.
// This leaves the active list containing 3 pages, each with 1 used and 1
// free slot. The active page will be the one containing ptr1.
- generic_allocator.root()->Free(ptr6);
- generic_allocator.root()->Free(ptr4);
- generic_allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr6);
+ allocator.root()->Free(ptr4);
+ allocator.root()->Free(ptr2);
EXPECT_EQ(page1, bucket->active_pages_head);
// Empty the middle page in the active list.
- generic_allocator.root()->Free(ptr3);
+ allocator.root()->Free(ptr3);
EXPECT_EQ(page1, bucket->active_pages_head);
  // Empty the first page in the active list -- also the current page.
- generic_allocator.root()->Free(ptr1);
+ allocator.root()->Free(ptr1);
// A good choice here is to re-fill the third page since the first two are
// empty. We used to fail that.
- void* ptr7 = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr7 = allocator.root()->Alloc(size, type_name);
EXPECT_EQ(ptr6, ptr7);
EXPECT_EQ(page3, bucket->active_pages_head);
- generic_allocator.root()->Free(ptr5);
- generic_allocator.root()->Free(ptr7);
+ allocator.root()->Free(ptr5);
+ allocator.root()->Free(ptr7);
}
// Tests the API to purge discardable memory.
TEST_F(PartitionAllocTest, PurgeDiscardable) {
// Free the second of two 4096 byte allocations and then purge.
{
- void* ptr1 = generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name);
- char* ptr2 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name));
- generic_allocator.root()->Free(ptr2);
+ void* ptr1 =
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name);
+ char* ptr2 = reinterpret_cast<char*>(
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name));
+ allocator.root()->Free(ptr2);
PartitionPage<base::internal::ThreadSafe>* page =
PartitionPage<base::internal::ThreadSafe>::FromPointer(
PartitionCookieFreePointerAdjust(ptr1));
EXPECT_EQ(2u, page->num_unprovisioned_slots);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -1891,24 +1952,23 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
EXPECT_EQ(2 * kSystemPageSize, stats->resident_bytes);
}
CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, true);
- generic_allocator.root()->PurgeMemory(
- PartitionPurgeDiscardUnusedSystemPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);
CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, false);
EXPECT_EQ(3u, page->num_unprovisioned_slots);
- generic_allocator.root()->Free(ptr1);
+ allocator.root()->Free(ptr1);
}
// Free the first of two 4096 byte allocations and then purge.
{
- char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name));
- void* ptr2 = generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name);
- generic_allocator.root()->Free(ptr1);
+ char* ptr1 = reinterpret_cast<char*>(
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name));
+ void* ptr2 =
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name);
+ allocator.root()->Free(ptr1);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -1925,30 +1985,29 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
EXPECT_EQ(2 * kSystemPageSize, stats->resident_bytes);
}
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
- generic_allocator.root()->PurgeMemory(
- PartitionPurgeDiscardUnusedSystemPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, false);
- generic_allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr2);
}
{
constexpr size_t requested_size = 2.25 * kSystemPageSize;
- char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
- requested_size - kExtraAllocSize, type_name));
- void* ptr2 = generic_allocator.root()->Alloc(
- requested_size - kExtraAllocSize, type_name);
- void* ptr3 = generic_allocator.root()->Alloc(
- requested_size - kExtraAllocSize, type_name);
- void* ptr4 = generic_allocator.root()->Alloc(
- requested_size - kExtraAllocSize, type_name);
+ char* ptr1 = reinterpret_cast<char*>(
+ allocator.root()->Alloc(requested_size - kExtraAllocSize, type_name));
+ void* ptr2 =
+ allocator.root()->Alloc(requested_size - kExtraAllocSize, type_name);
+ void* ptr3 =
+ allocator.root()->Alloc(requested_size - kExtraAllocSize, type_name);
+ void* ptr4 =
+ allocator.root()->Alloc(requested_size - kExtraAllocSize, type_name);
memset(ptr1, 'A', requested_size - kExtraAllocSize);
memset(ptr2, 'A', requested_size - kExtraAllocSize);
- generic_allocator.root()->Free(ptr2);
- generic_allocator.root()->Free(ptr1);
+ allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr1);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -1965,16 +2024,15 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 4), true);
- generic_allocator.root()->PurgeMemory(
- PartitionPurgeDiscardUnusedSystemPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, false);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 4), true);
- generic_allocator.root()->Free(ptr3);
- generic_allocator.root()->Free(ptr4);
+ allocator.root()->Free(ptr3);
+ allocator.root()->Free(ptr4);
}
// When kSystemPageSize = 16384 (as on _MIPS_ARCH_LOONGSON), 64 *
@@ -1985,16 +2043,16 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
// for clarity of purpose and for applicability to more architectures.
#if defined(_MIPS_ARCH_LOONGSON)
{
- char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+ char* ptr1 = reinterpret_cast<char*>(allocator.root()->Alloc(
(32 * kSystemPageSize) - kExtraAllocSize, type_name));
memset(ptr1, 'A', (32 * kSystemPageSize) - kExtraAllocSize);
- generic_allocator.root()->Free(ptr1);
- ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+ allocator.root()->Free(ptr1);
+ ptr1 = reinterpret_cast<char*>(allocator.root()->Alloc(
(31 * kSystemPageSize) - kExtraAllocSize, type_name));
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -2008,25 +2066,24 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
}
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 30), true);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 31), true);
- generic_allocator.root()->PurgeMemory(
- PartitionPurgeDiscardUnusedSystemPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 30), true);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 31), false);
- generic_allocator.root()->Free(ptr1);
+ allocator.root()->Free(ptr1);
}
#else
{
- char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+ char* ptr1 = reinterpret_cast<char*>(allocator.root()->Alloc(
(64 * kSystemPageSize) - kExtraAllocSize, type_name));
memset(ptr1, 'A', (64 * kSystemPageSize) - kExtraAllocSize);
- generic_allocator.root()->Free(ptr1);
- ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+ allocator.root()->Free(ptr1);
+ ptr1 = reinterpret_cast<char*>(allocator.root()->Alloc(
(61 * kSystemPageSize) - kExtraAllocSize, type_name));
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -2042,28 +2099,27 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 61), true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 62), true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 63), true);
- generic_allocator.root()->PurgeMemory(
- PartitionPurgeDiscardUnusedSystemPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 60), true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 61), false);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 62), false);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 63), false);
- generic_allocator.root()->Free(ptr1);
+ allocator.root()->Free(ptr1);
}
#endif
// This sub-test tests truncation of the provisioned slots in a trickier
// case where the freelist is rewritten.
- generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
{
- char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name));
- void* ptr2 = generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name);
- void* ptr3 = generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name);
- void* ptr4 = generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name);
+ char* ptr1 = reinterpret_cast<char*>(
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name));
+ void* ptr2 =
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr3 =
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr4 =
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name);
ptr1[0] = 'A';
ptr1[kSystemPageSize] = 'A';
ptr1[kSystemPageSize * 2] = 'A';
@@ -2071,15 +2127,15 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
PartitionPage<base::internal::ThreadSafe>* page =
PartitionPage<base::internal::ThreadSafe>::FromPointer(
PartitionCookieFreePointerAdjust(ptr1));
- generic_allocator.root()->Free(ptr2);
- generic_allocator.root()->Free(ptr4);
- generic_allocator.root()->Free(ptr1);
+ allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr4);
+ allocator.root()->Free(ptr1);
EXPECT_EQ(0u, page->num_unprovisioned_slots);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -2099,8 +2155,7 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
- generic_allocator.root()->PurgeMemory(
- PartitionPurgeDiscardUnusedSystemPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);
EXPECT_EQ(1u, page->num_unprovisioned_slots);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, false);
@@ -2108,29 +2163,29 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
// Let's check we didn't brick the freelist.
- void* ptr1b = generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr1b =
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name);
EXPECT_EQ(ptr1, ptr1b);
- void* ptr2b = generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr2b =
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name);
EXPECT_EQ(ptr2, ptr2b);
EXPECT_FALSE(page->freelist_head);
- generic_allocator.root()->Free(ptr1);
- generic_allocator.root()->Free(ptr2);
- generic_allocator.root()->Free(ptr3);
+ allocator.root()->Free(ptr1);
+ allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr3);
}
// This sub-test is similar, but tests a double-truncation.
- generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
{
- char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name));
- void* ptr2 = generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name);
- void* ptr3 = generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name);
- void* ptr4 = generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name);
+ char* ptr1 = reinterpret_cast<char*>(
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name));
+ void* ptr2 =
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr3 =
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr4 =
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name);
ptr1[0] = 'A';
ptr1[kSystemPageSize] = 'A';
ptr1[kSystemPageSize * 2] = 'A';
@@ -2138,14 +2193,14 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
PartitionPage<base::internal::ThreadSafe>* page =
PartitionPage<base::internal::ThreadSafe>::FromPointer(
PartitionCookieFreePointerAdjust(ptr1));
- generic_allocator.root()->Free(ptr4);
- generic_allocator.root()->Free(ptr3);
+ allocator.root()->Free(ptr4);
+ allocator.root()->Free(ptr3);
EXPECT_EQ(0u, page->num_unprovisioned_slots);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -2161,8 +2216,7 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
- generic_allocator.root()->PurgeMemory(
- PartitionPurgeDiscardUnusedSystemPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);
EXPECT_EQ(2u, page->num_unprovisioned_slots);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, true);
@@ -2171,8 +2225,8 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
EXPECT_FALSE(page->freelist_head);
- generic_allocator.root()->Free(ptr1);
- generic_allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr1);
+ allocator.root()->Free(ptr2);
}
}
@@ -2183,35 +2237,35 @@ TEST_F(PartitionAllocTest, ReallocMovesCookies) {
// and we can track the "raw" size. See https://crbug.com/709271
static constexpr size_t kSize =
base::kMaxSystemPagesPerSlotSpan * base::kSystemPageSize;
- void* ptr = generic_allocator.root()->Alloc(kSize + 1, type_name);
+ void* ptr = allocator.root()->Alloc(kSize + 1, type_name);
EXPECT_TRUE(ptr);
memset(ptr, 0xbd, kSize + 1);
- ptr = generic_allocator.root()->Realloc(ptr, kSize + 2, type_name);
+ ptr = allocator.root()->Realloc(ptr, kSize + 2, type_name);
EXPECT_TRUE(ptr);
memset(ptr, 0xbd, kSize + 2);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
}
TEST_F(PartitionAllocTest, SmallReallocDoesNotMoveTrailingCookie) {
// For crbug.com/781473
static constexpr size_t kSize = 264;
- void* ptr = generic_allocator.root()->Alloc(kSize, type_name);
+ void* ptr = allocator.root()->Alloc(kSize, type_name);
EXPECT_TRUE(ptr);
- ptr = generic_allocator.root()->Realloc(ptr, kSize + 16, type_name);
+ ptr = allocator.root()->Realloc(ptr, kSize + 16, type_name);
EXPECT_TRUE(ptr);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
}
TEST_F(PartitionAllocTest, ZeroFill) {
constexpr static size_t kAllZerosSentinel =
std::numeric_limits<size_t>::max();
for (size_t size : kTestSizes) {
- char* p = static_cast<char*>(PartitionAllocGenericFlags(
- generic_allocator.root(), PartitionAllocZeroFill, size, nullptr));
+ char* p = static_cast<char*>(
+ allocator.root()->AllocFlags(PartitionAllocZeroFill, size, nullptr));
size_t non_zero_position = kAllZerosSentinel;
for (size_t i = 0; i < size; ++i) {
if (0 != p[i]) {
@@ -2221,12 +2275,12 @@ TEST_F(PartitionAllocTest, ZeroFill) {
}
EXPECT_EQ(kAllZerosSentinel, non_zero_position)
<< "test allocation size: " << size;
- generic_allocator.root()->Free(p);
+ allocator.root()->Free(p);
}
for (int i = 0; i < 10; ++i) {
SCOPED_TRACE(i);
- AllocateRandomly(generic_allocator.root(), 250, PartitionAllocZeroFill);
+ AllocateRandomly(allocator.root(), 250, PartitionAllocZeroFill);
}
}
@@ -2236,16 +2290,14 @@ TEST_F(PartitionAllocTest, Bug_897585) {
// test case in the indicated bug.
size_t kInitialSize = 983040;
size_t kDesiredSize = 983100;
- void* ptr = PartitionAllocGenericFlags(generic_allocator.root(),
- PartitionAllocReturnNull, kInitialSize,
- nullptr);
+ void* ptr = allocator.root()->AllocFlags(PartitionAllocReturnNull,
+ kInitialSize, nullptr);
ASSERT_NE(nullptr, ptr);
- ptr = PartitionReallocGenericFlags(generic_allocator.root(),
- PartitionAllocReturnNull, ptr,
- kDesiredSize, nullptr);
+ ptr = allocator.root()->ReallocFlags(PartitionAllocReturnNull, ptr,
+ kDesiredSize, nullptr);
ASSERT_NE(nullptr, ptr);
memset(ptr, 0xbd, kDesiredSize);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
}
TEST_F(PartitionAllocTest, OverrideHooks) {
@@ -2282,29 +2334,90 @@ TEST_F(PartitionAllocTest, OverrideHooks) {
return false;
});
- void* ptr = PartitionAllocGenericFlags(generic_allocator.root(),
- PartitionAllocReturnNull,
- kOverriddenSize, kOverriddenType);
+ void* ptr = allocator.root()->AllocFlags(PartitionAllocReturnNull,
+ kOverriddenSize, kOverriddenType);
ASSERT_EQ(ptr, overridden_allocation);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
EXPECT_TRUE(free_called);
// overridden_allocation has not actually been freed so we can now immediately
// realloc it.
free_called = false;
- ptr = PartitionReallocGenericFlags(generic_allocator.root(),
- PartitionAllocReturnNull, ptr, 1, nullptr);
+ ptr =
+ allocator.root()->ReallocFlags(PartitionAllocReturnNull, ptr, 1, nullptr);
ASSERT_NE(ptr, nullptr);
EXPECT_NE(ptr, overridden_allocation);
EXPECT_TRUE(free_called);
EXPECT_EQ(*(char*)ptr, kOverriddenChar);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
PartitionAllocHooks::SetOverrideHooks(nullptr, nullptr, nullptr);
free(overridden_allocation);
}
+TEST_F(PartitionAllocTest, Alignment) {
+ std::vector<void*> allocated_ptrs;
+
+ for (size_t size = 1; size <= base::kSystemPageSize; size <<= 1) {
+ // All allocations which are not direct-mapped occupy contiguous slots of a
+ // span, starting on a page boundary. This means that allocations are first
+ // rounded up to the nearest bucket size, then have an address of the form:
+ //
+ // (page-aligned address) + i * bucket_size.
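+    // For instance, assuming 4096-byte system pages and a 64-byte bucket,
+    // slots sit at page, page + 64, page + 128, ..., so each one is aligned
+    // on 64 bytes.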
+#if DCHECK_IS_ON()
+ // When DCHECK_IS_ON(), a kCookieSize (16) cookie is added on both sides
+ // before rounding up the allocation size. The returned pointer points after
+ // the cookie.
+ //
+ // All in all, a power-of-two allocation is aligned on
+ // min(16, requested_size).
+ size_t expected_alignment = std::min(size, static_cast<size_t>(16));
+#else
+ // All powers of two are bucket sizes, meaning that all power of two
+ // allocations smaller than a page will be aligned on the allocation size.
+ size_t expected_alignment = size;
+#endif
+ for (int index = 0; index < 3; index++) {
+ void* ptr = allocator.root()->Alloc(size, "");
+ allocated_ptrs.push_back(ptr);
+ EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) % expected_alignment)
+ << index << "-th allocation of size = " << size;
+ }
+ }
+
+ for (void* ptr : allocated_ptrs)
+ allocator.root()->Free(ptr);
+}
+
+TEST_F(PartitionAllocTest, FundamentalAlignment) {
+ // See the test above for details. Essentially, checking the bucket size is
+ // sufficient to ensure that alignment will always be respected, as long as
+ // the fundamental alignment is <= 16 bytes.
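+  // For instance, alignof(std::max_align_t) is typically 16 on 64-bit
+  // targets, so even a 1-byte allocation must land on a 16-byte boundary.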
+ size_t fundamental_alignment = alignof(std::max_align_t);
+ for (size_t size = 0; size < base::kSystemPageSize; size++) {
+    // Allocate several pointers, as the first one handed out in a size class
+    // lands on a page boundary and would pass this check trivially.
+ void* ptr = allocator.root()->Alloc(size, "");
+ void* ptr2 = allocator.root()->Alloc(size, "");
+ void* ptr3 = allocator.root()->Alloc(size, "");
+
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr) % fundamental_alignment,
+ static_cast<uintptr_t>(0));
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr2) % fundamental_alignment,
+ static_cast<uintptr_t>(0));
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr3) % fundamental_alignment,
+ static_cast<uintptr_t>(0));
+
+ EXPECT_EQ(PartitionAllocGetSize<ThreadSafe>(ptr) % fundamental_alignment,
+ static_cast<uintptr_t>(0));
+
+ allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr3);
+ }
+}
+
} // namespace internal
} // namespace base
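
A compact sketch of the arithmetic the two alignment tests above rely on: non-direct-mapped slots are handed out at (page-aligned span base) + i * bucket_size, and in builds with cookies the returned pointer is additionally offset by the 16-byte cookie, which caps the achievable alignment. The helper below only restates the tests' reasoning and is not part of the allocator.

// Sketch only: expected alignment of a power-of-two request smaller than a
// page, mirroring the Alignment test above.
#include <algorithm>
#include <cstddef>

constexpr size_t kAssumedCookieSize = 16;  // kCookieSize when cookies are on.

size_t ExpectedAlignment(size_t requested_size, bool cookies_enabled) {
  // Slot address = span_base + i * bucket_size; for power-of-two sizes below a
  // page, bucket_size == requested_size, so slots start requested_size-aligned.
  if (!cookies_enabled)
    return requested_size;
  // The leading cookie shifts the returned pointer by 16 bytes.
  return std::min(requested_size, kAssumedCookieSize);
}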
diff --git a/chromium/base/allocator/partition_allocator/partition_bucket.cc b/chromium/base/allocator/partition_allocator/partition_bucket.cc
index a52efccbf6a..f73a43eef8d 100644
--- a/chromium/base/allocator/partition_allocator/partition_bucket.cc
+++ b/chromium/base/allocator/partition_allocator/partition_bucket.cc
@@ -4,13 +4,18 @@
#include "base/allocator/partition_allocator/partition_bucket.h"
+#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/partition_address_space.h"
+#include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
-#include "base/allocator/partition_allocator/partition_root_base.h"
+
#include "base/check.h"
#include "build/build_config.h"
@@ -21,7 +26,7 @@ namespace {
template <bool thread_safe>
ALWAYS_INLINE PartitionPage<thread_safe>* PartitionDirectMap(
- PartitionRootBase<thread_safe>* root,
+ PartitionRoot<thread_safe>* root,
int flags,
size_t raw_size) {
size_t size = PartitionBucket<thread_safe>::get_direct_map_size(raw_size);
@@ -41,9 +46,19 @@ ALWAYS_INLINE PartitionPage<thread_safe>* PartitionDirectMap(
map_size += kPageAllocationGranularityOffsetMask;
map_size &= kPageAllocationGranularityBaseMask;
- char* ptr = reinterpret_cast<char*>(AllocPages(nullptr, map_size,
- kSuperPageSize, PageReadWrite,
- PageTag::kPartitionAlloc));
+ char* ptr = nullptr;
+ if (IsPartitionAllocGigaCageEnabled()) {
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ ptr = internal::AddressPoolManager::GetInstance()->Alloc(GetDirectMapPool(),
+ map_size);
+#else
+ NOTREACHED();
+#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ } else {
+ ptr = reinterpret_cast<char*>(AllocPages(nullptr, map_size, kSuperPageSize,
+ PageReadWrite,
+ PageTag::kPartitionAlloc));
+ }
if (UNLIKELY(!ptr))
return nullptr;
@@ -65,29 +80,29 @@ ALWAYS_INLINE PartitionPage<thread_safe>* PartitionDirectMap(
extent->root = root;
// The new structures are all located inside a fresh system page so they
// will all be zeroed out. These DCHECKs are for documentation.
- DCHECK(!extent->super_page_base);
- DCHECK(!extent->super_pages_end);
- DCHECK(!extent->next);
+ PA_DCHECK(!extent->super_page_base);
+ PA_DCHECK(!extent->super_pages_end);
+ PA_DCHECK(!extent->next);
PartitionPage<thread_safe>* page =
PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(slot);
auto* bucket = reinterpret_cast<PartitionBucket<thread_safe>*>(
reinterpret_cast<char*>(page) + (kPageMetadataSize * 2));
- DCHECK(!page->next_page);
- DCHECK(!page->num_allocated_slots);
- DCHECK(!page->num_unprovisioned_slots);
- DCHECK(!page->page_offset);
- DCHECK(!page->empty_cache_index);
+ PA_DCHECK(!page->next_page);
+ PA_DCHECK(!page->num_allocated_slots);
+ PA_DCHECK(!page->num_unprovisioned_slots);
+ PA_DCHECK(!page->page_offset);
+ PA_DCHECK(!page->empty_cache_index);
page->bucket = bucket;
page->freelist_head = reinterpret_cast<PartitionFreelistEntry*>(slot);
PartitionFreelistEntry* next_entry =
reinterpret_cast<PartitionFreelistEntry*>(slot);
next_entry->next = PartitionFreelistEntry::Encode(nullptr);
- DCHECK(!bucket->active_pages_head);
- DCHECK(!bucket->empty_pages_head);
- DCHECK(!bucket->decommitted_pages_head);
- DCHECK(!bucket->num_system_pages_per_slot_span);
- DCHECK(!bucket->num_full_pages);
+ PA_DCHECK(!bucket->active_pages_head);
+ PA_DCHECK(!bucket->empty_pages_head);
+ PA_DCHECK(!bucket->decommitted_pages_head);
+ PA_DCHECK(!bucket->num_system_pages_per_slot_span);
+ PA_DCHECK(!bucket->num_full_pages);
bucket->slot_size = size;
PartitionDirectMapExtent<thread_safe>* map_extent =
@@ -142,15 +157,15 @@ uint8_t PartitionBucket<thread_safe>::get_system_pages_per_slot_span() {
if (slot_size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
// TODO(ajwong): Why is there a DCHECK here for this?
// http://crbug.com/776537
- DCHECK(!(slot_size % kSystemPageSize));
+ PA_DCHECK(!(slot_size % kSystemPageSize));
best_pages = static_cast<uint16_t>(slot_size / kSystemPageSize);
// TODO(ajwong): Should this be checking against
// kMaxSystemPagesPerSlotSpan or numeric_limits<uint8_t>::max?
// http://crbug.com/776537
- CHECK(best_pages < (1 << 8));
+ PA_CHECK(best_pages < (1 << 8));
return static_cast<uint8_t>(best_pages);
}
- DCHECK(slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
+ PA_DCHECK(slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
for (uint16_t i = kNumSystemPagesPerPartitionPage - 1;
i <= kMaxSystemPagesPerSlotSpan; ++i) {
size_t page_size = kSystemPageSize * i;
@@ -176,8 +191,8 @@ uint8_t PartitionBucket<thread_safe>::get_system_pages_per_slot_span() {
best_pages = i;
}
}
- DCHECK(best_pages > 0);
- CHECK(best_pages <= kMaxSystemPagesPerSlotSpan);
+ PA_DCHECK(best_pages > 0);
+ PA_CHECK(best_pages <= kMaxSystemPagesPerSlotSpan);
return static_cast<uint8_t>(best_pages);
}
@@ -198,14 +213,14 @@ NOINLINE void PartitionBucket<thread_safe>::OnFull() {
template <bool thread_safe>
ALWAYS_INLINE void* PartitionBucket<thread_safe>::AllocNewSlotSpan(
- PartitionRootBase<thread_safe>* root,
+ PartitionRoot<thread_safe>* root,
int flags,
uint16_t num_partition_pages) {
- DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page) %
- kPartitionPageSize));
- DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page_end) %
- kPartitionPageSize));
- DCHECK(num_partition_pages <= kNumPartitionPagesPerSuperPage);
+ PA_DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page) %
+ kPartitionPageSize));
+ PA_DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page_end) %
+ kPartitionPageSize));
+ PA_DCHECK(num_partition_pages <= kNumPartitionPagesPerSuperPage);
size_t total_size = kPartitionPageSize * num_partition_pages;
size_t num_partition_pages_left =
(root->next_partition_page_end - root->next_partition_page) >>
@@ -229,9 +244,19 @@ ALWAYS_INLINE void* PartitionBucket<thread_safe>::AllocNewSlotSpan(
// page table bloat and not fragmenting address spaces in 32 bit
// architectures.
char* requested_address = root->next_super_page;
- char* super_page = reinterpret_cast<char*>(
- AllocPages(requested_address, kSuperPageSize, kSuperPageSize,
- PageReadWrite, PageTag::kPartitionAlloc));
+ char* super_page = nullptr;
+ if (IsPartitionAllocGigaCageEnabled()) {
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ super_page = AddressPoolManager::GetInstance()->Alloc(GetNormalBucketPool(),
+ kSuperPageSize);
+#else
+ NOTREACHED();
+#endif
+ } else {
+ super_page = reinterpret_cast<char*>(
+ AllocPages(requested_address, kSuperPageSize, kSuperPageSize,
+ PageReadWrite, PageTag::kPartitionAlloc));
+ }
if (UNLIKELY(!super_page))
return nullptr;
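
Both reservation sites touched by this patch (the direct-map path earlier and the super-page path here) share the same shape: with the GigaCage enabled on 64-bit, address space comes out of a pre-reserved AddressPoolManager pool; otherwise the allocator falls back to AllocPages. A condensed sketch of that selection, where ReserveAddressSpace is a hypothetical wrapper and the pool handle type is assumed from GetDirectMapPool()/GetNormalBucketPool():

// Sketch only: common GigaCage-vs-AllocPages selection used by both hunks.
char* ReserveAddressSpace(internal::pool_handle pool, size_t size, char* hint) {
  if (IsPartitionAllocGigaCageEnabled()) {
#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
    // Carve the reservation out of the pre-reserved pool (GigaCage).
    return internal::AddressPoolManager::GetInstance()->Alloc(pool, size);
#else
    NOTREACHED();  // GigaCage is only supported on 64-bit, non-NaCl builds.
    return nullptr;
#endif
  }
  // Legacy path: ask the OS for super-page-aligned memory directly.
  return reinterpret_cast<char*>(AllocPages(
      hint, size, kSuperPageSize, PageReadWrite, PageTag::kPartitionAlloc));
}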
@@ -294,13 +319,13 @@ ALWAYS_INLINE void* PartitionBucket<thread_safe>::AllocNewSlotSpan(
PartitionSuperPageExtentEntry<thread_safe>* current_extent =
root->current_extent;
- bool is_new_extent = (super_page != requested_address);
+ const bool is_new_extent = super_page != requested_address;
if (UNLIKELY(is_new_extent)) {
if (UNLIKELY(!current_extent)) {
- DCHECK(!root->first_extent);
+ PA_DCHECK(!root->first_extent);
root->first_extent = latest_extent;
} else {
- DCHECK(current_extent->super_page_base);
+ PA_DCHECK(current_extent->super_page_base);
current_extent->next = latest_extent;
}
root->current_extent = latest_extent;
@@ -309,10 +334,10 @@ ALWAYS_INLINE void* PartitionBucket<thread_safe>::AllocNewSlotSpan(
} else {
// We allocated next to an existing extent so just nudge the size up a
// little.
- DCHECK(current_extent->super_pages_end);
+ PA_DCHECK(current_extent->super_pages_end);
current_extent->super_pages_end += kSuperPageSize;
- DCHECK(ret >= current_extent->super_page_base &&
- ret < current_extent->super_pages_end);
+ PA_DCHECK(ret >= current_extent->super_page_base &&
+ ret < current_extent->super_pages_end);
}
return ret;
}
@@ -334,12 +359,6 @@ ALWAYS_INLINE void PartitionBucket<thread_safe>::InitializeSlotSpan(
page->Reset();
- // If this page has just a single slot, do not set up page offsets for any
- // page metadata other than the first one. This ensures that attempts to
- // touch invalid page metadata fail.
- if (page->num_unprovisioned_slots == 1)
- return;
-
uint16_t num_partition_pages = get_pages_per_slot_span();
char* page_char_ptr = reinterpret_cast<char*>(page);
for (uint16_t i = 1; i < num_partition_pages; ++i) {
@@ -353,16 +372,16 @@ ALWAYS_INLINE void PartitionBucket<thread_safe>::InitializeSlotSpan(
template <bool thread_safe>
ALWAYS_INLINE char* PartitionBucket<thread_safe>::AllocAndFillFreelist(
PartitionPage<thread_safe>* page) {
- DCHECK(page != PartitionPage<thread_safe>::get_sentinel_page());
+ PA_DCHECK(page != PartitionPage<thread_safe>::get_sentinel_page());
uint16_t num_slots = page->num_unprovisioned_slots;
- DCHECK(num_slots);
+ PA_DCHECK(num_slots);
// We should only get here when _every_ slot is either used or unprovisioned.
// (The third state is "on the freelist". If we have a non-empty freelist, we
// should not get here.)
- DCHECK(num_slots + page->num_allocated_slots == get_slots_per_span());
+ PA_DCHECK(num_slots + page->num_allocated_slots == get_slots_per_span());
// Similarly, make explicitly sure that the freelist is empty.
- DCHECK(!page->freelist_head);
- DCHECK(page->num_allocated_slots >= 0);
+ PA_DCHECK(!page->freelist_head);
+ PA_DCHECK(page->num_allocated_slots >= 0);
size_t size = slot_size;
char* base =
@@ -396,7 +415,7 @@ ALWAYS_INLINE char* PartitionBucket<thread_safe>::AllocAndFillFreelist(
// We always return an object slot -- that's the +1 below.
// We do not necessarily create any new freelist entries, because we cross
// sub-page boundaries frequently for large bucket sizes.
- DCHECK(num_new_freelist_entries + 1 <= num_slots);
+ PA_DCHECK(num_new_freelist_entries + 1 <= num_slots);
num_slots -= (num_new_freelist_entries + 1);
page->num_unprovisioned_slots = num_slots;
page->num_allocated_slots++;
@@ -429,9 +448,9 @@ bool PartitionBucket<thread_safe>::SetNewActivePage() {
for (; page; page = next_page) {
next_page = page->next_page;
- DCHECK(page->bucket == this);
- DCHECK(page != empty_pages_head);
- DCHECK(page != decommitted_pages_head);
+ PA_DCHECK(page->bucket == this);
+ PA_DCHECK(page != empty_pages_head);
+ PA_DCHECK(page != decommitted_pages_head);
if (LIKELY(page->is_active())) {
// This page is usable because it has freelist entries, or has
@@ -448,7 +467,7 @@ bool PartitionBucket<thread_safe>::SetNewActivePage() {
page->next_page = decommitted_pages_head;
decommitted_pages_head = page;
} else {
- DCHECK(page->is_full());
+ PA_DCHECK(page->is_full());
// If we get here, we found a full page. Skip over it too, and also
// tag it as full (via a negative value). We need it tagged so that
// free'ing can tell, and move it back into the active page list.
@@ -469,12 +488,12 @@ bool PartitionBucket<thread_safe>::SetNewActivePage() {
template <bool thread_safe>
void* PartitionBucket<thread_safe>::SlowPathAlloc(
- PartitionRootBase<thread_safe>* root,
+ PartitionRoot<thread_safe>* root,
int flags,
size_t size,
bool* is_already_zeroed) {
// The slow path is called when the freelist is empty.
- DCHECK(!active_pages_head->freelist_head);
+ PA_DCHECK(!active_pages_head->freelist_head);
PartitionPage<thread_safe>* new_page = nullptr;
*is_already_zeroed = false;
@@ -490,10 +509,10 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
// the empty or decommitted lists which affects the subsequent conditional.
bool return_null = flags & PartitionAllocReturnNull;
if (UNLIKELY(is_direct_mapped())) {
- DCHECK(size > kGenericMaxBucketed);
- DCHECK(this == get_sentinel_bucket());
- DCHECK(active_pages_head ==
- PartitionPage<thread_safe>::get_sentinel_page());
+ PA_DCHECK(size > kGenericMaxBucketed);
+ PA_DCHECK(this == get_sentinel_bucket());
+ PA_DCHECK(active_pages_head ==
+ PartitionPage<thread_safe>::get_sentinel_page());
if (size > kGenericMaxDirectMapped) {
if (return_null)
return nullptr;
@@ -504,29 +523,29 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
} else if (LIKELY(SetNewActivePage())) {
// First, did we find an active page in the active pages list?
new_page = active_pages_head;
- DCHECK(new_page->is_active());
+ PA_DCHECK(new_page->is_active());
} else if (LIKELY(empty_pages_head != nullptr) ||
LIKELY(decommitted_pages_head != nullptr)) {
// Second, look in our lists of empty and decommitted pages.
// Check empty pages first, which are preferred, but beware that an
// empty page might have been decommitted.
while (LIKELY((new_page = empty_pages_head) != nullptr)) {
- DCHECK(new_page->bucket == this);
- DCHECK(new_page->is_empty() || new_page->is_decommitted());
+ PA_DCHECK(new_page->bucket == this);
+ PA_DCHECK(new_page->is_empty() || new_page->is_decommitted());
empty_pages_head = new_page->next_page;
// Accept the empty page unless it got decommitted.
if (new_page->freelist_head) {
new_page->next_page = nullptr;
break;
}
- DCHECK(new_page->is_decommitted());
+ PA_DCHECK(new_page->is_decommitted());
new_page->next_page = decommitted_pages_head;
decommitted_pages_head = new_page;
}
if (UNLIKELY(!new_page) && LIKELY(decommitted_pages_head != nullptr)) {
new_page = decommitted_pages_head;
- DCHECK(new_page->bucket == this);
- DCHECK(new_page->is_decommitted());
+ PA_DCHECK(new_page->bucket == this);
+ PA_DCHECK(new_page->is_decommitted());
decommitted_pages_head = new_page->next_page;
void* addr = PartitionPage<thread_safe>::ToPointer(new_page);
root->RecommitSystemPages(addr, new_page->bucket->get_bytes_per_span());
@@ -535,7 +554,7 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
// not be zeroed.
// *is_already_zeroed = true;
}
- DCHECK(new_page);
+ PA_DCHECK(new_page);
} else {
// Third. If we get here, we need a brand new page.
uint16_t num_partition_pages = get_pages_per_slot_span();
@@ -552,8 +571,8 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
// Bail if we had a memory allocation failure.
if (UNLIKELY(!new_page)) {
- DCHECK(active_pages_head ==
- PartitionPage<thread_safe>::get_sentinel_page());
+ PA_DCHECK(active_pages_head ==
+ PartitionPage<thread_safe>::get_sentinel_page());
if (return_null)
return nullptr;
root->OutOfMemory(size);
@@ -563,7 +582,7 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
// It seems like in many of the conditional branches above, |this| ==
// |new_page->bucket|. Maybe pull this into another function?
PartitionBucket* bucket = new_page->bucket;
- DCHECK(bucket != get_sentinel_bucket());
+ PA_DCHECK(bucket != get_sentinel_bucket());
bucket->active_pages_head = new_page;
new_page->set_raw_size(size);
@@ -578,7 +597,7 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
return entry;
}
// Otherwise, we need to build the freelist.
- DCHECK(new_page->num_unprovisioned_slots);
+ PA_DCHECK(new_page->num_unprovisioned_slots);
return AllocAndFillFreelist(new_page);
}
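
For readability, the order in which SlowPathAlloc above looks for memory can be summarized as follows; the enum is purely illustrative and not part of the allocator.

// Sketch only: the slow path's decision order, stripped of locking, flags and
// OOM handling.
enum class SlowPathSource {
  kDirectMap,        // size > kGenericMaxBucketed: map the request directly.
  kActivePage,       // SetNewActivePage() found a page with usable slots.
  kEmptyPage,        // reuse a still-committed page from empty_pages_head.
  kDecommittedPage,  // recommit a decommitted page, then Reset() it.
  kNewSlotSpan,      // AllocNewSlotSpan() carved from the super page extent.
};
// Whichever page is chosen, the allocation is served from its freelist, which
// is (re)built via AllocAndFillFreelist() when necessary.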
diff --git a/chromium/base/allocator/partition_allocator/partition_bucket.h b/chromium/base/allocator/partition_allocator/partition_bucket.h
index 608b81b0dd8..030a98b1c9a 100644
--- a/chromium/base/allocator/partition_allocator/partition_bucket.h
+++ b/chromium/base/allocator/partition_allocator/partition_bucket.h
@@ -8,11 +8,12 @@
#include <stddef.h>
#include <stdint.h>
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/base_export.h"
+#include "base/check.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
#include "base/thread_annotations.h"
namespace base {
@@ -36,10 +37,10 @@ struct PartitionBucket {
// requesting (a) new page(s) from the operating system, or false otherwise.
// This enables an optimization for when callers use |PartitionAllocZeroFill|:
// there is no need to call memset on fresh pages; the OS has already zeroed
- // them. (See |PartitionRootBase::AllocFromBucket|.)
+ // them. (See |PartitionRoot::AllocFromBucket|.)
//
// Note the matching Free() functions are in PartitionPage.
- BASE_EXPORT NOINLINE void* SlowPathAlloc(PartitionRootBase<thread_safe>* root,
+ BASE_EXPORT NOINLINE void* SlowPathAlloc(PartitionRoot<thread_safe>* root,
int flags,
size_t size,
bool* is_already_zeroed)
@@ -63,7 +64,7 @@ struct PartitionBucket {
// Caller must check that the size is not above the kGenericMaxDirectMapped
// limit before calling. This also guards against integer overflow in the
// calculation here.
- DCHECK(size <= kGenericMaxDirectMapped);
+ PA_DCHECK(size <= kGenericMaxDirectMapped);
return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
}
@@ -101,7 +102,7 @@ struct PartitionBucket {
// Allocates a new slot span with size |num_partition_pages| from the
// current extent. Metadata within this slot span will be uninitialized.
// Returns nullptr on error.
- ALWAYS_INLINE void* AllocNewSlotSpan(PartitionRootBase<thread_safe>* root,
+ ALWAYS_INLINE void* AllocNewSlotSpan(PartitionRoot<thread_safe>* root,
int flags,
uint16_t num_partition_pages)
EXCLUSIVE_LOCKS_REQUIRED(root->lock_);
diff --git a/chromium/base/allocator/partition_allocator/partition_cookie.h b/chromium/base/allocator/partition_allocator/partition_cookie.h
index 750ac6154f8..ca29ab64f73 100644
--- a/chromium/base/allocator/partition_allocator/partition_cookie.h
+++ b/chromium/base/allocator/partition_allocator/partition_cookie.h
@@ -5,65 +5,90 @@
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
+#include "base/allocator/buildflags.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
namespace base {
namespace internal {
-#if DCHECK_IS_ON()
// Handles alignment up to XMM instructions on Intel.
static constexpr size_t kCookieSize = 16;
+// Cookies are enabled for debug builds, unless PartitionAlloc is used as the
+// malloc() implementation. This is a temporary workaround for the alignment
+// issues caused by cookies. With cookies, PartitionAlloc cannot support
+// posix_memalign(), which is required.
+//
+// TODO(lizeb): Support cookies when used as the malloc() implementation.
+#if DCHECK_IS_ON() && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
static constexpr unsigned char kCookieValue[kCookieSize] = {
0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
-#endif
ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) {
-#if DCHECK_IS_ON()
unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
- DCHECK(*cookie_ptr == kCookieValue[i]);
-#endif
+ PA_DCHECK(*cookie_ptr == kCookieValue[i]);
}
ALWAYS_INLINE size_t PartitionCookieSizeAdjustAdd(size_t size) {
-#if DCHECK_IS_ON()
// Add space for cookies, checking for integer overflow. TODO(palmer):
// Investigate the performance and code size implications of using
// CheckedNumeric throughout PA.
- DCHECK(size + (2 * kCookieSize) > size);
+ PA_DCHECK(size + (2 * kCookieSize) > size);
size += 2 * kCookieSize;
-#endif
return size;
}
ALWAYS_INLINE void* PartitionCookieFreePointerAdjust(void* ptr) {
-#if DCHECK_IS_ON()
// The value given to the application is actually just after the cookie.
ptr = static_cast<char*>(ptr) - kCookieSize;
-#endif
return ptr;
}
ALWAYS_INLINE size_t PartitionCookieSizeAdjustSubtract(size_t size) {
-#if DCHECK_IS_ON()
// Remove space for cookies.
- DCHECK(size >= 2 * kCookieSize);
+ PA_DCHECK(size >= 2 * kCookieSize);
size -= 2 * kCookieSize;
-#endif
return size;
}
-ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) {
+ALWAYS_INLINE size_t PartitionCookieOffsetSubtract(size_t offset) {
#if DCHECK_IS_ON()
+ // Convert offset from the beginning of the allocated slot to offset from
+ // the value given to the application, which is just after the cookie.
+ offset -= kCookieSize;
+#endif
+ return offset;
+}
+
+ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) {
unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
*cookie_ptr = kCookieValue[i];
-#endif
}
+#else
+
+ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) {}
+
+ALWAYS_INLINE size_t PartitionCookieSizeAdjustAdd(size_t size) {
+ return size;
+}
+
+ALWAYS_INLINE void* PartitionCookieFreePointerAdjust(void* ptr) {
+ return ptr;
+}
+
+ALWAYS_INLINE size_t PartitionCookieSizeAdjustSubtract(size_t size) {
+ return size;
+}
+
+ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) {}
+#endif // DCHECK_IS_ON() && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
} // namespace internal
} // namespace base
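
With cookies active (DCHECK builds where PartitionAlloc is not the malloc() implementation), every slot is laid out as [cookie | user data | cookie] and the caller receives a pointer just past the leading cookie. A minimal sketch of how the helpers above compose on the allocation and free paths; RawAlloc/RawFree are hypothetical stand-ins for the real slot management:

// Sketch only: composing the cookie helpers. RawAlloc/RawFree are placeholders.
void* AllocWithCookies(size_t requested_size) {
  size_t slot_size = PartitionCookieSizeAdjustAdd(requested_size);  // +2 * 16
  char* slot = static_cast<char*>(RawAlloc(slot_size));
  PartitionCookieWriteValue(slot);                            // leading cookie
  PartitionCookieWriteValue(slot + slot_size - kCookieSize);  // trailing cookie
  return slot + kCookieSize;  // the application sees the region between cookies
}

void FreeWithCookies(void* ptr) {
  void* slot = PartitionCookieFreePointerAdjust(ptr);  // back to the slot start
  PartitionCookieCheckValue(slot);                     // catches buffer underflow
  RawFree(slot);
}

When cookies are compiled out, all of these helpers collapse to no-ops, so the same call sequence degenerates to a plain allocation.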
diff --git a/chromium/base/allocator/partition_allocator/partition_direct_map_extent.h b/chromium/base/allocator/partition_allocator/partition_direct_map_extent.h
index c9b6d13b6f0..494f23526f0 100644
--- a/chromium/base/allocator/partition_allocator/partition_direct_map_extent.h
+++ b/chromium/base/allocator/partition_allocator/partition_direct_map_extent.h
@@ -5,9 +5,10 @@
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_page.h"
-#include "base/logging.h"
+#include "base/check.h"
namespace base {
namespace internal {
@@ -27,7 +28,7 @@ template <bool thread_safe>
ALWAYS_INLINE PartitionDirectMapExtent<thread_safe>*
PartitionDirectMapExtent<thread_safe>::FromPage(
PartitionPage<thread_safe>* page) {
- DCHECK(page->bucket->is_direct_mapped());
+ PA_DCHECK(page->bucket->is_direct_mapped());
return reinterpret_cast<PartitionDirectMapExtent<thread_safe>*>(
reinterpret_cast<char*>(page) + 3 * kPageMetadataSize);
}
diff --git a/chromium/base/allocator/partition_allocator/partition_page.cc b/chromium/base/allocator/partition_allocator/partition_page.cc
index b094abc05b6..e40230c201d 100644
--- a/chromium/base/allocator/partition_allocator/partition_page.cc
+++ b/chromium/base/allocator/partition_allocator/partition_page.cc
@@ -4,9 +4,15 @@
#include "base/allocator/partition_allocator/partition_page.h"
+#include "base/allocator/partition_allocator/address_pool_manager.h"
+#include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
-#include "base/allocator/partition_allocator/partition_root_base.h"
#include "base/check.h"
+#include "base/feature_list.h"
+#include "base/notreached.h"
+#include "build/build_config.h"
namespace base {
namespace internal {
@@ -16,21 +22,20 @@ namespace {
template <bool thread_safe>
ALWAYS_INLINE DeferredUnmap
PartitionDirectUnmap(PartitionPage<thread_safe>* page) {
- PartitionRootBase<thread_safe>* root =
- PartitionRootBase<thread_safe>::FromPage(page);
+ PartitionRoot<thread_safe>* root = PartitionRoot<thread_safe>::FromPage(page);
const PartitionDirectMapExtent<thread_safe>* extent =
PartitionDirectMapExtent<thread_safe>::FromPage(page);
size_t unmap_size = extent->map_size;
// Maintain the doubly-linked list of all direct mappings.
if (extent->prev_extent) {
- DCHECK(extent->prev_extent->next_extent == extent);
+ PA_DCHECK(extent->prev_extent->next_extent == extent);
extent->prev_extent->next_extent = extent->next_extent;
} else {
root->direct_map_list = extent->next_extent;
}
if (extent->next_extent) {
- DCHECK(extent->next_extent->prev_extent == extent);
+ PA_DCHECK(extent->next_extent->prev_extent == extent);
extent->next_extent->prev_extent = extent->prev_extent;
}
@@ -40,10 +45,10 @@ PartitionDirectUnmap(PartitionPage<thread_safe>* page) {
size_t uncommitted_page_size = page->bucket->slot_size + kSystemPageSize;
root->DecreaseCommittedPages(uncommitted_page_size);
- DCHECK(root->total_size_of_direct_mapped_pages >= uncommitted_page_size);
+ PA_DCHECK(root->total_size_of_direct_mapped_pages >= uncommitted_page_size);
root->total_size_of_direct_mapped_pages -= uncommitted_page_size;
- DCHECK(!(unmap_size & kPageAllocationGranularityOffsetMask));
+ PA_DCHECK(!(unmap_size & kPageAllocationGranularityOffsetMask));
char* ptr =
reinterpret_cast<char*>(PartitionPage<thread_safe>::ToPointer(page));
@@ -56,16 +61,16 @@ PartitionDirectUnmap(PartitionPage<thread_safe>* page) {
template <bool thread_safe>
ALWAYS_INLINE void PartitionRegisterEmptyPage(
PartitionPage<thread_safe>* page) {
- DCHECK(page->is_empty());
- PartitionRootBase<thread_safe>* root =
- PartitionRootBase<thread_safe>::FromPage(page);
+ PA_DCHECK(page->is_empty());
+ PartitionRoot<thread_safe>* root = PartitionRoot<thread_safe>::FromPage(page);
root->lock_.AssertAcquired();
// If the page is already registered as empty, give it another life.
if (page->empty_cache_index != -1) {
- DCHECK(page->empty_cache_index >= 0);
- DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans);
- DCHECK(root->global_empty_page_ring[page->empty_cache_index] == page);
+ PA_DCHECK(page->empty_cache_index >= 0);
+ PA_DCHECK(static_cast<unsigned>(page->empty_cache_index) <
+ kMaxFreeableSpans);
+ PA_DCHECK(root->global_empty_page_ring[page->empty_cache_index] == page);
root->global_empty_page_ring[page->empty_cache_index] = nullptr;
}
@@ -103,7 +108,7 @@ PartitionPage<thread_safe>* PartitionPage<thread_safe>::get_sentinel_page() {
template <bool thread_safe>
DeferredUnmap PartitionPage<thread_safe>::FreeSlowPath() {
- DCHECK(this != get_sentinel_page());
+ PA_DCHECK(this != get_sentinel_page());
if (LIKELY(num_allocated_slots == 0)) {
// Page became fully unused.
if (UNLIKELY(bucket->is_direct_mapped())) {
@@ -113,27 +118,27 @@ DeferredUnmap PartitionPage<thread_safe>::FreeSlowPath() {
// the empty list as a force towards defragmentation.
if (LIKELY(this == bucket->active_pages_head))
bucket->SetNewActivePage();
- DCHECK(bucket->active_pages_head != this);
+ PA_DCHECK(bucket->active_pages_head != this);
set_raw_size(0);
- DCHECK(!get_raw_size());
+ PA_DCHECK(!get_raw_size());
PartitionRegisterEmptyPage(this);
} else {
- DCHECK(!bucket->is_direct_mapped());
+ PA_DCHECK(!bucket->is_direct_mapped());
// Ensure that the page is full. That's the only valid case if we
// arrive here.
- DCHECK(num_allocated_slots < 0);
+ PA_DCHECK(num_allocated_slots < 0);
// A transition of num_allocated_slots from 0 to -1 is not legal, and
// likely indicates a double-free.
- CHECK(num_allocated_slots != -1);
+ PA_CHECK(num_allocated_slots != -1);
num_allocated_slots = -num_allocated_slots - 2;
- DCHECK(num_allocated_slots == bucket->get_slots_per_span() - 1);
+ PA_DCHECK(num_allocated_slots == bucket->get_slots_per_span() - 1);
// Fully used page became partially used. It must be put back on the
// non-full page list. Also make it the current page to increase the
// chances of it being filled up again. The old current page will be
// the next page.
- DCHECK(!next_page);
+ PA_DCHECK(!next_page);
if (LIKELY(bucket->active_pages_head != get_sentinel_page()))
next_page = bucket->active_pages_head;
bucket->active_pages_head = this;
@@ -147,11 +152,10 @@ DeferredUnmap PartitionPage<thread_safe>::FreeSlowPath() {
}
template <bool thread_safe>
-void PartitionPage<thread_safe>::Decommit(
- PartitionRootBase<thread_safe>* root) {
+void PartitionPage<thread_safe>::Decommit(PartitionRoot<thread_safe>* root) {
root->lock_.AssertAcquired();
- DCHECK(is_empty());
- DCHECK(!bucket->is_direct_mapped());
+ PA_DCHECK(is_empty());
+ PA_DCHECK(!bucket->is_direct_mapped());
void* addr = PartitionPage::ToPointer(this);
root->DecommitSystemPages(addr, bucket->get_bytes_per_span());
@@ -163,23 +167,33 @@ void PartitionPage<thread_safe>::Decommit(
// 32 bytes in size.
freelist_head = nullptr;
num_unprovisioned_slots = 0;
- DCHECK(is_decommitted());
+ PA_DCHECK(is_decommitted());
}
template <bool thread_safe>
void PartitionPage<thread_safe>::DecommitIfPossible(
- PartitionRootBase<thread_safe>* root) {
+ PartitionRoot<thread_safe>* root) {
root->lock_.AssertAcquired();
- DCHECK(empty_cache_index >= 0);
- DCHECK(static_cast<unsigned>(empty_cache_index) < kMaxFreeableSpans);
- DCHECK(this == root->global_empty_page_ring[empty_cache_index]);
+ PA_DCHECK(empty_cache_index >= 0);
+ PA_DCHECK(static_cast<unsigned>(empty_cache_index) < kMaxFreeableSpans);
+ PA_DCHECK(this == root->global_empty_page_ring[empty_cache_index]);
empty_cache_index = -1;
if (is_empty())
Decommit(root);
}
void DeferredUnmap::Unmap() {
- FreePages(ptr, size);
+ PA_DCHECK(ptr && size > 0);
+ if (IsManagedByPartitionAlloc(ptr)) {
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ internal::AddressPoolManager::GetInstance()->Free(
+ internal::GetDirectMapPool(), ptr, size);
+#else
+ NOTREACHED();
+#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ } else {
+ FreePages(ptr, size);
+ }
}
template struct PartitionPage<ThreadSafe>;
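
DeferredUnmap exists so that the expensive unmap of a direct mapping (or its return to the GigaCage pool, as in Unmap() above) can happen after the partition lock is released; the free path collects the {ptr, size} pair under the lock and runs it afterwards. A minimal sketch of that pattern, following the shape of the free path this patch folds into PartitionRoot; names are simplified:

// Sketch only: defer the unmap until the partition lock is dropped.
template <bool thread_safe>
void FreeSketch(PartitionRoot<thread_safe>* root, void* ptr) {
  internal::DeferredUnmap deferred_unmap;
  {
    internal::ScopedGuard<thread_safe> guard{root->lock_};
    void* slot = internal::PartitionCookieFreePointerAdjust(ptr);
    auto* page = internal::PartitionPage<thread_safe>::FromPointer(slot);
    // Free() only updates metadata; a direct map is described, not unmapped.
    deferred_unmap = page->Free(slot);
  }
  // The actual FreePages()/pool release happens outside the lock.
  deferred_unmap.Run();
}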
diff --git a/chromium/base/allocator/partition_allocator/partition_page.h b/chromium/base/allocator/partition_allocator/partition_page.h
index cc04e78fb50..642b3d93115 100644
--- a/chromium/base/allocator/partition_allocator/partition_page.h
+++ b/chromium/base/allocator/partition_allocator/partition_page.h
@@ -7,13 +7,14 @@
#include <string.h>
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/allocator/partition_allocator/partition_freelist_entry.h"
#include "base/allocator/partition_allocator/random.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/thread_annotations.h"
namespace base {
@@ -81,8 +82,8 @@ struct PartitionPage {
BASE_EXPORT NOINLINE DeferredUnmap FreeSlowPath() WARN_UNUSED_RESULT;
ALWAYS_INLINE DeferredUnmap Free(void* ptr) WARN_UNUSED_RESULT;
- void Decommit(PartitionRootBase<thread_safe>* root);
- void DecommitIfPossible(PartitionRootBase<thread_safe>* root);
+ void Decommit(PartitionRoot<thread_safe>* root);
+ void DecommitIfPossible(PartitionRoot<thread_safe>* root);
// Pointer manipulation functions. These must be static as the input |page|
// pointer may be the result of an offset calculation and therefore cannot
@@ -134,12 +135,13 @@ static_assert(sizeof(PartitionPage<ThreadSafe>) <= kPageMetadataSize,
ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) {
uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
- DCHECK(!(pointer_as_uint & kSuperPageOffsetMask));
+ PA_DCHECK(!(pointer_as_uint & kSuperPageOffsetMask));
// The metadata area is exactly one system page (the guard page) into the
// super page.
return reinterpret_cast<char*>(pointer_as_uint + kSystemPageSize);
}
+// See the comment for |FromPointer|.
template <bool thread_safe>
ALWAYS_INLINE PartitionPage<thread_safe>*
PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(void* ptr) {
@@ -148,22 +150,25 @@ PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(void* ptr) {
reinterpret_cast<char*>(pointer_as_uint & kSuperPageBaseMask);
uintptr_t partition_page_index =
(pointer_as_uint & kSuperPageOffsetMask) >> kPartitionPageShift;
- // Index 0 is invalid because it is the metadata and guard area and
- // the last index is invalid because it is a guard page.
- DCHECK(partition_page_index);
- DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
+ // Index 0 is invalid because it is the super page extent metadata and the
+ // last index is invalid because the whole PartitionPage is set as guard
+ // pages.
+ PA_DCHECK(partition_page_index);
+ PA_DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
auto* page = reinterpret_cast<PartitionPage*>(
PartitionSuperPageToMetadataArea(super_page_ptr) +
(partition_page_index << kPageMetadataShift));
- // Partition pages in the same slot span can share the same page object.
- // Adjust for that.
+ // Partition pages in the same slot span share the same page object. Adjust
+ // for that.
size_t delta = page->page_offset << kPageMetadataShift;
page =
reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
return page;
}
-// Returns: start of the slot span for the PartitionPage.
+// Converts from a pointer to the PartitionPage object (within the super page's
+// metadata) into a pointer to the beginning of the partition page.
+// This doesn't have to be the first page in the slot span.
template <bool thread_safe>
ALWAYS_INLINE void* PartitionPage<thread_safe>::ToPointer(
const PartitionPage<thread_safe>* page) {
@@ -173,31 +178,36 @@ ALWAYS_INLINE void* PartitionPage<thread_safe>::ToPointer(
// A valid |page| must be past the first guard System page and within
// the following metadata region.
- DCHECK(super_page_offset > kSystemPageSize);
+ PA_DCHECK(super_page_offset > kSystemPageSize);
// Must be less than total metadata region.
- DCHECK(super_page_offset < kSystemPageSize + (kNumPartitionPagesPerSuperPage *
- kPageMetadataSize));
+ PA_DCHECK(super_page_offset <
+ kSystemPageSize +
+ (kNumPartitionPagesPerSuperPage * kPageMetadataSize));
uintptr_t partition_page_index =
(super_page_offset - kSystemPageSize) >> kPageMetadataShift;
- // Index 0 is invalid because it is the superpage extent metadata and the
+ // Index 0 is invalid because it is the super page extent metadata and the
// last index is invalid because the whole PartitionPage is set as guard
- // pages for the metadata region.
- DCHECK(partition_page_index);
- DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
+ // pages.
+ PA_DCHECK(partition_page_index);
+ PA_DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
void* ret = reinterpret_cast<void*>(
super_page_base + (partition_page_index << kPartitionPageShift));
return ret;
}
+// Converts from a pointer inside a partition page into a pointer to the
+// PartitionPage object (within the super page's metadata).
+// The first PartitionPage of the slot span will be returned, regardless of
+// where inside the slot span |ptr| points.
template <bool thread_safe>
ALWAYS_INLINE PartitionPage<thread_safe>*
PartitionPage<thread_safe>::FromPointer(void* ptr) {
PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(ptr);
// Checks that the pointer is a multiple of bucket size.
- DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
- reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page))) %
- page->bucket->slot_size));
+ PA_DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
+ reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page))) %
+ page->bucket->slot_size));
return page;
}
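
Both conversions documented above are pure address arithmetic within a super page: the metadata array sits one (guard) system page into the super page, with one kPageMetadataSize entry per partition page. A condensed sketch of the pointer-to-metadata direction, using only constants that already appear in this file:

// Sketch only: the arithmetic behind FromPointerNoAlignmentCheck.
char* MetadataForPointer(void* ptr) {
  uintptr_t as_uint = reinterpret_cast<uintptr_t>(ptr);
  // 1. Super page containing |ptr| (super pages are kSuperPageSize-aligned).
  char* super_page = reinterpret_cast<char*>(as_uint & kSuperPageBaseMask);
  // 2. Index of the partition page within the super page.
  uintptr_t page_index =
      (as_uint & kSuperPageOffsetMask) >> kPartitionPageShift;
  // 3. One metadata slot per partition page, starting after the guard page.
  return super_page + kSystemPageSize + (page_index << kPageMetadataShift);
}

ToPointer() is the inverse: it recovers the partition page index from the metadata pointer's offset within the metadata area and shifts it back up by kPartitionPageShift.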
@@ -210,8 +220,8 @@ ALWAYS_INLINE const size_t* PartitionPage<thread_safe>::get_raw_size_ptr()
if (bucket->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
return nullptr;
- DCHECK((bucket->slot_size % kSystemPageSize) == 0);
- DCHECK(bucket->is_direct_mapped() || bucket->get_slots_per_span() == 1);
+ PA_DCHECK((bucket->slot_size % kSystemPageSize) == 0);
+ PA_DCHECK(bucket->is_direct_mapped() || bucket->get_slots_per_span() == 1);
const PartitionPage* the_next_page = this + 1;
return reinterpret_cast<const size_t*>(&the_next_page->freelist_head);
@@ -228,7 +238,7 @@ ALWAYS_INLINE size_t PartitionPage<thread_safe>::get_raw_size() const {
template <bool thread_safe>
ALWAYS_INLINE DeferredUnmap PartitionPage<thread_safe>::Free(void* ptr) {
#if DCHECK_IS_ON()
- PartitionRootBase<thread_safe>::FromPage(this)->lock_.AssertAcquired();
+ PartitionRoot<thread_safe>::FromPage(this)->lock_.AssertAcquired();
size_t slot_size = bucket->slot_size;
const size_t raw_size = get_raw_size();
@@ -244,12 +254,12 @@ ALWAYS_INLINE DeferredUnmap PartitionPage<thread_safe>::Free(void* ptr) {
memset(ptr, kFreedByte, slot_size);
#endif
- DCHECK(num_allocated_slots);
+ PA_DCHECK(num_allocated_slots);
// Catches an immediate double free.
- CHECK(ptr != freelist_head);
+ PA_CHECK(ptr != freelist_head);
// Look for double free one level deeper in debug.
- DCHECK(!freelist_head ||
- ptr != EncodedPartitionFreelistEntry::Decode(freelist_head->next));
+ PA_DCHECK(!freelist_head ||
+ ptr != EncodedPartitionFreelistEntry::Decode(freelist_head->next));
auto* entry = static_cast<internal::PartitionFreelistEntry*>(ptr);
entry->next = internal::PartitionFreelistEntry::Encode(freelist_head);
freelist_head = entry;
@@ -259,46 +269,46 @@ ALWAYS_INLINE DeferredUnmap PartitionPage<thread_safe>::Free(void* ptr) {
} else {
// All single-slot allocations must go through the slow path to
// correctly update the size metadata.
- DCHECK(get_raw_size() == 0);
+ PA_DCHECK(get_raw_size() == 0);
}
return {};
}
template <bool thread_safe>
ALWAYS_INLINE bool PartitionPage<thread_safe>::is_active() const {
- DCHECK(this != get_sentinel_page());
- DCHECK(!page_offset);
+ PA_DCHECK(this != get_sentinel_page());
+ PA_DCHECK(!page_offset);
return (num_allocated_slots > 0 &&
(freelist_head || num_unprovisioned_slots));
}
template <bool thread_safe>
ALWAYS_INLINE bool PartitionPage<thread_safe>::is_full() const {
- DCHECK(this != get_sentinel_page());
- DCHECK(!page_offset);
+ PA_DCHECK(this != get_sentinel_page());
+ PA_DCHECK(!page_offset);
bool ret = (num_allocated_slots == bucket->get_slots_per_span());
if (ret) {
- DCHECK(!freelist_head);
- DCHECK(!num_unprovisioned_slots);
+ PA_DCHECK(!freelist_head);
+ PA_DCHECK(!num_unprovisioned_slots);
}
return ret;
}
template <bool thread_safe>
ALWAYS_INLINE bool PartitionPage<thread_safe>::is_empty() const {
- DCHECK(this != get_sentinel_page());
- DCHECK(!page_offset);
+ PA_DCHECK(this != get_sentinel_page());
+ PA_DCHECK(!page_offset);
return (!num_allocated_slots && freelist_head);
}
template <bool thread_safe>
ALWAYS_INLINE bool PartitionPage<thread_safe>::is_decommitted() const {
- DCHECK(this != get_sentinel_page());
- DCHECK(!page_offset);
+ PA_DCHECK(this != get_sentinel_page());
+ PA_DCHECK(!page_offset);
bool ret = (!num_allocated_slots && !freelist_head);
if (ret) {
- DCHECK(!num_unprovisioned_slots);
- DCHECK(empty_cache_index == -1);
+ PA_DCHECK(!num_unprovisioned_slots);
+ PA_DCHECK(empty_cache_index == -1);
}
return ret;
}
@@ -312,10 +322,10 @@ ALWAYS_INLINE void PartitionPage<thread_safe>::set_raw_size(size_t size) {
template <bool thread_safe>
ALWAYS_INLINE void PartitionPage<thread_safe>::Reset() {
- DCHECK(is_decommitted());
+ PA_DCHECK(is_decommitted());
num_unprovisioned_slots = bucket->get_slots_per_span();
- DCHECK(num_unprovisioned_slots);
+ PA_DCHECK(num_unprovisioned_slots);
next_page = nullptr;
}
diff --git a/chromium/base/allocator/partition_allocator/partition_root_base.cc b/chromium/base/allocator/partition_allocator/partition_root_base.cc
deleted file mode 100644
index c55a166b415..00000000000
--- a/chromium/base/allocator/partition_allocator/partition_root_base.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_root_base.h"
-
-#include "base/allocator/partition_allocator/oom.h"
-#include "base/allocator/partition_allocator/partition_oom.h"
-#include "base/allocator/partition_allocator/partition_page.h"
-#include "build/build_config.h"
-
-namespace base {
-namespace internal {
-
-template <bool thread_safety>
-NOINLINE void PartitionRootBase<thread_safety>::OutOfMemory(size_t size) {
-#if !defined(ARCH_CPU_64_BITS)
- // Check whether this OOM is due to a lot of super pages that are allocated
- // but not committed, probably due to http://crbug.com/421387.
- if (total_size_of_super_pages + total_size_of_direct_mapped_pages -
- total_size_of_committed_pages >
- kReasonableSizeOfUnusedPages) {
- PartitionOutOfMemoryWithLotsOfUncommitedPages(size);
- }
-#endif
- if (g_oom_handling_function)
- (*g_oom_handling_function)(size);
- OOM_CRASH(size);
-}
-
-template <bool thread_safe>
-void PartitionRootBase<thread_safe>::DecommitEmptyPages() {
- for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
- Page* page = global_empty_page_ring[i];
- if (page)
- page->DecommitIfPossible(this);
- global_empty_page_ring[i] = nullptr;
- }
-}
-
-template <bool thread_safe>
-internal::PartitionRootBase<thread_safe>::PartitionRootBase() = default;
-template <bool thread_safe>
-internal::PartitionRootBase<thread_safe>::~PartitionRootBase() = default;
-
-template struct PartitionRootBase<ThreadSafe>;
-template struct PartitionRootBase<NotThreadSafe>;
-
-} // namespace internal
-} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/partition_root_base.h b/chromium/base/allocator/partition_allocator/partition_root_base.h
deleted file mode 100644
index de9551c71d5..00000000000
--- a/chromium/base/allocator/partition_allocator/partition_root_base.h
+++ /dev/null
@@ -1,381 +0,0 @@
-// Copyright (c) 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_
-
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/partition_bucket.h"
-#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
-#include "base/allocator/partition_allocator/partition_page.h"
-#include "base/allocator/partition_allocator/spin_lock.h"
-#include "base/logging.h"
-#include "base/no_destructor.h"
-#include "base/synchronization/lock.h"
-#include "base/thread_annotations.h"
-#include "build/build_config.h"
-
-namespace base {
-
-typedef void (*OomFunction)(size_t);
-
-// PartitionAlloc supports setting hooks to observe allocations/frees as they
-// occur as well as 'override' hooks that allow overriding those operations.
-class BASE_EXPORT PartitionAllocHooks {
- public:
- // Log allocation and free events.
- typedef void AllocationObserverHook(void* address,
- size_t size,
- const char* type_name);
- typedef void FreeObserverHook(void* address);
-
- // If it returns true, the allocation has been overridden with the pointer in
- // *out.
- typedef bool AllocationOverrideHook(void** out,
- int flags,
- size_t size,
- const char* type_name);
- // If it returns true, then the allocation was overridden and has been freed.
- typedef bool FreeOverrideHook(void* address);
- // If it returns true, the underlying allocation is overridden and *out holds
- // the size of the underlying allocation.
- typedef bool ReallocOverrideHook(size_t* out, void* address);
-
- // To unhook, call Set*Hooks with nullptrs.
- static void SetObserverHooks(AllocationObserverHook* alloc_hook,
- FreeObserverHook* free_hook);
- static void SetOverrideHooks(AllocationOverrideHook* alloc_hook,
- FreeOverrideHook* free_hook,
- ReallocOverrideHook realloc_hook);
-
- // Helper method to check whether hooks are enabled. This is an optimization
- // so that if a function needs to call observer and override hooks in two
- // different places this value can be cached and only loaded once.
- static bool AreHooksEnabled() {
- return hooks_enabled_.load(std::memory_order_relaxed);
- }
-
- static void AllocationObserverHookIfEnabled(void* address,
- size_t size,
- const char* type_name);
- static bool AllocationOverrideHookIfEnabled(void** out,
- int flags,
- size_t size,
- const char* type_name);
-
- static void FreeObserverHookIfEnabled(void* address);
- static bool FreeOverrideHookIfEnabled(void* address);
-
- static void ReallocObserverHookIfEnabled(void* old_address,
- void* new_address,
- size_t size,
- const char* type_name);
- static bool ReallocOverrideHookIfEnabled(size_t* out, void* address);
-
- private:
- // Single bool that is used to indicate whether observer or allocation hooks
- // are set to reduce the numbers of loads required to check whether hooking is
- // enabled.
- static std::atomic<bool> hooks_enabled_;
-
- // Lock used to synchronize Set*Hooks calls.
- static std::atomic<AllocationObserverHook*> allocation_observer_hook_;
- static std::atomic<FreeObserverHook*> free_observer_hook_;
-
- static std::atomic<AllocationOverrideHook*> allocation_override_hook_;
- static std::atomic<FreeOverrideHook*> free_override_hook_;
- static std::atomic<ReallocOverrideHook*> realloc_override_hook_;
-};
-
-namespace internal {
-
-template <bool thread_safe>
-class LOCKABLE MaybeSpinLock {
- public:
- void Lock() EXCLUSIVE_LOCK_FUNCTION() {}
- void Unlock() UNLOCK_FUNCTION() {}
- void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {}
-};
-
-template <bool thread_safe>
-class SCOPED_LOCKABLE ScopedGuard {
- public:
- explicit ScopedGuard(MaybeSpinLock<thread_safe>& lock)
- EXCLUSIVE_LOCK_FUNCTION(lock)
- : lock_(lock) {
- lock_.Lock();
- }
- ~ScopedGuard() UNLOCK_FUNCTION() { lock_.Unlock(); }
-
- private:
- MaybeSpinLock<thread_safe>& lock_;
-};
-
-#if DCHECK_IS_ON()
-template <>
-class LOCKABLE MaybeSpinLock<ThreadSafe> {
- public:
- MaybeSpinLock() : lock_() {}
- void Lock() EXCLUSIVE_LOCK_FUNCTION() { lock_->Acquire(); }
- void Unlock() UNLOCK_FUNCTION() { lock_->Release(); }
- void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {
- lock_->AssertAcquired();
- }
-
- private:
- // NoDestructor to avoid issues with the "static destruction order fiasco".
- //
- // This also means that for DCHECK_IS_ON() builds we leak a lock when a
- // partition is destructed. This will in practice only show in some tests, as
- // partitons are not destructed in regular use. In addition, on most
- // platforms, base::Lock doesn't allocate memory and neither does the OS
- // library, and the destructor is a no-op.
- base::NoDestructor<base::Lock> lock_;
-};
-
-#else
-template <>
-class LOCKABLE MaybeSpinLock<ThreadSafe> {
- public:
- void Lock() EXCLUSIVE_LOCK_FUNCTION() { lock_.lock(); }
- void Unlock() UNLOCK_FUNCTION() { lock_.unlock(); }
- void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {
- // Not supported by subtle::SpinLock.
- }
-
- private:
- subtle::SpinLock lock_;
-};
-#endif // DCHECK_IS_ON()
-
-// An "extent" is a span of consecutive superpages. We link to the partition's
-// next extent (if there is one) to the very start of a superpage's metadata
-// area.
-template <bool thread_safety>
-struct PartitionSuperPageExtentEntry {
- PartitionRootBase<thread_safety>* root;
- char* super_page_base;
- char* super_pages_end;
- PartitionSuperPageExtentEntry<thread_safety>* next;
-};
-static_assert(
- sizeof(PartitionSuperPageExtentEntry<ThreadSafe>) <= kPageMetadataSize,
- "PartitionSuperPageExtentEntry must be able to fit in a metadata slot");
-
-// g_oom_handling_function is invoked when PartitionAlloc hits OutOfMemory.
-static OomFunction g_oom_handling_function = nullptr;
-
-template <bool thread_safety>
-struct BASE_EXPORT PartitionRootBase {
- using Page = PartitionPage<thread_safety>;
- using Bucket = PartitionBucket<thread_safety>;
- using ScopedGuard = internal::ScopedGuard<thread_safety>;
-
- PartitionRootBase();
- virtual ~PartitionRootBase();
- MaybeSpinLock<thread_safety> lock_;
- size_t total_size_of_committed_pages = 0;
- size_t total_size_of_super_pages = 0;
- size_t total_size_of_direct_mapped_pages = 0;
- // Invariant: total_size_of_committed_pages <=
- // total_size_of_super_pages +
- // total_size_of_direct_mapped_pages.
- unsigned num_buckets = 0;
- unsigned max_allocation = 0;
- bool initialized = false;
- char* next_super_page = nullptr;
- char* next_partition_page = nullptr;
- char* next_partition_page_end = nullptr;
- PartitionSuperPageExtentEntry<thread_safety>* current_extent = nullptr;
- PartitionSuperPageExtentEntry<thread_safety>* first_extent = nullptr;
- PartitionDirectMapExtent<thread_safety>* direct_map_list = nullptr;
- Page* global_empty_page_ring[kMaxFreeableSpans] = {};
- int16_t global_empty_page_ring_index = 0;
- uintptr_t inverted_self = 0;
-
- // Public API
-
- // Allocates out of the given bucket. Properly, this function should probably
- // be in PartitionBucket, but because the implementation needs to be inlined
- // for performance, and because it needs to inspect PartitionPage,
- // it becomes impossible to have it in PartitionBucket as this causes a
- // cyclical dependency on PartitionPage function implementations.
- //
- // Moving it a layer lower couples PartitionRootBase and PartitionBucket, but
- // preserves the layering of the includes.
- //
- // Note the matching Free() functions are in PartitionPage.
- ALWAYS_INLINE void* AllocFromBucket(Bucket* bucket, int flags, size_t size)
- EXCLUSIVE_LOCKS_REQUIRED(lock_);
- ALWAYS_INLINE void Free(void* ptr);
-
- ALWAYS_INLINE static bool IsValidPage(Page* page);
- ALWAYS_INLINE static PartitionRootBase* FromPage(Page* page);
-
- ALWAYS_INLINE void IncreaseCommittedPages(size_t len);
- ALWAYS_INLINE void DecreaseCommittedPages(size_t len);
- ALWAYS_INLINE void DecommitSystemPages(void* address, size_t length)
- EXCLUSIVE_LOCKS_REQUIRED(lock_);
- ALWAYS_INLINE void RecommitSystemPages(void* address, size_t length)
- EXCLUSIVE_LOCKS_REQUIRED(lock_);
-
- // Frees memory from this partition, if possible, by decommitting pages.
- // |flags| is an OR of base::PartitionPurgeFlags.
- virtual void PurgeMemory(int flags) = 0;
- NOINLINE void OutOfMemory(size_t size);
-
- protected:
- void DecommitEmptyPages() EXCLUSIVE_LOCKS_REQUIRED(lock_);
-};
-
-template <bool thread_safety>
-ALWAYS_INLINE void* PartitionRootBase<thread_safety>::AllocFromBucket(
- Bucket* bucket,
- int flags,
- size_t size) {
- bool zero_fill = flags & PartitionAllocZeroFill;
- bool is_already_zeroed = false;
-
- Page* page = bucket->active_pages_head;
- // Check that this page is neither full nor freed.
- DCHECK(page);
- DCHECK(page->num_allocated_slots >= 0);
- void* ret = page->freelist_head;
- if (LIKELY(ret != 0)) {
- // If these DCHECKs fire, you probably corrupted memory. TODO(palmer): See
- // if we can afford to make these CHECKs.
- DCHECK(IsValidPage(page));
-
- // All large allocations must go through the slow path to correctly update
- // the size metadata.
- DCHECK(page->get_raw_size() == 0);
- internal::PartitionFreelistEntry* new_head =
- internal::EncodedPartitionFreelistEntry::Decode(
- page->freelist_head->next);
- page->freelist_head = new_head;
- page->num_allocated_slots++;
- } else {
- ret = bucket->SlowPathAlloc(this, flags, size, &is_already_zeroed);
- // TODO(palmer): See if we can afford to make this a CHECK.
- DCHECK(!ret || IsValidPage(Page::FromPointer(ret)));
- }
-
-#if DCHECK_IS_ON()
- if (!ret) {
- return nullptr;
- }
-
- page = Page::FromPointer(ret);
- // TODO(ajwong): Can |page->bucket| ever not be |this|? If not, can this just
- // be bucket->slot_size?
- size_t new_slot_size = page->bucket->slot_size;
- size_t raw_size = page->get_raw_size();
- if (raw_size) {
- DCHECK(raw_size == size);
- new_slot_size = raw_size;
- }
- size_t no_cookie_size = PartitionCookieSizeAdjustSubtract(new_slot_size);
- char* char_ret = static_cast<char*>(ret);
- // The value given to the application is actually just after the cookie.
- ret = char_ret + kCookieSize;
-
- // Fill the region kUninitializedByte or 0, and surround it with 2 cookies.
- PartitionCookieWriteValue(char_ret);
- if (!zero_fill) {
- memset(ret, kUninitializedByte, no_cookie_size);
- } else if (!is_already_zeroed) {
- memset(ret, 0, no_cookie_size);
- }
- PartitionCookieWriteValue(char_ret + kCookieSize + no_cookie_size);
-#else
- if (ret && zero_fill && !is_already_zeroed) {
- memset(ret, 0, size);
- }
-#endif
-
- return ret;
-}
-
-template <bool thread_safety>
-ALWAYS_INLINE void PartitionRootBase<thread_safety>::Free(void* ptr) {
-#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
- free(ptr);
-#else
- DCHECK(initialized);
-
- if (UNLIKELY(!ptr))
- return;
-
- if (PartitionAllocHooks::AreHooksEnabled()) {
- PartitionAllocHooks::FreeObserverHookIfEnabled(ptr);
- if (PartitionAllocHooks::FreeOverrideHookIfEnabled(ptr))
- return;
- }
-
- ptr = internal::PartitionCookieFreePointerAdjust(ptr);
- Page* page = Page::FromPointer(ptr);
- // TODO(palmer): See if we can afford to make this a CHECK.
- DCHECK(IsValidPage(page));
- internal::DeferredUnmap deferred_unmap;
- {
- ScopedGuard guard{lock_};
- deferred_unmap = page->Free(ptr);
- }
- deferred_unmap.Run();
-#endif
-}
-
-template <bool thread_safety>
-ALWAYS_INLINE bool PartitionRootBase<thread_safety>::IsValidPage(Page* page) {
- PartitionRootBase* root = PartitionRootBase::FromPage(page);
- return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
-}
-
-template <bool thread_safety>
-ALWAYS_INLINE PartitionRootBase<thread_safety>*
-PartitionRootBase<thread_safety>::FromPage(Page* page) {
- auto* extent_entry =
- reinterpret_cast<PartitionSuperPageExtentEntry<thread_safety>*>(
- reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
- return extent_entry->root;
-}
-
-template <bool thread_safety>
-ALWAYS_INLINE void PartitionRootBase<thread_safety>::IncreaseCommittedPages(
- size_t len) {
- total_size_of_committed_pages += len;
- DCHECK(total_size_of_committed_pages <=
- total_size_of_super_pages + total_size_of_direct_mapped_pages);
-}
-
-template <bool thread_safety>
-ALWAYS_INLINE void PartitionRootBase<thread_safety>::DecreaseCommittedPages(
- size_t len) {
- total_size_of_committed_pages -= len;
- DCHECK(total_size_of_committed_pages <=
- total_size_of_super_pages + total_size_of_direct_mapped_pages);
-}
-
-template <bool thread_safety>
-ALWAYS_INLINE void PartitionRootBase<thread_safety>::DecommitSystemPages(
- void* address,
- size_t length) {
- ::base::DecommitSystemPages(address, length);
- DecreaseCommittedPages(length);
-}
-
-template <bool thread_safety>
-ALWAYS_INLINE void PartitionRootBase<thread_safety>::RecommitSystemPages(
- void* address,
- size_t length) {
- CHECK(::base::RecommitSystemPages(address, length, PageReadWrite));
- IncreaseCommittedPages(length);
-}
-
-} // namespace internal
-} // namespace base
-
-#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_