author     Allan Sandfeld Jensen <allan.jensen@qt.io>  2018-01-31 16:33:43 +0100
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2018-02-06 16:33:22 +0000
commit     da51f56cc21233c2d30f0fe0d171727c3102b2e0 (patch)
tree       4e579ab70ce4b19bee7984237f3ce05a96d59d83 /chromium/base/allocator
parent     c8c2d1901aec01e934adf561a9fdf0cc776cdef8 (diff)
download   qtwebengine-chromium-da51f56cc21233c2d30f0fe0d171727c3102b2e0.tar.gz
BASELINE: Update Chromium to 65.0.3525.40
Also imports missing submodules.

Change-Id: I36901b7c6a325cda3d2c10cedb2186c25af3b79b
Reviewed-by: Alexandru Croitor <alexandru.croitor@qt.io>
Diffstat (limited to 'chromium/base/allocator')
-rw-r--r--  chromium/base/allocator/OWNERS | 3
-rw-r--r--  chromium/base/allocator/README.md | 4
-rw-r--r--  chromium/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc | 3
-rw-r--r--  chromium/base/allocator/allocator_shim_override_ucrt_symbols_win.h | 2
-rw-r--r--  chromium/base/allocator/allocator_shim_unittest.cc | 3
-rw-r--r--  chromium/base/allocator/partition_allocator/OWNERS | 5
-rw-r--r--  chromium/base/allocator/partition_allocator/address_space_randomization.cc | 2
-rw-r--r--  chromium/base/allocator/partition_allocator/page_allocator.cc | 142
-rw-r--r--  chromium/base/allocator/partition_allocator/page_allocator.h | 9
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_alloc.cc | 299
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_alloc.h | 142
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc | 177
-rw-r--r--  chromium/base/allocator/partition_allocator/spin_lock.cc | 3
-rw-r--r--  chromium/base/allocator/partition_allocator/spin_lock.h | 4
14 files changed, 464 insertions, 334 deletions
diff --git a/chromium/base/allocator/OWNERS b/chromium/base/allocator/OWNERS
index 6a22df647f0..de658d09c92 100644
--- a/chromium/base/allocator/OWNERS
+++ b/chromium/base/allocator/OWNERS
@@ -1,7 +1,4 @@
primiano@chromium.org
wfh@chromium.org
-# For changes to tcmalloc it is advisable to ask jar@chromium.org
-# before proceeding.
-
# COMPONENT: Internals
diff --git a/chromium/base/allocator/README.md b/chromium/base/allocator/README.md
index a0bc24aaf62..d69c09c870c 100644
--- a/chromium/base/allocator/README.md
+++ b/chromium/base/allocator/README.md
@@ -41,8 +41,8 @@ config and can vary (typically *dlmalloc* or *jemalloc* on most Nexus devices).
**Mac/iOS**
`use_allocator: none`, we always use the system's allocator implementation.
-In addition, when building for `asan` / `msan` / `syzyasan` `valgrind`, the
-both the allocator and the shim layer are disabled.
+In addition, when building for `asan` / `msan` / `syzyasan`, both the allocator
+and the shim layer are disabled.
Layering and build deps
-----------------------
diff --git a/chromium/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc b/chromium/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
index e33754a443f..c351a7c9259 100644
--- a/chromium/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
+++ b/chromium/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
@@ -9,6 +9,9 @@
#if defined(OS_ANDROID) && __ANDROID_API__ < 17
#include <dlfcn.h>
+// This is defined in malloc.h on other platforms. We just need the definition
+// for the decltype(malloc_usable_size)* call to work.
+size_t malloc_usable_size(const void*);
#endif
// This translation unit defines a default dispatch for the allocator shim which
diff --git a/chromium/base/allocator/allocator_shim_override_ucrt_symbols_win.h b/chromium/base/allocator/allocator_shim_override_ucrt_symbols_win.h
index 9fb7d067f47..ed02656332d 100644
--- a/chromium/base/allocator/allocator_shim_override_ucrt_symbols_win.h
+++ b/chromium/base/allocator/allocator_shim_override_ucrt_symbols_win.h
@@ -12,6 +12,8 @@
#include <malloc.h>
+#include <windows.h>
+
extern "C" {
void* (*malloc_unchecked)(size_t) = &base::allocator::UncheckedAlloc;
diff --git a/chromium/base/allocator/allocator_shim_unittest.cc b/chromium/base/allocator/allocator_shim_unittest.cc
index 6f5f7f6e4d7..2bafed559a5 100644
--- a/chromium/base/allocator/allocator_shim_unittest.cc
+++ b/chromium/base/allocator/allocator_shim_unittest.cc
@@ -12,6 +12,7 @@
#include <vector>
#include "base/allocator/features.h"
+#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/atomicops.h"
#include "base/process/process_metrics.h"
#include "base/synchronization/waitable_event.h"
@@ -54,7 +55,7 @@ using testing::_;
class AllocatorShimTest : public testing::Test {
public:
- static const size_t kMaxSizeTracked = 8192;
+ static const size_t kMaxSizeTracked = 2 * base::kSystemPageSize;
AllocatorShimTest() : testing::Test() {}
static size_t Hash(const void* ptr) {
diff --git a/chromium/base/allocator/partition_allocator/OWNERS b/chromium/base/allocator/partition_allocator/OWNERS
index 374d1aed926..b0a2a850f7b 100644
--- a/chromium/base/allocator/partition_allocator/OWNERS
+++ b/chromium/base/allocator/partition_allocator/OWNERS
@@ -1,5 +1,8 @@
+ajwong@chromium.org
haraken@chromium.org
palmer@chromium.org
+tsepez@chromium.org
# TEAM: platform-architecture-dev@chromium.org
-# COMPONENT: Blink>MemoryAllocator
+# Also: security-dev@chromium.org
+# COMPONENT: Blink>MemoryAllocator>Partition
diff --git a/chromium/base/allocator/partition_allocator/address_space_randomization.cc b/chromium/base/allocator/partition_allocator/address_space_randomization.cc
index 114ad9557f0..dc90824b54c 100644
--- a/chromium/base/allocator/partition_allocator/address_space_randomization.cc
+++ b/chromium/base/allocator/partition_allocator/address_space_randomization.cc
@@ -11,6 +11,8 @@
#include "build/build_config.h"
#if defined(OS_WIN)
+#include <windows.h> // Must be in front of other Windows header files.
+
#include <VersionHelpers.h>
#endif
diff --git a/chromium/base/allocator/partition_allocator/page_allocator.cc b/chromium/base/allocator/partition_allocator/page_allocator.cc
index 87fead7b87e..61cd43b1837 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator.cc
+++ b/chromium/base/allocator/partition_allocator/page_allocator.cc
@@ -46,8 +46,6 @@ int GetAccessFlags(PageAccessibilityConfiguration page_accessibility) {
return PROT_READ | PROT_WRITE;
case PageReadExecute:
return PROT_READ | PROT_EXEC;
- case PageReadWriteExecute:
- return PROT_READ | PROT_WRITE | PROT_EXEC;
default:
NOTREACHED();
// Fall through.
@@ -74,8 +72,6 @@ int GetAccessFlags(PageAccessibilityConfiguration page_accessibility) {
return PAGE_READWRITE;
case PageReadExecute:
return PAGE_EXECUTE_READ;
- case PageReadWriteExecute:
- return PAGE_EXECUTE_READWRITE;
default:
NOTREACHED();
// Fall through.
@@ -107,22 +103,12 @@ static void* SystemAllocPages(void* hint,
DCHECK(commit || page_accessibility == PageInaccessible);
void* ret;
- // Retry failed allocations once after calling ReleaseReservation().
- bool have_retried = false;
#if defined(OS_WIN)
DWORD access_flag = GetAccessFlags(page_accessibility);
const DWORD type_flags = commit ? (MEM_RESERVE | MEM_COMMIT) : MEM_RESERVE;
- while (true) {
- ret = VirtualAlloc(hint, length, type_flags, access_flag);
- if (ret)
- break;
- if (have_retried) {
- s_allocPageErrorCode = GetLastError();
- break;
- }
- ReleaseReservation();
- have_retried = true;
- }
+ ret = VirtualAlloc(hint, length, type_flags, access_flag);
+ if (ret == nullptr)
+ s_allocPageErrorCode = GetLastError();
#else
#if defined(OS_MACOSX)
@@ -133,22 +119,33 @@ static void* SystemAllocPages(void* hint,
int fd = -1;
#endif
int access_flag = GetAccessFlags(page_accessibility);
- while (true) {
- ret = mmap(hint, length, access_flag, MAP_ANONYMOUS | MAP_PRIVATE, fd, 0);
- if (ret != MAP_FAILED)
- break;
- if (have_retried) {
- s_allocPageErrorCode = errno;
- ret = nullptr;
- break;
- }
- ReleaseReservation();
- have_retried = true;
+ ret = mmap(hint, length, access_flag, MAP_ANONYMOUS | MAP_PRIVATE, fd, 0);
+ if (ret == MAP_FAILED) {
+ s_allocPageErrorCode = errno;
+ ret = nullptr;
}
#endif
return ret;
}
+static void* AllocPagesIncludingReserved(
+ void* address,
+ size_t length,
+ PageAccessibilityConfiguration page_accessibility,
+ bool commit) {
+ void* ret = SystemAllocPages(address, length, page_accessibility, commit);
+ if (ret == nullptr) {
+ const bool cant_alloc_length = kHintIsAdvisory || address == nullptr;
+ if (cant_alloc_length) {
+ // The system cannot allocate |length| bytes. Release any reserved address
+ // space and try once more.
+ ReleaseReservation();
+ ret = SystemAllocPages(address, length, page_accessibility, commit);
+ }
+ }
+ return ret;
+}
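The new AllocPagesIncludingReserved() helper retries a failed allocation at most once, after dropping the global address-space reservation. Below is a minimal, self-contained sketch of that retry pattern; AttemptAlloc() and ReleaseEmergencyReservation() are hypothetical stand-ins for SystemAllocPages() and ReleaseReservation(), and malloc() is used only so the sketch compiles on its own.

#include <cstddef>
#include <cstdlib>

// Hypothetical stand-ins for SystemAllocPages() and ReleaseReservation().
static void* AttemptAlloc(std::size_t length) { return std::malloc(length); }
static void ReleaseEmergencyReservation() { /* drop a reserved region */ }

// Mirrors the retry rule above: try again exactly once, and only when the
// failure means the system truly cannot supply |length| bytes (the hint is
// advisory, or there was no hint at all).
static void* AllocWithEmergencyRetry(std::size_t length,
                                     bool hint_is_advisory,
                                     const void* hint) {
  void* ret = AttemptAlloc(length);
  if (ret == nullptr && (hint_is_advisory || hint == nullptr)) {
    ReleaseEmergencyReservation();
    ret = AttemptAlloc(length);
  }
  return ret;
}

int main() {
  void* p = AllocWithEmergencyRetry(4096, /*hint_is_advisory=*/true, nullptr);
  std::free(p);
  return 0;
}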
+
// Trims base to given length and alignment. Windows returns null on failure and
// frees base.
static void* TrimMapping(void* base,
@@ -166,7 +163,9 @@ static void* TrimMapping(void* base,
DCHECK(post_slack < base_length);
void* ret = base;
-#if defined(OS_POSIX) // On POSIX we can resize the allocation run.
+#if defined(OS_POSIX)
+ // On POSIX we can resize the allocation run. Release unneeded memory before
+ // and after the aligned range.
(void)page_accessibility;
if (pre_slack) {
int res = munmap(base, pre_slack);
@@ -177,8 +176,10 @@ static void* TrimMapping(void* base,
int res = munmap(reinterpret_cast<char*>(ret) + trim_length, post_slack);
CHECK(!res);
}
-#else // On Windows we can't resize the allocation run.
+#else
if (pre_slack || post_slack) {
+ // On Windows we can't resize the allocation run. Free it and retry at the
+ // aligned address within the freed range.
ret = reinterpret_cast<char*>(base) + pre_slack;
FreePages(base, base_length);
ret = SystemAllocPages(ret, trim_length, page_accessibility, commit);
@@ -207,34 +208,43 @@ void* AllocPages(void* address,
DCHECK(!(reinterpret_cast<uintptr_t>(address) & align_offset_mask));
// If the client passed null as the address, choose a good one.
- if (!address) {
+ if (address == nullptr) {
address = GetRandomPageBase();
address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) &
align_base_mask);
}
// First try to force an exact-size, aligned allocation from our random base.
- for (int count = 0; count < 3; ++count) {
- void* ret = SystemAllocPages(address, length, page_accessibility, commit);
- if (kHintIsAdvisory || ret) {
+#if defined(ARCH_CPU_32_BITS)
+ // On 32 bit systems, first try one random aligned address, and then try an
+ // aligned address derived from the value of |ret|.
+ constexpr int kExactSizeTries = 2;
+#else
+ // On 64 bit systems, try 3 random aligned addresses.
+ constexpr int kExactSizeTries = 3;
+#endif
+ for (int i = 0; i < kExactSizeTries; ++i) {
+ void* ret = AllocPagesIncludingReserved(address, length, page_accessibility,
+ commit);
+ if (ret != nullptr) {
// If the alignment is to our liking, we're done.
if (!(reinterpret_cast<uintptr_t>(ret) & align_offset_mask))
return ret;
+ // Free the memory and try again.
FreePages(ret, length);
-#if defined(ARCH_CPU_32_BITS)
- address = reinterpret_cast<void*>(
- (reinterpret_cast<uintptr_t>(ret) + align) & align_base_mask);
-#endif
- } else if (!address) { // We know we're OOM when an unhinted allocation
- // fails.
- return nullptr;
} else {
-#if defined(ARCH_CPU_32_BITS)
- address = reinterpret_cast<char*>(address) + align;
-#endif
+ // |ret| is null; if this try was unhinted, we're OOM.
+ if (kHintIsAdvisory || address == nullptr)
+ return nullptr;
}
-#if !defined(ARCH_CPU_32_BITS)
+#if defined(ARCH_CPU_32_BITS)
+ // For small address spaces, try the first aligned address >= |ret|. Note
+ // |ret| may be null, in which case |address| becomes null.
+ address = reinterpret_cast<void*>(
+ (reinterpret_cast<uintptr_t>(ret) + align_offset_mask) &
+ align_base_mask);
+#else // defined(ARCH_CPU_64_BITS)
// Keep trying random addresses on systems that have a large address space.
address = GetRandomPageBase();
address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) &
@@ -242,20 +252,21 @@ void* AllocPages(void* address,
#endif
}
- // Map a larger allocation so we can force alignment, but continue randomizing
- // only on 64-bit POSIX.
+ // Make a larger allocation so we can force alignment.
size_t try_length = length + (align - kPageAllocationGranularity);
CHECK(try_length >= length);
void* ret;
do {
- // Don't continue to burn cycles on mandatory hints (Windows).
+ // Continue randomizing only on POSIX.
address = kHintIsAdvisory ? GetRandomPageBase() : nullptr;
- ret = SystemAllocPages(address, try_length, page_accessibility, commit);
+ ret = AllocPagesIncludingReserved(address, try_length, page_accessibility,
+ commit);
// The retries are for Windows, where a race can steal our mapping on
// resize.
- } while (ret && (ret = TrimMapping(ret, try_length, length, align,
- page_accessibility, commit)) == nullptr);
+ } while (ret != nullptr &&
+ (ret = TrimMapping(ret, try_length, length, align,
+ page_accessibility, commit)) == nullptr);
return ret;
}
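The fallback path above over-allocates by (align - kPageAllocationGranularity) and then trims, which guarantees that an align-aligned range of length bytes fits somewhere inside the raw mapping, provided the raw mapping itself is granularity-aligned. A small worked example of that arithmetic, using made-up constants and a made-up base address rather than the real values:

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  constexpr uintptr_t kGranularity = 4096;         // assumed page granularity
  constexpr uintptr_t kAlign = 1 << 21;            // e.g. a 2 MiB alignment
  constexpr uintptr_t kLength = 4 * kGranularity;  // requested size

  uintptr_t base = 0x40003000;                     // granularity-aligned base
  uintptr_t try_length = kLength + (kAlign - kGranularity);
  uintptr_t aligned = (base + kAlign - 1) & ~(kAlign - 1);

  uintptr_t pre_slack = aligned - base;                     // trimmed in front
  uintptr_t post_slack = try_length - pre_slack - kLength;  // trimmed at end
  assert(pre_slack + kLength + post_slack == try_length);
  assert(aligned + kLength <= base + try_length);
  printf("pre_slack=%llu post_slack=%llu\n",
         (unsigned long long)pre_slack, (unsigned long long)post_slack);
  return 0;
}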
@@ -367,29 +378,24 @@ void DiscardSystemPages(void* address, size_t length) {
}
bool ReserveAddressSpace(size_t size) {
- // Don't take |s_reserveLock| while allocating, since a failure would invoke
- // ReleaseReservation and deadlock.
- void* mem = AllocPages(nullptr, size, kPageAllocationGranularity,
- PageInaccessible, false);
- // We guarantee this alignment when reserving address space.
- DCHECK(!(reinterpret_cast<uintptr_t>(mem) &
- kPageAllocationGranularityOffsetMask));
- if (mem != nullptr) {
- {
- subtle::SpinLock::Guard guard(s_reserveLock.Get());
- if (s_reservation_address == nullptr) {
- s_reservation_address = mem;
- s_reservation_size = size;
- return true;
- }
+ // To avoid deadlock, call only SystemAllocPages.
+ subtle::SpinLock::Guard guard(s_reserveLock.Get());
+ if (s_reservation_address == nullptr) {
+ void* mem = SystemAllocPages(nullptr, size, PageInaccessible, false);
+ if (mem != nullptr) {
+ // We guarantee this alignment when reserving address space.
+ DCHECK(!(reinterpret_cast<uintptr_t>(mem) &
+ kPageAllocationGranularityOffsetMask));
+ s_reservation_address = mem;
+ s_reservation_size = size;
+ return true;
}
- // There was already a reservation.
- FreePages(mem, size);
}
return false;
}
void ReleaseReservation() {
+ // To avoid deadlock, call only FreePages.
subtle::SpinLock::Guard guard(s_reserveLock.Get());
if (s_reservation_address != nullptr) {
FreePages(s_reservation_address, s_reservation_size);
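The rewritten ReserveAddressSpace()/ReleaseReservation() pair now holds the spin lock for the whole operation, so under the lock it may only call primitives that can never re-enter ReleaseReservation(); calling AllocPages() there could fail, call ReleaseReservation() itself, and deadlock on the non-recursive lock. A minimal sketch of that locking discipline, assuming std::mutex in place of base::subtle::SpinLock and malloc()/free() in place of SystemAllocPages()/FreePages():

#include <cstddef>
#include <cstdlib>
#include <mutex>

namespace {
std::mutex g_reserve_lock;
void* g_reservation_address = nullptr;
std::size_t g_reservation_size = 0;
}  // namespace

bool ReserveAddressSpaceSketch(std::size_t size) {
  std::lock_guard<std::mutex> guard(g_reserve_lock);
  if (g_reservation_address == nullptr) {
    // Only a leaf allocation primitive is safe here; anything that might
    // call back into ReleaseReservationSketch() would self-deadlock.
    void* mem = std::malloc(size);  // stand-in for SystemAllocPages()
    if (mem != nullptr) {
      g_reservation_address = mem;
      g_reservation_size = size;
      return true;
    }
  }
  return false;
}

void ReleaseReservationSketch() {
  std::lock_guard<std::mutex> guard(g_reserve_lock);
  if (g_reservation_address != nullptr) {
    std::free(g_reservation_address);  // stand-in for FreePages()
    g_reservation_address = nullptr;
    g_reservation_size = 0;
  }
}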
diff --git a/chromium/base/allocator/partition_allocator/page_allocator.h b/chromium/base/allocator/partition_allocator/page_allocator.h
index 297d817cd9f..a39b4e3013e 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator.h
+++ b/chromium/base/allocator/partition_allocator/page_allocator.h
@@ -17,6 +17,8 @@ namespace base {
#if defined(OS_WIN)
static const size_t kPageAllocationGranularityShift = 16; // 64KB
+#elif defined(_MIPS_ARCH_LOONGSON)
+static const size_t kPageAllocationGranularityShift = 14; // 16KB
#else
static const size_t kPageAllocationGranularityShift = 12; // 4KB
#endif
@@ -27,9 +29,11 @@ static const size_t kPageAllocationGranularityOffsetMask =
static const size_t kPageAllocationGranularityBaseMask =
~kPageAllocationGranularityOffsetMask;
-// All Blink-supported systems have 4096 sized system pages and can handle
-// permissions and commit / decommit at this granularity.
+#if defined(_MIPS_ARCH_LOONGSON)
+static const size_t kSystemPageSize = 16384;
+#else
static const size_t kSystemPageSize = 4096;
+#endif
static const size_t kSystemPageOffsetMask = kSystemPageSize - 1;
static_assert((kSystemPageSize & (kSystemPageSize - 1)) == 0,
"kSystemPageSize must be power of 2");
@@ -39,7 +43,6 @@ enum PageAccessibilityConfiguration {
PageInaccessible,
PageReadWrite,
PageReadExecute,
- PageReadWriteExecute,
};
// Allocate one or more pages.
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.cc b/chromium/base/allocator/partition_allocator/partition_alloc.cc
index 8afd6c07e50..0ca9522bcb0 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc.cc
@@ -56,10 +56,6 @@ PartitionBucket g_sentinel_bucket;
} // namespace
-PartitionPage* GetSentinelPageForTesting() {
- return &g_sentinel_page;
-}
-
PartitionRootBase::PartitionRootBase() = default;
PartitionRootBase::~PartitionRootBase() = default;
PartitionRoot::PartitionRoot() = default;
@@ -78,19 +74,15 @@ PartitionAllocHooks::AllocationHook* PartitionAllocHooks::allocation_hook_ =
nullptr;
PartitionAllocHooks::FreeHook* PartitionAllocHooks::free_hook_ = nullptr;
-// Find the best number of System Pages to allocate for |size| to minimize
-// wasted space. Uses a heuristic that looks at number of bytes wasted after
-// the last slot and attempts to account for the PTE usage of each System Page.
-//
// TODO(ajwong): This seems to interact badly with
-// PartitionBucketPartitionPages() which rounds the value from this up to a
+// get_pages_per_slot_span() which rounds the value from this up to a
// multiple of kNumSystemPagesPerPartitionPage (aka 4) anyways.
// http://crbug.com/776537
//
// TODO(ajwong): The waste calculation seems wrong. The PTE usage should cover
// both used and unused pages.
// http://crbug.com/776537
-static uint8_t PartitionBucketNumSystemPages(size_t size) {
+uint8_t PartitionBucket::get_system_pages_per_slot_span() {
// This works out reasonably for the current bucket sizes of the generic
// allocator, and the current values of partition page size and constants.
// Specifically, we have enough room to always pack the slots perfectly into
@@ -103,23 +95,23 @@ static uint8_t PartitionBucketNumSystemPages(size_t size) {
// to using fewer system pages.
double best_waste_ratio = 1.0f;
uint16_t best_pages = 0;
- if (size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
+ if (this->slot_size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
// TODO(ajwong): Why is there a DCHECK here for this?
// http://crbug.com/776537
- DCHECK(!(size % kSystemPageSize));
- best_pages = static_cast<uint16_t>(size / kSystemPageSize);
+ DCHECK(!(this->slot_size % kSystemPageSize));
+ best_pages = static_cast<uint16_t>(this->slot_size / kSystemPageSize);
// TODO(ajwong): Should this be checking against
// kMaxSystemPagesPerSlotSpan or numeric_limits<uint8_t>::max?
// http://crbug.com/776537
CHECK(best_pages < (1 << 8));
return static_cast<uint8_t>(best_pages);
}
- DCHECK(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
+ DCHECK(this->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
for (uint16_t i = kNumSystemPagesPerPartitionPage - 1;
i <= kMaxSystemPagesPerSlotSpan; ++i) {
size_t page_size = kSystemPageSize * i;
- size_t num_slots = page_size / size;
- size_t waste = page_size - (num_slots * size);
+ size_t num_slots = page_size / this->slot_size;
+ size_t waste = page_size - (num_slots * this->slot_size);
// Leaving a page unfaulted is not free; the page will occupy an empty page
// table entry. Make a simple attempt to account for that.
//
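A stand-alone sketch of the waste heuristic this method implements, using made-up constants and a crude per-page cost as a stand-in for the PTE term; it only illustrates how the loop trades tail waste against the number of system pages and is not the exact Chromium implementation:

#include <cstddef>
#include <cstdio>

int main() {
  constexpr std::size_t kSystemPageSize = 4096;               // assumed
  constexpr std::size_t kNumSystemPagesPerPartitionPage = 4;  // assumed
  constexpr std::size_t kMaxSystemPagesPerSlotSpan = 4;       // assumed

  const std::size_t slot_size = 320;  // example bucket size
  double best_waste_ratio = 1.0;
  std::size_t best_pages = 0;
  for (std::size_t i = kNumSystemPagesPerPartitionPage - 1;
       i <= kMaxSystemPagesPerSlotSpan; ++i) {
    std::size_t page_size = kSystemPageSize * i;
    std::size_t num_slots = page_size / slot_size;
    std::size_t waste = page_size - num_slots * slot_size;
    // Crude stand-in for the page-table-entry cost of each system page.
    waste += sizeof(void*) * i;
    double waste_ratio = static_cast<double>(waste) / page_size;
    if (waste_ratio < best_waste_ratio) {
      best_waste_ratio = waste_ratio;
      best_pages = i;
    }
  }
  printf("slot_size=%zu -> %zu system pages per slot span\n", slot_size,
         best_pages);
  return 0;
}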
@@ -162,14 +154,13 @@ static void PartitionAllocBaseInit(PartitionRootBase* root) {
root->inverted_self = ~reinterpret_cast<uintptr_t>(root);
}
-static void PartitionBucketInitBase(PartitionBucket* bucket,
- PartitionRootBase* root) {
- bucket->active_pages_head = &g_sentinel_page;
- bucket->empty_pages_head = nullptr;
- bucket->decommitted_pages_head = nullptr;
- bucket->num_full_pages = 0;
- bucket->num_system_pages_per_slot_span =
- PartitionBucketNumSystemPages(bucket->slot_size);
+void PartitionBucket::Init(uint32_t new_slot_size) {
+ slot_size = new_slot_size;
+ active_pages_head = &g_sentinel_page;
+ empty_pages_head = nullptr;
+ decommitted_pages_head = nullptr;
+ num_full_pages = 0;
+ num_system_pages_per_slot_span = get_system_pages_per_slot_span();
}
void PartitionAllocGlobalInit(void (*oom_handling_function)()) {
@@ -186,10 +177,9 @@ void PartitionRoot::Init(size_t num_buckets, size_t max_allocation) {
for (i = 0; i < this->num_buckets; ++i) {
PartitionBucket* bucket = &this->buckets()[i];
if (!i)
- bucket->slot_size = kAllocationGranularity;
+ bucket->Init(kAllocationGranularity);
else
- bucket->slot_size = i << kBucketShift;
- PartitionBucketInitBase(bucket, this);
+ bucket->Init(i << kBucketShift);
}
}
@@ -238,8 +228,7 @@ void PartitionRootGeneric::Init() {
PartitionBucket* bucket = &this->buckets[0];
for (i = 0; i < kGenericNumBucketedOrders; ++i) {
for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
- bucket->slot_size = current_size;
- PartitionBucketInitBase(bucket, this);
+ bucket->Init(current_size);
// Disable pseudo buckets so that touching them faults.
if (current_size % kGenericSmallestBucket)
bucket->active_pages_head = nullptr;
@@ -260,7 +249,7 @@ void PartitionRootGeneric::Init() {
// Use the bucket of the finest granularity for malloc(0) etc.
*bucketPtr++ = &this->buckets[0];
} else if (order > kGenericMaxBucketedOrder) {
- *bucketPtr++ = &g_sentinel_bucket;
+ *bucketPtr++ = PartitionBucket::get_sentinel_bucket();
} else {
PartitionBucket* validBucket = bucket;
// Skip over invalid buckets.
@@ -276,7 +265,7 @@ void PartitionRootGeneric::Init() {
((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
// And there's one last bucket lookup that will be hit for e.g. malloc(-1),
// which tries to overflow to a non-existent order.
- *bucketPtr = &g_sentinel_bucket;
+ *bucketPtr = PartitionBucket::get_sentinel_bucket();
}
#if !defined(ARCH_CPU_64_BITS)
@@ -305,47 +294,41 @@ static NOINLINE void PartitionExcessiveAllocationSize() {
OOM_CRASH();
}
-static NOINLINE void PartitionBucketFull() {
+NOINLINE void PartitionBucket::OnFull() {
OOM_CRASH();
}
-// PartitionPageStateIs*
-// Note that it's only valid to call these functions on pages found on one of
-// the page lists. Specifically, you can't call these functions on full pages
-// that were detached from the active list.
-static bool ALWAYS_INLINE
-PartitionPageStateIsActive(const PartitionPage* page) {
- DCHECK(page != &g_sentinel_page);
- DCHECK(!page->page_offset);
- return (page->num_allocated_slots > 0 &&
- (page->freelist_head || page->num_unprovisioned_slots));
+ALWAYS_INLINE bool PartitionPage::is_active() const {
+ DCHECK(this != &g_sentinel_page);
+ DCHECK(!page_offset);
+ return (num_allocated_slots > 0 &&
+ (freelist_head || num_unprovisioned_slots));
}
-static bool ALWAYS_INLINE PartitionPageStateIsFull(const PartitionPage* page) {
- DCHECK(page != &g_sentinel_page);
- DCHECK(!page->page_offset);
- bool ret = (page->num_allocated_slots == page->bucket->get_slots_per_span());
+ALWAYS_INLINE bool PartitionPage::is_full() const {
+ DCHECK(this != &g_sentinel_page);
+ DCHECK(!page_offset);
+ bool ret = (num_allocated_slots == bucket->get_slots_per_span());
if (ret) {
- DCHECK(!page->freelist_head);
- DCHECK(!page->num_unprovisioned_slots);
+ DCHECK(!freelist_head);
+ DCHECK(!num_unprovisioned_slots);
}
return ret;
}
-static bool ALWAYS_INLINE PartitionPageStateIsEmpty(const PartitionPage* page) {
- DCHECK(page != &g_sentinel_page);
- DCHECK(!page->page_offset);
- return (!page->num_allocated_slots && page->freelist_head);
+ALWAYS_INLINE bool PartitionPage::is_empty() const {
+ DCHECK(this != &g_sentinel_page);
+ DCHECK(!page_offset);
+ return (!num_allocated_slots && freelist_head);
}
-static bool ALWAYS_INLINE
-PartitionPageStateIsDecommitted(const PartitionPage* page) {
- DCHECK(page != &g_sentinel_page);
- DCHECK(!page->page_offset);
- bool ret = (!page->num_allocated_slots && !page->freelist_head);
+ALWAYS_INLINE bool PartitionPage::is_decommitted() const {
+ DCHECK(this != &g_sentinel_page);
+ DCHECK(!page_offset);
+ bool ret = (!num_allocated_slots && !freelist_head);
if (ret) {
- DCHECK(!page->num_unprovisioned_slots);
- DCHECK(page->empty_cache_index == -1);
+ DCHECK(!num_unprovisioned_slots);
+ DCHECK(empty_cache_index == -1);
}
return ret;
}
@@ -380,7 +363,7 @@ static ALWAYS_INLINE void PartitionRecommitSystemPages(PartitionRootBase* root,
PartitionIncreaseCommittedPages(root, length);
}
-static ALWAYS_INLINE void* PartitionAllocPartitionPages(
+ALWAYS_INLINE void* PartitionBucket::AllocNewSlotSpan(
PartitionRootBase* root,
int flags,
uint16_t num_partition_pages) {
@@ -498,37 +481,28 @@ static ALWAYS_INLINE void* PartitionAllocPartitionPages(
return ret;
}
-// Returns a natural number of PartitionPages (calculated by
-// PartitionBucketNumSystemPages()) to allocate from the current SuperPage
-// when the bucket runs out of slots.
-static ALWAYS_INLINE uint16_t
-PartitionBucketPartitionPages(const PartitionBucket* bucket) {
+ALWAYS_INLINE uint16_t PartitionBucket::get_pages_per_slot_span() {
// Rounds up to nearest multiple of kNumSystemPagesPerPartitionPage.
- return (bucket->num_system_pages_per_slot_span +
+ return (num_system_pages_per_slot_span +
(kNumSystemPagesPerPartitionPage - 1)) /
kNumSystemPagesPerPartitionPage;
}
-static ALWAYS_INLINE void PartitionPageReset(PartitionPage* page) {
- DCHECK(PartitionPageStateIsDecommitted(page));
+ALWAYS_INLINE void PartitionPage::Reset() {
+ DCHECK(this->is_decommitted());
- page->num_unprovisioned_slots = page->bucket->get_slots_per_span();
- DCHECK(page->num_unprovisioned_slots);
+ num_unprovisioned_slots = bucket->get_slots_per_span();
+ DCHECK(num_unprovisioned_slots);
- page->next_page = nullptr;
+ next_page = nullptr;
}
-// Each bucket allocates a slot span when it runs out of slots.
-// A slot span's size is equal to PartitionBucketPartitionPages(bucket)
-// number of PartitionPages. This function initializes all pages within the
-// span.
-static ALWAYS_INLINE void PartitionPageSetup(PartitionPage* page,
- PartitionBucket* bucket) {
+ALWAYS_INLINE void PartitionBucket::InitializeSlotSpan(PartitionPage* page) {
// The bucket never changes. We set it up once.
- page->bucket = bucket;
+ page->bucket = this;
page->empty_cache_index = -1;
- PartitionPageReset(page);
+ page->Reset();
// If this page has just a single slot, do not set up page offsets for any
// page metadata other than the first one. This ensures that attempts to
@@ -536,7 +510,7 @@ static ALWAYS_INLINE void PartitionPageSetup(PartitionPage* page,
if (page->num_unprovisioned_slots == 1)
return;
- uint16_t num_partition_pages = PartitionBucketPartitionPages(bucket);
+ uint16_t num_partition_pages = get_pages_per_slot_span();
char* page_char_ptr = reinterpret_cast<char*>(page);
for (uint16_t i = 1; i < num_partition_pages; ++i) {
page_char_ptr += kPageMetadataSize;
@@ -546,21 +520,19 @@ static ALWAYS_INLINE void PartitionPageSetup(PartitionPage* page,
}
}
-static ALWAYS_INLINE char* PartitionPageAllocAndFillFreelist(
- PartitionPage* page) {
- DCHECK(page != &g_sentinel_page);
+ALWAYS_INLINE char* PartitionBucket::AllocAndFillFreelist(PartitionPage* page) {
+ DCHECK(page != PartitionPage::get_sentinel_page());
uint16_t num_slots = page->num_unprovisioned_slots;
DCHECK(num_slots);
- PartitionBucket* bucket = page->bucket;
// We should only get here when _every_ slot is either used or unprovisioned.
// (The third state is "on the freelist". If we have a non-empty freelist, we
// should not get here.)
- DCHECK(num_slots + page->num_allocated_slots == bucket->get_slots_per_span());
+ DCHECK(num_slots + page->num_allocated_slots == this->get_slots_per_span());
// Similarly, make explicitly sure that the freelist is empty.
DCHECK(!page->freelist_head);
DCHECK(page->num_allocated_slots >= 0);
- size_t size = bucket->slot_size;
+ size_t size = this->slot_size;
char* base = reinterpret_cast<char*>(PartitionPage::ToPointer(page));
char* return_object = base + (size * page->num_allocated_slots);
char* firstFreelistPointer = return_object + size;
@@ -605,80 +577,72 @@ static ALWAYS_INLINE char* PartitionPageAllocAndFillFreelist(
freelist_pointer += size;
PartitionFreelistEntry* next_entry =
reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
- entry->next = PartitionFreelistMask(next_entry);
+ entry->next = PartitionFreelistEntry::Transform(next_entry);
entry = next_entry;
}
- entry->next = PartitionFreelistMask(nullptr);
+ entry->next = PartitionFreelistEntry::Transform(nullptr);
} else {
page->freelist_head = nullptr;
}
return return_object;
}
-// This helper function scans a bucket's active page list for a suitable new
-// active page.
-// When it finds a suitable new active page (one that has free slots and is not
-// empty), it is set as the new active page. If there is no suitable new
-// active page, the current active page is set to &g_sentinel_page.
-// As potential pages are scanned, they are tidied up according to their state.
-// Empty pages are swept on to the empty page list, decommitted pages on to the
-// decommitted page list and full pages are unlinked from any list.
-static bool PartitionSetNewActivePage(PartitionBucket* bucket) {
- PartitionPage* page = bucket->active_pages_head;
- if (page == &g_sentinel_page)
+bool PartitionBucket::SetNewActivePage() {
+ PartitionPage* page = this->active_pages_head;
+ if (page == PartitionPage::get_sentinel_page())
return false;
PartitionPage* next_page;
for (; page; page = next_page) {
next_page = page->next_page;
- DCHECK(page->bucket == bucket);
- DCHECK(page != bucket->empty_pages_head);
- DCHECK(page != bucket->decommitted_pages_head);
+ DCHECK(page->bucket == this);
+ DCHECK(page != this->empty_pages_head);
+ DCHECK(page != this->decommitted_pages_head);
- // Deal with empty and decommitted pages.
- if (LIKELY(PartitionPageStateIsActive(page))) {
+ if (LIKELY(page->is_active())) {
// This page is usable because it has freelist entries, or has
// unprovisioned slots we can create freelist entries from.
- bucket->active_pages_head = page;
+ this->active_pages_head = page;
return true;
}
- if (LIKELY(PartitionPageStateIsEmpty(page))) {
- page->next_page = bucket->empty_pages_head;
- bucket->empty_pages_head = page;
- } else if (LIKELY(PartitionPageStateIsDecommitted(page))) {
- page->next_page = bucket->decommitted_pages_head;
- bucket->decommitted_pages_head = page;
+
+ // Deal with empty and decommitted pages.
+ if (LIKELY(page->is_empty())) {
+ page->next_page = this->empty_pages_head;
+ this->empty_pages_head = page;
+ } else if (LIKELY(page->is_decommitted())) {
+ page->next_page = this->decommitted_pages_head;
+ this->decommitted_pages_head = page;
} else {
- DCHECK(PartitionPageStateIsFull(page));
+ DCHECK(page->is_full());
// If we get here, we found a full page. Skip over it too, and also
// tag it as full (via a negative value). We need it tagged so that
// free'ing can tell, and move it back into the active page list.
page->num_allocated_slots = -page->num_allocated_slots;
- ++bucket->num_full_pages;
+ ++this->num_full_pages;
// num_full_pages is a uint16_t for efficient packing so guard against
// overflow to be safe.
- if (UNLIKELY(!bucket->num_full_pages))
- PartitionBucketFull();
+ if (UNLIKELY(!this->num_full_pages))
+ OnFull();
// Not necessary but might help stop accidents.
page->next_page = nullptr;
}
}
- bucket->active_pages_head = &g_sentinel_page;
+ this->active_pages_head = PartitionPage::get_sentinel_page();
return false;
}
-static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent(
+ALWAYS_INLINE PartitionDirectMapExtent* PartitionDirectMapExtent::FromPage(
PartitionPage* page) {
DCHECK(page->bucket->is_direct_mapped());
return reinterpret_cast<PartitionDirectMapExtent*>(
reinterpret_cast<char*>(page) + 3 * kPageMetadataSize);
}
-static ALWAYS_INLINE void PartitionPageSetRawSize(PartitionPage* page,
- size_t size) {
- size_t* raw_size_ptr = page->get_raw_size_ptr();
+ALWAYS_INLINE void PartitionPage::set_raw_size(size_t size) {
+ size_t* raw_size_ptr = get_raw_size_ptr();
if (UNLIKELY(raw_size_ptr != nullptr))
*raw_size_ptr = size;
}
@@ -744,7 +708,7 @@ static ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root,
page->freelist_head = reinterpret_cast<PartitionFreelistEntry*>(slot);
PartitionFreelistEntry* next_entry =
reinterpret_cast<PartitionFreelistEntry*>(slot);
- next_entry->next = PartitionFreelistMask(nullptr);
+ next_entry->next = PartitionFreelistEntry::Transform(nullptr);
DCHECK(!bucket->active_pages_head);
DCHECK(!bucket->empty_pages_head);
@@ -753,7 +717,8 @@ static ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root,
DCHECK(!bucket->num_full_pages);
bucket->slot_size = size;
- PartitionDirectMapExtent* map_extent = partitionPageToDirectMapExtent(page);
+ PartitionDirectMapExtent* map_extent =
+ PartitionDirectMapExtent::FromPage(page);
map_extent->map_size = map_size - kPartitionPageSize - kSystemPageSize;
map_extent->bucket = bucket;
@@ -768,8 +733,9 @@ static ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root,
}
static ALWAYS_INLINE void PartitionDirectUnmap(PartitionPage* page) {
- PartitionRootBase* root = PartitionPageToRoot(page);
- const PartitionDirectMapExtent* extent = partitionPageToDirectMapExtent(page);
+ PartitionRootBase* root = PartitionRootBase::FromPage(page);
+ const PartitionDirectMapExtent* extent =
+ PartitionDirectMapExtent::FromPage(page);
size_t unmap_size = extent->map_size;
// Maintain the doubly-linked list of all direct mappings.
@@ -817,13 +783,13 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
// branches.
//
// Note: The ordering of the conditionals matter! In particular,
- // PartitionSetNewActivePage() has a side-effect even when returning
+ // SetNewActivePage() has a side-effect even when returning
// false where it sweeps the active page list and may move things into
// the empty or decommitted lists which affects the subsequent conditional.
bool returnNull = flags & PartitionAllocReturnNull;
if (UNLIKELY(this->is_direct_mapped())) {
DCHECK(size > kGenericMaxBucketed);
- DCHECK(this == &g_sentinel_bucket);
+ DCHECK(this == get_sentinel_bucket());
DCHECK(this->active_pages_head == &g_sentinel_page);
if (size > kGenericMaxDirectMapped) {
if (returnNull)
@@ -831,10 +797,10 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
PartitionExcessiveAllocationSize();
}
new_page = PartitionDirectMap(root, flags, size);
- } else if (LIKELY(PartitionSetNewActivePage(this))) {
+ } else if (LIKELY(this->SetNewActivePage())) {
// First, did we find an active page in the active pages list?
new_page = this->active_pages_head;
- DCHECK(PartitionPageStateIsActive(new_page));
+ DCHECK(new_page->is_active());
} else if (LIKELY(this->empty_pages_head != nullptr) ||
LIKELY(this->decommitted_pages_head != nullptr)) {
// Second, look in our lists of empty and decommitted pages.
@@ -842,15 +808,14 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
// empty page might have been decommitted.
while (LIKELY((new_page = this->empty_pages_head) != nullptr)) {
DCHECK(new_page->bucket == this);
- DCHECK(PartitionPageStateIsEmpty(new_page) ||
- PartitionPageStateIsDecommitted(new_page));
+ DCHECK(new_page->is_empty() || new_page->is_decommitted());
this->empty_pages_head = new_page->next_page;
// Accept the empty page unless it got decommitted.
if (new_page->freelist_head) {
new_page->next_page = nullptr;
break;
}
- DCHECK(PartitionPageStateIsDecommitted(new_page));
+ DCHECK(new_page->is_decommitted());
new_page->next_page = this->decommitted_pages_head;
this->decommitted_pages_head = new_page;
}
@@ -858,22 +823,21 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
LIKELY(this->decommitted_pages_head != nullptr)) {
new_page = this->decommitted_pages_head;
DCHECK(new_page->bucket == this);
- DCHECK(PartitionPageStateIsDecommitted(new_page));
+ DCHECK(new_page->is_decommitted());
this->decommitted_pages_head = new_page->next_page;
void* addr = PartitionPage::ToPointer(new_page);
PartitionRecommitSystemPages(root, addr,
new_page->bucket->get_bytes_per_span());
- PartitionPageReset(new_page);
+ new_page->Reset();
}
DCHECK(new_page);
} else {
// Third. If we get here, we need a brand new page.
- uint16_t num_partition_pages = PartitionBucketPartitionPages(this);
- void* rawPages =
- PartitionAllocPartitionPages(root, flags, num_partition_pages);
+ uint16_t num_partition_pages = this->get_pages_per_slot_span();
+ void* rawPages = AllocNewSlotSpan(root, flags, num_partition_pages);
if (LIKELY(rawPages != nullptr)) {
new_page = PartitionPage::FromPointerNoAlignmentCheck(rawPages);
- PartitionPageSetup(new_page, this);
+ InitializeSlotSpan(new_page);
}
}
@@ -889,27 +853,32 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
// It seems like in many of the conditional branches above, |this| ==
// |new_page->bucket|. Maybe pull this into another function?
PartitionBucket* bucket = new_page->bucket;
- DCHECK(bucket != &g_sentinel_bucket);
+ DCHECK(bucket != get_sentinel_bucket());
bucket->active_pages_head = new_page;
- PartitionPageSetRawSize(new_page, size);
+ new_page->set_raw_size(size);
// If we found an active page with free slots, or an empty page, we have a
// usable freelist head.
if (LIKELY(new_page->freelist_head != nullptr)) {
PartitionFreelistEntry* entry = new_page->freelist_head;
- PartitionFreelistEntry* new_head = PartitionFreelistMask(entry->next);
+ PartitionFreelistEntry* new_head =
+ PartitionFreelistEntry::Transform(entry->next);
new_page->freelist_head = new_head;
new_page->num_allocated_slots++;
return entry;
}
// Otherwise, we need to build the freelist.
DCHECK(new_page->num_unprovisioned_slots);
- return PartitionPageAllocAndFillFreelist(new_page);
+ return AllocAndFillFreelist(new_page);
+}
+
+PartitionBucket* PartitionBucket::get_sentinel_bucket() {
+ return &g_sentinel_bucket;
}
static ALWAYS_INLINE void PartitionDecommitPage(PartitionRootBase* root,
PartitionPage* page) {
- DCHECK(PartitionPageStateIsEmpty(page));
+ DCHECK(page->is_empty());
DCHECK(!page->bucket->is_direct_mapped());
void* addr = PartitionPage::ToPointer(page);
PartitionDecommitSystemPages(root, addr, page->bucket->get_bytes_per_span());
@@ -922,7 +891,7 @@ static ALWAYS_INLINE void PartitionDecommitPage(PartitionRootBase* root,
// 32 bytes in size.
page->freelist_head = nullptr;
page->num_unprovisioned_slots = 0;
- DCHECK(PartitionPageStateIsDecommitted(page));
+ DCHECK(page->is_decommitted());
}
static void PartitionDecommitPageIfPossible(PartitionRootBase* root,
@@ -931,13 +900,13 @@ static void PartitionDecommitPageIfPossible(PartitionRootBase* root,
DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans);
DCHECK(page == root->global_empty_page_ring[page->empty_cache_index]);
page->empty_cache_index = -1;
- if (PartitionPageStateIsEmpty(page))
+ if (page->is_empty())
PartitionDecommitPage(root, page);
}
static ALWAYS_INLINE void PartitionRegisterEmptyPage(PartitionPage* page) {
- DCHECK(PartitionPageStateIsEmpty(page));
- PartitionRootBase* root = PartitionPageToRoot(page);
+ DCHECK(page->is_empty());
+ PartitionRootBase* root = PartitionRootBase::FromPage(page);
// If the page is already registered as empty, give it another life.
if (page->empty_cache_index != -1) {
@@ -975,6 +944,10 @@ static void PartitionDecommitEmptyPages(PartitionRootBase* root) {
}
}
+PartitionPage* PartitionPage::get_sentinel_page() {
+ return &g_sentinel_page;
+}
+
void PartitionPage::FreeSlowPath() {
DCHECK(this != &g_sentinel_page);
if (LIKELY(this->num_allocated_slots == 0)) {
@@ -986,10 +959,10 @@ void PartitionPage::FreeSlowPath() {
// If it's the current active page, change it. We bounce the page to
// the empty list as a force towards defragmentation.
if (LIKELY(this == bucket->active_pages_head))
- PartitionSetNewActivePage(bucket);
+ bucket->SetNewActivePage();
DCHECK(bucket->active_pages_head != this);
- PartitionPageSetRawSize(this, 0);
+ set_raw_size(0);
DCHECK(!get_raw_size());
PartitionRegisterEmptyPage(this);
@@ -1040,7 +1013,7 @@ bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
char* char_ptr = static_cast<char*>(PartitionPage::ToPointer(page));
if (new_size < current_size) {
- size_t map_size = partitionPageToDirectMapExtent(page)->map_size;
+ size_t map_size = PartitionDirectMapExtent::FromPage(page)->map_size;
// Don't reallocate in-place if new size is less than 80 % of the full
// map size, to avoid holding on to too much unused address space.
@@ -1052,7 +1025,7 @@ bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
PartitionDecommitSystemPages(root, char_ptr + new_size, decommitSize);
CHECK(SetSystemPagesAccess(char_ptr + new_size, decommitSize,
PageInaccessible));
- } else if (new_size <= partitionPageToDirectMapExtent(page)->map_size) {
+ } else if (new_size <= PartitionDirectMapExtent::FromPage(page)->map_size) {
// Grow within the actually allocated memory. Just need to make the
// pages accessible again.
size_t recommit_size = new_size - current_size;
@@ -1074,7 +1047,7 @@ bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
PartitionCookieWriteValue(char_ptr + raw_size - kCookieSize);
#endif
- PartitionPageSetRawSize(page, raw_size);
+ page->set_raw_size(raw_size);
DCHECK(page->get_raw_size() == raw_size);
page->bucket->slot_size = new_size;
@@ -1122,7 +1095,7 @@ void* PartitionRootGeneric::Realloc(void* ptr,
// Trying to allocate a block of size new_size would give us a block of
// the same size as the one we've already got, so re-use the allocation
// after updating statistics (and cookies, if present).
- PartitionPageSetRawSize(page, PartitionCookieSizeAdjustAdd(new_size));
+ page->set_raw_size(PartitionCookieSizeAdjustAdd(new_size));
#if DCHECK_IS_ON()
// Write a new trailing cookie when it is possible to keep track of
// |new_size| via the raw size pointer.
@@ -1184,14 +1157,14 @@ static size_t PartitionPurgePage(PartitionPage* page, bool discard) {
size_t slotIndex = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
DCHECK(slotIndex < num_slots);
slot_usage[slotIndex] = 0;
- entry = PartitionFreelistMask(entry->next);
+ entry = PartitionFreelistEntry::Transform(entry->next);
#if !defined(OS_WIN)
// If we have a slot where the masked freelist entry is 0, we can
// actually discard that freelist entry because touching a discarded
// page is guaranteed to return original content or 0.
// (Note that this optimization won't fire on big endian machines
// because the masking function is negation.)
- if (!PartitionFreelistMask(entry))
+ if (!PartitionFreelistEntry::Transform(entry))
last_slot = slotIndex;
#endif
}
@@ -1232,7 +1205,7 @@ static size_t PartitionPurgePage(PartitionPage* page, bool discard) {
continue;
auto* entry = reinterpret_cast<PartitionFreelistEntry*>(
ptr + (slot_size * slotIndex));
- *entry_ptr = PartitionFreelistMask(entry);
+ *entry_ptr = PartitionFreelistEntry::Transform(entry);
entry_ptr = reinterpret_cast<PartitionFreelistEntry**>(entry);
num_new_entries++;
#if !defined(OS_WIN)
@@ -1242,7 +1215,8 @@ static size_t PartitionPurgePage(PartitionPage* page, bool discard) {
// Terminate the freelist chain.
*entry_ptr = nullptr;
// The freelist head is stored unmasked.
- page->freelist_head = PartitionFreelistMask(page->freelist_head);
+ page->freelist_head =
+ PartitionFreelistEntry::Transform(page->freelist_head);
DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
// Discard the memory.
DiscardSystemPages(begin_ptr, unprovisioned_bytes);
@@ -1317,7 +1291,7 @@ static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out,
PartitionPage* page) {
uint16_t bucket_num_slots = page->bucket->get_slots_per_span();
- if (PartitionPageStateIsDecommitted(page)) {
+ if (page->is_decommitted()) {
++stats_out->num_decommitted_pages;
return;
}
@@ -1336,13 +1310,13 @@ static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out,
RoundUpToSystemPage((bucket_num_slots - page->num_unprovisioned_slots) *
stats_out->bucket_slot_size);
stats_out->resident_bytes += page_bytes_resident;
- if (PartitionPageStateIsEmpty(page)) {
+ if (page->is_empty()) {
stats_out->decommittable_bytes += page_bytes_resident;
++stats_out->num_empty_pages;
- } else if (PartitionPageStateIsFull(page)) {
+ } else if (page->is_full()) {
++stats_out->num_full_pages;
} else {
- DCHECK(PartitionPageStateIsActive(page));
+ DCHECK(page->is_active());
++stats_out->num_active_pages;
}
}
@@ -1373,13 +1347,12 @@ static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
for (PartitionPage* page = bucket->empty_pages_head; page;
page = page->next_page) {
- DCHECK(PartitionPageStateIsEmpty(page) ||
- PartitionPageStateIsDecommitted(page));
+ DCHECK(page->is_empty() || page->is_decommitted());
PartitionDumpPageStats(stats_out, page);
}
for (PartitionPage* page = bucket->decommitted_pages_head; page;
page = page->next_page) {
- DCHECK(PartitionPageStateIsDecommitted(page));
+ DCHECK(page->is_decommitted());
PartitionDumpPageStats(stats_out, page);
}
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.h b/chromium/base/allocator/partition_allocator/partition_alloc.h
index 68201dffb17..6e5143b094a 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc.h
+++ b/chromium/base/allocator/partition_allocator/partition_alloc.h
@@ -97,7 +97,11 @@ static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
// system page of the span. For our current max slot span size of 64k and other
// constant values, we pack _all_ PartitionRootGeneric::Alloc() sizes perfectly
// up against the end of a system page.
+#if defined(_MIPS_ARCH_LOONGSON)
+static const size_t kPartitionPageShift = 16; // 64KB
+#else
static const size_t kPartitionPageShift = 14; // 16KB
+#endif
static const size_t kPartitionPageSize = 1 << kPartitionPageShift;
static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1;
static const size_t kPartitionPageBaseMask = ~kPartitionPageOffsetMask;
@@ -230,8 +234,31 @@ class PartitionStatsDumper;
struct PartitionBucket;
struct PartitionRootBase;
+// TODO(ajwong): Introduce an EncodedFreelistEntry type and then replace
+// Transform() with Encode()/Decode() such that the API provides some static
+// type safety.
+//
+// https://crbug.com/787153
struct PartitionFreelistEntry {
PartitionFreelistEntry* next;
+
+ static ALWAYS_INLINE PartitionFreelistEntry* Transform(
+ PartitionFreelistEntry* ptr) {
+// We use bswap on little endian as a fast mask for two reasons:
+// 1) If an object is freed and its vtable used where the attacker doesn't
+// get the chance to run allocations between the free and use, the vtable
+// dereference is likely to fault.
+// 2) If the attacker has a linear buffer overflow and elects to try and
+// corrupt a freelist pointer, partial pointer overwrite attacks are
+// thwarted.
+// For big endian, similar guarantees are arrived at with a negation.
+#if defined(ARCH_CPU_BIG_ENDIAN)
+ uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
+#else
+ uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr));
+#endif
+ return reinterpret_cast<PartitionFreelistEntry*>(masked);
+ }
};
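Transform() is an involution, so the same call both encodes a freelist pointer before it is stored and decodes it when it is read back. A minimal round-trip sketch, assuming a little-endian build and a hand-rolled ByteSwap() in place of base::ByteSwapUintPtrT():

#include <cassert>
#include <cstddef>
#include <cstdint>

// Portable byte swap over the full pointer width (stand-in for
// ByteSwapUintPtrT()).
static uintptr_t ByteSwap(uintptr_t x) {
  uintptr_t r = 0;
  for (std::size_t i = 0; i < sizeof(x); ++i)
    r = (r << 8) | ((x >> (8 * i)) & 0xff);
  return r;
}

int main() {
  int slot = 0;
  void* ptr = &slot;

  // Applying the transform twice returns the original pointer.
  uintptr_t encoded = ByteSwap(reinterpret_cast<uintptr_t>(ptr));
  uintptr_t decoded = ByteSwap(encoded);
  assert(reinterpret_cast<void*>(decoded) == ptr);

  // The stored (encoded) value has the pointer's bytes reversed, so using it
  // without decoding is very likely to fault, and a partial overwrite of its
  // low bytes corrupts what were the pointer's high bytes.
  return 0;
}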
// Some notes on page states. A page can be in one of four major states:
@@ -295,6 +322,26 @@ struct PartitionPage {
}
ALWAYS_INLINE size_t get_raw_size() const;
+ ALWAYS_INLINE void set_raw_size(size_t size);
+
+ ALWAYS_INLINE void Reset();
+
+ // TODO(ajwong): Can this be made private? https://crbug.com/787153
+ BASE_EXPORT static PartitionPage* get_sentinel_page();
+
+ // Page State accessors.
+ // Note that it's only valid to call these functions on pages found on one of
+ // the page lists. Specifically, you can't call these functions on full pages
+ // that were detached from the active list.
+ //
+ // This restriction provides the flexibility for some of the status fields to
+ // be repurposed when a page is taken off a list. See the negation of
+ // |num_allocated_slots| when a full page is removed from the active list
+ // for an example of such repurposing.
+ ALWAYS_INLINE bool is_active() const;
+ ALWAYS_INLINE bool is_full() const;
+ ALWAYS_INLINE bool is_empty() const;
+ ALWAYS_INLINE bool is_decommitted() const;
};
static_assert(sizeof(PartitionPage) <= kPageMetadataSize,
"PartitionPage must be able to fit in a metadata slot");
@@ -310,6 +357,7 @@ struct PartitionBucket {
unsigned num_full_pages : 24;
// Public API.
+ void Init(uint32_t new_slot_size);
// Note the matching Free() functions are in PartitionPage.
BASE_EXPORT void* Alloc(PartitionRootBase* root, int flags, size_t size);
@@ -328,6 +376,60 @@ struct PartitionBucket {
// TODO(ajwong): Change to CheckedMul. https://crbug.com/787153
return static_cast<uint16_t>(get_bytes_per_span() / slot_size);
}
+
+ // TODO(ajwong): Can this be made private? https://crbug.com/787153
+ static PartitionBucket* get_sentinel_bucket();
+
+ // This helper function scans a bucket's active page list for a suitable new
+ // active page. When it finds a suitable new active page (one that has
+ // free slots and is not empty), it is set as the new active page. If there
+ // is no suitable new active page, the current active page is set to
+ // PartitionPage::get_sentinel_page(). As potential pages are scanned, they
+ // are tidied up according to their state. Empty pages are swept on to the
+ // empty page list, decommitted pages on to the decommitted page list and full
+ // pages are unlinked from any list.
+ //
+ // This is where the guts of the bucket maintenance is done!
+ bool SetNewActivePage();
+
+ private:
+ static void OutOfMemory(const PartitionRootBase* root);
+ static void OutOfMemoryWithLotsOfUncommitedPages();
+
+ static NOINLINE void OnFull();
+
+ // Returns a natural number of PartitionPages (calculated by
+ // get_system_pages_per_slot_span()) to allocate from the current
+ // SuperPage when the bucket runs out of slots.
+ ALWAYS_INLINE uint16_t get_pages_per_slot_span();
+
+ // Returns the number of system pages in a slot span.
+ //
+ // The calculation attempts to find the best number of System Pages to
+ // allocate for the given slot_size to minimize wasted space. It uses a
+ // heuristic that looks at number of bytes wasted after the last slot and
+ // attempts to account for the PTE usage of each System Page.
+ uint8_t get_system_pages_per_slot_span();
+
+ // Allocates a new slot span with size |num_partition_pages| from the
+ // current extent. Metadata within this slot span will be uninitialized.
+ // Returns nullptr on error.
+ ALWAYS_INLINE void* AllocNewSlotSpan(PartitionRootBase* root,
+ int flags,
+ uint16_t num_partition_pages);
+
+ // Each bucket allocates a slot span when it runs out of slots.
+ // A slot span's size is equal to get_pages_per_slot_span() number of
+ // PartitionPages. This function initializes all PartitionPages within the
+ // span to point to the first PartitionPage which holds all the metadata
+ // for the span and registers this bucket as the owner of the span. It does
+ // NOT put the slots into the bucket's freelist.
+ ALWAYS_INLINE void InitializeSlotSpan(PartitionPage* page);
+
+ // Allocates one slot from the given |page| and then adds the remainder to
+ // the current bucket. If the |page| was freshly allocated, it must have been
+ // passed through InitializeSlotSpan() first.
+ ALWAYS_INLINE char* AllocAndFillFreelist(PartitionPage* page);
};
// An "extent" is a span of consecutive superpages. We link to the partition's
@@ -348,6 +450,8 @@ struct PartitionDirectMapExtent {
PartitionDirectMapExtent* prev_extent;
PartitionBucket* bucket;
size_t map_size; // Mapped size, not including guard pages and meta-data.
+
+ ALWAYS_INLINE static PartitionDirectMapExtent* FromPage(PartitionPage* page);
};
struct BASE_EXPORT PartitionRootBase {
@@ -374,8 +478,10 @@ struct BASE_EXPORT PartitionRootBase {
// Public API
- // gOomHandlingFunction is invoked when ParitionAlloc hits OutOfMemory.
+ // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory.
static void (*gOomHandlingFunction)();
+
+ ALWAYS_INLINE static PartitionRootBase* FromPage(PartitionPage* page);
};
enum PartitionPurgeFlags {
@@ -555,24 +661,6 @@ class BASE_EXPORT PartitionAllocHooks {
static FreeHook* free_hook_;
};
-ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistMask(
- PartitionFreelistEntry* ptr) {
-// We use bswap on little endian as a fast mask for two reasons:
-// 1) If an object is freed and its vtable used where the attacker doesn't
-// get the chance to run allocations between the free and use, the vtable
-// dereference is likely to fault.
-// 2) If the attacker has a linear buffer overflow and elects to try and
-// corrupt a freelist pointer, partial pointer overwrite attacks are
-// thwarted.
-// For big endian, similar guarantees are arrived at with a negation.
-#if defined(ARCH_CPU_BIG_ENDIAN)
- uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
-#else
- uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr));
-#endif
- return reinterpret_cast<PartitionFreelistEntry*>(masked);
-}
-
ALWAYS_INLINE size_t PartitionCookieSizeAdjustAdd(size_t size) {
#if DCHECK_IS_ON()
// Add space for cookies, checking for integer overflow. TODO(palmer):
@@ -702,7 +790,8 @@ ALWAYS_INLINE size_t PartitionPage::get_raw_size() const {
return 0;
}
-ALWAYS_INLINE PartitionRootBase* PartitionPageToRoot(PartitionPage* page) {
+ALWAYS_INLINE PartitionRootBase* PartitionRootBase::FromPage(
+ PartitionPage* page) {
PartitionSuperPageExtentEntry* extent_entry =
reinterpret_cast<PartitionSuperPageExtentEntry*>(
reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
@@ -710,7 +799,7 @@ ALWAYS_INLINE PartitionRootBase* PartitionPageToRoot(PartitionPage* page) {
}
ALWAYS_INLINE bool PartitionPage::IsPointerValid(PartitionPage* page) {
- PartitionRootBase* root = PartitionPageToRoot(page);
+ PartitionRootBase* root = PartitionRootBase::FromPage(page);
return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
}
@@ -728,8 +817,8 @@ ALWAYS_INLINE void* PartitionBucket::Alloc(PartitionRootBase* root,
// All large allocations must go through the slow path to correctly
// update the size metadata.
DCHECK(page->get_raw_size() == 0);
- PartitionFreelistEntry* new_head =
- PartitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next);
+ PartitionFreelistEntry* new_head = PartitionFreelistEntry::Transform(
+ static_cast<PartitionFreelistEntry*>(ret)->next);
page->freelist_head = new_head;
page->num_allocated_slots++;
} else {
@@ -802,9 +891,10 @@ ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
PartitionPage::FromPointer(freelist_head)));
CHECK(ptr != freelist_head); // Catches an immediate double free.
// Look for double free one level deeper in debug.
- DCHECK(!freelist_head || ptr != PartitionFreelistMask(freelist_head->next));
+ DCHECK(!freelist_head ||
+ ptr != PartitionFreelistEntry::Transform(freelist_head->next));
PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr);
- entry->next = PartitionFreelistMask(freelist_head);
+ entry->next = PartitionFreelistEntry::Transform(freelist_head);
freelist_head = entry;
--this->num_allocated_slots;
if (UNLIKELY(this->num_allocated_slots <= 0)) {
@@ -973,8 +1063,6 @@ class BASE_EXPORT PartitionAllocatorGeneric {
PartitionRootGeneric partition_root_;
};
-BASE_EXPORT PartitionPage* GetSentinelPageForTesting();
-
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
index 6ea89b7c875..b84db9b7308 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
@@ -33,7 +33,7 @@ std::unique_ptr<T[]> WrapArrayUnique(T* ptr) {
return std::unique_ptr<T[]>(ptr);
}
-const size_t kTestMaxAllocation = 4096;
+constexpr size_t kTestMaxAllocation = base::kSystemPageSize;
bool IsLargeMemoryDevice() {
// Treat any device with 2GiB or more of physical memory as a "large memory
@@ -139,7 +139,8 @@ class PartitionAllocTest : public testing::Test {
bucket->active_pages_head->num_allocated_slots));
EXPECT_EQ(nullptr, bucket->active_pages_head->freelist_head);
EXPECT_TRUE(bucket->active_pages_head);
- EXPECT_TRUE(bucket->active_pages_head != GetSentinelPageForTesting());
+ EXPECT_TRUE(bucket->active_pages_head !=
+ PartitionPage::get_sentinel_page());
return bucket->active_pages_head;
}
@@ -380,7 +381,7 @@ TEST(PageAllocatorTest, MAYBE_ReserveAddressSpace) {
// Check that the most basic of allocate / free pairs work.
TEST_F(PartitionAllocTest, Basic) {
PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
- PartitionPage* seedPage = GetSentinelPageForTesting();
+ PartitionPage* seedPage = PartitionPage::get_sentinel_page();
EXPECT_FALSE(bucket->empty_pages_head);
EXPECT_FALSE(bucket->decommitted_pages_head);
@@ -445,7 +446,7 @@ TEST_F(PartitionAllocTest, MultiPages) {
PartitionPage* page = GetFullPage(kTestAllocSize);
FreeFullPage(page);
EXPECT_TRUE(bucket->empty_pages_head);
- EXPECT_EQ(GetSentinelPageForTesting(), bucket->active_pages_head);
+ EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head);
EXPECT_EQ(nullptr, page->next_page);
EXPECT_EQ(0, page->num_allocated_slots);
@@ -464,7 +465,7 @@ TEST_F(PartitionAllocTest, MultiPages) {
FreeFullPage(page);
EXPECT_EQ(0, page->num_allocated_slots);
EXPECT_TRUE(bucket->empty_pages_head);
- EXPECT_EQ(GetSentinelPageForTesting(), bucket->active_pages_head);
+ EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head);
// Allocate a new page, it should pull from the freelist.
page = GetFullPage(kTestAllocSize);
@@ -560,7 +561,7 @@ TEST_F(PartitionAllocTest, FreePageListPageTransitions) {
EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->active_pages_head);
for (i = 0; i < numToFillFreeListPage; ++i)
FreeFullPage(pages[i]);
- EXPECT_EQ(GetSentinelPageForTesting(), bucket->active_pages_head);
+ EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head);
EXPECT_TRUE(bucket->empty_pages_head);
// Allocate / free in a different bucket size so we get control of a
@@ -578,7 +579,7 @@ TEST_F(PartitionAllocTest, FreePageListPageTransitions) {
for (i = 0; i < numToFillFreeListPage; ++i)
FreeFullPage(pages[i]);
- EXPECT_EQ(GetSentinelPageForTesting(), bucket->active_pages_head);
+ EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head);
EXPECT_TRUE(bucket->empty_pages_head);
}
@@ -802,61 +803,65 @@ TEST_F(PartitionAllocTest, GenericAllocSizes) {
// Test that we can fetch the real allocated size after an allocation.
TEST_F(PartitionAllocTest, GenericAllocGetSize) {
void* ptr;
- size_t requestedSize, actualSize, predictedSize;
+ size_t requested_size, actual_size, predicted_size;
EXPECT_TRUE(PartitionAllocSupportsGetSize());
// Allocate something small.
- requestedSize = 511 - kExtraAllocSize;
- predictedSize = generic_allocator.root()->ActualSize(requestedSize);
- ptr = generic_allocator.root()->Alloc(requestedSize, type_name);
+ requested_size = 511 - kExtraAllocSize;
+ predicted_size = generic_allocator.root()->ActualSize(requested_size);
+ ptr = generic_allocator.root()->Alloc(requested_size, type_name);
EXPECT_TRUE(ptr);
- actualSize = PartitionAllocGetSize(ptr);
- EXPECT_EQ(predictedSize, actualSize);
- EXPECT_LT(requestedSize, actualSize);
+ actual_size = PartitionAllocGetSize(ptr);
+ EXPECT_EQ(predicted_size, actual_size);
+ EXPECT_LT(requested_size, actual_size);
generic_allocator.root()->Free(ptr);
// Allocate a size that should be a perfect match for a bucket, because it
// is an exact power of 2.
- requestedSize = (256 * 1024) - kExtraAllocSize;
- predictedSize = generic_allocator.root()->ActualSize(requestedSize);
- ptr = generic_allocator.root()->Alloc(requestedSize, type_name);
+ requested_size = (256 * 1024) - kExtraAllocSize;
+ predicted_size = generic_allocator.root()->ActualSize(requested_size);
+ ptr = generic_allocator.root()->Alloc(requested_size, type_name);
EXPECT_TRUE(ptr);
- actualSize = PartitionAllocGetSize(ptr);
- EXPECT_EQ(predictedSize, actualSize);
- EXPECT_EQ(requestedSize, actualSize);
+ actual_size = PartitionAllocGetSize(ptr);
+ EXPECT_EQ(predicted_size, actual_size);
+ EXPECT_EQ(requested_size, actual_size);
generic_allocator.root()->Free(ptr);
// Allocate a size that is a system page smaller than a bucket. GetSize()
// should return a larger size than we asked for now.
- requestedSize = (256 * 1024) - kSystemPageSize - kExtraAllocSize;
- predictedSize = generic_allocator.root()->ActualSize(requestedSize);
- ptr = generic_allocator.root()->Alloc(requestedSize, type_name);
+ size_t num = 64;
+ while (num * kSystemPageSize >= 1024 * 1024) {
+ num /= 2;
+ }
+ requested_size = num * kSystemPageSize - kSystemPageSize - kExtraAllocSize;
+ predicted_size = generic_allocator.root()->ActualSize(requested_size);
+ ptr = generic_allocator.root()->Alloc(requested_size, type_name);
EXPECT_TRUE(ptr);
- actualSize = PartitionAllocGetSize(ptr);
- EXPECT_EQ(predictedSize, actualSize);
- EXPECT_EQ(requestedSize + kSystemPageSize, actualSize);
+ actual_size = PartitionAllocGetSize(ptr);
+ EXPECT_EQ(predicted_size, actual_size);
+ EXPECT_EQ(requested_size + kSystemPageSize, actual_size);
// Check that we can write at the end of the reported size too.
char* charPtr = reinterpret_cast<char*>(ptr);
- *(charPtr + (actualSize - 1)) = 'A';
+ *(charPtr + (actual_size - 1)) = 'A';
generic_allocator.root()->Free(ptr);
// Allocate something very large, and uneven.
if (IsLargeMemoryDevice()) {
- requestedSize = 512 * 1024 * 1024 - 1;
- predictedSize = generic_allocator.root()->ActualSize(requestedSize);
- ptr = generic_allocator.root()->Alloc(requestedSize, type_name);
+ requested_size = 512 * 1024 * 1024 - 1;
+ predicted_size = generic_allocator.root()->ActualSize(requested_size);
+ ptr = generic_allocator.root()->Alloc(requested_size, type_name);
EXPECT_TRUE(ptr);
- actualSize = PartitionAllocGetSize(ptr);
- EXPECT_EQ(predictedSize, actualSize);
- EXPECT_LT(requestedSize, actualSize);
+ actual_size = PartitionAllocGetSize(ptr);
+ EXPECT_EQ(predicted_size, actual_size);
+ EXPECT_LT(requested_size, actual_size);
generic_allocator.root()->Free(ptr);
}
// Too large allocation.
- requestedSize = kGenericMaxDirectMapped + 1;
- predictedSize = generic_allocator.root()->ActualSize(requestedSize);
- EXPECT_EQ(requestedSize, predictedSize);
+ requested_size = kGenericMaxDirectMapped + 1;
+ predicted_size = generic_allocator.root()->ActualSize(requested_size);
+ EXPECT_EQ(requested_size, predicted_size);
}
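The renamed variables spell out the contract this test exercises: ActualSize() predicts, before allocating, exactly what PartitionAllocGetSize() reports afterwards, and that value is never smaller than the request. A hedged usage sketch against the allocator API used above:

    // Assumes an initialized PartitionAllocatorGeneric named generic_allocator
    // and a type_name string, as in the fixture above.
    size_t requested = 1000;
    size_t predicted = generic_allocator.root()->ActualSize(requested);
    void* p = generic_allocator.root()->Alloc(requested, type_name);
    size_t actual = PartitionAllocGetSize(p);
    // predicted == actual, and actual >= requested; the slack up to |actual|
    // is writable, as the test shows by storing to its last byte.
    generic_allocator.root()->Free(p);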
// Test the realloc() contract.
@@ -903,18 +908,18 @@ TEST_F(PartitionAllocTest, Realloc) {
// Test that shrinking a direct mapped allocation happens in-place.
size = kGenericMaxBucketed + 16 * kSystemPageSize;
ptr = generic_allocator.root()->Alloc(size, type_name);
- size_t actualSize = PartitionAllocGetSize(ptr);
+ size_t actual_size = PartitionAllocGetSize(ptr);
ptr2 = generic_allocator.root()->Realloc(
ptr, kGenericMaxBucketed + 8 * kSystemPageSize, type_name);
EXPECT_EQ(ptr, ptr2);
- EXPECT_EQ(actualSize - 8 * kSystemPageSize, PartitionAllocGetSize(ptr2));
+ EXPECT_EQ(actual_size - 8 * kSystemPageSize, PartitionAllocGetSize(ptr2));
// Test that a previously in-place shrunk direct mapped allocation can be
// expanded up again within its original size.
ptr = generic_allocator.root()->Realloc(ptr2, size - kSystemPageSize,
type_name);
EXPECT_EQ(ptr2, ptr);
- EXPECT_EQ(actualSize - kSystemPageSize, PartitionAllocGetSize(ptr));
+ EXPECT_EQ(actual_size - kSystemPageSize, PartitionAllocGetSize(ptr));
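For direct-mapped sizes (above kGenericMaxBucketed), Realloc() shrinks in place by releasing trailing system pages and can grow back in place within the original reservation; data only moves once the new size fits a normal bucket. A compact sketch reusing the calls from the test (sizes illustrative):

    // Sizes mirror the test; the behavior noted here is what the EXPECTs check.
    size_t size = kGenericMaxBucketed + 16 * kSystemPageSize;
    void* p = generic_allocator.root()->Alloc(size, type_name);
    // Shrinking releases whole trailing pages but keeps the same address.
    void* q = generic_allocator.root()->Realloc(
        p, kGenericMaxBucketed + 8 * kSystemPageSize, type_name);
    // q == p; growing back within the original reservation is also in place.
    void* r = generic_allocator.root()->Realloc(q, size - kSystemPageSize,
                                                type_name);
    generic_allocator.root()->Free(r);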
// Test that a direct mapped allocation is performed not in-place when the
// new size is small enough.
@@ -1301,14 +1306,14 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) {
EXPECT_TRUE(bucket->empty_pages_head);
EXPECT_TRUE(bucket->empty_pages_head->next_page);
- EXPECT_EQ(GetSentinelPageForTesting(), bucket->active_pages_head);
+ EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head);
// At this moment, we have two decommitted pages, on the empty list.
ptr = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
generic_allocator.root()->Free(ptr);
- EXPECT_EQ(GetSentinelPageForTesting(), bucket->active_pages_head);
+ EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head);
EXPECT_TRUE(bucket->empty_pages_head);
EXPECT_TRUE(bucket->decommitted_pages_head);
@@ -1636,7 +1641,8 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
// This test checks large-but-not-quite-direct allocations.
{
- void* ptr = generic_allocator.root()->Alloc(65536 + 1, type_name);
+ constexpr size_t requested_size = 16 * kSystemPageSize;
+ void* ptr = generic_allocator.root()->Alloc(requested_size + 1, type_name);
{
MockPartitionStatsDumper dumper;
@@ -1644,14 +1650,15 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
- size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder);
+ size_t slot_size =
+ requested_size + (requested_size / kGenericNumBucketsPerOrder);
const PartitionBucketMemoryStats* stats =
dumper.GetBucketStats(slot_size);
EXPECT_TRUE(stats);
EXPECT_TRUE(stats->is_valid);
EXPECT_FALSE(stats->is_direct_map);
EXPECT_EQ(slot_size, stats->bucket_slot_size);
- EXPECT_EQ(65536u + 1 + kExtraAllocSize, stats->active_bytes);
+ EXPECT_EQ(requested_size + 1 + kExtraAllocSize, stats->active_bytes);
EXPECT_EQ(slot_size, stats->resident_bytes);
EXPECT_EQ(0u, stats->decommittable_bytes);
EXPECT_EQ(kSystemPageSize, stats->discardable_bytes);
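Deriving the request from kSystemPageSize keeps the test meaningful on 16 KiB-page systems, and the expected slot size follows from the bucket spacing: an allocation one byte past a bucket boundary lands in the next bucket, one kGenericNumBucketsPerOrder-th larger. A worked example, assuming 4 KiB pages and kGenericNumBucketsPerOrder == 4 (both assumptions, not guaranteed by this diff):

    #include <cstddef>

    constexpr size_t kAssumedPageSize = 4096;      // assumption
    constexpr size_t kAssumedBucketsPerOrder = 4;  // assumption
    constexpr size_t kRequested = 16 * kAssumedPageSize;  // 65536 bytes
    constexpr size_t kSlot = kRequested + kRequested / kAssumedBucketsPerOrder;
    static_assert(kSlot == 81920, "next bucket above a 64 KiB request");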
@@ -1669,7 +1676,8 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
false /* detailed dump */, &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
- size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder);
+ size_t slot_size =
+ requested_size + (requested_size / kGenericNumBucketsPerOrder);
const PartitionBucketMemoryStats* stats =
dumper.GetBucketStats(slot_size);
EXPECT_TRUE(stats);
@@ -1685,8 +1693,8 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_decommitted_pages);
}
- void* ptr2 =
- generic_allocator.root()->Alloc(65536 + kSystemPageSize + 1, type_name);
+ void* ptr2 = generic_allocator.root()->Alloc(
+ requested_size + kSystemPageSize + 1, type_name);
EXPECT_EQ(ptr, ptr2);
{
@@ -1695,14 +1703,15 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
- size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder);
+ size_t slot_size =
+ requested_size + (requested_size / kGenericNumBucketsPerOrder);
const PartitionBucketMemoryStats* stats =
dumper.GetBucketStats(slot_size);
EXPECT_TRUE(stats);
EXPECT_TRUE(stats->is_valid);
EXPECT_FALSE(stats->is_direct_map);
EXPECT_EQ(slot_size, stats->bucket_slot_size);
- EXPECT_EQ(65536u + kSystemPageSize + 1 + kExtraAllocSize,
+ EXPECT_EQ(requested_size + kSystemPageSize + 1 + kExtraAllocSize,
stats->active_bytes);
EXPECT_EQ(slot_size, stats->resident_bytes);
EXPECT_EQ(0u, stats->decommittable_bytes);
@@ -1880,16 +1889,17 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
generic_allocator.root()->Free(ptr2);
}
{
- char* ptr1 = reinterpret_cast<char*>(
- generic_allocator.root()->Alloc(9216 - kExtraAllocSize, type_name));
- void* ptr2 =
- generic_allocator.root()->Alloc(9216 - kExtraAllocSize, type_name);
- void* ptr3 =
- generic_allocator.root()->Alloc(9216 - kExtraAllocSize, type_name);
- void* ptr4 =
- generic_allocator.root()->Alloc(9216 - kExtraAllocSize, type_name);
- memset(ptr1, 'A', 9216 - kExtraAllocSize);
- memset(ptr2, 'A', 9216 - kExtraAllocSize);
+ constexpr size_t requested_size = 2.25 * kSystemPageSize;
+ char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+ requested_size - kExtraAllocSize, type_name));
+ void* ptr2 = generic_allocator.root()->Alloc(
+ requested_size - kExtraAllocSize, type_name);
+ void* ptr3 = generic_allocator.root()->Alloc(
+ requested_size - kExtraAllocSize, type_name);
+ void* ptr4 = generic_allocator.root()->Alloc(
+ requested_size - kExtraAllocSize, type_name);
+ memset(ptr1, 'A', requested_size - kExtraAllocSize);
+ memset(ptr2, 'A', requested_size - kExtraAllocSize);
generic_allocator.root()->Free(ptr2);
generic_allocator.root()->Free(ptr1);
{
@@ -1898,12 +1908,13 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
- const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(9216);
+ const PartitionBucketMemoryStats* stats =
+ dumper.GetBucketStats(requested_size);
EXPECT_TRUE(stats);
EXPECT_TRUE(stats->is_valid);
EXPECT_EQ(0u, stats->decommittable_bytes);
EXPECT_EQ(2 * kSystemPageSize, stats->discardable_bytes);
- EXPECT_EQ(9216u * 2, stats->active_bytes);
+ EXPECT_EQ(requested_size * 2, stats->active_bytes);
EXPECT_EQ(9 * kSystemPageSize, stats->resident_bytes);
}
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
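With 2.25-page slots, freeing two of the four allocations leaves system pages in the span that hold no live data; the dumper reports them as discardable rather than decommittable, and a purge can hand them back to the OS while keeping the span mapped. A one-line usage sketch, assuming PurgeMemory() accepts the same flag values used elsewhere in this file:

    // After the two frees above, ask the allocator to drop the unused system
    // pages; the mapping stays intact, only the physical pages are released.
    generic_allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);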
@@ -1922,6 +1933,49 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
generic_allocator.root()->Free(ptr3);
generic_allocator.root()->Free(ptr4);
}
+
+// When kSystemPageSize = 16384 (as on _MIPS_ARCH_LOONGSON), 64 *
+// kSystemPageSize (see the #else branch below) caused this test to OOM.
+// Therefore, for systems with 16 KiB pages, use 32 * kSystemPageSize.
+//
+// TODO(palmer): Refactor this to branch on page size instead of architecture,
+// for clarity of purpose and for applicability to more architectures.
+#if defined(_MIPS_ARCH_LOONGSON)
+  {
+    char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+        (32 * kSystemPageSize) - kExtraAllocSize, type_name));
+    memset(ptr1, 'A', (32 * kSystemPageSize) - kExtraAllocSize);
+    generic_allocator.root()->Free(ptr1);
+    ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+        (31 * kSystemPageSize) - kExtraAllocSize, type_name));
+    {
+      MockPartitionStatsDumper dumper;
+      PartitionDumpStatsGeneric(generic_allocator.root(),
+                                "mock_generic_allocator",
+                                false /* detailed dump */, &dumper);
+      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+      const PartitionBucketMemoryStats* stats =
+          dumper.GetBucketStats(32 * kSystemPageSize);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+      EXPECT_EQ(kSystemPageSize, stats->discardable_bytes);
+      EXPECT_EQ(31 * kSystemPageSize, stats->active_bytes);
+      EXPECT_EQ(32 * kSystemPageSize, stats->resident_bytes);
+    }
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 30), true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 31), true);
+    generic_allocator.root()->PurgeMemory(
+        PartitionPurgeDiscardUnusedSystemPages);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 30), true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 31), false);
+
+    generic_allocator.root()->Free(ptr1);
+  }
+#else
{
char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
(64 * kSystemPageSize) - kExtraAllocSize, type_name));
@@ -1957,6 +2011,7 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
generic_allocator.root()->Free(ptr1);
}
+#endif
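Following the TODO above, the special case could key off the page size instead of the architecture; since kSystemPageSize is a compile-time constant here, a sketch of that refactor (an assumption about the shape of the fix, not what the test currently does) might be:

    // Fill roughly the same number of bytes regardless of page size, so a
    // 16 KiB-page configuration does not over-allocate and OOM.
    const size_t kPagesToFill = kSystemPageSize >= 16384 ? 32 : 64;
    char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
        (kPagesToFill * kSystemPageSize) - kExtraAllocSize, type_name));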
// This sub-test tests truncation of the provisioned slots in a trickier
// case where the freelist is rewritten.
generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
diff --git a/chromium/base/allocator/partition_allocator/spin_lock.cc b/chromium/base/allocator/partition_allocator/spin_lock.cc
index f127610c18f..c30d6cd43ad 100644
--- a/chromium/base/allocator/partition_allocator/spin_lock.cc
+++ b/chromium/base/allocator/partition_allocator/spin_lock.cc
@@ -62,9 +62,6 @@
namespace base {
namespace subtle {
-SpinLock::SpinLock() = default;
-SpinLock::~SpinLock() = default;
-
void SpinLock::LockSlow() {
// The value of |kYieldProcessorTries| is cargo culted from TCMalloc, Windows
// critical section defaults, and various other recommendations.
diff --git a/chromium/base/allocator/partition_allocator/spin_lock.h b/chromium/base/allocator/partition_allocator/spin_lock.h
index d0afc477620..e698b565b07 100644
--- a/chromium/base/allocator/partition_allocator/spin_lock.h
+++ b/chromium/base/allocator/partition_allocator/spin_lock.h
@@ -22,8 +22,8 @@ namespace subtle {
class BASE_EXPORT SpinLock {
public:
- SpinLock();
- ~SpinLock();
+ constexpr SpinLock() = default;
+ ~SpinLock() = default;
using Guard = std::lock_guard<SpinLock>;
ALWAYS_INLINE void lock() {