author     Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-07-16 11:45:35 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-07-17 08:59:23 +0000
commit     552906b0f222c5d5dd11b9fd73829d510980461a (patch)
tree       3a11e6ed0538a81dd83b20cf3a4783e297f26d91 /chromium/base/allocator
parent     1b05827804eaf047779b597718c03e7d38344261 (diff)
download   qtwebengine-chromium-552906b0f222c5d5dd11b9fd73829d510980461a.tar.gz
BASELINE: Update Chromium to 83.0.4103.122
Change-Id: Ie3a82f5bb0076eec2a7c6a6162326b4301ee291e
Reviewed-by: Michael BrĂĽning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/base/allocator')
-rw-r--r--  chromium/base/allocator/BUILD.gn                                                |   5
-rw-r--r--  chromium/base/allocator/allocator_extension.cc                                  |  12
-rw-r--r--  chromium/base/allocator/partition_allocator/oom.h                               |  19
-rw-r--r--  chromium/base/allocator/partition_allocator/page_allocator.cc                   |  19
-rw-r--r--  chromium/base/allocator/partition_allocator/page_allocator.h                    |   7
-rw-r--r--  chromium/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h  |   2
-rw-r--r--  chromium/base/allocator/partition_allocator/page_allocator_internals_posix.h    |  31
-rw-r--r--  chromium/base/allocator/partition_allocator/page_allocator_internals_win.h      |   2
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_alloc.cc                  |  29
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_alloc.h                   |  11
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_alloc_perftest.cc         | 373
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_bucket.cc                 |   9
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_oom.cc                    |   8
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_oom.h                     |   6
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_page.cc                   |  17
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_page.h                    |  30
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_root_base.cc              |  10
-rw-r--r--  chromium/base/allocator/partition_allocator/partition_root_base.h               |   9
-rw-r--r--  chromium/base/allocator/partition_allocator/random.cc                           |  58
-rw-r--r--  chromium/base/allocator/partition_allocator/spin_lock_perftest.cc               |  22
20 files changed, 400 insertions(+), 279 deletions(-)
diff --git a/chromium/base/allocator/BUILD.gn b/chromium/base/allocator/BUILD.gn
index 5f4fff8c69d..eb808ba556c 100644
--- a/chromium/base/allocator/BUILD.gn
+++ b/chromium/base/allocator/BUILD.gn
@@ -121,7 +121,6 @@ if (use_allocator == "tcmalloc") {
"$tcmalloc_dir/src/base/spinlock.h",
"$tcmalloc_dir/src/base/spinlock_internal.cc",
"$tcmalloc_dir/src/base/spinlock_internal.h",
- "$tcmalloc_dir/src/base/synchronization_profiling.h",
"$tcmalloc_dir/src/base/sysinfo.cc",
"$tcmalloc_dir/src/base/sysinfo.h",
"$tcmalloc_dir/src/base/vdso_support.cc",
@@ -214,9 +213,7 @@ if (use_allocator == "tcmalloc") {
configs -= [ "//build/config/compiler:afdo" ]
}
- deps = [
- ":buildflags",
- ]
+ deps = [ ":buildflags" ]
if (enable_profiling) {
sources += [
diff --git a/chromium/base/allocator/allocator_extension.cc b/chromium/base/allocator/allocator_extension.cc
index 05c3539cad6..87dd5731c06 100644
--- a/chromium/base/allocator/allocator_extension.cc
+++ b/chromium/base/allocator/allocator_extension.cc
@@ -24,15 +24,17 @@ void ReleaseFreeMemory() {
bool GetNumericProperty(const char* name, size_t* value) {
#if BUILDFLAG(USE_TCMALLOC)
return ::MallocExtension::instance()->GetNumericProperty(name, value);
-#endif
+#else
return false;
+#endif
}
bool SetNumericProperty(const char* name, size_t value) {
#if BUILDFLAG(USE_TCMALLOC)
return ::MallocExtension::instance()->SetNumericProperty(name, value);
-#endif
+#else
return false;
+#endif
}
void GetHeapSample(std::string* writer) {
@@ -44,8 +46,9 @@ void GetHeapSample(std::string* writer) {
bool IsHeapProfilerRunning() {
#if BUILDFLAG(USE_TCMALLOC) && defined(ENABLE_PROFILING)
return ::IsHeapProfilerRunning();
-#endif
+#else
return false;
+#endif
}
void SetHooks(AllocHookFunc alloc_hook, FreeHookFunc free_hook) {
@@ -65,8 +68,9 @@ void SetHooks(AllocHookFunc alloc_hook, FreeHookFunc free_hook) {
int GetCallStack(void** stack, int max_stack_size) {
#if BUILDFLAG(USE_TCMALLOC)
return MallocHook::GetCallerStackTrace(stack, max_stack_size, 0);
-#endif
+#else
return 0;
+#endif
}
} // namespace allocator
diff --git a/chromium/base/allocator/partition_allocator/oom.h b/chromium/base/allocator/partition_allocator/oom.h
index 8d3fe086056..c3a2d1b03cf 100644
--- a/chromium/base/allocator/partition_allocator/oom.h
+++ b/chromium/base/allocator/partition_allocator/oom.h
@@ -17,23 +17,20 @@
namespace {
// The crash is generated in a NOINLINE function so that we can classify the
// crash as an OOM solely by analyzing the stack trace.
-NOINLINE void OnNoMemory() {
+NOINLINE void OnNoMemory(size_t size) {
base::internal::RunPartitionAllocOomCallback();
-#if defined(OS_WIN)
- ::RaiseException(base::win::kOomExceptionCode, EXCEPTION_NONCONTINUABLE, 0,
- nullptr);
-#endif
+ base::internal::OnNoMemoryInternal(size);
IMMEDIATE_CRASH();
}
} // namespace
-// OOM_CRASH() - Specialization of IMMEDIATE_CRASH which will raise a custom
+// OOM_CRASH(size) - Specialization of IMMEDIATE_CRASH which will raise a custom
// exception on Windows to signal this is OOM and not a normal assert.
-// OOM_CRASH() is called by users of PageAllocator (including PartitionAlloc) to
-// signify an allocation failure from the platform.
-#define OOM_CRASH() \
- do { \
- OnNoMemory(); \
+// OOM_CRASH(size) is called by users of PageAllocator (including
+// PartitionAlloc) to signify an allocation failure from the platform.
+#define OOM_CRASH(size) \
+ do { \
+ OnNoMemory(size); \
} while (0)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
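A minimal sketch of how a PageAllocator user is expected to invoke the reworked macro; AllocateRaw is a hypothetical platform helper, not part of this patch:

#include <cstddef>

void* AllocateRaw(size_t size);  // hypothetical platform allocation call

void* AllocOrCrash(size_t size) {
  void* ptr = AllocateRaw(size);
  if (!ptr)
    OOM_CRASH(size);  // the failed request size now reaches OnNoMemory(size)
  return ptr;
}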
diff --git a/chromium/base/allocator/partition_allocator/page_allocator.cc b/chromium/base/allocator/partition_allocator/page_allocator.cc
index b2e8cf3b15e..28d90fe9b8f 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator.cc
+++ b/chromium/base/allocator/partition_allocator/page_allocator.cc
@@ -239,14 +239,21 @@ bool ReserveAddressSpace(size_t size) {
return false;
}
-void ReleaseReservation() {
+bool ReleaseReservation() {
// To avoid deadlock, call only FreePages.
subtle::SpinLock::Guard guard(GetReserveLock());
- if (s_reservation_address != nullptr) {
- FreePages(s_reservation_address, s_reservation_size);
- s_reservation_address = nullptr;
- s_reservation_size = 0;
- }
+ if (!s_reservation_address)
+ return false;
+
+ FreePages(s_reservation_address, s_reservation_size);
+ s_reservation_address = nullptr;
+ s_reservation_size = 0;
+ return true;
+}
+
+bool HasReservationForTesting() {
+ subtle::SpinLock::Guard guard(GetReserveLock());
+ return s_reservation_address != nullptr;
}
uint32_t GetAllocPageErrorCode() {
diff --git a/chromium/base/allocator/partition_allocator/page_allocator.h b/chromium/base/allocator/partition_allocator/page_allocator.h
index a93694bcc8a..da070bcf92e 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator.h
+++ b/chromium/base/allocator/partition_allocator/page_allocator.h
@@ -182,7 +182,12 @@ BASE_EXPORT bool ReserveAddressSpace(size_t size);
// Releases any reserved address space. |AllocPages| calls this automatically on
// an allocation failure. External allocators may also call this on failure.
-BASE_EXPORT void ReleaseReservation();
+//
+// Returns true when an existing reservation was released.
+BASE_EXPORT bool ReleaseReservation();
+
+// Returns true if there is currently an address space reservation.
+BASE_EXPORT bool HasReservationForTesting();
// Returns |errno| (POSIX) or the result of |GetLastError| (Windows) when |mmap|
// (POSIX) or |VirtualAlloc| (Windows) fails.
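A hedged sketch of what the new boolean return enables for an external allocator; TryAlloc is hypothetical:

#include <cstddef>

void* TryAlloc(size_t size);  // hypothetical allocation attempt

void* AllocWithReservationFallback(size_t size) {
  if (void* ptr = TryAlloc(size))
    return ptr;
  // Retry only when ReleaseReservation() actually freed the reserved range;
  // before this patch the caller could not tell whether a retry was useful.
  if (base::ReleaseReservation())
    return TryAlloc(size);
  return nullptr;
}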
diff --git a/chromium/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h b/chromium/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h
index d78f5f9d5a0..7e1bff18525 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h
+++ b/chromium/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h
@@ -89,7 +89,7 @@ void* SystemAllocPagesInternal(void* hint,
if (page_tag == PageTag::kV8) {
// V8 uses JIT. Call zx_vmo_replace_as_executable() to allow code execution
// in the new VMO.
- status = vmo.replace_as_executable(zx::handle(), &vmo);
+ status = vmo.replace_as_executable(zx::resource(), &vmo);
if (status != ZX_OK) {
ZX_DLOG(INFO, status) << "zx_vmo_replace_as_executable";
return nullptr;
diff --git a/chromium/base/allocator/partition_allocator/page_allocator_internals_posix.h b/chromium/base/allocator/partition_allocator/page_allocator_internals_posix.h
index 5d71a1dbc43..27fe5a9a1df 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator_internals_posix.h
+++ b/chromium/base/allocator/partition_allocator/page_allocator_internals_posix.h
@@ -12,8 +12,11 @@
#include "build/build_config.h"
#if defined(OS_MACOSX)
+#include "base/mac/foundation_util.h"
#include "base/mac/mac_util.h"
+#include "base/mac/scoped_cftyperef.h"
+#include <Security/Security.h>
#include <mach/mach.h>
#endif
#if defined(OS_ANDROID)
@@ -33,8 +36,9 @@
namespace base {
-#if defined(OS_ANDROID)
namespace {
+
+#if defined(OS_ANDROID)
const char* PageTagToName(PageTag tag) {
// Important: All the names should be string literals. As per prctl.h in
// //third_party/android_ndk the kernel keeps a pointer to the name instead
@@ -56,9 +60,28 @@ const char* PageTagToName(PageTag tag) {
return "";
}
}
-} // namespace
#endif // defined(OS_ANDROID)
+#if defined(OS_MACOSX)
+// Tests whether the version of macOS supports the MAP_JIT flag and if the
+// current process is signed with the allow-jit entitlement.
+bool UseMapJit() {
+ if (!mac::IsAtLeastOS10_14())
+ return false;
+
+ ScopedCFTypeRef<SecTaskRef> task(SecTaskCreateFromSelf(kCFAllocatorDefault));
+ ScopedCFTypeRef<CFErrorRef> error;
+ ScopedCFTypeRef<CFTypeRef> value(SecTaskCopyValueForEntitlement(
+ task.get(), CFSTR("com.apple.security.cs.allow-jit"),
+ error.InitializeInto()));
+ if (error)
+ return false;
+ return mac::CFCast<CFBooleanRef>(value.get()) == kCFBooleanTrue;
+}
+#endif // defined(OS_MACOSX)
+
+} // namespace
+
// |mmap| uses a nearby address if the hint address is blocked.
constexpr bool kHintIsAdvisory = true;
std::atomic<int32_t> s_allocPageErrorCode{0};
@@ -104,8 +127,8 @@ void* SystemAllocPagesInternal(void* hint,
// "runtime" option cannot execute writable memory by default. They can opt
// into this capability by specifying the "com.apple.security.cs.allow-jit"
// code signing entitlement and allocating the region with the MAP_JIT flag.
- static const bool kNeedMapJIT = mac::IsAtLeastOS10_14();
- if (page_tag == PageTag::kV8 && kNeedMapJIT) {
+ static const bool kUseMapJit = UseMapJit();
+ if (page_tag == PageTag::kV8 && kUseMapJit) {
map_flags |= MAP_JIT;
}
#endif
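For context, a minimal sketch (plain POSIX mmap, assuming a macOS SDK that defines MAP_JIT) of the path this hunk gates: the flag is set only after UseMapJit() has confirmed both the OS version and the entitlement.

#include <cstddef>
#include <sys/mman.h>

void* AllocJitRegion(size_t size, bool use_map_jit) {
  int flags = MAP_ANON | MAP_PRIVATE;
  if (use_map_jit)
    flags |= MAP_JIT;  // legal only with com.apple.security.cs.allow-jit
  void* ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                   flags, -1, 0);
  return ptr == MAP_FAILED ? nullptr : ptr;
}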
diff --git a/chromium/base/allocator/partition_allocator/page_allocator_internals_win.h b/chromium/base/allocator/partition_allocator/page_allocator_internals_win.h
index 23f7a43e994..60a3472f976 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator_internals_win.h
+++ b/chromium/base/allocator/partition_allocator/page_allocator_internals_win.h
@@ -91,7 +91,7 @@ void SetSystemPagesAccessInternal(
GetAccessFlags(accessibility))) {
int32_t error = GetLastError();
if (error == ERROR_COMMITMENT_LIMIT)
- OOM_CRASH();
+ OOM_CRASH(length);
// We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
// report we get the error number.
CHECK_EQ(ERROR_SUCCESS, error);
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.cc b/chromium/base/allocator/partition_allocator/partition_alloc.cc
index 5923b7e2870..11602606817 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc.cc
@@ -15,6 +15,7 @@
#include "base/allocator/partition_allocator/spin_lock.h"
#include "base/logging.h"
#include "base/no_destructor.h"
+#include "base/synchronization/lock.h"
namespace base {
@@ -56,15 +57,19 @@ PartitionRootGeneric::PartitionRootGeneric() = default;
PartitionRootGeneric::~PartitionRootGeneric() = default;
PartitionAllocatorGeneric::PartitionAllocatorGeneric() = default;
-subtle::SpinLock& GetLock() {
- static NoDestructor<subtle::SpinLock> s_initialized_lock;
+Lock& GetLock() {
+ static NoDestructor<Lock> s_initialized_lock;
return *s_initialized_lock;
}
static bool g_initialized = false;
-void (*internal::PartitionRootBase::gOomHandlingFunction)() = nullptr;
+Lock& GetHooksLock() {
+ static NoDestructor<Lock> lock;
+ return *lock;
+}
+
+OomFunction internal::PartitionRootBase::g_oom_handling_function = nullptr;
std::atomic<bool> PartitionAllocHooks::hooks_enabled_(false);
-subtle::SpinLock PartitionAllocHooks::set_hooks_lock_;
std::atomic<PartitionAllocHooks::AllocationObserverHook*>
PartitionAllocHooks::allocation_observer_hook_(nullptr);
std::atomic<PartitionAllocHooks::FreeObserverHook*>
@@ -78,7 +83,7 @@ std::atomic<PartitionAllocHooks::ReallocOverrideHook*>
void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook,
FreeObserverHook* free_hook) {
- subtle::SpinLock::Guard guard(set_hooks_lock_);
+ AutoLock guard(GetHooksLock());
// Chained hooks are not supported. Registering a non-null hook when a
// non-null hook is already registered indicates somebody is trying to
@@ -95,7 +100,7 @@ void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook,
void PartitionAllocHooks::SetOverrideHooks(AllocationOverrideHook* alloc_hook,
FreeOverrideHook* free_hook,
ReallocOverrideHook realloc_hook) {
- subtle::SpinLock::Guard guard(set_hooks_lock_);
+ AutoLock guard(GetHooksLock());
CHECK((!allocation_override_hook_ && !free_override_hook_ &&
!realloc_override_hook_) ||
@@ -171,7 +176,7 @@ bool PartitionAllocHooks::ReallocOverrideHookIfEnabled(size_t* out,
static void PartitionAllocBaseInit(internal::PartitionRootBase* root) {
DCHECK(!root->initialized);
{
- subtle::SpinLock::Guard guard(GetLock());
+ AutoLock guard(GetLock());
if (!g_initialized) {
g_initialized = true;
// We mark the sentinel bucket/page as free to make sure it is skipped by
@@ -187,9 +192,9 @@ static void PartitionAllocBaseInit(internal::PartitionRootBase* root) {
root->inverted_self = ~reinterpret_cast<uintptr_t>(root);
}
-void PartitionAllocGlobalInit(void (*oom_handling_function)()) {
- DCHECK(oom_handling_function);
- internal::PartitionRootBase::gOomHandlingFunction = oom_handling_function;
+void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
+ DCHECK(on_out_of_memory);
+ internal::PartitionRootBase::g_oom_handling_function = on_out_of_memory;
}
void PartitionRoot::Init(size_t bucket_count, size_t maximum_allocation) {
@@ -370,7 +375,7 @@ void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
if (new_size > kGenericMaxDirectMapped) {
if (flags & PartitionAllocReturnNull)
return nullptr;
- internal::PartitionExcessiveAllocationSize();
+ internal::PartitionExcessiveAllocationSize(new_size);
}
const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
@@ -425,7 +430,7 @@ void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
if (!ret) {
if (flags & PartitionAllocReturnNull)
return nullptr;
- internal::PartitionExcessiveAllocationSize();
+ internal::PartitionExcessiveAllocationSize(new_size);
}
size_t copy_size = actual_old_size;
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.h b/chromium/base/allocator/partition_allocator/partition_alloc.h
index fa73d8c1918..06ecc577675 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc.h
+++ b/chromium/base/allocator/partition_allocator/partition_alloc.h
@@ -220,7 +220,7 @@ class BASE_EXPORT PartitionStatsDumper {
const PartitionBucketMemoryStats*) = 0;
};
-BASE_EXPORT void PartitionAllocGlobalInit(void (*oom_handling_function)());
+BASE_EXPORT void PartitionAllocGlobalInit(OomFunction on_out_of_memory);
// PartitionAlloc supports setting hooks to observe allocations/frees as they
// occur as well as 'override' hooks that allow overriding those operations.
@@ -282,8 +282,6 @@ class BASE_EXPORT PartitionAllocHooks {
static std::atomic<bool> hooks_enabled_;
// Lock used to synchronize Set*Hooks calls.
- static subtle::SpinLock set_hooks_lock_;
-
static std::atomic<AllocationObserverHook*> allocation_observer_hook_;
static std::atomic<FreeObserverHook*> free_observer_hook_;
@@ -368,7 +366,8 @@ ALWAYS_INLINE void PartitionFree(void* ptr) {
internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
// TODO(palmer): See if we can afford to make this a CHECK.
DCHECK(internal::PartitionRootBase::IsValidPage(page));
- page->Free(ptr);
+ internal::DeferredUnmap deferred_unmap = page->Free(ptr);
+ deferred_unmap.Run();
#endif
}
@@ -462,10 +461,12 @@ ALWAYS_INLINE void PartitionRootGeneric::Free(void* ptr) {
internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
// TODO(palmer): See if we can afford to make this a CHECK.
DCHECK(IsValidPage(page));
+ internal::DeferredUnmap deferred_unmap;
{
subtle::SpinLock::Guard guard(lock);
- page->Free(ptr);
+ deferred_unmap = page->Free(ptr);
}
+ deferred_unmap.Run();
#endif
}
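A brief sketch of registering a handler under the new OomFunction signature declared above; RecordOomSize is hypothetical embedder code:

#include <cstddef>

void RecordOomSize(size_t size);  // hypothetical crash-reporting hook

// The handler now receives the size of the allocation that failed.
void OnPartitionAllocOom(size_t size) {
  RecordOomSize(size);
}

void InitAllocator() {
  base::PartitionAllocGlobalInit(&OnPartitionAllocOom);
}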
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_perftest.cc b/chromium/base/allocator/partition_allocator/partition_alloc_perftest.cc
index bdd85f0ae1d..e6afbd949b3 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc_perftest.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_perftest.cc
@@ -6,12 +6,13 @@
#include <vector>
#include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/strings/stringprintf.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
#include "base/timer/lap_timer.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
-#include "testing/perf/perf_test.h"
+#include "testing/perf/perf_result_reporter.h"
namespace base {
namespace {
@@ -29,57 +30,87 @@ constexpr int kMultiBucketIncrement = 13;
// Final size is 24 + (13 * 22) = 310 bytes.
constexpr int kMultiBucketRounds = 22;
-class AllocatingThread : public PlatformThread::Delegate {
+constexpr char kMetricPrefixMemoryAllocation[] = "MemoryAllocation";
+constexpr char kMetricThroughput[] = "throughput";
+constexpr char kMetricTimePerAllocation[] = "time_per_allocation";
+
+perf_test::PerfResultReporter SetUpReporter(const std::string& story_name) {
+ perf_test::PerfResultReporter reporter(kMetricPrefixMemoryAllocation,
+ story_name);
+ reporter.RegisterImportantMetric(kMetricThroughput, "runs/s");
+ reporter.RegisterImportantMetric(kMetricTimePerAllocation, "ns");
+ return reporter;
+}
+
+enum class AllocatorType { kSystem, kPartitionAlloc };
+
+class Allocator {
+ public:
+ Allocator() = default;
+ virtual ~Allocator() = default;
+ virtual void Init() {}
+ virtual void* Alloc(size_t size) = 0;
+ virtual void Free(void* data) = 0;
+};
+
+class SystemAllocator : public Allocator {
+ public:
+ SystemAllocator() = default;
+ ~SystemAllocator() override = default;
+ void* Alloc(size_t size) override { return malloc(size); }
+ void Free(void* data) override { free(data); }
+};
+
+class PartitionAllocator : public Allocator {
public:
- explicit AllocatingThread(PartitionAllocatorGeneric* allocator)
- : allocator_(allocator), should_stop_(false) {
- PlatformThread::Create(0, this, &thread_handle_);
+ PartitionAllocator()
+ : alloc_(std::make_unique<PartitionAllocatorGeneric>()) {}
+ ~PartitionAllocator() override = default;
+
+ void Init() override { alloc_->init(); }
+ void* Alloc(size_t size) override { return alloc_->root()->Alloc(size, ""); }
+ void Free(void* data) override { return alloc_->root()->Free(data); }
+
+ private:
+ std::unique_ptr<PartitionAllocatorGeneric> alloc_;
+};
+
+class TestLoopThread : public PlatformThread::Delegate {
+ public:
+ explicit TestLoopThread(OnceCallback<float()> test_fn)
+ : test_fn_(std::move(test_fn)) {
+ CHECK(PlatformThread::Create(0, this, &thread_handle_));
}
- ~AllocatingThread() override {
- should_stop_ = true;
+ float Run() {
PlatformThread::Join(thread_handle_);
+ return laps_per_second_;
}
- // Allocates and frees memory in a loop until |should_stop_| becomes true.
- void ThreadMain() override {
- uint64_t count = 0;
- while (true) {
- // Only check |should_stop_| every 2^15 iterations, as it is a
- // sequentially consistent access, hence expensive.
- if (count % (1 << 15) == 0 && should_stop_)
- break;
- void* data = allocator_->root()->Alloc(10, "");
- allocator_->root()->Free(data);
- count++;
- }
- }
+ void ThreadMain() override { laps_per_second_ = std::move(test_fn_).Run(); }
- PartitionAllocatorGeneric* allocator_;
- std::atomic<bool> should_stop_;
+ OnceCallback<float()> test_fn_;
PlatformThreadHandle thread_handle_;
+ std::atomic<float> laps_per_second_;
};
-void DisplayResults(const std::string& measurement,
- const std::string& modifier,
- size_t iterations_per_second) {
- perf_test::PrintResult(measurement, modifier, "", iterations_per_second,
- "runs/s", true);
- perf_test::PrintResult(measurement, modifier, "",
- static_cast<size_t>(1e9 / iterations_per_second),
- "ns/run", true);
+void DisplayResults(const std::string& story_name,
+ float iterations_per_second) {
+ auto reporter = SetUpReporter(story_name);
+ reporter.AddResult(kMetricThroughput, iterations_per_second);
+ reporter.AddResult(kMetricTimePerAllocation,
+ static_cast<size_t>(1e9 / iterations_per_second));
}
class MemoryAllocationPerfNode {
public:
MemoryAllocationPerfNode* GetNext() const { return next_; }
void SetNext(MemoryAllocationPerfNode* p) { next_ = p; }
- static void FreeAll(MemoryAllocationPerfNode* first,
- PartitionAllocatorGeneric& alloc) {
+ static void FreeAll(MemoryAllocationPerfNode* first, Allocator* alloc) {
MemoryAllocationPerfNode* cur = first;
while (cur != nullptr) {
MemoryAllocationPerfNode* next = cur->GetNext();
- alloc.root()->Free(cur);
+ alloc->Free(cur);
cur = next;
}
}
@@ -88,171 +119,177 @@ class MemoryAllocationPerfNode {
MemoryAllocationPerfNode* next_ = nullptr;
};
-class MemoryAllocationPerfTest : public testing::Test {
- public:
- MemoryAllocationPerfTest()
- : timer_(kWarmupRuns, kTimeLimit, kTimeCheckInterval) {}
- void SetUp() override { alloc_.init(); }
- void TearDown() override {
- alloc_.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages |
- PartitionPurgeDiscardUnusedSystemPages);
- }
+#if !defined(OS_ANDROID)
+float SingleBucket(Allocator* allocator) {
+ auto* first =
+ reinterpret_cast<MemoryAllocationPerfNode*>(allocator->Alloc(40));
+
+ LapTimer timer(kWarmupRuns, kTimeLimit, kTimeCheckInterval);
+ MemoryAllocationPerfNode* cur = first;
+ do {
+ auto* next =
+ reinterpret_cast<MemoryAllocationPerfNode*>(allocator->Alloc(40));
+ CHECK_NE(next, nullptr);
+ cur->SetNext(next);
+ cur = next;
+ timer.NextLap();
+ } while (!timer.HasTimeLimitExpired());
+ // next_ = nullptr only works if the class constructor is called (it's not
+ // called in this case because then we can allocate arbitrary-length
+ // payloads.)
+ cur->SetNext(nullptr);
+
+ MemoryAllocationPerfNode::FreeAll(first, allocator);
+ return timer.LapsPerSecond();
+}
+#endif  // !defined(OS_ANDROID)
+
+float SingleBucketWithFree(Allocator* allocator) {
+ // Allocate an initial element to make sure the bucket stays set up.
+ void* elem = allocator->Alloc(40);
+
+ LapTimer timer(kWarmupRuns, kTimeLimit, kTimeCheckInterval);
+ do {
+ void* cur = allocator->Alloc(40);
+ CHECK_NE(cur, nullptr);
+ allocator->Free(cur);
+ timer.NextLap();
+ } while (!timer.HasTimeLimitExpired());
+
+ allocator->Free(elem);
+ return timer.LapsPerSecond();
+}
- protected:
- void TestSingleBucket() {
- MemoryAllocationPerfNode* first =
- reinterpret_cast<MemoryAllocationPerfNode*>(
- alloc_.root()->Alloc(40, "<testing>"));
+#if !defined(OS_ANDROID)
+float MultiBucket(Allocator* allocator) {
+ auto* first =
+ reinterpret_cast<MemoryAllocationPerfNode*>(allocator->Alloc(40));
+ MemoryAllocationPerfNode* cur = first;
- timer_.Reset();
- MemoryAllocationPerfNode* cur = first;
- do {
- MemoryAllocationPerfNode* next =
- reinterpret_cast<MemoryAllocationPerfNode*>(
- alloc_.root()->Alloc(40, "<testing>"));
+ LapTimer timer(kWarmupRuns, kTimeLimit, kTimeCheckInterval);
+ do {
+ for (int i = 0; i < kMultiBucketRounds; i++) {
+ auto* next = reinterpret_cast<MemoryAllocationPerfNode*>(allocator->Alloc(
+ kMultiBucketMinimumSize + (i * kMultiBucketIncrement)));
CHECK_NE(next, nullptr);
cur->SetNext(next);
cur = next;
- timer_.NextLap();
- } while (!timer_.HasTimeLimitExpired());
- // next_ = nullptr only works if the class constructor is called (it's not
- // called in this case because then we can allocate arbitrary-length
- // payloads.)
- cur->SetNext(nullptr);
-
- MemoryAllocationPerfNode::FreeAll(first, alloc_);
-
- DisplayResults("MemoryAllocationPerfTest",
- " single bucket allocation (40 bytes)",
- timer_.LapsPerSecond());
- }
-
- void TestSingleBucketWithFree() {
- // Allocate an initial element to make sure the bucket stays set up.
- void* elem = alloc_.root()->Alloc(40, "<testing>");
-
- timer_.Reset();
- do {
- void* cur = alloc_.root()->Alloc(40, "<testing>");
- CHECK_NE(cur, nullptr);
- alloc_.root()->Free(cur);
- timer_.NextLap();
- } while (!timer_.HasTimeLimitExpired());
-
- alloc_.root()->Free(elem);
- DisplayResults("MemoryAllocationPerfTest",
- " single bucket allocation + free (40 bytes)",
- timer_.LapsPerSecond());
- }
+ }
+ timer.NextLap();
+ } while (!timer.HasTimeLimitExpired());
+ cur->SetNext(nullptr);
- void TestMultiBucket() {
- MemoryAllocationPerfNode* first =
- reinterpret_cast<MemoryAllocationPerfNode*>(
- alloc_.root()->Alloc(40, "<testing>"));
- MemoryAllocationPerfNode* cur = first;
+ MemoryAllocationPerfNode::FreeAll(first, allocator);
- timer_.Reset();
- do {
- for (int i = 0; i < kMultiBucketRounds; i++) {
- MemoryAllocationPerfNode* next =
- reinterpret_cast<MemoryAllocationPerfNode*>(alloc_.root()->Alloc(
- kMultiBucketMinimumSize + (i * kMultiBucketIncrement),
- "<testing>"));
- CHECK_NE(next, nullptr);
- cur->SetNext(next);
- cur = next;
- }
- timer_.NextLap();
- } while (!timer_.HasTimeLimitExpired());
- cur->SetNext(nullptr);
-
- MemoryAllocationPerfNode::FreeAll(first, alloc_);
-
- DisplayResults("MemoryAllocationPerfTest", " multi-bucket allocation",
- timer_.LapsPerSecond() * kMultiBucketRounds);
+ return timer.LapsPerSecond() * kMultiBucketRounds;
+}
+#endif  // !defined(OS_ANDROID)
+
+float MultiBucketWithFree(Allocator* allocator) {
+ std::vector<void*> elems;
+ elems.reserve(kMultiBucketRounds);
+ // Do an initial round of allocation to make sure that the buckets stay in
+ // use (and aren't accidentally released back to the OS).
+ for (int i = 0; i < kMultiBucketRounds; i++) {
+ void* cur =
+ allocator->Alloc(kMultiBucketMinimumSize + (i * kMultiBucketIncrement));
+ CHECK_NE(cur, nullptr);
+ elems.push_back(cur);
}
- void TestMultiBucketWithFree() {
- std::vector<void*> elems;
- elems.reserve(kMultiBucketRounds);
- // Do an initial round of allocation to make sure that the buckets stay in
- // use (and aren't accidentally released back to the OS).
+ LapTimer timer(kWarmupRuns, kTimeLimit, kTimeCheckInterval);
+ do {
for (int i = 0; i < kMultiBucketRounds; i++) {
- void* cur = alloc_.root()->Alloc(
- kMultiBucketMinimumSize + (i * kMultiBucketIncrement), "<testing>");
+ void* cur = allocator->Alloc(kMultiBucketMinimumSize +
+ (i * kMultiBucketIncrement));
CHECK_NE(cur, nullptr);
- elems.push_back(cur);
+ allocator->Free(cur);
}
-
- timer_.Reset();
- do {
- for (int i = 0; i < kMultiBucketRounds; i++) {
- void* cur = alloc_.root()->Alloc(
- kMultiBucketMinimumSize + (i * kMultiBucketIncrement), "<testing>");
- CHECK_NE(cur, nullptr);
- alloc_.root()->Free(cur);
- }
- timer_.NextLap();
- } while (!timer_.HasTimeLimitExpired());
+ timer.NextLap();
+ } while (!timer.HasTimeLimitExpired());
for (void* ptr : elems) {
- alloc_.root()->Free(ptr);
+ allocator->Free(ptr);
}
- DisplayResults("MemoryAllocationPerfTest",
- " multi-bucket allocation + free",
- timer_.LapsPerSecond() * kMultiBucketRounds);
- }
-
- LapTimer timer_;
- PartitionAllocatorGeneric alloc_;
-};
-
-TEST_F(MemoryAllocationPerfTest, SingleBucket) {
- TestSingleBucket();
+ return timer.LapsPerSecond() * kMultiBucketRounds;
}
-TEST_F(MemoryAllocationPerfTest, SingleBucketWithCompetingThread) {
- AllocatingThread t(&alloc_);
- TestSingleBucket();
+std::unique_ptr<Allocator> CreateAllocator(AllocatorType type) {
+ if (type == AllocatorType::kSystem)
+ return std::make_unique<SystemAllocator>();
+ return std::make_unique<PartitionAllocator>();
}
-TEST_F(MemoryAllocationPerfTest, SingleBucketWithFree) {
- TestSingleBucketWithFree();
-}
+void RunTest(int thread_count,
+ AllocatorType alloc_type,
+ float (*test_fn)(Allocator*),
+ const char* story_base_name) {
+ auto alloc = CreateAllocator(alloc_type);
+ alloc->Init();
+
+ std::vector<std::unique_ptr<TestLoopThread>> threads;
+ for (int i = 0; i < thread_count; ++i) {
+ threads.push_back(std::make_unique<TestLoopThread>(
+ BindOnce(test_fn, Unretained(alloc.get()))));
+ }
-TEST_F(MemoryAllocationPerfTest, SingleBucketWithFreeWithCompetingThread) {
- AllocatingThread t(&alloc_);
- TestSingleBucketWithFree();
+ uint64_t total_laps_per_second = 0;
+ uint64_t min_laps_per_second = std::numeric_limits<uint64_t>::max();
+ for (int i = 0; i < thread_count; ++i) {
+ uint64_t laps_per_second = threads[i]->Run();
+ min_laps_per_second = std::min(laps_per_second, min_laps_per_second);
+ total_laps_per_second += laps_per_second;
+ }
+
+ std::string name = base::StringPrintf(
+ "%s.%s_%s_%d", kMetricPrefixMemoryAllocation, story_base_name,
+ alloc_type == AllocatorType::kSystem ? "System" : "PartitionAlloc",
+ thread_count);
+
+ DisplayResults(name + "_total", total_laps_per_second);
+ DisplayResults(name + "_worst", min_laps_per_second);
}
-// Failing on Nexus5x: crbug.com/949838
-#if defined(OS_ANDROID)
-#define MAYBE_MultiBucket DISABLED_MultiBucket
-#define MAYBE_MultiBucketWithCompetingThread \
- DISABLED_MultiBucketWithCompetingThread
-#else
-#define MAYBE_MultiBucket MultiBucket
-#define MAYBE_MultiBucketWithCompetingThread MultiBucketWithCompetingThread
-#endif
-TEST_F(MemoryAllocationPerfTest, MAYBE_MultiBucket) {
- TestMultiBucket();
+class MemoryAllocationPerfTest
+ : public testing::TestWithParam<std::tuple<int, AllocatorType>> {};
+
+INSTANTIATE_TEST_SUITE_P(
+ ,
+ MemoryAllocationPerfTest,
+ ::testing::Combine(::testing::Values(1, 2, 3, 4),
+ ::testing::Values(AllocatorType::kSystem,
+ AllocatorType::kPartitionAlloc)));
+
+// This test (and the other one below) allocates a large amount of memory, which
+// can cause issues on Android.
+#if !defined(OS_ANDROID)
+TEST_P(MemoryAllocationPerfTest, SingleBucket) {
+ auto params = GetParam();
+ RunTest(std::get<0>(params), std::get<1>(params), SingleBucket,
+ "SingleBucket");
}
+#endif
-TEST_F(MemoryAllocationPerfTest, MAYBE_MultiBucketWithCompetingThread) {
- AllocatingThread t(&alloc_);
- TestMultiBucket();
+TEST_P(MemoryAllocationPerfTest, SingleBucketWithFree) {
+ auto params = GetParam();
+ RunTest(std::get<0>(params), std::get<1>(params), SingleBucketWithFree,
+ "SingleBucketWithFree");
}
-TEST_F(MemoryAllocationPerfTest, MultiBucketWithFree) {
- TestMultiBucketWithFree();
+#if !defined(OS_ANDROID)
+TEST_P(MemoryAllocationPerfTest, MultiBucket) {
+ auto params = GetParam();
+ RunTest(std::get<0>(params), std::get<1>(params), MultiBucket, "MultiBucket");
}
+#endif
-TEST_F(MemoryAllocationPerfTest, MultiBucketWithFreeWithCompetingThread) {
- AllocatingThread t(&alloc_);
- TestMultiBucketWithFree();
+TEST_P(MemoryAllocationPerfTest, MultiBucketWithFree) {
+ auto params = GetParam();
+ RunTest(std::get<0>(params), std::get<1>(params), MultiBucketWithFree,
+ "MultiBucketWithFree");
}
-} // anonymous namespace
+} // namespace
} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/partition_bucket.cc b/chromium/base/allocator/partition_allocator/partition_bucket.cc
index c2879e46966..0ff8661878b 100644
--- a/chromium/base/allocator/partition_allocator/partition_bucket.cc
+++ b/chromium/base/allocator/partition_allocator/partition_bucket.cc
@@ -185,7 +185,7 @@ void PartitionBucket::Init(uint32_t new_slot_size) {
}
NOINLINE void PartitionBucket::OnFull() {
- OOM_CRASH();
+ OOM_CRASH(0);
}
ALWAYS_INLINE void* PartitionBucket::AllocNewSlotSpan(
@@ -478,13 +478,10 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
if (size > kGenericMaxDirectMapped) {
if (return_null)
return nullptr;
- PartitionExcessiveAllocationSize();
+ PartitionExcessiveAllocationSize(size);
}
new_page = PartitionDirectMap(root, flags, size);
-#if !defined(OS_MACOSX)
- // Turn off the optimization to see if it helps https://crbug.com/892550.
*is_already_zeroed = true;
-#endif
} else if (LIKELY(SetNewActivePage())) {
// First, did we find an active page in the active pages list?
new_page = active_pages_head;
@@ -538,7 +535,7 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
DCHECK(active_pages_head == PartitionPage::get_sentinel_page());
if (return_null)
return nullptr;
- root->OutOfMemory();
+ root->OutOfMemory(size);
}
// TODO(ajwong): Is there a way to avoid the reading of bucket here?
diff --git a/chromium/base/allocator/partition_allocator/partition_oom.cc b/chromium/base/allocator/partition_allocator/partition_oom.cc
index 5e1cf79ea34..e23705f2d2d 100644
--- a/chromium/base/allocator/partition_allocator/partition_oom.cc
+++ b/chromium/base/allocator/partition_allocator/partition_oom.cc
@@ -10,13 +10,13 @@
namespace base {
namespace internal {
-void NOINLINE PartitionExcessiveAllocationSize() {
- OOM_CRASH();
+void NOINLINE PartitionExcessiveAllocationSize(size_t size) {
+ OOM_CRASH(size);
}
#if !defined(ARCH_CPU_64_BITS)
-NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages() {
- OOM_CRASH();
+NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages(size_t size) {
+ OOM_CRASH(size);
}
#endif
diff --git a/chromium/base/allocator/partition_allocator/partition_oom.h b/chromium/base/allocator/partition_allocator/partition_oom.h
index da8fc15a5cd..2c5e0d3ec1f 100644
--- a/chromium/base/allocator/partition_allocator/partition_oom.h
+++ b/chromium/base/allocator/partition_allocator/partition_oom.h
@@ -8,16 +8,18 @@
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
+#include <stddef.h>
+
#include "base/compiler_specific.h"
#include "build/build_config.h"
namespace base {
namespace internal {
-NOINLINE void PartitionExcessiveAllocationSize();
+NOINLINE void PartitionExcessiveAllocationSize(size_t size);
#if !defined(ARCH_CPU_64_BITS)
-NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages();
+NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages(size_t size);
#endif
} // namespace internal
diff --git a/chromium/base/allocator/partition_allocator/partition_page.cc b/chromium/base/allocator/partition_allocator/partition_page.cc
index 8375b4e4e76..bfb98950812 100644
--- a/chromium/base/allocator/partition_allocator/partition_page.cc
+++ b/chromium/base/allocator/partition_allocator/partition_page.cc
@@ -13,7 +13,7 @@ namespace internal {
namespace {
-ALWAYS_INLINE void PartitionDirectUnmap(PartitionPage* page) {
+ALWAYS_INLINE DeferredUnmap PartitionDirectUnmap(PartitionPage* page) {
PartitionRootBase* root = PartitionRootBase::FromPage(page);
const PartitionDirectMapExtent* extent =
PartitionDirectMapExtent::FromPage(page);
@@ -46,8 +46,7 @@ ALWAYS_INLINE void PartitionDirectUnmap(PartitionPage* page) {
// Account for the mapping starting a partition page before the actual
// allocation address.
ptr -= kPartitionPageSize;
-
- FreePages(ptr, unmap_size);
+ return {ptr, unmap_size};
}
ALWAYS_INLINE void PartitionRegisterEmptyPage(PartitionPage* page) {
@@ -90,13 +89,12 @@ PartitionPage* PartitionPage::get_sentinel_page() {
return &sentinel_page_;
}
-void PartitionPage::FreeSlowPath() {
+DeferredUnmap PartitionPage::FreeSlowPath() {
DCHECK(this != get_sentinel_page());
if (LIKELY(num_allocated_slots == 0)) {
// Page became fully unused.
if (UNLIKELY(bucket->is_direct_mapped())) {
- PartitionDirectUnmap(this);
- return;
+ return PartitionDirectUnmap(this);
}
// If it's the current active page, change it. We bounce the page to
// the empty list as a force towards defragmentation.
@@ -130,8 +128,9 @@ void PartitionPage::FreeSlowPath() {
// Special case: for a partition page with just a single slot, it may
// now be empty and we want to run it through the empty logic.
if (UNLIKELY(num_allocated_slots == 0))
- FreeSlowPath();
+ return FreeSlowPath();
}
+ return {};
}
void PartitionPage::Decommit(PartitionRootBase* root) {
@@ -160,5 +159,9 @@ void PartitionPage::DecommitIfPossible(PartitionRootBase* root) {
Decommit(root);
}
+void DeferredUnmap::Unmap() {
+ FreePages(ptr, size);
+}
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/partition_page.h b/chromium/base/allocator/partition_allocator/partition_page.h
index 07fdcdfc83e..a4849b15bc0 100644
--- a/chromium/base/allocator/partition_allocator/partition_page.h
+++ b/chromium/base/allocator/partition_allocator/partition_page.h
@@ -19,6 +19,20 @@ namespace internal {
struct PartitionRootBase;
+// PartitionPage::Free() defers unmapping a large page until the lock is
+// released. Callers of PartitionPage::Free() must invoke Run().
+// TODO(1061437): Reconsider once the new locking mechanism is implemented.
+struct DeferredUnmap {
+ void* ptr = nullptr;
+ size_t size = 0;
+ // In most cases there is no page to unmap and ptr == nullptr. This function
+ // is inlined to avoid the overhead of a function call in the common case.
+ ALWAYS_INLINE void Run();
+
+ private:
+ BASE_EXPORT NOINLINE void Unmap();
+};
+
// Some notes on page states. A page can be in one of four major states:
// 1) Active.
// 2) Full.
@@ -62,8 +76,9 @@ struct PartitionPage {
// Public API
// Note the matching Alloc() functions are in PartitionPage.
- BASE_EXPORT NOINLINE void FreeSlowPath();
- ALWAYS_INLINE void Free(void* ptr);
+ // Callers must invoke DeferredUnmap::Run() after releasing the lock.
+ BASE_EXPORT NOINLINE DeferredUnmap FreeSlowPath() WARN_UNUSED_RESULT;
+ ALWAYS_INLINE DeferredUnmap Free(void* ptr) WARN_UNUSED_RESULT;
void Decommit(PartitionRootBase* root);
void DecommitIfPossible(PartitionRootBase* root);
@@ -201,7 +216,7 @@ ALWAYS_INLINE size_t PartitionPage::get_raw_size() const {
return 0;
}
-ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
+ALWAYS_INLINE DeferredUnmap PartitionPage::Free(void* ptr) {
#if DCHECK_IS_ON()
size_t slot_size = bucket->slot_size;
const size_t raw_size = get_raw_size();
@@ -229,12 +244,13 @@ ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
freelist_head = entry;
--num_allocated_slots;
if (UNLIKELY(num_allocated_slots <= 0)) {
- FreeSlowPath();
+ return FreeSlowPath();
} else {
// All single-slot allocations must go through the slow path to
// correctly update the size metadata.
DCHECK(get_raw_size() == 0);
}
+ return {};
}
ALWAYS_INLINE bool PartitionPage::is_active() const {
@@ -287,6 +303,12 @@ ALWAYS_INLINE void PartitionPage::Reset() {
next_page = nullptr;
}
+ALWAYS_INLINE void DeferredUnmap::Run() {
+ if (UNLIKELY(ptr)) {
+ Unmap();
+ }
+}
+
} // namespace internal
} // namespace base
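The deferral contract in one sketch, mirroring PartitionRootGeneric::Free() earlier in this patch: Free() runs under the partition lock, while the potentially slow FreePages() call happens only after the guard is released.

void FreeOutsideLock(base::internal::PartitionPage* page, void* ptr,
                     base::subtle::SpinLock& lock) {
  base::internal::DeferredUnmap deferred;
  {
    base::subtle::SpinLock::Guard guard(lock);
    deferred = page->Free(ptr);  // must not unmap while holding the lock
  }
  deferred.Run();  // no-op unless a direct-mapped page became empty
}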
diff --git a/chromium/base/allocator/partition_allocator/partition_root_base.cc b/chromium/base/allocator/partition_allocator/partition_root_base.cc
index 91b998fbf00..6e1442f7b79 100644
--- a/chromium/base/allocator/partition_allocator/partition_root_base.cc
+++ b/chromium/base/allocator/partition_allocator/partition_root_base.cc
@@ -12,19 +12,19 @@
namespace base {
namespace internal {
-NOINLINE void PartitionRootBase::OutOfMemory() {
+NOINLINE void PartitionRootBase::OutOfMemory(size_t size) {
#if !defined(ARCH_CPU_64_BITS)
// Check whether this OOM is due to a lot of super pages that are allocated
// but not committed, probably due to http://crbug.com/421387.
if (total_size_of_super_pages + total_size_of_direct_mapped_pages -
total_size_of_committed_pages >
kReasonableSizeOfUnusedPages) {
- PartitionOutOfMemoryWithLotsOfUncommitedPages();
+ PartitionOutOfMemoryWithLotsOfUncommitedPages(size);
}
#endif
- if (PartitionRootBase::gOomHandlingFunction)
- (*PartitionRootBase::gOomHandlingFunction)();
- OOM_CRASH();
+ if (PartitionRootBase::g_oom_handling_function)
+ (*PartitionRootBase::g_oom_handling_function)(size);
+ OOM_CRASH(size);
}
void PartitionRootBase::DecommitEmptyPages() {
diff --git a/chromium/base/allocator/partition_allocator/partition_root_base.h b/chromium/base/allocator/partition_allocator/partition_root_base.h
index a3f9175b3cb..42c1d8d787d 100644
--- a/chromium/base/allocator/partition_allocator/partition_root_base.h
+++ b/chromium/base/allocator/partition_allocator/partition_root_base.h
@@ -14,6 +14,9 @@
#include "build/build_config.h"
namespace base {
+
+typedef void (*OomFunction)(size_t);
+
namespace internal {
struct PartitionPage;
@@ -73,9 +76,9 @@ struct BASE_EXPORT PartitionRootBase {
ALWAYS_INLINE static bool IsValidPage(PartitionPage* page);
ALWAYS_INLINE static PartitionRootBase* FromPage(PartitionPage* page);
- // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory.
- static void (*gOomHandlingFunction)();
- NOINLINE void OutOfMemory();
+ // g_oom_handling_function is invoked when PartitionAlloc hits OutOfMemory.
+ static OomFunction g_oom_handling_function;
+ NOINLINE void OutOfMemory(size_t size);
ALWAYS_INLINE void IncreaseCommittedPages(size_t len);
ALWAYS_INLINE void DecreaseCommittedPages(size_t len);
diff --git a/chromium/base/allocator/partition_allocator/random.cc b/chromium/base/allocator/partition_allocator/random.cc
index 46ffaaba338..7da12ddb1ed 100644
--- a/chromium/base/allocator/partition_allocator/random.cc
+++ b/chromium/base/allocator/partition_allocator/random.cc
@@ -8,13 +8,22 @@
#include "base/logging.h"
#include "base/no_destructor.h"
#include "base/rand_util.h"
+#include "base/synchronization/lock.h"
namespace base {
+namespace {
+
+Lock& GetLock() {
+ static NoDestructor<Lock> lock;
+ return *lock;
+}
+
+} // namespace
+
// This is the same PRNG as used by tcmalloc for mapping address randomness;
// see http://burtleburtle.net/bob/rand/smallprng.html.
struct RandomContext {
- subtle::SpinLock lock;
bool initialized;
uint32_t a;
uint32_t b;
@@ -22,45 +31,44 @@ struct RandomContext {
uint32_t d;
};
+static RandomContext g_context GUARDED_BY(GetLock());
+
namespace {
-RandomContext* GetRandomContext() {
- static NoDestructor<RandomContext> g_random_context;
- RandomContext* x = g_random_context.get();
- subtle::SpinLock::Guard guard(x->lock);
- if (UNLIKELY(!x->initialized)) {
+RandomContext& GetRandomContext() EXCLUSIVE_LOCKS_REQUIRED(GetLock()) {
+ if (UNLIKELY(!g_context.initialized)) {
const uint64_t r1 = RandUint64();
const uint64_t r2 = RandUint64();
- x->a = static_cast<uint32_t>(r1);
- x->b = static_cast<uint32_t>(r1 >> 32);
- x->c = static_cast<uint32_t>(r2);
- x->d = static_cast<uint32_t>(r2 >> 32);
- x->initialized = true;
+ g_context.a = static_cast<uint32_t>(r1);
+ g_context.b = static_cast<uint32_t>(r1 >> 32);
+ g_context.c = static_cast<uint32_t>(r2);
+ g_context.d = static_cast<uint32_t>(r2 >> 32);
+ g_context.initialized = true;
}
- return x;
+ return g_context;
}
} // namespace
uint32_t RandomValue() {
- RandomContext* x = GetRandomContext();
- subtle::SpinLock::Guard guard(x->lock);
+ AutoLock guard(GetLock());
+ RandomContext& x = GetRandomContext();
#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
- uint32_t e = x->a - rot(x->b, 27);
- x->a = x->b ^ rot(x->c, 17);
- x->b = x->c + x->d;
- x->c = x->d + e;
- x->d = e + x->a;
- return x->d;
+ uint32_t e = x.a - rot(x.b, 27);
+ x.a = x.b ^ rot(x.c, 17);
+ x.b = x.c + x.d;
+ x.c = x.d + e;
+ x.d = e + x.a;
+ return x.d;
#undef rot
}
void SetMmapSeedForTesting(uint64_t seed) {
- RandomContext* x = GetRandomContext();
- subtle::SpinLock::Guard guard(x->lock);
- x->a = x->b = static_cast<uint32_t>(seed);
- x->c = x->d = static_cast<uint32_t>(seed >> 32);
- x->initialized = true;
+ AutoLock guard(GetLock());
+ RandomContext& x = GetRandomContext();
+ x.a = x.b = static_cast<uint32_t>(seed);
+ x.c = x.d = static_cast<uint32_t>(seed >> 32);
+ x.initialized = true;
}
} // namespace base
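For reference, a self-contained restatement of the "small PRNG" update above (the jsf32 generator), which makes the SetMmapSeedForTesting() determinism easy to verify in isolation; names are illustrative:

#include <cstdint>

struct Jsf32 {
  uint32_t a, b, c, d;
};

static uint32_t Rot(uint32_t x, int k) {
  return (x << k) | (x >> (32 - k));
}

uint32_t Next(Jsf32& s) {
  uint32_t e = s.a - Rot(s.b, 27);
  s.a = s.b ^ Rot(s.c, 17);
  s.b = s.c + s.d;
  s.c = s.d + e;
  s.d = e + s.a;
  return s.d;
}

Seeding a and b with the low 32 bits of the seed and c and d with the high 32 bits reproduces SetMmapSeedForTesting() exactly.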
diff --git a/chromium/base/allocator/partition_allocator/spin_lock_perftest.cc b/chromium/base/allocator/partition_allocator/spin_lock_perftest.cc
index 49811cbb27d..c60e3631d0c 100644
--- a/chromium/base/allocator/partition_allocator/spin_lock_perftest.cc
+++ b/chromium/base/allocator/partition_allocator/spin_lock_perftest.cc
@@ -7,7 +7,7 @@
#include "base/time/time.h"
#include "base/timer/lap_timer.h"
#include "testing/gtest/include/gtest/gtest.h"
-#include "testing/perf/perf_test.h"
+#include "testing/perf/perf_result_reporter.h"
namespace base {
namespace {
@@ -16,6 +16,17 @@ constexpr int kWarmupRuns = 1;
constexpr TimeDelta kTimeLimit = TimeDelta::FromSeconds(1);
constexpr int kTimeCheckInterval = 100000;
+constexpr char kMetricPrefixSpinLock[] = "SpinLock.";
+constexpr char kMetricLockUnlockThroughput[] = "lock_unlock_throughput";
+constexpr char kStoryBaseline[] = "baseline_story";
+constexpr char kStoryWithCompetingThread[] = "with_competing_thread";
+
+perf_test::PerfResultReporter SetUpReporter(const std::string& story_name) {
+ perf_test::PerfResultReporter reporter(kMetricPrefixSpinLock, story_name);
+ reporter.RegisterImportantMetric(kMetricLockUnlockThroughput, "runs/s");
+ return reporter;
+}
+
class Spin : public PlatformThread::Delegate {
public:
Spin(subtle::SpinLock* lock, size_t* data)
@@ -53,8 +64,8 @@ TEST(SpinLockPerfTest, Simple) {
timer.NextLap();
} while (!timer.HasTimeLimitExpired());
- perf_test::PrintResult("SpinLockPerfTest", " lock()/unlock()", "",
- timer.LapsPerSecond(), "runs/s", true);
+ auto reporter = SetUpReporter(kStoryBaseline);
+ reporter.AddResult(kMetricLockUnlockThroughput, timer.LapsPerSecond());
}
TEST(SpinLockPerfTest, WithCompetingThread) {
@@ -78,9 +89,8 @@ TEST(SpinLockPerfTest, WithCompetingThread) {
thread_main.Stop();
PlatformThread::Join(thread_handle);
- perf_test::PrintResult("SpinLockPerfTest.WithCompetingThread",
- " lock()/unlock()", "", timer.LapsPerSecond(),
- "runs/s", true);
+ auto reporter = SetUpReporter(kStoryWithCompetingThread);
+ reporter.AddResult(kMetricLockUnlockThroughput, timer.LapsPerSecond());
}
} // namespace base