Diffstat (limited to 'chromium/third_party/blink/renderer/platform/heap/impl')
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/atomic_entry_flag.h | 51
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/blink_gc.cc | 71
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/blink_gc.h | 126
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/blink_gc_memory_dump_provider.cc | 144
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/blink_gc_memory_dump_provider.h | 47
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/disallow_new_wrapper.h | 53
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/finalizer_traits.h | 95
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/garbage_collected.h | 117
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/gc_info.cc | 137
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/gc_info.h | 128
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/gc_task_runner.h | 90
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/heap.cc | 796
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/heap.h | 757
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/heap_allocator.cc | 139
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/heap_allocator.h | 909
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/heap_compact.cc | 457
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/heap_compact.h | 167
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/heap_page.cc | 1910
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/heap_page.h | 1614
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/heap_stats_collector.cc | 275
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/heap_stats_collector.h | 469
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/heap_traits.h | 40
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/marking_scheduling_oracle.cc | 97
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/marking_scheduling_oracle.h | 65
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/marking_verifier.cc | 82
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/marking_verifier.h | 43
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/marking_visitor.cc | 362
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/marking_visitor.h | 281
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/member.h | 577
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/name_traits.h | 62
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/page_bloom_filter.h | 48
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/page_memory.cc | 137
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/page_memory.h | 186
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/page_pool.cc | 55
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/page_pool.h | 48
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/persistent.h | 971
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/persistent_node.cc | 206
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/persistent_node.h | 385
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/process_heap.cc | 71
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/process_heap.h | 69
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/thread_state.cc | 1754
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/thread_state.h | 716
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/thread_state_scopes.h | 128
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/thread_state_statistics.cc | 32
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/thread_state_statistics.h | 67
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/threading_traits.h | 168
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/trace_traits.h | 381
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/unified_heap_controller.cc | 252
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/unified_heap_controller.h | 75
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/unified_heap_marking_visitor.cc | 108
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/unified_heap_marking_visitor.h | 89
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/unsanitized_atomic.cc | 87
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/unsanitized_atomic.h | 65
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/visitor.h | 312
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/impl/worklist.h | 469
55 files changed, 17040 insertions, 0 deletions
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/atomic_entry_flag.h b/chromium/third_party/blink/renderer/platform/heap/impl/atomic_entry_flag.h
new file mode 100644
index 00000000000..46342d94529
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/atomic_entry_flag.h
@@ -0,0 +1,51 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_ATOMIC_ENTRY_FLAG_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_ATOMIC_ENTRY_FLAG_H_
+
+#include <atomic>
+
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+
+namespace blink {
+
+// A flag which provides a fast check whether a scope may be entered on the
+// current thread, without needing to access thread-local storage or a mutex.
+//
+// Can have false positives (i.e., spuriously report that it might be entered),
+// so it is expected that this will be used in tandem with a precise check that
+// the scope is in fact entered on that thread.
+//
+// Example:
+// g_frobnicating_flag.MightBeEntered() &&
+// ThreadLocalFrobnicator().IsFrobnicating()
+//
+// Relaxed atomic operations are sufficient, since:
+// - all accesses remain atomic
+// - each thread must observe its own operations in order
+// - no thread ever exits the flag more times than it enters (if used correctly)
+// And so if a thread observes zero, it must be because it has observed an equal
+// number of exits as entries.
+class AtomicEntryFlag {
+ DISALLOW_NEW();
+
+ public:
+ inline void Enter() { entries_.fetch_add(1, std::memory_order_relaxed); }
+ inline void Exit() { entries_.fetch_sub(1, std::memory_order_relaxed); }
+
+ // Returns false only if the current thread is not between a call to Enter and
+ // a call to Exit. Returns true if this thread or another thread may currently
+ // be in the scope guarded by this flag.
+ inline bool MightBeEntered() const {
+ return entries_.load(std::memory_order_relaxed) != 0;
+ }
+
+ private:
+ std::atomic_int entries_{0};
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_ATOMIC_ENTRY_FLAG_H_
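
Editor's note: the header comment above sketches the intended usage pattern, pairing the fast global check with a precise thread-local check. A minimal sketch of that pattern follows; FrobnicationScope, g_frobnicating_flag and t_is_frobnicating are illustrative names, not part of the Blink API, and atomic_entry_flag.h is assumed to be included.

blink::AtomicEntryFlag g_frobnicating_flag;   // process-wide fast check
thread_local bool t_is_frobnicating = false;  // precise per-thread state

class FrobnicationScope {
 public:
  FrobnicationScope() {
    g_frobnicating_flag.Enter();
    t_is_frobnicating = true;
  }
  ~FrobnicationScope() {
    t_is_frobnicating = false;
    g_frobnicating_flag.Exit();
  }
};

// Fast path: thread-local storage is consulted only if some thread might
// currently be inside the scope.
bool IsFrobnicatingOnThisThread() {
  return g_frobnicating_flag.MightBeEntered() && t_is_frobnicating;
}
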
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/blink_gc.cc b/chromium/third_party/blink/renderer/platform/heap/impl/blink_gc.cc
new file mode 100644
index 00000000000..58c86eb7a12
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/blink_gc.cc
@@ -0,0 +1,71 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/heap/blink_gc.h"
+
+namespace blink {
+
+const char* BlinkGC::ToString(BlinkGC::GCReason reason) {
+ switch (reason) {
+ case BlinkGC::GCReason::kForcedGCForTesting:
+ return "ForcedGCForTesting";
+ case BlinkGC::GCReason::kThreadTerminationGC:
+ return "ThreadTerminationGC";
+ case BlinkGC::GCReason::kUnifiedHeapGC:
+ return "UnifiedHeapGC";
+ case BlinkGC::GCReason::kUnifiedHeapForMemoryReductionGC:
+ return "UnifiedHeapForMemoryReductionGC";
+ case BlinkGC::GCReason::kUnifiedHeapForcedForTestingGC:
+ return "UnifiedHeapForcedForTestingGC";
+ }
+ IMMEDIATE_CRASH();
+}
+
+const char* BlinkGC::ToString(BlinkGC::MarkingType type) {
+ switch (type) {
+ case BlinkGC::MarkingType::kAtomicMarking:
+ return "AtomicMarking";
+ case BlinkGC::MarkingType::kIncrementalAndConcurrentMarking:
+ return "IncrementalAndConcurrentMarking";
+ }
+ IMMEDIATE_CRASH();
+}
+
+const char* BlinkGC::ToString(BlinkGC::SweepingType type) {
+ switch (type) {
+ case BlinkGC::SweepingType::kConcurrentAndLazySweeping:
+ return "ConcurrentAndLazySweeping";
+ case BlinkGC::SweepingType::kEagerSweeping:
+ return "EagerSweeping";
+ }
+ IMMEDIATE_CRASH();
+}
+
+const char* BlinkGC::ToString(BlinkGC::StackState stack_state) {
+ switch (stack_state) {
+ case BlinkGC::kNoHeapPointersOnStack:
+ return "NoHeapPointersOnStack";
+ case BlinkGC::kHeapPointersOnStack:
+ return "HeapPointersOnStack";
+ }
+ IMMEDIATE_CRASH();
+}
+
+const char* BlinkGC::ToString(BlinkGC::ArenaIndices arena_index) {
+#define ArenaCase(name) \
+ case k##name##ArenaIndex: \
+ return "" #name "Arena";
+
+ switch (arena_index) {
+ FOR_EACH_ARENA(ArenaCase)
+
+ case BlinkGC::ArenaIndices::kNumberOfArenas:
+ IMMEDIATE_CRASH();
+ }
+ IMMEDIATE_CRASH();
+
+#undef ArenaCase
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/blink_gc.h b/chromium/third_party/blink/renderer/platform/heap/impl/blink_gc.h
new file mode 100644
index 00000000000..1aa6ec6ad8c
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/blink_gc.h
@@ -0,0 +1,126 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_BLINK_GC_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_BLINK_GC_H_
+
+// BlinkGC.h is a file that defines common things used by Blink GC.
+
+#include "third_party/blink/renderer/platform/platform_export.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+
+#define PRINT_HEAP_STATS 0 // Enable this macro to print heap stats to stderr.
+
+namespace blink {
+
+class LivenessBroker;
+class MarkingVisitor;
+class Visitor;
+
+using Address = uint8_t*;
+using ConstAddress = const uint8_t*;
+
+using VisitorCallback = void (*)(Visitor*, const void*);
+using MarkingVisitorCallback = void (*)(MarkingVisitor*, const void*);
+using TraceCallback = VisitorCallback;
+using WeakCallback = void (*)(const LivenessBroker&, const void*);
+using EphemeronCallback = VisitorCallback;
+
+// Simple alias to avoid heap compaction type signatures turning into
+// a sea of generic |void*|s.
+using MovableReference = const void*;
+
+// List of all arenas. Includes typed arenas as well.
+#define FOR_EACH_ARENA(H) \
+ H(NormalPage1) \
+ H(NormalPage2) \
+ H(NormalPage3) \
+ H(NormalPage4) \
+ H(Vector) \
+ H(HashTable) \
+ H(Node) \
+ H(CSSValue) \
+ H(LargeObject)
+
+class PLATFORM_EXPORT WorklistTaskId {
+ public:
+ static constexpr int MutatorThread = 0;
+ static constexpr int ConcurrentThreadBase = 1;
+};
+
+class PLATFORM_EXPORT BlinkGC final {
+ STATIC_ONLY(BlinkGC);
+
+ public:
+ // CollectionType represents generational collection. kMinor collects objects
+ // in the young generation (i.e. allocated since the previous collection
+ // cycle, since we use sticky bits), kMajor collects the entire heap.
+ enum class CollectionType { kMinor, kMajor };
+
+ // When garbage collecting we need to know whether or not there
+ // can be pointers to Blink GC managed objects on the stack for
+ // each thread. When threads reach a safe point they record
+ // whether or not they have pointers on the stack.
+ enum StackState { kNoHeapPointersOnStack, kHeapPointersOnStack };
+
+ enum MarkingType {
+ // The marking completes synchronously.
+ kAtomicMarking,
+ // The marking task is split and executed in chunks (either on the mutator
+ // thread or concurrently).
+ kIncrementalAndConcurrentMarking
+ };
+
+ enum SweepingType {
+ // The sweeping task is split into chunks and scheduled lazily and
+ // concurrently.
+ kConcurrentAndLazySweeping,
+ // The sweeping task executes synchronously right after marking.
+ kEagerSweeping,
+ };
+
+ // Commented out reasons have been used in the past but are not used any
+ // longer. We keep them here as the corresponding UMA histograms cannot be
+ // changed.
+ enum class GCReason {
+ // kIdleGC = 0
+ // kPreciseGC = 1
+ // kConservativeGC = 2
+ kForcedGCForTesting = 3,
+ // kMemoryPressureGC = 4
+ // kPageNavigationGC = 5
+ kThreadTerminationGC = 6,
+ // kTesting = 7
+ // kIncrementalIdleGC = 8
+ // kIncrementalV8FollowupGC = 9
+ kUnifiedHeapGC = 10,
+ kUnifiedHeapForMemoryReductionGC = 11,
+ kUnifiedHeapForcedForTestingGC = 12,
+ // Used by UMA_HISTOGRAM_ENUMERATION macro.
+ kMaxValue = kUnifiedHeapForcedForTestingGC,
+ };
+
+#define DeclareArenaIndex(name) k##name##ArenaIndex,
+ enum ArenaIndices {
+ FOR_EACH_ARENA(DeclareArenaIndex)
+ // Values used for iteration of heap segments.
+ kNumberOfArenas,
+ };
+#undef DeclareArenaIndex
+
+ enum V8GCType {
+ kV8MinorGC,
+ kV8MajorGC,
+ };
+
+ static const char* ToString(GCReason);
+ static const char* ToString(MarkingType);
+ static const char* ToString(StackState);
+ static const char* ToString(SweepingType);
+ static const char* ToString(ArenaIndices);
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_BLINK_GC_H_
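
Editor's note: FOR_EACH_ARENA above is an X-macro: a single list of arena names drives both the ArenaIndices enum (via DeclareArenaIndex) and the string conversion in blink_gc.cc (via ArenaCase). A standalone sketch of the same pattern, with made-up names, looks like this:

#define FOR_EACH_EXAMPLE_ARENA(H) \
  H(NormalPage1)                  \
  H(Vector)                       \
  H(LargeObject)

enum ExampleArenaIndices {
#define DECLARE_INDEX(name) k##name##ArenaIndex,
  FOR_EACH_EXAMPLE_ARENA(DECLARE_INDEX)
#undef DECLARE_INDEX
  kNumberOfArenas,
};

const char* ExampleArenaToString(ExampleArenaIndices index) {
#define ARENA_CASE(name)    \
  case k##name##ArenaIndex: \
    return #name "Arena";
  switch (index) {
    FOR_EACH_EXAMPLE_ARENA(ARENA_CASE)
    case kNumberOfArenas:
      break;
  }
#undef ARENA_CASE
  return "Unknown";
}
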
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/blink_gc_memory_dump_provider.cc b/chromium/third_party/blink/renderer/platform/heap/impl/blink_gc_memory_dump_provider.cc
new file mode 100644
index 00000000000..cb8ce725ffa
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/blink_gc_memory_dump_provider.cc
@@ -0,0 +1,144 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/heap/blink_gc_memory_dump_provider.h"
+
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "third_party/blink/public/platform/platform.h"
+#include "third_party/blink/renderer/platform/heap/handle.h"
+#include "third_party/blink/renderer/platform/heap/impl/thread_state_statistics.h"
+#include "third_party/blink/renderer/platform/heap/thread_state.h"
+#include "third_party/blink/renderer/platform/instrumentation/tracing/web_memory_allocator_dump.h"
+#include "third_party/blink/renderer/platform/wtf/std_lib_extras.h"
+#include "third_party/blink/renderer/platform/wtf/threading.h"
+
+namespace blink {
+namespace {
+
+constexpr const char* HeapTypeString(
+ BlinkGCMemoryDumpProvider::HeapType heap_type) {
+ switch (heap_type) {
+ case BlinkGCMemoryDumpProvider::HeapType::kBlinkMainThread:
+ return "main";
+ case BlinkGCMemoryDumpProvider::HeapType::kBlinkWorkerThread:
+ return "workers";
+ }
+}
+
+} // namespace
+
+BlinkGCMemoryDumpProvider::BlinkGCMemoryDumpProvider(
+ ThreadState* thread_state,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ BlinkGCMemoryDumpProvider::HeapType heap_type)
+ : thread_state_(thread_state),
+ heap_type_(heap_type),
+ dump_base_name_(
+ "blink_gc/" + std::string(HeapTypeString(heap_type_)) + "/heap" +
+ (heap_type_ == HeapType::kBlinkWorkerThread
+ ? "/" + base::StringPrintf(
+ "worker_0x%" PRIXPTR,
+ reinterpret_cast<uintptr_t>(thread_state_))
+ : "")) {
+ base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+ this, "BlinkGC", task_runner);
+}
+
+BlinkGCMemoryDumpProvider::~BlinkGCMemoryDumpProvider() {
+ base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
+ this);
+}
+
+bool BlinkGCMemoryDumpProvider::OnMemoryDump(
+ const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* process_memory_dump) {
+ ThreadState::Statistics::DetailLevel detail_level =
+ args.level_of_detail ==
+ base::trace_event::MemoryDumpLevelOfDetail::DETAILED
+ ? ThreadState::Statistics::kDetailed
+ : ThreadState::Statistics::kBrief;
+
+ ThreadState::Statistics stats =
+ ThreadState::StatisticsCollector(thread_state_)
+ .CollectStatistics(detail_level);
+
+ auto* heap_dump = process_memory_dump->CreateAllocatorDump(dump_base_name_);
+ heap_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
+ base::trace_event::MemoryAllocatorDump::kUnitsBytes,
+ stats.committed_size_bytes);
+ heap_dump->AddScalar("allocated_objects_size",
+ base::trace_event::MemoryAllocatorDump::kUnitsBytes,
+ stats.used_size_bytes);
+
+ if (detail_level == ThreadState::Statistics::kBrief) {
+ return true;
+ }
+
+ // Detailed statistics.
+ for (const ThreadState::Statistics::ArenaStatistics& arena_stats :
+ stats.arena_stats) {
+ std::string arena_dump_name = dump_base_name_ + "/" + arena_stats.name;
+ auto* arena_dump =
+ process_memory_dump->CreateAllocatorDump(arena_dump_name);
+ arena_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
+ base::trace_event::MemoryAllocatorDump::kUnitsBytes,
+ arena_stats.committed_size_bytes);
+ arena_dump->AddScalar("allocated_objects_size",
+ base::trace_event::MemoryAllocatorDump::kUnitsBytes,
+ arena_stats.used_size_bytes);
+
+ size_t page_count = 0;
+ for (const ThreadState::Statistics::PageStatistics& page_stats :
+ arena_stats.page_stats) {
+ auto* page_dump = process_memory_dump->CreateAllocatorDump(
+ arena_dump_name + "/pages/page_" +
+ base::NumberToString(page_count++));
+ page_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
+ base::trace_event::MemoryAllocatorDump::kUnitsBytes,
+ page_stats.committed_size_bytes);
+ page_dump->AddScalar("allocated_objects_size",
+ base::trace_event::MemoryAllocatorDump::kUnitsBytes,
+ page_stats.used_size_bytes);
+ }
+
+ const ThreadState::Statistics::FreeListStatistics& free_list_stats =
+ arena_stats.free_list_stats;
+ for (wtf_size_t i = 0; i < free_list_stats.bucket_size.size(); ++i) {
+ constexpr size_t kDigits = 8;
+ std::string original_bucket_size =
+ base::NumberToString(free_list_stats.bucket_size[i]);
+ std::string padded_bucket_size =
+ std::string(kDigits - original_bucket_size.length(), '0') +
+ original_bucket_size;
+ auto* free_list_bucket_dump = process_memory_dump->CreateAllocatorDump(
+ arena_dump_name + "/freelist/bucket_" + padded_bucket_size);
+ free_list_bucket_dump->AddScalar(
+ "free_size", base::trace_event::MemoryAllocatorDump::kUnitsBytes,
+ free_list_stats.free_size[i]);
+ }
+
+ const ThreadState::Statistics::ObjectStatistics& object_stats =
+ arena_stats.object_stats;
+ for (wtf_size_t i = 1; i < object_stats.num_types; i++) {
+ if (object_stats.type_name[i].empty())
+ continue;
+
+ auto* class_dump = process_memory_dump->CreateAllocatorDump(
+ arena_dump_name + "/classes/" + object_stats.type_name[i]);
+ class_dump->AddScalar(
+ "object_count", base::trace_event::MemoryAllocatorDump::kUnitsObjects,
+ object_stats.type_count[i]);
+ class_dump->AddScalar("object_size",
+ base::trace_event::MemoryAllocatorDump::kUnitsBytes,
+ object_stats.type_bytes[i]);
+ }
+ }
+ return true;
+}
+
+} // namespace blink
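
Editor's note: one small detail in OnMemoryDump above is that freelist bucket dumps are named with a zero-padded bucket size, so the dump names sort numerically when compared as strings. A standalone sketch of that padding, using std::to_string in place of base::NumberToString and assuming the bucket size fits in eight decimal digits (as the code above does):

#include <cstddef>
#include <string>

std::string PaddedBucketName(std::size_t bucket_size) {
  constexpr std::size_t kDigits = 8;  // matches the width used above
  std::string digits = std::to_string(bucket_size);
  return std::string(kDigits - digits.length(), '0') + digits;
}

// PaddedBucketName(64)   -> "00000064"
// PaddedBucketName(4096) -> "00004096"
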
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/blink_gc_memory_dump_provider.h b/chromium/third_party/blink/renderer/platform/heap/impl/blink_gc_memory_dump_provider.h
new file mode 100644
index 00000000000..e730f8d58f0
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/blink_gc_memory_dump_provider.h
@@ -0,0 +1,47 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_BLINK_GC_MEMORY_DUMP_PROVIDER_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_BLINK_GC_MEMORY_DUMP_PROVIDER_H_
+
+#include "base/trace_event/memory_dump_provider.h"
+#include "third_party/blink/renderer/platform/heap/blink_gc.h"
+#include "third_party/blink/renderer/platform/platform_export.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+#include "third_party/blink/renderer/platform/wtf/text/wtf_string.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+} // namespace base
+
+namespace blink {
+
+class ThreadState;
+
+class PLATFORM_EXPORT BlinkGCMemoryDumpProvider final
+ : public base::trace_event::MemoryDumpProvider {
+ USING_FAST_MALLOC(BlinkGCMemoryDumpProvider);
+
+ public:
+ enum class HeapType { kBlinkMainThread, kBlinkWorkerThread };
+
+ ~BlinkGCMemoryDumpProvider() final;
+ BlinkGCMemoryDumpProvider(
+ ThreadState* thread_state,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ HeapType heap_type);
+
+ // MemoryDumpProvider implementation.
+ bool OnMemoryDump(const base::trace_event::MemoryDumpArgs&,
+ base::trace_event::ProcessMemoryDump*) final;
+
+ private:
+ ThreadState* const thread_state_;
+ const HeapType heap_type_;
+ const std::string dump_base_name_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_BLINK_GC_MEMORY_DUMP_PROVIDER_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/disallow_new_wrapper.h b/chromium/third_party/blink/renderer/platform/heap/impl/disallow_new_wrapper.h
new file mode 100644
index 00000000000..b6562e440e3
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/disallow_new_wrapper.h
@@ -0,0 +1,53 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_DISALLOW_NEW_WRAPPER_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_DISALLOW_NEW_WRAPPER_H_
+
+#include "third_party/blink/renderer/platform/heap/garbage_collected.h"
+#include "third_party/blink/renderer/platform/heap/heap.h"
+#include "third_party/blink/renderer/platform/heap/visitor.h"
+
+namespace blink {
+
+// DisallowNewWrapper wraps a disallow new type in a GarbageCollected class.
+template <typename T>
+class DisallowNewWrapper final
+ : public GarbageCollected<DisallowNewWrapper<T>> {
+ public:
+ explicit DisallowNewWrapper(const T& value) : value_(value) {
+ static_assert(WTF::IsDisallowNew<T>::value,
+ "T needs to be a disallow new type");
+ static_assert(WTF::IsTraceable<T>::value, "T needs to be traceable");
+ }
+ explicit DisallowNewWrapper(T&& value) : value_(std::forward<T>(value)) {
+ static_assert(WTF::IsDisallowNew<T>::value,
+ "T needs to be a disallow new type");
+ static_assert(WTF::IsTraceable<T>::value, "T needs to be traceable");
+ }
+
+ const T& Value() const { return value_; }
+ T&& TakeValue() { return std::move(value_); }
+
+ void Trace(Visitor* visitor) const { visitor->Trace(value_); }
+
+ private:
+ T value_;
+};
+
+// Wraps a disallow new type in a GarbageCollected class, making it possible to
+// be referenced off heap from a Persistent.
+template <typename T>
+DisallowNewWrapper<T>* WrapDisallowNew(const T& value) {
+ return MakeGarbageCollected<DisallowNewWrapper<T>>(value);
+}
+
+template <typename T>
+DisallowNewWrapper<T>* WrapDisallowNew(T&& value) {
+ return MakeGarbageCollected<DisallowNewWrapper<T>>(std::forward<T>(value));
+}
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_DISALLOW_NEW_WRAPPER_H_
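
Editor's note: as the comment above says, the wrapper exists so that a DISALLOW_NEW() type can be referenced off-heap through a Persistent. A minimal usage sketch follows; Node and Entry are made-up types, and the public Blink heap headers (garbage_collected.h, member.h, persistent.h) are assumed to be included.

class Node : public blink::GarbageCollected<Node> {
 public:
  void Trace(blink::Visitor*) const {}
};

// A disallow-new, traceable value type.
struct Entry {
  DISALLOW_NEW();

 public:
  void Trace(blink::Visitor* visitor) const { visitor->Trace(node_); }
  blink::Member<Node> node_;
};

void Example() {
  // Entry itself cannot be the target of a Persistent, but the
  // garbage-collected wrapper can.
  blink::Persistent<blink::DisallowNewWrapper<Entry>> holder =
      blink::WrapDisallowNew(Entry());
  const Entry& entry = holder->Value();
  (void)entry;
}
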
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/finalizer_traits.h b/chromium/third_party/blink/renderer/platform/heap/impl/finalizer_traits.h
new file mode 100644
index 00000000000..2f0e0ee9f44
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/finalizer_traits.h
@@ -0,0 +1,95 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_FINALIZER_TRAITS_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_FINALIZER_TRAITS_H_
+
+#include <type_traits>
+
+#include "base/template_util.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+
+namespace blink {
+namespace internal {
+
+using FinalizationCallback = void (*)(void*);
+
+template <typename T, typename = void>
+struct HasFinalizeGarbageCollectedObject : std::false_type {};
+
+template <typename T>
+struct HasFinalizeGarbageCollectedObject<
+ T,
+ base::void_t<decltype(std::declval<T>().FinalizeGarbageCollectedObject())>>
+ : std::true_type {};
+
+// The FinalizerTraitImpl specifies how to finalize objects.
+template <typename T, bool isFinalized>
+struct FinalizerTraitImpl;
+
+template <typename T>
+struct FinalizerTraitImpl<T, true> {
+ private:
+ STATIC_ONLY(FinalizerTraitImpl);
+ struct Custom {
+ static void Call(void* obj) {
+ static_cast<T*>(obj)->FinalizeGarbageCollectedObject();
+ }
+ };
+ struct Destructor {
+ static void Call(void* obj) {
+// The garbage collector differs from regular C++ here as it remembers whether
+// an object's base class has a virtual destructor. In case there is no virtual
+// destructor present, the object is always finalized through its leaf type. In
+// other words: there is no finalization through a base pointer.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdelete-non-virtual-dtor"
+ static_cast<T*>(obj)->~T();
+#pragma GCC diagnostic pop
+ }
+ };
+ using FinalizeImpl =
+ std::conditional_t<HasFinalizeGarbageCollectedObject<T>::value,
+ Custom,
+ Destructor>;
+
+ public:
+ static void Finalize(void* obj) {
+ static_assert(sizeof(T), "T must be fully defined");
+ FinalizeImpl::Call(obj);
+ }
+};
+
+template <typename T>
+struct FinalizerTraitImpl<T, false> {
+ STATIC_ONLY(FinalizerTraitImpl);
+ static void Finalize(void* obj) {
+ static_assert(sizeof(T), "T must be fully defined");
+ }
+};
+
+// The FinalizerTrait is used to determine if a type requires finalization and
+// what finalization means.
+template <typename T>
+struct FinalizerTrait {
+ STATIC_ONLY(FinalizerTrait);
+
+ private:
+ static constexpr bool kNonTrivialFinalizer =
+ internal::HasFinalizeGarbageCollectedObject<T>::value ||
+ !std::is_trivially_destructible<typename std::remove_cv<T>::type>::value;
+
+ static void Finalize(void* obj) {
+ internal::FinalizerTraitImpl<T, kNonTrivialFinalizer>::Finalize(obj);
+ }
+
+ public:
+ static constexpr FinalizationCallback kCallback =
+ kNonTrivialFinalizer ? Finalize : nullptr;
+};
+
+} // namespace internal
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_FINALIZER_TRAITS_H_
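
Editor's note: FinalizerTrait above registers a finalizer callback only when a type either provides FinalizeGarbageCollectedObject() or has a non-trivial destructor. A self-contained sketch of that detection, using std::void_t instead of base::void_t and made-up types:

#include <type_traits>
#include <utility>

template <typename T, typename = void>
struct HasCustomFinalizer : std::false_type {};

template <typename T>
struct HasCustomFinalizer<
    T,
    std::void_t<decltype(std::declval<T>().FinalizeGarbageCollectedObject())>>
    : std::true_type {};

struct WithCustom {
  void FinalizeGarbageCollectedObject() {}
};
struct Trivial {};
struct NonTrivial {
  ~NonTrivial() {}
};

static_assert(HasCustomFinalizer<WithCustom>::value, "custom path detected");
static_assert(!HasCustomFinalizer<Trivial>::value, "no custom finalizer");
// A finalizer is needed for WithCustom (custom) and NonTrivial (destructor),
// but not for Trivial.
static_assert(!HasCustomFinalizer<NonTrivial>::value &&
                  !std::is_trivially_destructible<NonTrivial>::value,
              "destructor path");
static_assert(!(HasCustomFinalizer<Trivial>::value ||
                !std::is_trivially_destructible<Trivial>::value),
              "no finalizer registered");
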
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/garbage_collected.h b/chromium/third_party/blink/renderer/platform/heap/impl/garbage_collected.h
new file mode 100644
index 00000000000..b7af662ed5d
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/garbage_collected.h
@@ -0,0 +1,117 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_GARBAGE_COLLECTED_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_GARBAGE_COLLECTED_H_
+
+#include "base/macros.h"
+#include "third_party/blink/renderer/platform/heap/thread_state.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+#include "third_party/blink/renderer/platform/wtf/assertions.h"
+#include "third_party/blink/renderer/platform/wtf/type_traits.h"
+
+namespace blink {
+
+template <typename T>
+class GarbageCollected;
+
+// GC_PLUGIN_IGNORE is used to make the plugin ignore a particular class or
+// field when checking for proper usage. When using GC_PLUGIN_IGNORE
+// a bug-number should be provided as an argument where the bug describes
+// what needs to happen to remove the GC_PLUGIN_IGNORE again.
+#if defined(__clang__)
+#define GC_PLUGIN_IGNORE(bug) \
+ __attribute__((annotate("blink_gc_plugin_ignore")))
+#else
+#define GC_PLUGIN_IGNORE(bug)
+#endif
+
+// Template to determine if a class is a GarbageCollectedMixin by checking if it
+// has IsGarbageCollectedMixinMarker
+template <typename T>
+struct IsGarbageCollectedMixin {
+ private:
+ typedef char YesType;
+ struct NoType {
+ char padding[8];
+ };
+
+ template <typename U>
+ static YesType CheckMarker(typename U::IsGarbageCollectedMixinMarker*);
+ template <typename U>
+ static NoType CheckMarker(...);
+
+ public:
+ static const bool value = sizeof(CheckMarker<T>(nullptr)) == sizeof(YesType);
+};
+
+// TraceDescriptor is used to describe how to trace an object.
+struct TraceDescriptor {
+ STACK_ALLOCATED();
+
+ public:
+ // The adjusted base pointer of the object that should be traced.
+ const void* base_object_payload;
+ // A callback for tracing the object.
+ TraceCallback callback;
+};
+
+// The GarbageCollectedMixin interface can be used to automatically define
+// TraceTrait/ObjectAliveTrait on non-leftmost deriving classes which need
+// to be garbage collected.
+class PLATFORM_EXPORT GarbageCollectedMixin {
+ public:
+ typedef int IsGarbageCollectedMixinMarker;
+ virtual void Trace(Visitor*) const {}
+};
+
+// Base class for objects allocated in the Blink garbage-collected heap.
+//
+// Instances of GarbageCollected will be finalized if they are non-trivially
+// destructible.
+template <typename T>
+class GarbageCollected;
+
+template <typename T,
+ bool = WTF::IsSubclassOfTemplate<typename std::remove_const<T>::type,
+ GarbageCollected>::value>
+class NeedsAdjustPointer;
+
+template <typename T>
+class NeedsAdjustPointer<T, true> {
+ static_assert(sizeof(T), "T must be fully defined");
+
+ public:
+ static const bool value = false;
+};
+
+template <typename T>
+class NeedsAdjustPointer<T, false> {
+ static_assert(sizeof(T), "T must be fully defined");
+
+ public:
+ static const bool value =
+ IsGarbageCollectedMixin<typename std::remove_const<T>::type>::value;
+};
+
+// TODO(sof): migrate to wtf/TypeTraits.h
+template <typename T>
+class IsFullyDefined {
+ using TrueType = char;
+ struct FalseType {
+ char dummy[2];
+ };
+
+ template <typename U, size_t sz = sizeof(U)>
+ static TrueType IsSizeofKnown(U*);
+ static FalseType IsSizeofKnown(...);
+ static T& t_;
+
+ public:
+ static const bool value = sizeof(TrueType) == sizeof(IsSizeofKnown(&t_));
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_GARBAGE_COLLECTED_H_
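
Editor's note: IsGarbageCollectedMixin above relies on the classic sizeof-based member-type detection idiom: overload resolution prefers the typed overload when U::IsGarbageCollectedMixinMarker exists, and the answer is read off the return type sizes. A self-contained sketch with made-up types:

struct MixinLike {
  typedef int IsGarbageCollectedMixinMarker;
};
struct Plain {};

template <typename T>
struct HasMixinMarker {
 private:
  typedef char YesType;
  struct NoType {
    char padding[8];
  };

  template <typename U>
  static YesType Check(typename U::IsGarbageCollectedMixinMarker*);
  template <typename U>
  static NoType Check(...);

 public:
  static const bool value = sizeof(Check<T>(nullptr)) == sizeof(YesType);
};

static_assert(HasMixinMarker<MixinLike>::value, "marker typedef detected");
static_assert(!HasMixinMarker<Plain>::value, "falls back to the ... overload");
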
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/gc_info.cc b/chromium/third_party/blink/renderer/platform/heap/impl/gc_info.cc
new file mode 100644
index 00000000000..14361c26f3c
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/gc_info.cc
@@ -0,0 +1,137 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/heap/impl/gc_info.h"
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/bits.h"
+#include "build/build_config.h"
+#include "third_party/blink/renderer/platform/heap/heap.h"
+#include "third_party/blink/renderer/platform/wtf/std_lib_extras.h"
+
+namespace blink {
+
+namespace {
+
+constexpr size_t kEntrySize = sizeof(GCInfo*);
+
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t ComputeInitialTableLimit() {
+ // (Light) experimentation suggests that Blink doesn't need more than this
+ // while handling content on popular web properties.
+ constexpr size_t kInitialWantedLimit = 512;
+
+ // Different OSes have different page sizes, so we have to choose the minimum
+ // of memory wanted and OS page size.
+ constexpr size_t memory_wanted = kInitialWantedLimit * kEntrySize;
+ return base::RoundUpToPageAllocationGranularity(memory_wanted) / kEntrySize;
+}
+
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t MaxTableSize() {
+ return base::RoundUpToPageAllocationGranularity(GCInfoTable::kMaxIndex *
+ kEntrySize);
+}
+
+} // namespace
+
+GCInfoTable* GCInfoTable::global_table_ = nullptr;
+constexpr GCInfoIndex GCInfoTable::kMaxIndex;
+constexpr GCInfoIndex GCInfoTable::kMinIndex;
+
+void GCInfoTable::CreateGlobalTable() {
+ // Allocation and resizing are built around the following invariants.
+ static_assert(base::bits::IsPowerOfTwo(kEntrySize),
+ "GCInfoTable entries size must be power of "
+ "two");
+
+#if defined(PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR)
+#define STATIC_ASSERT_OR_CHECK(condition, message) \
+ static_assert(condition, message)
+#else
+#define STATIC_ASSERT_OR_CHECK(condition, message) \
+ do { \
+ CHECK(condition) << (message); \
+ } while (false)
+#endif
+
+ STATIC_ASSERT_OR_CHECK(
+ 0 == base::PageAllocationGranularity() % base::SystemPageSize(),
+ "System page size must be a multiple of page allocation granularity");
+
+#undef STATIC_ASSERT_OR_CHECK
+
+ DEFINE_STATIC_LOCAL(GCInfoTable, table, ());
+ global_table_ = &table;
+}
+
+GCInfoIndex GCInfoTable::EnsureGCInfoIndex(
+ const GCInfo* gc_info,
+ std::atomic<GCInfoIndex>* gc_info_index_slot) {
+ DCHECK(gc_info);
+ DCHECK(gc_info_index_slot);
+
+ // Ensuring a new index involves current index adjustment as well as
+ // potentially resizing the table. For simplicity we use a lock.
+ MutexLocker locker(table_mutex_);
+
+ // If more than one thread ends up allocating a slot for the same GCInfo, have
+ // later threads reuse the slot allocated by the first.
+ GCInfoIndex gc_info_index =
+ gc_info_index_slot->load(std::memory_order_relaxed);
+ if (gc_info_index)
+ return gc_info_index;
+
+ if (current_index_ == limit_)
+ Resize();
+
+ gc_info_index = current_index_++;
+ CHECK_LT(gc_info_index, GCInfoTable::kMaxIndex);
+
+ table_[gc_info_index] = gc_info;
+ gc_info_index_slot->store(gc_info_index, std::memory_order_release);
+ return gc_info_index;
+}
+
+void GCInfoTable::Resize() {
+ const GCInfoIndex new_limit =
+ (limit_) ? 2 * limit_ : ComputeInitialTableLimit();
+ CHECK_GT(new_limit, limit_);
+ const size_t old_committed_size = limit_ * kEntrySize;
+ const size_t new_committed_size = new_limit * kEntrySize;
+ CHECK(table_);
+ CHECK_EQ(0u, new_committed_size % base::PageAllocationGranularity());
+ CHECK_GE(MaxTableSize(), limit_ * kEntrySize);
+
+ // Recommitting and zapping assumes byte-addressable storage.
+ uint8_t* const current_table_end =
+ reinterpret_cast<uint8_t*>(table_) + old_committed_size;
+ const size_t table_size_delta = new_committed_size - old_committed_size;
+
+ // Commit the new size and allow read/write.
+ // TODO(ajwong): SetSystemPagesAccess should be part of RecommitSystemPages to
+ // avoid having two calls here.
+ base::SetSystemPagesAccess(current_table_end, table_size_delta,
+ base::PageReadWrite);
+ bool ok = base::RecommitSystemPages(current_table_end, table_size_delta,
+ base::PageReadWrite);
+ CHECK(ok);
+
+#if DCHECK_IS_ON()
+ // Check that newly-committed memory is zero-initialized.
+ for (size_t i = 0; i < (table_size_delta / sizeof(uintptr_t)); ++i) {
+ DCHECK(!reinterpret_cast<uintptr_t*>(current_table_end)[i]);
+ }
+#endif // DCHECK_IS_ON()
+
+ limit_ = new_limit;
+}
+
+GCInfoTable::GCInfoTable() {
+ table_ = reinterpret_cast<GCInfo const**>(base::AllocPages(
+ nullptr, MaxTableSize(), base::PageAllocationGranularity(),
+ base::PageInaccessible, base::PageTag::kBlinkGC));
+ CHECK(table_);
+ Resize();
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/gc_info.h b/chromium/third_party/blink/renderer/platform/heap/impl/gc_info.h
new file mode 100644
index 00000000000..bec05ee6c1b
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/gc_info.h
@@ -0,0 +1,128 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_GC_INFO_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_GC_INFO_H_
+
+#include <atomic>
+#include "base/gtest_prod_util.h"
+#include "third_party/blink/renderer/platform/heap/blink_gc.h"
+#include "third_party/blink/renderer/platform/heap/impl/finalizer_traits.h"
+#include "third_party/blink/renderer/platform/heap/impl/name_traits.h"
+#include "third_party/blink/renderer/platform/wtf/threading_primitives.h"
+
+namespace blink {
+
+template <typename T>
+struct TraceTrait;
+
+using GCInfoIndex = uint32_t;
+
+// GCInfo contains metadata for objects that are instantiated from classes that
+// inherit from GarbageCollected.
+struct PLATFORM_EXPORT GCInfo final {
+ static inline const GCInfo& From(GCInfoIndex);
+
+ const TraceCallback trace;
+ const internal::FinalizationCallback finalize;
+ const NameCallback name;
+ const bool has_v_table;
+};
+
+class PLATFORM_EXPORT GCInfoTable final {
+ public:
+ // At maximum |kMaxIndex - 1| indices are supported.
+ //
+ // We assume that 14 bits is enough to represent all possible types: during
+ // telemetry runs, we see about 1,000 different types; looking at the output
+ // of the Oilpan GC Clang plugin, there appear to be at most about 6,000
+ // types. Thus 14 bits should be more than twice as many bits as we will ever
+ // need.
+ static constexpr GCInfoIndex kMaxIndex = 1 << 14;
+
+ // Minimum index returned. Values smaller than |kMinIndex| may be used as
+ // sentinels.
+ static constexpr GCInfoIndex kMinIndex = 1;
+
+ // Sets up a singleton table that can be acquired using Get().
+ static void CreateGlobalTable();
+
+ static GCInfoTable* GetMutable() { return global_table_; }
+ static const GCInfoTable& Get() { return *global_table_; }
+
+ const GCInfo& GCInfoFromIndex(GCInfoIndex index) const {
+ DCHECK_GE(index, kMinIndex);
+ DCHECK_LT(index, kMaxIndex);
+ DCHECK(table_);
+ const GCInfo* info = table_[index];
+ DCHECK(info);
+ return *info;
+ }
+
+ GCInfoIndex EnsureGCInfoIndex(const GCInfo*, std::atomic<GCInfoIndex>*);
+
+ // Returns the number of recorded GCInfo objects, including |kMinIndex|.
+ GCInfoIndex NumberOfGCInfos() const { return current_index_; }
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(GCInfoTest, InitialEmpty);
+ FRIEND_TEST_ALL_PREFIXES(GCInfoTest, ResizeToMaxIndex);
+ FRIEND_TEST_ALL_PREFIXES(GCInfoDeathTest, MoreThanMaxIndexInfos);
+
+ // Singleton for each process. Retrieved through Get().
+ static GCInfoTable* global_table_;
+
+ // Use GCInfoTable::Get() for retrieving the global table outside of testing
+ // code.
+ GCInfoTable();
+
+ void Resize();
+
+ // Holds the per-class GCInfo descriptors; each HeapObjectHeader keeps an
+ // index into this table.
+ const GCInfo** table_ = nullptr;
+
+ // Current index used when requiring a new GCInfo object.
+ GCInfoIndex current_index_ = kMinIndex;
+
+ // The limit (exclusive) of the currently allocated table.
+ GCInfoIndex limit_ = 0;
+
+ Mutex table_mutex_;
+};
+
+// static
+const GCInfo& GCInfo::From(GCInfoIndex index) {
+ return GCInfoTable::Get().GCInfoFromIndex(index);
+}
+
+template <typename T>
+struct GCInfoTrait {
+ STATIC_ONLY(GCInfoTrait);
+
+ static GCInfoIndex Index() {
+ static_assert(sizeof(T), "T must be fully defined");
+ static const GCInfo kGcInfo = {
+ TraceTrait<T>::Trace, internal::FinalizerTrait<T>::kCallback,
+ NameTrait<T>::GetName, std::is_polymorphic<T>::value};
+ // This is more complicated than using threadsafe initialization, but this
+ // is instantiated many times (once for every GC type).
+ static std::atomic<GCInfoIndex> gc_info_index{0};
+ GCInfoIndex index = gc_info_index.load(std::memory_order_acquire);
+ if (!index) {
+ index = GCInfoTable::GetMutable()->EnsureGCInfoIndex(&kGcInfo,
+ &gc_info_index);
+ }
+ DCHECK_GE(index, GCInfoTable::kMinIndex);
+ DCHECK_LT(index, GCInfoTable::kMaxIndex);
+ return index;
+ }
+};
+
+template <typename U>
+class GCInfoTrait<const U> : public GCInfoTrait<U> {};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_GC_INFO_H_
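
Editor's note: GCInfoTrait<T>::Index() above pairs an acquire load on the fast path with the mutex-protected slow path in GCInfoTable::EnsureGCInfoIndex() (gc_info.cc above), which re-checks the per-type slot and publishes the index with a release store. A standalone sketch of that publication pattern, using std::mutex in place of WTF::Mutex and with the table bookkeeping elided:

#include <atomic>
#include <cstdint>
#include <mutex>

namespace example {

std::mutex g_table_mutex;
uint32_t g_next_index = 1;  // index 0 is the "not yet assigned" sentinel

uint32_t EnsureIndex(std::atomic<uint32_t>* slot) {
  std::lock_guard<std::mutex> lock(g_table_mutex);
  // Another thread may have won the race while we waited for the lock.
  uint32_t index = slot->load(std::memory_order_relaxed);
  if (index)
    return index;
  index = g_next_index++;
  // ... record per-type metadata for |index| here ...
  slot->store(index, std::memory_order_release);
  return index;
}

template <typename T>
uint32_t IndexFor() {
  static std::atomic<uint32_t> index_slot{0};
  uint32_t index = index_slot.load(std::memory_order_acquire);
  if (!index)
    index = EnsureIndex(&index_slot);
  return index;
}

}  // namespace example
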
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/gc_task_runner.h b/chromium/third_party/blink/renderer/platform/heap/impl/gc_task_runner.h
new file mode 100644
index 00000000000..1e93f7fcac5
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/gc_task_runner.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2014 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_GC_TASK_RUNNER_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_GC_TASK_RUNNER_H_
+
+#include <memory>
+#include "base/location.h"
+#include "third_party/blink/renderer/platform/heap/thread_state.h"
+#include "third_party/blink/renderer/platform/scheduler/public/thread.h"
+#include "third_party/blink/renderer/platform/wtf/cross_thread_functional.h"
+
+namespace blink {
+
+class GCTaskObserver final : public Thread::TaskObserver {
+ USING_FAST_MALLOC(GCTaskObserver);
+
+ public:
+ GCTaskObserver() : nesting_(0) {}
+
+ ~GCTaskObserver() override {
+ // nesting_ can be 1 if this was unregistered in a task and
+ // DidProcessTask() was not called.
+ DCHECK(!nesting_ || nesting_ == 1);
+ }
+
+ void WillProcessTask(const base::PendingTask&, bool) override { nesting_++; }
+
+ void DidProcessTask(const base::PendingTask&) override {
+ // In production code, WebKit::initialize is called from inside the
+ // message loop, so we can occasionally get a DidProcessTask() call
+ // without a corresponding WillProcessTask() call. This is benign.
+ if (nesting_)
+ nesting_--;
+
+ ThreadState::Current()->SafePoint(nesting_
+ ? BlinkGC::kHeapPointersOnStack
+ : BlinkGC::kNoHeapPointersOnStack);
+ }
+
+ private:
+ int nesting_;
+};
+
+class GCTaskRunner final {
+ USING_FAST_MALLOC(GCTaskRunner);
+
+ public:
+ explicit GCTaskRunner(Thread* thread)
+ : gc_task_observer_(std::make_unique<GCTaskObserver>()), thread_(thread) {
+ thread_->AddTaskObserver(gc_task_observer_.get());
+ }
+
+ ~GCTaskRunner() { thread_->RemoveTaskObserver(gc_task_observer_.get()); }
+
+ private:
+ std::unique_ptr<GCTaskObserver> gc_task_observer_;
+ Thread* thread_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_GC_TASK_RUNNER_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/heap.cc b/chromium/third_party/blink/renderer/platform/heap/impl/heap.cc
new file mode 100644
index 00000000000..2fa56d7d1d9
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/heap.cc
@@ -0,0 +1,796 @@
+/*
+ * Copyright (C) 2013 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "third_party/blink/renderer/platform/heap/heap.h"
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+
+#include "base/trace_event/process_memory_dump.h"
+#include "third_party/blink/public/common/features.h"
+#include "third_party/blink/public/platform/platform.h"
+#include "third_party/blink/renderer/platform/bindings/script_forbidden_scope.h"
+#include "third_party/blink/renderer/platform/bindings/trace_wrapper_v8_reference.h"
+#include "third_party/blink/renderer/platform/heap/blink_gc_memory_dump_provider.h"
+#include "third_party/blink/renderer/platform/heap/heap_stats_collector.h"
+#include "third_party/blink/renderer/platform/heap/impl/heap_compact.h"
+#include "third_party/blink/renderer/platform/heap/impl/marking_scheduling_oracle.h"
+#include "third_party/blink/renderer/platform/heap/impl/marking_visitor.h"
+#include "third_party/blink/renderer/platform/heap/impl/page_bloom_filter.h"
+#include "third_party/blink/renderer/platform/heap/impl/page_memory.h"
+#include "third_party/blink/renderer/platform/heap/impl/page_pool.h"
+#include "third_party/blink/renderer/platform/heap/thread_state_scopes.h"
+#include "third_party/blink/renderer/platform/heap/unified_heap_marking_visitor.h"
+#include "third_party/blink/renderer/platform/instrumentation/histogram.h"
+#include "third_party/blink/renderer/platform/instrumentation/tracing/trace_event.h"
+#include "third_party/blink/renderer/platform/instrumentation/tracing/web_memory_allocator_dump.h"
+#include "third_party/blink/renderer/platform/instrumentation/tracing/web_process_memory_dump.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/partitions.h"
+#include "third_party/blink/renderer/platform/wtf/assertions.h"
+#include "third_party/blink/renderer/platform/wtf/leak_annotations.h"
+
+namespace blink {
+
+HeapAllocHooks::AllocationHook* HeapAllocHooks::allocation_hook_ = nullptr;
+HeapAllocHooks::FreeHook* HeapAllocHooks::free_hook_ = nullptr;
+
+class ProcessHeapReporter final : public ThreadHeapStatsObserver {
+ public:
+ void IncreaseAllocatedSpace(size_t bytes) final {
+ ProcessHeap::IncreaseTotalAllocatedSpace(bytes);
+ }
+
+ void DecreaseAllocatedSpace(size_t bytes) final {
+ ProcessHeap::DecreaseTotalAllocatedSpace(bytes);
+ }
+
+ void ResetAllocatedObjectSize(size_t bytes) final {
+ ProcessHeap::DecreaseTotalAllocatedObjectSize(prev_incremented_);
+ ProcessHeap::IncreaseTotalAllocatedObjectSize(bytes);
+ prev_incremented_ = bytes;
+ }
+
+ void IncreaseAllocatedObjectSize(size_t bytes) final {
+ ProcessHeap::IncreaseTotalAllocatedObjectSize(bytes);
+ prev_incremented_ += bytes;
+ }
+
+ void DecreaseAllocatedObjectSize(size_t bytes) final {
+ ProcessHeap::DecreaseTotalAllocatedObjectSize(bytes);
+ prev_incremented_ -= bytes;
+ }
+
+ private:
+ size_t prev_incremented_ = 0;
+};
+
+ThreadHeap::ThreadHeap(ThreadState* thread_state)
+ : thread_state_(thread_state),
+ heap_stats_collector_(std::make_unique<ThreadHeapStatsCollector>()),
+ region_tree_(std::make_unique<RegionTree>()),
+ page_bloom_filter_(std::make_unique<PageBloomFilter>()),
+ free_page_pool_(std::make_unique<PagePool>()),
+ process_heap_reporter_(std::make_unique<ProcessHeapReporter>()) {
+ if (ThreadState::Current()->IsMainThread())
+ main_thread_heap_ = this;
+
+ for (int arena_index = 0; arena_index < BlinkGC::kLargeObjectArenaIndex;
+ arena_index++) {
+ arenas_[arena_index] = new NormalPageArena(thread_state_, arena_index);
+ }
+ arenas_[BlinkGC::kLargeObjectArenaIndex] =
+ new LargeObjectArena(thread_state_, BlinkGC::kLargeObjectArenaIndex);
+
+ stats_collector()->RegisterObserver(process_heap_reporter_.get());
+}
+
+ThreadHeap::~ThreadHeap() {
+ for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i)
+ delete arenas_[i];
+}
+
+Address ThreadHeap::CheckAndMarkPointer(MarkingVisitor* visitor,
+ Address address) {
+ DCHECK(thread_state_->InAtomicMarkingPause());
+
+#if !DCHECK_IS_ON()
+ if (!page_bloom_filter_->MayContain(address)) {
+ return nullptr;
+ }
+#endif
+
+ if (BasePage* page = LookupPageForAddress(address)) {
+#if DCHECK_IS_ON()
+ DCHECK(page->Contains(address));
+#endif
+ DCHECK(page_bloom_filter_->MayContain(address));
+ DCHECK(&visitor->Heap() == &page->Arena()->GetThreadState()->Heap());
+ visitor->ConservativelyMarkAddress(page, address);
+ return address;
+ }
+
+ return nullptr;
+}
+
+void ThreadHeap::VisitRememberedSets(MarkingVisitor* visitor) {
+ static_assert(BlinkGC::kLargeObjectArenaIndex + 1 == BlinkGC::kNumberOfArenas,
+ "LargeObject arena must be the last one.");
+ const auto visit_header = [visitor](HeapObjectHeader* header) {
+ // Process only old objects.
+ if (header->IsOld<HeapObjectHeader::AccessMode::kNonAtomic>()) {
+ // The design of young generation requires collections to be executed at
+ // the top level (with the guarantee that no objects are currently being
+ // in construction). This can be ensured by running young GCs from safe
+ // points or by reintroducing nested allocation scopes that avoid
+ // finalization.
+ DCHECK(header->IsMarked());
+ DCHECK(!header->IsInConstruction());
+ const GCInfo& gc_info = GCInfo::From(header->GcInfoIndex());
+ gc_info.trace(visitor, header->Payload());
+ }
+ };
+ for (size_t i = 0; i < BlinkGC::kLargeObjectArenaIndex; ++i) {
+ static_cast<NormalPageArena*>(arenas_[i])
+ ->IterateAndClearCardTables(visit_header);
+ }
+ static_cast<LargeObjectArena*>(arenas_[BlinkGC::kLargeObjectArenaIndex])
+ ->IterateAndClearRememberedPages(visit_header);
+}
+
+void ThreadHeap::SetupWorklists(bool should_initialize_compaction_worklists) {
+ marking_worklist_ = std::make_unique<MarkingWorklist>();
+ write_barrier_worklist_ = std::make_unique<WriteBarrierWorklist>();
+ not_fully_constructed_worklist_ =
+ std::make_unique<NotFullyConstructedWorklist>();
+ previously_not_fully_constructed_worklist_ =
+ std::make_unique<NotFullyConstructedWorklist>();
+ weak_callback_worklist_ = std::make_unique<WeakCallbackWorklist>();
+ discovered_ephemeron_pairs_worklist_ =
+ std::make_unique<EphemeronPairsWorklist>();
+ ephemeron_pairs_to_process_worklist_ =
+ std::make_unique<EphemeronPairsWorklist>();
+ v8_references_worklist_ = std::make_unique<V8ReferencesWorklist>();
+ not_safe_to_concurrently_trace_worklist_ =
+ std::make_unique<NotSafeToConcurrentlyTraceWorklist>();
+ weak_containers_worklist_ = std::make_unique<WeakContainersWorklist>();
+ if (should_initialize_compaction_worklists) {
+ movable_reference_worklist_ = std::make_unique<MovableReferenceWorklist>();
+ }
+}
+
+void ThreadHeap::DestroyMarkingWorklists(BlinkGC::StackState stack_state) {
+ marking_worklist_.reset();
+ write_barrier_worklist_.reset();
+ previously_not_fully_constructed_worklist_.reset();
+ weak_callback_worklist_.reset();
+ ephemeron_pairs_to_process_worklist_.reset();
+ v8_references_worklist_.reset();
+ not_safe_to_concurrently_trace_worklist_.reset();
+ weak_containers_worklist_.reset();
+ // The fixed point iteration may have found not-fully-constructed objects.
+ // Such objects should have already been found through the stack scan though
+ // and should thus already be marked.
+ //
+ // Possible reasons for encountering unmarked objects here:
+ // - Object is not allocated through MakeGarbageCollected.
+ // - Broken stack (roots) scanning.
+ if (!not_fully_constructed_worklist_->IsGlobalEmpty()) {
+#if DCHECK_IS_ON()
+ const bool conservative_gc =
+ BlinkGC::StackState::kHeapPointersOnStack == stack_state;
+ NotFullyConstructedItem item;
+ while (not_fully_constructed_worklist_->Pop(WorklistTaskId::MutatorThread,
+ &item)) {
+ HeapObjectHeader* const header = HeapObjectHeader::FromInnerAddress(
+ reinterpret_cast<Address>(const_cast<void*>(item)));
+ DCHECK(conservative_gc && header->IsMarked())
+ << " conservative: " << (conservative_gc ? "yes" : "no")
+ << " type: " << header->Name();
+ }
+#else
+ not_fully_constructed_worklist_->Clear();
+#endif
+ }
+ not_fully_constructed_worklist_.reset();
+
+ // |discovered_ephemeron_pairs_worklist_| may still hold ephemeron pairs with
+ // dead keys.
+ if (!discovered_ephemeron_pairs_worklist_->IsGlobalEmpty()) {
+#if DCHECK_IS_ON()
+ EphemeronPairItem item;
+ while (discovered_ephemeron_pairs_worklist_->Pop(
+ WorklistTaskId::MutatorThread, &item)) {
+ const HeapObjectHeader* const header = HeapObjectHeader::FromInnerAddress(
+ reinterpret_cast<ConstAddress>(item.key));
+ DCHECK(!header->IsMarked());
+ }
+#else
+ discovered_ephemeron_pairs_worklist_->Clear();
+#endif
+ }
+ discovered_ephemeron_pairs_worklist_.reset();
+}
+
+void ThreadHeap::DestroyCompactionWorklists() {
+ movable_reference_worklist_.reset();
+}
+
+HeapCompact* ThreadHeap::Compaction() {
+ if (!compaction_)
+ compaction_ = std::make_unique<HeapCompact>(this);
+ return compaction_.get();
+}
+
+bool ThreadHeap::ShouldRegisterMovingAddress() {
+ return Compaction()->ShouldRegisterMovingAddress();
+}
+
+void ThreadHeap::FlushNotFullyConstructedObjects() {
+ NotFullyConstructedWorklist::View view(not_fully_constructed_worklist_.get(),
+ WorklistTaskId::MutatorThread);
+ if (!view.IsLocalViewEmpty()) {
+ view.FlushToGlobal();
+ previously_not_fully_constructed_worklist_->MergeGlobalPool(
+ not_fully_constructed_worklist_.get());
+ }
+ DCHECK(view.IsLocalViewEmpty());
+}
+
+void ThreadHeap::FlushEphemeronPairs(EphemeronProcessing ephemeron_processing) {
+ if (ephemeron_processing == EphemeronProcessing::kPartialProcessing) {
+ if (steps_since_last_ephemeron_pairs_flush_ <
+ kStepsBeforeEphemeronPairsFlush)
+ return;
+ }
+
+ ThreadHeapStatsCollector::EnabledScope stats_scope(
+ stats_collector(), ThreadHeapStatsCollector::kMarkFlushEphemeronPairs);
+
+ EphemeronPairsWorklist::View view(discovered_ephemeron_pairs_worklist_.get(),
+ WorklistTaskId::MutatorThread);
+ if (!view.IsLocalViewEmpty()) {
+ view.FlushToGlobal();
+ ephemeron_pairs_to_process_worklist_->MergeGlobalPool(
+ discovered_ephemeron_pairs_worklist_.get());
+ }
+
+ steps_since_last_ephemeron_pairs_flush_ = 0;
+}
+
+void ThreadHeap::MarkNotFullyConstructedObjects(MarkingVisitor* visitor) {
+ DCHECK(!thread_state_->IsIncrementalMarking());
+ ThreadHeapStatsCollector::Scope stats_scope(
+ stats_collector(),
+ ThreadHeapStatsCollector::kMarkNotFullyConstructedObjects);
+
+ DCHECK_EQ(WorklistTaskId::MutatorThread, visitor->task_id());
+ NotFullyConstructedItem item;
+ while (not_fully_constructed_worklist_->Pop(WorklistTaskId::MutatorThread,
+ &item)) {
+ BasePage* const page = PageFromObject(item);
+ visitor->ConservativelyMarkAddress(page,
+ reinterpret_cast<ConstAddress>(item));
+ }
+}
+
+namespace {
+
+static constexpr size_t kDefaultDeadlineCheckInterval = 150u;
+static constexpr size_t kDefaultConcurrentDeadlineCheckInterval =
+ 5 * kDefaultDeadlineCheckInterval;
+
+template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
+ typename Worklist,
+ typename Callback,
+ typename YieldPredicate>
+bool DrainWorklist(Worklist* worklist,
+ Callback callback,
+ YieldPredicate should_yield,
+ int task_id) {
+ // For concurrent markers, should_yield also reports marked bytes.
+ if (worklist->IsLocalViewEmpty(task_id))
+ return true;
+ if (should_yield())
+ return false;
+ size_t processed_callback_count = kDeadlineCheckInterval;
+ typename Worklist::EntryType item;
+ while (worklist->Pop(task_id, &item)) {
+ callback(item);
+ if (--processed_callback_count == 0) {
+ if (should_yield()) {
+ return false;
+ }
+ processed_callback_count = kDeadlineCheckInterval;
+ }
+ }
+ return true;
+}
+
+template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
+ typename Worklist,
+ typename Callback>
+bool DrainWorklistWithDeadline(base::TimeTicks deadline,
+ Worklist* worklist,
+ Callback callback,
+ int task_id) {
+ return DrainWorklist<kDeadlineCheckInterval>(
+ worklist, std::move(callback),
+ [deadline]() { return deadline <= base::TimeTicks::Now(); }, task_id);
+}
+
+} // namespace
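+
+// Editor's note: a sketch of the generic call pattern for DrainWorklist above
+// (illustrative only; |some_worklist|, |ItemType| and the lambdas are
+// hypothetical). The callback is invoked once per popped item; the yield
+// predicate is consulted up front and then after every kDeadlineCheckInterval
+// processed items.
+//
+//   bool finished = DrainWorklist(
+//       some_worklist.get(),
+//       [](ItemType& item) { /* process a single item */ },
+//       []() { return false; /* never yield */ },
+//       WorklistTaskId::MutatorThread);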
+
+bool ThreadHeap::InvokeEphemeronCallbacks(
+ EphemeronProcessing ephemeron_processing,
+ MarkingVisitor* visitor,
+ base::TimeTicks deadline) {
+ if (ephemeron_processing == EphemeronProcessing::kPartialProcessing) {
+ if (steps_since_last_ephemeron_processing_ <
+ kStepsBeforeEphemeronProcessing) {
+ // Returning "no more work" to avoid excessive processing. The fixed
+ // point computation in the atomic pause takes care of correctness.
+ return true;
+ }
+ }
+
+ FlushEphemeronPairs(EphemeronProcessing::kFullProcessing);
+
+ steps_since_last_ephemeron_processing_ = 0;
+
+ // Mark any strong pointers that have now become reachable in ephemeron maps.
+ ThreadHeapStatsCollector::EnabledScope stats_scope(
+ stats_collector(),
+ ThreadHeapStatsCollector::kMarkInvokeEphemeronCallbacks);
+
+ DCHECK_EQ(WorklistTaskId::MutatorThread, visitor->task_id());
+
+ // Then we iterate over the new callbacks found by the marking visitor.
+ // Callbacks found by the concurrent marking will be flushed eventually
+ // and then invoked by the mutator thread (in the atomic pause at latest).
+ return DrainWorklistWithDeadline(
+ deadline, ephemeron_pairs_to_process_worklist_.get(),
+ [visitor](EphemeronPairItem& item) {
+ visitor->VisitEphemeron(item.key, item.value_desc);
+ },
+ WorklistTaskId::MutatorThread);
+}
+
+bool ThreadHeap::AdvanceMarking(MarkingVisitor* visitor,
+ base::TimeTicks deadline,
+ EphemeronProcessing ephemeron_processing) {
+ DCHECK_EQ(WorklistTaskId::MutatorThread, visitor->task_id());
+
+ ++steps_since_last_ephemeron_pairs_flush_;
+ ++steps_since_last_ephemeron_processing_;
+
+ bool finished;
+ bool processed_ephemerons = false;
+ FlushEphemeronPairs(ephemeron_processing);
+ // Ephemeron fixed point loop.
+ do {
+ {
+ // Iteratively mark all objects that are reachable from the objects
+ // currently pushed onto the marking worklist.
+ ThreadHeapStatsCollector::EnabledScope stats_scope(
+ stats_collector(), ThreadHeapStatsCollector::kMarkProcessWorklists);
+
+ // Start with mutator-thread-only worklists (not fully constructed).
+ // If time runs out, concurrent markers can take care of the rest.
+
+ {
+ ThreadHeapStatsCollector::EnabledScope inner_scope(
+ stats_collector(), ThreadHeapStatsCollector::kMarkBailOutObjects);
+ // Items in the bailout worklist are only collection backing stores.
+ // These items could take a long time to process, so we should check
+ // the deadline more often (backing stores and large items can also be
+ // found in the regular marking worklist, but those are interleaved
+ // with smaller objects).
+ finished = DrainWorklistWithDeadline<kDefaultDeadlineCheckInterval / 5>(
+ deadline, not_safe_to_concurrently_trace_worklist_.get(),
+ [visitor](const NotSafeToConcurrentlyTraceItem& item) {
+ item.desc.callback(visitor, item.desc.base_object_payload);
+ visitor->AccountMarkedBytes(item.bailout_size);
+ },
+ WorklistTaskId::MutatorThread);
+ if (!finished)
+ break;
+ }
+
+ {
+ ThreadHeapStatsCollector::EnabledScope inner_scope(
+ stats_collector(),
+ ThreadHeapStatsCollector::kMarkFlushV8References);
+ finished = FlushV8References(deadline);
+ if (!finished)
+ break;
+ }
+
+ {
+ ThreadHeapStatsCollector::EnabledScope inner_scope(
+ stats_collector(),
+ ThreadHeapStatsCollector::kMarkProcessNotFullyconstructeddWorklist);
+ // Convert |previously_not_fully_constructed_worklist_| to
+ // |marking_worklist_|. This merely re-adds items with the proper
+ // callbacks.
+ finished = DrainWorklistWithDeadline(
+ deadline, previously_not_fully_constructed_worklist_.get(),
+ [visitor](NotFullyConstructedItem& item) {
+ visitor->DynamicallyMarkAddress(
+ reinterpret_cast<ConstAddress>(item));
+ },
+ WorklistTaskId::MutatorThread);
+ if (!finished)
+ break;
+ }
+
+ {
+ ThreadHeapStatsCollector::EnabledScope inner_scope(
+ stats_collector(),
+ ThreadHeapStatsCollector::kMarkProcessMarkingWorklist);
+ finished = DrainWorklistWithDeadline(
+ deadline, marking_worklist_.get(),
+ [visitor](const MarkingItem& item) {
+ HeapObjectHeader* header =
+ HeapObjectHeader::FromPayload(item.base_object_payload);
+ DCHECK(!header->IsInConstruction());
+ item.callback(visitor, item.base_object_payload);
+ visitor->AccountMarkedBytes(header);
+ },
+ WorklistTaskId::MutatorThread);
+ if (!finished)
+ break;
+ }
+
+ {
+ ThreadHeapStatsCollector::EnabledScope inner_scope(
+ stats_collector(),
+ ThreadHeapStatsCollector::kMarkProcessWriteBarrierWorklist);
+ finished = DrainWorklistWithDeadline(
+ deadline, write_barrier_worklist_.get(),
+ [visitor](HeapObjectHeader* header) {
+ DCHECK(!header->IsInConstruction());
+ GCInfo::From(header->GcInfoIndex())
+ .trace(visitor, header->Payload());
+ visitor->AccountMarkedBytes(header);
+ },
+ WorklistTaskId::MutatorThread);
+ if (!finished)
+ break;
+ }
+ }
+
+ if ((ephemeron_processing == EphemeronProcessing::kFullProcessing) ||
+ !processed_ephemerons) {
+ processed_ephemerons = true;
+ finished =
+ InvokeEphemeronCallbacks(ephemeron_processing, visitor, deadline);
+ if (!finished)
+ break;
+ }
+
+ // Rerun loop if ephemeron processing queued more objects for tracing.
+ } while (!marking_worklist_->IsLocalViewEmpty(WorklistTaskId::MutatorThread));
+
+ return finished;
+}
+
+bool ThreadHeap::HasWorkForConcurrentMarking() const {
+ return !marking_worklist_->IsGlobalPoolEmpty() ||
+ !write_barrier_worklist_->IsGlobalPoolEmpty() ||
+ !previously_not_fully_constructed_worklist_->IsGlobalPoolEmpty() ||
+ !ephemeron_pairs_to_process_worklist_->IsGlobalPoolEmpty();
+}
+
+size_t ThreadHeap::ConcurrentMarkingGlobalWorkSize() const {
+ return marking_worklist_->GlobalPoolSize() +
+ write_barrier_worklist_->GlobalPoolSize() +
+ previously_not_fully_constructed_worklist_->GlobalPoolSize() +
+ ephemeron_pairs_to_process_worklist_->GlobalPoolSize();
+}
+
+bool ThreadHeap::AdvanceConcurrentMarking(
+ ConcurrentMarkingVisitor* visitor,
+ base::JobDelegate* delegate,
+ MarkingSchedulingOracle* marking_scheduler) {
+ auto should_yield_callback = [marking_scheduler, visitor, delegate]() {
+ marking_scheduler->AddConcurrentlyMarkedBytes(
+ visitor->RecentlyMarkedBytes());
+ return delegate->ShouldYield();
+ };
+ bool finished;
+ do {
+ // Convert |previously_not_fully_constructed_worklist_| to
+ // |marking_worklist_|. This merely re-adds items with the proper
+ // callbacks.
+ finished = DrainWorklist<kDefaultConcurrentDeadlineCheckInterval>(
+ previously_not_fully_constructed_worklist_.get(),
+ [visitor](NotFullyConstructedItem& item) {
+ visitor->DynamicallyMarkAddress(reinterpret_cast<ConstAddress>(item));
+ },
+ should_yield_callback, visitor->task_id());
+ if (!finished)
+ break;
+
+ // Iteratively mark all objects that are reachable from the objects
+ // currently pushed onto the marking worklist.
+ finished = DrainWorklist<kDefaultConcurrentDeadlineCheckInterval>(
+ marking_worklist_.get(),
+ [visitor](const MarkingItem& item) {
+ HeapObjectHeader* header =
+ HeapObjectHeader::FromPayload(item.base_object_payload);
+ PageFromObject(header)->SynchronizedLoad();
+ DCHECK(
+ !header
+ ->IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>());
+ item.callback(visitor, item.base_object_payload);
+ visitor->AccountMarkedBytes(header);
+ },
+ should_yield_callback, visitor->task_id());
+ if (!finished)
+ break;
+
+ finished = DrainWorklist<kDefaultConcurrentDeadlineCheckInterval>(
+ write_barrier_worklist_.get(),
+ [visitor](HeapObjectHeader* header) {
+ PageFromObject(header)->SynchronizedLoad();
+ DCHECK(
+ !header
+ ->IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>());
+ GCInfo::From(header->GcInfoIndex()).trace(visitor, header->Payload());
+ visitor->AccountMarkedBytes(header);
+ },
+ should_yield_callback, visitor->task_id());
+ if (!finished)
+ break;
+
+ {
+ ThreadHeapStatsCollector::ConcurrentScope stats_scope(
+ stats_collector(),
+ ThreadHeapStatsCollector::kConcurrentMarkInvokeEphemeronCallbacks);
+
+ // Then we iterate over the new ephemerons found by the marking visitor.
+ // Callbacks found by the concurrent marking will be flushed eventually
+ // by the mutator thread and then invoked either concurrently or by the
+ // mutator thread (in the atomic pause at latest).
+ finished = DrainWorklist<kDefaultConcurrentDeadlineCheckInterval>(
+ ephemeron_pairs_to_process_worklist_.get(),
+ [visitor](EphemeronPairItem& item) {
+ visitor->VisitEphemeron(item.key, item.value_desc);
+ },
+ should_yield_callback, visitor->task_id());
+ if (!finished)
+ break;
+ }
+
+ } while (HasWorkForConcurrentMarking());
+
+ return finished;
+}
+
+void ThreadHeap::WeakProcessing(MarkingVisitor* visitor) {
+ ThreadHeapStatsCollector::Scope stats_scope(
+ stats_collector(), ThreadHeapStatsCollector::kMarkWeakProcessing);
+
+  // Weak processing may access unmarked objects but is forbidden from
+  // resurrecting them or allocating new ones.
+ ThreadState::NoAllocationScope allocation_forbidden(ThreadState::Current());
+
+ DCHECK_EQ(WorklistTaskId::MutatorThread, visitor->task_id());
+
+ // Call weak callbacks on objects that may now be pointing to dead objects.
+ CustomCallbackItem item;
+ LivenessBroker broker = internal::LivenessBrokerFactory::Create();
+ while (weak_callback_worklist_->Pop(WorklistTaskId::MutatorThread, &item)) {
+ item.callback(broker, item.parameter);
+ }
+ // Weak callbacks should not add any new objects for marking.
+ DCHECK(marking_worklist_->IsGlobalEmpty());
+}
+
+void ThreadHeap::VerifyMarking() {
+ for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i) {
+ arenas_[i]->VerifyMarking();
+ }
+}
+
+size_t ThreadHeap::ObjectPayloadSizeForTesting() {
+ ThreadState::AtomicPauseScope atomic_pause_scope(thread_state_);
+ ScriptForbiddenScope script_forbidden_scope;
+ size_t object_payload_size = 0;
+ thread_state_->SetGCPhase(ThreadState::GCPhase::kMarking);
+ thread_state_->Heap().MakeConsistentForGC();
+ thread_state_->Heap().PrepareForSweep(BlinkGC::CollectionType::kMajor);
+ for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i)
+ object_payload_size += arenas_[i]->ObjectPayloadSizeForTesting();
+ MakeConsistentForMutator();
+ thread_state_->SetGCPhase(ThreadState::GCPhase::kSweeping);
+ thread_state_->SetGCPhase(ThreadState::GCPhase::kNone);
+ return object_payload_size;
+}
+
+void ThreadHeap::ResetAllocationPointForTesting() {
+ for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i)
+ arenas_[i]->ResetAllocationPoint();
+}
+
+BasePage* ThreadHeap::LookupPageForAddress(ConstAddress address) {
+ if (PageMemoryRegion* region = region_tree_->Lookup(address)) {
+ return region->PageFromAddress(address);
+ }
+ return nullptr;
+}
+
+void ThreadHeap::MakeConsistentForGC() {
+ DCHECK(thread_state_->InAtomicMarkingPause());
+ for (BaseArena* arena : arenas_) {
+ arena->MakeConsistentForGC();
+ }
+}
+
+void ThreadHeap::MakeConsistentForMutator() {
+ DCHECK(thread_state_->InAtomicMarkingPause());
+ for (BaseArena* arena : arenas_) {
+ arena->MakeConsistentForMutator();
+ }
+}
+
+void ThreadHeap::Unmark() {
+ DCHECK(thread_state_->InAtomicMarkingPause());
+ for (BaseArena* arena : arenas_) {
+ arena->Unmark();
+ }
+}
+
+void ThreadHeap::Compact() {
+ if (!Compaction()->IsCompacting())
+ return;
+
+ ThreadHeapStatsCollector::Scope stats_scope(
+ stats_collector(), ThreadHeapStatsCollector::kAtomicPauseCompaction);
+ // Compaction is done eagerly and before the mutator threads get
+ // to run again. Doing it lazily is problematic, as the mutator's
+ // references to live objects could suddenly be invalidated by
+  // compaction of a page/heap. We do know all the references to
+  // the relocating objects just after marking, but won't know them later
+  // (e.g., stack references could have been created, or new objects
+  // created which refer to old collection objects, and so on).
+
+  // Compact the hash table backing store arena first; it usually has
+  // higher fragmentation and is larger.
+ for (int i = BlinkGC::kHashTableArenaIndex; i >= BlinkGC::kVectorArenaIndex;
+ --i)
+ static_cast<NormalPageArena*>(arenas_[i])->SweepAndCompact();
+ Compaction()->Finish();
+}
+
+void ThreadHeap::PrepareForSweep(BlinkGC::CollectionType collection_type) {
+ DCHECK(thread_state_->InAtomicMarkingPause());
+ DCHECK(thread_state_->CheckThread());
+ for (int i = 0; i < BlinkGC::kNumberOfArenas; i++)
+ arenas_[i]->PrepareForSweep(collection_type);
+}
+
+void ThreadHeap::RemoveAllPages() {
+ DCHECK(thread_state_->CheckThread());
+ for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i)
+ arenas_[i]->RemoveAllPages();
+}
+
+void ThreadHeap::CompleteSweep() {
+ for (int i = 0; i < BlinkGC::kNumberOfArenas; i++)
+ arenas_[i]->CompleteSweep();
+}
+
+void ThreadHeap::InvokeFinalizersOnSweptPages() {
+ for (size_t i = BlinkGC::kNormalPage1ArenaIndex; i < BlinkGC::kNumberOfArenas;
+ i++)
+ arenas_[i]->InvokeFinalizersOnSweptPages();
+}
+
+#if defined(ADDRESS_SANITIZER)
+void ThreadHeap::PoisonUnmarkedObjects() {
+  // Poison all unmarked objects in the other arenas.
+ for (int i = 1; i < BlinkGC::kNumberOfArenas; i++)
+ arenas_[i]->PoisonUnmarkedObjects();
+}
+#endif
+
+#if DCHECK_IS_ON()
+BasePage* ThreadHeap::FindPageFromAddress(Address address) {
+ for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i) {
+ if (BasePage* page = arenas_[i]->FindPageFromAddress(address))
+ return page;
+ }
+ return nullptr;
+}
+#endif
+
+void ThreadHeap::CollectStatistics(ThreadState::Statistics* stats) {
+#define SNAPSHOT_ARENA(name) \
+ arenas_[BlinkGC::k##name##ArenaIndex]->CollectStatistics( \
+ BlinkGC::ToString(BlinkGC::k##name##ArenaIndex), stats);
+
+ FOR_EACH_ARENA(SNAPSHOT_ARENA)
+#undef SNAPSHOT_ARENA
+}
+
+bool ThreadHeap::AdvanceLazySweep(base::TimeTicks deadline) {
+ static constexpr base::TimeDelta slack = base::TimeDelta::FromSecondsD(0.001);
+ for (size_t i = 0; i < BlinkGC::kNumberOfArenas; i++) {
+    // LazySweepWithDeadline() won't check the deadline until it sweeps
+    // 10 pages, so we give a small slack for safety.
+ const base::TimeDelta remaining_budget =
+ deadline - slack - base::TimeTicks::Now();
+ if (remaining_budget <= base::TimeDelta() ||
+ !arenas_[i]->LazySweepWithDeadline(deadline)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool ThreadHeap::AdvanceConcurrentSweep(base::JobDelegate* job) {
+ for (size_t i = 0; i < BlinkGC::kNumberOfArenas; i++) {
+ while (!arenas_[i]->ConcurrentSweepOnePage()) {
+ if (job->ShouldYield())
+ return false;
+ }
+ }
+ return true;
+}
+
+// TODO(omerkatz): Temporary solution until concurrent marking is ready. See
+// https://crrev.com/c/1730054 for details. Eventually this will be removed.
+bool ThreadHeap::FlushV8References(base::TimeTicks deadline) {
+ if (!thread_state_->IsUnifiedGCMarkingInProgress())
+ return true;
+
+ DCHECK(base::FeatureList::IsEnabled(
+ blink::features::kBlinkHeapConcurrentMarking) ||
+ v8_references_worklist_->IsGlobalEmpty());
+
+ v8::EmbedderHeapTracer* controller =
+ reinterpret_cast<v8::EmbedderHeapTracer*>(
+ thread_state_->unified_heap_controller());
+ return DrainWorklistWithDeadline(
+ deadline, v8_references_worklist_.get(),
+ [controller](const V8Reference& reference) {
+ if (!reference->Get().IsEmpty()) {
+ controller->RegisterEmbedderReference(
+ reference->template Cast<v8::Data>().Get());
+ }
+ },
+ WorklistTaskId::MutatorThread);
+}
+
+ThreadHeap* ThreadHeap::main_thread_heap_ = nullptr;
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/heap.h b/chromium/third_party/blink/renderer/platform/heap/impl/heap.h
new file mode 100644
index 00000000000..f1740aaa3e2
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/heap.h
@@ -0,0 +1,757 @@
+/*
+ * Copyright (C) 2013 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_H_
+
+#include <limits>
+#include <memory>
+#include <unordered_set>
+
+#include "base/macros.h"
+#include "build/build_config.h"
+#include "third_party/blink/renderer/platform/heap/impl/gc_info.h"
+#include "third_party/blink/renderer/platform/heap/impl/heap_page.h"
+#include "third_party/blink/renderer/platform/heap/impl/thread_state_statistics.h"
+#include "third_party/blink/renderer/platform/heap/impl/worklist.h"
+#include "third_party/blink/renderer/platform/heap/process_heap.h"
+#include "third_party/blink/renderer/platform/heap/thread_state.h"
+#include "third_party/blink/renderer/platform/heap/visitor.h"
+#include "third_party/blink/renderer/platform/platform_export.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+#include "third_party/blink/renderer/platform/wtf/assertions.h"
+#include "third_party/blink/renderer/platform/wtf/forward.h"
+#include "third_party/blink/renderer/platform/wtf/sanitizers.h"
+#include "third_party/blink/renderer/platform/wtf/threading_primitives.h"
+
+namespace blink {
+
+namespace incremental_marking_test {
+class IncrementalMarkingScopeBase;
+} // namespace incremental_marking_test
+
+class ConcurrentMarkingVisitor;
+class ThreadHeapStatsCollector;
+class PageBloomFilter;
+class PagePool;
+class ProcessHeapReporter;
+class RegionTree;
+class MarkingSchedulingOracle;
+
+using MarkingItem = TraceDescriptor;
+using NotFullyConstructedItem = const void*;
+
+struct EphemeronPairItem {
+ const void* key;
+ TraceDescriptor value_desc;
+};
+
+struct CustomCallbackItem {
+ WeakCallback callback;
+ const void* parameter;
+};
+
+struct NotSafeToConcurrentlyTraceItem {
+ TraceDescriptor desc;
+ size_t bailout_size;
+};
+
+using V8Reference = const TraceWrapperV8Reference<v8::Value>*;
+
+// A segment size of 512 entries is necessary to avoid throughput regressions.
+// Since the worklist is currently a temporary object this is not a problem.
+using MarkingWorklist = Worklist<MarkingItem, 512 /* local entries */>;
+using WriteBarrierWorklist = Worklist<HeapObjectHeader*, 64>;
+using NotFullyConstructedWorklist =
+ Worklist<NotFullyConstructedItem, 16 /* local entries */>;
+using WeakCallbackWorklist =
+ Worklist<CustomCallbackItem, 64 /* local entries */>;
+// Using large local segments here to avoid throughput regressions.
+using MovableReferenceWorklist =
+ Worklist<const MovableReference*, 256 /* local entries */>;
+using EphemeronPairsWorklist =
+ Worklist<EphemeronPairItem, 64 /* local entries */>;
+using V8ReferencesWorklist = Worklist<V8Reference, 16 /* local entries */>;
+using NotSafeToConcurrentlyTraceWorklist =
+ Worklist<NotSafeToConcurrentlyTraceItem, 64 /* local entries */>;
+
+class WeakContainersWorklist {
+ public:
+ inline void Push(const HeapObjectHeader* object) {
+ DCHECK(object);
+ WTF::MutexLocker locker(lock_);
+ objects_.insert(object);
+ }
+
+ inline bool Contains(const HeapObjectHeader* object) {
+ // This method is called only during atomic pause, so lock is not needed.
+ DCHECK(object);
+ return objects_.find(object) != objects_.end();
+ }
+
+ private:
+ WTF::Mutex lock_;
+ std::unordered_set<const HeapObjectHeader*> objects_;
+};
+
+class PLATFORM_EXPORT HeapAllocHooks {
+ STATIC_ONLY(HeapAllocHooks);
+
+ public:
+ // TODO(hajimehoshi): Pass a type name of the allocated object.
+ typedef void AllocationHook(Address, size_t, const char*);
+ typedef void FreeHook(Address);
+
+ // Sets allocation hook. Only one hook is supported.
+ static void SetAllocationHook(AllocationHook* hook) {
+ CHECK(!allocation_hook_ || !hook);
+ allocation_hook_ = hook;
+ }
+
+ // Sets free hook. Only one hook is supported.
+ static void SetFreeHook(FreeHook* hook) {
+ CHECK(!free_hook_ || !hook);
+ free_hook_ = hook;
+ }
+
+ static void AllocationHookIfEnabled(Address address,
+ size_t size,
+ const char* type_name) {
+ AllocationHook* allocation_hook = allocation_hook_;
+ if (UNLIKELY(!!allocation_hook))
+ allocation_hook(address, size, type_name);
+ }
+
+ static void FreeHookIfEnabled(Address address) {
+ FreeHook* free_hook = free_hook_;
+ if (UNLIKELY(!!free_hook))
+ free_hook(address);
+ }
+
+ private:
+ static AllocationHook* allocation_hook_;
+ static FreeHook* free_hook_;
+};
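+
+// Editor's note: a minimal usage sketch for HeapAllocHooks (illustrative only;
+// the hook name below is hypothetical). Exactly one hook of each kind may be
+// installed at a time; passing nullptr uninstalls it again, matching the
+// CHECKs above.
+//
+//   void MyAllocationHook(Address address, size_t size, const char* type) {
+//     // Record the allocation, e.g. for heap profiling.
+//   }
+//
+//   HeapAllocHooks::SetAllocationHook(&MyAllocationHook);  // install
+//   HeapAllocHooks::SetAllocationHook(nullptr);            // uninstall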
+
+class HeapCompact;
+template <typename T>
+class Member;
+template <typename T>
+class WeakMember;
+template <typename T>
+class UntracedMember;
+
+namespace internal {
+
+class LivenessBrokerFactory;
+
+template <typename T, bool = NeedsAdjustPointer<T>::value>
+class ObjectAliveTrait;
+
+template <typename T>
+class ObjectAliveTrait<T, false> {
+ STATIC_ONLY(ObjectAliveTrait);
+
+ public:
+ static bool IsHeapObjectAlive(const T* object) {
+ static_assert(sizeof(T), "T must be fully defined");
+ return HeapObjectHeader::FromPayload(object)->IsMarked();
+ }
+};
+
+template <typename T>
+class ObjectAliveTrait<T, true> {
+ STATIC_ONLY(ObjectAliveTrait);
+
+ public:
+ NO_SANITIZE_ADDRESS
+ static bool IsHeapObjectAlive(const T* object) {
+ static_assert(sizeof(T), "T must be fully defined");
+ const HeapObjectHeader* header = HeapObjectHeader::FromPayload(
+ TraceTrait<T>::GetTraceDescriptor(object).base_object_payload);
+ DCHECK(!header->IsInConstruction() || header->IsMarked());
+ return header->IsMarked();
+ }
+};
+
+template <typename T, typename = int>
+struct IsGarbageCollectedContainer : std::false_type {};
+
+template <typename T>
+struct IsGarbageCollectedContainer<
+ T,
+ typename T::IsGarbageCollectedCollectionTypeMarker> : std::true_type {};
+
+} // namespace internal
+
+class PLATFORM_EXPORT ThreadHeap {
+ USING_FAST_MALLOC(ThreadHeap);
+
+ using EphemeronProcessing = ThreadState::EphemeronProcessing;
+
+ public:
+ explicit ThreadHeap(ThreadState*);
+ ~ThreadHeap();
+
+ MarkingWorklist* GetMarkingWorklist() const {
+ return marking_worklist_.get();
+ }
+
+ WriteBarrierWorklist* GetWriteBarrierWorklist() const {
+ return write_barrier_worklist_.get();
+ }
+
+ NotFullyConstructedWorklist* GetNotFullyConstructedWorklist() const {
+ return not_fully_constructed_worklist_.get();
+ }
+
+ NotFullyConstructedWorklist* GetPreviouslyNotFullyConstructedWorklist()
+ const {
+ return previously_not_fully_constructed_worklist_.get();
+ }
+
+ WeakCallbackWorklist* GetWeakCallbackWorklist() const {
+ return weak_callback_worklist_.get();
+ }
+
+ MovableReferenceWorklist* GetMovableReferenceWorklist() const {
+ return movable_reference_worklist_.get();
+ }
+
+ EphemeronPairsWorklist* GetDiscoveredEphemeronPairsWorklist() const {
+ return discovered_ephemeron_pairs_worklist_.get();
+ }
+
+ EphemeronPairsWorklist* GetEphemeronPairsToProcessWorklist() const {
+ return ephemeron_pairs_to_process_worklist_.get();
+ }
+
+ V8ReferencesWorklist* GetV8ReferencesWorklist() const {
+ return v8_references_worklist_.get();
+ }
+
+ NotSafeToConcurrentlyTraceWorklist* GetNotSafeToConcurrentlyTraceWorklist()
+ const {
+ return not_safe_to_concurrently_trace_worklist_.get();
+ }
+
+ WeakContainersWorklist* GetWeakContainersWorklist() const {
+ return weak_containers_worklist_.get();
+ }
+
+ // Register an ephemeron table for fixed-point iteration.
+ void RegisterWeakTable(void* container_object, EphemeronCallback);
+
+ // Heap compaction registration methods:
+
+  // Checks whether we need to register |addr| as a backing store or a slot
+  // containing a reference to it.
+ bool ShouldRegisterMovingAddress();
+
+ RegionTree* GetRegionTree() { return region_tree_.get(); }
+
+ static inline size_t AllocationSizeFromSize(size_t size) {
+ // Add space for header.
+ size_t allocation_size = size + sizeof(HeapObjectHeader);
+ // The allocation size calculation can overflow for large sizes.
+ CHECK_GT(allocation_size, size);
+ // Align size with allocation granularity.
+ allocation_size = (allocation_size + kAllocationMask) & ~kAllocationMask;
+ return allocation_size;
+ }
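+
+  // Editor's note: a worked example for AllocationSizeFromSize() above
+  // (illustrative only; the concrete constants are assumptions, not taken
+  // from this change). Assuming sizeof(HeapObjectHeader) == 8 and an
+  // allocation granularity of 8 bytes (kAllocationMask == 7), a request for
+  // 20 bytes becomes 20 + 8 = 28 bytes, which is rounded up to 32:
+  //   (28 + 7) & ~7 == 32.
+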
+ Address AllocateOnArenaIndex(ThreadState*,
+ size_t,
+ int arena_index,
+ uint32_t gc_info_index,
+ const char* type_name);
+ template <typename T>
+ static Address Allocate(size_t);
+
+ void WeakProcessing(MarkingVisitor*);
+
+ // Moves not fully constructed objects to previously not fully constructed
+ // objects. Such objects can be iterated using the Trace() method and do
+ // not need to rely on conservative handling.
+ void FlushNotFullyConstructedObjects();
+
+ // Moves ephemeron pairs from |discovered_ephemeron_pairs_worklist_| to
+ // |ephemeron_pairs_to_process_worklist_|
+ void FlushEphemeronPairs(EphemeronProcessing);
+
+ // Marks not fully constructed objects.
+ void MarkNotFullyConstructedObjects(MarkingVisitor*);
+ // Marks the transitive closure including ephemerons.
+ bool AdvanceMarking(MarkingVisitor*, base::TimeTicks, EphemeronProcessing);
+ void VerifyMarking();
+
+  // Returns true if concurrent markers will have work to steal.
+ bool HasWorkForConcurrentMarking() const;
+ // Returns the amount of work currently available for stealing (there could be
+ // work remaining even if this is 0).
+ size_t ConcurrentMarkingGlobalWorkSize() const;
+  // Returns true if the marker is done.
+ bool AdvanceConcurrentMarking(ConcurrentMarkingVisitor*,
+ base::JobDelegate*,
+ MarkingSchedulingOracle* marking_scheduler);
+
+  // Conservatively checks whether an address is a pointer into any of the
+  // thread heaps. If so, marks the object pointed to as live.
+ Address CheckAndMarkPointer(MarkingVisitor*, Address);
+
+ // Visits remembered sets.
+ void VisitRememberedSets(MarkingVisitor*);
+
+ size_t ObjectPayloadSizeForTesting();
+ void ResetAllocationPointForTesting();
+
+ PagePool* GetFreePagePool() { return free_page_pool_.get(); }
+
+ // This look-up uses the region search tree and a negative contains cache to
+ // provide an efficient mapping from arbitrary addresses to the containing
+ // heap-page if one exists.
+ BasePage* LookupPageForAddress(ConstAddress);
+
+ HeapCompact* Compaction();
+
+ // Get one of the heap structures for this thread.
+ // The thread heap is split into multiple heap parts based on object types
+ // and object sizes.
+ BaseArena* Arena(int arena_index) const {
+ DCHECK_LE(0, arena_index);
+ DCHECK_LT(arena_index, BlinkGC::kNumberOfArenas);
+ return arenas_[arena_index];
+ }
+
+ static bool IsVectorArenaIndex(int arena_index) {
+ return BlinkGC::kVectorArenaIndex == arena_index;
+ }
+ static bool IsNormalArenaIndex(int);
+
+ void MakeConsistentForGC();
+  // MakeConsistentForMutator() drops marks from marked objects and rebuilds
+  // free lists. This is called after taking a snapshot and before resuming
+  // the execution of mutators.
+ void MakeConsistentForMutator();
+
+  // Unmarks all objects in the entire heap. This is supposed to be called at
+  // the beginning of a major GC.
+ void Unmark();
+
+ void Compact();
+
+ bool AdvanceLazySweep(base::TimeTicks deadline);
+ bool AdvanceConcurrentSweep(base::JobDelegate*);
+
+ void PrepareForSweep(BlinkGC::CollectionType);
+ void RemoveAllPages();
+ void InvokeFinalizersOnSweptPages();
+ void CompleteSweep();
+
+ void CollectStatistics(ThreadState::Statistics* statistics);
+
+ ThreadHeapStatsCollector* stats_collector() const {
+ return heap_stats_collector_.get();
+ }
+
+#if defined(ADDRESS_SANITIZER)
+ void PoisonUnmarkedObjects();
+#endif
+
+#if DCHECK_IS_ON()
+ // Infrastructure to determine if an address is within one of the
+ // address ranges for the Blink heap. If the address is in the Blink
+ // heap the containing heap page is returned.
+ BasePage* FindPageFromAddress(Address);
+ BasePage* FindPageFromAddress(const void* pointer) {
+ return FindPageFromAddress(
+ reinterpret_cast<Address>(const_cast<void*>(pointer)));
+ }
+#endif
+
+ PageBloomFilter* page_bloom_filter() { return page_bloom_filter_.get(); }
+
+ bool IsInLastAllocatedRegion(Address address) const;
+ void SetLastAllocatedRegion(Address start, size_t length);
+
+ private:
+ struct LastAllocatedRegion {
+ Address start = nullptr;
+ size_t length = 0;
+ };
+
+ static int ArenaIndexForObjectSize(size_t);
+
+ void SetupWorklists(bool);
+ void DestroyMarkingWorklists(BlinkGC::StackState);
+ void DestroyCompactionWorklists();
+
+ bool InvokeEphemeronCallbacks(EphemeronProcessing,
+ MarkingVisitor*,
+ base::TimeTicks);
+
+ bool FlushV8References(base::TimeTicks);
+
+ ThreadState* thread_state_;
+ std::unique_ptr<ThreadHeapStatsCollector> heap_stats_collector_;
+ std::unique_ptr<RegionTree> region_tree_;
+ std::unique_ptr<PageBloomFilter> page_bloom_filter_;
+ std::unique_ptr<PagePool> free_page_pool_;
+ std::unique_ptr<ProcessHeapReporter> process_heap_reporter_;
+
+ // All objects on this worklist have been fully initialized and assigned a
+ // trace callback for iterating the body of the object. This worklist should
+ // contain almost all objects.
+ std::unique_ptr<MarkingWorklist> marking_worklist_;
+
+ // Objects on this worklist have been collected in the write barrier. The
+ // worklist is different from |marking_worklist_| to minimize execution in the
+ // path where a write barrier is executed.
+ std::unique_ptr<WriteBarrierWorklist> write_barrier_worklist_;
+
+ // Objects on this worklist were observed to be in construction (in their
+ // constructor) and thus have been delayed for processing. They have not yet
+ // been assigned a valid header and trace callback.
+ std::unique_ptr<NotFullyConstructedWorklist> not_fully_constructed_worklist_;
+
+ // Objects on this worklist were previously in construction but have been
+ // moved here upon observing a safepoint, i.e., processing without stack. They
+ // have not yet been assigned a valid header and trace callback but are fully
+ // specified and can thus be iterated using the trace callback (which can be
+ // looked up dynamically).
+ std::unique_ptr<NotFullyConstructedWorklist>
+ previously_not_fully_constructed_worklist_;
+
+ // Worklist of weak callbacks accumulated for objects. Such callbacks are
+ // processed after finishing marking objects.
+ std::unique_ptr<WeakCallbackWorklist> weak_callback_worklist_;
+
+  // This worklist remembers slots that are traced during the marking phases.
+  // The mapping between the slots and the backing stores is created during
+  // the atomic pause phase.
+ std::unique_ptr<MovableReferenceWorklist> movable_reference_worklist_;
+
+ // Worklist of ephemeron callbacks. Used to pass new callbacks from
+ // MarkingVisitor to ThreadHeap.
+ std::unique_ptr<EphemeronPairsWorklist> discovered_ephemeron_pairs_worklist_;
+ std::unique_ptr<EphemeronPairsWorklist> ephemeron_pairs_to_process_worklist_;
+
+ // Worklist for storing the V8 references until ThreadHeap can flush them
+ // to V8.
+ std::unique_ptr<V8ReferencesWorklist> v8_references_worklist_;
+
+ std::unique_ptr<NotSafeToConcurrentlyTraceWorklist>
+ not_safe_to_concurrently_trace_worklist_;
+
+ std::unique_ptr<WeakContainersWorklist> weak_containers_worklist_;
+
+ std::unique_ptr<HeapCompact> compaction_;
+
+ LastAllocatedRegion last_allocated_region_;
+
+ BaseArena* arenas_[BlinkGC::kNumberOfArenas];
+
+ static ThreadHeap* main_thread_heap_;
+
+ static constexpr size_t kStepsBeforeEphemeronPairsFlush = 4u;
+ size_t steps_since_last_ephemeron_pairs_flush_ = 0;
+ static constexpr size_t kStepsBeforeEphemeronProcessing = 16u;
+ size_t steps_since_last_ephemeron_processing_ = 0;
+
+ friend class incremental_marking_test::IncrementalMarkingScopeBase;
+ template <typename T>
+ friend class Member;
+ friend class ThreadState;
+};
+
+template <typename T>
+class GarbageCollected {
+ IS_GARBAGE_COLLECTED_TYPE();
+
+ public:
+ using ParentMostGarbageCollectedType = T;
+
+ // Must use MakeGarbageCollected.
+ void* operator new(size_t) = delete;
+ void* operator new[](size_t) = delete;
+  // The garbage collector takes care of reclaiming the object. Also, a
+  // virtual destructor requires an unambiguous, accessible 'operator delete'.
+ void operator delete(void*) { NOTREACHED(); }
+ void operator delete[](void*) = delete;
+
+ template <typename Derived>
+ static void* AllocateObject(size_t size) {
+ return ThreadHeap::Allocate<GCInfoFoldedType<Derived>>(size);
+ }
+
+ protected:
+  // In theory this trait could be moved to gc_info.h, but that would cause
+  // significant memory bloat due to the huge number of ThreadHeap::Allocate<>
+  // instantiations, which the linker is not able to fold.
+ template <typename Derived>
+ class GCInfoFolded {
+ static constexpr bool is_virtual_destructor_at_base =
+ std::has_virtual_destructor<ParentMostGarbageCollectedType>::value;
+ static constexpr bool both_trivially_destructible =
+ std::is_trivially_destructible<ParentMostGarbageCollectedType>::value &&
+ std::is_trivially_destructible<Derived>::value;
+ static constexpr bool has_custom_dispatch_at_base =
+ internal::HasFinalizeGarbageCollectedObject<
+ ParentMostGarbageCollectedType>::value;
+
+ public:
+ using Type = std::conditional_t<is_virtual_destructor_at_base ||
+ both_trivially_destructible ||
+ has_custom_dispatch_at_base,
+ ParentMostGarbageCollectedType,
+ Derived>;
+ };
+
+ template <typename Derived>
+ using GCInfoFoldedType = typename GCInfoFolded<Derived>::Type;
+
+ GarbageCollected() = default;
+
+ DISALLOW_COPY_AND_ASSIGN(GarbageCollected);
+};
+
+// Used for passing custom sizes to MakeGarbageCollected.
+struct AdditionalBytes {
+ explicit AdditionalBytes(size_t bytes) : value(bytes) {}
+ const size_t value;
+};
+
+template <typename T>
+struct MakeGarbageCollectedTrait {
+ template <typename... Args>
+ static T* Call(Args&&... args) {
+ static_assert(WTF::IsGarbageCollectedType<T>::value,
+ "T needs to be a garbage collected object");
+ static_assert(
+ std::is_trivially_destructible<T>::value ||
+ std::has_virtual_destructor<T>::value || std::is_final<T>::value ||
+ internal::IsGarbageCollectedContainer<T>::value ||
+ internal::HasFinalizeGarbageCollectedObject<T>::value,
+ "Finalized GarbageCollected class should either have a virtual "
+ "destructor or be marked as final");
+ static_assert(!IsGarbageCollectedMixin<T>::value ||
+ sizeof(T) <= kLargeObjectSizeThreshold,
+ "GarbageCollectedMixin may not be a large object");
+ void* memory = T::template AllocateObject<T>(sizeof(T));
+ HeapObjectHeader* header = HeapObjectHeader::FromPayload(memory);
+ // Placement new as regular operator new() is deleted.
+ T* object = ::new (memory) T(std::forward<Args>(args)...);
+ header->MarkFullyConstructed<HeapObjectHeader::AccessMode::kAtomic>();
+ return object;
+ }
+
+ template <typename... Args>
+ static T* Call(AdditionalBytes additional_bytes, Args&&... args) {
+ static_assert(WTF::IsGarbageCollectedType<T>::value,
+ "T needs to be a garbage collected object");
+ static_assert(
+ std::is_trivially_destructible<T>::value ||
+ std::has_virtual_destructor<T>::value || std::is_final<T>::value ||
+ internal::IsGarbageCollectedContainer<T>::value ||
+ internal::HasFinalizeGarbageCollectedObject<T>::value,
+ "Finalized GarbageCollected class should either have a virtual "
+ "destructor or be marked as final.");
+ const size_t size = sizeof(T) + additional_bytes.value;
+ if (IsGarbageCollectedMixin<T>::value) {
+ // Ban large mixin so we can use PageFromObject() on them.
+ CHECK_GE(kLargeObjectSizeThreshold, size)
+ << "GarbageCollectedMixin may not be a large object";
+ }
+ void* memory = T::template AllocateObject<T>(size);
+ HeapObjectHeader* header = HeapObjectHeader::FromPayload(memory);
+ // Placement new as regular operator new() is deleted.
+ T* object = ::new (memory) T(std::forward<Args>(args)...);
+ header->MarkFullyConstructed<HeapObjectHeader::AccessMode::kAtomic>();
+ return object;
+ }
+};
+
+template <typename T, typename = void>
+struct PostConstructionHookTrait {
+ static void Call(T*) {}
+};
+
+// Default MakeGarbageCollected: Constructs an instance of T, which is a garbage
+// collected type.
+template <typename T, typename... Args>
+T* MakeGarbageCollected(Args&&... args) {
+ T* object = MakeGarbageCollectedTrait<T>::Call(std::forward<Args>(args)...);
+ PostConstructionHookTrait<T>::Call(object);
+ return object;
+}
+
+// Constructs an instance of T, which is a garbage collected type. This special
+// version takes an AdditionalBytes argument which enables allocating extra
+// trailing bytes for inline storage.
+template <typename T, typename... Args>
+T* MakeGarbageCollected(AdditionalBytes additional_bytes, Args&&... args) {
+ T* object = MakeGarbageCollectedTrait<T>::Call(additional_bytes,
+ std::forward<Args>(args)...);
+ PostConstructionHookTrait<T>::Call(object);
+ return object;
+}
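+
+// Editor's note: a minimal usage sketch for MakeGarbageCollected (illustrative
+// only, not part of the upstream change; the IntBox class is hypothetical and
+// the Trace() signature is shown as typically written in this Blink revision).
+// Instances are created through MakeGarbageCollected rather than operator
+// new, which GarbageCollected deletes.
+//
+//   class IntBox final : public GarbageCollected<IntBox> {
+//    public:
+//     explicit IntBox(int value) : value_(value) {}
+//     void Trace(Visitor*) const {}
+//
+//    private:
+//     int value_;
+//   };
+//
+//   IntBox* box = MakeGarbageCollected<IntBox>(42);
+//   // Request 64 extra trailing bytes for inline storage:
+//   IntBox* padded = MakeGarbageCollected<IntBox>(AdditionalBytes(64), 42);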
+
+// Assigning class types to their arenas.
+//
+// We use sized arenas for most 'normal' objects to improve memory locality.
+// It seems that the same type of objects are likely to be accessed together,
+// which means that we want to group objects by type. That's one reason
+// why we provide dedicated arenas for popular types (e.g., Node, CSSValue),
+// but it's not practical to prepare dedicated arenas for all types.
+// Thus we group objects by their sizes, hoping that this will approximately
+// group objects by their types.
+//
+
+inline int ThreadHeap::ArenaIndexForObjectSize(size_t size) {
+ if (size < 64) {
+ if (size < 32)
+ return BlinkGC::kNormalPage1ArenaIndex;
+ return BlinkGC::kNormalPage2ArenaIndex;
+ }
+ if (size < 128)
+ return BlinkGC::kNormalPage3ArenaIndex;
+ return BlinkGC::kNormalPage4ArenaIndex;
+}
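+
+// Editor's note: the bucketing above maps object sizes to arenas as follows
+// (derived directly from the code, repeated here for quick reference):
+//   size < 32        -> kNormalPage1ArenaIndex
+//   32 <= size < 64  -> kNormalPage2ArenaIndex
+//   64 <= size < 128 -> kNormalPage3ArenaIndex
+//   size >= 128      -> kNormalPage4ArenaIndex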
+
+inline bool ThreadHeap::IsNormalArenaIndex(int index) {
+ return index >= BlinkGC::kNormalPage1ArenaIndex &&
+ index <= BlinkGC::kNormalPage4ArenaIndex;
+}
+
+inline Address ThreadHeap::AllocateOnArenaIndex(ThreadState* state,
+ size_t size,
+ int arena_index,
+ uint32_t gc_info_index,
+ const char* type_name) {
+ DCHECK(state->IsAllocationAllowed());
+ DCHECK_NE(arena_index, BlinkGC::kLargeObjectArenaIndex);
+ NormalPageArena* arena = static_cast<NormalPageArena*>(Arena(arena_index));
+ Address address =
+ arena->AllocateObject(AllocationSizeFromSize(size), gc_info_index);
+ HeapAllocHooks::AllocationHookIfEnabled(address, size, type_name);
+ return address;
+}
+
+template <typename T>
+Address ThreadHeap::Allocate(size_t size) {
+ ThreadState* state = ThreadStateFor<ThreadingTrait<T>::kAffinity>::GetState();
+ const char* type_name = WTF_HEAP_PROFILER_TYPE_NAME(T);
+ return state->Heap().AllocateOnArenaIndex(
+ state, size, ThreadHeap::ArenaIndexForObjectSize(size),
+ GCInfoTrait<T>::Index(), type_name);
+}
+
+inline bool ThreadHeap::IsInLastAllocatedRegion(Address address) const {
+ return last_allocated_region_.start <= address &&
+ address <
+ (last_allocated_region_.start + last_allocated_region_.length);
+}
+
+inline void ThreadHeap::SetLastAllocatedRegion(Address start, size_t length) {
+ last_allocated_region_.start = start;
+ last_allocated_region_.length = length;
+}
+
+class PLATFORM_EXPORT LivenessBroker final {
+ public:
+ template <typename T>
+ bool IsHeapObjectAlive(const T*) const;
+ template <typename T>
+ bool IsHeapObjectAlive(const WeakMember<T>&) const;
+ template <typename T>
+ bool IsHeapObjectAlive(const UntracedMember<T>&) const;
+
+ private:
+ LivenessBroker() = default;
+ friend class internal::LivenessBrokerFactory;
+};
+
+template <typename T>
+bool LivenessBroker::IsHeapObjectAlive(const T* object) const {
+ static_assert(sizeof(T), "T must be fully defined");
+ // The strongification of collections relies on the fact that once a
+ // collection has been strongified, there is no way that it can contain
+ // non-live entries, so no entries will be removed. Since you can't set
+ // the mark bit on a null pointer, that means that null pointers are
+ // always 'alive'.
+ if (!object)
+ return true;
+  // TODO(keishi): Some tests create CrossThreadPersistent on non-attached
+  // threads.
+ if (!ThreadState::Current())
+ return true;
+ DCHECK(&ThreadState::Current()->Heap() ==
+ &PageFromObject(object)->Arena()->GetThreadState()->Heap());
+ return internal::ObjectAliveTrait<T>::IsHeapObjectAlive(object);
+}
+
+template <typename T>
+bool LivenessBroker::IsHeapObjectAlive(const WeakMember<T>& weak_member) const {
+ return IsHeapObjectAlive(weak_member.Get());
+}
+
+template <typename T>
+bool LivenessBroker::IsHeapObjectAlive(
+ const UntracedMember<T>& untraced_member) const {
+ return IsHeapObjectAlive(untraced_member.Get());
+}
+
+template <typename T>
+void Visitor::HandleWeakCell(const LivenessBroker& broker, const void* object) {
+ WeakMember<T>* weak_member =
+ reinterpret_cast<WeakMember<T>*>(const_cast<void*>(object));
+ if (weak_member->Get()) {
+ if (weak_member->IsHashTableDeletedValue()) {
+ // This can happen when weak fields are deleted while incremental marking
+ // is running. Deleted values need to be preserved to avoid reviving
+ // objects in containers.
+ return;
+ }
+ if (!broker.IsHeapObjectAlive(weak_member->Get()))
+ weak_member->Clear();
+ }
+}
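+
+// Editor's note: a sketch of the effect of HandleWeakCell above (illustrative
+// only; the Observer class and the IntBox type it refers to are hypothetical).
+// A WeakMember<T> field traced during marking does not keep its target alive;
+// if the target is not otherwise reachable, weak processing clears the field.
+//
+//   class Observer final : public GarbageCollected<Observer> {
+//    public:
+//     void Trace(Visitor* visitor) const { visitor->Trace(target_); }
+//
+//    private:
+//     WeakMember<IntBox> target_;  // Becomes nullptr once the IntBox dies.
+//   };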
+
+namespace internal {
+
+class LivenessBrokerFactory final {
+ public:
+ static LivenessBroker Create() { return LivenessBroker(); }
+};
+
+} // namespace internal
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/heap_allocator.cc b/chromium/third_party/blink/renderer/platform/heap/impl/heap_allocator.cc
new file mode 100644
index 00000000000..f4e2745e671
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/heap_allocator.cc
@@ -0,0 +1,139 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/heap/heap_allocator.h"
+
+namespace blink {
+
+namespace {
+
+struct BackingModifier {
+ bool can_modify;
+ BasePage* const page;
+ HeapObjectHeader* const header;
+};
+
+BackingModifier CanModifyBacking(ThreadState* const state, void* address) {
+ // - |SweepForbidden| protects against modifying objects from destructors.
+ // - |IsSweepingInProgress| protects against modifying objects while
+ // concurrent sweeping is in progress.
+ // - |in_atomic_pause| protects against modifying objects from within the GC.
+  //   This can e.g. happen when hash table buckets that have containers
+  //   inlined are freed during weakness processing.
+ // - |IsMarkingInProgress| protects against incremental marking which may have
+ // registered callbacks.
+ if (state->SweepForbidden() || state->IsSweepingInProgress() ||
+ state->in_atomic_pause() || state->IsMarkingInProgress())
+ return {false, nullptr, nullptr};
+
+ // - Don't adjust large objects because their page is never reused.
+ // - Don't free backings allocated on other threads.
+ BasePage* page = PageFromObject(address);
+ if (page->IsLargeObjectPage() || page->Arena()->GetThreadState() != state)
+ return {false, nullptr, nullptr};
+
+ HeapObjectHeader* const header = HeapObjectHeader::FromPayload(address);
+ // - Guards against pages that have not been swept. Technically, it should be
+ // fine to modify those backings. We bail out to maintain the invariant that
+ // no marked backing is modified.
+ if (header->IsMarked())
+ return {false, nullptr, nullptr};
+ return {true, page, header};
+}
+
+} // namespace
+
+void HeapAllocator::BackingFree(void* address) {
+ if (!address)
+ return;
+
+ ThreadState* const state = ThreadState::Current();
+ BackingModifier result = CanModifyBacking(state, address);
+ if (!result.can_modify)
+ return;
+
+ static_cast<NormalPage*>(result.page)
+ ->ArenaForNormalPage()
+ ->PromptlyFreeObject(result.header);
+}
+
+void HeapAllocator::FreeVectorBacking(void* address) {
+ BackingFree(address);
+}
+
+void HeapAllocator::FreeHashTableBacking(void* address) {
+ BackingFree(address);
+}
+
+bool HeapAllocator::BackingExpand(void* address, size_t new_size) {
+ if (!address)
+ return false;
+
+ ThreadState* state = ThreadState::Current();
+
+ BackingModifier result = CanModifyBacking(state, address);
+ if (!result.can_modify)
+ return false;
+ DCHECK(!state->in_atomic_pause());
+ DCHECK(state->IsAllocationAllowed());
+ DCHECK_EQ(&state->Heap(), &ThreadState::FromObject(address)->Heap());
+
+ // FIXME: Support expand for large objects.
+ // Don't expand backings allocated on other threads.
+ BasePage* page = PageFromObject(address);
+ if (page->IsLargeObjectPage() || page->Arena()->GetThreadState() != state)
+ return false;
+
+ HeapObjectHeader* header = HeapObjectHeader::FromPayload(address);
+ NormalPageArena* arena = static_cast<NormalPage*>(page)->ArenaForNormalPage();
+ return arena->ExpandObject(header, new_size);
+}
+
+bool HeapAllocator::ExpandVectorBacking(void* address, size_t new_size) {
+ return BackingExpand(address, new_size);
+}
+
+bool HeapAllocator::ExpandHashTableBacking(void* address, size_t new_size) {
+ return BackingExpand(address, new_size);
+}
+
+bool HeapAllocator::BackingShrink(void* address,
+ size_t quantized_current_size,
+ size_t quantized_shrunk_size) {
+ if (!address || quantized_shrunk_size == quantized_current_size)
+ return true;
+
+ DCHECK_LT(quantized_shrunk_size, quantized_current_size);
+
+ ThreadState* const state = ThreadState::Current();
+ BackingModifier result = CanModifyBacking(state, address);
+ if (!result.can_modify)
+ return false;
+
+ DCHECK(state->IsAllocationAllowed());
+ DCHECK_EQ(&state->Heap(), &ThreadState::FromObject(address)->Heap());
+
+ NormalPageArena* arena =
+ static_cast<NormalPage*>(result.page)->ArenaForNormalPage();
+ // We shrink the object only if the shrinking will make a non-small
+ // prompt-free block.
+ // FIXME: Optimize the threshold size.
+ if (quantized_current_size <= quantized_shrunk_size +
+ sizeof(HeapObjectHeader) +
+ sizeof(void*) * 32 &&
+ !arena->IsObjectAllocatedAtAllocationPoint(result.header))
+ return true;
+
+ arena->ShrinkObject(result.header, quantized_shrunk_size);
+ return true;
+}
+
+bool HeapAllocator::ShrinkVectorBacking(void* address,
+ size_t quantized_current_size,
+ size_t quantized_shrunk_size) {
+ return BackingShrink(address, quantized_current_size, quantized_shrunk_size);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/heap_allocator.h b/chromium/third_party/blink/renderer/platform/heap/impl/heap_allocator.h
new file mode 100644
index 00000000000..8fc46cb2a62
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/heap_allocator.h
@@ -0,0 +1,909 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_ALLOCATOR_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_ALLOCATOR_H_
+
+#include <type_traits>
+
+#include "build/build_config.h"
+#include "third_party/blink/renderer/platform/heap/collection_support/heap_hash_table_backing.h"
+#include "third_party/blink/renderer/platform/heap/collection_support/heap_vector_backing.h"
+#include "third_party/blink/renderer/platform/heap/heap.h"
+#include "third_party/blink/renderer/platform/heap/heap_buildflags.h"
+#include "third_party/blink/renderer/platform/heap/impl/marking_visitor.h"
+#include "third_party/blink/renderer/platform/heap/impl/trace_traits.h"
+#include "third_party/blink/renderer/platform/heap/thread_state_scopes.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+#include "third_party/blink/renderer/platform/wtf/assertions.h"
+#include "third_party/blink/renderer/platform/wtf/construct_traits.h"
+#include "third_party/blink/renderer/platform/wtf/deque.h"
+#include "third_party/blink/renderer/platform/wtf/doubly_linked_list.h"
+#include "third_party/blink/renderer/platform/wtf/hash_counted_set.h"
+#include "third_party/blink/renderer/platform/wtf/hash_map.h"
+#include "third_party/blink/renderer/platform/wtf/hash_set.h"
+#include "third_party/blink/renderer/platform/wtf/hash_table.h"
+#include "third_party/blink/renderer/platform/wtf/linked_hash_set.h"
+#include "third_party/blink/renderer/platform/wtf/list_hash_set.h"
+#include "third_party/blink/renderer/platform/wtf/type_traits.h"
+#include "third_party/blink/renderer/platform/wtf/vector.h"
+
+namespace blink {
+
+#define DISALLOW_IN_CONTAINER() \
+ public: \
+ using IsDisallowedInContainerMarker = int; \
+ \
+ private: \
+ friend class ::WTF::internal::__thisIsHereToForceASemicolonAfterThisMacro
+
+// IsAllowedInContainer returns true if some type T supports being nested
+// arbitrarily in other containers. This is relevant because some collections
+// assume that they are placed on a non-moving arena.
+template <typename T, typename = int>
+struct IsAllowedInContainer : std::true_type {};
+template <typename T>
+struct IsAllowedInContainer<T, typename T::IsDisallowedInContainerMarker>
+ : std::false_type {};
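+
+// Editor's note: a sketch of how the marker is consumed (illustrative only;
+// the class below is hypothetical). A type that must not be nested inside
+// other heap collections opts out via DISALLOW_IN_CONTAINER(), which makes
+// IsAllowedInContainer<NonNestable>::value evaluate to false:
+//
+//   class NonNestable {
+//     DISALLOW_IN_CONTAINER();
+//   };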
+
+// This is a static-only class used as a trait on collections to make them heap
+// allocated. However see also HeapListHashSetAllocator.
+class PLATFORM_EXPORT HeapAllocator {
+ STATIC_ONLY(HeapAllocator);
+
+ public:
+ using LivenessBroker = blink::LivenessBroker;
+ using Visitor = blink::Visitor;
+ static constexpr bool kIsGarbageCollected = true;
+
+ template <typename T>
+ static size_t MaxElementCountInBackingStore() {
+ return kMaxHeapObjectSize / sizeof(T);
+ }
+
+ template <typename T>
+ static size_t QuantizedSize(size_t count) {
+ CHECK(count <= MaxElementCountInBackingStore<T>());
+ return ThreadHeap::AllocationSizeFromSize(count * sizeof(T)) -
+ sizeof(HeapObjectHeader);
+ }
+ template <typename T>
+ static T* AllocateVectorBacking(size_t size) {
+ return reinterpret_cast<T*>(
+ MakeGarbageCollected<HeapVectorBacking<T>>(size / sizeof(T)));
+ }
+ static void FreeVectorBacking(void*);
+ static bool ExpandVectorBacking(void*, size_t);
+ static bool ShrinkVectorBacking(void* address,
+ size_t quantized_current_size,
+ size_t quantized_shrunk_size);
+
+ template <typename T, typename HashTable>
+ static T* AllocateHashTableBacking(size_t size) {
+ return reinterpret_cast<T*>(
+ MakeGarbageCollected<HeapHashTableBacking<HashTable>>(
+ size / sizeof(typename HashTable::ValueType)));
+ }
+ template <typename T, typename HashTable>
+ static T* AllocateZeroedHashTableBacking(size_t size) {
+ return AllocateHashTableBacking<T, HashTable>(size);
+ }
+ static void FreeHashTableBacking(void* address);
+ static bool ExpandHashTableBacking(void*, size_t);
+
+ static void TraceBackingStoreIfMarked(const void* address) {
+ // Trace backing store elements only if backing store was marked. The
+ // sweeper may be active on the backing store which requires atomic mark bit
+ // access. A precise filter is performed in
+ // MarkingVisitor::TraceMarkedBackingStore.
+ if (HeapObjectHeader::FromPayload(address)
+ ->IsMarked<HeapObjectHeader::AccessMode::kAtomic>()) {
+ MarkingVisitor::TraceMarkedBackingStore(address);
+ }
+ }
+
+ template <typename T>
+ static void BackingWriteBarrier(T** slot) {
+ MarkingVisitor::WriteBarrier(slot);
+ }
+
+ template <typename Return, typename Metadata>
+ static Return Malloc(size_t size, const char* type_name) {
+ return reinterpret_cast<Return>(
+ MarkAsConstructed(ThreadHeap::Allocate<Metadata>(size)));
+ }
+
+  // Compilers sometimes eagerly instantiate the unused 'operator delete', so
+  // we provide a version that asserts and fails at run-time if used.
+ static void Free(void*) { NOTREACHED(); }
+
+ template <typename T>
+ static void* NewArray(size_t bytes) {
+ NOTREACHED();
+ return nullptr;
+ }
+
+ static void DeleteArray(void* ptr) { NOTREACHED(); }
+
+ static bool IsAllocationAllowed() {
+ return ThreadState::Current()->IsAllocationAllowed();
+ }
+
+ static bool IsIncrementalMarking() {
+ return ThreadState::IsAnyIncrementalMarking() &&
+ ThreadState::Current()->IsIncrementalMarking();
+ }
+
+ template <typename T, typename Traits>
+ static void Trace(Visitor* visitor, const T& t) {
+ TraceCollectionIfEnabled<WTF::WeakHandlingTrait<T>::value, T,
+ Traits>::Trace(visitor, &t);
+ }
+
+ static void EnterGCForbiddenScope() {
+ ThreadState::Current()->EnterGCForbiddenScope();
+ }
+
+ static void LeaveGCForbiddenScope() {
+ ThreadState::Current()->LeaveGCForbiddenScope();
+ }
+
+ template <typename T, typename Traits>
+ static void NotifyNewObject(T* object) {
+#if BUILDFLAG(BLINK_HEAP_YOUNG_GENERATION)
+ ThreadState* const thread_state = ThreadState::Current();
+ if (!thread_state->IsIncrementalMarking()) {
+ MarkingVisitor::GenerationalBarrier(reinterpret_cast<Address>(object),
+ thread_state);
+ return;
+ }
+#else
+ if (!ThreadState::IsAnyIncrementalMarking())
+ return;
+ // The object may have been in-place constructed as part of a large object.
+ // It is not safe to retrieve the page from the object here.
+ ThreadState* const thread_state = ThreadState::Current();
+ if (!thread_state->IsIncrementalMarking()) {
+ return;
+ }
+#endif // BLINK_HEAP_YOUNG_GENERATION
+ // Eagerly trace the object ensuring that the object and all its children
+ // are discovered by the marker.
+ ThreadState::NoAllocationScope no_allocation_scope(thread_state);
+ DCHECK(thread_state->CurrentVisitor());
+ // No weak handling for write barriers. Modifying weakly reachable objects
+ // strongifies them for the current cycle.
+ DCHECK(!Traits::kCanHaveDeletedValue || !Traits::IsDeletedValue(*object));
+ TraceCollectionIfEnabled<WTF::kNoWeakHandling, T, Traits>::Trace(
+ thread_state->CurrentVisitor(), object);
+ }
+
+ template <typename T, typename Traits>
+ static void NotifyNewObjects(T* array, size_t len) {
+#if BUILDFLAG(BLINK_HEAP_YOUNG_GENERATION)
+ ThreadState* const thread_state = ThreadState::Current();
+ if (!thread_state->IsIncrementalMarking()) {
+ MarkingVisitor::GenerationalBarrier(reinterpret_cast<Address>(array),
+ thread_state);
+ return;
+ }
+#else
+ if (!ThreadState::IsAnyIncrementalMarking())
+ return;
+ // The object may have been in-place constructed as part of a large object.
+ // It is not safe to retrieve the page from the object here.
+ ThreadState* const thread_state = ThreadState::Current();
+ if (!thread_state->IsIncrementalMarking()) {
+ return;
+ }
+#endif // BLINK_HEAP_YOUNG_GENERATION
+ // See |NotifyNewObject| for details.
+ ThreadState::NoAllocationScope no_allocation_scope(thread_state);
+ DCHECK(thread_state->CurrentVisitor());
+ // No weak handling for write barriers. Modifying weakly reachable objects
+ // strongifies them for the current cycle.
+ while (len-- > 0) {
+ DCHECK(!Traits::kCanHaveDeletedValue || !Traits::IsDeletedValue(*array));
+ TraceCollectionIfEnabled<WTF::kNoWeakHandling, T, Traits>::Trace(
+ thread_state->CurrentVisitor(), array);
+ array++;
+ }
+ }
+
+ template <typename T>
+ static void TraceVectorBacking(Visitor* visitor,
+ const T* backing,
+ const T* const* backing_slot) {
+ visitor->TraceMovablePointer(backing_slot);
+ visitor->Trace(reinterpret_cast<const HeapVectorBacking<T>*>(backing));
+ }
+
+ template <typename T, typename HashTable>
+ static void TraceHashTableBackingStrongly(Visitor* visitor,
+ const T* backing,
+ const T* const* backing_slot) {
+ visitor->TraceMovablePointer(backing_slot);
+ visitor->Trace(
+ reinterpret_cast<const HeapHashTableBacking<HashTable>*>(backing));
+ }
+
+ template <typename T, typename HashTable>
+ static void TraceHashTableBackingWeakly(Visitor* visitor,
+ const T* backing,
+ const T* const* backing_slot,
+ WeakCallback callback,
+ const void* parameter) {
+ visitor->TraceMovablePointer(backing_slot);
+ visitor->TraceWeakContainer(
+ reinterpret_cast<const HeapHashTableBacking<HashTable>*>(backing),
+ reinterpret_cast<const HeapHashTableBacking<HashTable>* const*>(
+ backing_slot),
+ TraceTrait<HeapHashTableBacking<HashTable>>::GetTraceDescriptor(
+ backing),
+ TraceTrait<HeapHashTableBacking<HashTable>>::GetWeakTraceDescriptor(
+ backing),
+ callback, parameter);
+ }
+
+ private:
+ static Address MarkAsConstructed(Address address) {
+ HeapObjectHeader::FromPayload(reinterpret_cast<void*>(address))
+ ->MarkFullyConstructed<HeapObjectHeader::AccessMode::kAtomic>();
+ return address;
+ }
+
+ static void BackingFree(void*);
+ static bool BackingExpand(void*, size_t);
+ static bool BackingShrink(void*,
+ size_t quantized_current_size,
+ size_t quantized_shrunk_size);
+
+ template <typename T, wtf_size_t u, typename V>
+ friend class WTF::Vector;
+ template <typename T, typename U, typename V, typename W>
+ friend class WTF::HashSet;
+ template <typename T,
+ typename U,
+ typename V,
+ typename W,
+ typename X,
+ typename Y>
+ friend class WTF::HashMap;
+};
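+
+// Illustrative sketch (hypothetical garbage-collected class Node): when a heap
+// collection is mutated while incremental marking is running, the
+// NotifyNewObject()/NotifyNewObjects() hooks above eagerly trace the newly
+// added entries so that the marker cannot miss them.
+//
+//   HeapVector<Member<Node>>* vec =
+//       MakeGarbageCollected<HeapVector<Member<Node>>>();
+//   // push_back() reaches HeapAllocator::NotifyNewObject() via WTF's
+//   // construction traits; with incremental marking active, the new
+//   // Member<Node> slot is traced with the current marking visitor.
+//   vec->push_back(MakeGarbageCollected<Node>());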
+
+template <typename VisitorDispatcher, typename Value>
+static void TraceListHashSetValue(VisitorDispatcher visitor,
+ const Value& value) {
+ // We use the default hash traits for the value in the node, because
+ // ListHashSet does not let you specify any specific ones.
+  // ListHashSet of WeakMember is not allowed (there is an assert elsewhere),
+  // so the value is always traced with kNoWeakHandling here.
+ TraceCollectionIfEnabled<WTF::kNoWeakHandling, Value,
+ WTF::HashTraits<Value>>::Trace(visitor, &value);
+}
+
+// The inline capacity is just a dummy template argument to match the off-heap
+// allocator.
+// This inherits from the static-only HeapAllocator trait class, but we do
+// declare pointers to instances. These pointers are always null, and no
+// objects are instantiated.
+template <typename ValueArg, wtf_size_t inlineCapacity>
+class HeapListHashSetAllocator : public HeapAllocator {
+ DISALLOW_NEW();
+
+ public:
+ using TableAllocator = HeapAllocator;
+ using Node = WTF::ListHashSetNode<ValueArg, HeapListHashSetAllocator>;
+
+ class AllocatorProvider {
+ DISALLOW_NEW();
+
+ public:
+ // For the heap allocation we don't need an actual allocator object, so
+ // we just return null.
+ HeapListHashSetAllocator* Get() const { return nullptr; }
+
+ // No allocator object is needed.
+ void CreateAllocatorIfNeeded() {}
+ void ReleaseAllocator() {}
+
+ // There is no allocator object in the HeapListHashSet (unlike in the
+ // regular ListHashSet) so there is nothing to swap.
+ void Swap(AllocatorProvider& other) {}
+ };
+
+ void Deallocate(void* dummy) {}
+
+ // This is not a static method even though it could be, because it needs to
+ // match the one that the (off-heap) ListHashSetAllocator has. The 'this'
+ // pointer will always be null.
+ void* AllocateNode() {
+ // Consider using a LinkedHashSet instead if this compile-time assert fails:
+ static_assert(!WTF::IsWeak<ValueArg>::value,
+ "weak pointers in a ListHashSet will result in null entries "
+ "in the set");
+
+ return Malloc<void*, Node>(
+ sizeof(Node),
+ nullptr /* Oilpan does not use the heap profiler at the moment. */);
+ }
+
+ template <typename VisitorDispatcher>
+ static void TraceValue(VisitorDispatcher visitor, const Node* node) {
+ TraceListHashSetValue(visitor, node->value_);
+ }
+};
+
+namespace internal {
+
+template <typename T>
+constexpr bool IsMember = WTF::IsSubclassOfTemplate<T, Member>::value;
+
+} // namespace internal
+
+template <typename KeyArg,
+ typename MappedArg,
+ typename HashArg = typename DefaultHash<KeyArg>::Hash,
+ typename KeyTraitsArg = HashTraits<KeyArg>,
+ typename MappedTraitsArg = HashTraits<MappedArg>>
+class HeapHashMap : public HashMap<KeyArg,
+ MappedArg,
+ HashArg,
+ KeyTraitsArg,
+ MappedTraitsArg,
+ HeapAllocator> {
+ IS_GARBAGE_COLLECTED_CONTAINER_TYPE();
+ DISALLOW_NEW();
+
+ static void CheckType() {
+ static_assert(std::is_trivially_destructible<HeapHashMap>::value,
+ "HeapHashMap must be trivially destructible.");
+ static_assert(
+ IsAllowedInContainer<KeyArg>::value,
+ "Not allowed to directly nest type. Use Member<> indirection instead.");
+ static_assert(
+ IsAllowedInContainer<MappedArg>::value,
+ "Not allowed to directly nest type. Use Member<> indirection instead.");
+ static_assert(
+ WTF::IsTraceable<KeyArg>::value || WTF::IsTraceable<MappedArg>::value,
+ "For hash maps without traceable elements, use HashMap<> "
+ "instead of HeapHashMap<>.");
+ static_assert(WTF::IsMemberOrWeakMemberType<KeyArg>::value ||
+ !WTF::IsTraceable<KeyArg>::value,
+ "HeapHashMap supports only Member, WeakMember and "
+ "non-traceable types as keys.");
+ static_assert(WTF::IsMemberOrWeakMemberType<MappedArg>::value ||
+ !WTF::IsTraceable<MappedArg>::value ||
+ WTF::IsSubclassOfTemplate<MappedArg,
+ TraceWrapperV8Reference>::value,
+ "HeapHashMap supports only Member, WeakMember, "
+ "TraceWrapperV8Reference and "
+ "non-traceable types as values.");
+ }
+
+ public:
+ template <typename>
+ static void* AllocateObject(size_t size) {
+ return ThreadHeap::Allocate<
+ HeapHashMap<KeyArg, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg>>(
+ size);
+ }
+
+ HeapHashMap() { CheckType(); }
+};
+
+template <typename T, typename U, typename V, typename W, typename X>
+struct GCInfoTrait<HeapHashMap<T, U, V, W, X>>
+ : public GCInfoTrait<HashMap<T, U, V, W, X, HeapAllocator>> {};
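+
+// Illustrative usage sketch (hypothetical garbage-collected class Node; the
+// exact Trace() signature can differ between Blink revisions): traceable keys
+// and values must be wrapped in Member<>/WeakMember<>, and the map itself is
+// traced as part of its owning object.
+//
+//   class NodeRegistry final : public GarbageCollected<NodeRegistry> {
+//    public:
+//     void Trace(Visitor* visitor) const { visitor->Trace(map_); }
+//
+//    private:
+//     HeapHashMap<WeakMember<Node>, Member<Node>> map_;
+//   };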
+
+template <typename ValueArg,
+ typename HashArg = typename DefaultHash<ValueArg>::Hash,
+ typename TraitsArg = HashTraits<ValueArg>>
+class HeapHashSet
+ : public HashSet<ValueArg, HashArg, TraitsArg, HeapAllocator> {
+ IS_GARBAGE_COLLECTED_CONTAINER_TYPE();
+ DISALLOW_NEW();
+
+ static void CheckType() {
+ static_assert(WTF::IsMemberOrWeakMemberType<ValueArg>::value,
+ "HeapHashSet supports only Member and WeakMember.");
+ static_assert(std::is_trivially_destructible<HeapHashSet>::value,
+ "HeapHashSet must be trivially destructible.");
+ static_assert(
+ IsAllowedInContainer<ValueArg>::value,
+ "Not allowed to directly nest type. Use Member<> indirection instead.");
+ static_assert(WTF::IsTraceable<ValueArg>::value,
+ "For hash sets without traceable elements, use HashSet<> "
+ "instead of HeapHashSet<>.");
+ }
+
+ public:
+ template <typename>
+ static void* AllocateObject(size_t size) {
+ return ThreadHeap::Allocate<HeapHashSet<ValueArg, HashArg, TraitsArg>>(
+ size);
+ }
+
+ HeapHashSet() { CheckType(); }
+};
+
+template <typename T, typename U, typename V>
+struct GCInfoTrait<HeapHashSet<T, U, V>>
+ : public GCInfoTrait<HashSet<T, U, V, HeapAllocator>> {};
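+
+// Illustrative usage sketch (hypothetical garbage-collected class Observer):
+// a HeapHashSet of WeakMember does not keep its elements alive; entries whose
+// referents die are cleared out during garbage collection.
+//
+//   HeapHashSet<WeakMember<Observer>> observers_;  // field of a traced class
+//   observers_.insert(observer);  // does not extend |observer|'s lifetime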
+
+template <typename ValueArg, typename TraitsArg = HashTraits<ValueArg>>
+class HeapLinkedHashSet
+ : public LinkedHashSet<ValueArg, TraitsArg, HeapAllocator> {
+ IS_GARBAGE_COLLECTED_CONTAINER_TYPE();
+ DISALLOW_NEW();
+
+ static void CheckType() {
+ static_assert(WTF::IsMemberOrWeakMemberType<ValueArg>::value,
+ "HeapLinkedHashSet supports only Member and WeakMember.");
+ // If not trivially destructible, we have to add a destructor which will
+ // hinder performance.
+ static_assert(std::is_trivially_destructible<HeapLinkedHashSet>::value,
+ "HeapLinkedHashSet must be trivially destructible.");
+ static_assert(
+ IsAllowedInContainer<ValueArg>::value,
+ "Not allowed to directly nest type. Use Member<> indirection instead.");
+ static_assert(WTF::IsTraceable<ValueArg>::value,
+ "For sets without traceable elements, use LinkedHashSet<> "
+ "instead of HeapLinkedHashSet<>.");
+ }
+
+ public:
+ template <typename>
+ static void* AllocateObject(size_t size) {
+ return ThreadHeap::Allocate<HeapLinkedHashSet<ValueArg, TraitsArg>>(size);
+ }
+
+ HeapLinkedHashSet() { CheckType(); }
+};
+
+template <typename T, typename U>
+struct GCInfoTrait<HeapLinkedHashSet<T, U>>
+ : public GCInfoTrait<LinkedHashSet<T, U, HeapAllocator>> {};
+
+template <typename ValueArg,
+ wtf_size_t inlineCapacity = 0, // The inlineCapacity is just a dummy
+ // to match ListHashSet (off-heap).
+ typename HashArg = typename DefaultHash<ValueArg>::Hash>
+class HeapListHashSet
+ : public ListHashSet<ValueArg,
+ inlineCapacity,
+ HashArg,
+ HeapListHashSetAllocator<ValueArg, inlineCapacity>> {
+ IS_GARBAGE_COLLECTED_CONTAINER_TYPE();
+ DISALLOW_NEW();
+
+ static void CheckType() {
+ static_assert(WTF::IsMemberOrWeakMemberType<ValueArg>::value,
+ "HeapListHashSet supports only Member and WeakMember.");
+ static_assert(std::is_trivially_destructible<HeapListHashSet>::value,
+ "HeapListHashSet must be trivially destructible.");
+ static_assert(
+ IsAllowedInContainer<ValueArg>::value,
+ "Not allowed to directly nest type. Use Member<> indirection instead.");
+ static_assert(WTF::IsTraceable<ValueArg>::value,
+ "For sets without traceable elements, use ListHashSet<> "
+ "instead of HeapListHashSet<>.");
+ }
+
+ public:
+ template <typename>
+ static void* AllocateObject(size_t size) {
+ return ThreadHeap::Allocate<
+ HeapListHashSet<ValueArg, inlineCapacity, HashArg>>(size);
+ }
+
+ HeapListHashSet() { CheckType(); }
+};
+
+template <typename T, wtf_size_t inlineCapacity, typename U>
+struct GCInfoTrait<HeapListHashSet<T, inlineCapacity, U>>
+ : public GCInfoTrait<
+ ListHashSet<T,
+ inlineCapacity,
+ U,
+ HeapListHashSetAllocator<T, inlineCapacity>>> {};
+
+template <typename Value,
+ typename HashFunctions = typename DefaultHash<Value>::Hash,
+ typename Traits = HashTraits<Value>>
+class HeapHashCountedSet
+ : public HashCountedSet<Value, HashFunctions, Traits, HeapAllocator> {
+ IS_GARBAGE_COLLECTED_CONTAINER_TYPE();
+ DISALLOW_NEW();
+
+ static void CheckType() {
+ static_assert(WTF::IsMemberOrWeakMemberType<Value>::value,
+ "HeapHashCountedSet supports only Member and WeakMember.");
+ static_assert(std::is_trivially_destructible<HeapHashCountedSet>::value,
+ "HeapHashCountedSet must be trivially destructible.");
+ static_assert(
+ IsAllowedInContainer<Value>::value,
+ "Not allowed to directly nest type. Use Member<> indirection instead.");
+ static_assert(WTF::IsTraceable<Value>::value,
+ "For counted sets without traceable elements, use "
+ "HashCountedSet<> instead of HeapHashCountedSet<>.");
+ }
+
+ public:
+ template <typename>
+ static void* AllocateObject(size_t size) {
+ return ThreadHeap::Allocate<
+ HeapHashCountedSet<Value, HashFunctions, Traits>>(size);
+ }
+
+ HeapHashCountedSet() { CheckType(); }
+};
+
+template <typename T, typename U, typename V>
+struct GCInfoTrait<HeapHashCountedSet<T, U, V>>
+ : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> {};
+
+template <typename T, wtf_size_t inlineCapacity = 0>
+class HeapVector : public Vector<T, inlineCapacity, HeapAllocator> {
+ IS_GARBAGE_COLLECTED_CONTAINER_TYPE();
+ DISALLOW_NEW();
+
+ static void CheckType() {
+ static_assert(
+ std::is_trivially_destructible<HeapVector>::value || inlineCapacity,
+ "HeapVector must be trivially destructible.");
+ static_assert(
+ IsAllowedInContainer<T>::value,
+ "Not allowed to directly nest type. Use Member<> indirection instead.");
+ static_assert(WTF::IsTraceable<T>::value,
+ "For vectors without traceable elements, use Vector<> "
+ "instead of HeapVector<>.");
+ static_assert(!WTF::IsWeak<T>::value,
+ "Weak types are not allowed in HeapVector.");
+ static_assert(WTF::IsTraceableInCollectionTrait<VectorTraits<T>>::value,
+ "Type must be traceable in collection");
+ }
+
+ public:
+ template <typename>
+ static void* AllocateObject(size_t size) {
+ // On-heap HeapVectors generally should not have inline capacity, but it is
+ // hard to avoid when using a type alias. Hence we only disallow the
+ // VectorTraits<T>::kNeedsDestruction case for now.
+ static_assert(inlineCapacity == 0 || !VectorTraits<T>::kNeedsDestruction,
+ "on-heap HeapVector<> should not have an inline capacity");
+ return ThreadHeap::Allocate<HeapVector<T, inlineCapacity>>(size);
+ }
+
+ HeapVector() { CheckType(); }
+
+ explicit HeapVector(wtf_size_t size)
+ : Vector<T, inlineCapacity, HeapAllocator>(size) {
+ CheckType();
+ }
+
+ HeapVector(wtf_size_t size, const T& val)
+ : Vector<T, inlineCapacity, HeapAllocator>(size, val) {
+ CheckType();
+ }
+
+ template <wtf_size_t otherCapacity>
+ HeapVector(const HeapVector<T, otherCapacity>& other)
+ : Vector<T, inlineCapacity, HeapAllocator>(other) {
+ CheckType();
+ }
+
+ HeapVector(std::initializer_list<T> elements)
+ : Vector<T, inlineCapacity, HeapAllocator>(elements) {
+ CheckType();
+ }
+};
+
+template <typename T, wtf_size_t inlineCapacity>
+struct GCInfoTrait<HeapVector<T, inlineCapacity>>
+ : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> {};
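+
+// Illustrative usage sketch (hypothetical garbage-collected class Node): per
+// the static_asserts above, elements must be traceable and non-weak, and the
+// AllocateObject() path only tolerates a non-zero inline capacity when the
+// element type does not need destruction.
+//
+//   HeapVector<Member<Node>, 4> children_;  // embedded field, inline capacity
+//   HeapVector<Member<Node>>* nodes =       // standalone, heap-allocated
+//       MakeGarbageCollected<HeapVector<Member<Node>>>();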
+
+template <typename T>
+class HeapDeque : public Deque<T, 0, HeapAllocator> {
+ IS_GARBAGE_COLLECTED_CONTAINER_TYPE();
+ DISALLOW_NEW();
+
+ static void CheckType() {
+ static_assert(internal::IsMember<T>, "HeapDeque supports only Member.");
+ static_assert(std::is_trivially_destructible<HeapDeque>::value,
+ "HeapDeque must be trivially destructible.");
+ static_assert(
+ IsAllowedInContainer<T>::value,
+ "Not allowed to directly nest type. Use Member<> indirection instead.");
+    static_assert(WTF::IsTraceable<T>::value,
+                  "For deques without traceable elements, use Deque<> instead "
+                  "of HeapDeque<>");
+ }
+
+ public:
+ template <typename>
+ static void* AllocateObject(size_t size) {
+ return ThreadHeap::Allocate<HeapDeque<T>>(size);
+ }
+
+ HeapDeque() { CheckType(); }
+
+ explicit HeapDeque(wtf_size_t size) : Deque<T, 0, HeapAllocator>(size) {
+ CheckType();
+ }
+
+ HeapDeque(wtf_size_t size, const T& val)
+ : Deque<T, 0, HeapAllocator>(size, val) {
+ CheckType();
+ }
+
+ HeapDeque& operator=(const HeapDeque& other) {
+ HeapDeque<T> copy(other);
+ Deque<T, 0, HeapAllocator>::Swap(copy);
+ return *this;
+ }
+
+ HeapDeque(const HeapDeque<T>& other) : Deque<T, 0, HeapAllocator>(other) {}
+};
+
+template <typename T>
+struct GCInfoTrait<HeapDeque<T>>
+ : public GCInfoTrait<Deque<T, 0, HeapAllocator>> {};
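+
+// Illustrative note (hypothetical garbage-collected class Node): like
+// HeapVector, a HeapDeque is either embedded in a traced class or allocated
+// standalone on the Oilpan heap; copy-assignment above is implemented as
+// copy-and-swap.
+//
+//   HeapDeque<Member<Node>>* queue =
+//       MakeGarbageCollected<HeapDeque<Member<Node>>>();
+//   queue->push_back(MakeGarbageCollected<Node>());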
+
+} // namespace blink
+
+namespace WTF {
+
+template <typename T>
+struct VectorTraits<blink::Member<T>> : VectorTraitsBase<blink::Member<T>> {
+ STATIC_ONLY(VectorTraits);
+ static const bool kNeedsDestruction = false;
+ static const bool kCanInitializeWithMemset = true;
+ static const bool kCanClearUnusedSlotsWithMemset = true;
+ static const bool kCanCopyWithMemcpy = true;
+ static const bool kCanMoveWithMemcpy = true;
+
+ static constexpr bool kCanTraceConcurrently = true;
+};
+
+// These traits are used in VectorBackedLinkedList to support WeakMember in
+// HeapLinkedHashSet though HeapVector<WeakMember> usage is still banned.
+// (See the discussion in https://crrev.com/c/2246014)
+template <typename T>
+struct VectorTraits<blink::WeakMember<T>>
+ : VectorTraitsBase<blink::WeakMember<T>> {
+ STATIC_ONLY(VectorTraits);
+ static const bool kNeedsDestruction = false;
+ static const bool kCanInitializeWithMemset = true;
+ static const bool kCanClearUnusedSlotsWithMemset = true;
+ static const bool kCanCopyWithMemcpy = true;
+ static const bool kCanMoveWithMemcpy = true;
+
+ static constexpr bool kCanTraceConcurrently = true;
+};
+
+template <typename T>
+struct VectorTraits<blink::UntracedMember<T>>
+ : VectorTraitsBase<blink::UntracedMember<T>> {
+ STATIC_ONLY(VectorTraits);
+ static const bool kNeedsDestruction = false;
+ static const bool kCanInitializeWithMemset = true;
+ static const bool kCanClearUnusedSlotsWithMemset = true;
+ static const bool kCanMoveWithMemcpy = true;
+};
+
+template <typename T>
+struct VectorTraits<blink::HeapVector<T, 0>>
+ : VectorTraitsBase<blink::HeapVector<T, 0>> {
+ STATIC_ONLY(VectorTraits);
+ static const bool kNeedsDestruction = false;
+ static const bool kCanInitializeWithMemset = true;
+ static const bool kCanClearUnusedSlotsWithMemset = true;
+ static const bool kCanMoveWithMemcpy = true;
+};
+
+template <typename T>
+struct VectorTraits<blink::HeapDeque<T>>
+ : VectorTraitsBase<blink::HeapDeque<T>> {
+ STATIC_ONLY(VectorTraits);
+ static const bool kNeedsDestruction = false;
+ static const bool kCanInitializeWithMemset = true;
+ static const bool kCanClearUnusedSlotsWithMemset = true;
+ static const bool kCanMoveWithMemcpy = true;
+};
+
+template <typename T, wtf_size_t inlineCapacity>
+struct VectorTraits<blink::HeapVector<T, inlineCapacity>>
+ : VectorTraitsBase<blink::HeapVector<T, inlineCapacity>> {
+ STATIC_ONLY(VectorTraits);
+ static const bool kNeedsDestruction = VectorTraits<T>::kNeedsDestruction;
+ static const bool kCanInitializeWithMemset =
+ VectorTraits<T>::kCanInitializeWithMemset;
+ static const bool kCanClearUnusedSlotsWithMemset =
+ VectorTraits<T>::kCanClearUnusedSlotsWithMemset;
+ static const bool kCanMoveWithMemcpy = VectorTraits<T>::kCanMoveWithMemcpy;
+};
+
+template <typename T>
+struct HashTraits<blink::Member<T>> : SimpleClassHashTraits<blink::Member<T>> {
+ STATIC_ONLY(HashTraits);
+ // FIXME: Implement proper const'ness for iterator types. Requires support
+ // in the marking Visitor.
+ using PeekInType = T*;
+ using IteratorGetType = blink::Member<T>*;
+ using IteratorConstGetType = const blink::Member<T>*;
+ using IteratorReferenceType = blink::Member<T>&;
+ using IteratorConstReferenceType = const blink::Member<T>&;
+ static IteratorReferenceType GetToReferenceConversion(IteratorGetType x) {
+ return *x;
+ }
+ static IteratorConstReferenceType GetToReferenceConstConversion(
+ IteratorConstGetType x) {
+ return *x;
+ }
+
+ using PeekOutType = T*;
+
+ template <typename U>
+ static void Store(const U& value, blink::Member<T>& storage) {
+ storage = value;
+ }
+
+ static PeekOutType Peek(const blink::Member<T>& value) { return value; }
+
+ static void ConstructDeletedValue(blink::Member<T>& slot, bool) {
+ slot = WTF::kHashTableDeletedValue;
+ }
+
+ static constexpr bool kCanTraceConcurrently = true;
+};
+
+template <typename T>
+struct HashTraits<blink::WeakMember<T>>
+ : SimpleClassHashTraits<blink::WeakMember<T>> {
+ STATIC_ONLY(HashTraits);
+ static const bool kNeedsDestruction = false;
+ // FIXME: Implement proper const'ness for iterator types. Requires support
+ // in the marking Visitor.
+ using PeekInType = T*;
+ using IteratorGetType = blink::WeakMember<T>*;
+ using IteratorConstGetType = const blink::WeakMember<T>*;
+ using IteratorReferenceType = blink::WeakMember<T>&;
+ using IteratorConstReferenceType = const blink::WeakMember<T>&;
+ static IteratorReferenceType GetToReferenceConversion(IteratorGetType x) {
+ return *x;
+ }
+ static IteratorConstReferenceType GetToReferenceConstConversion(
+ IteratorConstGetType x) {
+ return *x;
+ }
+
+ using PeekOutType = T*;
+
+ template <typename U>
+ static void Store(const U& value, blink::WeakMember<T>& storage) {
+ storage = value;
+ }
+
+ static PeekOutType Peek(const blink::WeakMember<T>& value) { return value; }
+
+ static void ConstructDeletedValue(blink::WeakMember<T>& slot, bool) {
+ slot = WTF::kHashTableDeletedValue;
+ }
+
+ static constexpr bool kCanTraceConcurrently = true;
+};
+
+template <typename T>
+struct HashTraits<blink::UntracedMember<T>>
+ : SimpleClassHashTraits<blink::UntracedMember<T>> {
+ STATIC_ONLY(HashTraits);
+ static const bool kNeedsDestruction = false;
+ // FIXME: Implement proper const'ness for iterator types.
+ using PeekInType = T*;
+ using IteratorGetType = blink::UntracedMember<T>*;
+ using IteratorConstGetType = const blink::UntracedMember<T>*;
+ using IteratorReferenceType = blink::UntracedMember<T>&;
+ using IteratorConstReferenceType = const blink::UntracedMember<T>&;
+ static IteratorReferenceType GetToReferenceConversion(IteratorGetType x) {
+ return *x;
+ }
+ static IteratorConstReferenceType GetToReferenceConstConversion(
+ IteratorConstGetType x) {
+ return *x;
+ }
+ using PeekOutType = T*;
+
+ template <typename U>
+ static void Store(const U& value, blink::UntracedMember<T>& storage) {
+ storage = value;
+ }
+
+ static PeekOutType Peek(const blink::UntracedMember<T>& value) {
+ return value;
+ }
+};
+
+template <typename T, wtf_size_t inlineCapacity>
+struct IsTraceable<
+ ListHashSetNode<T, blink::HeapListHashSetAllocator<T, inlineCapacity>>*> {
+ STATIC_ONLY(IsTraceable);
+ static_assert(sizeof(T), "T must be fully defined");
+ // All heap allocated node pointers need visiting to keep the nodes alive,
+ // regardless of whether they contain pointers to other heap allocated
+ // objects.
+ static const bool value = true;
+};
+
+template <typename T, wtf_size_t inlineCapacity>
+struct IsGarbageCollectedType<
+ ListHashSetNode<T, blink::HeapListHashSetAllocator<T, inlineCapacity>>> {
+ static const bool value = true;
+};
+
+template <typename Set>
+struct IsGarbageCollectedType<ListHashSetIterator<Set>> {
+ static const bool value = IsGarbageCollectedType<Set>::value;
+};
+
+template <typename Set>
+struct IsGarbageCollectedType<ListHashSetConstIterator<Set>> {
+ static const bool value = IsGarbageCollectedType<Set>::value;
+};
+
+template <typename Set>
+struct IsGarbageCollectedType<ListHashSetReverseIterator<Set>> {
+ static const bool value = IsGarbageCollectedType<Set>::value;
+};
+
+template <typename Set>
+struct IsGarbageCollectedType<ListHashSetConstReverseIterator<Set>> {
+ static const bool value = IsGarbageCollectedType<Set>::value;
+};
+
+template <typename T, typename H>
+struct HandleHashTraits : SimpleClassHashTraits<H> {
+ STATIC_ONLY(HandleHashTraits);
+ // TODO: Implement proper const'ness for iterator types. Requires support
+ // in the marking Visitor.
+ using PeekInType = T*;
+ using IteratorGetType = H*;
+ using IteratorConstGetType = const H*;
+ using IteratorReferenceType = H&;
+ using IteratorConstReferenceType = const H&;
+ static IteratorReferenceType GetToReferenceConversion(IteratorGetType x) {
+ return *x;
+ }
+ static IteratorConstReferenceType GetToReferenceConstConversion(
+ IteratorConstGetType x) {
+ return *x;
+ }
+
+ using PeekOutType = T*;
+
+ template <typename U>
+ static void Store(const U& value, H& storage) {
+ storage = value;
+ }
+
+ static PeekOutType Peek(const H& value) { return value; }
+};
+
+template <typename Value,
+ typename HashFunctions,
+ typename Traits,
+ typename VectorType>
+inline void CopyToVector(
+ const blink::HeapHashCountedSet<Value, HashFunctions, Traits>& set,
+ VectorType& vector) {
+ CopyToVector(static_cast<const HashCountedSet<Value, HashFunctions, Traits,
+ blink::HeapAllocator>&>(set),
+ vector);
+}
+
+} // namespace WTF
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_ALLOCATOR_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/heap_compact.cc b/chromium/third_party/blink/renderer/platform/heap/impl/heap_compact.cc
new file mode 100644
index 00000000000..411ea513c3e
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/heap_compact.cc
@@ -0,0 +1,457 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/heap/impl/heap_compact.h"
+
+#include <memory>
+
+#include "base/debug/alias.h"
+#include "base/memory/ptr_util.h"
+#include "third_party/blink/public/common/features.h"
+#include "third_party/blink/renderer/platform/heap/heap.h"
+#include "third_party/blink/renderer/platform/heap/heap_stats_collector.h"
+#include "third_party/blink/renderer/platform/instrumentation/histogram.h"
+#include "third_party/blink/renderer/platform/runtime_enabled_features.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+#include "third_party/blink/renderer/platform/wtf/hash_map.h"
+
+namespace blink {
+
+// The real worker behind heap compaction, recording references to movable
+// objects ("slots"). When the objects end up being compacted and moved,
+// Relocate() will adjust the slots to point to the new location of the
+// object, along with handling fixups for interior pointers.
+//
+// The "fixups" object is created and maintained for the lifetime of one
+// heap compaction-enhanced GC.
+class HeapCompact::MovableObjectFixups final {
+ USING_FAST_MALLOC(HeapCompact::MovableObjectFixups);
+
+ public:
+ explicit MovableObjectFixups(ThreadHeap* heap) : heap_(heap) {}
+ ~MovableObjectFixups() = default;
+
+ // For the arenas being compacted, record all pages belonging to them.
+  // This is needed to handle interior pointers that reside in areas that are
+  // compacted themselves.
+ void AddCompactingPage(BasePage* page);
+
+ // Adds a slot for compaction. Filters slots in dead objects.
+ void AddOrFilter(const MovableReference*);
+
+ // Relocates a backing store |from| -> |to|.
+ void Relocate(Address from, Address to);
+
+ // Relocates interior slots in a backing store that is moved |from| -> |to|.
+ void RelocateInteriorFixups(Address from, Address to, size_t size);
+
+  // Updates the collection of callbacks from the items pushed to the worklist
+  // by marking visitors.
+ void UpdateCallbacks();
+
+#if DEBUG_HEAP_COMPACTION
+ void dumpDebugStats() {
+ LOG_HEAP_COMPACTION() << "Fixups: pages=" << relocatable_pages_.size()
+ << " objects=" << fixups_.size()
+ << " interior-size=" << interior_fixups_.size();
+ }
+#endif
+
+ private:
+ void VerifyUpdatedSlot(MovableReference* slot);
+
+ ThreadHeap* const heap_;
+
+  // Map from movable reference (value) to its slot. Upon moving an object,
+  // the slot pointing to it requires updating.
+ HashMap<MovableReference, MovableReference*> fixups_;
+
+ // Map of interior slots to their final location. Needs to be an ordered map
+ // as it is used to walk through slots starting at a given memory address.
+  // Requires log(n) lookup to make the early bailout reasonably fast.
+  // Currently only std::map fulfills those requirements.
+  //
+  // - The initial value for a given key is nullptr.
+  // - Upon moving an object this value is adjusted accordingly.
+ std::map<MovableReference*, Address> interior_fixups_;
+
+ // All pages that are being compacted. The set keeps references to
+  // BasePage instances. The void* type was selected to allow checking
+  // arbitrary addresses.
+ HashSet<void*> relocatable_pages_;
+
+#if DCHECK_IS_ON()
+  // The following two collections are used to allow referring back from a
+  // slot to an already moved object.
+ HashSet<const void*> moved_objects_;
+ HashMap<MovableReference*, MovableReference> interior_slot_to_object_;
+#endif // DCHECK_IS_ON()
+};
+
+void HeapCompact::MovableObjectFixups::AddCompactingPage(BasePage* page) {
+ DCHECK(!page->IsLargeObjectPage());
+ relocatable_pages_.insert(page);
+}
+
+void HeapCompact::MovableObjectFixups::AddOrFilter(
+ const MovableReference* const_slot) {
+ const void* value = *const_slot;
+ CHECK(value);
+
+ // All slots and values are part of Oilpan's heap.
+ // - Slots may be contained within dead objects if e.g. the write barrier
+  //   registered the slot while the outer backing itself has not been marked
+ // live in time. Slots in dead objects are filtered below.
+ // - Values may only be contained in or point to live objects.
+
+ // Slots handling.
+ BasePage* const slot_page =
+ heap_->LookupPageForAddress(reinterpret_cast<ConstAddress>(const_slot));
+ CHECK(slot_page);
+ HeapObjectHeader* const header =
+ slot_page->IsLargeObjectPage()
+ ? static_cast<LargeObjectPage*>(slot_page)->ObjectHeader()
+ : static_cast<NormalPage*>(slot_page)->FindHeaderFromAddress(
+ reinterpret_cast<ConstAddress>(const_slot));
+ CHECK(header);
+ // Filter the slot since the object that contains the slot is dead.
+ if (!header->IsMarked())
+ return;
+
+ // Value handling.
+ BasePage* const value_page =
+ heap_->LookupPageForAddress(reinterpret_cast<ConstAddress>(value));
+ CHECK(value_page);
+
+ // The following cases are not compacted and do not require recording:
+ // - Backings in large pages.
+ // - Inline backings that are part of a non-backing arena.
+ if (value_page->IsLargeObjectPage() ||
+ !HeapCompact::IsCompactableArena(value_page->Arena()->ArenaIndex()))
+ return;
+
+ // Slots must reside in and values must point to live objects at this
+ // point, with the exception of slots in eagerly swept arenas where objects
+ // have already been processed. |value| usually points to a separate
+ // backing store but can also point to inlined storage which is why the
+ // dynamic header lookup is required.
+ HeapObjectHeader* const value_header =
+ static_cast<NormalPage*>(value_page)
+ ->FindHeaderFromAddress(reinterpret_cast<ConstAddress>(value));
+ CHECK(value_header);
+ CHECK(value_header->IsMarked());
+
+ // Slots may have been recorded already but must point to the same
+ // value. Example: Ephemeron iterations may register slots multiple
+ // times.
+ auto fixup_it = fixups_.find(value);
+ if (UNLIKELY(fixup_it != fixups_.end())) {
+ CHECK_EQ(const_slot, fixup_it->value);
+ return;
+ }
+
+ // Add regular fixup.
+ MovableReference* slot = const_cast<MovableReference*>(const_slot);
+ fixups_.insert(value, slot);
+
+ // Check whether the slot itself resides on a page that is compacted.
+ if (LIKELY(!relocatable_pages_.Contains(slot_page)))
+ return;
+
+ auto interior_it = interior_fixups_.find(slot);
+ CHECK(interior_fixups_.end() == interior_it);
+ interior_fixups_.emplace(slot, nullptr);
+#if DCHECK_IS_ON()
+ interior_slot_to_object_.insert(slot, header->Payload());
+#endif // DCHECK_IS_ON()
+ LOG_HEAP_COMPACTION() << "Interior slot: " << slot;
+}
+
+void HeapCompact::MovableObjectFixups::Relocate(Address from, Address to) {
+#if DCHECK_IS_ON()
+ moved_objects_.insert(from);
+#endif // DCHECK_IS_ON()
+
+ const HeapObjectHeader* header = HeapObjectHeader::FromPayload(to);
+ const size_t size = header->PayloadSize();
+
+ // Interior slots always need to be processed for moved objects.
+ // Consider an object A with slot A.x pointing to value B where A is
+ // allocated on a movable page itself. When B is finally moved, it needs to
+ // find the corresponding slot A.x. Object A may be moved already and the
+ // memory may have been freed, which would result in a crash.
+ if (!interior_fixups_.empty()) {
+ RelocateInteriorFixups(from, to, size);
+ }
+
+ auto it = fixups_.find(from);
+ // This means that there is no corresponding slot for a live backing store.
+ // This may happen because a mutator may change the slot to point to a
+ // different backing store because e.g. incremental marking marked a backing
+ // store as live that was later on replaced.
+ if (it == fixups_.end()) {
+ return;
+ }
+
+#if DCHECK_IS_ON()
+ BasePage* from_page = PageFromObject(from);
+ DCHECK(relocatable_pages_.Contains(from_page));
+#endif
+
+ // If the object is referenced by a slot that is contained on a compacted
+ // area itself, check whether it can be updated already.
+ MovableReference* slot = it->value;
+ auto interior_it = interior_fixups_.find(slot);
+ if (interior_it != interior_fixups_.end()) {
+ MovableReference* slot_location =
+ reinterpret_cast<MovableReference*>(interior_it->second);
+ if (!slot_location) {
+ interior_it->second = to;
+#if DCHECK_IS_ON()
+ // Check that the containing object has not been moved yet.
+ auto reverse_it = interior_slot_to_object_.find(slot);
+ DCHECK(interior_slot_to_object_.end() != reverse_it);
+ DCHECK(moved_objects_.end() == moved_objects_.find(reverse_it->value));
+#endif // DCHECK_IS_ON()
+ } else {
+ LOG_HEAP_COMPACTION()
+ << "Redirected slot: " << slot << " => " << slot_location;
+ slot = slot_location;
+ }
+ }
+
+ // If the slot has subsequently been updated, e.g. a destructor having
+ // mutated and expanded/shrunk the collection, do not update and relocate
+ // the slot -- |from| is no longer valid and referenced.
+ if (UNLIKELY(*slot != from)) {
+ LOG_HEAP_COMPACTION() << "No relocation: slot = " << slot
+ << ", *slot = " << *slot << ", from = " << from
+ << ", to = " << to;
+ VerifyUpdatedSlot(slot);
+ return;
+ }
+
+  // Update the slot to the new value.
+ *slot = to;
+}
+
+void HeapCompact::MovableObjectFixups::RelocateInteriorFixups(Address from,
+ Address to,
+ size_t size) {
+ // |from| is a valid address for a slot.
+ auto interior_it =
+ interior_fixups_.lower_bound(reinterpret_cast<MovableReference*>(from));
+ if (interior_it == interior_fixups_.end())
+ return;
+
+ CHECK_GE(reinterpret_cast<Address>(interior_it->first), from);
+ size_t offset = reinterpret_cast<Address>(interior_it->first) - from;
+ while (offset < size) {
+ if (!interior_it->second) {
+ // Update the interior fixup value, so that when the object the slot is
+ // pointing to is moved, it can re-use this value.
+ Address fixup = to + offset;
+ interior_it->second = fixup;
+
+ // If the |slot|'s content is pointing into the region [from, from +
+ // size) we are dealing with an interior pointer that does not point to
+ // a valid HeapObjectHeader. Such references need to be fixed up
+ // immediately.
+ Address fixup_contents = *reinterpret_cast<Address*>(fixup);
+ if (fixup_contents > from && fixup_contents < (from + size)) {
+ *reinterpret_cast<Address*>(fixup) = fixup_contents - from + to;
+ }
+ }
+
+ interior_it++;
+ if (interior_it == interior_fixups_.end())
+ return;
+ offset = reinterpret_cast<Address>(interior_it->first) - from;
+ }
+}
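+
+// Illustrative walk-through (hypothetical addresses): assume backing A lives
+// at |a| on a compactable page and holds an interior slot A.x at a + 16 that
+// points to backing B at |b|. AddOrFilter() records (B -> &A.x) in |fixups_|
+// and, because &A.x itself sits on a compacted page, also records &A.x in
+// |interior_fixups_| with value nullptr. If A moves first to |a'|,
+// RelocateInteriorFixups() stores a' + 16 as the slot's new location. When B
+// later moves to |b'|, Relocate() looks up &A.x, follows the redirection to
+// a' + 16, and writes b' there.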
+
+void HeapCompact::MovableObjectFixups::VerifyUpdatedSlot(
+ MovableReference* slot) {
+// Verify that the already updated slot is valid, meaning:
+// - has been cleared.
+// - has been updated & expanded with a large object backing store.
+// - has been updated with a larger, freshly allocated backing store.
+// (on a fresh page in a compactable arena that is not being
+// compacted.)
+#if DCHECK_IS_ON()
+ if (!*slot)
+ return;
+ BasePage* slot_page =
+ heap_->LookupPageForAddress(reinterpret_cast<ConstAddress>(*slot));
+  // slot_page is null if *slot is pointing to an off-heap region. This may
+  // happen if *slot is pointing to an inline buffer of a HeapVector with
+  // inline capacity.
+ if (!slot_page)
+ return;
+ DCHECK(slot_page->IsLargeObjectPage() ||
+ (HeapCompact::IsCompactableArena(slot_page->Arena()->ArenaIndex()) &&
+ !relocatable_pages_.Contains(slot_page)));
+#endif // DCHECK_IS_ON()
+}
+
+HeapCompact::HeapCompact(ThreadHeap* heap) : heap_(heap) {
+  // The heap compaction implementation assumes in a few places that the range
+  //
+  //   [VectorArenaIndex, HashTableArenaIndex]
+  //
+  // is contiguous. Use a static assert here so that this assumption is not
+  // silently invalidated by changes to the ArenaIndices.
+ static_assert(BlinkGC::kVectorArenaIndex + 1 == BlinkGC::kHashTableArenaIndex,
+ "unexpected ArenaIndices ordering");
+}
+
+HeapCompact::~HeapCompact() = default;
+
+HeapCompact::MovableObjectFixups& HeapCompact::Fixups() {
+ if (!fixups_)
+ fixups_ = std::make_unique<MovableObjectFixups>(heap_);
+ return *fixups_;
+}
+
+bool HeapCompact::ShouldCompact(BlinkGC::StackState stack_state,
+ BlinkGC::MarkingType marking_type,
+ BlinkGC::GCReason reason) {
+ if (marking_type == BlinkGC::MarkingType::kAtomicMarking &&
+ stack_state == BlinkGC::StackState::kHeapPointersOnStack) {
+ // The following check ensures that tests that want to test compaction are
+ // not interrupted by garbage collections that cannot use compaction.
+ CHECK(!force_for_next_gc_);
+ return false;
+ }
+
+ UpdateHeapResidency();
+
+ if (force_for_next_gc_) {
+ return true;
+ }
+
+ if (!base::FeatureList::IsEnabled(blink::features::kBlinkHeapCompaction)) {
+ return false;
+ }
+
+ // Only enable compaction when in a memory reduction garbage collection as it
+ // may significantly increase the final garbage collection pause.
+ if (reason == BlinkGC::GCReason::kUnifiedHeapForMemoryReductionGC ||
+ reason == BlinkGC::GCReason::kUnifiedHeapForcedForTestingGC) {
+ return free_list_size_ > kFreeListSizeThreshold;
+ }
+
+ return false;
+}
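+
+// Illustrative decision sketch: after the conservative-stack check above,
+// force_for_next_gc_ unconditionally enables compaction; otherwise the
+// kBlinkHeapCompaction feature must be enabled, the GC must be a
+// memory-reduction or forced-for-testing unified GC, and the accumulated
+// free-list size of the compactable arenas must exceed kFreeListSizeThreshold
+// (512 KiB), e.g. free_list_size_ == 700 * 1024 compacts, 300 * 1024 does not.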
+
+void HeapCompact::Initialize(ThreadState* state) {
+ CHECK(force_for_next_gc_ ||
+ base::FeatureList::IsEnabled(blink::features::kBlinkHeapCompaction));
+ CHECK(!do_compact_);
+ CHECK(!fixups_);
+ LOG_HEAP_COMPACTION() << "Compacting: free=" << free_list_size_;
+ do_compact_ = true;
+ gc_count_since_last_compaction_ = 0;
+ force_for_next_gc_ = false;
+}
+
+bool HeapCompact::ShouldRegisterMovingAddress() {
+ return do_compact_;
+}
+
+void HeapCompact::UpdateHeapResidency() {
+ size_t total_arena_size = 0;
+ size_t total_free_list_size = 0;
+
+ compactable_arenas_ = 0;
+#if DEBUG_HEAP_FREELIST
+ std::stringstream stream;
+#endif
+ for (int i = BlinkGC::kVectorArenaIndex; i <= BlinkGC::kHashTableArenaIndex;
+ ++i) {
+ NormalPageArena* arena = static_cast<NormalPageArena*>(heap_->Arena(i));
+ size_t arena_size = arena->ArenaSize();
+ size_t free_list_size = arena->FreeListSize();
+ total_arena_size += arena_size;
+ total_free_list_size += free_list_size;
+#if DEBUG_HEAP_FREELIST
+ stream << i << ": [" << arena_size << ", " << free_list_size << "], ";
+#endif
+ // TODO: be more discriminating and consider arena
+ // load factor, effectiveness of past compactions etc.
+ if (!arena_size)
+ continue;
+ // Mark the arena as compactable.
+ compactable_arenas_ |= 0x1u << i;
+ }
+#if DEBUG_HEAP_FREELIST
+ LOG_HEAP_FREELIST() << "Arena residencies: {" << stream.str() << "}";
+ LOG_HEAP_FREELIST() << "Total = " << total_arena_size
+ << ", Free = " << total_free_list_size;
+#endif
+
+ // TODO(sof): consider smoothing the reported sizes.
+ free_list_size_ = total_free_list_size;
+}
+
+void HeapCompact::FinishedArenaCompaction(NormalPageArena* arena,
+ size_t freed_pages,
+ size_t freed_size) {
+ if (!do_compact_)
+ return;
+
+ heap_->stats_collector()->IncreaseCompactionFreedPages(freed_pages);
+ heap_->stats_collector()->IncreaseCompactionFreedSize(freed_size);
+}
+
+void HeapCompact::Relocate(Address from, Address to) {
+ Fixups().Relocate(from, to);
+}
+
+void HeapCompact::FilterNonLiveSlots() {
+ if (!do_compact_)
+ return;
+
+ last_fixup_count_for_testing_ = 0;
+ MovableReferenceWorklist::View traced_slots(
+ heap_->GetMovableReferenceWorklist(), WorklistTaskId::MutatorThread);
+ const MovableReference* slot;
+ while (traced_slots.Pop(&slot)) {
+ CHECK(heap_->LookupPageForAddress(reinterpret_cast<ConstAddress>(slot)));
+ if (*slot) {
+ Fixups().AddOrFilter(slot);
+ last_fixup_count_for_testing_++;
+ }
+ }
+}
+
+void HeapCompact::Finish() {
+ if (!do_compact_)
+ return;
+
+#if DEBUG_HEAP_COMPACTION
+ if (fixups_)
+ fixups_->dumpDebugStats();
+#endif
+ do_compact_ = false;
+ fixups_.reset();
+}
+
+void HeapCompact::Cancel() {
+ if (!do_compact_)
+ return;
+
+ last_fixup_count_for_testing_ = 0;
+ do_compact_ = false;
+ heap_->GetMovableReferenceWorklist()->Clear();
+ fixups_.reset();
+}
+
+void HeapCompact::AddCompactingPage(BasePage* page) {
+ DCHECK(do_compact_);
+ DCHECK(IsCompactingArena(page->Arena()->ArenaIndex()));
+ Fixups().AddCompactingPage(page);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/heap_compact.h b/chromium/third_party/blink/renderer/platform/heap/impl/heap_compact.h
new file mode 100644
index 00000000000..e39c9c57b88
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/heap_compact.h
@@ -0,0 +1,167 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_COMPACT_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_COMPACT_H_
+
+#include <memory>
+
+#include "base/memory/ptr_util.h"
+#include "third_party/blink/renderer/platform/heap/blink_gc.h"
+#include "third_party/blink/renderer/platform/platform_export.h"
+#include "third_party/blink/renderer/platform/wtf/hash_map.h"
+#include "third_party/blink/renderer/platform/wtf/hash_set.h"
+#include "third_party/blink/renderer/platform/wtf/threading_primitives.h"
+
+#include <bitset>
+#include <utility>
+
+// Compaction-specific debug switches:
+
+// Emit debug info during compaction.
+#define DEBUG_HEAP_COMPACTION 0
+
+// Emit stats on freelist occupancy.
+// 0 - disabled, 1 - minimal, 2 - verbose.
+#define DEBUG_HEAP_FREELIST 0
+
+namespace blink {
+
+class NormalPageArena;
+class BasePage;
+class ThreadState;
+class ThreadHeap;
+
+class PLATFORM_EXPORT HeapCompact final {
+ public:
+ // Returns |true| if the ongoing GC may compact the given arena/sub-heap.
+ static bool IsCompactableArena(int arena_index) {
+ return arena_index >= BlinkGC::kVectorArenaIndex &&
+ arena_index <= BlinkGC::kHashTableArenaIndex;
+ }
+
+ explicit HeapCompact(ThreadHeap*);
+ ~HeapCompact();
+
+ // Returns true if compaction can and should be used for the provided
+ // parameters.
+ bool ShouldCompact(BlinkGC::StackState,
+ BlinkGC::MarkingType,
+ BlinkGC::GCReason);
+
+ // Compaction should be performed as part of the ongoing GC, initialize
+ // the heap compaction pass.
+ void Initialize(ThreadState*);
+
+ // Returns true if the ongoing GC will perform compaction.
+ bool IsCompacting() const { return do_compact_; }
+
+ // Returns true if the ongoing GC will perform compaction for the given
+ // heap arena.
+ bool IsCompactingArena(int arena_index) const {
+ return do_compact_ && (compactable_arenas_ & (0x1u << arena_index));
+ }
+
+ // See |Heap::ShouldRegisterMovingAddress()| documentation.
+ bool ShouldRegisterMovingAddress();
+
+ // Slots that are not contained within live objects are filtered. This can
+ // happen when the write barrier for in-payload objects triggers but the outer
+ // backing store does not survive the marking phase because all its referents
+ // die before being reached by the marker.
+ void FilterNonLiveSlots();
+
+ // Finishes compaction and clears internal state.
+ void Finish();
+
+ // Cancels compaction after slots may have been recorded already.
+ void Cancel();
+
+  // Perform any relocation post-processing after having completed compacting
+  // the given arena. The number of freed pages and the total size (in bytes)
+  // of freed heap storage are passed in as arguments.
+ void FinishedArenaCompaction(NormalPageArena*,
+ size_t freed_pages,
+ size_t freed_size);
+
+ // Register the heap page as containing live objects that will all be
+ // compacted. Registration happens as part of making the arenas ready
+ // for a GC.
+ void AddCompactingPage(BasePage*);
+
+  // Notifies heap compaction that the object at |from| has been relocated to
+  // |to|. (Called by the sweep compaction pass.)
+ void Relocate(Address from, Address to);
+
+ // Enables compaction for the next garbage collection if technically possible.
+ void EnableCompactionForNextGCForTesting() { force_for_next_gc_ = true; }
+
+ // Returns true if one or more vector arenas are being compacted.
+ bool IsCompactingVectorArenasForTesting() const {
+ return IsCompactingArena(BlinkGC::kVectorArenaIndex);
+ }
+
+ size_t LastFixupCountForTesting() const {
+ return last_fixup_count_for_testing_;
+ }
+
+ private:
+ class MovableObjectFixups;
+
+ // Freelist size threshold that must be exceeded before compaction
+ // should be considered.
+ static const size_t kFreeListSizeThreshold = 512 * 1024;
+
+  // Sample the amount of fragmentation and heap memory currently residing
+  // on the freelists of the arenas we're able to compact. The computed
+  // numbers will be subsequently used to determine if a heap compaction
+  // is in order (ShouldCompact()).
+ void UpdateHeapResidency();
+
+ MovableObjectFixups& Fixups();
+
+ ThreadHeap* const heap_;
+ std::unique_ptr<MovableObjectFixups> fixups_;
+
+ // Set to |true| when a compacting sweep will go ahead.
+ bool do_compact_ = false;
+ size_t gc_count_since_last_compaction_ = 0;
+
+ // Last reported freelist size, across all compactable arenas.
+ size_t free_list_size_ = 0;
+
+  // If compacting, the i-th heap arena will be compacted if the corresponding
+  // bit is set. Indexes are in the range of BlinkGC::ArenaIndices.
+ unsigned compactable_arenas_ = 0u;
+
+ size_t last_fixup_count_for_testing_ = 0;
+
+ bool force_for_next_gc_ = false;
+};
+
+} // namespace blink
+
+// Logging macros activated by debug switches.
+
+#define LOG_HEAP_COMPACTION_INTERNAL() DLOG(INFO)
+
+#if DEBUG_HEAP_COMPACTION
+#define LOG_HEAP_COMPACTION() LOG_HEAP_COMPACTION_INTERNAL()
+#else
+#define LOG_HEAP_COMPACTION() EAT_STREAM_PARAMETERS
+#endif
+
+#if DEBUG_HEAP_FREELIST
+#define LOG_HEAP_FREELIST() LOG_HEAP_COMPACTION_INTERNAL()
+#else
+#define LOG_HEAP_FREELIST() EAT_STREAM_PARAMETERS
+#endif
+
+#if DEBUG_HEAP_FREELIST == 2
+#define LOG_HEAP_FREELIST_VERBOSE() LOG_HEAP_COMPACTION_INTERNAL()
+#else
+#define LOG_HEAP_FREELIST_VERBOSE() EAT_STREAM_PARAMETERS
+#endif
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_COMPACT_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/heap_page.cc b/chromium/third_party/blink/renderer/platform/heap/impl/heap_page.cc
new file mode 100644
index 00000000000..1949d5c9921
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/heap_page.cc
@@ -0,0 +1,1910 @@
+/*
+ * Copyright (C) 2013 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "third_party/blink/renderer/platform/heap/impl/heap_page.h"
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/auto_reset.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "third_party/blink/public/platform/platform.h"
+#include "third_party/blink/renderer/platform/bindings/script_forbidden_scope.h"
+#include "third_party/blink/renderer/platform/heap/blink_gc_memory_dump_provider.h"
+#include "third_party/blink/renderer/platform/heap/heap_stats_collector.h"
+#include "third_party/blink/renderer/platform/heap/impl/heap_compact.h"
+#include "third_party/blink/renderer/platform/heap/impl/marking_verifier.h"
+#include "third_party/blink/renderer/platform/heap/impl/page_bloom_filter.h"
+#include "third_party/blink/renderer/platform/heap/impl/page_memory.h"
+#include "third_party/blink/renderer/platform/heap/impl/page_pool.h"
+#include "third_party/blink/renderer/platform/heap/thread_state.h"
+#include "third_party/blink/renderer/platform/instrumentation/histogram.h"
+#include "third_party/blink/renderer/platform/instrumentation/memory_pressure_listener.h"
+#include "third_party/blink/renderer/platform/instrumentation/tracing/trace_event.h"
+#include "third_party/blink/renderer/platform/instrumentation/tracing/web_memory_allocator_dump.h"
+#include "third_party/blink/renderer/platform/instrumentation/tracing/web_process_memory_dump.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/partitions.h"
+#include "third_party/blink/renderer/platform/wtf/assertions.h"
+#include "third_party/blink/renderer/platform/wtf/container_annotations.h"
+#include "third_party/blink/renderer/platform/wtf/leak_annotations.h"
+
+#ifdef ANNOTATE_CONTIGUOUS_CONTAINER
+
+// When finalizing a non-inlined vector backing store/container, remove
+// its contiguous container annotation. Required as it will not be destructed
+// from its Vector.
+#define ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize) \
+ do { \
+ BasePage* page = PageFromObject(object); \
+ DCHECK(page); \
+ bool is_container = \
+ ThreadHeap::IsVectorArenaIndex(page->Arena()->ArenaIndex()); \
+ if (!is_container && page->IsLargeObjectPage()) \
+ is_container = \
+ static_cast<LargeObjectPage*>(page)->IsVectorBackingPage(); \
+ if (is_container) \
+ ANNOTATE_DELETE_BUFFER(object, objectSize, 0); \
+ } while (0)
+
+// A vector backing store represented by a large object is marked
+// so that when it is finalized, its ASan annotation will be
+// correctly retired.
+#define ASAN_MARK_LARGE_VECTOR_CONTAINER(arena, large_object) \
+ if (ThreadHeap::IsVectorArenaIndex(arena->ArenaIndex())) { \
+ BasePage* large_page = PageFromObject(large_object); \
+ DCHECK(large_page->IsLargeObjectPage()); \
+ static_cast<LargeObjectPage*>(large_page)->SetIsVectorBackingPage(); \
+ }
+#else
+#define ASAN_RETIRE_CONTAINER_ANNOTATION(payload, payloadSize)
+#define ASAN_MARK_LARGE_VECTOR_CONTAINER(arena, largeObject)
+#endif
+
+namespace blink {
+
+void HeapObjectHeader::Finalize(Address object, size_t object_size) {
+ DCHECK(!IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>());
+ HeapAllocHooks::FreeHookIfEnabled(object);
+ const GCInfo& gc_info = GCInfo::From(GcInfoIndex());
+ if (gc_info.finalize)
+ gc_info.finalize(object);
+
+ ASAN_RETIRE_CONTAINER_ANNOTATION(object, object_size);
+}
+
+bool HeapObjectHeader::HasNonTrivialFinalizer() const {
+ return GCInfo::From(GcInfoIndex()).finalize;
+}
+
+const char* HeapObjectHeader::Name() const {
+ return GCInfo::From(GcInfoIndex()).name(Payload()).value;
+}
+
+BaseArena::BaseArena(ThreadState* state, int index)
+ : thread_state_(state), index_(index) {}
+
+BaseArena::~BaseArena() {
+ DCHECK(swept_pages_.IsEmpty());
+ DCHECK(unswept_pages_.IsEmpty());
+ DCHECK(swept_unfinalized_pages_.IsEmpty());
+ DCHECK(swept_unfinalized_empty_pages_.IsEmpty());
+}
+
+void BaseArena::RemoveAllPages() {
+ ClearFreeLists();
+
+ DCHECK(SweepingAndFinalizationCompleted());
+ while (BasePage* page = swept_pages_.Pop()) {
+ page->RemoveFromHeap();
+ }
+}
+
+void BaseArena::CollectStatistics(std::string name,
+ ThreadState::Statistics* stats) {
+ ThreadState::Statistics::ArenaStatistics arena_stats;
+
+ ResetAllocationPoint();
+
+ if (!NameClient::HideInternalName()) {
+ const size_t num_types = GCInfoTable::Get().NumberOfGCInfos();
+ arena_stats.object_stats.num_types = num_types;
+ arena_stats.object_stats.type_name.resize(num_types);
+ arena_stats.object_stats.type_count.resize(num_types);
+ arena_stats.object_stats.type_bytes.resize(num_types);
+ }
+
+ arena_stats.name = std::move(name);
+ DCHECK(unswept_pages_.IsEmpty());
+ for (BasePage* page : swept_pages_) {
+ page->CollectStatistics(&arena_stats);
+ }
+ CollectFreeListStatistics(&arena_stats.free_list_stats);
+ stats->used_size_bytes += arena_stats.used_size_bytes;
+ stats->committed_size_bytes += arena_stats.committed_size_bytes;
+ stats->arena_stats.emplace_back(std::move(arena_stats));
+}
+
+void NormalPageArena::CollectFreeListStatistics(
+ ThreadState::Statistics::FreeListStatistics* stats) {
+ free_list_.CollectStatistics(stats);
+}
+
+#if DCHECK_IS_ON()
+BasePage* BaseArena::FindPageFromAddress(ConstAddress address) const {
+ for (BasePage* page : swept_pages_) {
+ if (page->Contains(address))
+ return page;
+ }
+ for (BasePage* page : unswept_pages_) {
+ if (page->Contains(address))
+ return page;
+ }
+ for (BasePage* page : swept_unfinalized_pages_) {
+ if (page->Contains(address))
+ return page;
+ }
+ for (BasePage* page : swept_unfinalized_empty_pages_) {
+ if (page->Contains(address))
+ return page;
+ }
+ return nullptr;
+}
+#endif
+
+void BaseArena::MakeConsistentForGC() {
+#if DCHECK_IS_ON()
+ DCHECK(IsConsistentForGC());
+#endif
+
+ // We should not start a new GC until we finish sweeping in the current GC.
+ CHECK(SweepingAndFinalizationCompleted());
+
+ HeapCompact* heap_compactor = GetThreadState()->Heap().Compaction();
+ if (!heap_compactor->IsCompactingArena(ArenaIndex()))
+ return;
+
+ for (BasePage* page : swept_pages_) {
+ if (!page->IsLargeObjectPage())
+ heap_compactor->AddCompactingPage(page);
+ }
+}
+
+void BaseArena::MakeConsistentForMutator() {
+ ClearFreeLists();
+#if DCHECK_IS_ON()
+ DCHECK(IsConsistentForGC());
+#endif
+ DCHECK(swept_pages_.IsEmpty());
+
+ // Drop marks from marked objects and rebuild free lists in preparation for
+  // resuming the execution of mutators.
+ for (BasePage* page : unswept_pages_) {
+ page->MakeConsistentForMutator();
+ page->MarkAsSwept();
+ }
+
+ swept_pages_.MoveFrom(std::move(unswept_pages_));
+ DCHECK(SweepingAndFinalizationCompleted());
+
+ VerifyObjectStartBitmap();
+}
+
+void BaseArena::Unmark() {
+ DCHECK(GetThreadState()->InAtomicMarkingPause());
+ DCHECK(SweepingAndFinalizationCompleted());
+
+ for (BasePage* page : swept_pages_) {
+ page->Unmark();
+ }
+}
+
+size_t BaseArena::ObjectPayloadSizeForTesting() {
+#if DCHECK_IS_ON()
+ DCHECK(IsConsistentForGC());
+#endif
+ // DCHECK(SweepingCompleted());
+
+ size_t object_payload_size = 0;
+ for (BasePage* page : unswept_pages_) {
+ object_payload_size += page->ObjectPayloadSizeForTesting();
+ }
+ return object_payload_size;
+}
+
+void BaseArena::PrepareForSweep(BlinkGC::CollectionType collection_type) {
+ DCHECK(GetThreadState()->InAtomicMarkingPause());
+ DCHECK(SweepingAndFinalizationCompleted());
+
+ ClearFreeLists();
+
+ // Verification depends on the allocation point being cleared.
+ VerifyObjectStartBitmap();
+
+ if (collection_type == BlinkGC::CollectionType::kMinor) {
+ auto** first_young =
+ std::partition(swept_pages_.begin(), swept_pages_.end(),
+ [](BasePage* page) { return !page->IsYoung(); });
+ for (auto** it = first_young; it != swept_pages_.end(); ++it) {
+ BasePage* page = *it;
+ page->MarkAsUnswept();
+ page->SetAsYoung(false);
+ unswept_pages_.Push(page);
+ }
+ swept_pages_.erase(first_young, swept_pages_.end());
+ return;
+ }
+
+ for (BasePage* page : swept_pages_) {
+ page->MarkAsUnswept();
+ }
+ // Move all pages to a list of unswept pages.
+ unswept_pages_.MoveFrom(std::move(swept_pages_));
+ DCHECK(swept_pages_.IsEmpty());
+}
+
+#if defined(ADDRESS_SANITIZER)
+void BaseArena::PoisonUnmarkedObjects() {
+ for (BasePage* page : unswept_pages_) {
+ page->PoisonUnmarkedObjects();
+ }
+}
+#endif
+
+Address BaseArena::LazySweep(size_t allocation_size, size_t gc_info_index) {
+ // If there are no pages to be swept, return immediately.
+ if (SweepingAndFinalizationCompleted())
+ return nullptr;
+
+ CHECK(GetThreadState()->IsSweepingInProgress());
+
+ // lazySweepPages() can be called recursively if finalizers invoked in
+ // page->Sweep() allocate memory and the allocation triggers
+ // lazySweepPages(). This check prevents the sweeping from being executed
+ // recursively.
+ if (GetThreadState()->SweepForbidden())
+ return nullptr;
+
+ ThreadHeapStatsCollector::EnabledScope stats_scope(
+ GetThreadState()->Heap().stats_collector(),
+ ThreadHeapStatsCollector::kLazySweepOnAllocation);
+ ThreadState::SweepForbiddenScope sweep_forbidden(GetThreadState());
+ ScriptForbiddenScope script_forbidden;
+ return LazySweepPages(allocation_size, gc_info_index);
+}
+
+bool BaseArena::SweepUnsweptPageOnConcurrentThread(BasePage* page) {
+ const bool is_empty = page->Sweep(FinalizeType::kDeferred);
+ if (is_empty) {
+ swept_unfinalized_empty_pages_.PushLocked(page);
+ } else {
+ swept_unfinalized_pages_.PushLocked(page);
+ }
+ return is_empty;
+}
+
+bool BaseArena::SweepUnsweptPage(BasePage* page) {
+ const bool is_empty = page->Sweep(FinalizeType::kInlined);
+ if (is_empty) {
+ page->FinalizeSweep(SweepResult::kPageEmpty);
+ } else {
+    // First, we add the page to the list of swept pages
+    // so that the FindPageFromAddress check is happy.
+ swept_pages_.PushLocked(page);
+ page->FinalizeSweep(SweepResult::kPageNotEmpty);
+ }
+ return is_empty;
+}
+
+bool BaseArena::LazySweepWithDeadline(base::TimeTicks deadline) {
+  // It might be expensive to call base::TimeTicks::Now() for every page
+  // (i.e., per 128 KB sweep or one LargeObject sweep), so we only check the
+  // deadline once every 10 pages.
+ static constexpr size_t kDeadlineCheckInterval = 10;
+
+ CHECK(GetThreadState()->IsSweepingInProgress());
+ DCHECK(GetThreadState()->SweepForbidden());
+ DCHECK(ScriptForbiddenScope::IsScriptForbidden());
+
+ size_t page_count = 1;
+  // First, process empty pages to reduce the memory footprint faster.
+ while (BasePage* page = swept_unfinalized_empty_pages_.PopLocked()) {
+ page->FinalizeSweep(SweepResult::kPageEmpty);
+ if (page_count % kDeadlineCheckInterval == 0) {
+ if (deadline <= base::TimeTicks::Now()) {
+ // Deadline has come.
+ return SweepingAndFinalizationCompleted();
+ }
+ }
+ page_count++;
+ }
+  // Second, run finalizers for already-swept pages, leaving the remaining
+  // sweeping work to the concurrent sweeper.
+ while (BasePage* page = swept_unfinalized_pages_.PopLocked()) {
+ swept_pages_.PushLocked(page);
+ page->FinalizeSweep(SweepResult::kPageNotEmpty);
+ if (page_count % kDeadlineCheckInterval == 0) {
+ if (deadline <= base::TimeTicks::Now()) {
+ // Deadline has come.
+ return SweepingAndFinalizationCompleted();
+ }
+ }
+ page_count++;
+ }
+ // Help concurrent sweeper.
+ while (BasePage* page = unswept_pages_.PopLocked()) {
+ SweepUnsweptPage(page);
+ if (page_count % kDeadlineCheckInterval == 0) {
+ if (deadline <= base::TimeTicks::Now()) {
+ // Deadline has come.
+ return SweepingAndFinalizationCompleted();
+ }
+ }
+ page_count++;
+ }
+
+ return true;
+}
+
+void BaseArena::InvokeFinalizersOnSweptPages() {
+ DCHECK(GetThreadState()->CheckThread());
+ DCHECK(GetThreadState()->IsSweepingInProgress());
+ DCHECK(GetThreadState()->SweepForbidden());
+ while (BasePage* page = swept_unfinalized_pages_.PopLocked()) {
+ swept_pages_.PushLocked(page);
+ page->FinalizeSweep(SweepResult::kPageNotEmpty);
+ }
+ while (BasePage* page = swept_unfinalized_empty_pages_.PopLocked()) {
+ page->FinalizeSweep(SweepResult::kPageEmpty);
+ }
+}
+
+bool BaseArena::ConcurrentSweepOnePage() {
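+  // Returns true once there are no unswept pages left, signalling to the
+  // concurrent sweeper that this arena is done.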
+ BasePage* page = unswept_pages_.PopLocked();
+ if (!page)
+ return true;
+ SweepUnsweptPageOnConcurrentThread(page);
+ return false;
+}
+
+void BaseArena::CompleteSweep() {
+ CHECK(GetThreadState()->IsSweepingInProgress());
+ DCHECK(GetThreadState()->SweepForbidden());
+ DCHECK(ScriptForbiddenScope::IsScriptForbidden());
+
+ // Some phases, e.g. verification, require iterability of a page.
+ MakeIterable();
+
+ // First, finalize pages that have been processed by concurrent sweepers.
+ InvokeFinalizersOnSweptPages();
+
+ // Then, sweep and finalize pages.
+ while (BasePage* page = unswept_pages_.PopLocked()) {
+ SweepUnsweptPage(page);
+ }
+
+ // Verify object start bitmap after all freelists have been merged.
+ VerifyObjectStartBitmap();
+}
+
+Address BaseArena::AllocateLargeObject(size_t allocation_size,
+ size_t gc_info_index) {
+ LargeObjectArena* large_object_arena = static_cast<LargeObjectArena*>(
+ GetThreadState()->Heap().Arena(BlinkGC::kLargeObjectArenaIndex));
+ Address large_object = large_object_arena->AllocateLargeObjectPage(
+ allocation_size, gc_info_index);
+ ASAN_MARK_LARGE_VECTOR_CONTAINER(this, large_object);
+ return large_object;
+}
+
+NormalPageArena::NormalPageArena(ThreadState* state, int index)
+ : BaseArena(state, index),
+ current_allocation_point_(nullptr),
+ remaining_allocation_size_(0),
+ promptly_freed_size_(0) {}
+
+void NormalPageArena::AddToFreeList(Address address, size_t size) {
+#if DCHECK_IS_ON()
+ DCHECK(FindPageFromAddress(address));
+ DCHECK(FindPageFromAddress(address + size - 1));
+#endif
+ free_list_.Add(address, size);
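+  // Free list entries are headers too; record them in the object start bitmap
+  // to keep the bitmap consistent with the page payload.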
+ static_cast<NormalPage*>(PageFromObject(address))
+ ->object_start_bit_map()
+ ->SetBit<HeapObjectHeader::AccessMode::kAtomic>(address);
+}
+
+void NormalPageArena::MakeConsistentForGC() {
+ BaseArena::MakeConsistentForGC();
+
+ // Remove linear allocation area.
+ SetAllocationPoint(nullptr, 0);
+}
+
+void NormalPageArena::ClearFreeLists() {
+ SetAllocationPoint(nullptr, 0);
+ free_list_.Clear();
+ promptly_freed_size_ = 0;
+}
+
+void NormalPageArena::MakeIterable() {
+ SetAllocationPoint(nullptr, 0);
+}
+
+size_t NormalPageArena::ArenaSize() {
+ size_t size = 0;
+ for (BasePage* page : swept_pages_) {
+ size += page->size();
+ }
+ LOG_HEAP_FREELIST_VERBOSE()
+ << "Heap size: " << size << "(" << ArenaIndex() << ")";
+ return size;
+}
+
+size_t NormalPageArena::FreeListSize() {
+ size_t free_size = free_list_.FreeListSize();
+ LOG_HEAP_FREELIST_VERBOSE()
+ << "Free size: " << free_size << "(" << ArenaIndex() << ")";
+ return free_size;
+}
+
+void NormalPageArena::SweepAndCompact() {
+ ThreadHeap& heap = GetThreadState()->Heap();
+ if (!heap.Compaction()->IsCompactingArena(ArenaIndex()))
+ return;
+
+ if (SweepingCompleted()) {
+ heap.Compaction()->FinishedArenaCompaction(this, 0, 0);
+ return;
+ }
+
+ // Compaction is performed in-place, sliding objects down over unused
+ // holes for a smaller heap page footprint and improved locality.
+ // A "compaction pointer" is consequently kept, pointing to the next
+ // available address to move objects down to. It will belong to one
+ // of the already sweep-compacted pages for this arena, but as compaction
+ // proceeds, it will not belong to the same page as the one being
+ // currently compacted.
+ //
+ // The compaction pointer is represented by the
+ // |(currentPage, allocationPoint)| pair, with |allocationPoint|
+ // being the offset into |currentPage|, making up the next
+ // available location. When the compaction of an arena page causes the
+ // compaction pointer to exhaust the current page it is compacting into,
+ // page compaction will advance the current page of the compaction
+ // pointer, as well as the allocation point.
+ //
+ // By construction, the page compaction can be performed without having
+ // to allocate any new pages. So to arrange for the page compaction's
+ // supply of freed, available pages, we chain them together after each
+ // has been "compacted from". The page compaction will then reuse those
+ // as needed, and once finished, the chained, available pages can be
+ // released back to the OS.
+ //
+ // To ease the passing of the compaction state when iterating over an
+ // arena's pages, package it up into a |CompactionContext|.
+ NormalPage::CompactionContext context;
+ context.compacted_pages_ = &swept_pages_;
+
+ while (BasePage* page = unswept_pages_.Pop()) {
+ // Large objects do not belong to this arena.
+ DCHECK(!page->IsLargeObjectPage());
+ NormalPage* normal_page = static_cast<NormalPage*>(page);
+ normal_page->MarkAsSwept();
+    // The first page becomes the compaction target; any subsequent page is
+    // added onto the available pages chain.
+ if (!context.current_page_) {
+ context.current_page_ = normal_page;
+ } else {
+ context.available_pages_.Push(normal_page);
+ }
+ normal_page->SweepAndCompact(context);
+ }
+
+ // All pages were empty; nothing to compact.
+ if (!context.current_page_) {
+ heap.Compaction()->FinishedArenaCompaction(this, 0, 0);
+ return;
+ }
+
+ size_t freed_size = 0;
+ size_t freed_page_count = 0;
+
+ // If the current page hasn't been allocated into, add it to the available
+ // list, for subsequent release below.
+ size_t allocation_point = context.allocation_point_;
+ if (!allocation_point) {
+ context.available_pages_.Push(context.current_page_);
+ } else {
+ NormalPage* current_page = context.current_page_;
+ swept_pages_.Push(current_page);
+ if (allocation_point != current_page->PayloadSize()) {
+ // Put the remainder of the page onto the free list.
+ freed_size = current_page->PayloadSize() - allocation_point;
+ Address payload = current_page->Payload();
+ SET_MEMORY_INACCESSIBLE(payload + allocation_point, freed_size);
+ current_page->ArenaForNormalPage()->AddToFreeList(
+ payload + allocation_point, freed_size);
+ }
+ }
+
+ // Return available pages to the free page pool, decommitting them from
+ // the pagefile.
+#if DEBUG_HEAP_COMPACTION
+ std::stringstream stream;
+#endif
+ while (BasePage* available_pages = context.available_pages_.Pop()) {
+ size_t page_size = available_pages->size();
+#if DEBUG_HEAP_COMPACTION
+ if (!freed_page_count)
+ stream << "Releasing:";
+ stream << " [" << available_pages << ", "
+ << static_cast<void*>(reinterpret_cast<char*>(available_pages) +
+ page_size)
+ << "]";
+#endif
+ freed_size += page_size;
+ freed_page_count++;
+#if !(DCHECK_IS_ON() || defined(LEAK_SANITIZER) || \
+ defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER))
+ // Clear out the page before adding it to the free page pool, which
+ // decommits it. Recommitting the page must find a zeroed page later.
+ // We cannot assume that the OS will hand back a zeroed page across
+ // its "decommit" operation.
+ //
+ // If in a debug setting, the unused page contents will have been
+ // zapped already; leave it in that state.
+ DCHECK(!available_pages->IsLargeObjectPage());
+ NormalPage* unused_page = static_cast<NormalPage*>(available_pages);
+ memset(unused_page->Payload(), 0, unused_page->PayloadSize());
+#endif
+ available_pages->RemoveFromHeap();
+ }
+#if DEBUG_HEAP_COMPACTION
+ if (freed_page_count)
+ LOG_HEAP_COMPACTION() << stream.str();
+#endif
+ heap.Compaction()->FinishedArenaCompaction(this, freed_page_count,
+ freed_size);
+
+ VerifyObjectStartBitmap();
+}
+
+void NormalPageArena::VerifyObjectStartBitmap() {
+#if DCHECK_IS_ON()
+  // Verifying the object start bitmap requires iterability of pages. As
+  // compaction may have set up a new allocation area, reset it here.
+ SetAllocationPoint(nullptr, 0);
+ for (BasePage* page : swept_pages_) {
+ static_cast<NormalPage*>(page)
+ ->VerifyObjectStartBitmapIsConsistentWithPayload();
+ }
+#endif // DCHECK_IS_ON()
+}
+
+void BaseArena::VerifyMarking() {
+#if DCHECK_IS_ON()
+ // We cannot rely on other marking phases to clear the allocation area as
+ // for incremental marking the application is running between steps and
+ // might set up a new area. For large object arenas this is a no-op.
+ ResetAllocationPoint();
+
+ DCHECK(swept_unfinalized_pages_.IsEmpty());
+ DCHECK(swept_unfinalized_empty_pages_.IsEmpty());
+  // There may be objects on |swept_pages_| as pre-finalizers may allocate.
+  // These objects may point to other objects on |swept_pages_| or to marked
+  // objects on |unswept_pages_| but may never point to a dead (unmarked)
+  // object in |unswept_pages_|.
+ for (BasePage* page : swept_pages_) {
+ page->VerifyMarking();
+ }
+ for (BasePage* page : unswept_pages_) {
+ page->VerifyMarking();
+ }
+#endif // DCHECK_IS_ON()
+}
+
+#if DCHECK_IS_ON()
+bool NormalPageArena::IsConsistentForGC() {
+ // A thread heap is consistent for sweeping if none of the pages to be swept
+ // contain a freelist block or the current allocation point.
+ FreeListEntry* entry = free_list_.FindEntry([this](FreeListEntry* entry) {
+ return PagesToBeSweptContains(entry->GetAddress());
+ });
+ if (entry)
+ return false;
+
+ if (HasCurrentAllocationArea()) {
+ if (PagesToBeSweptContains(CurrentAllocationPoint()))
+ return false;
+ }
+ return true;
+}
+
+bool NormalPageArena::PagesToBeSweptContains(ConstAddress address) const {
+ for (BasePage* page : unswept_pages_) {
+ if (page->Contains(address))
+ return true;
+ }
+ return false;
+}
+#endif
+
+void NormalPageArena::AllocatePage() {
+ PageMemory* page_memory =
+ GetThreadState()->Heap().GetFreePagePool()->Take(ArenaIndex());
+
+ if (!page_memory) {
+ // Allocate a memory region for blinkPagesPerRegion pages that
+ // will each have the following layout.
+ //
+ // [ guard os page | ... payload ... | guard os page ]
+ // ^---{ aligned to blink page size }
+ PageMemoryRegion* region = PageMemoryRegion::AllocateNormalPages(
+ GetThreadState()->Heap().GetRegionTree());
+
+ // Setup the PageMemory object for each of the pages in the region.
+ for (size_t i = 0; i < kBlinkPagesPerRegion; ++i) {
+ PageMemory* memory = PageMemory::SetupPageMemoryInRegion(
+ region, i * kBlinkPageSize, BlinkPagePayloadSize());
+ // Take the first possible page ensuring that this thread actually
+ // gets a page and add the rest to the page pool.
+ if (!page_memory) {
+ bool result = memory->Commit();
+        // If you hit this CHECK, it means that you have hit the limit on the
+        // number of mmapped regions the OS can support
+        // (e.g., /proc/sys/vm/max_map_count on Linux), or that on Windows you
+        // have exceeded the maximum commit charge across all processes for
+        // the system.
+ CHECK(result);
+ page_memory = memory;
+ } else {
+ GetThreadState()->Heap().GetFreePagePool()->Add(ArenaIndex(), memory);
+ }
+ }
+ }
+ NormalPage* page =
+ new (page_memory->WritableStart()) NormalPage(page_memory, this);
+ swept_pages_.PushLocked(page);
+
+ ThreadHeap& heap = GetThreadState()->Heap();
+ heap.stats_collector()->IncreaseAllocatedSpace(page->size());
+ heap.page_bloom_filter()->Add(page->GetAddress());
+#if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
+ // Allow the following addToFreeList() to add the newly allocated memory
+ // to the free list.
+ ASAN_UNPOISON_MEMORY_REGION(page->Payload(), page->PayloadSize());
+ Address address = page->Payload();
+ for (size_t i = 0; i < page->PayloadSize(); i++)
+ address[i] = kReuseAllowedZapValue;
+ ASAN_POISON_MEMORY_REGION(page->Payload(), page->PayloadSize());
+#endif
+ AddToFreeList(page->Payload(), page->PayloadSize());
+ SynchronizedStore(page);
+}
+
+void NormalPageArena::FreePage(NormalPage* page) {
+ ThreadHeap& heap = GetThreadState()->Heap();
+ heap.stats_collector()->DecreaseAllocatedSpace(page->size());
+ heap.page_bloom_filter()->Remove(page->GetAddress());
+
+ PageMemory* memory = page->Storage();
+ page->~NormalPage();
+ GetThreadState()->Heap().GetFreePagePool()->Add(ArenaIndex(), memory);
+}
+
+PlatformAwareObjectStartBitmap::PlatformAwareObjectStartBitmap(Address offset)
+ : ObjectStartBitmap(offset) {}
+
+ObjectStartBitmap::ObjectStartBitmap(Address offset) : offset_(offset) {
+ Clear();
+}
+
+void ObjectStartBitmap::Clear() {
+ memset(&object_start_bit_map_, 0, kReservedForBitmap);
+}
+
+void NormalPageArena::PromptlyFreeObject(HeapObjectHeader* header) {
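+  // Eagerly finalizes and frees an object outside of a GC cycle. If the
+  // object sits right at the current allocation point, the linear allocation
+  // area is rewound; otherwise the memory is handed back via
+  // PromptlyFreeObjectInFreeList().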
+ DCHECK(!GetThreadState()->IsMarkingInProgress());
+ DCHECK(!GetThreadState()->SweepForbidden());
+ Address address = reinterpret_cast<Address>(header);
+ Address payload = header->Payload();
+ size_t size = header->size();
+ size_t payload_size = header->PayloadSize();
+ DCHECK_GT(size, 0u);
+#if DCHECK_IS_ON()
+ DCHECK_EQ(PageFromObject(address), FindPageFromAddress(address));
+#endif
+ {
+ ThreadState::SweepForbiddenScope forbidden_scope(GetThreadState());
+ header->Finalize(payload, payload_size);
+ if (IsObjectAllocatedAtAllocationPoint(header)) {
+ current_allocation_point_ -= size;
+ DCHECK_EQ(address, current_allocation_point_);
+ remaining_allocation_size_ += size;
+ SET_MEMORY_INACCESSIBLE(address, size);
+ // Memory that is part of the allocation point is not allowed to be part
+ // of the object start bit map.
+ reinterpret_cast<NormalPage*>(PageFromObject(header))
+ ->object_start_bit_map()
+ ->ClearBit(address);
+ return;
+ }
+ DCHECK(!header->IsMarked());
+ PromptlyFreeObjectInFreeList(header, size);
+ }
+}
+
+void NormalPageArena::PromptlyFreeObjectInFreeList(HeapObjectHeader* header,
+ size_t size) {
+ DCHECK(!header->IsMarked());
+ Address address = reinterpret_cast<Address>(header);
+ NormalPage* page = reinterpret_cast<NormalPage*>(PageFromObject(header));
+ if (page->HasBeenSwept()) {
+ Address payload = header->Payload();
+ size_t payload_size = header->PayloadSize();
+    // If the page has been swept, a promptly freed object may be adjacent to
+    // other free list entries. We make the object available for future
+    // allocation right away by adding it to the free list and increasing the
+    // promptly_freed_size_ counter, which may result in coalescing later.
+ SET_MEMORY_INACCESSIBLE(payload, payload_size);
+ CHECK_MEMORY_INACCESSIBLE(payload, payload_size);
+ AddToFreeList(address, size);
+ promptly_freed_size_ += size;
+ }
+ GetThreadState()->Heap().stats_collector()->DecreaseAllocatedObjectSize(size);
+}
+
+bool NormalPageArena::ExpandObject(HeapObjectHeader* header, size_t new_size) {
+ // It's possible that Vector requests a smaller expanded size because
+ // Vector::shrinkCapacity can set a capacity smaller than the actual payload
+ // size.
+ if (header->PayloadSize() >= new_size)
+ return true;
+ size_t allocation_size = ThreadHeap::AllocationSizeFromSize(new_size);
+ DCHECK_GT(allocation_size, header->size());
+ size_t expand_size = allocation_size - header->size();
+ if (IsObjectAllocatedAtAllocationPoint(header) &&
+ expand_size <= remaining_allocation_size_) {
+ current_allocation_point_ += expand_size;
+ DCHECK_GE(remaining_allocation_size_, expand_size);
+ remaining_allocation_size_ -= expand_size;
+ // Unpoison the memory used for the object (payload).
+ SET_MEMORY_ACCESSIBLE(header->PayloadEnd(), expand_size);
+ header->SetSize(allocation_size);
+#if DCHECK_IS_ON()
+ DCHECK(FindPageFromAddress(header->PayloadEnd() - 1));
+#endif
+ return true;
+ }
+ return false;
+}
+
+bool NormalPageArena::ShrinkObject(HeapObjectHeader* header, size_t new_size) {
+ DCHECK_GT(header->PayloadSize(), new_size);
+ size_t allocation_size = ThreadHeap::AllocationSizeFromSize(new_size);
+ DCHECK_GT(header->size(), allocation_size);
+ size_t shrink_size = header->size() - allocation_size;
+ if (IsObjectAllocatedAtAllocationPoint(header)) {
+ current_allocation_point_ -= shrink_size;
+ remaining_allocation_size_ += shrink_size;
+ SET_MEMORY_INACCESSIBLE(current_allocation_point_, shrink_size);
+ header->SetSize(allocation_size);
+ return true;
+ }
+ DCHECK_GE(shrink_size, sizeof(HeapObjectHeader));
+ DCHECK_GT(header->GcInfoIndex(), 0u);
+ Address shrink_address = header->PayloadEnd() - shrink_size;
+ HeapObjectHeader* freed_header = new (NotNull, shrink_address)
+ HeapObjectHeader(shrink_size, header->GcInfoIndex());
+ // Since only size has been changed, we don't need to update object starts.
+ PromptlyFreeObjectInFreeList(freed_header, shrink_size);
+#if DCHECK_IS_ON()
+ DCHECK_EQ(PageFromObject(reinterpret_cast<Address>(header)),
+ FindPageFromAddress(reinterpret_cast<Address>(header)));
+#endif
+ header->SetSize(allocation_size);
+
+ return false;
+}
+
+Address NormalPageArena::AllocateFromFreeList(size_t allocation_size,
+ size_t gc_info_index) {
+ FreeListEntry* entry = free_list_.Allocate(allocation_size);
+ if (!entry)
+ return nullptr;
+
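+  // Carve out the whole free list entry as the new linear allocation area;
+  // the requested object is then bump-allocated from its start.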
+ SetAllocationPoint(entry->GetAddress(), entry->size());
+ DCHECK(HasCurrentAllocationArea());
+ DCHECK_GE(RemainingAllocationSize(), allocation_size);
+ return AllocateObject(allocation_size, gc_info_index);
+}
+
+Address NormalPageArena::LazySweepPages(size_t allocation_size,
+ size_t gc_info_index) {
+ DCHECK(!HasCurrentAllocationArea());
+ Address result = nullptr;
+ // First, process unfinalized pages as finalizing a page is faster than
+ // sweeping.
+ while (BasePage* page = swept_unfinalized_pages_.PopLocked()) {
+ swept_pages_.PushLocked(page);
+ page->FinalizeSweep(SweepResult::kPageNotEmpty);
+ // For NormalPage, stop lazy sweeping once we find a slot to
+ // allocate a new object.
+ result = AllocateFromFreeList(allocation_size, gc_info_index);
+ if (result)
+ return result;
+ }
+ while (BasePage* page = unswept_pages_.PopLocked()) {
+ const bool is_empty = SweepUnsweptPage(page);
+ if (!is_empty) {
+ // For NormalPage, stop lazy sweeping once we find a slot to
+ // allocate a new object.
+ result = AllocateFromFreeList(allocation_size, gc_info_index);
+ if (result)
+ return result;
+ }
+ }
+ return result;
+}
+
+void NormalPageArena::SetAllocationPoint(Address point, size_t size) {
+#if DCHECK_IS_ON()
+ if (point) {
+ DCHECK(size);
+ BasePage* page = PageFromObject(point);
+ DCHECK(!page->IsLargeObjectPage());
+ DCHECK_LE(size, static_cast<NormalPage*>(page)->PayloadSize());
+ }
+#endif
+ // Free and clear the old linear allocation area.
+ if (HasCurrentAllocationArea()) {
+ AddToFreeList(CurrentAllocationPoint(), RemainingAllocationSize());
+ GetThreadState()->Heap().stats_collector()->DecreaseAllocatedObjectSize(
+ RemainingAllocationSize());
+ }
+ // Set up a new linear allocation area.
+ current_allocation_point_ = point;
+ remaining_allocation_size_ = size;
+ // Update last allocated region in ThreadHeap. This must also be done if the
+ // allocation point is set to 0 (before doing GC), so that the last allocated
+ // region is automatically reset after GC.
+ GetThreadState()->Heap().SetLastAllocatedRegion(point, size);
+ if (point) {
+    // Only update the allocated size and object start bitmap if the area is
+    // actually set up with a non-null address.
+ GetThreadState()->Heap().stats_collector()->IncreaseAllocatedObjectSize(
+ size);
+    // The current allocation point can never be part of the object start
+    // bitmap because the area can grow or shrink. It will be added back
+    // before a GC when the allocation point is cleared.
+ NormalPage* page = reinterpret_cast<NormalPage*>(PageFromObject(point));
+ page->object_start_bit_map()
+ ->ClearBit<HeapObjectHeader::AccessMode::kAtomic>(point);
+ // Mark page as containing young objects.
+ page->SetAsYoung(true);
+ }
+}
+
+Address NormalPageArena::OutOfLineAllocate(size_t allocation_size,
+ size_t gc_info_index) {
+ Address result = OutOfLineAllocateImpl(allocation_size, gc_info_index);
+ GetThreadState()->Heap().stats_collector()->AllocatedObjectSizeSafepoint();
+ return result;
+}
+
+Address NormalPageArena::OutOfLineAllocateImpl(size_t allocation_size,
+ size_t gc_info_index) {
+ DCHECK_GT(allocation_size, RemainingAllocationSize());
+ DCHECK_GE(allocation_size, kAllocationGranularity);
+
+ // 1. If this allocation is big enough, allocate a large object.
+ if (allocation_size >= kLargeObjectSizeThreshold)
+ return AllocateLargeObject(allocation_size, gc_info_index);
+
+ // 2. Try to allocate from a free list.
+ Address result = AllocateFromFreeList(allocation_size, gc_info_index);
+ if (result)
+ return result;
+
+ // 3. Reset the allocation point.
+ SetAllocationPoint(nullptr, 0);
+
+ // 4. Lazily sweep pages of this heap until we find a freed area for
+ // this allocation or we finish sweeping all pages of this heap.
+ result = LazySweep(allocation_size, gc_info_index);
+ if (result)
+ return result;
+
+ // 5. Complete sweeping.
+ GetThreadState()->CompleteSweep();
+
+ // 6. Check if we should trigger a GC.
+ GetThreadState()->ScheduleGCIfNeeded();
+
+ // 7. Add a new page to this heap.
+ AllocatePage();
+
+ // 8. Try to allocate from a free list. This allocation must succeed.
+ result = AllocateFromFreeList(allocation_size, gc_info_index);
+ CHECK(result);
+ return result;
+}
+
+LargeObjectArena::LargeObjectArena(ThreadState* state, int index)
+ : BaseArena(state, index) {}
+
+Address LargeObjectArena::AllocateLargeObjectPage(size_t allocation_size,
+ size_t gc_info_index) {
+  // The caller already added space for the object header and rounded up to
+  // allocation alignment.
+ DCHECK(!(allocation_size & kAllocationMask));
+
+  // 1. Try to sweep large objects totaling more than allocation_size bytes
+  // before allocating a new large object.
+ Address result = LazySweep(allocation_size, gc_info_index);
+ if (result)
+ return result;
+
+  // 2. If sweeping did not free up allocation_size bytes, complete sweeping
+  // before allocating this large object.
+ GetThreadState()->CompleteSweep();
+
+ // 3. Check if we should trigger a GC.
+ GetThreadState()->ScheduleGCIfNeeded();
+
+ return DoAllocateLargeObjectPage(allocation_size, gc_info_index);
+}
+
+Address LargeObjectArena::DoAllocateLargeObjectPage(size_t allocation_size,
+ size_t gc_info_index) {
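+  // A large object page hosts a single object: the page header is followed by
+  // the HeapObjectHeader, which is in turn followed by the payload.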
+ size_t large_object_size =
+ LargeObjectPage::PageHeaderSize() + allocation_size;
+// If ASan is supported, we add kAllocationGranularity bytes to the allocated
+// space and poison them to detect overflows.
+#if defined(ADDRESS_SANITIZER)
+ large_object_size += kAllocationGranularity;
+#endif
+
+ PageMemory* page_memory = PageMemory::Allocate(
+ large_object_size, GetThreadState()->Heap().GetRegionTree());
+ Address large_object_address = page_memory->WritableStart();
+ Address header_address =
+ large_object_address + LargeObjectPage::PageHeaderSize();
+#if DCHECK_IS_ON()
+  // Verify that the allocated PageMemory is zeroed as expected.
+ for (size_t i = 0; i < large_object_size; ++i)
+ DCHECK(!large_object_address[i]);
+#endif
+ DCHECK_GT(gc_info_index, 0u);
+ LargeObjectPage* large_object = new (large_object_address)
+ LargeObjectPage(page_memory, this, allocation_size);
+ HeapObjectHeader* header = new (NotNull, header_address)
+ HeapObjectHeader(kLargeObjectSizeInHeader, gc_info_index);
+ Address result = header_address + sizeof(*header);
+ DCHECK(!(reinterpret_cast<uintptr_t>(result) & kAllocationMask));
+
+  // Poison the object header and the kAllocationGranularity bytes after the
+  // object.
+ ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
+ ASAN_POISON_MEMORY_REGION(large_object->GetAddress() + large_object->size(),
+ kAllocationGranularity);
+
+ swept_pages_.PushLocked(large_object);
+
+ // Update last allocated region in ThreadHeap.
+ GetThreadState()->Heap().SetLastAllocatedRegion(large_object->Payload(),
+ large_object->PayloadSize());
+
+  // Add all segments of kBlinkPageSize to the bloom filter so that the large
+  // object can be kept alive by derived pointers on the stack. An alternative
+  // might be to prohibit derived pointers to large objects, but that is
+  // dangerous since the compiler is free to optimize on-stack base pointers
+  // away.
+ for (Address page_begin = RoundToBlinkPageStart(large_object->GetAddress());
+ page_begin < large_object->PayloadEnd(); page_begin += kBlinkPageSize) {
+ GetThreadState()->Heap().page_bloom_filter()->Add(page_begin);
+ }
+ GetThreadState()->Heap().stats_collector()->IncreaseAllocatedSpace(
+ large_object->size());
+ GetThreadState()->Heap().stats_collector()->IncreaseAllocatedObjectSize(
+ large_object->PayloadSize());
+ // Add page to the list of young pages.
+ large_object->SetAsYoung(true);
+ SynchronizedStore(large_object);
+ return result;
+}
+
+void LargeObjectArena::FreeLargeObjectPage(LargeObjectPage* object) {
+ ASAN_UNPOISON_MEMORY_REGION(object->Payload(), object->PayloadSize());
+ object->ObjectHeader()->Finalize(object->Payload(), object->PayloadSize());
+ ThreadHeap& heap = GetThreadState()->Heap();
+ heap.stats_collector()->DecreaseAllocatedSpace(object->size());
+ heap.page_bloom_filter()->Remove(object->GetAddress());
+
+ // Unpoison the object header and allocationGranularity bytes after the
+ // object before freeing.
+ ASAN_UNPOISON_MEMORY_REGION(object->ObjectHeader(), sizeof(HeapObjectHeader));
+ ASAN_UNPOISON_MEMORY_REGION(object->GetAddress() + object->size(),
+ kAllocationGranularity);
+
+ PageMemory* memory = object->Storage();
+ object->~LargeObjectPage();
+ delete memory;
+}
+
+Address LargeObjectArena::LazySweepPages(size_t allocation_size,
+ size_t gc_info_index) {
+ Address result = nullptr;
+ size_t swept_size = 0;
+ while (BasePage* page = unswept_pages_.PopLocked()) {
+ if (page->Sweep(FinalizeType::kInlined)) {
+ swept_size += static_cast<LargeObjectPage*>(page)->ObjectSize();
+ page->RemoveFromHeap();
+ // For LargeObjectPage, stop lazy sweeping once we have swept
+ // more than |allocation_size| bytes.
+ if (swept_size >= allocation_size) {
+ result = DoAllocateLargeObjectPage(allocation_size, gc_info_index);
+ DCHECK(result);
+ break;
+ }
+ } else {
+ swept_pages_.PushLocked(page);
+ page->MarkAsSwept();
+ }
+ }
+ return result;
+}
+
+FreeList::FreeList() : biggest_free_list_index_(0) {
+ Clear();
+}
+
+void FreeList::Add(Address address, size_t size) {
+ DCHECK_LT(size, BlinkPagePayloadSize());
+ // The free list entries are only pointer aligned (but when we allocate
+ // from them we are 8 byte aligned due to the header size).
+ DCHECK(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) &
+ kAllocationMask));
+ DCHECK(!(size & kAllocationMask));
+ DCHECK(!PageFromObject(address)->IsLargeObjectPage());
+ ASAN_UNPOISON_MEMORY_REGION(address, size);
+ FreeListEntry* entry;
+ if (size < sizeof(*entry)) {
+ // Create a dummy header with only a size and freelist bit set.
+ DCHECK_GE(size, sizeof(HeapObjectHeader));
+    // The encoded size marks the lost memory as freelist memory.
+ new (NotNull, address)
+ HeapObjectHeader(size, kGcInfoIndexForFreeListHeader);
+ ASAN_POISON_MEMORY_REGION(address, size);
+ // This memory gets lost. Sweeping can reclaim it.
+ return;
+ }
+ entry = new (NotNull, address) FreeListEntry(size);
+
+#if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
+ // The following logic delays reusing free lists for (at least) one GC
+ // cycle. This is helpful to detect use-after-free errors that could be caused
+ // by lazy sweeping etc.
+ size_t allowed_count = 0;
+ size_t forbidden_count = 0;
+ GetAllowedAndForbiddenCounts(address, size, allowed_count, forbidden_count);
+ size_t entry_count = size - sizeof(FreeListEntry);
+ if (forbidden_count == entry_count) {
+ // If all values in the memory region are reuseForbiddenZapValue,
+ // we flip them to reuseAllowedZapValue. This allows the next
+ // addToFreeList() to add the memory region to the free list
+ // (unless someone concatenates the memory region with another memory
+ // region that contains reuseForbiddenZapValue.)
+ for (size_t i = sizeof(FreeListEntry); i < size; i++)
+ address[i] = kReuseAllowedZapValue;
+ ASAN_POISON_MEMORY_REGION(address, size);
+ // Don't add the memory region to the free list in this addToFreeList().
+ return;
+ }
+ if (allowed_count != entry_count) {
+ // If the memory region mixes reuseForbiddenZapValue and
+ // reuseAllowedZapValue, we (conservatively) flip all the values
+ // to reuseForbiddenZapValue. These values will be changed to
+ // reuseAllowedZapValue in the next addToFreeList().
+ for (size_t i = sizeof(FreeListEntry); i < size; i++)
+ address[i] = kReuseForbiddenZapValue;
+ ASAN_POISON_MEMORY_REGION(address, size);
+ // Don't add the memory region to the free list in this addToFreeList().
+ return;
+ }
+// We reach here only when all the values in the memory region are
+// reuseAllowedZapValue. In this case, we are allowed to add the memory
+// region to the free list and reuse it for another object.
+#endif
+ ASAN_POISON_MEMORY_REGION(address, size);
+
+ const int index = BucketIndexForSize(size);
+ entry->Link(&free_list_heads_[index]);
+ if (index > biggest_free_list_index_) {
+ biggest_free_list_index_ = index;
+ }
+ if (!entry->Next()) {
+ free_list_tails_[index] = entry;
+ }
+}
+
+void FreeList::MoveFrom(FreeList* other) {
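+  // Splice each of |other|'s per-bucket chains in front of the corresponding
+  // chain of this free list, leaving |other| empty.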
+#if DCHECK_IS_ON()
+ const size_t expected_size = FreeListSize() + other->FreeListSize();
+#endif
+
+ // Newly created entries get added to the head.
+ for (size_t index = 0; index < kBlinkPageSizeLog2; ++index) {
+ FreeListEntry* other_tail = other->free_list_tails_[index];
+ FreeListEntry*& this_head = this->free_list_heads_[index];
+ if (other_tail) {
+ other_tail->Append(this_head);
+ if (!this_head) {
+ this->free_list_tails_[index] = other_tail;
+ }
+ this_head = other->free_list_heads_[index];
+ other->free_list_heads_[index] = nullptr;
+ other->free_list_tails_[index] = nullptr;
+ }
+ }
+
+ biggest_free_list_index_ =
+ std::max(biggest_free_list_index_, other->biggest_free_list_index_);
+ other->biggest_free_list_index_ = 0;
+
+#if DCHECK_IS_ON()
+ DCHECK_EQ(expected_size, FreeListSize());
+#endif
+ DCHECK(other->IsEmpty());
+}
+
+FreeListEntry* FreeList::Allocate(size_t allocation_size) {
+  // Try reusing a block from the largest bin. The underlying reasoning is
+  // that we want to amortize this slow allocation call by carving off as
+  // large a free block as possible in one go; a block that will service
+  // this allocation and let following allocations be serviced quickly by
+  // bump allocation.
+ size_t bucket_size = static_cast<size_t>(1) << biggest_free_list_index_;
+ int index = biggest_free_list_index_;
+ for (; index > 0; --index, bucket_size >>= 1) {
+ DCHECK(IsConsistent(index));
+ FreeListEntry* entry = free_list_heads_[index];
+ if (allocation_size > bucket_size) {
+      // Final bucket candidate; check whether the initial entry is able
+      // to service this allocation. Do not perform a linear scan,
+      // as it is considered too costly.
+ if (!entry || entry->size() < allocation_size)
+ break;
+ }
+ if (entry) {
+ if (!entry->Next()) {
+ DCHECK_EQ(entry, free_list_tails_[index]);
+ free_list_tails_[index] = nullptr;
+ }
+ entry->Unlink(&free_list_heads_[index]);
+ biggest_free_list_index_ = index;
+ return entry;
+ }
+ }
+ biggest_free_list_index_ = index;
+ return nullptr;
+}
+
+#if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
+ defined(MEMORY_SANITIZER)
+NO_SANITIZE_MEMORY
+void NOINLINE FreeList::GetAllowedAndForbiddenCounts(Address address,
+ size_t size,
+ size_t& allowed_count,
+ size_t& forbidden_count) {
+ for (size_t i = sizeof(FreeListEntry); i < size; i++) {
+ if (address[i] == kReuseAllowedZapValue)
+ allowed_count++;
+ else if (address[i] == kReuseForbiddenZapValue)
+ forbidden_count++;
+ else
+ NOTREACHED();
+ }
+}
+
+NO_SANITIZE_ADDRESS
+NO_SANITIZE_MEMORY
+void NOINLINE FreeList::ZapFreedMemory(Address address, size_t size) {
+ for (size_t i = 0; i < size; i++) {
+ // See the comment in addToFreeList().
+ if (address[i] != kReuseAllowedZapValue)
+ address[i] = kReuseForbiddenZapValue;
+ }
+}
+
+void NOINLINE FreeList::CheckFreedMemoryIsZapped(Address address, size_t size) {
+ for (size_t i = 0; i < size; i++) {
+ DCHECK(address[i] == kReuseAllowedZapValue ||
+ address[i] == kReuseForbiddenZapValue);
+ }
+}
+#endif
+
+size_t FreeList::FreeListSize() const {
+ size_t free_size = 0;
+ for (unsigned i = 0; i < kBlinkPageSizeLog2; ++i) {
+ FreeListEntry* entry = free_list_heads_[i];
+ while (entry) {
+ free_size += entry->size();
+ entry = entry->Next();
+ }
+ }
+#if DEBUG_HEAP_FREELIST
+ if (free_size) {
+ LOG_HEAP_FREELIST_VERBOSE() << "FreeList(" << this << "): " << free_size;
+ for (unsigned i = 0; i < kBlinkPageSizeLog2; ++i) {
+ FreeListEntry* entry = free_list_heads_[i];
+ size_t bucket = 0;
+ size_t count = 0;
+ while (entry) {
+ bucket += entry->size();
+ count++;
+ entry = entry->Next();
+ }
+ if (bucket) {
+ LOG_HEAP_FREELIST_VERBOSE()
+ << "[" << (0x1 << i) << ", " << (0x1 << (i + 1)) << "]: " << bucket
+ << " (" << count << ")";
+ }
+ }
+ }
+#endif
+ return free_size;
+}
+
+void FreeList::Clear() {
+ biggest_free_list_index_ = 0;
+ for (size_t i = 0; i < kBlinkPageSizeLog2; ++i) {
+ free_list_heads_[i] = nullptr;
+ free_list_tails_[i] = nullptr;
+ }
+}
+
+bool FreeList::IsEmpty() const {
+ if (biggest_free_list_index_)
+ return false;
+ for (size_t i = 0; i < kBlinkPageSizeLog2; ++i) {
+ if (free_list_heads_[i]) {
+ DCHECK(free_list_tails_[i]);
+ return false;
+ }
+ }
+ return true;
+}
+
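+// Returns floor(log2(size)), i.e. the index of the highest set bit; a block
+// of size [2^i, 2^(i+1)) therefore ends up in bucket i.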
+int FreeList::BucketIndexForSize(size_t size) {
+ DCHECK_GT(size, 0u);
+ int index = -1;
+ while (size) {
+ size >>= 1;
+ index++;
+ }
+ return index;
+}
+
+void FreeList::CollectStatistics(
+ ThreadState::Statistics::FreeListStatistics* stats) {
+ Vector<size_t> bucket_size;
+ Vector<size_t> free_count;
+ Vector<size_t> free_size;
+ for (size_t i = 0; i < kBlinkPageSizeLog2; ++i) {
+ size_t entry_count = 0;
+ size_t entry_size = 0;
+ for (FreeListEntry* entry = free_list_heads_[i]; entry;
+ entry = entry->Next()) {
+ ++entry_count;
+ entry_size += entry->size();
+ }
+ bucket_size.push_back(1 << i);
+ free_count.push_back(entry_count);
+ free_size.push_back(entry_size);
+ }
+ *stats = {std::move(bucket_size), std::move(free_count),
+ std::move(free_size)};
+}
+
+BasePage::BasePage(PageMemory* storage, BaseArena* arena, PageType page_type)
+ : storage_(storage),
+ arena_(arena),
+ thread_state_(arena->GetThreadState()),
+ page_type_(page_type) {
+#if DCHECK_IS_ON()
+ DCHECK(IsPageHeaderAddress(reinterpret_cast<Address>(this)));
+#endif
+}
+
+NormalPage::NormalPage(PageMemory* storage, BaseArena* arena)
+ : BasePage(storage, arena, PageType::kNormalPage),
+ object_start_bit_map_(Payload()) {
+#if DCHECK_IS_ON()
+ DCHECK(IsPageHeaderAddress(reinterpret_cast<Address>(this)));
+#endif // DCHECK_IS_ON()
+}
+
+NormalPage::~NormalPage() {
+#if DCHECK_IS_ON()
+ DCHECK(IsPageHeaderAddress(reinterpret_cast<Address>(this)));
+#endif
+}
+
+size_t NormalPage::ObjectPayloadSizeForTesting() {
+ size_t object_payload_size = 0;
+ Address header_address = Payload();
+ DCHECK_NE(header_address, PayloadEnd());
+ do {
+ HeapObjectHeader* header =
+ reinterpret_cast<HeapObjectHeader*>(header_address);
+ if (!header->IsFree()) {
+ object_payload_size += header->PayloadSize();
+ }
+ DCHECK_LT(header->size(), BlinkPagePayloadSize());
+ header_address += header->size();
+ DCHECK_LE(header_address, PayloadEnd());
+ } while (header_address < PayloadEnd());
+ return object_payload_size;
+}
+
+void NormalPage::RemoveFromHeap() {
+ ArenaForNormalPage()->FreePage(this);
+}
+
+#if !DCHECK_IS_ON() && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
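+// Discards the contents of all system pages fully contained in [begin, end);
+// partially covered pages at either boundary are left untouched.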
+static void DiscardPages(Address begin, Address end) {
+ uintptr_t begin_address =
+ base::RoundUpToSystemPage(reinterpret_cast<uintptr_t>(begin));
+ uintptr_t end_address =
+ base::RoundDownToSystemPage(reinterpret_cast<uintptr_t>(end));
+ if (begin_address < end_address) {
+ base::DiscardSystemPages(reinterpret_cast<void*>(begin_address),
+ end_address - begin_address);
+ }
+}
+#endif
+
+void NormalPage::ToBeFinalizedObject::Finalize() {
+ const size_t size = header->size();
+ // This is a fast version of header->PayloadSize().
+ const size_t payload_size = size - sizeof(HeapObjectHeader);
+ const Address payload = header->Payload();
+  // For ASan, unpoison the object before calling the finalizer. The
+  // finalized object will be zero-filled and poisoned afterwards.
+  // Given that all other unmarked objects are poisoned, ASan will detect
+  // an error if the finalizer touches any other on-heap object that
+  // dies in the same GC cycle.
+ ASAN_UNPOISON_MEMORY_REGION(payload, payload_size);
+
+ header->Finalize(payload, payload_size);
+ // This memory will be added to the freelist. Maintain the invariant
+ // that memory on the freelist is zero filled.
+ SET_MEMORY_INACCESSIBLE(reinterpret_cast<Address>(header), size);
+}
+
+void NormalPage::FinalizeSweep(SweepResult action) {
+ // Call finalizers.
+ for (ToBeFinalizedObject& object : to_be_finalized_objects_) {
+ object.Finalize();
+ }
+ to_be_finalized_objects_.clear();
+#if BUILDFLAG(BLINK_HEAP_YOUNG_GENERATION)
+ // Copy object start bit map.
+ DCHECK(cached_object_start_bit_map_);
+ object_start_bit_map_ = *cached_object_start_bit_map_;
+ cached_object_start_bit_map_.reset();
+#endif
+ // Merge freelists or unmap the page.
+ if (action == SweepResult::kPageNotEmpty) {
+ MergeFreeLists();
+ MarkAsSwept();
+ } else {
+ DCHECK(action == SweepResult::kPageEmpty);
+ RemoveFromHeap();
+ }
+}
+
+void NormalPage::AddToFreeList(Address start,
+ size_t size,
+ FinalizeType finalize_type,
+ bool found_finalizer) {
+ // If a free allocation block contains an object that is yet to be
+ // finalized, push it in a separate freelist to preserve the guarantee
+ // that all freelist entries are zeroed out.
+ if (found_finalizer && finalize_type == FinalizeType::kDeferred) {
+ FutureFreelistEntry entry{start, size};
+ unfinalized_freelist_.push_back(std::move(entry));
+ } else {
+ cached_freelist_.Add(start, size);
+#if BUILDFLAG(BLINK_HEAP_YOUNG_GENERATION)
+ cached_object_start_bit_map_->SetBit(start);
+#else
+ object_start_bit_map_.SetBit(start);
+#endif
+#if !DCHECK_IS_ON() && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
+ if (Arena()->GetThreadState()->IsMemoryReducingGC()) {
+ DiscardPages(start + sizeof(FreeListEntry), start + size);
+ }
+#endif
+ }
+}
+
+void NormalPage::MergeFreeLists() {
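+  // Publish the page-local free list built during sweeping into the arena
+  // free list; unfinalized blocks are added individually now that their
+  // finalizers have run.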
+ NormalPageArena* arena = ArenaForNormalPage();
+ arena->AddToFreeList(&cached_freelist_);
+ DCHECK(cached_freelist_.IsEmpty());
+
+ for (const FutureFreelistEntry& entry : unfinalized_freelist_) {
+ arena->AddToFreeList(entry.start, entry.size);
+#if !DCHECK_IS_ON() && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
+ if (Arena()->GetThreadState()->IsMemoryReducingGC()) {
+ DiscardPages(entry.start + sizeof(FreeListEntry),
+ entry.start + entry.size);
+ }
+#endif
+ }
+ unfinalized_freelist_.clear();
+}
+
+bool NormalPage::Sweep(FinalizeType finalize_type) {
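+  // Rebuilds the object start bitmap and free lists while scanning the page.
+  // Returns true if the page contains no marked (live) objects at all, in
+  // which case the caller releases the page.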
+ PlatformAwareObjectStartBitmap* bitmap;
+#if BUILDFLAG(BLINK_HEAP_YOUNG_GENERATION)
+ cached_object_start_bit_map_ =
+ std::make_unique<PlatformAwareObjectStartBitmap>(Payload());
+ bitmap = cached_object_start_bit_map_.get();
+#else
+ object_start_bit_map()->Clear();
+ bitmap = object_start_bit_map();
+#endif
+ cached_freelist_.Clear();
+ unfinalized_freelist_.clear();
+ Address start_of_gap = Payload();
+ bool found_finalizer = false;
+ for (Address header_address = start_of_gap; header_address < PayloadEnd();) {
+ HeapObjectHeader* header =
+ reinterpret_cast<HeapObjectHeader*>(header_address);
+ const size_t size = header->size();
+ DCHECK_GT(size, 0u);
+ DCHECK_LT(size, BlinkPagePayloadSize());
+
+ if (header->IsFree<HeapObjectHeader::AccessMode::kAtomic>()) {
+ // Zero the memory in the free list header to maintain the
+ // invariant that memory on the free list is zero filled.
+ // The rest of the memory is already on the free list and is
+ // therefore already zero filled.
+ SET_MEMORY_INACCESSIBLE(header_address,
+ std::min(size, sizeof(FreeListEntry)));
+ CHECK_MEMORY_INACCESSIBLE(header_address, size);
+ header_address += size;
+ continue;
+ }
+ if (!header->IsMarked<HeapObjectHeader::AccessMode::kAtomic>()) {
+ // The following accesses to the header are safe non-atomically, because
+ // we just established the invariant that the object is not marked.
+ ToBeFinalizedObject object{header};
+ if (finalize_type == FinalizeType::kInlined ||
+ !header->HasNonTrivialFinalizer()) {
+ // In case the header doesn't have a finalizer, we eagerly call a
+ // freehook.
+ // TODO(bikineev): It may be unsafe to do this concurrently.
+ object.Finalize();
+ } else {
+ to_be_finalized_objects_.push_back(std::move(object));
+ found_finalizer = true;
+ }
+ header_address += size;
+ continue;
+ }
+ if (start_of_gap != header_address) {
+ AddToFreeList(start_of_gap, header_address - start_of_gap, finalize_type,
+ found_finalizer);
+ found_finalizer = false;
+ }
+ bitmap->SetBit(header_address);
+#if !BUILDFLAG(BLINK_HEAP_YOUNG_GENERATION)
+ header->Unmark<HeapObjectHeader::AccessMode::kAtomic>();
+#endif
+ header_address += size;
+ start_of_gap = header_address;
+ }
+ // Only add the memory to the free list if the page is not completely empty
+ // and we are not at the end of the page. Empty pages are not added to the
+ // free list as the pages are removed immediately.
+ if (start_of_gap != Payload() && start_of_gap != PayloadEnd()) {
+ AddToFreeList(start_of_gap, PayloadEnd() - start_of_gap, finalize_type,
+ found_finalizer);
+ }
+ return start_of_gap == Payload();
+}
+
+void NormalPage::SweepAndCompact(CompactionContext& context) {
+ object_start_bit_map()->Clear();
+ NormalPage*& current_page = context.current_page_;
+ size_t& allocation_point = context.allocation_point_;
+
+ NormalPageArena* page_arena = ArenaForNormalPage();
+#if defined(ADDRESS_SANITIZER)
+ bool is_vector_arena =
+ ThreadHeap::IsVectorArenaIndex(page_arena->ArenaIndex());
+#endif
+ HeapCompact* compact = page_arena->GetThreadState()->Heap().Compaction();
+ for (Address header_address = Payload(); header_address < PayloadEnd();) {
+ HeapObjectHeader* header =
+ reinterpret_cast<HeapObjectHeader*>(header_address);
+ size_t size = header->size();
+ DCHECK_GT(size, 0u);
+ DCHECK_LT(size, BlinkPagePayloadSize());
+
+ if (header->IsFree()) {
+      // Unpoison the freelist entry so that we
+      // can compact into it as needed.
+ ASAN_UNPOISON_MEMORY_REGION(header_address, size);
+ header_address += size;
+ continue;
+ }
+ // This is a fast version of header->PayloadSize().
+ size_t payload_size = size - sizeof(HeapObjectHeader);
+ Address payload = header->Payload();
+ if (!header->IsMarked()) {
+      // For ASan, unpoison the object before calling the finalizer. The
+      // finalized object will be zero-filled and poisoned afterwards.
+      // Given that all other unmarked objects are poisoned, ASan will detect
+      // an error if the finalizer touches any other on-heap object that
+      // dies in the same GC cycle.
+ ASAN_UNPOISON_MEMORY_REGION(header_address, size);
+ // Compaction is currently launched only from AtomicPhaseEpilogue, so it's
+ // guaranteed to be on the mutator thread - no need to postpone
+ // finalization.
+ header->Finalize(payload, payload_size);
+
+// As compaction is under way, leave the freed memory accessible
+// while compacting the rest of the page. We just zap the payload
+// to catch out other finalizers trying to access it.
+#if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
+ defined(MEMORY_SANITIZER)
+ FreeList::ZapFreedMemory(payload, payload_size);
+#endif
+ header_address += size;
+ continue;
+ }
+#if !BUILDFLAG(BLINK_HEAP_YOUNG_GENERATION)
+ header->Unmark();
+#endif
+ // Allocate and copy over the live object.
+ Address compact_frontier = current_page->Payload() + allocation_point;
+ if (compact_frontier + size > current_page->PayloadEnd()) {
+ // Can't fit on current allocation page; add remaining onto the
+ // freelist and advance to next available page.
+ //
+ // TODO(sof): be more clever & compact later objects into
+ // |currentPage|'s unused slop.
+ context.compacted_pages_->Push(current_page);
+ size_t free_size = current_page->PayloadSize() - allocation_point;
+ if (free_size) {
+ SET_MEMORY_INACCESSIBLE(compact_frontier, free_size);
+ current_page->ArenaForNormalPage()->AddToFreeList(compact_frontier,
+ free_size);
+ }
+
+ current_page = static_cast<NormalPage*>(context.available_pages_.Pop());
+ allocation_point = 0;
+ compact_frontier = current_page->Payload();
+ }
+ if (LIKELY(compact_frontier != header_address)) {
+#if defined(ADDRESS_SANITIZER)
+      // Unpoison the header and, if it is a vector backing store object,
+      // drop the container annotations by unpoisoning the payload entirely.
+ ASAN_UNPOISON_MEMORY_REGION(header, sizeof(HeapObjectHeader));
+ if (is_vector_arena) {
+ ASAN_UNPOISON_MEMORY_REGION(payload, payload_size);
+ }
+#endif
+ // Use a non-overlapping copy, if possible.
+ if (current_page == this)
+ memmove(compact_frontier, header_address, size);
+ else
+ memcpy(compact_frontier, header_address, size);
+ compact->Relocate(payload, compact_frontier + sizeof(HeapObjectHeader));
+ }
+ current_page->object_start_bit_map()->SetBit(compact_frontier);
+ header_address += size;
+ allocation_point += size;
+ DCHECK(allocation_point <= current_page->PayloadSize());
+ }
+
+#if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
+ defined(MEMORY_SANITIZER)
+ // Zap the unused portion, until it is either compacted into or freed.
+ if (current_page != this) {
+ FreeList::ZapFreedMemory(Payload(), PayloadSize());
+ } else {
+ FreeList::ZapFreedMemory(Payload() + allocation_point,
+ PayloadSize() - allocation_point);
+ }
+#endif
+}
+
+void NormalPage::MakeConsistentForMutator() {
+ object_start_bit_map()->Clear();
+ Address start_of_gap = Payload();
+ NormalPageArena* normal_arena = ArenaForNormalPage();
+ for (Address header_address = Payload(); header_address < PayloadEnd();) {
+ HeapObjectHeader* header =
+ reinterpret_cast<HeapObjectHeader*>(header_address);
+ size_t size = header->size();
+ DCHECK_LT(size, BlinkPagePayloadSize());
+ if (header->IsFree()) {
+ // Zero the memory in the free list header to maintain the
+ // invariant that memory on the free list is zero filled.
+ // The rest of the memory is already on the free list and is
+ // therefore already zero filled.
+ SET_MEMORY_INACCESSIBLE(header_address, size < sizeof(FreeListEntry)
+ ? size
+ : sizeof(FreeListEntry));
+ CHECK_MEMORY_INACCESSIBLE(header_address, size);
+ header_address += size;
+ continue;
+ }
+ if (start_of_gap != header_address)
+ normal_arena->AddToFreeList(start_of_gap, header_address - start_of_gap);
+ if (header->IsMarked()) {
+ header->Unmark();
+ }
+ object_start_bit_map()->SetBit(header_address);
+ header_address += size;
+ start_of_gap = header_address;
+ DCHECK_LE(header_address, PayloadEnd());
+ }
+ if (start_of_gap != PayloadEnd())
+ normal_arena->AddToFreeList(start_of_gap, PayloadEnd() - start_of_gap);
+
+ VerifyObjectStartBitmapIsConsistentWithPayload();
+}
+
+// This is assumed to be called from the atomic pause, so no concurrency should
+// be involved here.
+void NormalPage::Unmark() {
+ const Address current_allocation_point =
+ ArenaForNormalPage()->CurrentAllocationPoint();
+ const size_t allocation_area_size =
+ ArenaForNormalPage()->RemainingAllocationSize();
+ for (Address header_address = Payload(); header_address < PayloadEnd();) {
+ // Since unmarking can happen inside IncrementalMarkingStart, the current
+ // allocation point can be set and we need to skip over it.
+ if (header_address == current_allocation_point && allocation_area_size) {
+ header_address += allocation_area_size;
+ continue;
+ }
+ HeapObjectHeader* header =
+ reinterpret_cast<HeapObjectHeader*>(header_address);
+ if (header->IsMarked()) {
+ header->Unmark();
+ }
+ header_address += header->size();
+ }
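+  // Also clear the card table; remembered old-to-young entries will be
+  // recorded again by the write barrier as needed.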
+ ClearCardTable();
+}
+
+#if defined(ADDRESS_SANITIZER)
+void NormalPage::PoisonUnmarkedObjects() {
+ for (Address header_address = Payload(); header_address < PayloadEnd();) {
+ HeapObjectHeader* header =
+ reinterpret_cast<HeapObjectHeader*>(header_address);
+ DCHECK_LT(header->size(), BlinkPagePayloadSize());
+    // Check for a free list entry first since we cannot call
+    // IsMarked() on a free list entry.
+ if (header->IsFree()) {
+ header_address += header->size();
+ continue;
+ }
+ if (!header->IsMarked()) {
+ ASAN_POISON_MEMORY_REGION(header->Payload(), header->PayloadSize());
+ }
+ header_address += header->size();
+ }
+}
+#endif
+
+void NormalPage::VerifyObjectStartBitmapIsConsistentWithPayload() {
+#if DCHECK_IS_ON()
+ HeapObjectHeader* current_header =
+ reinterpret_cast<HeapObjectHeader*>(Payload());
+ object_start_bit_map()->Iterate([this,
+ &current_header](Address object_address) {
+ const HeapObjectHeader* object_header =
+ reinterpret_cast<HeapObjectHeader*>(object_address);
+ DCHECK_EQ(object_header, current_header);
+ current_header = reinterpret_cast<HeapObjectHeader*>(object_address +
+ object_header->size());
+ // Skip over allocation area.
+ if (reinterpret_cast<Address>(current_header) ==
+ ArenaForNormalPage()->CurrentAllocationPoint()) {
+ current_header = reinterpret_cast<HeapObjectHeader*>(
+ ArenaForNormalPage()->CurrentAllocationPoint() +
+ ArenaForNormalPage()->RemainingAllocationSize());
+ }
+ });
+#endif // DCHECK_IS_ON()
+}
+
+void NormalPage::VerifyMarking() {
+ DCHECK(!ArenaForNormalPage()->CurrentAllocationPoint());
+ MarkingVerifier verifier(ArenaForNormalPage()->GetThreadState());
+ for (Address header_address = Payload(); header_address < PayloadEnd();) {
+ HeapObjectHeader* header =
+ reinterpret_cast<HeapObjectHeader*>(header_address);
+ verifier.VerifyObject(header);
+ header_address += header->size();
+ }
+}
+
+void LargeObjectPage::VerifyMarking() {
+ MarkingVerifier verifier(Arena()->GetThreadState());
+ verifier.VerifyObject(ObjectHeader());
+}
+
+HeapObjectHeader* NormalPage::ConservativelyFindHeaderFromAddress(
+ ConstAddress address) const {
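+  // Maps an arbitrary interior pointer to the header of the object containing
+  // it, or returns nullptr if the address points into a free block or into
+  // the current linear allocation area.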
+ if (!ContainedInObjectPayload(address))
+ return nullptr;
+ if (ArenaForNormalPage()->IsInCurrentAllocationPointRegion(address))
+ return nullptr;
+ HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(
+ object_start_bit_map()->FindHeader(address));
+ if (header->IsFree())
+ return nullptr;
+ DCHECK_LT(0u, header->GcInfoIndex());
+ DCHECK_GT(header->PayloadEnd(), address);
+ return header;
+}
+
+void NormalPage::CollectStatistics(
+ ThreadState::Statistics::ArenaStatistics* arena_stats) {
+ HeapObjectHeader* header = nullptr;
+ size_t live_size = 0;
+ for (Address header_address = Payload(); header_address < PayloadEnd();
+ header_address += header->size()) {
+ header = reinterpret_cast<HeapObjectHeader*>(header_address);
+ if (!header->IsFree()) {
+ // All non-free objects, dead or alive, are considered as live for the
+ // purpose of taking a snapshot.
+ live_size += header->size();
+ if (!NameClient::HideInternalName()) {
+ // Detailed names available.
+ uint32_t gc_info_index = header->GcInfoIndex();
+ arena_stats->object_stats.type_count[gc_info_index]++;
+ arena_stats->object_stats.type_bytes[gc_info_index] += header->size();
+ if (arena_stats->object_stats.type_name[gc_info_index].empty()) {
+ arena_stats->object_stats.type_name[gc_info_index] = header->Name();
+ }
+ }
+ }
+ }
+ arena_stats->committed_size_bytes += kBlinkPageSize;
+ arena_stats->used_size_bytes += live_size;
+ arena_stats->page_stats.emplace_back(
+ ThreadState::Statistics::PageStatistics{kBlinkPageSize, live_size});
+}
+
+#if DCHECK_IS_ON()
+bool NormalPage::Contains(ConstAddress addr) const {
+ Address blink_page_start = RoundToBlinkPageStart(GetAddress());
+ // Page is at aligned address plus guard page size.
+ DCHECK_EQ(blink_page_start, GetAddress() - BlinkGuardPageSize());
+ return blink_page_start <= addr && addr < blink_page_start + kBlinkPageSize;
+}
+#endif
+
+LargeObjectPage::LargeObjectPage(PageMemory* storage,
+ BaseArena* arena,
+ size_t object_size)
+ : BasePage(storage, arena, PageType::kLargeObjectPage),
+ object_size_(object_size)
+#ifdef ANNOTATE_CONTIGUOUS_CONTAINER
+ ,
+ is_vector_backing_page_(false)
+#endif
+{
+}
+
+size_t LargeObjectPage::ObjectPayloadSizeForTesting() {
+ return PayloadSize();
+}
+
+void LargeObjectPage::RemoveFromHeap() {
+ static_cast<LargeObjectArena*>(Arena())->FreeLargeObjectPage(this);
+}
+
+bool LargeObjectPage::Sweep(FinalizeType) {
+ if (!ObjectHeader()->IsMarked()) {
+ return true;
+ }
+#if !BUILDFLAG(BLINK_HEAP_YOUNG_GENERATION)
+ ObjectHeader()->Unmark();
+#endif
+ return false;
+}
+
+void LargeObjectPage::Unmark() {
+ HeapObjectHeader* header = ObjectHeader();
+ if (header->IsMarked()) {
+ header->Unmark();
+ }
+ SetRemembered(false);
+}
+
+void LargeObjectPage::MakeConsistentForMutator() {
+ Unmark();
+}
+
+void LargeObjectPage::FinalizeSweep(SweepResult action) {
+ if (action == SweepResult::kPageNotEmpty) {
+ MarkAsSwept();
+ } else {
+ DCHECK(action == SweepResult::kPageEmpty);
+ RemoveFromHeap();
+ }
+}
+
+#if defined(ADDRESS_SANITIZER)
+void LargeObjectPage::PoisonUnmarkedObjects() {
+ HeapObjectHeader* header = ObjectHeader();
+ if (!header->IsMarked()) {
+ ASAN_POISON_MEMORY_REGION(header->Payload(), header->PayloadSize());
+ }
+}
+#endif
+
+void LargeObjectPage::CollectStatistics(
+ ThreadState::Statistics::ArenaStatistics* arena_stats) {
+ HeapObjectHeader* header = ObjectHeader();
+ size_t live_size = 0;
+ // All non-free objects, dead or alive, are considered as live for the
+ // purpose of taking a snapshot.
+ live_size += ObjectSize();
+ if (!NameClient::HideInternalName()) {
+ // Detailed names available.
+ uint32_t gc_info_index = header->GcInfoIndex();
+ arena_stats->object_stats.type_count[gc_info_index]++;
+ arena_stats->object_stats.type_bytes[gc_info_index] += ObjectSize();
+ }
+
+ arena_stats->committed_size_bytes += size();
+ arena_stats->used_size_bytes += live_size;
+ arena_stats->page_stats.emplace_back(
+ ThreadState::Statistics::PageStatistics{size(), live_size});
+}
+
+#if DCHECK_IS_ON()
+bool LargeObjectPage::Contains(ConstAddress object) const {
+ return RoundToBlinkPageStart(GetAddress()) <= object &&
+ object < RoundToBlinkPageEnd(GetAddress() + size());
+}
+#endif
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/heap_page.h b/chromium/third_party/blink/renderer/platform/heap/impl/heap_page.h
new file mode 100644
index 00000000000..1c26d4ffe20
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/heap_page.h
@@ -0,0 +1,1614 @@
+/*
+ * Copyright (C) 2013 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_PAGE_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_PAGE_H_
+
+#include <stdint.h>
+#include <array>
+#include <atomic>
+
+#include "base/bits.h"
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+#include "third_party/blink/renderer/platform/heap/blink_gc.h"
+#include "third_party/blink/renderer/platform/heap/garbage_collected.h"
+#include "third_party/blink/renderer/platform/heap/heap_buildflags.h"
+#include "third_party/blink/renderer/platform/heap/impl/gc_info.h"
+#include "third_party/blink/renderer/platform/heap/impl/thread_state_statistics.h"
+#include "third_party/blink/renderer/platform/heap/impl/unsanitized_atomic.h"
+#include "third_party/blink/renderer/platform/heap/thread_state.h"
+#include "third_party/blink/renderer/platform/heap/visitor.h"
+#include "third_party/blink/renderer/platform/platform_export.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/partitions.h"
+#include "third_party/blink/renderer/platform/wtf/assertions.h"
+#include "third_party/blink/renderer/platform/wtf/container_annotations.h"
+#include "third_party/blink/renderer/platform/wtf/forward.h"
+#include "third_party/blink/renderer/platform/wtf/sanitizers.h"
+#include "third_party/blink/renderer/platform/wtf/threading_primitives.h"
+
+namespace blink {
+
+// TODO(palmer): Document the reason for 17.
+constexpr size_t kBlinkPageSizeLog2 = 17;
+constexpr size_t kBlinkPageSize = 1 << kBlinkPageSizeLog2;
+constexpr size_t kBlinkPageOffsetMask = kBlinkPageSize - 1;
+constexpr size_t kBlinkPageBaseMask = ~kBlinkPageOffsetMask;
+
+// We allocate pages at random addresses but in groups of kBlinkPagesPerRegion
+// at a given random address. We group pages so as not to spread out too much
+// over the address space, which would blow away the page tables and lead to
+// bad performance.
+constexpr size_t kBlinkPagesPerRegion = 10;
+
+// TODO(nya): Replace this with something like #if ENABLE_NACL.
+#if defined(ARCH_CPU_PPC64)
+// NaCl's system page size is 64 KiB. This causes a problem in Oilpan's heap
+// layout because Oilpan allocates two guard pages for each Blink page (whose
+// size is kBlinkPageSize = 2^17 = 128 KiB). So we don't use guard pages in
+// NaCl.
+// The same issue holds for ppc64 systems, which use a 64 KiB page size.
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+BlinkGuardPageSize() {
+ return 0;
+}
+#else
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+BlinkGuardPageSize() {
+ return base::SystemPageSize();
+}
+#endif
+
+// Double precision floats are more efficient when 8-byte aligned, so we 8-byte
+// align all allocations (even on 32 bit systems).
+static_assert(8 == sizeof(double), "We expect sizeof(double) to be 8");
+constexpr size_t kAllocationGranularity = sizeof(double);
+constexpr size_t kAllocationMask = kAllocationGranularity - 1;
+constexpr size_t kMaxHeapObjectSizeLog2 = 27;
+constexpr size_t kMaxHeapObjectSize = 1 << kMaxHeapObjectSizeLog2;
+constexpr size_t kLargeObjectSizeThreshold = kBlinkPageSize / 2;
+
+// A zap value used for freed memory that is allowed to be added to the free
+// list in the next call to AddToFreeList.
+constexpr uint8_t kReuseAllowedZapValue = 0x2a;
+// A zap value used for freed memory that is forbidden to be added to the free
+// list in the next call to AddToFreeList.
+constexpr uint8_t kReuseForbiddenZapValue = 0x2c;
+
+// In non-production builds, memory is zapped when it's freed. The zapped memory
+// is zeroed out when the memory is reused in ThreadHeap::AllocateObject.
+//
+// In production builds, memory is not zapped (for performance). The memory is
+// just zeroed out when it is added to the free list.
+#if defined(MEMORY_SANITIZER)
+// TODO(kojii): We actually need __msan_poison/unpoison here, but it'll be
+// added later.
+#define SET_MEMORY_INACCESSIBLE(address, size) \
+ FreeList::ZapFreedMemory(address, size);
+#define SET_MEMORY_ACCESSIBLE(address, size) memset((address), 0, (size))
+#define CHECK_MEMORY_INACCESSIBLE(address, size) \
+ ASAN_UNPOISON_MEMORY_REGION(address, size); \
+ FreeList::CheckFreedMemoryIsZapped(address, size); \
+ ASAN_POISON_MEMORY_REGION(address, size)
+#elif DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
+#define SET_MEMORY_INACCESSIBLE(address, size) \
+ FreeList::ZapFreedMemory(address, size); \
+ ASAN_POISON_MEMORY_REGION(address, size)
+#define SET_MEMORY_ACCESSIBLE(address, size) \
+ ASAN_UNPOISON_MEMORY_REGION(address, size); \
+ memset((address), 0, (size))
+#define CHECK_MEMORY_INACCESSIBLE(address, size) \
+ ASAN_UNPOISON_MEMORY_REGION(address, size); \
+ FreeList::CheckFreedMemoryIsZapped(address, size); \
+ ASAN_POISON_MEMORY_REGION(address, size)
+#else
+#define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size))
+#define SET_MEMORY_ACCESSIBLE(address, size) \
+ do { \
+ } while (false)
+#define CHECK_MEMORY_INACCESSIBLE(address, size) \
+ do { \
+ } while (false)
+#endif
+
+class NormalPageArena;
+class PageMemory;
+class BaseArena;
+class ThreadHeap;
+
+// HeapObjectHeader is a 32-bit object that has the following layout:
+//
+// | padding (32 bits) | Only present on 64-bit platforms.
+// | gc_info_index (14 bits) |
+// | unused (1 bit) |
+//   | in construction (1 bit)   | true: bit not set; false: bit set
+//
+// | size (14 bits) | Actually 17 bits because sizes are aligned.
+// | unused (1 bit) |
+// | mark bit (1 bit) |
+//
+// Notes:
+// - 14 bits for |gc_info_index| (type information) are enough as there are
+// fewer than 2^14 types allocated in Blink.
+// - |size| for regular objects is encoded with 14 bits but can actually
+// represent sizes up to |kBlinkPageSize| (2^17) because allocations are
+// always 8 byte aligned (see kAllocationGranularity).
+// - |size| for large objects is encoded as 0. The size of a large object is
+// stored in |LargeObjectPage::PayloadSize()|.
+// - |mark bit| and |in construction| bits are located in separate variables and
+//   therefore can be accessed concurrently. Since TSAN works with word-sized
+//   objects, they should still be accessed atomically.
+constexpr uint16_t kHeaderMarkBitMask = 1;
+constexpr uint16_t kHeaderSizeShift = 2;
+constexpr uint16_t kHeaderSizeMask =
+ static_cast<uint16_t>(((1 << 14) - 1) << kHeaderSizeShift);
+
+constexpr uint16_t kHeaderIsInConstructionMask = 1;
+constexpr uint16_t kHeaderGCInfoIndexShift = 2;
+constexpr uint16_t kHeaderGCInfoSize = static_cast<uint16_t>(1 << 14);
+constexpr uint16_t kHeaderGCInfoIndexMask =
+ static_cast<uint16_t>((kHeaderGCInfoSize - 1) << kHeaderGCInfoIndexShift);
+
+constexpr uint16_t kLargeObjectSizeInHeader = 0;
+constexpr uint16_t kGcInfoIndexForFreeListHeader = 0;
+constexpr size_t kNonLargeObjectPageSizeMax = 1 << kBlinkPageSizeLog2;
+
+static_assert(kHeaderGCInfoSize == GCInfoTable::kMaxIndex,
+ "GCInfoTable size and and header GCInfo index size must match");
+
+static_assert(
+ kNonLargeObjectPageSizeMax >= kBlinkPageSize,
+ "max size supported by HeapObjectHeader must at least be kBlinkPageSize");
+
+namespace internal {
+
+NO_SANITIZE_ADDRESS constexpr uint16_t EncodeSize(size_t size) {
+ // Essentially, gets optimized to >> 1.
+ return static_cast<uint16_t>((size << kHeaderSizeShift) /
+ kAllocationGranularity);
+}
+
+NO_SANITIZE_ADDRESS constexpr size_t DecodeSize(uint16_t encoded) {
+ // Essentially, gets optimized to << 1.
+ return ((encoded & kHeaderSizeMask) >> kHeaderSizeShift) *
+ kAllocationGranularity;
+}
+
+} // namespace internal
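For orientation, a worked round-trip of the encoding above, using only constants defined in this header: EncodeSize(48) == (48 << 2) / 8 == 24, and DecodeSize(24) == ((24 & kHeaderSizeMask) >> 2) * 8 == 48. Both helpers are constexpr, so the round-trip can be checked at compile time, as in this sketch:

// Illustration only: compile-time checks of the size encoding round-trip.
static_assert(internal::EncodeSize(48) == 24,
              "a 48-byte allocation encodes to 24");
static_assert(internal::DecodeSize(internal::EncodeSize(48)) == 48,
              "the size encoding round-trips for 8-byte-aligned sizes");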
+
+class PLATFORM_EXPORT HeapObjectHeader {
+ DISALLOW_NEW();
+
+ public:
+ enum class AccessMode : uint8_t { kNonAtomic, kAtomic };
+
+ static HeapObjectHeader* FromPayload(const void*);
+ template <AccessMode = AccessMode::kNonAtomic>
+ static HeapObjectHeader* FromInnerAddress(const void*);
+
+ // Checks sanity of the header given a payload pointer.
+ static void CheckFromPayload(const void*);
+
+ // If |gc_info_index| is 0, this header is interpreted as a free list header.
+ HeapObjectHeader(size_t, size_t);
+
+ template <AccessMode mode = AccessMode::kNonAtomic>
+ NO_SANITIZE_ADDRESS bool IsFree() const {
+ return GcInfoIndex<mode>() == kGcInfoIndexForFreeListHeader;
+ }
+
+ template <AccessMode mode = AccessMode::kNonAtomic>
+ NO_SANITIZE_ADDRESS uint32_t GcInfoIndex() const {
+ const uint16_t encoded =
+ LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
+ return (encoded & kHeaderGCInfoIndexMask) >> kHeaderGCInfoIndexShift;
+ }
+
+ template <AccessMode = AccessMode::kNonAtomic>
+ size_t size() const;
+ void SetSize(size_t size);
+
+ template <AccessMode = AccessMode::kNonAtomic>
+ bool IsLargeObject() const;
+
+ template <AccessMode = AccessMode::kNonAtomic>
+ bool IsMarked() const;
+ template <AccessMode = AccessMode::kNonAtomic>
+ void Unmark();
+ template <AccessMode = AccessMode::kNonAtomic>
+ bool TryMark();
+
+ template <AccessMode = AccessMode::kNonAtomic>
+ bool IsOld() const;
+
+ template <AccessMode = AccessMode::kNonAtomic>
+ bool IsInConstruction() const;
+ template <AccessMode = AccessMode::kNonAtomic>
+ void MarkFullyConstructed();
+
+ // The payload starts directly after the HeapObjectHeader, and the payload
+ // size does not include the sizeof(HeapObjectHeader).
+ Address Payload() const;
+ size_t PayloadSize() const;
+ template <AccessMode = AccessMode::kNonAtomic>
+ Address PayloadEnd() const;
+
+ void Finalize(Address, size_t);
+
+  // Returns true if the object has a non-trivial finalizer.
+ bool HasNonTrivialFinalizer() const;
+
+ // Returns a human-readable name of this object.
+ const char* Name() const;
+
+ private:
+ enum class EncodedHalf : uint8_t { kLow, kHigh };
+
+ template <AccessMode,
+ EncodedHalf part,
+ std::memory_order = std::memory_order_seq_cst>
+ uint16_t LoadEncoded() const;
+ template <AccessMode mode,
+ EncodedHalf part,
+ std::memory_order = std::memory_order_seq_cst>
+ void StoreEncoded(uint16_t bits, uint16_t mask);
+
+#if defined(ARCH_CPU_64_BITS)
+ uint32_t padding_ = 0;
+#endif // defined(ARCH_CPU_64_BITS)
+ uint16_t encoded_high_;
+ uint16_t encoded_low_;
+};
+
+class FreeListEntry final : public HeapObjectHeader {
+ public:
+ NO_SANITIZE_ADDRESS
+ explicit FreeListEntry(size_t size)
+ : HeapObjectHeader(size, kGcInfoIndexForFreeListHeader), next_(nullptr) {}
+
+ Address GetAddress() { return reinterpret_cast<Address>(this); }
+
+ NO_SANITIZE_ADDRESS
+ void Unlink(FreeListEntry** previous_next) {
+ *previous_next = next_;
+ next_ = nullptr;
+ }
+
+ NO_SANITIZE_ADDRESS
+ void Link(FreeListEntry** previous_next) {
+ next_ = *previous_next;
+ *previous_next = this;
+ }
+
+ NO_SANITIZE_ADDRESS
+ FreeListEntry* Next() const { return next_; }
+
+ NO_SANITIZE_ADDRESS
+ void Append(FreeListEntry* next) {
+ DCHECK(!next_);
+ next_ = next;
+ }
+
+ private:
+ FreeListEntry* next_;
+
+ friend class FreeList;
+};
+
+class FreeList {
+ DISALLOW_NEW();
+
+ public:
+ // Returns a bucket number for inserting a |FreeListEntry| of a given size.
+ // All entries in the given bucket, n, have size >= 2^n.
+ static int BucketIndexForSize(size_t);
+
+#if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
+ defined(MEMORY_SANITIZER)
+ static void GetAllowedAndForbiddenCounts(Address, size_t, size_t&, size_t&);
+ static void ZapFreedMemory(Address, size_t);
+ static void CheckFreedMemoryIsZapped(Address, size_t);
+#endif
+
+ FreeList();
+
+ FreeListEntry* Allocate(size_t);
+ void Add(Address, size_t);
+ void MoveFrom(FreeList*);
+ void Clear();
+
+ bool IsEmpty() const;
+ size_t FreeListSize() const;
+
+ void CollectStatistics(ThreadState::Statistics::FreeListStatistics*);
+
+ template <typename Predicate>
+ FreeListEntry* FindEntry(Predicate pred) {
+ for (size_t i = 0; i < kBlinkPageSizeLog2; ++i) {
+ for (FreeListEntry* entry = free_list_heads_[i]; entry;
+ entry = entry->Next()) {
+ if (pred(entry)) {
+ return entry;
+ }
+ }
+ }
+ return nullptr;
+ }
+
+ private:
+ bool IsConsistent(size_t index) const {
+ return (!free_list_heads_[index] && !free_list_tails_[index]) ||
+ (free_list_heads_[index] && free_list_tails_[index] &&
+ !free_list_tails_[index]->Next());
+ }
+
+ // All |FreeListEntry|s in the nth list have size >= 2^n.
+ FreeListEntry* free_list_heads_[kBlinkPageSizeLog2];
+ FreeListEntry* free_list_tails_[kBlinkPageSizeLog2];
+ int biggest_free_list_index_;
+};
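To illustrate the bucket invariant documented on BucketIndexForSize (the points below follow from that invariant alone, not from a particular implementation):

// A 48-byte FreeListEntry can be placed in bucket 5, the largest bucket
// satisfying 2^5 = 32 <= 48, while respecting the invariant.
// A 40-byte allocation can always be served from bucket 6 or higher without
// inspecting entry sizes, because every entry there is at least 64 bytes;
// entries in bucket 5 are only guaranteed to be >= 32 bytes and must be
// checked individually.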
+
+// Blink heap pages are set up with a guard page before and after the payload.
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+BlinkPagePayloadSize() {
+ return kBlinkPageSize - 2 * BlinkGuardPageSize();
+}
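Plugging in concrete numbers, a sketch assuming the common 4 KiB system page size:

// With BlinkGuardPageSize() == 4096:
//   BlinkPagePayloadSize() == 131072 - 2 * 4096 == 122880 bytes (120 KiB).
// On the ppc64 configuration above, guard pages are disabled and the full
// 128 KiB Blink page is payload.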
+
+// Blink heap pages are aligned to the Blink heap page size. Therefore, the
+// start of a Blink page can be obtained by rounding down to the Blink page
+// size.
+inline Address RoundToBlinkPageStart(Address address) {
+ return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) &
+ kBlinkPageBaseMask);
+}
+
+inline Address RoundToBlinkPageEnd(Address address) {
+ return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address - 1) &
+ kBlinkPageBaseMask) +
+ kBlinkPageSize;
+}
+
+// Masks an address down to the enclosing Blink page base address.
+inline Address BlinkPageAddress(Address address) {
+ return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) &
+ kBlinkPageBaseMask);
+}
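A worked example of the three helpers above, using kBlinkPageSize == 0x20000 and an arbitrary interior address:

// RoundToBlinkPageStart(0x20034567) == 0x20020000
// RoundToBlinkPageEnd(0x20034567)   == 0x20040000
// BlinkPageAddress(0x20034567)      == 0x20020000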
+
+inline bool VTableInitialized(const void* object_pointer) {
+ return !!(*reinterpret_cast<const ConstAddress*>(object_pointer));
+}
+
+#if DCHECK_IS_ON()
+
+// Sanity check for a page header address: the address of the page header should
+// be 1 OS page size away from being Blink page size-aligned.
+inline bool IsPageHeaderAddress(Address address) {
+ return !((reinterpret_cast<uintptr_t>(address) & kBlinkPageOffsetMask) -
+ BlinkGuardPageSize());
+}
+
+#endif
+
+// |FinalizeType| specifies when finalization should take place.
+// In case of concurrent sweeper we defer finalization to be done
+// on the main thread.
+enum class FinalizeType : uint8_t { kInlined, kDeferred };
+
+// |SweepResult| indicates if page turned out to be empty after sweeping.
+enum class SweepResult : uint8_t { kPageEmpty, kPageNotEmpty };
+
+// |PageType| indicates whether a page is used for normal objects or whether it
+// holds a large object.
+enum class PageType : uint8_t { kNormalPage, kLargeObjectPage };
+
+// |BasePage| is a base class for |NormalPage| and |LargeObjectPage|.
+//
+// - |NormalPage| is a page whose size is |kBlinkPageSize|. A |NormalPage| can
+// contain multiple objects. An object whose size is smaller than
+// |kLargeObjectSizeThreshold| is stored in a |NormalPage|.
+//
+// - |LargeObjectPage| is a page that contains only one object. The object size
+// is arbitrary. An object whose size is larger than |kBlinkPageSize| is
+//   stored as a single object in |LargeObjectPage|.
+//
+// Note: An object whose size is between |kLargeObjectSizeThreshold| and
+// |kBlinkPageSize| can go to either of |NormalPage| or |LargeObjectPage|.
+class BasePage {
+ DISALLOW_NEW();
+
+ public:
+ BasePage(PageMemory*, BaseArena*, PageType);
+ virtual ~BasePage() = default;
+
+  // Virtual methods are slow, so performance-sensitive methods should be
+ // defined as non-virtual methods on |NormalPage| and |LargeObjectPage|. The
+ // following methods are not performance-sensitive.
+ virtual size_t ObjectPayloadSizeForTesting() = 0;
+ virtual void RemoveFromHeap() = 0;
+ // Sweeps a page. Returns true when that page is empty and false otherwise.
+ // Does not create free list entries for empty pages.
+ virtual bool Sweep(FinalizeType) = 0;
+ virtual void MakeConsistentForMutator() = 0;
+ virtual void Unmark() = 0;
+
+ // Calls finalizers after sweeping is done.
+ virtual void FinalizeSweep(SweepResult) = 0;
+
+#if defined(ADDRESS_SANITIZER)
+ virtual void PoisonUnmarkedObjects() = 0;
+#endif
+
+ virtual void CollectStatistics(
+ ThreadState::Statistics::ArenaStatistics* arena_stats) = 0;
+
+#if DCHECK_IS_ON()
+ virtual bool Contains(ConstAddress) const = 0;
+#endif
+ virtual size_t size() const = 0;
+
+ Address GetAddress() const {
+ return reinterpret_cast<Address>(const_cast<BasePage*>(this));
+ }
+ PageMemory* Storage() const { return storage_; }
+ BaseArena* Arena() const { return arena_; }
+ ThreadState* thread_state() const { return thread_state_; }
+
+ // Returns true if this page has been swept by the ongoing sweep; false
+ // otherwise.
+ bool HasBeenSwept() const { return swept_; }
+
+ void MarkAsSwept() {
+ DCHECK(!swept_);
+ swept_ = true;
+ }
+
+ void MarkAsUnswept() {
+ DCHECK(swept_);
+ swept_ = false;
+ }
+
+ // Returns true if this page is a large object page; false otherwise.
+ bool IsLargeObjectPage() const {
+ return page_type_ == PageType::kLargeObjectPage;
+ }
+
+  // Young pages are pages that contain at least one young object.
+ bool IsYoung() const { return is_young_; }
+
+ void SetAsYoung(bool young) { is_young_ = young; }
+
+ virtual void VerifyMarking() = 0;
+
+ private:
+ void SynchronizedLoad() {
+#if defined(THREAD_SANITIZER)
+ WTF::AsAtomicPtr(&page_type_)->load(std::memory_order_acquire);
+#endif
+ }
+ void SynchronizedStore() {
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+#if defined(THREAD_SANITIZER)
+ WTF::AsAtomicPtr(&page_type_)->store(page_type_, std::memory_order_release);
+#endif
+ }
+
+ PageMemory* const storage_;
+ BaseArena* const arena_;
+ ThreadState* const thread_state_;
+
+ // Track the sweeping state of a page. Set to false at the start of a sweep,
+ // true upon completion of sweeping that page.
+ bool swept_ = true;
+ bool is_young_ = false;
+
+ PageType page_type_;
+
+ friend class BaseArena;
+ friend class ThreadHeap;
+};
+
+class PageStack : Vector<BasePage*> {
+ using Base = Vector<BasePage*>;
+
+ public:
+ PageStack() = default;
+
+ void Push(BasePage* page) { push_back(page); }
+
+ BasePage* Pop() {
+ if (IsEmpty())
+ return nullptr;
+ BasePage* top = back();
+ pop_back();
+ return top;
+ }
+
+ BasePage* Top() const {
+ if (IsEmpty())
+ return nullptr;
+ return back();
+ }
+
+ using Base::begin;
+ using Base::end;
+
+ using Base::clear;
+ using Base::erase;
+
+ using Base::IsEmpty;
+ using Base::size;
+};
+
+class PageStackThreadSafe : public PageStack {
+ public:
+ void PushLocked(BasePage* page) {
+ WTF::MutexLocker locker(mutex_);
+ Push(page);
+ }
+
+ BasePage* PopLocked() {
+ WTF::MutexLocker locker(mutex_);
+ return Pop();
+ }
+
+ bool IsEmptyLocked() const {
+ WTF::MutexLocker locker(mutex_);
+ return IsEmpty();
+ }
+
+ // Explicit unsafe move assignment.
+ void MoveFrom(PageStack&& other) { PageStack::operator=(std::move(other)); }
+
+ private:
+ mutable WTF::Mutex mutex_;
+};
+
+// A bitmap for recording object starts. Objects have to be allocated at a
+// minimum granularity of kAllocationGranularity (see Granularity()).
+//
+// Depends on internals such as:
+// - kBlinkPageSize
+// - kAllocationGranularity
+class PLATFORM_EXPORT ObjectStartBitmap {
+ USING_FAST_MALLOC(ObjectStartBitmap);
+
+ public:
+ // Granularity of addresses added to the bitmap.
+ static constexpr size_t Granularity() { return kAllocationGranularity; }
+
+ // Maximum number of entries in the bitmap.
+ static constexpr size_t MaxEntries() {
+ return kReservedForBitmap * kCellSize;
+ }
+
+ explicit ObjectStartBitmap(Address offset);
+
+  // Finds an object header based on an
+  // address_maybe_pointing_to_the_middle_of_object. Will search for an object
+ // start in decreasing address order.
+ template <
+ HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ Address FindHeader(
+ ConstAddress address_maybe_pointing_to_the_middle_of_object) const;
+
+ template <
+ HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ inline void SetBit(Address);
+ template <
+ HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ inline void ClearBit(Address);
+ template <
+ HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ inline bool CheckBit(Address) const;
+
+ // Iterates all object starts recorded in the bitmap.
+ //
+ // The callback is of type
+ // void(Address)
+ // and is passed the object start address as parameter.
+ template <typename Callback>
+ inline void Iterate(Callback) const;
+
+ // Clear the object start bitmap.
+ void Clear();
+
+ private:
+ template <
+ HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ void store(size_t cell_index, uint8_t value);
+ template <
+ HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ uint8_t load(size_t cell_index) const;
+
+ static const size_t kCellSize = sizeof(uint8_t) * 8;
+ static const size_t kCellMask = sizeof(uint8_t) * 8 - 1;
+ static const size_t kBitmapSize =
+ (kBlinkPageSize + ((kCellSize * kAllocationGranularity) - 1)) /
+ (kCellSize * kAllocationGranularity);
+ static const size_t kReservedForBitmap =
+ ((kBitmapSize + kAllocationMask) & ~kAllocationMask);
+
+ inline void ObjectStartIndexAndBit(Address, size_t*, size_t*) const;
+
+ Address offset_;
+  // The bitmap contains a bit for every kAllocationGranularity-aligned
+  // address on a NormalPage, i.e., for a page of size kBlinkPageSize.
+ uint8_t object_start_bit_map_[kReservedForBitmap];
+};
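Deriving the bitmap dimensions from the constants above (arithmetic only):

// kCellSize          == 8 bits per byte-sized cell
// kBitmapSize        == (131072 + 63) / 64 == 2048 bytes
// kReservedForBitmap == 2048 (already 8-byte aligned)
// MaxEntries()       == 2048 * 8 == 16384, i.e. one bit for every
//                       8-byte-aligned address of a 128 KiB Blink page.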
+
+// A platform-aware version of ObjectStartBitmap that provides
+// platform-specific optimizations (e.g. using non-atomic stores on ARMv7 when
+// not marking).
+class PLATFORM_EXPORT PlatformAwareObjectStartBitmap
+ : public ObjectStartBitmap {
+ USING_FAST_MALLOC(PlatformAwareObjectStartBitmap);
+
+ public:
+ explicit PlatformAwareObjectStartBitmap(Address offset);
+
+ template <
+ HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ inline void SetBit(Address);
+ template <
+ HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ inline void ClearBit(Address);
+
+ private:
+ template <HeapObjectHeader::AccessMode>
+ static bool ShouldForceNonAtomic();
+};
+
+class PLATFORM_EXPORT NormalPage final : public BasePage {
+ public:
+ NormalPage(PageMemory*, BaseArena*);
+ ~NormalPage() override;
+
+ Address Payload() const { return GetAddress() + PageHeaderSize(); }
+ static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+ PayloadSize() {
+ return (BlinkPagePayloadSize() - PageHeaderSize()) & ~kAllocationMask;
+ }
+ Address PayloadEnd() const { return Payload() + PayloadSize(); }
+ bool ContainedInObjectPayload(ConstAddress address) const {
+ return Payload() <= address && address < PayloadEnd();
+ }
+
+ size_t ObjectPayloadSizeForTesting() override;
+ void RemoveFromHeap() override;
+ bool Sweep(FinalizeType) override;
+ void MakeConsistentForMutator() override;
+ void Unmark() override;
+ void FinalizeSweep(SweepResult) override;
+#if defined(ADDRESS_SANITIZER)
+ void PoisonUnmarkedObjects() override;
+#endif
+
+ void CollectStatistics(
+ ThreadState::Statistics::ArenaStatistics* arena_stats) override;
+
+#if DCHECK_IS_ON()
+  // Returns true for the whole |kBlinkPageSize| page that this page is on,
+  // including the page header and the unmapped guard page at the start. That
+  // ensures the result can be used to populate the negative page cache.
+ bool Contains(ConstAddress) const override;
+#endif
+ size_t size() const override { return kBlinkPageSize; }
+ static constexpr size_t PageHeaderSize() {
+ // Compute the amount of padding we have to add to a header to make the size
+ // of the header plus the padding a multiple of 8 bytes.
+ constexpr size_t kPaddingSize =
+ (sizeof(NormalPage) + kAllocationGranularity -
+ (sizeof(HeapObjectHeader) % kAllocationGranularity)) %
+ kAllocationGranularity;
+ return sizeof(NormalPage) + kPaddingSize;
+ }
+
+ inline NormalPageArena* ArenaForNormalPage() const;
+
+ // Context object holding the state of the arena page compaction pass, passed
+ // in when compacting individual pages.
+ class CompactionContext {
+ STACK_ALLOCATED();
+
+ public:
+ // Page compacting into.
+ NormalPage* current_page_ = nullptr;
+ // Offset into |current_page_| to the next free address.
+ size_t allocation_point_ = 0;
+ // Vector of available pages to use for compaction. Page compaction picks
+ // the next one when the current one is exhausted.
+ PageStack available_pages_;
+ // Vector of pages that have been compacted. Page compaction will add
+ // compacted pages once the current one becomes exhausted.
+ PageStack* compacted_pages_;
+ };
+
+ void SweepAndCompact(CompactionContext&);
+
+ // Object start bitmap of this page.
+ PlatformAwareObjectStartBitmap* object_start_bit_map() {
+ return &object_start_bit_map_;
+ }
+ const PlatformAwareObjectStartBitmap* object_start_bit_map() const {
+ return &object_start_bit_map_;
+ }
+
+ // Verifies that the object start bitmap only contains a bit iff the object
+ // is also reachable through iteration on the page.
+ void VerifyObjectStartBitmapIsConsistentWithPayload();
+
+ // Uses the object_start_bit_map_ to find an object for a given address. The
+  // returned header is either nullptr, indicating that no object could be
+  // found, or it points to a valid object or free list entry.
+ // This method is called only during stack scanning when there are no
+ // concurrent markers, thus no atomics required.
+ HeapObjectHeader* ConservativelyFindHeaderFromAddress(ConstAddress) const;
+
+ // Uses the object_start_bit_map_ to find an object for a given address. It is
+ // assumed that the address points into a valid heap object. Use the
+ // conservative version if that assumption does not hold.
+ template <
+ HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ HeapObjectHeader* FindHeaderFromAddress(ConstAddress) const;
+
+ void VerifyMarking() override;
+
+ // Marks a card corresponding to address.
+ void MarkCard(Address address);
+
+ // Iterates over all objects in marked cards.
+ template <typename Function>
+ void IterateCardTable(Function function) const;
+
+ // Clears all bits in the card table.
+ void ClearCardTable() { card_table_.Clear(); }
+
+ private:
+  // Data structure that divides a page into a number of cards, each 512
+  // bytes in size. Marked cards are stored in bytes, not bits, to make the
+  // write barrier faster and reduce the chance of false sharing. This gives
+  // only ~0.1% memory overhead. Also, since there are guard pages before and
+  // after a Blink page, some of the card entries are wasted and unneeded.
+ class CardTable final {
+ public:
+ struct value_type {
+ uint8_t bit;
+ size_t index;
+ };
+
+ struct iterator {
+ iterator& operator++() {
+ ++index;
+ return *this;
+ }
+ value_type operator*() const { return {table->table_[index], index}; }
+ bool operator!=(iterator other) const {
+ return table != other.table || index != other.index;
+ }
+
+ size_t index = 0;
+ const CardTable* table = nullptr;
+ };
+
+ using const_iterator = iterator;
+
+ static constexpr size_t kBitsPerCard = 9;
+ static constexpr size_t kCardSize = 1 << kBitsPerCard;
+
+ const_iterator begin() const { return {FirstPayloadCard(), this}; }
+ const_iterator end() const { return {LastPayloadCard(), this}; }
+
+ void Mark(size_t card) {
+ DCHECK_LE(FirstPayloadCard(), card);
+ DCHECK_GT(LastPayloadCard(), card);
+ table_[card] = 1;
+ }
+
+ bool IsMarked(size_t card) const {
+ DCHECK_LE(FirstPayloadCard(), card);
+ DCHECK_GT(LastPayloadCard(), card);
+ return table_[card];
+ }
+
+ void Clear() { std::fill(table_.begin(), table_.end(), 0); }
+
+ private:
+ static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+ FirstPayloadCard() {
+ return (BlinkGuardPageSize() + NormalPage::PageHeaderSize()) / kCardSize;
+ }
+ static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+ LastPayloadCard() {
+ return (BlinkGuardPageSize() + BlinkPagePayloadSize()) / kCardSize;
+ }
+
+ std::array<uint8_t, kBlinkPageSize / kCardSize> table_{};
+ };
+
+ struct ToBeFinalizedObject {
+ HeapObjectHeader* header;
+ void Finalize();
+ };
+ struct FutureFreelistEntry {
+ Address start;
+ size_t size;
+ };
+
+ template <typename Function>
+ void IterateOnCard(Function function, size_t card_number) const;
+
+ void MergeFreeLists();
+ void AddToFreeList(Address start,
+ size_t size,
+ FinalizeType finalize_type,
+ bool found_finalizer);
+
+ CardTable card_table_;
+ PlatformAwareObjectStartBitmap object_start_bit_map_;
+#if BUILDFLAG(BLINK_HEAP_YOUNG_GENERATION)
+ std::unique_ptr<PlatformAwareObjectStartBitmap> cached_object_start_bit_map_;
+#endif
+ Vector<ToBeFinalizedObject> to_be_finalized_objects_;
+ FreeList cached_freelist_;
+ Vector<FutureFreelistEntry> unfinalized_freelist_;
+
+ friend class CardTableTest;
+};
+
+// Large allocations are allocated as separate objects and linked in a list.
+//
+// In order to use the same memory allocation routines for everything allocated
+// in the heap, large objects are considered heap pages containing only one
+// object.
+class PLATFORM_EXPORT LargeObjectPage final : public BasePage {
+ public:
+ static size_t PageHeaderSize() {
+ // Compute the amount of padding we have to add to a header to make the size
+ // of the header plus the padding a multiple of 8 bytes.
+ size_t padding_size =
+ (sizeof(LargeObjectPage) + kAllocationGranularity -
+ (sizeof(HeapObjectHeader) % kAllocationGranularity)) %
+ kAllocationGranularity;
+ return sizeof(LargeObjectPage) + padding_size;
+ }
+
+ LargeObjectPage(PageMemory*, BaseArena*, size_t);
+
+ // LargeObjectPage has the following memory layout:
+ // this -> +------------------+
+ // | Header | PageHeaderSize()
+ // ObjectHeader() -> +------------------+
+ // | HeapObjectHeader | sizeof(HeapObjectHeader)
+ // Payload() -> +------------------+
+ // | Object payload | PayloadSize()
+ // | |
+ // PayloadEnd() -> +------------------+
+ //
+ // ObjectSize(): PayloadSize() + sizeof(HeapObjectHeader)
+ // size(): ObjectSize() + PageHeaderSize()
+
+ HeapObjectHeader* ObjectHeader() const {
+ Address header_address = GetAddress() + PageHeaderSize();
+ return reinterpret_cast<HeapObjectHeader*>(header_address);
+ }
+
+  // Returns the size of the stored object including its HeapObjectHeader.
+  // This differs from PayloadSize(), which excludes the header.
+ size_t ObjectSize() const { return object_size_; }
+
+ // Returns the size of the page including the header.
+ size_t size() const override { return PageHeaderSize() + object_size_; }
+
+ // Returns the payload start of the underlying object.
+ Address Payload() const { return ObjectHeader()->Payload(); }
+
+ // Returns the payload size of the underlying object.
+ size_t PayloadSize() const { return object_size_ - sizeof(HeapObjectHeader); }
+
+ // Points to the payload end of the underlying object.
+ Address PayloadEnd() const { return Payload() + PayloadSize(); }
+
+ bool ContainedInObjectPayload(ConstAddress address) const {
+ return Payload() <= address && address < PayloadEnd();
+ }
+
+ size_t ObjectPayloadSizeForTesting() override;
+ void RemoveFromHeap() override;
+ bool Sweep(FinalizeType) override;
+ void MakeConsistentForMutator() override;
+ void Unmark() override;
+ void FinalizeSweep(SweepResult) override;
+
+ void CollectStatistics(
+ ThreadState::Statistics::ArenaStatistics* arena_stats) override;
+
+ void VerifyMarking() override;
+
+#if defined(ADDRESS_SANITIZER)
+ void PoisonUnmarkedObjects() override;
+#endif
+
+#if DCHECK_IS_ON()
+ // Returns true for any address that is on one of the pages that this large
+ // object uses. That ensures that we can use a negative result to populate the
+ // negative page cache.
+ bool Contains(ConstAddress) const override;
+#endif
+
+#ifdef ANNOTATE_CONTIGUOUS_CONTAINER
+ void SetIsVectorBackingPage() { is_vector_backing_page_ = true; }
+ bool IsVectorBackingPage() const { return is_vector_backing_page_; }
+#endif
+
+  // Records whether the page contains inter-generational pointers.
+ void SetRemembered(bool remembered) { is_remembered_ = remembered; }
+ bool IsRemembered() const { return is_remembered_; }
+
+ private:
+ // The size of the underlying object including HeapObjectHeader.
+ size_t object_size_;
+#ifdef ANNOTATE_CONTIGUOUS_CONTAINER
+ bool is_vector_backing_page_;
+#endif
+ bool is_remembered_ = false;
+};
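Plugging a concrete payload into the layout diagram above (illustrative numbers only):

// For a large allocation whose payload is 1 MiB:
//   object_size_  == 1048576 + sizeof(HeapObjectHeader)
//   ObjectSize()  == object_size_
//   PayloadSize() == object_size_ - sizeof(HeapObjectHeader) == 1048576
//   size()        == PageHeaderSize() + object_size_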
+
+// Each thread has a number of thread arenas (e.g., Generic arenas, typed arenas
+// for |Node|, arenas for collection backings, etc.) and |BaseArena| represents
+// each thread arena.
+//
+// |BaseArena| is a parent class of |NormalPageArena| and |LargeObjectArena|.
+// |NormalPageArena| represents a part of a heap that contains |NormalPage|s,
+// and |LargeObjectArena| represents a part of a heap that contains
+// |LargeObjectPage|s.
+class PLATFORM_EXPORT BaseArena {
+ USING_FAST_MALLOC(BaseArena);
+
+ public:
+ BaseArena(ThreadState*, int);
+ virtual ~BaseArena();
+ void RemoveAllPages();
+
+ void CollectStatistics(std::string, ThreadState::Statistics*);
+ virtual void CollectFreeListStatistics(
+ ThreadState::Statistics::FreeListStatistics*) {}
+
+#if DCHECK_IS_ON()
+ BasePage* FindPageFromAddress(ConstAddress) const;
+#endif
+ virtual void ClearFreeLists() {}
+ virtual void MakeIterable() {}
+ virtual void MakeConsistentForGC();
+ void MakeConsistentForMutator();
+ void Unmark();
+#if DCHECK_IS_ON()
+ virtual bool IsConsistentForGC() = 0;
+#endif
+ size_t ObjectPayloadSizeForTesting();
+ void PrepareForSweep(BlinkGC::CollectionType);
+#if defined(ADDRESS_SANITIZER)
+ void PoisonUnmarkedObjects();
+#endif
+ Address LazySweep(size_t, size_t gc_info_index);
+ bool SweepUnsweptPage(BasePage*);
+ bool SweepUnsweptPageOnConcurrentThread(BasePage*);
+ // Returns true if we have swept all pages within the deadline. Returns false
+ // otherwise.
+ bool LazySweepWithDeadline(base::TimeTicks deadline);
+ // Returns true if the arena has been fully swept.
+ bool ConcurrentSweepOnePage();
+ void CompleteSweep();
+ void InvokeFinalizersOnSweptPages();
+
+ ThreadState* GetThreadState() { return thread_state_; }
+ int ArenaIndex() const { return index_; }
+
+ Address AllocateLargeObject(size_t allocation_size, size_t gc_info_index);
+
+ // Resets the allocation point if it exists for an arena.
+ virtual void ResetAllocationPoint() {}
+
+ void VerifyMarking();
+ virtual void VerifyObjectStartBitmap() {}
+
+ protected:
+ bool SweepingCompleted() const { return unswept_pages_.IsEmptyLocked(); }
+ bool SweepingAndFinalizationCompleted() const {
+ return unswept_pages_.IsEmptyLocked() &&
+ swept_unfinalized_pages_.IsEmptyLocked() &&
+ swept_unfinalized_empty_pages_.IsEmptyLocked();
+ }
+
+ // Pages for allocation.
+ PageStackThreadSafe swept_pages_;
+ // Pages that are being swept.
+ PageStackThreadSafe unswept_pages_;
+ // Pages that have been swept but contain unfinalized objects.
+ PageStackThreadSafe swept_unfinalized_pages_;
+ // Pages that have been swept and need to be removed from the heap.
+ PageStackThreadSafe swept_unfinalized_empty_pages_;
+
+ protected:
+ void SynchronizedStore(BasePage* page) { page->SynchronizedStore(); }
+
+ private:
+ virtual Address LazySweepPages(size_t, size_t gc_info_index) = 0;
+
+ ThreadState* thread_state_;
+
+ // Index into the page pools. This is used to ensure that the pages of the
+ // same type go into the correct page pool and thus avoid type confusion.
+ //
+ // TODO(palmer): Should this be size_t?
+ int index_;
+};
+
+class PLATFORM_EXPORT NormalPageArena final : public BaseArena {
+ public:
+ NormalPageArena(ThreadState*, int index);
+ void AddToFreeList(Address address, size_t size);
+ void AddToFreeList(FreeList* other) { free_list_.MoveFrom(other); }
+ void ClearFreeLists() override;
+ void CollectFreeListStatistics(
+ ThreadState::Statistics::FreeListStatistics*) override;
+ void MakeIterable() override;
+
+#if DCHECK_IS_ON()
+ bool IsConsistentForGC() override;
+ bool PagesToBeSweptContains(ConstAddress) const;
+#endif
+
+ Address AllocateObject(size_t allocation_size, size_t gc_info_index);
+
+ void FreePage(NormalPage*);
+
+ void PromptlyFreeObject(HeapObjectHeader*);
+ void PromptlyFreeObjectInFreeList(HeapObjectHeader*, size_t);
+ bool ExpandObject(HeapObjectHeader*, size_t);
+ bool ShrinkObject(HeapObjectHeader*, size_t);
+ size_t promptly_freed_size() const { return promptly_freed_size_; }
+
+ bool IsObjectAllocatedAtAllocationPoint(HeapObjectHeader* header) {
+ return header->PayloadEnd() == current_allocation_point_;
+ }
+
+ size_t ArenaSize();
+ size_t FreeListSize();
+
+ void SweepAndCompact();
+
+ void ResetAllocationPoint() override { SetAllocationPoint(nullptr, 0); }
+
+ void VerifyObjectStartBitmap() override;
+
+ Address CurrentAllocationPoint() const { return current_allocation_point_; }
+
+ bool IsInCurrentAllocationPointRegion(ConstAddress address) const {
+ return HasCurrentAllocationArea() &&
+ (CurrentAllocationPoint() <= address) &&
+ (address < (CurrentAllocationPoint() + RemainingAllocationSize()));
+ }
+
+ size_t RemainingAllocationSize() const { return remaining_allocation_size_; }
+
+ void MakeConsistentForGC() override;
+
+ template <typename Function>
+ void IterateAndClearCardTables(Function function);
+
+ private:
+ void AllocatePage();
+
+  // OutOfLineAllocate represents the slow-path allocation. The Impl-suffixed
+  // version contains just the allocation code, while the other version also
+  // invokes a safepoint where allocated bytes are reported to observers.
+ Address OutOfLineAllocate(size_t allocation_size, size_t gc_info_index);
+ Address OutOfLineAllocateImpl(size_t allocation_size, size_t gc_info_index);
+
+ Address AllocateFromFreeList(size_t, size_t gc_info_index);
+
+ Address LazySweepPages(size_t, size_t gc_info_index) override;
+
+ bool HasCurrentAllocationArea() const {
+ return CurrentAllocationPoint() && RemainingAllocationSize();
+ }
+ void SetAllocationPoint(Address, size_t);
+
+ FreeList free_list_;
+ Address current_allocation_point_;
+ size_t remaining_allocation_size_;
+
+ // The size of promptly freed objects in the heap. This counter is set to
+ // zero before sweeping when clearing the free list and after coalescing.
+ // It will increase for promptly freed objects on already swept pages.
+ size_t promptly_freed_size_;
+};
+
+class LargeObjectArena final : public BaseArena {
+ public:
+ LargeObjectArena(ThreadState*, int index);
+ Address AllocateLargeObjectPage(size_t, size_t gc_info_index);
+ void FreeLargeObjectPage(LargeObjectPage*);
+
+#if DCHECK_IS_ON()
+ bool IsConsistentForGC() override { return true; }
+#endif
+
+ template <typename Function>
+ void IterateAndClearRememberedPages(Function function);
+
+ private:
+ Address DoAllocateLargeObjectPage(size_t, size_t gc_info_index);
+ Address LazySweepPages(size_t, size_t gc_info_index) override;
+};
+
+// Masks an address down to the enclosing Oilpan heap base page. All Oilpan
+// heap pages are aligned at a Blink page boundary plus the size of a guard
+// page. This will work only for 1) a pointer pointing to a non-large object
+// and 2) a
+// pointer pointing to the beginning of a large object.
+//
+// FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our
+// typed arenas. This is only exported to enable tests in HeapTest.cpp.
+PLATFORM_EXPORT ALWAYS_INLINE BasePage* PageFromObject(const void* object) {
+ Address address = reinterpret_cast<Address>(const_cast<void*>(object));
+ BasePage* page = reinterpret_cast<BasePage*>(BlinkPageAddress(address) +
+ BlinkGuardPageSize());
+#if DCHECK_IS_ON()
+ DCHECK(page->Contains(address));
+#endif
+ return page;
+}
+
+inline HeapObjectHeader* HeapObjectHeader::FromPayload(const void* payload) {
+ Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
+ HeapObjectHeader* header =
+ reinterpret_cast<HeapObjectHeader*>(addr - sizeof(HeapObjectHeader));
+ return header;
+}
+
+template <HeapObjectHeader::AccessMode mode>
+inline HeapObjectHeader* HeapObjectHeader::FromInnerAddress(
+ const void* address) {
+ BasePage* const page = PageFromObject(address);
+ return page->IsLargeObjectPage()
+ ? static_cast<LargeObjectPage*>(page)->ObjectHeader()
+ : static_cast<NormalPage*>(page)->FindHeaderFromAddress<mode>(
+ reinterpret_cast<ConstAddress>(address));
+}
+
+inline void HeapObjectHeader::CheckFromPayload(const void* payload) {
+ (void)FromPayload(payload);
+}
+
+template <HeapObjectHeader::AccessMode mode>
+NO_SANITIZE_ADDRESS inline size_t HeapObjectHeader::size() const {
+  // Size is immutable after construction while either marking or sweeping
+  // is running, so a relaxed load (if mode == kAtomic) is enough.
+ uint16_t encoded_low_value =
+ LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
+ const size_t result = internal::DecodeSize(encoded_low_value);
+ // Large objects should not refer to header->size() but use
+ // LargeObjectPage::PayloadSize().
+ DCHECK(result != kLargeObjectSizeInHeader);
+ DCHECK(!PageFromObject(this)->IsLargeObjectPage());
+ return result;
+}
+
+NO_SANITIZE_ADDRESS inline void HeapObjectHeader::SetSize(size_t size) {
+ DCHECK(!PageFromObject(Payload())->thread_state()->IsIncrementalMarking());
+ DCHECK_LT(size, kNonLargeObjectPageSizeMax);
+ encoded_low_ = static_cast<uint16_t>(internal::EncodeSize(size) |
+ (encoded_low_ & ~kHeaderSizeMask));
+}
+
+template <HeapObjectHeader::AccessMode mode>
+NO_SANITIZE_ADDRESS inline bool HeapObjectHeader::IsLargeObject() const {
+ uint16_t encoded_low_value =
+ LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
+ return internal::DecodeSize(encoded_low_value) == kLargeObjectSizeInHeader;
+}
+
+template <HeapObjectHeader::AccessMode mode>
+NO_SANITIZE_ADDRESS inline bool HeapObjectHeader::IsInConstruction() const {
+ return (LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>() &
+ kHeaderIsInConstructionMask) == 0;
+}
+
+template <HeapObjectHeader::AccessMode mode>
+NO_SANITIZE_ADDRESS inline void HeapObjectHeader::MarkFullyConstructed() {
+ DCHECK(IsInConstruction());
+ StoreEncoded<mode, EncodedHalf::kHigh, std::memory_order_release>(
+ kHeaderIsInConstructionMask, kHeaderIsInConstructionMask);
+}
+
+inline Address HeapObjectHeader::Payload() const {
+ return reinterpret_cast<Address>(const_cast<HeapObjectHeader*>(this)) +
+ sizeof(HeapObjectHeader);
+}
+
+template <HeapObjectHeader::AccessMode mode>
+inline Address HeapObjectHeader::PayloadEnd() const {
+ return reinterpret_cast<Address>(const_cast<HeapObjectHeader*>(this)) +
+ size<mode>();
+}
+
+NO_SANITIZE_ADDRESS inline size_t HeapObjectHeader::PayloadSize() const {
+ const size_t size = internal::DecodeSize(encoded_low_);
+ if (UNLIKELY(size == kLargeObjectSizeInHeader)) {
+ DCHECK(PageFromObject(this)->IsLargeObjectPage());
+ return static_cast<LargeObjectPage*>(PageFromObject(this))->PayloadSize();
+ }
+ DCHECK(!PageFromObject(this)->IsLargeObjectPage());
+ return size - sizeof(HeapObjectHeader);
+}
+
+template <HeapObjectHeader::AccessMode mode>
+NO_SANITIZE_ADDRESS inline bool HeapObjectHeader::IsMarked() const {
+ const uint16_t encoded =
+ LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
+ return encoded & kHeaderMarkBitMask;
+}
+
+template <HeapObjectHeader::AccessMode mode>
+NO_SANITIZE_ADDRESS inline bool HeapObjectHeader::IsOld() const {
+ // Oilpan uses the sticky-mark-bits technique to encode old objects.
+ return IsMarked<mode>();
+}
+
+template <HeapObjectHeader::AccessMode mode>
+NO_SANITIZE_ADDRESS inline void HeapObjectHeader::Unmark() {
+ DCHECK(IsMarked<mode>());
+ StoreEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>(
+ 0u, kHeaderMarkBitMask);
+}
+
+// The function relies on size bits being unmodified when the function is
+// called, i.e. SetSize() and TryMark() can't be called concurrently.
+template <HeapObjectHeader::AccessMode mode>
+NO_SANITIZE_ADDRESS inline bool HeapObjectHeader::TryMark() {
+ if (mode == AccessMode::kNonAtomic) {
+ if (encoded_low_ & kHeaderMarkBitMask)
+ return false;
+ encoded_low_ |= kHeaderMarkBitMask;
+ return true;
+ }
+ auto* atomic_encoded = internal::AsUnsanitizedAtomic(&encoded_low_);
+ uint16_t old_value = atomic_encoded->load(std::memory_order_relaxed);
+ if (old_value & kHeaderMarkBitMask)
+ return false;
+ const uint16_t new_value = old_value | kHeaderMarkBitMask;
+ return atomic_encoded->compare_exchange_strong(old_value, new_value,
+ std::memory_order_relaxed);
+}
+
+inline Address NormalPageArena::AllocateObject(size_t allocation_size,
+ size_t gc_info_index) {
+ if (LIKELY(allocation_size <= remaining_allocation_size_)) {
+ Address header_address = current_allocation_point_;
+ current_allocation_point_ += allocation_size;
+ remaining_allocation_size_ -= allocation_size;
+ DCHECK_GT(gc_info_index, 0u);
+ new (NotNull, header_address)
+ HeapObjectHeader(allocation_size, gc_info_index);
+ DCHECK(!PageFromObject(header_address)->IsLargeObjectPage());
+ static_cast<NormalPage*>(PageFromObject(header_address))
+ ->object_start_bit_map()
+ ->SetBit<HeapObjectHeader::AccessMode::kAtomic>(header_address);
+ Address result = header_address + sizeof(HeapObjectHeader);
+ DCHECK(!(reinterpret_cast<uintptr_t>(result) & kAllocationMask));
+
+ SET_MEMORY_ACCESSIBLE(result, allocation_size - sizeof(HeapObjectHeader));
+#if DCHECK_IS_ON()
+ DCHECK(FindPageFromAddress(header_address + allocation_size - 1));
+#endif
+ return result;
+ }
+ return OutOfLineAllocate(allocation_size, gc_info_index);
+}
+
+inline NormalPageArena* NormalPage::ArenaForNormalPage() const {
+ return static_cast<NormalPageArena*>(Arena());
+}
+
+// Iterates over all card tables and clears them.
+template <typename Function>
+inline void NormalPageArena::IterateAndClearCardTables(Function function) {
+ for (BasePage* page : swept_pages_) {
+ auto* normal_page = static_cast<NormalPage*>(page);
+ normal_page->IterateCardTable(function);
+ normal_page->ClearCardTable();
+ }
+}
+
+// Iterates over all pages that may contain inter-generational pointers.
+template <typename Function>
+inline void LargeObjectArena::IterateAndClearRememberedPages(
+ Function function) {
+ for (BasePage* page : swept_pages_) {
+ auto* large_page = static_cast<LargeObjectPage*>(page);
+ if (large_page->IsRemembered()) {
+ function(large_page->ObjectHeader());
+ large_page->SetRemembered(false);
+ }
+ }
+}
+
+// static
+template <HeapObjectHeader::AccessMode mode>
+bool PlatformAwareObjectStartBitmap::ShouldForceNonAtomic() {
+#if defined(ARCH_CPU_ARMEL)
+ // Use non-atomic accesses on ARMv7 when marking is not active.
+ if (mode == HeapObjectHeader::AccessMode::kAtomic) {
+ if (LIKELY(!ThreadState::Current()->IsAnyIncrementalMarking()))
+ return true;
+ }
+#endif // defined(ARCH_CPU_ARMEL)
+ return false;
+}
+
+template <HeapObjectHeader::AccessMode mode>
+inline void PlatformAwareObjectStartBitmap::SetBit(Address header_address) {
+ if (ShouldForceNonAtomic<mode>()) {
+ ObjectStartBitmap::SetBit<HeapObjectHeader::AccessMode::kNonAtomic>(
+ header_address);
+ return;
+ }
+ ObjectStartBitmap::SetBit<mode>(header_address);
+}
+
+template <HeapObjectHeader::AccessMode mode>
+inline void PlatformAwareObjectStartBitmap::ClearBit(Address header_address) {
+ if (ShouldForceNonAtomic<mode>()) {
+ ObjectStartBitmap::ClearBit<HeapObjectHeader::AccessMode::kNonAtomic>(
+ header_address);
+ return;
+ }
+ ObjectStartBitmap::ClearBit<mode>(header_address);
+}
+
+template <HeapObjectHeader::AccessMode mode>
+inline void ObjectStartBitmap::store(size_t cell_index, uint8_t value) {
+ if (mode == HeapObjectHeader::AccessMode::kNonAtomic) {
+ object_start_bit_map_[cell_index] = value;
+ return;
+ }
+ WTF::AsAtomicPtr(&object_start_bit_map_[cell_index])
+ ->store(value, std::memory_order_release);
+}
+
+template <HeapObjectHeader::AccessMode mode>
+inline uint8_t ObjectStartBitmap::load(size_t cell_index) const {
+ if (mode == HeapObjectHeader::AccessMode::kNonAtomic) {
+ return object_start_bit_map_[cell_index];
+ }
+ return WTF::AsAtomicPtr(&object_start_bit_map_[cell_index])
+ ->load(std::memory_order_acquire);
+}
+
+template <HeapObjectHeader::AccessMode mode>
+inline void ObjectStartBitmap::SetBit(Address header_address) {
+ size_t cell_index, object_bit;
+ ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
+ // Only the mutator thread writes to the bitmap during concurrent marking,
+ // so no need for CAS here.
+ store<mode>(cell_index,
+ static_cast<uint8_t>(load(cell_index) | (1 << object_bit)));
+}
+
+template <HeapObjectHeader::AccessMode mode>
+inline void ObjectStartBitmap::ClearBit(Address header_address) {
+ size_t cell_index, object_bit;
+ ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
+ store<mode>(cell_index,
+ static_cast<uint8_t>(load(cell_index) & ~(1 << object_bit)));
+}
+
+template <HeapObjectHeader::AccessMode mode>
+inline bool ObjectStartBitmap::CheckBit(Address header_address) const {
+ size_t cell_index, object_bit;
+ ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
+ return load<mode>(cell_index) & (1 << object_bit);
+}
+
+inline void ObjectStartBitmap::ObjectStartIndexAndBit(Address header_address,
+ size_t* cell_index,
+ size_t* bit) const {
+ const size_t object_offset = header_address - offset_;
+ DCHECK(!(object_offset & kAllocationMask));
+ const size_t object_start_number = object_offset / kAllocationGranularity;
+ *cell_index = object_start_number / kCellSize;
+#if DCHECK_IS_ON()
+ const size_t bitmap_size = kBitmapSize;
+ DCHECK_LT(*cell_index, bitmap_size);
+#endif
+ *bit = object_start_number & kCellMask;
+}
+
+template <typename Callback>
+inline void ObjectStartBitmap::Iterate(Callback callback) const {
+ for (size_t cell_index = 0; cell_index < kReservedForBitmap; cell_index++) {
+ uint8_t value = load(cell_index);
+ while (value) {
+ const int trailing_zeroes = base::bits::CountTrailingZeroBits(value);
+ const size_t object_start_number =
+ (cell_index * kCellSize) + trailing_zeroes;
+ const Address object_address =
+ offset_ + (kAllocationGranularity * object_start_number);
+ callback(object_address);
+ // Clear current object bit in temporary value to advance iteration.
+ value &= ~(1 << (object_start_number & kCellMask));
+ }
+ }
+}
+
+template <HeapObjectHeader::AccessMode mode>
+Address ObjectStartBitmap::FindHeader(
+ ConstAddress address_maybe_pointing_to_the_middle_of_object) const {
+ size_t object_offset =
+ address_maybe_pointing_to_the_middle_of_object - offset_;
+ size_t object_start_number = object_offset / kAllocationGranularity;
+ size_t cell_index = object_start_number / kCellSize;
+#if DCHECK_IS_ON()
+ const size_t bitmap_size = kReservedForBitmap;
+ DCHECK_LT(cell_index, bitmap_size);
+#endif
+ size_t bit = object_start_number & kCellMask;
+ uint8_t byte = load<mode>(cell_index) & ((1 << (bit + 1)) - 1);
+ while (!byte) {
+ DCHECK_LT(0u, cell_index);
+ byte = load<mode>(--cell_index);
+ }
+ int leading_zeroes = base::bits::CountLeadingZeroBits(byte);
+ object_start_number =
+ (cell_index * kCellSize) + (kCellSize - 1) - leading_zeroes;
+ object_offset = object_start_number * kAllocationGranularity;
+ return object_offset + offset_;
+}
+
+NO_SANITIZE_ADDRESS inline HeapObjectHeader::HeapObjectHeader(
+ size_t size,
+ size_t gc_info_index) {
+ // sizeof(HeapObjectHeader) must be equal to or smaller than
+ // |kAllocationGranularity|, because |HeapObjectHeader| is used as a header
+  // for a freed entry. Given that the smallest entry size is
+  // |kAllocationGranularity|, |HeapObjectHeader| must fit into that size.
+ static_assert(
+ sizeof(HeapObjectHeader) <= kAllocationGranularity,
+ "size of HeapObjectHeader must be smaller than kAllocationGranularity");
+
+ DCHECK_LT(gc_info_index, GCInfoTable::kMaxIndex);
+ DCHECK_LT(size, kNonLargeObjectPageSizeMax);
+ DCHECK_EQ(0u, size & kAllocationMask);
+  // Relaxed memory order is enough, as the in-construction bit is
+  // created/synchronized as follows:
+ // - Page allocator gets zeroed page and uses page initialization fence.
+ // - Sweeper zeroes memory and synchronizes via global lock.
+ internal::AsUnsanitizedAtomic(&encoded_high_)
+ ->store(static_cast<uint16_t>(gc_info_index << kHeaderGCInfoIndexShift),
+ std::memory_order_relaxed);
+ encoded_low_ = internal::EncodeSize(size);
+ DCHECK(IsInConstruction());
+}
+
+template <HeapObjectHeader::AccessMode mode,
+ HeapObjectHeader::EncodedHalf part,
+ std::memory_order memory_order>
+NO_SANITIZE_ADDRESS inline uint16_t HeapObjectHeader::LoadEncoded() const {
+ const uint16_t& half =
+ part == EncodedHalf::kLow ? encoded_low_ : encoded_high_;
+ if (mode == AccessMode::kNonAtomic)
+ return half;
+ return internal::AsUnsanitizedAtomic(&half)->load(memory_order);
+}
+
+// Sets bits selected by the mask to the given value. Please note that atomicity
+// of the whole operation is not guaranteed.
+template <HeapObjectHeader::AccessMode mode,
+ HeapObjectHeader::EncodedHalf part,
+ std::memory_order memory_order>
+NO_SANITIZE_ADDRESS inline void HeapObjectHeader::StoreEncoded(uint16_t bits,
+ uint16_t mask) {
+ DCHECK_EQ(static_cast<uint16_t>(0u), bits & ~mask);
+ uint16_t& half = part == EncodedHalf::kLow ? encoded_low_ : encoded_high_;
+ if (mode == AccessMode::kNonAtomic) {
+ half = (half & ~mask) | bits;
+ return;
+ }
+  // We don't perform a CAS loop here, assuming that the data is constant and
+  // nobody except us can change this half concurrently.
+ auto* atomic_encoded = internal::AsUnsanitizedAtomic(&half);
+ uint16_t value = atomic_encoded->load(std::memory_order_relaxed);
+ value = (value & ~mask) | bits;
+ atomic_encoded->store(value, memory_order);
+}
+
+template <HeapObjectHeader::AccessMode mode>
+HeapObjectHeader* NormalPage::FindHeaderFromAddress(
+ ConstAddress address) const {
+ DCHECK(ContainedInObjectPayload(address));
+ HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(
+ object_start_bit_map()->FindHeader<mode>(address));
+ DCHECK_LT(0u, header->GcInfoIndex<mode>());
+ DCHECK_GT(header->PayloadEnd<HeapObjectHeader::AccessMode::kAtomic>(),
+ address);
+ return header;
+}
+
+template <typename Function>
+void NormalPage::IterateCardTable(Function function) const {
+ // TODO(bikineev): Consider introducing a "dirty" per-page bit to avoid
+  // the loop (this may in turn pessimize the barrier implementation).
+ for (auto card : card_table_) {
+ if (UNLIKELY(card.bit)) {
+ IterateOnCard(function, card.index);
+ }
+ }
+}
+
+// Iterates over all objects in the specified marked card. Note that since
+// objects are not aligned to card boundaries, iteration starts from an object
+// that may reside on the previous card.
+template <typename Function>
+void NormalPage::IterateOnCard(Function function, size_t card_number) const {
+#if DCHECK_IS_ON()
+ DCHECK(card_table_.IsMarked(card_number));
+ DCHECK(ArenaForNormalPage()->IsConsistentForGC());
+#endif
+
+ const Address card_begin = RoundToBlinkPageStart(GetAddress()) +
+ (card_number << CardTable::kBitsPerCard);
+ const Address card_end = card_begin + CardTable::kCardSize;
+ // Generational barrier marks cards corresponding to slots (not source
+ // objects), therefore the potential source object may reside on a
+ // previous card.
+ HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(
+ card_number == card_table_.begin().index
+ ? Payload()
+ : object_start_bit_map_.FindHeader(card_begin));
+ for (; header < reinterpret_cast<HeapObjectHeader*>(card_end);
+ reinterpret_cast<Address&>(header) += header->size()) {
+ if (!header->IsFree()) {
+ function(header);
+ }
+ }
+}
+
+inline void NormalPage::MarkCard(Address address) {
+#if DCHECK_IS_ON()
+ DCHECK(Contains(address));
+#endif
+ const size_t byte = reinterpret_cast<size_t>(address) & kBlinkPageOffsetMask;
+ const size_t card = byte / CardTable::kCardSize;
+ card_table_.Mark(card);
+}
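A worked example of the card computation above, with kCardSize == 512:

// For an address whose offset within its 128 KiB-aligned region is 0x8234:
//   byte == 0x8234 == 33332
//   card == 33332 / 512 == 65
// so entry 65 of card_table_ is set to 1.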
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_PAGE_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/heap_stats_collector.cc b/chromium/third_party/blink/renderer/platform/heap/impl/heap_stats_collector.cc
new file mode 100644
index 00000000000..97590b912b2
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/heap_stats_collector.cc
@@ -0,0 +1,275 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/heap/heap_stats_collector.h"
+
+#include <cmath>
+
+#include "base/check_op.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/partitions.h"
+
+namespace blink {
+
+void ThreadHeapStatsCollector::IncreaseCompactionFreedSize(size_t bytes) {
+ DCHECK(is_started_);
+ current_.compaction_freed_bytes += bytes;
+ current_.compaction_recorded_events = true;
+}
+
+void ThreadHeapStatsCollector::IncreaseCompactionFreedPages(size_t pages) {
+ DCHECK(is_started_);
+ current_.compaction_freed_pages += pages;
+ current_.compaction_recorded_events = true;
+}
+
+void ThreadHeapStatsCollector::IncreaseAllocatedObjectSize(size_t bytes) {
+ // The current GC may not have been started. This is ok as recording considers
+ // the whole time range between garbage collections.
+ pos_delta_allocated_bytes_since_prev_gc_ += bytes;
+}
+
+void ThreadHeapStatsCollector::IncreaseAllocatedObjectSizeForTesting(
+ size_t bytes) {
+ IncreaseAllocatedObjectSize(bytes);
+ AllocatedObjectSizeSafepointImpl();
+}
+
+void ThreadHeapStatsCollector::DecreaseAllocatedObjectSize(size_t bytes) {
+ // See IncreaseAllocatedObjectSize.
+ neg_delta_allocated_bytes_since_prev_gc_ += bytes;
+}
+
+void ThreadHeapStatsCollector::DecreaseAllocatedObjectSizeForTesting(
+ size_t bytes) {
+ DecreaseAllocatedObjectSize(bytes);
+ AllocatedObjectSizeSafepointImpl();
+}
+
+void ThreadHeapStatsCollector::AllocatedObjectSizeSafepoint() {
+ if (std::abs(pos_delta_allocated_bytes_since_prev_gc_ -
+ neg_delta_allocated_bytes_since_prev_gc_) > kUpdateThreshold) {
+ AllocatedObjectSizeSafepointImpl();
+ }
+}
+
+void ThreadHeapStatsCollector::AllocatedObjectSizeSafepointImpl() {
+ allocated_bytes_since_prev_gc_ +=
+ static_cast<int64_t>(pos_delta_allocated_bytes_since_prev_gc_) -
+ static_cast<int64_t>(neg_delta_allocated_bytes_since_prev_gc_);
+
+ // These observer methods may start or finalize GC. In case they trigger a
+ // final GC pause, the delta counters are reset there and the following
+ // observer calls are called with '0' updates.
+ ForAllObservers([this](ThreadHeapStatsObserver* observer) {
+ // Recompute delta here so that a GC finalization is able to clear the
+ // delta for other observer calls.
+ int64_t delta = pos_delta_allocated_bytes_since_prev_gc_ -
+ neg_delta_allocated_bytes_since_prev_gc_;
+ if (delta < 0) {
+ observer->DecreaseAllocatedObjectSize(static_cast<size_t>(-delta));
+ } else {
+ observer->IncreaseAllocatedObjectSize(static_cast<size_t>(delta));
+ }
+ });
+ pos_delta_allocated_bytes_since_prev_gc_ = 0;
+ neg_delta_allocated_bytes_since_prev_gc_ = 0;
+}
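+
+// A minimal worked example of the delta bookkeeping above (illustrative
+// values): after allocations of 1500 bytes and explicit frees of 300 bytes,
+// pos_delta == 1500 and neg_delta == 300. AllocatedObjectSizeSafepoint() sees
+// |1500 - 300| = 1200 > kUpdateThreshold (1024) and calls the Impl, which
+// folds the net +1200 into allocated_bytes_since_prev_gc_, reports
+// IncreaseAllocatedObjectSize(1200) to all observers, and resets both deltas
+// to zero.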
+
+void ThreadHeapStatsCollector::IncreaseAllocatedSpace(size_t bytes) {
+ allocated_space_bytes_ += bytes;
+ ForAllObservers([bytes](ThreadHeapStatsObserver* observer) {
+ observer->IncreaseAllocatedSpace(bytes);
+ });
+}
+
+void ThreadHeapStatsCollector::DecreaseAllocatedSpace(size_t bytes) {
+ allocated_space_bytes_ -= bytes;
+ ForAllObservers([bytes](ThreadHeapStatsObserver* observer) {
+ observer->DecreaseAllocatedSpace(bytes);
+ });
+}
+
+ThreadHeapStatsCollector::Event::Event() {
+ static std::atomic<size_t> counter{0};
+ unique_id = counter.fetch_add(1);
+}
+
+void ThreadHeapStatsCollector::NotifyMarkingStarted(
+ BlinkGC::CollectionType collection_type,
+ BlinkGC::GCReason reason,
+ bool is_forced_gc) {
+ DCHECK(!is_started_);
+ DCHECK(current_.marking_time().is_zero());
+ is_started_ = true;
+ current_.reason = reason;
+ current_.collection_type = collection_type;
+ current_.is_forced_gc = is_forced_gc;
+}
+
+void ThreadHeapStatsCollector::NotifyMarkingCompleted(size_t marked_bytes) {
+ allocated_bytes_since_prev_gc_ +=
+ static_cast<int64_t>(pos_delta_allocated_bytes_since_prev_gc_) -
+ static_cast<int64_t>(neg_delta_allocated_bytes_since_prev_gc_);
+ current_.marked_bytes = marked_bytes;
+ current_.object_size_in_bytes_before_sweeping = object_size_in_bytes();
+ current_.allocated_space_in_bytes_before_sweeping = allocated_space_bytes();
+ current_.partition_alloc_bytes_before_sweeping =
+ WTF::Partitions::TotalSizeOfCommittedPages();
+ allocated_bytes_since_prev_gc_ = 0;
+ pos_delta_allocated_bytes_since_prev_gc_ = 0;
+ neg_delta_allocated_bytes_since_prev_gc_ = 0;
+
+ ForAllObservers([marked_bytes](ThreadHeapStatsObserver* observer) {
+ observer->ResetAllocatedObjectSize(marked_bytes);
+ });
+}
+
+void ThreadHeapStatsCollector::NotifySweepingCompleted() {
+ is_started_ = false;
+ current_.live_object_rate =
+ current_.object_size_in_bytes_before_sweeping
+ ? static_cast<double>(current().marked_bytes) /
+ current_.object_size_in_bytes_before_sweeping
+ : 0.0;
+ current_.gc_nested_in_v8 = gc_nested_in_v8_;
+ gc_nested_in_v8_ = base::TimeDelta();
+ // Reset the current state.
+ static_assert(std::is_trivially_copyable<Event>::value,
+ "Event should be trivially copyable");
+ previous_ = std::move(current_);
+ current_ = Event();
+}
+
+void ThreadHeapStatsCollector::UpdateReason(BlinkGC::GCReason reason) {
+ current_.reason = reason;
+}
+
+size_t ThreadHeapStatsCollector::object_size_in_bytes() const {
+ DCHECK_GE(static_cast<int64_t>(previous().marked_bytes) +
+ allocated_bytes_since_prev_gc_,
+ 0);
+ return static_cast<size_t>(static_cast<int64_t>(previous().marked_bytes) +
+ allocated_bytes_since_prev_gc_);
+}
+
+base::TimeDelta ThreadHeapStatsCollector::Event::roots_marking_time() const {
+ return scope_data[kVisitRoots];
+}
+
+base::TimeDelta ThreadHeapStatsCollector::Event::incremental_marking_time()
+ const {
+ return scope_data[kIncrementalMarkingStartMarking] +
+ scope_data[kIncrementalMarkingStep] + scope_data[kUnifiedMarkingStep];
+}
+
+base::TimeDelta
+ThreadHeapStatsCollector::Event::worklist_processing_time_foreground() const {
+ return scope_data[kMarkProcessWorklists];
+}
+
+base::TimeDelta ThreadHeapStatsCollector::Event::flushing_v8_references_time()
+ const {
+ return scope_data[kMarkFlushV8References];
+}
+
+base::TimeDelta ThreadHeapStatsCollector::Event::atomic_marking_time() const {
+ return scope_data[kAtomicPauseMarkPrologue] +
+ scope_data[kAtomicPauseMarkRoots] +
+ scope_data[kAtomicPauseMarkTransitiveClosure] +
+ scope_data[kAtomicPauseMarkEpilogue];
+}
+
+base::TimeDelta ThreadHeapStatsCollector::Event::atomic_sweep_and_compact_time()
+ const {
+ return scope_data[ThreadHeapStatsCollector::kAtomicPauseSweepAndCompact];
+}
+
+base::TimeDelta ThreadHeapStatsCollector::Event::foreground_marking_time()
+ const {
+ return incremental_marking_time() + atomic_marking_time();
+}
+
+base::TimeDelta ThreadHeapStatsCollector::Event::background_marking_time()
+ const {
+ return base::TimeDelta::FromMicroseconds(base::subtle::NoBarrier_Load(
+ &concurrent_scope_data[kConcurrentMarkingStep]));
+}
+
+base::TimeDelta ThreadHeapStatsCollector::Event::marking_time() const {
+ return foreground_marking_time() + background_marking_time();
+}
+
+base::TimeDelta ThreadHeapStatsCollector::Event::gc_cycle_time() const {
+ // Note that scopes added here also have to have a proper BlinkGCInV8Scope
+ // scope if they are nested in a V8 scope.
+ return incremental_marking_time() + atomic_marking_time() +
+ atomic_sweep_and_compact_time() + foreground_sweeping_time();
+}
+
+base::TimeDelta ThreadHeapStatsCollector::Event::atomic_pause_time() const {
+ return atomic_marking_time() + atomic_sweep_and_compact_time();
+}
+
+base::TimeDelta ThreadHeapStatsCollector::Event::foreground_sweeping_time()
+ const {
+ return scope_data[kCompleteSweep] + scope_data[kLazySweepInIdle] +
+ scope_data[kLazySweepOnAllocation];
+}
+
+base::TimeDelta ThreadHeapStatsCollector::Event::background_sweeping_time()
+ const {
+ return base::TimeDelta::FromMicroseconds(
+ concurrent_scope_data[kConcurrentSweepingStep]);
+}
+
+base::TimeDelta ThreadHeapStatsCollector::Event::sweeping_time() const {
+ return foreground_sweeping_time() + background_sweeping_time();
+}
+
+int64_t ThreadHeapStatsCollector::allocated_bytes_since_prev_gc() const {
+ return allocated_bytes_since_prev_gc_;
+}
+
+size_t ThreadHeapStatsCollector::marked_bytes() const {
+ return current_.marked_bytes;
+}
+
+base::TimeDelta ThreadHeapStatsCollector::marking_time_so_far() const {
+ return current_.marking_time();
+}
+
+base::TimeDelta ThreadHeapStatsCollector::worklist_processing_time_foreground()
+ const {
+ return current_.worklist_processing_time_foreground();
+}
+
+base::TimeDelta ThreadHeapStatsCollector::flushing_v8_references_time() const {
+ return current_.flushing_v8_references_time();
+}
+
+size_t ThreadHeapStatsCollector::allocated_space_bytes() const {
+ return allocated_space_bytes_;
+}
+
+void ThreadHeapStatsCollector::RegisterObserver(
+ ThreadHeapStatsObserver* observer) {
+ DCHECK(!observers_.Contains(observer));
+ observers_.push_back(observer);
+}
+
+void ThreadHeapStatsCollector::UnregisterObserver(
+ ThreadHeapStatsObserver* observer) {
+ wtf_size_t index = observers_.Find(observer);
+ DCHECK_NE(WTF::kNotFound, index);
+ observers_.EraseAt(index);
+}
+
+template <typename Callback>
+void ThreadHeapStatsCollector::ForAllObservers(Callback callback) {
+ for (ThreadHeapStatsObserver* observer : observers_) {
+ callback(observer);
+ }
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/heap_stats_collector.h b/chromium/third_party/blink/renderer/platform/heap/impl/heap_stats_collector.h
new file mode 100644
index 00000000000..cbac7c3b8ac
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/heap_stats_collector.h
@@ -0,0 +1,469 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_STATS_COLLECTOR_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_STATS_COLLECTOR_H_
+
+#include <stddef.h>
+
+#include "base/atomicops.h"
+#include "third_party/blink/renderer/platform/heap/blink_gc.h"
+#include "third_party/blink/renderer/platform/instrumentation/tracing/trace_event.h"
+#include "third_party/blink/renderer/platform/platform_export.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+
+namespace blink {
+
+// Interface for observing changes to heap sizing.
+class PLATFORM_EXPORT ThreadHeapStatsObserver {
+ public:
+ // Called upon allocating/releasing chunks of memory that contain objects.
+ //
+ // Must not trigger GC or allocate.
+ virtual void IncreaseAllocatedSpace(size_t) = 0;
+ virtual void DecreaseAllocatedSpace(size_t) = 0;
+
+ // Called once per GC cycle with the accurate number of live |bytes|.
+ //
+ // Must not trigger GC or allocate.
+ virtual void ResetAllocatedObjectSize(size_t bytes) = 0;
+
+ // Called after observing at least
+ // |ThreadHeapStatsCollector::kUpdateThreshold| changed bytes through
+  // allocation or explicit free. Reports both negative and positive
+  // increments, allowing the observer to decide whether absolute values or
+  // only the deltas are interesting.
+  //
+  // May trigger GC but must not allocate.
+ virtual void IncreaseAllocatedObjectSize(size_t) = 0;
+ virtual void DecreaseAllocatedObjectSize(size_t) = 0;
+};
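+
+// A minimal sketch of an observer (hypothetical class, for illustration only),
+// assuming it is registered via ThreadHeapStatsCollector::RegisterObserver().
+// It tracks the net object size reported through this interface; note that
+// DecreaseAllocatedObjectSize() may transiently underflow a plain size_t
+// counter, which a real observer would have to guard against.
+//
+//   class NetObjectSizeObserver final : public ThreadHeapStatsObserver {
+//    public:
+//     void IncreaseAllocatedSpace(size_t) final {}
+//     void DecreaseAllocatedSpace(size_t) final {}
+//     void ResetAllocatedObjectSize(size_t bytes) final { net_bytes_ = bytes; }
+//     void IncreaseAllocatedObjectSize(size_t bytes) final {
+//       net_bytes_ += bytes;
+//     }
+//     void DecreaseAllocatedObjectSize(size_t bytes) final {
+//       net_bytes_ -= bytes;
+//     }
+//     size_t net_bytes() const { return net_bytes_; }
+//
+//    private:
+//     size_t net_bytes_ = 0;
+//   };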
+
+#define FOR_ALL_SCOPES(V) \
+ V(AtomicPauseCompaction) \
+ V(AtomicPauseMarkEpilogue) \
+ V(AtomicPauseMarkPrologue) \
+ V(AtomicPauseMarkRoots) \
+ V(AtomicPauseMarkTransitiveClosure) \
+ V(AtomicPauseSweepAndCompact) \
+ V(CompleteSweep) \
+ V(IncrementalMarkingFinalize) \
+ V(IncrementalMarkingStartMarking) \
+ V(IncrementalMarkingStep) \
+ V(IncrementalMarkingWithDeadline) \
+ V(InvokePreFinalizers) \
+ V(LazySweepInIdle) \
+ V(LazySweepOnAllocation) \
+ V(MarkBailOutObjects) \
+ V(MarkInvokeEphemeronCallbacks) \
+ V(MarkFlushV8References) \
+ V(MarkFlushEphemeronPairs) \
+ V(MarkProcessWorklists) \
+ V(MarkProcessMarkingWorklist) \
+ V(MarkProcessWriteBarrierWorklist) \
+ V(MarkProcessNotFullyconstructeddWorklist) \
+ V(MarkNotFullyConstructedObjects) \
+ V(MarkWeakProcessing) \
+ V(UnifiedMarkingStep) \
+ V(VisitCrossThreadPersistents) \
+ V(VisitPersistentRoots) \
+ V(VisitPersistents) \
+ V(VisitRoots) \
+ V(VisitStackRoots) \
+ V(VisitRememberedSets)
+
+#define FOR_ALL_CONCURRENT_SCOPES(V) \
+ V(ConcurrentMarkInvokeEphemeronCallbacks) \
+ V(ConcurrentMarkingStep) \
+ V(ConcurrentSweepingStep)
+
+// Manages counters and statistics across garbage collection cycles.
+//
+// Usage:
+// ThreadHeapStatsCollector stats_collector;
+// stats_collector.NotifyMarkingStarted(<BlinkGC::CollectionType>,
+// <BlinkGC::GCReason>);
+// // Use tracer.
+// stats_collector.NotifySweepingCompleted();
+// // Previous event is available using stats_collector.previous().
+class PLATFORM_EXPORT ThreadHeapStatsCollector {
+ USING_FAST_MALLOC(ThreadHeapStatsCollector);
+
+ public:
+ // These ids will form human readable names when used in Scopes.
+ enum Id {
+#define DECLARE_ENUM(name) k##name,
+ FOR_ALL_SCOPES(DECLARE_ENUM)
+#undef DECLARE_ENUM
+ kNumScopeIds,
+ };
+
+ enum ConcurrentId {
+#define DECLARE_ENUM(name) k##name,
+ FOR_ALL_CONCURRENT_SCOPES(DECLARE_ENUM)
+#undef DECLARE_ENUM
+ kNumConcurrentScopeIds
+ };
+
+ constexpr static const char* ToString(Id id, BlinkGC::CollectionType type) {
+ switch (id) {
+#define CASE(name) \
+ case k##name: \
+ return type == BlinkGC::CollectionType::kMajor ? "BlinkGC." #name \
+ : "BlinkGC." #name \
+ ".Minor";
+ FOR_ALL_SCOPES(CASE)
+#undef CASE
+ default:
+ NOTREACHED();
+ }
+ return nullptr;
+ }
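+  // For example, ToString(kCompleteSweep, BlinkGC::CollectionType::kMajor)
+  // yields "BlinkGC.CompleteSweep", while the kMinor collection type yields
+  // "BlinkGC.CompleteSweep.Minor".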
+
+ constexpr static const char* ToString(ConcurrentId id,
+ BlinkGC::CollectionType type) {
+ switch (id) {
+#define CASE(name) \
+ case k##name: \
+ return type == BlinkGC::CollectionType::kMajor ? "BlinkGC." #name \
+ : "BlinkGC." #name \
+ ".Minor";
+ FOR_ALL_CONCURRENT_SCOPES(CASE)
+#undef CASE
+ default:
+ NOTREACHED();
+ }
+ return nullptr;
+ }
+
+ enum TraceCategory { kEnabled, kDisabled };
+ enum ScopeContext { kMutatorThread, kConcurrentThread };
+
+ // Trace a particular scope. Will emit a trace event and record the time in
+ // the corresponding ThreadHeapStatsCollector.
+ template <TraceCategory trace_category = kDisabled,
+ ScopeContext scope_category = kMutatorThread>
+ class PLATFORM_EXPORT InternalScope {
+ DISALLOW_NEW();
+ DISALLOW_COPY_AND_ASSIGN(InternalScope);
+
+ using IdType =
+ std::conditional_t<scope_category == kMutatorThread, Id, ConcurrentId>;
+
+ public:
+ template <typename... Args>
+ InternalScope(ThreadHeapStatsCollector* tracer, IdType id, Args... args)
+ : tracer_(tracer), start_time_(base::TimeTicks::Now()), id_(id) {
+ StartTrace(args...);
+ }
+
+ ~InternalScope() {
+ StopTrace();
+ IncreaseScopeTime(id_);
+ }
+
+ private:
+ inline constexpr static const char* TraceCategory();
+
+ inline void StartTrace();
+ template <typename Value1>
+ inline void StartTrace(const char* k1, Value1 v1);
+ template <typename Value1, typename Value2>
+ inline void StartTrace(const char* k1,
+ Value1 v1,
+ const char* k2,
+ Value2 v2);
+ inline void StopTrace();
+
+ inline void IncreaseScopeTime(Id);
+ inline void IncreaseScopeTime(ConcurrentId);
+
+ ThreadHeapStatsCollector* const tracer_;
+ const base::TimeTicks start_time_;
+ const IdType id_;
+ };
+
+ using Scope = InternalScope<kDisabled>;
+ using EnabledScope = InternalScope<kEnabled>;
+ using ConcurrentScope = InternalScope<kDisabled, kConcurrentThread>;
+ using EnabledConcurrentScope = InternalScope<kEnabled, kConcurrentThread>;
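+
+  // A minimal usage sketch (hypothetical call site, assuming a cycle has been
+  // started via NotifyMarkingStarted()): timing a complete-sweep phase. On
+  // destruction the scope ends its trace event and adds the elapsed time to
+  // scope_data[kCompleteSweep] of the current event.
+  //
+  //   {
+  //     ThreadHeapStatsCollector::Scope scope(
+  //         stats_collector, ThreadHeapStatsCollector::kCompleteSweep);
+  //     // ... sweep ...
+  //   }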
+
+ // BlinkGCInV8Scope keeps track of time spent in Blink's GC when called by V8.
+ // This is necessary to avoid double-accounting of Blink's time when computing
+ // the overall time (V8 + Blink) spent in GC on the main thread.
+ class PLATFORM_EXPORT BlinkGCInV8Scope {
+ DISALLOW_NEW();
+ DISALLOW_COPY_AND_ASSIGN(BlinkGCInV8Scope);
+
+ public:
+ template <typename... Args>
+ BlinkGCInV8Scope(ThreadHeapStatsCollector* tracer)
+ : tracer_(tracer), start_time_(base::TimeTicks::Now()) {}
+
+ ~BlinkGCInV8Scope() {
+ if (tracer_)
+ tracer_->gc_nested_in_v8_ += base::TimeTicks::Now() - start_time_;
+ }
+
+ private:
+ ThreadHeapStatsCollector* const tracer_;
+ const base::TimeTicks start_time_;
+ };
+
+ // POD to hold interesting data accumulated during a garbage collection cycle.
+  // The event is always fully populated when looking at previous events but
+  // only partially populated when looking at the current event. See the
+  // members for when they are available.
+  //
+  // Note that all getters include time for stand-alone as well as unified heap
+  // GCs. E.g., |atomic_marking_time()| reports the marking time of the atomic
+  // phase, independent of whether the GC was a stand-alone or unified heap GC.
+ struct PLATFORM_EXPORT Event {
+ Event();
+
+ // Overall time spent in the GC cycle. This includes marking time as well as
+ // sweeping time.
+ base::TimeDelta gc_cycle_time() const;
+
+ // Time spent in the final atomic pause of a GC cycle.
+ base::TimeDelta atomic_pause_time() const;
+
+ // Time spent in the final atomic pause for marking the heap.
+ base::TimeDelta atomic_marking_time() const;
+
+ // Time spent in the final atomic pause in sweeping and compacting the heap.
+ base::TimeDelta atomic_sweep_and_compact_time() const;
+
+ // Time spent marking the roots.
+ base::TimeDelta roots_marking_time() const;
+
+ // Time spent incrementally marking the heap.
+ base::TimeDelta incremental_marking_time() const;
+
+ // Time spent processing worklist in the foreground thread.
+ base::TimeDelta worklist_processing_time_foreground() const;
+
+ // Time spent flushing v8 references (this is done only in the foreground)
+ base::TimeDelta flushing_v8_references_time() const;
+
+ // Time spent in foreground tasks marking the heap.
+ base::TimeDelta foreground_marking_time() const;
+
+ // Time spent in background tasks marking the heap.
+ base::TimeDelta background_marking_time() const;
+
+ // Overall time spent marking the heap.
+ base::TimeDelta marking_time() const;
+
+ // Time spent in foreground tasks sweeping the heap.
+ base::TimeDelta foreground_sweeping_time() const;
+
+ // Time spent in background tasks sweeping the heap.
+ base::TimeDelta background_sweeping_time() const;
+
+ // Overall time spent sweeping the heap.
+ base::TimeDelta sweeping_time() const;
+
+    // Unique identifier of this garbage collection event.
+    size_t unique_id = -1;
+    // Bytes marked in this garbage collection cycle.
+    size_t marked_bytes = 0;
+ size_t compaction_freed_bytes = 0;
+ size_t compaction_freed_pages = 0;
+ bool compaction_recorded_events = false;
+ base::TimeDelta scope_data[kNumScopeIds];
+ base::subtle::Atomic32 concurrent_scope_data[kNumConcurrentScopeIds]{0};
+ BlinkGC::GCReason reason = static_cast<BlinkGC::GCReason>(0);
+ BlinkGC::CollectionType collection_type = BlinkGC::CollectionType::kMajor;
+ size_t object_size_in_bytes_before_sweeping = 0;
+ size_t allocated_space_in_bytes_before_sweeping = 0;
+ size_t partition_alloc_bytes_before_sweeping = 0;
+ double live_object_rate = 0;
+ base::TimeDelta gc_nested_in_v8;
+ bool is_forced_gc = true;
+ };
+
+ // Indicates a new garbage collection cycle.
+ void NotifyMarkingStarted(BlinkGC::CollectionType,
+ BlinkGC::GCReason,
+ bool is_forced_gc);
+
+ // Indicates that marking of the current garbage collection cycle is
+ // completed.
+ void NotifyMarkingCompleted(size_t marked_bytes);
+
+ // Indicates the end of a garbage collection cycle. This means that sweeping
+ // is finished at this point.
+ void NotifySweepingCompleted();
+
+ void IncreaseScopeTime(Id id, base::TimeDelta time) {
+ DCHECK(is_started_);
+ current_.scope_data[id] += time;
+ }
+
+ void IncreaseConcurrentScopeTime(ConcurrentId id, base::TimeDelta time) {
+ using Atomic32 = base::subtle::Atomic32;
+ DCHECK(is_started_);
+ const int64_t ms = time.InMicroseconds();
+ DCHECK(ms <= std::numeric_limits<Atomic32>::max());
+ base::subtle::NoBarrier_AtomicIncrement(&current_.concurrent_scope_data[id],
+ static_cast<Atomic32>(ms));
+ }
+
+ void UpdateReason(BlinkGC::GCReason);
+ void IncreaseCompactionFreedSize(size_t);
+ void IncreaseCompactionFreedPages(size_t);
+ void IncreaseAllocatedObjectSize(size_t);
+ void DecreaseAllocatedObjectSize(size_t);
+ void IncreaseAllocatedSpace(size_t);
+ void DecreaseAllocatedSpace(size_t);
+ void IncreaseWrapperCount(size_t);
+ void DecreaseWrapperCount(size_t);
+ void IncreaseCollectedWrapperCount(size_t);
+
+ // Called by the GC when it hits a point where allocated memory may be
+ // reported and garbage collection is possible. This is necessary, as
+ // increments and decrements are reported as close to their actual
+ // allocation/reclamation as possible.
+ void AllocatedObjectSizeSafepoint();
+
+ // Size of objects on the heap. Based on marked bytes in the previous cycle
+ // and newly allocated bytes since the previous cycle.
+ size_t object_size_in_bytes() const;
+
+ size_t marked_bytes() const;
+ base::TimeDelta marking_time_so_far() const;
+
+ base::TimeDelta worklist_processing_time_foreground() const;
+
+ base::TimeDelta flushing_v8_references_time() const;
+
+ int64_t allocated_bytes_since_prev_gc() const;
+
+ size_t allocated_space_bytes() const;
+
+ size_t wrapper_count() const;
+ size_t collected_wrapper_count() const;
+
+ bool is_started() const { return is_started_; }
+
+ // Statistics for the previously running garbage collection.
+ const Event& previous() const { return previous_; }
+
+ void RegisterObserver(ThreadHeapStatsObserver* observer);
+ void UnregisterObserver(ThreadHeapStatsObserver* observer);
+
+ void IncreaseAllocatedObjectSizeForTesting(size_t);
+ void DecreaseAllocatedObjectSizeForTesting(size_t);
+
+ private:
+ // Observers are implemented using virtual calls. Avoid notifications below
+ // reasonably interesting sizes.
+ static constexpr int64_t kUpdateThreshold = 1024;
+
+ // Invokes |callback| for all registered observers.
+ template <typename Callback>
+ void ForAllObservers(Callback callback);
+
+ void AllocatedObjectSizeSafepointImpl();
+
+ // Statistics for the currently running garbage collection. Note that the
+ // Event may not be fully populated yet as some phase may not have been run.
+ const Event& current() const { return current_; }
+
+ Event current_;
+ Event previous_;
+
+ // Allocated bytes since the last garbage collection. These bytes are reset
+ // after marking as they are accounted in marked_bytes then.
+ int64_t allocated_bytes_since_prev_gc_ = 0;
+ int64_t pos_delta_allocated_bytes_since_prev_gc_ = 0;
+ int64_t neg_delta_allocated_bytes_since_prev_gc_ = 0;
+
+ // Allocated space in bytes for all arenas.
+ size_t allocated_space_bytes_ = 0;
+
+ bool is_started_ = false;
+
+  // Time spent in Blink GC while nested in V8 (see BlinkGCInV8Scope). This
+  // does not need to be nested within a garbage collection cycle, which makes
+  // it easier to use.
+ base::TimeDelta gc_nested_in_v8_;
+
+ Vector<ThreadHeapStatsObserver*> observers_;
+
+ FRIEND_TEST_ALL_PREFIXES(ThreadHeapStatsCollectorTest, InitialEmpty);
+ FRIEND_TEST_ALL_PREFIXES(ThreadHeapStatsCollectorTest, IncreaseScopeTime);
+ FRIEND_TEST_ALL_PREFIXES(ThreadHeapStatsCollectorTest, StopResetsCurrent);
+};
+
+template <ThreadHeapStatsCollector::TraceCategory trace_category,
+ ThreadHeapStatsCollector::ScopeContext scope_category>
+constexpr const char*
+ThreadHeapStatsCollector::InternalScope<trace_category,
+ scope_category>::TraceCategory() {
+ switch (trace_category) {
+ case kEnabled:
+ return "blink_gc,devtools.timeline";
+ case kDisabled:
+ return TRACE_DISABLED_BY_DEFAULT("blink_gc");
+ }
+}
+
+template <ThreadHeapStatsCollector::TraceCategory trace_category,
+ ThreadHeapStatsCollector::ScopeContext scope_category>
+void ThreadHeapStatsCollector::InternalScope<trace_category,
+ scope_category>::StartTrace() {
+ TRACE_EVENT_BEGIN0(TraceCategory(),
+ ToString(id_, tracer_->current_.collection_type));
+}
+
+template <ThreadHeapStatsCollector::TraceCategory trace_category,
+ ThreadHeapStatsCollector::ScopeContext scope_category>
+template <typename Value1>
+void ThreadHeapStatsCollector::InternalScope<trace_category, scope_category>::
+ StartTrace(const char* k1, Value1 v1) {
+ TRACE_EVENT_BEGIN1(TraceCategory(),
+ ToString(id_, tracer_->current_.collection_type), k1, v1);
+}
+
+template <ThreadHeapStatsCollector::TraceCategory trace_category,
+ ThreadHeapStatsCollector::ScopeContext scope_category>
+template <typename Value1, typename Value2>
+void ThreadHeapStatsCollector::InternalScope<trace_category, scope_category>::
+ StartTrace(const char* k1, Value1 v1, const char* k2, Value2 v2) {
+ TRACE_EVENT_BEGIN2(TraceCategory(),
+ ToString(id_, tracer_->current_.collection_type), k1, v1,
+ k2, v2);
+}
+
+template <ThreadHeapStatsCollector::TraceCategory trace_category,
+ ThreadHeapStatsCollector::ScopeContext scope_category>
+void ThreadHeapStatsCollector::InternalScope<trace_category,
+ scope_category>::StopTrace() {
+ TRACE_EVENT_END2(TraceCategory(),
+ ToString(id_, tracer_->current_.collection_type), "epoch",
+ tracer_->current_.unique_id, "forced",
+ tracer_->current_.is_forced_gc);
+}
+
+template <ThreadHeapStatsCollector::TraceCategory trace_category,
+ ThreadHeapStatsCollector::ScopeContext scope_category>
+void ThreadHeapStatsCollector::InternalScope<trace_category, scope_category>::
+ IncreaseScopeTime(Id) {
+ tracer_->IncreaseScopeTime(id_, base::TimeTicks::Now() - start_time_);
+}
+
+template <ThreadHeapStatsCollector::TraceCategory trace_category,
+ ThreadHeapStatsCollector::ScopeContext scope_category>
+void ThreadHeapStatsCollector::InternalScope<trace_category, scope_category>::
+ IncreaseScopeTime(ConcurrentId) {
+ tracer_->IncreaseConcurrentScopeTime(id_,
+ base::TimeTicks::Now() - start_time_);
+}
+
+#undef FOR_ALL_SCOPES
+#undef FOR_ALL_CONCURRENT_SCOPES
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_STATS_COLLECTOR_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/heap_traits.h b/chromium/third_party/blink/renderer/platform/heap/impl/heap_traits.h
new file mode 100644
index 00000000000..36ac20e5710
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/heap_traits.h
@@ -0,0 +1,40 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_TRAITS_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_TRAITS_H_
+
+#include <type_traits>
+#include "third_party/blink/renderer/platform/heap/heap_allocator.h"
+#include "third_party/blink/renderer/platform/heap/member.h"
+#include "third_party/blink/renderer/platform/wtf/type_traits.h"
+
+namespace blink {
+
+// Given a type T, returns a type that is either Member<T> or just T depending
+// on whether T is a garbage-collected type.
+template <typename T>
+using AddMemberIfNeeded =
+ std::conditional_t<WTF::IsGarbageCollectedType<T>::value, Member<T>, T>;
+
+// Given a type T, returns a type that is either HeapVector<T>,
+// HeapVector<Member<T>> or Vector<T> depending on T.
+template <typename T>
+using VectorOf = std::conditional_t<WTF::IsTraceable<T>::value,
+ HeapVector<AddMemberIfNeeded<T>>,
+ Vector<T>>;
+
+// Given types T and U, returns a type that is one of the following:
+// - HeapVector<std::pair<V, X>>
+// (where V is either T or Member<T> and X is either U or Member<U>)
+// - Vector<std::pair<T, U>>
+template <typename T, typename U>
+using VectorOfPairs = std::conditional_t<
+ WTF::IsTraceable<T>::value || WTF::IsTraceable<U>::value,
+ HeapVector<std::pair<AddMemberIfNeeded<T>, AddMemberIfNeeded<U>>>,
+ Vector<std::pair<T, U>>>;
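+
+// For illustration (hypothetical type): given a garbage-collected class
+// `class Node : public GarbageCollected<Node> { ... }` with the usual Trace
+// method, the aliases resolve roughly as follows:
+//   AddMemberIfNeeded<Node>   -> Member<Node>
+//   AddMemberIfNeeded<int>    -> int
+//   VectorOf<Node>            -> HeapVector<Member<Node>>
+//   VectorOf<int>             -> Vector<int>
+//   VectorOfPairs<Node, int>  -> HeapVector<std::pair<Member<Node>, int>>
+//   VectorOfPairs<int, float> -> Vector<std::pair<int, float>>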
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_TRAITS_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/marking_scheduling_oracle.cc b/chromium/third_party/blink/renderer/platform/heap/impl/marking_scheduling_oracle.cc
new file mode 100644
index 00000000000..8b05de2df81
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/marking_scheduling_oracle.cc
@@ -0,0 +1,97 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/heap/impl/marking_scheduling_oracle.h"
+
+#include "base/numerics/ranges.h"
+
+namespace blink {
+
+constexpr double MarkingSchedulingOracle::kEstimatedMarkingTimeMs;
+constexpr base::TimeDelta
+ MarkingSchedulingOracle::kDefaultIncrementalMarkingStepDuration;
+constexpr size_t MarkingSchedulingOracle::kMinimumMarkedBytesInStep;
+constexpr base::TimeDelta
+ MarkingSchedulingOracle::kMaximumIncrementalMarkingStepDuration;
+
+MarkingSchedulingOracle::MarkingSchedulingOracle()
+ : incremental_marking_start_time_(base::TimeTicks::Now()) {}
+
+void MarkingSchedulingOracle::UpdateIncrementalMarkingStats(
+ size_t overall_marked_bytes,
+ base::TimeDelta overall_marking_time,
+ base::TimeDelta non_contributing_time) {
+ incrementally_marked_bytes_ = overall_marked_bytes;
+ // |non_contributing_time| is time spent during |overall_marking_time| which
+ // does not contribute to |overall_marked_bytes| and is thus ignored so that
+ // it doesn't affect the marking speed.
+ DCHECK_LE(non_contributing_time, overall_marking_time);
+ incremental_marking_time_so_far_ =
+ overall_marking_time - non_contributing_time;
+}
+
+void MarkingSchedulingOracle::AddConcurrentlyMarkedBytes(size_t marked_bytes) {
+ concurrently_marked_bytes_.fetch_add(marked_bytes, std::memory_order_relaxed);
+}
+
+size_t MarkingSchedulingOracle::GetConcurrentlyMarkedBytes() {
+ return concurrently_marked_bytes_.load(std::memory_order_relaxed);
+}
+
+size_t MarkingSchedulingOracle::GetOverallMarkedBytes() {
+ return incrementally_marked_bytes_ + GetConcurrentlyMarkedBytes();
+}
+
+double MarkingSchedulingOracle::GetElapsedTimeInMs(base::TimeTicks start_time) {
+ if (elapsed_time_for_testing_ != kNoSetElapsedTimeForTesting) {
+ double elapsed_time = elapsed_time_for_testing_;
+ elapsed_time_for_testing_ = kNoSetElapsedTimeForTesting;
+ return elapsed_time;
+ }
+ return (base::TimeTicks::Now() - start_time).InMillisecondsF();
+}
+
+base::TimeDelta MarkingSchedulingOracle::GetMinimumStepDuration() {
+ DCHECK_LT(0u, incrementally_marked_bytes_);
+ DCHECK(!incremental_marking_time_so_far_.is_zero());
+ return incremental_marking_time_so_far_ * kMinimumMarkedBytesInStep /
+ incrementally_marked_bytes_;
+}
+
+base::TimeDelta MarkingSchedulingOracle::GetNextIncrementalStepDurationForTask(
+ size_t estimated_live_bytes) {
+ if ((incrementally_marked_bytes_ == 0) ||
+ incremental_marking_time_so_far_.is_zero()) {
+ // Impossible to estimate marking speed. Fallback to default duration.
+ return kDefaultIncrementalMarkingStepDuration;
+ }
+ double elapsed_time_in_ms =
+ GetElapsedTimeInMs(incremental_marking_start_time_);
+ size_t actual_marked_bytes = GetOverallMarkedBytes();
+ double expected_marked_bytes =
+ estimated_live_bytes * elapsed_time_in_ms / kEstimatedMarkingTimeMs;
+ base::TimeDelta minimum_duration = GetMinimumStepDuration();
+ if (expected_marked_bytes < actual_marked_bytes) {
+ // Marking is ahead of schedule, incremental marking doesn't need to
+ // do anything.
+ return std::min(minimum_duration, kMaximumIncrementalMarkingStepDuration);
+ }
+ // Assuming marking will take |kEstimatedMarkingTime|, overall there will
+ // be |estimated_live_bytes| live bytes to mark, and that marking speed is
+ // constant, after |elapsed_time| the number of marked_bytes should be
+ // |estimated_live_bytes| * (|elapsed_time| / |kEstimatedMarkingTime|),
+ // denoted as |expected_marked_bytes|. If |actual_marked_bytes| is less,
+ // i.e. marking is behind schedule, incremental marking should help "catch
+ // up" by marking (|expected_marked_bytes| - |actual_marked_bytes|).
+ // Assuming constant marking speed, duration of the next incremental step
+ // should be as follows:
+ const base::TimeDelta marking_time_to_catch_up =
+ incremental_marking_time_so_far_ *
+ (expected_marked_bytes - actual_marked_bytes) /
+ incrementally_marked_bytes_;
+ return base::ClampToRange(marking_time_to_catch_up, minimum_duration,
+ kMaximumIncrementalMarkingStepDuration);
+}
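+
+// A minimal worked example of the schedule above (illustrative values): with
+// estimated_live_bytes = 100 MB and 100 ms elapsed out of
+// kEstimatedMarkingTimeMs = 500 ms, the expected progress is
+// 100 MB * 100 / 500 = 20 MB. If only 10 MB have actually been marked (5 MB
+// incrementally in 5 ms plus 5 MB concurrently), marking is behind schedule
+// and the catch-up duration is 5 ms * (20 MB - 10 MB) / 5 MB = 10 ms, which is
+// then clamped to kMaximumIncrementalMarkingStepDuration (2 ms). The lower
+// clamp bound, GetMinimumStepDuration(), would be 5 ms * 64 KB / 5 MB =
+// 0.0625 ms here.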
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/marking_scheduling_oracle.h b/chromium/third_party/blink/renderer/platform/heap/impl/marking_scheduling_oracle.h
new file mode 100644
index 00000000000..19c9e0b68a0
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/marking_scheduling_oracle.h
@@ -0,0 +1,65 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_MARKING_SCHEDULING_ORACLE_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_MARKING_SCHEDULING_ORACLE_H_
+
+#include <atomic>
+
+#include "base/time/time.h"
+#include "third_party/blink/renderer/platform/heap/blink_gc.h"
+
+namespace blink {
+
+class PLATFORM_EXPORT MarkingSchedulingOracle {
+ public:
+ // Estimated duration of GC cycle in milliseconds.
+ static constexpr double kEstimatedMarkingTimeMs = 500.0;
+
+ // Duration of one incremental marking step. Should be short enough that it
+ // doesn't cause jank even though it is scheduled as a normal task.
+ static constexpr base::TimeDelta kDefaultIncrementalMarkingStepDuration =
+ base::TimeDelta::FromMillisecondsD(0.5);
+
+ // Minimum number of bytes that should be marked during an incremental
+ // marking step.
+ static constexpr size_t kMinimumMarkedBytesInStep = 64 * 1024;
+
+ // Maximum duration of one incremental marking step. Should be short enough
+ // that it doesn't cause jank even though it is scheduled as a normal task.
+ static constexpr base::TimeDelta kMaximumIncrementalMarkingStepDuration =
+ base::TimeDelta::FromMillisecondsD(2.0);
+
+ explicit MarkingSchedulingOracle();
+
+ void UpdateIncrementalMarkingStats(size_t, base::TimeDelta, base::TimeDelta);
+ void AddConcurrentlyMarkedBytes(size_t);
+
+ size_t GetConcurrentlyMarkedBytes();
+ size_t GetOverallMarkedBytes();
+
+ base::TimeDelta GetNextIncrementalStepDurationForTask(size_t);
+
+ void SetElapsedTimeForTesting(double elapsed_time) {
+ elapsed_time_for_testing_ = elapsed_time;
+ }
+
+ private:
+ double GetElapsedTimeInMs(base::TimeTicks);
+ base::TimeDelta GetMinimumStepDuration();
+
+ base::TimeTicks incremental_marking_start_time_;
+ base::TimeDelta incremental_marking_time_so_far_;
+
+ size_t incrementally_marked_bytes_ = 0;
+ std::atomic_size_t concurrently_marked_bytes_{0};
+
+  // Using -1 as a sentinel to denote that no elapsed time has been set for
+  // testing.
+ static constexpr double kNoSetElapsedTimeForTesting = -1;
+ double elapsed_time_for_testing_ = kNoSetElapsedTimeForTesting;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_MARKING_SCHEDULING_ORACLE_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/marking_verifier.cc b/chromium/third_party/blink/renderer/platform/heap/impl/marking_verifier.cc
new file mode 100644
index 00000000000..58b049ef54a
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/marking_verifier.cc
@@ -0,0 +1,82 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/heap/impl/marking_verifier.h"
+
+#include "third_party/blink/renderer/platform/heap/garbage_collected.h"
+#include "third_party/blink/renderer/platform/heap/impl/heap_page.h"
+
+namespace blink {
+
+void MarkingVerifier::VerifyObject(HeapObjectHeader* header) {
+ // Verify only non-free marked objects.
+ if (header->IsFree() || !header->IsMarked())
+ return;
+
+ const GCInfo& info = GCInfo::From(header->GcInfoIndex());
+ const bool can_verify =
+ !info.has_v_table || blink::VTableInitialized(header->Payload());
+ if (can_verify) {
+ parent_ = header;
+ info.trace(this, header->Payload());
+ }
+}
+
+void MarkingVerifier::Visit(const void* object, TraceDescriptor desc) {
+ VerifyChild(object, desc.base_object_payload);
+}
+
+void MarkingVerifier::VisitWeak(const void* object,
+ const void* object_weak_ref,
+ TraceDescriptor desc,
+ WeakCallback callback) {
+  // Weak references to dead objects should have been cleared at this point. As
+  // a consequence, all objects found through weak references point to live
+  // objects.
+ VerifyChild(object, desc.base_object_payload);
+}
+
+void MarkingVerifier::VisitWeakContainer(const void* object,
+ const void* const*,
+ TraceDescriptor,
+ TraceDescriptor weak_desc,
+ WeakCallback,
+ const void*) {
+ if (!object)
+ return;
+
+ // Contents of weak backing stores are found themselves through page
+ // iteration and are treated strongly that way, similar to how they are
+ // treated strongly when found through stack scanning. The verification
+  // here only makes sure that the backing itself is properly marked.
+ VerifyChild(object, weak_desc.base_object_payload);
+}
+
+void MarkingVerifier::VerifyChild(const void* object,
+ const void* base_object_payload) {
+ CHECK(object);
+ // Verification may check objects that are currently under construction and
+ // would require vtable access to figure out their headers. A nullptr in
+ // |base_object_payload| indicates that a mixin object is in construction
+ // and the vtable cannot be used to get to the object header.
+ const HeapObjectHeader* const child_header =
+ (base_object_payload) ? HeapObjectHeader::FromPayload(base_object_payload)
+ : HeapObjectHeader::FromInnerAddress(object);
+ // These checks ensure that any children reachable from marked parents are
+ // also marked. If you hit these checks then marking is in an inconsistent
+ // state meaning that there are unmarked objects reachable from marked
+ // ones.
+ CHECK(child_header);
+ if (!child_header->IsMarked()) {
+ CHECK(!PageFromObject(child_header->Payload())->HasBeenSwept());
+ LOG(FATAL) << "MarkingVerifier: Encountered unmarked object. " << std::endl
+ << std::endl
+ << "Hint: " << std::endl
+ << parent_->Name() << std::endl
+ << "\\-> " << child_header->Name() << std::endl;
+ }
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/marking_verifier.h b/chromium/third_party/blink/renderer/platform/heap/impl/marking_verifier.h
new file mode 100644
index 00000000000..ee4266a53f1
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/marking_verifier.h
@@ -0,0 +1,43 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_MARKING_VERIFIER_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_MARKING_VERIFIER_H_
+
+#include "third_party/blink/renderer/platform/heap/visitor.h"
+
+namespace blink {
+
+class HeapObjectHeader;
+
+// Marking verifier that checks that a child is marked if its parent is marked.
+class MarkingVerifier final : public Visitor {
+ public:
+ explicit MarkingVerifier(ThreadState* state) : Visitor(state) {}
+ ~MarkingVerifier() override = default;
+
+ void VerifyObject(HeapObjectHeader* header);
+
+ void Visit(const void* object, TraceDescriptor desc) final;
+ void VisitWeak(const void* object,
+ const void* object_weak_ref,
+ TraceDescriptor desc,
+ WeakCallback callback) final;
+
+ void VisitWeakContainer(const void*,
+ const void* const*,
+ TraceDescriptor,
+ TraceDescriptor,
+ WeakCallback,
+ const void*) final;
+
+ private:
+ void VerifyChild(const void* object, const void* base_object_payload);
+
+ HeapObjectHeader* parent_ = nullptr;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_MARKING_VERIFIER_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/marking_visitor.cc b/chromium/third_party/blink/renderer/platform/heap/impl/marking_visitor.cc
new file mode 100644
index 00000000000..aca7b715e65
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/marking_visitor.cc
@@ -0,0 +1,362 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/heap/impl/marking_visitor.h"
+
+#include "third_party/blink/renderer/platform/heap/blink_gc.h"
+#include "third_party/blink/renderer/platform/heap/heap.h"
+#include "third_party/blink/renderer/platform/heap/impl/heap_page.h"
+#include "third_party/blink/renderer/platform/heap/thread_state.h"
+
+namespace blink {
+
+MarkingVisitorBase::MarkingVisitorBase(ThreadState* state,
+ MarkingMode marking_mode,
+ int task_id)
+ : Visitor(state),
+ marking_worklist_(Heap().GetMarkingWorklist(), task_id),
+ write_barrier_worklist_(Heap().GetWriteBarrierWorklist(), task_id),
+ not_fully_constructed_worklist_(Heap().GetNotFullyConstructedWorklist(),
+ task_id),
+ weak_callback_worklist_(Heap().GetWeakCallbackWorklist(), task_id),
+ movable_reference_worklist_(Heap().GetMovableReferenceWorklist(),
+ task_id),
+ discovered_ephemeron_pairs_worklist_(
+ Heap().GetDiscoveredEphemeronPairsWorklist(),
+ task_id),
+ ephemeron_pairs_to_process_worklist_(
+ Heap().GetEphemeronPairsToProcessWorklist(),
+ task_id),
+ weak_containers_worklist_(Heap().GetWeakContainersWorklist()),
+ marking_mode_(marking_mode),
+ task_id_(task_id) {}
+
+void MarkingVisitorBase::FlushCompactionWorklists() {
+ if (marking_mode_ != kGlobalMarkingWithCompaction)
+ return;
+ movable_reference_worklist_.FlushToGlobal();
+}
+
+void MarkingVisitorBase::RegisterWeakCallback(WeakCallback callback,
+ const void* object) {
+ weak_callback_worklist_.Push({callback, object});
+}
+
+void MarkingVisitorBase::RegisterMovableSlot(const void* const* slot) {
+ if (marking_mode_ != kGlobalMarkingWithCompaction)
+ return;
+ if (Heap().ShouldRegisterMovingAddress()) {
+ movable_reference_worklist_.Push(slot);
+ }
+}
+
+void MarkingVisitorBase::VisitWeak(const void* object,
+ const void* object_weak_ref,
+ TraceDescriptor desc,
+ WeakCallback callback) {
+ HeapObjectHeader* header =
+ HeapObjectHeader::FromPayload(desc.base_object_payload);
+ if (header->IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>()) {
+ not_fully_constructed_worklist_.Push(desc.base_object_payload);
+ return;
+ }
+ // Filter out already marked values. The write barrier for WeakMember
+ // ensures that any newly set value after this point is kept alive and does
+ // not require the callback.
+ if (header->IsMarked<HeapObjectHeader::AccessMode::kAtomic>())
+ return;
+ RegisterWeakCallback(callback, object_weak_ref);
+}
+
+void MarkingVisitorBase::VisitEphemeron(const void* key,
+ TraceDescriptor value_desc) {
+ HeapObjectHeader* key_header = HeapObjectHeader::FromPayload(key);
+ if (!key_header->IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>() &&
+ !key_header->IsMarked<HeapObjectHeader::AccessMode::kAtomic>()) {
+    // In-construction keys are considered marked because they are guaranteed
+    // to be marked by the end of GC (e.g. by the write barrier on insertion
+    // into a HashTable).
+ discovered_ephemeron_pairs_worklist_.Push({key, value_desc});
+ return;
+ }
+ value_desc.callback(this, value_desc.base_object_payload);
+}
+
+void MarkingVisitorBase::VisitWeakContainer(
+ const void* object,
+ const void* const*,
+ TraceDescriptor,
+ TraceDescriptor weak_desc,
+ WeakCallback weak_callback,
+ const void* weak_callback_parameter) {
+ // In case there's no object present, weakness processing is omitted. The GC
+ // relies on the fact that in such cases touching the weak data structure will
+ // strongify its references.
+ if (!object)
+ return;
+
+ HeapObjectHeader* header = HeapObjectHeader::FromPayload(object);
+ // We shouldn't trace an in-construction backing store of a weak container.
+  // If this container is an ephemeron, we will try to iterate over its
+  // buckets, which is unsafe while the backing store is in construction.
+ if (header->IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>()) {
+ not_fully_constructed_worklist_.Push(object);
+ return;
+ }
+
+ // Only trace the container initially. Its buckets will be processed after
+ // marking. The interesting cases are:
+ // - The backing of the container is dropped using clear(): The backing can
+ // still be compacted but empty/deleted buckets will only be destroyed once
+ // the backing is reclaimed by the garbage collector on the next cycle.
+ // - The container expands/shrinks: Buckets are moved to the new backing
+ // store and strongified, resulting in all buckets being alive. The old
+ // backing store is marked but only contains empty/deleted buckets as all
+ // non-empty/deleted buckets have been moved to the new backing store.
+ MarkHeaderNoTracing(header);
+ AccountMarkedBytes(header);
+ weak_containers_worklist_->Push(header);
+
+ // Register final weak processing of the backing store.
+ RegisterWeakCallback(weak_callback, weak_callback_parameter);
+ // Register ephemeron callbacks if necessary.
+ if (weak_desc.callback)
+ weak_desc.callback(this, weak_desc.base_object_payload);
+}
+
+void MarkingVisitorBase::DynamicallyMarkAddress(ConstAddress address) {
+ constexpr HeapObjectHeader::AccessMode mode =
+ HeapObjectHeader::AccessMode::kAtomic;
+ HeapObjectHeader* const header =
+ HeapObjectHeader::FromInnerAddress<mode>(address);
+ DCHECK(header);
+ DCHECK(!header->IsInConstruction<mode>());
+ if (MarkHeaderNoTracing(header)) {
+ marking_worklist_.Push({reinterpret_cast<void*>(header->Payload()),
+ GCInfo::From(header->GcInfoIndex<mode>()).trace});
+ }
+}
+
+// static
+bool MarkingVisitor::MarkValue(void* value,
+ BasePage* base_page,
+ ThreadState* thread_state) {
+ HeapObjectHeader* header;
+ if (LIKELY(!base_page->IsLargeObjectPage())) {
+ header = reinterpret_cast<HeapObjectHeader*>(
+ static_cast<NormalPage*>(base_page)->FindHeaderFromAddress(
+ reinterpret_cast<Address>(value)));
+ } else {
+ LargeObjectPage* large_page = static_cast<LargeObjectPage*>(base_page);
+ header = large_page->ObjectHeader();
+ }
+
+ if (!header->TryMark<HeapObjectHeader::AccessMode::kAtomic>())
+ return false;
+
+ MarkingVisitor* visitor = thread_state->CurrentVisitor();
+ if (UNLIKELY(
+ header->IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>())) {
+ // It is assumed that objects on not_fully_constructed_worklist_ are not
+ // marked.
+ header->Unmark<HeapObjectHeader::AccessMode::kAtomic>();
+ visitor->not_fully_constructed_worklist_.Push(header->Payload());
+ return true;
+ }
+
+ visitor->write_barrier_worklist_.Push(header);
+ return true;
+}
+
+// static
+bool MarkingVisitor::WriteBarrierSlow(void* value) {
+ if (!value || IsHashTableDeleteValue(value))
+ return false;
+
+ // It is guaranteed that managed references point to either GarbageCollected
+  // or GarbageCollectedMixin. Mixins are restricted to regular object sizes.
+ // It is thus possible to get to the page header by aligning properly.
+ BasePage* base_page = PageFromObject(value);
+
+ ThreadState* const thread_state = base_page->thread_state();
+ if (!thread_state->IsIncrementalMarking())
+ return false;
+
+ return MarkValue(value, base_page, thread_state);
+}
+
+void MarkingVisitor::GenerationalBarrierSlow(Address slot,
+ ThreadState* thread_state) {
+ BasePage* slot_page = thread_state->Heap().LookupPageForAddress(slot);
+ DCHECK(slot_page);
+
+ if (UNLIKELY(slot_page->IsLargeObjectPage())) {
+ auto* large_page = static_cast<LargeObjectPage*>(slot_page);
+ if (UNLIKELY(large_page->ObjectHeader()->IsOld())) {
+ large_page->SetRemembered(true);
+ }
+ return;
+ }
+
+ auto* normal_page = static_cast<NormalPage*>(slot_page);
+ const HeapObjectHeader* source_header = reinterpret_cast<HeapObjectHeader*>(
+ normal_page->object_start_bit_map()->FindHeader(slot));
+ DCHECK_LT(0u, source_header->GcInfoIndex());
+ DCHECK_GT(source_header->PayloadEnd(), slot);
+ if (UNLIKELY(source_header->IsOld())) {
+ normal_page->MarkCard(slot);
+ }
+}
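+
+// A minimal sketch of how the barrier above is typically reached (hypothetical
+// objects): for a write `old_object->member_ = value`, the generational
+// barrier is invoked with the slot address &old_object->member_. Since the
+// object containing the slot is old, the card covering the slot is marked (or
+// the whole large page is set as remembered), so that a subsequent minor GC
+// revisits the slot via IterateCardTable() during remembered-set visiting
+// (kVisitRememberedSets).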
+
+void MarkingVisitor::TraceMarkedBackingStoreSlow(const void* value) {
+ if (!value)
+ return;
+
+ ThreadState* const thread_state = ThreadState::Current();
+ if (!thread_state->IsIncrementalMarking())
+ return;
+
+ // |value| is pointing to the start of a backing store.
+ HeapObjectHeader* header = HeapObjectHeader::FromPayload(value);
+ CHECK(header->IsMarked());
+ DCHECK(thread_state->CurrentVisitor());
+ // No weak handling for write barriers. Modifying weakly reachable objects
+ // strongifies them for the current cycle.
+
+ GCInfo::From(header->GcInfoIndex())
+ .trace(thread_state->CurrentVisitor(), value);
+}
+
+constexpr size_t MarkingVisitor::RecentlyRetracedWeakContainers::kMaxCacheSize;
+
+bool MarkingVisitor::RecentlyRetracedWeakContainers::Contains(
+ const HeapObjectHeader* header) {
+ return std::find(recently_retraced_cache_.begin(),
+ recently_retraced_cache_.end(),
+ header) != recently_retraced_cache_.end();
+}
+
+void MarkingVisitor::RecentlyRetracedWeakContainers::Insert(
+ const HeapObjectHeader* header) {
+ last_used_index_ = (last_used_index_ + 1) % kMaxCacheSize;
+ if (recently_retraced_cache_.size() <= last_used_index_)
+ recently_retraced_cache_.push_back(header);
+ else
+ recently_retraced_cache_[last_used_index_] = header;
+}
+
+MarkingVisitor::MarkingVisitor(ThreadState* state, MarkingMode marking_mode)
+ : MarkingVisitorBase(state, marking_mode, WorklistTaskId::MutatorThread) {
+ DCHECK(state->InAtomicMarkingPause());
+ DCHECK(state->CheckThread());
+}
+
+void MarkingVisitor::ConservativelyMarkAddress(BasePage* page,
+ ConstAddress address) {
+#if DCHECK_IS_ON()
+ DCHECK(page->Contains(address));
+#endif
+ HeapObjectHeader* const header =
+ page->IsLargeObjectPage()
+ ? static_cast<LargeObjectPage*>(page)->ObjectHeader()
+ : static_cast<NormalPage*>(page)->ConservativelyFindHeaderFromAddress(
+ address);
+ if (!header)
+ return;
+ if (header->IsMarked()) {
+ // Weak containers found through conservative GC need to be strongified. In
+ // case the container was previously marked and weakly traced, it should be
+ // retraced strongly now. Previously marked/traced weak containers are
+    // marked using the |weak_containers_worklist_|. Other marked objects can
+    // be skipped.
+ if (weak_containers_worklist_->Contains(header) &&
+ !recently_retraced_weak_containers_.Contains(header)) {
+ DCHECK(!header->IsInConstruction());
+ // Record the weak container backing store to avoid retracing it again.
+ recently_retraced_weak_containers_.Insert(header);
+ marking_worklist_.Push(
+ {header->Payload(), GCInfo::From(header->GcInfoIndex()).trace});
+ }
+ return;
+ }
+
+ // Simple case for fully constructed objects. This just adds the object to the
+ // regular marking worklist.
+ if (!header->IsInConstruction()) {
+ MarkHeader(header,
+ {header->Payload(), GCInfo::From(header->GcInfoIndex()).trace});
+ return;
+ }
+
+ // This case is reached for not-fully-constructed objects with vtables.
+ // We can differentiate multiple cases:
+ // 1. No vtable set up. Example:
+ // class A : public GarbageCollected<A> { virtual void f() = 0; };
+ // class B : public A { B() : A(foo()) {}; };
+ // The vtable for A is not set up if foo() allocates and triggers a GC.
+ //
+ // 2. Vtables properly set up (non-mixin case).
+ // 3. Vtables not properly set up (mixin) if GC is allowed during mixin
+ // construction.
+ //
+ // We use a simple conservative approach for these cases as they are not
+ // performance critical.
+ MarkHeaderNoTracing(header);
+ Address* payload = reinterpret_cast<Address*>(header->Payload());
+ const size_t payload_size = header->PayloadSize();
+ for (size_t i = 0; i < (payload_size / sizeof(Address)); ++i) {
+ Address maybe_ptr = payload[i];
+#if defined(MEMORY_SANITIZER)
+    // |payload| may be uninitialized by design or just contain padding bytes.
+    // Copy into an unpoisoned local variable for conservative marking so that
+    // the original MSAN state of |payload| is preserved.
+ __msan_unpoison(&maybe_ptr, sizeof(maybe_ptr));
+#endif
+ if (maybe_ptr)
+ Heap().CheckAndMarkPointer(this, maybe_ptr);
+ }
+ AccountMarkedBytes(header);
+}
+
+void MarkingVisitor::FlushMarkingWorklists() {
+ marking_worklist_.FlushToGlobal();
+ write_barrier_worklist_.FlushToGlobal();
+}
+
+ConcurrentMarkingVisitor::ConcurrentMarkingVisitor(ThreadState* state,
+ MarkingMode marking_mode,
+ int task_id)
+ : MarkingVisitorBase(state, marking_mode, task_id),
+ not_safe_to_concurrently_trace_worklist_(
+ Heap().GetNotSafeToConcurrentlyTraceWorklist(),
+ task_id),
+ previously_not_fully_constructed_worklist_(
+ Heap().GetPreviouslyNotFullyConstructedWorklist(),
+ task_id) {
+ DCHECK(!state->CheckThread());
+ DCHECK_NE(WorklistTaskId::MutatorThread, task_id);
+}
+
+ConcurrentMarkingVisitor::~ConcurrentMarkingVisitor() {
+ // ConcurrentMarkingVisitor should report all its marked_bytes before dying.
+ DCHECK_EQ(marked_bytes_, last_marked_bytes_);
+}
+
+void ConcurrentMarkingVisitor::FlushWorklists() {
+ // Flush marking worklists for further marking on the mutator thread.
+ marking_worklist_.FlushToGlobal();
+ write_barrier_worklist_.FlushToGlobal();
+ not_fully_constructed_worklist_.FlushToGlobal();
+ previously_not_fully_constructed_worklist_.FlushToGlobal();
+ weak_callback_worklist_.FlushToGlobal();
+ discovered_ephemeron_pairs_worklist_.FlushToGlobal();
+ ephemeron_pairs_to_process_worklist_.FlushToGlobal();
+ not_safe_to_concurrently_trace_worklist_.FlushToGlobal();
+ // Flush compaction worklists.
+ if (marking_mode_ == kGlobalMarkingWithCompaction) {
+ movable_reference_worklist_.FlushToGlobal();
+ }
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/marking_visitor.h b/chromium/third_party/blink/renderer/platform/heap/impl/marking_visitor.h
new file mode 100644
index 00000000000..df17247076f
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/marking_visitor.h
@@ -0,0 +1,281 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_MARKING_VISITOR_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_MARKING_VISITOR_H_
+
+#include "third_party/blink/renderer/platform/heap/heap.h"
+#include "third_party/blink/renderer/platform/heap/heap_buildflags.h"
+#include "third_party/blink/renderer/platform/heap/impl/heap_page.h"
+#include "third_party/blink/renderer/platform/heap/visitor.h"
+
+namespace blink {
+
+namespace {
+
+ALWAYS_INLINE bool IsHashTableDeleteValue(const void* value) {
+ return value == reinterpret_cast<void*>(-1);
+}
+
+} // namespace
+
+class BasePage;
+class HeapAllocator;
+enum class TracenessMemberConfiguration;
+template <typename T, TracenessMemberConfiguration tracenessConfiguration>
+class MemberBase;
+
+// Base visitor used to mark Oilpan objects on any thread.
+class PLATFORM_EXPORT MarkingVisitorBase : public Visitor {
+ public:
+ enum MarkingMode {
+ // Default visitor mode used for regular marking.
+ kGlobalMarking,
+ // Visitor mode recording slots for compaction during marking.
+ kGlobalMarkingWithCompaction,
+ };
+
+ void VisitWeakContainer(const void*,
+ const void* const*,
+ TraceDescriptor,
+ TraceDescriptor,
+ WeakCallback,
+ const void*) final;
+ void VisitEphemeron(const void*, TraceDescriptor) final;
+
+ // Marks an object dynamically using any address within its body and adds a
+ // tracing callback for processing of the object. The object is not allowed
+ // to be in construction.
+ void DynamicallyMarkAddress(ConstAddress);
+
+ void RegisterMovableSlot(const void* const*) final;
+
+ void RegisterWeakCallback(WeakCallback, const void*) final;
+
+ // Flush private segments remaining in visitor's worklists to global pools.
+ void FlushCompactionWorklists();
+
+ size_t marked_bytes() const { return marked_bytes_; }
+
+ int task_id() const { return task_id_; }
+
+ // Account for object's live bytes. Should only be adjusted when
+ // actually tracing through an already marked object. Logically, this means
+ // accounting for the bytes when transitioning from grey to black.
+ ALWAYS_INLINE void AccountMarkedBytes(HeapObjectHeader*);
+ ALWAYS_INLINE void AccountMarkedBytes(size_t);
+
+ protected:
+ MarkingVisitorBase(ThreadState*, MarkingMode, int task_id);
+ ~MarkingVisitorBase() override = default;
+
+ void Visit(const void* object, TraceDescriptor desc) final;
+ void VisitWeak(const void*, const void*, TraceDescriptor, WeakCallback) final;
+
+ // Marks an object and adds a tracing callback for processing of the object.
+ void MarkHeader(HeapObjectHeader*, const TraceDescriptor&);
+  // Tries to mark an object without tracing. Returns true if the object was
+  // not marked before this call, i.e. the call transitioned it to marked.
+ bool MarkHeaderNoTracing(HeapObjectHeader*);
+
+ MarkingWorklist::View marking_worklist_;
+ WriteBarrierWorklist::View write_barrier_worklist_;
+ NotFullyConstructedWorklist::View not_fully_constructed_worklist_;
+ WeakCallbackWorklist::View weak_callback_worklist_;
+ MovableReferenceWorklist::View movable_reference_worklist_;
+ EphemeronPairsWorklist::View discovered_ephemeron_pairs_worklist_;
+ EphemeronPairsWorklist::View ephemeron_pairs_to_process_worklist_;
+ WeakContainersWorklist* const weak_containers_worklist_;
+ size_t marked_bytes_ = 0;
+ const MarkingMode marking_mode_;
+ int task_id_;
+};
+
+ALWAYS_INLINE void MarkingVisitorBase::AccountMarkedBytes(
+ HeapObjectHeader* header) {
+ AccountMarkedBytes(
+ header->IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>()
+ ? static_cast<LargeObjectPage*>(PageFromObject(header))->ObjectSize()
+ : header->size<HeapObjectHeader::AccessMode::kAtomic>());
+}
+
+ALWAYS_INLINE void MarkingVisitorBase::AccountMarkedBytes(size_t marked_bytes) {
+ marked_bytes_ += marked_bytes;
+}
+
+ALWAYS_INLINE bool MarkingVisitorBase::MarkHeaderNoTracing(
+ HeapObjectHeader* header) {
+ DCHECK(header);
+ DCHECK(State()->IsIncrementalMarking() || State()->InAtomicMarkingPause());
+ // A GC should only mark the objects that belong in its heap.
+ DCHECK_EQ(State(),
+ PageFromObject(header->Payload())->Arena()->GetThreadState());
+  // Never mark free-space objects. Marking one would, e.g., indicate that a
+  // promptly freed backing store is being marked.
+ DCHECK(!header->IsFree());
+
+ return header->TryMark<HeapObjectHeader::AccessMode::kAtomic>();
+}
+
+inline void MarkingVisitorBase::Visit(const void* object,
+ TraceDescriptor desc) {
+ DCHECK(object);
+ MarkHeader(HeapObjectHeader::FromPayload(desc.base_object_payload), desc);
+}
+
+// Marks an object and adds a tracing callback for processing of the object.
+ALWAYS_INLINE void MarkingVisitorBase::MarkHeader(HeapObjectHeader* header,
+ const TraceDescriptor& desc) {
+ DCHECK(header);
+ DCHECK(desc.callback);
+
+ if (header->IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>()) {
+ not_fully_constructed_worklist_.Push(header->Payload());
+ } else if (MarkHeaderNoTracing(header)) {
+ marking_worklist_.Push(desc);
+ }
+}
+
+// Visitor used to mark Oilpan objects on the main thread. Also implements
+// various sorts of write barriers that should only be called from the main
+// thread.
+class PLATFORM_EXPORT MarkingVisitor : public MarkingVisitorBase {
+ public:
+ static void GenerationalBarrier(Address slot, ThreadState* state);
+
+  // Eagerly traces an already marked backing store, ensuring that all its
+  // children are discovered by the marker. The barrier bails out if marking
+  // is off, and for individual reachable objects that are already marked. The
+  // barrier uses the callback function through GCInfo, so it will not inline
+  // any templated type-specific code.
+ static void TraceMarkedBackingStore(const void* value);
+
+ MarkingVisitor(ThreadState*, MarkingMode);
+ ~MarkingVisitor() override = default;
+
+ // Conservatively marks an object if pointed to by Address. The object may
+ // be in construction as the scan is conservative without relying on a
+ // Trace method.
+ void ConservativelyMarkAddress(BasePage*, ConstAddress);
+
+ void FlushMarkingWorklists();
+
+ private:
+  // Write barrier that adds the value referenced by |slot| to the set of
+  // marked objects. The barrier bails out if marking is off or the object is
+  // not yet marked. Returns true if the value has been marked on this call.
+ template <typename T>
+ static bool WriteBarrier(T** slot);
+
+ // Exact version of the marking and generational write barriers.
+ static bool WriteBarrierSlow(void*);
+ static void GenerationalBarrierSlow(Address, ThreadState*);
+ static bool MarkValue(void*, BasePage*, ThreadState*);
+ static void TraceMarkedBackingStoreSlow(const void*);
+
+ // Weak containers are strongly retraced during conservative stack scanning.
+ // Stack scanning happens once per GC at the start of the atomic pause.
+ // Because the visitor is not retained between GCs, there is no need to clear
+ // the set at the end of GC.
+ class RecentlyRetracedWeakContainers {
+ static constexpr size_t kMaxCacheSize = 8;
+
+ public:
+ bool Contains(const HeapObjectHeader*);
+ void Insert(const HeapObjectHeader*);
+
+ private:
+ std::vector<const HeapObjectHeader*> recently_retraced_cache_;
+ size_t last_used_index_ = -1;
+ } recently_retraced_weak_containers_;
+
+ friend class HeapAllocator;
+ template <typename T, TracenessMemberConfiguration tracenessConfiguration>
+ friend class MemberBase;
+};
+
+// static
+template <typename T>
+ALWAYS_INLINE bool MarkingVisitor::WriteBarrier(T** slot) {
+#if BUILDFLAG(BLINK_HEAP_YOUNG_GENERATION)
+ void* value = *slot;
+ if (!value || IsHashTableDeleteValue(value))
+ return false;
+
+ // Dijkstra barrier if concurrent marking is in progress.
+ BasePage* value_page = PageFromObject(value);
+ ThreadState* thread_state = value_page->thread_state();
+
+ if (UNLIKELY(thread_state->IsIncrementalMarking()))
+ return MarkValue(value, value_page, thread_state);
+
+ GenerationalBarrier(reinterpret_cast<Address>(slot), thread_state);
+ return false;
+#else
+ if (!ThreadState::IsAnyIncrementalMarking())
+ return false;
+
+ // Avoid any further checks and dispatch to a call at this point. Aggressive
+ // inlining otherwise pollutes the regular execution paths.
+ return WriteBarrierSlow(*slot);
+#endif
+}
+
+// static
+ALWAYS_INLINE void MarkingVisitor::GenerationalBarrier(Address slot,
+ ThreadState* state) {
+  // First, check whether the source object is in the last allocated region of
+  // the heap.
+ if (LIKELY(state->Heap().IsInLastAllocatedRegion(slot)))
+ return;
+ if (UNLIKELY(state->IsOnStack(slot)))
+ return;
+ GenerationalBarrierSlow(slot, state);
+}
+
+// static
+ALWAYS_INLINE void MarkingVisitor::TraceMarkedBackingStore(const void* value) {
+ if (!ThreadState::IsAnyIncrementalMarking())
+ return;
+
+ // Avoid any further checks and dispatch to a call at this point. Aggressive
+ // inlining otherwise pollutes the regular execution paths.
+ TraceMarkedBackingStoreSlow(value);
+}
+
+// Visitor used to mark Oilpan objects on concurrent threads.
+class PLATFORM_EXPORT ConcurrentMarkingVisitor : public MarkingVisitorBase {
+ public:
+ ConcurrentMarkingVisitor(ThreadState*, MarkingMode, int);
+ ~ConcurrentMarkingVisitor() override;
+
+ virtual void FlushWorklists();
+
+ bool IsConcurrent() const override { return true; }
+
+ bool DeferredTraceIfConcurrent(TraceDescriptor desc,
+ size_t bailout_size) override {
+ not_safe_to_concurrently_trace_worklist_.Push({desc, bailout_size});
+    // The object is bailed out from concurrent marking, so update
+    // marked_bytes_ to reflect only the bytes that were actually traced.
+    // The deducted bytes are added to the mutator thread marking
+    // visitor's marked_bytes_ count when the object is popped from
+    // the bailout worklist.
+ marked_bytes_ -= bailout_size;
+ return true;
+ }
+
+ size_t RecentlyMarkedBytes() {
+ return marked_bytes_ - std::exchange(last_marked_bytes_, marked_bytes_);
+ }
+
+ private:
+ NotSafeToConcurrentlyTraceWorklist::View
+ not_safe_to_concurrently_trace_worklist_;
+ NotFullyConstructedWorklist::View previously_not_fully_constructed_worklist_;
+ size_t last_marked_bytes_ = 0;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_MARKING_VISITOR_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/member.h b/chromium/third_party/blink/renderer/platform/heap/impl/member.h
new file mode 100644
index 00000000000..caf60bf1b23
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/member.h
@@ -0,0 +1,577 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_MEMBER_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_MEMBER_H_
+
+#include "third_party/blink/renderer/platform/heap/heap.h"
+#include "third_party/blink/renderer/platform/heap/heap_buildflags.h"
+#include "third_party/blink/renderer/platform/heap/impl/heap_page.h"
+#include "third_party/blink/renderer/platform/heap/impl/marking_visitor.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+#include "third_party/blink/renderer/platform/wtf/hash_functions.h"
+#include "third_party/blink/renderer/platform/wtf/hash_traits.h"
+
+namespace WTF {
+template <typename P, typename Traits, typename Allocator>
+class MemberConstructTraits;
+} // namespace WTF
+
+namespace blink {
+
+template <typename T>
+class Persistent;
+
+enum class TracenessMemberConfiguration {
+ kTraced,
+ kUntraced,
+};
+
+template <typename T,
+ TracenessMemberConfiguration tracenessConfiguration =
+ TracenessMemberConfiguration::kTraced>
+class MemberPointerVerifier {
+ public:
+ MemberPointerVerifier() = default;
+
+ void SaveCreationThreadState(T* pointer) {
+ if (tracenessConfiguration == TracenessMemberConfiguration::kUntraced) {
+ creation_thread_state_ = nullptr;
+ } else {
+ creation_thread_state_ = ThreadState::Current();
+ // Members should be created in an attached thread. But an empty
+ // value Member may be created on an unattached thread by a heap
+ // collection iterator.
+ DCHECK(creation_thread_state_ || !pointer);
+ }
+ }
+
+ void CheckPointer(T* pointer) {
+ if (!pointer)
+ return;
+
+ ThreadState* current = ThreadState::Current();
+ DCHECK(current);
+ if (tracenessConfiguration != TracenessMemberConfiguration::kUntraced) {
+ // creation_thread_state_ may be null when this is used in a heap
+ // collection which initialized the Member with memset and the
+ // constructor wasn't called.
+ if (creation_thread_state_) {
+        // Member must point to an object on the creating thread's heap.
+ DCHECK(creation_thread_state_->IsOnThreadHeap(pointer));
+        // The current thread's heap must be the creation thread's heap.
+ DCHECK_EQ(&current->Heap(), &creation_thread_state_->Heap());
+ } else {
+ DCHECK(current->IsOnThreadHeap(pointer));
+ }
+ }
+
+ if (current->IsSweepingInProgress()) {
+ // During sweeping the object start bitmap is invalid. Check the header
+ // when the type is available and not pointing to a mixin.
+ if (IsFullyDefined<T>::value && !IsGarbageCollectedMixin<T>::value)
+ HeapObjectHeader::CheckFromPayload(pointer);
+ } else {
+ DCHECK(HeapObjectHeader::FromInnerAddress<
+ HeapObjectHeader::AccessMode::kAtomic>(pointer));
+ }
+ }
+
+ private:
+ const ThreadState* creation_thread_state_;
+};
+
+template <typename T,
+ TracenessMemberConfiguration tracenessConfiguration =
+ TracenessMemberConfiguration::kTraced>
+class MemberBase {
+ DISALLOW_NEW();
+
+ public:
+ MemberBase() : raw_(nullptr) { SaveCreationThreadState(); }
+
+ MemberBase(std::nullptr_t) : raw_(nullptr) { SaveCreationThreadState(); }
+
+ explicit MemberBase(T* raw) : raw_(raw) {
+ SaveCreationThreadState();
+ CheckPointer();
+ // No write barrier for initializing stores.
+ }
+
+ explicit MemberBase(T& raw) : raw_(&raw) {
+ SaveCreationThreadState();
+ CheckPointer();
+ // No write barrier for initializing stores.
+ }
+
+ MemberBase(WTF::HashTableDeletedValueType)
+ : raw_(reinterpret_cast<T*>(kHashTableDeletedRawValue)) {
+ SaveCreationThreadState();
+ }
+
+ MemberBase(const MemberBase& other) : raw_(other) {
+ SaveCreationThreadState();
+ CheckPointer();
+ // No write barrier for initializing stores.
+ }
+
+ template <typename U>
+ MemberBase(const Persistent<U>& other) : raw_(other) {
+ SaveCreationThreadState();
+ CheckPointer();
+ // No write barrier for initializing stores.
+ }
+
+ template <typename U>
+ MemberBase(const MemberBase<U>& other) : raw_(other) {
+ SaveCreationThreadState();
+ CheckPointer();
+ // No write barrier for initializing stores.
+ }
+
+ template <typename U>
+ MemberBase& operator=(const Persistent<U>& other) {
+ SetRaw(other);
+ CheckPointer();
+ WriteBarrier();
+ return *this;
+ }
+
+ MemberBase& operator=(const MemberBase& other) {
+ SetRaw(other);
+ CheckPointer();
+ WriteBarrier();
+ return *this;
+ }
+
+ template <typename U>
+ MemberBase& operator=(const MemberBase<U>& other) {
+ SetRaw(other);
+ CheckPointer();
+ WriteBarrier();
+ return *this;
+ }
+
+ template <typename U>
+ MemberBase& operator=(U* other) {
+ SetRaw(other);
+ CheckPointer();
+ WriteBarrier();
+ return *this;
+ }
+
+ MemberBase& operator=(WTF::HashTableDeletedValueType) {
+ SetRaw(reinterpret_cast<T*>(-1));
+ return *this;
+ }
+
+ MemberBase& operator=(std::nullptr_t) {
+ SetRaw(nullptr);
+ return *this;
+ }
+
+ void Swap(MemberBase<T>& other) {
+ T* tmp = GetRaw();
+ SetRaw(other.GetRaw());
+ other.SetRaw(tmp);
+ CheckPointer();
+ WriteBarrier();
+ other.WriteBarrier();
+ }
+
+ explicit operator bool() const { return GetRaw(); }
+ operator T*() const { return GetRaw(); }
+ T* operator->() const { return GetRaw(); }
+ T& operator*() const { return *GetRaw(); }
+
+ T* Get() const { return GetRaw(); }
+
+ void Clear() { SetRaw(nullptr); }
+
+ T* Release() {
+ T* result = GetRaw();
+ SetRaw(nullptr);
+ return result;
+ }
+
+ static bool IsMemberHashTableDeletedValue(const T* t) {
+ return t == reinterpret_cast<T*>(kHashTableDeletedRawValue);
+ }
+
+ bool IsHashTableDeletedValue() const {
+ return IsMemberHashTableDeletedValue(GetRaw());
+ }
+
+ protected:
+ static constexpr intptr_t kHashTableDeletedRawValue = -1;
+
+ enum class AtomicCtorTag { Atomic };
+
+ // MemberBase ctors that use atomic write to set raw_.
+
+ MemberBase(AtomicCtorTag, T* raw) {
+ SetRaw(raw);
+ SaveCreationThreadState();
+ CheckPointer();
+ // No write barrier for initializing stores.
+ }
+
+ MemberBase(AtomicCtorTag, T& raw) {
+ SetRaw(&raw);
+ SaveCreationThreadState();
+ CheckPointer();
+ // No write barrier for initializing stores.
+ }
+
+ void WriteBarrier() const {
+ MarkingVisitor::WriteBarrier(const_cast<std::remove_const_t<T>**>(&raw_));
+ }
+
+ void CheckPointer() {
+#if DCHECK_IS_ON()
+ // Should not be called for deleted hash table values. A value can be
+ // propagated here if a MemberBase containing the deleted value is copied.
+ if (IsHashTableDeletedValue())
+ return;
+ pointer_verifier_.CheckPointer(GetRaw());
+#endif // DCHECK_IS_ON()
+ }
+
+ void SaveCreationThreadState() {
+#if DCHECK_IS_ON()
+ pointer_verifier_.SaveCreationThreadState(GetRaw());
+#endif // DCHECK_IS_ON()
+ }
+
+ ALWAYS_INLINE void SetRaw(T* raw) {
+ if (tracenessConfiguration == TracenessMemberConfiguration::kUntraced)
+ raw_ = raw;
+ else
+ WTF::AsAtomicPtr(&raw_)->store(raw, std::memory_order_relaxed);
+ }
+ ALWAYS_INLINE T* GetRaw() const { return raw_; }
+
+ private:
+ // Thread safe version of Get() for marking visitors.
+ // This is used to prevent data races between concurrent marking visitors
+ // and writes on the main thread.
+ const T* GetSafe() const {
+    // TODO(omerkatz): Replace this cast with std::atomic_ref (C++20) once it
+    // becomes available.
+ return WTF::AsAtomicPtr(&raw_)->load(std::memory_order_relaxed);
+ }
+
+ T* raw_;
+#if DCHECK_IS_ON()
+ MemberPointerVerifier<T, tracenessConfiguration> pointer_verifier_;
+#endif // DCHECK_IS_ON()
+
+ friend class Visitor;
+};
+
+// Members are used in classes to contain strong pointers to other Oilpan
+// heap-allocated objects.
+// All Member fields of a class must be traced in the class' Trace method.
+// During the mark phase of the GC all live objects are marked as live, and
+// all Member fields of a live object are traced and thereby marked as live as
+// well.
+template <typename T>
+class Member : public MemberBase<T, TracenessMemberConfiguration::kTraced> {
+ DISALLOW_NEW();
+ typedef MemberBase<T, TracenessMemberConfiguration::kTraced> Parent;
+
+ public:
+ Member() : Parent() {}
+ Member(std::nullptr_t) : Parent(nullptr) {}
+ Member(T* raw) : Parent(raw) {}
+ Member(T& raw) : Parent(raw) {}
+ Member(WTF::HashTableDeletedValueType x) : Parent(x) {}
+
+ Member(const Member& other) : Parent(other) {}
+
+ template <typename U>
+ Member(const Member<U>& other) : Parent(other) {}
+
+ template <typename U>
+ Member(const Persistent<U>& other) : Parent(other) {}
+
+ template <typename U>
+ Member& operator=(const Persistent<U>& other) {
+ Parent::operator=(other);
+ return *this;
+ }
+
+ Member& operator=(const Member& other) {
+ Parent::operator=(other);
+ return *this;
+ }
+
+ template <typename U>
+ Member& operator=(const Member<U>& other) {
+ Parent::operator=(other);
+ return *this;
+ }
+
+ template <typename U>
+ Member& operator=(const WeakMember<U>& other) {
+ Parent::operator=(other);
+ return *this;
+ }
+
+ template <typename U>
+ Member& operator=(U* other) {
+ Parent::operator=(other);
+ return *this;
+ }
+
+ Member& operator=(WTF::HashTableDeletedValueType x) {
+ Parent::operator=(x);
+ return *this;
+ }
+
+ Member& operator=(std::nullptr_t) {
+ Parent::operator=(nullptr);
+ return *this;
+ }
+
+ private:
+ using typename Parent::AtomicCtorTag;
+ Member(AtomicCtorTag atomic, T* raw) : Parent(atomic, raw) {}
+ Member(AtomicCtorTag atomic, T& raw) : Parent(atomic, raw) {}
+
+ template <typename P, typename Traits, typename Allocator>
+ friend class WTF::MemberConstructTraits;
+};
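+
+// A minimal usage sketch of Member (illustrative only; Child and Parent are
+// hypothetical names, and the exact Trace() signature may vary between Blink
+// revisions):
+//
+//   class Child : public GarbageCollected<Child> {
+//    public:
+//     void Trace(Visitor*) const {}
+//   };
+//
+//   class Parent : public GarbageCollected<Parent> {
+//    public:
+//     void Trace(Visitor* visitor) const { visitor->Trace(child_); }
+//
+//    private:
+//     Member<Child> child_;  // Keeps the Child alive while Parent is alive.
+//   };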
+
+// WeakMember is similar to Member in that it is used to point to other Oilpan
+// heap-allocated objects.
+// However, instead of creating a strong pointer to the object, the WeakMember
+// creates a weak pointer, which does not keep the pointee alive. Hence, if all
+// pointers to a heap-allocated object are weak, the object will be garbage
+// collected. At the time of GC the weak pointers are automatically set to
+// null.
+template <typename T>
+class WeakMember : public MemberBase<T, TracenessMemberConfiguration::kTraced> {
+ typedef MemberBase<T, TracenessMemberConfiguration::kTraced> Parent;
+
+ public:
+ WeakMember() : Parent() {}
+
+ WeakMember(std::nullptr_t) : Parent(nullptr) {}
+
+ WeakMember(T* raw) : Parent(raw) {}
+
+ WeakMember(WTF::HashTableDeletedValueType x) : Parent(x) {}
+
+ template <typename U>
+ WeakMember(const Persistent<U>& other) : Parent(other) {}
+
+ template <typename U>
+ WeakMember(const Member<U>& other) : Parent(other) {}
+
+ template <typename U>
+ WeakMember& operator=(const Persistent<U>& other) {
+ Parent::operator=(other);
+ return *this;
+ }
+
+ template <typename U>
+ WeakMember& operator=(const Member<U>& other) {
+ Parent::operator=(other);
+ return *this;
+ }
+
+ template <typename U>
+ WeakMember& operator=(U* other) {
+ Parent::operator=(other);
+ return *this;
+ }
+
+ WeakMember& operator=(std::nullptr_t) {
+ this->SetRaw(nullptr);
+ return *this;
+ }
+
+ private:
+ using typename Parent::AtomicCtorTag;
+ WeakMember(AtomicCtorTag atomic, T* raw) : Parent(atomic, raw) {}
+ WeakMember(AtomicCtorTag atomic, T& raw) : Parent(atomic, raw) {}
+
+ template <typename P, typename Traits, typename Allocator>
+ friend class WTF::MemberConstructTraits;
+};
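+
+// Illustrative sketch (Observer and Child are hypothetical names): a
+// WeakMember field is still traced in Trace(), but it does not keep its
+// pointee alive and is cleared automatically when the pointee is collected.
+//
+//   class Observer : public GarbageCollected<Observer> {
+//    public:
+//     void Trace(Visitor* visitor) const { visitor->Trace(target_); }
+//
+//    private:
+//     WeakMember<Child> target_;  // Does not keep Child alive.
+//   };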
+
+// UntracedMember is a pointer to an on-heap object that is not traced for some
+// reason. Please don't use this unless you understand what you're doing.
+// Basically, all pointers to on-heap objects must be stored in Persistent,
+// Member, or WeakMember; leaving raw pointers to on-heap objects is not
+// allowed. However, there can be scenarios where you have to use raw pointers
+// for some reason, and in that case you can use UntracedMember. Of course, it
+// must then be guaranteed that the pointed-to on-heap object is kept alive for
+// as long as the raw pointer points to it.
+template <typename T>
+class UntracedMember final
+ : public MemberBase<T, TracenessMemberConfiguration::kUntraced> {
+ typedef MemberBase<T, TracenessMemberConfiguration::kUntraced> Parent;
+
+ public:
+ UntracedMember() : Parent() {}
+
+ UntracedMember(std::nullptr_t) : Parent(nullptr) {}
+
+ UntracedMember(T* raw) : Parent(raw) {}
+
+ template <typename U>
+ UntracedMember(const Persistent<U>& other) : Parent(other) {}
+
+ template <typename U>
+ UntracedMember(const Member<U>& other) : Parent(other) {}
+
+ UntracedMember(WTF::HashTableDeletedValueType x) : Parent(x) {}
+
+ UntracedMember& operator=(const UntracedMember& other) {
+ this->SetRaw(other);
+ this->CheckPointer();
+ return *this;
+ }
+
+ template <typename U>
+ UntracedMember& operator=(const Persistent<U>& other) {
+ this->SetRaw(other);
+ this->CheckPointer();
+ return *this;
+ }
+
+ template <typename U>
+ UntracedMember& operator=(const Member<U>& other) {
+ this->SetRaw(other);
+ this->CheckPointer();
+ return *this;
+ }
+
+ template <typename U>
+ UntracedMember& operator=(U* other) {
+ this->SetRaw(other);
+ this->CheckPointer();
+ return *this;
+ }
+
+ UntracedMember& operator=(std::nullptr_t) {
+ this->SetRaw(nullptr);
+ return *this;
+ }
+};
+
+template <typename T>
+struct MemberTraceTraits {
+ STATIC_ONLY(MemberTraceTraits);
+
+ public:
+ static TraceDescriptor GetTraceDescriptor(const T* ref) {
+ return {ref, TraceTrait<T>::Trace};
+ }
+
+ static void Trace(Visitor* visitor, const void* ref) {
+ visitor->Trace(*static_cast<const T*>(ref));
+ }
+};
+
+template <typename T>
+struct TraceTrait<Member<T>> : public MemberTraceTraits<Member<T>> {};
+
+template <typename T>
+struct TraceTrait<WeakMember<T>> : public MemberTraceTraits<WeakMember<T>> {};
+
+} // namespace blink
+
+namespace WTF {
+
+// MemberHash, which forwards to PtrHash, is the default hash for hash tables
+// with Member<>-derived elements.
+template <typename T>
+struct MemberHash : PtrHash<T> {
+ STATIC_ONLY(MemberHash);
+ template <typename U>
+ static unsigned GetHash(const U& key) {
+ return PtrHash<T>::GetHash(key);
+ }
+ template <typename U, typename V>
+ static bool Equal(const U& a, const V& b) {
+ return a == b;
+ }
+};
+
+template <typename T>
+struct DefaultHash<blink::Member<T>> {
+ STATIC_ONLY(DefaultHash);
+ using Hash = MemberHash<T>;
+};
+
+template <typename T>
+struct DefaultHash<blink::WeakMember<T>> {
+ STATIC_ONLY(DefaultHash);
+ using Hash = MemberHash<T>;
+};
+
+template <typename T>
+struct DefaultHash<blink::UntracedMember<T>> {
+ STATIC_ONLY(DefaultHash);
+ using Hash = MemberHash<T>;
+};
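+
+// With these specializations, Member and WeakMember can be used directly as
+// elements of Oilpan heap collections. A rough sketch (Child is a hypothetical
+// garbage-collected type):
+//
+//   HeapHashSet<Member<Child>> strong_set;    // Keeps entries alive.
+//   HeapHashSet<WeakMember<Child>> weak_set;  // Entries are removed when
+//                                             // their pointees are collected.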
+
+template <typename T>
+struct IsTraceable<blink::Member<T>> {
+ STATIC_ONLY(IsTraceable);
+ static const bool value = true;
+};
+
+template <typename T>
+struct IsWeak<blink::WeakMember<T>> : std::true_type {};
+
+template <typename T>
+struct IsTraceable<blink::WeakMember<T>> {
+ STATIC_ONLY(IsTraceable);
+ static const bool value = true;
+};
+
+template <typename T, typename Traits, typename Allocator>
+class MemberConstructTraits {
+ STATIC_ONLY(MemberConstructTraits);
+
+ public:
+ template <typename... Args>
+ static T* Construct(void* location, Args&&... args) {
+ return new (NotNull, location) T(std::forward<Args>(args)...);
+ }
+
+ static void NotifyNewElement(T* element) { element->WriteBarrier(); }
+
+ template <typename... Args>
+ static T* ConstructAndNotifyElement(void* location, Args&&... args) {
+    // ConstructAndNotifyElement updates an existing Member which might
+    // also be concurrently traced while we update it. The regular ctors
+    // for Member don't use an atomic write, which can lead to data races.
+ T* object = Construct(location, T::AtomicCtorTag::Atomic,
+ std::forward<Args>(args)...);
+ NotifyNewElement(object);
+ return object;
+ }
+
+ static void NotifyNewElements(T* array, size_t len) {
+ while (len-- > 0) {
+ array->WriteBarrier();
+ array++;
+ }
+ }
+};
+
+template <typename T, typename Traits, typename Allocator>
+class ConstructTraits<blink::Member<T>, Traits, Allocator>
+ : public MemberConstructTraits<blink::Member<T>, Traits, Allocator> {};
+
+template <typename T, typename Traits, typename Allocator>
+class ConstructTraits<blink::WeakMember<T>, Traits, Allocator>
+ : public MemberConstructTraits<blink::WeakMember<T>, Traits, Allocator> {};
+
+} // namespace WTF
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_MEMBER_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/name_traits.h b/chromium/third_party/blink/renderer/platform/heap/impl/name_traits.h
new file mode 100644
index 00000000000..c44305ff234
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/name_traits.h
@@ -0,0 +1,62 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_NAME_TRAITS_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_NAME_TRAITS_H_
+
+#include <cstring>
+
+#include "build/build_config.h"
+#include "third_party/blink/renderer/platform/bindings/name_client.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+#include "third_party/blink/renderer/platform/wtf/type_traits.h"
+
+namespace blink {
+
+struct HeapObjectName {
+ const char* value;
+ bool name_is_hidden;
+};
+
+using NameCallback = HeapObjectName (*)(const void*);
+
+template <typename T>
+class NameTrait {
+ STATIC_ONLY(NameTrait);
+
+ public:
+ static HeapObjectName GetName(const void* obj) {
+ return GetNameFor(static_cast<const T*>(obj));
+ }
+
+ private:
+ static HeapObjectName GetNameFor(const NameClient* wrapper_tracable) {
+ return {wrapper_tracable->NameInHeapSnapshot(), false};
+ }
+
+ static HeapObjectName GetNameFor(...) {
+ if (NameClient::HideInternalName())
+ return {"InternalNode", true};
+
+ DCHECK(!NameClient::HideInternalName());
+ static const char* leaky_class_name = nullptr;
+ if (leaky_class_name)
+ return {leaky_class_name, false};
+
+    // Parse a string of the form:
+ // const char *WTF::GetStringWithTypeName<TYPE>() [T = TYPE]
+ // Note that this only works on clang or GCC builds.
+ const std::string raw(WTF::GetStringWithTypeName<T>());
+ const auto start_pos = raw.rfind("T = ") + 4;
+ DCHECK(std::string::npos != start_pos);
+ const auto len = raw.length() - start_pos - 1;
+ const std::string name = raw.substr(start_pos, len).c_str();
+ leaky_class_name = strcpy(new char[name.length() + 1], name.c_str());
+ return {leaky_class_name, false};
+ }
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_NAME_TRAITS_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/page_bloom_filter.h b/chromium/third_party/blink/renderer/platform/heap/impl/page_bloom_filter.h
new file mode 100644
index 00000000000..19263ae332e
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/page_bloom_filter.h
@@ -0,0 +1,48 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PAGE_BLOOM_FILTER_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PAGE_BLOOM_FILTER_H_
+
+#include "third_party/blink/renderer/platform/heap/impl/heap_page.h"
+#include "third_party/blink/renderer/platform/wtf/bloom_filter.h"
+
+namespace blink {
+
+// Bloom filter for Oilpan pages. Uses a counting filter to support correct
+// deletion. It is needed by stack scanning to quickly check whether an
+// arbitrary address could point inside an Oilpan page. May return false
+// positives but never false negatives.
+class PageBloomFilter {
+ public:
+ void Add(Address address) {
+ filter_.Add(Hash(RoundToBlinkPageStart(address)));
+ }
+
+ void Remove(Address address) {
+ filter_.Remove(Hash(RoundToBlinkPageStart(address)));
+ }
+
+ bool MayContain(Address address) const {
+ return filter_.MayContain(Hash(RoundToBlinkPageStart(address)));
+ }
+
+ private:
+ static constexpr size_t kNumberOfEntriesLog2 = 12;
+ static constexpr size_t kNumberOfEntries = 1 << kNumberOfEntriesLog2;
+
+ static unsigned Hash(Address address) {
+ size_t value = reinterpret_cast<size_t>(address) >> kBlinkPageSizeLog2;
+ value ^= value >> kNumberOfEntriesLog2;
+ value ^= value >> (kNumberOfEntriesLog2 * 2);
+ value &= kNumberOfEntries - 1;
+ return static_cast<unsigned>(value);
+ }
+
+ WTF::BloomFilter<kNumberOfEntriesLog2> filter_;
+};
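+
+// Rough usage sketch (page_base and candidate_address are hypothetical
+// caller-provided values): pages register themselves on creation, deregister
+// on deletion, and stack scanning consults the filter before a precise lookup.
+//
+//   PageBloomFilter filter;
+//   filter.Add(page_base);                      // On page creation.
+//   if (filter.MayContain(candidate_address)) {
+//     // Possibly an Oilpan page; fall back to a precise page lookup.
+//   }
+//   filter.Remove(page_base);                   // On page deletion.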
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PAGE_BLOOM_FILTER_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/page_memory.cc b/chromium/third_party/blink/renderer/platform/heap/impl/page_memory.cc
new file mode 100644
index 00000000000..4d3ffc1440d
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/page_memory.cc
@@ -0,0 +1,137 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/heap/impl/page_memory.h"
+
+#include "base/allocator/partition_allocator/oom.h"
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "third_party/blink/renderer/platform/heap/heap.h"
+#include "third_party/blink/renderer/platform/wtf/assertions.h"
+#include "third_party/blink/renderer/platform/wtf/sanitizers.h"
+
+namespace blink {
+
+void MemoryRegion::Release() {
+ base::FreePages(base_, size_);
+}
+
+bool MemoryRegion::Commit() {
+ CHECK(base::RecommitSystemPages(base_, size_, base::PageReadWrite));
+ return base::TrySetSystemPagesAccess(base_, size_, base::PageReadWrite);
+}
+
+void MemoryRegion::Decommit() {
+ ASAN_UNPOISON_MEMORY_REGION(base_, size_);
+ base::DecommitSystemPages(base_, size_);
+ base::SetSystemPagesAccess(base_, size_, base::PageInaccessible);
+}
+
+PageMemoryRegion::PageMemoryRegion(Address base,
+ size_t size,
+ unsigned num_pages,
+ RegionTree* region_tree)
+ : MemoryRegion(base, size),
+ is_large_page_(num_pages == 1),
+ num_pages_(num_pages),
+ region_tree_(region_tree) {
+ DCHECK(region_tree);
+ region_tree_->Add(this);
+ for (size_t i = 0; i < kBlinkPagesPerRegion; ++i)
+ in_use_[i] = false;
+}
+
+PageMemoryRegion::~PageMemoryRegion() {
+ region_tree_->Remove(this);
+ Release();
+}
+
+void PageMemoryRegion::PageDeleted(Address page) {
+ MarkPageUnused(page);
+ if (!num_pages_.Decrement())
+ delete this;
+}
+
+// TODO(haraken): Like partitionOutOfMemoryWithLotsOfUncommitedPages(),
+// we should probably have a way to distinguish physical memory OOM from
+// virtual address space OOM.
+static NOINLINE void BlinkGCOutOfMemory() {
+ // TODO(lizeb): Add the real allocation size here as well.
+ OOM_CRASH(0);
+}
+
+PageMemoryRegion* PageMemoryRegion::Allocate(size_t size,
+ unsigned num_pages,
+ RegionTree* region_tree) {
+ // Round size up to the allocation granularity.
+ size = base::RoundUpToPageAllocationGranularity(size);
+ Address base = static_cast<Address>(
+ base::AllocPages(nullptr, size, kBlinkPageSize, base::PageInaccessible,
+ base::PageTag::kBlinkGC));
+ if (!base)
+ BlinkGCOutOfMemory();
+ return new PageMemoryRegion(base, size, num_pages, region_tree);
+}
+
+PageMemoryRegion* RegionTree::Lookup(ConstAddress address) {
+ auto it = set_.upper_bound(address);
+ // This check also covers set_.size() > 0, since for empty vectors it is
+ // guaranteed that begin() == end().
+ if (it == set_.begin())
+ return nullptr;
+ auto* result = std::next(it, -1)->second;
+ if (address < result->Base() + result->size())
+ return result;
+ return nullptr;
+}
+
+void RegionTree::Add(PageMemoryRegion* region) {
+ DCHECK(region);
+ auto result = set_.emplace(region->Base(), region);
+ DCHECK(result.second);
+}
+
+void RegionTree::Remove(PageMemoryRegion* region) {
+ DCHECK(region);
+ auto size = set_.erase(region->Base());
+ DCHECK_EQ(1u, size);
+}
+
+PageMemory::PageMemory(PageMemoryRegion* reserved, const MemoryRegion& writable)
+ : reserved_(reserved), writable_(writable) {
+ DCHECK(reserved->Contains(writable));
+
+ // Register the writable area of the memory as part of the LSan root set.
+ // Only the writable area is mapped and can contain C++ objects. Those
+ // C++ objects can contain pointers to objects outside of the heap and
+ // should therefore be part of the LSan root set.
+ __lsan_register_root_region(writable_.Base(), writable_.size());
+}
+
+PageMemory* PageMemory::SetupPageMemoryInRegion(PageMemoryRegion* region,
+ size_t page_offset,
+ size_t payload_size) {
+  // Set up the payload one guard page into the page memory.
+ Address payload_address = region->Base() + page_offset + BlinkGuardPageSize();
+ return new PageMemory(region, MemoryRegion(payload_address, payload_size));
+}
+
+PageMemory* PageMemory::Allocate(size_t payload_size, RegionTree* region_tree) {
+ DCHECK_GT(payload_size, 0u);
+
+ // Virtual memory allocation routines operate in OS page sizes.
+  // Round up the requested size to the nearest OS page size.
+ payload_size = base::RoundUpToSystemPage(payload_size);
+
+  // Overallocate by two OS pages to make room for a guard page at the
+  // beginning and at the end of the Blink heap page.
+ size_t allocation_size = payload_size + 2 * BlinkGuardPageSize();
+ PageMemoryRegion* page_memory_region =
+ PageMemoryRegion::AllocateLargePage(allocation_size, region_tree);
+ PageMemory* storage =
+ SetupPageMemoryInRegion(page_memory_region, 0, payload_size);
+ CHECK(storage->Commit());
+ return storage;
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/page_memory.h b/chromium/third_party/blink/renderer/platform/heap/impl/page_memory.h
new file mode 100644
index 00000000000..ee13b210d59
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/page_memory.h
@@ -0,0 +1,186 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PAGE_MEMORY_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PAGE_MEMORY_H_
+
+#include "base/atomic_ref_count.h"
+#include "base/containers/flat_map.h"
+#include "third_party/blink/renderer/platform/heap/heap.h"
+#include "third_party/blink/renderer/platform/heap/impl/heap_page.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+#include "third_party/blink/renderer/platform/wtf/assertions.h"
+
+namespace blink {
+
+class RegionTree;
+
+class MemoryRegion {
+ USING_FAST_MALLOC(MemoryRegion);
+
+ public:
+ MemoryRegion(Address base, size_t size) : base_(base), size_(size) {
+ DCHECK_GT(size, 0u);
+ }
+
+ bool Contains(ConstAddress addr) const {
+ return base_ <= addr && addr < (base_ + size_);
+ }
+
+ bool Contains(const MemoryRegion& other) const {
+ return Contains(other.base_) && Contains(other.base_ + other.size_ - 1);
+ }
+
+ void Release();
+ WARN_UNUSED_RESULT bool Commit();
+ void Decommit();
+
+ Address Base() const { return base_; }
+ size_t size() const { return size_; }
+
+ private:
+ Address base_;
+ size_t size_;
+};
+
+// A PageMemoryRegion represents a chunk of reserved virtual address
+// space containing a number of blink heap pages. On Windows, reserved
+// virtual address space can only be given back to the system as a
+// whole. The PageMemoryRegion allows us to do that by keeping track
+// of the number of pages using it in order to be able to release all
+// of the virtual address space when there are no more pages using it.
+class PageMemoryRegion : public MemoryRegion {
+ public:
+ ~PageMemoryRegion();
+
+ void PageDeleted(Address);
+
+ void MarkPageUsed(Address page) {
+ DCHECK(!in_use_[Index(page)]);
+ in_use_[Index(page)] = true;
+ }
+
+ void MarkPageUnused(Address page) { in_use_[Index(page)] = false; }
+
+ static PageMemoryRegion* AllocateLargePage(size_t size,
+ RegionTree* region_tree) {
+ return Allocate(size, 1, region_tree);
+ }
+
+ static PageMemoryRegion* AllocateNormalPages(RegionTree* region_tree) {
+ return Allocate(kBlinkPageSize * kBlinkPagesPerRegion, kBlinkPagesPerRegion,
+ region_tree);
+ }
+
+ BasePage* PageFromAddress(ConstAddress address) {
+ DCHECK(Contains(address));
+ if (!in_use_[Index(address)])
+ return nullptr;
+ if (is_large_page_)
+ return PageFromObject(Base());
+ return PageFromObject(address);
+ }
+
+ private:
+ PageMemoryRegion(Address base, size_t, unsigned num_pages, RegionTree*);
+
+ unsigned Index(ConstAddress address) const {
+ DCHECK(Contains(address));
+ if (is_large_page_)
+ return 0;
+ size_t offset = BlinkPageAddress(const_cast<Address>(address)) - Base();
+ DCHECK_EQ(offset % kBlinkPageSize, 0u);
+ return static_cast<unsigned>(offset / kBlinkPageSize);
+ }
+
+ static PageMemoryRegion* Allocate(size_t, unsigned num_pages, RegionTree*);
+
+ const bool is_large_page_;
+ // A thread owns a page, but not a region. Represent the in-use
+ // bitmap such that thread non-interference comes for free.
+ bool in_use_[kBlinkPagesPerRegion];
+ base::AtomicRefCount num_pages_;
+ RegionTree* const region_tree_;
+};
+
+// A RegionTree is a sorted mapping of PageMemoryRegions keyed by base
+// address, used to look up the region containing an arbitrary address.
+class RegionTree {
+ USING_FAST_MALLOC(RegionTree);
+
+ public:
+ void Add(PageMemoryRegion*);
+ void Remove(PageMemoryRegion*);
+ PageMemoryRegion* Lookup(ConstAddress);
+
+ private:
+  // Using flat_map improves locality (minimizing cache misses) and gives a
+  // balanced binary-search lookup.
+ base::flat_map<ConstAddress, PageMemoryRegion*> set_;
+};
+
+// Representation of the memory used for a Blink heap page.
+//
+// The representation keeps track of two memory regions:
+//
+// 1. The virtual memory reserved from the system, tracked so that all of
+//    the reserved virtual memory can be freed again. Multiple PageMemory
+// instances can share the same reserved memory region and
+// therefore notify the reserved memory region on destruction so
+// that the system memory can be given back when all PageMemory
+// instances for that memory are gone.
+//
+// 2. The writable memory (a sub-region of the reserved virtual
+// memory region) that is used for the actual heap page payload.
+//
+// Guard pages are created before and after the writable memory.
+class PageMemory {
+ USING_FAST_MALLOC(PageMemory);
+
+ public:
+ ~PageMemory() {
+ __lsan_unregister_root_region(writable_.Base(), writable_.size());
+ reserved_->PageDeleted(WritableStart());
+ }
+
+ WARN_UNUSED_RESULT bool Commit() {
+ reserved_->MarkPageUsed(WritableStart());
+ return writable_.Commit();
+ }
+
+ void Decommit() {
+ reserved_->MarkPageUnused(WritableStart());
+ writable_.Decommit();
+ }
+
+ void MarkUnused() { reserved_->MarkPageUnused(WritableStart()); }
+
+ PageMemoryRegion* Region() { return reserved_; }
+
+ Address WritableStart() { return writable_.Base(); }
+
+ static PageMemory* SetupPageMemoryInRegion(PageMemoryRegion*,
+ size_t page_offset,
+ size_t payload_size);
+
+ // Allocate a virtual address space for one blink page with the
+ // following layout:
+ //
+ // [ guard os page | ... payload ... | guard os page ]
+ // ^---{ aligned to blink page size }
+ //
+ // The returned page memory region will be zeroed.
+ //
+ static PageMemory* Allocate(size_t payload_size, RegionTree*);
+
+ private:
+ PageMemory(PageMemoryRegion* reserved, const MemoryRegion& writable);
+
+ PageMemoryRegion* reserved_;
+ MemoryRegion writable_;
+};
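+
+// Rough allocation flow for a single page (illustrative; payload_size and
+// region_tree are assumed to be provided by the caller, and error handling is
+// omitted):
+//
+//   PageMemory* memory = PageMemory::Allocate(payload_size, region_tree);
+//   Address payload = memory->WritableStart();  // Sits between guard pages.
+//   ...
+//   memory->Decommit();  // Return the physical memory while unused.
+//   delete memory;       // Notifies the owning PageMemoryRegion.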
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PAGE_MEMORY_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/page_pool.cc b/chromium/third_party/blink/renderer/platform/heap/impl/page_pool.cc
new file mode 100644
index 00000000000..b3c31417cfb
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/page_pool.cc
@@ -0,0 +1,55 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/heap/impl/page_pool.h"
+
+#include "third_party/blink/renderer/platform/heap/heap.h"
+#include "third_party/blink/renderer/platform/heap/impl/page_memory.h"
+#include "third_party/blink/renderer/platform/wtf/assertions.h"
+
+namespace blink {
+
+PagePool::PagePool() {
+ for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i) {
+ pool_[i] = nullptr;
+ }
+}
+
+PagePool::~PagePool() {
+ for (int index = 0; index < BlinkGC::kNumberOfArenas; ++index) {
+ while (PoolEntry* entry = pool_[index]) {
+ pool_[index] = entry->next;
+ PageMemory* memory = entry->data;
+ DCHECK(memory);
+ delete memory;
+ delete entry;
+ }
+ }
+}
+
+void PagePool::Add(int index, PageMemory* memory) {
+ // When adding a page to the pool we decommit it to ensure it is unused
+ // while in the pool. This also allows the physical memory, backing the
+ // page, to be given back to the OS.
+ memory->Decommit();
+ PoolEntry* entry = new PoolEntry(memory, pool_[index]);
+ pool_[index] = entry;
+}
+
+PageMemory* PagePool::Take(int index) {
+ while (PoolEntry* entry = pool_[index]) {
+ pool_[index] = entry->next;
+ PageMemory* memory = entry->data;
+ DCHECK(memory);
+ delete entry;
+ if (memory->Commit())
+ return memory;
+
+    // We got some memory but failed to commit it; try again.
+ delete memory;
+ }
+ return nullptr;
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/page_pool.h b/chromium/third_party/blink/renderer/platform/heap/impl/page_pool.h
new file mode 100644
index 00000000000..fdf448f0200
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/page_pool.h
@@ -0,0 +1,48 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PAGE_POOL_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PAGE_POOL_H_
+
+#include "third_party/blink/renderer/platform/heap/thread_state.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+
+namespace blink {
+
+class PageMemory;
+
+// Once pages have been used for one type of thread heap they will never be
+// reused for another type of thread heap. Instead of unmapping, we add the
+// pages to a pool of pages to be reused later by a thread heap of the same
+// type. This is done as a security feature to avoid type confusion. The
+// heaps are type segregated by having separate thread arenas for different
+// types of objects. Holding on to pages ensures that the same virtual address
+// space cannot be reused for objects of a type other than the one the page
+// contained to begin with.
+class PagePool {
+ USING_FAST_MALLOC(PagePool);
+
+ public:
+ PagePool();
+ ~PagePool();
+ void Add(int, PageMemory*);
+ PageMemory* Take(int);
+
+ private:
+ class PoolEntry {
+ USING_FAST_MALLOC(PoolEntry);
+
+ public:
+ PoolEntry(PageMemory* data, PoolEntry* next) : data(data), next(next) {}
+
+ PageMemory* data;
+ PoolEntry* next;
+ };
+
+ PoolEntry* pool_[BlinkGC::kNumberOfArenas];
+};
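+
+// Illustrative flow (arena_index and memory are hypothetical caller-provided
+// values): pages are decommitted while pooled and recommitted on reuse.
+//
+//   PagePool pool;
+//   pool.Add(arena_index, memory);                // Decommits the page.
+//   PageMemory* reused = pool.Take(arena_index);  // Recommits it, or returns
+//                                                 // nullptr if the pool for
+//                                                 // that arena is empty.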
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PAGE_POOL_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/persistent.h b/chromium/third_party/blink/renderer/platform/heap/impl/persistent.h
new file mode 100644
index 00000000000..00cff273e2f
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/persistent.h
@@ -0,0 +1,971 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PERSISTENT_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PERSISTENT_H_
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "third_party/blink/renderer/platform/bindings/buildflags.h"
+#include "third_party/blink/renderer/platform/heap/heap.h"
+#include "third_party/blink/renderer/platform/heap/heap_allocator.h"
+#include "third_party/blink/renderer/platform/heap/impl/heap_compact.h"
+#include "third_party/blink/renderer/platform/heap/impl/persistent_node.h"
+#include "third_party/blink/renderer/platform/heap/member.h"
+#include "third_party/blink/renderer/platform/heap/visitor.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+#include "third_party/blink/renderer/platform/wtf/cross_thread_copier.h"
+
+namespace blink {
+
+template <typename T>
+class CrossThreadWeakPersistent;
+
+// Wrapping type to force callers to go through macros that expand or drop
+// base::Location. This avoids embedding the location strings when they are
+// not needed.
+// The type can be dropped once http://crbug.com/760702 is resolved and
+// ENABLE_LOCATION_SOURCE is disabled for release builds.
+class PersistentLocation final {
+ public:
+ PersistentLocation() = default;
+ explicit PersistentLocation(const base::Location& location)
+ : location_(location) {}
+ PersistentLocation(const PersistentLocation& other) = default;
+
+ const base::Location& get() const { return location_; }
+
+ private:
+ base::Location location_;
+};
+
+#if !BUILDFLAG(FROM_HERE_USES_LOCATION_BUILTINS) && \
+ BUILDFLAG(RAW_HEAP_SNAPSHOTS)
+#if !BUILDFLAG(ENABLE_LOCATION_SOURCE)
+#define PERSISTENT_FROM_HERE \
+ PersistentLocation(::base::Location::CreateFromHere(__FILE__))
+#else
+#define PERSISTENT_FROM_HERE \
+ PersistentLocation( \
+ ::base::Location::CreateFromHere(__func__, __FILE__, __LINE__))
+#endif
+#else
+#define PERSISTENT_FROM_HERE PersistentLocation()
+#endif // BUILDFLAG(RAW_HEAP_SNAPSHOTS)
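+
+// Creation sites typically pass the macro so that raw heap snapshots can
+// attribute the root to its source location, e.g. (Child is a hypothetical
+// type):
+//
+//   Persistent<Child> root(PERSISTENT_FROM_HERE, child);
+//
+// When RAW_HEAP_SNAPSHOTS is disabled the macro expands to an empty
+// PersistentLocation, so no location strings are embedded.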
+
+template <typename T,
+ WeaknessPersistentConfiguration weaknessConfiguration,
+ CrossThreadnessPersistentConfiguration crossThreadnessConfiguration>
+class PersistentBase {
+ USING_FAST_MALLOC(PersistentBase);
+
+ public:
+ bool IsHashTableDeletedValue() const {
+ return raw_ == reinterpret_cast<T*>(-1);
+ }
+
+ T* Release() {
+ T* result = raw_;
+ AssignSafe(nullptr);
+ return result;
+ }
+
+ void Clear() {
+ // Note that this also frees up related data in the backend.
+ AssignSafe(nullptr);
+ }
+
+ T* Get() const {
+ CheckPointer();
+ return raw_;
+ }
+
+ // TODO(https://crbug.com/653394): Consider returning a thread-safe best
+ // guess of validity.
+ bool MaybeValid() const { return true; }
+
+ explicit operator bool() const { return Get(); }
+ T& operator*() const { return *Get(); }
+ operator T*() const { return Get(); }
+ T* operator->() const { return Get(); }
+
+  // Registers the persistent node as a 'static reference': it belongs to
+  // the current thread and must be cleared when the ThreadState itself is
+  // cleared out and destructed.
+ //
+ // Static singletons arrange for this to happen, either to ensure
+ // clean LSan leak reports or to register a thread-local persistent
+ // needing to be cleared out before the thread is terminated.
+ PersistentBase* RegisterAsStaticReference() {
+ static_assert(weaknessConfiguration == kNonWeakPersistentConfiguration,
+ "Can only register non-weak Persistent references as static "
+ "references.");
+ if (PersistentNode* node = persistent_node_.Get()) {
+ ThreadState::Current()->RegisterStaticPersistentNode(node);
+ LEAK_SANITIZER_IGNORE_OBJECT(this);
+ }
+ return this;
+ }
+
+ NO_SANITIZE_ADDRESS
+ void ClearWithLockHeld() {
+ static_assert(
+ crossThreadnessConfiguration == kCrossThreadPersistentConfiguration,
+ "This Persistent does not require the cross-thread lock.");
+ PersistentMutexTraits<crossThreadnessConfiguration>::AssertAcquired();
+ raw_ = nullptr;
+ persistent_node_.ClearWithLockHeld();
+ }
+
+ void UpdateLocation(const PersistentLocation& other) {
+#if BUILDFLAG(RAW_HEAP_SNAPSHOTS)
+ location_ = other;
+#endif // BUILDFLAG(RAW_HEAP_SNAPSHOTS)
+ }
+
+ protected:
+ ~PersistentBase() {
+ UninitializeSafe();
+ // Not resetting raw_ as it is not observable.
+ }
+
+ PersistentBase() : raw_(nullptr) {
+ SaveCreationThreadHeap();
+ // No initialization needed for empty handle.
+ }
+ PersistentBase(const PersistentLocation& location) : PersistentBase() {
+ UpdateLocation(location);
+ }
+
+ PersistentBase(std::nullptr_t) : raw_(nullptr) {
+ SaveCreationThreadHeap();
+ // No initialization needed for empty handle.
+ }
+ PersistentBase(const PersistentLocation& location, std::nullptr_t)
+ : PersistentBase(nullptr) {
+ UpdateLocation(location);
+ }
+
+ PersistentBase(T* raw) : raw_(raw) {
+ SaveCreationThreadHeap();
+ InitializeSafe();
+ CheckPointer();
+ }
+ PersistentBase(const PersistentLocation& location, T* raw)
+ : PersistentBase(raw) {
+ UpdateLocation(location);
+ }
+
+ PersistentBase(T& raw) : raw_(&raw) {
+ SaveCreationThreadHeap();
+ InitializeSafe();
+ CheckPointer();
+ }
+ PersistentBase(const PersistentLocation& location, T& raw)
+ : PersistentBase(raw) {
+ UpdateLocation(location);
+ }
+
+ PersistentBase(const PersistentBase& other) : raw_(other) {
+ SaveCreationThreadHeap();
+ InitializeSafe();
+ CheckPointer();
+ }
+ PersistentBase(const PersistentLocation& location, PersistentBase& other)
+ : PersistentBase(other) {
+ UpdateLocation(location);
+ }
+
+ template <typename U>
+ PersistentBase(const PersistentBase<U,
+ weaknessConfiguration,
+ crossThreadnessConfiguration>& other)
+ : raw_(other) {
+ SaveCreationThreadHeap();
+ InitializeSafe();
+ CheckPointer();
+ }
+ template <typename U>
+ PersistentBase(const PersistentLocation& location,
+ const PersistentBase<U,
+ weaknessConfiguration,
+ crossThreadnessConfiguration>& other)
+ : PersistentBase(other) {
+ UpdateLocation(location);
+ }
+
+ template <typename U>
+ PersistentBase(const Member<U>& other) : raw_(other) {
+ SaveCreationThreadHeap();
+ InitializeSafe();
+ CheckPointer();
+ }
+ template <typename U>
+ PersistentBase(const PersistentLocation& location, const Member<U>& other)
+ : PersistentBase(other) {
+ UpdateLocation(location);
+ }
+
+ PersistentBase(WTF::HashTableDeletedValueType)
+ : raw_(reinterpret_cast<T*>(-1)) {
+ SaveCreationThreadHeap();
+ // No initialization needed for empty handle.
+ }
+ PersistentBase(const PersistentLocation& location,
+ WTF::HashTableDeletedValueType)
+ : PersistentBase(WTF::kHashTableDeletedValue) {
+ UpdateLocation(location);
+ }
+
+ template <typename U>
+ PersistentBase& operator=(U* other) {
+ AssignSafe(other);
+ return *this;
+ }
+
+ PersistentBase& operator=(std::nullptr_t) {
+ AssignSafe(nullptr);
+ return *this;
+ }
+
+ template <typename U>
+ PersistentBase& operator=(const Member<U>& other) {
+ AssignSafe(other);
+ return *this;
+ }
+
+  // Uses unsafe operations, assuming that the caller acquires the lock in
+  // the kCrossThreadPersistentConfiguration case.
+ PersistentBase& operator=(const PersistentBase& other) {
+ PersistentMutexTraits<crossThreadnessConfiguration>::AssertAcquired();
+ AssignUnsafe(other);
+ return *this;
+ }
+
+  // Uses unsafe operations, assuming that the caller acquires the lock in
+  // the kCrossThreadPersistentConfiguration case.
+ template <typename U>
+ PersistentBase& operator=(
+ const PersistentBase<U,
+ weaknessConfiguration,
+ crossThreadnessConfiguration>& other) {
+ PersistentMutexTraits<crossThreadnessConfiguration>::AssertAcquired();
+ AssignUnsafe(other);
+ return *this;
+ }
+
+  // Uses unsafe operations, assuming that the caller acquires the lock in
+  // the kCrossThreadPersistentConfiguration case.
+ template <typename U>
+ PersistentBase& operator=(
+ PersistentBase<U, weaknessConfiguration, crossThreadnessConfiguration>&&
+ other) {
+ PersistentMutexTraits<crossThreadnessConfiguration>::AssertAcquired();
+ if (persistent_node_.IsInitialized()) {
+      // Drop the persistent node, as the node from |other| (if any) can
+      // always be reused.
+ persistent_node_.Uninitialize();
+ }
+ // Explicit cast enabling downcasting.
+ raw_ = static_cast<T*>(other.raw_);
+ other.raw_ = nullptr;
+ // Efficiently move by just rewiring the node pointer.
+ persistent_node_ = std::move(other.persistent_node_);
+ DCHECK(!other.persistent_node_.Get());
+ if (persistent_node_.IsInitialized()) {
+ // If |raw_| points to a non-null or deleted value, just reuse the node.
+ TraceCallback trace_callback =
+ TraceMethodDelegate<PersistentBase,
+ &PersistentBase::TracePersistent>::Trampoline;
+ persistent_node_.Get()->Reinitialize(this, trace_callback);
+ }
+ CheckPointer();
+ return *this;
+ }
+
+ NO_SANITIZE_ADDRESS
+ bool IsNotNull() const { return raw_; }
+
+ NO_SANITIZE_ADDRESS
+ void AssignSafe(T* ptr) {
+ typename PersistentMutexTraits<crossThreadnessConfiguration>::Locker lock;
+ AssignUnsafe(ptr);
+ }
+
+ NO_SANITIZE_ADDRESS
+ void AssignUnsafe(T* ptr) {
+ raw_ = ptr;
+ CheckPointer();
+ if (raw_ && !IsHashTableDeletedValue()) {
+ if (!persistent_node_.IsInitialized())
+ InitializeUnsafe();
+ return;
+ }
+ UninitializeUnsafe();
+ }
+
+ void TracePersistent(Visitor* visitor) const {
+ static_assert(sizeof(T), "T must be fully defined");
+ static_assert(IsGarbageCollectedType<T>::value,
+ "T needs to be a garbage collected object");
+ DCHECK(!IsHashTableDeletedValue());
+ if (weaknessConfiguration == kWeakPersistentConfiguration) {
+ visitor->RegisterWeakCallback(HandleWeakPersistent, this);
+ } else {
+#if BUILDFLAG(RAW_HEAP_SNAPSHOTS)
+ visitor->TraceRoot(raw_, location_.get());
+#else
+ visitor->TraceRoot(raw_, base::Location());
+#endif // BUILDFLAG(RAW_HEAP_SNAPSHOTS)
+ }
+ }
+
+ NO_SANITIZE_ADDRESS
+ void InitializeSafe() {
+ DCHECK(!persistent_node_.IsInitialized());
+ if (!raw_ || IsHashTableDeletedValue())
+ return;
+
+ TraceCallback trace_callback =
+ TraceMethodDelegate<PersistentBase,
+ &PersistentBase::TracePersistent>::Trampoline;
+ typename PersistentMutexTraits<crossThreadnessConfiguration>::Locker lock;
+ persistent_node_.Initialize(this, trace_callback);
+ }
+
+ NO_SANITIZE_ADDRESS
+ void InitializeUnsafe() {
+ DCHECK(!persistent_node_.IsInitialized());
+ if (!raw_ || IsHashTableDeletedValue())
+ return;
+
+ TraceCallback trace_callback =
+ TraceMethodDelegate<PersistentBase,
+ &PersistentBase::TracePersistent>::Trampoline;
+ persistent_node_.Initialize(this, trace_callback);
+ }
+
+ void UninitializeSafe() {
+ if (persistent_node_.IsInitialized()) {
+ typename PersistentMutexTraits<crossThreadnessConfiguration>::Locker lock;
+ persistent_node_.Uninitialize();
+ }
+ }
+
+ void UninitializeUnsafe() {
+ if (persistent_node_.IsInitialized())
+ persistent_node_.Uninitialize();
+ }
+
+ void CheckPointer() const {
+#if DCHECK_IS_ON()
+ if (!raw_ || IsHashTableDeletedValue())
+ return;
+
+ if (crossThreadnessConfiguration != kCrossThreadPersistentConfiguration) {
+ ThreadState* current = ThreadState::Current();
+ DCHECK(current);
+      // creation_thread_state_ may be null when this is used in a heap
+ // collection which initialized the Persistent with memset and the
+ // constructor wasn't called.
+ if (creation_thread_state_) {
+        // The pointee must live in the heap of the creation thread.
+ DCHECK_EQ(&ThreadState::FromObject(raw_)->Heap(),
+ &creation_thread_state_->Heap());
+        // The current thread's heap must be the creation thread's heap.
+ DCHECK_EQ(&current->Heap(), &creation_thread_state_->Heap());
+ }
+ }
+#endif
+ }
+
+ void SaveCreationThreadHeap() {
+#if DCHECK_IS_ON()
+ if (crossThreadnessConfiguration == kCrossThreadPersistentConfiguration) {
+ creation_thread_state_ = nullptr;
+ } else {
+ creation_thread_state_ = ThreadState::Current();
+ DCHECK(creation_thread_state_);
+ }
+#endif
+ }
+
+ static void HandleWeakPersistent(const LivenessBroker& broker,
+ const void* persistent_pointer) {
+ using Base =
+ PersistentBase<typename std::remove_const<T>::type,
+ weaknessConfiguration, crossThreadnessConfiguration>;
+ Base* persistent =
+ reinterpret_cast<Base*>(const_cast<void*>(persistent_pointer));
+ T* object = persistent->Get();
+ if (object && !broker.IsHeapObjectAlive(object))
+ ClearWeakPersistent(persistent);
+ }
+
+ static void ClearWeakPersistent(
+ PersistentBase<std::remove_const_t<T>,
+ kWeakPersistentConfiguration,
+ kCrossThreadPersistentConfiguration>* persistent) {
+ PersistentMutexTraits<crossThreadnessConfiguration>::AssertAcquired();
+ persistent->ClearWithLockHeld();
+ }
+
+ static void ClearWeakPersistent(
+ PersistentBase<std::remove_const_t<T>,
+ kWeakPersistentConfiguration,
+ kSingleThreadPersistentConfiguration>* persistent) {
+ persistent->Clear();
+ }
+
+ template <typename BadPersistent>
+ static void ClearWeakPersistent(BadPersistent* non_weak_persistent) {
+ NOTREACHED();
+ }
+
+  // raw_ is accessed most, so it is kept as the first field.
+ T* raw_;
+
+ // The pointer to the underlying persistent node.
+ //
+  // Since accesses are atomic in the cross-thread case, a different type is
+  // needed to prevent the compiler from producing an error when it encounters
+  // operations that are legal on raw pointers but not on atomics, or
+  // vice versa.
+ std::conditional_t<
+ crossThreadnessConfiguration == kCrossThreadPersistentConfiguration,
+ CrossThreadPersistentNodePtr<weaknessConfiguration>,
+ PersistentNodePtr<ThreadingTrait<T>::kAffinity, weaknessConfiguration>>
+ persistent_node_;
+
+#if BUILDFLAG(RAW_HEAP_SNAPSHOTS)
+ PersistentLocation location_;
+#endif // BUILDFLAG(RAW_HEAP_SNAPSHOTS)
+
+#if DCHECK_IS_ON()
+ const ThreadState* creation_thread_state_;
+#endif
+
+ template <typename F,
+ WeaknessPersistentConfiguration,
+ CrossThreadnessPersistentConfiguration>
+ friend class PersistentBase;
+};
+
+// Persistent is a way to create a strong pointer from an off-heap object
+// to another on-heap object. As long as the Persistent handle is alive
+// the GC will keep the object pointed to alive. The Persistent handle is
+// always a GC root from the point of view of the GC.
+//
+// We have to construct and destruct Persistent in the same thread.
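+//
+// Illustrative usage sketch (not part of the original header; |Node| is an
+// assumed GarbageCollected class defined elsewhere):
+//
+//   Persistent<Node> root = MakeGarbageCollected<Node>();
+//   // |root| acts as a GC root: the Node stays alive across garbage
+//   // collections until |root| is cleared or destroyed.
+//   root.Clear();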
+template <typename T>
+class Persistent : public PersistentBase<T,
+ kNonWeakPersistentConfiguration,
+ kSingleThreadPersistentConfiguration> {
+ using Parent = PersistentBase<T,
+ kNonWeakPersistentConfiguration,
+ kSingleThreadPersistentConfiguration>;
+
+ public:
+ Persistent() : Parent() {}
+ Persistent(const PersistentLocation& location) : Parent(location) {}
+ Persistent(std::nullptr_t) : Parent(nullptr) {}
+ Persistent(const PersistentLocation& location, std::nullptr_t)
+ : Parent(location, nullptr) {}
+ Persistent(T* raw) : Parent(raw) {}
+ Persistent(const PersistentLocation& location, T* raw)
+ : Parent(location, raw) {}
+ Persistent(T& raw) : Parent(raw) {}
+ Persistent(const PersistentLocation& location, T& raw)
+ : Parent(location, raw) {}
+ Persistent(const Persistent& other) : Parent(other) {}
+ Persistent(const PersistentLocation& location, const Persistent& other)
+ : Parent(location, other) {}
+ template <typename U>
+ Persistent(const Persistent<U>& other) : Parent(other) {}
+ template <typename U>
+ Persistent(const PersistentLocation& location, const Persistent<U>& other)
+ : Parent(location, other) {}
+ template <typename U>
+ Persistent(const Member<U>& other) : Parent(other) {}
+ template <typename U>
+ Persistent(const PersistentLocation& location, const Member<U>& other)
+ : Parent(location, other) {}
+ Persistent(WTF::HashTableDeletedValueType x) : Parent(x) {}
+ Persistent(const PersistentLocation& location,
+ WTF::HashTableDeletedValueType x)
+ : Parent(location, x) {}
+
+ template <typename U>
+ Persistent& operator=(U* other) {
+ Parent::operator=(other);
+ return *this;
+ }
+
+ Persistent& operator=(std::nullptr_t) {
+ Parent::operator=(nullptr);
+ return *this;
+ }
+
+ Persistent& operator=(const Persistent& other) {
+ Parent::operator=(other);
+ return *this;
+ }
+
+ template <typename U>
+ Persistent& operator=(const Persistent<U>& other) {
+ Parent::operator=(other);
+ return *this;
+ }
+
+ template <typename U>
+ Persistent& operator=(const Member<U>& other) {
+ Parent::operator=(other);
+ return *this;
+ }
+};
+
+// WeakPersistent is a way to create a weak pointer from an off-heap object
+// to an on-heap object. The raw_ pointer is automatically cleared when the
+// pointee gets collected.
+//
+// We have to construct and destruct WeakPersistent in the same thread.
+//
+// Note that collections of WeakPersistents are not supported. Use a collection
+// of WeakMembers instead.
+//
+// HashSet<WeakPersistent<T>> m_set; // wrong
+// Persistent<HeapHashSet<WeakMember<T>>> m_set; // correct
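+//
+// Illustrative sketch of the clearing behavior (|Node| is an assumed
+// GarbageCollected class, not part of this header):
+//
+//   WeakPersistent<Node> weak = node;
+//   // Once |node| becomes unreachable and a GC has run, weak.Get() returns
+//   // nullptr instead of a dangling pointer.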
+template <typename T>
+class WeakPersistent
+ : public PersistentBase<T,
+ kWeakPersistentConfiguration,
+ kSingleThreadPersistentConfiguration> {
+ using Parent = PersistentBase<T,
+ kWeakPersistentConfiguration,
+ kSingleThreadPersistentConfiguration>;
+
+ public:
+ WeakPersistent() : Parent() {}
+ WeakPersistent(std::nullptr_t) : Parent(nullptr) {}
+ WeakPersistent(T* raw) : Parent(raw) {}
+ WeakPersistent(T& raw) : Parent(raw) {}
+ WeakPersistent(const WeakPersistent& other) : Parent(other) {}
+ template <typename U>
+ WeakPersistent(const WeakPersistent<U>& other) : Parent(other) {}
+ template <typename U>
+ WeakPersistent(const Member<U>& other) : Parent(other) {}
+
+ template <typename U>
+ WeakPersistent& operator=(U* other) {
+ Parent::operator=(other);
+ return *this;
+ }
+
+ WeakPersistent& operator=(std::nullptr_t) {
+ Parent::operator=(nullptr);
+ return *this;
+ }
+
+ WeakPersistent& operator=(const WeakPersistent& other) {
+ Parent::operator=(other);
+ return *this;
+ }
+
+ template <typename U>
+ WeakPersistent& operator=(const WeakPersistent<U>& other) {
+ Parent::operator=(other);
+ return *this;
+ }
+
+ template <typename U>
+ WeakPersistent& operator=(const Member<U>& other) {
+ Parent::operator=(other);
+ return *this;
+ }
+
+ NO_SANITIZE_ADDRESS
+ bool IsClearedUnsafe() const { return this->IsNotNull(); }
+};
+
+// CrossThreadPersistent allows for holding onto an object strongly on a
+// different thread.
+//
+// Thread-safe operations:
+// - Construction
+// - Destruction
+// - Copy and move construction and assignment
+// - Clearing
+// - Deref if treated as an immutable reference or if externally synchronized
+//   (e.g. mutex, task). The current implementation of Get() uses a raw load
+//   (on purpose), which means the handle must not be mutated while the
+//   reference is being accessed on a different thread.
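+//
+// Illustrative sketch (|Node|, |UseNode|, and |task_runner| are assumptions,
+// not names from this header): handing an object off to another thread.
+//
+//   CrossThreadPersistent<Node> handle(node);  // created on thread A
+//   PostCrossThreadTask(*task_runner, FROM_HERE,
+//                       CrossThreadBindOnce(&UseNode, std::move(handle)));
+//   // Thread B owns |handle| for the duration of the task; dereferencing
+//   // still requires external synchronization as described above.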
+template <typename T>
+class CrossThreadPersistent
+ : public PersistentBase<T,
+ kNonWeakPersistentConfiguration,
+ kCrossThreadPersistentConfiguration> {
+ using Parent = PersistentBase<T,
+ kNonWeakPersistentConfiguration,
+ kCrossThreadPersistentConfiguration>;
+
+ public:
+ CrossThreadPersistent() : Parent() {}
+ CrossThreadPersistent(const PersistentLocation& location)
+ : Parent(location) {}
+ CrossThreadPersistent(std::nullptr_t) : Parent(nullptr) {}
+ CrossThreadPersistent(const PersistentLocation& location, std::nullptr_t)
+ : Parent(location, nullptr) {}
+ explicit CrossThreadPersistent(T* raw) : Parent(raw) {}
+ CrossThreadPersistent(const PersistentLocation& location, T* raw)
+ : Parent(location, raw) {}
+ explicit CrossThreadPersistent(T& raw) : Parent(raw) {}
+ CrossThreadPersistent(const PersistentLocation& location, T& raw)
+ : Parent(location, raw) {}
+ CrossThreadPersistent(const CrossThreadPersistent& other) { *this = other; }
+ CrossThreadPersistent(const PersistentLocation& location,
+ const CrossThreadPersistent& other) {
+ *this = other;
+ }
+ template <typename U>
+ CrossThreadPersistent(const CrossThreadPersistent<U>& other) {
+ *this = other;
+ }
+ template <typename U>
+ CrossThreadPersistent(const PersistentLocation& location,
+ const CrossThreadPersistent<U>& other) {
+ *this = other;
+ }
+ template <typename U>
+ CrossThreadPersistent(const Member<U>& other) : Parent(other) {}
+ template <typename U>
+ CrossThreadPersistent(const PersistentLocation& location,
+ const Member<U>& other)
+ : Parent(location, other) {}
+ CrossThreadPersistent(WTF::HashTableDeletedValueType x) : Parent(x) {}
+ CrossThreadPersistent(const PersistentLocation& location,
+ WTF::HashTableDeletedValueType x)
+ : Parent(location, x) {}
+ template <typename U>
+ CrossThreadPersistent(const CrossThreadWeakPersistent<U>& other) {
+ *this = other;
+ }
+
+  // Instead of using Release(), assign to a new handle and then Clear() this
+  // one. Using Release() with per-thread heaps enabled can cause the object to
+  // be destroyed before it is assigned to a new handle.
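+  //
+  // For example (illustrative sketch; |source| and |target| are assumed
+  // handles, not names from this header):
+  //
+  //   target = source;  // keep the object alive via |target|
+  //   source.Clear();   // then drop the old handle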
+ T* Release() = delete;
+
+ template <typename U>
+ CrossThreadPersistent& operator=(U* other) {
+ Parent::operator=(other);
+ return *this;
+ }
+
+ CrossThreadPersistent& operator=(std::nullptr_t) {
+ Parent::operator=(nullptr);
+ return *this;
+ }
+
+ CrossThreadPersistent& operator=(const CrossThreadPersistent& other) {
+ MutexLocker locker(ProcessHeap::CrossThreadPersistentMutex());
+ Parent::operator=(other);
+ return *this;
+ }
+
+ template <typename U>
+ CrossThreadPersistent& operator=(const CrossThreadPersistent<U>& other) {
+ MutexLocker locker(ProcessHeap::CrossThreadPersistentMutex());
+ Parent::operator=(other);
+ return *this;
+ }
+
+ template <typename U>
+ CrossThreadPersistent& operator=(const CrossThreadWeakPersistent<U>&);
+};
+
+// CrossThreadWeakPersistent combines behavior of CrossThreadPersistent and
+// WeakPersistent, i.e., it allows holding onto an object weakly on a different
+// thread.
+//
+// Thread-safe operations:
+// - Construction
+// - Destruction
+// - Copy and move construction and assignment
+// - Clearing
+//
+// Note that this does not include dereferencing and using the raw pointer as
+// there is no guarantee that the object will be alive at the time it is used.
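+//
+// Illustrative sketch of the intended access pattern via Lock() below (|Node|
+// and |DoWork| are assumptions, not names from this header):
+//
+//   CrossThreadWeakPersistent<Node> weak = ...;
+//   CrossThreadPersistent<Node> strong = weak.Lock();
+//   if (strong)
+//     strong->DoWork();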
+template <typename T>
+class CrossThreadWeakPersistent
+ : public PersistentBase<T,
+ kWeakPersistentConfiguration,
+ kCrossThreadPersistentConfiguration> {
+ using Parent = PersistentBase<T,
+ kWeakPersistentConfiguration,
+ kCrossThreadPersistentConfiguration>;
+
+ public:
+ CrossThreadWeakPersistent() : Parent() {}
+ explicit CrossThreadWeakPersistent(T* raw) : Parent(raw) {}
+ explicit CrossThreadWeakPersistent(T& raw) : Parent(raw) {}
+ CrossThreadWeakPersistent(const CrossThreadWeakPersistent& other) {
+ *this = other;
+ }
+ template <typename U>
+ CrossThreadWeakPersistent(const CrossThreadWeakPersistent<U>& other) {
+ *this = other;
+ }
+ CrossThreadWeakPersistent(CrossThreadWeakPersistent&& other) {
+ *this = std::move(other);
+ }
+ template <typename U>
+ CrossThreadWeakPersistent(CrossThreadWeakPersistent<U>&& other) {
+ *this = std::move(other);
+ }
+
+ CrossThreadWeakPersistent& operator=(const CrossThreadWeakPersistent& other) {
+ MutexLocker locker(ProcessHeap::CrossThreadPersistentMutex());
+ Parent::operator=(other);
+ return *this;
+ }
+
+ template <typename U>
+ CrossThreadWeakPersistent& operator=(
+ const CrossThreadWeakPersistent<U>& other) {
+ MutexLocker locker(ProcessHeap::CrossThreadPersistentMutex());
+ Parent::operator=(other);
+ return *this;
+ }
+
+ CrossThreadWeakPersistent& operator=(CrossThreadWeakPersistent&& other) {
+ MutexLocker locker(ProcessHeap::CrossThreadPersistentMutex());
+ Parent::operator=(std::move(other));
+ return *this;
+ }
+
+ template <typename U>
+ CrossThreadWeakPersistent& operator=(CrossThreadWeakPersistent<U>&& other) {
+ MutexLocker locker(ProcessHeap::CrossThreadPersistentMutex());
+ Parent::operator=(std::move(other));
+ return *this;
+ }
+
+ template <typename U>
+ CrossThreadWeakPersistent& operator=(U* other) {
+ Parent::operator=(other);
+ return *this;
+ }
+
+  // Creates a CrossThreadPersistent that keeps the underlying object alive if
+  // the weak reference is still set. Can be used to work with an object on a
+  // different thread than the one it was allocated on. Note that a
+  // CrossThreadPersistent does not block threads from terminating, in which
+  // case the reference becomes invalid.
+ const CrossThreadPersistent<T> Lock() const {
+ return CrossThreadPersistent<T>(*this);
+ }
+
+ // Disallow directly using CrossThreadWeakPersistent. Users must go through
+ // CrossThreadPersistent to access the pointee. Note that this does not
+ // guarantee that the object is still alive at that point. Users must check
+ // the state of CTP manually before invoking any calls.
+ T* operator->() const = delete;
+ T& operator*() const = delete;
+ operator T*() const = delete;
+ T* Get() const = delete;
+
+ private:
+ template <typename U>
+ friend class CrossThreadPersistent;
+};
+
+template <typename T>
+template <typename U>
+CrossThreadPersistent<T>& CrossThreadPersistent<T>::operator=(
+ const CrossThreadWeakPersistent<U>& other) {
+ MutexLocker locker(ProcessHeap::CrossThreadPersistentMutex());
+ using ParentU = PersistentBase<U, kWeakPersistentConfiguration,
+ kCrossThreadPersistentConfiguration>;
+ this->AssignUnsafe(static_cast<const ParentU&>(other).Get());
+ return *this;
+}
+
+template <typename T>
+Persistent<T> WrapPersistentInternal(const PersistentLocation& location,
+ T* value) {
+ return Persistent<T>(location, value);
+}
+
+template <typename T>
+Persistent<T> WrapPersistentInternal(T* value) {
+ return Persistent<T>(value);
+}
+
+#if BUILDFLAG(RAW_HEAP_SNAPSHOTS)
+#define WrapPersistent(value) \
+ WrapPersistentInternal(PERSISTENT_FROM_HERE, value)
+#else
+#define WrapPersistent(value) WrapPersistentInternal(value)
+#endif // BUILDFLAG(RAW_HEAP_SNAPSHOTS)
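+
+// A typical (illustrative) use of WrapPersistent is keeping a garbage
+// collected object alive for the duration of a posted task; |Foo|, |foo|, and
+// |task_runner| are assumptions, not names from this header:
+//
+//   task_runner->PostTask(FROM_HERE,
+//                         WTF::Bind(&Foo::DoWork, WrapPersistent(foo)));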
+
+template <typename T,
+ typename = std::enable_if_t<WTF::IsGarbageCollectedType<T>::value>>
+Persistent<T> WrapPersistentIfNeeded(T* value) {
+ return Persistent<T>(value);
+}
+
+template <typename T>
+T& WrapPersistentIfNeeded(T& value) {
+ return value;
+}
+
+template <typename T>
+WeakPersistent<T> WrapWeakPersistent(T* value) {
+ return WeakPersistent<T>(value);
+}
+
+template <typename T>
+CrossThreadPersistent<T> WrapCrossThreadPersistentInternal(
+ const PersistentLocation& location,
+ T* value) {
+ return CrossThreadPersistent<T>(location, value);
+}
+
+template <typename T>
+CrossThreadPersistent<T> WrapCrossThreadPersistentInternal(T* value) {
+ return CrossThreadPersistent<T>(value);
+}
+
+#if BUILDFLAG(RAW_HEAP_SNAPSHOTS)
+#define WrapCrossThreadPersistent(value) \
+ WrapCrossThreadPersistentInternal(PERSISTENT_FROM_HERE, value)
+#else
+#define WrapCrossThreadPersistent(value) \
+ WrapCrossThreadPersistentInternal(value)
+#endif // BUILDFLAG(RAW_HEAP_SNAPSHOTS)
+
+template <typename T>
+CrossThreadWeakPersistent<T> WrapCrossThreadWeakPersistent(T* value) {
+ return CrossThreadWeakPersistent<T>(value);
+}
+
+// Comparison operators between (Weak)Members, Persistents, and UntracedMembers.
+template <typename T, typename U>
+inline bool operator==(const Member<T>& a, const Member<U>& b) {
+ return a.Get() == b.Get();
+}
+template <typename T, typename U>
+inline bool operator!=(const Member<T>& a, const Member<U>& b) {
+ return a.Get() != b.Get();
+}
+template <typename T, typename U>
+inline bool operator==(const Persistent<T>& a, const Persistent<U>& b) {
+ return a.Get() == b.Get();
+}
+template <typename T, typename U>
+inline bool operator!=(const Persistent<T>& a, const Persistent<U>& b) {
+ return a.Get() != b.Get();
+}
+
+template <typename T, typename U>
+inline bool operator==(const Member<T>& a, const Persistent<U>& b) {
+ return a.Get() == b.Get();
+}
+template <typename T, typename U>
+inline bool operator!=(const Member<T>& a, const Persistent<U>& b) {
+ return a.Get() != b.Get();
+}
+template <typename T, typename U>
+inline bool operator==(const Persistent<T>& a, const Member<U>& b) {
+ return a.Get() == b.Get();
+}
+template <typename T, typename U>
+inline bool operator!=(const Persistent<T>& a, const Member<U>& b) {
+ return a.Get() != b.Get();
+}
+
+} // namespace blink
+
+namespace WTF {
+
+template <
+ typename T,
+ blink::WeaknessPersistentConfiguration weaknessConfiguration,
+ blink::CrossThreadnessPersistentConfiguration crossThreadnessConfiguration>
+struct VectorTraits<blink::PersistentBase<T,
+ weaknessConfiguration,
+ crossThreadnessConfiguration>>
+ : VectorTraitsBase<blink::PersistentBase<T,
+ weaknessConfiguration,
+ crossThreadnessConfiguration>> {
+ STATIC_ONLY(VectorTraits);
+ static const bool kNeedsDestruction = true;
+ static const bool kCanInitializeWithMemset = true;
+ static const bool kCanClearUnusedSlotsWithMemset = false;
+ static const bool kCanMoveWithMemcpy = true;
+};
+
+template <typename T>
+struct HashTraits<blink::Persistent<T>>
+ : HandleHashTraits<T, blink::Persistent<T>> {};
+
+template <typename T>
+struct HashTraits<blink::CrossThreadPersistent<T>>
+ : HandleHashTraits<T, blink::CrossThreadPersistent<T>> {};
+
+template <typename T>
+struct DefaultHash<blink::Persistent<T>> {
+ STATIC_ONLY(DefaultHash);
+ using Hash = MemberHash<T>;
+};
+
+template <typename T>
+struct DefaultHash<blink::WeakPersistent<T>> {
+ STATIC_ONLY(DefaultHash);
+ using Hash = MemberHash<T>;
+};
+
+template <typename T>
+struct DefaultHash<blink::CrossThreadPersistent<T>> {
+ STATIC_ONLY(DefaultHash);
+ using Hash = MemberHash<T>;
+};
+
+template <typename T>
+struct DefaultHash<blink::CrossThreadWeakPersistent<T>> {
+ STATIC_ONLY(DefaultHash);
+ using Hash = MemberHash<T>;
+};
+
+template <typename T>
+struct CrossThreadCopier<blink::CrossThreadPersistent<T>>
+ : public CrossThreadCopierPassThrough<blink::CrossThreadPersistent<T>> {
+ STATIC_ONLY(CrossThreadCopier);
+};
+
+template <typename T>
+struct CrossThreadCopier<blink::CrossThreadWeakPersistent<T>>
+ : public CrossThreadCopierPassThrough<blink::CrossThreadWeakPersistent<T>> {
+ STATIC_ONLY(CrossThreadCopier);
+};
+
+} // namespace WTF
+
+namespace base {
+
+template <typename T>
+struct IsWeakReceiver<blink::WeakPersistent<T>> : std::true_type {};
+
+template <typename T>
+struct IsWeakReceiver<blink::CrossThreadWeakPersistent<T>> : std::true_type {};
+
+template <typename T>
+struct BindUnwrapTraits<blink::CrossThreadWeakPersistent<T>> {
+ static blink::CrossThreadPersistent<T> Unwrap(
+ const blink::CrossThreadWeakPersistent<T>& wrapped) {
+ return blink::CrossThreadPersistent<T>(wrapped);
+ }
+};
+} // namespace base
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PERSISTENT_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/persistent_node.cc b/chromium/third_party/blink/renderer/platform/heap/impl/persistent_node.cc
new file mode 100644
index 00000000000..64733dc57f6
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/persistent_node.cc
@@ -0,0 +1,206 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/heap/impl/persistent_node.h"
+
+#include "base/debug/alias.h"
+#include "third_party/blink/renderer/platform/heap/handle.h"
+#include "third_party/blink/renderer/platform/heap/persistent.h"
+#include "third_party/blink/renderer/platform/heap/process_heap.h"
+
+namespace blink {
+
+namespace {
+
+class DummyGCBase final : public GarbageCollected<DummyGCBase> {
+ public:
+ void Trace(Visitor* visitor) const {}
+};
+} // namespace
+
+PersistentRegionBase::~PersistentRegionBase() {
+ PersistentNodeSlots* slots = slots_;
+ while (slots) {
+ PersistentNodeSlots* dead_slots = slots;
+ slots = slots->next;
+ delete dead_slots;
+ }
+}
+
+int PersistentRegionBase::NodesInUse() const {
+ size_t persistent_count = 0;
+ for (PersistentNodeSlots* slots = slots_; slots; slots = slots->next) {
+ for (int i = 0; i < PersistentNodeSlots::kSlotCount; ++i) {
+ if (!slots->slot[i].IsUnused())
+ ++persistent_count;
+ }
+ }
+#if DCHECK_IS_ON()
+ DCHECK_EQ(persistent_count, used_node_count_);
+#endif
+ return persistent_count;
+}
+
+void PersistentRegionBase::EnsureNodeSlots() {
+ DCHECK(!free_list_head_);
+ PersistentNodeSlots* slots = new PersistentNodeSlots;
+ for (int i = 0; i < PersistentNodeSlots::kSlotCount; ++i) {
+ PersistentNode* node = &slots->slot[i];
+ node->SetFreeListNext(free_list_head_);
+ free_list_head_ = node;
+ DCHECK(node->IsUnused());
+ }
+ slots->next = slots_;
+ slots_ = slots;
+}
+
+void PersistentRegionBase::TraceNodesImpl(Visitor* visitor,
+ ShouldTraceCallback should_trace) {
+ free_list_head_ = nullptr;
+ size_t persistent_count = 0;
+ PersistentNodeSlots** prev_next = &slots_;
+ PersistentNodeSlots* slots = slots_;
+ while (slots) {
+ PersistentNode* free_list_next = nullptr;
+ PersistentNode* free_list_last = nullptr;
+ int free_count = 0;
+ for (int i = 0; i < PersistentNodeSlots::kSlotCount; ++i) {
+ PersistentNode* node = &slots->slot[i];
+ if (node->IsUnused()) {
+ if (!free_list_next)
+ free_list_last = node;
+ node->SetFreeListNext(free_list_next);
+ free_list_next = node;
+ ++free_count;
+ } else {
+ ++persistent_count;
+ if (!should_trace(visitor, node))
+ continue;
+ node->TracePersistentNode(visitor);
+ }
+ }
+ if (free_count == PersistentNodeSlots::kSlotCount) {
+ PersistentNodeSlots* dead_slots = slots;
+ *prev_next = slots->next;
+ slots = slots->next;
+ delete dead_slots;
+ } else {
+ if (free_list_last) {
+ DCHECK(free_list_next);
+ DCHECK(!free_list_last->FreeListNext());
+ free_list_last->SetFreeListNext(free_list_head_);
+ free_list_head_ = free_list_next;
+ }
+ prev_next = &slots->next;
+ slots = slots->next;
+ }
+ }
+#if DCHECK_IS_ON()
+ DCHECK_EQ(persistent_count, used_node_count_);
+#endif
+}
+
+void PersistentRegion::ReleaseNode(PersistentNode* persistent_node) {
+ DCHECK(!persistent_node->IsUnused());
+ // 'self' is in use, containing the persistent wrapper object.
+ void* self = persistent_node->Self();
+ Persistent<DummyGCBase>* persistent =
+ reinterpret_cast<Persistent<DummyGCBase>*>(self);
+ persistent->Clear();
+ DCHECK(persistent_node->IsUnused());
+}
+
+void PersistentRegion::PrepareForThreadStateTermination(ThreadState* state) {
+ DCHECK_EQ(state, ThreadState::Current());
+ DCHECK(!IsMainThread());
+ PersistentNodeSlots* slots = slots_;
+ while (slots) {
+ for (int i = 0; i < PersistentNodeSlots::kSlotCount; ++i) {
+ PersistentNode* node = &slots->slot[i];
+ if (node->IsUnused())
+ continue;
+ // It is safe to cast to Persistent<DummyGCBase> because persistent heap
+ // collections are banned in non-main threads.
+ Persistent<DummyGCBase>* persistent =
+ reinterpret_cast<Persistent<DummyGCBase>*>(node->Self());
+ DCHECK(persistent);
+ persistent->Clear();
+ DCHECK(node->IsUnused());
+ }
+ slots = slots->next;
+ }
+#if DCHECK_IS_ON()
+ DCHECK_EQ(used_node_count_, 0u);
+#endif
+}
+
+bool CrossThreadPersistentRegion::ShouldTracePersistentNode(
+ Visitor* visitor,
+ PersistentNode* node) {
+ CrossThreadPersistent<DummyGCBase>* persistent =
+ reinterpret_cast<CrossThreadPersistent<DummyGCBase>*>(node->Self());
+ DCHECK(persistent);
+ DCHECK(!persistent->IsHashTableDeletedValue());
+ Address raw_object = reinterpret_cast<Address>(persistent->Get());
+ if (!raw_object)
+ return false;
+ return &visitor->Heap() == &ThreadState::FromObject(raw_object)->Heap();
+}
+
+void CrossThreadPersistentRegion::PrepareForThreadStateTermination(
+ ThreadState* thread_state) {
+  // For heaps belonging to a thread that's detaching, any cross-thread
+  // persistents pointing into them need to be disabled. This is done by
+  // clearing out the underlying heap reference.
+ MutexLocker lock(ProcessHeap::CrossThreadPersistentMutex());
+
+ PersistentNodeSlots* slots = slots_;
+ while (slots) {
+ for (int i = 0; i < PersistentNodeSlots::kSlotCount; ++i) {
+ if (slots->slot[i].IsUnused())
+ continue;
+
+ // 'self' is in use, containing the cross-thread persistent wrapper
+ // object.
+ CrossThreadPersistent<DummyGCBase>* persistent =
+ reinterpret_cast<CrossThreadPersistent<DummyGCBase>*>(
+ slots->slot[i].Self());
+ DCHECK(persistent);
+ void* raw_object = persistent->Get();
+ if (!raw_object)
+ continue;
+ BasePage* page = PageFromObject(raw_object);
+ DCHECK(page);
+ if (page->Arena()->GetThreadState() == thread_state) {
+ persistent->ClearWithLockHeld();
+ DCHECK(slots->slot[i].IsUnused());
+ }
+ }
+ slots = slots->next;
+ }
+}
+
+#if defined(ADDRESS_SANITIZER)
+void CrossThreadPersistentRegion::UnpoisonCrossThreadPersistents() {
+#if DCHECK_IS_ON()
+ ProcessHeap::CrossThreadPersistentMutex().AssertAcquired();
+#endif
+ size_t persistent_count = 0;
+ for (PersistentNodeSlots* slots = slots_; slots; slots = slots->next) {
+ for (int i = 0; i < PersistentNodeSlots::kSlotCount; ++i) {
+ const PersistentNode& node = slots->slot[i];
+ if (!node.IsUnused()) {
+ ASAN_UNPOISON_MEMORY_REGION(node.Self(),
+ sizeof(CrossThreadPersistent<void*>));
+ ++persistent_count;
+ }
+ }
+ }
+#if DCHECK_IS_ON()
+ DCHECK_EQ(persistent_count, used_node_count_);
+#endif
+}
+#endif
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/persistent_node.h b/chromium/third_party/blink/renderer/platform/heap/impl/persistent_node.h
new file mode 100644
index 00000000000..d8c2b08471e
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/persistent_node.h
@@ -0,0 +1,385 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PERSISTENT_NODE_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PERSISTENT_NODE_H_
+
+#include <atomic>
+#include <memory>
+#include "third_party/blink/renderer/platform/heap/process_heap.h"
+#include "third_party/blink/renderer/platform/heap/thread_state.h"
+#include "third_party/blink/renderer/platform/platform_export.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+#include "third_party/blink/renderer/platform/wtf/assertions.h"
+#include "third_party/blink/renderer/platform/wtf/threading_primitives.h"
+
+namespace blink {
+
+class CrossThreadPersistentRegion;
+class PersistentRegion;
+
+enum WeaknessPersistentConfiguration {
+ kNonWeakPersistentConfiguration,
+ kWeakPersistentConfiguration
+};
+
+enum CrossThreadnessPersistentConfiguration {
+ kSingleThreadPersistentConfiguration,
+ kCrossThreadPersistentConfiguration
+};
+
+template <CrossThreadnessPersistentConfiguration>
+struct PersistentMutexTraits {
+  struct [[maybe_unused]] Locker {};
+ static void AssertAcquired() {}
+};
+
+template <>
+struct PersistentMutexTraits<kCrossThreadPersistentConfiguration> {
+ struct Locker {
+ MutexLocker locker{ProcessHeap::CrossThreadPersistentMutex()};
+ };
+ static void AssertAcquired() {
+#if DCHECK_IS_ON()
+ ProcessHeap::CrossThreadPersistentMutex().AssertAcquired();
+#endif
+ }
+};
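+
+// Usage sketch (illustrative): code shared between the single-thread and the
+// cross-thread configuration can take the lock unconditionally; for the
+// single-thread specialization the Locker compiles down to a no-op.
+//
+//   template <CrossThreadnessPersistentConfiguration config>
+//   void DoSomethingLocked() {
+//     typename PersistentMutexTraits<config>::Locker lock;
+//     // ... access state guarded by the cross-thread persistent mutex ...
+//   }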
+
+class PersistentNode final {
+ DISALLOW_NEW();
+
+ public:
+ PersistentNode() { DCHECK(IsUnused()); }
+
+#if DCHECK_IS_ON()
+ ~PersistentNode() {
+ // If you hit this assert, it means that the thread finished
+ // without clearing persistent handles that the thread created.
+ // We don't enable the assert for the main thread because the
+ // main thread finishes without clearing all persistent handles.
+ DCHECK(IsMainThread() || IsUnused());
+ }
+#endif
+
+ // It is dangerous to copy the PersistentNode because it breaks the
+ // free list.
+ PersistentNode& operator=(const PersistentNode& otherref) = delete;
+
+ // Ideally the trace method should be virtual and automatically dispatch
+  // to the most specific implementation. However, having a virtual method
+  // on PersistentNode leads to overly eager template instantiation with MSVC,
+  // which in turn leads to include cycles.
+ // Instead we call the constructor with a TraceCallback which knows the
+ // type of the most specific child and calls trace directly. See
+ // TraceMethodDelegate in Visitor.h for how this is done.
+ void TracePersistentNode(Visitor* visitor) const {
+ DCHECK(!IsUnused());
+ DCHECK(trace_);
+ trace_(visitor, self_);
+ }
+
+ void Initialize(void* self, TraceCallback trace) {
+ DCHECK(IsUnused());
+ self_ = self;
+ trace_ = trace;
+ }
+
+ void Reinitialize(void* self, TraceCallback trace) {
+ self_ = self;
+ trace_ = trace;
+ }
+
+ void SetFreeListNext(PersistentNode* node) {
+ DCHECK(!node || node->IsUnused());
+ self_ = node;
+ trace_ = nullptr;
+ DCHECK(IsUnused());
+ }
+
+ PersistentNode* FreeListNext() {
+ DCHECK(IsUnused());
+ PersistentNode* node = reinterpret_cast<PersistentNode*>(self_);
+ DCHECK(!node || node->IsUnused());
+ return node;
+ }
+
+ bool IsUnused() const { return !trace_; }
+
+ void* Self() const { return self_; }
+
+ private:
+ // If this PersistentNode is in use:
+  // - self_ points to the corresponding Persistent handle.
+  // - trace_ points to the trace method.
+  // If this PersistentNode is freed:
+  // - self_ points to the next freed PersistentNode.
+  // - trace_ is nullptr.
+ void* self_ = nullptr;
+ TraceCallback trace_ = nullptr;
+};
+
+struct PersistentNodeSlots final {
+ USING_FAST_MALLOC(PersistentNodeSlots);
+
+ public:
+ static constexpr int kSlotCount = 256;
+
+ PersistentNodeSlots* next;
+ PersistentNode slot[kSlotCount];
+};
+
+// Used by PersistentBase to manage a pointer to a thread heap persistent node.
+// This class mostly passes accesses through, but provides an interface
+// compatible with CrossThreadPersistentNodePtr.
+template <ThreadAffinity affinity,
+ WeaknessPersistentConfiguration weakness_configuration>
+class PersistentNodePtr {
+ STACK_ALLOCATED();
+
+ public:
+ PersistentNode* Get() const { return ptr_; }
+ bool IsInitialized() const { return ptr_; }
+
+ void Initialize(void* owner, TraceCallback);
+ void Uninitialize();
+
+ PersistentNodePtr& operator=(PersistentNodePtr&& other) {
+ ptr_ = other.ptr_;
+ other.ptr_ = nullptr;
+ return *this;
+ }
+
+ private:
+ PersistentNode* ptr_ = nullptr;
+#if DCHECK_IS_ON()
+ ThreadState* state_ = nullptr;
+#endif
+};
+
+// Used by PersistentBase to manage a pointer to a cross-thread persistent node.
+// It uses ProcessHeap::CrossThreadPersistentMutex() to protect most accesses,
+// but can be polled to see whether it is initialized without the mutex.
+template <WeaknessPersistentConfiguration weakness_configuration>
+class CrossThreadPersistentNodePtr {
+ STACK_ALLOCATED();
+
+ public:
+ PersistentNode* Get() const {
+ PersistentMutexTraits<
+ kCrossThreadPersistentConfiguration>::AssertAcquired();
+ return ptr_.load(std::memory_order_relaxed);
+ }
+ bool IsInitialized() const { return ptr_.load(std::memory_order_acquire); }
+
+ void Initialize(void* owner, TraceCallback);
+ void Uninitialize();
+
+ void ClearWithLockHeld();
+
+ CrossThreadPersistentNodePtr& operator=(
+ CrossThreadPersistentNodePtr&& other) {
+ PersistentMutexTraits<
+ kCrossThreadPersistentConfiguration>::AssertAcquired();
+ PersistentNode* node = other.ptr_.load(std::memory_order_relaxed);
+ ptr_.store(node, std::memory_order_relaxed);
+ other.ptr_.store(nullptr, std::memory_order_relaxed);
+ return *this;
+ }
+
+ private:
+ // Access must either be protected by the cross-thread persistent mutex or
+ // handle the fact that this may be changed concurrently (with a
+ // release-store).
+ std::atomic<PersistentNode*> ptr_{nullptr};
+};
+
+class PLATFORM_EXPORT PersistentRegionBase {
+ public:
+ ~PersistentRegionBase();
+
+ inline PersistentNode* AllocateNode(void* self, TraceCallback trace);
+ inline void FreeNode(PersistentNode* persistent_node);
+ int NodesInUse() const;
+
+ protected:
+ using ShouldTraceCallback = bool (*)(Visitor*, PersistentNode*);
+
+ void TraceNodesImpl(Visitor*, ShouldTraceCallback);
+
+ void EnsureNodeSlots();
+
+ PersistentNode* free_list_head_ = nullptr;
+ PersistentNodeSlots* slots_ = nullptr;
+#if DCHECK_IS_ON()
+ size_t used_node_count_ = 0;
+#endif
+};
+
+inline PersistentNode* PersistentRegionBase::AllocateNode(void* self,
+ TraceCallback trace) {
+#if DCHECK_IS_ON()
+ ++used_node_count_;
+#endif
+ if (UNLIKELY(!free_list_head_))
+ EnsureNodeSlots();
+ DCHECK(free_list_head_);
+ PersistentNode* node = free_list_head_;
+ free_list_head_ = free_list_head_->FreeListNext();
+ node->Initialize(self, trace);
+ DCHECK(!node->IsUnused());
+ return node;
+}
+
+void PersistentRegionBase::FreeNode(PersistentNode* persistent_node) {
+#if DCHECK_IS_ON()
+ DCHECK_GT(used_node_count_, 0u);
+#endif
+ persistent_node->SetFreeListNext(free_list_head_);
+ free_list_head_ = persistent_node;
+#if DCHECK_IS_ON()
+ --used_node_count_;
+#endif
+}
+
+class PLATFORM_EXPORT PersistentRegion final : public PersistentRegionBase {
+ USING_FAST_MALLOC(PersistentRegion);
+
+ public:
+ inline void TraceNodes(Visitor*);
+
+ // Clears the Persistent and then frees the node.
+ void ReleaseNode(PersistentNode*);
+
+ void PrepareForThreadStateTermination(ThreadState*);
+
+ private:
+ static constexpr bool ShouldTracePersistentNode(Visitor*, PersistentNode*) {
+ return true;
+ }
+};
+
+inline void PersistentRegion::TraceNodes(Visitor* visitor) {
+ PersistentRegionBase::TraceNodesImpl(visitor, ShouldTracePersistentNode);
+}
+
+class PLATFORM_EXPORT CrossThreadPersistentRegion final
+ : public PersistentRegionBase {
+ USING_FAST_MALLOC(CrossThreadPersistentRegion);
+
+ public:
+ inline PersistentNode* AllocateNode(void* self, TraceCallback trace);
+ inline void FreeNode(PersistentNode*);
+ inline void TraceNodes(Visitor*);
+
+ void PrepareForThreadStateTermination(ThreadState*);
+
+#if defined(ADDRESS_SANITIZER)
+ void UnpoisonCrossThreadPersistents();
+#endif
+
+ private:
+ NO_SANITIZE_ADDRESS
+ static bool ShouldTracePersistentNode(Visitor*, PersistentNode*);
+};
+
+inline PersistentNode* CrossThreadPersistentRegion::AllocateNode(
+ void* self,
+ TraceCallback trace) {
+ PersistentMutexTraits<kCrossThreadPersistentConfiguration>::AssertAcquired();
+ return PersistentRegionBase::AllocateNode(self, trace);
+}
+
+inline void CrossThreadPersistentRegion::FreeNode(PersistentNode* node) {
+ PersistentMutexTraits<kCrossThreadPersistentConfiguration>::AssertAcquired();
+  // PersistentBase::UninitializeSafe opportunistically checks for
+  // uninitialized nodes to allow fast-path destruction of unused nodes. This
+  // check is performed without taking the lock that is required for processing
+  // a cross-thread node. After taking the lock the condition needs to be
+  // checked again to avoid double-freeing a node, because the node may have
+  // been concurrently freed by the garbage collector on another thread.
+ if (!node)
+ return;
+ PersistentRegionBase::FreeNode(node);
+}
+
+inline void CrossThreadPersistentRegion::TraceNodes(Visitor* visitor) {
+ PersistentRegionBase::TraceNodesImpl(visitor, ShouldTracePersistentNode);
+}
+
+template <ThreadAffinity affinity,
+ WeaknessPersistentConfiguration weakness_configuration>
+void PersistentNodePtr<affinity, weakness_configuration>::Initialize(
+ void* owner,
+ TraceCallback trace_callback) {
+ ThreadState* state = ThreadStateFor<affinity>::GetState();
+ DCHECK(state->CheckThread());
+ PersistentRegion* region =
+ weakness_configuration == kWeakPersistentConfiguration
+ ? state->GetWeakPersistentRegion()
+ : state->GetPersistentRegion();
+ ptr_ = region->AllocateNode(owner, trace_callback);
+#if DCHECK_IS_ON()
+ state_ = state;
+#endif
+}
+
+template <ThreadAffinity affinity,
+ WeaknessPersistentConfiguration weakness_configuration>
+void PersistentNodePtr<affinity, weakness_configuration>::Uninitialize() {
+ if (!ptr_)
+ return;
+ ThreadState* state = ThreadStateFor<affinity>::GetState();
+ DCHECK(state->CheckThread());
+#if DCHECK_IS_ON()
+ DCHECK_EQ(state_, state)
+ << "must be initialized and uninitialized on the same thread";
+ state_ = nullptr;
+#endif
+ PersistentRegion* region =
+ weakness_configuration == kWeakPersistentConfiguration
+ ? state->GetWeakPersistentRegion()
+ : state->GetPersistentRegion();
+ state->FreePersistentNode(region, ptr_);
+ ptr_ = nullptr;
+}
+
+template <WeaknessPersistentConfiguration weakness_configuration>
+void CrossThreadPersistentNodePtr<weakness_configuration>::Initialize(
+ void* owner,
+ TraceCallback trace_callback) {
+ PersistentMutexTraits<kCrossThreadPersistentConfiguration>::AssertAcquired();
+ CrossThreadPersistentRegion& region =
+ weakness_configuration == kWeakPersistentConfiguration
+ ? ProcessHeap::GetCrossThreadWeakPersistentRegion()
+ : ProcessHeap::GetCrossThreadPersistentRegion();
+ PersistentNode* node = region.AllocateNode(owner, trace_callback);
+ ptr_.store(node, std::memory_order_release);
+}
+
+template <WeaknessPersistentConfiguration weakness_configuration>
+void CrossThreadPersistentNodePtr<weakness_configuration>::Uninitialize() {
+ PersistentMutexTraits<kCrossThreadPersistentConfiguration>::AssertAcquired();
+ CrossThreadPersistentRegion& region =
+ weakness_configuration == kWeakPersistentConfiguration
+ ? ProcessHeap::GetCrossThreadWeakPersistentRegion()
+ : ProcessHeap::GetCrossThreadPersistentRegion();
+ region.FreeNode(ptr_.load(std::memory_order_relaxed));
+ ptr_.store(nullptr, std::memory_order_release);
+}
+
+template <WeaknessPersistentConfiguration weakness_configuration>
+void CrossThreadPersistentNodePtr<weakness_configuration>::ClearWithLockHeld() {
+ PersistentMutexTraits<kCrossThreadPersistentConfiguration>::AssertAcquired();
+ CrossThreadPersistentRegion& region =
+ weakness_configuration == kWeakPersistentConfiguration
+ ? ProcessHeap::GetCrossThreadWeakPersistentRegion()
+ : ProcessHeap::GetCrossThreadPersistentRegion();
+ region.FreeNode(ptr_.load(std::memory_order_relaxed));
+ ptr_.store(nullptr, std::memory_order_release);
+}
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PERSISTENT_NODE_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/process_heap.cc b/chromium/third_party/blink/renderer/platform/heap/impl/process_heap.cc
new file mode 100644
index 00000000000..038a7385aad
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/process_heap.cc
@@ -0,0 +1,71 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/heap/process_heap.h"
+
+#include "base/sampling_heap_profiler/poisson_allocation_sampler.h"
+#include "third_party/blink/public/common/features.h"
+#include "third_party/blink/renderer/platform/heap/heap.h"
+#include "third_party/blink/renderer/platform/heap/impl/gc_info.h"
+#include "third_party/blink/renderer/platform/heap/impl/persistent_node.h"
+#include "third_party/blink/renderer/platform/wtf/assertions.h"
+
+namespace blink {
+
+namespace {
+
+void BlinkGCAllocHook(uint8_t* address, size_t size, const char* context) {
+ base::PoissonAllocationSampler::RecordAlloc(
+ address, size, base::PoissonAllocationSampler::AllocatorType::kBlinkGC,
+ context);
+}
+
+void BlinkGCFreeHook(uint8_t* address) {
+ base::PoissonAllocationSampler::RecordFree(address);
+}
+
+} // namespace
+
+void ProcessHeap::Init() {
+ DCHECK(!base::FeatureList::IsEnabled(
+ blink::features::kBlinkHeapConcurrentMarking) ||
+ base::FeatureList::IsEnabled(
+ blink::features::kBlinkHeapIncrementalMarking));
+
+ total_allocated_space_ = 0;
+ total_allocated_object_size_ = 0;
+
+ GCInfoTable::CreateGlobalTable();
+
+ base::PoissonAllocationSampler::SetHooksInstallCallback([]() {
+ HeapAllocHooks::SetAllocationHook(&BlinkGCAllocHook);
+ HeapAllocHooks::SetFreeHook(&BlinkGCFreeHook);
+ });
+}
+
+void ProcessHeap::ResetHeapCounters() {
+ total_allocated_object_size_ = 0;
+}
+
+CrossThreadPersistentRegion& ProcessHeap::GetCrossThreadPersistentRegion() {
+ DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion,
+ persistent_region, ());
+ return persistent_region;
+}
+
+CrossThreadPersistentRegion& ProcessHeap::GetCrossThreadWeakPersistentRegion() {
+ DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion,
+ persistent_region, ());
+ return persistent_region;
+}
+
+Mutex& ProcessHeap::CrossThreadPersistentMutex() {
+ DEFINE_THREAD_SAFE_STATIC_LOCAL(Mutex, mutex, ());
+ return mutex;
+}
+
+std::atomic_size_t ProcessHeap::total_allocated_space_{0};
+std::atomic_size_t ProcessHeap::total_allocated_object_size_{0};
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/process_heap.h b/chromium/third_party/blink/renderer/platform/heap/impl/process_heap.h
new file mode 100644
index 00000000000..91112a7c039
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/process_heap.h
@@ -0,0 +1,69 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PROCESS_HEAP_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PROCESS_HEAP_H_
+
+#include <atomic>
+#include "third_party/blink/renderer/platform/platform_export.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+#include "third_party/blink/renderer/platform/wtf/threading_primitives.h"
+
+namespace blink {
+
+class CrossThreadPersistentRegion;
+
+class PLATFORM_EXPORT ProcessHeap {
+ STATIC_ONLY(ProcessHeap);
+
+ public:
+ static void Init();
+
+ static CrossThreadPersistentRegion& GetCrossThreadPersistentRegion();
+ static CrossThreadPersistentRegion& GetCrossThreadWeakPersistentRegion();
+
+  // Concurrent access to the CrossThreadPersistentRegion from multiple threads
+  // has to be prevented, as allocation, freeing, and iteration of nodes may
+  // otherwise cause data races.
+ //
+ // Examples include:
+ // - Iteration of strong cross-thread Persistents.
+ // - Iteration and processing of weak cross-thread Persistents. The lock
+ // needs to span both operations as iteration of weak persistents only
+ // registers memory regions that are then processed afterwards.
+ // - Marking phase in garbage collection: The whole phase requires locking
+ // as CrossThreadWeakPersistents may be converted to CrossThreadPersistent
+ // which must observe GC as an atomic operation.
+ static Mutex& CrossThreadPersistentMutex();
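+  //
+  // For example (illustrative; mirrors how the GC code uses this mutex):
+  //
+  //   MutexLocker locker(ProcessHeap::CrossThreadPersistentMutex());
+  //   ProcessHeap::GetCrossThreadPersistentRegion().TraceNodes(visitor);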
+
+ static void IncreaseTotalAllocatedObjectSize(size_t delta) {
+ total_allocated_object_size_.fetch_add(delta, std::memory_order_relaxed);
+ }
+ static void DecreaseTotalAllocatedObjectSize(size_t delta) {
+ total_allocated_object_size_.fetch_sub(delta, std::memory_order_relaxed);
+ }
+ static size_t TotalAllocatedObjectSize() {
+ return total_allocated_object_size_.load(std::memory_order_relaxed);
+ }
+ static void IncreaseTotalAllocatedSpace(size_t delta) {
+ total_allocated_space_.fetch_add(delta, std::memory_order_relaxed);
+ }
+ static void DecreaseTotalAllocatedSpace(size_t delta) {
+ total_allocated_space_.fetch_sub(delta, std::memory_order_relaxed);
+ }
+ static size_t TotalAllocatedSpace() {
+ return total_allocated_space_.load(std::memory_order_relaxed);
+ }
+ static void ResetHeapCounters();
+
+ private:
+ static std::atomic_size_t total_allocated_space_;
+ static std::atomic_size_t total_allocated_object_size_;
+
+ friend class ThreadState;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PROCESS_HEAP_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/thread_state.cc b/chromium/third_party/blink/renderer/platform/heap/impl/thread_state.cc
new file mode 100644
index 00000000000..228902f91f7
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/thread_state.cc
@@ -0,0 +1,1754 @@
+/*
+ * Copyright (C) 2013 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "third_party/blink/renderer/platform/heap/thread_state.h"
+
+#include <algorithm>
+#include <iomanip>
+#include <limits>
+#include <memory>
+
+#include "base/atomicops.h"
+#include "base/location.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/task_runner.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "build/build_config.h"
+#include "third_party/blink/public/common/features.h"
+#include "third_party/blink/public/platform/platform.h"
+#include "third_party/blink/renderer/platform/bindings/active_script_wrappable_base.h"
+#include "third_party/blink/renderer/platform/bindings/runtime_call_stats.h"
+#include "third_party/blink/renderer/platform/bindings/script_forbidden_scope.h"
+#include "third_party/blink/renderer/platform/bindings/v8_per_isolate_data.h"
+#include "third_party/blink/renderer/platform/heap/blink_gc.h"
+#include "third_party/blink/renderer/platform/heap/blink_gc_memory_dump_provider.h"
+#include "third_party/blink/renderer/platform/heap/handle.h"
+#include "third_party/blink/renderer/platform/heap/heap.h"
+#include "third_party/blink/renderer/platform/heap/heap_buildflags.h"
+#include "third_party/blink/renderer/platform/heap/heap_stats_collector.h"
+#include "third_party/blink/renderer/platform/heap/impl/heap_compact.h"
+#include "third_party/blink/renderer/platform/heap/impl/marking_scheduling_oracle.h"
+#include "third_party/blink/renderer/platform/heap/impl/marking_visitor.h"
+#include "third_party/blink/renderer/platform/heap/impl/page_pool.h"
+#include "third_party/blink/renderer/platform/heap/persistent.h"
+#include "third_party/blink/renderer/platform/heap/thread_state_scopes.h"
+#include "third_party/blink/renderer/platform/heap/unified_heap_controller.h"
+#include "third_party/blink/renderer/platform/heap/unified_heap_marking_visitor.h"
+#include "third_party/blink/renderer/platform/heap/visitor.h"
+#include "third_party/blink/renderer/platform/instrumentation/histogram.h"
+#include "third_party/blink/renderer/platform/instrumentation/tracing/trace_event.h"
+#include "third_party/blink/renderer/platform/instrumentation/tracing/web_memory_allocator_dump.h"
+#include "third_party/blink/renderer/platform/instrumentation/tracing/web_process_memory_dump.h"
+#include "third_party/blink/renderer/platform/scheduler/public/post_cancellable_task.h"
+#include "third_party/blink/renderer/platform/scheduler/public/thread.h"
+#include "third_party/blink/renderer/platform/scheduler/public/thread_scheduler.h"
+#include "third_party/blink/renderer/platform/scheduler/public/worker_pool.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/partitions.h"
+#include "third_party/blink/renderer/platform/wtf/cross_thread_functional.h"
+#include "third_party/blink/renderer/platform/wtf/stack_util.h"
+#include "third_party/blink/renderer/platform/wtf/threading_primitives.h"
+#include "v8/include/v8-profiler.h"
+#include "v8/include/v8.h"
+
+#if defined(OS_WIN)
+#include <stddef.h>
+#include <windows.h>
+#include <winnt.h>
+#endif
+
+#if defined(MEMORY_SANITIZER)
+#include <sanitizer/msan_interface.h>
+#endif
+
+#if defined(OS_FREEBSD)
+#include <pthread_np.h>
+#endif
+
+namespace blink {
+
+WTF::ThreadSpecific<ThreadState*>* ThreadState::thread_specific_ = nullptr;
+uint8_t ThreadState::main_thread_state_storage_[sizeof(ThreadState)];
+
+namespace {
+
+constexpr double kMarkingScheduleRatioBeforeConcurrentPriorityIncrease = 0.5;
+
+constexpr size_t kMaxTerminationGCLoops = 20;
+
+// Helper function to convert a byte count to a KB count, capping at
+// INT_MAX if the number is larger than that.
+constexpr base::Histogram::Sample CappedSizeInKB(size_t size_in_bytes) {
+ return base::saturated_cast<base::Histogram::Sample>(size_in_bytes / 1024);
+}
+
+class WorkerPoolTaskRunner : public base::TaskRunner {
+ public:
+ bool PostDelayedTask(const base::Location& location,
+ base::OnceClosure task,
+ base::TimeDelta) override {
+ worker_pool::PostTask(location, WTF::CrossThreadBindOnce(std::move(task)));
+ return true;
+ }
+};
+
+} // namespace
+
+class ThreadState::IncrementalMarkingScheduler {
+ public:
+ explicit IncrementalMarkingScheduler(ThreadState* thread_state)
+ : thread_state_(thread_state) {}
+
+ // Starts incremental marking with further scheduled steps.
+ void Start(BlinkGC::GCReason reason) {
+ Init(reason);
+ thread_state_->IncrementalMarkingStart(reason_);
+ ScheduleTask();
+ }
+
+ void Restart() {
+ DCHECK(!task_.IsActive());
+ ScheduleTask();
+ }
+
+ // Cancels incremental marking task in case there is any pending.
+ void Cancel() { task_.Cancel(); }
+
+ private:
+ void Init(BlinkGC::GCReason reason) {
+ DCHECK(!task_.IsActive());
+ reason_ = reason;
+ }
+
+ void ScheduleTask() {
+ // Reassigning to the task will cancel the currently scheduled one.
+ task_ = PostNonNestableCancellableTask(
+ *ThreadScheduler::Current()->V8TaskRunner(), FROM_HERE,
+ WTF::Bind(&IncrementalMarkingScheduler::Dispatch,
+ WTF::Unretained(this)));
+ }
+
+ void Dispatch() {
+ switch (thread_state_->GetGCState()) {
+ case ThreadState::kIncrementalMarkingStepScheduled:
+ thread_state_->IncrementalMarkingStep(BlinkGC::kNoHeapPointersOnStack);
+ if (thread_state_->GetGCState() !=
+ ThreadState::kIncrementalMarkingStepPaused) {
+ ScheduleTask();
+ }
+ break;
+ case ThreadState::kIncrementalMarkingFinalizeScheduled:
+ thread_state_->IncrementalMarkingFinalize();
+ break;
+ default:
+ break;
+ }
+ }
+
+ ThreadState* thread_state_;
+ BlinkGC::GCReason reason_;
+ TaskHandle task_;
+};
+
+ThreadState::ThreadState()
+ : thread_(CurrentThread()),
+ persistent_region_(std::make_unique<PersistentRegion>()),
+ weak_persistent_region_(std::make_unique<PersistentRegion>()),
+ start_of_stack_(reinterpret_cast<Address*>(WTF::GetStackStart())),
+#if defined(ADDRESS_SANITIZER)
+ asan_fake_stack_(__asan_get_current_fake_stack()),
+#endif
+ incremental_marking_scheduler_(
+ std::make_unique<IncrementalMarkingScheduler>(this)) {
+ DCHECK(CheckThread());
+ DCHECK(!**thread_specific_);
+ **thread_specific_ = this;
+ heap_ = std::make_unique<ThreadHeap>(this);
+}
+
+ThreadState::~ThreadState() {
+ DCHECK(CheckThread());
+ if (IsMainThread())
+ DCHECK_EQ(0u, Heap().stats_collector()->allocated_space_bytes());
+ CHECK(GetGCState() == ThreadState::kNoGCScheduled);
+
+ **thread_specific_ = nullptr;
+}
+
+ThreadState* ThreadState::AttachMainThread() {
+ thread_specific_ = new WTF::ThreadSpecific<ThreadState*>();
+ return new (main_thread_state_storage_) ThreadState();
+}
+
+ThreadState* ThreadState::AttachCurrentThread() {
+ return new ThreadState();
+}
+
+void ThreadState::DetachCurrentThread() {
+ ThreadState* state = Current();
+ DCHECK(!state->IsMainThread());
+ state->RunTerminationGC();
+ delete state;
+}
+
+void ThreadState::AttachToIsolate(
+ v8::Isolate* isolate,
+ V8BuildEmbedderGraphCallback v8_build_embedder_graph) {
+ DCHECK(isolate);
+ isolate_ = isolate;
+ v8_build_embedder_graph_ = v8_build_embedder_graph;
+ unified_heap_controller_.reset(new UnifiedHeapController(this));
+ isolate_->SetEmbedderHeapTracer(unified_heap_controller_.get());
+  unified_heap_controller_->SetStackStart(WTF::GetStackStart());
+ if (v8::HeapProfiler* profiler = isolate->GetHeapProfiler()) {
+ profiler->AddBuildEmbedderGraphCallback(v8_build_embedder_graph, nullptr);
+ }
+}
+
+void ThreadState::DetachFromIsolate() {
+ if (isolate_) {
+ isolate_->SetEmbedderHeapTracer(nullptr);
+ if (v8::HeapProfiler* profiler = isolate_->GetHeapProfiler()) {
+ profiler->RemoveBuildEmbedderGraphCallback(v8_build_embedder_graph_,
+ nullptr);
+ }
+ }
+ isolate_ = nullptr;
+ v8_build_embedder_graph_ = nullptr;
+ unified_heap_controller_.reset();
+}
+
+void ThreadState::RunTerminationGC() {
+ DCHECK(!IsMainThread());
+ DCHECK(CheckThread());
+
+ FinishIncrementalMarkingIfRunning(BlinkGC::CollectionType::kMajor,
+ BlinkGC::kNoHeapPointersOnStack,
+ BlinkGC::kIncrementalAndConcurrentMarking,
+ BlinkGC::kConcurrentAndLazySweeping,
+ BlinkGC::GCReason::kThreadTerminationGC);
+
+ // Finish sweeping.
+ CompleteSweep();
+
+ ReleaseStaticPersistentNodes();
+
+  // PrepareForThreadStateTermination removes strong references, so there is no
+  // need to call it on the CrossThreadWeakPersistentRegion.
+ ProcessHeap::GetCrossThreadPersistentRegion()
+ .PrepareForThreadStateTermination(this);
+
+  // Do thread-local GCs as long as the count of thread-local Persistents
+  // changes and is above zero.
+ int old_count = -1;
+ int current_count = GetPersistentRegion()->NodesInUse();
+ DCHECK_GE(current_count, 0);
+ while (current_count != old_count) {
+ CollectGarbage(BlinkGC::CollectionType::kMajor,
+ BlinkGC::kNoHeapPointersOnStack, BlinkGC::kAtomicMarking,
+ BlinkGC::kEagerSweeping,
+ BlinkGC::GCReason::kThreadTerminationGC);
+ // Release the thread-local static persistents that were
+ // instantiated while running the termination GC.
+ ReleaseStaticPersistentNodes();
+ old_count = current_count;
+ current_count = GetPersistentRegion()->NodesInUse();
+ }
+
+  // We should not have any persistents left when getting to this point. If we
+  // do, it is a bug caused by a reference cycle or a missing
+  // RegisterAsStaticReference. Clearing out all the Persistents avoids stale
+  // pointers and gets them reported as nullptr dereferences.
+ if (current_count) {
+ for (size_t i = 0;
+ i < kMaxTerminationGCLoops && GetPersistentRegion()->NodesInUse();
+ i++) {
+ GetPersistentRegion()->PrepareForThreadStateTermination(this);
+ CollectGarbage(BlinkGC::CollectionType::kMajor,
+ BlinkGC::kNoHeapPointersOnStack, BlinkGC::kAtomicMarking,
+ BlinkGC::kEagerSweeping,
+ BlinkGC::GCReason::kThreadTerminationGC);
+ }
+ }
+
+ CHECK(!GetPersistentRegion()->NodesInUse());
+
+  // All pre-finalizers should have been consumed by now.
+ DCHECK(ordered_pre_finalizers_.empty());
+ CHECK_EQ(GetGCState(), kNoGCScheduled);
+
+ Heap().RemoveAllPages();
+}
+
+NO_SANITIZE_ADDRESS
+void ThreadState::VisitAsanFakeStackForPointer(MarkingVisitor* visitor,
+ Address ptr,
+ Address* start_of_stack,
+ Address* end_of_stack) {
+#if defined(ADDRESS_SANITIZER)
+ Address* fake_frame_start = nullptr;
+ Address* fake_frame_end = nullptr;
+ Address* maybe_fake_frame = reinterpret_cast<Address*>(ptr);
+ Address* real_frame_for_fake_frame = reinterpret_cast<Address*>(
+ __asan_addr_is_in_fake_stack(asan_fake_stack_, maybe_fake_frame,
+ reinterpret_cast<void**>(&fake_frame_start),
+ reinterpret_cast<void**>(&fake_frame_end)));
+ if (real_frame_for_fake_frame) {
+ // This is a fake frame from the asan fake stack.
+ if (real_frame_for_fake_frame > end_of_stack &&
+ start_of_stack > real_frame_for_fake_frame) {
+ // The real stack address for the asan fake frame is
+ // within the stack range that we need to scan so we need
+ // to visit the values in the fake frame.
+ for (Address* p = fake_frame_start; p < fake_frame_end; ++p)
+ heap_->CheckAndMarkPointer(visitor, *p);
+ }
+ }
+#endif // ADDRESS_SANITIZER
+}
+
+// Stack scanning may overrun the bounds of local objects and/or race with
+// other threads that use this stack.
+NO_SANITIZE_ADDRESS
+NO_SANITIZE_HWADDRESS
+NO_SANITIZE_THREAD
+void ThreadState::VisitStackImpl(MarkingVisitor* visitor,
+ Address* start_of_stack,
+ Address* end_of_stack) {
+ DCHECK_EQ(current_gc_data_.stack_state, BlinkGC::kHeapPointersOnStack);
+
+  // Ensure that |current| is aligned to the address size; otherwise the loop
+  // below will read past the start address.
+ Address* current = reinterpret_cast<Address*>(
+ reinterpret_cast<intptr_t>(end_of_stack) & ~(sizeof(Address) - 1));
+
+ for (; current < start_of_stack; ++current) {
+ Address ptr = *current;
+#if defined(MEMORY_SANITIZER)
+ // |ptr| may be uninitialized by design. Mark it as initialized to keep
+ // MSan from complaining.
+ // Note: it may be tempting to get rid of |ptr| and simply use |current|
+ // here, but that would be incorrect. We intentionally use a local
+ // variable because we don't want to unpoison the original stack.
+ __msan_unpoison(&ptr, sizeof(ptr));
+#endif
+ heap_->CheckAndMarkPointer(visitor, ptr);
+ VisitAsanFakeStackForPointer(visitor, ptr, start_of_stack, end_of_stack);
+ }
+}
+
+void ThreadState::VisitStack(MarkingVisitor* visitor, Address* end_of_stack) {
+ VisitStackImpl(visitor, start_of_stack_, end_of_stack);
+}
+
+void ThreadState::VisitUnsafeStack(MarkingVisitor* visitor) {
+#if HAS_FEATURE(safe_stack)
+ VisitStackImpl(visitor,
+ static_cast<Address*>(__builtin___get_unsafe_stack_top()),
+ static_cast<Address*>(__builtin___get_unsafe_stack_ptr()));
+#endif // HAS_FEATURE(safe_stack)
+}
+
+void ThreadState::VisitPersistents(Visitor* visitor) {
+ ThreadHeapStatsCollector::Scope stats_scope(
+ Heap().stats_collector(),
+ ThreadHeapStatsCollector::kVisitPersistentRoots);
+ {
+ ThreadHeapStatsCollector::Scope inner_stats_scope(
+ Heap().stats_collector(),
+ ThreadHeapStatsCollector::kVisitCrossThreadPersistents);
+ ProcessHeap::CrossThreadPersistentMutex().AssertAcquired();
+ ProcessHeap::GetCrossThreadPersistentRegion().TraceNodes(visitor);
+ }
+ {
+ ThreadHeapStatsCollector::Scope inner_stats_scope(
+ Heap().stats_collector(), ThreadHeapStatsCollector::kVisitPersistents);
+ persistent_region_->TraceNodes(visitor);
+ }
+}
+
+void ThreadState::VisitRememberedSets(MarkingVisitor* visitor) {
+ ThreadHeapStatsCollector::EnabledScope stats_scope(
+ Heap().stats_collector(), ThreadHeapStatsCollector::kVisitRememberedSets);
+ Heap().VisitRememberedSets(visitor);
+}
+
+void ThreadState::VisitWeakPersistents(Visitor* visitor) {
+ ProcessHeap::GetCrossThreadWeakPersistentRegion().TraceNodes(visitor);
+ weak_persistent_region_->TraceNodes(visitor);
+}
+
+void ThreadState::ScheduleForcedGCForTesting() {
+ DCHECK(CheckThread());
+ CompleteSweep();
+ SetGCState(kForcedGCForTestingScheduled);
+}
+
+void ThreadState::ScheduleGCIfNeeded() {
+ VLOG(2) << "[state:" << this << "] ScheduleGCIfNeeded";
+ DCHECK(CheckThread());
+
+ // Allocation is allowed during sweeping, but those allocations should not
+ // trigger nested GCs.
+ if (IsGCForbidden() || SweepForbidden())
+ return;
+
+ // This method should not call out to V8 during unified heap garbage
+ // collections. Specifically, reporting memory to V8 may trigger a marking
+ // step which is not allowed during construction of an object. The reason is
+ // that a parent object's constructor is potentially being invoked which may
+ // have already published the object. In that case the object may be colored
+ // black in a v8 marking step which invalidates the assumption that write
+ // barriers may be avoided when constructing an object as it is white.
+ if (IsUnifiedGCMarkingInProgress())
+ return;
+
+ if (GetGCState() == kNoGCScheduled &&
+ base::FeatureList::IsEnabled(
+ blink::features::kBlinkHeapIncrementalMarkingStress)) {
+ VLOG(2) << "[state:" << this << "] "
+ << "ScheduleGCIfNeeded: Scheduled incremental marking for testing";
+ StartIncrementalMarking(BlinkGC::GCReason::kForcedGCForTesting);
+ return;
+ }
+}
+
+ThreadState* ThreadState::FromObject(const void* object) {
+ DCHECK(object);
+ BasePage* page = PageFromObject(object);
+ DCHECK(page);
+ DCHECK(page->Arena());
+ return page->Arena()->GetThreadState();
+}
+
+void ThreadState::PerformIdleLazySweep(base::TimeTicks deadline) {
+ DCHECK(CheckThread());
+
+ // If we are not in a sweeping phase, there is nothing to do here.
+ if (!IsSweepingInProgress())
+ return;
+
+  // This check is here to prevent PerformIdleLazySweep() from being called
+  // recursively. It is unclear whether that can actually happen, but the
+  // check is kept as a safeguard.
+ if (SweepForbidden())
+ return;
+
+ RUNTIME_CALL_TIMER_SCOPE_IF_ISOLATE_EXISTS(
+ GetIsolate(), RuntimeCallStats::CounterId::kPerformIdleLazySweep);
+
+ bool sweep_completed = false;
+ {
+ AtomicPauseScope atomic_pause_scope(this);
+ ScriptForbiddenScope script_forbidden_scope;
+ SweepForbiddenScope scope(this);
+ ThreadHeapStatsCollector::EnabledScope stats_scope(
+ Heap().stats_collector(), ThreadHeapStatsCollector::kLazySweepInIdle,
+ "idleDeltaInSeconds", (deadline - base::TimeTicks::Now()).InSecondsF());
+ sweep_completed = Heap().AdvanceLazySweep(deadline);
+    // If sweeping could not be finished within the deadline, request another
+    // idle task for the remaining sweeping.
+ if (sweep_completed) {
+ SynchronizeAndFinishConcurrentSweeping();
+ } else {
+ ScheduleIdleLazySweep();
+ }
+ }
+
+ if (sweep_completed) {
+ NotifySweepDone();
+ }
+}
+
+void ThreadState::PerformConcurrentSweep(base::JobDelegate* job) {
+ VLOG(2) << "[state:" << this << "] [threadid:" << CurrentThread() << "] "
+ << "ConcurrentSweep";
+ ThreadHeapStatsCollector::EnabledConcurrentScope stats_scope(
+ Heap().stats_collector(),
+ ThreadHeapStatsCollector::kConcurrentSweepingStep);
+
+ if (Heap().AdvanceConcurrentSweep(job))
+ has_unswept_pages_.store(false, std::memory_order_relaxed);
+}
+
+void ThreadState::StartIncrementalMarking(BlinkGC::GCReason reason) {
+ DCHECK(CheckThread());
+ // Schedule an incremental GC only when no GC is scheduled. Otherwise, already
+ // scheduled GCs should be prioritized.
+ if (GetGCState() != kNoGCScheduled) {
+ return;
+ }
+ CompleteSweep();
+ reason_for_scheduled_gc_ = reason;
+ SetGCState(kIncrementalGCScheduled);
+ incremental_marking_scheduler_->Start(reason);
+}
+
+void ThreadState::ScheduleIdleLazySweep() {
+ ThreadScheduler::Current()->PostIdleTask(
+ FROM_HERE,
+ WTF::Bind(&ThreadState::PerformIdleLazySweep, WTF::Unretained(this)));
+}
+
+void ThreadState::ScheduleConcurrentAndLazySweep() {
+ ScheduleIdleLazySweep();
+
+ if (!base::FeatureList::IsEnabled(
+ blink::features::kBlinkHeapConcurrentSweeping)) {
+ return;
+ }
+
+ has_unswept_pages_ = true;
+ sweeper_handle_ = base::PostJob(
+ FROM_HERE,
+ {base::TaskPriority::USER_VISIBLE,
+ base::TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
+ ConvertToBaseRepeatingCallback(
+ WTF::CrossThreadBindRepeating(&ThreadState::PerformConcurrentSweep,
+ WTF::CrossThreadUnretained(this))),
+ ConvertToBaseRepeatingCallback(WTF::CrossThreadBindRepeating(
+ [](ThreadState* state, size_t /*worker_count*/) -> size_t {
+ return state->has_unswept_pages_.load(std::memory_order_relaxed)
+ ? 1
+ : 0;
+ },
+ WTF::CrossThreadUnretained(this))));
+}
+
+namespace {
+
+#define UNEXPECTED_GCSTATE(s) \
+ case ThreadState::s: \
+ LOG(FATAL) << "Unexpected transition while in GCState " #s; \
+ return
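+
+// For illustration, UNEXPECTED_GCSTATE(kNoGCScheduled) expands to roughly:
+//   case ThreadState::kNoGCScheduled:
+//     LOG(FATAL) << "Unexpected transition while in GCState kNoGCScheduled";
+//     return;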
+
+void UnexpectedGCState(ThreadState::GCState gc_state) {
+ switch (gc_state) {
+ UNEXPECTED_GCSTATE(kNoGCScheduled);
+ UNEXPECTED_GCSTATE(kForcedGCForTestingScheduled);
+ UNEXPECTED_GCSTATE(kIncrementalMarkingStepPaused);
+ UNEXPECTED_GCSTATE(kIncrementalMarkingStepScheduled);
+ UNEXPECTED_GCSTATE(kIncrementalMarkingFinalizeScheduled);
+ UNEXPECTED_GCSTATE(kIncrementalGCScheduled);
+ }
+}
+
+#undef UNEXPECTED_GCSTATE
+
+} // namespace
+
+#define VERIFY_STATE_TRANSITION(condition) \
+ if (UNLIKELY(!(condition))) \
+ UnexpectedGCState(gc_state_)
+
+void ThreadState::SetGCState(GCState gc_state) {
+ switch (gc_state) {
+ case kNoGCScheduled:
+ DCHECK(CheckThread());
+ VERIFY_STATE_TRANSITION(gc_state_ == kNoGCScheduled ||
+ gc_state_ == kForcedGCForTestingScheduled ||
+ gc_state_ == kIncrementalMarkingStepPaused ||
+ gc_state_ == kIncrementalMarkingStepScheduled ||
+ gc_state_ ==
+ kIncrementalMarkingFinalizeScheduled ||
+ gc_state_ == kIncrementalGCScheduled);
+ break;
+ case kIncrementalMarkingStepScheduled:
+ DCHECK(CheckThread());
+ VERIFY_STATE_TRANSITION(gc_state_ == kNoGCScheduled ||
+ gc_state_ == kIncrementalMarkingStepScheduled ||
+ gc_state_ == kIncrementalGCScheduled ||
+ gc_state_ == kIncrementalMarkingStepPaused);
+ break;
+ case kIncrementalMarkingFinalizeScheduled:
+ DCHECK(CheckThread());
+ VERIFY_STATE_TRANSITION(gc_state_ == kIncrementalMarkingStepScheduled);
+ break;
+ case kForcedGCForTestingScheduled:
+ DCHECK(CheckThread());
+ DCHECK(!IsSweepingInProgress());
+ VERIFY_STATE_TRANSITION(gc_state_ == kNoGCScheduled ||
+ gc_state_ == kIncrementalMarkingStepPaused ||
+ gc_state_ == kIncrementalMarkingStepScheduled ||
+ gc_state_ ==
+ kIncrementalMarkingFinalizeScheduled ||
+ gc_state_ == kForcedGCForTestingScheduled ||
+ gc_state_ == kIncrementalGCScheduled);
+ break;
+ case kIncrementalGCScheduled:
+ DCHECK(CheckThread());
+ DCHECK(!IsMarkingInProgress());
+ DCHECK(!IsSweepingInProgress());
+ VERIFY_STATE_TRANSITION(gc_state_ == kNoGCScheduled);
+ break;
+ case kIncrementalMarkingStepPaused:
+ DCHECK(CheckThread());
+ DCHECK(IsMarkingInProgress());
+ DCHECK(!IsSweepingInProgress());
+ VERIFY_STATE_TRANSITION(gc_state_ == kIncrementalMarkingStepScheduled);
+ break;
+ default:
+ NOTREACHED();
+ }
+ gc_state_ = gc_state;
+}
+
+#undef VERIFY_STATE_TRANSITION
+
+void ThreadState::SetGCPhase(GCPhase gc_phase) {
+ switch (gc_phase) {
+ case GCPhase::kNone:
+ DCHECK_EQ(gc_phase_, GCPhase::kSweeping);
+ break;
+ case GCPhase::kMarking:
+ DCHECK_EQ(gc_phase_, GCPhase::kNone);
+ break;
+ case GCPhase::kSweeping:
+ DCHECK_EQ(gc_phase_, GCPhase::kMarking);
+ break;
+ }
+ gc_phase_ = gc_phase;
+}
+
+void ThreadState::RunScheduledGC(BlinkGC::StackState stack_state) {
+ DCHECK(CheckThread());
+ if (stack_state != BlinkGC::kNoHeapPointersOnStack)
+ return;
+
+ // If a safe point is entered while initiating a GC, we clearly do
+ // not want to do another as part of that -- the safe point is only
+ // entered after checking if a scheduled GC ought to run first.
+ // Prevent that from happening by marking GCs as forbidden while
+ // one is initiated and later running.
+ if (IsGCForbidden())
+ return;
+
+ switch (GetGCState()) {
+ case kForcedGCForTestingScheduled:
+ forced_scheduled_gc_for_testing_ = true;
+ CollectAllGarbageForTesting();
+ forced_scheduled_gc_for_testing_ = false;
+ break;
+ default:
+ break;
+ }
+}
+
+void ThreadState::AtomicPauseMarkPrologue(
+ BlinkGC::CollectionType collection_type,
+ BlinkGC::StackState stack_state,
+ BlinkGC::MarkingType marking_type,
+ BlinkGC::GCReason reason) {
+ ThreadHeapStatsCollector::EnabledScope mark_prologue_scope(
+ Heap().stats_collector(),
+ ThreadHeapStatsCollector::kAtomicPauseMarkPrologue);
+ EnterAtomicPause();
+ EnterNoAllocationScope();
+ EnterGCForbiddenScope();
+
+ if (HeapPointersOnStackForced()) {
+ stack_state = BlinkGC::kHeapPointersOnStack;
+ }
+
+ if (IsMarkingInProgress()) {
+    // Incremental marking is already in progress. Only update the state that
+    // actually needs updating.
+ SetGCState(kNoGCScheduled);
+ if (base::FeatureList::IsEnabled(
+ blink::features::kBlinkHeapConcurrentMarking)) {
+ // Stop concurrent markers and wait synchronously until they have all
+ // returned.
+ marker_handle_.Cancel();
+ }
+#if DCHECK_IS_ON()
+ MarkingWorklist* marking_worklist = Heap().GetMarkingWorklist();
+ WriteBarrierWorklist* write_barrier_worklist =
+ Heap().GetWriteBarrierWorklist();
+ for (int concurrent_task = WorklistTaskId::ConcurrentThreadBase;
+ concurrent_task < MarkingWorklist::kNumTasks; ++concurrent_task) {
+ DCHECK(marking_worklist->IsLocalEmpty(concurrent_task));
+ DCHECK(write_barrier_worklist->IsLocalEmpty(concurrent_task));
+ }
+#endif // DCHECK_IS_ON()
+ // Compaction needs to be canceled when incremental marking ends with a
+ // conservative GC.
+ if (stack_state == BlinkGC::kHeapPointersOnStack)
+ Heap().Compaction()->Cancel();
+ DisableIncrementalMarkingBarrier();
+ current_gc_data_.reason = reason;
+ current_gc_data_.stack_state = stack_state;
+ Heap().stats_collector()->UpdateReason(reason);
+ } else {
+ DCHECK(!Heap().Compaction()->IsCompacting());
+ MarkPhasePrologue(collection_type, stack_state, marking_type, reason);
+ }
+
+ if (stack_state == BlinkGC::kNoHeapPointersOnStack) {
+ Heap().FlushNotFullyConstructedObjects();
+ }
+
+ DCHECK(InAtomicMarkingPause());
+ Heap().MakeConsistentForGC();
+ // AtomicPauseMarkPrologue is the common entry point for marking. The
+ // requirement is to lock from roots marking to weakness processing which is
+ // why the lock is taken at the end of the prologue.
+ static_cast<MutexBase&>(ProcessHeap::CrossThreadPersistentMutex()).lock();
+}
+
+void ThreadState::AtomicPauseEpilogue() {
+ if (!IsSweepingInProgress()) {
+    // Sweeping was finished during the atomic pause. Updating statistics needs
+    // to happen outside of the top-most stats scope.
+ PostSweep();
+ }
+}
+
+void ThreadState::CompleteSweep() {
+ DCHECK(CheckThread());
+ // If we are not in a sweeping phase, there is nothing to do here.
+ if (!IsSweepingInProgress())
+ return;
+
+  // CompleteSweep() can be called recursively if finalizers can allocate
+  // memory and the allocation triggers CompleteSweep(). This check prevents
+  // sweeping from being executed recursively.
+ if (SweepForbidden())
+ return;
+
+ {
+ // CompleteSweep may be called during regular mutator execution, from a
+ // task, or from the atomic pause in which the atomic scope has already been
+ // opened.
+ const bool was_in_atomic_pause = in_atomic_pause();
+ if (!was_in_atomic_pause)
+ EnterAtomicPause();
+ ScriptForbiddenScope script_forbidden;
+ SweepForbiddenScope scope(this);
+ ThreadHeapStatsCollector::EnabledScope stats_scope(
+ Heap().stats_collector(), ThreadHeapStatsCollector::kCompleteSweep);
+ // Boost priority of sweeping job to complete ASAP and avoid taking time on
+ // the main thread.
+ if (sweeper_handle_)
+ sweeper_handle_.UpdatePriority(base::TaskPriority::USER_BLOCKING);
+ Heap().CompleteSweep();
+ SynchronizeAndFinishConcurrentSweeping();
+
+ if (!was_in_atomic_pause)
+ LeaveAtomicPause();
+ }
+ NotifySweepDone();
+}
+
+void ThreadState::SynchronizeAndFinishConcurrentSweeping() {
+ DCHECK(CheckThread());
+ DCHECK(IsSweepingInProgress());
+ DCHECK(SweepForbidden());
+
+ // Wait for concurrent sweepers.
+ if (sweeper_handle_)
+ sweeper_handle_.Cancel();
+
+ // Concurrent sweepers may perform some work at the last stage (e.g.
+ // sweeping the last page and preparing finalizers).
+ Heap().InvokeFinalizersOnSweptPages();
+}
+
+BlinkGCObserver::BlinkGCObserver(ThreadState* thread_state)
+ : thread_state_(thread_state) {
+ thread_state_->AddObserver(this);
+}
+
+BlinkGCObserver::~BlinkGCObserver() {
+ thread_state_->RemoveObserver(this);
+}
+
+namespace {
+
+// Update trace counters with statistics from the current and previous garbage
+// collection cycle. We allow emitting current values here since these values
+// can be useful for inspecting traces.
+void UpdateTraceCounters(const ThreadHeapStatsCollector& stats_collector) {
+ bool gc_tracing_enabled;
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("blink_gc"),
+ &gc_tracing_enabled);
+ if (!gc_tracing_enabled)
+ return;
+
+ // Previous garbage collection cycle values.
+ const ThreadHeapStatsCollector::Event& event = stats_collector.previous();
+ const int collection_rate_percent =
+ static_cast<int>(100 * (1.0 - event.live_object_rate));
+ TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"),
+ "BlinkGC.CollectionRate", collection_rate_percent);
+ TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"),
+ "BlinkGC.MarkedObjectSizeAtLastCompleteSweepKB",
+ CappedSizeInKB(event.marked_bytes));
+ TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"),
+ "BlinkGC.ObjectSizeAtLastGCKB",
+ CappedSizeInKB(event.object_size_in_bytes_before_sweeping));
+ TRACE_COUNTER1(
+ TRACE_DISABLED_BY_DEFAULT("blink_gc"), "BlinkGC.AllocatedSpaceAtLastGCKB",
+ CappedSizeInKB(event.allocated_space_in_bytes_before_sweeping));
+ TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"),
+ "BlinkGC.PartitionAllocSizeAtLastGCKB",
+ CappedSizeInKB(event.partition_alloc_bytes_before_sweeping));
+
+ // Current values.
+ TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"),
+ "BlinkGC.AllocatedSpaceKB",
+ CappedSizeInKB(stats_collector.allocated_space_bytes()));
+ size_t allocated_bytes_since_prev_gc =
+ stats_collector.allocated_bytes_since_prev_gc() > 0
+ ? static_cast<size_t>(stats_collector.allocated_bytes_since_prev_gc())
+ : 0;
+ TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"),
+ "BlinkGC.AllocatedObjectSizeSincePreviousGCKB",
+ CappedSizeInKB(allocated_bytes_since_prev_gc));
+ TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"),
+ "PartitionAlloc.TotalSizeOfCommittedPagesKB",
+ CappedSizeInKB(WTF::Partitions::TotalSizeOfCommittedPages()));
+}
+
+// Update histograms with statistics from the previous garbage collection cycle.
+// Anything that is part of a histogram should have a well-defined lifetime
+// with respect to a garbage collection cycle.
+void UpdateHistograms(const ThreadHeapStatsCollector::Event& event) {
+ UMA_HISTOGRAM_ENUMERATION("BlinkGC.GCReason", event.reason);
+
+ UMA_HISTOGRAM_TIMES("BlinkGC.TimeForAtomicPhase", event.atomic_pause_time());
+ UMA_HISTOGRAM_TIMES("BlinkGC.TimeForAtomicPhaseMarking",
+ event.atomic_marking_time());
+ UMA_HISTOGRAM_TIMES("BlinkGC.TimeForGCCycle", event.gc_cycle_time());
+ UMA_HISTOGRAM_TIMES("BlinkGC.TimeForMarkingRoots",
+ event.roots_marking_time());
+ UMA_HISTOGRAM_TIMES("BlinkGC.TimeForIncrementalMarking",
+ event.incremental_marking_time());
+ UMA_HISTOGRAM_TIMES("BlinkGC.TimeForMarking.Foreground",
+ event.foreground_marking_time());
+ UMA_HISTOGRAM_TIMES("BlinkGC.TimeForMarking.Background",
+ event.background_marking_time());
+ UMA_HISTOGRAM_TIMES("BlinkGC.TimeForMarking", event.marking_time());
+ UMA_HISTOGRAM_TIMES("BlinkGC.TimeForNestedInV8", event.gc_nested_in_v8);
+ UMA_HISTOGRAM_TIMES("BlinkGC.TimeForSweepingForeground",
+ event.foreground_sweeping_time());
+ UMA_HISTOGRAM_TIMES("BlinkGC.TimeForSweepingBackground",
+ event.background_sweeping_time());
+ UMA_HISTOGRAM_TIMES("BlinkGC.TimeForSweepingSum", event.sweeping_time());
+ UMA_HISTOGRAM_TIMES(
+ "BlinkGC.TimeForCompleteSweep",
+ event.scope_data[ThreadHeapStatsCollector::kCompleteSweep]);
+ UMA_HISTOGRAM_TIMES(
+ "BlinkGC.TimeForInvokingPreFinalizers",
+ event.scope_data[ThreadHeapStatsCollector::kInvokePreFinalizers]);
+ UMA_HISTOGRAM_TIMES(
+ "BlinkGC.TimeForHeapCompaction",
+ event.scope_data[ThreadHeapStatsCollector::kAtomicPauseCompaction]);
+ UMA_HISTOGRAM_TIMES(
+ "BlinkGC.TimeForGlobalWeakProcessing",
+ event.scope_data[ThreadHeapStatsCollector::kMarkWeakProcessing]);
+
+ base::TimeDelta marking_duration = event.foreground_marking_time();
+ constexpr size_t kMinMarkedBytesForReportingThroughput = 1024 * 1024;
+ if (base::TimeTicks::IsHighResolution() &&
+ (event.marked_bytes > kMinMarkedBytesForReportingThroughput) &&
+ !marking_duration.is_zero()) {
+ DCHECK_GT(marking_duration.InMillisecondsF(), 0.0);
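+    // The computed value is bytes per millisecond scaled to MB/s; as an
+    // illustrative example, 10 MiB marked in 100 ms reports a throughput of
+    // 100.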
+ const int main_thread_marking_throughput_mb_per_s = static_cast<int>(
+ static_cast<double>(event.marked_bytes) /
+ marking_duration.InMillisecondsF() * 1000 / 1024 / 1024);
+ UMA_HISTOGRAM_COUNTS_100000("BlinkGC.MainThreadMarkingThroughput",
+ main_thread_marking_throughput_mb_per_s);
+ }
+
+ DEFINE_STATIC_LOCAL(
+ CustomCountHistogram, object_size_freed_by_heap_compaction,
+ ("BlinkGC.ObjectSizeFreedByHeapCompaction", 1, 4 * 1024 * 1024, 50));
+ if (event.compaction_recorded_events) {
+ object_size_freed_by_heap_compaction.Count(
+ CappedSizeInKB(event.compaction_freed_bytes));
+ }
+
+ DEFINE_STATIC_LOCAL(CustomCountHistogram, object_size_before_gc_histogram,
+ ("BlinkGC.ObjectSizeBeforeGC", 1, 4 * 1024 * 1024, 50));
+ object_size_before_gc_histogram.Count(
+ CappedSizeInKB(event.object_size_in_bytes_before_sweeping));
+ DEFINE_STATIC_LOCAL(CustomCountHistogram, object_size_after_gc_histogram,
+ ("BlinkGC.ObjectSizeAfterGC", 1, 4 * 1024 * 1024, 50));
+ object_size_after_gc_histogram.Count(CappedSizeInKB(event.marked_bytes));
+
+ const int collection_rate_percent =
+ static_cast<int>(100 * (1.0 - event.live_object_rate));
+ DEFINE_STATIC_LOCAL(CustomCountHistogram, collection_rate_histogram,
+ ("BlinkGC.CollectionRate", 1, 100, 20));
+ collection_rate_histogram.Count(collection_rate_percent);
+
+ // Per GCReason metrics.
+ switch (event.reason) {
+#define COUNT_BY_GC_REASON(reason) \
+ case BlinkGC::GCReason::k##reason: { \
+ UMA_HISTOGRAM_TIMES("BlinkGC.AtomicPhaseMarking_" #reason, \
+ event.atomic_marking_time()); \
+ DEFINE_STATIC_LOCAL(CustomCountHistogram, \
+ collection_rate_reason_histogram, \
+ ("BlinkGC.CollectionRate_" #reason, 1, 100, 20)); \
+ collection_rate_reason_histogram.Count(collection_rate_percent); \
+ break; \
+ }
+
+ COUNT_BY_GC_REASON(ForcedGCForTesting)
+ COUNT_BY_GC_REASON(ThreadTerminationGC)
+ COUNT_BY_GC_REASON(UnifiedHeapGC)
+ COUNT_BY_GC_REASON(UnifiedHeapForMemoryReductionGC)
+ COUNT_BY_GC_REASON(UnifiedHeapForcedForTestingGC)
+
+#undef COUNT_BY_GC_REASON
+ }
+}
+
+} // namespace
+
+void ThreadState::NotifySweepDone() {
+ DCHECK(CheckThread());
+ SetGCPhase(GCPhase::kNone);
+ if (!in_atomic_pause()) {
+ PostSweep();
+ }
+}
+
+void ThreadState::PostSweep() {
+ DCHECK(!in_atomic_pause());
+ DCHECK(!IsSweepingInProgress());
+
+ gc_age_++;
+
+ for (auto* const observer : observers_)
+ observer->OnCompleteSweepDone();
+
+ Heap().stats_collector()->NotifySweepingCompleted();
+
+ if (IsMainThread())
+ UpdateHistograms(Heap().stats_collector()->previous());
+ // Emit trace counters for all threads.
+ UpdateTraceCounters(*Heap().stats_collector());
+}
+
+void ThreadState::SafePoint(BlinkGC::StackState stack_state) {
+ DCHECK(CheckThread());
+
+ RunScheduledGC(stack_state);
+}
+
+using PushAllRegistersCallback = void (*)(ThreadState*, intptr_t*);
+extern "C" void PushAllRegisters(ThreadState*, PushAllRegistersCallback);
+
+// static
+void ThreadState::VisitStackAfterPushingRegisters(ThreadState* state,
+ intptr_t* end_of_stack) {
+ state->VisitStack(static_cast<MarkingVisitor*>(state->CurrentVisitor()),
+ reinterpret_cast<Address*>(end_of_stack));
+}
+
+void ThreadState::PushRegistersAndVisitStack() {
+ DCHECK(CheckThread());
+ DCHECK(IsGCForbidden());
+ DCHECK_EQ(current_gc_data_.stack_state, BlinkGC::kHeapPointersOnStack);
+ // Visit registers, native stack, and asan fake stack.
+ PushAllRegisters(this, ThreadState::VisitStackAfterPushingRegisters);
+ // For builds that use safe stack, also visit the unsafe stack.
+ VisitUnsafeStack(static_cast<MarkingVisitor*>(CurrentVisitor()));
+}
+
+void ThreadState::AddObserver(BlinkGCObserver* observer) {
+ DCHECK(observer);
+ DCHECK(!observers_.Contains(observer));
+ observers_.insert(observer);
+}
+
+void ThreadState::RemoveObserver(BlinkGCObserver* observer) {
+ DCHECK(observer);
+ DCHECK(observers_.Contains(observer));
+ observers_.erase(observer);
+}
+
+void ThreadState::EnterStaticReferenceRegistrationDisabledScope() {
+ static_persistent_registration_disabled_count_++;
+}
+
+void ThreadState::LeaveStaticReferenceRegistrationDisabledScope() {
+ DCHECK(static_persistent_registration_disabled_count_);
+ static_persistent_registration_disabled_count_--;
+}
+
+void ThreadState::RegisterStaticPersistentNode(PersistentNode* node) {
+ if (static_persistent_registration_disabled_count_)
+ return;
+
+ DCHECK(!static_persistents_.Contains(node));
+ static_persistents_.insert(node);
+}
+
+void ThreadState::ReleaseStaticPersistentNodes() {
+ HashSet<PersistentNode*> static_persistents;
+ static_persistents.swap(static_persistents_);
+
+ PersistentRegion* persistent_region = GetPersistentRegion();
+ for (PersistentNode* it : static_persistents)
+ persistent_region->ReleaseNode(it);
+}
+
+void ThreadState::FreePersistentNode(PersistentRegion* persistent_region,
+ PersistentNode* persistent_node) {
+ persistent_region->FreeNode(persistent_node);
+ // Do not allow static persistents to be freed before
+  // they're all released in ReleaseStaticPersistentNodes().
+ //
+ // There's no fundamental reason why this couldn't be supported,
+ // but no known use for it.
+ if (persistent_region == GetPersistentRegion())
+ DCHECK(!static_persistents_.Contains(persistent_node));
+}
+
+void ThreadState::InvokePreFinalizers() {
+ DCHECK(CheckThread());
+ DCHECK(!SweepForbidden());
+
+ ThreadHeapStatsCollector::Scope stats_scope(
+ Heap().stats_collector(), ThreadHeapStatsCollector::kInvokePreFinalizers);
+ SweepForbiddenScope sweep_forbidden(this);
+  // Pre-finalizers are forbidden from allocating objects.
+ NoAllocationScope no_allocation_scope(this);
+
+ // Call the prefinalizers in the opposite order to their registration.
+ //
+ // Deque does not support modification during iteration, so
+ // copy items first.
+ //
+ // The prefinalizer callback wrapper returns |true| when its associated
+ // object is unreachable garbage and the prefinalizer callback has run.
+ // The registered prefinalizer entry must then be removed and deleted.
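+  //
+  // Each PreFinalizer entry pairs the object pointer (|first|) with the
+  // callback generated by USING_PRE_FINALIZER (Class::InvokePreFinalizer,
+  // |second|); see thread_state.h.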
+ LivenessBroker broker = internal::LivenessBrokerFactory::Create();
+ Deque<PreFinalizer> remaining_ordered_pre_finalizers;
+ for (auto rit = ordered_pre_finalizers_.rbegin();
+ rit != ordered_pre_finalizers_.rend(); ++rit) {
+ const PreFinalizer& pre_finalizer = *rit;
+ if (!(pre_finalizer.second)(broker, pre_finalizer.first))
+ remaining_ordered_pre_finalizers.push_front(pre_finalizer);
+ }
+
+ ordered_pre_finalizers_ = std::move(remaining_ordered_pre_finalizers);
+}
+
+// static
+AtomicEntryFlag ThreadState::incremental_marking_flag_;
+
+void ThreadState::EnableIncrementalMarkingBarrier() {
+ CHECK(!IsIncrementalMarking());
+ incremental_marking_flag_.Enter();
+ SetIncrementalMarking(true);
+}
+
+void ThreadState::DisableIncrementalMarkingBarrier() {
+ CHECK(IsIncrementalMarking());
+ incremental_marking_flag_.Exit();
+ SetIncrementalMarking(false);
+}
+
+void ThreadState::IncrementalMarkingStartForTesting() {
+ // kIncrementalGCScheduled state requires sweeping to not be in progress.
+ CompleteSweep();
+ SetGCState(kIncrementalGCScheduled);
+ IncrementalMarkingStart(BlinkGC::GCReason::kForcedGCForTesting);
+}
+
+void ThreadState::IncrementalMarkingStart(BlinkGC::GCReason reason) {
+ DCHECK(!IsGCForbidden());
+ DCHECK_EQ(kIncrementalGCScheduled, GetGCState());
+
+ VLOG(2) << "[state:" << this << "] "
+ << "IncrementalMarking: Start";
+ DCHECK(!IsMarkingInProgress());
+ // Sweeping is performed in driver functions.
+ DCHECK(!IsSweepingInProgress());
+ Heap().stats_collector()->NotifyMarkingStarted(
+ BlinkGC::CollectionType::kMajor, reason, IsForcedGC(reason));
+ {
+ ThreadHeapStatsCollector::EnabledScope stats_scope(
+ Heap().stats_collector(),
+ ThreadHeapStatsCollector::kIncrementalMarkingStartMarking, "reason",
+ BlinkGC::ToString(reason));
+ AtomicPauseScope atomic_pause_scope(this);
+ ScriptForbiddenScope script_forbidden_scope;
+ MarkPhasePrologue(BlinkGC::CollectionType::kMajor,
+ BlinkGC::kNoHeapPointersOnStack,
+ BlinkGC::kIncrementalAndConcurrentMarking, reason);
+ {
+ MutexLocker persistent_lock(ProcessHeap::CrossThreadPersistentMutex());
+ MarkPhaseVisitRoots();
+ }
+ DCHECK(Heap().GetV8ReferencesWorklist()->IsGlobalEmpty());
+ EnableIncrementalMarkingBarrier();
+ if (base::FeatureList::IsEnabled(
+ blink::features::kBlinkHeapConcurrentMarking)) {
+ current_gc_data_.visitor->FlushMarkingWorklists();
+ static_assert(
+ MarkingWorklist::kNumTasks == WriteBarrierWorklist::kNumTasks,
+ "Marking worklist and write-barrier worklist should be the "
+ "same size");
+ last_concurrently_marked_bytes_ = 0;
+ last_concurrently_marked_bytes_update_ = base::TimeTicks::Now();
+ concurrent_marking_priority_increased_ = false;
+ ScheduleConcurrentMarking();
+ }
+ SetGCState(kIncrementalMarkingStepScheduled);
+ DCHECK(IsMarkingInProgress());
+ }
+}
+
+void ThreadState::IncrementalMarkingStep(BlinkGC::StackState stack_state) {
+ DCHECK(IsMarkingInProgress());
+ DCHECK_EQ(kIncrementalMarkingStepScheduled, GetGCState());
+
+ ThreadHeapStatsCollector::EnabledScope stats_scope(
+ Heap().stats_collector(),
+ ThreadHeapStatsCollector::kIncrementalMarkingStep);
+ VLOG(2) << "[state:" << this << "] "
+ << "IncrementalMarking: Step "
+ << "Reason: " << BlinkGC::ToString(current_gc_data_.reason);
+ AtomicPauseScope atomic_pause_scope(this);
+ ScriptForbiddenScope script_forbidden_scope;
+ if (stack_state == BlinkGC::kNoHeapPointersOnStack) {
+ Heap().FlushNotFullyConstructedObjects();
+ }
+
+ bool complete;
+ if (skip_incremental_marking_for_testing_) {
+ complete = true;
+ skip_incremental_marking_for_testing_ = false;
+ } else {
+ complete = MarkPhaseAdvanceMarking(
+ marking_scheduling_->GetNextIncrementalStepDurationForTask(
+ Heap().stats_collector()->object_size_in_bytes()),
+ EphemeronProcessing::kPartialProcessing);
+ }
+
+ if (base::FeatureList::IsEnabled(
+ blink::features::kBlinkHeapConcurrentMarking)) {
+ complete = ConcurrentMarkingStep() && complete;
+ }
+
+ if (complete) {
+ if (IsUnifiedGCMarkingInProgress()) {
+ // If there are no more objects to mark for unified garbage collections
+ // just bail out of helping incrementally using tasks. V8 will drive
+ // further marking if new objects are discovered. Otherwise, just process
+ // the rest in the atomic pause.
+ DCHECK(IsUnifiedGCMarkingInProgress());
+ SetGCState(kIncrementalMarkingStepPaused);
+ } else {
+ SetGCState(kIncrementalMarkingFinalizeScheduled);
+ }
+ } else {
+ SetGCState(kIncrementalMarkingStepScheduled);
+ }
+ DCHECK(IsMarkingInProgress());
+}
+
+bool ThreadState::ConcurrentMarkingStep() {
+ current_gc_data_.visitor->FlushMarkingWorklists();
+ if (Heap().HasWorkForConcurrentMarking()) {
+ // Notifies the scheduler that max concurrency might have increased.
+ // This will adjust the number of markers if necessary.
+ marker_handle_.NotifyConcurrencyIncrease();
+ if (!concurrent_marking_priority_increased_) {
+ // If concurrent tasks aren't executed, it might delay GC finalization.
+ // As long as GC is active so is the write barrier, which incurs a
+ // performance cost. Marking is estimated to take overall
+ // |MarkingSchedulingOracle::kEstimatedMarkingTimeMs| (500ms). If
+ // concurrent marking tasks have not reported any progress (i.e. the
+      // concurrently marked bytes count has not changed) in over
+ // |kMarkingScheduleRatioBeforeConcurrentPriorityIncrease| (50%) of
+ // that expected duration, we increase the concurrent task priority
+ // for the duration of the current GC. This is meant to prevent the
+      // GC from exceeding its expected end time.
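+      // As a worked example based on the numbers above: with an estimated
+      // marking time of 500 ms and a ratio of 50%, the priority bump kicks in
+      // after roughly 250 ms without observed concurrent marking progress.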
+ size_t current_concurrently_marked_bytes_ =
+ marking_scheduling_->GetConcurrentlyMarkedBytes();
+ if (current_concurrently_marked_bytes_ >
+ last_concurrently_marked_bytes_) {
+ last_concurrently_marked_bytes_ = current_concurrently_marked_bytes_;
+ last_concurrently_marked_bytes_update_ = base::TimeTicks::Now();
+ } else if ((base::TimeTicks::Now() -
+ last_concurrently_marked_bytes_update_)
+ .InMilliseconds() >
+ kMarkingScheduleRatioBeforeConcurrentPriorityIncrease *
+ MarkingSchedulingOracle::kEstimatedMarkingTimeMs) {
+ marker_handle_.UpdatePriority(base::TaskPriority::USER_BLOCKING);
+ concurrent_marking_priority_increased_ = true;
+ }
+ }
+ return false;
+ }
+ return marker_handle_.IsCompleted();
+}
+
+void ThreadState::IncrementalMarkingFinalize() {
+ DCHECK(IsMarkingInProgress());
+ DCHECK(!IsUnifiedGCMarkingInProgress());
+ DCHECK_EQ(kIncrementalMarkingFinalizeScheduled, GetGCState());
+
+ ThreadHeapStatsCollector::EnabledScope stats_scope(
+ Heap().stats_collector(),
+ ThreadHeapStatsCollector::kIncrementalMarkingFinalize);
+ VLOG(2) << "[state:" << this << "] "
+ << "IncrementalMarking: Finalize "
+ << "Reason: " << BlinkGC::ToString(current_gc_data_.reason);
+ // Call into the regular bottleneck instead of the internal version to get
+ // UMA accounting and allow follow up GCs if necessary.
+ DCHECK_EQ(BlinkGC::kIncrementalAndConcurrentMarking,
+ current_gc_data_.marking_type);
+ CollectGarbage(current_gc_data_.collection_type,
+ BlinkGC::kNoHeapPointersOnStack, current_gc_data_.marking_type,
+ BlinkGC::kConcurrentAndLazySweeping, current_gc_data_.reason);
+}
+
+bool ThreadState::FinishIncrementalMarkingIfRunning(
+ BlinkGC::CollectionType collection_type,
+ BlinkGC::StackState stack_state,
+ BlinkGC::MarkingType marking_type,
+ BlinkGC::SweepingType sweeping_type,
+ BlinkGC::GCReason reason) {
+ if (IsMarkingInProgress()) {
+ // TODO(mlippautz): Consider improving this mechanism as it will pull in
+    // finalization of V8 upon Oilpan GCs during a unified GC. Alternatives
+    // include either breaking up the GCs or avoiding the call in the first
+    // place.
+ if (IsUnifiedGCMarkingInProgress()) {
+ unified_heap_controller()->FinalizeTracing();
+ } else {
+ RunAtomicPause(collection_type, stack_state, marking_type, sweeping_type,
+ reason);
+ }
+ return true;
+ }
+ return false;
+}
+
+void ThreadState::RestartIncrementalMarkingIfPaused() {
+ if (GetGCState() != ThreadState::kIncrementalMarkingStepPaused)
+ return;
+ SetGCState(ThreadState::kIncrementalMarkingStepScheduled);
+ incremental_marking_scheduler_->Restart();
+}
+
+void ThreadState::CollectGarbage(BlinkGC::CollectionType collection_type,
+ BlinkGC::StackState stack_state,
+ BlinkGC::MarkingType marking_type,
+ BlinkGC::SweepingType sweeping_type,
+ BlinkGC::GCReason reason) {
+ // Nested garbage collection invocations are not supported.
+ CHECK(!IsGCForbidden());
+ // Garbage collection during sweeping is not supported. This can happen when
+ // finalizers trigger garbage collections.
+ if (SweepForbidden())
+ return;
+
+ base::TimeTicks start_total_collect_garbage_time = base::TimeTicks::Now();
+ RUNTIME_CALL_TIMER_SCOPE_IF_ISOLATE_EXISTS(
+ GetIsolate(), RuntimeCallStats::CounterId::kCollectGarbage);
+
+ const bool was_incremental_marking = FinishIncrementalMarkingIfRunning(
+ collection_type, stack_state, marking_type, sweeping_type, reason);
+
+ // We don't want floating garbage for the specific garbage collection types
+ // mentioned below. In this case we will follow up with a regular full
+ // garbage collection.
+ const bool should_do_full_gc =
+ !was_incremental_marking ||
+ reason == BlinkGC::GCReason::kForcedGCForTesting ||
+ reason == BlinkGC::GCReason::kThreadTerminationGC;
+ if (should_do_full_gc) {
+ CompleteSweep();
+ SetGCState(kNoGCScheduled);
+ Heap().stats_collector()->NotifyMarkingStarted(collection_type, reason,
+ IsForcedGC(reason));
+ RunAtomicPause(collection_type, stack_state, marking_type, sweeping_type,
+ reason);
+ }
+
+ const base::TimeDelta total_collect_garbage_time =
+ base::TimeTicks::Now() - start_total_collect_garbage_time;
+ UMA_HISTOGRAM_TIMES("BlinkGC.TimeForTotalCollectGarbage",
+ total_collect_garbage_time);
+
+#define COUNT_BY_GC_REASON(reason) \
+ case BlinkGC::GCReason::k##reason: { \
+ UMA_HISTOGRAM_TIMES("BlinkGC.TimeForTotalCollectGarbage_" #reason, \
+ total_collect_garbage_time); \
+ break; \
+ }
+
+ switch (reason) {
+ COUNT_BY_GC_REASON(ForcedGCForTesting)
+ COUNT_BY_GC_REASON(ThreadTerminationGC)
+ COUNT_BY_GC_REASON(UnifiedHeapGC)
+ COUNT_BY_GC_REASON(UnifiedHeapForMemoryReductionGC)
+ COUNT_BY_GC_REASON(UnifiedHeapForcedForTestingGC)
+ }
+#undef COUNT_BY_GC_REASON
+
+ VLOG(1) << "[state:" << this << "]"
+ << " CollectGarbage: time: " << std::setprecision(2)
+ << total_collect_garbage_time.InMillisecondsF() << "ms"
+ << " stack: " << BlinkGC::ToString(stack_state)
+ << " marking: " << BlinkGC::ToString(marking_type)
+ << " sweeping: " << BlinkGC::ToString(sweeping_type)
+ << " reason: " << BlinkGC::ToString(reason);
+}
+
+void ThreadState::AtomicPauseMarkRoots(BlinkGC::StackState stack_state,
+ BlinkGC::MarkingType marking_type,
+ BlinkGC::GCReason reason) {
+ ThreadHeapStatsCollector::EnabledScope advance_tracing_scope(
+ Heap().stats_collector(),
+ ThreadHeapStatsCollector::kAtomicPauseMarkRoots);
+ MarkPhaseVisitRoots();
+ MarkPhaseVisitNotFullyConstructedObjects();
+}
+
+void ThreadState::AtomicPauseMarkTransitiveClosure() {
+ ThreadHeapStatsCollector::EnabledScope advance_tracing_scope(
+ Heap().stats_collector(),
+ ThreadHeapStatsCollector::kAtomicPauseMarkTransitiveClosure);
+ // base::TimeTicks::Now() + base::TimeDelta::Max() == base::TimeTicks::Max()
+ CHECK(MarkPhaseAdvanceMarking(base::TimeDelta::Max(),
+ EphemeronProcessing::kFullProcessing));
+}
+
+void ThreadState::AtomicPauseMarkEpilogue(BlinkGC::MarkingType marking_type) {
+ ThreadHeapStatsCollector::EnabledScope stats_scope(
+ Heap().stats_collector(),
+ ThreadHeapStatsCollector::kAtomicPauseMarkEpilogue);
+ MarkPhaseEpilogue(marking_type);
+ LeaveGCForbiddenScope();
+ LeaveNoAllocationScope();
+ LeaveAtomicPause();
+ static_cast<MutexBase&>(ProcessHeap::CrossThreadPersistentMutex()).unlock();
+}
+
+void ThreadState::AtomicPauseSweepAndCompact(
+ BlinkGC::CollectionType collection_type,
+ BlinkGC::MarkingType marking_type,
+ BlinkGC::SweepingType sweeping_type) {
+ ThreadHeapStatsCollector::EnabledScope stats(
+ Heap().stats_collector(),
+ ThreadHeapStatsCollector::kAtomicPauseSweepAndCompact);
+ AtomicPauseScope atomic_pause_scope(this);
+ ScriptForbiddenScope script_forbidden_scope;
+
+ DCHECK(InAtomicMarkingPause());
+ DCHECK(CheckThread());
+ Heap().PrepareForSweep(collection_type);
+
+ // We have to set the GCPhase to Sweeping before calling pre-finalizers
+ // to disallow a GC during the pre-finalizers.
+ SetGCPhase(GCPhase::kSweeping);
+
+ InvokePreFinalizers();
+
+ if (collection_type == BlinkGC::CollectionType::kMajor) {
+ // Slots filtering requires liveness information which is only present
+ // before sweeping any arena.
+ ThreadHeapStatsCollector::Scope stats_scope(
+ Heap().stats_collector(),
+ ThreadHeapStatsCollector::kAtomicPauseCompaction);
+ Heap().Compaction()->FilterNonLiveSlots();
+ }
+
+ // Last point where all mark bits are present.
+ VerifyMarking(marking_type);
+
+ if (collection_type == BlinkGC::CollectionType::kMajor) {
+    // Any sweep compaction must happen after pre-finalizers, as it will
+    // finalize dead objects in compactable arenas (e.g., backing stores
+    // for container objects).
+    //
+    // As per the contract for prefinalizers, those finalizable objects must
+    // still be accessible when the prefinalizer runs; hence we cannot
+    // schedule compaction until those have run.
+ SweepForbiddenScope scope(this);
+ NoAllocationScope no_allocation_scope(this);
+ Heap().Compact();
+ Heap().DestroyCompactionWorklists();
+ }
+
+#if defined(ADDRESS_SANITIZER)
+ PoisonUnmarkedObjects();
+#endif // ADDRESS_SANITIZER
+ DCHECK(IsSweepingInProgress());
+ if (sweeping_type == BlinkGC::kEagerSweeping) {
+ // Eager sweeping should happen only in testing.
+ CompleteSweep();
+ } else {
+ DCHECK(sweeping_type == BlinkGC::kConcurrentAndLazySweeping);
+ // The default behavior is concurrent and lazy sweeping.
+ ScheduleConcurrentAndLazySweep();
+ }
+}
+
+#if defined(ADDRESS_SANITIZER)
+namespace {
+
+// Visitor unpoisoning all handles. Unpoisoning is required when dead objects
+// are poisoned until they are processed later on.
+//
+// The potentially racing operations are:
+// a. Running a destructor that clears the handle.
+// b. Running a stand-alone V8 GC (e.g. Scavenger) that clears the handle.
+//
+// Both operations run on the main thread and are not concurrent.
+class UnpoisonHandlesVisitor final
+ : public v8::PersistentHandleVisitor,
+ public v8::EmbedderHeapTracer::TracedGlobalHandleVisitor {
+ public:
+ explicit UnpoisonHandlesVisitor(ThreadHeap* heap) : heap_(heap) {}
+
+ void VisitPersistentHandle(v8::Persistent<v8::Value>* value,
+ uint16_t class_id) final {
+ VisitSlot(value, sizeof(v8::Persistent<v8::Value>));
+ }
+
+ void VisitTracedGlobalHandle(const v8::TracedGlobal<v8::Value>&) final {
+ CHECK(false) << "Blink does not use v8::TracedGlobal.";
+ }
+
+ void VisitTracedReference(const v8::TracedReference<v8::Value>& value) final {
+ // TODO(mlippautz): Avoid const_cast after changing the API to allow
+    // modification of the handle.
+ VisitSlot(&const_cast<v8::TracedReference<v8::Value>&>(value),
+ sizeof(v8::TracedReference<v8::Value>));
+ }
+
+ private:
+ void VisitSlot(void* address, size_t size) {
+ // Filter slots not on the heap.
+ if (!heap_->LookupPageForAddress(reinterpret_cast<Address>(address)))
+ return;
+
+ HeapObjectHeader* header = HeapObjectHeader::FromInnerAddress(address);
+ if (!header->IsMarked()) {
+ DCHECK(ASAN_REGION_IS_POISONED(address, size));
+ ASAN_UNPOISON_MEMORY_REGION(address, size);
+ }
+ }
+
+ ThreadHeap* const heap_;
+};
+
+} // namespace
+
+void ThreadState::PoisonUnmarkedObjects() {
+ {
+ // This lock must be held because other threads may access cross-thread
+ // persistents and should not observe them in a poisoned state.
+ MutexLocker lock(ProcessHeap::CrossThreadPersistentMutex());
+
+ Heap().PoisonUnmarkedObjects();
+
+ // CrossThreadPersistents in unmarked objects may be accessed from other
+ // threads (e.g. in CrossThreadPersistentRegion::ShouldTracePersistent) and
+ // that would be fine.
+ ProcessHeap::GetCrossThreadPersistentRegion()
+ .UnpoisonCrossThreadPersistents();
+ ProcessHeap::GetCrossThreadWeakPersistentRegion()
+ .UnpoisonCrossThreadPersistents();
+ }
+
+  // Similarly, unmarked objects may contain handles to V8 that may be accessed
+ // (cleared) until the destructors are run.
+ if (GetIsolate()) {
+ UnpoisonHandlesVisitor visitor(&Heap());
+ GetIsolate()->VisitHandlesWithClassIds(&visitor);
+ unified_heap_controller()->IterateTracedGlobalHandles(&visitor);
+ }
+}
+#endif // ADDRESS_SANITIZER
+
+void ThreadState::RunAtomicPause(BlinkGC::CollectionType collection_type,
+ BlinkGC::StackState stack_state,
+ BlinkGC::MarkingType marking_type,
+ BlinkGC::SweepingType sweeping_type,
+ BlinkGC::GCReason reason) {
+ // Legacy scope that is used to add stand-alone Oilpan GCs to DevTools
+ // timeline.
+ TRACE_EVENT1("blink_gc,devtools.timeline", "BlinkGC.AtomicPhase", "forced",
+ IsForcedGC(reason));
+
+ AtomicPauseMarkPrologue(collection_type, stack_state, marking_type, reason);
+ AtomicPauseMarkRoots(stack_state, marking_type, reason);
+ AtomicPauseMarkTransitiveClosure();
+ AtomicPauseMarkEpilogue(marking_type);
+ AtomicPauseSweepAndCompact(collection_type, marking_type, sweeping_type);
+ AtomicPauseEpilogue();
+}
+
+namespace {
+
+MarkingVisitor::MarkingMode GetMarkingMode(bool should_compact) {
+ return (should_compact) ? MarkingVisitor::kGlobalMarkingWithCompaction
+ : MarkingVisitor::kGlobalMarking;
+}
+
+} // namespace
+
+void ThreadState::MarkPhasePrologue(BlinkGC::CollectionType collection_type,
+ BlinkGC::StackState stack_state,
+ BlinkGC::MarkingType marking_type,
+ BlinkGC::GCReason reason) {
+ SetGCPhase(GCPhase::kMarking);
+
+ const bool compaction_enabled =
+ Heap().Compaction()->ShouldCompact(stack_state, marking_type, reason);
+
+ Heap().SetupWorklists(compaction_enabled);
+
+ if (compaction_enabled) {
+ Heap().Compaction()->Initialize(this);
+ }
+
+#if BUILDFLAG(BLINK_HEAP_YOUNG_GENERATION)
+ if (collection_type == BlinkGC::CollectionType::kMajor) {
+ // Unmark heap before doing major collection cycle.
+ Heap().Unmark();
+ }
+#endif
+
+ current_gc_data_.reason = reason;
+ current_gc_data_.collection_type = collection_type;
+ current_gc_data_.visitor =
+ IsUnifiedGCMarkingInProgress()
+ ? std::make_unique<UnifiedHeapMarkingVisitor>(
+ this, GetMarkingMode(compaction_enabled), GetIsolate())
+ : std::make_unique<MarkingVisitor>(
+ this, GetMarkingMode(compaction_enabled));
+ current_gc_data_.stack_state = stack_state;
+ current_gc_data_.marking_type = marking_type;
+
+ marking_scheduling_ = std::make_unique<MarkingSchedulingOracle>();
+}
+
+void ThreadState::MarkPhaseVisitRoots() {
+ ThreadHeapStatsCollector::EnabledScope stats_scope(
+ Heap().stats_collector(), ThreadHeapStatsCollector::kVisitRoots);
+
+ Visitor* visitor = current_gc_data_.visitor.get();
+
+ VisitPersistents(visitor);
+
+ if (current_gc_data_.stack_state == BlinkGC::kHeapPointersOnStack) {
+ ThreadHeapStatsCollector::Scope stack_stats_scope(
+ Heap().stats_collector(), ThreadHeapStatsCollector::kVisitStackRoots);
+ PushRegistersAndVisitStack();
+ }
+
+ // Visit remembered sets (card tables) for minor collections.
+ if (current_gc_data_.collection_type == BlinkGC::CollectionType::kMinor) {
+ VisitRememberedSets(static_cast<MarkingVisitor*>(visitor));
+ }
+}
+
+bool ThreadState::MarkPhaseAdvanceMarkingBasedOnSchedule(
+ base::TimeDelta max_deadline,
+ EphemeronProcessing ephemeron_processing) {
+ return MarkPhaseAdvanceMarking(
+ std::min(max_deadline,
+ marking_scheduling_->GetNextIncrementalStepDurationForTask(
+ Heap().stats_collector()->object_size_in_bytes())),
+ ephemeron_processing);
+}
+
+bool ThreadState::MarkPhaseAdvanceMarking(
+ base::TimeDelta deadline,
+ EphemeronProcessing ephemeron_processing) {
+ MarkingVisitor* visitor = current_gc_data_.visitor.get();
+ ThreadHeapStatsCollector::EnabledScope deadline_scope(
+ Heap().stats_collector(),
+ ThreadHeapStatsCollector::kIncrementalMarkingWithDeadline, "deadline_ms",
+ deadline.InMillisecondsF());
+ const bool finished = Heap().AdvanceMarking(
+ reinterpret_cast<MarkingVisitor*>(visitor),
+ base::TimeTicks::Now() + deadline, ephemeron_processing);
+ // visitor->marked_bytes() can also include bytes marked during roots
+ // visitation which is not counted in worklist_processing_time_foreground.
+ // Since the size of the roots is usually small relative to the size of
+ // the object graph, this is fine.
+ marking_scheduling_->UpdateIncrementalMarkingStats(
+ visitor->marked_bytes(),
+ Heap().stats_collector()->worklist_processing_time_foreground(),
+ Heap().stats_collector()->flushing_v8_references_time());
+ return finished;
+}
+
+bool ThreadState::IsVerifyMarkingEnabled() const {
+ bool should_verify_marking = base::FeatureList::IsEnabled(
+ blink::features::kBlinkHeapIncrementalMarkingStress);
+#if BUILDFLAG(BLINK_HEAP_VERIFICATION)
+ should_verify_marking = (disable_heap_verification_scope_ == 0);
+#endif // BLINK_HEAP_VERIFICATION
+ return should_verify_marking;
+}
+
+void ThreadState::MarkPhaseVisitNotFullyConstructedObjects() {
+ Heap().MarkNotFullyConstructedObjects(
+ reinterpret_cast<MarkingVisitor*>(current_gc_data_.visitor.get()));
+}
+
+void ThreadState::MarkPhaseEpilogue(BlinkGC::MarkingType marking_type) {
+ MarkingVisitor* visitor = current_gc_data_.visitor.get();
+ {
+ ProcessHeap::CrossThreadPersistentMutex().AssertAcquired();
+ VisitWeakPersistents(visitor);
+ Heap().WeakProcessing(visitor);
+ }
+ Heap().DestroyMarkingWorklists(current_gc_data_.stack_state);
+
+ incremental_marking_scheduler_->Cancel();
+
+ current_gc_data_.visitor->FlushCompactionWorklists();
+ current_gc_data_.visitor.reset();
+
+ Heap().stats_collector()->NotifyMarkingCompleted(
+ marking_scheduling_->GetOverallMarkedBytes());
+ marking_scheduling_.reset();
+
+ DEFINE_THREAD_SAFE_STATIC_LOCAL(
+ CustomCountHistogram, total_object_space_histogram,
+ ("BlinkGC.TotalObjectSpace", 0, 4 * 1024 * 1024, 50));
+ total_object_space_histogram.Count(
+ CappedSizeInKB(ProcessHeap::TotalAllocatedObjectSize()));
+ DEFINE_THREAD_SAFE_STATIC_LOCAL(
+ CustomCountHistogram, total_allocated_space_histogram,
+ ("BlinkGC.TotalAllocatedSpace", 0, 4 * 1024 * 1024, 50));
+ total_allocated_space_histogram.Count(
+ CappedSizeInKB(ProcessHeap::TotalAllocatedSpace()));
+}
+
+void ThreadState::VerifyMarking(BlinkGC::MarkingType marking_type) {
+ if (IsVerifyMarkingEnabled())
+ Heap().VerifyMarking();
+}
+
+void ThreadState::CollectGarbageForTesting(
+ BlinkGC::CollectionType collection_type,
+ BlinkGC::StackState stack_state,
+ BlinkGC::MarkingType marking_type,
+ BlinkGC::SweepingType sweeping_type,
+ BlinkGC::GCReason reason) {
+ CollectGarbage(collection_type, stack_state, marking_type, sweeping_type,
+ reason);
+}
+
+void ThreadState::CollectAllGarbageForTesting(BlinkGC::StackState stack_state) {
+ // We need to run multiple GCs to collect a chain of persistent handles.
+ size_t previous_live_objects = 0;
+ for (int i = 0; i < 5; ++i) {
+ if (isolate_) {
+ unified_heap_controller()->GarbageCollectionForTesting(
+ stack_state == BlinkGC::kNoHeapPointersOnStack
+ ? v8::EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers
+ : v8::EmbedderHeapTracer::EmbedderStackState::
+ kMayContainHeapPointers);
+ } else {
+ CollectGarbage(BlinkGC::CollectionType::kMajor, stack_state,
+ BlinkGC::kAtomicMarking, BlinkGC::kEagerSweeping,
+ BlinkGC::GCReason::kForcedGCForTesting);
+ }
+ const size_t live_objects =
+ Heap().stats_collector()->previous().marked_bytes;
+ if (live_objects == previous_live_objects)
+ break;
+ previous_live_objects = live_objects;
+ }
+}
+
+void ThreadState::EnableCompactionForNextGCForTesting() {
+ Heap().Compaction()->EnableCompactionForNextGCForTesting();
+}
+
+void ThreadState::ScheduleConcurrentMarking() {
+ DCHECK(base::FeatureList::IsEnabled(
+ blink::features::kBlinkHeapConcurrentMarking));
+
+ marker_handle_ = base::PostJob(
+ FROM_HERE, {base::ThreadPool(), base::TaskPriority::USER_VISIBLE},
+ ConvertToBaseRepeatingCallback(
+ WTF::CrossThreadBindRepeating(&ThreadState::PerformConcurrentMark,
+ WTF::CrossThreadUnretained(this))),
+ ConvertToBaseRepeatingCallback(WTF::CrossThreadBindRepeating(
+ [](ThreadState* state, size_t active_worker_count) -> size_t {
+ // We need to account for local segments in addition to
+ // ConcurrentMarkingGlobalWorkSize().
+ return std::min<size_t>(
+ state->Heap().ConcurrentMarkingGlobalWorkSize() +
+ active_worker_count,
+ MarkingWorklist::kNumTasks -
+ WorklistTaskId::ConcurrentThreadBase);
+ },
+ WTF::CrossThreadUnretained(this))));
+}
+
+void ThreadState::PerformConcurrentMark(base::JobDelegate* job) {
+ VLOG(2) << "[state:" << this << "] [threadid:" << CurrentThread() << "] "
+ << "ConcurrentMark";
+ ThreadHeapStatsCollector::EnabledConcurrentScope stats_scope(
+ Heap().stats_collector(),
+ ThreadHeapStatsCollector::kConcurrentMarkingStep);
+
+ if (!Heap().HasWorkForConcurrentMarking())
+ return;
+
+ uint8_t task_id = job->GetTaskId() + 1;
+
+ std::unique_ptr<ConcurrentMarkingVisitor> concurrent_visitor =
+ IsUnifiedGCMarkingInProgress()
+ ? std::make_unique<ConcurrentUnifiedHeapMarkingVisitor>(
+ this, GetMarkingMode(Heap().Compaction()->IsCompacting()),
+ GetIsolate(), task_id)
+ : std::make_unique<ConcurrentMarkingVisitor>(
+ this, GetMarkingMode(Heap().Compaction()->IsCompacting()),
+ task_id);
+
+ Heap().AdvanceConcurrentMarking(concurrent_visitor.get(), job,
+ marking_scheduling_.get());
+
+ marking_scheduling_->AddConcurrentlyMarkedBytes(
+ concurrent_visitor->RecentlyMarkedBytes());
+
+ concurrent_visitor->FlushWorklists();
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/thread_state.h b/chromium/third_party/blink/renderer/platform/heap/impl/thread_state.h
new file mode 100644
index 00000000000..ee318bd64bf
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/thread_state.h
@@ -0,0 +1,716 @@
+/*
+ * Copyright (C) 2013 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_THREAD_STATE_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_THREAD_STATE_H_
+
+#include <atomic>
+#include <memory>
+
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+#include "base/task/post_job.h"
+#include "third_party/blink/renderer/platform/heap/blink_gc.h"
+#include "third_party/blink/renderer/platform/heap/impl/atomic_entry_flag.h"
+#include "third_party/blink/renderer/platform/heap/impl/threading_traits.h"
+#include "third_party/blink/renderer/platform/platform_export.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+#include "third_party/blink/renderer/platform/wtf/forward.h"
+#include "third_party/blink/renderer/platform/wtf/hash_map.h"
+#include "third_party/blink/renderer/platform/wtf/hash_set.h"
+#include "third_party/blink/renderer/platform/wtf/linked_hash_set.h"
+#include "third_party/blink/renderer/platform/wtf/sanitizers.h"
+#include "third_party/blink/renderer/platform/wtf/thread_specific.h"
+#include "third_party/blink/renderer/platform/wtf/threading.h"
+#include "third_party/blink/renderer/platform/wtf/threading_primitives.h"
+#include "third_party/blink/renderer/platform/wtf/vector.h"
+
+namespace v8 {
+class EmbedderGraph;
+class Isolate;
+} // namespace v8
+
+namespace blink {
+
+namespace incremental_marking_test {
+class IncrementalMarkingScope;
+} // namespace incremental_marking_test
+
+class MarkingVisitor;
+class MarkingSchedulingOracle;
+class PersistentNode;
+class PersistentRegion;
+class ThreadHeap;
+class ThreadState;
+template <ThreadAffinity affinity>
+class ThreadStateFor;
+class UnifiedHeapController;
+class Visitor;
+
+// Declare that a class has a pre-finalizer which gets invoked before objects
+// get swept. It is thus safe to touch on-heap objects that may be collected in
+// the same GC cycle. This is useful when it's not possible to avoid touching
+// on-heap objects in a destructor which is forbidden.
+//
+// Note that pre-finalizers:
+// (a) *must* not resurrect dead objects;
+// (b) run on the same thread on which they were registered;
+// (c) decrease GC performance, which means they should only be used if
+//     absolutely necessary.
+//
+// Usage:
+// class Foo : GarbageCollected<Foo> {
+// USING_PRE_FINALIZER(Foo, Dispose);
+// private:
+// void Dispose() {
+// bar_->...; // It is safe to touch other on-heap objects.
+// }
+// Member<Bar> bar_;
+// };
+#define USING_PRE_FINALIZER(Class, PreFinalizer) \
+ public: \
+ static bool InvokePreFinalizer(const LivenessBroker& info, void* object) { \
+ Class* self = reinterpret_cast<Class*>(object); \
+ if (info.IsHeapObjectAlive(self)) \
+ return false; \
+ self->Class::PreFinalizer(); \
+ return true; \
+ } \
+ \
+ private: \
+ ThreadState::PrefinalizerRegistration<Class> prefinalizer_dummy_{this}; \
+ using UsingPreFinalizerMacroNeedsTrailingSemiColon = char
+
+class PLATFORM_EXPORT BlinkGCObserver {
+ USING_FAST_MALLOC(BlinkGCObserver);
+
+ public:
+  // The constructor automatically registers this object in ThreadState's
+  // observer list. The argument must not be null.
+ explicit BlinkGCObserver(ThreadState*);
+
+  // The destructor automatically unregisters this object from ThreadState's
+  // observer list.
+ virtual ~BlinkGCObserver();
+
+ virtual void OnCompleteSweepDone() = 0;
+
+ private:
+  // As the ThreadState must outlive any BlinkGCObserver registered with it,
+  // holding a raw pointer is safe.
+ ThreadState* thread_state_;
+};
+
+class PLATFORM_EXPORT ThreadState final {
+ USING_FAST_MALLOC(ThreadState);
+
+ public:
+  // Registers the pre-finalizer for the |self| object. The class T must use
+  // the USING_PRE_FINALIZER() macro.
+ template <typename T>
+ class PrefinalizerRegistration final {
+ DISALLOW_NEW();
+
+ public:
+ PrefinalizerRegistration(T* self) { // NOLINT
+ static_assert(sizeof(&T::InvokePreFinalizer) > 0,
+ "USING_PRE_FINALIZER(T) must be defined.");
+ ThreadState* state =
+ ThreadStateFor<ThreadingTrait<T>::kAffinity>::GetState();
+#if DCHECK_IS_ON()
+ DCHECK(state->CheckThread());
+#endif
+ DCHECK(!state->SweepForbidden());
+ DCHECK(std::find(state->ordered_pre_finalizers_.begin(),
+ state->ordered_pre_finalizers_.end(),
+ PreFinalizer(self, T::InvokePreFinalizer)) ==
+ state->ordered_pre_finalizers_.end());
+ state->ordered_pre_finalizers_.emplace_back(self, T::InvokePreFinalizer);
+ }
+ };
+
+  // See SetGCState() for possible state transitions.
+ enum GCState {
+ kNoGCScheduled,
+ kIncrementalMarkingStepPaused,
+ kIncrementalMarkingStepScheduled,
+ kIncrementalMarkingFinalizeScheduled,
+ kForcedGCForTestingScheduled,
+ kIncrementalGCScheduled,
+ };
+
+  // The phase that the GC is in. The phase is not kNone for mutators running
+  // during incremental marking and lazy sweeping. See SetGCPhase() for
+  // possible state transitions.
+ enum class GCPhase {
+ // GC is doing nothing.
+ kNone,
+ // GC is in marking phase.
+ kMarking,
+ // GC is in sweeping phase.
+ kSweeping,
+ };
+
+ enum class EphemeronProcessing {
+    kPartialProcessing,  // Perform one ephemeron processing iteration every
+                         // few steps.
+    kFullProcessing      // Perform full fixed-point ephemeron processing on
+                         // each step.
+ };
+
+ class AtomicPauseScope;
+ class GCForbiddenScope;
+ class LsanDisabledScope;
+ class NoAllocationScope;
+ class StatisticsCollector;
+ struct Statistics;
+ class SweepForbiddenScope;
+ class HeapPointersOnStackScope;
+
+ using V8BuildEmbedderGraphCallback = void (*)(v8::Isolate*,
+ v8::EmbedderGraph*,
+ void*);
+
+ // Returns true if some thread (possibly the current thread) may be doing
+ // incremental marking. If false is returned, the *current* thread is
+ // definitely not doing incremental marking. See atomic_entry_flag.h for
+ // details.
+ //
+ // For an exact check, use ThreadState::IsIncrementalMarking.
+ ALWAYS_INLINE static bool IsAnyIncrementalMarking() {
+ return incremental_marking_flag_.MightBeEntered();
+ }
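+
+  // A minimal usage sketch (illustrative only; SlowPath is a hypothetical
+  // caller-side helper): take the cheap global check first, then confirm with
+  // the exact per-thread check before doing any expensive work.
+  //
+  //   if (ThreadState::IsAnyIncrementalMarking()) {
+  //     ThreadState* state = ThreadState::Current();
+  //     if (state->IsIncrementalMarking())
+  //       SlowPath(state);
+  //   }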
+
+ static ThreadState* AttachMainThread();
+
+  // Associates a ThreadState object with the current thread. After this
+  // call the thread can start using the garbage-collected heap
+  // infrastructure. It also has to periodically check for safepoints.
+ static ThreadState* AttachCurrentThread();
+
+  // Disassociates the attached ThreadState from the current thread. The thread
+  // can no longer use the garbage-collected heap after this call.
+  //
+  // When a ThreadState is detaching from a non-main thread, its heap is
+  // expected to be empty (because it is going away). Registered cleanup tasks
+  // are run and a garbage collection sweeps away any objects that are left on
+  // this heap.
+  //
+  // This method asserts that no objects remain after this cleanup. If the
+  // assertion does not hold, we crash, as we are potentially in a
+  // dangling-pointer situation.
+ static void DetachCurrentThread();
+
+ static ThreadState* Current() { return **thread_specific_; }
+
+ static ThreadState* MainThreadState() {
+ return reinterpret_cast<ThreadState*>(main_thread_state_storage_);
+ }
+
+ static ThreadState* FromObject(const void*);
+
+ bool IsMainThread() const { return this == MainThreadState(); }
+ bool CheckThread() const { return thread_ == CurrentThread(); }
+
+ ThreadHeap& Heap() const { return *heap_; }
+ base::PlatformThreadId ThreadId() const { return thread_; }
+
+  // Associates |ThreadState| with a given |v8::Isolate|, essentially tying
+  // their garbage collectors together.
+ void AttachToIsolate(v8::Isolate*, V8BuildEmbedderGraphCallback);
+
+ // Removes the association from a potentially attached |v8::Isolate|.
+ void DetachFromIsolate();
+
+ // Returns an |UnifiedHeapController| if ThreadState is attached to a V8
+ // isolate (see |AttachToIsolate|) and nullptr otherwise.
+ UnifiedHeapController* unified_heap_controller() const {
+ DCHECK(isolate_);
+ return unified_heap_controller_.get();
+ }
+
+ void PerformIdleLazySweep(base::TimeTicks deadline);
+ void PerformConcurrentSweep(base::JobDelegate*);
+
+ void ScheduleForcedGCForTesting();
+ void ScheduleGCIfNeeded();
+ void SetGCState(GCState);
+ GCState GetGCState() const { return gc_state_; }
+ void SetGCPhase(GCPhase);
+
+ // Immediately starts incremental marking and schedules further steps if
+ // necessary.
+ void StartIncrementalMarking(BlinkGC::GCReason);
+
+ // Returns true if marking is in progress.
+ bool IsMarkingInProgress() const { return gc_phase_ == GCPhase::kMarking; }
+
+ // Returns true if unified heap marking is in progress.
+ bool IsUnifiedGCMarkingInProgress() const {
+ return IsMarkingInProgress() && IsUnifiedHeapGC();
+ }
+
+ // Returns true if sweeping is in progress.
+ bool IsSweepingInProgress() const { return gc_phase_ == GCPhase::kSweeping; }
+
+ // Returns true if the current GC is a memory reducing GC.
+ bool IsMemoryReducingGC() const {
+ return current_gc_data_.reason ==
+ BlinkGC::GCReason::kUnifiedHeapForMemoryReductionGC ||
+ current_gc_data_.reason ==
+ BlinkGC::GCReason::kUnifiedHeapForcedForTestingGC;
+ }
+
+ bool IsUnifiedHeapGC() const {
+ return current_gc_data_.reason == BlinkGC::GCReason::kUnifiedHeapGC ||
+ current_gc_data_.reason ==
+ BlinkGC::GCReason::kUnifiedHeapForMemoryReductionGC ||
+ current_gc_data_.reason ==
+ BlinkGC::GCReason::kUnifiedHeapForcedForTestingGC;
+ }
+
+ bool FinishIncrementalMarkingIfRunning(BlinkGC::CollectionType,
+ BlinkGC::StackState,
+ BlinkGC::MarkingType,
+ BlinkGC::SweepingType,
+ BlinkGC::GCReason);
+
+ void EnableIncrementalMarkingBarrier();
+ void DisableIncrementalMarkingBarrier();
+
+ void RestartIncrementalMarkingIfPaused();
+
+ void CompleteSweep();
+
+  // Returns whether it is currently allowed to allocate an object. Mainly
+  // used for sanity-check asserts.
+ bool IsAllocationAllowed() const {
+ // Allocation is not allowed during atomic marking pause, but it is allowed
+ // during atomic sweeping pause.
+ return !InAtomicMarkingPause() && !no_allocation_count_;
+ }
+
+ // Returns whether it is currently forbidden to trigger a GC.
+ bool IsGCForbidden() const { return gc_forbidden_count_; }
+
+ // Returns whether it is currently forbidden to sweep objects.
+ bool SweepForbidden() const { return sweep_forbidden_; }
+
+ bool in_atomic_pause() const { return in_atomic_pause_; }
+
+ bool InAtomicMarkingPause() const {
+ return in_atomic_pause() && IsMarkingInProgress();
+ }
+ bool InAtomicSweepingPause() const {
+ return in_atomic_pause() && IsSweepingInProgress();
+ }
+
+ bool IsIncrementalMarking() const { return incremental_marking_; }
+ void SetIncrementalMarking(bool value) { incremental_marking_ = value; }
+
+ void SafePoint(BlinkGC::StackState);
+
+ // A region of non-weak PersistentNodes allocated on the given thread.
+ PersistentRegion* GetPersistentRegion() const {
+ return persistent_region_.get();
+ }
+
+ // A region of PersistentNodes for WeakPersistents allocated on the given
+ // thread.
+ PersistentRegion* GetWeakPersistentRegion() const {
+ return weak_persistent_region_.get();
+ }
+
+ void RegisterStaticPersistentNode(PersistentNode*);
+ void ReleaseStaticPersistentNodes();
+ void FreePersistentNode(PersistentRegion*, PersistentNode*);
+
+ v8::Isolate* GetIsolate() const { return isolate_; }
+
+ // Returns |true| if |object| resides on this thread's heap.
+ // It is well-defined to call this method on any heap allocated
+ // reference, provided its associated heap hasn't been detached
+ // and shut down. Its behavior is undefined for any other pointer
+ // value.
+ bool IsOnThreadHeap(const void* object) const {
+ return &FromObject(object)->Heap() == &Heap();
+ }
+
+ ALWAYS_INLINE bool IsOnStack(Address address) const {
+ return reinterpret_cast<Address>(start_of_stack_) >= address &&
+ address >= (reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(
+ WTF::GetCurrentStackPosition())));
+ }
+
+ int GcAge() const { return gc_age_; }
+
+ MarkingVisitor* CurrentVisitor() const {
+ return current_gc_data_.visitor.get();
+ }
+
+ // Returns true if the marking verifier is enabled, false otherwise.
+ bool IsVerifyMarkingEnabled() const;
+
+ void SkipIncrementalMarkingForTesting() {
+ skip_incremental_marking_for_testing_ = true;
+ }
+
+ // Performs stand-alone garbage collections considering only C++ objects for
+ // testing.
+ //
+ // Since it only considers C++ objects this type of GC is mostly useful for
+ // unit tests.
+ void CollectGarbageForTesting(BlinkGC::CollectionType,
+ BlinkGC::StackState,
+ BlinkGC::MarkingType,
+ BlinkGC::SweepingType,
+ BlinkGC::GCReason);
+
+ // Forced garbage collection for testing:
+ // - Performs unified heap garbage collections if ThreadState is attached to a
+ // v8::Isolate using ThreadState::AttachToIsolate.
+ // - Otherwise, performs stand-alone garbage collections.
+ // - Collects garbage as long as live memory decreases (capped at 5).
+ void CollectAllGarbageForTesting(
+ BlinkGC::StackState stack_state =
+ BlinkGC::StackState::kNoHeapPointersOnStack);
+
+ // Enables compaction for next garbage collection.
+ void EnableCompactionForNextGCForTesting();
+
+ bool RequiresForcedGCForTesting() const {
+ return current_gc_data_.stack_state ==
+ BlinkGC::StackState::kHeapPointersOnStack &&
+ !forced_scheduled_gc_for_testing_;
+ }
+
+ void EnterNoHeapVerificationScopeForTesting() {
+ ++disable_heap_verification_scope_;
+ }
+ void LeaveNoHeapVerificationScopeForTesting() {
+ --disable_heap_verification_scope_;
+ }
+
+ private:
+ class IncrementalMarkingScheduler;
+
+ // Stores whether some ThreadState is currently in incremental marking.
+ static AtomicEntryFlag incremental_marking_flag_;
+
+ static WTF::ThreadSpecific<ThreadState*>* thread_specific_;
+
+  // We can't create a static member of type ThreadState here because it would
+  // introduce a global constructor and destructor. Instead, we would like to
+  // manage the lifetime of the ThreadState attached to the main thread
+  // explicitly and still use the normal constructor and destructor for the
+  // ThreadState class. For this we reserve static storage for the main
+  // ThreadState and lazily construct it there using placement new.
+ static uint8_t main_thread_state_storage_[];
+
+ // Callback executed directly after pushing all callee-saved registers.
+ // |end_of_stack| denotes the end of the stack that can hold references to
+ // managed objects.
+ static void VisitStackAfterPushingRegisters(ThreadState*,
+ intptr_t* end_of_stack);
+
+ static bool IsForcedGC(BlinkGC::GCReason reason) {
+ return reason == BlinkGC::GCReason::kThreadTerminationGC ||
+ reason == BlinkGC::GCReason::kForcedGCForTesting ||
+ reason == BlinkGC::GCReason::kUnifiedHeapForcedForTestingGC;
+ }
+
+ ThreadState();
+ ~ThreadState();
+
+ void EnterNoAllocationScope() { no_allocation_count_++; }
+ void LeaveNoAllocationScope() { no_allocation_count_--; }
+
+ void EnterAtomicPause() {
+ DCHECK(!in_atomic_pause_);
+ in_atomic_pause_ = true;
+ }
+ void LeaveAtomicPause() {
+ DCHECK(in_atomic_pause_);
+ in_atomic_pause_ = false;
+ }
+
+ void EnterGCForbiddenScope() { gc_forbidden_count_++; }
+ void LeaveGCForbiddenScope() {
+ DCHECK_GT(gc_forbidden_count_, 0u);
+ gc_forbidden_count_--;
+ }
+
+ void EnterStaticReferenceRegistrationDisabledScope();
+ void LeaveStaticReferenceRegistrationDisabledScope();
+
+ // Performs stand-alone garbage collections considering only C++ objects.
+ //
+ // Use the public *ForTesting calls for calling GC in tests.
+ void CollectGarbage(BlinkGC::CollectionType,
+ BlinkGC::StackState,
+ BlinkGC::MarkingType,
+ BlinkGC::SweepingType,
+ BlinkGC::GCReason);
+
+ // The following methods are used to compose RunAtomicPause. Public users
+ // should use the CollectGarbage entrypoint. Internal users should use these
+ // methods to compose a full garbage collection.
+ void AtomicPauseMarkPrologue(BlinkGC::CollectionType,
+ BlinkGC::StackState,
+ BlinkGC::MarkingType,
+ BlinkGC::GCReason);
+ void AtomicPauseMarkRoots(BlinkGC::StackState,
+ BlinkGC::MarkingType,
+ BlinkGC::GCReason);
+ void AtomicPauseMarkTransitiveClosure();
+ void AtomicPauseMarkEpilogue(BlinkGC::MarkingType);
+ void AtomicPauseSweepAndCompact(BlinkGC::CollectionType,
+ BlinkGC::MarkingType marking_type,
+ BlinkGC::SweepingType sweeping_type);
+ void AtomicPauseEpilogue();
+
+ // RunAtomicPause composes the final atomic pause that finishes a mark-compact
+ // phase of a garbage collection. Depending on SweepingType it may also finish
+ // sweeping or schedule lazy/concurrent sweeping.
+ void RunAtomicPause(BlinkGC::CollectionType,
+ BlinkGC::StackState,
+ BlinkGC::MarkingType,
+ BlinkGC::SweepingType,
+ BlinkGC::GCReason);
+
+  // This version is needed to be able to start incremental marking.
+ void MarkPhasePrologue(BlinkGC::CollectionType,
+ BlinkGC::StackState,
+ BlinkGC::MarkingType,
+ BlinkGC::GCReason);
+ void MarkPhaseEpilogue(BlinkGC::MarkingType);
+ void MarkPhaseVisitRoots();
+ void MarkPhaseVisitNotFullyConstructedObjects();
+ bool MarkPhaseAdvanceMarkingBasedOnSchedule(base::TimeDelta,
+ EphemeronProcessing);
+ bool MarkPhaseAdvanceMarking(base::TimeDelta, EphemeronProcessing);
+ void VerifyMarking(BlinkGC::MarkingType);
+
+ // Visit the stack after pushing registers onto the stack.
+ void PushRegistersAndVisitStack();
+
+ // Visit local thread stack and trace all pointers conservatively. Never call
+ // directly but always call through |PushRegistersAndVisitStack|.
+ void VisitStackImpl(MarkingVisitor*, Address*, Address*);
+ void VisitStack(MarkingVisitor*, Address*);
+ void VisitUnsafeStack(MarkingVisitor*);
+
+ // Visit the asan fake stack frame corresponding to a slot on the real machine
+ // stack if there is one. Never call directly but always call through
+ // |PushRegistersAndVisitStack|.
+ void VisitAsanFakeStackForPointer(MarkingVisitor*,
+ Address,
+ Address*,
+ Address*);
+
+ // Visit all non-weak persistents allocated on this thread.
+ void VisitPersistents(Visitor*);
+
+ // Visit all weak persistents allocated on this thread.
+ void VisitWeakPersistents(Visitor*);
+
+ // Visit card tables (remembered sets) containing inter-generational pointers.
+ void VisitRememberedSets(MarkingVisitor*);
+
+ // Incremental marking implementation functions.
+ void IncrementalMarkingStartForTesting();
+ void IncrementalMarkingStart(BlinkGC::GCReason);
+  // An incremental marking step advances marking on the mutator thread. This
+  // method also reschedules concurrent marking tasks if needed. The duration
+  // parameter applies only to incremental marking steps on the mutator thread.
+ void IncrementalMarkingStep(BlinkGC::StackState);
+ void IncrementalMarkingFinalize();
+
+  // Returns true if concurrent marking is finished (i.e. all concurrent
+  // marking threads have terminated and the worklist is empty).
+ bool ConcurrentMarkingStep();
+ void ScheduleConcurrentMarking();
+ void PerformConcurrentMark(base::JobDelegate* job);
+
+ // Schedule helpers.
+ void ScheduleIdleLazySweep();
+ void ScheduleConcurrentAndLazySweep();
+
+ void NotifySweepDone();
+ void PostSweep();
+
+ // See |DetachCurrentThread|.
+ void RunTerminationGC();
+
+ void RunScheduledGC(BlinkGC::StackState);
+
+ void SynchronizeAndFinishConcurrentSweeping();
+
+ void InvokePreFinalizers();
+
+ // Adds the given observer to the ThreadState's observer list. This doesn't
+ // take ownership of the argument. The argument must not be null. The argument
+ // must not be registered before calling this.
+ void AddObserver(BlinkGCObserver*);
+
+ // Removes the given observer from the ThreadState's observer list. This
+ // doesn't take ownership of the argument. The argument must not be null.
+ // The argument must be registered before calling this.
+ void RemoveObserver(BlinkGCObserver*);
+
+ bool IsForcedGC() const { return IsForcedGC(current_gc_data_.reason); }
+
+  // Returns whether stack scanning is forced. This is currently only used in
+  // platform tests where non-nested tasks can be run with heap pointers on
+  // the stack.
+ bool HeapPointersOnStackForced() const {
+ return heap_pointers_on_stack_forced_;
+ }
+
+#if defined(ADDRESS_SANITIZER)
+ // Poisons payload of unmarked objects.
+ //
+ // Also unpoisons memory areas for handles that may require resetting which
+ // can race with destructors. Note that cross-thread access still requires
+ // synchronization using a lock.
+ void PoisonUnmarkedObjects();
+#endif // ADDRESS_SANITIZER
+
+ std::unique_ptr<ThreadHeap> heap_;
+ base::PlatformThreadId thread_;
+ std::unique_ptr<PersistentRegion> persistent_region_;
+ std::unique_ptr<PersistentRegion> weak_persistent_region_;
+
+  // Start of the stack, which is the boundary up to which conservative stack
+  // scanning needs to search for managed pointers.
+ Address* start_of_stack_;
+
+ bool in_atomic_pause_ = false;
+ bool sweep_forbidden_ = false;
+ bool heap_pointers_on_stack_forced_ = false;
+ bool incremental_marking_ = false;
+ bool should_optimize_for_load_time_ = false;
+ bool forced_scheduled_gc_for_testing_ = false;
+ size_t no_allocation_count_ = 0;
+ size_t gc_forbidden_count_ = 0;
+ size_t static_persistent_registration_disabled_count_ = 0;
+
+ GCState gc_state_ = GCState::kNoGCScheduled;
+ GCPhase gc_phase_ = GCPhase::kNone;
+ BlinkGC::GCReason reason_for_scheduled_gc_ =
+ BlinkGC::GCReason::kForcedGCForTesting;
+
+ using PreFinalizerCallback = bool (*)(const LivenessBroker&, void*);
+ using PreFinalizer = std::pair<void*, PreFinalizerCallback>;
+
+ // Pre-finalizers are called in the reverse order in which they are
+ // registered by the constructors (including constructors of Mixin objects)
+ // for an object, by processing the ordered_pre_finalizers_ back-to-front.
+ Deque<PreFinalizer> ordered_pre_finalizers_;
+
+ v8::Isolate* isolate_ = nullptr;
+ V8BuildEmbedderGraphCallback v8_build_embedder_graph_ = nullptr;
+ std::unique_ptr<UnifiedHeapController> unified_heap_controller_;
+
+#if defined(ADDRESS_SANITIZER)
+ void* asan_fake_stack_;
+#endif
+
+ HashSet<BlinkGCObserver*> observers_;
+
+  // PersistentNodes that are stored in static references: references that
+  // either have to be cleared when the thread detaches from Oilpan and shuts
+  // down, or that we have to clear before initiating LSan's leak detection.
+ HashSet<PersistentNode*> static_persistents_;
+
+ int gc_age_ = 0;
+
+ struct GCData {
+ BlinkGC::CollectionType collection_type;
+ BlinkGC::StackState stack_state;
+ BlinkGC::MarkingType marking_type;
+ BlinkGC::GCReason reason;
+ std::unique_ptr<MarkingVisitor> visitor;
+ };
+ GCData current_gc_data_;
+
+ std::unique_ptr<IncrementalMarkingScheduler> incremental_marking_scheduler_;
+ std::unique_ptr<MarkingSchedulingOracle> marking_scheduling_;
+
+ base::JobHandle marker_handle_;
+
+ base::JobHandle sweeper_handle_;
+ std::atomic_bool has_unswept_pages_{false};
+
+ size_t disable_heap_verification_scope_ = 0;
+
+ bool skip_incremental_marking_for_testing_ = false;
+
+ size_t last_concurrently_marked_bytes_ = 0;
+ base::TimeTicks last_concurrently_marked_bytes_update_;
+ bool concurrent_marking_priority_increased_ = false;
+
+ friend class BlinkGCObserver;
+ friend class incremental_marking_test::IncrementalMarkingScope;
+ friend class IncrementalMarkingTestDriver;
+ friend class HeapAllocator;
+ template <typename T>
+ friend class PrefinalizerRegistration;
+ friend class TestGCScope;
+ friend class TestSupportingGC;
+ friend class ThreadStateSchedulingTest;
+ friend class UnifiedHeapController;
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadState);
+};
+
+template <>
+class ThreadStateFor<kMainThreadOnly> {
+ STATIC_ONLY(ThreadStateFor);
+
+ public:
+ static ThreadState* GetState() {
+ // This specialization must only be used from the main thread.
+ DCHECK(ThreadState::Current()->IsMainThread());
+ return ThreadState::MainThreadState();
+ }
+};
+
+template <>
+class ThreadStateFor<kAnyThread> {
+ STATIC_ONLY(ThreadStateFor);
+
+ public:
+ static ThreadState* GetState() { return ThreadState::Current(); }
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_THREAD_STATE_H_
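
For context, a minimal usage sketch of the pre-finalizer machinery declared in this header. The SocketWrapper class, its fd_ field, and Dispose() are hypothetical; USING_PRE_FINALIZER, GarbageCollected, MakeGarbageCollected, and Visitor are the existing Oilpan APIs this patch builds on, so treat this as a sketch rather than code from the patch.

class SocketWrapper final : public GarbageCollected<SocketWrapper> {
  USING_PRE_FINALIZER(SocketWrapper, Dispose);

 public:
  explicit SocketWrapper(int fd) : fd_(fd) {}
  void Trace(Visitor*) const {}

 private:
  // Invoked by ThreadState::InvokePreFinalizers() for objects that were not
  // marked, before they are swept. A pre-finalizer may still inspect other
  // heap objects through the LivenessBroker.
  void Dispose() { /* release fd_ */ }

  int fd_;
};

Instances would be created with MakeGarbageCollected&lt;SocketWrapper&gt;(fd) as usual; the PrefinalizerRegistration member generated by the macro registers Dispose() with the current ThreadState when the object is constructed.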
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/thread_state_scopes.h b/chromium/third_party/blink/renderer/platform/heap/impl/thread_state_scopes.h
new file mode 100644
index 00000000000..07e22c95cae
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/thread_state_scopes.h
@@ -0,0 +1,128 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_THREAD_STATE_SCOPES_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_THREAD_STATE_SCOPES_H_
+
+#include "third_party/blink/renderer/platform/heap/thread_state.h"
+
+#if defined(LEAK_SANITIZER)
+#include "third_party/blink/renderer/platform/wtf/leak_annotations.h"
+#endif
+
+namespace blink {
+
+// The NoAllocationScope class is used in debug mode to catch unwanted
+// allocations, e.g. allocations during GC.
+class ThreadState::NoAllocationScope final {
+ STACK_ALLOCATED();
+ DISALLOW_COPY_AND_ASSIGN(NoAllocationScope);
+
+ public:
+ explicit NoAllocationScope(ThreadState* state) : state_(state) {
+ state_->EnterNoAllocationScope();
+ }
+ ~NoAllocationScope() { state_->LeaveNoAllocationScope(); }
+
+ private:
+ ThreadState* const state_;
+};
+
+class ThreadState::SweepForbiddenScope final {
+ STACK_ALLOCATED();
+ DISALLOW_COPY_AND_ASSIGN(SweepForbiddenScope);
+
+ public:
+ explicit SweepForbiddenScope(ThreadState* state) : state_(state) {
+ DCHECK(!state_->sweep_forbidden_);
+ state_->sweep_forbidden_ = true;
+ }
+ ~SweepForbiddenScope() {
+ DCHECK(state_->sweep_forbidden_);
+ state_->sweep_forbidden_ = false;
+ }
+
+ private:
+ ThreadState* const state_;
+};
+
+class ThreadState::GCForbiddenScope final {
+ STACK_ALLOCATED();
+
+ public:
+ explicit GCForbiddenScope(ThreadState* thread_state)
+ : thread_state_(thread_state) {
+ thread_state_->EnterGCForbiddenScope();
+ }
+ ~GCForbiddenScope() { thread_state_->LeaveGCForbiddenScope(); }
+
+ private:
+ ThreadState* const thread_state_;
+};
+
+// Used to mark when we are in an atomic pause for GC.
+class ThreadState::AtomicPauseScope final {
+ STACK_ALLOCATED();
+
+ public:
+ explicit AtomicPauseScope(ThreadState* thread_state)
+ : thread_state_(thread_state), gc_forbidden_scope(thread_state) {
+ thread_state_->EnterAtomicPause();
+ }
+ ~AtomicPauseScope() { thread_state_->LeaveAtomicPause(); }
+
+ private:
+ ThreadState* const thread_state_;
+ GCForbiddenScope gc_forbidden_scope;
+};
+
+class ThreadState::HeapPointersOnStackScope final {
+ STACK_ALLOCATED();
+
+ public:
+ explicit HeapPointersOnStackScope(ThreadState* state) : state_(state) {
+ DCHECK(!state_->heap_pointers_on_stack_forced_);
+ state_->heap_pointers_on_stack_forced_ = true;
+ }
+ ~HeapPointersOnStackScope() {
+ DCHECK(state_->heap_pointers_on_stack_forced_);
+ state_->heap_pointers_on_stack_forced_ = false;
+ }
+
+ private:
+ ThreadState* const state_;
+};
+
+#if defined(LEAK_SANITIZER)
+class ThreadState::LsanDisabledScope final {
+ STACK_ALLOCATED();
+ DISALLOW_COPY_AND_ASSIGN(LsanDisabledScope);
+
+ public:
+ explicit LsanDisabledScope(ThreadState* thread_state)
+ : thread_state_(thread_state) {
+ __lsan_disable();
+ if (thread_state_)
+ thread_state_->EnterStaticReferenceRegistrationDisabledScope();
+ }
+
+ ~LsanDisabledScope() {
+ __lsan_enable();
+ if (thread_state_)
+ thread_state_->LeaveStaticReferenceRegistrationDisabledScope();
+ }
+
+ private:
+ ThreadState* const thread_state_;
+};
+
+#define LEAK_SANITIZER_DISABLED_SCOPE \
+ ThreadState::LsanDisabledScope lsan_disabled_scope(ThreadState::Current())
+#else
+#define LEAK_SANITIZER_DISABLED_SCOPE
+#endif
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_THREAD_STATE_SCOPES_H_
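
A short sketch of how these scopes are used in practice. The function below is hypothetical; the GCForbiddenScope pattern mirrors its use in unified_heap_controller.cc later in this patch, and NoAllocationScope is the debug-mode guard described above.

void UpdateHeapBookkeeping(ThreadState* state) {
  // Forbid GC for the duration of the mutation; the destructor re-allows it.
  ThreadState::GCForbiddenScope gc_forbidden(state);

  // Any allocation on this thread now trips a DCHECK until the scope ends.
  ThreadState::NoAllocationScope no_allocation(state);

  // ... mutate heap metadata without allocating or triggering a GC ...
}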
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/thread_state_statistics.cc b/chromium/third_party/blink/renderer/platform/heap/impl/thread_state_statistics.cc
new file mode 100644
index 00000000000..d845f85de2e
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/thread_state_statistics.cc
@@ -0,0 +1,32 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/heap/impl/thread_state_statistics.h"
+
+#include "third_party/blink/renderer/platform/heap/heap.h"
+#include "third_party/blink/renderer/platform/heap/heap_stats_collector.h"
+
+namespace blink {
+
+ThreadState::Statistics ThreadState::StatisticsCollector::CollectStatistics(
+ Statistics::DetailLevel detail_level) const {
+ Statistics stats;
+ stats.detail_level = detail_level;
+ if (detail_level == Statistics::kBrief) {
+ ThreadHeapStatsCollector* stats_collector =
+ thread_state_->Heap().stats_collector();
+ stats.committed_size_bytes = stats_collector->allocated_space_bytes();
+ stats.used_size_bytes = stats_collector->object_size_in_bytes();
+ return stats;
+ }
+
+ thread_state_->CompleteSweep();
+
+ // Detailed statistics.
+ thread_state_->Heap().CollectStatistics(&stats);
+ stats.detail_level = Statistics::kDetailed;
+ return stats;
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/thread_state_statistics.h b/chromium/third_party/blink/renderer/platform/heap/impl/thread_state_statistics.h
new file mode 100644
index 00000000000..9eb1efa6d87
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/thread_state_statistics.h
@@ -0,0 +1,67 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_THREAD_STATE_STATISTICS_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_THREAD_STATE_STATISTICS_H_
+
+#include "third_party/blink/renderer/platform/heap/thread_state.h"
+
+namespace blink {
+
+struct PLATFORM_EXPORT ThreadState::Statistics final {
+ enum DetailLevel : uint32_t {
+ kBrief,
+ kDetailed,
+ };
+
+ struct ObjectStatistics {
+ size_t num_types = 0;
+ Vector<std::string> type_name;
+ Vector<size_t> type_count;
+ Vector<size_t> type_bytes;
+ };
+
+ struct PageStatistics {
+ size_t committed_size_bytes = 0;
+ size_t used_size_bytes = 0;
+ };
+
+ struct FreeListStatistics {
+ Vector<size_t> bucket_size;
+ Vector<size_t> free_count;
+ Vector<size_t> free_size;
+ };
+
+ struct ArenaStatistics {
+ std::string name;
+ size_t committed_size_bytes = 0;
+ size_t used_size_bytes = 0;
+ Vector<PageStatistics> page_stats;
+ FreeListStatistics free_list_stats;
+ // Only filled when NameClient::HideInternalName() is false.
+ ObjectStatistics object_stats;
+ };
+
+ size_t committed_size_bytes = 0;
+ size_t used_size_bytes = 0;
+ DetailLevel detail_level;
+
+ // Only filled when detail_level is kDetailed.
+ Vector<ArenaStatistics> arena_stats;
+};
+
+class PLATFORM_EXPORT ThreadState::StatisticsCollector {
+ public:
+ explicit StatisticsCollector(ThreadState* thread_state)
+ : thread_state_(thread_state) {}
+
+ ThreadState::Statistics CollectStatistics(Statistics::DetailLevel) const;
+
+ private:
+ ThreadState* const thread_state_;
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_THREAD_STATE_STATISTICS_H_
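
A brief usage sketch for the collector above (variable names are illustrative). Brief statistics avoid finishing sweeping, whereas the detailed level first calls CompleteSweep() and then fills the per-arena data, as shown in thread_state_statistics.cc.

ThreadState::Statistics brief =
    ThreadState::StatisticsCollector(ThreadState::Current())
        .CollectStatistics(ThreadState::Statistics::kBrief);
size_t committed_bytes = brief.committed_size_bytes;
size_t used_bytes = brief.used_size_bytes;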
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/threading_traits.h b/chromium/third_party/blink/renderer/platform/heap/impl/threading_traits.h
new file mode 100644
index 00000000000..bfae97b263b
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/threading_traits.h
@@ -0,0 +1,168 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_THREADING_TRAITS_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_THREADING_TRAITS_H_
+
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+#include "third_party/blink/renderer/platform/wtf/deque.h"
+#include "third_party/blink/renderer/platform/wtf/hash_counted_set.h"
+#include "third_party/blink/renderer/platform/wtf/hash_map.h"
+#include "third_party/blink/renderer/platform/wtf/hash_set.h"
+#include "third_party/blink/renderer/platform/wtf/hash_table.h"
+#include "third_party/blink/renderer/platform/wtf/type_traits.h"
+#include "third_party/blink/renderer/platform/wtf/vector.h"
+
+namespace blink {
+
+// ThreadAffinity indicates which threads objects can be used on. We
+// distinguish between objects that can be used on the main thread
+// only and objects that can be used on any thread.
+//
+// For objects that can only be used on the main thread, we avoid going
+// through thread-local storage to get to the thread state. This is
+// important for performance.
+enum ThreadAffinity {
+ kAnyThread,
+ kMainThreadOnly,
+};
+
+// TODO(haraken): These forward declarations violate dependency rules.
+// Remove them.
+class Node;
+class NodeList;
+class NodeRareData;
+
+template <
+ typename T,
+ bool mainThreadOnly =
+ WTF::IsSubclass<typename std::remove_const<T>::type, Node>::value ||
+ WTF::IsSubclass<typename std::remove_const<T>::type, NodeList>::value ||
+ WTF::IsSubclass<typename std::remove_const<T>::type,
+ NodeRareData>::value>
+struct DefaultThreadingTrait;
+
+template <typename T>
+struct DefaultThreadingTrait<T, false> {
+ STATIC_ONLY(DefaultThreadingTrait);
+ static const ThreadAffinity kAffinity = kAnyThread;
+};
+
+template <typename T>
+struct DefaultThreadingTrait<T, true> {
+ STATIC_ONLY(DefaultThreadingTrait);
+ static const ThreadAffinity kAffinity = kMainThreadOnly;
+};
+
+class HeapAllocator;
+template <typename T>
+class Member;
+template <typename T>
+class WeakMember;
+
+template <typename T>
+struct ThreadingTrait {
+ STATIC_ONLY(ThreadingTrait);
+ static const ThreadAffinity kAffinity = DefaultThreadingTrait<T>::kAffinity;
+};
+
+template <typename U>
+class ThreadingTrait<const U> : public ThreadingTrait<U> {};
+
+template <typename T>
+struct ThreadingTrait<Member<T>> {
+ STATIC_ONLY(ThreadingTrait);
+ static const ThreadAffinity kAffinity = ThreadingTrait<T>::kAffinity;
+};
+
+template <typename T>
+struct ThreadingTrait<WeakMember<T>> {
+ STATIC_ONLY(ThreadingTrait);
+ static const ThreadAffinity kAffinity = ThreadingTrait<T>::kAffinity;
+};
+
+template <typename Key, typename Value, typename T, typename U, typename V>
+struct ThreadingTrait<HashMap<Key, Value, T, U, V, HeapAllocator>> {
+ STATIC_ONLY(ThreadingTrait);
+ static const ThreadAffinity kAffinity =
+ (ThreadingTrait<Key>::kAffinity == kMainThreadOnly) &&
+ (ThreadingTrait<Value>::kAffinity == kMainThreadOnly)
+ ? kMainThreadOnly
+ : kAnyThread;
+};
+
+template <typename First, typename Second>
+struct ThreadingTrait<WTF::KeyValuePair<First, Second>> {
+ STATIC_ONLY(ThreadingTrait);
+ static const ThreadAffinity kAffinity =
+ (ThreadingTrait<First>::kAffinity == kMainThreadOnly) &&
+ (ThreadingTrait<Second>::kAffinity == kMainThreadOnly)
+ ? kMainThreadOnly
+ : kAnyThread;
+};
+
+template <typename T, typename U, typename V>
+struct ThreadingTrait<HashSet<T, U, V, HeapAllocator>> {
+ STATIC_ONLY(ThreadingTrait);
+ static const ThreadAffinity kAffinity = ThreadingTrait<T>::kAffinity;
+};
+
+template <typename T, size_t inlineCapacity>
+struct ThreadingTrait<Vector<T, inlineCapacity, HeapAllocator>> {
+ STATIC_ONLY(ThreadingTrait);
+ static const ThreadAffinity kAffinity = ThreadingTrait<T>::kAffinity;
+};
+
+template <typename T, size_t inlineCapacity>
+struct ThreadingTrait<Deque<T, inlineCapacity, HeapAllocator>> {
+ STATIC_ONLY(ThreadingTrait);
+ static const ThreadAffinity kAffinity = ThreadingTrait<T>::kAffinity;
+};
+
+template <typename T, typename U, typename V>
+struct ThreadingTrait<HashCountedSet<T, U, V, HeapAllocator>> {
+ STATIC_ONLY(ThreadingTrait);
+ static const ThreadAffinity kAffinity = ThreadingTrait<T>::kAffinity;
+};
+
+template <typename T, typename U, typename V, typename W, typename X>
+class HeapHashMap;
+template <typename T, typename U, typename V>
+class HeapHashSet;
+template <typename T, wtf_size_t inlineCapacity>
+class HeapVector;
+template <typename T>
+class HeapDeque;
+template <typename T, typename U, typename V>
+class HeapHashCountedSet;
+
+template <typename T, typename U, typename V, typename W, typename X>
+struct ThreadingTrait<HeapHashMap<T, U, V, W, X>>
+ : public ThreadingTrait<HashMap<T, U, V, W, X, HeapAllocator>> {
+ STATIC_ONLY(ThreadingTrait);
+};
+template <typename T, typename U, typename V>
+struct ThreadingTrait<HeapHashSet<T, U, V>>
+ : public ThreadingTrait<HashSet<T, U, V, HeapAllocator>> {
+ STATIC_ONLY(ThreadingTrait);
+};
+template <typename T, size_t inlineCapacity>
+struct ThreadingTrait<HeapVector<T, inlineCapacity>>
+ : public ThreadingTrait<Vector<T, inlineCapacity, HeapAllocator>> {
+ STATIC_ONLY(ThreadingTrait);
+};
+template <typename T>
+struct ThreadingTrait<HeapDeque<T>>
+ : public ThreadingTrait<Deque<T, 0, HeapAllocator>> {
+ STATIC_ONLY(ThreadingTrait);
+};
+template <typename T, typename U, typename V>
+struct ThreadingTrait<HeapHashCountedSet<T, U, V>>
+ : public ThreadingTrait<HashCountedSet<T, U, V, HeapAllocator>> {
+ STATIC_ONLY(ThreadingTrait);
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_THREADING_TRAITS_H_
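
These traits are consumed together with ThreadStateFor<> from thread_state.h. A minimal sketch of the lookup pattern, mirroring PrefinalizerRegistration earlier in this patch (the helper name is hypothetical):

// Resolves the ThreadState for a garbage-collected type T. For types pinned
// to the main thread (Node and friends) this compiles down to reading the
// static main-thread state directly instead of going through TLS.
template <typename T>
ThreadState* ThreadStateForType() {
  return ThreadStateFor<ThreadingTrait<T>::kAffinity>::GetState();
}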
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/trace_traits.h b/chromium/third_party/blink/renderer/platform/heap/impl/trace_traits.h
new file mode 100644
index 00000000000..668da565219
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/trace_traits.h
@@ -0,0 +1,381 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_TRACE_TRAITS_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_TRACE_TRAITS_H_
+
+#include "base/optional.h"
+#include "third_party/blink/renderer/platform/heap/heap.h"
+#include "third_party/blink/renderer/platform/heap/impl/gc_info.h"
+#include "third_party/blink/renderer/platform/heap/impl/heap_page.h"
+#include "third_party/blink/renderer/platform/heap/visitor.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+#include "third_party/blink/renderer/platform/wtf/assertions.h"
+#include "third_party/blink/renderer/platform/wtf/hash_counted_set.h"
+#include "third_party/blink/renderer/platform/wtf/hash_map.h"
+#include "third_party/blink/renderer/platform/wtf/hash_set.h"
+#include "third_party/blink/renderer/platform/wtf/hash_table.h"
+#include "third_party/blink/renderer/platform/wtf/linked_hash_set.h"
+#include "third_party/blink/renderer/platform/wtf/list_hash_set.h"
+#include "third_party/blink/renderer/platform/wtf/type_traits.h"
+
+namespace blink {
+
+template <typename Table>
+class HeapHashTableBacking;
+template <typename ValueArg, wtf_size_t inlineCapacity>
+class HeapListHashSetAllocator;
+template <typename T>
+struct TraceTrait;
+template <typename T>
+class WeakMember;
+
+template <typename T, bool = NeedsAdjustPointer<T>::value>
+struct AdjustPointerTrait;
+
+template <typename T>
+struct AdjustPointerTrait<T, false> {
+ STATIC_ONLY(AdjustPointerTrait);
+
+ static TraceDescriptor GetTraceDescriptor(const void* self) {
+ return {self, TraceTrait<T>::Trace};
+ }
+};
+
+template <typename T>
+struct AdjustPointerTrait<T, true> {
+ STATIC_ONLY(AdjustPointerTrait);
+
+ static TraceDescriptor GetTraceDescriptor(const void* self) {
+    // Tracing an object, and more specifically GetTraceDescriptor for an
+    // object, implies having a reference, which means the object is at least
+    // under construction. Therefore it is guaranteed that the
+    // ObjectStartBitmap was already updated to include the object, and its
+    // HeapObjectHeader was already created.
+ HeapObjectHeader* const header = HeapObjectHeader::FromInnerAddress<
+ HeapObjectHeader::AccessMode::kAtomic>(self);
+ return {header->Payload(),
+ GCInfo::From(
+ header->GcInfoIndex<HeapObjectHeader::AccessMode::kAtomic>())
+ .trace};
+ }
+};
+
+template <typename T, bool = WTF::IsTraceable<T>::value>
+struct TraceIfNeeded;
+
+template <typename T>
+struct TraceIfNeeded<T, false> {
+ STATIC_ONLY(TraceIfNeeded);
+ static void Trace(Visitor*, const T&) {}
+};
+
+template <typename T>
+struct TraceIfNeeded<T, true> {
+ STATIC_ONLY(TraceIfNeeded);
+ static void Trace(Visitor* visitor, const T& t) { visitor->Trace(t); }
+};
+
+template <WTF::WeakHandlingFlag weakness,
+ typename T,
+ typename Traits,
+ bool = WTF::IsTraceableInCollectionTrait<Traits>::value,
+ WTF::WeakHandlingFlag = WTF::WeakHandlingTrait<T>::value>
+struct TraceCollectionIfEnabled;
+
+template <WTF::WeakHandlingFlag weakness, typename T, typename Traits>
+struct TraceCollectionIfEnabled<weakness,
+ T,
+ Traits,
+ false,
+ WTF::kNoWeakHandling> {
+ STATIC_ONLY(TraceCollectionIfEnabled);
+
+ static bool IsAlive(const blink::LivenessBroker& info, const T&) {
+ return true;
+ }
+
+ static void Trace(Visitor*, const void*) {
+ static_assert(!WTF::IsTraceableInCollectionTrait<Traits>::value,
+ "T should not be traced");
+ }
+};
+
+template <typename T, typename Traits>
+struct TraceCollectionIfEnabled<WTF::kNoWeakHandling,
+ T,
+ Traits,
+ false,
+ WTF::kWeakHandling> {
+ STATIC_ONLY(TraceCollectionIfEnabled);
+
+ static void Trace(Visitor* visitor, const void* t) {
+ WTF::TraceInCollectionTrait<WTF::kNoWeakHandling, T, Traits>::Trace(
+ visitor, *reinterpret_cast<const T*>(t));
+ }
+};
+
+template <WTF::WeakHandlingFlag weakness,
+ typename T,
+ typename Traits,
+ bool,
+ WTF::WeakHandlingFlag>
+struct TraceCollectionIfEnabled {
+ STATIC_ONLY(TraceCollectionIfEnabled);
+
+ static bool IsAlive(const blink::LivenessBroker& info, const T& traceable) {
+ return WTF::TraceInCollectionTrait<weakness, T, Traits>::IsAlive(info,
+ traceable);
+ }
+
+ static void Trace(Visitor* visitor, const void* t) {
+ static_assert(WTF::IsTraceableInCollectionTrait<Traits>::value ||
+ weakness == WTF::kWeakHandling,
+ "Traits should be traced");
+ WTF::TraceInCollectionTrait<weakness, T, Traits>::Trace(
+ visitor, *reinterpret_cast<const T*>(t));
+ }
+};
+
+// The TraceTrait is used to specify how to trace an object for Oilpan and
+// wrapper tracing.
+//
+// By default, the 'Trace' method implemented on an object itself is
+// used to trace the pointers to other heap objects inside the object.
+//
+// However, the TraceTrait can be specialized to use a different
+// implementation. A common case where a TraceTrait specialization is
+// needed is when multiple inheritance leads to pointers that are not
+// to the start of the object in the Blink garbage-collected heap. In
+// that case the pointer has to be adjusted before marking.
+template <typename T>
+struct TraceTrait {
+ STATIC_ONLY(TraceTrait);
+
+ public:
+ static TraceDescriptor GetTraceDescriptor(const void* self) {
+ return AdjustPointerTrait<T>::GetTraceDescriptor(
+ static_cast<const T*>(self));
+ }
+
+ static TraceDescriptor GetWeakTraceDescriptor(const void* self) {
+ return {self, nullptr};
+ }
+
+ static void Trace(Visitor*, const void* self);
+};
+
+template <typename T>
+struct TraceTrait<const T> : public TraceTrait<T> {};
+
+template <typename T>
+void TraceTrait<T>::Trace(Visitor* visitor, const void* self) {
+ static_assert(WTF::IsTraceable<T>::value, "T should be traceable");
+ static_cast<const T*>(self)->Trace(visitor);
+}
+
+// This trace trait for std::pair will null weak members if their referent is
+// collected. If you have a collection that contains weakness, it does not
+// remove entries that contain nulled weak members from the collection.
+template <typename T, typename U>
+struct TraceTrait<std::pair<T, U>> {
+ STATIC_ONLY(TraceTrait);
+
+ public:
+ static void Trace(Visitor* visitor, const std::pair<T, U>* pair) {
+ TraceIfNeeded<T>::Trace(visitor, pair->first);
+ TraceIfNeeded<U>::Trace(visitor, pair->second);
+ }
+};
+
+// While using base::Optional<T> with garbage-collected types is generally
+// disallowed by the OptionalGarbageCollected check in blink_gc_plugin,
+// garbage-collected containers such as HeapVector are allowed and need to be
+// traced.
+template <typename T>
+struct TraceTrait<base::Optional<T>> {
+ STATIC_ONLY(TraceTrait);
+
+ public:
+ static void Trace(Visitor* visitor, const base::Optional<T>* optional) {
+ if (*optional != base::nullopt) {
+ TraceIfNeeded<T>::Trace(visitor, optional->value());
+ }
+ }
+};
+
+// Helper for processing ephemerons represented as KeyValuePair. Reorders
+// parameters if needed so that KeyType is always weak.
+template <typename _KeyType,
+ typename _ValueType,
+ typename _KeyTraits,
+ typename _ValueTraits,
+ bool = WTF::IsWeak<_ValueType>::value>
+struct EphemeronKeyValuePair {
+ using KeyType = _KeyType;
+ using ValueType = _ValueType;
+ using KeyTraits = _KeyTraits;
+ using ValueTraits = _ValueTraits;
+
+  // Ephemerons have different weakness for KeyType and ValueType. If the
+  // weakness is equal, we either have Strong/Strong or Weak/Weak, which would
+  // indicate a fully strong or fully weak pair.
+ static constexpr bool is_ephemeron =
+ WTF::IsWeak<KeyType>::value != WTF::IsWeak<ValueType>::value;
+
+ static_assert(!WTF::IsWeak<KeyType>::value ||
+ WTF::IsSubclassOfTemplate<KeyType, WeakMember>::value,
+ "Weakness must be encoded using WeakMember.");
+
+ EphemeronKeyValuePair(const KeyType* k, const ValueType* v)
+ : key(k), value(v) {}
+ const KeyType* key;
+ const ValueType* value;
+};
+
+template <typename _KeyType,
+ typename _ValueType,
+ typename _KeyTraits,
+ typename _ValueTraits>
+struct EphemeronKeyValuePair<_KeyType,
+ _ValueType,
+ _KeyTraits,
+ _ValueTraits,
+ true> : EphemeronKeyValuePair<_ValueType,
+ _KeyType,
+ _ValueTraits,
+ _KeyTraits,
+ false> {
+ EphemeronKeyValuePair(const _KeyType* k, const _ValueType* v)
+ : EphemeronKeyValuePair<_ValueType,
+ _KeyType,
+ _ValueTraits,
+ _KeyTraits,
+ false>(v, k) {}
+};
+
+} // namespace blink
+
+namespace WTF {
+
+// Catch-all for types that have a way to trace that don't have special
+// handling for weakness in collections. This means that if this type
+// contains WeakMember fields, they will simply be zeroed, but the entry
+// will not be removed from the collection. This always happens for
+// things in vectors, which don't currently support special handling of
+// weak elements.
+template <typename T, typename Traits>
+struct TraceInCollectionTrait<kNoWeakHandling, T, Traits> {
+ static bool IsAlive(const blink::LivenessBroker& info, const T& t) {
+ return true;
+ }
+
+ static void Trace(blink::Visitor* visitor, const T& t) {
+ static_assert(IsTraceableInCollectionTrait<Traits>::value,
+ "T should be traceable");
+ visitor->Trace(t);
+ }
+};
+
+template <typename T, typename Traits>
+struct TraceInCollectionTrait<kNoWeakHandling, blink::WeakMember<T>, Traits> {
+ static void Trace(blink::Visitor* visitor, const blink::WeakMember<T>& t) {
+ // Extract raw pointer to avoid using the WeakMember<> overload in Visitor.
+ visitor->TraceStrongly(t);
+ }
+};
+
+// Catch-all for types that have HashTrait support for tracing with weakness.
+// Empty to enforce specialization.
+template <typename T, typename Traits>
+struct TraceInCollectionTrait<kWeakHandling, T, Traits> {};
+
+template <typename T, typename Traits>
+struct TraceInCollectionTrait<kWeakHandling, blink::WeakMember<T>, Traits> {
+ static bool IsAlive(const blink::LivenessBroker& info,
+ const blink::WeakMember<T>& value) {
+ return info.IsHeapObjectAlive(value);
+ }
+};
+
+// This specialization of TraceInCollectionTrait is for the backing of
+// HeapListHashSet. This is for the case that we find a reference to the
+// backing from the stack. That probably means we have a GC while we are in a
+// ListHashSet method since normal API use does not put pointers to the backing
+// on the stack.
+template <typename NodeContents,
+ size_t inlineCapacity,
+ typename T,
+ typename U,
+ typename V,
+ typename W,
+ typename X,
+ typename Y>
+struct TraceInCollectionTrait<
+ kNoWeakHandling,
+ blink::HeapHashTableBacking<HashTable<
+ ListHashSetNode<NodeContents,
+ blink::HeapListHashSetAllocator<T, inlineCapacity>>*,
+ U,
+ V,
+ W,
+ X,
+ Y,
+ blink::HeapAllocator>>,
+ void> {
+ using Node =
+ ListHashSetNode<NodeContents,
+ blink::HeapListHashSetAllocator<T, inlineCapacity>>;
+ using Table = HashTable<Node*, U, V, W, X, Y, blink::HeapAllocator>;
+
+ static void Trace(blink::Visitor* visitor, const void* self) {
+ const Node* const* array = reinterpret_cast<const Node* const*>(self);
+ blink::HeapObjectHeader* header =
+ blink::HeapObjectHeader::FromPayload(self);
+ size_t length = header->PayloadSize() / sizeof(Node*);
+ const bool is_concurrent = visitor->IsConcurrent();
+ for (size_t i = 0; i < length; ++i) {
+ const Node* node;
+ if (is_concurrent) {
+        // If tracing concurrently, IsEmptyOrDeletedBucket can cause data
+        // races. Loading array[i] atomically prevents possible data races.
+        // array[i] is of type Node*, so it can be loaded atomically directly.
+ node = AsAtomicPtr(&array[i])->load(std::memory_order_relaxed);
+ } else {
+ node = array[i];
+ }
+ if (!HashTableHelper<
+ const Node*, typename Table::ExtractorType,
+ typename Table::KeyTraitsType>::IsEmptyOrDeletedBucket(node)) {
+ visitor->Trace(node);
+ }
+ }
+ }
+};
+
+// ListHashSetNode pointers (a ListHashSet is implemented as a hash table of
+// these pointers).
+template <typename Value, size_t inlineCapacity, typename Traits>
+struct TraceInCollectionTrait<
+ kNoWeakHandling,
+ ListHashSetNode<Value,
+ blink::HeapListHashSetAllocator<Value, inlineCapacity>>*,
+ Traits> {
+ using Node =
+ ListHashSetNode<Value,
+ blink::HeapListHashSetAllocator<Value, inlineCapacity>>;
+
+ static void Trace(blink::Visitor* visitor, const Node* node) {
+ static_assert(!IsWeak<Node>::value,
+ "ListHashSet does not support weakness");
+ static_assert(IsTraceableInCollectionTrait<Traits>::value,
+ "T should be traceable");
+ visitor->Trace(node);
+ }
+};
+
+} // namespace WTF
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_TRACE_TRAITS_H_
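
For reference, a sketch of the default path that TraceTrait&lt;T&gt;::Trace dispatches to. The Parent and Child types are hypothetical; the point is that the object's own Trace method reports its Member<> and WeakMember<> fields to the visitor, while the collection traits above decide how such fields behave inside heap collections.

class Child;  // hypothetical garbage-collected class

class Parent final : public GarbageCollected<Parent> {
 public:
  void Trace(Visitor* visitor) const {
    // Strong reference: keeps |strong_child_| alive.
    visitor->Trace(strong_child_);
    // Weak reference: zeroed by the GC if the referent dies.
    visitor->Trace(weak_child_);
  }

 private:
  Member<Child> strong_child_;
  WeakMember<Child> weak_child_;
};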
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/unified_heap_controller.cc b/chromium/third_party/blink/renderer/platform/heap/impl/unified_heap_controller.cc
new file mode 100644
index 00000000000..43a4e292dbf
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/unified_heap_controller.cc
@@ -0,0 +1,252 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/heap/unified_heap_controller.h"
+
+#include "base/macros.h"
+#include "third_party/blink/public/common/features.h"
+#include "third_party/blink/renderer/platform/bindings/dom_wrapper_world.h"
+#include "third_party/blink/renderer/platform/bindings/script_forbidden_scope.h"
+#include "third_party/blink/renderer/platform/bindings/script_wrappable.h"
+#include "third_party/blink/renderer/platform/bindings/wrapper_type_info.h"
+#include "third_party/blink/renderer/platform/heap/heap.h"
+#include "third_party/blink/renderer/platform/heap/heap_stats_collector.h"
+#include "third_party/blink/renderer/platform/heap/impl/marking_visitor.h"
+#include "third_party/blink/renderer/platform/heap/thread_state.h"
+#include "third_party/blink/renderer/platform/runtime_enabled_features.h"
+
+namespace blink {
+
+namespace {
+
+constexpr BlinkGC::StackState ToBlinkGCStackState(
+ v8::EmbedderHeapTracer::EmbedderStackState stack_state) {
+ return stack_state == v8::EmbedderHeapTracer::EmbedderStackState::kEmpty
+ ? BlinkGC::kNoHeapPointersOnStack
+ : BlinkGC::kHeapPointersOnStack;
+}
+
+} // namespace
+
+UnifiedHeapController::UnifiedHeapController(ThreadState* thread_state)
+ : thread_state_(thread_state) {
+ thread_state->Heap().stats_collector()->RegisterObserver(this);
+}
+
+UnifiedHeapController::~UnifiedHeapController() {
+ thread_state_->Heap().stats_collector()->UnregisterObserver(this);
+}
+
+void UnifiedHeapController::TracePrologue(
+ v8::EmbedderHeapTracer::TraceFlags v8_flags) {
+ VLOG(2) << "UnifiedHeapController::TracePrologue";
+ ThreadHeapStatsCollector::BlinkGCInV8Scope nested_scope(
+ thread_state_->Heap().stats_collector());
+
+ // Be conservative here as a new garbage collection gets started right away.
+ thread_state_->FinishIncrementalMarkingIfRunning(
+ BlinkGC::CollectionType::kMajor, BlinkGC::kHeapPointersOnStack,
+ BlinkGC::kIncrementalAndConcurrentMarking,
+ BlinkGC::kConcurrentAndLazySweeping,
+ thread_state_->current_gc_data_.reason);
+
+ thread_state_->SetGCState(ThreadState::kNoGCScheduled);
+ BlinkGC::GCReason gc_reason;
+ if (v8_flags & v8::EmbedderHeapTracer::TraceFlags::kForced) {
+ gc_reason = BlinkGC::GCReason::kUnifiedHeapForcedForTestingGC;
+ } else if (v8_flags & v8::EmbedderHeapTracer::TraceFlags::kReduceMemory) {
+ gc_reason = BlinkGC::GCReason::kUnifiedHeapForMemoryReductionGC;
+ } else {
+ gc_reason = BlinkGC::GCReason::kUnifiedHeapGC;
+ }
+ thread_state_->StartIncrementalMarking(gc_reason);
+
+ is_tracing_done_ = false;
+}
+
+void UnifiedHeapController::EnterFinalPause(EmbedderStackState stack_state) {
+ VLOG(2) << "UnifiedHeapController::EnterFinalPause";
+ ThreadHeapStatsCollector::BlinkGCInV8Scope nested_scope(
+ thread_state_->Heap().stats_collector());
+ thread_state_->AtomicPauseMarkPrologue(
+ BlinkGC::CollectionType::kMajor, ToBlinkGCStackState(stack_state),
+ BlinkGC::kIncrementalAndConcurrentMarking,
+ thread_state_->current_gc_data_.reason);
+ thread_state_->AtomicPauseMarkRoots(ToBlinkGCStackState(stack_state),
+ BlinkGC::kIncrementalAndConcurrentMarking,
+ thread_state_->current_gc_data_.reason);
+}
+
+void UnifiedHeapController::TraceEpilogue(
+ v8::EmbedderHeapTracer::TraceSummary* summary) {
+ VLOG(2) << "UnifiedHeapController::TraceEpilogue";
+ {
+ ThreadHeapStatsCollector::BlinkGCInV8Scope nested_scope(
+ thread_state_->Heap().stats_collector());
+ thread_state_->AtomicPauseMarkEpilogue(
+ BlinkGC::kIncrementalAndConcurrentMarking);
+ const BlinkGC::SweepingType sweeping_type =
+ thread_state_->IsForcedGC() ? BlinkGC::kEagerSweeping
+ : BlinkGC::kConcurrentAndLazySweeping;
+ thread_state_->AtomicPauseSweepAndCompact(
+ BlinkGC::CollectionType::kMajor,
+ BlinkGC::kIncrementalAndConcurrentMarking, sweeping_type);
+
+ ThreadHeapStatsCollector* const stats_collector =
+ thread_state_->Heap().stats_collector();
+ summary->allocated_size =
+ static_cast<size_t>(stats_collector->marked_bytes());
+ summary->time = stats_collector->marking_time_so_far().InMillisecondsF();
+ buffered_allocated_size_ = 0;
+ }
+ thread_state_->AtomicPauseEpilogue();
+}
+
+void UnifiedHeapController::RegisterV8References(
+ const std::vector<std::pair<void*, void*>>&
+ internal_fields_of_potential_wrappers) {
+ VLOG(2) << "UnifiedHeapController::RegisterV8References";
+ DCHECK(thread_state()->IsMarkingInProgress());
+
+ const bool was_in_atomic_pause = thread_state()->in_atomic_pause();
+ if (!was_in_atomic_pause)
+ ThreadState::Current()->EnterAtomicPause();
+ for (const auto& internal_fields : internal_fields_of_potential_wrappers) {
+ const WrapperTypeInfo* wrapper_type_info =
+ reinterpret_cast<const WrapperTypeInfo*>(internal_fields.first);
+ if (wrapper_type_info->gin_embedder != gin::GinEmbedder::kEmbedderBlink) {
+ continue;
+ }
+ is_tracing_done_ = false;
+ wrapper_type_info->Trace(thread_state_->CurrentVisitor(),
+ internal_fields.second);
+ }
+ if (!was_in_atomic_pause)
+ ThreadState::Current()->LeaveAtomicPause();
+}
+
+bool UnifiedHeapController::AdvanceTracing(double deadline_in_ms) {
+ VLOG(2) << "UnifiedHeapController::AdvanceTracing";
+ ThreadHeapStatsCollector::BlinkGCInV8Scope nested_scope(
+ thread_state_->Heap().stats_collector());
+ if (!thread_state_->in_atomic_pause()) {
+ ThreadHeapStatsCollector::EnabledScope advance_tracing_scope(
+ thread_state_->Heap().stats_collector(),
+ ThreadHeapStatsCollector::kUnifiedMarkingStep);
+ // V8 calls into embedder tracing from its own marking to ensure
+ // progress. Oilpan will additionally schedule marking steps.
+ ThreadState::AtomicPauseScope atomic_pause_scope(thread_state_);
+ ScriptForbiddenScope script_forbidden_scope;
+ is_tracing_done_ = thread_state_->MarkPhaseAdvanceMarkingBasedOnSchedule(
+ base::TimeDelta::FromMillisecondsD(deadline_in_ms),
+ ThreadState::EphemeronProcessing::kPartialProcessing);
+ if (!is_tracing_done_) {
+ if (base::FeatureList::IsEnabled(
+ blink::features::kBlinkHeapConcurrentMarking)) {
+ thread_state_->ConcurrentMarkingStep();
+ }
+ thread_state_->RestartIncrementalMarkingIfPaused();
+ }
+ return is_tracing_done_;
+ }
+ thread_state_->AtomicPauseMarkTransitiveClosure();
+ is_tracing_done_ = true;
+ return true;
+}
+
+bool UnifiedHeapController::IsTracingDone() {
+ return is_tracing_done_;
+}
+
+bool UnifiedHeapController::IsRootForNonTracingGC(
+ const v8::TracedReference<v8::Value>& handle) {
+ if (thread_state()->IsIncrementalMarking()) {
+ // We have a non-tracing GC while unified GC is in progress. Treat all
+ // objects as roots to avoid stale pointers in the marking worklists.
+ return true;
+ }
+ const uint16_t class_id = handle.WrapperClassId();
+ // Stand-alone reference or kCustomWrappableId. Keep as root as
+ // we don't know better.
+ if (class_id != WrapperTypeInfo::kNodeClassId &&
+ class_id != WrapperTypeInfo::kObjectClassId)
+ return true;
+
+ const v8::TracedReference<v8::Object>& traced =
+ handle.template As<v8::Object>();
+ if (ToWrapperTypeInfo(traced)->IsActiveScriptWrappable() &&
+ ToScriptWrappable(traced)->HasPendingActivity()) {
+ return true;
+ }
+
+ if (ToScriptWrappable(traced)->HasEventListeners()) {
+ return true;
+ }
+
+ return false;
+}
+
+void UnifiedHeapController::ResetHandleInNonTracingGC(
+ const v8::TracedReference<v8::Value>& handle) {
+ const uint16_t class_id = handle.WrapperClassId();
+  // Only consider handles that have not been treated as roots, see
+  // IsRootForNonTracingGC.
+ if (class_id != WrapperTypeInfo::kNodeClassId &&
+ class_id != WrapperTypeInfo::kObjectClassId)
+ return;
+
+  // We should not reset any handles during an already running tracing
+  // collection. Resetting a handle could re-allocate a backing or trigger
+  // potential in-place rehashing. Both operations may trigger write barriers
+  // by moving references. Such references may already be dead but not yet
+  // cleared, which would result in reporting dead objects to V8.
+ DCHECK(!thread_state()->IsIncrementalMarking());
+  // Clearing the wrapper below adjusts the DOM wrapper store, which may
+  // re-allocate its backing. We have to avoid reporting memory to V8 as that
+  // may trigger a GC during GC.
+ ThreadState::GCForbiddenScope gc_forbidden(thread_state());
+ const v8::TracedReference<v8::Object>& traced = handle.As<v8::Object>();
+ bool success = DOMWrapperWorld::UnsetSpecificWrapperIfSet(
+ ToScriptWrappable(traced), traced);
+ // Since V8 found a handle, Blink needs to find it as well when trying to
+ // remove it.
+ CHECK(success);
+}
+
+bool UnifiedHeapController::IsRootForNonTracingGC(
+ const v8::TracedGlobal<v8::Value>& handle) {
+ CHECK(false) << "Blink does not use v8::TracedGlobal.";
+ return false;
+}
+
+void UnifiedHeapController::ReportBufferedAllocatedSizeIfPossible() {
+ // Avoid reporting to V8 in the following conditions as that may trigger GC
+ // finalizations where not allowed.
+ // - Recursive sweeping.
+ // - GC forbidden scope.
+ if ((thread_state()->IsSweepingInProgress() &&
+ thread_state()->SweepForbidden()) ||
+ thread_state()->IsGCForbidden()) {
+ return;
+ }
+
+ if (buffered_allocated_size_ < 0) {
+ DecreaseAllocatedSize(static_cast<size_t>(-buffered_allocated_size_));
+ } else {
+ IncreaseAllocatedSize(static_cast<size_t>(buffered_allocated_size_));
+ }
+ buffered_allocated_size_ = 0;
+}
+
+void UnifiedHeapController::IncreaseAllocatedObjectSize(size_t delta_bytes) {
+ buffered_allocated_size_ += static_cast<int64_t>(delta_bytes);
+ ReportBufferedAllocatedSizeIfPossible();
+}
+
+void UnifiedHeapController::DecreaseAllocatedObjectSize(size_t delta_bytes) {
+ buffered_allocated_size_ -= static_cast<int64_t>(delta_bytes);
+ ReportBufferedAllocatedSizeIfPossible();
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/unified_heap_controller.h b/chromium/third_party/blink/renderer/platform/heap/impl/unified_heap_controller.h
new file mode 100644
index 00000000000..c42283ca6c3
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/unified_heap_controller.h
@@ -0,0 +1,75 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_UNIFIED_HEAP_CONTROLLER_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_UNIFIED_HEAP_CONTROLLER_H_
+
+#include "base/macros.h"
+#include "third_party/blink/renderer/platform/heap/heap_stats_collector.h"
+#include "third_party/blink/renderer/platform/platform_export.h"
+#include "v8/include/v8.h"
+
+namespace blink {
+
+class ThreadState;
+
+// UnifiedHeapController ties V8's garbage collector to Oilpan for performing a
+// garbage collection across both managed heaps.
+//
+// Unified heap garbage collections are triggered by V8 and mark the full
+// transitive closure of V8 and Blink (Oilpan) objects. The garbage collection
+// is initially triggered by V8. Both collectors report live references using
+// the EmbedderHeapTracer APIs. V8 and Blink both run separate incremental
+// marking steps to compute their live closures, respectively. The final atomic
+// pause is then initiated by V8 and triggers a fixed-point computation between
+// V8 and Blink where both GCs report live references to each other and drain
+// their marking work lists until they are empty and no new references are
+// found.
+//
+// Oilpan does not consider references from DOM wrappers (JavaScript objects on
+// V8's heap) as roots for such garbage collections.
+class PLATFORM_EXPORT UnifiedHeapController final
+ : public v8::EmbedderHeapTracer,
+ public ThreadHeapStatsObserver {
+ DISALLOW_IMPLICIT_CONSTRUCTORS(UnifiedHeapController);
+
+ public:
+ explicit UnifiedHeapController(ThreadState*);
+ ~UnifiedHeapController() override;
+
+ // v8::EmbedderHeapTracer implementation.
+ void TracePrologue(v8::EmbedderHeapTracer::TraceFlags) final;
+ void TraceEpilogue(v8::EmbedderHeapTracer::TraceSummary*) final;
+ void EnterFinalPause(EmbedderStackState) final;
+ void RegisterV8References(const std::vector<std::pair<void*, void*>>&) final;
+ bool AdvanceTracing(double) final;
+ bool IsTracingDone() final;
+ bool IsRootForNonTracingGC(const v8::TracedReference<v8::Value>&) final;
+ bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>&) final;
+ void ResetHandleInNonTracingGC(const v8::TracedReference<v8::Value>&) final;
+
+ ThreadState* thread_state() const { return thread_state_; }
+
+ // ThreadHeapStatsObserver implementation.
+ void IncreaseAllocatedObjectSize(size_t) final;
+ void DecreaseAllocatedObjectSize(size_t) final;
+ // Not needed.
+ void ResetAllocatedObjectSize(size_t) final {}
+ void IncreaseAllocatedSpace(size_t) final {}
+ void DecreaseAllocatedSpace(size_t) final {}
+
+ private:
+ void ReportBufferedAllocatedSizeIfPossible();
+
+ ThreadState* const thread_state_;
+  // Whether the Blink heap has been fully processed.
+ bool is_tracing_done_ = false;
+
+  // Allocated size buffered until it can safely be reported to V8. Negative
+  // values are reported as decreases, positive values as increases.
+ int64_t buffered_allocated_size_ = 0;
+};
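+
+// Minimal usage sketch (illustrative only, not part of the original file):
+// the controller is registered with V8 as the embedder heap tracer so that V8
+// drives unified heap GCs through the EmbedderHeapTracer callbacks above.
+//
+//   UnifiedHeapController* controller =
+//       thread_state->unified_heap_controller();
+//   isolate->SetEmbedderHeapTracer(controller);
+//
+// v8::Isolate::SetEmbedderHeapTracer() is the V8 API used to attach a tracer;
+// the accessor on ThreadState is shown here only for illustration.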
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_UNIFIED_HEAP_CONTROLLER_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/unified_heap_marking_visitor.cc b/chromium/third_party/blink/renderer/platform/heap/impl/unified_heap_marking_visitor.cc
new file mode 100644
index 00000000000..00a9bc24826
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/unified_heap_marking_visitor.cc
@@ -0,0 +1,108 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/heap/unified_heap_marking_visitor.h"
+
+#include "third_party/blink/public/common/features.h"
+#include "third_party/blink/renderer/platform/bindings/script_wrappable.h"
+#include "third_party/blink/renderer/platform/bindings/trace_wrapper_v8_reference.h"
+#include "third_party/blink/renderer/platform/bindings/v8_per_isolate_data.h"
+#include "third_party/blink/renderer/platform/heap/blink_gc.h"
+#include "third_party/blink/renderer/platform/heap/thread_state.h"
+#include "third_party/blink/renderer/platform/heap/unified_heap_controller.h"
+
+namespace blink {
+
+UnifiedHeapMarkingVisitorBase::UnifiedHeapMarkingVisitorBase(
+ ThreadState* thread_state,
+ v8::Isolate* isolate,
+ int task_id)
+ : isolate_(isolate),
+ controller_(thread_state->unified_heap_controller()),
+ v8_references_worklist_(thread_state->Heap().GetV8ReferencesWorklist(),
+ task_id),
+ task_id_(task_id) {
+ DCHECK(controller_);
+}
+
+void UnifiedHeapMarkingVisitorBase::VisitImpl(
+ const TraceWrapperV8Reference<v8::Value>& v8_reference) {
+ DCHECK(isolate_);
+ if (v8_reference.IsEmptySafe())
+ return;
+ if (task_id_ != WorklistTaskId::MutatorThread) {
+    // This is a temporary solution. Pushing directly from concurrent threads
+    // to the V8 marking worklist would currently result in data races. This
+    // buffering guarantees correctness until a long-term solution is in place
+    // (i.e. giving Oilpan's concurrent threads data-race-free access to the
+    // V8 marking worklist).
+ v8_references_worklist_.Push(&v8_reference);
+ return;
+ }
+ controller_->RegisterEmbedderReference(
+ v8_reference.template Cast<v8::Data>().Get());
+}
+
+UnifiedHeapMarkingVisitor::UnifiedHeapMarkingVisitor(ThreadState* thread_state,
+ MarkingMode mode,
+ v8::Isolate* isolate)
+ : MarkingVisitor(thread_state, mode),
+ UnifiedHeapMarkingVisitorBase(thread_state,
+ isolate,
+ WorklistTaskId::MutatorThread) {}
+
+// static
+void UnifiedHeapMarkingVisitor::WriteBarrier(
+ const TraceWrapperV8Reference<v8::Value>& object) {
+ if (object.IsEmpty() || !ThreadState::IsAnyIncrementalMarking())
+ return;
+
+ ThreadState* thread_state = ThreadState::Current();
+ if (!thread_state->IsIncrementalMarking())
+ return;
+
+ thread_state->CurrentVisitor()->Trace(object);
+}
+
+// static
+void UnifiedHeapMarkingVisitor::WriteBarrier(
+ v8::Isolate* isolate,
+ const WrapperTypeInfo* wrapper_type_info,
+ const void* object) {
+ // |object| here is either ScriptWrappable or CustomWrappable.
+
+ if (!ThreadState::IsAnyIncrementalMarking())
+ return;
+
+ ThreadState* thread_state = ThreadState::Current();
+ if (!thread_state->IsIncrementalMarking())
+ return;
+
+ wrapper_type_info->Trace(thread_state->CurrentVisitor(), object);
+}
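+
+// Illustrative sketch (not part of the original file) of a caller-side write
+// barrier, assuming a hypothetical wrapper that stores a
+// TraceWrapperV8Reference<v8::Value> member named |v8_value_|:
+//
+//   void MyWrapper::SetValue(v8::Isolate* isolate,
+//                            v8::Local<v8::Value> value) {
+//     v8_value_.Set(isolate, value);  // hypothetical member and setter
+//     UnifiedHeapMarkingVisitor::WriteBarrier(v8_value_);
+//   }
+//
+// During incremental marking the barrier re-traces the updated reference so
+// it is not missed; outside of incremental marking it is a no-op.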
+
+void UnifiedHeapMarkingVisitor::Visit(
+ const TraceWrapperV8Reference<v8::Value>& v) {
+ VisitImpl(v);
+}
+
+ConcurrentUnifiedHeapMarkingVisitor::ConcurrentUnifiedHeapMarkingVisitor(
+ ThreadState* thread_state,
+ MarkingMode mode,
+ v8::Isolate* isolate,
+ int task_id)
+ : ConcurrentMarkingVisitor(thread_state, mode, task_id),
+ UnifiedHeapMarkingVisitorBase(thread_state, isolate, task_id) {}
+
+void ConcurrentUnifiedHeapMarkingVisitor::FlushWorklists() {
+ ConcurrentMarkingVisitor::FlushWorklists();
+ v8_references_worklist_.FlushToGlobal();
+}
+
+void ConcurrentUnifiedHeapMarkingVisitor::Visit(
+ const TraceWrapperV8Reference<v8::Value>& v) {
+ VisitImpl(v);
+}
+
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/unified_heap_marking_visitor.h b/chromium/third_party/blink/renderer/platform/heap/impl/unified_heap_marking_visitor.h
new file mode 100644
index 00000000000..8d589b4e52b
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/unified_heap_marking_visitor.h
@@ -0,0 +1,89 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_UNIFIED_HEAP_MARKING_VISITOR_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_UNIFIED_HEAP_MARKING_VISITOR_H_
+
+#include "base/macros.h"
+#include "third_party/blink/renderer/platform/heap/impl/marking_visitor.h"
+
+namespace v8 {
+class EmbedderHeapTracer;
+}
+
+namespace blink {
+
+struct WrapperTypeInfo;
+
+// Marking visitor for unified heap garbage collections. Extends the regular
+// Oilpan marking visitor by also providing write barriers and visitation
+// methods that allow for announcing reachable objects to V8. The visitor can
+// be used from any thread.
+class PLATFORM_EXPORT UnifiedHeapMarkingVisitorBase {
+ public:
+ virtual ~UnifiedHeapMarkingVisitorBase() = default;
+
+ protected:
+ UnifiedHeapMarkingVisitorBase(ThreadState*, v8::Isolate*, int);
+
+ // Visitation methods that announce reachable wrappers to V8.
+ void VisitImpl(const TraceWrapperV8Reference<v8::Value>&);
+
+ v8::Isolate* const isolate_;
+ v8::EmbedderHeapTracer* const controller_;
+ V8ReferencesWorklist::View v8_references_worklist_;
+
+ private:
+ int task_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(UnifiedHeapMarkingVisitorBase);
+};
+
+// Same as the base visitor with the difference that it is bound to the main
+// thread. It also implements various write barriers that must only be called
+// from the main thread.
+class PLATFORM_EXPORT UnifiedHeapMarkingVisitor
+ : public MarkingVisitor,
+ public UnifiedHeapMarkingVisitorBase {
+ public:
+ // Write barriers for annotating a write during incremental marking.
+ static void WriteBarrier(const TraceWrapperV8Reference<v8::Value>&);
+ static void WriteBarrier(v8::Isolate*, const WrapperTypeInfo*, const void*);
+
+ UnifiedHeapMarkingVisitor(ThreadState*, MarkingMode, v8::Isolate*);
+ ~UnifiedHeapMarkingVisitor() override = default;
+
+ protected:
+ using Visitor::Visit;
+ void Visit(const TraceWrapperV8Reference<v8::Value>&) final;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(UnifiedHeapMarkingVisitor);
+};
+
+// Same as the base visitor with the difference that it is bound to a
+// concurrent thread.
+class PLATFORM_EXPORT ConcurrentUnifiedHeapMarkingVisitor
+ : public ConcurrentMarkingVisitor,
+ public UnifiedHeapMarkingVisitorBase {
+ public:
+ ConcurrentUnifiedHeapMarkingVisitor(ThreadState*,
+ MarkingMode,
+ v8::Isolate*,
+ int task_id);
+ ~ConcurrentUnifiedHeapMarkingVisitor() override = default;
+
+ void FlushWorklists() override;
+
+ protected:
+ using Visitor::Visit;
+ void Visit(const TraceWrapperV8Reference<v8::Value>&) final;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ConcurrentUnifiedHeapMarkingVisitor);
+};
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_UNIFIED_HEAP_MARKING_VISITOR_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/unsanitized_atomic.cc b/chromium/third_party/blink/renderer/platform/heap/impl/unsanitized_atomic.cc
new file mode 100644
index 00000000000..5bd25b96116
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/unsanitized_atomic.cc
@@ -0,0 +1,87 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/heap/impl/unsanitized_atomic.h"
+
+#include "cstdint"
+
+#include "base/compiler_specific.h"
+
+#if HAS_FEATURE(address_sanitizer)
+#error "Must be built without asan."
+#endif
+
+namespace blink {
+namespace internal {
+
+namespace {
+constexpr int ToGCCMemoryOrder(std::memory_order order) {
+ switch (order) {
+ case std::memory_order_seq_cst:
+ return __ATOMIC_SEQ_CST;
+ case std::memory_order_relaxed:
+ return __ATOMIC_RELAXED;
+ case std::memory_order_acquire:
+ return __ATOMIC_ACQUIRE;
+ case std::memory_order_release:
+ return __ATOMIC_RELEASE;
+ case std::memory_order_acq_rel:
+ return __ATOMIC_ACQ_REL;
+ case std::memory_order_consume:
+ return __ATOMIC_CONSUME;
+ }
+}
+} // namespace
+
+template <typename T>
+void UnsanitizedAtomic<T>::store(T desired, std::memory_order order) {
+ __atomic_store(&value_, &desired, ToGCCMemoryOrder(order));
+}
+
+template <typename T>
+T UnsanitizedAtomic<T>::load(std::memory_order order) const {
+ T result;
+ __atomic_load(&value_, &result, ToGCCMemoryOrder(order));
+ return result;
+}
+
+template <typename T>
+bool UnsanitizedAtomic<T>::compare_exchange_strong(T& expected,
+ T desired,
+ std::memory_order order) {
+ return compare_exchange_strong(expected, desired, order, order);
+}
+
+template <typename T>
+bool UnsanitizedAtomic<T>::compare_exchange_strong(
+ T& expected,
+ T desired,
+ std::memory_order succ_order,
+ std::memory_order fail_order) {
+ return __atomic_compare_exchange(&value_, &expected, &desired, false,
+ ToGCCMemoryOrder(succ_order),
+ ToGCCMemoryOrder(fail_order));
+}
+
+template <typename T>
+bool UnsanitizedAtomic<T>::compare_exchange_weak(T& expected,
+ T desired,
+ std::memory_order order) {
+ return compare_exchange_weak(expected, desired, order, order);
+}
+
+template <typename T>
+bool UnsanitizedAtomic<T>::compare_exchange_weak(T& expected,
+ T desired,
+ std::memory_order succ_order,
+ std::memory_order fail_order) {
+ return __atomic_compare_exchange(&value_, &expected, &desired, true,
+ ToGCCMemoryOrder(succ_order),
+ ToGCCMemoryOrder(fail_order));
+}
+
+template class PLATFORM_EXPORT UnsanitizedAtomic<uint16_t>;
+
+} // namespace internal
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/unsanitized_atomic.h b/chromium/third_party/blink/renderer/platform/heap/impl/unsanitized_atomic.h
new file mode 100644
index 00000000000..7c6828d9ee1
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/unsanitized_atomic.h
@@ -0,0 +1,65 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_UNSANITIZED_ATOMIC_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_UNSANITIZED_ATOMIC_H_
+
+#include <atomic>
+
+#include "third_party/blink/renderer/platform/platform_export.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+
+namespace blink {
+namespace internal {
+
+// Simple wrapper for std::atomic<> that makes sure that accesses to the
+// underlying data are not sanitized. This is needed because the
+// no_sanitize_address attribute doesn't propagate down to callees. Must be
+// used with care. It is currently only used to access the poisoned
+// HeapObjectHeader. For derived or user types, an explicit instantiation must
+// be added to unsanitized_atomic.cc.
+template <typename T>
+class PLATFORM_EXPORT UnsanitizedAtomic final {
+ public:
+ UnsanitizedAtomic() = default;
+ explicit UnsanitizedAtomic(T value) : value_(value) {}
+
+ void store(T, std::memory_order = std::memory_order_seq_cst);
+ T load(std::memory_order = std::memory_order_seq_cst) const;
+
+ bool compare_exchange_strong(T&,
+ T,
+ std::memory_order = std::memory_order_seq_cst);
+ bool compare_exchange_strong(T&, T, std::memory_order, std::memory_order);
+
+ bool compare_exchange_weak(T&,
+ T,
+ std::memory_order = std::memory_order_seq_cst);
+ bool compare_exchange_weak(T&, T, std::memory_order, std::memory_order);
+
+ private:
+ T value_;
+};
+
+template <typename T>
+auto* AsUnsanitizedAtomic(T* ptr) {
+#if defined(ADDRESS_SANITIZER)
+ return reinterpret_cast<UnsanitizedAtomic<T>*>(ptr);
+#else
+ return WTF::AsAtomicPtr(ptr);
+#endif
+}
+
+template <typename T>
+const auto* AsUnsanitizedAtomic(const T* ptr) {
+#if defined(ADDRESS_SANITIZER)
+ return reinterpret_cast<const UnsanitizedAtomic<T>*>(ptr);
+#else
+ return WTF::AsAtomicPtr(ptr);
+#endif
+}
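+
+// Usage sketch (illustrative only, not part of the original file): reading a
+// potentially poisoned uint16_t field without tripping ASan. Note that
+// uint16_t is the only type explicitly instantiated in unsanitized_atomic.cc.
+//
+//   const uint16_t* encoded = /* pointer into a poisoned HeapObjectHeader */;
+//   uint16_t value =
+//       AsUnsanitizedAtomic(encoded)->load(std::memory_order_acquire);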
+
+} // namespace internal
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_UNSANITIZED_ATOMIC_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/visitor.h b/chromium/third_party/blink/renderer/platform/heap/impl/visitor.h
new file mode 100644
index 00000000000..434bf882da7
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/visitor.h
@@ -0,0 +1,312 @@
+/*
+ * Copyright (C) 2013 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_VISITOR_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_VISITOR_H_
+
+#include <memory>
+#include "third_party/blink/renderer/platform/heap/blink_gc.h"
+#include "third_party/blink/renderer/platform/heap/garbage_collected.h"
+#include "third_party/blink/renderer/platform/platform_export.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+#include "third_party/blink/renderer/platform/wtf/assertions.h"
+#include "third_party/blink/renderer/platform/wtf/forward.h"
+#include "third_party/blink/renderer/platform/wtf/hash_traits.h"
+#include "third_party/blink/renderer/platform/wtf/type_traits.h"
+
+namespace base {
+class Location;
+}
+
+namespace v8 {
+class Value;
+}
+
+namespace blink {
+
+class LivenessBroker;
+template <typename T>
+struct TraceTrait;
+class ThreadState;
+class Visitor;
+template <typename T>
+class TraceWrapperV8Reference;
+
+// The TraceMethodDelegate is used to convert a trace method for type T to a
+// TraceCallback. This allows us to pass a type's trace method as a parameter
+// to the PersistentNode constructor. The PersistentNode constructor needs the
+// specific trace method due to an issue with the Windows compiler, which
+// instantiates even unused variables. This causes problems in header files
+// where we have only forward declarations of classes.
+//
+// This interface is safe to use on concurrent threads. All accesses (reads)
+// from the member are done atomically.
+template <typename T, void (T::*method)(Visitor*) const>
+struct TraceMethodDelegate {
+ STATIC_ONLY(TraceMethodDelegate);
+ static void Trampoline(Visitor* visitor, const void* self) {
+ (reinterpret_cast<const T*>(self)->*method)(visitor);
+ }
+};
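+
+// Usage sketch (illustrative only, not part of the original file): turning a
+// trace method into a TraceCallback for a hypothetical type Node.
+//
+//   class Node : public GarbageCollected<Node> {
+//    public:
+//     void Trace(Visitor*) const;
+//   };
+//   TraceCallback callback =
+//       TraceMethodDelegate<Node, &Node::Trace>::Trampoline;
+//   callback(visitor, node);  // equivalent to node->Trace(visitor)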
+
+template <typename T, void (T::*method)(const LivenessBroker&)>
+struct WeakCallbackMethodDelegate {
+ STATIC_ONLY(WeakCallbackMethodDelegate);
+ static void Trampoline(const LivenessBroker& info, const void* self) {
+ (reinterpret_cast<T*>(const_cast<void*>(self))->*method)(info);
+ }
+};
+
+// Visitor is used to traverse Oilpan's object graph.
+class PLATFORM_EXPORT Visitor {
+ USING_FAST_MALLOC(Visitor);
+
+ public:
+ explicit Visitor(ThreadState* state) : state_(state) {}
+ virtual ~Visitor() = default;
+
+ inline ThreadState* State() const { return state_; }
+ inline ThreadHeap& Heap() const { return state_->Heap(); }
+
+ // Static visitor implementation forwarding to dynamic interface.
+
+ template <typename T>
+ void TraceRoot(const T* t, const base::Location& location) {
+ static_assert(sizeof(T), "T must be fully defined");
+ static_assert(IsGarbageCollectedType<T>::value,
+ "T needs to be a garbage collected object");
+ if (!t)
+ return;
+ VisitRoot(t, TraceDescriptorFor(t), location);
+ }
+
+ template <typename T>
+ void Trace(const Member<T>& t) {
+ const T* value = t.GetSafe();
+
+ DCHECK(!Member<T>::IsMemberHashTableDeletedValue(value));
+
+ Trace(value);
+ }
+
+ // TraceStrongly strongifies WeakMembers.
+ template <typename T>
+ ALWAYS_INLINE void TraceStrongly(const WeakMember<T>& t) {
+ const T* value = t.GetSafe();
+
+ DCHECK(!WeakMember<T>::IsMemberHashTableDeletedValue(value));
+
+ Trace<T>(value);
+ }
+  // Fallback methods used only when we need to trace raw pointers of T. This
+  // is the case when tracing through a union, where Member is not supported.
+ template <typename T>
+ void Trace(T* t) {
+ Trace(const_cast<const T*>(t));
+ }
+ template <typename T>
+ void Trace(const T* t) {
+ static_assert(sizeof(T), "T must be fully defined");
+ static_assert(IsGarbageCollectedType<T>::value,
+ "T needs to be a garbage collected object");
+ if (!t)
+ return;
+ Visit(t, TraceDescriptorFor(t));
+ }
+
+ // WeakMember version of the templated trace method. It doesn't keep
+ // the traced thing alive, but will write null to the WeakMember later
+ // if the pointed-to object is dead. It's lying for this to be const,
+ // but the overloading resolver prioritizes constness too high when
+ // picking the correct overload, so all these trace methods have to have
+ // the same constness on their argument to allow the type to decide.
+ template <typename T>
+ void Trace(const WeakMember<T>& weak_member) {
+ static_assert(sizeof(T), "T must be fully defined");
+ static_assert(IsGarbageCollectedType<T>::value,
+ "T needs to be a garbage collected object");
+
+ const T* value = weak_member.GetSafe();
+
+ if (!value)
+ return;
+
+ DCHECK(!WeakMember<T>::IsMemberHashTableDeletedValue(value));
+ VisitWeak(value, &weak_member, TraceDescriptorFor(value),
+ &HandleWeakCell<T>);
+ }
+
+ // Fallback trace method for part objects to allow individual trace methods
+ // to trace through a part object with visitor->trace(m_partObject). This
+ // takes a const argument, because otherwise it will match too eagerly: a
+ // non-const argument would match a non-const Vector<T>& argument better
+ // than the specialization that takes const Vector<T>&. For a similar reason,
+ // the other specializations take a const argument even though they are
+ // usually used with non-const arguments, otherwise this function would match
+ // too well.
+ template <typename T>
+ void Trace(const T& t) {
+ static_assert(sizeof(T), "T must be fully defined");
+ if (std::is_polymorphic<T>::value) {
+ const intptr_t vtable = *reinterpret_cast<const intptr_t*>(&t);
+ if (!vtable)
+ return;
+ }
+ TraceTrait<T>::Trace(this, &t);
+ }
+
+ template <typename T, typename U>
+ void TraceEphemeron(const WeakMember<T>& key, const U* value) {
+ const T* t = key.GetSafe();
+ if (!t)
+ return;
+ VisitEphemeron(TraceDescriptorFor(t).base_object_payload,
+ TraceDescriptorFor(value));
+ }
+
+ template <typename T>
+ void TraceWeakContainer(const T* object,
+ const T* const* slot,
+ TraceDescriptor strong_desc,
+                          TraceDescriptor weak_desc,
+ WeakCallback weak_callback,
+ const void* weak_callback_parameter) {
+ static_assert(sizeof(T), "T must be fully defined");
+ static_assert(IsGarbageCollectedType<T>::value,
+ "T needs to be a garbage collected object");
+ VisitWeakContainer(reinterpret_cast<const void*>(object),
+ reinterpret_cast<const void* const*>(slot), strong_desc,
+                       weak_desc, weak_callback, weak_callback_parameter);
+ }
+
+ template <typename T>
+ void TraceMovablePointer(const T* const* slot) {
+ RegisterMovableSlot(reinterpret_cast<const void* const*>(slot));
+ }
+
+ // Cross-component tracing interface.
+ template <typename V8Type>
+ void Trace(const TraceWrapperV8Reference<V8Type>& v8reference) {
+ Visit(v8reference.template Cast<v8::Value>());
+ }
+
+ // Dynamic visitor interface.
+
+ // Adds a |callback| that is invoked with |parameter| after liveness has been
+ // computed on the whole object graph. The |callback| may use the provided
+ // |LivenessBroker| to determine whether an object is considered alive or
+ // dead.
+ //
+ // - Upon returning from the callback all references to dead objects must have
+ // been cleared.
+ // - Any operation that extends the object graph, including allocation
+ // or reviving objects, is prohibited.
+ // - Clearing out pointers is allowed.
+ // - Removing elements from heap collections is allowed as these collections
+ // are aware of custom weakness and won't resize their backings.
+ virtual void RegisterWeakCallback(WeakCallback callback,
+ const void* parameter) {}
+
+  // Registers an instance method using |RegisterWeakCallback|. See the
+  // description of |RegisterWeakCallback| above.
+ template <typename T, void (T::*method)(const LivenessBroker&)>
+ void RegisterWeakCallbackMethod(const T* obj) {
+ RegisterWeakCallback(&WeakCallbackMethodDelegate<T, method>::Trampoline,
+ obj);
+ }
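+
+  // Usage sketch (illustrative only, not part of the original file): a
+  // hypothetical type registering a custom weakness callback from its trace
+  // method.
+  //
+  //   void MyCollection::Trace(Visitor* visitor) const {
+  //     visitor->RegisterWeakCallbackMethod<
+  //         MyCollection, &MyCollection::ProcessCustomWeakness>(this);
+  //   }
+  //   void MyCollection::ProcessCustomWeakness(const LivenessBroker& broker) {
+  //     // Clear references to objects that |broker| reports as dead.
+  //   }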
+
+ // Returns whether the visitor is used in a concurrent setting.
+ virtual bool IsConcurrent() const { return false; }
+
+ // Defers invoking |desc| to the main thread when running concurrently.
+ // Returns true if |desc| has been queued for later processing and false if
+ // running in a non-concurrent setting.
+ //
+ // This can be used to defer processing data structures to the main thread
+ // when support for concurrent processing is missing.
+ virtual bool DeferredTraceIfConcurrent(TraceDescriptor, size_t) {
+ return false;
+ }
+
+ protected:
+ // Visits an object through a strong reference.
+ virtual void Visit(const void*, TraceDescriptor) {}
+
+ // Visits an object through a weak reference.
+ virtual void VisitWeak(const void*,
+ const void*,
+ TraceDescriptor,
+ WeakCallback) {}
+
+ // Visits cross-component references to V8.
+ virtual void Visit(const TraceWrapperV8Reference<v8::Value>&) {}
+
+ virtual void VisitRoot(const void* t,
+ TraceDescriptor desc,
+ const base::Location&) {
+ Visit(t, desc);
+ }
+
+ // Visits ephemeron pairs which are a combination of weak and strong keys and
+ // values.
+ virtual void VisitEphemeron(const void*, TraceDescriptor) {}
+
+ // Visits a container |object| holding ephemeron pairs held from |slot|. The
+ // descriptor |strong_desc| can be used to enforce strong treatment of
+ // |object|. The |weak_desc| descriptor is invoked repeatedly until no
+ // more new objects are found. It is expected that |weak_desc| processing
+  // ultimately results in a call to VisitEphemeron. After marking all reachable
+ // objects, |weak_callback| is invoked with |weak_callback_parameter|. It is
+ // expected that this callback is used to reset non-live entries in the
+ // ephemeron container.
+ virtual void VisitWeakContainer(const void* object,
+ const void* const* slot,
+ TraceDescriptor strong_desc,
+ TraceDescriptor weak_desc,
+ WeakCallback weak_callback,
+ const void* weak_callback_parameter) {}
+
+ virtual void RegisterMovableSlot(const void* const* slot) {}
+
+ template <typename T>
+ static TraceDescriptor TraceDescriptorFor(const T* traceable) {
+ return TraceTrait<T>::GetTraceDescriptor(traceable);
+ }
+
+ private:
+ template <typename T>
+ static void HandleWeakCell(const LivenessBroker&, const void*);
+
+ ThreadState* const state_;
+};
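+
+// Minimal sketch (illustrative only, not part of the original file) of how a
+// garbage-collected class drives this visitor from its trace method. Names
+// are hypothetical.
+//
+//   class Node final : public GarbageCollected<Node> {
+//    public:
+//     void Trace(Visitor* visitor) const {
+//       visitor->Trace(next_);   // strong reference, keeps |next_| alive
+//       visitor->Trace(cache_);  // weak reference, cleared if |cache_| dies
+//     }
+//
+//    private:
+//     Member<Node> next_;
+//     WeakMember<Node> cache_;
+//   };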
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_VISITOR_H_
diff --git a/chromium/third_party/blink/renderer/platform/heap/impl/worklist.h b/chromium/third_party/blink/renderer/platform/heap/impl/worklist.h
new file mode 100644
index 00000000000..a26c61892f6
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/heap/impl/worklist.h
@@ -0,0 +1,469 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Copied and adapted from V8.
+//
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_WORKLIST_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_WORKLIST_H_
+
+#include <atomic>
+#include <cstddef>
+#include <utility>
+
+#include "base/atomicops.h"
+#include "base/check_op.h"
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/synchronization/lock.h"
+#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
+
+namespace blink {
+
+// A concurrent worklist based on segments. Each task gets private
+// push and pop segments. Empty pop segments are swapped with their
+// corresponding push segments. Full push segments are published to a global
+// pool of segments and replaced with empty segments.
+//
+// Work stealing is best effort, i.e., there is no way to inform other tasks
+// of the need for items.
+template <typename _EntryType, int segment_size, int num_tasks = 4>
+class Worklist {
+ USING_FAST_MALLOC(Worklist);
+ using WorklistType = Worklist<_EntryType, segment_size, num_tasks>;
+
+ public:
+ using EntryType = _EntryType;
+
+ static constexpr int kNumTasks = num_tasks;
+
+ class View {
+ DISALLOW_NEW();
+
+ public:
+ View(WorklistType* worklist, int task_id)
+ : worklist_(worklist), task_id_(task_id) {}
+
+ // Pushes an entry onto the worklist.
+ bool Push(EntryType entry) { return worklist_->Push(task_id_, entry); }
+
+ // Pops an entry from the worklist.
+ bool Pop(EntryType* entry) { return worklist_->Pop(task_id_, entry); }
+
+ // Returns true if the local portion of the worklist is empty.
+ bool IsLocalEmpty() const { return worklist_->IsLocalEmpty(task_id_); }
+
+ // Returns true if the worklist is empty. Can only be used from the main
+ // thread without concurrent access.
+ bool IsGlobalEmpty() const { return worklist_->IsGlobalEmpty(); }
+
+ bool IsGlobalPoolEmpty() const { return worklist_->IsGlobalPoolEmpty(); }
+
+ // Returns true if the local portion and the global pool are empty (i.e.
+ // whether the current view cannot pop anymore).
+ bool IsLocalViewEmpty() const {
+ return worklist_->IsLocalViewEmpty(task_id_);
+ }
+
+ void FlushToGlobal() { worklist_->FlushToGlobal(task_id_); }
+
+ size_t LocalPushSegmentSize() const {
+ return worklist_->LocalPushSegmentSize(task_id_);
+ }
+
+ private:
+ WorklistType* const worklist_;
+ const int task_id_;
+ };
+
+ static constexpr size_t kSegmentCapacity = segment_size;
+
+ Worklist() {
+ for (int i = 0; i < kNumTasks; i++) {
+ private_push_segment(i) = NewSegment();
+ private_pop_segment(i) = NewSegment();
+ }
+ }
+
+ ~Worklist() {
+ CHECK(IsGlobalEmpty());
+ for (int i = 0; i < kNumTasks; i++) {
+ DCHECK(private_push_segment(i));
+ DCHECK(private_pop_segment(i));
+ delete private_push_segment(i);
+ delete private_pop_segment(i);
+ }
+ }
+
+ bool Push(int task_id, EntryType entry) {
+ DCHECK_LT(task_id, kNumTasks);
+ DCHECK(private_push_segment(task_id));
+ if (!private_push_segment(task_id)->Push(entry)) {
+ PublishPushSegmentToGlobal(task_id);
+ bool success = private_push_segment(task_id)->Push(entry);
+ ANALYZER_ALLOW_UNUSED(success);
+ DCHECK(success);
+ }
+ return true;
+ }
+
+ bool Pop(int task_id, EntryType* entry) {
+ DCHECK_LT(task_id, kNumTasks);
+ DCHECK(private_pop_segment(task_id));
+ if (!private_pop_segment(task_id)->Pop(entry)) {
+ if (!private_push_segment(task_id)->IsEmpty()) {
+ Segment* tmp = private_pop_segment(task_id);
+ private_pop_segment(task_id) = private_push_segment(task_id);
+ private_push_segment(task_id) = tmp;
+ } else if (!StealPopSegmentFromGlobal(task_id)) {
+ return false;
+ }
+ bool success = private_pop_segment(task_id)->Pop(entry);
+ ANALYZER_ALLOW_UNUSED(success);
+ DCHECK(success);
+ }
+ return true;
+ }
+
+ bool IsLocalEmpty(int task_id) const {
+ return private_pop_segment(task_id)->IsEmpty() &&
+ private_push_segment(task_id)->IsEmpty();
+ }
+
+ bool IsGlobalPoolEmpty() const { return global_pool_.IsEmpty(); }
+
+ bool IsGlobalEmpty() const {
+ for (int i = 0; i < kNumTasks; i++) {
+ if (!IsLocalEmpty(i))
+ return false;
+ }
+ return global_pool_.IsEmpty();
+ }
+
+ bool IsLocalViewEmpty(int task_id) const {
+ return IsLocalEmpty(task_id) && IsGlobalPoolEmpty();
+ }
+
+ size_t LocalSize(int task_id) const {
+ return private_pop_segment(task_id)->Size() +
+ private_push_segment(task_id)->Size();
+ }
+
+ // Thread-safe but may return an outdated result.
+ size_t GlobalPoolSize() const { return global_pool_.Size(); }
+
+ size_t LocalPushSegmentSize(int task_id) const {
+ return private_push_segment(task_id)->Size();
+ }
+
+ // Clears all segments. Frees the global segment pool.
+ //
+ // Assumes that no other tasks are running.
+ void Clear() {
+ for (int i = 0; i < kNumTasks; i++) {
+ private_pop_segment(i)->Clear();
+ private_push_segment(i)->Clear();
+ }
+ global_pool_.Clear();
+ }
+
+ // Calls the specified callback on each element of the deques and replaces
+ // the element with the result of the callback.
+ // The signature of the callback is
+ // bool Callback(EntryType old, EntryType* new).
+ // If the callback returns |false| then the element is removed from the
+ // worklist. Otherwise the |new| entry is updated.
+ //
+ // Assumes that no other tasks are running.
+ template <typename Callback>
+ void Update(Callback callback) {
+ for (int i = 0; i < kNumTasks; i++) {
+ private_pop_segment(i)->Update(callback);
+ private_push_segment(i)->Update(callback);
+ }
+ global_pool_.Update(callback);
+ }
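+
+  // Illustrative callback for Update() (not part of the original file),
+  // assuming a hypothetical IsAlive() predicate: dead entries are dropped,
+  // live ones are kept unchanged.
+  //
+  //   worklist.Update([](EntryType old_entry, EntryType* new_entry) {
+  //     if (!IsAlive(old_entry))
+  //       return false;  // remove from the worklist
+  //     *new_entry = old_entry;
+  //     return true;
+  //   });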
+
+ template <typename Callback>
+ void IterateGlobalPool(Callback callback) {
+ global_pool_.Iterate(callback);
+ }
+
+ void FlushToGlobal(int task_id) {
+ PublishPushSegmentToGlobal(task_id);
+ PublishPopSegmentToGlobal(task_id);
+ }
+
+ void MergeGlobalPool(Worklist* other) {
+ global_pool_.Merge(&other->global_pool_);
+ }
+
+ size_t SizeForTesting() {
+ size_t size = global_pool_.SizeForTesting();
+ for (int i = 0; i < kNumTasks; i++) {
+ size += private_pop_segment(i)->Size() + private_push_segment(i)->Size();
+ }
+ return size;
+ }
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(WorklistTest, SegmentCreate);
+ FRIEND_TEST_ALL_PREFIXES(WorklistTest, SegmentPush);
+ FRIEND_TEST_ALL_PREFIXES(WorklistTest, SegmentPushPop);
+ FRIEND_TEST_ALL_PREFIXES(WorklistTest, SegmentIsEmpty);
+ FRIEND_TEST_ALL_PREFIXES(WorklistTest, SegmentIsFull);
+ FRIEND_TEST_ALL_PREFIXES(WorklistTest, SegmentClear);
+ FRIEND_TEST_ALL_PREFIXES(WorklistTest, SegmentFullPushFails);
+ FRIEND_TEST_ALL_PREFIXES(WorklistTest, SegmentEmptyPopFails);
+ FRIEND_TEST_ALL_PREFIXES(WorklistTest, SegmentUpdateFalse);
+ FRIEND_TEST_ALL_PREFIXES(WorklistTest, SegmentUpdate);
+
+ class Segment {
+ USING_FAST_MALLOC(Segment);
+
+ public:
+ static const size_t kCapacity = kSegmentCapacity;
+
+ Segment() : index_(0) {}
+
+ bool Push(EntryType entry) {
+ if (IsFull())
+ return false;
+ entries_[index_++] = entry;
+ return true;
+ }
+
+ bool Pop(EntryType* entry) {
+ if (IsEmpty())
+ return false;
+ *entry = entries_[--index_];
+ return true;
+ }
+
+ size_t Size() const { return index_; }
+ bool IsEmpty() const { return index_ == 0; }
+ bool IsFull() const { return index_ == kCapacity; }
+ void Clear() { index_ = 0; }
+
+ template <typename Callback>
+ void Update(Callback callback) {
+ size_t new_index = 0;
+ for (size_t i = 0; i < index_; i++) {
+ if (callback(entries_[i], &entries_[new_index])) {
+ new_index++;
+ }
+ }
+ index_ = new_index;
+ }
+
+ template <typename Callback>
+ void Iterate(Callback callback) const {
+ for (size_t i = 0; i < index_; i++) {
+ callback(entries_[i]);
+ }
+ }
+
+ Segment* next() const { return next_; }
+ void set_next(Segment* segment) { next_ = segment; }
+
+ private:
+ Segment* next_;
+ size_t index_;
+ EntryType entries_[kCapacity];
+ };
+
+ struct PrivateSegmentHolder {
+ Segment* private_push_segment;
+ Segment* private_pop_segment;
+ char cache_line_padding[64];
+ };
+
+ class GlobalPool {
+ DISALLOW_NEW();
+
+ public:
+ GlobalPool() : top_(nullptr) {}
+
+ inline void Push(Segment* segment) {
+ base::AutoLock guard(lock_);
+ segment->set_next(top_);
+ set_top(segment);
+ size_.fetch_add(1, std::memory_order_relaxed);
+ }
+
+ inline bool Pop(Segment** segment) {
+ base::AutoLock guard(lock_);
+ if (top_) {
+ DCHECK_LT(0U, size_);
+ size_.fetch_sub(1, std::memory_order_relaxed);
+ *segment = top_;
+ set_top(top_->next());
+ return true;
+ }
+ return false;
+ }
+
+ inline bool IsEmpty() const {
+ return base::subtle::NoBarrier_Load(
+ reinterpret_cast<const base::subtle::AtomicWord*>(&top_)) == 0;
+ }
+
+ inline size_t Size() const {
+ // It is safe to read |size_| without a lock since this variable is
+ // atomic, keeping in mind that threads may not immediately see the new
+ // value when it is updated.
+ return TS_UNCHECKED_READ(size_).load(std::memory_order_relaxed);
+ }
+
+ void Clear() {
+ base::AutoLock guard(lock_);
+ size_.store(0, std::memory_order_relaxed);
+ Segment* current = top_;
+ while (current) {
+ Segment* tmp = current;
+ current = current->next();
+ delete tmp;
+ }
+ set_top(nullptr);
+ }
+
+ // See Worklist::Update.
+ template <typename Callback>
+ void Update(Callback callback) {
+ base::AutoLock guard(lock_);
+ Segment* prev = nullptr;
+ Segment* current = top_;
+ while (current) {
+ current->Update(callback);
+ if (current->IsEmpty()) {
+ DCHECK_LT(0U, size_);
+ size_.fetch_sub(1, std::memory_order_relaxed);
+ if (!prev) {
+ top_ = current->next();
+ } else {
+ prev->set_next(current->next());
+ }
+ Segment* tmp = current;
+ current = current->next();
+ delete tmp;
+ } else {
+ prev = current;
+ current = current->next();
+ }
+ }
+ }
+
+    // See Worklist::IterateGlobalPool.
+ template <typename Callback>
+ void Iterate(Callback callback) {
+ base::AutoLock guard(lock_);
+ for (Segment* current = top_; current; current = current->next()) {
+ current->Iterate(callback);
+ }
+ }
+
+ void Merge(GlobalPool* other) {
+ Segment* top = nullptr;
+ size_t other_size = 0;
+ {
+ base::AutoLock guard(other->lock_);
+ if (!other->top_)
+ return;
+ top = other->top_;
+ other_size = other->size_.load(std::memory_order_relaxed);
+ other->size_.store(0, std::memory_order_relaxed);
+ other->set_top(nullptr);
+ }
+
+ Segment* end = top;
+ while (end->next())
+ end = end->next();
+
+ {
+ base::AutoLock guard(lock_);
+ size_.fetch_add(other_size, std::memory_order_relaxed);
+ end->set_next(top_);
+ set_top(top);
+ }
+ }
+
+ size_t SizeForTesting() {
+ size_t size = 0;
+ base::AutoLock guard(lock_);
+ for (Segment* current = top_; current; current = current->next())
+ size += current->Size();
+ return size;
+ }
+
+ private:
+ void set_top(Segment* segment) {
+ return base::subtle::NoBarrier_Store(
+ reinterpret_cast<base::subtle::AtomicWord*>(&top_),
+ reinterpret_cast<base::subtle::AtomicWord>(segment));
+ }
+
+ mutable base::Lock lock_;
+ Segment* top_ GUARDED_BY(lock_);
+ std::atomic<size_t> size_ GUARDED_BY(lock_){0};
+ };
+
+ ALWAYS_INLINE Segment*& private_push_segment(int task_id) {
+ return private_segments_[task_id].private_push_segment;
+ }
+
+ ALWAYS_INLINE Segment* const& private_push_segment(int task_id) const {
+ return const_cast<const PrivateSegmentHolder*>(private_segments_)[task_id]
+ .private_push_segment;
+ }
+
+ ALWAYS_INLINE Segment*& private_pop_segment(int task_id) {
+ return private_segments_[task_id].private_pop_segment;
+ }
+
+ ALWAYS_INLINE Segment* const& private_pop_segment(int task_id) const {
+ return const_cast<const PrivateSegmentHolder*>(private_segments_)[task_id]
+ .private_pop_segment;
+ }
+
+ ALWAYS_INLINE void PublishPushSegmentToGlobal(int task_id) {
+ if (!private_push_segment(task_id)->IsEmpty()) {
+ global_pool_.Push(private_push_segment(task_id));
+ private_push_segment(task_id) = NewSegment();
+ }
+ }
+
+ ALWAYS_INLINE void PublishPopSegmentToGlobal(int task_id) {
+ if (!private_pop_segment(task_id)->IsEmpty()) {
+ global_pool_.Push(private_pop_segment(task_id));
+ private_pop_segment(task_id) = NewSegment();
+ }
+ }
+
+ ALWAYS_INLINE bool StealPopSegmentFromGlobal(int task_id) {
+ if (global_pool_.IsEmpty())
+ return false;
+ Segment* new_segment = nullptr;
+ if (global_pool_.Pop(&new_segment)) {
+ delete private_pop_segment(task_id);
+ private_pop_segment(task_id) = new_segment;
+ return true;
+ }
+ return false;
+ }
+
+ ALWAYS_INLINE Segment* NewSegment() {
+ // Bottleneck for filtering in crash dumps.
+ return new Segment();
+ }
+
+ PrivateSegmentHolder private_segments_[kNumTasks];
+ GlobalPool global_pool_;
+};
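+
+// Usage sketch (illustrative only, not part of the original file): two tasks
+// sharing a worklist of pointers through per-task views. Entry type and
+// segment size are arbitrary here.
+//
+//   using MarkingItem = void*;
+//   Worklist<MarkingItem, 64 /* segment size */, 2 /* tasks */> worklist;
+//   Worklist<MarkingItem, 64, 2>::View main_view(&worklist, 0);
+//   Worklist<MarkingItem, 64, 2>::View worker_view(&worklist, 1);
+//
+//   MarkingItem item = nullptr;
+//   main_view.Push(item);       // goes to task 0's private push segment
+//   main_view.FlushToGlobal();  // publish so task 1 can steal it
+//   MarkingItem stolen;
+//   if (worker_view.Pop(&stolen)) { /* process |stolen| */ }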
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_WORKLIST_H_