author     Michaël Zasso <targos@protonmail.com>     2023-03-30 12:11:08 +0200
committer  Node.js GitHub Bot <github-bot@iojs.org>  2023-03-31 14:15:23 +0000
commit     f226350fcbebd4449fb0034fdaffa147e4de28ea (patch)
tree       8896397ec8829c238012bfbe9781f4e2d94708bc /deps/v8/src/heap/cppgc
parent     10928cb0a4643a11c02af7bab93fc4b5abe2ce7d (diff)
download   node-new-f226350fcbebd4449fb0034fdaffa147e4de28ea.tar.gz
deps: update V8 to 11.3.244.4
PR-URL: https://github.com/nodejs/node/pull/47251
Reviewed-By: Yagiz Nizipli <yagiz@nizipli.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Rafael Gonzaga <rafael.nunu@hotmail.com>
Reviewed-By: Richard Lau <rlau@redhat.com>
Diffstat (limited to 'deps/v8/src/heap/cppgc')
-rw-r--r--  deps/v8/src/heap/cppgc/DEPS                  |   5
-rw-r--r--  deps/v8/src/heap/cppgc/concurrent-marker.cc  |  14
-rw-r--r--  deps/v8/src/heap/cppgc/gc-info.cc            |  32
-rw-r--r--  deps/v8/src/heap/cppgc/globals.h             |   2
-rw-r--r--  deps/v8/src/heap/cppgc/heap-base.cc          |  44
-rw-r--r--  deps/v8/src/heap/cppgc/heap-base.h           |  13
-rw-r--r--  deps/v8/src/heap/cppgc/heap-page.cc          |   3
-rw-r--r--  deps/v8/src/heap/cppgc/heap.cc               |  11
-rw-r--r--  deps/v8/src/heap/cppgc/marker.cc             |  31
-rw-r--r--  deps/v8/src/heap/cppgc/marking-state.cc      |  47
-rw-r--r--  deps/v8/src/heap/cppgc/marking-state.h       | 113
-rw-r--r--  deps/v8/src/heap/cppgc/marking-verifier.cc   |   6
-rw-r--r--  deps/v8/src/heap/cppgc/marking-verifier.h    |   2
-rw-r--r--  deps/v8/src/heap/cppgc/marking-visitor.cc    |   2
-rw-r--r--  deps/v8/src/heap/cppgc/marking-worklists.cc  |   3
-rw-r--r--  deps/v8/src/heap/cppgc/marking-worklists.h   |  18
-rw-r--r--  deps/v8/src/heap/cppgc/member-storage.cc     |  12
-rw-r--r--  deps/v8/src/heap/cppgc/member-storage.h      |   5
-rw-r--r--  deps/v8/src/heap/cppgc/object-allocator.cc   |  33
-rw-r--r--  deps/v8/src/heap/cppgc/object-allocator.h    |  43
-rw-r--r--  deps/v8/src/heap/cppgc/object-start-bitmap.h |  32
-rw-r--r--  deps/v8/src/heap/cppgc/pointer-policies.cc   |   2
-rw-r--r--  deps/v8/src/heap/cppgc/stats-collector.cc    |  10
-rw-r--r--  deps/v8/src/heap/cppgc/stats-collector.h     |   9
-rw-r--r--  deps/v8/src/heap/cppgc/sweeper.cc            |  59
-rw-r--r--  deps/v8/src/heap/cppgc/write-barrier.cc      |  49
26 files changed, 374 insertions(+), 226 deletions(-)
diff --git a/deps/v8/src/heap/cppgc/DEPS b/deps/v8/src/heap/cppgc/DEPS
index 37049928d5..d5c1d108b8 100644
--- a/deps/v8/src/heap/cppgc/DEPS
+++ b/deps/v8/src/heap/cppgc/DEPS
@@ -1,3 +1,8 @@
include_rules = [
"+include/cppgc",
+ "-src",
+ "+src/base",
+ "+src/heap/base",
+ "+src/heap/cppgc",
+ "+src/tracing/trace-event.h",
]
diff --git a/deps/v8/src/heap/cppgc/concurrent-marker.cc b/deps/v8/src/heap/cppgc/concurrent-marker.cc
index 326b35d1f7..0a02ddf61f 100644
--- a/deps/v8/src/heap/cppgc/concurrent-marker.cc
+++ b/deps/v8/src/heap/cppgc/concurrent-marker.cc
@@ -148,20 +148,6 @@ void ConcurrentMarkingTask::ProcessWorklists(
return;
}
- if (!DrainWorklistWithYielding(
- job_delegate, concurrent_marking_state,
- concurrent_marker_.incremental_marking_schedule(),
- concurrent_marking_state.retrace_marked_objects_worklist(),
- [&concurrent_marking_visitor](HeapObjectHeader* header) {
- BasePage::FromPayload(header)->SynchronizedLoad();
- // Retracing does not increment marked bytes as the object has
- // already been processed before.
- DynamicallyTraceMarkedObject<AccessMode::kAtomic>(
- concurrent_marking_visitor, *header);
- })) {
- return;
- }
-
{
StatsCollector::DisabledConcurrentScope stats_scope(
concurrent_marker_.heap().stats_collector(),
diff --git a/deps/v8/src/heap/cppgc/gc-info.cc b/deps/v8/src/heap/cppgc/gc-info.cc
index ddb294cb5c..fcbf6ac356 100644
--- a/deps/v8/src/heap/cppgc/gc-info.cc
+++ b/deps/v8/src/heap/cppgc/gc-info.cc
@@ -20,68 +20,68 @@ HeapObjectName GetHiddenName(const void*, HeapObjectNameForUnnamedObject) {
} // namespace
// static
-GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
+void EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
FinalizationCallback finalization_callback, NameCallback name_callback) {
- return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
registered_index,
{finalization_callback, trace_callback, name_callback, true});
}
// static
-GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
+void EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
FinalizationCallback finalization_callback) {
- return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
registered_index,
{finalization_callback, trace_callback, GetHiddenName, true});
}
// static
-GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
+void EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
NameCallback name_callback) {
- return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
registered_index, {nullptr, trace_callback, name_callback, true});
}
// static
-GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
+void EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback) {
- return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
registered_index, {nullptr, trace_callback, GetHiddenName, true});
}
// static
-GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
+void EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
FinalizationCallback finalization_callback, NameCallback name_callback) {
- return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
registered_index,
{finalization_callback, trace_callback, name_callback, false});
}
// static
-GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
+void EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
FinalizationCallback finalization_callback) {
- return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
registered_index,
{finalization_callback, trace_callback, GetHiddenName, false});
}
// static
-GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
+void EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
NameCallback name_callback) {
- return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
registered_index, {nullptr, trace_callback, name_callback, false});
}
// static
-GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
+void EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback) {
- return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
registered_index, {nullptr, trace_callback, GetHiddenName, false});
}
diff --git a/deps/v8/src/heap/cppgc/globals.h b/deps/v8/src/heap/cppgc/globals.h
index 84fb389a7e..67ccd37e25 100644
--- a/deps/v8/src/heap/cppgc/globals.h
+++ b/deps/v8/src/heap/cppgc/globals.h
@@ -43,7 +43,7 @@ constexpr size_t kPageSize = 1 << kPageSizeLog2;
constexpr size_t kPageOffsetMask = kPageSize - 1;
constexpr size_t kPageBaseMask = ~kPageOffsetMask;
-#if defined(V8_TARGET_ARCH_ARM64) && defined(V8_OS_MACOS)
+#if defined(V8_TARGET_ARCH_ARM64) && defined(V8_OS_DARWIN)
// No guard pages on ARM64 macOS. This target has 16 kiB pages, meaning that
// the guard pages do not protect anything, since there is no inaccessible
// region surrounding the allocation.
diff --git a/deps/v8/src/heap/cppgc/heap-base.cc b/deps/v8/src/heap/cppgc/heap-base.cc
index f399665b48..bf76376939 100644
--- a/deps/v8/src/heap/cppgc/heap-base.cc
+++ b/deps/v8/src/heap/cppgc/heap-base.cc
@@ -94,41 +94,6 @@ class AgeTableResetter final : protected HeapVisitor<AgeTableResetter> {
};
#endif // defined(CPPGC_YOUNG_GENERATION)
-class PlatformWithPageAllocator final : public cppgc::Platform {
- public:
- explicit PlatformWithPageAllocator(std::shared_ptr<cppgc::Platform> delegate)
- : delegate_(std::move(delegate)),
- page_allocator_(GetGlobalPageAllocator()) {
- // This platform wrapper should only be used if the platform doesn't provide
- // a `PageAllocator`.
- CHECK_NULL(delegate->GetPageAllocator());
- }
- ~PlatformWithPageAllocator() override = default;
-
- PageAllocator* GetPageAllocator() final { return &page_allocator_; }
-
- double MonotonicallyIncreasingTime() final {
- return delegate_->MonotonicallyIncreasingTime();
- }
-
- std::shared_ptr<TaskRunner> GetForegroundTaskRunner() final {
- return delegate_->GetForegroundTaskRunner();
- }
-
- std::unique_ptr<JobHandle> PostJob(TaskPriority priority,
- std::unique_ptr<JobTask> job_task) final {
- return delegate_->PostJob(std::move(priority), std::move(job_task));
- }
-
- TracingController* GetTracingController() final {
- return delegate_->GetTracingController();
- }
-
- private:
- std::shared_ptr<cppgc::Platform> delegate_;
- cppgc::PageAllocator& page_allocator_;
-};
-
} // namespace
HeapBase::HeapBase(
@@ -137,11 +102,7 @@ HeapBase::HeapBase(
StackSupport stack_support, MarkingType marking_support,
SweepingType sweeping_support, GarbageCollector& garbage_collector)
: raw_heap_(this, custom_spaces),
- platform_(platform->GetPageAllocator()
- ? std::move(platform)
- : std::static_pointer_cast<cppgc::Platform>(
- std::make_shared<PlatformWithPageAllocator>(
- std::move(platform)))),
+ platform_(std::move(platform)),
oom_handler_(std::make_unique<FatalOutOfMemoryHandler>(this)),
#if defined(LEAK_SANITIZER)
lsan_page_allocator_(std::make_unique<v8::base::LsanPageAllocator>(
@@ -212,10 +173,13 @@ size_t HeapBase::ExecutePreFinalizers() {
#if defined(CPPGC_YOUNG_GENERATION)
void HeapBase::EnableGenerationalGC() {
DCHECK(in_atomic_pause());
+ if (HeapHandle::is_young_generation_enabled_) return;
// Notify the global flag that the write barrier must always be enabled.
YoungGenerationEnabler::Enable();
// Enable young generation for the current heap.
HeapHandle::is_young_generation_enabled_ = true;
+ // Assume everything that has so far been allocated is young.
+ object_allocator_.MarkAllPagesAsYoung();
}
void HeapBase::ResetRememberedSet() {
diff --git a/deps/v8/src/heap/cppgc/heap-base.h b/deps/v8/src/heap/cppgc/heap-base.h
index 3e4f24cad4..ac1dd3ff5b 100644
--- a/deps/v8/src/heap/cppgc/heap-base.h
+++ b/deps/v8/src/heap/cppgc/heap-base.h
@@ -183,11 +183,6 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
stack_state_of_prev_gc_ = stack_state;
}
- uintptr_t stack_end_of_current_gc() const { return stack_end_of_current_gc_; }
- void SetStackEndOfCurrentGC(uintptr_t stack_end) {
- stack_end_of_current_gc_ = stack_end;
- }
-
void SetInAtomicPauseForTesting(bool value) { in_atomic_pause_ = value; }
virtual void StartIncrementalGarbageCollectionForTesting() = 0;
@@ -203,6 +198,10 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
MarkingType marking_support() const { return marking_support_; }
SweepingType sweeping_support() const { return sweeping_support_; }
+ bool incremental_marking_supported() const {
+ return marking_support_ != MarkingType::kAtomic;
+ }
+
bool generational_gc_supported() const {
const bool supported = is_young_generation_enabled();
#if defined(CPPGC_YOUNG_GENERATION)
@@ -289,10 +288,6 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
EmbedderStackState::kNoHeapPointers;
std::unique_ptr<EmbedderStackState> override_stack_state_;
- // Marker that signals end of the interesting stack region in which on-heap
- // pointers can be found.
- uintptr_t stack_end_of_current_gc_ = 0;
-
bool in_atomic_pause_ = false;
int creation_thread_id_ = v8::base::OS::GetCurrentThreadId();
diff --git a/deps/v8/src/heap/cppgc/heap-page.cc b/deps/v8/src/heap/cppgc/heap-page.cc
index 7e85eeca47..51c2e5b7c6 100644
--- a/deps/v8/src/heap/cppgc/heap-page.cc
+++ b/deps/v8/src/heap/cppgc/heap-page.cc
@@ -202,8 +202,7 @@ void NormalPage::Destroy(NormalPage* page) {
}
NormalPage::NormalPage(HeapBase& heap, BaseSpace& space)
- : BasePage(heap, space, PageType::kNormal),
- object_start_bitmap_(PayloadStart()) {
+ : BasePage(heap, space, PageType::kNormal), object_start_bitmap_() {
DCHECK_LT(kLargeObjectSizeThreshold,
static_cast<size_t>(PayloadEnd() - PayloadStart()));
}
diff --git a/deps/v8/src/heap/cppgc/heap.cc b/deps/v8/src/heap/cppgc/heap.cc
index 13e2fe1993..1f3e70440d 100644
--- a/deps/v8/src/heap/cppgc/heap.cc
+++ b/deps/v8/src/heap/cppgc/heap.cc
@@ -146,8 +146,13 @@ void Heap::StartGarbageCollection(GCConfig config) {
epoch_++;
#if defined(CPPGC_YOUNG_GENERATION)
- if (config.collection_type == CollectionType::kMajor)
+ if (config.collection_type == CollectionType::kMajor &&
+ generational_gc_supported()) {
+ stats_collector()->NotifyUnmarkingStarted(config.collection_type);
+ cppgc::internal::StatsCollector::EnabledScope stats_scope(
+ stats_collector(), cppgc::internal::StatsCollector::kUnmark);
SequentialUnmarker unmarker(raw_heap());
+ }
#endif // defined(CPPGC_YOUNG_GENERATION)
const MarkingConfig marking_config{config.collection_type, config.stack_state,
@@ -161,7 +166,7 @@ void Heap::FinalizeGarbageCollection(StackState stack_state) {
DCHECK(!in_no_gc_scope());
CHECK(!in_disallow_gc_scope());
config_.stack_state = stack_state;
- SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
+ stack()->SetMarkerToCurrentStackPosition();
in_atomic_pause_ = true;
#if defined(CPPGC_YOUNG_GENERATION)
@@ -182,7 +187,7 @@ void Heap::FinalizeGarbageCollection(StackState stack_state) {
const size_t bytes_allocated_in_prefinalizers = ExecutePreFinalizers();
#if CPPGC_VERIFY_HEAP
MarkingVerifier verifier(*this, config_.collection_type);
- verifier.Run(config_.stack_state, stack_end_of_current_gc(),
+ verifier.Run(config_.stack_state,
stats_collector()->marked_bytes_on_current_cycle() +
bytes_allocated_in_prefinalizers);
#endif // CPPGC_VERIFY_HEAP
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
index 306b880857..bc715e1917 100644
--- a/deps/v8/src/heap/cppgc/marker.cc
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -395,15 +395,32 @@ void MarkerBase::ProcessWeakness() {
}
#endif // defined(CPPGC_YOUNG_GENERATION)
- MarkingWorklists::WeakCallbackItem item;
- MarkingWorklists::WeakCallbackWorklist::Local& local =
- mutator_marking_state_.weak_callback_worklist();
- while (local.Pop(&item)) {
- item.callback(broker, item.parameter);
+ {
+ // First, process weak container callbacks.
+ StatsCollector::EnabledScope stats_scope(
+ heap().stats_collector(),
+ StatsCollector::kWeakContainerCallbacksProcessing);
+ MarkingWorklists::WeakCallbackItem item;
+ MarkingWorklists::WeakCallbackWorklist::Local& collections_local =
+ mutator_marking_state_.weak_container_callback_worklist();
+ while (collections_local.Pop(&item)) {
+ item.callback(broker, item.parameter);
+ }
+ }
+ {
+ // Then, process custom weak callbacks.
+ StatsCollector::EnabledScope stats_scope(
+ heap().stats_collector(), StatsCollector::kCustomCallbacksProcessing);
+ MarkingWorklists::WeakCallbackItem item;
+ MarkingWorklists::WeakCustomCallbackWorklist::Local& custom_callbacks =
+ mutator_marking_state_.weak_custom_callback_worklist();
+ while (custom_callbacks.Pop(&item)) {
+ item.callback(broker, item.parameter);
#if defined(CPPGC_YOUNG_GENERATION)
- if (heap().generational_gc_supported())
- heap().remembered_set().AddWeakCallback(item);
+ if (heap().generational_gc_supported())
+ heap().remembered_set().AddWeakCallback(item);
#endif // defined(CPPGC_YOUNG_GENERATION)
+ }
}
if (job_handle) {
diff --git a/deps/v8/src/heap/cppgc/marking-state.cc b/deps/v8/src/heap/cppgc/marking-state.cc
index c64173e1ec..660e6b2aec 100644
--- a/deps/v8/src/heap/cppgc/marking-state.cc
+++ b/deps/v8/src/heap/cppgc/marking-state.cc
@@ -12,6 +12,48 @@
namespace cppgc {
namespace internal {
+void MarkingStateBase::Publish() { marking_worklist_.Publish(); }
+
+BasicMarkingState::BasicMarkingState(HeapBase& heap,
+ MarkingWorklists& marking_worklists,
+ CompactionWorklists* compaction_worklists)
+ : MarkingStateBase(heap, marking_worklists),
+ previously_not_fully_constructed_worklist_(
+ *marking_worklists.previously_not_fully_constructed_worklist()),
+ weak_container_callback_worklist_(
+ *marking_worklists.weak_container_callback_worklist()),
+ parallel_weak_callback_worklist_(
+ *marking_worklists.parallel_weak_callback_worklist()),
+ weak_custom_callback_worklist_(
+ *marking_worklists.weak_custom_callback_worklist()),
+ write_barrier_worklist_(*marking_worklists.write_barrier_worklist()),
+ concurrent_marking_bailout_worklist_(
+ *marking_worklists.concurrent_marking_bailout_worklist()),
+ discovered_ephemeron_pairs_worklist_(
+ *marking_worklists.discovered_ephemeron_pairs_worklist()),
+ ephemeron_pairs_for_processing_worklist_(
+ *marking_worklists.ephemeron_pairs_for_processing_worklist()),
+ weak_containers_worklist_(*marking_worklists.weak_containers_worklist()) {
+ if (compaction_worklists) {
+ movable_slots_worklist_ =
+ std::make_unique<CompactionWorklists::MovableReferencesWorklist::Local>(
+ *compaction_worklists->movable_slots_worklist());
+ }
+}
+
+void BasicMarkingState::Publish() {
+ MarkingStateBase::Publish();
+ previously_not_fully_constructed_worklist_.Publish();
+ weak_container_callback_worklist_.Publish();
+ parallel_weak_callback_worklist_.Publish();
+ weak_custom_callback_worklist_.Publish();
+ write_barrier_worklist_.Publish();
+ concurrent_marking_bailout_worklist_.Publish();
+ discovered_ephemeron_pairs_worklist_.Publish();
+ ephemeron_pairs_for_processing_worklist_.Publish();
+ if (movable_slots_worklist_) movable_slots_worklist_->Publish();
+}
+
void MutatorMarkingState::FlushNotFullyConstructedObjects() {
std::unordered_set<HeapObjectHeader*> objects =
not_fully_constructed_worklist_.Extract<AccessMode::kAtomic>();
@@ -31,5 +73,10 @@ void MutatorMarkingState::FlushDiscoveredEphemeronPairs() {
}
}
+void MutatorMarkingState::Publish() {
+ BasicMarkingState::Publish();
+ retrace_marked_objects_worklist_.Publish();
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/marking-state.h b/deps/v8/src/heap/cppgc/marking-state.h
index ca3656a8d1..4ce1ce4074 100644
--- a/deps/v8/src/heap/cppgc/marking-state.h
+++ b/deps/v8/src/heap/cppgc/marking-state.h
@@ -24,6 +24,7 @@ namespace internal {
class MarkingStateBase {
public:
inline MarkingStateBase(HeapBase&, MarkingWorklists&);
+ virtual ~MarkingStateBase() = default;
MarkingStateBase(const MarkingStateBase&) = delete;
MarkingStateBase& operator=(const MarkingStateBase&) = delete;
@@ -33,7 +34,7 @@ class MarkingStateBase {
inline void PushMarked(HeapObjectHeader&, TraceDescriptor desc);
- void Publish() { marking_worklist_.Publish(); }
+ V8_EXPORT_PRIVATE virtual void Publish();
MarkingWorklists::MarkingWorklist::Local& marking_worklist() {
return marking_worklist_;
@@ -107,15 +108,16 @@ void MarkingStateBase::PushMarked(HeapObjectHeader& header,
class BasicMarkingState : public MarkingStateBase {
public:
- inline BasicMarkingState(HeapBase& heap, MarkingWorklists&,
- CompactionWorklists*);
+ BasicMarkingState(HeapBase& heap, MarkingWorklists&, CompactionWorklists*);
+ ~BasicMarkingState() override = default;
BasicMarkingState(const BasicMarkingState&) = delete;
BasicMarkingState& operator=(const BasicMarkingState&) = delete;
inline void RegisterWeakReferenceIfNeeded(const void*, TraceDescriptor,
WeakCallback, const void*);
- inline void RegisterWeakCallback(WeakCallback, const void*);
+ inline void RegisterWeakContainerCallback(WeakCallback, const void*);
+ inline void RegisterWeakCustomCallback(WeakCallback, const void*);
void RegisterMovableReference(const void** slot) {
if (!movable_slots_worklist_) return;
@@ -136,29 +138,24 @@ class BasicMarkingState : public MarkingStateBase {
inline void AccountMarkedBytes(size_t);
size_t marked_bytes() const { return marked_bytes_; }
- void Publish() {
- MarkingStateBase::Publish();
- previously_not_fully_constructed_worklist_.Publish();
- weak_callback_worklist_.Publish();
- parallel_weak_callback_worklist_.Publish();
- write_barrier_worklist_.Publish();
- concurrent_marking_bailout_worklist_.Publish();
- discovered_ephemeron_pairs_worklist_.Publish();
- ephemeron_pairs_for_processing_worklist_.Publish();
- if (IsCompactionEnabled()) movable_slots_worklist_->Publish();
- }
+ V8_EXPORT_PRIVATE void Publish() override;
MarkingWorklists::PreviouslyNotFullyConstructedWorklist::Local&
previously_not_fully_constructed_worklist() {
return previously_not_fully_constructed_worklist_;
}
- MarkingWorklists::WeakCallbackWorklist::Local& weak_callback_worklist() {
- return weak_callback_worklist_;
+ MarkingWorklists::WeakCallbackWorklist::Local&
+ weak_container_callback_worklist() {
+ return weak_container_callback_worklist_;
}
MarkingWorklists::WeakCallbackWorklist::Local&
parallel_weak_callback_worklist() {
return parallel_weak_callback_worklist_;
}
+ MarkingWorklists::WeakCustomCallbackWorklist::Local&
+ weak_custom_callback_worklist() {
+ return weak_custom_callback_worklist_;
+ }
MarkingWorklists::WriteBarrierWorklist::Local& write_barrier_worklist() {
return write_barrier_worklist_;
}
@@ -177,10 +174,6 @@ class BasicMarkingState : public MarkingStateBase {
MarkingWorklists::WeakContainersWorklist& weak_containers_worklist() {
return weak_containers_worklist_;
}
- MarkingWorklists::RetraceMarkedObjectsWorklist::Local&
- retrace_marked_objects_worklist() {
- return retrace_marked_objects_worklist_;
- }
CompactionWorklists::MovableReferencesWorklist::Local*
movable_slots_worklist() {
@@ -200,15 +193,14 @@ class BasicMarkingState : public MarkingStateBase {
protected:
inline void RegisterWeakContainer(HeapObjectHeader&);
- inline bool IsCompactionEnabled() const {
- return movable_slots_worklist_.get();
- }
-
MarkingWorklists::PreviouslyNotFullyConstructedWorklist::Local
previously_not_fully_constructed_worklist_;
- MarkingWorklists::WeakCallbackWorklist::Local weak_callback_worklist_;
+ MarkingWorklists::WeakCallbackWorklist::Local
+ weak_container_callback_worklist_;
MarkingWorklists::WeakCallbackWorklist::Local
parallel_weak_callback_worklist_;
+ MarkingWorklists::WeakCustomCallbackWorklist::Local
+ weak_custom_callback_worklist_;
MarkingWorklists::WriteBarrierWorklist::Local write_barrier_worklist_;
MarkingWorklists::ConcurrentMarkingBailoutWorklist::Local
concurrent_marking_bailout_worklist_;
@@ -217,8 +209,6 @@ class BasicMarkingState : public MarkingStateBase {
MarkingWorklists::EphemeronPairsWorklist::Local
ephemeron_pairs_for_processing_worklist_;
MarkingWorklists::WeakContainersWorklist& weak_containers_worklist_;
- MarkingWorklists::RetraceMarkedObjectsWorklist::Local
- retrace_marked_objects_worklist_;
// Existence of the worklist (|movable_slot_worklist_| != nullptr) denotes
// that compaction is currently enabled and slots must be recorded.
std::unique_ptr<CompactionWorklists::MovableReferencesWorklist::Local>
@@ -230,32 +220,6 @@ class BasicMarkingState : public MarkingStateBase {
bool in_atomic_pause_ = false;
};
-BasicMarkingState::BasicMarkingState(HeapBase& heap,
- MarkingWorklists& marking_worklists,
- CompactionWorklists* compaction_worklists)
- : MarkingStateBase(heap, marking_worklists),
- previously_not_fully_constructed_worklist_(
- *marking_worklists.previously_not_fully_constructed_worklist()),
- weak_callback_worklist_(*marking_worklists.weak_callback_worklist()),
- parallel_weak_callback_worklist_(
- *marking_worklists.parallel_weak_callback_worklist()),
- write_barrier_worklist_(*marking_worklists.write_barrier_worklist()),
- concurrent_marking_bailout_worklist_(
- *marking_worklists.concurrent_marking_bailout_worklist()),
- discovered_ephemeron_pairs_worklist_(
- *marking_worklists.discovered_ephemeron_pairs_worklist()),
- ephemeron_pairs_for_processing_worklist_(
- *marking_worklists.ephemeron_pairs_for_processing_worklist()),
- weak_containers_worklist_(*marking_worklists.weak_containers_worklist()),
- retrace_marked_objects_worklist_(
- *marking_worklists.retrace_marked_objects_worklist()) {
- if (compaction_worklists) {
- movable_slots_worklist_ =
- std::make_unique<CompactionWorklists::MovableReferencesWorklist::Local>(
- *compaction_worklists->movable_slots_worklist());
- }
-}
-
void BasicMarkingState::RegisterWeakReferenceIfNeeded(
const void* object, TraceDescriptor desc, WeakCallback weak_callback,
const void* parameter) {
@@ -270,10 +234,16 @@ void BasicMarkingState::RegisterWeakReferenceIfNeeded(
parallel_weak_callback_worklist_.Push({weak_callback, parameter});
}
-void BasicMarkingState::RegisterWeakCallback(WeakCallback callback,
- const void* object) {
+void BasicMarkingState::RegisterWeakContainerCallback(WeakCallback callback,
+ const void* object) {
DCHECK_NOT_NULL(callback);
- weak_callback_worklist_.Push({callback, object});
+ weak_container_callback_worklist_.Push({callback, object});
+}
+
+void BasicMarkingState::RegisterWeakCustomCallback(WeakCallback callback,
+ const void* object) {
+ DCHECK_NOT_NULL(callback);
+ weak_custom_callback_worklist_.Push({callback, object});
}
void BasicMarkingState::RegisterWeakContainer(HeapObjectHeader& header) {
@@ -301,7 +271,7 @@ void BasicMarkingState::ProcessWeakContainer(const void* object,
if (!MarkNoPush(header)) return;
// Register final weak processing of the backing store.
- RegisterWeakCallback(callback, data);
+ RegisterWeakContainerCallback(callback, data);
// Weak containers might not require tracing. In such cases the callback in
// the TraceDescriptor will be nullptr. For ephemerons the callback will be
@@ -362,11 +332,14 @@ void BasicMarkingState::AccountMarkedBytes(size_t marked_bytes) {
marked_bytes_ += marked_bytes;
}
-class MutatorMarkingState : public BasicMarkingState {
+class MutatorMarkingState final : public BasicMarkingState {
public:
MutatorMarkingState(HeapBase& heap, MarkingWorklists& marking_worklists,
CompactionWorklists* compaction_worklists)
- : BasicMarkingState(heap, marking_worklists, compaction_worklists) {}
+ : BasicMarkingState(heap, marking_worklists, compaction_worklists),
+ retrace_marked_objects_worklist_(
+ *marking_worklists.retrace_marked_objects_worklist()) {}
+ ~MutatorMarkingState() override = default;
inline bool MarkNoPush(HeapObjectHeader& header) {
return MutatorMarkingState::BasicMarkingState::MarkNoPush(header);
@@ -389,6 +362,13 @@ class MutatorMarkingState : public BasicMarkingState {
inline bool IsMarkedWeakContainer(HeapObjectHeader&);
+ MarkingWorklists::RetraceMarkedObjectsWorklist::Local&
+ retrace_marked_objects_worklist() {
+ return retrace_marked_objects_worklist_;
+ }
+
+ V8_EXPORT_PRIVATE void Publish() override;
+
private:
// Weak containers are strongly retraced during conservative stack scanning.
// Stack scanning happens once per GC at the start of the atomic pause.
@@ -398,13 +378,16 @@ class MutatorMarkingState : public BasicMarkingState {
static constexpr size_t kMaxCacheSize = 8;
public:
- inline bool Contains(const HeapObjectHeader*);
+ inline bool Contains(const HeapObjectHeader*) const;
inline void Insert(const HeapObjectHeader*);
private:
std::vector<const HeapObjectHeader*> recently_retraced_cache_;
size_t last_used_index_ = -1;
} recently_retraced_weak_containers_;
+
+ MarkingWorklists::RetraceMarkedObjectsWorklist::Local
+ retrace_marked_objects_worklist_;
};
void MutatorMarkingState::ReTraceMarkedWeakContainer(cppgc::Visitor& visitor,
@@ -450,7 +433,7 @@ bool MutatorMarkingState::IsMarkedWeakContainer(HeapObjectHeader& header) {
}
bool MutatorMarkingState::RecentlyRetracedWeakContainers::Contains(
- const HeapObjectHeader* header) {
+ const HeapObjectHeader* header) const {
return std::find(recently_retraced_cache_.begin(),
recently_retraced_cache_.end(),
header) != recently_retraced_cache_.end();
@@ -465,13 +448,15 @@ void MutatorMarkingState::RecentlyRetracedWeakContainers::Insert(
recently_retraced_cache_[last_used_index_] = header;
}
-class ConcurrentMarkingState : public BasicMarkingState {
+class ConcurrentMarkingState final : public BasicMarkingState {
public:
ConcurrentMarkingState(HeapBase& heap, MarkingWorklists& marking_worklists,
CompactionWorklists* compaction_worklists)
: BasicMarkingState(heap, marking_worklists, compaction_worklists) {}
- ~ConcurrentMarkingState() { DCHECK_EQ(last_marked_bytes_, marked_bytes_); }
+ ~ConcurrentMarkingState() override {
+ DCHECK_EQ(last_marked_bytes_, marked_bytes_);
+ }
size_t RecentlyMarkedBytes() {
return marked_bytes_ - std::exchange(last_marked_bytes_, marked_bytes_);
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.cc b/deps/v8/src/heap/cppgc/marking-verifier.cc
index 0b649c7d3f..fbd7de335f 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.cc
+++ b/deps/v8/src/heap/cppgc/marking-verifier.cc
@@ -45,8 +45,7 @@ MarkingVerifierBase::MarkingVerifierBase(
collection_type_(collection_type) {}
void MarkingVerifierBase::Run(
- StackState stack_state, uintptr_t stack_end,
- v8::base::Optional<size_t> expected_marked_bytes) {
+ StackState stack_state, v8::base::Optional<size_t> expected_marked_bytes) {
Traverse(heap_.raw_heap());
// Avoid verifying the stack when running with TSAN as the TSAN runtime changes
// stack contents when e.g. working with locks. Specifically, the marker uses
@@ -63,8 +62,7 @@ void MarkingVerifierBase::Run(
#if !defined(THREAD_SANITIZER) && !defined(CPPGC_POINTER_COMPRESSION)
if (stack_state == StackState::kMayContainHeapPointers) {
in_construction_objects_ = &in_construction_objects_stack_;
- heap_.stack()->IteratePointersUnsafe(
- this, reinterpret_cast<const void*>(stack_end));
+ heap_.stack()->IteratePointersUntilMarker(this);
// The objects found through the unsafe iteration are only a subset of the
// regular iteration as they miss objects held alive only from callee-saved
// registers that are never pushed on the stack and SafeStack.
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.h b/deps/v8/src/heap/cppgc/marking-verifier.h
index c966aea51f..5132b3af9f 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.h
+++ b/deps/v8/src/heap/cppgc/marking-verifier.h
@@ -41,7 +41,7 @@ class V8_EXPORT_PRIVATE MarkingVerifierBase
MarkingVerifierBase(const MarkingVerifierBase&) = delete;
MarkingVerifierBase& operator=(const MarkingVerifierBase&) = delete;
- void Run(StackState, uintptr_t, v8::base::Optional<size_t>);
+ void Run(StackState, v8::base::Optional<size_t>);
protected:
MarkingVerifierBase(HeapBase&, CollectionType, VerificationState&,
diff --git a/deps/v8/src/heap/cppgc/marking-visitor.cc b/deps/v8/src/heap/cppgc/marking-visitor.cc
index e479f7f6b4..544b6f8100 100644
--- a/deps/v8/src/heap/cppgc/marking-visitor.cc
+++ b/deps/v8/src/heap/cppgc/marking-visitor.cc
@@ -41,7 +41,7 @@ void MarkingVisitorBase::VisitWeakContainer(const void* object,
void MarkingVisitorBase::RegisterWeakCallback(WeakCallback callback,
const void* object) {
- marking_state_.RegisterWeakCallback(callback, object);
+ marking_state_.RegisterWeakCustomCallback(callback, object);
}
void MarkingVisitorBase::HandleMovableReference(const void** slot) {
diff --git a/deps/v8/src/heap/cppgc/marking-worklists.cc b/deps/v8/src/heap/cppgc/marking-worklists.cc
index 8307f0ed0a..2f9afeddc9 100644
--- a/deps/v8/src/heap/cppgc/marking-worklists.cc
+++ b/deps/v8/src/heap/cppgc/marking-worklists.cc
@@ -15,8 +15,9 @@ void MarkingWorklists::ClearForTesting() {
not_fully_constructed_worklist_.Clear();
previously_not_fully_constructed_worklist_.Clear();
write_barrier_worklist_.Clear();
- weak_callback_worklist_.Clear();
+ weak_container_callback_worklist_.Clear();
parallel_weak_callback_worklist_.Clear();
+ weak_custom_callback_worklist_.Clear();
concurrent_marking_bailout_worklist_.Clear();
discovered_ephemeron_pairs_worklist_.Clear();
ephemeron_pairs_for_processing_worklist_.Clear();
diff --git a/deps/v8/src/heap/cppgc/marking-worklists.h b/deps/v8/src/heap/cppgc/marking-worklists.h
index f2cde89c4c..7ec2e8daf2 100644
--- a/deps/v8/src/heap/cppgc/marking-worklists.h
+++ b/deps/v8/src/heap/cppgc/marking-worklists.h
@@ -76,6 +76,8 @@ class MarkingWorklists {
heap::base::Worklist<HeapObjectHeader*, 16 /* local entries */>;
using WeakCallbackWorklist =
heap::base::Worklist<WeakCallbackItem, 64 /* local entries */>;
+ using WeakCustomCallbackWorklist =
+ heap::base::Worklist<WeakCallbackItem, 16 /* local entries */>;
using WriteBarrierWorklist =
heap::base::Worklist<HeapObjectHeader*, 64 /*local entries */>;
using ConcurrentMarkingBailoutWorklist =
@@ -98,12 +100,15 @@ class MarkingWorklists {
WriteBarrierWorklist* write_barrier_worklist() {
return &write_barrier_worklist_;
}
- WeakCallbackWorklist* weak_callback_worklist() {
- return &weak_callback_worklist_;
+ WeakCallbackWorklist* weak_container_callback_worklist() {
+ return &weak_container_callback_worklist_;
}
WeakCallbackWorklist* parallel_weak_callback_worklist() {
return &parallel_weak_callback_worklist_;
}
+ WeakCustomCallbackWorklist* weak_custom_callback_worklist() {
+ return &weak_custom_callback_worklist_;
+ }
ConcurrentMarkingBailoutWorklist* concurrent_marking_bailout_worklist() {
return &concurrent_marking_bailout_worklist_;
}
@@ -128,9 +133,12 @@ class MarkingWorklists {
PreviouslyNotFullyConstructedWorklist
previously_not_fully_constructed_worklist_;
WriteBarrierWorklist write_barrier_worklist_;
- // Hold weak callbacks which can only invoke on main thread.
- WeakCallbackWorklist weak_callback_worklist_;
- // Hold weak callbacks which can invoke on main or worker thread.
+ // Hold weak callbacks for weak containers (e.g. containers with WeakMembers).
+ WeakCallbackWorklist weak_container_callback_worklist_;
+ // Hold weak custom callbacks (e.g. for containers with UntracedMembers).
+ WeakCustomCallbackWorklist weak_custom_callback_worklist_;
+ // Hold weak callbacks which can invoke on main or worker thread (used for
+ // regular WeakMember).
WeakCallbackWorklist parallel_weak_callback_worklist_;
ConcurrentMarkingBailoutWorklist concurrent_marking_bailout_worklist_;
EphemeronPairsWorklist discovered_ephemeron_pairs_worklist_;
diff --git a/deps/v8/src/heap/cppgc/member-storage.cc b/deps/v8/src/heap/cppgc/member-storage.cc
index c457c60ba4..b315ecfda2 100644
--- a/deps/v8/src/heap/cppgc/member-storage.cc
+++ b/deps/v8/src/heap/cppgc/member-storage.cc
@@ -13,7 +13,8 @@ namespace cppgc {
namespace internal {
#if defined(CPPGC_POINTER_COMPRESSION)
-uintptr_t CageBaseGlobal::g_base_ = CageBaseGlobal::kLowerHalfWordMask;
+alignas(api_constants::kCachelineSize) CageBaseGlobal::Base
+ CageBaseGlobal::g_base_ = {CageBaseGlobal::kLowerHalfWordMask};
#endif // defined(CPPGC_POINTER_COMPRESSION)
// Debugging helpers.
@@ -21,20 +22,21 @@ uintptr_t CageBaseGlobal::g_base_ = CageBaseGlobal::kLowerHalfWordMask;
#if defined(CPPGC_POINTER_COMPRESSION)
extern "C" V8_DONT_STRIP_SYMBOL V8_EXPORT_PRIVATE void*
_cppgc_internal_Decompress_Compressed_Pointer(uint32_t cmprsd) {
- return MemberStorage::Decompress(cmprsd);
+ return CompressedPointer::Decompress(cmprsd);
}
#endif // !defined(CPPGC_POINTER_COMPRESSION)
class MemberDebugHelper final {
public:
- static void* PrintUncompressed(MemberBase* m) {
+ static void* Uncompress(MemberBase<DefaultMemberStorage>* m) {
return const_cast<void*>(m->GetRaw());
}
};
extern "C" V8_DONT_STRIP_SYMBOL V8_EXPORT_PRIVATE void*
-_cppgc_internal_Print_Member(MemberBase* m) {
- return MemberDebugHelper::PrintUncompressed(m);
+_cppgc_internal_Uncompress_Member(void* m) {
+ return MemberDebugHelper::Uncompress(
+ static_cast<MemberBase<DefaultMemberStorage>*>(m));
}
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/member-storage.h b/deps/v8/src/heap/cppgc/member-storage.h
index 829bea28d5..168c79caf1 100644
--- a/deps/v8/src/heap/cppgc/member-storage.h
+++ b/deps/v8/src/heap/cppgc/member-storage.h
@@ -17,12 +17,13 @@ class CageBaseGlobalUpdater final {
static void UpdateCageBase(uintptr_t cage_base) {
CPPGC_DCHECK(CageBaseGlobal::IsBaseConsistent());
CPPGC_DCHECK(0u == (cage_base & CageBaseGlobal::kLowerHalfWordMask));
- CageBaseGlobal::g_base_ = cage_base | CageBaseGlobal::kLowerHalfWordMask;
+ CageBaseGlobal::g_base_.base =
+ cage_base | CageBaseGlobal::kLowerHalfWordMask;
}
static uintptr_t GetCageBase() {
CPPGC_DCHECK(CageBaseGlobal::IsBaseConsistent());
- return CageBaseGlobal::g_base_ & ~CageBaseGlobal::kLowerHalfWordMask;
+ return CageBaseGlobal::g_base_.base & ~CageBaseGlobal::kLowerHalfWordMask;
}
};
#endif // defined(CPPGC_POINTER_COMPRESSION)
diff --git a/deps/v8/src/heap/cppgc/object-allocator.cc b/deps/v8/src/heap/cppgc/object-allocator.cc
index b88ba5c200..f0a394a72d 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.cc
+++ b/deps/v8/src/heap/cppgc/object-allocator.cc
@@ -113,22 +113,23 @@ ObjectAllocator::ObjectAllocator(RawHeap& heap, PageBackend& page_backend,
oom_handler_(oom_handler),
garbage_collector_(garbage_collector) {}
-void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace& space, size_t size,
- AlignVal alignment,
- GCInfoIndex gcinfo) {
- void* memory = OutOfLineAllocateImpl(space, size, alignment, gcinfo);
+void ObjectAllocator::OutOfLineAllocateGCSafePoint(NormalPageSpace& space,
+ size_t size,
+ AlignVal alignment,
+ GCInfoIndex gcinfo,
+ void** object) {
+ *object = OutOfLineAllocateImpl(space, size, alignment, gcinfo);
stats_collector_.NotifySafePointForConservativeCollection();
if (prefinalizer_handler_.IsInvokingPreFinalizers()) {
// Objects allocated during pre finalizers should be allocated as black
// since marking is already done. Atomics are not needed because there is
// no concurrent marking in the background.
- HeapObjectHeader::FromObject(memory).MarkNonAtomic();
+ HeapObjectHeader::FromObject(*object).MarkNonAtomic();
// Resetting the allocation buffer forces all further allocations in pre
// finalizers to go through this slow path.
ReplaceLinearAllocationBuffer(space, stats_collector_, nullptr, 0);
prefinalizer_handler_.NotifyAllocationInPrefinalizer(size);
}
- return memory;
}
void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
@@ -283,6 +284,26 @@ void ObjectAllocator::ResetLinearAllocationBuffers() {
visitor.Traverse(raw_heap_);
}
+void ObjectAllocator::MarkAllPagesAsYoung() {
+ class YoungMarker : public HeapVisitor<YoungMarker> {
+ public:
+ bool VisitNormalPage(NormalPage& page) {
+ MarkRangeAsYoung(page, page.PayloadStart(), page.PayloadEnd());
+ return true;
+ }
+
+ bool VisitLargePage(LargePage& page) {
+ MarkRangeAsYoung(page, page.PayloadStart(), page.PayloadEnd());
+ return true;
+ }
+ } visitor;
+ USE(visitor);
+
+#if defined(CPPGC_YOUNG_GENERATION)
+ visitor.Traverse(raw_heap_);
+#endif // defined(CPPGC_YOUNG_GENERATION)
+}
+
bool ObjectAllocator::in_disallow_gc_scope() const {
return raw_heap_.heap()->in_disallow_gc_scope();
}
diff --git a/deps/v8/src/heap/cppgc/object-allocator.h b/deps/v8/src/heap/cppgc/object-allocator.h
index 77f26ce3b5..82d1441af1 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.h
+++ b/deps/v8/src/heap/cppgc/object-allocator.h
@@ -52,6 +52,7 @@ class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
GCInfoIndex gcinfo, CustomSpaceIndex space_index);
void ResetLinearAllocationBuffers();
+ void MarkAllPagesAsYoung();
private:
bool in_disallow_gc_scope() const;
@@ -61,11 +62,21 @@ class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
inline static RawHeap::RegularSpaceType GetInitialSpaceIndexForSize(
size_t size);
- inline void* AllocateObjectOnSpace(NormalPageSpace& space, size_t size,
- GCInfoIndex gcinfo);
- inline void* AllocateObjectOnSpace(NormalPageSpace& space, size_t size,
- AlignVal alignment, GCInfoIndex gcinfo);
- void* OutOfLineAllocate(NormalPageSpace&, size_t, AlignVal, GCInfoIndex);
+ inline void* AllocateObjectOnSpace(NormalPageSpace&, size_t, GCInfoIndex);
+ inline void* AllocateObjectOnSpace(NormalPageSpace&, size_t, AlignVal,
+ GCInfoIndex);
+ inline void* OutOfLineAllocate(NormalPageSpace&, size_t, AlignVal,
+ GCInfoIndex);
+
+ // Called from the fast path LAB allocation when the LAB capacity cannot fit
+ // the allocation or a large object is requested. Use out parameter as
+ // `V8_PRESERVE_MOST` cannot handle non-void return values.
+ //
+ // Prefer using `OutOfLineAllocate()`.
+ void V8_PRESERVE_MOST OutOfLineAllocateGCSafePoint(NormalPageSpace&, size_t,
+ AlignVal, GCInfoIndex,
+ void**);
+ // Raw allocation, does not emit safepoint for conservative GC.
void* OutOfLineAllocateImpl(NormalPageSpace&, size_t, AlignVal, GCInfoIndex);
bool TryRefillLinearAllocationBuffer(NormalPageSpace&, size_t);
@@ -135,6 +146,14 @@ RawHeap::RegularSpaceType ObjectAllocator::GetInitialSpaceIndexForSize(
return RawHeap::RegularSpaceType::kNormal4;
}
+void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace& space, size_t size,
+ AlignVal alignment,
+ GCInfoIndex gcinfo) {
+ void* object;
+ OutOfLineAllocateGCSafePoint(space, size, alignment, gcinfo, &object);
+ return object;
+}
+
void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace& space,
size_t size, AlignVal alignment,
GCInfoIndex gcinfo) {
@@ -174,13 +193,13 @@ void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace& space,
.SetBit<AccessMode::kAtomic>(reinterpret_cast<ConstAddress>(&filler));
lab_allocation_will_succeed = true;
}
- if (lab_allocation_will_succeed) {
- void* object = AllocateObjectOnSpace(space, size, gcinfo);
- DCHECK_NOT_NULL(object);
- DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(object) & kAlignmentMask);
- return object;
+ if (V8_UNLIKELY(!lab_allocation_will_succeed)) {
+ return OutOfLineAllocate(space, size, alignment, gcinfo);
}
- return OutOfLineAllocate(space, size, alignment, gcinfo);
+ void* object = AllocateObjectOnSpace(space, size, gcinfo);
+ DCHECK_NOT_NULL(object);
+ DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(object) & kAlignmentMask);
+ return object;
}
void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace& space,
@@ -189,7 +208,7 @@ void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace& space,
NormalPageSpace::LinearAllocationBuffer& current_lab =
space.linear_allocation_buffer();
- if (current_lab.size() < size) {
+ if (V8_UNLIKELY(current_lab.size() < size)) {
return OutOfLineAllocate(
space, size, static_cast<AlignVal>(kAllocationGranularity), gcinfo);
}
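
The out-parameter comment above is the key point in this file: OutOfLineAllocateGCSafePoint() cannot return a pointer because V8_PRESERVE_MOST only supports void-returning functions, so the inline OutOfLineAllocate() wrapper restores the usual pointer-returning interface. A minimal standalone sketch of that pattern, illustrative only; the LinearBuffer struct, names, and refill logic below are hypothetical and not cppgc's:

#include <cstddef>

// preserve_most keeps register pressure off the inlined fast path; it is a
// clang extension for x86-64/AArch64, so guard it for portability (sketch only).
#if defined(__clang__) && (defined(__x86_64__) || defined(__aarch64__))
#define SKETCH_PRESERVE_MOST __attribute__((preserve_most))
#else
#define SKETCH_PRESERVE_MOST
#endif

struct LinearBuffer {  // hypothetical stand-in for the linear allocation buffer
  char* start = nullptr;
  size_t size = 0;
};

// Slow path: void return plus an out parameter, mirroring the shape of
// OutOfLineAllocateGCSafePoint() in the diff above.
SKETCH_PRESERVE_MOST void AllocateSlow(LinearBuffer& lab, size_t bytes,
                                       void** result) {
  static char backing[1 << 20];  // pretend refill; real code asks the page backend
  lab.start = backing;
  lab.size = sizeof(backing);
  *result = lab.start;
  lab.start += bytes;
  lab.size -= bytes;
}

// Fast path wrapper: bump-pointer allocation, falling back to the slow path.
inline void* Allocate(LinearBuffer& lab, size_t bytes) {
  if (lab.size >= bytes) {
    void* object = lab.start;
    lab.start += bytes;
    lab.size -= bytes;
    return object;
  }
  void* object;
  AllocateSlow(lab, bytes, &object);  // non-void result travels via the out param
  return object;
}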
diff --git a/deps/v8/src/heap/cppgc/object-start-bitmap.h b/deps/v8/src/heap/cppgc/object-start-bitmap.h
index dff8b6eae3..cf45d29190 100644
--- a/deps/v8/src/heap/cppgc/object-start-bitmap.h
+++ b/deps/v8/src/heap/cppgc/object-start-bitmap.h
@@ -28,7 +28,8 @@ namespace internal {
// - kAllocationGranularity
//
// ObjectStartBitmap supports concurrent reads from multiple threads but
-// only a single mutator thread can write to it.
+// only a single mutator thread can write to it. ObjectStartBitmap relies on
+// being allocated inside the same normal page.
class V8_EXPORT_PRIVATE ObjectStartBitmap {
public:
// Granularity of addresses added to the bitmap.
@@ -39,7 +40,7 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap {
return kReservedForBitmap * kBitsPerCell;
}
- explicit inline ObjectStartBitmap(Address offset);
+ inline ObjectStartBitmap();
// Finds an object header based on a
// address_maybe_pointing_to_the_middle_of_object. Will search for an object
@@ -87,7 +88,6 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap {
inline void ObjectStartIndexAndBit(ConstAddress, size_t*, size_t*) const;
- const Address offset_;
// `fully_populated_` is used to denote that the bitmap is populated with all
// currently allocated objects on the page and is in a consistent state. It is
// used to guard against using the bitmap for finding headers during
@@ -104,7 +104,7 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap {
std::array<uint8_t, kReservedForBitmap> object_start_bit_map_;
};
-ObjectStartBitmap::ObjectStartBitmap(Address offset) : offset_(offset) {
+ObjectStartBitmap::ObjectStartBitmap() {
Clear();
MarkAsFullyPopulated();
}
@@ -113,9 +113,13 @@ template <AccessMode mode>
HeapObjectHeader* ObjectStartBitmap::FindHeader(
ConstAddress address_maybe_pointing_to_the_middle_of_object) const {
DCHECK(fully_populated_);
- DCHECK_LE(offset_, address_maybe_pointing_to_the_middle_of_object);
- size_t object_offset =
- address_maybe_pointing_to_the_middle_of_object - offset_;
+ const size_t page_base = reinterpret_cast<uintptr_t>(
+ address_maybe_pointing_to_the_middle_of_object) &
+ kPageBaseMask;
+ DCHECK_EQ(page_base, reinterpret_cast<uintptr_t>(this) & kPageBaseMask);
+ size_t object_offset = reinterpret_cast<uintptr_t>(
+ address_maybe_pointing_to_the_middle_of_object) &
+ kPageOffsetMask;
size_t object_start_number = object_offset / kAllocationGranularity;
size_t cell_index = object_start_number / kBitsPerCell;
DCHECK_GT(object_start_bit_map_.size(), cell_index);
@@ -129,7 +133,7 @@ HeapObjectHeader* ObjectStartBitmap::FindHeader(
object_start_number =
(cell_index * kBitsPerCell) + (kBitsPerCell - 1) - leading_zeroes;
object_offset = object_start_number * kAllocationGranularity;
- return reinterpret_cast<HeapObjectHeader*>(object_offset + offset_);
+ return reinterpret_cast<HeapObjectHeader*>(page_base + object_offset);
}
template <AccessMode mode>
@@ -178,7 +182,8 @@ uint8_t ObjectStartBitmap::load(size_t cell_index) const {
void ObjectStartBitmap::ObjectStartIndexAndBit(ConstAddress header_address,
size_t* cell_index,
size_t* bit) const {
- const size_t object_offset = header_address - offset_;
+ const size_t object_offset =
+ reinterpret_cast<size_t>(header_address) & kPageOffsetMask;
DCHECK(!(object_offset & kAllocationMask));
const size_t object_start_number = object_offset / kAllocationGranularity;
*cell_index = object_start_number / kBitsPerCell;
@@ -188,6 +193,8 @@ void ObjectStartBitmap::ObjectStartIndexAndBit(ConstAddress header_address,
template <typename Callback>
inline void ObjectStartBitmap::Iterate(Callback callback) const {
+ const Address page_base = reinterpret_cast<Address>(
+ reinterpret_cast<uintptr_t>(this) & kPageBaseMask);
for (size_t cell_index = 0; cell_index < kReservedForBitmap; cell_index++) {
if (!object_start_bit_map_[cell_index]) continue;
@@ -197,7 +204,7 @@ inline void ObjectStartBitmap::Iterate(Callback callback) const {
const size_t object_start_number =
(cell_index * kBitsPerCell) + trailing_zeroes;
const Address object_address =
- offset_ + (kAllocationGranularity * object_start_number);
+ page_base + (kAllocationGranularity * object_start_number);
callback(object_address);
// Clear current object bit in temporary value to advance iteration.
value &= ~(1 << (object_start_number & kCellMask));
@@ -220,8 +227,6 @@ void ObjectStartBitmap::Clear() {
class V8_EXPORT_PRIVATE PlatformAwareObjectStartBitmap
: public ObjectStartBitmap {
public:
- explicit inline PlatformAwareObjectStartBitmap(Address offset);
-
template <AccessMode = AccessMode::kNonAtomic>
inline void SetBit(ConstAddress);
template <AccessMode = AccessMode::kNonAtomic>
@@ -232,9 +237,6 @@ class V8_EXPORT_PRIVATE PlatformAwareObjectStartBitmap
static bool ShouldForceNonAtomic();
};
-PlatformAwareObjectStartBitmap::PlatformAwareObjectStartBitmap(Address offset)
- : ObjectStartBitmap(offset) {}
-
// static
template <AccessMode mode>
bool PlatformAwareObjectStartBitmap::ShouldForceNonAtomic() {
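
The bitmap change above drops the stored offset_ entirely: because the ObjectStartBitmap lives inside the page it describes, the page base can be recovered from any interior address, including the bitmap's own `this`, using the existing page masks. A small standalone check of that arithmetic; the concrete addresses are made up, and the constants mirror the kPageSize/kPageOffsetMask/kPageBaseMask definitions in globals.h (128 KiB pages):

#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr size_t kPageSizeLog2 = 17;  // 128 KiB normal pages, as in globals.h
constexpr uintptr_t kPageSize = uintptr_t{1} << kPageSizeLog2;
constexpr uintptr_t kPageOffsetMask = kPageSize - 1;
constexpr uintptr_t kPageBaseMask = ~kPageOffsetMask;

int main() {
  const uintptr_t page_base = 5 * kPageSize;       // hypothetical, page-aligned start
  const uintptr_t interior = page_base + 0x1a40;   // e.g. the bitmap's `this` or a payload address
  assert((interior & kPageBaseMask) == page_base); // what FindHeader()/Iterate() now compute
  assert((interior & kPageOffsetMask) == 0x1a40);  // what ObjectStartIndexAndBit() now computes
  return 0;
}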
diff --git a/deps/v8/src/heap/cppgc/pointer-policies.cc b/deps/v8/src/heap/cppgc/pointer-policies.cc
index 9087b14d21..0cbde5ede3 100644
--- a/deps/v8/src/heap/cppgc/pointer-policies.cc
+++ b/deps/v8/src/heap/cppgc/pointer-policies.cc
@@ -68,7 +68,7 @@ void SameThreadEnabledCheckingPolicyBase::CheckPointerImpl(
// in progress.
header = &base_page->ObjectHeaderFromInnerAddress<AccessMode::kAtomic>(ptr);
DCHECK_LE(header->ObjectStart(), ptr);
- DCHECK_GT(header->ObjectEnd(), ptr);
+ DCHECK_GT(header->ObjectEnd<AccessMode::kAtomic>(), ptr);
}
if (header) {
DCHECK(!header->IsFree());
diff --git a/deps/v8/src/heap/cppgc/stats-collector.cc b/deps/v8/src/heap/cppgc/stats-collector.cc
index f65309b6f4..ccad82c81d 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.cc
+++ b/deps/v8/src/heap/cppgc/stats-collector.cc
@@ -107,10 +107,18 @@ StatsCollector::Event::Event() {
epoch = epoch_counter.fetch_add(1);
}
+void StatsCollector::NotifyUnmarkingStarted(CollectionType collection_type) {
+ DCHECK_EQ(GarbageCollectionState::kNotRunning, gc_state_);
+ DCHECK_EQ(CollectionType::kMajor, collection_type);
+ gc_state_ = GarbageCollectionState::kUnmarking;
+}
+
void StatsCollector::NotifyMarkingStarted(CollectionType collection_type,
MarkingType marking_type,
IsForcedGC is_forced_gc) {
- DCHECK_EQ(GarbageCollectionState::kNotRunning, gc_state_);
+ DCHECK_IMPLIES(gc_state_ != GarbageCollectionState::kNotRunning,
+ (gc_state_ == GarbageCollectionState::kUnmarking &&
+ collection_type == CollectionType::kMajor));
current_.collection_type = collection_type;
current_.is_forced_gc = is_forced_gc;
current_.marking_type = marking_type;
diff --git a/deps/v8/src/heap/cppgc/stats-collector.h b/deps/v8/src/heap/cppgc/stats-collector.h
index ff040a3dcc..2cf728489d 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.h
+++ b/deps/v8/src/heap/cppgc/stats-collector.h
@@ -53,6 +53,8 @@ namespace internal {
V(MarkVisitCrossThreadPersistents) \
V(MarkVisitStack) \
V(MarkVisitRememberedSets) \
+ V(WeakContainerCallbacksProcessing) \
+ V(CustomCallbacksProcessing) \
V(SweepFinishIfOutOfWork) \
V(SweepInvokePreFinalizers) \
V(SweepInTask) \
@@ -274,7 +276,11 @@ class V8_EXPORT_PRIVATE StatsCollector final {
void NotifySafePointForTesting();
- // Indicates a new garbage collection cycle.
+ // Indicates a new garbage collection cycle. The phase is optional and is only
+ // used for major GC when generational GC is enabled.
+ void NotifyUnmarkingStarted(CollectionType);
+ // Indicates a new minor garbage collection cycle or a major, if generational
+ // GC is not enabled.
void NotifyMarkingStarted(CollectionType, MarkingType, IsForcedGC);
// Indicates that marking of the current garbage collection cycle is
// completed.
@@ -323,6 +329,7 @@ class V8_EXPORT_PRIVATE StatsCollector final {
private:
enum class GarbageCollectionState : uint8_t {
kNotRunning,
+ kUnmarking,
kMarking,
kSweeping
};
diff --git a/deps/v8/src/heap/cppgc/sweeper.cc b/deps/v8/src/heap/cppgc/sweeper.cc
index 3cb96f8baa..953bb0aeab 100644
--- a/deps/v8/src/heap/cppgc/sweeper.cc
+++ b/deps/v8/src/heap/cppgc/sweeper.cc
@@ -413,9 +413,17 @@ class SweepFinalizer final {
using FreeMemoryHandling = SweepingConfig::FreeMemoryHandling;
public:
+ enum class EmptyPageHandling {
+ kDestroy,
+ kReturn,
+ };
+
SweepFinalizer(cppgc::Platform* platform,
- FreeMemoryHandling free_memory_handling)
- : platform_(platform), free_memory_handling_(free_memory_handling) {}
+ FreeMemoryHandling free_memory_handling,
+ EmptyPageHandling empty_page_handling_type)
+ : platform_(platform),
+ free_memory_handling_(free_memory_handling),
+ empty_page_handling_(empty_page_handling_type) {}
void FinalizeHeap(SpaceStates* space_states) {
for (SpaceState& space_state : *space_states) {
@@ -471,8 +479,22 @@ class SweepFinalizer final {
// Unmap page if empty.
if (page_state->is_empty) {
- BasePage::Destroy(page);
- return;
+ if (empty_page_handling_ == EmptyPageHandling::kDestroy ||
+ page->is_large()) {
+ BasePage::Destroy(page);
+ return;
+ }
+
+ // Otherwise, we currently sweep on allocation. Reinitialize the empty
+ // page and return it right away.
+ auto* normal_page = NormalPage::From(page);
+
+ page_state->cached_free_list.Clear();
+ page_state->cached_free_list.Add(
+ {normal_page->PayloadStart(), normal_page->PayloadSize()});
+
+ page_state->unfinalized_free_list.clear();
+ page_state->largest_new_free_list_entry = normal_page->PayloadSize();
}
DCHECK(!page->is_large());
@@ -482,13 +504,15 @@ class SweepFinalizer final {
space_freelist.Append(std::move(page_state->cached_free_list));
// Merge freelist with finalizers.
- std::unique_ptr<FreeHandlerBase> handler =
- (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible)
- ? std::unique_ptr<FreeHandlerBase>(new DiscardingFreeHandler(
- *platform_->GetPageAllocator(), space_freelist, *page))
- : std::unique_ptr<FreeHandlerBase>(new RegularFreeHandler(
- *platform_->GetPageAllocator(), space_freelist, *page));
- handler->FreeFreeList(page_state->unfinalized_free_list);
+ if (!page_state->unfinalized_free_list.empty()) {
+ std::unique_ptr<FreeHandlerBase> handler =
+ (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible)
+ ? std::unique_ptr<FreeHandlerBase>(new DiscardingFreeHandler(
+ *platform_->GetPageAllocator(), space_freelist, *page))
+ : std::unique_ptr<FreeHandlerBase>(new RegularFreeHandler(
+ *platform_->GetPageAllocator(), space_freelist, *page));
+ handler->FreeFreeList(page_state->unfinalized_free_list);
+ }
largest_new_free_list_entry_ = std::max(
page_state->largest_new_free_list_entry, largest_new_free_list_entry_);
@@ -509,6 +533,7 @@ class SweepFinalizer final {
cppgc::Platform* platform_;
size_t largest_new_free_list_entry_ = 0;
const FreeMemoryHandling free_memory_handling_;
+ const EmptyPageHandling empty_page_handling_;
};
class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
@@ -544,7 +569,8 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
const auto deadline = v8::base::TimeTicks::Now() + max_duration;
// First, prioritize finalization of pages that were swept concurrently.
- SweepFinalizer finalizer(platform_, free_memory_handling_);
+ SweepFinalizer finalizer(platform_, free_memory_handling_,
+ SweepFinalizer::EmptyPageHandling::kDestroy);
if (!finalizer.FinalizeSpaceWithDeadline(&state, deadline)) {
return false;
}
@@ -831,7 +857,8 @@ class Sweeper::SweeperImpl final {
{
// First, process unfinalized pages as finalizing a page is faster than
// sweeping.
- SweepFinalizer finalizer(platform_, config_.free_memory_handling);
+ SweepFinalizer finalizer(platform_, config_.free_memory_handling,
+ SweepFinalizer::EmptyPageHandling::kReturn);
while (auto page = space_state.swept_unfinalized_pages.Pop()) {
finalizer.FinalizePage(&*page);
if (size <= finalizer.largest_new_free_list_entry()) {
@@ -924,7 +951,8 @@ class Sweeper::SweeperImpl final {
MutatorThreadSweepingScope sweeping_in_progress(*this);
// First, call finalizers on the mutator thread.
- SweepFinalizer finalizer(platform_, config_.free_memory_handling);
+ SweepFinalizer finalizer(platform_, config_.free_memory_handling,
+ SweepFinalizer::EmptyPageHandling::kDestroy);
finalizer.FinalizeHeap(&space_states_);
// Then, help out the concurrent thread.
@@ -1108,7 +1136,8 @@ class Sweeper::SweeperImpl final {
void SynchronizeAndFinalizeConcurrentSweeping() {
CancelSweepers();
- SweepFinalizer finalizer(platform_, config_.free_memory_handling);
+ SweepFinalizer finalizer(platform_, config_.free_memory_handling,
+ SweepFinalizer::EmptyPageHandling::kDestroy);
finalizer.FinalizeHeap(&space_states_);
}
diff --git a/deps/v8/src/heap/cppgc/write-barrier.cc b/deps/v8/src/heap/cppgc/write-barrier.cc
index 5cbec656a9..a2f1eb4ab4 100644
--- a/deps/v8/src/heap/cppgc/write-barrier.cc
+++ b/deps/v8/src/heap/cppgc/write-barrier.cc
@@ -5,6 +5,7 @@
#include "src/heap/cppgc/write-barrier.h"
#include "include/cppgc/heap-consistency.h"
+#include "include/cppgc/internal/member-storage.h"
#include "include/cppgc/internal/pointer-policies.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
@@ -222,5 +223,53 @@ bool YoungGenerationEnabler::IsEnabled() {
#endif // defined(CPPGC_YOUNG_GENERATION)
+#ifdef CPPGC_SLIM_WRITE_BARRIER
+
+// static
+template <WriteBarrierSlotType SlotType>
+void WriteBarrier::CombinedWriteBarrierSlow(const void* slot) {
+ DCHECK_NOT_NULL(slot);
+
+ const void* value = nullptr;
+#if defined(CPPGC_POINTER_COMPRESSION)
+ if constexpr (SlotType == WriteBarrierSlotType::kCompressed) {
+ value = CompressedPointer::Decompress(
+ *static_cast<const CompressedPointer::IntegralType*>(slot));
+ } else {
+ value = *reinterpret_cast<const void* const*>(slot);
+ }
+#else
+ static_assert(SlotType == WriteBarrierSlotType::kUncompressed);
+ value = *reinterpret_cast<const void* const*>(slot);
+#endif
+
+ WriteBarrier::Params params;
+ const WriteBarrier::Type type =
+ WriteBarrier::GetWriteBarrierType(slot, value, params);
+ switch (type) {
+ case WriteBarrier::Type::kGenerational:
+ WriteBarrier::GenerationalBarrier<
+ WriteBarrier::GenerationalBarrierType::kPreciseSlot>(params, slot);
+ break;
+ case WriteBarrier::Type::kMarking:
+ WriteBarrier::DijkstraMarkingBarrier(params, value);
+ break;
+ case WriteBarrier::Type::kNone:
+ // The fast checks are approximate and may trigger spuriously if any heap
+ // has marking in progress. `GetWriteBarrierType()` above is exact which
+ // is the reason we could still observe a bailout here.
+ break;
+ }
+}
+
+template V8_EXPORT_PRIVATE void WriteBarrier::CombinedWriteBarrierSlow<
+ WriteBarrierSlotType::kUncompressed>(const void* slot);
+#if defined(CPPGC_POINTER_COMPRESSION)
+template V8_EXPORT_PRIVATE void WriteBarrier::CombinedWriteBarrierSlow<
+ WriteBarrierSlotType::kCompressed>(const void* slot);
+#endif // defined(CPPGC_POINTER_COMPRESSION)
+
+#endif // CPPGC_SLIM_WRITE_BARRIER
+
} // namespace internal
} // namespace cppgc
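
For context on the new CombinedWriteBarrierSlow() above: with CPPGC_SLIM_WRITE_BARRIER it is the out-of-line target that a plain Member assignment falls back to when the inline checks in the public headers cannot rule a barrier out. A hedged embedder-side sketch using only the public cppgc API; the Node class and MakeLinkedPair() are invented for illustration:

#include <cppgc/allocation.h>
#include <cppgc/garbage-collected.h>
#include <cppgc/member.h>
#include <cppgc/visitor.h>

class Node final : public cppgc::GarbageCollected<Node> {
 public:
  // A Member write like this is where the write barrier fast path is emitted.
  void set_next(Node* next) { next_ = next; }
  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(next_); }

 private:
  cppgc::Member<Node> next_;
};

// Given an existing cppgc::AllocationHandle& (e.g. from a cppgc::Heap):
Node* MakeLinkedPair(cppgc::AllocationHandle& handle) {
  Node* a = cppgc::MakeGarbageCollected<Node>(handle);
  Node* b = cppgc::MakeGarbageCollected<Node>(handle);
  a->set_next(b);  // may reach WriteBarrier::CombinedWriteBarrierSlow() when
                   // marking or generational barriers are potentially needed
  return a;
}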