summary refs log tree commit diff
path: root/deps/v8/src/heap/cppgc/marker.cc
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/src/heap/cppgc/marker.cc')
-rw-r--r-- deps/v8/src/heap/cppgc/marker.cc | 37
1 files changed, 28 insertions, 9 deletions
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
index 0410a4eaea..e792c4c844 100644
--- a/deps/v8/src/heap/cppgc/marker.cc
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -240,6 +240,7 @@ void MarkerBase::StartMarking() {
MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
mutator_marking_state_.Publish();
concurrent_marker_->Start();
+ concurrent_marking_active_ = true;
}
incremental_marking_allocation_observer_ =
std::make_unique<IncrementalMarkingAllocationObserver>(*this);
@@ -255,8 +256,9 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
StatsCollector::kMarkAtomicPrologue);
if (ExitIncrementalMarkingIfNeeded(config_, heap())) {
- // Cancel remaining concurrent/incremental tasks.
- concurrent_marker_->Cancel();
+ // Cancel remaining incremental tasks. Concurrent marking jobs are left to
+ // run in parallel with the atomic pause until the mutator thread runs out
+ // of work.
incremental_marking_handle_.Cancel();
heap().stats_collector()->UnregisterObserver(
incremental_marking_allocation_observer_.get());
@@ -276,6 +278,17 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
MarkNotFullyConstructedObjects();
}
}
+ if (heap().marking_support() ==
+ MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
+ // Start parallel marking.
+ mutator_marking_state_.Publish();
+ if (concurrent_marking_active_) {
+ concurrent_marker_->NotifyIncrementalMutatorStepCompleted();
+ } else {
+ concurrent_marker_->Start();
+ concurrent_marking_active_ = true;
+ }
+ }
}
void MarkerBase::LeaveAtomicPause() {
@@ -414,6 +427,16 @@ void MarkerBase::AdvanceMarkingOnAllocation() {
}
}
+bool MarkerBase::CancelConcurrentMarkingIfNeeded() {
+ if (config_.marking_type != MarkingConfig::MarkingType::kAtomic ||
+ !concurrent_marking_active_)
+ return false;
+
+ concurrent_marker_->Cancel();
+ concurrent_marking_active_ = false;
+ return true;
+}
+
bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
size_t marked_bytes_limit) {
bool is_done = false;
@@ -433,6 +456,9 @@ bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
// adjustment.
is_done = ProcessWorklistsWithDeadline(marked_bytes_limit, deadline);
}
+ if (is_done && CancelConcurrentMarkingIfNeeded()) {
+ is_done = ProcessWorklistsWithDeadline(marked_bytes_limit, deadline);
+ }
schedule_.UpdateMutatorThreadMarkedBytes(
mutator_marking_state_.marked_bytes());
}
@@ -592,13 +618,6 @@ void MarkerBase::WaitForConcurrentMarkingForTesting() {
concurrent_marker_->JoinForTesting();
}
-void MarkerBase::NotifyCompactionCancelled() {
- // Compaction cannot be cancelled while concurrent marking is active.
- DCHECK_EQ(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
- DCHECK_IMPLIES(concurrent_marker_, !concurrent_marker_->IsActive());
- mutator_marking_state_.NotifyCompactionCancelled();
-}
-
Marker::Marker(Key key, HeapBase& heap, cppgc::Platform* platform,
MarkingConfig config)
: MarkerBase(key, heap, platform, config),