diff options
author | Michaël Zasso <targos@protonmail.com> | 2023-03-30 12:11:08 +0200 |
---|---|---|
committer | Node.js GitHub Bot <github-bot@iojs.org> | 2023-03-31 14:15:23 +0000 |
commit | f226350fcbebd4449fb0034fdaffa147e4de28ea (patch) | |
tree | 8896397ec8829c238012bfbe9781f4e2d94708bc /deps/v8/src/heap/memory-reducer.cc | |
parent | 10928cb0a4643a11c02af7bab93fc4b5abe2ce7d (diff) | |
download | node-new-f226350fcbebd4449fb0034fdaffa147e4de28ea.tar.gz |
deps: update V8 to 11.3.244.4
PR-URL: https://github.com/nodejs/node/pull/47251
Reviewed-By: Yagiz Nizipli <yagiz@nizipli.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Rafael Gonzaga <rafael.nunu@hotmail.com>
Reviewed-By: Richard Lau <rlau@redhat.com>
Diffstat (limited to 'deps/v8/src/heap/memory-reducer.cc')
-rw-r--r-- | deps/v8/src/heap/memory-reducer.cc | 169 |
1 file changed, 99 insertions, 70 deletions
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc index 2cb2fb3d89..816a2d2f5c 100644 --- a/deps/v8/src/heap/memory-reducer.cc +++ b/deps/v8/src/heap/memory-reducer.cc @@ -25,9 +25,12 @@ MemoryReducer::MemoryReducer(Heap* heap) : heap_(heap), taskrunner_(V8::GetCurrentPlatform()->GetForegroundTaskRunner( reinterpret_cast<v8::Isolate*>(heap->isolate()))), - state_(kDone, 0, 0.0, 0.0, 0), + state_(State::CreateUninitialized()), js_calls_counter_(0), - js_calls_sample_time_ms_(0.0) {} + js_calls_sample_time_ms_(0.0) { + DCHECK(v8_flags.incremental_marking); + DCHECK(v8_flags.memory_reducer); +} MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer) : CancelableTask(memory_reducer->heap()->isolate()), @@ -36,49 +39,49 @@ MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer) void MemoryReducer::TimerTask::RunInternal() { Heap* heap = memory_reducer_->heap(); - Event event; - double time_ms = heap->MonotonicallyIncreasingTimeInMs(); + const double time_ms = heap->MonotonicallyIncreasingTimeInMs(); heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(), heap->OldGenerationAllocationCounter(), heap->EmbedderAllocationCounter()); - bool low_allocation_rate = heap->HasLowAllocationRate(); - bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage(); + const bool low_allocation_rate = heap->HasLowAllocationRate(); + const bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage(); if (v8_flags.trace_gc_verbose) { heap->isolate()->PrintWithTimestamp( "Memory reducer: %s, %s\n", low_allocation_rate ? "low alloc" : "high alloc", optimize_for_memory ? "background" : "foreground"); } - event.type = kTimer; - event.time_ms = time_ms; - // The memory reducer will start incremental markig if + // The memory reducer will start incremental marking if // 1) mutator is likely idle: js call rate is low and allocation rate is low. // 2) mutator is in background: optimize for memory flag is set. 
- event.should_start_incremental_gc = - low_allocation_rate || optimize_for_memory; - event.can_start_incremental_gc = + const Event event{ + kTimer, + time_ms, + heap->CommittedOldGenerationMemory(), + false, + low_allocation_rate || optimize_for_memory, heap->incremental_marking()->IsStopped() && - (heap->incremental_marking()->CanBeStarted() || optimize_for_memory); - event.committed_memory = heap->CommittedOldGenerationMemory(); + (heap->incremental_marking()->CanBeStarted() || optimize_for_memory), + }; memory_reducer_->NotifyTimer(event); } void MemoryReducer::NotifyTimer(const Event& event) { DCHECK_EQ(kTimer, event.type); - DCHECK_EQ(kWait, state_.action); + DCHECK_EQ(kWait, state_.id()); state_ = Step(state_, event); - if (state_.action == kRun) { + if (state_.id() == kRun) { DCHECK(heap()->incremental_marking()->IsStopped()); DCHECK(v8_flags.incremental_marking); if (v8_flags.trace_gc_verbose) { heap()->isolate()->PrintWithTimestamp("Memory reducer: started GC #%d\n", - state_.started_gcs); + state_.started_gcs()); } - heap()->StartIdleIncrementalMarking( - GarbageCollectionReason::kMemoryReducer, - kGCCallbackFlagCollectAllExternalMemory); - } else if (state_.action == kWait) { + heap()->StartIncrementalMarking(Heap::kReduceMemoryFootprintMask, + GarbageCollectionReason::kMemoryReducer, + kGCCallbackFlagCollectAllExternalMemory); + } else if (state_.id() == kWait) { if (!heap()->incremental_marking()->IsStopped() && heap()->ShouldOptimizeForMemoryUsage()) { // Make progress with pending incremental marking if memory usage has @@ -87,114 +90,140 @@ void MemoryReducer::NotifyTimer(const Event& event) { heap()->incremental_marking()->AdvanceAndFinalizeIfComplete(); } // Re-schedule the timer. 
- ScheduleTimer(state_.next_gc_start_ms - event.time_ms); + ScheduleTimer(state_.next_gc_start_ms() - event.time_ms); if (v8_flags.trace_gc_verbose) { heap()->isolate()->PrintWithTimestamp( "Memory reducer: waiting for %.f ms\n", - state_.next_gc_start_ms - event.time_ms); + state_.next_gc_start_ms() - event.time_ms); } } } - -void MemoryReducer::NotifyMarkCompact(const Event& event) { - DCHECK_EQ(kMarkCompact, event.type); - Action old_action = state_.action; +void MemoryReducer::NotifyMarkCompact(size_t committed_memory_before) { + if (!v8_flags.incremental_marking) return; + const size_t committed_memory = heap()->CommittedOldGenerationMemory(); + + // Trigger one more GC if + // - this GC decreased committed memory, + // - there is high fragmentation, + const MemoryReducer::Event event{ + MemoryReducer::kMarkCompact, + heap()->MonotonicallyIncreasingTimeInMs(), + committed_memory, + (committed_memory_before > committed_memory + MB) || + heap()->HasHighFragmentation(), + false, + false}; + const Id old_action = state_.id(); + int old_started_gcs = state_.started_gcs(); state_ = Step(state_, event); - if (old_action != kWait && state_.action == kWait) { + if (old_action != kWait && state_.id() == kWait) { // If we are transitioning to the WAIT state, start the timer. - ScheduleTimer(state_.next_gc_start_ms - event.time_ms); + ScheduleTimer(state_.next_gc_start_ms() - event.time_ms); } if (old_action == kRun) { if (v8_flags.trace_gc_verbose) { heap()->isolate()->PrintWithTimestamp( - "Memory reducer: finished GC #%d (%s)\n", state_.started_gcs, - state_.action == kWait ? "will do more" : "done"); + "Memory reducer: finished GC #%d (%s)\n", old_started_gcs, + state_.id() == kWait ? 
"will do more" : "done"); } } } -void MemoryReducer::NotifyPossibleGarbage(const Event& event) { - DCHECK_EQ(kPossibleGarbage, event.type); - Action old_action = state_.action; +void MemoryReducer::NotifyPossibleGarbage() { + const MemoryReducer::Event event{MemoryReducer::kPossibleGarbage, + heap()->MonotonicallyIncreasingTimeInMs(), + 0, + false, + false, + false}; + const Id old_action = state_.id(); state_ = Step(state_, event); - if (old_action != kWait && state_.action == kWait) { + if (old_action != kWait && state_.id() == kWait) { // If we are transitioning to the WAIT state, start the timer. - ScheduleTimer(state_.next_gc_start_ms - event.time_ms); + ScheduleTimer(state_.next_gc_start_ms() - event.time_ms); } } - bool MemoryReducer::WatchdogGC(const State& state, const Event& event) { - return state.last_gc_time_ms != 0 && - event.time_ms > state.last_gc_time_ms + kWatchdogDelayMs; + return state.last_gc_time_ms() != 0 && + event.time_ms > state.last_gc_time_ms() + kWatchdogDelayMs; } // For specification of this function see the comment for MemoryReducer class. 
MemoryReducer::State MemoryReducer::Step(const State& state, const Event& event) { - if (!v8_flags.incremental_marking || !v8_flags.memory_reducer) { - return State(kDone, 0, 0, state.last_gc_time_ms, 0); - } - switch (state.action) { + DCHECK(v8_flags.memory_reducer); + DCHECK(v8_flags.incremental_marking); + + switch (state.id()) { case kDone: + CHECK_IMPLIES( + v8_flags.memory_reducer_single_gc, + state.started_gcs() == 0 || state.started_gcs() == kMaxNumberOfGCs); if (event.type == kTimer) { return state; } else if (event.type == kMarkCompact) { if (event.committed_memory < std::max( - static_cast<size_t>(state.committed_memory_at_last_run * + static_cast<size_t>(state.committed_memory_at_last_run() * kCommittedMemoryFactor), - state.committed_memory_at_last_run + kCommittedMemoryDelta)) { + state.committed_memory_at_last_run() + kCommittedMemoryDelta)) { return state; } else { - return State(kWait, 0, event.time_ms + kLongDelayMs, event.time_ms, - 0); + return State::CreateWait(0, event.time_ms + kLongDelayMs, + event.time_ms); } } else { DCHECK_EQ(kPossibleGarbage, event.type); - return State(kWait, 0, - event.time_ms + v8_flags.gc_memory_reducer_start_delay_ms, - state.last_gc_time_ms, 0); + return State::CreateWait( + 0, event.time_ms + v8_flags.gc_memory_reducer_start_delay_ms, + state.last_gc_time_ms()); } case kWait: + CHECK_IMPLIES(v8_flags.memory_reducer_single_gc, + state.started_gcs() == 0); switch (event.type) { case kPossibleGarbage: return state; case kTimer: - if (state.started_gcs >= kMaxNumberOfGCs) { - return State(kDone, kMaxNumberOfGCs, 0.0, state.last_gc_time_ms, - event.committed_memory); + if (state.started_gcs() >= kMaxNumberOfGCs) { + return State::CreateDone(state.last_gc_time_ms(), + event.committed_memory); } else if (event.can_start_incremental_gc && (event.should_start_incremental_gc || WatchdogGC(state, event))) { - if (state.next_gc_start_ms <= event.time_ms) { - return State(kRun, state.started_gcs + 1, 0.0, - 
state.last_gc_time_ms, 0); + if (state.next_gc_start_ms() <= event.time_ms) { + return State::CreateRun(state.started_gcs() + 1); } else { return state; } } else { - return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs, - state.last_gc_time_ms, 0); + return State::CreateWait(state.started_gcs(), + event.time_ms + kLongDelayMs, + state.last_gc_time_ms()); } case kMarkCompact: - return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs, - event.time_ms, 0); + return State::CreateWait(state.started_gcs(), + event.time_ms + kLongDelayMs, event.time_ms); } case kRun: - if (event.type != kMarkCompact) { - return state; - } else { - if (state.started_gcs < kMaxNumberOfGCs && - (event.next_gc_likely_to_collect_more || state.started_gcs == 1)) { - return State(kWait, state.started_gcs, event.time_ms + kShortDelayMs, - event.time_ms, 0); + CHECK_IMPLIES(v8_flags.memory_reducer_single_gc, + state.started_gcs() == 1); + if (event.type == kMarkCompact) { + if (!v8_flags.memory_reducer_single_gc && + state.started_gcs() < kMaxNumberOfGCs && + (event.next_gc_likely_to_collect_more || + state.started_gcs() == 1)) { + return State::CreateWait(state.started_gcs(), + event.time_ms + kShortDelayMs, + event.time_ms); } else { - return State(kDone, kMaxNumberOfGCs, 0.0, event.time_ms, - event.committed_memory); + return State::CreateDone(event.time_ms, event.committed_memory); } + } else { + return state; } } UNREACHABLE(); @@ -209,7 +238,7 @@ void MemoryReducer::ScheduleTimer(double delay_ms) { (delay_ms + kSlackMs) / 1000.0); } -void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0, 0); } +void MemoryReducer::TearDown() { state_ = State::CreateUninitialized(); } } // namespace internal } // namespace v8 |