Diffstat (limited to 'deps/v8/src/heap/sweeper.cc')
 deps/v8/src/heap/sweeper.cc | 148
 1 file changed, 84 insertions(+), 64 deletions(-)
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 5745c4bf1f..ed9385532d 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
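
This commit replaces the FreeSpaceMayContainInvalidatedSlots parameter with an
explicit SweepingMode that is threaded through all sweeper entry points. The
enum itself is declared in sweeper.h, which is not part of this diff; a
minimal sketch, inferred from the two values used below:

    // Sketch only -- the real declaration lives in src/heap/sweeper.h and is
    // not shown in this diff.
    enum class SweepingMode {
      kEagerDuringGC,     // sweeping inside the final atomic pause of a full GC
      kLazyOrConcurrent,  // lazy or concurrent sweeping after the pause
    };
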
@@ -9,6 +9,7 @@
#include "src/heap/base/active-system-pages.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/free-list-inl.h"
+#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
@@ -189,15 +190,6 @@ Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
return nullptr;
}
-void Sweeper::MergeOldToNewRememberedSetsForSweptPages() {
- base::MutexGuard guard(&mutex_);
-
- ForAllSweepingSpaces([this](AllocationSpace space) {
- SweptList& swept_list = swept_list_[GetSweepSpaceIndex(space)];
- for (Page* p : swept_list) p->MergeOldToNewRememberedSets();
- });
-}
-
void Sweeper::EnsureCompleted() {
if (!sweeping_in_progress_) return;
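
Note that MergeOldToNewRememberedSetsForSweptPages is deleted together with
the call to page->MoveOldToNewRememberedSetForSweeping further down in
PrepareToBeSweptPage: pages no longer move their old-to-new remembered sets
aside before sweeping and merge them back afterwards. Instead, the eager
sweeping path below prunes the old-to-new set in place, while the sweeper is
still allowed to touch it.
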
@@ -205,8 +197,9 @@ void Sweeper::EnsureCompleted() {
// If sweeping is not completed or not running at all, we try to complete it
// here.
- ForAllSweepingSpaces(
- [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
+ ForAllSweepingSpaces([this](AllocationSpace space) {
+ ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0);
+ });
if (job_handle_ && job_handle_->IsValid()) job_handle_->Join();
@@ -218,13 +211,14 @@ void Sweeper::EnsureCompleted() {
void Sweeper::DrainSweepingWorklistForSpace(AllocationSpace space) {
if (!sweeping_in_progress_) return;
- ParallelSweepSpace(space, 0);
+ ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0);
}
void Sweeper::SupportConcurrentSweeping() {
ForAllSweepingSpaces([this](AllocationSpace space) {
const int kMaxPagesToSweepPerSpace = 1;
- ParallelSweepSpace(space, 0, kMaxPagesToSweepPerSpace);
+ ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0,
+ kMaxPagesToSweepPerSpace);
});
}
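
All call sites now name the sweeping mode explicitly. The matching
declarations live in sweeper.h, which is not shown here; they presumably look
roughly like this, with max_pages keeping a default (an assumption, since the
calls above omit it):

    // Presumed shape of the updated declarations (sweeper.h is not part of
    // this diff; the default argument is an assumption).
    int ParallelSweepSpace(AllocationSpace identity, SweepingMode sweeping_mode,
                           int required_freed_bytes, int max_pages = 0);
    int ParallelSweepPage(Page* page, AllocationSpace identity,
                          SweepingMode sweeping_mode);
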
@@ -234,7 +228,7 @@ bool Sweeper::AreSweeperTasksRunning() {
V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
Address free_start, Address free_end, Page* page, Space* space,
- bool non_empty_typed_slots, FreeListRebuildingMode free_list_mode,
+ FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode) {
CHECK_GT(free_end, free_start);
size_t freed_bytes = 0;
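
FreeAndProcessFreedMemory drops the non_empty_typed_slots parameter, which the
function body never touched (no body lines change in this hunk); the renamed
record_free_ranges flag (see RawSweep below) is consumed only by the
remembered-set cleanup.
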
@@ -258,15 +252,33 @@ V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
}
V8_INLINE void Sweeper::CleanupRememberedSetEntriesForFreedMemory(
- Address free_start, Address free_end, Page* page,
- bool non_empty_typed_slots, FreeRangesMap* free_ranges_map,
+ Address free_start, Address free_end, Page* page, bool record_free_ranges,
+ TypedSlotSet::FreeRangesMap* free_ranges_map, SweepingMode sweeping_mode,
InvalidatedSlotsCleanup* old_to_new_cleanup) {
DCHECK_LE(free_start, free_end);
- RememberedSetSweeping::RemoveRange(page, free_start, free_end,
- SlotSet::KEEP_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_OLD>::RemoveRange(page, free_start, free_end,
- SlotSet::KEEP_EMPTY_BUCKETS);
- if (non_empty_typed_slots) {
+ if (sweeping_mode == SweepingMode::kEagerDuringGC) {
+ // New space, and consequently the old-to-new remembered set, is always
+ // empty after a full GC, so there is nothing left to remove from it after
+ // the GC. We would not even be allowed to remove anything: the main thread
+ // owns the old-to-new remembered set by then, and removing entries from a
+ // sweeper thread would race with it.
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, free_start, free_end,
+ SlotSet::KEEP_EMPTY_BUCKETS);
+
+ // While old-to-old slots are only ever added on live objects, they can
+ // still end up in free memory, e.g. when an object is right-trimmed.
+ RememberedSet<OLD_TO_OLD>::RemoveRange(page, free_start, free_end,
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ } else {
+ DCHECK_NULL(page->slot_set<OLD_TO_OLD>());
+ }
+
+ // Old-to-shared isn't reset after a full GC, so it needs to be cleaned
+ // both during and after a full GC.
+ RememberedSet<OLD_TO_SHARED>::RemoveRange(page, free_start, free_end,
+ SlotSet::KEEP_EMPTY_BUCKETS);
+
+ if (record_free_ranges) {
free_ranges_map->insert(std::pair<uint32_t, uint32_t>(
static_cast<uint32_t>(free_start - page->address()),
static_cast<uint32_t>(free_end - page->address())));
@@ -276,17 +288,27 @@ V8_INLINE void Sweeper::CleanupRememberedSetEntriesForFreedMemory(
}
void Sweeper::CleanupInvalidTypedSlotsOfFreeRanges(
- Page* page, const FreeRangesMap& free_ranges_map) {
- if (!free_ranges_map.empty()) {
- TypedSlotSet* old_to_new = page->typed_slot_set<OLD_TO_NEW>();
- if (old_to_new != nullptr) {
- old_to_new->ClearInvalidSlots(free_ranges_map);
- }
- TypedSlotSet* old_to_old = page->typed_slot_set<OLD_TO_OLD>();
- if (old_to_old != nullptr) {
- old_to_old->ClearInvalidSlots(free_ranges_map);
- }
+ Page* page, const TypedSlotSet::FreeRangesMap& free_ranges_map,
+ SweepingMode sweeping_mode) {
+ if (sweeping_mode == SweepingMode::kEagerDuringGC) {
+ page->ClearInvalidTypedSlots<OLD_TO_NEW>(free_ranges_map);
+
+ // Typed old-to-old slot sets are only ever recorded in live code objects.
+ // Moreover, code objects are never right-trimmed, so there cannot be any
+ // slots in a free range.
+ page->AssertNoInvalidTypedSlots<OLD_TO_OLD>(free_ranges_map);
+
+ page->ClearInvalidTypedSlots<OLD_TO_SHARED>(free_ranges_map);
+ return;
}
+
+ DCHECK_EQ(sweeping_mode, SweepingMode::kLazyOrConcurrent);
+
+ // After a full GC there are no old-to-new typed slots. The main thread
+ // could create new slots but not in a free range.
+ page->AssertNoInvalidTypedSlots<OLD_TO_NEW>(free_ranges_map);
+ DCHECK_NULL(page->typed_slot_set<OLD_TO_OLD>());
+ page->ClearInvalidTypedSlots<OLD_TO_SHARED>(free_ranges_map);
}
void Sweeper::ClearMarkBitsAndHandleLivenessStatistics(
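
Taken together, CleanupRememberedSetEntriesForFreedMemory now treats the
untyped remembered sets as follows:

                          kEagerDuringGC   kLazyOrConcurrent
    OLD_TO_NEW ranges     removed          untouched (empty after a full GC,
                                           owned by the main thread)
    OLD_TO_OLD ranges     removed          slot set asserted to be null
    OLD_TO_SHARED ranges  removed          removed (survives a full GC)

CleanupInvalidTypedSlotsOfFreeRanges mirrors this for the typed sets, except
that typed old-to-old slots are asserted absent in both modes, since they are
only ever recorded in live code objects. The Page helpers it calls are
introduced elsewhere in this commit; a plausible sketch of the clearing
helper, assuming it merely wraps the pre-existing
TypedSlotSet::ClearInvalidSlots:

    // Hypothetical sketch; the actual helper is defined outside this file.
    template <RememberedSetType type>
    void Page::ClearInvalidTypedSlots(
        const TypedSlotSet::FreeRangesMap& ranges) {
      if (TypedSlotSet* slots = typed_slot_set<type>()) {
        slots->ClearInvalidSlots(ranges);
      }
    }

AssertNoInvalidTypedSlots is presumably the debug-only counterpart, checking
that no recorded typed slot falls into any of the given free ranges.
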
@@ -305,11 +327,10 @@ void Sweeper::ClearMarkBitsAndHandleLivenessStatistics(
}
}
-int Sweeper::RawSweep(
- Page* p, FreeListRebuildingMode free_list_mode,
- FreeSpaceTreatmentMode free_space_mode,
- FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space,
- const base::MutexGuard& page_guard) {
+int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
+ FreeSpaceTreatmentMode free_space_mode,
+ SweepingMode sweeping_mode,
+ const base::MutexGuard& page_guard) {
Space* space = p->owner();
DCHECK_NOT_NULL(space);
DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
@@ -342,20 +363,21 @@ int Sweeper::RawSweep(
size_t live_bytes = 0;
size_t max_freed_bytes = 0;
- bool non_empty_typed_slots = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
- p->typed_slot_set<OLD_TO_OLD>() != nullptr;
+ bool record_free_ranges = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
+ p->typed_slot_set<OLD_TO_OLD>() != nullptr ||
+ p->typed_slot_set<OLD_TO_SHARED>() != nullptr ||
+ DEBUG_BOOL;
// Clean invalidated slots during the final atomic pause. After resuming
// execution this isn't necessary: invalid old-to-new refs have already been
// removed by mark-compact's update-pointers phase.
InvalidatedSlotsCleanup old_to_new_cleanup =
InvalidatedSlotsCleanup::NoCleanup(p);
- if (invalidated_slots_in_free_space ==
- FreeSpaceMayContainInvalidatedSlots::kYes)
+ if (sweeping_mode == SweepingMode::kEagerDuringGC)
old_to_new_cleanup = InvalidatedSlotsCleanup::OldToNew(p);
// The free ranges map is used for filtering typed slots.
- FreeRangesMap free_ranges_map;
+ TypedSlotSet::FreeRangesMap free_ranges_map;
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
p->object_start_bitmap()->Clear();
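
Note that record_free_ranges above is forced to true in debug builds via
DEBUG_BOOL: the free-ranges map is then populated even on pages without typed
slot sets, so the AssertNoInvalidTypedSlots checks in
CleanupInvalidTypedSlotsOfFreeRanges have actual ranges to verify against.
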
@@ -376,11 +398,10 @@ int Sweeper::RawSweep(
max_freed_bytes =
std::max(max_freed_bytes,
FreeAndProcessFreedMemory(free_start, free_end, p, space,
- non_empty_typed_slots,
free_list_mode, free_space_mode));
CleanupRememberedSetEntriesForFreedMemory(
- free_start, free_end, p, non_empty_typed_slots, &free_ranges_map,
- &old_to_new_cleanup);
+ free_start, free_end, p, record_free_ranges, &free_ranges_map,
+ sweeping_mode, &old_to_new_cleanup);
}
Map map = object.map(cage_base, kAcquireLoad);
// Map might be forwarded during GC.
@@ -406,15 +427,14 @@ int Sweeper::RawSweep(
max_freed_bytes =
std::max(max_freed_bytes,
FreeAndProcessFreedMemory(free_start, free_end, p, space,
- non_empty_typed_slots,
free_list_mode, free_space_mode));
CleanupRememberedSetEntriesForFreedMemory(
- free_start, free_end, p, non_empty_typed_slots, &free_ranges_map,
- &old_to_new_cleanup);
+ free_start, free_end, p, record_free_ranges, &free_ranges_map,
+ sweeping_mode, &old_to_new_cleanup);
}
// Phase 3: Post process the page.
- CleanupInvalidTypedSlotsOfFreeRanges(p, free_ranges_map);
+ CleanupInvalidTypedSlotsOfFreeRanges(p, free_ranges_map, sweeping_mode);
ClearMarkBitsAndHandleLivenessStatistics(p, live_bytes, free_list_mode);
if (active_system_pages_after_sweeping) {
@@ -445,9 +465,9 @@ bool Sweeper::ConcurrentSweepSpace(AllocationSpace identity,
if (page == nullptr) return true;
// Typed slot sets are only recorded on code pages. Code pages
// are not swept concurrently to the application to ensure W^X.
- DCHECK(!page->typed_slot_set<OLD_TO_NEW>() &&
- !page->typed_slot_set<OLD_TO_OLD>());
- ParallelSweepPage(page, identity);
+ DCHECK_NULL(page->typed_slot_set<OLD_TO_NEW>());
+ DCHECK_NULL(page->typed_slot_set<OLD_TO_OLD>());
+ ParallelSweepPage(page, identity, SweepingMode::kLazyOrConcurrent);
}
return false;
}
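
Splitting the combined DCHECK into two DCHECK_NULLs makes a failure report
which slot set was unexpectedly present. Since typed slot sets only exist on
code pages, and code pages are never swept concurrently with the application
(to preserve W^X), the concurrent path can always use
SweepingMode::kLazyOrConcurrent.
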
@@ -455,21 +475,23 @@ bool Sweeper::ConcurrentSweepSpace(AllocationSpace identity,
bool Sweeper::IncrementalSweepSpace(AllocationSpace identity) {
TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING,
ThreadKind::kMain);
+ const double start = heap_->MonotonicallyIncreasingTimeInMs();
if (Page* page = GetSweepingPageSafe(identity)) {
- ParallelSweepPage(page, identity);
+ ParallelSweepPage(page, identity, SweepingMode::kLazyOrConcurrent);
}
+ const double duration = heap_->MonotonicallyIncreasingTimeInMs() - start;
+ heap_->tracer()->AddIncrementalSweepingStep(duration);
return sweeping_list_[GetSweepSpaceIndex(identity)].empty();
}
-int Sweeper::ParallelSweepSpace(
- AllocationSpace identity, int required_freed_bytes, int max_pages,
- FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
+int Sweeper::ParallelSweepSpace(AllocationSpace identity,
+ SweepingMode sweeping_mode,
+ int required_freed_bytes, int max_pages) {
int max_freed = 0;
int pages_freed = 0;
Page* page = nullptr;
while ((page = GetSweepingPageSafe(identity)) != nullptr) {
- int freed =
- ParallelSweepPage(page, identity, invalidated_slots_in_free_space);
+ int freed = ParallelSweepPage(page, identity, sweeping_mode);
++pages_freed;
if (page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
// Free list of a never-allocate page will be dropped later on.
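
The timing added to IncrementalSweepSpace above is likely also why
gc-tracer-inl.h is now included at the top of the file: each incremental step
reports its duration to the GCTracer via AddIncrementalSweepingStep,
presumably to feed the heuristics that size future incremental sweeping work.
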
@@ -484,9 +506,8 @@ int Sweeper::ParallelSweepSpace(
return max_freed;
}
-int Sweeper::ParallelSweepPage(
- Page* page, AllocationSpace identity,
- FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
+int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity,
+ SweepingMode sweeping_mode) {
DCHECK(IsValidSweepingSpace(identity));
// The Scavenger may add already swept pages back.
@@ -507,7 +528,7 @@ int Sweeper::ParallelSweepPage(
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode,
- invalidated_slots_in_free_space, guard);
+ sweeping_mode, guard);
DCHECK(page->SweepingDone());
}
@@ -526,7 +547,7 @@ void Sweeper::EnsurePageIsSwept(Page* page) {
if (IsValidSweepingSpace(space)) {
if (TryRemoveSweepingPageSafe(space, page)) {
// Page was successfully removed and can now be swept.
- ParallelSweepPage(page, space);
+ ParallelSweepPage(page, space, SweepingMode::kLazyOrConcurrent);
} else {
// Some sweeper task already took ownership of that page, wait until
// sweeping is finished.
@@ -593,7 +614,6 @@ void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
DCHECK(!category->is_linked(page->owner()->free_list()));
});
#endif // DEBUG
- page->MoveOldToNewRememberedSetForSweeping();
page->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kPending);
heap_->paged_space(space)->IncreaseAllocatedBytes(
marking_state_->live_bytes(page), page);
@@ -690,7 +710,7 @@ void Sweeper::MakeIterable(Page* page) {
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
RawSweep(page, IGNORE_FREE_LIST, free_space_mode,
- FreeSpaceMayContainInvalidatedSlots::kNo, guard);
+ SweepingMode::kLazyOrConcurrent, guard);
}
} // namespace internal