Diffstat (limited to 'chromium/v8/src/heap/mark-compact.cc')
-rw-r--r-- | chromium/v8/src/heap/mark-compact.cc | 164
1 file changed, 111 insertions, 53 deletions
diff --git a/chromium/v8/src/heap/mark-compact.cc b/chromium/v8/src/heap/mark-compact.cc
index 951b49507ca..73eab9e2038 100644
--- a/chromium/v8/src/heap/mark-compact.cc
+++ b/chromium/v8/src/heap/mark-compact.cc
@@ -42,6 +42,7 @@
 #include "src/heap/worklist.h"
 #include "src/ic/stub-cache.h"
 #include "src/init/v8.h"
+#include "src/logging/tracing-flags.h"
 #include "src/objects/embedder-data-array-inl.h"
 #include "src/objects/foreign.h"
 #include "src/objects/hash-table-inl.h"
@@ -51,6 +52,7 @@
 #include "src/objects/slots-inl.h"
 #include "src/objects/transitions-inl.h"
 #include "src/tasks/cancelable-task.h"
+#include "src/tracing/tracing-category-observer.h"
 #include "src/utils/utils-inl.h"
 
 namespace v8 {
@@ -82,6 +84,7 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
   virtual ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
       const MemoryChunk* chunk) = 0;
 
+  virtual void VerifyMap(Map map) = 0;
   virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
   virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
   virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
@@ -105,6 +108,8 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
     VerifyRootPointers(start, end);
   }
 
+  void VisitMapPointer(HeapObject object) override { VerifyMap(object.map()); }
+
   void VerifyRoots();
   void VerifyMarkingOnPage(const Page* page, Address start, Address end);
   void VerifyMarking(NewSpace* new_space);
@@ -146,6 +151,7 @@ void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
 }
 
 void MarkingVerifier::VerifyMarking(NewSpace* space) {
+  if (!space) return;
   Address end = space->top();
   // The bottom position is at the start of its page. Allows us to use
   // page->area_start() as start of range on all pages.
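Two patterns recur in the hunks above: verifiers gain a dedicated VisitMapPointer/VerifyMap hook, because with map-word packing the map slot can no longer be visited as an ordinary tagged slot, and every verifier entry point that used to assume a young generation now tolerates a null NewSpace*. A minimal standalone sketch of the null-guard pattern, under the assumption that the heap can be created without a new space (illustrative types, not the V8 API):

    #include <cstdio>

    struct NewSpace { /* pages, top(), ... */ };

    struct Heap {
      NewSpace* new_space = nullptr;  // absent, e.g., in single-generation setups
    };

    void VerifyMarking(NewSpace* space) {
      if (!space) return;  // the guard added throughout this patch
      // ... walk pages and check mark bits ...
    }

    int main() {
      Heap heap;                      // heap created without a new space
      VerifyMarking(heap.new_space);  // safe: returns immediately
      std::puts("verification skipped cleanly");
    }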
@@ -168,6 +174,7 @@ void MarkingVerifier::VerifyMarking(PagedSpace* space) {
 }
 
 void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
+  if (!lo_space) return;
   LargeObjectSpaceObjectIterator it(lo_space);
   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
     if (IsBlackOrGrey(obj)) {
@@ -208,6 +215,8 @@ class FullMarkingVerifier : public MarkingVerifier {
     return marking_state_->IsBlackOrGrey(object);
   }
 
+  void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
+
   void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
     VerifyPointersImpl(start, end);
   }
@@ -235,6 +244,9 @@ class FullMarkingVerifier : public MarkingVerifier {
 
  private:
   V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
+    if (!heap_->IsShared() &&
+        BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap())
+      return;
     CHECK(marking_state_->IsBlackOrGrey(heap_object));
   }
 
@@ -271,11 +283,14 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
     VerifyRootPointers(start, end);
   }
 
+  void VisitMapPointer(HeapObject object) override { VerifyMap(object.map()); }
+
  protected:
   explicit EvacuationVerifier(Heap* heap) : heap_(heap) {}
 
   inline Heap* heap() { return heap_; }
 
+  virtual void VerifyMap(Map map) = 0;
   virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
   virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
   virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
@@ -302,6 +317,7 @@ void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
 }
 
 void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
+  if (!space) return;
   PageRange range(space->first_allocatable_address(), space->top());
   for (auto it = range.begin(); it != range.end();) {
     Page* page = *(it++);
@@ -354,7 +370,7 @@ class FullEvacuationVerifier : public EvacuationVerifier {
       }
     }
   }
-
+  void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
   void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
     VerifyPointersImpl(start, end);
   }
@@ -408,6 +424,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
 #ifdef DEBUG
       state_(IDLE),
 #endif
+      is_shared_heap_(heap->IsShared()),
       was_marked_incrementally_(false),
       evacuation_(false),
       compacting_(false),
@@ -548,6 +565,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
 }
 
 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
+  if (!space) return;
   for (Page* p : PageRange(space->first_allocatable_address(), space->top())) {
     CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
     CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
@@ -555,6 +573,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
 }
 
 void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
+  if (!space) return;
   LargeObjectSpaceObjectIterator it(space);
   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
     CHECK(non_atomic_marking_state()->IsWhite(obj));
@@ -863,9 +882,14 @@ void MarkCompactCollector::Prepare() {
       [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
 
   // All objects are guaranteed to be initialized in atomic pause
-  heap()->new_lo_space()->ResetPendingObject();
-  DCHECK_EQ(heap()->new_space()->top(),
-            heap()->new_space()->original_top_acquire());
+  if (heap()->new_lo_space()) {
+    heap()->new_lo_space()->ResetPendingObject();
+  }
+
+  if (heap()->new_space()) {
+    DCHECK_EQ(heap()->new_space()->top(),
+              heap()->new_space()->original_top_acquire());
+  }
 }
 
 void MarkCompactCollector::FinishConcurrentMarking() {
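The new early-out in VerifyHeapObjectImpl is the shared-heap filter: a client isolate's collector must not mark or verify objects that live in the shared heap, since those belong to the shared isolate's collector. A compilable sketch of the skip rule, with stand-ins for heap_->IsShared() and BasicMemoryChunk::InSharedHeap() (names illustrative, not the V8 API):

    #include <cassert>

    struct HeapObjectStub {
      bool in_shared_heap;  // stands in for BasicMemoryChunk::InSharedHeap()
    };

    struct VerifierStub {
      bool is_shared_heap;  // stands in for heap_->IsShared()

      // Mirrors: if (!heap_->IsShared() &&
      //              BasicMemoryChunk::FromHeapObject(o)->InSharedHeap()) return;
      bool ShouldVerify(const HeapObjectStub& object) const {
        return is_shared_heap || !object.in_shared_heap;
      }
    };

    int main() {
      VerifierStub client{/*is_shared_heap=*/false};
      assert(client.ShouldVerify({/*in_shared_heap=*/false}));   // own object
      assert(!client.ShouldVerify({/*in_shared_heap=*/true}));   // skipped
    }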
@@ -950,26 +974,34 @@ void MarkCompactCollector::SweepArrayBufferExtensions() {
 class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
  public:
   explicit RootMarkingVisitor(MarkCompactCollector* collector)
-      : collector_(collector) {}
+      : collector_(collector), is_shared_heap_(collector->is_shared_heap()) {}
 
   void VisitRootPointer(Root root, const char* description,
                         FullObjectSlot p) final {
+    DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
     MarkObjectByPointer(root, p);
   }
 
   void VisitRootPointers(Root root, const char* description,
                          FullObjectSlot start, FullObjectSlot end) final {
-    for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(root, p);
+    for (FullObjectSlot p = start; p < end; ++p) {
+      MarkObjectByPointer(root, p);
+    }
   }
 
  private:
   V8_INLINE void MarkObjectByPointer(Root root, FullObjectSlot p) {
-    if (!(*p).IsHeapObject()) return;
-
-    collector_->MarkRootObject(root, HeapObject::cast(*p));
+    Object object = *p;
+    if (!object.IsHeapObject()) return;
+    HeapObject heap_object = HeapObject::cast(object);
+    BasicMemoryChunk* target_page =
+        BasicMemoryChunk::FromHeapObject(heap_object);
+    if (!is_shared_heap_ && target_page->InSharedHeap()) return;
+    collector_->MarkRootObject(root, heap_object);
   }
 
   MarkCompactCollector* const collector_;
+  const bool is_shared_heap_;
 };
 
 // This visitor is used to visit the body of special objects held alive by
@@ -991,8 +1023,12 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
     MarkObject(host, *p);
   }
 
+  void VisitMapPointer(HeapObject host) final { MarkObject(host, host.map()); }
+
   void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
     for (ObjectSlot p = start; p < end; ++p) {
+      // The map slot should be handled in VisitMapPointer.
+      DCHECK_NE(host.map_slot(), p);
       DCHECK(!HasWeakHeapObjectTag(*p));
       MarkObject(host, *p);
     }
@@ -1145,6 +1181,7 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
   }
 
   inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
+    DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
     RecordMigratedSlot(host, *p, p.address());
   }
 
@@ -1310,7 +1347,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
       if (mode != MigrationMode::kFast)
         base->ExecuteMigrationObservers(dest, src, dst, size);
     }
-    src.set_map_word(MapWord::FromForwardingAddress(dst));
+    src.set_map_word(MapWord::FromForwardingAddress(dst), kRelaxedStore);
   }
 
   EvacuateVisitorBase(Heap* heap, EvacuationAllocator* local_allocator,
@@ -1439,7 +1476,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
     if (map.visitor_id() == kVisitThinString) {
       HeapObject actual = ThinString::cast(object).unchecked_actual();
       if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
-      object.set_map_word(MapWord::FromForwardingAddress(actual));
+      object.set_map_word(MapWord::FromForwardingAddress(actual),
+                          kRelaxedStore);
       return true;
     }
     // TODO(mlippautz): Handle ConsString.
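The set_map_word() calls above now pass kRelaxedStore: during evacuation the map word of a moved object is overwritten with a forwarding pointer while concurrent marker threads may still be reading it, so the write must be an atomic (relaxed) access rather than a plain, racy store. A standalone sketch of the writer side; the low-bit tagging used here to distinguish a forwarding address from a map pointer is a simplification, not V8's actual MapWord encoding:

    #include <atomic>
    #include <cstdint>

    using Address = uintptr_t;

    struct ObjectHeader {
      std::atomic<Address> map_word;

      // src.set_map_word(MapWord::FromForwardingAddress(dst), kRelaxedStore);
      void SetForwardingAddress(Address dst) {
        map_word.store(dst | 0x1, std::memory_order_relaxed);  // tag low bit
      }

      // map_word(kRelaxedLoad).IsForwardingAddress()
      bool IsForwardingAddress(Address* out) const {
        Address w = map_word.load(std::memory_order_relaxed);
        if ((w & 0x1) == 0) return false;  // still a plain map pointer
        *out = w & ~Address{1};
        return true;
      }
    };

    int main() {
      ObjectHeader old_object{{0x1000}};  // untagged: ordinary map pointer
      old_object.SetForwardingAddress(0x2000);
      Address dst = 0;
      return old_object.IsForwardingAddress(&dst) && dst == 0x2000 ? 0 : 1;
    }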
@@ -1543,7 +1581,7 @@ class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
     HeapObject target_object;
     if (TryEvacuateObject(Page::FromHeapObject(object)->owner_identity(),
                           object, size, &target_object)) {
-      DCHECK(object.map_word().IsForwardingAddress());
+      DCHECK(object.map_word(kRelaxedLoad).IsForwardingAddress());
       return true;
     }
     return false;
@@ -2483,9 +2521,6 @@ void MarkCompactCollector::ClearWeakReferences() {
 }
 
 void MarkCompactCollector::ClearJSWeakRefs() {
-  if (!FLAG_harmony_weak_refs) {
-    return;
-  }
   JSWeakRef weak_ref;
   while (weak_objects_.js_weak_refs.Pop(kMainThreadTask, &weak_ref)) {
     HeapObject target = HeapObject::cast(weak_ref.target());
@@ -2680,7 +2715,7 @@ static inline SlotCallbackResult UpdateSlot(TSlot slot,
                     std::is_same<TSlot, OffHeapObjectSlot>::value,
                 "Only [Full|OffHeap]ObjectSlot and [Full]MaybeObjectSlot are "
                 "expected here");
-  MapWord map_word = heap_obj.map_word();
+  MapWord map_word = heap_obj.map_word(kRelaxedLoad);
   if (map_word.IsForwardingAddress()) {
     DCHECK_IMPLIES(!Heap::InFromPage(heap_obj),
                    MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
@@ -2762,6 +2797,7 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
 
   void VisitRootPointer(Root root, const char* description,
                         FullObjectSlot p) override {
+    DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
     UpdateRootSlotInternal(cage_base_, p);
   }
 
@@ -2821,7 +2857,7 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
 
 static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
                                                         FullObjectSlot p) {
-  MapWord map_word = HeapObject::cast(*p).map_word();
+  MapWord map_word = HeapObject::cast(*p).map_word(kRelaxedLoad);
 
   if (map_word.IsForwardingAddress()) {
     String new_string = String::cast(map_word.ToForwardingAddress());
@@ -2841,18 +2877,23 @@ static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
 void MarkCompactCollector::EvacuatePrologue() {
   // New space.
   NewSpace* new_space = heap()->new_space();
-  // Append the list of new space pages to be processed.
-  for (Page* p :
-       PageRange(new_space->first_allocatable_address(), new_space->top())) {
-    new_space_evacuation_pages_.push_back(p);
-  }
-  new_space->Flip();
-  new_space->ResetLinearAllocationArea();
-  DCHECK_EQ(new_space->Size(), 0);
+  if (new_space) {
+    // Append the list of new space pages to be processed.
+    for (Page* p :
+         PageRange(new_space->first_allocatable_address(), new_space->top())) {
+      new_space_evacuation_pages_.push_back(p);
+    }
+    new_space->Flip();
+    new_space->ResetLinearAllocationArea();
+
+    DCHECK_EQ(new_space->Size(), 0);
+  }
 
-  heap()->new_lo_space()->Flip();
-  heap()->new_lo_space()->ResetPendingObject();
+  if (heap()->new_lo_space()) {
+    heap()->new_lo_space()->Flip();
+    heap()->new_lo_space()->ResetPendingObject();
+  }
 
   // Old space.
   DCHECK(old_space_evacuation_pages_.empty());
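The map_word() reads above correspondingly become map_word(kRelaxedLoad). This is the reader side of the same protocol: pointer-updating code relaxed-loads the map word and, if it holds a forwarding address, rewrites the stale slot to the object's new location. A compilable sketch reusing the same illustrative low-bit encoding (not V8's real MapWord layout):

    #include <atomic>
    #include <cstdint>

    using Address = uintptr_t;

    // Illustrative object header: low bit set means "forwarding address".
    struct Header {
      std::atomic<Address> map_word;
    };

    // A slot holds the address of a heap object's header.
    struct Slot {
      Address value;
    };

    // Mirrors the UpdateSlot() pattern: relaxed-load the map word and, if the
    // referent was moved, rewrite the stale slot to the forwarding address.
    void UpdateSlot(Slot* slot) {
      Header* obj = reinterpret_cast<Header*>(slot->value);
      Address w = obj->map_word.load(std::memory_order_relaxed);  // kRelaxedLoad
      if (w & 0x1) slot->value = w & ~Address{1};  // follow forwarding pointer
    }

    int main() {
      Header new_object{{0}};
      Header old_object{{reinterpret_cast<Address>(&new_object) | 0x1}};
      Slot slot{reinterpret_cast<Address>(&old_object)};
      UpdateSlot(&slot);
      return slot.value == reinterpret_cast<Address>(&new_object) ? 0 : 1;
    }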
@@ -2863,18 +2904,27 @@ void MarkCompactCollector::EvacuatePrologue() {
 
 void MarkCompactCollector::EvacuateEpilogue() {
   aborted_evacuation_candidates_.clear();
+
   // New space.
-  heap()->new_space()->set_age_mark(heap()->new_space()->top());
-  DCHECK_IMPLIES(FLAG_always_promote_young_mc,
-                 heap()->new_space()->Size() == 0);
+  if (heap()->new_space()) {
+    heap()->new_space()->set_age_mark(heap()->new_space()->top());
+    DCHECK_IMPLIES(FLAG_always_promote_young_mc,
+                   heap()->new_space()->Size() == 0);
+  }
+
   // Deallocate unmarked large objects.
   heap()->lo_space()->FreeUnmarkedObjects();
   heap()->code_lo_space()->FreeUnmarkedObjects();
-  heap()->new_lo_space()->FreeUnmarkedObjects();
+  if (heap()->new_lo_space()) {
+    heap()->new_lo_space()->FreeUnmarkedObjects();
+  }
+
   // Old space. Deallocate evacuated candidate pages.
   ReleaseEvacuationCandidates();
+
   // Give pages that are queued to be freed back to the OS.
   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
+
 #ifdef DEBUG
   // Old-to-old slot sets must be empty after evacuation.
   for (Page* p : *heap()->old_space()) {
@@ -3274,19 +3324,21 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
   }
 
   // Promote young generation large objects.
-  IncrementalMarking::NonAtomicMarkingState* marking_state =
-      heap()->incremental_marking()->non_atomic_marking_state();
-
-  for (auto it = heap()->new_lo_space()->begin();
-       it != heap()->new_lo_space()->end();) {
-    LargePage* current = *it;
-    it++;
-    HeapObject object = current->GetObject();
-    DCHECK(!marking_state->IsGrey(object));
-    if (marking_state->IsBlack(object)) {
-      heap_->lo_space()->PromoteNewLargeObject(current);
-      current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
-      evacuation_items.emplace_back(ParallelWorkItem{}, current);
+  if (heap()->new_lo_space()) {
+    IncrementalMarking::NonAtomicMarkingState* marking_state =
+        heap()->incremental_marking()->non_atomic_marking_state();
+
+    for (auto it = heap()->new_lo_space()->begin();
+         it != heap()->new_lo_space()->end();) {
+      LargePage* current = *it;
+      it++;
+      HeapObject object = current->GetObject();
+      DCHECK(!marking_state->IsGrey(object));
+      if (marking_state->IsBlack(object)) {
+        heap_->lo_space()->PromoteNewLargeObject(current);
+        current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+        evacuation_items.emplace_back(ParallelWorkItem{}, current);
+      }
     }
   }
 
@@ -3314,7 +3366,7 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
   Object RetainAs(Object object) override {
     if (object.IsHeapObject()) {
      HeapObject heap_object = HeapObject::cast(object);
-      MapWord map_word = heap_object.map_word();
+      MapWord map_word = heap_object.map_word(kRelaxedLoad);
       if (map_word.IsForwardingAddress()) {
         return map_word.ToForwardingAddress();
       }
@@ -3443,7 +3495,7 @@ void MarkCompactCollector::Evacuate() {
 
   UpdatePointersAfterEvacuation();
 
-  {
+  if (heap()->new_space()) {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
     if (!heap()->new_space()->Rebalance()) {
       heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
@@ -3645,7 +3697,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
       return REMOVE_SLOT;
     }
     if (Heap::InFromPage(heap_object)) {
-      MapWord map_word = heap_object.map_word();
+      MapWord map_word = heap_object.map_word(kRelaxedLoad);
       if (map_word.IsForwardingAddress()) {
         HeapObjectReference::Update(THeapObjectSlot(slot),
                                     map_word.ToForwardingAddress());
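Note the iteration shape preserved inside the new new_lo_space() guard above: the iterator is advanced before PromoteNewLargeObject() unlinks the current page from the space, so removal never invalidates the iterator still in use. A small standalone sketch of that pattern, using std::list as a stand-in for the large-page list:

    #include <cassert>
    #include <list>

    int main() {
      std::list<int> pages{1, 2, 3, 4};
      std::list<int> promoted;
      for (auto it = pages.begin(); it != pages.end();) {
        auto current = it;
        ++it;                     // advance first (mirrors `it++` in the patch)
        if (*current % 2 == 0) {  // stand-in for "object is marked black"
          promoted.push_back(*current);
          pages.erase(current);   // stand-in for PromoteNewLargeObject()
        }
      }
      assert(pages == (std::list<int>{1, 3}));
      assert(promoted == (std::list<int>{2, 4}));
    }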
@@ -3815,6 +3867,8 @@ MarkCompactCollector::CreateRememberedSetUpdatingItem(
 
 int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
     std::vector<std::unique_ptr<UpdatingItem>>* items) {
+  if (!heap()->new_space()) return 0;
+
   // Seed to space pages.
   const Address space_start = heap()->new_space()->first_allocatable_address();
   const Address space_end = heap()->new_space()->top();
@@ -3877,7 +3931,7 @@ class EphemeronTableUpdatingItem : public UpdatingItem {
          it != heap_->ephemeron_remembered_set_.end();) {
       EphemeronHashTable table = it->first;
       auto& indices = it->second;
-      if (table.map_word().IsForwardingAddress()) {
+      if (table.map_word(kRelaxedLoad).IsForwardingAddress()) {
         // The table has moved, and RecordMigratedSlotVisitor::VisitEphemeron
         // inserts entries for the moved table into ephemeron_remembered_set_.
         it = heap_->ephemeron_remembered_set_.erase(it);
@@ -3890,7 +3944,7 @@ class EphemeronTableUpdatingItem : public UpdatingItem {
         HeapObjectSlot key_slot(table.RawFieldOfElementAt(
             EphemeronHashTable::EntryToIndex(InternalIndex(*iti))));
         HeapObject key = key_slot.ToHeapObject();
-        MapWord map_word = key.map_word();
+        MapWord map_word = key.map_word(kRelaxedLoad);
         if (map_word.IsForwardingAddress()) {
           key = map_word.ToForwardingAddress();
           key_slot.StoreHeapObject(key);
@@ -4145,6 +4199,8 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
   }
 
  protected:
+  void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
+
   void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
     VerifyPointersImpl(start, end);
   }
@@ -4213,7 +4269,7 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
       }
     }
   }
-
+  void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
   void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
     VerifyPointersImpl(start, end);
   }
@@ -4482,6 +4538,7 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
   void VisitRootPointers(Root root, const char* description,
                          FullObjectSlot start, FullObjectSlot end) final {
     for (FullObjectSlot p = start; p < end; ++p) {
+      DCHECK(!MapWord::IsPacked((*p).ptr()));
       MarkObjectByPointer(p);
     }
   }
@@ -4539,7 +4596,8 @@ void MinorMarkCompactCollector::CollectGarbage() {
   }
   // Since we promote all surviving large objects immediatelly, all remaining
   // large objects must be dead.
-  // TODO(ulan): Don't free all as soon as we have an intermediate generation.
+  // TODO(v8:11685): Don't free all as soon as we have an intermediate
+  // generation.
   heap()->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
 }
 
@@ -4572,7 +4630,7 @@ void MinorMarkCompactCollector::MakeIterable(
       p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                       ClearRecordedSlots::kNo);
     }
-    Map map = object.synchronized_map();
+    Map map = object.map(kAcquireLoad);
     int size = object.SizeFromMap(map);
     free_start = free_end + size;
   }
@@ -5027,7 +5085,7 @@ void MinorMarkCompactCollector::TraceFragmentation() {
           free_bytes_index++;
         }
      }
-      Map map = object.synchronized_map();
+      Map map = object.map(kAcquireLoad);
       int size = object.SizeFromMap(map);
       live_bytes += size;
       free_start = free_end + size;
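The last hunks replace synchronized_map() with the explicit map(kAcquireLoad). The acquire load pairs with a release store of the map pointer, guaranteeing that the data the map publishes (such as the instance size) is visible before the map pointer itself is observed, which is what lets concurrent code call SizeFromMap() safely. A minimal sketch of the pairing (illustrative types, not the V8 API):

    #include <atomic>

    struct Map {
      int instance_size;
    };

    struct HeapObject {
      std::atomic<Map*> map;

      // object.map(kAcquireLoad)
      Map* AcquireLoadMap() const { return map.load(std::memory_order_acquire); }

      // Publish a fully initialized map; release pairs with the acquire above.
      void ReleaseStoreMap(Map* m) { map.store(m, std::memory_order_release); }
    };

    // object.SizeFromMap(map)
    int SizeFromMap(const Map* m) { return m->instance_size; }

    int main() {
      static Map kMap{32};
      HeapObject o{{nullptr}};
      o.ReleaseStoreMap(&kMap);
      return SizeFromMap(o.AcquireLoadMap()) == 32 ? 0 : 1;
    }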