author     Lorry Tar Creator <lorry-tar-importer@lorry>   2017-06-27 06:07:23 +0000
committer  Lorry Tar Creator <lorry-tar-importer@lorry>   2017-06-27 06:07:23 +0000
commit     1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree       46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/heap/SlotVisitor.cpp
parent     32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
Diffstat (limited to 'Source/JavaScriptCore/heap/SlotVisitor.cpp')
-rw-r--r--  Source/JavaScriptCore/heap/SlotVisitor.cpp  926
1 file changed, 697 insertions, 229 deletions
diff --git a/Source/JavaScriptCore/heap/SlotVisitor.cpp b/Source/JavaScriptCore/heap/SlotVisitor.cpp
index 4fd0da725..f0260fc2f 100644
--- a/Source/JavaScriptCore/heap/SlotVisitor.cpp
+++ b/Source/JavaScriptCore/heap/SlotVisitor.cpp
@@ -1,29 +1,88 @@
+/*
+ * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
 #include "config.h"
 #include "SlotVisitor.h"
 
-#include "SlotVisitorInlines.h"
+#include "CPU.h"
 #include "ConservativeRoots.h"
-#include "CopiedSpace.h"
-#include "CopiedSpaceInlines.h"
-#include "GCThread.h"
+#include "GCSegmentedArrayInlines.h"
+#include "HeapCellInlines.h"
+#include "HeapProfiler.h"
+#include "HeapSnapshotBuilder.h"
 #include "JSArray.h"
 #include "JSDestructibleObject.h"
-#include "VM.h"
 #include "JSObject.h"
 #include "JSString.h"
-#include "Operations.h"
-#include <wtf/StackStats.h>
+#include "JSCInlines.h"
+#include "SlotVisitorInlines.h"
+#include "StopIfNecessaryTimer.h"
+#include "SuperSampler.h"
+#include "VM.h"
+#include <wtf/Lock.h>
 
 namespace JSC {
 
-SlotVisitor::SlotVisitor(GCThreadSharedData& shared)
-    : m_stack(shared.m_vm->heap.blockAllocator())
-    , m_bytesVisited(0)
-    , m_bytesCopied(0)
+#if ENABLE(GC_VALIDATION)
+static void validate(JSCell* cell)
+{
+    RELEASE_ASSERT(cell);
+
+    if (!cell->structure()) {
+        dataLogF("cell at %p has a null structure\n" , cell);
+        CRASH();
+    }
+
+    // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
+    // I hate this sentence.
+    VM& vm = *cell->vm();
+    if (cell->structure()->structure()->JSCell::classInfo(vm) != cell->structure()->JSCell::classInfo(vm)) {
+        const char* parentClassName = 0;
+        const char* ourClassName = 0;
+        if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo(vm))
+            parentClassName = cell->structure()->structure()->JSCell::classInfo(vm)->className;
+        if (cell->structure()->JSCell::classInfo(vm))
+            ourClassName = cell->structure()->JSCell::classInfo(vm)->className;
+        dataLogF("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
+            cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
+        CRASH();
+    }
+
+    // Make sure we can walk the ClassInfo chain
+    const ClassInfo* info = cell->classInfo(vm);
+    do { } while ((info = info->parentClass));
+}
+#endif
+
+SlotVisitor::SlotVisitor(Heap& heap, CString codeName)
+    : m_bytesVisited(0)
     , m_visitCount(0)
     , m_isInParallelMode(false)
-    , m_shared(shared)
-    , m_shouldHashCons(false)
+    , m_markingVersion(MarkedSpace::initialVersion)
+    , m_heap(heap)
+    , m_codeName(codeName)
 #if !ASSERT_DISABLED
     , m_isCheckingForDefaultMarkViolation(false)
     , m_isDraining(false)
@@ -33,343 +92,752 @@ SlotVisitor::SlotVisitor(GCThreadSharedData& shared)
 
 SlotVisitor::~SlotVisitor()
 {
-    clearMarkStack();
+    clearMarkStacks();
 }
 
-void SlotVisitor::setup()
+void SlotVisitor::didStartMarking()
 {
-    m_shared.m_shouldHashCons = m_shared.m_vm->haveEnoughNewStringsToHashCons();
-    m_shouldHashCons = m_shared.m_shouldHashCons;
-#if ENABLE(PARALLEL_GC)
-    for (unsigned i = 0; i < m_shared.m_gcThreads.size(); ++i)
-        m_shared.m_gcThreads[i]->slotVisitor()->m_shouldHashCons = m_shared.m_shouldHashCons;
-#endif
+    if (heap()->collectionScope() == CollectionScope::Full)
+        RELEASE_ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
+    else
+        reset();
+
+    if (HeapProfiler* heapProfiler = vm().heapProfiler())
+        m_heapSnapshotBuilder = heapProfiler->activeSnapshotBuilder();
+
+    m_markingVersion = heap()->objectSpace().markingVersion();
 }
 
 void SlotVisitor::reset()
 {
+    RELEASE_ASSERT(!m_opaqueRoots.size());
     m_bytesVisited = 0;
-    m_bytesCopied = 0;
     m_visitCount = 0;
-    ASSERT(m_stack.isEmpty());
-#if ENABLE(PARALLEL_GC)
-    ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
-#else
-    m_opaqueRoots.clear();
-#endif
-    if (m_shouldHashCons) {
-        m_uniqueStrings.clear();
-        m_shouldHashCons = false;
-    }
+    m_heapSnapshotBuilder = nullptr;
+    RELEASE_ASSERT(!m_currentCell);
 }
 
-void SlotVisitor::clearMarkStack()
+void SlotVisitor::clearMarkStacks()
 {
-    m_stack.clear();
+    forEachMarkStack(
+        [&] (MarkStackArray& stack) -> IterationStatus {
+            stack.clear();
+            return IterationStatus::Continue;
+        });
 }
 
 void SlotVisitor::append(ConservativeRoots& conservativeRoots)
 {
-    StackStats::probe();
-    JSCell** roots = conservativeRoots.roots();
+    HeapCell** roots = conservativeRoots.roots();
     size_t size = conservativeRoots.size();
     for (size_t i = 0; i < size; ++i)
-        internalAppend(0, roots[i]);
+        appendJSCellOrAuxiliary(roots[i]);
 }
 
-ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell)
+void SlotVisitor::appendJSCellOrAuxiliary(HeapCell* heapCell)
 {
-    StackStats::probe();
-
-    ASSERT(Heap::isMarked(cell));
+    if (!heapCell)
+        return;
 
-    if (isJSString(cell)) {
-        JSString::visitChildren(const_cast<JSCell*>(cell), visitor);
+    ASSERT(!m_isCheckingForDefaultMarkViolation);
+
+    auto validateCell = [&] (JSCell* jsCell) {
+        StructureID structureID = jsCell->structureID();
+
+        auto die = [&] (const char* text) {
+            WTF::dataFile().atomically(
+                [&] (PrintStream& out) {
+                    out.print(text);
+                    out.print("GC type: ", heap()->collectionScope(), "\n");
+                    out.print("Object at: ", RawPointer(jsCell), "\n");
+#if USE(JSVALUE64)
+                    out.print("Structure ID: ", structureID, " (0x", format("%x", structureID), ")\n");
+                    out.print("Structure ID table size: ", heap()->structureIDTable().size(), "\n");
+#else
+                    out.print("Structure: ", RawPointer(structureID), "\n");
+#endif
+                    out.print("Object contents:");
+                    for (unsigned i = 0; i < 2; ++i)
+                        out.print(" ", format("0x%016llx", bitwise_cast<uint64_t*>(jsCell)[i]));
+                    out.print("\n");
+                    CellContainer container = jsCell->cellContainer();
+                    out.print("Is marked: ", container.isMarked(jsCell), "\n");
+                    out.print("Is newly allocated: ", container.isNewlyAllocated(jsCell), "\n");
+                    if (container.isMarkedBlock()) {
+                        MarkedBlock& block = container.markedBlock();
+                        out.print("Block: ", RawPointer(&block), "\n");
+                        block.handle().dumpState(out);
+                        out.print("\n");
+                        out.print("Is marked raw: ", block.isMarkedRaw(jsCell), "\n");
+                        out.print("Marking version: ", block.markingVersion(), "\n");
+                        out.print("Heap marking version: ", heap()->objectSpace().markingVersion(), "\n");
+                        out.print("Is newly allocated raw: ", block.handle().isNewlyAllocated(jsCell), "\n");
+                        out.print("Newly allocated version: ", block.handle().newlyAllocatedVersion(), "\n");
+                        out.print("Heap newly allocated version: ", heap()->objectSpace().newlyAllocatedVersion(), "\n");
+                    }
+                    UNREACHABLE_FOR_PLATFORM();
+                });
+        };
+
+        // It's not OK for the structure to be null at any GC scan point. We must not GC while
+        // an object is not fully initialized.
+        if (!structureID)
+            die("GC scan found corrupt object: structureID is zero!\n");
+
+        // It's not OK for the structure to be nuked at any GC scan point.
+        if (isNuked(structureID))
+            die("GC scan found object in bad state: structureID is nuked!\n");
+
+#if USE(JSVALUE64)
+        // This detects the worst of the badness.
+        if (structureID >= heap()->structureIDTable().size())
+            die("GC scan found corrupt object: structureID is out of bounds!\n");
+#endif
+    };
+
+    // In debug mode, we validate before marking since this makes it clearer what the problem
+    // was. It's also slower, so we don't do it normally.
+    if (!ASSERT_DISABLED && heapCell->cellKind() == HeapCell::JSCell)
+        validateCell(static_cast<JSCell*>(heapCell));
+
+    if (Heap::testAndSetMarked(m_markingVersion, heapCell))
         return;
-    }
+
+    switch (heapCell->cellKind()) {
+    case HeapCell::JSCell: {
+        // We have ample budget to perform validation here.
+
+        JSCell* jsCell = static_cast<JSCell*>(heapCell);
+        validateCell(jsCell);
+
+        jsCell->setCellState(CellState::PossiblyGrey);
 
-    if (isJSFinalObject(cell)) {
-        JSFinalObject::visitChildren(const_cast<JSCell*>(cell), visitor);
+        appendToMarkStack(jsCell);
         return;
     }
+
+    case HeapCell::Auxiliary: {
+        noteLiveAuxiliaryCell(heapCell);
+        return;
+    } }
 }
+
+void SlotVisitor::appendUnbarriered(JSValue value)
+{
+    if (!value || !value.isCell())
+        return;
+
+    if (UNLIKELY(m_heapSnapshotBuilder))
+        m_heapSnapshotBuilder->appendEdge(m_currentCell, value.asCell());
+
+    setMarkedAndAppendToMarkStack(value.asCell());
+}
+
+void SlotVisitor::appendHidden(JSValue value)
+{
+    if (!value || !value.isCell())
+        return;
+
+    setMarkedAndAppendToMarkStack(value.asCell());
+}
+
+void SlotVisitor::setMarkedAndAppendToMarkStack(JSCell* cell)
+{
+    SuperSamplerScope superSamplerScope(false);
+
+    ASSERT(!m_isCheckingForDefaultMarkViolation);
+    if (!cell)
+        return;
+
+#if ENABLE(GC_VALIDATION)
+    validate(cell);
+#endif
+
+    if (cell->isLargeAllocation())
+        setMarkedAndAppendToMarkStack(cell->largeAllocation(), cell);
+    else
+        setMarkedAndAppendToMarkStack(cell->markedBlock(), cell);
+}
+
+template<typename ContainerType>
+ALWAYS_INLINE void SlotVisitor::setMarkedAndAppendToMarkStack(ContainerType& container, JSCell* cell)
+{
+    container.aboutToMark(m_markingVersion);
+
+    if (container.testAndSetMarked(cell))
+        return;
+
+    ASSERT(cell->structure());
+
+    // Indicate that the object is grey and that:
+    // In case of concurrent GC: it's the first time it is grey in this GC cycle.
+    // In case of eden collection: it's a new object that became grey rather than an old remembered object.
+    cell->setCellState(CellState::PossiblyGrey);
+
+    appendToMarkStack(container, cell);
+}
+
+void SlotVisitor::appendToMarkStack(JSCell* cell)
+{
+    if (cell->isLargeAllocation())
+        appendToMarkStack(cell->largeAllocation(), cell);
+    else
+        appendToMarkStack(cell->markedBlock(), cell);
+}
+
+template<typename ContainerType>
+ALWAYS_INLINE void SlotVisitor::appendToMarkStack(ContainerType& container, JSCell* cell)
+{
+    ASSERT(Heap::isMarkedConcurrently(cell));
+    ASSERT(!cell->isZapped());
+
+    container.noteMarked();
+
+    m_visitCount++;
+    m_bytesVisited += container.cellSize();
+
+    m_collectorStack.append(cell);
+}
 
-    if (isJSArray(cell)) {
-        JSArray::visitChildren(const_cast<JSCell*>(cell), visitor);
+void SlotVisitor::appendToMutatorMarkStack(const JSCell* cell)
+{
+    m_mutatorStack.append(cell);
+}
+
+void SlotVisitor::markAuxiliary(const void* base)
+{
+    HeapCell* cell = bitwise_cast<HeapCell*>(base);
+
+    ASSERT(cell->heap() == heap());
+
+    if (Heap::testAndSetMarked(m_markingVersion, cell))
         return;
+
+    noteLiveAuxiliaryCell(cell);
+}
+
+void SlotVisitor::noteLiveAuxiliaryCell(HeapCell* cell)
+{
+    // We get here once per GC under these circumstances:
+    //
+    // Eden collection: if the cell was allocated since the last collection and is live somehow.
+    //
+    // Full collection: if the cell is live somehow.
+
+    CellContainer container = cell->cellContainer();
+
+    container.assertValidCell(vm(), cell);
+    container.noteMarked();
+
+    m_visitCount++;
+
+    size_t cellSize = container.cellSize();
+    m_bytesVisited += cellSize;
+    m_nonCellVisitCount += cellSize;
+}
+
+class SetCurrentCellScope {
+public:
+    SetCurrentCellScope(SlotVisitor& visitor, const JSCell* cell)
+        : m_visitor(visitor)
+    {
+        ASSERT(!m_visitor.m_currentCell);
+        m_visitor.m_currentCell = const_cast<JSCell*>(cell);
+    }
+
+    ~SetCurrentCellScope()
+    {
+        ASSERT(m_visitor.m_currentCell);
+        m_visitor.m_currentCell = nullptr;
+    }
+
+private:
+    SlotVisitor& m_visitor;
+};
+
+ALWAYS_INLINE void SlotVisitor::visitChildren(const JSCell* cell)
+{
+    ASSERT(Heap::isMarkedConcurrently(cell));
+
+    SetCurrentCellScope currentCellScope(*this, cell);
+
+    if (false) {
+        dataLog("Visiting ", RawPointer(cell));
+        if (!m_isFirstVisit)
+            dataLog(" (subsequent)");
+        dataLog("\n");
+    }
+
+    // Funny story: it's possible for the object to be black already, if we barrier the object at
+    // about the same time that it's marked. That's fine. It's a gnarly and super-rare race. It's
+    // not clear to me that it would be correct or profitable to bail here if the object is already
+    // black.
+
+    cell->setCellState(CellState::PossiblyBlack);
+
+    WTF::storeLoadFence();
+
+    switch (cell->type()) {
+    case StringType:
+        JSString::visitChildren(const_cast<JSCell*>(cell), *this);
+        break;
+
+    case FinalObjectType:
+        JSFinalObject::visitChildren(const_cast<JSCell*>(cell), *this);
+        break;
+
+    case ArrayType:
+        JSArray::visitChildren(const_cast<JSCell*>(cell), *this);
+        break;
+
+    default:
+        // FIXME: This could be so much better.
+        // https://bugs.webkit.org/show_bug.cgi?id=162462
+        cell->methodTable(vm())->visitChildren(const_cast<JSCell*>(cell), *this);
+        break;
+    }
+
+    if (UNLIKELY(m_heapSnapshotBuilder)) {
+        if (m_isFirstVisit)
+            m_heapSnapshotBuilder->appendNode(const_cast<JSCell*>(cell));
     }
+}
 
-    cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
+void SlotVisitor::visitAsConstraint(const JSCell* cell)
+{
+    m_isFirstVisit = false;
+    visitChildren(cell);
 }
 
-void SlotVisitor::donateKnownParallel()
+void SlotVisitor::donateKnownParallel(MarkStackArray& from, MarkStackArray& to)
 {
-    StackStats::probe();
     // NOTE: Because we re-try often, we can afford to be conservative, and
    // assume that donating is not profitable.
 
    // Avoid locking when a thread reaches a dead end in the object graph.
-    if (m_stack.size() < 2)
+    if (from.size() < 2)
         return;
 
    // If there's already some shared work queued up, be conservative and assume
    // that donating more is not profitable.
-    if (m_shared.m_sharedMarkStack.size())
+    if (to.size())
         return;
 
    // If we're contending on the lock, be conservative and assume that another
    // thread is already donating.
-    std::unique_lock<std::mutex> lock(m_shared.m_markingMutex, std::try_to_lock);
+    std::unique_lock<Lock> lock(m_heap.m_markingMutex, std::try_to_lock);
     if (!lock.owns_lock())
         return;
 
    // Otherwise, assume that a thread will go idle soon, and donate.
-    m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack);
+    from.donateSomeCellsTo(to);
 
-    if (m_shared.m_numberOfActiveParallelMarkers < Options::numberOfGCMarkers())
-        m_shared.m_markingConditionVariable.notify_all();
+    m_heap.m_markingConditionVariable.notifyAll();
 }
 
-void SlotVisitor::drain()
+void SlotVisitor::donateKnownParallel()
 {
-    StackStats::probe();
-    ASSERT(m_isInParallelMode);
-
-#if ENABLE(PARALLEL_GC)
-    if (Options::numberOfGCMarkers() > 1) {
-        while (!m_stack.isEmpty()) {
-            m_stack.refill();
-            for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_stack.canRemoveLast() && countdown--;)
-                visitChildren(*this, m_stack.removeLast());
-            donateKnownParallel();
-        }
-
-        mergeOpaqueRootsIfNecessary();
+    forEachMarkStack(
+        [&] (MarkStackArray& stack) -> IterationStatus {
+            donateKnownParallel(stack, correspondingGlobalStack(stack));
+            return IterationStatus::Continue;
+        });
+}
+
+void SlotVisitor::updateMutatorIsStopped(const AbstractLocker&)
+{
+    m_mutatorIsStopped = (m_heap.collectorBelievesThatTheWorldIsStopped() & m_canOptimizeForStoppedMutator);
+}
+
+void SlotVisitor::updateMutatorIsStopped()
+{
+    if (mutatorIsStoppedIsUpToDate())
         return;
+    updateMutatorIsStopped(holdLock(m_rightToRun));
+}
+
+bool SlotVisitor::hasAcknowledgedThatTheMutatorIsResumed() const
+{
+    return !m_mutatorIsStopped;
+}
+
+bool SlotVisitor::mutatorIsStoppedIsUpToDate() const
+{
+    return m_mutatorIsStopped == (m_heap.collectorBelievesThatTheWorldIsStopped() & m_canOptimizeForStoppedMutator);
+}
+
+void SlotVisitor::optimizeForStoppedMutator()
+{
+    m_canOptimizeForStoppedMutator = true;
+}
+
+NEVER_INLINE void SlotVisitor::drain(MonotonicTime timeout)
+{
+    if (!m_isInParallelMode) {
+        dataLog("FATAL: attempting to drain when not in parallel mode.\n");
+        RELEASE_ASSERT_NOT_REACHED();
     }
-#endif
     
-    while (!m_stack.isEmpty()) {
-        m_stack.refill();
-        while (m_stack.canRemoveLast())
-            visitChildren(*this, m_stack.removeLast());
+    auto locker = holdLock(m_rightToRun);
+    
+    while (!hasElapsed(timeout)) {
+        updateMutatorIsStopped(locker);
+        IterationStatus status = forEachMarkStack(
+            [&] (MarkStackArray& stack) -> IterationStatus {
+                if (stack.isEmpty())
+                    return IterationStatus::Continue;
+
+                stack.refill();
+                
+                m_isFirstVisit = (&stack == &m_collectorStack);
+
+                for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); stack.canRemoveLast() && countdown--;)
+                    visitChildren(stack.removeLast());
+                return IterationStatus::Done;
+            });
+        if (status == IterationStatus::Continue)
+            break;
+        
+        m_rightToRun.safepoint();
+        donateKnownParallel();
     }
+    
+    mergeIfNecessary();
 }
 
-void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
+size_t SlotVisitor::performIncrementOfDraining(size_t bytesRequested)
 {
-    StackStats::probe();
-    ASSERT(m_isInParallelMode);
-    
-    ASSERT(Options::numberOfGCMarkers());
-    
-    bool shouldBeParallel;
+    RELEASE_ASSERT(m_isInParallelMode);
 
-#if ENABLE(PARALLEL_GC)
-    shouldBeParallel = Options::numberOfGCMarkers() > 1;
-#else
-    ASSERT(Options::numberOfGCMarkers() == 1);
-    shouldBeParallel = false;
-#endif
-    
-    if (!shouldBeParallel) {
-        // This call should be a no-op.
-        ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain);
-        ASSERT(m_stack.isEmpty());
-        ASSERT(m_shared.m_sharedMarkStack.isEmpty());
-        return;
+    size_t cellsRequested = bytesRequested / MarkedBlock::atomSize;
+    {
+        auto locker = holdLock(m_heap.m_markingMutex);
+        forEachMarkStack(
+            [&] (MarkStackArray& stack) -> IterationStatus {
+                cellsRequested -= correspondingGlobalStack(stack).transferTo(stack, cellsRequested);
+                return cellsRequested ? IterationStatus::Continue : IterationStatus::Done;
+            });
     }
+    
+    size_t cellBytesVisited = 0;
+    m_nonCellVisitCount = 0;
+    
+    auto bytesVisited = [&] () -> size_t {
+        return cellBytesVisited + m_nonCellVisitCount;
+    };
+    
+    auto isDone = [&] () -> bool {
+        return bytesVisited() >= bytesRequested;
+    };
 
-#if ENABLE(PARALLEL_GC)
     {
-        std::lock_guard<std::mutex> lock(m_shared.m_markingMutex);
-        m_shared.m_numberOfActiveParallelMarkers++;
+        auto locker = holdLock(m_rightToRun);
+        
+        while (!isDone()) {
+            updateMutatorIsStopped(locker);
+            IterationStatus status = forEachMarkStack(
+                [&] (MarkStackArray& stack) -> IterationStatus {
+                    if (stack.isEmpty() || isDone())
+                        return IterationStatus::Continue;
+
+                    stack.refill();
+                    
+                    m_isFirstVisit = (&stack == &m_collectorStack);
+
+                    unsigned countdown = Options::minimumNumberOfScansBetweenRebalance();
+                    while (countdown && stack.canRemoveLast() && !isDone()) {
+                        const JSCell* cell = stack.removeLast();
+                        cellBytesVisited += cell->cellSize();
+                        visitChildren(cell);
+                        countdown--;
+                    }
+                    return IterationStatus::Done;
+                });
+            if (status == IterationStatus::Continue)
+                break;
+            m_rightToRun.safepoint();
+            donateKnownParallel();
+        }
     }
+    
+    donateAll();
+    mergeIfNecessary();
+    
+    return bytesVisited();
+}
+
+bool SlotVisitor::didReachTermination()
+{
+    LockHolder locker(m_heap.m_markingMutex);
+    return didReachTermination(locker);
+}
+
+bool SlotVisitor::didReachTermination(const AbstractLocker&)
+{
+    return isEmpty()
+        && !m_heap.m_numberOfActiveParallelMarkers
+        && m_heap.m_sharedCollectorMarkStack->isEmpty()
+        && m_heap.m_sharedMutatorMarkStack->isEmpty();
+}
+
+bool SlotVisitor::hasWork(const AbstractLocker&)
+{
+    return !m_heap.m_sharedCollectorMarkStack->isEmpty()
+        || !m_heap.m_sharedMutatorMarkStack->isEmpty();
+}
+
+NEVER_INLINE SlotVisitor::SharedDrainResult SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode, MonotonicTime timeout)
+{
+    ASSERT(m_isInParallelMode);
+    
+    ASSERT(Options::numberOfGCMarkers());
+    
+    bool isActive = false;
     while (true) {
         {
-            std::unique_lock<std::mutex> lock(m_shared.m_markingMutex);
-            m_shared.m_numberOfActiveParallelMarkers--;
+            LockHolder locker(m_heap.m_markingMutex);
+            if (isActive)
+                m_heap.m_numberOfActiveParallelMarkers--;
+            m_heap.m_numberOfWaitingParallelMarkers++;
 
-            // How we wait differs depending on drain mode.
             if (sharedDrainMode == MasterDrain) {
-                // Wait until either termination is reached, or until there is some work
-                // for us to do.
                 while (true) {
-                    // Did we reach termination?
-                    if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) {
-                        // Let any sleeping slaves know it's time for them to return;
-                        m_shared.m_markingConditionVariable.notify_all();
-                        return;
+                    if (hasElapsed(timeout))
+                        return SharedDrainResult::TimedOut;
+                    
+                    if (didReachTermination(locker)) {
+                        m_heap.m_markingConditionVariable.notifyAll();
+                        return SharedDrainResult::Done;
                     }
                     
-                    // Is there work to be done?
-                    if (!m_shared.m_sharedMarkStack.isEmpty())
+                    if (hasWork(locker))
                         break;
-                    
-                    // Otherwise wait.
-                    m_shared.m_markingConditionVariable.wait(lock);
+                    
+                    m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
                 }
             } else {
                 ASSERT(sharedDrainMode == SlaveDrain);
+                
+                if (hasElapsed(timeout))
+                    return SharedDrainResult::TimedOut;
                 
-                // Did we detect termination? If so, let the master know.
-                if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
-                    m_shared.m_markingConditionVariable.notify_all();
+                if (didReachTermination(locker)) {
+                    m_heap.m_markingConditionVariable.notifyAll();
+                    
+                    // If we're in concurrent mode, then we know that the mutator will eventually do
+                    // the right thing because:
+                    // - It's possible that the collector has the conn. In that case, the collector will
+                    //   wake up from the notification above. This will happen if the app released heap
+                    //   access. Native apps can spend a lot of time with heap access released.
+                    // - It's possible that the mutator will allocate soon. Then it will check if we
+                    //   reached termination. This is the most likely outcome in programs that allocate
+                    //   a lot.
+                    // - WebCore never releases access. But WebCore has a runloop. The runloop will check
+                    //   if we reached termination.
+                    // So, this tells the runloop that it's got things to do.
+                    m_heap.m_stopIfNecessaryTimer->scheduleSoon();
+                }
 
-                m_shared.m_markingConditionVariable.wait(lock, [this] { return !m_shared.m_sharedMarkStack.isEmpty() || m_shared.m_parallelMarkersShouldExit; });
+                auto isReady = [&] () -> bool {
+                    return hasWork(locker)
+                        || m_heap.m_parallelMarkersShouldExit;
+                };
+                
+                m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout, isReady);
                 
-                // Is the current phase done? If so, return from this function.
-                if (m_shared.m_parallelMarkersShouldExit)
-                    return;
+                if (m_heap.m_parallelMarkersShouldExit)
+                    return SharedDrainResult::Done;
             }
-           
-            size_t idleThreadCount = Options::numberOfGCMarkers() - m_shared.m_numberOfActiveParallelMarkers;
-            m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack, idleThreadCount);
-            m_shared.m_numberOfActiveParallelMarkers++;
+            
+            forEachMarkStack(
+                [&] (MarkStackArray& stack) -> IterationStatus {
+                    stack.stealSomeCellsFrom(
+                        correspondingGlobalStack(stack),
+                        m_heap.m_numberOfWaitingParallelMarkers);
+                    return IterationStatus::Continue;
+                });
+            
+            m_heap.m_numberOfActiveParallelMarkers++;
+            m_heap.m_numberOfWaitingParallelMarkers--;
         }
         
-        drain();
+        drain(timeout);
+        isActive = true;
     }
-#endif
 }
 
-void SlotVisitor::mergeOpaqueRoots()
+SlotVisitor::SharedDrainResult SlotVisitor::drainInParallel(MonotonicTime timeout)
 {
-    StackStats::probe();
-    ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
-    {
-        MutexLocker locker(m_shared.m_opaqueRootsLock);
-        HashSet<void*>::iterator begin = m_opaqueRoots.begin();
-        HashSet<void*>::iterator end = m_opaqueRoots.end();
-        for (HashSet<void*>::iterator iter = begin; iter != end; ++iter)
-            m_shared.m_opaqueRoots.add(*iter);
-    }
-    m_opaqueRoots.clear();
+    donateAndDrain(timeout);
+    return drainFromShared(MasterDrain, timeout);
 }
 
-ALWAYS_INLINE bool JSString::tryHashConsLock()
+SlotVisitor::SharedDrainResult SlotVisitor::drainInParallelPassively(MonotonicTime timeout)
 {
-#if ENABLE(PARALLEL_GC)
-    unsigned currentFlags = m_flags;
-
-    if (currentFlags & HashConsLock)
-        return false;
-
-    unsigned newFlags = currentFlags | HashConsLock;
+    ASSERT(m_isInParallelMode);
+    
+    ASSERT(Options::numberOfGCMarkers());
+    
+    if (Options::numberOfGCMarkers() == 1
+        || (m_heap.m_worldState.load() & Heap::mutatorWaitingBit)
+        || !m_heap.hasHeapAccess()
+        || m_heap.collectorBelievesThatTheWorldIsStopped()) {
+        // This is an optimization over drainInParallel() when we have a concurrent mutator but
+        // otherwise it is not profitable.
+        return drainInParallel(timeout);
+    }
 
-    if (!WTF::weakCompareAndSwap(&m_flags, currentFlags, newFlags))
-        return false;
+    LockHolder locker(m_heap.m_markingMutex);
+    donateAll(locker);
+    
+    for (;;) {
+        if (hasElapsed(timeout))
+            return SharedDrainResult::TimedOut;
+        
+        if (didReachTermination(locker)) {
+            m_heap.m_markingConditionVariable.notifyAll();
+            return SharedDrainResult::Done;
+        }
+        
+        m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
+    }
+}
 
-    WTF::memoryBarrierAfterLock();
-    return true;
-#else
-    if (isHashConsSingleton())
-        return false;
+void SlotVisitor::donateAll()
+{
+    if (isEmpty())
+        return;
+    
+    donateAll(holdLock(m_heap.m_markingMutex));
+}
 
-    m_flags |= HashConsLock;
+void SlotVisitor::donateAll(const AbstractLocker&)
+{
+    forEachMarkStack(
+        [&] (MarkStackArray& stack) -> IterationStatus {
+            stack.transferTo(correspondingGlobalStack(stack));
+            return IterationStatus::Continue;
+        });
 
-    return true;
-#endif
+    m_heap.m_markingConditionVariable.notifyAll();
 }
 
-ALWAYS_INLINE void JSString::releaseHashConsLock()
+void SlotVisitor::addOpaqueRoot(void* root)
 {
-#if ENABLE(PARALLEL_GC)
-    WTF::memoryBarrierBeforeUnlock();
-#endif
-    m_flags &= ~HashConsLock;
+    if (!root)
+        return;
+    
+    if (m_ignoreNewOpaqueRoots)
+        return;
+    
+    if (Options::numberOfGCMarkers() == 1) {
+        // Put directly into the shared HashSet.
+        m_heap.m_opaqueRoots.add(root);
+        return;
+    }
+    // Put into the local set, but merge with the shared one every once in
+    // a while to make sure that the local sets don't grow too large.
+    mergeOpaqueRootsIfProfitable();
+    m_opaqueRoots.add(root);
 }
 
-ALWAYS_INLINE bool JSString::shouldTryHashCons()
+bool SlotVisitor::containsOpaqueRoot(void* root) const
 {
-    return ((length() > 1) && !isRope() && !isHashConsSingleton());
+    if (!root)
+        return false;
+    
+    ASSERT(!m_isInParallelMode);
+    return m_heap.m_opaqueRoots.contains(root);
 }
 
-ALWAYS_INLINE void SlotVisitor::internalAppend(void* from, JSValue* slot)
+TriState SlotVisitor::containsOpaqueRootTriState(void* root) const
 {
-    // This internalAppend is only intended for visits to object and array backing stores.
-    // as it can change the JSValue pointed to be the argument when the original JSValue
-    // is a string that contains the same contents as another string.
+    if (!root)
+        return FalseTriState;
+    
+    if (m_opaqueRoots.contains(root))
+        return TrueTriState;
+    std::lock_guard<Lock> lock(m_heap.m_opaqueRootsMutex);
+    if (m_heap.m_opaqueRoots.contains(root))
+        return TrueTriState;
+    return MixedTriState;
+}
 
-    StackStats::probe();
-    ASSERT(slot);
-    JSValue value = *slot;
-    ASSERT(value);
-    if (!value.isCell())
+void SlotVisitor::mergeIfNecessary()
+{
+    if (m_opaqueRoots.isEmpty())
         return;
+    mergeOpaqueRoots();
+}
 
-    JSCell* cell = value.asCell();
-    if (!cell)
+void SlotVisitor::mergeOpaqueRootsIfProfitable()
+{
+    if (static_cast<unsigned>(m_opaqueRoots.size()) < Options::opaqueRootMergeThreshold())
         return;
-
-    validate(cell);
-
-    if (m_shouldHashCons && cell->isString()) {
-        JSString* string = jsCast<JSString*>(cell);
-        if (string->shouldTryHashCons() && string->tryHashConsLock()) {
-            UniqueStringMap::AddResult addResult = m_uniqueStrings.add(string->string().impl(), value);
-            if (addResult.isNewEntry)
-                string->setHashConsSingleton();
-            else {
-                JSValue existingJSValue = addResult.iterator->value;
-                if (value != existingJSValue)
-                    jsCast<JSString*>(existingJSValue.asCell())->clearHashConsSingleton();
-                *slot = existingJSValue;
-                string->releaseHashConsLock();
-                return;
-            }
-            string->releaseHashConsLock();
-        }
+    mergeOpaqueRoots();
+}
+
+void SlotVisitor::donate()
+{
+    if (!m_isInParallelMode) {
+        dataLog("FATAL: Attempting to donate when not in parallel mode.\n");
+        RELEASE_ASSERT_NOT_REACHED();
     }
-
-    internalAppend(from, cell);
+    
+    if (Options::numberOfGCMarkers() == 1)
+        return;
+    
+    donateKnownParallel();
 }
 
-void SlotVisitor::harvestWeakReferences()
+void SlotVisitor::donateAndDrain(MonotonicTime timeout)
 {
-    StackStats::probe();
-    for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
-        current->visitWeakReferences(*this);
+    donate();
+    drain(timeout);
 }
 
-void SlotVisitor::finalizeUnconditionalFinalizers()
+void SlotVisitor::mergeOpaqueRoots()
 {
-    StackStats::probe();
-    while (m_shared.m_unconditionalFinalizers.hasNext())
-        m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
+    {
+        std::lock_guard<Lock> lock(m_heap.m_opaqueRootsMutex);
+        for (auto* root : m_opaqueRoots)
+            m_heap.m_opaqueRoots.add(root);
+    }
+    m_opaqueRoots.clear();
 }
 
-#if ENABLE(GC_VALIDATION)
-void SlotVisitor::validate(JSCell* cell)
+void SlotVisitor::addWeakReferenceHarvester(WeakReferenceHarvester* weakReferenceHarvester)
 {
-    RELEASE_ASSERT(cell);
+    m_heap.m_weakReferenceHarvesters.addThreadSafe(weakReferenceHarvester);
+}
 
-    if (!cell->structure()) {
-        dataLogF("cell at %p has a null structure\n" , cell);
-        CRASH();
-    }
+void SlotVisitor::addUnconditionalFinalizer(UnconditionalFinalizer* unconditionalFinalizer)
+{
+    m_heap.m_unconditionalFinalizers.addThreadSafe(unconditionalFinalizer);
+}
 
-    // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
-    // I hate this sentence.
-    if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo()) {
-        const char* parentClassName = 0;
-        const char* ourClassName = 0;
-        if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo())
-            parentClassName = cell->structure()->structure()->JSCell::classInfo()->className;
-        if (cell->structure()->JSCell::classInfo())
-            ourClassName = cell->structure()->JSCell::classInfo()->className;
-        dataLogF("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
-            cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
-        CRASH();
-    }
+void SlotVisitor::didRace(const VisitRaceKey& race)
+{
+    if (Options::verboseVisitRace())
+        dataLog(toCString("GC visit race: ", race, "\n"));
+    
+    auto locker = holdLock(heap()->m_raceMarkStackLock);
+    JSCell* cell = race.cell();
+    cell->setCellState(CellState::PossiblyGrey);
+    heap()->m_raceMarkStack->append(cell);
+}
 
-    // Make sure we can walk the ClassInfo chain
-    const ClassInfo* info = cell->classInfo();
-    do { } while ((info = info->parentClass));
+void SlotVisitor::dump(PrintStream& out) const
+{
+    out.print("Collector: [", pointerListDump(collectorMarkStack()), "], Mutator: [", pointerListDump(mutatorMarkStack()), "]");
 }
-#else
-void SlotVisitor::validate(JSCell*)
+
+MarkStackArray& SlotVisitor::correspondingGlobalStack(MarkStackArray& stack)
 {
+    if (&stack == &m_collectorStack)
+        return *m_heap.m_sharedCollectorMarkStack;
+    RELEASE_ASSERT(&stack == &m_mutatorStack);
+    return *m_heap.m_sharedMutatorMarkStack;
 }
-#endif
 
 } // namespace JSC