Diffstat (limited to 'Source/JavaScriptCore/heap/Heap.cpp')
-rw-r--r--  Source/JavaScriptCore/heap/Heap.cpp | 3002
1 file changed, 2331 insertions(+), 671 deletions(-)
diff --git a/Source/JavaScriptCore/heap/Heap.cpp b/Source/JavaScriptCore/heap/Heap.cpp
index 26ec23980..f8f957b22 100644
--- a/Source/JavaScriptCore/heap/Heap.cpp
+++ b/Source/JavaScriptCore/heap/Heap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2003-2017 Apple Inc. All rights reserved.
* Copyright (C) 2007 Eric Seidel <eric@webkit.org>
*
* This library is free software; you can redistribute it and/or
@@ -22,163 +22,107 @@
#include "Heap.h"
#include "CodeBlock.h"
+#include "CodeBlockSetInlines.h"
+#include "CollectingScope.h"
#include "ConservativeRoots.h"
-#include "CopiedSpace.h"
-#include "CopiedSpaceInlines.h"
-#include "CopyVisitorInlines.h"
-#include "DFGWorklist.h"
-#include "DelayedReleaseScope.h"
+#include "DFGWorklistInlines.h"
+#include "EdenGCActivityCallback.h"
+#include "Exception.h"
+#include "FullGCActivityCallback.h"
#include "GCActivityCallback.h"
#include "GCIncomingRefCountedSetInlines.h"
+#include "GCSegmentedArrayInlines.h"
+#include "GCTypeMap.h"
+#include "HasOwnPropertyCache.h"
+#include "HeapHelperPool.h"
#include "HeapIterationScope.h"
-#include "HeapRootVisitor.h"
-#include "HeapStatistics.h"
+#include "HeapProfiler.h"
+#include "HeapSnapshot.h"
+#include "HeapVerifier.h"
#include "IncrementalSweeper.h"
#include "Interpreter.h"
+#include "JITStubRoutineSet.h"
+#include "JITWorklist.h"
+#include "JSCInlines.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
-#include "JSONObject.h"
-#include "Operations.h"
-#include "RecursiveAllocationScope.h"
-#include "Tracing.h"
+#include "JSVirtualMachineInternal.h"
+#include "MachineStackMarker.h"
+#include "MarkedSpaceInlines.h"
+#include "MarkingConstraintSet.h"
+#include "PreventCollectionScope.h"
+#include "SamplingProfiler.h"
+#include "ShadowChicken.h"
+#include "SpaceTimeMutatorScheduler.h"
+#include "SuperSampler.h"
+#include "StochasticSpaceTimeMutatorScheduler.h"
+#include "StopIfNecessaryTimer.h"
+#include "SweepingScope.h"
+#include "SynchronousStopTheWorldMutatorScheduler.h"
+#include "TypeProfilerLog.h"
#include "UnlinkedCodeBlock.h"
#include "VM.h"
#include "WeakSetInlines.h"
#include <algorithm>
-#include <wtf/RAMSize.h>
#include <wtf/CurrentTime.h>
+#include <wtf/MainThread.h>
+#include <wtf/ParallelVectorIterator.h>
+#include <wtf/ProcessID.h>
+#include <wtf/RAMSize.h>
+#include <wtf/SimpleStats.h>
+
+#if USE(FOUNDATION)
+#if __has_include(<objc/objc-internal.h>)
+#include <objc/objc-internal.h>
+#else
+extern "C" void* objc_autoreleasePoolPush(void);
+extern "C" void objc_autoreleasePoolPop(void *context);
+#endif
+#endif // USE(FOUNDATION)
using namespace std;
-using namespace JSC;
namespace JSC {
-namespace {
-
-static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
-static const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.
-
-#define ENABLE_GC_LOGGING 0
-
-#if ENABLE(GC_LOGGING)
-#if COMPILER(CLANG)
-#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
-_Pragma("clang diagnostic push") \
-_Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
-_Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
-static type name arguments; \
-_Pragma("clang diagnostic pop")
-#else
-#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
-static type name arguments;
-#endif // COMPILER(CLANG)
-
-struct GCTimer {
- GCTimer(const char* name)
- : m_time(0)
- , m_min(100000000)
- , m_max(0)
- , m_count(0)
- , m_name(name)
- {
- }
- ~GCTimer()
- {
- dataLogF("%s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf)\n", m_name, m_time * 1000, m_time * 1000 / m_count, m_min*1000, m_max*1000);
- }
- double m_time;
- double m_min;
- double m_max;
- size_t m_count;
- const char* m_name;
-};
-
-struct GCTimerScope {
- GCTimerScope(GCTimer* timer)
- : m_timer(timer)
- , m_start(WTF::monotonicallyIncreasingTime())
- {
- }
- ~GCTimerScope()
- {
- double delta = WTF::monotonicallyIncreasingTime() - m_start;
- if (delta < m_timer->m_min)
- m_timer->m_min = delta;
- if (delta > m_timer->m_max)
- m_timer->m_max = delta;
- m_timer->m_count++;
- m_timer->m_time += delta;
- }
- GCTimer* m_timer;
- double m_start;
-};
-
-struct GCCounter {
- GCCounter(const char* name)
- : m_name(name)
- , m_count(0)
- , m_total(0)
- , m_min(10000000)
- , m_max(0)
- {
- }
-
- void count(size_t amount)
- {
- m_count++;
- m_total += amount;
- if (amount < m_min)
- m_min = amount;
- if (amount > m_max)
- m_max = amount;
- }
- ~GCCounter()
- {
- dataLogF("%s: %zu values (avg. %zu, min. %zu, max. %zu)\n", m_name, m_total, m_total / m_count, m_min, m_max);
- }
- const char* m_name;
- size_t m_count;
- size_t m_total;
- size_t m_min;
- size_t m_max;
-};
+namespace {
-#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer)
-#define COND_GCPHASE(cond, name1, name2) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name1##Timer, (#name1)); DEFINE_GC_LOGGING_GLOBAL(GCTimer, name2##Timer, (#name2)); GCTimerScope name1##CondTimerScope(cond ? &name1##Timer : &name2##Timer)
-#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)
-
-#else
+bool verboseStop = false;
-#define GCPHASE(name) do { } while (false)
-#define COND_GCPHASE(cond, name1, name2) do { } while (false)
-#define GCCOUNTER(name, value) do { } while (false)
-#endif
+double maxPauseMS(double thisPauseMS)
+{
+ static double maxPauseMS;
+ maxPauseMS = std::max(thisPauseMS, maxPauseMS);
+ return maxPauseMS;
+}
-static inline size_t minHeapSize(HeapType heapType, size_t ramSize)
+size_t minHeapSize(HeapType heapType, size_t ramSize)
{
- if (heapType == LargeHeap)
- return min(largeHeapSize, ramSize / 4);
- return smallHeapSize;
+ if (heapType == LargeHeap) {
+ double result = min(
+ static_cast<double>(Options::largeHeapSize()),
+ ramSize * Options::smallHeapRAMFraction());
+ return static_cast<size_t>(result);
+ }
+ return Options::smallHeapSize();
}
-static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
+size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
{
- // Try to stay under 1/2 RAM size to leave room for the DOM, rendering, networking, etc.
- if (heapSize < ramSize / 4)
- return 2 * heapSize;
- if (heapSize < ramSize / 2)
- return 1.5 * heapSize;
- return 1.25 * heapSize;
+ if (heapSize < ramSize * Options::smallHeapRAMFraction())
+ return Options::smallHeapGrowthFactor() * heapSize;
+ if (heapSize < ramSize * Options::mediumHeapRAMFraction())
+ return Options::mediumHeapGrowthFactor() * heapSize;
+ return Options::largeHeapGrowthFactor() * heapSize;
}
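
The new policy above mirrors the hard-coded growth curve it replaces: roughly 2x growth while the heap stays under a quarter of RAM, 1.5x under half of RAM, and 1.25x beyond that. A standalone sketch of that curve, assuming those old constants carry over as the Options defaults (sketchProportionalHeapSize and the 8GB RAM figure are illustrative, not JSC API):

#include <cstddef>
#include <cstdio>

// Growth policy sketch: integer arithmetic is used here for simplicity; the
// real code multiplies by double-valued growth factors read from Options.
static size_t sketchProportionalHeapSize(size_t heapSize, size_t ramSize)
{
    if (heapSize < ramSize / 4)
        return 2 * heapSize;            // small heap: grow aggressively
    if (heapSize < ramSize / 2)
        return heapSize + heapSize / 2; // medium heap: grow by 1.5x
    return heapSize + heapSize / 4;     // large heap: grow by 1.25x
}

int main()
{
    const size_t MB = size_t(1) << 20;
    const size_t ramSize = size_t(8) << 30; // assume an 8GB machine
    const size_t samples[] = { 32 * MB, 3072 * MB, 5120 * MB };
    for (size_t heapSize : samples)
        std::printf("heap %zu MB -> next budget %zu MB\n", heapSize / MB, sketchProportionalHeapSize(heapSize, ramSize) / MB);
    return 0;
}
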
-static inline bool isValidSharedInstanceThreadState(VM* vm)
+bool isValidSharedInstanceThreadState(VM* vm)
{
return vm->currentThreadIsHoldingAPILock();
}
-static inline bool isValidThreadState(VM* vm)
+bool isValidThreadState(VM* vm)
{
- if (vm->identifierTable != wtfThreadData().currentIdentifierTable())
+ if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
return false;
if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
@@ -187,155 +131,356 @@ static inline bool isValidThreadState(VM* vm)
return true;
}
-struct MarkObject : public MarkedBlock::VoidFunctor {
- void operator()(JSCell* cell)
- {
- if (cell->isZapped())
- return;
- Heap::heap(cell)->setMarked(cell);
- }
-};
-
-struct Count : public MarkedBlock::CountFunctor {
- void operator()(JSCell*) { count(1); }
-};
-
-struct CountIfGlobalObject : MarkedBlock::CountFunctor {
- void operator()(JSCell* cell) {
- if (!cell->isObject())
- return;
- if (!asObject(cell)->isGlobalObject())
- return;
- count(1);
- }
-};
-
-class RecordType {
-public:
- typedef PassOwnPtr<TypeCountSet> ReturnType;
-
- RecordType();
- void operator()(JSCell*);
- ReturnType returnValue();
-
-private:
- const char* typeName(JSCell*);
- OwnPtr<TypeCountSet> m_typeCountSet;
-};
-
-inline RecordType::RecordType()
- : m_typeCountSet(adoptPtr(new TypeCountSet))
+void recordType(VM& vm, TypeCountSet& set, JSCell* cell)
{
+ const char* typeName = "[unknown]";
+ const ClassInfo* info = cell->classInfo(vm);
+ if (info && info->className)
+ typeName = info->className;
+ set.add(typeName);
}
-inline const char* RecordType::typeName(JSCell* cell)
+bool measurePhaseTiming()
{
- const ClassInfo* info = cell->classInfo();
- if (!info || !info->className)
- return "[unknown]";
- return info->className;
+ return false;
}
-inline void RecordType::operator()(JSCell* cell)
+HashMap<const char*, GCTypeMap<SimpleStats>>& timingStats()
{
- m_typeCountSet->add(typeName(cell));
+ static HashMap<const char*, GCTypeMap<SimpleStats>>* result;
+ static std::once_flag once;
+ std::call_once(
+ once,
+ [] {
+ result = new HashMap<const char*, GCTypeMap<SimpleStats>>();
+ });
+ return *result;
}
-inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
+SimpleStats& timingStats(const char* name, CollectionScope scope)
{
- return m_typeCountSet.release();
+ return timingStats().add(name, GCTypeMap<SimpleStats>()).iterator->value[scope];
}
+class TimingScope {
+public:
+ TimingScope(std::optional<CollectionScope> scope, const char* name)
+ : m_scope(scope)
+ , m_name(name)
+ {
+ if (measurePhaseTiming())
+ m_before = monotonicallyIncreasingTimeMS();
+ }
+
+ TimingScope(Heap& heap, const char* name)
+ : TimingScope(heap.collectionScope(), name)
+ {
+ }
+
+ void setScope(std::optional<CollectionScope> scope)
+ {
+ m_scope = scope;
+ }
+
+ void setScope(Heap& heap)
+ {
+ setScope(heap.collectionScope());
+ }
+
+ ~TimingScope()
+ {
+ if (measurePhaseTiming()) {
+ double after = monotonicallyIncreasingTimeMS();
+ double timing = after - m_before;
+ SimpleStats& stats = timingStats(m_name, *m_scope);
+ stats.add(timing);
+ dataLog("[GC:", *m_scope, "] ", m_name, " took: ", timing, "ms (average ", stats.mean(), "ms).\n");
+ }
+ }
+private:
+ std::optional<CollectionScope> m_scope;
+ double m_before;
+ const char* m_name;
+};
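
TimingScope is a plain RAII timer: it records a monotonic timestamp on construction and, on destruction, folds the elapsed time into the per-phase statistics and logs it. A minimal sketch of the same pattern using only the standard library (ScopedPhaseTimer and runMarkingPhase are illustrative names, not part of JavaScriptCore):

#include <chrono>
#include <cstdio>

class ScopedPhaseTimer {
public:
    explicit ScopedPhaseTimer(const char* name)
        : m_name(name)
        , m_start(std::chrono::steady_clock::now())
    {
    }

    ~ScopedPhaseTimer()
    {
        auto elapsed = std::chrono::steady_clock::now() - m_start;
        double ms = std::chrono::duration<double, std::milli>(elapsed).count();
        std::printf("[GC] %s took: %.3f ms\n", m_name, ms);
    }

private:
    const char* m_name;
    std::chrono::steady_clock::time_point m_start;
};

static void runMarkingPhase()
{
    ScopedPhaseTimer timer("Heap::beginMarking"); // destructor logs when the phase exits
    // ... phase body would go here ...
}

int main()
{
    runMarkingPhase();
    return 0;
}
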
+
} // anonymous namespace
+class Heap::Thread : public AutomaticThread {
+public:
+ Thread(const AbstractLocker& locker, Heap& heap)
+ : AutomaticThread(locker, heap.m_threadLock, heap.m_threadCondition)
+ , m_heap(heap)
+ {
+ }
+
+protected:
+ PollResult poll(const AbstractLocker& locker) override
+ {
+ if (m_heap.m_threadShouldStop) {
+ m_heap.notifyThreadStopping(locker);
+ return PollResult::Stop;
+ }
+ if (m_heap.shouldCollectInCollectorThread(locker))
+ return PollResult::Work;
+ return PollResult::Wait;
+ }
+
+ WorkResult work() override
+ {
+ m_heap.collectInCollectorThread();
+ return WorkResult::Continue;
+ }
+
+ void threadDidStart() override
+ {
+ WTF::registerGCThread(GCThreadType::Main);
+ }
+
+private:
+ Heap& m_heap;
+};
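
Heap::Thread plugs into WTF::AutomaticThread's poll/work protocol: poll() decides whether to stop, wait, or run, and work() performs one collection cycle. A rough standalone sketch of that loop using std::thread and a condition variable (CollectorLoop is illustrative; it is not WTF's AutomaticThread API):

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

class CollectorLoop {
public:
    void start() { m_thread = std::thread([this] { run(); }); }

    void requestWork()
    {
        {
            std::lock_guard<std::mutex> lock(m_lock);
            m_hasWork = true;
        }
        m_condition.notify_one();
    }

    void stop()
    {
        {
            std::lock_guard<std::mutex> lock(m_lock);
            m_shouldStop = true;
        }
        m_condition.notify_one();
        m_thread.join();
    }

private:
    void run()
    {
        std::unique_lock<std::mutex> lock(m_lock);
        for (;;) {
            // poll(): decide between Stop, Work and Wait while holding the lock.
            m_condition.wait(lock, [this] { return m_shouldStop || m_hasWork; });
            if (m_shouldStop)
                return;
            m_hasWork = false;
            lock.unlock();
            // work(): one collection cycle, run without the lock held.
            std::printf("collect in collector thread\n");
            lock.lock();
        }
    }

    std::thread m_thread;
    std::mutex m_lock;
    std::condition_variable m_condition;
    bool m_hasWork { false };
    bool m_shouldStop { false };
};

int main()
{
    CollectorLoop loop;
    loop.start();
    loop.requestWork();
    loop.stop();
    return 0;
}
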
+
Heap::Heap(VM* vm, HeapType heapType)
: m_heapType(heapType)
- , m_ramSize(ramSize())
+ , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
, m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
, m_sizeAfterLastCollect(0)
+ , m_sizeAfterLastFullCollect(0)
+ , m_sizeBeforeLastFullCollect(0)
+ , m_sizeAfterLastEdenCollect(0)
+ , m_sizeBeforeLastEdenCollect(0)
, m_bytesAllocatedThisCycle(0)
- , m_bytesAbandonedThisCycle(0)
+ , m_bytesAbandonedSinceLastFullCollect(0)
, m_maxEdenSize(m_minBytesPerCycle)
, m_maxHeapSize(m_minBytesPerCycle)
, m_shouldDoFullCollection(false)
, m_totalBytesVisited(0)
- , m_totalBytesCopied(0)
- , m_operationInProgress(NoOperation)
- , m_blockAllocator()
, m_objectSpace(this)
- , m_storageSpace(this)
- , m_extraMemoryUsage(0)
- , m_machineThreads(this)
- , m_sharedData(vm)
- , m_slotVisitor(m_sharedData)
- , m_copyVisitor(m_sharedData)
+ , m_extraMemorySize(0)
+ , m_deprecatedExtraMemorySize(0)
+ , m_machineThreads(std::make_unique<MachineThreads>(this))
+ , m_collectorSlotVisitor(std::make_unique<SlotVisitor>(*this, "C"))
+ , m_mutatorSlotVisitor(std::make_unique<SlotVisitor>(*this, "M"))
+ , m_mutatorMarkStack(std::make_unique<MarkStackArray>())
+ , m_raceMarkStack(std::make_unique<MarkStackArray>())
+ , m_constraintSet(std::make_unique<MarkingConstraintSet>())
, m_handleSet(vm)
+ , m_codeBlocks(std::make_unique<CodeBlockSet>())
+ , m_jitStubRoutines(std::make_unique<JITStubRoutineSet>())
, m_isSafeToCollect(false)
- , m_writeBarrierBuffer(256)
, m_vm(vm)
- , m_lastGCLength(0)
- , m_lastCodeDiscardTime(WTF::monotonicallyIncreasingTime())
- , m_activityCallback(DefaultGCActivityCallback::create(this))
- , m_sweeper(IncrementalSweeper::create(this))
+ // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously
+ // schedule the timer if we've never done a collection.
+ , m_lastFullGCLength(0.01)
+ , m_lastEdenGCLength(0.01)
+#if USE(CF)
+ , m_runLoop(CFRunLoopGetCurrent())
+#endif // USE(CF)
+ , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
+ , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
+ , m_sweeper(adoptRef(new IncrementalSweeper(this)))
+ , m_stopIfNecessaryTimer(adoptRef(new StopIfNecessaryTimer(vm)))
, m_deferralDepth(0)
+#if USE(FOUNDATION)
+ , m_delayedReleaseRecursionCount(0)
+#endif
+ , m_sharedCollectorMarkStack(std::make_unique<MarkStackArray>())
+ , m_sharedMutatorMarkStack(std::make_unique<MarkStackArray>())
+ , m_helperClient(&heapHelperPool())
+ , m_threadLock(Box<Lock>::create())
+ , m_threadCondition(AutomaticThreadCondition::create())
{
- m_storageSpace.init();
+ m_worldState.store(0);
+
+ if (Options::useConcurrentGC()) {
+ if (Options::useStochasticMutatorScheduler())
+ m_scheduler = std::make_unique<StochasticSpaceTimeMutatorScheduler>(*this);
+ else
+ m_scheduler = std::make_unique<SpaceTimeMutatorScheduler>(*this);
+ } else {
+ // We simulate turning off concurrent GC by making the scheduler say that the world
+ // should always be stopped when the collector is running.
+ m_scheduler = std::make_unique<SynchronousStopTheWorldMutatorScheduler>();
+ }
+
+ if (Options::verifyHeap())
+ m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
+
+ m_collectorSlotVisitor->optimizeForStoppedMutator();
+
+ LockHolder locker(*m_threadLock);
+ m_thread = adoptRef(new Thread(locker, *this));
}
Heap::~Heap()
{
+ forEachSlotVisitor(
+ [&] (SlotVisitor& visitor) {
+ visitor.clearMarkStacks();
+ });
+ m_mutatorMarkStack->clear();
+ m_raceMarkStack->clear();
+
+ for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
+ WeakBlock::destroy(*this, block);
}
bool Heap::isPagedOut(double deadline)
{
- return m_objectSpace.isPagedOut(deadline) || m_storageSpace.isPagedOut(deadline);
+ return m_objectSpace.isPagedOut(deadline);
}
// The VM is being destroyed and the collector will never run again.
// Run all pending finalizers now because we won't get another chance.
void Heap::lastChanceToFinalize()
{
+ MonotonicTime before;
+ if (Options::logGC()) {
+ before = MonotonicTime::now();
+ dataLog("[GC<", RawPointer(this), ">: shutdown ");
+ }
+
RELEASE_ASSERT(!m_vm->entryScope);
- RELEASE_ASSERT(m_operationInProgress == NoOperation);
+ RELEASE_ASSERT(m_mutatorState == MutatorState::Running);
+
+ if (m_collectContinuouslyThread) {
+ {
+ LockHolder locker(m_collectContinuouslyLock);
+ m_shouldStopCollectingContinuously = true;
+ m_collectContinuouslyCondition.notifyOne();
+ }
+ waitForThreadCompletion(m_collectContinuouslyThread);
+ }
+
+ if (Options::logGC())
+ dataLog("1");
+
+ // Prevent new collections from being started. This is probably not even necessary, since we're not
+ // going to call into anything that starts collections. Still, this makes the algorithm more
+ // obviously sound.
+ m_isSafeToCollect = false;
+
+ if (Options::logGC())
+ dataLog("2");
+
+ bool isCollecting;
+ {
+ auto locker = holdLock(*m_threadLock);
+ RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
+ isCollecting = m_lastServedTicket < m_lastGrantedTicket;
+ }
+ if (isCollecting) {
+ if (Options::logGC())
+ dataLog("...]\n");
+
+ // Wait for the current collection to finish.
+ waitForCollector(
+ [&] (const AbstractLocker&) -> bool {
+ RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
+ return m_lastServedTicket == m_lastGrantedTicket;
+ });
+
+ if (Options::logGC())
+ dataLog("[GC<", RawPointer(this), ">: shutdown ");
+ }
+ if (Options::logGC())
+ dataLog("3");
+ RELEASE_ASSERT(m_requests.isEmpty());
+ RELEASE_ASSERT(m_lastServedTicket == m_lastGrantedTicket);
+
+ // Carefully bring the thread down.
+ bool stopped = false;
+ {
+ LockHolder locker(*m_threadLock);
+ stopped = m_thread->tryStop(locker);
+ m_threadShouldStop = true;
+ if (!stopped)
+ m_threadCondition->notifyOne(locker);
+ }
+
+ if (Options::logGC())
+ dataLog("4");
+
+ if (!stopped)
+ m_thread->join();
+
+ if (Options::logGC())
+ dataLog("5 ");
+
+ m_arrayBuffers.lastChanceToFinalize();
+ m_codeBlocks->lastChanceToFinalize(*m_vm);
+ m_objectSpace.stopAllocating();
m_objectSpace.lastChanceToFinalize();
+ releaseDelayedReleasedObjects();
+
+ sweepAllLogicallyEmptyWeakBlocks();
+
+ if (Options::logGC())
+ dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
}
-void Heap::reportExtraMemoryCostSlowCase(size_t cost)
+void Heap::releaseDelayedReleasedObjects()
{
- // Our frequency of garbage collection tries to balance memory use against speed
- // by collecting based on the number of newly created values. However, for values
- // that hold on to a great deal of memory that's not in the form of other JS values,
- // that is not good enough - in some cases a lot of those objects can pile up and
- // use crazy amounts of memory without a GC happening. So we track these extra
- // memory costs. Only unusually large objects are noted, and we only keep track
- // of this extra cost until the next GC. In garbage collected languages, most values
- // are either very short lived temporaries, or have extremely long lifetimes. So
- // if a large value survives one garbage collection, there is not much point to
- // collecting more frequently as long as it stays alive.
+#if USE(FOUNDATION)
+ // We need to guard against the case that releasing an object can create more objects due to the
+ // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up
+ // back here and could try to recursively release objects. We guard that with a recursive entry
+ // count. Only the initial call will release objects; recursive calls simply return and let
+ // the initial call to the function take care of any objects created during release time.
+ // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
+ // and use a temp Vector for the actual releasing.
+ if (!m_delayedReleaseRecursionCount++) {
+ while (!m_delayedReleaseObjects.isEmpty()) {
+ ASSERT(m_vm->currentThreadIsHoldingAPILock());
+
+ Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTFMove(m_delayedReleaseObjects);
+
+ {
+ // We need to drop locks before calling out to arbitrary code.
+ JSLock::DropAllLocks dropAllLocks(m_vm);
+
+ void* context = objc_autoreleasePoolPush();
+ objectsToRelease.clear();
+ objc_autoreleasePoolPop(context);
+ }
+ }
+ }
+ m_delayedReleaseRecursionCount--;
+#endif
+}
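
The reentrancy guard above exists because releasing an object can call back into JS, which can enqueue more delayed releases and re-enter this function. A minimal sketch of the same guard-plus-drain-loop shape, with an illustrative queue type standing in for m_delayedReleaseObjects:

#include <cstdio>
#include <functional>
#include <vector>

struct DelayedReleaseQueue {
    std::vector<std::function<void()>> pending;
    unsigned recursionCount { 0 };

    void drain()
    {
        if (!recursionCount++) {
            while (!pending.empty()) {
                // Move the current batch aside; releasing may enqueue more work.
                std::vector<std::function<void()>> batch = std::move(pending);
                pending.clear();
                for (auto& release : batch)
                    release(); // may call back into drain(); the guard makes that a no-op
            }
        }
        recursionCount--;
    }
};

int main()
{
    DelayedReleaseQueue queue;
    queue.pending.push_back([&] {
        std::printf("releasing outer object\n");
        queue.pending.push_back([] { std::printf("releasing object created during release\n"); });
        queue.drain(); // recursive call: returns immediately
    });
    queue.drain();
    return 0;
}
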
- didAllocate(cost);
+void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
+{
+ didAllocate(size);
collectIfNecessaryOrDefer();
}
+void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
+{
+ // FIXME: Change this to use SaturatedArithmetic when available.
+ // https://bugs.webkit.org/show_bug.cgi?id=170411
+ Checked<size_t, RecordOverflow> checkedNewSize = m_deprecatedExtraMemorySize;
+ checkedNewSize += size;
+ m_deprecatedExtraMemorySize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
+ reportExtraMemoryAllocatedSlowCase(size);
+}
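
The Checked<size_t, RecordOverflow> arithmetic here implements a saturating add: if the sum overflows, clamp to the maximum value rather than wrapping around. A small sketch of that behavior in plain C++ (saturatedAdd is an illustrative helper, not WTF's SaturatedArithmetic):

#include <cstddef>
#include <cstdio>
#include <limits>

static size_t saturatedAdd(size_t a, size_t b)
{
    size_t sum = a + b;
    if (sum < a) // unsigned wrap-around means the true sum did not fit
        return std::numeric_limits<size_t>::max();
    return sum;
}

int main()
{
    std::printf("%zu\n", saturatedAdd(3, 4)); // 7
    std::printf("%zu\n", saturatedAdd(std::numeric_limits<size_t>::max() - 1, 16)); // clamped to SIZE_MAX
    return 0;
}
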
+
void Heap::reportAbandonedObjectGraph()
{
// Our clients don't know exactly how much memory they
// are abandoning so we just guess for them.
- double abandonedBytes = 0.10 * m_sizeAfterLastCollect;
+ size_t abandonedBytes = static_cast<size_t>(0.1 * capacity());
// We want to accelerate the next collection. Because memory has just
// been abandoned, the next collection has the potential to
// be more profitable. Since allocation is the trigger for collection,
// we hasten the next collection by pretending that we've allocated more memory.
- didAbandon(abandonedBytes);
-}
-
-void Heap::didAbandon(size_t bytes)
-{
- if (m_activityCallback)
- m_activityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedThisCycle);
- m_bytesAbandonedThisCycle += bytes;
+ if (m_fullActivityCallback) {
+ m_fullActivityCallback->didAllocate(
+ m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
+ }
+ m_bytesAbandonedSinceLastFullCollect += abandonedBytes;
}
void Heap::protect(JSValue k)
@@ -368,290 +513,194 @@ void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
}
}
-void Heap::markProtectedObjects(HeapRootVisitor& heapRootVisitor)
+void Heap::finalizeUnconditionalFinalizers()
{
- ProtectCountSet::iterator end = m_protectedValues.end();
- for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
- heapRootVisitor.visit(&it->key);
+ while (m_unconditionalFinalizers.hasNext()) {
+ UnconditionalFinalizer* finalizer = m_unconditionalFinalizers.removeNext();
+ finalizer->finalizeUnconditionally();
+ }
}
-void Heap::pushTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
+void Heap::willStartIterating()
{
- m_tempSortingVectors.append(tempVector);
+ m_objectSpace.willStartIterating();
}
-void Heap::popTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
+void Heap::didFinishIterating()
{
- ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
- m_tempSortingVectors.removeLast();
+ m_objectSpace.didFinishIterating();
}
-void Heap::markTempSortVectors(HeapRootVisitor& heapRootVisitor)
+void Heap::completeAllJITPlans()
{
- typedef Vector<Vector<ValueStringPair, 0, UnsafeVectorOverflow>* > VectorOfValueStringVectors;
-
- VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
- for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
- Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempSortingVector = *it;
-
- Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
- for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt) {
- if (vectorIt->first)
- heapRootVisitor.visit(&vectorIt->first);
- }
- }
+#if ENABLE(JIT)
+ JITWorklist::instance()->completeAllForVM(*m_vm);
+#endif // ENABLE(JIT)
+ DFG::completeAllPlansForVM(*m_vm);
}
-void Heap::harvestWeakReferences()
+template<typename Func>
+void Heap::iterateExecutingAndCompilingCodeBlocks(const Func& func)
{
- m_slotVisitor.harvestWeakReferences();
+ m_codeBlocks->iterateCurrentlyExecuting(func);
+ DFG::iterateCodeBlocksForGC(*m_vm, func);
}
-void Heap::finalizeUnconditionalFinalizers()
+template<typename Func>
+void Heap::iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func& func)
{
- m_slotVisitor.finalizeUnconditionalFinalizers();
+ Vector<CodeBlock*, 256> codeBlocks;
+ iterateExecutingAndCompilingCodeBlocks(
+ [&] (CodeBlock* codeBlock) {
+ codeBlocks.append(codeBlock);
+ });
+ for (CodeBlock* codeBlock : codeBlocks)
+ func(codeBlock);
}
-inline JSStack& Heap::stack()
+void Heap::assertSharedMarkStacksEmpty()
{
- return m_vm->interpreter->stack();
+ bool ok = true;
+
+ if (!m_sharedCollectorMarkStack->isEmpty()) {
+ dataLog("FATAL: Shared collector mark stack not empty! It has ", m_sharedCollectorMarkStack->size(), " elements.\n");
+ ok = false;
+ }
+
+ if (!m_sharedMutatorMarkStack->isEmpty()) {
+ dataLog("FATAL: Shared mutator mark stack not empty! It has ", m_sharedMutatorMarkStack->size(), " elements.\n");
+ ok = false;
+ }
+
+ RELEASE_ASSERT(ok);
}
-void Heap::willStartIterating()
+void Heap::gatherStackRoots(ConservativeRoots& roots)
{
- m_objectSpace.willStartIterating();
+ m_machineThreads->gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks, m_currentThreadState);
}
-void Heap::didFinishIterating()
+void Heap::gatherJSStackRoots(ConservativeRoots& roots)
{
- m_objectSpace.didFinishIterating();
+#if !ENABLE(JIT)
+ m_vm->interpreter->cloopStack().gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks);
+#else
+ UNUSED_PARAM(roots);
+#endif
}
-void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
+void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
{
- ASSERT(isValidThreadState(m_vm));
- ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
- stack().gatherConservativeRoots(stackRoots);
- size_t stackRootCount = stackRoots.size();
- JSCell** registerRoots = stackRoots.roots();
- for (size_t i = 0; i < stackRootCount; i++) {
- setMarked(registerRoots[i]);
- roots.add(registerRoots[i]);
- }
+#if ENABLE(DFG_JIT)
+ m_vm->gatherConservativeRoots(roots);
+#else
+ UNUSED_PARAM(roots);
+#endif
}
-void Heap::markRoots()
+void Heap::beginMarking()
{
- SamplingRegion samplingRegion("Garbage Collection: Tracing");
-
- GCPHASE(MarkRoots);
- ASSERT(isValidThreadState(m_vm));
+ TimingScope timingScope(*this, "Heap::beginMarking");
+ if (m_collectionScope == CollectionScope::Full)
+ m_codeBlocks->clearMarksForFullCollection();
+ m_jitStubRoutines->clearMarks();
+ m_objectSpace.beginMarking();
+ setMutatorShouldBeFenced(true);
+}
-#if ENABLE(OBJECT_MARK_LOGGING)
- double gcStartTime = WTF::monotonicallyIncreasingTime();
+void Heap::removeDeadCompilerWorklistEntries()
+{
+#if ENABLE(DFG_JIT)
+ for (unsigned i = DFG::numberOfWorklists(); i--;)
+ DFG::existingWorklistForIndex(i).removeDeadPlans(*m_vm);
#endif
+}
- void* dummy;
-
- // We gather conservative roots before clearing mark bits because conservative
- // gathering uses the mark bits to determine whether a reference is valid.
- ConservativeRoots machineThreadRoots(&m_objectSpace.blocks(), &m_storageSpace);
- m_jitStubRoutines.clearMarks();
- {
- GCPHASE(GatherConservativeRoots);
- m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
- }
+bool Heap::isHeapSnapshotting() const
+{
+ HeapProfiler* heapProfiler = m_vm->heapProfiler();
+ if (UNLIKELY(heapProfiler))
+ return heapProfiler->activeSnapshotBuilder();
+ return false;
+}
- ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
- m_codeBlocks.clearMarks();
+struct GatherHeapSnapshotData : MarkedBlock::CountFunctor {
+ GatherHeapSnapshotData(HeapSnapshotBuilder& builder)
+ : m_builder(builder)
{
- GCPHASE(GatherStackRoots);
- stack().gatherConservativeRoots(stackRoots, m_jitStubRoutines, m_codeBlocks);
}
-#if ENABLE(DFG_JIT)
- ConservativeRoots scratchBufferRoots(&m_objectSpace.blocks(), &m_storageSpace);
+ IterationStatus operator()(HeapCell* heapCell, HeapCell::Kind kind) const
{
- GCPHASE(GatherScratchBufferRoots);
- m_vm->gatherConservativeRoots(scratchBufferRoots);
- }
-#endif
-
- {
- GCPHASE(ClearLivenessData);
- m_objectSpace.clearNewlyAllocated();
- m_objectSpace.clearMarks();
+ if (kind == HeapCell::JSCell) {
+ JSCell* cell = static_cast<JSCell*>(heapCell);
+ cell->methodTable()->heapSnapshot(cell, m_builder);
+ }
+ return IterationStatus::Continue;
}
- m_sharedData.didStartMarking();
- SlotVisitor& visitor = m_slotVisitor;
- visitor.setup();
- HeapRootVisitor heapRootVisitor(visitor);
+ HeapSnapshotBuilder& m_builder;
+};
-#if ENABLE(GGC)
- Vector<const JSCell*> rememberedSet(m_slotVisitor.markStack().size());
- m_slotVisitor.markStack().fillVector(rememberedSet);
-#endif
+void Heap::gatherExtraHeapSnapshotData(HeapProfiler& heapProfiler)
+{
+ if (HeapSnapshotBuilder* builder = heapProfiler.activeSnapshotBuilder()) {
+ HeapIterationScope heapIterationScope(*this);
+ GatherHeapSnapshotData functor(*builder);
+ m_objectSpace.forEachLiveCell(heapIterationScope, functor);
+ }
+}
+struct RemoveDeadHeapSnapshotNodes : MarkedBlock::CountFunctor {
+ RemoveDeadHeapSnapshotNodes(HeapSnapshot& snapshot)
+ : m_snapshot(snapshot)
{
- ParallelModeEnabler enabler(visitor);
-
- m_vm->smallStrings.visitStrongReferences(visitor);
-
- {
- GCPHASE(VisitMachineRoots);
- MARK_LOG_ROOT(visitor, "C++ Stack");
- visitor.append(machineThreadRoots);
- visitor.donateAndDrain();
- }
- {
- GCPHASE(VisitStackRoots);
- MARK_LOG_ROOT(visitor, "Stack");
- visitor.append(stackRoots);
- visitor.donateAndDrain();
- }
-#if ENABLE(DFG_JIT)
- {
- GCPHASE(VisitScratchBufferRoots);
- MARK_LOG_ROOT(visitor, "Scratch Buffers");
- visitor.append(scratchBufferRoots);
- visitor.donateAndDrain();
- }
-#endif
- {
- GCPHASE(VisitProtectedObjects);
- MARK_LOG_ROOT(visitor, "Protected Objects");
- markProtectedObjects(heapRootVisitor);
- visitor.donateAndDrain();
- }
- {
- GCPHASE(VisitTempSortVectors);
- MARK_LOG_ROOT(visitor, "Temp Sort Vectors");
- markTempSortVectors(heapRootVisitor);
- visitor.donateAndDrain();
- }
-
- {
- GCPHASE(MarkingArgumentBuffers);
- if (m_markListSet && m_markListSet->size()) {
- MARK_LOG_ROOT(visitor, "Argument Buffers");
- MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
- visitor.donateAndDrain();
- }
- }
- if (m_vm->exception()) {
- GCPHASE(MarkingException);
- MARK_LOG_ROOT(visitor, "Exceptions");
- heapRootVisitor.visit(m_vm->addressOfException());
- visitor.donateAndDrain();
- }
-
- {
- GCPHASE(VisitStrongHandles);
- MARK_LOG_ROOT(visitor, "Strong Handles");
- m_handleSet.visitStrongHandles(heapRootVisitor);
- visitor.donateAndDrain();
- }
-
- {
- GCPHASE(HandleStack);
- MARK_LOG_ROOT(visitor, "Handle Stack");
- m_handleStack.visit(heapRootVisitor);
- visitor.donateAndDrain();
- }
-
- {
- GCPHASE(TraceCodeBlocksAndJITStubRoutines);
- MARK_LOG_ROOT(visitor, "Trace Code Blocks and JIT Stub Routines");
- m_codeBlocks.traceMarked(visitor);
- m_jitStubRoutines.traceMarkedStubRoutines(visitor);
- visitor.donateAndDrain();
- }
-
-#if ENABLE(PARALLEL_GC)
- {
- GCPHASE(Convergence);
- visitor.drainFromShared(SlotVisitor::MasterDrain);
- }
-#endif
}
- // Weak references must be marked last because their liveness depends on
- // the liveness of the rest of the object graph.
+ IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const
{
- GCPHASE(VisitingLiveWeakHandles);
- MARK_LOG_ROOT(visitor, "Live Weak Handles");
- while (true) {
- m_objectSpace.visitWeakSets(heapRootVisitor);
- harvestWeakReferences();
- if (visitor.isEmpty())
- break;
- {
- ParallelModeEnabler enabler(visitor);
- visitor.donateAndDrain();
-#if ENABLE(PARALLEL_GC)
- visitor.drainFromShared(SlotVisitor::MasterDrain);
-#endif
- }
- }
+ if (kind == HeapCell::JSCell)
+ m_snapshot.sweepCell(static_cast<JSCell*>(cell));
+ return IterationStatus::Continue;
}
-#if ENABLE(GGC)
- {
- GCPHASE(ClearRememberedSet);
- for (unsigned i = 0; i < rememberedSet.size(); ++i) {
- const JSCell* cell = rememberedSet[i];
- MarkedBlock::blockFor(cell)->clearRemembered(cell);
- }
+ HeapSnapshot& m_snapshot;
+};
+
+void Heap::removeDeadHeapSnapshotNodes(HeapProfiler& heapProfiler)
+{
+ if (HeapSnapshot* snapshot = heapProfiler.mostRecentSnapshot()) {
+ HeapIterationScope heapIterationScope(*this);
+ RemoveDeadHeapSnapshotNodes functor(*snapshot);
+ m_objectSpace.forEachDeadCell(heapIterationScope, functor);
+ snapshot->shrinkToFit();
}
-#endif
+}
- GCCOUNTER(VisitedValueCount, visitor.visitCount());
+void Heap::updateObjectCounts()
+{
+ if (m_collectionScope == CollectionScope::Full)
+ m_totalBytesVisited = 0;
- m_sharedData.didFinishMarking();
-#if ENABLE(OBJECT_MARK_LOGGING)
- size_t visitCount = visitor.visitCount();
-#if ENABLE(PARALLEL_GC)
- visitCount += m_sharedData.childVisitCount();
-#endif
- MARK_LOG_MESSAGE2("\nNumber of live Objects after full GC %lu, took %.6f secs\n", visitCount, WTF::monotonicallyIncreasingTime() - gcStartTime);
-#endif
+ m_totalBytesVisitedThisCycle = bytesVisited();
+
+ m_totalBytesVisited += m_totalBytesVisitedThisCycle;
+}
- if (m_operationInProgress == EdenCollection) {
- m_totalBytesVisited += visitor.bytesVisited();
- m_totalBytesCopied += visitor.bytesCopied();
- } else {
- ASSERT(m_operationInProgress == FullCollection);
- m_totalBytesVisited = visitor.bytesVisited();
- m_totalBytesCopied = visitor.bytesCopied();
- }
-#if ENABLE(PARALLEL_GC)
- m_totalBytesVisited += m_sharedData.childBytesVisited();
- m_totalBytesCopied += m_sharedData.childBytesCopied();
-#endif
+void Heap::endMarking()
+{
+ forEachSlotVisitor(
+ [&] (SlotVisitor& visitor) {
+ visitor.reset();
+ });
- visitor.reset();
-#if ENABLE(PARALLEL_GC)
- m_sharedData.resetChildren();
-#endif
- m_sharedData.reset();
-}
-
-template <HeapOperation collectionType>
-void Heap::copyBackingStores()
-{
- m_storageSpace.startedCopying<collectionType>();
- if (m_storageSpace.shouldDoCopyPhase()) {
- m_sharedData.didStartCopying();
- m_copyVisitor.startCopying();
- m_copyVisitor.copyFromShared();
- m_copyVisitor.doneCopying();
- // We need to wait for everybody to finish and return their CopiedBlocks
- // before signaling that the phase is complete.
- m_storageSpace.doneCopying();
- m_sharedData.didFinishCopying();
- } else
- m_storageSpace.doneCopying();
+ assertSharedMarkStacksEmpty();
+ m_weakReferenceHarvesters.removeAll();
+
+ RELEASE_ASSERT(m_raceMarkStack->isEmpty());
+
+ m_objectSpace.endMarking();
+ setMutatorShouldBeFenced(Options::forceFencedBarrier());
}
size_t Heap::objectCount()
@@ -659,327 +708,1500 @@ size_t Heap::objectCount()
return m_objectSpace.objectCount();
}
-size_t Heap::extraSize()
+size_t Heap::extraMemorySize()
{
- return m_extraMemoryUsage + m_arrayBuffers.size();
+ // FIXME: Change this to use SaturatedArithmetic when available.
+ // https://bugs.webkit.org/show_bug.cgi?id=170411
+ Checked<size_t, RecordOverflow> checkedTotal = m_extraMemorySize;
+ checkedTotal += m_deprecatedExtraMemorySize;
+ checkedTotal += m_arrayBuffers.size();
+ size_t total = UNLIKELY(checkedTotal.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedTotal.unsafeGet();
+
+ ASSERT(m_objectSpace.capacity() >= m_objectSpace.size());
+ return std::min(total, std::numeric_limits<size_t>::max() - m_objectSpace.capacity());
}
size_t Heap::size()
{
- return m_objectSpace.size() + m_storageSpace.size() + extraSize();
+ return m_objectSpace.size() + extraMemorySize();
}
size_t Heap::capacity()
{
- return m_objectSpace.capacity() + m_storageSpace.capacity() + extraSize();
-}
-
-size_t Heap::sizeAfterCollect()
-{
- // The result here may not agree with the normal Heap::size().
- // This is due to the fact that we only count live copied bytes
- // rather than all used (including dead) copied bytes, thus it's
- // always the case that m_totalBytesCopied <= m_storageSpace.size().
- ASSERT(m_totalBytesCopied <= m_storageSpace.size());
- return m_totalBytesVisited + m_totalBytesCopied + extraSize();
+ return m_objectSpace.capacity() + extraMemorySize();
}
size_t Heap::protectedGlobalObjectCount()
{
- return forEachProtectedCell<CountIfGlobalObject>();
+ size_t result = 0;
+ forEachProtectedCell(
+ [&] (JSCell* cell) {
+ if (cell->isObject() && asObject(cell)->isGlobalObject())
+ result++;
+ });
+ return result;
}
size_t Heap::globalObjectCount()
{
HeapIterationScope iterationScope(*this);
- return m_objectSpace.forEachLiveCell<CountIfGlobalObject>(iterationScope);
+ size_t result = 0;
+ m_objectSpace.forEachLiveCell(
+ iterationScope,
+ [&] (HeapCell* heapCell, HeapCell::Kind kind) -> IterationStatus {
+ if (kind != HeapCell::JSCell)
+ return IterationStatus::Continue;
+ JSCell* cell = static_cast<JSCell*>(heapCell);
+ if (cell->isObject() && asObject(cell)->isGlobalObject())
+ result++;
+ return IterationStatus::Continue;
+ });
+ return result;
}
size_t Heap::protectedObjectCount()
{
- return forEachProtectedCell<Count>();
+ size_t result = 0;
+ forEachProtectedCell(
+ [&] (JSCell*) {
+ result++;
+ });
+ return result;
}
-PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
+std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
- return forEachProtectedCell<RecordType>();
+ std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
+ forEachProtectedCell(
+ [&] (JSCell* cell) {
+ recordType(*vm(), *result, cell);
+ });
+ return result;
}
-PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
+std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
{
+ std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
HeapIterationScope iterationScope(*this);
- return m_objectSpace.forEachLiveCell<RecordType>(iterationScope);
+ m_objectSpace.forEachLiveCell(
+ iterationScope,
+ [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
+ if (kind == HeapCell::JSCell)
+ recordType(*vm(), *result, static_cast<JSCell*>(cell));
+ return IterationStatus::Continue;
+ });
+ return result;
}
-void Heap::deleteAllCompiledCode()
+void Heap::deleteAllCodeBlocks(DeleteAllCodeEffort effort)
{
- // If JavaScript is running, it's not safe to delete code, since we'll end
- // up deleting code that is live on the stack.
- if (m_vm->entryScope)
+ if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
return;
+
+ PreventCollectionScope preventCollectionScope(*this);
+
+ // If JavaScript is running, it's not safe to delete all JavaScript code, since
+ // we'll end up returning to deleted code.
+ RELEASE_ASSERT(!m_vm->entryScope);
+ RELEASE_ASSERT(!m_collectionScope);
+
+ completeAllJITPlans();
+
+ for (ExecutableBase* executable : m_executables)
+ executable->clearCode();
+}
+
+void Heap::deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort effort)
+{
+ if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
+ return;
+
+ PreventCollectionScope preventCollectionScope(*this);
- for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
+ RELEASE_ASSERT(!m_collectionScope);
+
+ for (ExecutableBase* current : m_executables) {
if (!current->isFunctionExecutable())
continue;
- static_cast<FunctionExecutable*>(current)->clearCodeIfNotCompiling();
+ static_cast<FunctionExecutable*>(current)->unlinkedExecutable()->clearCode();
}
-
- m_codeBlocks.clearMarks();
- m_codeBlocks.deleteUnmarkedAndUnreferenced();
}
-void Heap::deleteUnmarkedCompiledCode()
+void Heap::clearUnmarkedExecutables()
{
- ExecutableBase* next;
- for (ExecutableBase* current = m_compiledCode.head(); current; current = next) {
- next = current->next();
+ for (unsigned i = m_executables.size(); i--;) {
+ ExecutableBase* current = m_executables[i];
if (isMarked(current))
continue;
- // We do this because executable memory is limited on some platforms and because
- // CodeBlock requires eager finalization.
- ExecutableBase::clearCodeVirtual(current);
- m_compiledCode.remove(current);
+ // Eagerly dereference the Executable's JITCode in order to run watchpoint
+ // destructors. Otherwise, watchpoints might fire for deleted CodeBlocks.
+ current->clearCode();
+ std::swap(m_executables[i], m_executables.last());
+ m_executables.removeLast();
}
- m_codeBlocks.deleteUnmarkedAndUnreferenced();
- m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines();
+ m_executables.shrinkToFit();
+}
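
clearUnmarkedExecutables() uses the swap-with-last idiom: overwrite the dead entry with the vector's last element and drop the last slot, an O(1) removal that is fine because m_executables is unordered. A standalone sketch of the idiom, iterating backwards so the swapped-in element is never skipped (eraseUnordered and the modulo "mark" test are illustrative):

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

static void eraseUnordered(std::vector<int>& values, size_t index)
{
    std::swap(values[index], values.back());
    values.pop_back();
}

int main()
{
    std::vector<int> values { 10, 20, 30, 40 };
    // Walk backwards so an element swapped in from the end is still examined.
    for (size_t i = values.size(); i--;) {
        if (values[i] % 20 == 0) // pretend "not marked"
            eraseUnordered(values, i);
    }
    for (int value : values)
        std::printf("%d\n", value); // prints 10 and 30
    return 0;
}
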
+
+void Heap::deleteUnmarkedCompiledCode()
+{
+ clearUnmarkedExecutables();
+ m_codeBlocks->deleteUnmarkedAndUnreferenced(*m_vm, *m_lastCollectionScope);
+ m_jitStubRoutines->deleteUnmarkedJettisonedStubRoutines();
}
-void Heap::addToRememberedSet(const JSCell* cell)
+void Heap::addToRememberedSet(const JSCell* constCell)
{
+ JSCell* cell = const_cast<JSCell*>(constCell);
ASSERT(cell);
- ASSERT(!Options::enableConcurrentJIT() || !isCompilationThread());
- if (isInRememberedSet(cell))
- return;
- MarkedBlock::blockFor(cell)->setRemembered(cell);
- m_slotVisitor.unconditionallyAppend(const_cast<JSCell*>(cell));
+ ASSERT(!Options::useConcurrentJIT() || !isCompilationThread());
+ m_barriersExecuted++;
+ if (m_mutatorShouldBeFenced) {
+ WTF::loadLoadFence();
+ if (!isMarkedConcurrently(cell)) {
+ // During a full collection, a store into an unmarked object that had survived past
+ // collections will manifest as a store to an unmarked PossiblyBlack object. If the
+ // object gets marked at some time after this then it will go down the normal marking
+ // path. So, we don't have to remember this object. We could return here. But we go
+ // further and attempt to re-white the object.
+
+ RELEASE_ASSERT(m_collectionScope == CollectionScope::Full);
+
+ if (cell->atomicCompareExchangeCellStateStrong(CellState::PossiblyBlack, CellState::DefinitelyWhite) == CellState::PossiblyBlack) {
+ // Now we protect against this race:
+ //
+ // 1) Object starts out black + unmarked.
+ // --> We do isMarkedConcurrently here.
+ // 2) Object is marked and greyed.
+ // 3) Object is scanned and blacked.
+ // --> We do atomicCompareExchangeCellStateStrong here.
+ //
+ // In this case we would have made the object white again, even though it should
+ // be black. This check lets us correct our mistake. This relies on the fact that
+ // isMarkedConcurrently converges monotonically to true.
+ if (isMarkedConcurrently(cell)) {
+ // It's difficult to work out whether the object should be grey or black at
+ // this point. We say black conservatively.
+ cell->setCellState(CellState::PossiblyBlack);
+ }
+
+ // Either way, we can return. Most likely, the object was not marked, and so the
+ // object is now labeled white. This means that future barrier executions will not
+ // fire. In the unlikely event that the object had become marked, we can still
+ // return anyway, since we proved that the object was not marked at the time that
+ // we executed this slow path.
+ }
+
+ return;
+ }
+ } else
+ ASSERT(Heap::isMarkedConcurrently(cell));
+ // It could be that the object was *just* marked. This means that the collector may set the
+ // state to DefinitelyGrey and then to PossiblyOldOrBlack at any time. It's OK for us to
+ // race with the collector here. If we win then this is accurate because the object _will_
+ // get scanned again. If we lose then someone else will barrier the object again. That would
+ // be unfortunate but not the end of the world.
+ cell->setCellState(CellState::PossiblyGrey);
+ m_mutatorMarkStack->append(cell);
+}
+
+void Heap::sweepSynchronously()
+{
+ double before = 0;
+ if (Options::logGC()) {
+ dataLog("Full sweep: ", capacity() / 1024, "kb ");
+ before = currentTimeMS();
+ }
+ m_objectSpace.sweep();
+ m_objectSpace.shrink();
+ if (Options::logGC()) {
+ double after = currentTimeMS();
+ dataLog("=> ", capacity() / 1024, "kb, ", after - before, "ms");
+ }
}
void Heap::collectAllGarbage()
{
if (!m_isSafeToCollect)
return;
+
+ collectSync(CollectionScope::Full);
- m_shouldDoFullCollection = true;
- collect();
+ DeferGCForAWhile deferGC(*this);
+ if (UNLIKELY(Options::useImmortalObjects()))
+ sweeper()->stopSweeping();
- SamplingRegion samplingRegion("Garbage Collection: Sweeping");
- DelayedReleaseScope delayedReleaseScope(m_objectSpace);
- m_objectSpace.sweep();
- m_objectSpace.shrink();
+ bool alreadySweptInCollectSync = Options::sweepSynchronously();
+ if (!alreadySweptInCollectSync) {
+ if (Options::logGC())
+ dataLog("[GC<", RawPointer(this), ">: ");
+ sweepSynchronously();
+ if (Options::logGC())
+ dataLog("]\n");
+ }
+ m_objectSpace.assertNoUnswept();
+
+ sweepAllLogicallyEmptyWeakBlocks();
}
-static double minute = 60.0;
+void Heap::collectAsync(std::optional<CollectionScope> scope)
+{
+ if (!m_isSafeToCollect)
+ return;
-void Heap::collect()
+ bool alreadyRequested = false;
+ {
+ LockHolder locker(*m_threadLock);
+ for (std::optional<CollectionScope> request : m_requests) {
+ if (scope) {
+ if (scope == CollectionScope::Eden) {
+ alreadyRequested = true;
+ break;
+ } else {
+ RELEASE_ASSERT(scope == CollectionScope::Full);
+ if (request == CollectionScope::Full) {
+ alreadyRequested = true;
+ break;
+ }
+ }
+ } else {
+ if (!request || request == CollectionScope::Full) {
+ alreadyRequested = true;
+ break;
+ }
+ }
+ }
+ }
+ if (alreadyRequested)
+ return;
+
+ requestCollection(scope);
+}
+
+void Heap::collectSync(std::optional<CollectionScope> scope)
{
-#if ENABLE(ALLOCATION_LOGGING)
- dataLogF("JSC GC starting collection.\n");
-#endif
+ if (!m_isSafeToCollect)
+ return;
- double before = 0;
- if (Options::logGC()) {
- dataLog("[GC: ");
- before = currentTimeMS();
+ waitForCollection(requestCollection(scope));
+}
+
+bool Heap::shouldCollectInCollectorThread(const AbstractLocker&)
+{
+ RELEASE_ASSERT(m_requests.isEmpty() == (m_lastServedTicket == m_lastGrantedTicket));
+ RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
+
+ if (false)
+ dataLog("Mutator has the conn = ", !!(m_worldState.load() & mutatorHasConnBit), "\n");
+
+ return !m_requests.isEmpty() && !(m_worldState.load() & mutatorHasConnBit);
+}
+
+void Heap::collectInCollectorThread()
+{
+ for (;;) {
+ RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Collector, nullptr);
+ switch (result) {
+ case RunCurrentPhaseResult::Finished:
+ return;
+ case RunCurrentPhaseResult::Continue:
+ break;
+ case RunCurrentPhaseResult::NeedCurrentThreadState:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+}
+
+void Heap::checkConn(GCConductor conn)
+{
+ switch (conn) {
+ case GCConductor::Mutator:
+ RELEASE_ASSERT(m_worldState.load() & mutatorHasConnBit);
+ return;
+ case GCConductor::Collector:
+ RELEASE_ASSERT(!(m_worldState.load() & mutatorHasConnBit));
+ return;
}
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+auto Heap::runCurrentPhase(GCConductor conn, CurrentThreadState* currentThreadState) -> RunCurrentPhaseResult
+{
+ checkConn(conn);
+ m_currentThreadState = currentThreadState;
- SamplingRegion samplingRegion("Garbage Collection");
+ // If the collector transfers the conn to the mutator, it leaves us in between phases.
+ if (!finishChangingPhase(conn)) {
+ // A mischievous mutator could repeatedly relinquish the conn back to us. We try to avoid doing
+ // this, but it's probably not the end of the world if it did happen.
+ if (false)
+ dataLog("Conn bounce-back.\n");
+ return RunCurrentPhaseResult::Finished;
+ }
- RELEASE_ASSERT(!m_deferralDepth);
- GCPHASE(Collect);
- ASSERT(vm()->currentThreadIsHoldingAPILock());
- RELEASE_ASSERT(vm()->identifierTable == wtfThreadData().currentIdentifierTable());
- ASSERT(m_isSafeToCollect);
- JAVASCRIPTCORE_GC_BEGIN();
- RELEASE_ASSERT(m_operationInProgress == NoOperation);
+ bool result = false;
+ switch (m_currentPhase) {
+ case CollectorPhase::NotRunning:
+ result = runNotRunningPhase(conn);
+ break;
+
+ case CollectorPhase::Begin:
+ result = runBeginPhase(conn);
+ break;
+
+ case CollectorPhase::Fixpoint:
+ if (!currentThreadState && conn == GCConductor::Mutator)
+ return RunCurrentPhaseResult::NeedCurrentThreadState;
+
+ result = runFixpointPhase(conn);
+ break;
+
+ case CollectorPhase::Concurrent:
+ result = runConcurrentPhase(conn);
+ break;
+
+ case CollectorPhase::Reloop:
+ result = runReloopPhase(conn);
+ break;
+
+ case CollectorPhase::End:
+ result = runEndPhase(conn);
+ break;
+ }
+
+ return result ? RunCurrentPhaseResult::Continue : RunCurrentPhaseResult::Finished;
+}
+
+NEVER_INLINE bool Heap::runNotRunningPhase(GCConductor conn)
+{
+ // Check m_requests since the mutator calls this to poll what's going on.
+ {
+ auto locker = holdLock(*m_threadLock);
+ if (m_requests.isEmpty())
+ return false;
+ }
+ return changePhase(conn, CollectorPhase::Begin);
+}
+
+NEVER_INLINE bool Heap::runBeginPhase(GCConductor conn)
+{
+ m_currentGCStartTime = MonotonicTime::now();
+
+ std::optional<CollectionScope> scope;
{
- RecursiveAllocationScope scope(*this);
- m_vm->prepareToDiscardCode();
+ LockHolder locker(*m_threadLock);
+ RELEASE_ASSERT(!m_requests.isEmpty());
+ scope = m_requests.first();
}
+
+ if (Options::logGC())
+ dataLog("[GC<", RawPointer(this), ">: START ", gcConductorShortName(conn), " ", capacity() / 1024, "kb ");
- bool isFullCollection = m_shouldDoFullCollection;
- if (isFullCollection) {
- m_operationInProgress = FullCollection;
- m_slotVisitor.clearMarkStack();
- m_shouldDoFullCollection = false;
- if (Options::logGC())
- dataLog("FullCollection, ");
- } else {
-#if ENABLE(GGC)
- m_operationInProgress = EdenCollection;
- if (Options::logGC())
- dataLog("EdenCollection, ");
-#else
- m_operationInProgress = FullCollection;
- m_slotVisitor.clearMarkStack();
- if (Options::logGC())
- dataLog("FullCollection, ");
-#endif
+ m_beforeGC = MonotonicTime::now();
+
+ if (m_collectionScope) {
+ dataLog("Collection scope already set during GC: ", *m_collectionScope, "\n");
+ RELEASE_ASSERT_NOT_REACHED();
}
- if (m_operationInProgress == FullCollection)
- m_extraMemoryUsage = 0;
+
+ willStartCollection(scope);
+
+ if (m_verifier) {
+ // Verify that live objects from the last GC cycle haven't been corrupted by
+ // mutators before we begin this new GC cycle.
+ m_verifier->verify(HeapVerifier::Phase::BeforeGC);
+
+ m_verifier->initializeGCCycle();
+ m_verifier->gatherLiveObjects(HeapVerifier::Phase::BeforeMarking);
+ }
+
+ prepareForMarking();
+
+ if (m_collectionScope == CollectionScope::Full) {
+ m_opaqueRoots.clear();
+ m_collectorSlotVisitor->clearMarkStacks();
+ m_mutatorMarkStack->clear();
+ }
+
+ RELEASE_ASSERT(m_raceMarkStack->isEmpty());
+
+ beginMarking();
+
+ forEachSlotVisitor(
+ [&] (SlotVisitor& visitor) {
+ visitor.didStartMarking();
+ });
+
+ m_parallelMarkersShouldExit = false;
+
+ m_helperClient.setFunction(
+ [this] () {
+ SlotVisitor* slotVisitor;
+ {
+ LockHolder locker(m_parallelSlotVisitorLock);
+ if (m_availableParallelSlotVisitors.isEmpty()) {
+ std::unique_ptr<SlotVisitor> newVisitor = std::make_unique<SlotVisitor>(
+ *this, toCString("P", m_parallelSlotVisitors.size() + 1));
+
+ if (Options::optimizeParallelSlotVisitorsForStoppedMutator())
+ newVisitor->optimizeForStoppedMutator();
+
+ newVisitor->didStartMarking();
+
+ slotVisitor = newVisitor.get();
+ m_parallelSlotVisitors.append(WTFMove(newVisitor));
+ } else
+ slotVisitor = m_availableParallelSlotVisitors.takeLast();
+ }
+
+ WTF::registerGCThread(GCThreadType::Helper);
+
+ {
+ ParallelModeEnabler parallelModeEnabler(*slotVisitor);
+ slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
+ }
+
+ {
+ LockHolder locker(m_parallelSlotVisitorLock);
+ m_availableParallelSlotVisitors.append(slotVisitor);
+ }
+ });
- if (m_activityCallback)
- m_activityCallback->willCollect();
+ SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
- double lastGCStartTime = WTF::monotonicallyIncreasingTime();
- if (lastGCStartTime - m_lastCodeDiscardTime > minute) {
- deleteAllCompiledCode();
- m_lastCodeDiscardTime = WTF::monotonicallyIncreasingTime();
+ m_constraintSet->didStartMarking();
+
+ m_scheduler->beginCollection();
+ if (Options::logGC())
+ m_scheduler->log();
+
+ // After this, we will almost certainly fall through all of the "slotVisitor.isEmpty()"
+ // checks because bootstrap would have put things into the visitor. So, we should fall
+ // through to draining.
+
+ if (!slotVisitor.didReachTermination()) {
+ dataLog("Fatal: SlotVisitor should think that GC should terminate before constraint solving, but it does not think this.\n");
+ dataLog("slotVisitor.isEmpty(): ", slotVisitor.isEmpty(), "\n");
+ dataLog("slotVisitor.collectorMarkStack().isEmpty(): ", slotVisitor.collectorMarkStack().isEmpty(), "\n");
+ dataLog("slotVisitor.mutatorMarkStack().isEmpty(): ", slotVisitor.mutatorMarkStack().isEmpty(), "\n");
+ dataLog("m_numberOfActiveParallelMarkers: ", m_numberOfActiveParallelMarkers, "\n");
+ dataLog("m_sharedCollectorMarkStack->isEmpty(): ", m_sharedCollectorMarkStack->isEmpty(), "\n");
+ dataLog("m_sharedMutatorMarkStack->isEmpty(): ", m_sharedMutatorMarkStack->isEmpty(), "\n");
+ dataLog("slotVisitor.didReachTermination(): ", slotVisitor.didReachTermination(), "\n");
+ RELEASE_ASSERT_NOT_REACHED();
}
+
+ return changePhase(conn, CollectorPhase::Fixpoint);
+}
+NEVER_INLINE bool Heap::runFixpointPhase(GCConductor conn)
+{
+ RELEASE_ASSERT(conn == GCConductor::Collector || m_currentThreadState);
+
+ SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
+
+ if (Options::logGC()) {
+ HashMap<const char*, size_t> visitMap;
+ forEachSlotVisitor(
+ [&] (SlotVisitor& slotVisitor) {
+ visitMap.add(slotVisitor.codeName(), slotVisitor.bytesVisited() / 1024);
+ });
+
+ auto perVisitorDump = sortedMapDump(
+ visitMap,
+ [] (const char* a, const char* b) -> bool {
+ return strcmp(a, b) < 0;
+ },
+ ":", " ");
+
+ dataLog("v=", bytesVisited() / 1024, "kb (", perVisitorDump, ") o=", m_opaqueRoots.size(), " b=", m_barriersExecuted, " ");
+ }
+
+ if (slotVisitor.didReachTermination()) {
+ m_scheduler->didReachTermination();
+
+ assertSharedMarkStacksEmpty();
+
+ slotVisitor.mergeIfNecessary();
+ for (auto& parallelVisitor : m_parallelSlotVisitors)
+ parallelVisitor->mergeIfNecessary();
+
+ // FIXME: Take m_mutatorDidRun into account when scheduling constraints. Most likely,
+ // we don't have to execute root constraints again unless the mutator did run. At a
+ // minimum, we could use this for work estimates - but it's probably more than just an
+ // estimate.
+ // https://bugs.webkit.org/show_bug.cgi?id=166828
+
+ // FIXME: We should take advantage of the fact that we could timeout. This only comes
+ // into play if we're executing constraints for the first time. But that will matter
+ // when we have deep stacks or a lot of DOM stuff.
+ // https://bugs.webkit.org/show_bug.cgi?id=166831
+
+ // Wondering what this does? Look at Heap::addCoreConstraints(). The DOM and others can also
+ // add their own using Heap::addMarkingConstraint().
+ bool converged =
+ m_constraintSet->executeConvergence(slotVisitor, MonotonicTime::infinity());
+ if (converged && slotVisitor.isEmpty()) {
+ assertSharedMarkStacksEmpty();
+ return changePhase(conn, CollectorPhase::End);
+ }
+
+ m_scheduler->didExecuteConstraints();
+ }
+
+ if (Options::logGC())
+ dataLog(slotVisitor.collectorMarkStack().size(), "+", m_mutatorMarkStack->size() + slotVisitor.mutatorMarkStack().size(), " ");
+
{
- GCPHASE(StopAllocation);
- m_objectSpace.stopAllocating();
- if (m_operationInProgress == FullCollection)
- m_storageSpace.didStartFullCollection();
+ ParallelModeEnabler enabler(slotVisitor);
+ slotVisitor.drainInParallel(m_scheduler->timeToResume());
}
+
+ m_scheduler->synchronousDrainingDidStall();
- {
- GCPHASE(FlushWriteBarrierBuffer);
- if (m_operationInProgress == EdenCollection)
- m_writeBarrierBuffer.flush(*this);
- else
- m_writeBarrierBuffer.reset();
+ if (slotVisitor.didReachTermination())
+ return true; // This is like relooping to the top of runFixpointPhase().
+
+ if (!m_scheduler->shouldResume())
+ return true;
+
+ m_scheduler->willResume();
+
+ if (Options::logGC()) {
+ double thisPauseMS = (MonotonicTime::now() - m_stopTime).milliseconds();
+ dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), ")...]\n");
}
- markRoots();
+ // Forgive the mutator for its past failures to keep up.
+ // FIXME: Figure out if moving this to different places results in perf changes.
+ m_incrementBalance = 0;
+
+ return changePhase(conn, CollectorPhase::Concurrent);
+}
+
+NEVER_INLINE bool Heap::runConcurrentPhase(GCConductor conn)
+{
+ SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
+
+ switch (conn) {
+ case GCConductor::Mutator: {
+ // When the mutator has the conn, we poll runConcurrentPhase() every time someone says
+ // stopIfNecessary(), so on every allocation slow path. When that happens we poll if it's time
+ // to stop and do some work.
+ if (slotVisitor.didReachTermination()
+ || m_scheduler->shouldStop())
+ return changePhase(conn, CollectorPhase::Reloop);
+
+ // We could be coming from a collector phase that stuffed our SlotVisitor, so make sure we donate
+ // everything. This is super cheap if the SlotVisitor is already empty.
+ slotVisitor.donateAll();
+ return false;
+ }
+ case GCConductor::Collector: {
+ {
+ ParallelModeEnabler enabler(slotVisitor);
+ slotVisitor.drainInParallelPassively(m_scheduler->timeToStop());
+ }
+ return changePhase(conn, CollectorPhase::Reloop);
+ } }
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+}
+
+NEVER_INLINE bool Heap::runReloopPhase(GCConductor conn)
+{
+ if (Options::logGC())
+ dataLog("[GC<", RawPointer(this), ">: ", gcConductorShortName(conn), " ");
+
+ m_scheduler->didStop();
+
+ if (Options::logGC())
+ m_scheduler->log();
+
+ return changePhase(conn, CollectorPhase::Fixpoint);
+}
+
+NEVER_INLINE bool Heap::runEndPhase(GCConductor conn)
+{
+ m_scheduler->endCollection();
+
{
- GCPHASE(ReapingWeakHandles);
- m_objectSpace.reapWeakSets();
+ auto locker = holdLock(m_markingMutex);
+ m_parallelMarkersShouldExit = true;
+ m_markingConditionVariable.notifyAll();
+ }
+ m_helperClient.finish();
+
+ iterateExecutingAndCompilingCodeBlocks(
+ [&] (CodeBlock* codeBlock) {
+ writeBarrier(codeBlock);
+ });
+
+ updateObjectCounts();
+ endMarking();
+
+ if (m_verifier) {
+ m_verifier->gatherLiveObjects(HeapVerifier::Phase::AfterMarking);
+ m_verifier->verify(HeapVerifier::Phase::AfterMarking);
+ }
+
+ if (vm()->typeProfiler())
+ vm()->typeProfiler()->invalidateTypeSetCache();
+
+ reapWeakHandles();
+ pruneStaleEntriesFromWeakGCMaps();
+ sweepArrayBuffers();
+ snapshotUnswept();
+ finalizeUnconditionalFinalizers();
+ removeDeadCompilerWorklistEntries();
+ notifyIncrementalSweeper();
+
+ m_codeBlocks->iterateCurrentlyExecuting(
+ [&] (CodeBlock* codeBlock) {
+ writeBarrier(codeBlock);
+ });
+ m_codeBlocks->clearCurrentlyExecuting();
+
+ m_objectSpace.prepareForAllocation();
+ updateAllocationLimits();
+
+ didFinishCollection();
+
+ if (m_verifier) {
+ m_verifier->trimDeadObjects();
+ m_verifier->verify(HeapVerifier::Phase::AfterGC);
}
- JAVASCRIPTCORE_GC_MARKED();
+ if (false) {
+ dataLog("Heap state after GC:\n");
+ m_objectSpace.dumpBits();
+ }
+
+ if (Options::logGC()) {
+ double thisPauseMS = (m_afterGC - m_stopTime).milliseconds();
+ dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), "), cycle ", (m_afterGC - m_beforeGC).milliseconds(), "ms END]\n");
+ }
{
- GCPHASE(SweepingArrayBuffers);
- m_arrayBuffers.sweep();
+ auto locker = holdLock(*m_threadLock);
+ m_requests.removeFirst();
+ m_lastServedTicket++;
+ clearMutatorWaiting();
+ }
+ ParkingLot::unparkAll(&m_worldState);
+
+ if (false)
+ dataLog("GC END!\n");
+
+ setNeedFinalize();
+
+ m_lastGCStartTime = m_currentGCStartTime;
+ m_lastGCEndTime = MonotonicTime::now();
+
+ return changePhase(conn, CollectorPhase::NotRunning);
+}
+
+bool Heap::changePhase(GCConductor conn, CollectorPhase nextPhase)
+{
+ checkConn(conn);
+
+ m_nextPhase = nextPhase;
+
+ return finishChangingPhase(conn);
+}
+
+NEVER_INLINE bool Heap::finishChangingPhase(GCConductor conn)
+{
+ checkConn(conn);
+
+ if (m_nextPhase == m_currentPhase)
+ return true;
+
+ if (false)
+ dataLog(conn, ": Going to phase: ", m_nextPhase, " (from ", m_currentPhase, ")\n");
+
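+ // A phase change only needs special handling when it crosses the boundary between phases that
+ // require the world to be suspended and phases that do not; in that case we resume or stop the
+ // periphery below and, depending on who has the conn, the mutator as well.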
+ bool suspendedBefore = worldShouldBeSuspended(m_currentPhase);
+ bool suspendedAfter = worldShouldBeSuspended(m_nextPhase);
+
+ if (suspendedBefore != suspendedAfter) {
+ if (suspendedBefore) {
+ RELEASE_ASSERT(!suspendedAfter);
+
+ resumeThePeriphery();
+ if (conn == GCConductor::Collector)
+ resumeTheMutator();
+ else
+ handleNeedFinalize();
+ } else {
+ RELEASE_ASSERT(!suspendedBefore);
+ RELEASE_ASSERT(suspendedAfter);
+
+ if (conn == GCConductor::Collector) {
+ waitWhileNeedFinalize();
+ if (!stopTheMutator()) {
+ if (false)
+ dataLog("Returning false.\n");
+ return false;
+ }
+ } else {
+ sanitizeStackForVM(m_vm);
+ handleNeedFinalize();
+ }
+ stopThePeriphery(conn);
+ }
}
+
+ m_currentPhase = m_nextPhase;
+ return true;
+}
- if (m_operationInProgress == FullCollection) {
- m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
- MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
- m_objectSpace.forEachBlock(functor);
+void Heap::stopThePeriphery(GCConductor conn)
+{
+ if (m_collectorBelievesThatTheWorldIsStopped) {
+ dataLog("FATAL: world already stopped.\n");
+ RELEASE_ASSERT_NOT_REACHED();
}
+
+ if (m_mutatorDidRun)
+ m_mutatorExecutionVersion++;
+
+ m_mutatorDidRun = false;
- if (m_operationInProgress == FullCollection)
- copyBackingStores<FullCollection>();
- else
- copyBackingStores<EdenCollection>();
+ suspendCompilerThreads();
+ m_collectorBelievesThatTheWorldIsStopped = true;
+
+ forEachSlotVisitor(
+ [&] (SlotVisitor& slotVisitor) {
+ slotVisitor.updateMutatorIsStopped(NoLockingNecessary);
+ });
+#if ENABLE(JIT)
{
- GCPHASE(FinalizeUnconditionalFinalizers);
- finalizeUnconditionalFinalizers();
+ DeferGCForAWhile awhile(*this);
+ if (JITWorklist::instance()->completeAllForVM(*m_vm)
+ && conn == GCConductor::Collector)
+ setGCDidJIT();
+ }
+#else
+ UNUSED_PARAM(conn);
+#endif // ENABLE(JIT)
+
+ vm()->shadowChicken().update(*vm(), vm()->topCallFrame);
+
+ m_structureIDTable.flushOldTables();
+ m_objectSpace.stopAllocating();
+
+ m_stopTime = MonotonicTime::now();
+}
+
+NEVER_INLINE void Heap::resumeThePeriphery()
+{
+ // Calling resumeAllocating does the Right Thing depending on whether this is the end of a
+ // collection cycle or this is just a concurrent phase within a collection cycle:
+ // - At end of collection cycle: it's a no-op because prepareForAllocation already cleared the
+ // last active block.
+ // - During collection cycle: it reinstates the last active block.
+ m_objectSpace.resumeAllocating();
+
+ m_barriersExecuted = 0;
+
+ if (!m_collectorBelievesThatTheWorldIsStopped) {
+ dataLog("Fatal: collector does not believe that the world is stopped.\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ m_collectorBelievesThatTheWorldIsStopped = false;
+
+ // FIXME: This could be vastly improved: we want to grab the locks in the order in which they
+ // become available. We basically want a lockAny() method that will lock whatever lock is available
+ // and tell you which one it locked. That would require teaching ParkingLot how to park on multiple
+ // queues at once, which is totally achievable - it would just require memory allocation, which is
+ // suboptimal but not a disaster. Alternatively, we could replace the SlotVisitor rightToRun lock
+ // with a DLG-style handshake mechanism, but that seems not as general.
+ Vector<SlotVisitor*, 8> slotVisitorsToUpdate;
+
+ forEachSlotVisitor(
+ [&] (SlotVisitor& slotVisitor) {
+ slotVisitorsToUpdate.append(&slotVisitor);
+ });
+
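+    // Spin for a bounded number of rounds, opportunistically updating each visitor whose
+    // rightToRun lock we can grab without blocking; whatever is still left after the loop is
+    // updated unconditionally below.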
+ for (unsigned countdown = 40; !slotVisitorsToUpdate.isEmpty() && countdown--;) {
+ for (unsigned index = 0; index < slotVisitorsToUpdate.size(); ++index) {
+ SlotVisitor& slotVisitor = *slotVisitorsToUpdate[index];
+ bool remove = false;
+ if (slotVisitor.hasAcknowledgedThatTheMutatorIsResumed())
+ remove = true;
+ else if (auto locker = tryHoldLock(slotVisitor.rightToRun())) {
+ slotVisitor.updateMutatorIsStopped(locker);
+ remove = true;
+ }
+ if (remove) {
+ slotVisitorsToUpdate[index--] = slotVisitorsToUpdate.last();
+ slotVisitorsToUpdate.takeLast();
+ }
+ }
+ std::this_thread::yield();
+ }
+
+ for (SlotVisitor* slotVisitor : slotVisitorsToUpdate)
+ slotVisitor->updateMutatorIsStopped();
+
+ resumeCompilerThreads();
+}
+
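+// Returns true if the collector stopped the world: either it was already stopped, or the mutator
+// has no heap access so the stopped bit could be set directly. Returns false if the mutator has,
+// or is handed, the conn, in which case the mutator will drive the collection itself.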
+bool Heap::stopTheMutator()
+{
+ for (;;) {
+ unsigned oldState = m_worldState.load();
+ if (oldState & stoppedBit) {
+ RELEASE_ASSERT(!(oldState & hasAccessBit));
+ RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
+ RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
+ return true;
+ }
+
+ if (oldState & mutatorHasConnBit) {
+ RELEASE_ASSERT(!(oldState & hasAccessBit));
+ RELEASE_ASSERT(!(oldState & stoppedBit));
+ return false;
+ }
+
+ if (!(oldState & hasAccessBit)) {
+ RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
+ RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
+ // We can stop the world instantly.
+ if (m_worldState.compareExchangeWeak(oldState, oldState | stoppedBit))
+ return true;
+ continue;
+ }
+
+ // Transfer the conn to the mutator and bail.
+ RELEASE_ASSERT(oldState & hasAccessBit);
+ RELEASE_ASSERT(!(oldState & stoppedBit));
+ unsigned newState = (oldState | mutatorHasConnBit) & ~mutatorWaitingBit;
+ if (m_worldState.compareExchangeWeak(oldState, newState)) {
+ if (false)
+ dataLog("Handed off the conn.\n");
+ m_stopIfNecessaryTimer->scheduleSoon();
+ ParkingLot::unparkAll(&m_worldState);
+ return false;
+ }
+ }
+}
+
+NEVER_INLINE void Heap::resumeTheMutator()
+{
+ if (false)
+ dataLog("Resuming the mutator.\n");
+ for (;;) {
+ unsigned oldState = m_worldState.load();
+ if (!!(oldState & hasAccessBit) != !(oldState & stoppedBit)) {
+ dataLog("Fatal: hasAccess = ", !!(oldState & hasAccessBit), ", stopped = ", !!(oldState & stoppedBit), "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ if (oldState & mutatorHasConnBit) {
+ dataLog("Fatal: mutator has the conn.\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ if (!(oldState & stoppedBit)) {
+ if (false)
+ dataLog("Returning because not stopped.\n");
+ return;
+ }
+
+ if (m_worldState.compareExchangeWeak(oldState, oldState & ~stoppedBit)) {
+ if (false)
+ dataLog("CASing and returning.\n");
+ ParkingLot::unparkAll(&m_worldState);
+ return;
+ }
+ }
+}
+
+void Heap::stopIfNecessarySlow()
+{
+ while (stopIfNecessarySlow(m_worldState.load())) { }
+
+ RELEASE_ASSERT(m_worldState.load() & hasAccessBit);
+ RELEASE_ASSERT(!(m_worldState.load() & stoppedBit));
+
+ handleGCDidJIT();
+ handleNeedFinalize();
+ m_mutatorDidRun = true;
+}
+
+bool Heap::stopIfNecessarySlow(unsigned oldState)
+{
+ RELEASE_ASSERT(oldState & hasAccessBit);
+ RELEASE_ASSERT(!(oldState & stoppedBit));
+
+ // It's possible for us to wake up with finalization already requested but the world not yet
+ // resumed. If that happens, we can't run finalization yet.
+ if (handleNeedFinalize(oldState))
+ return true;
+
+ // FIXME: When entering the concurrent phase, we could arrange for this branch not to fire, and then
+ // have the SlotVisitor do things to the m_worldState to make this branch fire again. That would
+ // prevent us from polling this so much. Ideally, stopIfNecessary would ignore the mutatorHasConnBit
+ // and there would be some other bit indicating whether we were in some GC phase other than the
+ // NotRunning or Concurrent ones.
+ if (oldState & mutatorHasConnBit)
+ collectInMutatorThread();
+
+ return false;
+}
+
+NEVER_INLINE void Heap::collectInMutatorThread()
+{
+ CollectingScope collectingScope(*this);
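+ // runCurrentPhase() may report that it needs the current thread's stack and register state
+ // (for the conservative scan); in that case we re-enter the same phase loop from inside
+ // callWithCurrentThreadState().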
+ for (;;) {
+ RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, nullptr);
+ switch (result) {
+ case RunCurrentPhaseResult::Finished:
+ return;
+ case RunCurrentPhaseResult::Continue:
+ break;
+ case RunCurrentPhaseResult::NeedCurrentThreadState:
+ sanitizeStackForVM(m_vm);
+ auto lambda = [&] (CurrentThreadState& state) {
+ for (;;) {
+ RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, &state);
+ switch (result) {
+ case RunCurrentPhaseResult::Finished:
+ return;
+ case RunCurrentPhaseResult::Continue:
+ break;
+ case RunCurrentPhaseResult::NeedCurrentThreadState:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+ };
+ callWithCurrentThreadState(scopedLambda<void(CurrentThreadState&)>(WTFMove(lambda)));
+ return;
+ }
+ }
+}
+
+template<typename Func>
+void Heap::waitForCollector(const Func& func)
+{
+ for (;;) {
+ bool done;
+ {
+ LockHolder locker(*m_threadLock);
+ done = func(locker);
+ if (!done) {
+ setMutatorWaiting();
+
+ // At this point, the collector knows that we intend to wait, and it will clear the
+ // waiting bit and then unparkAll when the GC cycle finishes. Clearing the bit
+ // prevents us from parking unless there is also a stop-the-world. Unparking after
+ // clearing means that if the clearing happens after we park, we will still be unparked.
+ }
+ }
+
+ // If we're in a stop-the-world scenario, we need to wait for that even if done is true.
+ unsigned oldState = m_worldState.load();
+ if (stopIfNecessarySlow(oldState))
+ continue;
+
+ // FIXME: We wouldn't need this if stopIfNecessarySlow() had a mode where it knew to just
+ // do the collection.
+ relinquishConn();
+
+ if (done) {
+ clearMutatorWaiting(); // Clean up just in case.
+ return;
+ }
+
+ // If mutatorWaitingBit is still set then we want to wait.
+ ParkingLot::compareAndPark(&m_worldState, oldState | mutatorWaitingBit);
+ }
+}
+
+void Heap::acquireAccessSlow()
+{
+ for (;;) {
+ unsigned oldState = m_worldState.load();
+ RELEASE_ASSERT(!(oldState & hasAccessBit));
+
+ if (oldState & stoppedBit) {
+ if (verboseStop) {
+ dataLog("Stopping in acquireAccess!\n");
+ WTFReportBacktrace();
+ }
+ // Wait until we're not stopped anymore.
+ ParkingLot::compareAndPark(&m_worldState, oldState);
+ continue;
+ }
+
+ RELEASE_ASSERT(!(oldState & stoppedBit));
+ unsigned newState = oldState | hasAccessBit;
+ if (m_worldState.compareExchangeWeak(oldState, newState)) {
+ handleGCDidJIT();
+ handleNeedFinalize();
+ m_mutatorDidRun = true;
+ stopIfNecessary();
+ return;
+ }
+ }
+}
+
+void Heap::releaseAccessSlow()
+{
+ for (;;) {
+ unsigned oldState = m_worldState.load();
+ if (!(oldState & hasAccessBit)) {
+ dataLog("FATAL: Attempting to release access but the mutator does not have access.\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ if (oldState & stoppedBit) {
+ dataLog("FATAL: Attempting to release access but the mutator is stopped.\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ if (handleNeedFinalize(oldState))
+ continue;
+
+ unsigned newState = oldState & ~(hasAccessBit | mutatorHasConnBit);
+
+ if ((oldState & mutatorHasConnBit)
+ && m_nextPhase != m_currentPhase) {
+ // This means that the collector thread had given us the conn so that we would do something
+ // for it. Stop ourselves as we release access. This ensures that acquireAccess blocks. In
+ // the meantime, since we're handing the conn over, the collector will be awoken and it is
+ // sure to have work to do.
+ newState |= stoppedBit;
+ }
+
+ if (m_worldState.compareExchangeWeak(oldState, newState)) {
+ if (oldState & mutatorHasConnBit)
+ finishRelinquishingConn();
+ return;
+ }
+ }
+}
+
+bool Heap::relinquishConn(unsigned oldState)
+{
+ RELEASE_ASSERT(oldState & hasAccessBit);
+ RELEASE_ASSERT(!(oldState & stoppedBit));
+
+ if (!(oldState & mutatorHasConnBit))
+ return false; // Done.
+
+ if (m_threadShouldStop)
+ return false;
+
+ if (!m_worldState.compareExchangeWeak(oldState, oldState & ~mutatorHasConnBit))
+ return true; // Loop around.
+
+ finishRelinquishingConn();
+ return true;
+}
+
+void Heap::finishRelinquishingConn()
+{
+ if (false)
+ dataLog("Relinquished the conn.\n");
+
+ sanitizeStackForVM(m_vm);
+
+ auto locker = holdLock(*m_threadLock);
+ if (!m_requests.isEmpty())
+ m_threadCondition->notifyOne(locker);
+ ParkingLot::unparkAll(&m_worldState);
+}
+
+void Heap::relinquishConn()
+{
+ while (relinquishConn(m_worldState.load())) { }
+}
+
+bool Heap::handleGCDidJIT(unsigned oldState)
+{
+ RELEASE_ASSERT(oldState & hasAccessBit);
+ if (!(oldState & gcDidJITBit))
+ return false;
+ if (m_worldState.compareExchangeWeak(oldState, oldState & ~gcDidJITBit)) {
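+ // The collector finished JIT work while we were stopped, so execute a cross-modifying-code
+ // fence before this thread runs any of the freshly produced code.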
+ WTF::crossModifyingCodeFence();
+ return true;
+ }
+ return true;
+}
+
+NEVER_INLINE bool Heap::handleNeedFinalize(unsigned oldState)
+{
+ RELEASE_ASSERT(oldState & hasAccessBit);
+ RELEASE_ASSERT(!(oldState & stoppedBit));
+
+ if (!(oldState & needFinalizeBit))
+ return false;
+ if (m_worldState.compareExchangeWeak(oldState, oldState & ~needFinalizeBit)) {
+ finalize();
+ // Wake up anyone waiting for us to finalize. Note that they may have woken up already, in
+ // which case they would be waiting for us to release heap access.
+ ParkingLot::unparkAll(&m_worldState);
+ return true;
+ }
+ return true;
+}
+
+void Heap::handleGCDidJIT()
+{
+ while (handleGCDidJIT(m_worldState.load())) { }
+}
+
+void Heap::handleNeedFinalize()
+{
+ while (handleNeedFinalize(m_worldState.load())) { }
+}
+
+void Heap::setGCDidJIT()
+{
+ m_worldState.transaction(
+ [&] (unsigned& state) {
+ RELEASE_ASSERT(state & stoppedBit);
+ state |= gcDidJITBit;
+ });
+}
+
+void Heap::setNeedFinalize()
+{
+ m_worldState.exchangeOr(needFinalizeBit);
+ ParkingLot::unparkAll(&m_worldState);
+ m_stopIfNecessaryTimer->scheduleSoon();
+}
+
+void Heap::waitWhileNeedFinalize()
+{
+ for (;;) {
+ unsigned oldState = m_worldState.load();
+ if (!(oldState & needFinalizeBit)) {
+ // This means that either there was no finalize request or the main thread will finalize
+ // with heap access, so a subsequent call to stopTheWorld() will return only when
+ // finalize finishes.
+ return;
+ }
+ ParkingLot::compareAndPark(&m_worldState, oldState);
}
+}
+void Heap::setMutatorWaiting()
+{
+ m_worldState.exchangeOr(mutatorWaitingBit);
+}
+
+void Heap::clearMutatorWaiting()
+{
+ m_worldState.exchangeAnd(~mutatorWaitingBit);
+}
+
+void Heap::notifyThreadStopping(const AbstractLocker&)
+{
+ m_threadIsStopping = true;
+ clearMutatorWaiting();
+ ParkingLot::unparkAll(&m_worldState);
+}
+
+void Heap::finalize()
+{
+ MonotonicTime before;
+ if (Options::logGC()) {
+ before = MonotonicTime::now();
+ dataLog("[GC<", RawPointer(this), ">: finalize ");
+ }
+
{
- GCPHASE(DeleteCodeBlocks);
+ SweepingScope sweepingScope(*this);
deleteUnmarkedCompiledCode();
+ deleteSourceProviderCaches();
+ sweepLargeAllocations();
}
+
+ if (HasOwnPropertyCache* cache = vm()->hasOwnPropertyCache())
+ cache->clear();
+
+ if (Options::sweepSynchronously())
+ sweepSynchronously();
- {
- GCPHASE(DeleteSourceProviderCaches);
- m_vm->clearSourceProviderCaches();
+ if (Options::logGC()) {
+ MonotonicTime after = MonotonicTime::now();
+ dataLog((after - before).milliseconds(), "ms]\n");
}
+}
- if (m_operationInProgress == FullCollection)
- m_sweeper->startSweeping(m_blockSnapshot);
+Heap::Ticket Heap::requestCollection(std::optional<CollectionScope> scope)
+{
+ stopIfNecessary();
+
+ ASSERT(vm()->currentThreadIsHoldingAPILock());
+ RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
+
+ LockHolder locker(*m_threadLock);
+ // We may be able to steal the conn. That only works if the collector is definitely not running
+ // right now. This is an optimization that prevents the collector thread from ever starting in most
+ // cases.
+ ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
+ if ((m_lastServedTicket == m_lastGrantedTicket) && (m_currentPhase == CollectorPhase::NotRunning)) {
+ if (false)
+ dataLog("Taking the conn.\n");
+ m_worldState.exchangeOr(mutatorHasConnBit);
+ }
+
+ m_requests.append(scope);
+ m_lastGrantedTicket++;
+ if (!(m_worldState.load() & mutatorHasConnBit))
+ m_threadCondition->notifyOne(locker);
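+ // The ticket we return is this request's slot in the granted sequence; waitForCollection()
+ // parks the caller until m_lastServedTicket catches up to it.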
+ return m_lastGrantedTicket;
+}
- {
- GCPHASE(AddCurrentlyExecutingCodeBlocksToRememberedSet);
- m_codeBlocks.rememberCurrentlyExecutingCodeBlocks(this);
+void Heap::waitForCollection(Ticket ticket)
+{
+ waitForCollector(
+ [&] (const AbstractLocker&) -> bool {
+ return m_lastServedTicket >= ticket;
+ });
+}
+
+void Heap::sweepLargeAllocations()
+{
+ m_objectSpace.sweepLargeAllocations();
+}
+
+void Heap::suspendCompilerThreads()
+{
+#if ENABLE(DFG_JIT)
+ // We "ensure" (i.e. eagerly create) the worklists so that it's not possible for the mutator to
+ // start a new worklist after we have suspended the ones it had started before. That's not very
+ // expensive since the worklists use AutomaticThreads anyway.
+ for (unsigned i = DFG::numberOfWorklists(); i--;)
+ DFG::ensureWorklistForIndex(i).suspendAllThreads();
+#endif
+}
+
+void Heap::willStartCollection(std::optional<CollectionScope> scope)
+{
+ if (Options::logGC())
+ dataLog("=> ");
+
+ if (shouldDoFullCollection(scope)) {
+ m_collectionScope = CollectionScope::Full;
+ m_shouldDoFullCollection = false;
+ if (Options::logGC())
+ dataLog("FullCollection, ");
+ if (false)
+ dataLog("Full collection!\n");
+ } else {
+ m_collectionScope = CollectionScope::Eden;
+ if (Options::logGC())
+ dataLog("EdenCollection, ");
+ if (false)
+ dataLog("Eden collection!\n");
+ }
+ if (m_collectionScope == CollectionScope::Full) {
+ m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
+ m_extraMemorySize = 0;
+ m_deprecatedExtraMemorySize = 0;
+#if ENABLE(RESOURCE_USAGE)
+ m_externalMemorySize = 0;
+#endif
+
+ if (m_fullActivityCallback)
+ m_fullActivityCallback->willCollect();
+ } else {
+ ASSERT(m_collectionScope == CollectionScope::Eden);
+ m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
}
- m_bytesAbandonedThisCycle = 0;
+ if (m_edenActivityCallback)
+ m_edenActivityCallback->willCollect();
- {
- GCPHASE(ResetAllocators);
- m_objectSpace.resetAllocators();
+ for (auto* observer : m_observers)
+ observer->willGarbageCollect();
+}
+
+void Heap::prepareForMarking()
+{
+ m_objectSpace.prepareForMarking();
+}
+
+void Heap::reapWeakHandles()
+{
+ m_objectSpace.reapWeakSets();
+}
+
+void Heap::pruneStaleEntriesFromWeakGCMaps()
+{
+ if (m_collectionScope != CollectionScope::Full)
+ return;
+ for (auto& pruneCallback : m_weakGCMaps.values())
+ pruneCallback();
+}
+
+void Heap::sweepArrayBuffers()
+{
+ m_arrayBuffers.sweep();
+}
+
+void Heap::snapshotUnswept()
+{
+ TimingScope timingScope(*this, "Heap::snapshotUnswept");
+ m_objectSpace.snapshotUnswept();
+}
+
+void Heap::deleteSourceProviderCaches()
+{
+ if (*m_lastCollectionScope == CollectionScope::Full)
+ m_vm->clearSourceProviderCaches();
+}
+
+void Heap::notifyIncrementalSweeper()
+{
+ if (m_collectionScope == CollectionScope::Full) {
+ if (!m_logicallyEmptyWeakBlocks.isEmpty())
+ m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
+ }
+
+ m_sweeper->startSweeping();
+}
+
+void Heap::updateAllocationLimits()
+{
+ static const bool verbose = false;
+
+ if (verbose) {
+ dataLog("\n");
+ dataLog("bytesAllocatedThisCycle = ", m_bytesAllocatedThisCycle, "\n");
}
- size_t currentHeapSize = sizeAfterCollect();
- if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
- HeapStatistics::exitWithFailure();
+ // Calculate our current heap size threshold for the purpose of figuring out when we should
+ // run another collection. This isn't the same as either size() or capacity(), though it should
+ // be somewhere between the two. The key is to match the size calculations involved in calls to
+ // didAllocate(), while never dangerously underestimating capacity(). In extreme cases of
+ // fragmentation, we may have size() much smaller than capacity().
+ size_t currentHeapSize = 0;
+
+ // For marked space, we use the total number of bytes visited. This matches the logic for
+ // MarkedAllocator's calls to didAllocate(), which effectively accounts for the total size of
+ // objects allocated rather than blocks used. This will underestimate capacity(), and in case
+ // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because
+ // cells usually have a narrow range of sizes. So, the underestimation is probably OK.
+ currentHeapSize += m_totalBytesVisited;
+ if (verbose)
+ dataLog("totalBytesVisited = ", m_totalBytesVisited, ", currentHeapSize = ", currentHeapSize, "\n");
+
+ // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time
+ // extra memory reporting.
+ currentHeapSize += extraMemorySize();
+ if (!ASSERT_DISABLED) {
+ Checked<size_t, RecordOverflow> checkedCurrentHeapSize = m_totalBytesVisited;
+ checkedCurrentHeapSize += extraMemorySize();
+ ASSERT(!checkedCurrentHeapSize.hasOverflowed() && checkedCurrentHeapSize.unsafeGet() == currentHeapSize);
+ }
- if (m_operationInProgress == FullCollection) {
+ if (verbose)
+ dataLog("extraMemorySize() = ", extraMemorySize(), ", currentHeapSize = ", currentHeapSize, "\n");
+
+ if (m_collectionScope == CollectionScope::Full) {
// To avoid pathological GC churn in very small and very large heaps, we set
// the new allocation limit based on the current size of the heap, with a
// fixed minimum.
m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
+ if (verbose)
+ dataLog("Full: maxHeapSize = ", m_maxHeapSize, "\n");
m_maxEdenSize = m_maxHeapSize - currentHeapSize;
+ if (verbose)
+ dataLog("Full: maxEdenSize = ", m_maxEdenSize, "\n");
+ m_sizeAfterLastFullCollect = currentHeapSize;
+ if (verbose)
+ dataLog("Full: sizeAfterLastFullCollect = ", currentHeapSize, "\n");
+ m_bytesAbandonedSinceLastFullCollect = 0;
+ if (verbose)
+ dataLog("Full: bytesAbandonedSinceLastFullCollect = ", 0, "\n");
} else {
ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
- m_maxEdenSize = m_maxHeapSize - currentHeapSize;
+ // Theoretically, we shouldn't ever scan more memory than the heap size we planned to have.
+ // But we are sloppy, so we have to defend against the overflow.
+ m_maxEdenSize = currentHeapSize > m_maxHeapSize ? 0 : m_maxHeapSize - currentHeapSize;
+ if (verbose)
+ dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
+ m_sizeAfterLastEdenCollect = currentHeapSize;
+ if (verbose)
+ dataLog("Eden: sizeAfterLastEdenCollect = ", currentHeapSize, "\n");
double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
double minEdenToOldGenerationRatio = 1.0 / 3.0;
if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
m_shouldDoFullCollection = true;
+ // This seems suspect at first, but what it does is ensure that the nursery size is fixed.
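+ // Worked example with made-up numbers: if the previous cycle left m_sizeAfterLastCollect at
+ // 10MB and m_maxHeapSize at 14MB (so m_maxEdenSize was 4MB), and this eden collection survives
+ // to currentHeapSize = 12MB, then m_maxHeapSize grows by 2MB to 16MB and m_maxEdenSize is
+ // recomputed as 16MB - 12MB = 4MB: the nursery budget stays constant while the old generation
+ // absorbs the growth.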
m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
+ if (verbose)
+ dataLog("Eden: maxHeapSize = ", m_maxHeapSize, "\n");
m_maxEdenSize = m_maxHeapSize - currentHeapSize;
+ if (verbose)
+ dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
+ if (m_fullActivityCallback) {
+ ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
+ m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
+ }
}
m_sizeAfterLastCollect = currentHeapSize;
-
+ if (verbose)
+ dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n");
m_bytesAllocatedThisCycle = 0;
- double lastGCEndTime = WTF::monotonicallyIncreasingTime();
- m_lastGCLength = lastGCEndTime - lastGCStartTime;
-
- if (Options::recordGCPauseTimes())
- HeapStatistics::recordGCPauseTime(lastGCStartTime, lastGCEndTime);
- RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection);
- m_operationInProgress = NoOperation;
- JAVASCRIPTCORE_GC_END();
+ if (Options::logGC())
+ dataLog("=> ", currentHeapSize / 1024, "kb, ");
+}
- if (Options::useZombieMode())
- zombifyDeadObjects();
+void Heap::didFinishCollection()
+{
+ m_afterGC = MonotonicTime::now();
+ CollectionScope scope = *m_collectionScope;
+ if (scope == CollectionScope::Full)
+ m_lastFullGCLength = m_afterGC - m_beforeGC;
+ else
+ m_lastEdenGCLength = m_afterGC - m_beforeGC;
- if (Options::objectsAreImmortal())
- markDeadObjects();
+#if ENABLE(RESOURCE_USAGE)
+ ASSERT(externalMemorySize() <= extraMemorySize());
+#endif
- if (Options::showObjectStatistics())
- HeapStatistics::showObjectStatistics(this);
-
- if (Options::logGC()) {
- double after = currentTimeMS();
- dataLog(after - before, " ms, ", currentHeapSize / 1024, " kb]\n");
+ if (HeapProfiler* heapProfiler = m_vm->heapProfiler()) {
+ gatherExtraHeapSnapshotData(*heapProfiler);
+ removeDeadHeapSnapshotNodes(*heapProfiler);
}
-}
-bool Heap::collectIfNecessaryOrDefer()
-{
- if (m_deferralDepth)
- return false;
-
- if (!shouldCollect())
- return false;
-
- collect();
- return true;
-}
+ RELEASE_ASSERT(m_collectionScope);
+ m_lastCollectionScope = m_collectionScope;
+ m_collectionScope = std::nullopt;
-void Heap::markDeadObjects()
-{
- HeapIterationScope iterationScope(*this);
- m_objectSpace.forEachDeadCell<MarkObject>(iterationScope);
+ for (auto* observer : m_observers)
+ observer->didGarbageCollect(scope);
}
-void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback)
+void Heap::resumeCompilerThreads()
{
- m_activityCallback = activityCallback;
+#if ENABLE(DFG_JIT)
+ for (unsigned i = DFG::numberOfWorklists(); i--;)
+ DFG::existingWorklistForIndex(i).resumeAllThreads();
+#endif
}
-GCActivityCallback* Heap::activityCallback()
+GCActivityCallback* Heap::fullActivityCallback()
{
- return m_activityCallback.get();
+ return m_fullActivityCallback.get();
}
-void Heap::setIncrementalSweeper(PassOwnPtr<IncrementalSweeper> sweeper)
+GCActivityCallback* Heap::edenActivityCallback()
{
- m_sweeper = sweeper;
+ return m_edenActivityCallback.get();
}
IncrementalSweeper* Heap::sweeper()
@@ -989,15 +2211,18 @@ IncrementalSweeper* Heap::sweeper()
void Heap::setGarbageCollectionTimerEnabled(bool enable)
{
- if (m_activityCallback)
- m_activityCallback->setEnabled(enable);
+ if (m_fullActivityCallback)
+ m_fullActivityCallback->setEnabled(enable);
+ if (m_edenActivityCallback)
+ m_edenActivityCallback->setEnabled(enable);
}
void Heap::didAllocate(size_t bytes)
{
- if (m_activityCallback)
- m_activityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedThisCycle);
+ if (m_edenActivityCallback)
+ m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
m_bytesAllocatedThisCycle += bytes;
+ performIncrement(bytes);
}
bool Heap::isValidAllocation(size_t)
@@ -1005,7 +2230,7 @@ bool Heap::isValidAllocation(size_t)
if (!isValidThreadState(m_vm))
return false;
- if (m_operationInProgress != NoOperation)
+ if (isCurrentThreadBusy())
return false;
return true;
@@ -1024,77 +2249,512 @@ void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
}
-void Heap::addCompiledCode(ExecutableBase* executable)
+void Heap::addExecutable(ExecutableBase* executable)
{
- m_compiledCode.append(executable);
+ m_executables.append(executable);
}
-class Zombify : public MarkedBlock::VoidFunctor {
-public:
- void operator()(JSCell* cell)
- {
- void** current = reinterpret_cast<void**>(cell);
+void Heap::collectAllGarbageIfNotDoneRecently()
+{
+ if (!m_fullActivityCallback) {
+ collectAllGarbage();
+ return;
+ }
+
+ if (m_fullActivityCallback->didSyncGCRecently()) {
+ // A synchronous GC was already requested recently, so we merely accelerate the next collection.
+ reportAbandonedObjectGraph();
+ return;
+ }
+
+ m_fullActivityCallback->setDidSyncGCRecently();
+ collectAllGarbage();
+}
+
+bool Heap::shouldDoFullCollection(std::optional<CollectionScope> scope) const
+{
+ if (!Options::useGenerationalGC())
+ return true;
+
+ if (!scope)
+ return m_shouldDoFullCollection;
+ return *scope == CollectionScope::Full;
+}
+
+void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
+{
+ m_logicallyEmptyWeakBlocks.append(block);
+}
+
+void Heap::sweepAllLogicallyEmptyWeakBlocks()
+{
+ if (m_logicallyEmptyWeakBlocks.isEmpty())
+ return;
+
+ m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
+ while (sweepNextLogicallyEmptyWeakBlock()) { }
+}
+
+bool Heap::sweepNextLogicallyEmptyWeakBlock()
+{
+ if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
+ return false;
- // We want to maintain zapped-ness because that's how we know if we've called
- // the destructor.
- if (cell->isZapped())
- current++;
+ WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
- void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + MarkedBlock::blockFor(cell)->cellSize());
- for (; current < limit; current++)
- *current = reinterpret_cast<void*>(0xbbadbeef);
+ block->sweep();
+ if (block->isEmpty()) {
+ std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
+ m_logicallyEmptyWeakBlocks.removeLast();
+ WeakBlock::destroy(*this, block);
+ } else
+ m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
+
+ if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
+ m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
+ return false;
}
-};
-void Heap::zombifyDeadObjects()
+ return true;
+}
+
+size_t Heap::visitCount()
{
- // Sweep now because destructors will crash once we're zombified.
- m_objectSpace.sweep();
- HeapIterationScope iterationScope(*this);
- m_objectSpace.forEachDeadCell<Zombify>(iterationScope);
+ size_t result = 0;
+ forEachSlotVisitor(
+ [&] (SlotVisitor& visitor) {
+ result += visitor.visitCount();
+ });
+ return result;
+}
+
+size_t Heap::bytesVisited()
+{
+ size_t result = 0;
+ forEachSlotVisitor(
+ [&] (SlotVisitor& visitor) {
+ result += visitor.bytesVisited();
+ });
+ return result;
}
-void Heap::incrementDeferralDepth()
+void Heap::forEachCodeBlockImpl(const ScopedLambda<bool(CodeBlock*)>& func)
{
- RELEASE_ASSERT(m_deferralDepth < 100); // Sanity check to make sure this doesn't get ridiculous.
+ // We don't know the full set of CodeBlocks until compilation has terminated.
+ completeAllJITPlans();
+
+ return m_codeBlocks->iterate(func);
+}
+
+void Heap::forEachCodeBlockIgnoringJITPlansImpl(const ScopedLambda<bool(CodeBlock*)>& func)
+{
+ return m_codeBlocks->iterate(func);
+}
+
+void Heap::writeBarrierSlowPath(const JSCell* from)
+{
+ if (UNLIKELY(mutatorShouldBeFenced())) {
+ // In this case, the barrierThreshold is the tautological threshold, so from could still be
+ // not black. But we can't know for sure until we fire off a fence.
+ WTF::storeLoadFence();
+ if (from->cellState() != CellState::PossiblyBlack)
+ return;
+ }
- m_deferralDepth++;
+ addToRememberedSet(from);
+}
+
+bool Heap::isCurrentThreadBusy()
+{
+ return mayBeGCThread() || mutatorState() != MutatorState::Running;
}
-void Heap::decrementDeferralDepth()
+void Heap::reportExtraMemoryVisited(size_t size)
{
- RELEASE_ASSERT(m_deferralDepth >= 1);
+ size_t* counter = &m_extraMemorySize;
- m_deferralDepth--;
+ for (;;) {
+ size_t oldSize = *counter;
+ // FIXME: Change this to use SaturatedArithmetic when available.
+ // https://bugs.webkit.org/show_bug.cgi?id=170411
+ Checked<size_t, RecordOverflow> checkedNewSize = oldSize;
+ checkedNewSize += size;
+ size_t newSize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
+ if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, newSize))
+ return;
+ }
}
-void Heap::decrementDeferralDepthAndGCIfNeeded()
+#if ENABLE(RESOURCE_USAGE)
+void Heap::reportExternalMemoryVisited(size_t size)
{
- decrementDeferralDepth();
- collectIfNecessaryOrDefer();
+ size_t* counter = &m_externalMemorySize;
+
+ for (;;) {
+ size_t oldSize = *counter;
+ if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size))
+ return;
+ }
+}
+#endif
+
+void Heap::collectIfNecessaryOrDefer(GCDeferralContext* deferralContext)
+{
+ ASSERT(!DisallowGC::isGCDisallowedOnCurrentThread());
+
+ if (!m_isSafeToCollect)
+ return;
+ switch (mutatorState()) {
+ case MutatorState::Running:
+ case MutatorState::Allocating:
+ break;
+ case MutatorState::Sweeping:
+ case MutatorState::Collecting:
+ return;
+ }
+ if (!Options::useGC())
+ return;
+
+ if (mayNeedToStop()) {
+ if (deferralContext)
+ deferralContext->m_shouldGC = true;
+ else if (isDeferred())
+ m_didDeferGCWork = true;
+ else
+ stopIfNecessary();
+ }
+
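+ // If Options::gcMaxHeapSize() is set, it overrides the normal eden budget as the allocation
+ // threshold that triggers a collection.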
+ if (UNLIKELY(Options::gcMaxHeapSize())) {
+ if (m_bytesAllocatedThisCycle <= Options::gcMaxHeapSize())
+ return;
+ } else {
+ if (m_bytesAllocatedThisCycle <= m_maxEdenSize)
+ return;
+ }
+
+ if (deferralContext)
+ deferralContext->m_shouldGC = true;
+ else if (isDeferred())
+ m_didDeferGCWork = true;
+ else {
+ collectAsync();
+ stopIfNecessary(); // This will immediately start the collection if we have the conn.
+ }
}
-void Heap::writeBarrier(const JSCell* from)
+void Heap::decrementDeferralDepthAndGCIfNeededSlow()
{
-#if ENABLE(GGC)
- ASSERT_GC_OBJECT_LOOKS_VALID(const_cast<JSCell*>(from));
- if (!from || !isMarked(from))
+ // Can't do anything if we're still deferred.
+ if (m_deferralDepth)
return;
- Heap* heap = Heap::heap(from);
- heap->addToRememberedSet(from);
+
+ ASSERT(!isDeferred());
+
+ m_didDeferGCWork = false;
+ // FIXME: Bring back something like the DeferGCProbability mode.
+ // https://bugs.webkit.org/show_bug.cgi?id=166627
+ collectIfNecessaryOrDefer();
+}
+
+void Heap::registerWeakGCMap(void* weakGCMap, std::function<void()> pruningCallback)
+{
+ m_weakGCMaps.add(weakGCMap, WTFMove(pruningCallback));
+}
+
+void Heap::unregisterWeakGCMap(void* weakGCMap)
+{
+ m_weakGCMaps.remove(weakGCMap);
+}
+
+void Heap::didAllocateBlock(size_t capacity)
+{
+#if ENABLE(RESOURCE_USAGE)
+ m_blockBytesAllocated += capacity;
#else
- UNUSED_PARAM(from);
+ UNUSED_PARAM(capacity);
#endif
}
-void Heap::flushWriteBarrierBuffer(JSCell* cell)
+void Heap::didFreeBlock(size_t capacity)
{
-#if ENABLE(GGC)
- m_writeBarrierBuffer.flush(*this);
- m_writeBarrierBuffer.add(cell);
+#if ENABLE(RESOURCE_USAGE)
+ m_blockBytesAllocated -= capacity;
#else
- UNUSED_PARAM(cell);
+ UNUSED_PARAM(capacity);
+#endif
+}
+
+#if USE(CF)
+void Heap::setRunLoop(CFRunLoopRef runLoop)
+{
+ m_runLoop = runLoop;
+ m_fullActivityCallback->setRunLoop(runLoop);
+ m_edenActivityCallback->setRunLoop(runLoop);
+ m_sweeper->setRunLoop(runLoop);
+}
+#endif // USE(CF)
+
+void Heap::addCoreConstraints()
+{
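+    // These are the root-scanning steps that runFixpointPhase() re-executes via
+    // executeConvergence() until none of them produces new marking work. The DOM and other
+    // clients can append their own via addMarkingConstraint().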
+ m_constraintSet->add(
+ "Cs", "Conservative Scan",
+ [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
+ TimingScope preConvergenceTimingScope(*this, "Constraint: conservative scan");
+ m_objectSpace.prepareForConservativeScan();
+ ConservativeRoots conservativeRoots(*this);
+ SuperSamplerScope superSamplerScope(false);
+ gatherStackRoots(conservativeRoots);
+ gatherJSStackRoots(conservativeRoots);
+ gatherScratchBufferRoots(conservativeRoots);
+ slotVisitor.append(conservativeRoots);
+ },
+ ConstraintVolatility::GreyedByExecution);
+
+ m_constraintSet->add(
+ "Msr", "Misc Small Roots",
+ [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
+#if JSC_OBJC_API_ENABLED
+ scanExternalRememberedSet(*m_vm, slotVisitor);
+#endif
+
+ if (m_vm->smallStrings.needsToBeVisited(*m_collectionScope))
+ m_vm->smallStrings.visitStrongReferences(slotVisitor);
+
+ for (auto& pair : m_protectedValues)
+ slotVisitor.appendUnbarriered(pair.key);
+
+ if (m_markListSet && m_markListSet->size())
+ MarkedArgumentBuffer::markLists(slotVisitor, *m_markListSet);
+
+ slotVisitor.appendUnbarriered(m_vm->exception());
+ slotVisitor.appendUnbarriered(m_vm->lastException());
+ },
+ ConstraintVolatility::GreyedByExecution);
+
+ m_constraintSet->add(
+ "Sh", "Strong Handles",
+ [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
+ m_handleSet.visitStrongHandles(slotVisitor);
+ m_handleStack.visit(slotVisitor);
+ },
+ ConstraintVolatility::GreyedByExecution);
+
+ m_constraintSet->add(
+ "D", "Debugger",
+ [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
+#if ENABLE(SAMPLING_PROFILER)
+ if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
+ LockHolder locker(samplingProfiler->getLock());
+ samplingProfiler->processUnverifiedStackTraces();
+ samplingProfiler->visit(slotVisitor);
+ if (Options::logGC() == GCLogging::Verbose)
+ dataLog("Sampling Profiler data:\n", slotVisitor);
+ }
+#endif // ENABLE(SAMPLING_PROFILER)
+
+ if (m_vm->typeProfiler())
+ m_vm->typeProfilerLog()->visit(slotVisitor);
+
+ m_vm->shadowChicken().visitChildren(slotVisitor);
+ },
+ ConstraintVolatility::GreyedByExecution);
+
+ m_constraintSet->add(
+ "Jsr", "JIT Stub Routines",
+ [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
+ m_jitStubRoutines->traceMarkedStubRoutines(slotVisitor);
+ },
+ ConstraintVolatility::GreyedByExecution);
+
+ m_constraintSet->add(
+ "Ws", "Weak Sets",
+ [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
+ m_objectSpace.visitWeakSets(slotVisitor);
+ },
+ ConstraintVolatility::GreyedByMarking);
+
+ m_constraintSet->add(
+ "Wrh", "Weak Reference Harvesters",
+ [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
+ for (WeakReferenceHarvester* current = m_weakReferenceHarvesters.head(); current; current = current->next())
+ current->visitWeakReferences(slotVisitor);
+ },
+ ConstraintVolatility::GreyedByMarking);
+
+#if ENABLE(DFG_JIT)
+ m_constraintSet->add(
+ "Dw", "DFG Worklists",
+ [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
+ for (unsigned i = DFG::numberOfWorklists(); i--;)
+ DFG::existingWorklistForIndex(i).visitWeakReferences(slotVisitor);
+
+ // FIXME: This is almost certainly unnecessary.
+ // https://bugs.webkit.org/show_bug.cgi?id=166829
+ DFG::iterateCodeBlocksForGC(
+ *m_vm,
+ [&] (CodeBlock* codeBlock) {
+ slotVisitor.appendUnbarriered(codeBlock);
+ });
+
+ if (Options::logGC() == GCLogging::Verbose)
+ dataLog("DFG Worklists:\n", slotVisitor);
+ },
+ ConstraintVolatility::GreyedByMarking);
#endif
+
+ m_constraintSet->add(
+ "Cb", "CodeBlocks",
+ [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
+ iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(
+ [&] (CodeBlock* codeBlock) {
+ // Visit the CodeBlock as a constraint only if it's black.
+ if (Heap::isMarked(codeBlock)
+ && codeBlock->cellState() == CellState::PossiblyBlack)
+ slotVisitor.visitAsConstraint(codeBlock);
+ });
+ },
+ ConstraintVolatility::SeldomGreyed);
+
+ m_constraintSet->add(
+ "Mrms", "Mutator+Race Mark Stack",
+ [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
+ // Indicate to the fixpoint that we introduced work!
+ size_t size = m_mutatorMarkStack->size() + m_raceMarkStack->size();
+ slotVisitor.addToVisitCount(size);
+
+ if (Options::logGC())
+ dataLog("(", size, ")");
+
+ m_mutatorMarkStack->transferTo(slotVisitor.mutatorMarkStack());
+ m_raceMarkStack->transferTo(slotVisitor.mutatorMarkStack());
+ },
+ [this] (SlotVisitor&) -> double {
+ return m_mutatorMarkStack->size() + m_raceMarkStack->size();
+ },
+ ConstraintVolatility::GreyedByExecution);
+}
+
+void Heap::addMarkingConstraint(std::unique_ptr<MarkingConstraint> constraint)
+{
+ PreventCollectionScope preventCollectionScope(*this);
+ m_constraintSet->add(WTFMove(constraint));
+}
+
+void Heap::notifyIsSafeToCollect()
+{
+ MonotonicTime before;
+ if (Options::logGC()) {
+ before = MonotonicTime::now();
+ dataLog("[GC<", RawPointer(this), ">: starting ");
+ }
+
+ addCoreConstraints();
+
+ m_isSafeToCollect = true;
+
+ if (Options::collectContinuously()) {
+ m_collectContinuouslyThread = createThread(
+ "JSC DEBUG Continuous GC",
+ [this] () {
+ MonotonicTime initialTime = MonotonicTime::now();
+ Seconds period = Seconds::fromMilliseconds(Options::collectContinuouslyPeriodMS());
+ while (!m_shouldStopCollectingContinuously) {
+ {
+ LockHolder locker(*m_threadLock);
+ if (m_requests.isEmpty()) {
+ m_requests.append(std::nullopt);
+ m_lastGrantedTicket++;
+ m_threadCondition->notifyOne(locker);
+ }
+ }
+
+ {
+ LockHolder locker(m_collectContinuouslyLock);
+ Seconds elapsed = MonotonicTime::now() - initialTime;
+ Seconds elapsedInPeriod = elapsed % period;
+ MonotonicTime timeToWakeUp =
+ initialTime + elapsed - elapsedInPeriod + period;
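+ // Wake at the next whole period boundary measured from initialTime, so requests stay evenly
+ // spaced even if an individual collection overruns its period.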
+ while (!hasElapsed(timeToWakeUp) && !m_shouldStopCollectingContinuously) {
+ m_collectContinuouslyCondition.waitUntil(
+ m_collectContinuouslyLock, timeToWakeUp);
+ }
+ }
+ }
+ });
+ }
+
+ if (Options::logGC())
+ dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
+}
+
+void Heap::preventCollection()
+{
+ if (!m_isSafeToCollect)
+ return;
+
+ // This prevents the collectContinuously thread from starting a collection.
+ m_collectContinuouslyLock.lock();
+
+ // Wait for all collections to finish.
+ waitForCollector(
+ [&] (const AbstractLocker&) -> bool {
+ ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
+ return m_lastServedTicket == m_lastGrantedTicket;
+ });
+
+ // Now a collection can only start if this thread starts it.
+ RELEASE_ASSERT(!m_collectionScope);
+}
+
+void Heap::allowCollection()
+{
+ if (!m_isSafeToCollect)
+ return;
+
+ m_collectContinuouslyLock.unlock();
+}
+
+template<typename Func>
+void Heap::forEachSlotVisitor(const Func& func)
+{
+ auto locker = holdLock(m_parallelSlotVisitorLock);
+ func(*m_collectorSlotVisitor);
+ func(*m_mutatorSlotVisitor);
+ for (auto& slotVisitor : m_parallelSlotVisitors)
+ func(*slotVisitor);
+}
+
+void Heap::setMutatorShouldBeFenced(bool value)
+{
+ m_mutatorShouldBeFenced = value;
+ m_barrierThreshold = value ? tautologicalThreshold : blackThreshold;
+}
+
+void Heap::performIncrement(size_t bytes)
+{
+ if (!m_objectSpace.isMarking())
+ return;
+
+ m_incrementBalance += bytes * Options::gcIncrementScale();
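+ // Hypothetical walk-through (numbers are illustrative, not defaults): with a gcIncrementScale
+ // of 2 and a gcIncrementBytes threshold of 10000, a 6000-byte allocation raises the balance to
+ // 12000, which clears the threshold below, so we drain up to min(12000, gcIncrementMaxBytes)
+ // bytes of marking work and then deduct however many bytes were actually visited.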
+
+ // Save ourselves from crazy. Since this is an optimization, it's OK to go back to any consistent
+ // state when the double goes wild.
+ if (std::isnan(m_incrementBalance) || std::isinf(m_incrementBalance))
+ m_incrementBalance = 0;
+
+ if (m_incrementBalance < static_cast<double>(Options::gcIncrementBytes()))
+ return;
+
+ double targetBytes = m_incrementBalance;
+ if (targetBytes <= 0)
+ return;
+ targetBytes = std::min(targetBytes, Options::gcIncrementMaxBytes());
+
+ SlotVisitor& slotVisitor = *m_mutatorSlotVisitor;
+ ParallelModeEnabler parallelModeEnabler(slotVisitor);
+ size_t bytesVisited = slotVisitor.performIncrementOfDraining(static_cast<size_t>(targetBytes));
+ // incrementBalance may go negative here because it'll remember how many bytes we overshot.
+ m_incrementBalance -= bytesVisited;
}
} // namespace JSC