author     Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
committer  Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
commit     1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree       46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/heap/MarkedSpace.cpp
parent     32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
Diffstat (limited to 'Source/JavaScriptCore/heap/MarkedSpace.cpp')
-rw-r--r--  Source/JavaScriptCore/heap/MarkedSpace.cpp  677
1 file changed, 454 insertions, 223 deletions
diff --git a/Source/JavaScriptCore/heap/MarkedSpace.cpp b/Source/JavaScriptCore/heap/MarkedSpace.cpp
index e005337a6..0dee44eba 100644
--- a/Source/JavaScriptCore/heap/MarkedSpace.cpp
+++ b/Source/JavaScriptCore/heap/MarkedSpace.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2003-2017 Apple Inc. All rights reserved.
* Copyright (C) 2007 Eric Seidel <eric@webkit.org>
*
* This library is free software; you can redistribute it and/or
@@ -21,239 +21,346 @@
#include "config.h"
#include "MarkedSpace.h"
-#include "DelayedReleaseScope.h"
+#include "FunctionCodeBlock.h"
#include "IncrementalSweeper.h"
-#include "JSGlobalObject.h"
-#include "JSLock.h"
#include "JSObject.h"
-
+#include "JSCInlines.h"
+#include "MarkedAllocatorInlines.h"
+#include "MarkedBlockInlines.h"
+#include <wtf/ListDump.h>
namespace JSC {
-class Structure;
-
-class Free {
-public:
- typedef MarkedBlock* ReturnType;
+std::array<size_t, MarkedSpace::numSizeClasses> MarkedSpace::s_sizeClassForSizeStep;
- enum FreeMode { FreeOrShrink, FreeAll };
-
- Free(FreeMode, MarkedSpace*);
- void operator()(MarkedBlock*);
- ReturnType returnValue();
-
-private:
- FreeMode m_freeMode;
- MarkedSpace* m_markedSpace;
- DoublyLinkedList<MarkedBlock> m_blocks;
-};
+namespace {
-inline Free::Free(FreeMode freeMode, MarkedSpace* newSpace)
- : m_freeMode(freeMode)
- , m_markedSpace(newSpace)
+const Vector<size_t>& sizeClasses()
{
+ static Vector<size_t>* result;
+ static std::once_flag once;
+ std::call_once(
+ once,
+ [] {
+ result = new Vector<size_t>();
+
+ auto add = [&] (size_t sizeClass) {
+ sizeClass = WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(sizeClass);
+ if (Options::dumpSizeClasses())
+ dataLog("Adding JSC MarkedSpace size class: ", sizeClass, "\n");
+ // Perform some validation as we go.
+ RELEASE_ASSERT(!(sizeClass % MarkedSpace::sizeStep));
+ if (result->isEmpty())
+ RELEASE_ASSERT(sizeClass == MarkedSpace::sizeStep);
+ result->append(sizeClass);
+ };
+
+ // This is a definition of the size classes in our GC. It must define all of the
+ // size classes from sizeStep up to largeCutoff.
+
+ // Have very precise size classes for the small stuff. This is a loop to make it easy to reduce
+ // atomSize.
+ for (size_t size = MarkedSpace::sizeStep; size < MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep)
+ add(size);
+
+            // We want the remaining size classes to minimize internal fragmentation (i.e. the wasted
+            // space at the tail end of a MarkedBlock) while proceeding roughly exponentially, starting
+            // just above the precise size classes and ending at about four cells per block.
+
+ if (Options::dumpSizeClasses())
+ dataLog(" Marked block payload size: ", static_cast<size_t>(MarkedSpace::blockPayload), "\n");
+
+ for (unsigned i = 0; ; ++i) {
+ double approximateSize = MarkedSpace::preciseCutoff * pow(Options::sizeClassProgression(), i);
+
+ if (Options::dumpSizeClasses())
+ dataLog(" Next size class as a double: ", approximateSize, "\n");
+
+ size_t approximateSizeInBytes = static_cast<size_t>(approximateSize);
+
+ if (Options::dumpSizeClasses())
+ dataLog(" Next size class as bytes: ", approximateSizeInBytes, "\n");
+
+ // Make sure that the computer did the math correctly.
+ RELEASE_ASSERT(approximateSizeInBytes >= MarkedSpace::preciseCutoff);
+
+ if (approximateSizeInBytes > MarkedSpace::largeCutoff)
+ break;
+
+ size_t sizeClass =
+ WTF::roundUpToMultipleOf<MarkedSpace::sizeStep>(approximateSizeInBytes);
+
+ if (Options::dumpSizeClasses())
+ dataLog(" Size class: ", sizeClass, "\n");
+
+ // Optimize the size class so that there isn't any slop at the end of the block's
+ // payload.
+ unsigned cellsPerBlock = MarkedSpace::blockPayload / sizeClass;
+ size_t possiblyBetterSizeClass = (MarkedSpace::blockPayload / cellsPerBlock) & ~(MarkedSpace::sizeStep - 1);
+
+ if (Options::dumpSizeClasses())
+ dataLog(" Possibly better size class: ", possiblyBetterSizeClass, "\n");
+
+ // The size class we just came up with is better than the other one if it reduces
+ // total wastage assuming we only allocate cells of that size.
+ size_t originalWastage = MarkedSpace::blockPayload - cellsPerBlock * sizeClass;
+ size_t newWastage = (possiblyBetterSizeClass - sizeClass) * cellsPerBlock;
+
+ if (Options::dumpSizeClasses())
+ dataLog(" Original wastage: ", originalWastage, ", new wastage: ", newWastage, "\n");
+
+ size_t betterSizeClass;
+ if (newWastage > originalWastage)
+ betterSizeClass = sizeClass;
+ else
+ betterSizeClass = possiblyBetterSizeClass;
+
+ if (Options::dumpSizeClasses())
+ dataLog(" Choosing size class: ", betterSizeClass, "\n");
+
+ if (betterSizeClass == result->last()) {
+            // Defense for when the progression step (Options::sizeClassProgression()) is small.
+ continue;
+ }
+
+ // This is usually how we get out of the loop.
+ if (betterSizeClass > MarkedSpace::largeCutoff
+ || betterSizeClass > Options::largeAllocationCutoff())
+ break;
+
+ add(betterSizeClass);
+ }
+
+ // Manually inject size classes for objects we know will be allocated in high volume.
+ add(sizeof(UnlinkedFunctionExecutable));
+ add(sizeof(UnlinkedFunctionCodeBlock));
+ add(sizeof(FunctionExecutable));
+ add(sizeof(FunctionCodeBlock));
+ add(sizeof(JSString));
+ add(sizeof(JSFunction));
+ add(sizeof(PropertyTable));
+ add(sizeof(Structure));
+
+ {
+ // Sort and deduplicate.
+ std::sort(result->begin(), result->end());
+ auto it = std::unique(result->begin(), result->end());
+ result->shrinkCapacity(it - result->begin());
+ }
+
+ if (Options::dumpSizeClasses())
+ dataLog("JSC Heap MarkedSpace size class dump: ", listDump(*result), "\n");
+
+    // We have an optimization in MarkedSpace::optimalSizeFor() that assumes things about
+    // the size class table. This checks our results against that function's assumptions.
+ for (size_t size = MarkedSpace::sizeStep, i = 0; size <= MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep, i++)
+ RELEASE_ASSERT(result->at(i) == size);
+ });
+ return *result;
}
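// A standalone sketch of the wastage comparison performed in the loop above, using
// hypothetical constants in place of MarkedSpace::sizeStep, MarkedSpace::blockPayload,
// and a rounded-up exponential candidate size.
#include <cstddef>
#include <cstdio>

int main()
{
    const size_t sizeStep = 16;        // assumed stand-in for MarkedSpace::sizeStep
    const size_t blockPayload = 16320; // assumed stand-in for MarkedSpace::blockPayload
    const size_t candidate = 1328;     // a hypothetical candidate size class

    // How many cells of this size fit in one block, and how much tail slop remains?
    size_t cellsPerBlock = blockPayload / candidate;
    size_t originalWastage = blockPayload - cellsPerBlock * candidate;

    // Grow the cell so the same cell count consumes the whole payload, then round
    // back down to a sizeStep boundary.
    size_t possiblyBetterSizeClass = (blockPayload / cellsPerBlock) & ~(sizeStep - 1);
    size_t newWastage = (possiblyBetterSizeClass - candidate) * cellsPerBlock;

    // Keep whichever choice wastes less, exactly as the loop above does.
    size_t betterSizeClass = newWastage > originalWastage ? candidate : possiblyBetterSizeClass;
    printf("cells/block=%zu original=%zu new=%zu chosen=%zu\n",
        cellsPerBlock, originalWastage, newWastage, betterSizeClass);
    return 0;
}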
-inline void Free::operator()(MarkedBlock* block)
+template<typename TableType, typename SizeClassCons, typename DefaultCons>
+void buildSizeClassTable(TableType& table, const SizeClassCons& cons, const DefaultCons& defaultCons)
{
- if (m_freeMode == FreeOrShrink)
- m_markedSpace->freeOrShrinkBlock(block);
- else
- m_markedSpace->freeBlock(block);
+ size_t nextIndex = 0;
+ for (size_t sizeClass : sizeClasses()) {
+ auto entry = cons(sizeClass);
+ size_t index = MarkedSpace::sizeClassToIndex(sizeClass);
+ for (size_t i = nextIndex; i <= index; ++i)
+ table[i] = entry;
+ nextIndex = index + 1;
+ }
+ for (size_t i = nextIndex; i < MarkedSpace::numSizeClasses; ++i)
+ table[i] = defaultCons(MarkedSpace::indexToSizeClass(i));
}
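// A sketch of the dense lookup buildSizeClassTable produces: every size-step index at or
// below a configured size class maps up to that class, and indexes past the last class get
// the defaultCons value. The sizeToIndex/indexToSize helpers and all constants here are
// assumptions standing in for MarkedSpace::sizeClassToIndex/indexToSizeClass.
#include <array>
#include <cstddef>
#include <cstdio>
#include <vector>

static const size_t step = 16;      // assumed stand-in for MarkedSpace::sizeStep
static const size_t numClasses = 9; // enough indexes for requests up to 128 bytes

static size_t sizeToIndex(size_t size) { return (size + step - 1) / step; } // assumed shape
static size_t indexToSize(size_t index) { return index * step; }            // assumed shape

int main()
{
    std::vector<size_t> classes { 16, 32, 48, 96 }; // hypothetical sorted size classes
    std::array<size_t, numClasses> table { };

    size_t nextIndex = 0;
    for (size_t sizeClass : classes) {
        size_t index = sizeToIndex(sizeClass);
        for (size_t i = nextIndex; i <= index; ++i)
            table[i] = sizeClass; // requests in this range round up to sizeClass
        nextIndex = index + 1;
    }
    for (size_t i = nextIndex; i < numClasses; ++i)
        table[i] = indexToSize(i); // past the last class: identity, like the default cons above

    for (size_t request = 1; request <= 128; request += 24)
        printf("request %zu -> size class %zu\n", request, table[sizeToIndex(request)]);
    return 0;
}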
-inline Free::ReturnType Free::returnValue()
+} // anonymous namespace
+
+void MarkedSpace::initializeSizeClassForStepSize()
{
- return m_blocks.head();
+ static std::once_flag flag;
+ std::call_once(
+ flag,
+ [] {
+ buildSizeClassTable(
+ s_sizeClassForSizeStep,
+ [&] (size_t sizeClass) -> size_t {
+ return sizeClass;
+ },
+ [&] (size_t sizeClass) -> size_t {
+ return sizeClass;
+ });
+ });
}
-struct VisitWeakSet : MarkedBlock::VoidFunctor {
- VisitWeakSet(HeapRootVisitor& heapRootVisitor) : m_heapRootVisitor(heapRootVisitor) { }
- void operator()(MarkedBlock* block) { block->visitWeakSet(m_heapRootVisitor); }
-private:
- HeapRootVisitor& m_heapRootVisitor;
-};
-
-struct ReapWeakSet : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block) { block->reapWeakSet(); }
-};
-
MarkedSpace::MarkedSpace(Heap* heap)
: m_heap(heap)
, m_capacity(0)
, m_isIterating(false)
- , m_currentDelayedReleaseScope(nullptr)
{
- for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
- allocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::None);
- normalDestructorAllocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::Normal);
- immortalStructureDestructorAllocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::ImmortalStructure);
- }
-
- for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
- allocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::None);
- normalDestructorAllocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::Normal);
- immortalStructureDestructorAllocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::ImmortalStructure);
- }
-
- m_normalSpace.largeAllocator.init(heap, this, 0, MarkedBlock::None);
- m_normalDestructorSpace.largeAllocator.init(heap, this, 0, MarkedBlock::Normal);
- m_immortalStructureDestructorSpace.largeAllocator.init(heap, this, 0, MarkedBlock::ImmortalStructure);
+ initializeSizeClassForStepSize();
}
MarkedSpace::~MarkedSpace()
{
- Free free(Free::FreeAll, this);
- forEachBlock(free);
+ forEachBlock(
+ [&] (MarkedBlock::Handle* block) {
+ freeBlock(block);
+ });
+ for (LargeAllocation* allocation : m_largeAllocations)
+ allocation->destroy();
ASSERT(!m_blocks.set().size());
}
-struct LastChanceToFinalize : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block) { block->lastChanceToFinalize(); }
-};
-
void MarkedSpace::lastChanceToFinalize()
{
- DelayedReleaseScope delayedReleaseScope(*this);
- stopAllocating();
- forEachBlock<LastChanceToFinalize>();
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.lastChanceToFinalize();
+ return IterationStatus::Continue;
+ });
+ for (LargeAllocation* allocation : m_largeAllocations)
+ allocation->lastChanceToFinalize();
}
void MarkedSpace::sweep()
{
- if (Options::logGC())
- dataLog("Eagerly sweeping...");
- m_heap->sweeper()->willFinishSweeping();
- forEachBlock<Sweep>();
+ m_heap->sweeper()->stopSweeping();
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.sweep();
+ return IterationStatus::Continue;
+ });
}
-void MarkedSpace::resetAllocators()
+void MarkedSpace::sweepLargeAllocations()
{
- for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
- allocatorFor(cellSize).reset();
- normalDestructorAllocatorFor(cellSize).reset();
- immortalStructureDestructorAllocatorFor(cellSize).reset();
- }
-
- for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
- allocatorFor(cellSize).reset();
- normalDestructorAllocatorFor(cellSize).reset();
- immortalStructureDestructorAllocatorFor(cellSize).reset();
+ RELEASE_ASSERT(m_largeAllocationsNurseryOffset == m_largeAllocations.size());
+ unsigned srcIndex = m_largeAllocationsNurseryOffsetForSweep;
+ unsigned dstIndex = srcIndex;
+ while (srcIndex < m_largeAllocations.size()) {
+ LargeAllocation* allocation = m_largeAllocations[srcIndex++];
+ allocation->sweep();
+ if (allocation->isEmpty()) {
+ m_capacity -= allocation->cellSize();
+ allocation->destroy();
+ continue;
+ }
+ m_largeAllocations[dstIndex++] = allocation;
}
+ m_largeAllocations.resize(dstIndex);
+ m_largeAllocationsNurseryOffset = m_largeAllocations.size();
+}
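// The sweep above uses the classic in-place two-index compaction: survivors are written back
// over the vector while dead entries are dropped. A generic sketch of that idiom on a plain
// std::vector of ints (0 plays the role of an allocation that became empty):
#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<int> entries { 3, 0, 7, 0, 9 };

    size_t srcIndex = 0;
    size_t dstIndex = 0;
    while (srcIndex < entries.size()) {
        int entry = entries[srcIndex++];
        if (!entry)
            continue;                // dead: skip it (the real code destroys the allocation)
        entries[dstIndex++] = entry; // alive: keep it, packed toward the front
    }
    entries.resize(dstIndex);

    for (int entry : entries)
        printf("%d\n", entry); // prints 3, 7, 9
    return 0;
}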
- m_normalSpace.largeAllocator.reset();
- m_normalDestructorSpace.largeAllocator.reset();
- m_immortalStructureDestructorSpace.largeAllocator.reset();
+void MarkedSpace::prepareForAllocation()
+{
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.prepareForAllocation();
+ return IterationStatus::Continue;
+ });
-#if ENABLE(GGC)
- m_blocksWithNewObjects.clear();
-#endif
+ m_activeWeakSets.takeFrom(m_newActiveWeakSets);
+
+ if (m_heap->collectionScope() == CollectionScope::Eden)
+ m_largeAllocationsNurseryOffsetForSweep = m_largeAllocationsNurseryOffset;
+ else
+ m_largeAllocationsNurseryOffsetForSweep = 0;
+ m_largeAllocationsNurseryOffset = m_largeAllocations.size();
+
+ m_allocatorForEmptyAllocation = m_firstAllocator;
}
-void MarkedSpace::visitWeakSets(HeapRootVisitor& heapRootVisitor)
+void MarkedSpace::visitWeakSets(SlotVisitor& visitor)
{
- VisitWeakSet visitWeakSet(heapRootVisitor);
- if (m_heap->operationInProgress() == EdenCollection) {
- for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
- visitWeakSet(m_blocksWithNewObjects[i]);
- } else
- forEachBlock(visitWeakSet);
+ auto visit = [&] (WeakSet* weakSet) {
+ weakSet->visit(visitor);
+ };
+
+ m_newActiveWeakSets.forEach(visit);
+
+ if (m_heap->collectionScope() == CollectionScope::Full)
+ m_activeWeakSets.forEach(visit);
}
void MarkedSpace::reapWeakSets()
{
- if (m_heap->operationInProgress() == EdenCollection) {
- for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
- m_blocksWithNewObjects[i]->reapWeakSet();
- } else
- forEachBlock<ReapWeakSet>();
+ auto visit = [&] (WeakSet* weakSet) {
+ weakSet->reap();
+ };
+
+ m_newActiveWeakSets.forEach(visit);
+
+ if (m_heap->collectionScope() == CollectionScope::Full)
+ m_activeWeakSets.forEach(visit);
}
-template <typename Functor>
-void MarkedSpace::forEachAllocator()
+void MarkedSpace::stopAllocating()
{
- Functor functor;
- forEachAllocator(functor);
+ ASSERT(!isIterating());
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.stopAllocating();
+ return IterationStatus::Continue;
+ });
}
-template <typename Functor>
-void MarkedSpace::forEachAllocator(Functor& functor)
+void MarkedSpace::prepareForConservativeScan()
{
- for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
- functor(allocatorFor(cellSize));
- functor(normalDestructorAllocatorFor(cellSize));
- functor(immortalStructureDestructorAllocatorFor(cellSize));
- }
-
- for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
- functor(allocatorFor(cellSize));
- functor(normalDestructorAllocatorFor(cellSize));
- functor(immortalStructureDestructorAllocatorFor(cellSize));
- }
-
- functor(m_normalSpace.largeAllocator);
- functor(m_normalDestructorSpace.largeAllocator);
- functor(m_immortalStructureDestructorSpace.largeAllocator);
+ m_largeAllocationsForThisCollectionBegin = m_largeAllocations.begin() + m_largeAllocationsOffsetForThisCollection;
+ m_largeAllocationsForThisCollectionSize = m_largeAllocations.size() - m_largeAllocationsOffsetForThisCollection;
+ m_largeAllocationsForThisCollectionEnd = m_largeAllocations.end();
+ RELEASE_ASSERT(m_largeAllocationsForThisCollectionEnd == m_largeAllocationsForThisCollectionBegin + m_largeAllocationsForThisCollectionSize);
+
+ std::sort(
+ m_largeAllocationsForThisCollectionBegin, m_largeAllocationsForThisCollectionEnd,
+ [&] (LargeAllocation* a, LargeAllocation* b) {
+ return a < b;
+ });
}
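// The sort above puts this collection's LargeAllocation pointers in address order; presumably
// that lets a membership test (for example, while scanning conservative roots) binary-search
// the range rather than walk it. That rationale is an assumption; the sketch below only shows
// the lookup pattern on arbitrary sorted pointers.
#include <algorithm>
#include <cstdio>
#include <vector>

int main()
{
    int storage[4] = { };
    std::vector<int*> allocations { &storage[2], &storage[0], &storage[3], &storage[1] };

    std::sort(allocations.begin(), allocations.end()); // raw pointer <, as in the patch

    int* candidate = &storage[3];
    bool known = std::binary_search(allocations.begin(), allocations.end(), candidate);
    printf("candidate %s a known allocation\n", known ? "is" : "is not");
    return 0;
}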
-struct StopAllocatingFunctor {
- void operator()(MarkedAllocator& allocator) { allocator.stopAllocating(); }
-};
-
-void MarkedSpace::stopAllocating()
+void MarkedSpace::prepareForMarking()
{
- ASSERT(!isIterating());
- forEachAllocator<StopAllocatingFunctor>();
+ if (m_heap->collectionScope() == CollectionScope::Eden)
+ m_largeAllocationsOffsetForThisCollection = m_largeAllocationsNurseryOffset;
+ else
+ m_largeAllocationsOffsetForThisCollection = 0;
}
-struct ResumeAllocatingFunctor {
- void operator()(MarkedAllocator& allocator) { allocator.resumeAllocating(); }
-};
-
void MarkedSpace::resumeAllocating()
{
- ASSERT(isIterating());
- DelayedReleaseScope scope(*this);
- forEachAllocator<ResumeAllocatingFunctor>();
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.resumeAllocating();
+ return IterationStatus::Continue;
+ });
+ // Nothing to do for LargeAllocations.
}
bool MarkedSpace::isPagedOut(double deadline)
{
- for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
- if (allocatorFor(cellSize).isPagedOut(deadline)
- || normalDestructorAllocatorFor(cellSize).isPagedOut(deadline)
- || immortalStructureDestructorAllocatorFor(cellSize).isPagedOut(deadline))
- return true;
- }
-
- for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
- if (allocatorFor(cellSize).isPagedOut(deadline)
- || normalDestructorAllocatorFor(cellSize).isPagedOut(deadline)
- || immortalStructureDestructorAllocatorFor(cellSize).isPagedOut(deadline))
- return true;
- }
-
- if (m_normalSpace.largeAllocator.isPagedOut(deadline)
- || m_normalDestructorSpace.largeAllocator.isPagedOut(deadline)
- || m_immortalStructureDestructorSpace.largeAllocator.isPagedOut(deadline))
- return true;
-
- return false;
+ bool result = false;
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ if (allocator.isPagedOut(deadline)) {
+ result = true;
+ return IterationStatus::Done;
+ }
+ return IterationStatus::Continue;
+ });
+ // FIXME: Consider taking LargeAllocations into account here.
+ return result;
}
-void MarkedSpace::freeBlock(MarkedBlock* block)
+void MarkedSpace::freeBlock(MarkedBlock::Handle* block)
{
block->allocator()->removeBlock(block);
- m_capacity -= block->capacity();
- m_blocks.remove(block);
- if (block->capacity() == MarkedBlock::blockSize) {
- m_heap->blockAllocator().deallocate(MarkedBlock::destroy(block));
- return;
- }
- m_heap->blockAllocator().deallocateCustomSize(MarkedBlock::destroy(block));
+ m_capacity -= MarkedBlock::blockSize;
+ m_blocks.remove(&block->block());
+ delete block;
}
-void MarkedSpace::freeOrShrinkBlock(MarkedBlock* block)
+void MarkedSpace::freeOrShrinkBlock(MarkedBlock::Handle* block)
{
if (!block->isEmpty()) {
block->shrink();
@@ -263,77 +370,75 @@ void MarkedSpace::freeOrShrinkBlock(MarkedBlock* block)
freeBlock(block);
}
-struct Shrink : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block) { block->shrink(); }
-};
-
void MarkedSpace::shrink()
{
- Free freeOrShrink(Free::FreeOrShrink, this);
- forEachBlock(freeOrShrink);
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.shrink();
+ return IterationStatus::Continue;
+ });
}
-static void clearNewlyAllocatedInBlock(MarkedBlock* block)
+void MarkedSpace::beginMarking()
{
- if (!block)
- return;
- block->clearNewlyAllocated();
-}
-
-struct ClearNewlyAllocated : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block) { block->clearNewlyAllocated(); }
-};
+ if (m_heap->collectionScope() == CollectionScope::Full) {
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.beginMarkingForFullCollection();
+ return IterationStatus::Continue;
+ });
+
+ if (UNLIKELY(nextVersion(m_markingVersion) == initialVersion)) {
+ forEachBlock(
+ [&] (MarkedBlock::Handle* handle) {
+ handle->block().resetMarks();
+ });
+ }
+
+ m_markingVersion = nextVersion(m_markingVersion);
+
+ for (LargeAllocation* allocation : m_largeAllocations)
+ allocation->flip();
+ }
-#ifndef NDEBUG
-struct VerifyNewlyAllocated : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block) { ASSERT(!block->clearNewlyAllocated()); }
-};
-#endif
+ if (!ASSERT_DISABLED) {
+ forEachBlock(
+ [&] (MarkedBlock::Handle* block) {
+ if (block->areMarksStale())
+ return;
+ ASSERT(!block->isFreeListed());
+ });
+ }
+
+ m_isMarking = true;
+}
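// The wrap-around branch above exists because mark bits are invalidated by bumping a version
// number rather than clearing bitmaps: a block whose stored version differs from the space's
// current version is treated as having no marks, and only a version wrap forces a physical
// reset. A generic sketch of that trick (names and the exact staleness rule are assumptions,
// not MarkedBlock's real interface):
#include <cstdint>
#include <cstdio>

struct Block {
    uint64_t markVersion { 0 };
    bool markBit { false };

    bool marksAreStale(uint64_t spaceVersion) const { return markVersion != spaceVersion; }
    void mark(uint64_t spaceVersion)
    {
        if (marksAreStale(spaceVersion)) {
            markBit = false;          // logically clear; reset lazily only when touched
            markVersion = spaceVersion;
        }
        markBit = true;
    }
    bool isMarked(uint64_t spaceVersion) const { return !marksAreStale(spaceVersion) && markBit; }
};

int main()
{
    Block block;
    uint64_t version = 1;

    block.mark(version);
    printf("marked in this cycle: %d\n", block.isMarked(version));

    ++version; // "clear" every block's marks in O(1) by bumping the space version
    printf("marked after version bump: %d\n", block.isMarked(version));
    return 0;
}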
-void MarkedSpace::clearNewlyAllocated()
+void MarkedSpace::endMarking()
{
- for (size_t i = 0; i < preciseCount; ++i) {
- clearNewlyAllocatedInBlock(m_normalSpace.preciseAllocators[i].takeLastActiveBlock());
- clearNewlyAllocatedInBlock(m_normalDestructorSpace.preciseAllocators[i].takeLastActiveBlock());
- clearNewlyAllocatedInBlock(m_immortalStructureDestructorSpace.preciseAllocators[i].takeLastActiveBlock());
+ if (UNLIKELY(nextVersion(m_newlyAllocatedVersion) == initialVersion)) {
+ forEachBlock(
+ [&] (MarkedBlock::Handle* handle) {
+ handle->resetAllocated();
+ });
}
+
+ m_newlyAllocatedVersion = nextVersion(m_newlyAllocatedVersion);
+
+ for (unsigned i = m_largeAllocationsOffsetForThisCollection; i < m_largeAllocations.size(); ++i)
+ m_largeAllocations[i]->clearNewlyAllocated();
- for (size_t i = 0; i < impreciseCount; ++i) {
- clearNewlyAllocatedInBlock(m_normalSpace.impreciseAllocators[i].takeLastActiveBlock());
- clearNewlyAllocatedInBlock(m_normalDestructorSpace.impreciseAllocators[i].takeLastActiveBlock());
- clearNewlyAllocatedInBlock(m_immortalStructureDestructorSpace.impreciseAllocators[i].takeLastActiveBlock());
+ if (!ASSERT_DISABLED) {
+ for (LargeAllocation* allocation : m_largeAllocations)
+ ASSERT_UNUSED(allocation, !allocation->isNewlyAllocated());
}
- // We have to iterate all of the blocks in the large allocators because they are
- // canonicalized as they are used up (see MarkedAllocator::tryAllocateHelper)
- // which creates the m_newlyAllocated bitmap.
- ClearNewlyAllocated functor;
- m_normalSpace.largeAllocator.forEachBlock(functor);
- m_normalDestructorSpace.largeAllocator.forEachBlock(functor);
- m_immortalStructureDestructorSpace.largeAllocator.forEachBlock(functor);
-
-#ifndef NDEBUG
- VerifyNewlyAllocated verifyFunctor;
- forEachBlock(verifyFunctor);
-#endif
-}
-
-#ifndef NDEBUG
-struct VerifyMarked : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block) { ASSERT(block->needsSweeping()); }
-};
-#endif
-
-void MarkedSpace::clearMarks()
-{
- if (m_heap->operationInProgress() == EdenCollection) {
- for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
- m_blocksWithNewObjects[i]->clearMarks();
- } else
- forEachBlock<ClearMarks>();
-#ifndef NDEBUG
- forEachBlock<VerifyMarked>();
-#endif
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.endMarking();
+ return IterationStatus::Continue;
+ });
+
+ m_isMarking = false;
}
void MarkedSpace::willStartIterating()
@@ -350,4 +455,130 @@ void MarkedSpace::didFinishIterating()
m_isIterating = false;
}
+size_t MarkedSpace::objectCount()
+{
+ size_t result = 0;
+ forEachBlock(
+ [&] (MarkedBlock::Handle* block) {
+ result += block->markCount();
+ });
+ for (LargeAllocation* allocation : m_largeAllocations) {
+ if (allocation->isMarked())
+ result++;
+ }
+ return result;
+}
+
+size_t MarkedSpace::size()
+{
+ size_t result = 0;
+ forEachBlock(
+ [&] (MarkedBlock::Handle* block) {
+ result += block->markCount() * block->cellSize();
+ });
+ for (LargeAllocation* allocation : m_largeAllocations) {
+ if (allocation->isMarked())
+ result += allocation->cellSize();
+ }
+ return result;
+}
+
+size_t MarkedSpace::capacity()
+{
+ return m_capacity;
+}
+
+void MarkedSpace::addActiveWeakSet(WeakSet* weakSet)
+{
+ // We conservatively assume that the WeakSet should belong in the new set. In fact, some weak
+ // sets might contain new weak handles even though they are tied to old objects. This slightly
+ // increases the amount of scanning that an eden collection would have to do, but the effect
+ // ought to be small.
+ m_newActiveWeakSets.append(weakSet);
+}
+
+void MarkedSpace::didAddBlock(MarkedBlock::Handle* block)
+{
+ // WARNING: This function is called before block is fully initialized. The block will not know
+ // its cellSize() or attributes(). The latter implies that you can't ask things like
+ // needsDestruction().
+ m_capacity += MarkedBlock::blockSize;
+ m_blocks.add(&block->block());
+}
+
+void MarkedSpace::didAllocateInBlock(MarkedBlock::Handle* block)
+{
+ if (block->weakSet().isOnList()) {
+ block->weakSet().remove();
+ m_newActiveWeakSets.append(&block->weakSet());
+ }
+}
+
+MarkedBlock::Handle* MarkedSpace::findEmptyBlockToSteal()
+{
+ for (; m_allocatorForEmptyAllocation; m_allocatorForEmptyAllocation = m_allocatorForEmptyAllocation->nextAllocator()) {
+ if (MarkedBlock::Handle* block = m_allocatorForEmptyAllocation->findEmptyBlockToSteal())
+ return block;
+ }
+ return nullptr;
+}
+
+void MarkedSpace::snapshotUnswept()
+{
+ if (m_heap->collectionScope() == CollectionScope::Eden) {
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.snapshotUnsweptForEdenCollection();
+ return IterationStatus::Continue;
+ });
+ } else {
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.snapshotUnsweptForFullCollection();
+ return IterationStatus::Continue;
+ });
+ }
+}
+
+void MarkedSpace::assertNoUnswept()
+{
+ if (ASSERT_DISABLED)
+ return;
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.assertNoUnswept();
+ return IterationStatus::Continue;
+ });
+}
+
+void MarkedSpace::dumpBits(PrintStream& out)
+{
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ out.print("Bits for ", allocator, ":\n");
+ allocator.dumpBits(out);
+ return IterationStatus::Continue;
+ });
+}
+
+MarkedAllocator* MarkedSpace::addMarkedAllocator(
+ const AbstractLocker&, Subspace* subspace, size_t sizeClass)
+{
+ MarkedAllocator* allocator = m_bagOfAllocators.add(heap(), subspace, sizeClass);
+ allocator->setNextAllocator(nullptr);
+
+ WTF::storeStoreFence();
+
+ if (!m_firstAllocator) {
+ m_firstAllocator = allocator;
+ m_lastAllocator = allocator;
+ m_allocatorForEmptyAllocation = allocator;
+ } else {
+ m_lastAllocator->setNextAllocator(allocator);
+ m_lastAllocator = allocator;
+ }
+
+ return allocator;
+}
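// addMarkedAllocator issues a storeStoreFence between initializing the allocator and linking
// it, so a concurrent walk of the nextAllocator chain never sees a partially initialized node.
// Below is a generic sketch of that publish-then-link ordering using std::atomic (a single
// appending thread is assumed; WTF's fence primitives are not used here).
#include <atomic>
#include <cstdio>

struct Node {
    int payload { 0 };
    std::atomic<Node*> next { nullptr };
};

static std::atomic<Node*> head { nullptr };
static Node* tail = nullptr; // touched only by the single appending thread

static void append(int payload)
{
    Node* node = new Node;
    node->payload = payload; // fully initialize first...
    if (!tail)
        head.store(node, std::memory_order_release); // ...then publish with release ordering
    else
        tail->next.store(node, std::memory_order_release);
    tail = node;
}

int main()
{
    append(1);
    append(2);
    // A reader (concurrent, in the real pattern) walks the list with acquire loads.
    for (Node* node = head.load(std::memory_order_acquire); node;
         node = node->next.load(std::memory_order_acquire))
        printf("%d\n", node->payload);
    return 0;
}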
+
} // namespace JSC