author     Lorry Tar Creator <lorry-tar-importer@lorry>    2017-06-27 06:07:23 +0000
committer  Lorry Tar Creator <lorry-tar-importer@lorry>    2017-06-27 06:07:23 +0000
commit     1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree       46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/heap/MarkedBlock.cpp
parent     32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
Diffstat (limited to 'Source/JavaScriptCore/heap/MarkedBlock.cpp')
-rw-r--r--  Source/JavaScriptCore/heap/MarkedBlock.cpp  565
1 file changed, 403 insertions(+), 162 deletions(-)
diff --git a/Source/JavaScriptCore/heap/MarkedBlock.cpp b/Source/JavaScriptCore/heap/MarkedBlock.cpp
index 674f45636..3e4aca2d3 100644
--- a/Source/JavaScriptCore/heap/MarkedBlock.cpp
+++ b/Source/JavaScriptCore/heap/MarkedBlock.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,236 +26,477 @@
#include "config.h"
#include "MarkedBlock.h"
-#include "DelayedReleaseScope.h"
-#include "IncrementalSweeper.h"
#include "JSCell.h"
#include "JSDestructibleObject.h"
-#include "Operations.h"
+#include "JSCInlines.h"
+#include "MarkedBlockInlines.h"
+#include "SuperSampler.h"
+#include "SweepingScope.h"
namespace JSC {
-MarkedBlock* MarkedBlock::create(DeadBlock* block, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
+const size_t MarkedBlock::blockSize;
+
+static const bool computeBalance = false;
+static size_t balance;
+
+MarkedBlock::Handle* MarkedBlock::tryCreate(Heap& heap)
{
- ASSERT(reinterpret_cast<size_t>(block) == (reinterpret_cast<size_t>(block) & blockMask));
- Region* region = block->region();
- return new (NotNull, block) MarkedBlock(region, allocator, cellSize, destructorType);
+ if (computeBalance) {
+ balance++;
+ if (!(balance % 10))
+ dataLog("MarkedBlock Balance: ", balance, "\n");
+ }
+ void* blockSpace = tryFastAlignedMalloc(blockSize, blockSize);
+ if (!blockSpace)
+ return nullptr;
+ if (scribbleFreeCells())
+ scribble(blockSpace, blockSize);
+ return new Handle(heap, blockSpace);
}
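
The block is allocated with tryFastAlignedMalloc(blockSize, blockSize), so its base address is a multiple of blockSize; the blockMask assertion in the deleted create() above relied on the same property. A minimal sketch of the pointer-masking lookup this alignment enables follows; the constants and function name are illustrative, not the actual JSC API.

    #include <cstdint>

    // Illustrative only: JSC's real value is MarkedBlock::blockSize.
    static constexpr uintptr_t kDemoBlockSize = 16 * 1024;
    static constexpr uintptr_t kDemoBlockMask = ~(kDemoBlockSize - 1);

    // Recover a block's base address from any pointer into it by clearing the
    // offset-within-block bits. Valid only because blocks are blockSize-aligned.
    inline void* demoBlockBaseFor(const void* cell)
    {
        return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(cell) & kDemoBlockMask);
    }
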
-MarkedBlock::MarkedBlock(Region* region, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
- : HeapBlock<MarkedBlock>(region)
- , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
- , m_endAtom((allocator->cellSize() ? atomsPerBlock : region->blockSize() / atomSize) - m_atomsPerCell + 1)
- , m_destructorType(destructorType)
- , m_allocator(allocator)
- , m_state(New) // All cells start out unmarked.
- , m_weakSet(allocator->heap()->vm())
+MarkedBlock::Handle::Handle(Heap& heap, void* blockSpace)
+ : m_weakSet(heap.vm(), CellContainer())
+ , m_newlyAllocatedVersion(MarkedSpace::nullVersion)
{
- ASSERT(allocator);
- HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+ m_block = new (NotNull, blockSpace) MarkedBlock(*heap.vm(), *this);
+
+ m_weakSet.setContainer(*m_block);
+
+ heap.didAllocateBlock(blockSize);
}
-inline void MarkedBlock::callDestructor(JSCell* cell)
+MarkedBlock::Handle::~Handle()
{
- // A previous eager sweep may already have run cell's destructor.
- if (cell->isZapped())
- return;
+ Heap& heap = *this->heap();
+ if (computeBalance) {
+ balance--;
+ if (!(balance % 10))
+ dataLog("MarkedBlock Balance: ", balance, "\n");
+ }
+ removeFromAllocator();
+ m_block->~MarkedBlock();
+ fastAlignedFree(m_block);
+ heap.didFreeBlock(blockSize);
+}
- cell->methodTableForDestruction()->destroy(cell);
- cell->zap();
+MarkedBlock::MarkedBlock(VM& vm, Handle& handle)
+ : m_markingVersion(MarkedSpace::nullVersion)
+ , m_handle(handle)
+ , m_vm(&vm)
+{
+ if (false)
+ dataLog(RawPointer(this), ": Allocated.\n");
}
-template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, MarkedBlock::DestructorType dtorType>
-MarkedBlock::FreeList MarkedBlock::specializedSweep()
+void MarkedBlock::Handle::unsweepWithNoNewlyAllocated()
{
- ASSERT(blockState != Allocated && blockState != FreeListed);
- ASSERT(!(dtorType == MarkedBlock::None && sweepMode == SweepOnly));
+ RELEASE_ASSERT(m_isFreeListed);
+ m_isFreeListed = false;
+}
- // This produces a free list that is ordered in reverse through the block.
- // This is fine, since the allocation code makes no assumptions about the
- // order of the free list.
- FreeCell* head = 0;
- size_t count = 0;
- for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
- if (blockState == Marked && (m_marks.get(i) || (m_newlyAllocated && m_newlyAllocated->get(i))))
- continue;
+void MarkedBlock::Handle::setIsFreeListed()
+{
+ m_allocator->setIsEmpty(NoLockingNecessary, this, false);
+ m_isFreeListed = true;
+}
- JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
+void MarkedBlock::Handle::stopAllocating(const FreeList& freeList)
+{
+ auto locker = holdLock(block().m_lock);
+
+ if (false)
+ dataLog(RawPointer(this), ": MarkedBlock::Handle::stopAllocating!\n");
+ ASSERT(!allocator()->isAllocated(NoLockingNecessary, this));
+
+ if (!isFreeListed()) {
+ if (false)
+ dataLog("There ain't no newly allocated.\n");
+ // This means that we either didn't use this block at all for allocation since last GC,
+ // or someone had already done stopAllocating() before.
+ ASSERT(freeList.allocationWillFail());
+ return;
+ }
+
+ if (false)
+ dataLog("Free list: ", freeList, "\n");
+
+ // Roll back to a coherent state for Heap introspection. Cells newly
+ // allocated from our free list are not currently marked, so we need another
+ // way to tell what's live vs dead.
+
+ m_newlyAllocated.clearAll();
+ m_newlyAllocatedVersion = heap()->objectSpace().newlyAllocatedVersion();
+
+ forEachCell(
+ [&] (HeapCell* cell, HeapCell::Kind) -> IterationStatus {
+ setNewlyAllocated(cell);
+ return IterationStatus::Continue;
+ });
+
+ forEachFreeCell(
+ freeList,
+ [&] (HeapCell* cell) {
+ if (false)
+ dataLog("Free cell: ", RawPointer(cell), "\n");
+ if (m_attributes.destruction == NeedsDestruction)
+ cell->zap();
+ clearNewlyAllocated(cell);
+ });
+
+ m_isFreeListed = false;
+}
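
In effect, stopAllocating() computes "all cells in the block minus the cells still on the free list" and records the result as the newly-allocated set. A small sketch of that set algebra using std::bitset; the template parameter and names are made up for illustration.

    #include <bitset>
    #include <cstddef>

    // Cells handed out since the last GC = every cell slot, minus the slots
    // that were still sitting on the free list when allocation stopped.
    template<size_t AtomsPerBlock>
    std::bitset<AtomsPerBlock> demoNewlyAllocated(
        const std::bitset<AtomsPerBlock>& allCells,
        const std::bitset<AtomsPerBlock>& stillOnFreeList)
    {
        return allCells & ~stillOnFreeList;
    }
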
- if (dtorType != MarkedBlock::None && blockState != New)
- callDestructor(cell);
+void MarkedBlock::Handle::lastChanceToFinalize()
+{
+ allocator()->setIsAllocated(NoLockingNecessary, this, false);
+ m_block->m_marks.clearAll();
+ m_block->clearHasAnyMarked();
+ m_block->m_markingVersion = heap()->objectSpace().markingVersion();
+ m_weakSet.lastChanceToFinalize();
+ m_newlyAllocated.clearAll();
+ m_newlyAllocatedVersion = heap()->objectSpace().newlyAllocatedVersion();
+ sweep();
+}
- if (sweepMode == SweepToFreeList) {
- FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
- freeCell->next = head;
- head = freeCell;
- ++count;
+FreeList MarkedBlock::Handle::resumeAllocating()
+{
+ {
+ auto locker = holdLock(block().m_lock);
+
+ if (false)
+ dataLog(RawPointer(this), ": MarkedBlock::Handle::resumeAllocating!\n");
+ ASSERT(!allocator()->isAllocated(NoLockingNecessary, this));
+ ASSERT(!isFreeListed());
+
+ if (!hasAnyNewlyAllocated()) {
+ if (false)
+ dataLog("There ain't no newly allocated.\n");
+ // This means we had already exhausted the block when we stopped allocation.
+ return FreeList();
}
}
- // We only want to discard the newlyAllocated bits if we're creating a FreeList,
- // otherwise we would lose information on what's currently alive.
- if (sweepMode == SweepToFreeList && m_newlyAllocated)
- m_newlyAllocated.clear();
+ // Re-create our free list from before stopping allocation. Note that this may return an empty
+ // freelist, in which case the block will still be Marked!
+ return sweep(SweepToFreeList);
+}
- m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Marked);
- return FreeList(head, count * cellSize());
+void MarkedBlock::Handle::zap(const FreeList& freeList)
+{
+ forEachFreeCell(
+ freeList,
+ [&] (HeapCell* cell) {
+ if (m_attributes.destruction == NeedsDestruction)
+ cell->zap();
+ });
}
-MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
+template<typename Func>
+void MarkedBlock::Handle::forEachFreeCell(const FreeList& freeList, const Func& func)
{
- ASSERT(DelayedReleaseScope::isInEffectFor(heap()->m_objectSpace));
- HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+ if (freeList.remaining) {
+ for (unsigned remaining = freeList.remaining; remaining; remaining -= cellSize())
+ func(bitwise_cast<HeapCell*>(freeList.payloadEnd - remaining));
+ } else {
+ for (FreeCell* current = freeList.head; current;) {
+ FreeCell* next = current->next;
+ func(bitwise_cast<HeapCell*>(current));
+ current = next;
+ }
+ }
+}
- m_weakSet.sweep();
+void MarkedBlock::aboutToMarkSlow(HeapVersion markingVersion)
+{
+ ASSERT(vm()->heap.objectSpace().isMarking());
+ LockHolder locker(m_lock);
+
+ if (!areMarksStale(markingVersion))
+ return;
+
+ MarkedAllocator* allocator = handle().allocator();
+
+ if (handle().allocator()->isAllocated(holdLock(allocator->bitvectorLock()), &handle())
+ || !marksConveyLivenessDuringMarking(markingVersion)) {
+ if (false)
+ dataLog(RawPointer(this), ": Clearing marks without doing anything else.\n");
+ // We already know that the block is full and is already recognized as such, or that the
+ // block did not survive the previous GC. So, we can clear mark bits the old fashioned
+ // way. Note that it's possible for such a block to have newlyAllocated with an up-to-
+ // date version! If it does, then we want to leave the newlyAllocated alone, since that
+ // means that we had allocated in this previously empty block but did not fill it up, so
+ // we created a newlyAllocated.
+ m_marks.clearAll();
+ } else {
+ if (false)
+ dataLog(RawPointer(this), ": Doing things.\n");
+ HeapVersion newlyAllocatedVersion = space()->newlyAllocatedVersion();
+ if (handle().m_newlyAllocatedVersion == newlyAllocatedVersion) {
+ // Merge the contents of marked into newlyAllocated. If we get the full set of bits
+ // then invalidate newlyAllocated and set allocated.
+ handle().m_newlyAllocated.mergeAndClear(m_marks);
+ } else {
+ // Replace the contents of newlyAllocated with marked. If we get the full set of
+ // bits then invalidate newlyAllocated and set allocated.
+ handle().m_newlyAllocated.setAndClear(m_marks);
+ }
+ handle().m_newlyAllocatedVersion = newlyAllocatedVersion;
+ }
+ clearHasAnyMarked();
+ WTF::storeStoreFence();
+ m_markingVersion = markingVersion;
+
+ // This means we're the first ones to mark any object in this block.
+ allocator->setIsMarkingNotEmpty(holdLock(allocator->bitvectorLock()), &handle(), true);
+}
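
A sketch of the two bit-vector operations used above, written against std::bitset to show the intended semantics; mergeAndClear and setAndClear are FastBitVector helpers, and the names below are stand-ins.

    #include <bitset>
    #include <cstddef>

    // newlyAllocated is still current for this cycle: fold the stale marks in,
    // then clear them (union, i.e. "marked or newly allocated" stays live).
    template<size_t N>
    void demoMergeAndClear(std::bitset<N>& newlyAllocated, std::bitset<N>& marks)
    {
        newlyAllocated |= marks;
        marks.reset();
    }

    // newlyAllocated belongs to an older cycle: discard it and adopt the marks.
    template<size_t N>
    void demoSetAndClear(std::bitset<N>& newlyAllocated, std::bitset<N>& marks)
    {
        newlyAllocated = marks;
        marks.reset();
    }
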
- if (sweepMode == SweepOnly && m_destructorType == MarkedBlock::None)
- return FreeList();
+void MarkedBlock::Handle::resetAllocated()
+{
+ m_newlyAllocated.clearAll();
+ m_newlyAllocatedVersion = MarkedSpace::nullVersion;
+}
- if (m_destructorType == MarkedBlock::ImmortalStructure)
- return sweepHelper<MarkedBlock::ImmortalStructure>(sweepMode);
- if (m_destructorType == MarkedBlock::Normal)
- return sweepHelper<MarkedBlock::Normal>(sweepMode);
- return sweepHelper<MarkedBlock::None>(sweepMode);
+void MarkedBlock::resetMarks()
+{
+ // We want aboutToMarkSlow() to see what the mark bits were after the last collection. It uses
+ // the version number to distinguish between the marks having already been stale before
+ // beginMarking(), or just stale now that beginMarking() bumped the version. If we have a version
+ // wraparound, then we will call this method before resetting the version to null. When the
+ // version is null, aboutToMarkSlow() will assume that the marks were not stale as of before
+ // beginMarking(). Hence the need to whip the marks into shape.
+ if (areMarksStale())
+ m_marks.clearAll();
+ m_markingVersion = MarkedSpace::nullVersion;
}
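
The version numbers referenced here replace an eager clearAll() over every block at the start of marking: a block's mark bits only count if its stored version matches the space's current one. A simplified sketch of that check, with invented names and without the "marks convey liveness during marking" wrinkle handled by aboutToMarkSlow().

    #include <bitset>
    #include <cstddef>
    #include <cstdint>

    struct DemoBlock {
        uint64_t markingVersion { 0 };   // GC cycle these mark bits belong to
        std::bitset<1024> marks;         // per-atom mark bits

        bool areMarksStale(uint64_t currentVersion) const
        {
            return markingVersion != currentVersion;
        }

        // Stale marks are logically all-clear, so no per-block clearAll() is
        // needed when a new collection bumps currentVersion.
        bool isMarked(uint64_t currentVersion, size_t atom) const
        {
            return !areMarksStale(currentVersion) && marks.test(atom);
        }
    };
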
-template<MarkedBlock::DestructorType dtorType>
-MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
+#if !ASSERT_DISABLED
+void MarkedBlock::assertMarksNotStale()
{
- switch (m_state) {
- case New:
- ASSERT(sweepMode == SweepToFreeList);
- return specializedSweep<New, SweepToFreeList, dtorType>();
- case FreeListed:
- // Happens when a block transitions to fully allocated.
- ASSERT(sweepMode == SweepToFreeList);
- return FreeList();
- case Allocated:
- RELEASE_ASSERT_NOT_REACHED();
- return FreeList();
- case Marked:
- return sweepMode == SweepToFreeList
- ? specializedSweep<Marked, SweepToFreeList, dtorType>()
- : specializedSweep<Marked, SweepOnly, dtorType>();
- }
+ ASSERT(m_markingVersion == vm()->heap.objectSpace().markingVersion());
+}
+#endif // !ASSERT_DISABLED
- RELEASE_ASSERT_NOT_REACHED();
- return FreeList();
+bool MarkedBlock::areMarksStale()
+{
+ return areMarksStale(vm()->heap.objectSpace().markingVersion());
}
-class SetNewlyAllocatedFunctor : public MarkedBlock::VoidFunctor {
-public:
- SetNewlyAllocatedFunctor(MarkedBlock* block)
- : m_block(block)
- {
- }
+bool MarkedBlock::Handle::areMarksStale()
+{
+ return m_block->areMarksStale();
+}
- void operator()(JSCell* cell)
- {
- ASSERT(MarkedBlock::blockFor(cell) == m_block);
- m_block->setNewlyAllocated(cell);
- }
+bool MarkedBlock::isMarked(const void* p)
+{
+ return isMarked(vm()->heap.objectSpace().markingVersion(), p);
+}
-private:
- MarkedBlock* m_block;
-};
+void MarkedBlock::Handle::didConsumeFreeList()
+{
+ auto locker = holdLock(block().m_lock);
+ if (false)
+ dataLog(RawPointer(this), ": MarkedBlock::Handle::didConsumeFreeList!\n");
+ ASSERT(isFreeListed());
+ m_isFreeListed = false;
+ allocator()->setIsAllocated(NoLockingNecessary, this, true);
+}
-void MarkedBlock::stopAllocating(const FreeList& freeList)
+size_t MarkedBlock::markCount()
{
- HEAP_LOG_BLOCK_STATE_TRANSITION(this);
- FreeCell* head = freeList.head;
+ return areMarksStale() ? 0 : m_marks.count();
+}
- if (m_state == Marked) {
- // If the block is in the Marked state then we know that:
- // 1) It was not used for allocation during the previous allocation cycle.
- // 2) It may have dead objects, and we only know them to be dead by the
- // fact that their mark bits are unset.
- // Hence if the block is Marked we need to leave it Marked.
-
- ASSERT(!head);
+bool MarkedBlock::Handle::isEmpty()
+{
+ return m_allocator->isEmpty(NoLockingNecessary, this);
+}
+
+void MarkedBlock::clearHasAnyMarked()
+{
+ m_biasedMarkCount = m_markCountBias;
+}
+
+void MarkedBlock::noteMarkedSlow()
+{
+ MarkedAllocator* allocator = handle().allocator();
+ allocator->setIsMarkingRetired(holdLock(allocator->bitvectorLock()), &handle(), true);
+}
+
+void MarkedBlock::Handle::removeFromAllocator()
+{
+ if (!m_allocator)
return;
- }
-
- ASSERT(m_state == FreeListed);
-
- // Roll back to a coherent state for Heap introspection. Cells newly
- // allocated from our free list are not currently marked, so we need another
- // way to tell what's live vs dead.
- ASSERT(!m_newlyAllocated);
- m_newlyAllocated = adoptPtr(new WTF::Bitmap<atomsPerBlock>());
+ m_allocator->removeBlock(this);
+}
- SetNewlyAllocatedFunctor functor(this);
- forEachCell(functor);
+void MarkedBlock::updateNeedsDestruction()
+{
+ m_needsDestruction = handle().needsDestruction();
+}
- FreeCell* next;
- for (FreeCell* current = head; current; current = next) {
- next = current->next;
- reinterpret_cast<JSCell*>(current)->zap();
- clearNewlyAllocated(current);
- }
+void MarkedBlock::Handle::didAddToAllocator(MarkedAllocator* allocator, size_t index)
+{
+ ASSERT(m_index == std::numeric_limits<size_t>::max());
+ ASSERT(!m_allocator);
+
+ m_index = index;
+ m_allocator = allocator;
+
+ size_t cellSize = allocator->cellSize();
+ m_atomsPerCell = (cellSize + atomSize - 1) / atomSize;
+ m_endAtom = atomsPerBlock - m_atomsPerCell + 1;
- m_state = Marked;
+ m_attributes = allocator->attributes();
+
+ if (m_attributes.cellKind != HeapCell::JSCell)
+ RELEASE_ASSERT(m_attributes.destruction == DoesNotNeedDestruction);
+
+ block().updateNeedsDestruction();
+
+ double markCountBias = -(Options::minMarkedBlockUtilization() * cellsPerBlock());
+
+ // The mark count bias should be comfortably within this range.
+ RELEASE_ASSERT(markCountBias > static_cast<double>(std::numeric_limits<int16_t>::min()));
+ RELEASE_ASSERT(markCountBias < 0);
+
+ // This means we haven't marked anything yet.
+ block().m_biasedMarkCount = block().m_markCountBias = static_cast<int16_t>(markCountBias);
}
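
A hedged sketch of the biased-mark-count scheme initialized here: the counter starts at the negative bias, each marked cell bumps it, and reaching zero means the utilization threshold was crossed, so the block can be retired (which is what noteMarkedSlow() above signals). The noteMarked() body and the worked numbers below are illustrative, not copied from the real implementation.

    #include <cstdint>

    struct DemoMarkCounters {
        int16_t markCountBias;     // e.g. int16_t(-(0.9 * cellsPerBlock)), always negative
        int16_t biasedMarkCount;   // reset to markCountBias whenever marks are cleared

        void clearHasAnyMarked() { biasedMarkCount = markCountBias; }

        // Called once per newly marked cell; true means "retire this block".
        bool noteMarked() { return !++biasedMarkCount; }
    };

    // Worked example: 64 cells per block at 90% target utilization gives
    // markCountBias = int16_t(-(0.9 * 64)) = -57, so the 57th mark hits zero.
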
-void MarkedBlock::clearMarks()
+void MarkedBlock::Handle::didRemoveFromAllocator()
{
-#if ENABLE(GGC)
- if (heap()->operationInProgress() == JSC::EdenCollection)
- this->clearMarksWithCollectionType<EdenCollection>();
- else
- this->clearMarksWithCollectionType<FullCollection>();
-#else
- this->clearMarksWithCollectionType<FullCollection>();
-#endif
+ ASSERT(m_index != std::numeric_limits<size_t>::max());
+ ASSERT(m_allocator);
+
+ m_index = std::numeric_limits<size_t>::max();
+ m_allocator = nullptr;
}
-void MarkedBlock::clearRememberedSet()
+bool MarkedBlock::Handle::isLive(const HeapCell* cell)
{
- m_rememberedSet.clearAll();
+ return isLive(space()->markingVersion(), space()->isMarking(), cell);
}
-template <HeapOperation collectionType>
-void MarkedBlock::clearMarksWithCollectionType()
+bool MarkedBlock::Handle::isLiveCell(const void* p)
{
- ASSERT(collectionType == FullCollection || collectionType == EdenCollection);
- HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+ return isLiveCell(space()->markingVersion(), space()->isMarking(), p);
+}
- ASSERT(m_state != New && m_state != FreeListed);
- if (collectionType == FullCollection) {
- m_marks.clearAll();
-#if ENABLE(GGC)
- m_rememberedSet.clearAll();
+#if !ASSERT_DISABLED
+void MarkedBlock::assertValidCell(VM& vm, HeapCell* cell) const
+{
+ RELEASE_ASSERT(&vm == this->vm());
+ RELEASE_ASSERT(const_cast<MarkedBlock*>(this)->handle().cellAlign(cell) == cell);
+}
#endif
- }
- // This will become true at the end of the mark phase. We set it now to
- // avoid an extra pass to do so later.
- m_state = Marked;
+void MarkedBlock::Handle::dumpState(PrintStream& out)
+{
+ CommaPrinter comma;
+ allocator()->forEachBitVectorWithName(
+ holdLock(allocator()->bitvectorLock()),
+ [&] (FastBitVector& bitvector, const char* name) {
+ out.print(comma, name, ":", bitvector[index()] ? "YES" : "no");
+ });
}
-void MarkedBlock::lastChanceToFinalize()
+Subspace* MarkedBlock::Handle::subspace() const
{
- m_weakSet.lastChanceToFinalize();
-
- clearNewlyAllocated();
- clearMarksWithCollectionType<FullCollection>();
- sweep();
+ return allocator()->subspace();
}
-MarkedBlock::FreeList MarkedBlock::resumeAllocating()
+FreeList MarkedBlock::Handle::sweep(SweepMode sweepMode)
{
- HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+ SweepingScope sweepingScope(*heap());
+
+ m_allocator->setIsUnswept(NoLockingNecessary, this, false);
+
+ m_weakSet.sweep();
- ASSERT(m_state == Marked);
+ if (sweepMode == SweepOnly && m_attributes.destruction == DoesNotNeedDestruction)
+ return FreeList();
- if (!m_newlyAllocated) {
- // We didn't have to create a "newly allocated" bitmap. That means we were already Marked
- // when we last stopped allocation, so return an empty free list and stay in the Marked state.
+ if (UNLIKELY(m_isFreeListed)) {
+ RELEASE_ASSERT(sweepMode == SweepToFreeList);
return FreeList();
}
+
+ ASSERT(!m_allocator->isAllocated(NoLockingNecessary, this));
+
+ if (space()->isMarking())
+ block().m_lock.lock();
+
+ if (m_attributes.destruction == NeedsDestruction)
+ return subspace()->finishSweep(*this, sweepMode);
+
+ // Handle the no-destructor specializations here, since we have the most of those. This
+ // ensures that they don't get re-specialized for every destructor space.
+
+ EmptyMode emptyMode = this->emptyMode();
+ ScribbleMode scribbleMode = this->scribbleMode();
+ NewlyAllocatedMode newlyAllocatedMode = this->newlyAllocatedMode();
+ MarksMode marksMode = this->marksMode();
+
+ FreeList result;
+ auto trySpecialized = [&] () -> bool {
+ if (sweepMode != SweepToFreeList)
+ return false;
+ if (scribbleMode != DontScribble)
+ return false;
+ if (newlyAllocatedMode != DoesNotHaveNewlyAllocated)
+ return false;
+
+ switch (emptyMode) {
+ case IsEmpty:
+ switch (marksMode) {
+ case MarksNotStale:
+ result = specializedSweep<true, IsEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale>(IsEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale, [] (VM&, JSCell*) { });
+ return true;
+ case MarksStale:
+ result = specializedSweep<true, IsEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale>(IsEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale, [] (VM&, JSCell*) { });
+ return true;
+ }
+ break;
+ case NotEmpty:
+ switch (marksMode) {
+ case MarksNotStale:
+ result = specializedSweep<true, NotEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale>(IsEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale, [] (VM&, JSCell*) { });
+ return true;
+ case MarksStale:
+ result = specializedSweep<true, NotEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale>(IsEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale, [] (VM&, JSCell*) { });
+ return true;
+ }
+ break;
+ }
+
+ return false;
+ };
+
+ if (trySpecialized())
+ return result;
- // Re-create our free list from before stopping allocation.
- return sweep(SweepToFreeList);
+ // The template arguments don't matter because the first one is false.
+ return specializedSweep<false, IsEmpty, SweepOnly, BlockHasNoDestructors, DontScribble, HasNewlyAllocated, MarksStale>(emptyMode, sweepMode, BlockHasNoDestructors, scribbleMode, newlyAllocatedMode, marksMode, [] (VM&, JSCell*) { });
}
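
The dispatch above converts runtime sweep parameters into template arguments so the common no-destructor sweeps compile to fully specialized loops, with one generic fallback. A condensed sketch of the same pattern, with invented names:

    enum class DemoMarksMode { Stale, NotStale };

    template<bool specialize, DemoMarksMode marksModeArg>
    int demoSweep(DemoMarksMode marksMode)
    {
        // With specialize == true the branch below folds to a constant; with
        // specialize == false the runtime argument decides.
        DemoMarksMode mode = specialize ? marksModeArg : marksMode;
        return mode == DemoMarksMode::Stale ? 0 : 1;
    }

    int demoDispatch(DemoMarksMode marksMode)
    {
        switch (marksMode) {
        case DemoMarksMode::Stale:
            return demoSweep<true, DemoMarksMode::Stale>(marksMode);
        case DemoMarksMode::NotStale:
            return demoSweep<true, DemoMarksMode::NotStale>(marksMode);
        }
        // Fallback: the template argument doesn't matter because specialize is false.
        return demoSweep<false, DemoMarksMode::Stale>(marksMode);
    }
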
} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::MarkedBlock::Handle::SweepMode mode)
+{
+ switch (mode) {
+ case JSC::MarkedBlock::Handle::SweepToFreeList:
+ out.print("SweepToFreeList");
+ return;
+ case JSC::MarkedBlock::Handle::SweepOnly:
+ out.print("SweepOnly");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
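
Defining WTF::printInternal for SweepMode is what makes the enum printable through WTF's dataLog/PrintStream machinery; a hypothetical call site might read as follows (the surrounding variables are assumptions):

    // dataLog("Sweeping block ", RawPointer(this), " with mode ", sweepMode, "\n");
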
+