Diffstat (limited to 'Source/JavaScriptCore/heap/MarkedAllocator.cpp')
-rw-r--r--  Source/JavaScriptCore/heap/MarkedAllocator.cpp  544
1 file changed, 430 insertions, 114 deletions
diff --git a/Source/JavaScriptCore/heap/MarkedAllocator.cpp b/Source/JavaScriptCore/heap/MarkedAllocator.cpp
index c2b0f72de..5ee544a3c 100644
--- a/Source/JavaScriptCore/heap/MarkedAllocator.cpp
+++ b/Source/JavaScriptCore/heap/MarkedAllocator.cpp
@@ -1,21 +1,60 @@
+/*
+ * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#include "config.h"
#include "MarkedAllocator.h"
-#include "DelayedReleaseScope.h"
+#include "AllocatingScope.h"
#include "GCActivityCallback.h"
#include "Heap.h"
#include "IncrementalSweeper.h"
+#include "JSCInlines.h"
+#include "MarkedAllocatorInlines.h"
+#include "MarkedBlockInlines.h"
+#include "SuperSampler.h"
#include "VM.h"
#include <wtf/CurrentTime.h>
namespace JSC {
-static bool isListPagedOut(double deadline, DoublyLinkedList<MarkedBlock>& list)
+MarkedAllocator::MarkedAllocator(Heap* heap, Subspace* subspace, size_t cellSize)
+ : m_currentBlock(0)
+ , m_lastActiveBlock(0)
+ , m_cellSize(static_cast<unsigned>(cellSize))
+ , m_attributes(subspace->attributes())
+ , m_heap(heap)
+ , m_subspace(subspace)
+{
+}
+
+bool MarkedAllocator::isPagedOut(double deadline)
{
unsigned itersSinceLastTimeCheck = 0;
- MarkedBlock* block = list.head();
- while (block) {
- block = block->next();
+ for (auto* block : m_blocks) {
+ if (block)
+ block->block().updateNeedsDestruction();
++itersSinceLastTimeCheck;
if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) {
double currentTime = WTF::monotonicallyIncreasingTime();
@@ -27,162 +66,439 @@ static bool isListPagedOut(double deadline, DoublyLinkedList<MarkedBlock>& list)
return false;
}
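
The loop above is the allocator's paged-out probe: touch every block, but only consult the clock once every Heap::s_timeCheckResolution iterations so the common path stays cheap. A minimal, self-contained sketch of that deadline pattern, with an illustrative resolution constant and std::chrono standing in for WTF::monotonicallyIncreasingTime():

#include <chrono>
#include <vector>

// Deadline-bounded visit: returns true if the deadline passed before every
// element was visited. The resolution constant is illustrative; JSC uses
// Heap::s_timeCheckResolution and WTF::monotonicallyIncreasingTime().
template<typename T, typename Visitor>
bool visitUntilDeadline(std::vector<T*>& items, double deadlineSeconds, Visitor visit)
{
    constexpr unsigned timeCheckResolution = 16;
    unsigned itersSinceLastTimeCheck = 0;
    for (T* item : items) {
        if (item)
            visit(*item);
        if (++itersSinceLastTimeCheck >= timeCheckResolution) {
            double now = std::chrono::duration<double>(
                std::chrono::steady_clock::now().time_since_epoch()).count();
            if (now > deadlineSeconds)
                return true;
            itersSinceLastTimeCheck = 0;
        }
    }
    return false;
}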
-bool MarkedAllocator::isPagedOut(double deadline)
+bool MarkedAllocator::shouldStealEmptyBlocksFromOtherAllocators() const
{
- if (isListPagedOut(deadline, m_blockList))
- return true;
- return false;
+ return !needsDestruction();
}
-inline void* MarkedAllocator::tryAllocateHelper(size_t bytes)
+MarkedBlock::Handle* MarkedAllocator::findEmptyBlockToSteal()
{
- // We need a while loop to check the free list because the DelayedReleaseScope
- // could cause arbitrary code to execute and exhaust the free list that we
- // thought had elements in it.
- while (!m_freeList.head) {
- DelayedReleaseScope delayedReleaseScope(*m_markedSpace);
- if (m_currentBlock) {
- ASSERT(m_currentBlock == m_nextBlockToSweep);
- m_currentBlock->didConsumeFreeList();
- m_nextBlockToSweep = m_currentBlock->next();
- }
+ // Don't allow others to steal from us, if we wouldn't steal from others.
+ if (!shouldStealEmptyBlocksFromOtherAllocators())
+ return nullptr;
+
+ m_emptyCursor = m_empty.findBit(m_emptyCursor, true);
+ if (m_emptyCursor >= m_blocks.size())
+ return nullptr;
+ return m_blocks[m_emptyCursor];
+}
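
findEmptyBlockToSteal resumes its scan of the m_empty bit vector from m_emptyCursor, so repeated calls pick up where the previous one stopped instead of rescanning from index 0. A rough stand-in for that cursor-based scan, using std::vector<bool> rather than WTF's FastBitVector (the helper below is illustrative, not the real findBit):

#include <cstddef>
#include <vector>

// Next index >= cursor whose bit equals 'value'; returns bits.size() if none.
// Mirrors how m_emptyCursor is advanced above; not FastBitVector's real code.
size_t findBit(const std::vector<bool>& bits, size_t cursor, bool value)
{
    for (size_t i = cursor; i < bits.size(); ++i) {
        if (bits[i] == value)
            return i;
    }
    return bits.size();
}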
- MarkedBlock* next;
- for (MarkedBlock*& block = m_nextBlockToSweep; block; block = next) {
- next = block->next();
+void MarkedAllocator::didConsumeFreeList()
+{
+ if (m_currentBlock)
+ m_currentBlock->didConsumeFreeList();
+
+ setFreeList(FreeList());
+ m_currentBlock = nullptr;
+}
- MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);
-
- if (!freeList.head) {
- block->didConsumeEmptyFreeList();
- m_blockList.remove(block);
- m_blockList.push(block);
- if (!m_lastFullBlock)
- m_lastFullBlock = block;
- continue;
- }
-
- if (bytes > block->cellSize()) {
- block->stopAllocating(freeList);
- continue;
- }
-
- m_currentBlock = block;
- m_freeList = freeList;
+void* MarkedAllocator::tryAllocateWithoutCollecting()
+{
+ SuperSamplerScope superSamplerScope(false);
+
+ ASSERT(!m_currentBlock);
+ ASSERT(!m_freeList);
+
+ for (;;) {
+ m_allocationCursor = (m_canAllocateButNotEmpty | m_empty).findBit(m_allocationCursor, true);
+ if (m_allocationCursor >= m_blocks.size())
break;
- }
- if (!m_freeList.head) {
- m_currentBlock = 0;
- return 0;
+ setIsCanAllocateButNotEmpty(NoLockingNecessary, m_allocationCursor, false);
+
+ if (void* result = tryAllocateIn(m_blocks[m_allocationCursor]))
+ return result;
+ }
+
+ if (Options::stealEmptyBlocksFromOtherAllocators()
+ && shouldStealEmptyBlocksFromOtherAllocators()) {
+ if (MarkedBlock::Handle* block = markedSpace().findEmptyBlockToSteal()) {
+ block->sweep();
+
+ // It's good that this clears canAllocateButNotEmpty as well as all other bits,
+ // because there is a remote chance that a block may have both canAllocateButNotEmpty
+ // and empty set at the same time.
+ block->removeFromAllocator();
+ addBlock(block);
+ return allocateIn(block);
}
}
-
- ASSERT(m_freeList.head);
- MarkedBlock::FreeCell* head = m_freeList.head;
- m_freeList.head = head->next;
- ASSERT(head);
- m_markedSpace->didAllocateInBlock(m_currentBlock);
- return head;
-}
-inline void* MarkedAllocator::tryAllocate(size_t bytes)
+ return nullptr;
+}
+
+void* MarkedAllocator::allocateIn(MarkedBlock::Handle* block)
{
- ASSERT(!m_heap->isBusy());
- m_heap->m_operationInProgress = Allocation;
- void* result = tryAllocateHelper(bytes);
- m_heap->m_operationInProgress = NoOperation;
+ void* result = tryAllocateIn(block);
+ RELEASE_ASSERT(result);
return result;
}
+
+void* MarkedAllocator::tryAllocateIn(MarkedBlock::Handle* block)
+{
+ ASSERT(block);
+ ASSERT(!block->isFreeListed());
+
+ FreeList freeList = block->sweep(MarkedBlock::Handle::SweepToFreeList);
+
+ // It's possible to stumble on a completely full block. Marking tries to retire these, but
+ // that algorithm is racy and may forget to do it sometimes.
+ if (freeList.allocationWillFail()) {
+ ASSERT(block->isFreeListed());
+ block->unsweepWithNoNewlyAllocated();
+ ASSERT(!block->isFreeListed());
+ ASSERT(!isEmpty(NoLockingNecessary, block));
+ ASSERT(!isCanAllocateButNotEmpty(NoLockingNecessary, block));
+ return nullptr;
+ }
+
+ m_currentBlock = block;
+ setFreeList(freeList);
-void* MarkedAllocator::allocateSlowCase(size_t bytes)
+ void* result;
+ if (m_freeList.remaining) {
+ unsigned cellSize = m_cellSize;
+ m_freeList.remaining -= cellSize;
+ result = m_freeList.payloadEnd - m_freeList.remaining - cellSize;
+ } else {
+ FreeCell* head = m_freeList.head;
+ m_freeList.head = head->next;
+ result = head;
+ }
+ RELEASE_ASSERT(result);
+ setIsEden(NoLockingNecessary, m_currentBlock, true);
+ markedSpace().didAllocateInBlock(m_currentBlock);
+ return result;
+}
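
tryAllocateIn hands out the first cell directly from the free list it just built: in bump mode it slices m_cellSize bytes off the span described by payloadEnd and remaining, otherwise it pops the head of the linked list of FreeCells. A simplified model of that two-mode free list, assuming the field names above but not JSC's actual FreeList layout:

struct FreeCell { FreeCell* next; };

// Two-mode free list: a bump range (payloadEnd/remaining) or a linked list of
// dead cells. Field names follow the code above; the layout is a sketch only.
struct SketchFreeList {
    FreeCell* head { nullptr };   // linked-list mode
    char* payloadEnd { nullptr }; // bump mode: cells are handed out below this address
    unsigned remaining { 0 };     // bytes still unallocated in bump mode
    unsigned originalSize { 0 };

    void* allocate(unsigned cellSize)
    {
        if (remaining) {
            remaining -= cellSize;
            return payloadEnd - remaining - cellSize; // same arithmetic as tryAllocateIn
        }
        FreeCell* cell = head;
        if (!cell)
            return nullptr; // caller would refill from another block
        head = cell->next;
        return cell;
    }
};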
+
+ALWAYS_INLINE void MarkedAllocator::doTestCollectionsIfNeeded(GCDeferralContext* deferralContext)
+{
+ if (!Options::slowPathAllocsBetweenGCs())
+ return;
+
+ static unsigned allocationCount = 0;
+ if (!allocationCount) {
+ if (!m_heap->isDeferred()) {
+ if (deferralContext)
+ deferralContext->m_shouldGC = true;
+ else
+ m_heap->collectAllGarbage();
+ }
+ }
+ if (++allocationCount >= Options::slowPathAllocsBetweenGCs())
+ allocationCount = 0;
+}
+
+void* MarkedAllocator::allocateSlowCase(GCDeferralContext* deferralContext)
{
+ bool crashOnFailure = true;
+ return allocateSlowCaseImpl(deferralContext, crashOnFailure);
+}
+
+void* MarkedAllocator::tryAllocateSlowCase(GCDeferralContext* deferralContext)
+{
+ bool crashOnFailure = false;
+ return allocateSlowCaseImpl(deferralContext, crashOnFailure);
+}
+
+void* MarkedAllocator::allocateSlowCaseImpl(GCDeferralContext* deferralContext, bool crashOnFailure)
+{
+ SuperSamplerScope superSamplerScope(false);
ASSERT(m_heap->vm()->currentThreadIsHoldingAPILock());
-#if COLLECT_ON_EVERY_ALLOCATION
- if (!m_heap->isDeferred())
- m_heap->collectAllGarbage();
- ASSERT(m_heap->m_operationInProgress == NoOperation);
-#endif
+ doTestCollectionsIfNeeded(deferralContext);
+
+ ASSERT(!markedSpace().isIterating());
+ m_heap->didAllocate(m_freeList.originalSize);
- ASSERT(!m_markedSpace->isIterating());
- ASSERT(!m_freeList.head);
- m_heap->didAllocate(m_freeList.bytes);
+ didConsumeFreeList();
- void* result = tryAllocate(bytes);
+ AllocatingScope helpingHeap(*m_heap);
+
+ m_heap->collectIfNecessaryOrDefer(deferralContext);
+
+ // Goofy corner case: the GC called a callback and now this allocator has a currentBlock. This only
+ // happens when running WebKit tests, which inject a callback into the GC's finalization.
+ if (UNLIKELY(m_currentBlock)) {
+ if (crashOnFailure)
+ return allocate(deferralContext);
+ return tryAllocate(deferralContext);
+ }
+
+ void* result = tryAllocateWithoutCollecting();
if (LIKELY(result != 0))
return result;
- if (m_heap->collectIfNecessaryOrDefer()) {
- result = tryAllocate(bytes);
- if (result)
- return result;
+ MarkedBlock::Handle* block = tryAllocateBlock();
+ if (!block) {
+ if (crashOnFailure)
+ RELEASE_ASSERT_NOT_REACHED();
+ else
+ return nullptr;
}
-
- ASSERT(!m_heap->shouldCollect());
-
- MarkedBlock* block = allocateBlock(bytes);
- ASSERT(block);
addBlock(block);
-
- result = tryAllocate(bytes);
+ result = allocateIn(block);
ASSERT(result);
return result;
}
-MarkedBlock* MarkedAllocator::allocateBlock(size_t bytes)
+static size_t blockHeaderSize()
+{
+ return WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(sizeof(MarkedBlock));
+}
+
+size_t MarkedAllocator::blockSizeForBytes(size_t bytes)
{
size_t minBlockSize = MarkedBlock::blockSize;
- size_t minAllocationSize = WTF::roundUpToMultipleOf(WTF::pageSize(), sizeof(MarkedBlock) + bytes);
- size_t blockSize = std::max(minBlockSize, minAllocationSize);
+ size_t minAllocationSize = blockHeaderSize() + WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(bytes);
+ minAllocationSize = WTF::roundUpToMultipleOf(WTF::pageSize(), minAllocationSize);
+ return std::max(minBlockSize, minAllocationSize);
+}
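
blockSizeForBytes pads both the MarkedBlock header and the requested payload to the atom size, rounds the sum up to whole pages, and never returns less than the standard block size. A worked, self-contained version with illustrative constants (16-byte atoms, 4 KiB pages, 16 KiB blocks, a 256-byte header); the real values come from MarkedBlock, sizeof(MarkedBlock), and WTF::pageSize():

#include <algorithm>
#include <cstddef>

// Illustrative constants; the real ones are MarkedBlock::atomSize,
// WTF::pageSize(), MarkedBlock::blockSize and sizeof(MarkedBlock).
constexpr size_t atomSize = 16;
constexpr size_t pageSize = 4 * 1024;
constexpr size_t markedBlockSize = 16 * 1024;
constexpr size_t headerSize = 256;

constexpr size_t roundUpToMultipleOf(size_t divisor, size_t x)
{
    return ((x + divisor - 1) / divisor) * divisor;
}

constexpr size_t blockSizeForBytesSketch(size_t bytes)
{
    size_t minAllocationSize = roundUpToMultipleOf(atomSize, headerSize)
        + roundUpToMultipleOf(atomSize, bytes);
    minAllocationSize = roundUpToMultipleOf(pageSize, minAllocationSize);
    return std::max(markedBlockSize, minAllocationSize);
}

// A 20 KiB cell needs 256 + 20480 = 20736 bytes, which rounds up to six
// 4 KiB pages (24576) and exceeds the 16 KiB minimum block size.
static_assert(blockSizeForBytesSketch(20 * 1024) == 24576, "worked example");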
- size_t cellSize = m_cellSize ? m_cellSize : WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(bytes);
+MarkedBlock::Handle* MarkedAllocator::tryAllocateBlock()
+{
+ SuperSamplerScope superSamplerScope(false);
+
+ MarkedBlock::Handle* handle = MarkedBlock::tryCreate(*m_heap);
+ if (!handle)
+ return nullptr;
+
+ markedSpace().didAddBlock(handle);
+
+ return handle;
+}
- if (blockSize == MarkedBlock::blockSize)
- return MarkedBlock::create(m_heap->blockAllocator().allocate<MarkedBlock>(), this, cellSize, m_destructorType);
- return MarkedBlock::create(m_heap->blockAllocator().allocateCustomSize(blockSize, MarkedBlock::blockSize), this, cellSize, m_destructorType);
+void MarkedAllocator::addBlock(MarkedBlock::Handle* block)
+{
+ size_t index;
+ if (m_freeBlockIndices.isEmpty()) {
+ index = m_blocks.size();
+
+ size_t oldCapacity = m_blocks.capacity();
+ m_blocks.append(block);
+ if (m_blocks.capacity() != oldCapacity) {
+ forEachBitVector(
+ NoLockingNecessary,
+ [&] (FastBitVector& vector) {
+ ASSERT_UNUSED(vector, vector.numBits() == oldCapacity);
+ });
+
+ ASSERT(m_blocks.capacity() > oldCapacity);
+
+ LockHolder locker(m_bitvectorLock);
+ forEachBitVector(
+ locker,
+ [&] (FastBitVector& vector) {
+ vector.resize(m_blocks.capacity());
+ });
+ }
+ } else {
+ index = m_freeBlockIndices.takeLast();
+ ASSERT(!m_blocks[index]);
+ m_blocks[index] = block;
+ }
+
+ forEachBitVector(
+ NoLockingNecessary,
+ [&] (FastBitVector& vector) {
+ ASSERT_UNUSED(vector, !vector[index]);
+ });
+
+ // This is the point at which the block learns of its cellSize() and attributes().
+ block->didAddToAllocator(this, index);
+
+ setIsLive(NoLockingNecessary, index, true);
+ setIsEmpty(NoLockingNecessary, index, true);
}
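
addBlock maintains one invariant: every per-allocator bit vector has exactly m_blocks.capacity() bits, so a block's index addresses all of them, and indices vacated by removeBlock are recycled through m_freeBlockIndices before the vectors grow. A minimal sketch of that index-recycling scheme, with a single std::vector<bool> standing in for the whole family of bit vectors (no locking shown):

#include <cstddef>
#include <vector>

// Index-recycling block registry: the bit vector stays sized to blocks.capacity(),
// and slots freed by remove() are reused before the arrays grow.
struct IndexedBlockSet {
    std::vector<void*> blocks;
    std::vector<size_t> freeIndices;
    std::vector<bool> live; // stands in for the forEachBitVector family

    size_t add(void* block)
    {
        size_t index;
        if (freeIndices.empty()) {
            index = blocks.size();
            blocks.push_back(block);
            live.resize(blocks.capacity()); // grow the bits in lockstep, as above
        } else {
            index = freeIndices.back();
            freeIndices.pop_back();
            blocks[index] = block;
        }
        live[index] = true;
        return index;
    }

    void remove(size_t index)
    {
        blocks[index] = nullptr;
        live[index] = false;
        freeIndices.push_back(index);
    }
};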
-void MarkedAllocator::addBlock(MarkedBlock* block)
+void MarkedAllocator::removeBlock(MarkedBlock::Handle* block)
{
- // Satisfy the ASSERT in MarkedBlock::sweep.
- DelayedReleaseScope delayedReleaseScope(*m_markedSpace);
- ASSERT(!m_currentBlock);
- ASSERT(!m_freeList.head);
+ ASSERT(block->allocator() == this);
+ ASSERT(m_blocks[block->index()] == block);
+
+ m_blocks[block->index()] = nullptr;
+ m_freeBlockIndices.append(block->index());
+
+ forEachBitVector(
+ holdLock(m_bitvectorLock),
+ [&] (FastBitVector& vector) {
+ vector[block->index()] = false;
+ });
- m_blockList.append(block);
- m_nextBlockToSweep = m_currentBlock = block;
- m_freeList = block->sweep(MarkedBlock::SweepToFreeList);
- m_markedSpace->didAddBlock(block);
+ block->didRemoveFromAllocator();
}
-void MarkedAllocator::removeBlock(MarkedBlock* block)
+void MarkedAllocator::stopAllocating()
{
- if (m_currentBlock == block) {
- m_currentBlock = m_currentBlock->next();
- m_freeList = MarkedBlock::FreeList();
+ if (false)
+ dataLog(RawPointer(this), ": MarkedAllocator::stopAllocating!\n");
+ ASSERT(!m_lastActiveBlock);
+ if (!m_currentBlock) {
+ ASSERT(!m_freeList);
+ return;
}
- if (m_nextBlockToSweep == block)
- m_nextBlockToSweep = m_nextBlockToSweep->next();
+
+ m_currentBlock->stopAllocating(m_freeList);
+ m_lastActiveBlock = m_currentBlock;
+ m_currentBlock = 0;
+ m_freeList = FreeList();
+}
+
+void MarkedAllocator::prepareForAllocation()
+{
+ m_lastActiveBlock = nullptr;
+ m_currentBlock = nullptr;
+ setFreeList(FreeList());
- if (block == m_lastFullBlock)
- m_lastFullBlock = m_lastFullBlock->prev();
+ m_allocationCursor = 0;
+ m_emptyCursor = 0;
+ m_unsweptCursor = 0;
- m_blockList.remove(block);
+ m_eden.clearAll();
+
+ if (UNLIKELY(Options::useImmortalObjects())) {
+ // FIXME: Make this work again.
+ // https://bugs.webkit.org/show_bug.cgi?id=162296
+ RELEASE_ASSERT_NOT_REACHED();
+ }
}
-void MarkedAllocator::reset()
+void MarkedAllocator::lastChanceToFinalize()
{
- m_lastActiveBlock = 0;
- m_currentBlock = 0;
- m_freeList = MarkedBlock::FreeList();
- if (m_heap->operationInProgress() == FullCollection)
- m_lastFullBlock = 0;
+ forEachBlock(
+ [&] (MarkedBlock::Handle* block) {
+ block->lastChanceToFinalize();
+ });
+}
- if (m_lastFullBlock)
- m_nextBlockToSweep = m_lastFullBlock->next() ? m_lastFullBlock->next() : m_lastFullBlock;
- else
- m_nextBlockToSweep = m_blockList.head();
+void MarkedAllocator::setFreeList(const FreeList& freeList)
+{
+ m_freeList = freeList;
+}
+
+void MarkedAllocator::resumeAllocating()
+{
+ if (!m_lastActiveBlock)
+ return;
+
+ m_freeList = m_lastActiveBlock->resumeAllocating();
+ m_currentBlock = m_lastActiveBlock;
+ m_lastActiveBlock = nullptr;
+}
+
+void MarkedAllocator::beginMarkingForFullCollection()
+{
+ // Mark bits are sticky and so is our summary of mark bits. We only clear these during full
+ // collections, so if you survived the last collection you will survive the next one so long
+ // as the next one is eden.
+ m_markingNotEmpty.clearAll();
+ m_markingRetired.clearAll();
+}
+
+void MarkedAllocator::endMarking()
+{
+ m_allocated.clearAll();
+
+ // It's surprising and frustrating to comprehend, but the end-of-marking flip does not need to
+ // know what kind of collection it is. That knowledge is already encoded in the m_markingXYZ
+ // vectors.
+
+ if (needsDestruction()) {
+ // If blocks need destruction then nothing is empty! This is a correct assertion but may
+ // become wrong once we go full concurrent: when we create a new block, it will flicker
+ // into the empty set for a tiny moment. On the other hand, this code is likely to be run
+ // in stopTheWorld.
+ ASSERT(m_empty.isEmpty());
+ m_canAllocateButNotEmpty = m_live & ~m_markingRetired;
+ return;
+ }
+
+ m_empty = m_live & ~m_markingNotEmpty;
+ m_canAllocateButNotEmpty = m_live & m_markingNotEmpty & ~m_markingRetired;
+
+ if (false) {
+ dataLog("Bits for ", m_cellSize, ", ", m_attributes, " after endMarking:\n");
+ dumpBits(WTF::dataFile());
+ }
+}
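
endMarking rebuilds the allocator's block sets from the sticky marking summaries: a live block in which nothing was marked becomes empty, and a live, unretired block with some marked cells becomes canAllocateButNotEmpty. A tiny worked example of that bit algebra with std::bitset, for three hypothetical blocks (block 0 had nothing marked, block 1 had some cells marked, block 2 was retired):

#include <bitset>
#include <cassert>

int main()
{
    // Rightmost bit is block 0. All three blocks are live; marking saw cells
    // in blocks 1 and 2, and block 2 was retired.
    std::bitset<3> live("111"), markingNotEmpty("110"), markingRetired("100");

    std::bitset<3> empty = live & ~markingNotEmpty;
    std::bitset<3> canAllocateButNotEmpty = live & markingNotEmpty & ~markingRetired;

    assert(empty == std::bitset<3>("001"));                  // only block 0 is empty
    assert(canAllocateButNotEmpty == std::bitset<3>("010"));  // only block 1 is reusable
    assert(!empty[2] && !canAllocateButNotEmpty[2]);          // retired block 2 is in neither set
    return 0;
}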
+
+void MarkedAllocator::snapshotUnsweptForEdenCollection()
+{
+ m_unswept |= m_eden;
+}
+
+void MarkedAllocator::snapshotUnsweptForFullCollection()
+{
+ m_unswept = m_live;
+}
+
+MarkedBlock::Handle* MarkedAllocator::findBlockToSweep()
+{
+ m_unsweptCursor = m_unswept.findBit(m_unsweptCursor, true);
+ if (m_unsweptCursor >= m_blocks.size())
+ return nullptr;
+ return m_blocks[m_unsweptCursor];
+}
+
+void MarkedAllocator::sweep()
+{
+ m_unswept.forEachSetBit(
+ [&] (size_t index) {
+ MarkedBlock::Handle* block = m_blocks[index];
+ block->sweep();
+ });
+}
+
+void MarkedAllocator::shrink()
+{
+ m_empty.forEachSetBit(
+ [&] (size_t index) {
+ markedSpace().freeBlock(m_blocks[index]);
+ });
+}
+
+void MarkedAllocator::assertNoUnswept()
+{
+ if (ASSERT_DISABLED)
+ return;
+
+ if (m_unswept.isEmpty())
+ return;
+
+ dataLog("Assertion failed: unswept not empty in ", *this, ".\n");
+ dumpBits();
+ ASSERT_NOT_REACHED();
+}
+
+void MarkedAllocator::dump(PrintStream& out) const
+{
+ out.print(RawPointer(this), ":", m_cellSize, "/", m_attributes);
+}
+
+void MarkedAllocator::dumpBits(PrintStream& out)
+{
+ unsigned maxNameLength = 0;
+ forEachBitVectorWithName(
+ NoLockingNecessary,
+ [&] (FastBitVector&, const char* name) {
+ unsigned length = strlen(name);
+ maxNameLength = std::max(maxNameLength, length);
+ });
+
+ forEachBitVectorWithName(
+ NoLockingNecessary,
+ [&] (FastBitVector& vector, const char* name) {
+ out.print(" ", name, ": ");
+ for (unsigned i = maxNameLength - strlen(name); i--;)
+ out.print(" ");
+ out.print(vector, "\n");
+ });
+}
+
+MarkedSpace& MarkedAllocator::markedSpace() const
+{
+ return m_subspace->space();
}
} // namespace JSC
+