author    Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
committer Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
commit    1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c
tree      46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/heap/MarkedSpace.h
parent    32761a6cee1d0dee366b885b7b9c777e67885688
Diffstat (limited to 'Source/JavaScriptCore/heap/MarkedSpace.h')
-rw-r--r--  Source/JavaScriptCore/heap/MarkedSpace.h | 374
1 file changed, 155 insertions(+), 219 deletions(-)
diff --git a/Source/JavaScriptCore/heap/MarkedSpace.h b/Source/JavaScriptCore/heap/MarkedSpace.h
index e853d6674..26be5e3df 100644
--- a/Source/JavaScriptCore/heap/MarkedSpace.h
+++ b/Source/JavaScriptCore/heap/MarkedSpace.h
@@ -1,7 +1,7 @@
/*
* Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
* Copyright (C) 2001 Peter Kelly (pmk@post.com)
- * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2003-2017 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -19,291 +19,227 @@
*
*/
-#ifndef MarkedSpace_h
-#define MarkedSpace_h
+#pragma once
-#include "MachineStackMarker.h"
+#include "IterationStatus.h"
+#include "LargeAllocation.h"
#include "MarkedAllocator.h"
#include "MarkedBlock.h"
#include "MarkedBlockSet.h"
#include <array>
-#include <wtf/PageAllocationAligned.h>
-#include <wtf/Bitmap.h>
-#include <wtf/DoublyLinkedList.h>
+#include <wtf/Bag.h>
#include <wtf/HashSet.h>
#include <wtf/Noncopyable.h>
+#include <wtf/RetainPtr.h>
+#include <wtf/SentinelLinkedList.h>
#include <wtf/Vector.h>
namespace JSC {
-class DelayedReleaseScope;
class Heap;
class HeapIterationScope;
-class JSCell;
-class LiveObjectIterator;
class LLIntOffsetsExtractor;
-class WeakGCHandle;
-class SlotVisitor;
+class Subspace;
+class WeakSet;
-struct ClearMarks : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block)
- {
- block->clearMarks();
- }
-};
-
-struct ClearRememberedSet : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block)
- {
- block->clearRememberedSet();
- }
-};
-
-struct Sweep : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block) { block->sweep(); }
-};
-
-struct MarkCount : MarkedBlock::CountFunctor {
- void operator()(MarkedBlock* block) { count(block->markCount()); }
-};
-
-struct Size : MarkedBlock::CountFunctor {
- void operator()(MarkedBlock* block) { count(block->markCount() * block->cellSize()); }
-};
+typedef uint32_t HeapVersion;
class MarkedSpace {
WTF_MAKE_NONCOPYABLE(MarkedSpace);
public:
+ // sizeStep is really a synonym for atomSize; it's no accident that they are the same.
+ static const size_t sizeStep = MarkedBlock::atomSize;
+
+ // Sizes up to this amount get a size class for each size step.
+ static const size_t preciseCutoff = 80;
+
+ // The amount of available payload in a block is the block's size minus the header. But the
+ // header size might not be atom size aligned, so we round down the result accordingly.
+ static const size_t blockPayload = (MarkedBlock::blockSize - sizeof(MarkedBlock)) & ~(MarkedBlock::atomSize - 1);
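// A worked example of the blockPayload computation, using assumed values that are
// not taken from this patch: if MarkedBlock::blockSize == 16 * 1024,
// sizeof(MarkedBlock) == 248, and MarkedBlock::atomSize == 16, then
//     blockPayload == (16384 - 248) & ~15 == 16136 & ~15 == 16128,
// i.e. the 8 stray bytes left over by the header are rounded away so that the
// payload is a whole number of atoms.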
+
+ // The largest cell we're willing to allocate in a MarkedBlock the "normal way" (i.e. using size
+ // classes, rather than a large allocation) is half the size of the payload, rounded down. This
+ // ensures that we only use the size class approach if it means being able to pack two things
+ // into one block.
+ static const size_t largeCutoff = (blockPayload / 2) & ~(sizeStep - 1);
+
+ static const size_t numSizeClasses = largeCutoff / sizeStep;
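// Continuing the assumed numbers above: blockPayload / 2 == 8064, which is already
// a multiple of sizeStep, so largeCutoff == 8064 and numSizeClasses == 504. A cell
// bigger than largeCutoff is never packed into a MarkedBlock; it takes the
// LargeAllocation path instead.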
+
+ static const HeapVersion nullVersion = 0; // The version of freshly allocated blocks.
+ static const HeapVersion initialVersion = 2; // The version that the heap starts out with. Set to make sure that nextVersion(nullVersion) != initialVersion.
+
+ static HeapVersion nextVersion(HeapVersion version)
+ {
+ version++;
+ if (version == nullVersion)
+ version = initialVersion;
+ return version;
+ }
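// For example, nextVersion(2) == 3. On uint32_t wraparound, incrementing
// UINT32_MAX yields 0 == nullVersion, so the result is bumped to initialVersion;
// a version in active use therefore never collides with the version of a freshly
// allocated block.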
+
+ static size_t sizeClassToIndex(size_t size)
+ {
+ ASSERT(size);
+ return (size + sizeStep - 1) / sizeStep - 1;
+ }
+
+ static size_t indexToSizeClass(size_t index)
+ {
+ return (index + 1) * sizeStep;
+ }
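// Round-trip sketch, assuming MarkedBlock::atomSize == 16 (so sizeStep == 16):
//     sizeClassToIndex(1)  == 0      indexToSizeClass(0) == 16
//     sizeClassToIndex(16) == 0      indexToSizeClass(1) == 32
//     sizeClassToIndex(17) == 1
// sizeClassToIndex rounds a request up to the next step boundary, so
// indexToSizeClass(sizeClassToIndex(size)) >= size for any nonzero size.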
+
MarkedSpace(Heap*);
~MarkedSpace();
- void lastChanceToFinalize();
+
+ Heap* heap() const { return m_heap; }
+
+ void lastChanceToFinalize(); // You must call stopAllocating before you call this.
- MarkedAllocator& firstAllocator();
- MarkedAllocator& allocatorFor(size_t);
- MarkedAllocator& immortalStructureDestructorAllocatorFor(size_t);
- MarkedAllocator& normalDestructorAllocatorFor(size_t);
- void* allocateWithNormalDestructor(size_t);
- void* allocateWithImmortalStructureDestructor(size_t);
- void* allocateWithoutDestructor(size_t);
-
- void resetAllocators();
+ static size_t optimalSizeFor(size_t);
+
+ void prepareForAllocation();
- void visitWeakSets(HeapRootVisitor&);
+ void visitWeakSets(SlotVisitor&);
void reapWeakSets();
MarkedBlockSet& blocks() { return m_blocks; }
void willStartIterating();
- bool isIterating() { return m_isIterating; }
+ bool isIterating() const { return m_isIterating; }
void didFinishIterating();
void stopAllocating();
void resumeAllocating(); // If we just stopped allocation but we didn't do a collection, we need to resume allocation.
-
- typedef HashSet<MarkedBlock*>::iterator BlockIterator;
- template<typename Functor> typename Functor::ReturnType forEachLiveCell(HeapIterationScope&, Functor&);
- template<typename Functor> typename Functor::ReturnType forEachLiveCell(HeapIterationScope&);
- template<typename Functor> typename Functor::ReturnType forEachDeadCell(HeapIterationScope&, Functor&);
- template<typename Functor> typename Functor::ReturnType forEachDeadCell(HeapIterationScope&);
- template<typename Functor> typename Functor::ReturnType forEachBlock(Functor&);
- template<typename Functor> typename Functor::ReturnType forEachBlock();
+ void prepareForMarking();
+ void prepareForConservativeScan();
+
+ typedef HashSet<MarkedBlock*>::iterator BlockIterator;
+
+ template<typename Functor> void forEachLiveCell(HeapIterationScope&, const Functor&);
+ template<typename Functor> void forEachDeadCell(HeapIterationScope&, const Functor&);
+ template<typename Functor> void forEachBlock(const Functor&);
+
void shrink();
- void freeBlock(MarkedBlock*);
- void freeOrShrinkBlock(MarkedBlock*);
+ void freeBlock(MarkedBlock::Handle*);
+ void freeOrShrinkBlock(MarkedBlock::Handle*);
- void didAddBlock(MarkedBlock*);
- void didConsumeFreeList(MarkedBlock*);
- void didAllocateInBlock(MarkedBlock*);
+ void didAddBlock(MarkedBlock::Handle*);
+ void didConsumeFreeList(MarkedBlock::Handle*);
+ void didAllocateInBlock(MarkedBlock::Handle*);
- void clearMarks();
- void clearRememberedSet();
+ void beginMarking();
+ void endMarking();
+ void snapshotUnswept();
void clearNewlyAllocated();
void sweep();
+ void sweepLargeAllocations();
+ void assertNoUnswept();
size_t objectCount();
size_t size();
size_t capacity();
bool isPagedOut(double deadline);
+
+ HeapVersion markingVersion() const { return m_markingVersion; }
+ HeapVersion newlyAllocatedVersion() const { return m_newlyAllocatedVersion; }
-#if USE(CF)
- template<typename T> void releaseSoon(RetainPtr<T>&&);
-#endif
-
+ const Vector<LargeAllocation*>& largeAllocations() const { return m_largeAllocations; }
+ unsigned largeAllocationsNurseryOffset() const { return m_largeAllocationsNurseryOffset; }
+ unsigned largeAllocationsOffsetForThisCollection() const { return m_largeAllocationsOffsetForThisCollection; }
+
+ // These are cached pointers and offsets for quickly searching the large allocations that are
+ // relevant to this collection.
+ LargeAllocation** largeAllocationsForThisCollectionBegin() const { return m_largeAllocationsForThisCollectionBegin; }
+ LargeAllocation** largeAllocationsForThisCollectionEnd() const { return m_largeAllocationsForThisCollectionEnd; }
+ unsigned largeAllocationsForThisCollectionSize() const { return m_largeAllocationsForThisCollectionSize; }
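// Sketch of how such a cached window can be walked (illustrative only; "visit"
// is a placeholder, not an API from this patch):
//     for (LargeAllocation** ptr = largeAllocationsForThisCollectionBegin();
//             ptr != largeAllocationsForThisCollectionEnd(); ++ptr)
//         visit(*ptr);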
+
+ MarkedAllocator* firstAllocator() const { return m_firstAllocator; }
+ MarkedAllocator* allocatorForEmptyAllocation() const { return m_allocatorForEmptyAllocation; }
+
+ MarkedBlock::Handle* findEmptyBlockToSteal();
+
+ Lock& allocatorLock() { return m_allocatorLock; }
+ MarkedAllocator* addMarkedAllocator(const AbstractLocker&, Subspace*, size_t cellSize);
+
+ // When this is true it means that we have flipped but the mark bits haven't converged yet.
+ bool isMarking() const { return m_isMarking; }
+
+ void dumpBits(PrintStream& = WTF::dataFile());
+
+ JS_EXPORT_PRIVATE static std::array<size_t, numSizeClasses> s_sizeClassForSizeStep;
+
private:
- friend class DelayedReleaseScope;
friend class LLIntOffsetsExtractor;
+ friend class JIT;
+ friend class WeakSet;
+ friend class Subspace;
+
+ void* allocateSlow(Subspace&, GCDeferralContext*, size_t);
+ void* tryAllocateSlow(Subspace&, GCDeferralContext*, size_t);
- template<typename Functor> void forEachAllocator(Functor&);
- template<typename Functor> void forEachAllocator();
-
- // [ 32... 128 ]
- static const size_t preciseStep = MarkedBlock::atomSize;
- static const size_t preciseCutoff = 128;
- static const size_t preciseCount = preciseCutoff / preciseStep;
+ static void initializeSizeClassForStepSize();
+
+ void initializeSubspace(Subspace&);
- // [ 1024... blockSize ]
- static const size_t impreciseStep = 2 * preciseCutoff;
- static const size_t impreciseCutoff = MarkedBlock::blockSize / 2;
- static const size_t impreciseCount = impreciseCutoff / impreciseStep;
+ template<typename Functor> inline void forEachAllocator(const Functor&);
+
+ void addActiveWeakSet(WeakSet*);
- struct Subspace {
- std::array<MarkedAllocator, preciseCount> preciseAllocators;
- std::array<MarkedAllocator, impreciseCount> impreciseAllocators;
- MarkedAllocator largeAllocator;
- };
+ Vector<Subspace*> m_subspaces;
- Subspace m_normalDestructorSpace;
- Subspace m_immortalStructureDestructorSpace;
- Subspace m_normalSpace;
+ Vector<LargeAllocation*> m_largeAllocations;
+ unsigned m_largeAllocationsNurseryOffset { 0 };
+ unsigned m_largeAllocationsOffsetForThisCollection { 0 };
+ unsigned m_largeAllocationsNurseryOffsetForSweep { 0 };
+ LargeAllocation** m_largeAllocationsForThisCollectionBegin { nullptr };
+ LargeAllocation** m_largeAllocationsForThisCollectionEnd { nullptr };
+ unsigned m_largeAllocationsForThisCollectionSize { 0 };
Heap* m_heap;
+ HeapVersion m_markingVersion { initialVersion };
+ HeapVersion m_newlyAllocatedVersion { initialVersion };
size_t m_capacity;
bool m_isIterating;
+ bool m_isMarking { false };
MarkedBlockSet m_blocks;
- Vector<MarkedBlock*> m_blocksWithNewObjects;
-
- DelayedReleaseScope* m_currentDelayedReleaseScope;
+
+ SentinelLinkedList<WeakSet, BasicRawSentinelNode<WeakSet>> m_activeWeakSets;
+ SentinelLinkedList<WeakSet, BasicRawSentinelNode<WeakSet>> m_newActiveWeakSets;
+
+ Lock m_allocatorLock;
+ Bag<MarkedAllocator> m_bagOfAllocators;
+ MarkedAllocator* m_firstAllocator { nullptr };
+ MarkedAllocator* m_lastAllocator { nullptr };
+ MarkedAllocator* m_allocatorForEmptyAllocation { nullptr };
};
-template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachLiveCell(HeapIterationScope&, Functor& functor)
+template <typename Functor> inline void MarkedSpace::forEachBlock(const Functor& functor)
{
- ASSERT(isIterating());
- BlockIterator end = m_blocks.set().end();
- for (BlockIterator it = m_blocks.set().begin(); it != end; ++it)
- (*it)->forEachLiveCell(functor);
- return functor.returnValue();
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.forEachBlock(functor);
+ return IterationStatus::Continue;
+ });
}
-template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachLiveCell(HeapIterationScope& scope)
+template <typename Functor>
+void MarkedSpace::forEachAllocator(const Functor& functor)
{
- Functor functor;
- return forEachLiveCell(scope, functor);
-}
-
-template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachDeadCell(HeapIterationScope&, Functor& functor)
-{
- ASSERT(isIterating());
- BlockIterator end = m_blocks.set().end();
- for (BlockIterator it = m_blocks.set().begin(); it != end; ++it)
- (*it)->forEachDeadCell(functor);
- return functor.returnValue();
-}
-
-template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachDeadCell(HeapIterationScope& scope)
-{
- Functor functor;
- return forEachDeadCell(scope, functor);
-}
-
-inline MarkedAllocator& MarkedSpace::allocatorFor(size_t bytes)
-{
- ASSERT(bytes);
- if (bytes <= preciseCutoff)
- return m_normalSpace.preciseAllocators[(bytes - 1) / preciseStep];
- if (bytes <= impreciseCutoff)
- return m_normalSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
- return m_normalSpace.largeAllocator;
-}
-
-inline MarkedAllocator& MarkedSpace::immortalStructureDestructorAllocatorFor(size_t bytes)
-{
- ASSERT(bytes);
- if (bytes <= preciseCutoff)
- return m_immortalStructureDestructorSpace.preciseAllocators[(bytes - 1) / preciseStep];
- if (bytes <= impreciseCutoff)
- return m_immortalStructureDestructorSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
- return m_immortalStructureDestructorSpace.largeAllocator;
+ for (MarkedAllocator* allocator = m_firstAllocator; allocator; allocator = allocator->nextAllocator()) {
+ if (functor(*allocator) == IterationStatus::Done)
+ return;
+ }
}
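// Because the functor returns IterationStatus, a caller can stop the walk early.
// A hypothetical lookup by cell size (not part of this patch, and assuming access
// to this private helper):
//
//     MarkedAllocator* findAllocator(MarkedSpace& space, size_t cellSize)
//     {
//         MarkedAllocator* result = nullptr;
//         space.forEachAllocator([&] (MarkedAllocator& allocator) -> IterationStatus {
//             if (allocator.cellSize() != cellSize)
//                 return IterationStatus::Continue;
//             result = &allocator;
//             return IterationStatus::Done;
//         });
//         return result;
//     }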
-inline MarkedAllocator& MarkedSpace::normalDestructorAllocatorFor(size_t bytes)
+ALWAYS_INLINE size_t MarkedSpace::optimalSizeFor(size_t bytes)
{
ASSERT(bytes);
if (bytes <= preciseCutoff)
- return m_normalDestructorSpace.preciseAllocators[(bytes - 1) / preciseStep];
- if (bytes <= impreciseCutoff)
- return m_normalDestructorSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
- return m_normalDestructorSpace.largeAllocator;
-}
-
-inline void* MarkedSpace::allocateWithoutDestructor(size_t bytes)
-{
- return allocatorFor(bytes).allocate(bytes);
-}
-
-inline void* MarkedSpace::allocateWithImmortalStructureDestructor(size_t bytes)
-{
- return immortalStructureDestructorAllocatorFor(bytes).allocate(bytes);
-}
-
-inline void* MarkedSpace::allocateWithNormalDestructor(size_t bytes)
-{
- return normalDestructorAllocatorFor(bytes).allocate(bytes);
-}
-
-template <typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachBlock(Functor& functor)
-{
- for (size_t i = 0; i < preciseCount; ++i) {
- m_normalSpace.preciseAllocators[i].forEachBlock(functor);
- m_normalDestructorSpace.preciseAllocators[i].forEachBlock(functor);
- m_immortalStructureDestructorSpace.preciseAllocators[i].forEachBlock(functor);
- }
-
- for (size_t i = 0; i < impreciseCount; ++i) {
- m_normalSpace.impreciseAllocators[i].forEachBlock(functor);
- m_normalDestructorSpace.impreciseAllocators[i].forEachBlock(functor);
- m_immortalStructureDestructorSpace.impreciseAllocators[i].forEachBlock(functor);
- }
-
- m_normalSpace.largeAllocator.forEachBlock(functor);
- m_normalDestructorSpace.largeAllocator.forEachBlock(functor);
- m_immortalStructureDestructorSpace.largeAllocator.forEachBlock(functor);
-
- return functor.returnValue();
-}
-
-template <typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachBlock()
-{
- Functor functor;
- return forEachBlock(functor);
-}
-
-inline void MarkedSpace::didAddBlock(MarkedBlock* block)
-{
- m_capacity += block->capacity();
- m_blocks.add(block);
-}
-
-inline void MarkedSpace::didAllocateInBlock(MarkedBlock* block)
-{
-#if ENABLE(GGC)
- m_blocksWithNewObjects.append(block);
-#else
- UNUSED_PARAM(block);
-#endif
-}
-
-inline void MarkedSpace::clearRememberedSet()
-{
- forEachBlock<ClearRememberedSet>();
-}
-
-inline size_t MarkedSpace::objectCount()
-{
- return forEachBlock<MarkCount>();
-}
-
-inline size_t MarkedSpace::size()
-{
- return forEachBlock<Size>();
-}
-
-inline size_t MarkedSpace::capacity()
-{
- return m_capacity;
+ return WTF::roundUpToMultipleOf<sizeStep>(bytes);
+ if (bytes <= largeCutoff)
+ return s_sizeClassForSizeStep[sizeClassToIndex(bytes)];
+ return bytes;
}
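// Worked example of the three branches, with the assumed numbers from above
// (sizeStep == 16, preciseCutoff == 80, largeCutoff == 8064):
//     optimalSizeFor(7)    == 16                          // rounded up to a step
//     optimalSizeFor(100)  == s_sizeClassForSizeStep[6]   // table lookup, >= 112
//     optimalSizeFor(9000) == 9000                        // handed to LargeAllocation
// The table entry may skip past the raw rounded size when a larger class wastes
// less of the block payload, which is why the middle case is ">=" rather than "==".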
} // namespace JSC
-
-#endif // MarkedSpace_h