path: root/Source/JavaScriptCore/heap
author     Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
committer  Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
commit     1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree       46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/heap
parent     32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
Diffstat (limited to 'Source/JavaScriptCore/heap')
-rw-r--r--  Source/JavaScriptCore/heap/AllocatingScope.h (renamed from Source/JavaScriptCore/heap/GCThread.h)  45
-rw-r--r--  Source/JavaScriptCore/heap/AllocatorAttributes.cpp  39
-rw-r--r--  Source/JavaScriptCore/heap/AllocatorAttributes.h  51
-rw-r--r--  Source/JavaScriptCore/heap/BlockAllocator.cpp  172
-rw-r--r--  Source/JavaScriptCore/heap/BlockAllocator.h  300
-rw-r--r--  Source/JavaScriptCore/heap/CellContainer.cpp  43
-rw-r--r--  Source/JavaScriptCore/heap/CellContainer.h  102
-rw-r--r--  Source/JavaScriptCore/heap/CellContainerInlines.h  104
-rw-r--r--  Source/JavaScriptCore/heap/CellState.h (renamed from Source/JavaScriptCore/heap/CopyVisitor.h)  54
-rw-r--r--  Source/JavaScriptCore/heap/CodeBlockSet.cpp  145
-rw-r--r--  Source/JavaScriptCore/heap/CodeBlockSet.h  67
-rw-r--r--  Source/JavaScriptCore/heap/CodeBlockSetInlines.h  89
-rw-r--r--  Source/JavaScriptCore/heap/CollectingScope.h  52
-rw-r--r--  Source/JavaScriptCore/heap/CollectionScope.cpp  55
-rw-r--r--  Source/JavaScriptCore/heap/CollectionScope.h (renamed from Source/JavaScriptCore/heap/HeapOperation.h)  18
-rw-r--r--  Source/JavaScriptCore/heap/CollectorPhase.cpp (renamed from Source/JavaScriptCore/heap/CopyVisitor.cpp)  77
-rw-r--r--  Source/JavaScriptCore/heap/CollectorPhase.h  75
-rw-r--r--  Source/JavaScriptCore/heap/ConservativeRoots.cpp  87
-rw-r--r--  Source/JavaScriptCore/heap/ConservativeRoots.h  29
-rw-r--r--  Source/JavaScriptCore/heap/ConstraintVolatility.h  73
-rw-r--r--  Source/JavaScriptCore/heap/CopiedAllocator.h  166
-rw-r--r--  Source/JavaScriptCore/heap/CopiedBlock.h  288
-rw-r--r--  Source/JavaScriptCore/heap/CopiedBlockInlines.h  86
-rw-r--r--  Source/JavaScriptCore/heap/CopiedSpace.cpp  353
-rw-r--r--  Source/JavaScriptCore/heap/CopiedSpace.h  147
-rw-r--r--  Source/JavaScriptCore/heap/CopiedSpaceInlines.h  263
-rw-r--r--  Source/JavaScriptCore/heap/CopyVisitorInlines.h  106
-rw-r--r--  Source/JavaScriptCore/heap/CopyWorkList.h  190
-rw-r--r--  Source/JavaScriptCore/heap/DeferGC.cpp  2
-rw-r--r--  Source/JavaScriptCore/heap/DeferGC.h  22
-rw-r--r--  Source/JavaScriptCore/heap/DelayedReleaseScope.h  100
-rw-r--r--  Source/JavaScriptCore/heap/DeleteAllCodeEffort.h  36
-rw-r--r--  Source/JavaScriptCore/heap/DestructionMode.cpp  50
-rw-r--r--  Source/JavaScriptCore/heap/DestructionMode.h  43
-rw-r--r--  Source/JavaScriptCore/heap/EdenGCActivityCallback.cpp  100
-rw-r--r--  Source/JavaScriptCore/heap/EdenGCActivityCallback.h (renamed from Source/JavaScriptCore/heap/SuperRegion.h)  39
-rw-r--r--  Source/JavaScriptCore/heap/FreeList.cpp  37
-rw-r--r--  Source/JavaScriptCore/heap/FreeList.h  91
-rw-r--r--  Source/JavaScriptCore/heap/FullGCActivityCallback.cpp  115
-rw-r--r--  Source/JavaScriptCore/heap/FullGCActivityCallback.h  54
-rw-r--r--  Source/JavaScriptCore/heap/GCActivityCallback.cpp  164
-rw-r--r--  Source/JavaScriptCore/heap/GCActivityCallback.h  106
-rw-r--r--  Source/JavaScriptCore/heap/GCAssertions.h  30
-rw-r--r--  Source/JavaScriptCore/heap/GCConductor.cpp  66
-rw-r--r--  Source/JavaScriptCore/heap/GCConductor.h  49
-rw-r--r--  Source/JavaScriptCore/heap/GCDeferralContext.h  46
-rw-r--r--  Source/JavaScriptCore/heap/GCDeferralContextInlines.h  49
-rw-r--r--  Source/JavaScriptCore/heap/GCIncomingRefCounted.h  6
-rw-r--r--  Source/JavaScriptCore/heap/GCIncomingRefCountedInlines.h  6
-rw-r--r--  Source/JavaScriptCore/heap/GCIncomingRefCountedSet.h  11
-rw-r--r--  Source/JavaScriptCore/heap/GCIncomingRefCountedSetInlines.h  7
-rw-r--r--  Source/JavaScriptCore/heap/GCLogging.cpp (renamed from Source/JavaScriptCore/heap/WriteBarrierBuffer.cpp)  65
-rw-r--r--  Source/JavaScriptCore/heap/GCLogging.h  56
-rw-r--r--  Source/JavaScriptCore/heap/GCSegmentedArray.h  167
-rw-r--r--  Source/JavaScriptCore/heap/GCSegmentedArrayInlines.h  227
-rw-r--r--  Source/JavaScriptCore/heap/GCThread.cpp  136
-rw-r--r--  Source/JavaScriptCore/heap/GCThreadSharedData.cpp  210
-rw-r--r--  Source/JavaScriptCore/heap/GCThreadSharedData.h  124
-rw-r--r--  Source/JavaScriptCore/heap/GCTypeMap.h  64
-rw-r--r--  Source/JavaScriptCore/heap/Handle.h  11
-rw-r--r--  Source/JavaScriptCore/heap/HandleBlock.h  18
-rw-r--r--  Source/JavaScriptCore/heap/HandleBlockInlines.h  24
-rw-r--r--  Source/JavaScriptCore/heap/HandleSet.cpp  18
-rw-r--r--  Source/JavaScriptCore/heap/HandleSet.h  28
-rw-r--r--  Source/JavaScriptCore/heap/HandleStack.cpp  11
-rw-r--r--  Source/JavaScriptCore/heap/HandleStack.h  15
-rw-r--r--  Source/JavaScriptCore/heap/HandleTypes.h  5
-rw-r--r--  Source/JavaScriptCore/heap/Heap.cpp  3002
-rw-r--r--  Source/JavaScriptCore/heap/Heap.h  1091
-rw-r--r--  Source/JavaScriptCore/heap/HeapCell.cpp  59
-rw-r--r--  Source/JavaScriptCore/heap/HeapCell.h  92
-rw-r--r--  Source/JavaScriptCore/heap/HeapCellInlines.h  95
-rw-r--r--  Source/JavaScriptCore/heap/HeapHelperPool.cpp  47
-rw-r--r--  Source/JavaScriptCore/heap/HeapHelperPool.h  34
-rw-r--r--  Source/JavaScriptCore/heap/HeapInlines.h  272
-rw-r--r--  Source/JavaScriptCore/heap/HeapIterationScope.h  6
-rw-r--r--  Source/JavaScriptCore/heap/HeapObserver.h (renamed from Source/JavaScriptCore/heap/CopyToken.h)  19
-rw-r--r--  Source/JavaScriptCore/heap/HeapProfiler.cpp  66
-rw-r--r--  Source/JavaScriptCore/heap/HeapProfiler.h  57
-rw-r--r--  Source/JavaScriptCore/heap/HeapRootVisitor.h  86
-rw-r--r--  Source/JavaScriptCore/heap/HeapSnapshot.cpp  184
-rw-r--r--  Source/JavaScriptCore/heap/HeapSnapshot.h  64
-rw-r--r--  Source/JavaScriptCore/heap/HeapSnapshotBuilder.cpp  393
-rw-r--r--  Source/JavaScriptCore/heap/HeapSnapshotBuilder.h  140
-rw-r--r--  Source/JavaScriptCore/heap/HeapStatistics.cpp  257
-rw-r--r--  Source/JavaScriptCore/heap/HeapStatistics.h  61
-rw-r--r--  Source/JavaScriptCore/heap/HeapTimer.cpp  160
-rw-r--r--  Source/JavaScriptCore/heap/HeapTimer.h  50
-rw-r--r--  Source/JavaScriptCore/heap/HeapUtil.h  189
-rw-r--r--  Source/JavaScriptCore/heap/HeapVerifier.cpp  217
-rw-r--r--  Source/JavaScriptCore/heap/HeapVerifier.h  95
-rw-r--r--  Source/JavaScriptCore/heap/IncrementalSweeper.cpp  100
-rw-r--r--  Source/JavaScriptCore/heap/IncrementalSweeper.h  33
-rw-r--r--  Source/JavaScriptCore/heap/JITStubRoutineSet.cpp  2
-rw-r--r--  Source/JavaScriptCore/heap/JITStubRoutineSet.h  8
-rw-r--r--  Source/JavaScriptCore/heap/LargeAllocation.cpp  128
-rw-r--r--  Source/JavaScriptCore/heap/LargeAllocation.h  163
-rw-r--r--  Source/JavaScriptCore/heap/ListableHandler.h  26
-rw-r--r--  Source/JavaScriptCore/heap/LiveObjectData.h  43
-rw-r--r--  Source/JavaScriptCore/heap/LiveObjectList.cpp  40
-rw-r--r--  Source/JavaScriptCore/heap/LiveObjectList.h  53
-rw-r--r--  Source/JavaScriptCore/heap/Local.h  9
-rw-r--r--  Source/JavaScriptCore/heap/LocalScope.h  7
-rw-r--r--  Source/JavaScriptCore/heap/LockDuringMarking.h  47
-rw-r--r--  Source/JavaScriptCore/heap/MachineStackMarker.cpp  993
-rw-r--r--  Source/JavaScriptCore/heap/MachineStackMarker.h  188
-rw-r--r--  Source/JavaScriptCore/heap/MarkStack.cpp  146
-rw-r--r--  Source/JavaScriptCore/heap/MarkStack.h  107
-rw-r--r--  Source/JavaScriptCore/heap/MarkStackInlines.h  119
-rw-r--r--  Source/JavaScriptCore/heap/MarkedAllocator.cpp  544
-rw-r--r--  Source/JavaScriptCore/heap/MarkedAllocator.h  341
-rw-r--r--  Source/JavaScriptCore/heap/MarkedAllocatorInlines.h  85
-rw-r--r--  Source/JavaScriptCore/heap/MarkedBlock.cpp  565
-rw-r--r--  Source/JavaScriptCore/heap/MarkedBlock.h  948
-rw-r--r--  Source/JavaScriptCore/heap/MarkedBlockInlines.h  394
-rw-r--r--  Source/JavaScriptCore/heap/MarkedBlockSet.h  7
-rw-r--r--  Source/JavaScriptCore/heap/MarkedSpace.cpp  677
-rw-r--r--  Source/JavaScriptCore/heap/MarkedSpace.h  374
-rw-r--r--  Source/JavaScriptCore/heap/MarkedSpaceInlines.h  66
-rw-r--r--  Source/JavaScriptCore/heap/MarkingConstraint.cpp  77
-rw-r--r--  Source/JavaScriptCore/heap/MarkingConstraint.h  96
-rw-r--r--  Source/JavaScriptCore/heap/MarkingConstraintSet.cpp  249
-rw-r--r--  Source/JavaScriptCore/heap/MarkingConstraintSet.h  87
-rw-r--r--  Source/JavaScriptCore/heap/MutatorScheduler.cpp  76
-rw-r--r--  Source/JavaScriptCore/heap/MutatorScheduler.h  76
-rw-r--r--  Source/JavaScriptCore/heap/MutatorState.cpp  55
-rw-r--r--  Source/JavaScriptCore/heap/MutatorState.h  53
-rw-r--r--  Source/JavaScriptCore/heap/OpaqueRootSet.h (renamed from Source/JavaScriptCore/heap/CopyWriteBarrier.h)  81
-rw-r--r--  Source/JavaScriptCore/heap/PreventCollectionScope.h (renamed from Source/JavaScriptCore/heap/HeapBlock.h)  47
-rw-r--r--  Source/JavaScriptCore/heap/Region.h  319
-rw-r--r--  Source/JavaScriptCore/heap/RegisterState.h  158
-rw-r--r--  Source/JavaScriptCore/heap/ReleaseHeapAccessScope.h (renamed from Source/JavaScriptCore/heap/RecursiveAllocationScope.h)  39
-rw-r--r--  Source/JavaScriptCore/heap/RunningScope.h  52
-rw-r--r--  Source/JavaScriptCore/heap/SlotVisitor.cpp  926
-rw-r--r--  Source/JavaScriptCore/heap/SlotVisitor.h  215
-rw-r--r--  Source/JavaScriptCore/heap/SlotVisitorInlines.h  239
-rw-r--r--  Source/JavaScriptCore/heap/SpaceTimeMutatorScheduler.cpp  221
-rw-r--r--  Source/JavaScriptCore/heap/SpaceTimeMutatorScheduler.h  86
-rw-r--r--  Source/JavaScriptCore/heap/StochasticSpaceTimeMutatorScheduler.cpp  233
-rw-r--r--  Source/JavaScriptCore/heap/StochasticSpaceTimeMutatorScheduler.h  92
-rw-r--r--  Source/JavaScriptCore/heap/StopIfNecessaryTimer.cpp  55
-rw-r--r--  Source/JavaScriptCore/heap/StopIfNecessaryTimer.h  44
-rw-r--r--  Source/JavaScriptCore/heap/Strong.h  11
-rw-r--r--  Source/JavaScriptCore/heap/StrongInlines.h  5
-rw-r--r--  Source/JavaScriptCore/heap/Subspace.cpp  196
-rw-r--r--  Source/JavaScriptCore/heap/Subspace.h  122
-rw-r--r--  Source/JavaScriptCore/heap/SubspaceInlines.h  76
-rw-r--r--  Source/JavaScriptCore/heap/SuperRegion.cpp  82
-rw-r--r--  Source/JavaScriptCore/heap/SweepingScope.h  52
-rw-r--r--  Source/JavaScriptCore/heap/SynchronousStopTheWorldMutatorScheduler.cpp  67
-rw-r--r--  Source/JavaScriptCore/heap/SynchronousStopTheWorldMutatorScheduler.h  57
-rw-r--r--  Source/JavaScriptCore/heap/TinyBloomFilter.h  5
-rw-r--r--  Source/JavaScriptCore/heap/UnconditionalFinalizer.h  10
-rw-r--r--  Source/JavaScriptCore/heap/VisitRaceKey.cpp  40
-rw-r--r--  Source/JavaScriptCore/heap/VisitRaceKey.h  107
-rw-r--r--  Source/JavaScriptCore/heap/VisitingTimeout.h (renamed from Source/JavaScriptCore/heap/WriteBarrierBuffer.h)  57
-rw-r--r--  Source/JavaScriptCore/heap/Weak.cpp  1
-rw-r--r--  Source/JavaScriptCore/heap/Weak.h  57
-rw-r--r--  Source/JavaScriptCore/heap/WeakBlock.cpp  75
-rw-r--r--  Source/JavaScriptCore/heap/WeakBlock.h  55
-rw-r--r--  Source/JavaScriptCore/heap/WeakHandleOwner.cpp  2
-rw-r--r--  Source/JavaScriptCore/heap/WeakHandleOwner.h  5
-rw-r--r--  Source/JavaScriptCore/heap/WeakImpl.h  7
-rw-r--r--  Source/JavaScriptCore/heap/WeakInlines.h  44
-rw-r--r--  Source/JavaScriptCore/heap/WeakReferenceHarvester.h  7
-rw-r--r--  Source/JavaScriptCore/heap/WeakSet.cpp  47
-rw-r--r--  Source/JavaScriptCore/heap/WeakSet.h  38
-rw-r--r--  Source/JavaScriptCore/heap/WeakSetInlines.h  10
-rw-r--r--  Source/JavaScriptCore/heap/WriteBarrierSupport.cpp  2
-rw-r--r--  Source/JavaScriptCore/heap/WriteBarrierSupport.h  6
170 files changed, 15640 insertions, 7812 deletions
diff --git a/Source/JavaScriptCore/heap/GCThread.h b/Source/JavaScriptCore/heap/AllocatingScope.h
index 0d218f975..1d13ae0de 100644
--- a/Source/JavaScriptCore/heap/GCThread.h
+++ b/Source/JavaScriptCore/heap/AllocatingScope.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,41 +23,30 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef GCThread_h
-#define GCThread_h
+#pragma once
-#include <GCThreadSharedData.h>
-#include <wtf/Deque.h>
-#include <wtf/OwnPtr.h>
-#include <wtf/Threading.h>
+#include "Heap.h"
namespace JSC {
-class CopyVisitor;
-class GCThreadSharedData;
-class SlotVisitor;
-
-class GCThread {
+class AllocatingScope {
public:
- GCThread(GCThreadSharedData&, SlotVisitor*, CopyVisitor*);
-
- SlotVisitor* slotVisitor();
- CopyVisitor* copyVisitor();
- ThreadIdentifier threadID();
- void initializeThreadID(ThreadIdentifier);
-
- static void gcThreadStartFunc(void*);
+ AllocatingScope(Heap& heap)
+ : m_heap(heap)
+ {
+ RELEASE_ASSERT(m_heap.m_mutatorState == MutatorState::Running);
+ m_heap.m_mutatorState = MutatorState::Allocating;
+ }
+
+ ~AllocatingScope()
+ {
+ RELEASE_ASSERT(m_heap.m_mutatorState == MutatorState::Allocating);
+ m_heap.m_mutatorState = MutatorState::Running;
+ }
private:
- void gcThreadMain();
- GCPhase waitForNextPhase();
-
- ThreadIdentifier m_threadID;
- GCThreadSharedData& m_shared;
- OwnPtr<SlotVisitor> m_slotVisitor;
- OwnPtr<CopyVisitor> m_copyVisitor;
+ Heap& m_heap;
};
} // namespace JSC
-#endif
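
Note on the new AllocatingScope above: it is a plain RAII guard whose constructor asserts the mutator is Running and flips Heap::m_mutatorState to Allocating, and whose destructor restores Running. A minimal standalone sketch of that pattern follows; MiniHeap and the plain asserts are hypothetical stand-ins for JSC::Heap and RELEASE_ASSERT, not the real API.

// Standalone sketch of the RAII state-flipping pattern used by AllocatingScope.
// MiniHeap models only the mutator-state field of JSC::Heap.
#include <cassert>

enum class MutatorState { Running, Allocating };

struct MiniHeap {
    MutatorState mutatorState { MutatorState::Running };
};

class AllocatingScopeSketch {
public:
    explicit AllocatingScopeSketch(MiniHeap& heap)
        : m_heap(heap)
    {
        assert(m_heap.mutatorState == MutatorState::Running);
        m_heap.mutatorState = MutatorState::Allocating;
    }

    ~AllocatingScopeSketch()
    {
        assert(m_heap.mutatorState == MutatorState::Allocating);
        m_heap.mutatorState = MutatorState::Running;
    }

private:
    MiniHeap& m_heap;
};

int main()
{
    MiniHeap heap;
    {
        AllocatingScopeSketch scope(heap); // state becomes Allocating
        // ... allocation work would happen here ...
    } // destructor restores Running
    assert(heap.mutatorState == MutatorState::Running);
}
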
diff --git a/Source/JavaScriptCore/heap/AllocatorAttributes.cpp b/Source/JavaScriptCore/heap/AllocatorAttributes.cpp
new file mode 100644
index 000000000..468f91cd1
--- /dev/null
+++ b/Source/JavaScriptCore/heap/AllocatorAttributes.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AllocatorAttributes.h"
+
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+void AllocatorAttributes::dump(PrintStream& out) const
+{
+ out.print("{", destruction, ", ", cellKind, "}");
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/AllocatorAttributes.h b/Source/JavaScriptCore/heap/AllocatorAttributes.h
new file mode 100644
index 000000000..6d5299f85
--- /dev/null
+++ b/Source/JavaScriptCore/heap/AllocatorAttributes.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "ConstraintVolatility.h"
+#include "DestructionMode.h"
+#include "HeapCell.h"
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+struct AllocatorAttributes {
+ AllocatorAttributes() { }
+
+ AllocatorAttributes(DestructionMode destruction, HeapCell::Kind cellKind)
+ : destruction(destruction)
+ , cellKind(cellKind)
+ {
+ }
+
+ void dump(PrintStream& out) const;
+
+ DestructionMode destruction { DoesNotNeedDestruction };
+ HeapCell::Kind cellKind { HeapCell::JSCell };
+};
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/BlockAllocator.cpp b/Source/JavaScriptCore/heap/BlockAllocator.cpp
deleted file mode 100644
index 7a7474913..000000000
--- a/Source/JavaScriptCore/heap/BlockAllocator.cpp
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "BlockAllocator.h"
-
-#include "CopiedBlock.h"
-#include "CopyWorkList.h"
-#include "MarkedBlock.h"
-#include "WeakBlock.h"
-#include <wtf/CurrentTime.h>
-
-namespace JSC {
-
-inline ThreadIdentifier createBlockFreeingThread(BlockAllocator* allocator)
-{
- if (!GCActivityCallback::s_shouldCreateGCTimer)
- return 0; // No block freeing thread.
- ThreadIdentifier identifier = createThread(allocator->blockFreeingThreadStartFunc, allocator, "JavaScriptCore::BlockFree");
- RELEASE_ASSERT(identifier);
- return identifier;
-}
-
-BlockAllocator::BlockAllocator()
- : m_superRegion()
- , m_copiedRegionSet(CopiedBlock::blockSize)
- , m_markedRegionSet(MarkedBlock::blockSize)
- , m_fourKBBlockRegionSet(WeakBlock::blockSize)
- , m_workListRegionSet(CopyWorkListSegment::blockSize)
- , m_numberOfEmptyRegions(0)
- , m_isCurrentlyAllocating(false)
- , m_blockFreeingThreadShouldQuit(false)
- , m_blockFreeingThread(createBlockFreeingThread(this))
-{
- m_regionLock.Init();
-}
-
-BlockAllocator::~BlockAllocator()
-{
- releaseFreeRegions();
- {
- std::lock_guard<std::mutex> lock(m_emptyRegionConditionMutex);
- m_blockFreeingThreadShouldQuit = true;
- m_emptyRegionCondition.notify_all();
- }
- if (m_blockFreeingThread)
- waitForThreadCompletion(m_blockFreeingThread);
- ASSERT(allRegionSetsAreEmpty());
- ASSERT(m_emptyRegions.isEmpty());
-}
-
-bool BlockAllocator::allRegionSetsAreEmpty() const
-{
- return m_copiedRegionSet.isEmpty()
- && m_markedRegionSet.isEmpty()
- && m_fourKBBlockRegionSet.isEmpty()
- && m_workListRegionSet.isEmpty();
-}
-
-void BlockAllocator::releaseFreeRegions()
-{
- while (true) {
- Region* region;
- {
- SpinLockHolder locker(&m_regionLock);
- if (!m_numberOfEmptyRegions)
- region = 0;
- else {
- region = m_emptyRegions.removeHead();
- RELEASE_ASSERT(region);
- m_numberOfEmptyRegions--;
- }
- }
-
- if (!region)
- break;
-
- region->destroy();
- }
-}
-
-void BlockAllocator::waitForDuration(std::chrono::milliseconds duration)
-{
- std::unique_lock<std::mutex> lock(m_emptyRegionConditionMutex);
-
- // If this returns early, that's fine, so long as it doesn't do it too
- // frequently. It would only be a bug if this function failed to return
- // when it was asked to do so.
- if (m_blockFreeingThreadShouldQuit)
- return;
-
- m_emptyRegionCondition.wait_for(lock, duration);
-}
-
-void BlockAllocator::blockFreeingThreadStartFunc(void* blockAllocator)
-{
- static_cast<BlockAllocator*>(blockAllocator)->blockFreeingThreadMain();
-}
-
-void BlockAllocator::blockFreeingThreadMain()
-{
- size_t currentNumberOfEmptyRegions;
- while (!m_blockFreeingThreadShouldQuit) {
- // Generally wait for one second before scavenging free blocks. This
- // may return early, particularly when we're being asked to quit.
- waitForDuration(std::chrono::seconds(1));
- if (m_blockFreeingThreadShouldQuit)
- break;
-
- if (m_isCurrentlyAllocating) {
- m_isCurrentlyAllocating = false;
- continue;
- }
-
- // Sleep until there is actually work to do rather than waking up every second to check.
- {
- std::unique_lock<std::mutex> lock(m_emptyRegionConditionMutex);
- SpinLockHolder regionLocker(&m_regionLock);
- while (!m_numberOfEmptyRegions && !m_blockFreeingThreadShouldQuit) {
- m_regionLock.Unlock();
- m_emptyRegionCondition.wait(lock);
- m_regionLock.Lock();
- }
- currentNumberOfEmptyRegions = m_numberOfEmptyRegions;
- }
-
- size_t desiredNumberOfEmptyRegions = currentNumberOfEmptyRegions / 2;
-
- while (!m_blockFreeingThreadShouldQuit) {
- Region* region;
- {
- SpinLockHolder locker(&m_regionLock);
- if (m_numberOfEmptyRegions <= desiredNumberOfEmptyRegions)
- region = 0;
- else {
- region = m_emptyRegions.removeHead();
- RELEASE_ASSERT(region);
- m_numberOfEmptyRegions--;
- }
- }
-
- if (!region)
- break;
-
- region->destroy();
- }
- }
-}
-
-} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/BlockAllocator.h b/Source/JavaScriptCore/heap/BlockAllocator.h
deleted file mode 100644
index a52b90f19..000000000
--- a/Source/JavaScriptCore/heap/BlockAllocator.h
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef BlockAllocator_h
-#define BlockAllocator_h
-
-#include "GCActivityCallback.h"
-#include "HeapBlock.h"
-#include "Region.h"
-#include <condition_variable>
-#include <wtf/DoublyLinkedList.h>
-#include <wtf/Forward.h>
-#include <wtf/PageAllocationAligned.h>
-#include <wtf/TCSpinLock.h>
-#include <wtf/Threading.h>
-
-namespace JSC {
-
-class BlockAllocator;
-class CopiedBlock;
-class CopyWorkListSegment;
-class HandleBlock;
-class VM;
-class MarkStackSegment;
-class MarkedBlock;
-class WeakBlock;
-
-// Simple allocator to reduce VM cost by holding onto blocks of memory for
-// short periods of time and then freeing them on a secondary thread.
-
-class BlockAllocator {
-public:
- BlockAllocator();
- ~BlockAllocator();
-
- template <typename T> DeadBlock* allocate();
- DeadBlock* allocateCustomSize(size_t blockSize, size_t blockAlignment);
- template <typename T> void deallocate(T*);
- template <typename T> void deallocateCustomSize(T*);
-
-private:
- void waitForDuration(std::chrono::milliseconds);
-
- friend ThreadIdentifier createBlockFreeingThread(BlockAllocator*);
- void blockFreeingThreadMain();
- static void blockFreeingThreadStartFunc(void* heap);
-
- struct RegionSet {
- RegionSet(size_t blockSize)
- : m_numberOfPartialRegions(0)
- , m_blockSize(blockSize)
- {
- }
-
- bool isEmpty() const
- {
- return m_fullRegions.isEmpty() && m_partialRegions.isEmpty();
- }
-
- DoublyLinkedList<Region> m_fullRegions;
- DoublyLinkedList<Region> m_partialRegions;
- size_t m_numberOfPartialRegions;
- size_t m_blockSize;
- };
-
- DeadBlock* tryAllocateFromRegion(RegionSet&, DoublyLinkedList<Region>&, size_t&);
-
- bool allRegionSetsAreEmpty() const;
- void releaseFreeRegions();
-
- template <typename T> RegionSet& regionSetFor();
-
- SuperRegion m_superRegion;
- RegionSet m_copiedRegionSet;
- RegionSet m_markedRegionSet;
- // WeakBlocks and MarkStackSegments use the same RegionSet since they're the same size.
- RegionSet m_fourKBBlockRegionSet;
- RegionSet m_workListRegionSet;
-
- DoublyLinkedList<Region> m_emptyRegions;
- size_t m_numberOfEmptyRegions;
-
- bool m_isCurrentlyAllocating;
- bool m_blockFreeingThreadShouldQuit;
- SpinLock m_regionLock;
- std::mutex m_emptyRegionConditionMutex;
- std::condition_variable m_emptyRegionCondition;
- ThreadIdentifier m_blockFreeingThread;
-};
-
-inline DeadBlock* BlockAllocator::tryAllocateFromRegion(RegionSet& set, DoublyLinkedList<Region>& regions, size_t& numberOfRegions)
-{
- if (numberOfRegions) {
- ASSERT(!regions.isEmpty());
- Region* region = regions.head();
- ASSERT(!region->isFull());
-
- if (region->isEmpty()) {
- ASSERT(region == m_emptyRegions.head());
- m_numberOfEmptyRegions--;
- set.m_numberOfPartialRegions++;
- region = m_emptyRegions.removeHead()->reset(set.m_blockSize);
- set.m_partialRegions.push(region);
- }
-
- DeadBlock* block = region->allocate();
-
- if (region->isFull()) {
- set.m_numberOfPartialRegions--;
- set.m_fullRegions.push(set.m_partialRegions.removeHead());
- }
-
- return block;
- }
- return 0;
-}
-
-template<typename T>
-inline DeadBlock* BlockAllocator::allocate()
-{
- RegionSet& set = regionSetFor<T>();
- DeadBlock* block;
- m_isCurrentlyAllocating = true;
- {
- SpinLockHolder locker(&m_regionLock);
- if ((block = tryAllocateFromRegion(set, set.m_partialRegions, set.m_numberOfPartialRegions)))
- return block;
- if ((block = tryAllocateFromRegion(set, m_emptyRegions, m_numberOfEmptyRegions)))
- return block;
- }
-
- Region* newRegion = Region::create(&m_superRegion, T::blockSize);
-
- SpinLockHolder locker(&m_regionLock);
- m_emptyRegions.push(newRegion);
- m_numberOfEmptyRegions++;
- block = tryAllocateFromRegion(set, m_emptyRegions, m_numberOfEmptyRegions);
- ASSERT(block);
- return block;
-}
-
-inline DeadBlock* BlockAllocator::allocateCustomSize(size_t blockSize, size_t blockAlignment)
-{
- size_t realSize = WTF::roundUpToMultipleOf(blockAlignment, blockSize);
- Region* newRegion = Region::createCustomSize(&m_superRegion, realSize, blockAlignment);
- DeadBlock* block = newRegion->allocate();
- ASSERT(block);
- return block;
-}
-
-template<typename T>
-inline void BlockAllocator::deallocate(T* block)
-{
- RegionSet& set = regionSetFor<T>();
- bool shouldWakeBlockFreeingThread = false;
- {
- SpinLockHolder locker(&m_regionLock);
- Region* region = block->region();
- ASSERT(!region->isEmpty());
- if (region->isFull())
- set.m_fullRegions.remove(region);
- else {
- set.m_partialRegions.remove(region);
- set.m_numberOfPartialRegions--;
- }
-
- region->deallocate(block);
-
- if (region->isEmpty()) {
- m_emptyRegions.push(region);
- shouldWakeBlockFreeingThread = !m_numberOfEmptyRegions;
- m_numberOfEmptyRegions++;
- } else {
- set.m_partialRegions.push(region);
- set.m_numberOfPartialRegions++;
- }
- }
-
- if (shouldWakeBlockFreeingThread) {
- std::lock_guard<std::mutex> lock(m_emptyRegionConditionMutex);
- m_emptyRegionCondition.notify_one();
- }
-
- if (!m_blockFreeingThread)
- releaseFreeRegions();
-}
-
-template<typename T>
-inline void BlockAllocator::deallocateCustomSize(T* block)
-{
- Region* region = block->region();
- ASSERT(region->isCustomSize());
- region->deallocate(block);
- region->destroy();
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<CopiedBlock>()
-{
- return m_copiedRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<MarkedBlock>()
-{
- return m_markedRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<WeakBlock>()
-{
- return m_fourKBBlockRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<MarkStackSegment>()
-{
- return m_fourKBBlockRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<CopyWorkListSegment>()
-{
- return m_workListRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HandleBlock>()
-{
- return m_fourKBBlockRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<CopiedBlock>>()
-{
- return m_copiedRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<MarkedBlock>>()
-{
- return m_markedRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<WeakBlock>>()
-{
- return m_fourKBBlockRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<MarkStackSegment>>()
-{
- return m_fourKBBlockRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<CopyWorkListSegment>>()
-{
- return m_workListRegionSet;
-}
-
-template <>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<HandleBlock>>()
-{
- return m_fourKBBlockRegionSet;
-}
-
-template <typename T>
-inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor()
-{
- RELEASE_ASSERT_NOT_REACHED();
- return *(RegionSet*)0;
-}
-
-} // namespace JSC
-
-#endif // BlockAllocator_h
diff --git a/Source/JavaScriptCore/heap/CellContainer.cpp b/Source/JavaScriptCore/heap/CellContainer.cpp
new file mode 100644
index 000000000..baccde983
--- /dev/null
+++ b/Source/JavaScriptCore/heap/CellContainer.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CellContainer.h"
+
+#include "MarkedBlockInlines.h"
+
+namespace JSC {
+
+bool CellContainer::isNewlyAllocated(HeapCell* cell) const
+{
+ if (isLargeAllocation())
+ return largeAllocation().isNewlyAllocated();
+ MarkedBlock::Handle& handle = markedBlock().handle();
+ return !handle.isNewlyAllocatedStale()
+ && handle.isNewlyAllocated(cell);
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/CellContainer.h b/Source/JavaScriptCore/heap/CellContainer.h
new file mode 100644
index 000000000..e4f2ff287
--- /dev/null
+++ b/Source/JavaScriptCore/heap/CellContainer.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/StdLibExtras.h>
+
+namespace JSC {
+
+class Heap;
+class HeapCell;
+class LargeAllocation;
+class MarkedBlock;
+class WeakSet;
+class VM;
+
+typedef uint32_t HeapVersion;
+
+// This is how we abstract over either MarkedBlock& or LargeAllocation&. Put things in here as you
+// find need for them.
+
+class CellContainer {
+public:
+ CellContainer()
+ : m_encodedPointer(0)
+ {
+ }
+
+ CellContainer(MarkedBlock& markedBlock)
+ : m_encodedPointer(bitwise_cast<uintptr_t>(&markedBlock))
+ {
+ }
+
+ CellContainer(LargeAllocation& largeAllocation)
+ : m_encodedPointer(bitwise_cast<uintptr_t>(&largeAllocation) | isLargeAllocationBit)
+ {
+ }
+
+ VM* vm() const;
+ Heap* heap() const;
+
+ explicit operator bool() const { return !!m_encodedPointer; }
+
+ bool isMarkedBlock() const { return m_encodedPointer && !(m_encodedPointer & isLargeAllocationBit); }
+ bool isLargeAllocation() const { return m_encodedPointer & isLargeAllocationBit; }
+
+ MarkedBlock& markedBlock() const
+ {
+ ASSERT(isMarkedBlock());
+ return *bitwise_cast<MarkedBlock*>(m_encodedPointer);
+ }
+
+ LargeAllocation& largeAllocation() const
+ {
+ ASSERT(isLargeAllocation());
+ return *bitwise_cast<LargeAllocation*>(m_encodedPointer - isLargeAllocationBit);
+ }
+
+ void aboutToMark(HeapVersion markingVersion);
+ bool areMarksStale() const;
+
+ bool isMarked(HeapCell*) const;
+ bool isMarked(HeapVersion markingVersion, HeapCell*) const;
+
+ bool isNewlyAllocated(HeapCell*) const;
+
+ void noteMarked();
+ void assertValidCell(VM&, HeapCell*) const;
+
+ size_t cellSize() const;
+
+ WeakSet& weakSet() const;
+
+private:
+ static const uintptr_t isLargeAllocationBit = 1;
+ uintptr_t m_encodedPointer;
+};
+
+} // namespace JSC
+
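
Note on CellContainer above: it stores either a MarkedBlock* or a LargeAllocation* in a single uintptr_t and uses the low bit (isLargeAllocationBit) to record which one it holds; every accessor branches on that bit and strips it before dereferencing. A rough standalone sketch of the tagged-pointer idea, with BlockA and BlockB as hypothetical stand-ins for the two real container types (which are aligned strongly enough that the low pointer bit is always free):

// Standalone sketch of the low-bit tagged-pointer dispatch used by CellContainer.
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct BlockA { size_t size = 16;   size_t cellSize() const { return size; } };
struct BlockB { size_t size = 4096; size_t cellSize() const { return size; } };

class ContainerSketch {
public:
    ContainerSketch() : m_encodedPointer(0) { }
    ContainerSketch(BlockA& a) : m_encodedPointer(reinterpret_cast<uintptr_t>(&a)) { }
    ContainerSketch(BlockB& b) : m_encodedPointer(reinterpret_cast<uintptr_t>(&b) | isBBit) { }

    bool isB() const { return m_encodedPointer & isBBit; }

    size_t cellSize() const
    {
        // Branch on the tag bit, then strip it before dereferencing.
        if (isB())
            return reinterpret_cast<BlockB*>(m_encodedPointer - isBBit)->cellSize();
        return reinterpret_cast<BlockA*>(m_encodedPointer)->cellSize();
    }

private:
    static constexpr uintptr_t isBBit = 1;
    uintptr_t m_encodedPointer;
};

int main()
{
    BlockA a;
    BlockB b;
    std::printf("%zu %zu\n", ContainerSketch(a).cellSize(), ContainerSketch(b).cellSize()); // prints: 16 4096
}
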
diff --git a/Source/JavaScriptCore/heap/CellContainerInlines.h b/Source/JavaScriptCore/heap/CellContainerInlines.h
new file mode 100644
index 000000000..c23d4885b
--- /dev/null
+++ b/Source/JavaScriptCore/heap/CellContainerInlines.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CellContainer.h"
+#include "JSCell.h"
+#include "LargeAllocation.h"
+#include "MarkedBlock.h"
+#include "VM.h"
+
+namespace JSC {
+
+inline VM* CellContainer::vm() const
+{
+ if (isLargeAllocation())
+ return largeAllocation().vm();
+ return markedBlock().vm();
+}
+
+inline Heap* CellContainer::heap() const
+{
+ return &vm()->heap;
+}
+
+inline bool CellContainer::isMarked(HeapCell* cell) const
+{
+ if (isLargeAllocation())
+ return largeAllocation().isMarked();
+ return markedBlock().isMarked(cell);
+}
+
+inline bool CellContainer::isMarked(HeapVersion markingVersion, HeapCell* cell) const
+{
+ if (isLargeAllocation())
+ return largeAllocation().isMarked();
+ return markedBlock().isMarked(markingVersion, cell);
+}
+
+inline void CellContainer::noteMarked()
+{
+ if (!isLargeAllocation())
+ markedBlock().noteMarked();
+}
+
+inline void CellContainer::assertValidCell(VM& vm, HeapCell* cell) const
+{
+ if (isLargeAllocation())
+ largeAllocation().assertValidCell(vm, cell);
+ else
+ markedBlock().assertValidCell(vm, cell);
+}
+
+inline size_t CellContainer::cellSize() const
+{
+ if (isLargeAllocation())
+ return largeAllocation().cellSize();
+ return markedBlock().cellSize();
+}
+
+inline WeakSet& CellContainer::weakSet() const
+{
+ if (isLargeAllocation())
+ return largeAllocation().weakSet();
+ return markedBlock().weakSet();
+}
+
+inline void CellContainer::aboutToMark(HeapVersion markingVersion)
+{
+ if (!isLargeAllocation())
+ markedBlock().aboutToMark(markingVersion);
+}
+
+inline bool CellContainer::areMarksStale() const
+{
+ if (isLargeAllocation())
+ return false;
+ return markedBlock().areMarksStale();
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/CopyVisitor.h b/Source/JavaScriptCore/heap/CellState.h
index 41da1f9d3..caf32b6fa 100644
--- a/Source/JavaScriptCore/heap/CopyVisitor.h
+++ b/Source/JavaScriptCore/heap/CellState.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,40 +23,34 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CopyVisitor_h
-#define CopyVisitor_h
+#pragma once
-#include "CopiedSpace.h"
+#include <wtf/Assertions.h>
namespace JSC {
-class GCThreadSharedData;
-class JSCell;
-
-class CopyVisitor {
-public:
- CopyVisitor(GCThreadSharedData&);
-
- void copyFromShared();
-
- void startCopying();
- void doneCopying();
-
- // Low-level API for copying, appropriate for cases where the object's heap references
- // are discontiguous or if the object occurs frequently enough that you need to focus on
- // performance. Use this with care as it is easy to shoot yourself in the foot.
- bool checkIfShouldCopy(void*);
- void* allocateNewSpace(size_t);
- void didCopy(void*, size_t);
+// The CellState of a cell is a kind of hint about what the state of the cell is.
+enum class CellState : uint8_t {
+ // The object is either currently being scanned, or it has finished being scanned, or this
+ // is a full collection and it's actually a white object (you'd know because its mark bit
+ // would be clear).
+ PossiblyBlack = 0,
+
+ // The object is in eden. During GC, this means that the object has not been marked yet.
+ DefinitelyWhite = 1,
+
+ // This sorta means that the object is grey - i.e. it will be scanned. Or it could be white
+ // during a full collection if its mark bit is clear. That would happen if it had been black,
+ // got barriered, and we did a full collection.
+ PossiblyGrey = 2
+};
-private:
- void* allocateNewSpaceSlow(size_t);
- void visitItem(CopyWorklistItem);
+static const unsigned blackThreshold = 0; // x <= blackThreshold means x is PossiblyBlack.
+static const unsigned tautologicalThreshold = 100; // x <= tautologicalThreshold is always true.
- GCThreadSharedData& m_shared;
- CopiedAllocator m_copiedAllocator;
-};
+inline bool isWithinThreshold(CellState cellState, unsigned threshold)
+{
+ return static_cast<unsigned>(cellState) <= threshold;
+}
} // namespace JSC
-
-#endif
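
Note on the new CellState above: the numeric ordering (PossiblyBlack = 0, DefinitelyWhite = 1, PossiblyGrey = 2) is what lets isWithinThreshold reduce to one unsigned comparison: x <= blackThreshold (0) matches only PossiblyBlack, while x <= tautologicalThreshold (100) matches every state. A tiny self-contained check of that arithmetic, restating only the declarations shown above:

// Illustrative use of the CellState thresholds declared above.
#include <cassert>
#include <cstdint>

enum class CellState : uint8_t { PossiblyBlack = 0, DefinitelyWhite = 1, PossiblyGrey = 2 };

static const unsigned blackThreshold = 0;           // x <= 0 only matches PossiblyBlack.
static const unsigned tautologicalThreshold = 100;  // x <= 100 matches every state.

inline bool isWithinThreshold(CellState cellState, unsigned threshold)
{
    return static_cast<unsigned>(cellState) <= threshold;
}

int main()
{
    assert(isWithinThreshold(CellState::PossiblyBlack, blackThreshold));
    assert(!isWithinThreshold(CellState::DefinitelyWhite, blackThreshold));
    assert(isWithinThreshold(CellState::PossiblyGrey, tautologicalThreshold));
}
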
diff --git a/Source/JavaScriptCore/heap/CodeBlockSet.cpp b/Source/JavaScriptCore/heap/CodeBlockSet.cpp
index c04cbacd6..6d305baf1 100644
--- a/Source/JavaScriptCore/heap/CodeBlockSet.cpp
+++ b/Source/JavaScriptCore/heap/CodeBlockSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,94 +27,111 @@
#include "CodeBlockSet.h"
#include "CodeBlock.h"
-#include "SlotVisitor.h"
+#include "JSCInlines.h"
+#include <wtf/CommaPrinter.h>
namespace JSC {
-static const bool verbose = false;
-
-CodeBlockSet::CodeBlockSet() { }
+CodeBlockSet::CodeBlockSet()
+{
+}
CodeBlockSet::~CodeBlockSet()
{
- HashSet<CodeBlock*>::iterator iter = m_set.begin();
- HashSet<CodeBlock*>::iterator end = m_set.end();
- for (; iter != end; ++iter)
- (*iter)->deref();
}
-void CodeBlockSet::add(PassRefPtr<CodeBlock> codeBlock)
+void CodeBlockSet::add(CodeBlock* codeBlock)
{
- CodeBlock* block = codeBlock.leakRef();
- bool isNewEntry = m_set.add(block).isNewEntry;
+ LockHolder locker(&m_lock);
+ bool isNewEntry = m_newCodeBlocks.add(codeBlock).isNewEntry;
ASSERT_UNUSED(isNewEntry, isNewEntry);
}
-void CodeBlockSet::clearMarks()
+void CodeBlockSet::promoteYoungCodeBlocks(const LockHolder&)
{
- HashSet<CodeBlock*>::iterator iter = m_set.begin();
- HashSet<CodeBlock*>::iterator end = m_set.end();
- for (; iter != end; ++iter) {
- CodeBlock* codeBlock = *iter;
- codeBlock->m_mayBeExecuting = false;
- codeBlock->m_visitAggregateHasBeenCalled = false;
- }
+ ASSERT(m_lock.isLocked());
+ m_oldCodeBlocks.add(m_newCodeBlocks.begin(), m_newCodeBlocks.end());
+ m_newCodeBlocks.clear();
+}
+
+void CodeBlockSet::clearMarksForFullCollection()
+{
+ LockHolder locker(&m_lock);
+ for (CodeBlock* codeBlock : m_oldCodeBlocks)
+ codeBlock->clearVisitWeaklyHasBeenCalled();
+}
+
+void CodeBlockSet::lastChanceToFinalize(VM& vm)
+{
+ LockHolder locker(&m_lock);
+ for (CodeBlock* codeBlock : m_newCodeBlocks)
+ codeBlock->structure(vm)->classInfo()->methodTable.destroy(codeBlock);
+
+ for (CodeBlock* codeBlock : m_oldCodeBlocks)
+ codeBlock->structure(vm)->classInfo()->methodTable.destroy(codeBlock);
}
-void CodeBlockSet::deleteUnmarkedAndUnreferenced()
+void CodeBlockSet::deleteUnmarkedAndUnreferenced(VM& vm, CollectionScope scope)
{
- // This needs to be a fixpoint because code blocks that are unmarked may
- // refer to each other. For example, a DFG code block that is owned by
- // the GC may refer to an FTL for-entry code block that is also owned by
- // the GC.
- Vector<CodeBlock*, 16> toRemove;
- if (verbose)
- dataLog("Fixpointing over unmarked, set size = ", m_set.size(), "...\n");
- for (;;) {
- HashSet<CodeBlock*>::iterator iter = m_set.begin();
- HashSet<CodeBlock*>::iterator end = m_set.end();
- for (; iter != end; ++iter) {
- CodeBlock* codeBlock = *iter;
- if (!codeBlock->hasOneRef())
- continue;
- if (codeBlock->m_mayBeExecuting)
- continue;
- codeBlock->deref();
- toRemove.append(codeBlock);
+ LockHolder locker(&m_lock);
+ Vector<CodeBlock*> unmarked;
+
+ auto consider = [&] (HashSet<CodeBlock*>& set) {
+ for (CodeBlock* codeBlock : set) {
+ if (Heap::isMarked(codeBlock))
+ continue;
+ unmarked.append(codeBlock);
+ }
+ for (CodeBlock* codeBlock : unmarked) {
+ codeBlock->structure(vm)->classInfo()->methodTable.destroy(codeBlock);
+ set.remove(codeBlock);
}
- if (verbose)
- dataLog(" Removing ", toRemove.size(), " blocks.\n");
- if (toRemove.isEmpty())
- break;
- for (unsigned i = toRemove.size(); i--;)
- m_set.remove(toRemove[i]);
- toRemove.resize(0);
+ unmarked.resize(0);
+ };
+
+ switch (scope) {
+ case CollectionScope::Eden:
+ consider(m_newCodeBlocks);
+ break;
+ case CollectionScope::Full:
+ consider(m_oldCodeBlocks);
+ consider(m_newCodeBlocks);
+ break;
}
+
+ // Any remaining young CodeBlocks are live and need to be promoted to the set of old CodeBlocks.
+ promoteYoungCodeBlocks(locker);
}
-void CodeBlockSet::traceMarked(SlotVisitor& visitor)
+bool CodeBlockSet::contains(const LockHolder&, void* candidateCodeBlock)
{
- if (verbose)
- dataLog("Tracing ", m_set.size(), " code blocks.\n");
- HashSet<CodeBlock*>::iterator iter = m_set.begin();
- HashSet<CodeBlock*>::iterator end = m_set.end();
- for (; iter != end; ++iter) {
- CodeBlock* codeBlock = *iter;
- if (!codeBlock->m_mayBeExecuting)
- continue;
- codeBlock->visitAggregate(visitor);
- }
+ RELEASE_ASSERT(m_lock.isLocked());
+ CodeBlock* codeBlock = static_cast<CodeBlock*>(candidateCodeBlock);
+ if (!HashSet<CodeBlock*>::isValidValue(codeBlock))
+ return false;
+ return m_oldCodeBlocks.contains(codeBlock) || m_newCodeBlocks.contains(codeBlock) || m_currentlyExecuting.contains(codeBlock);
}
-void CodeBlockSet::rememberCurrentlyExecutingCodeBlocks(Heap* heap)
+void CodeBlockSet::clearCurrentlyExecuting()
{
-#if ENABLE(GGC)
- for (size_t i = 0; i < m_currentlyExecuting.size(); ++i)
- heap->addToRememberedSet(m_currentlyExecuting[i]->ownerExecutable());
m_currentlyExecuting.clear();
-#else
- UNUSED_PARAM(heap);
-#endif // ENABLE(GGC)
+}
+
+void CodeBlockSet::dump(PrintStream& out) const
+{
+ CommaPrinter comma;
+ out.print("{old = [");
+ for (CodeBlock* codeBlock : m_oldCodeBlocks)
+ out.print(comma, pointerDump(codeBlock));
+ out.print("], new = [");
+ comma = CommaPrinter();
+ for (CodeBlock* codeBlock : m_newCodeBlocks)
+ out.print(comma, pointerDump(codeBlock));
+ out.print("], currentlyExecuting = [");
+ comma = CommaPrinter();
+ for (CodeBlock* codeBlock : m_currentlyExecuting)
+ out.print(comma, pointerDump(codeBlock));
+ out.print("]}");
}
} // namespace JSC
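
Note on deleteUnmarkedAndUnreferenced above: it is generational. An Eden collection sweeps only the young set, a Full collection sweeps both, and any young survivors are then promoted into the old set via promoteYoungCodeBlocks. A compact standalone sketch of that sweep-and-promote logic, using std::set<int> and a marked-predicate in place of the real HashSet<CodeBlock*> and Heap::isMarked:

// Standalone sketch of the Eden/Full sweep-and-promote logic in CodeBlockSet.
#include <functional>
#include <set>

enum class Scope { Eden, Full };

struct CodeBlockSetSketch {
    std::set<int> oldBlocks;
    std::set<int> newBlocks;

    void deleteUnmarked(Scope scope, const std::function<bool(int)>& isMarked)
    {
        auto sweep = [&] (std::set<int>& set) {
            for (auto it = set.begin(); it != set.end();) {
                if (isMarked(*it))
                    ++it;
                else
                    it = set.erase(it); // "destroy" the unmarked block
            }
        };

        if (scope == Scope::Full)
            sweep(oldBlocks);
        sweep(newBlocks);

        // Young survivors are promoted to the old generation.
        oldBlocks.insert(newBlocks.begin(), newBlocks.end());
        newBlocks.clear();
    }
};

int main()
{
    CodeBlockSetSketch set;
    set.newBlocks = { 1, 2, 3 };
    set.deleteUnmarked(Scope::Eden, [] (int block) { return block != 2; });
    // Blocks 1 and 3 survived and were promoted; block 2 was destroyed.
    return set.oldBlocks.count(1) && set.oldBlocks.count(3) && !set.oldBlocks.count(2) ? 0 : 1;
}
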
diff --git a/Source/JavaScriptCore/heap/CodeBlockSet.h b/Source/JavaScriptCore/heap/CodeBlockSet.h
index 791d18699..0fca79adc 100644
--- a/Source/JavaScriptCore/heap/CodeBlockSet.h
+++ b/Source/JavaScriptCore/heap/CodeBlockSet.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,20 +23,22 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CodeBlockSet_h
-#define CodeBlockSet_h
+#pragma once
+#include "CollectionScope.h"
+#include "GCSegmentedArray.h"
#include <wtf/HashSet.h>
+#include <wtf/Lock.h>
#include <wtf/Noncopyable.h>
-#include <wtf/PassRefPtr.h>
+#include <wtf/PrintStream.h>
#include <wtf/RefPtr.h>
-#include <wtf/Vector.h>
namespace JSC {
class CodeBlock;
class Heap;
-class SlotVisitor;
+class JSCell;
+class VM;
// CodeBlockSet tracks all CodeBlocks. Every CodeBlock starts out with one
// reference coming in from GC. The GC is responsible for freeing CodeBlocks
@@ -48,50 +50,47 @@ class CodeBlockSet {
public:
CodeBlockSet();
~CodeBlockSet();
+
+ void lastChanceToFinalize(VM&);
// Add a CodeBlock. This is only called by CodeBlock constructors.
- void add(PassRefPtr<CodeBlock>);
-
- // Clear all mark bits associated with DFG code blocks.
- void clearMarks();
+ void add(CodeBlock*);
+ // Clear all mark bits for all CodeBlocks.
+ void clearMarksForFullCollection();
+
// Mark a pointer that may be a CodeBlock that belongs to the set of DFG
// blocks. This is defined in CodeBlock.h.
- void mark(void* candidateCodeBlock);
+private:
+ void mark(const LockHolder&, CodeBlock* candidateCodeBlock);
+public:
+ void mark(const LockHolder&, void* candidateCodeBlock);
// Delete all code blocks that are only referenced by this set (i.e. owned
// by this set), and that have not been marked.
- void deleteUnmarkedAndUnreferenced();
+ void deleteUnmarkedAndUnreferenced(VM&, CollectionScope);
- // Trace all marked code blocks. The CodeBlock is free to make use of
- // mayBeExecuting.
- void traceMarked(SlotVisitor&);
+ void clearCurrentlyExecuting();
- // Add all currently executing CodeBlocks to the remembered set to be
- // re-scanned during the next collection.
- void rememberCurrentlyExecutingCodeBlocks(Heap*);
+ bool contains(const LockHolder&, void* candidateCodeBlock);
+ Lock& getLock() { return m_lock; }
// Visits each CodeBlock in the heap until the visitor function returns true
// to indicate that it is done iterating, or until every CodeBlock has been
// visited.
- template<typename Functor> void iterate(Functor& functor)
- {
- for (auto &codeBlock : m_set) {
- bool done = functor(codeBlock);
- if (done)
- break;
- }
- }
+ template<typename Functor> void iterate(const Functor&);
+
+ template<typename Functor> void iterateCurrentlyExecuting(const Functor&);
+
+ void dump(PrintStream&) const;
private:
- // This is not a set of RefPtr<CodeBlock> because we need to be able to find
- // arbitrary bogus pointers. I could have written a thingy that had peek types
- // and all, but that seemed like overkill.
- HashSet<CodeBlock* > m_set;
- Vector<CodeBlock*> m_currentlyExecuting;
+ void promoteYoungCodeBlocks(const LockHolder&);
+
+ HashSet<CodeBlock*> m_oldCodeBlocks;
+ HashSet<CodeBlock*> m_newCodeBlocks;
+ HashSet<CodeBlock*> m_currentlyExecuting;
+ Lock m_lock;
};
} // namespace JSC
-
-#endif // CodeBlockSet_h
-
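
Note on the new iterate template declared above: it visits old CodeBlocks first, then new ones, and a functor that returns true ends the walk early. A minimal sketch of that stop-early functor protocol over plain containers (all names here are illustrative, not JSC API):

// Sketch of the stop-early iteration protocol used by CodeBlockSet::iterate.
#include <cstdio>
#include <vector>

template<typename Functor>
void iterateUntilDone(const std::vector<int>& oldItems, const std::vector<int>& newItems, const Functor& functor)
{
    for (int item : oldItems) {
        if (functor(item))
            return; // functor returned true: it is done iterating
    }
    for (int item : newItems) {
        if (functor(item))
            return;
    }
}

int main()
{
    std::vector<int> oldItems { 1, 2, 3 };
    std::vector<int> newItems { 4, 5 };
    // Stop as soon as an even item is seen: visits 1, then 2, then stops.
    iterateUntilDone(oldItems, newItems, [] (int item) {
        std::printf("visiting %d\n", item);
        return item % 2 == 0;
    });
}
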
diff --git a/Source/JavaScriptCore/heap/CodeBlockSetInlines.h b/Source/JavaScriptCore/heap/CodeBlockSetInlines.h
new file mode 100644
index 000000000..04dbcecfb
--- /dev/null
+++ b/Source/JavaScriptCore/heap/CodeBlockSetInlines.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CodeBlock.h"
+#include "CodeBlockSet.h"
+
+namespace JSC {
+
+inline void CodeBlockSet::mark(const LockHolder& locker, void* candidateCodeBlock)
+{
+ ASSERT(m_lock.isLocked());
+ // We have to check for 0 and -1 because those are used by the HashMap as markers.
+ uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
+
+ // This checks for both of those nasty cases in one go.
+ // 0 + 1 = 1
+ // -1 + 1 = 0
+ if (value + 1 <= 1)
+ return;
+
+ CodeBlock* codeBlock = static_cast<CodeBlock*>(candidateCodeBlock);
+ if (!m_oldCodeBlocks.contains(codeBlock) && !m_newCodeBlocks.contains(codeBlock))
+ return;
+
+ mark(locker, codeBlock);
+}
+
+inline void CodeBlockSet::mark(const LockHolder&, CodeBlock* codeBlock)
+{
+ if (!codeBlock)
+ return;
+
+ m_currentlyExecuting.add(codeBlock);
+}
+
+template<typename Functor>
+void CodeBlockSet::iterate(const Functor& functor)
+{
+ LockHolder locker(m_lock);
+ for (auto& codeBlock : m_oldCodeBlocks) {
+ bool done = functor(codeBlock);
+ if (done)
+ return;
+ }
+
+ for (auto& codeBlock : m_newCodeBlocks) {
+ bool done = functor(codeBlock);
+ if (done)
+ return;
+ }
+}
+
+template<typename Functor>
+void CodeBlockSet::iterateCurrentlyExecuting(const Functor& functor)
+{
+ LockHolder locker(&m_lock);
+ for (CodeBlock* codeBlock : m_currentlyExecuting)
+ functor(codeBlock);
+}
+
+} // namespace JSC
+
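
Note on the mark path above: it filters out 0 and -1 before treating the candidate pointer as a CodeBlock, because HashMap reserves those two bit patterns as its empty and deleted markers, and the value + 1 <= 1 trick rejects both with a single unsigned comparison. A small standalone demonstration of just that arithmetic (no JSC types involved):

// Demonstrates the single-compare filter for the HashMap marker values 0 and -1.
#include <cassert>
#include <cstdint>

static bool passesMarkerFilter(uintptr_t value)
{
    // 0 + 1 == 1, and uintptr_t(-1) + 1 == 0 by unsigned wraparound,
    // so both markers satisfy value + 1 <= 1; ordinary pointers do not.
    return value + 1 > 1;
}

int main()
{
    assert(!passesMarkerFilter(0));
    assert(!passesMarkerFilter(static_cast<uintptr_t>(-1)));
    assert(passesMarkerFilter(0x1000)); // an ordinary-looking pointer value passes
}
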
diff --git a/Source/JavaScriptCore/heap/CollectingScope.h b/Source/JavaScriptCore/heap/CollectingScope.h
new file mode 100644
index 000000000..c1f860aab
--- /dev/null
+++ b/Source/JavaScriptCore/heap/CollectingScope.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "Heap.h"
+
+namespace JSC {
+
+class CollectingScope {
+public:
+ CollectingScope(Heap& heap)
+ : m_heap(heap)
+ , m_oldState(m_heap.m_mutatorState)
+ {
+ m_heap.m_mutatorState = MutatorState::Collecting;
+ }
+
+ ~CollectingScope()
+ {
+ m_heap.m_mutatorState = m_oldState;
+ }
+
+private:
+ Heap& m_heap;
+ MutatorState m_oldState;
+};
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/CollectionScope.cpp b/Source/JavaScriptCore/heap/CollectionScope.cpp
new file mode 100644
index 000000000..b2990e75d
--- /dev/null
+++ b/Source/JavaScriptCore/heap/CollectionScope.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CollectionScope.h"
+
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+const char* collectionScopeName(CollectionScope scope)
+{
+ switch (scope) {
+ case CollectionScope::Eden:
+ return "Eden";
+ case CollectionScope::Full:
+ return "Full";
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr;
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::CollectionScope scope)
+{
+ out.print(JSC::collectionScopeName(scope));
+}
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/heap/HeapOperation.h b/Source/JavaScriptCore/heap/CollectionScope.h
index 769127e89..9b4f48703 100644
--- a/Source/JavaScriptCore/heap/HeapOperation.h
+++ b/Source/JavaScriptCore/heap/CollectionScope.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,13 +23,21 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef HeapOperation_h
-#define HeapOperation_h
+#pragma once
namespace JSC {
-enum HeapOperation { NoOperation, Allocation, FullCollection, EdenCollection };
+enum class CollectionScope { Eden, Full };
+
+const char* collectionScopeName(CollectionScope);
} // namespace JSC
-#endif // HeapOperation_h
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream& out, JSC::CollectionScope);
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/heap/CopyVisitor.cpp b/Source/JavaScriptCore/heap/CollectorPhase.cpp
index 3d18936ec..610fed1d0 100644
--- a/Source/JavaScriptCore/heap/CopyVisitor.cpp
+++ b/Source/JavaScriptCore/heap/CollectorPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,44 +24,61 @@
*/
#include "config.h"
-#include "CopyVisitor.h"
+#include "CollectorPhase.h"
-#include "CopiedSpaceInlines.h"
-#include "CopyVisitorInlines.h"
-#include "CopyWorkList.h"
-#include "GCThreadSharedData.h"
-#include "JSCell.h"
-#include "JSObject.h"
-#include "Operations.h"
-#include <wtf/Threading.h>
+#include <wtf/PrintStream.h>
namespace JSC {
-CopyVisitor::CopyVisitor(GCThreadSharedData& shared)
- : m_shared(shared)
+bool worldShouldBeSuspended(CollectorPhase phase)
{
+ switch (phase) {
+ case CollectorPhase::NotRunning:
+ case CollectorPhase::Concurrent:
+ return false;
+
+ case CollectorPhase::Begin:
+ case CollectorPhase::Fixpoint:
+ case CollectorPhase::Reloop:
+ case CollectorPhase::End:
+ return true;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
}
-void CopyVisitor::copyFromShared()
-{
- size_t next, end;
- m_shared.getNextBlocksToCopy(next, end);
- while (next < end) {
- for (; next < end; ++next) {
- CopiedBlock* block = m_shared.m_blocksToCopy[next];
- if (!block->hasWorkList())
- continue;
+} // namespace JSC
+
+namespace WTF {
- CopyWorkList& workList = block->workList();
- for (CopyWorkList::iterator it = workList.begin(); it != workList.end(); ++it)
- visitItem(*it);
+using namespace JSC;
- ASSERT(!block->liveBytes());
- m_shared.m_copiedSpace->recycleEvacuatedBlock(block, m_shared.m_vm->heap.operationInProgress());
- }
- m_shared.getNextBlocksToCopy(next, end);
+void printInternal(PrintStream& out, JSC::CollectorPhase phase)
+{
+ switch (phase) {
+ case CollectorPhase::NotRunning:
+ out.print("NotRunning");
+ return;
+ case CollectorPhase::Begin:
+ out.print("Begin");
+ return;
+ case CollectorPhase::Fixpoint:
+ out.print("Fixpoint");
+ return;
+ case CollectorPhase::Concurrent:
+ out.print("Concurrent");
+ return;
+ case CollectorPhase::Reloop:
+ out.print("Reloop");
+ return;
+ case CollectorPhase::End:
+ out.print("End");
+ return;
}
- ASSERT(next == end);
+
+ RELEASE_ASSERT_NOT_REACHED();
}
-} // namespace JSC
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/heap/CollectorPhase.h b/Source/JavaScriptCore/heap/CollectorPhase.h
new file mode 100644
index 000000000..d5a00356e
--- /dev/null
+++ b/Source/JavaScriptCore/heap/CollectorPhase.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace JSC {
+
+// We track collector phase in order to allow either the collector thread or the mutator thread to
+// jump in and do work. The collector and mutator trade the conn
+// (https://en.wikipedia.org/wiki/Conn_(nautical)) with each other based on who is available to do work,
+// and they use the phase to help each other know what to do once they take the conn.
+//
+// The collector relinquishes the conn whenever it calls stopTheMutator() while the mutator is running. Then the
+// collector thread goes to sleep.
+//
+// The mutator relinquishes the conn whenever it calls releaseAccess(). That wakes up the collector thread.
+enum class CollectorPhase : uint8_t {
+ // We use this phase when the collector is not running at all. After this state is Begin.
+ NotRunning,
+
+ // This is everything from when the collector begins to when it first yields to the mutator for
+ // marking. After this is Fixpoint.
+ Begin,
+
+ // This means that we should try to make some progress with the world stopped. This usually means
+ // doing an iteration of MarkingConstraintSet::executeConvergence, but it could also mean marking
+ // with the world stopped. After this is either Concurrent or End.
+ Fixpoint,
+
+ // In this state the collector is relying on the parallel helpers and incremental mutator work to
+ // make progress. After this is Reloop, once marking stalls.
+ Concurrent,
+
+ // We did some concurrent marking and now we ran out of work. This phase prepares the GC for another
+ // Fixpoint. After this is Fixpoint.
+ Reloop,
+
+ // The collector is trying to finish up. After this state is NotRunning.
+ End
+};
+
+bool worldShouldBeSuspended(CollectorPhase phase);
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::CollectorPhase);
+
+} // namespace WTF
+
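The phase enum and worldShouldBeSuspended() above encode which parts of the cycle run with the mutator suspended. The sketch below mirrors both outside of JSC and steps through one plausible cycle implied by the "After this is ..." comments (NotRunning, Begin, Fixpoint, Concurrent, Reloop, Fixpoint, End, NotRunning); it is an illustration, not JSC code:

#include <cstdint>
#include <iostream>

// Mirrors the enum and worldShouldBeSuspended() from the patch, outside of JSC.
enum class CollectorPhase : uint8_t { NotRunning, Begin, Fixpoint, Concurrent, Reloop, End };

bool worldShouldBeSuspended(CollectorPhase phase)
{
    switch (phase) {
    case CollectorPhase::NotRunning:
    case CollectorPhase::Concurrent:
        return false; // the mutator runs freely in these phases
    case CollectorPhase::Begin:
    case CollectorPhase::Fixpoint:
    case CollectorPhase::Reloop:
    case CollectorPhase::End:
        return true; // these phases require the world to be stopped
    }
    return false;
}

const char* name(CollectorPhase phase)
{
    switch (phase) {
    case CollectorPhase::NotRunning: return "NotRunning";
    case CollectorPhase::Begin: return "Begin";
    case CollectorPhase::Fixpoint: return "Fixpoint";
    case CollectorPhase::Concurrent: return "Concurrent";
    case CollectorPhase::Reloop: return "Reloop";
    case CollectorPhase::End: return "End";
    }
    return "?";
}

int main()
{
    // One plausible cycle, following the "After this is ..." comments above.
    CollectorPhase cycle[] = {
        CollectorPhase::NotRunning, CollectorPhase::Begin, CollectorPhase::Fixpoint,
        CollectorPhase::Concurrent, CollectorPhase::Reloop, CollectorPhase::Fixpoint,
        CollectorPhase::End, CollectorPhase::NotRunning
    };
    for (CollectorPhase phase : cycle)
        std::cout << name(phase) << ": stop the world = " << (worldShouldBeSuspended(phase) ? "yes" : "no") << '\n';
}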
diff --git a/Source/JavaScriptCore/heap/ConservativeRoots.cpp b/Source/JavaScriptCore/heap/ConservativeRoots.cpp
index 7fc8eee3f..554c9c230 100644
--- a/Source/JavaScriptCore/heap/ConservativeRoots.cpp
+++ b/Source/JavaScriptCore/heap/ConservativeRoots.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,70 +27,61 @@
#include "ConservativeRoots.h"
#include "CodeBlock.h"
-#include "CodeBlockSet.h"
-#include "CopiedSpace.h"
-#include "CopiedSpaceInlines.h"
+#include "CodeBlockSetInlines.h"
+#include "HeapInlines.h"
+#include "HeapUtil.h"
+#include "JITStubRoutineSet.h"
#include "JSCell.h"
#include "JSObject.h"
+#include "JSCInlines.h"
+#include "MarkedBlockInlines.h"
#include "Structure.h"
+#include <wtf/OSAllocator.h>
namespace JSC {
-ConservativeRoots::ConservativeRoots(const MarkedBlockSet* blocks, CopiedSpace* copiedSpace)
+ConservativeRoots::ConservativeRoots(Heap& heap)
: m_roots(m_inlineRoots)
, m_size(0)
, m_capacity(inlineCapacity)
- , m_blocks(blocks)
- , m_copiedSpace(copiedSpace)
+ , m_heap(heap)
{
}
ConservativeRoots::~ConservativeRoots()
{
if (m_roots != m_inlineRoots)
- OSAllocator::decommitAndRelease(m_roots, m_capacity * sizeof(JSCell*));
+ OSAllocator::decommitAndRelease(m_roots, m_capacity * sizeof(HeapCell*));
}
void ConservativeRoots::grow()
{
size_t newCapacity = m_capacity == inlineCapacity ? nonInlineCapacity : m_capacity * 2;
- JSCell** newRoots = static_cast<JSCell**>(OSAllocator::reserveAndCommit(newCapacity * sizeof(JSCell*)));
- memcpy(newRoots, m_roots, m_size * sizeof(JSCell*));
+ HeapCell** newRoots = static_cast<HeapCell**>(OSAllocator::reserveAndCommit(newCapacity * sizeof(HeapCell*)));
+ memcpy(newRoots, m_roots, m_size * sizeof(HeapCell*));
if (m_roots != m_inlineRoots)
- OSAllocator::decommitAndRelease(m_roots, m_capacity * sizeof(JSCell*));
+ OSAllocator::decommitAndRelease(m_roots, m_capacity * sizeof(HeapCell*));
m_capacity = newCapacity;
m_roots = newRoots;
}
template<typename MarkHook>
-inline void ConservativeRoots::genericAddPointer(void* p, TinyBloomFilter filter, MarkHook& markHook)
+inline void ConservativeRoots::genericAddPointer(void* p, HeapVersion markingVersion, TinyBloomFilter filter, MarkHook& markHook)
{
markHook.mark(p);
- m_copiedSpace->pinIfNecessary(p);
-
- MarkedBlock* candidate = MarkedBlock::blockFor(p);
- if (filter.ruleOut(reinterpret_cast<Bits>(candidate))) {
- ASSERT(!candidate || !m_blocks->set().contains(candidate));
- return;
- }
-
- if (!MarkedBlock::isAtomAligned(p))
- return;
-
- if (!m_blocks->set().contains(candidate))
- return;
-
- if (!candidate->isLiveCell(p))
- return;
-
- if (m_size == m_capacity)
- grow();
-
- m_roots[m_size++] = static_cast<JSCell*>(p);
+ HeapUtil::findGCObjectPointersForMarking(
+ m_heap, markingVersion, filter, p,
+ [&] (void* p) {
+ if (m_size == m_capacity)
+ grow();
+
+ m_roots[m_size++] = bitwise_cast<HeapCell*>(p);
+ });
}
template<typename MarkHook>
+SUPPRESS_ASAN
void ConservativeRoots::genericAddSpan(void* begin, void* end, MarkHook& markHook)
{
if (begin > end) {
@@ -99,13 +90,13 @@ void ConservativeRoots::genericAddSpan(void* begin, void* end, MarkHook& markHoo
end = swapTemp;
}
- ASSERT((static_cast<char*>(end) - static_cast<char*>(begin)) < 0x1000000);
- ASSERT(isPointerAligned(begin));
- ASSERT(isPointerAligned(end));
+ RELEASE_ASSERT(isPointerAligned(begin));
+ RELEASE_ASSERT(isPointerAligned(end));
- TinyBloomFilter filter = m_blocks->filter(); // Make a local copy of filter to show the compiler it won't alias, and can be register-allocated.
+ TinyBloomFilter filter = m_heap.objectSpace().blocks().filter(); // Make a local copy of filter to show the compiler it won't alias, and can be register-allocated.
+ HeapVersion markingVersion = m_heap.objectSpace().markingVersion();
for (char** it = static_cast<char**>(begin); it != static_cast<char**>(end); ++it)
- genericAddPointer(*it, filter, markHook);
+ genericAddPointer(*it, markingVersion, filter, markHook);
}
class DummyMarkHook {
@@ -124,30 +115,32 @@ void ConservativeRoots::add(void* begin, void* end, JITStubRoutineSet& jitStubRo
genericAddSpan(begin, end, jitStubRoutines);
}
-template<typename T, typename U>
class CompositeMarkHook {
public:
- CompositeMarkHook(T& first, U& second)
- : m_first(first)
- , m_second(second)
+ CompositeMarkHook(JITStubRoutineSet& stubRoutines, CodeBlockSet& codeBlocks, const LockHolder& locker)
+ : m_stubRoutines(stubRoutines)
+ , m_codeBlocks(codeBlocks)
+ , m_codeBlocksLocker(locker)
{
}
void mark(void* address)
{
- m_first.mark(address);
- m_second.mark(address);
+ m_stubRoutines.mark(address);
+ m_codeBlocks.mark(m_codeBlocksLocker, address);
}
private:
- T& m_first;
- U& m_second;
+ JITStubRoutineSet& m_stubRoutines;
+ CodeBlockSet& m_codeBlocks;
+ const LockHolder& m_codeBlocksLocker;
};
void ConservativeRoots::add(
void* begin, void* end, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks)
{
- CompositeMarkHook<JITStubRoutineSet, CodeBlockSet> markHook(jitStubRoutines, codeBlocks);
+ LockHolder locker(codeBlocks.getLock());
+ CompositeMarkHook markHook(jitStubRoutines, codeBlocks, locker);
genericAddSpan(begin, end, markHook);
}
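genericAddSpan() walks a span one pointer-width word at a time and hands each word to genericAddPointer(), which now delegates the block and cell lookup to HeapUtil::findGCObjectPointersForMarking. The essential conservative-scanning shape, stripped of the JSC heap machinery (a std::unordered_set plays the role of the block filter and live-cell check here):

#include <cstdint>
#include <iostream>
#include <unordered_set>
#include <vector>

// Conservative scan: treat every pointer-aligned word in [begin, end) as a
// potential pointer and record the ones that land in the candidate set.
std::vector<uintptr_t> scanSpan(const uintptr_t* begin, const uintptr_t* end,
                                const std::unordered_set<uintptr_t>& candidates)
{
    std::vector<uintptr_t> roots;
    for (const uintptr_t* it = begin; it != end; ++it) {
        if (candidates.count(*it))
            roots.push_back(*it); // conservatively keep it alive
    }
    return roots;
}

int main()
{
    std::unordered_set<uintptr_t> candidates = { 0x1000, 0x2000, 0x3000 };
    uintptr_t fakeStack[] = { 0xdead, 0x2000, 42, 0x3000, 0xbeef };
    auto roots = scanSpan(fakeStack, fakeStack + 5, candidates);
    std::cout << "conservative roots found: " << roots.size() << '\n'; // prints 2
}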
diff --git a/Source/JavaScriptCore/heap/ConservativeRoots.h b/Source/JavaScriptCore/heap/ConservativeRoots.h
index 0cad933a4..e46445b41 100644
--- a/Source/JavaScriptCore/heap/ConservativeRoots.h
+++ b/Source/JavaScriptCore/heap/ConservativeRoots.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,23 +23,19 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ConservativeRoots_h
-#define ConservativeRoots_h
+#pragma once
#include "Heap.h"
-#include <wtf/OSAllocator.h>
-#include <wtf/Vector.h>
namespace JSC {
class CodeBlockSet;
-class Heap;
+class HeapCell;
class JITStubRoutineSet;
-class JSCell;
class ConservativeRoots {
public:
- ConservativeRoots(const MarkedBlockSet*, CopiedSpace*);
+ ConservativeRoots(Heap&);
~ConservativeRoots();
void add(void* begin, void* end);
@@ -47,26 +43,25 @@ public:
void add(void* begin, void* end, JITStubRoutineSet&, CodeBlockSet&);
size_t size();
- JSCell** roots();
+ HeapCell** roots();
private:
static const size_t inlineCapacity = 128;
- static const size_t nonInlineCapacity = 8192 / sizeof(JSCell*);
+ static const size_t nonInlineCapacity = 8192 / sizeof(HeapCell*);
template<typename MarkHook>
- void genericAddPointer(void*, TinyBloomFilter, MarkHook&);
+ void genericAddPointer(void*, HeapVersion, TinyBloomFilter, MarkHook&);
template<typename MarkHook>
void genericAddSpan(void*, void* end, MarkHook&);
void grow();
- JSCell** m_roots;
+ HeapCell** m_roots;
size_t m_size;
size_t m_capacity;
- const MarkedBlockSet* m_blocks;
- CopiedSpace* m_copiedSpace;
- JSCell* m_inlineRoots[inlineCapacity];
+ Heap& m_heap;
+ HeapCell* m_inlineRoots[inlineCapacity];
};
inline size_t ConservativeRoots::size()
@@ -74,11 +69,9 @@ inline size_t ConservativeRoots::size()
return m_size;
}
-inline JSCell** ConservativeRoots::roots()
+inline HeapCell** ConservativeRoots::roots()
{
return m_roots;
}
} // namespace JSC
-
-#endif // ConservativeRoots_h
diff --git a/Source/JavaScriptCore/heap/ConstraintVolatility.h b/Source/JavaScriptCore/heap/ConstraintVolatility.h
new file mode 100644
index 000000000..5cf986f88
--- /dev/null
+++ b/Source/JavaScriptCore/heap/ConstraintVolatility.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+enum class ConstraintVolatility : uint8_t {
+ // The constraint needs to be validated, but it is unlikely to ever produce information.
+ // It's best to run it at the bitter end.
+ SeldomGreyed,
+
+ // FIXME: We could introduce a new kind of volatility called GreyedByResumption, which
+ // would mean running all of the times that GreyedByExecution runs except as a root in a
+ // full GC.
+ // https://bugs.webkit.org/show_bug.cgi?id=166830
+
+ // The constraint needs to be reevaluated anytime the mutator runs: so at GC start and
+ // whenever the GC resuspends after a resumption. This is almost always something that
+ // you'd call a "root" in a traditional GC.
+ GreyedByExecution,
+
+ // The constraint needs to be reevaluated any time any object is marked and anytime the
+ // mutator resumes.
+ GreyedByMarking
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+inline void printInternal(PrintStream& out, JSC::ConstraintVolatility volatility)
+{
+ switch (volatility) {
+ case JSC::ConstraintVolatility::SeldomGreyed:
+ out.print("SeldomGreyed");
+ return;
+ case JSC::ConstraintVolatility::GreyedByExecution:
+ out.print("GreyedByExecution");
+ return;
+ case JSC::ConstraintVolatility::GreyedByMarking:
+ out.print("GreyedByMarking");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
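The three volatility levels describe when a constraint has to be re-evaluated. The sketch below turns those comments into a small decision function; the GCEvent abstraction and the assumption that every constraint is also validated in a final fixpoint are mine, not part of the patch:

#include <cstdint>
#include <iostream>

enum class ConstraintVolatility : uint8_t { SeldomGreyed, GreyedByExecution, GreyedByMarking };

// Events are an illustrative abstraction, not a JSC type.
enum class GCEvent { CollectionStart, MutatorResumed, ObjectMarked, FinalFixpoint };

// Decides whether a constraint with the given volatility should be re-evaluated
// for an event, following the comments in ConstraintVolatility.h above.
bool shouldReevaluate(ConstraintVolatility volatility, GCEvent event)
{
    switch (volatility) {
    case ConstraintVolatility::SeldomGreyed:
        return event == GCEvent::FinalFixpoint; // best run at the bitter end
    case ConstraintVolatility::GreyedByExecution:
        return event == GCEvent::CollectionStart || event == GCEvent::MutatorResumed
            || event == GCEvent::FinalFixpoint;
    case ConstraintVolatility::GreyedByMarking:
        return event == GCEvent::ObjectMarked || event == GCEvent::MutatorResumed
            || event == GCEvent::FinalFixpoint;
    }
    return false;
}

int main()
{
    std::cout << shouldReevaluate(ConstraintVolatility::GreyedByMarking, GCEvent::ObjectMarked) << '\n'; // 1
    std::cout << shouldReevaluate(ConstraintVolatility::SeldomGreyed, GCEvent::ObjectMarked) << '\n';    // 0
}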
diff --git a/Source/JavaScriptCore/heap/CopiedAllocator.h b/Source/JavaScriptCore/heap/CopiedAllocator.h
deleted file mode 100644
index 3dfc9a7fe..000000000
--- a/Source/JavaScriptCore/heap/CopiedAllocator.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef CopiedAllocator_h
-#define CopiedAllocator_h
-
-#include "CopiedBlock.h"
-#include <wtf/CheckedBoolean.h>
-#include <wtf/DataLog.h>
-
-namespace JSC {
-
-class CopiedAllocator {
-public:
- CopiedAllocator();
-
- bool fastPathShouldSucceed(size_t bytes) const;
- CheckedBoolean tryAllocate(size_t bytes, void** outPtr);
- CheckedBoolean tryAllocateDuringCopying(size_t bytes, void** outPtr);
- CheckedBoolean tryReallocate(void *oldPtr, size_t oldBytes, size_t newBytes);
- void* forceAllocate(size_t bytes);
- CopiedBlock* resetCurrentBlock();
- void setCurrentBlock(CopiedBlock*);
- size_t currentCapacity();
-
- bool isValid() { return !!m_currentBlock; }
-
- CopiedBlock* currentBlock() { return m_currentBlock; }
-
- // Yes, these are public. No, that doesn't mean you can play with them.
- // If I had made them private then I'd have to list off all of the JIT
- // classes and functions that are entitled to modify these directly, and
- // that would have been gross.
- size_t m_currentRemaining;
- char* m_currentPayloadEnd;
- CopiedBlock* m_currentBlock;
-};
-
-inline CopiedAllocator::CopiedAllocator()
- : m_currentRemaining(0)
- , m_currentPayloadEnd(0)
- , m_currentBlock(0)
-{
-}
-
-inline bool CopiedAllocator::fastPathShouldSucceed(size_t bytes) const
-{
- ASSERT(is8ByteAligned(reinterpret_cast<void*>(bytes)));
-
- return bytes <= m_currentRemaining;
-}
-
-inline CheckedBoolean CopiedAllocator::tryAllocate(size_t bytes, void** outPtr)
-{
- ASSERT(is8ByteAligned(reinterpret_cast<void*>(bytes)));
-
- // This code is written in a gratuitously low-level manner, in order to
- // serve as a kind of template for what the JIT would do. Note that the
- // way it's written it ought to only require one register, which doubles
- // as the result, provided that the compiler does a minimal amount of
- // control flow simplification and the bytes argument is a constant.
-
- size_t currentRemaining = m_currentRemaining;
- if (bytes > currentRemaining)
- return false;
- currentRemaining -= bytes;
- m_currentRemaining = currentRemaining;
- *outPtr = m_currentPayloadEnd - currentRemaining - bytes;
-
- ASSERT(is8ByteAligned(*outPtr));
-
- return true;
-}
-
-inline CheckedBoolean CopiedAllocator::tryAllocateDuringCopying(size_t bytes, void** outPtr)
-{
- if (!tryAllocate(bytes, outPtr))
- return false;
- m_currentBlock->reportLiveBytesDuringCopying(bytes);
- return true;
-}
-
-inline CheckedBoolean CopiedAllocator::tryReallocate(
- void* oldPtr, size_t oldBytes, size_t newBytes)
-{
- ASSERT(is8ByteAligned(oldPtr));
- ASSERT(is8ByteAligned(reinterpret_cast<void*>(oldBytes)));
- ASSERT(is8ByteAligned(reinterpret_cast<void*>(newBytes)));
-
- ASSERT(newBytes > oldBytes);
-
- size_t additionalBytes = newBytes - oldBytes;
-
- size_t currentRemaining = m_currentRemaining;
- if (m_currentPayloadEnd - currentRemaining - oldBytes != static_cast<char*>(oldPtr))
- return false;
-
- if (additionalBytes > currentRemaining)
- return false;
-
- m_currentRemaining = currentRemaining - additionalBytes;
-
- return true;
-}
-
-inline void* CopiedAllocator::forceAllocate(size_t bytes)
-{
- void* result = 0; // Needed because compilers don't realize this will always be assigned.
- CheckedBoolean didSucceed = tryAllocate(bytes, &result);
- ASSERT(didSucceed);
- return result;
-}
-
-inline CopiedBlock* CopiedAllocator::resetCurrentBlock()
-{
- CopiedBlock* result = m_currentBlock;
- if (result) {
- result->m_remaining = m_currentRemaining;
- m_currentBlock = 0;
- m_currentRemaining = 0;
- m_currentPayloadEnd = 0;
- }
- return result;
-}
-
-inline void CopiedAllocator::setCurrentBlock(CopiedBlock* newBlock)
-{
- ASSERT(!m_currentBlock);
- m_currentBlock = newBlock;
- ASSERT(newBlock);
- m_currentRemaining = newBlock->m_remaining;
- m_currentPayloadEnd = newBlock->payloadEnd();
-}
-
-inline size_t CopiedAllocator::currentCapacity()
-{
- if (!m_currentBlock)
- return 0;
- return m_currentBlock->capacity();
-}
-
-} // namespace JSC
-
-#endif
diff --git a/Source/JavaScriptCore/heap/CopiedBlock.h b/Source/JavaScriptCore/heap/CopiedBlock.h
deleted file mode 100644
index 6d59aa6bc..000000000
--- a/Source/JavaScriptCore/heap/CopiedBlock.h
+++ /dev/null
@@ -1,288 +0,0 @@
-/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef CopiedBlock_h
-#define CopiedBlock_h
-
-#include "BlockAllocator.h"
-#include "CopyWorkList.h"
-#include "HeapBlock.h"
-#include "JSCJSValue.h"
-#include "Options.h"
-#include <wtf/Atomics.h>
-#include <wtf/OwnPtr.h>
-#include <wtf/PassOwnPtr.h>
-
-namespace JSC {
-
-class CopiedSpace;
-
-class CopiedBlock : public HeapBlock<CopiedBlock> {
- friend class CopiedSpace;
- friend class CopiedAllocator;
-public:
- static CopiedBlock* create(DeadBlock*);
- static CopiedBlock* createNoZeroFill(DeadBlock*);
-
- void pin();
- bool isPinned();
-
- bool isOld();
- bool isOversize();
- void didPromote();
-
- unsigned liveBytes();
- bool shouldReportLiveBytes(SpinLockHolder&, JSCell* owner);
- void reportLiveBytes(SpinLockHolder&, JSCell*, CopyToken, unsigned);
- void reportLiveBytesDuringCopying(unsigned);
- void didSurviveGC();
- void didEvacuateBytes(unsigned);
- bool shouldEvacuate();
- bool canBeRecycled();
-
- // The payload is the region of the block that is usable for allocations.
- char* payload();
- char* payloadEnd();
- size_t payloadCapacity();
-
- // The data is the region of the block that has been used for allocations.
- char* data();
- char* dataEnd();
- size_t dataSize();
-
- // The wilderness is the region of the block that is usable for allocations
- // but has not been so used.
- char* wilderness();
- char* wildernessEnd();
- size_t wildernessSize();
-
- size_t size();
- size_t capacity();
-
- static const size_t blockSize = 32 * KB;
-
- bool hasWorkList();
- CopyWorkList& workList();
- SpinLock& workListLock() { return m_workListLock; }
-
-private:
- CopiedBlock(Region*);
- void zeroFillWilderness(); // Can be called at any time to zero-fill to the end of the block.
-
- void checkConsistency();
-
- SpinLock m_workListLock;
- OwnPtr<CopyWorkList> m_workList;
-
- size_t m_remaining;
- bool m_isPinned : 1;
- bool m_isOld : 1;
- unsigned m_liveBytes;
-#ifndef NDEBUG
- unsigned m_liveObjects;
-#endif
-};
-
-inline CopiedBlock* CopiedBlock::createNoZeroFill(DeadBlock* block)
-{
- Region* region = block->region();
- return new(NotNull, block) CopiedBlock(region);
-}
-
-inline CopiedBlock* CopiedBlock::create(DeadBlock* block)
-{
- CopiedBlock* newBlock = createNoZeroFill(block);
- newBlock->zeroFillWilderness();
- return newBlock;
-}
-
-inline void CopiedBlock::zeroFillWilderness()
-{
-#if USE(JSVALUE64)
- memset(wilderness(), 0, wildernessSize());
-#else
- JSValue emptyValue;
- JSValue* limit = reinterpret_cast_ptr<JSValue*>(wildernessEnd());
- for (JSValue* currentValue = reinterpret_cast_ptr<JSValue*>(wilderness()); currentValue < limit; currentValue++)
- *currentValue = emptyValue;
-#endif
-}
-
-inline CopiedBlock::CopiedBlock(Region* region)
- : HeapBlock<CopiedBlock>(region)
- , m_remaining(payloadCapacity())
- , m_isPinned(false)
- , m_isOld(false)
- , m_liveBytes(0)
-#ifndef NDEBUG
- , m_liveObjects(0)
-#endif
-{
- m_workListLock.Init();
- ASSERT(is8ByteAligned(reinterpret_cast<void*>(m_remaining)));
-}
-
-inline void CopiedBlock::didSurviveGC()
-{
- checkConsistency();
- m_liveBytes = 0;
-#ifndef NDEBUG
- m_liveObjects = 0;
-#endif
- m_isPinned = false;
- if (m_workList)
- m_workList.clear();
-}
-
-inline void CopiedBlock::didEvacuateBytes(unsigned bytes)
-{
- ASSERT(m_liveBytes >= bytes);
- ASSERT(m_liveObjects);
- checkConsistency();
- m_liveBytes -= bytes;
-#ifndef NDEBUG
- m_liveObjects--;
-#endif
- checkConsistency();
-}
-
-inline bool CopiedBlock::canBeRecycled()
-{
- checkConsistency();
- return !m_liveBytes;
-}
-
-inline bool CopiedBlock::shouldEvacuate()
-{
- checkConsistency();
- return static_cast<double>(m_liveBytes) / static_cast<double>(payloadCapacity()) <= Options::minCopiedBlockUtilization();
-}
-
-inline void CopiedBlock::pin()
-{
- m_isPinned = true;
- if (m_workList)
- m_workList.clear();
-}
-
-inline bool CopiedBlock::isPinned()
-{
- return m_isPinned;
-}
-
-inline bool CopiedBlock::isOld()
-{
- return m_isOld;
-}
-
-inline void CopiedBlock::didPromote()
-{
- m_isOld = true;
-}
-
-inline bool CopiedBlock::isOversize()
-{
- return region()->isCustomSize();
-}
-
-inline unsigned CopiedBlock::liveBytes()
-{
- checkConsistency();
- return m_liveBytes;
-}
-
-inline char* CopiedBlock::payload()
-{
- return reinterpret_cast<char*>(this) + ((sizeof(CopiedBlock) + 7) & ~7);
-}
-
-inline char* CopiedBlock::payloadEnd()
-{
- return reinterpret_cast<char*>(this) + region()->blockSize();
-}
-
-inline size_t CopiedBlock::payloadCapacity()
-{
- return payloadEnd() - payload();
-}
-
-inline char* CopiedBlock::data()
-{
- return payload();
-}
-
-inline char* CopiedBlock::dataEnd()
-{
- return payloadEnd() - m_remaining;
-}
-
-inline size_t CopiedBlock::dataSize()
-{
- return dataEnd() - data();
-}
-
-inline char* CopiedBlock::wilderness()
-{
- return dataEnd();
-}
-
-inline char* CopiedBlock::wildernessEnd()
-{
- return payloadEnd();
-}
-
-inline size_t CopiedBlock::wildernessSize()
-{
- return wildernessEnd() - wilderness();
-}
-
-inline size_t CopiedBlock::size()
-{
- return dataSize();
-}
-
-inline size_t CopiedBlock::capacity()
-{
- return region()->blockSize();
-}
-
-inline bool CopiedBlock::hasWorkList()
-{
- return !!m_workList;
-}
-
-inline CopyWorkList& CopiedBlock::workList()
-{
- return *m_workList;
-}
-
-inline void CopiedBlock::checkConsistency()
-{
- ASSERT(!!m_liveBytes == !!m_liveObjects);
-}
-
-} // namespace JSC
-
-#endif
diff --git a/Source/JavaScriptCore/heap/CopiedBlockInlines.h b/Source/JavaScriptCore/heap/CopiedBlockInlines.h
deleted file mode 100644
index 8bf831cfc..000000000
--- a/Source/JavaScriptCore/heap/CopiedBlockInlines.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef CopiedBlockInlines_h
-#define CopiedBlockInlines_h
-
-#include "ClassInfo.h"
-#include "CopiedBlock.h"
-#include "Heap.h"
-#include "MarkedBlock.h"
-
-namespace JSC {
-
-inline bool CopiedBlock::shouldReportLiveBytes(SpinLockHolder&, JSCell* owner)
-{
- // We want to add to live bytes if the owner isn't part of the remembered set or
- // if this block was allocated during the last cycle.
- // If we always added live bytes we would double count for elements in the remembered
- // set across collections.
- // If we didn't always add live bytes to new blocks, we'd get too few.
- bool ownerIsRemembered = MarkedBlock::blockFor(owner)->isRemembered(owner);
- return !ownerIsRemembered || !m_isOld;
-}
-
-inline void CopiedBlock::reportLiveBytes(SpinLockHolder&, JSCell* owner, CopyToken token, unsigned bytes)
-{
- checkConsistency();
-#ifndef NDEBUG
- m_liveObjects++;
-#endif
- m_liveBytes += bytes;
- checkConsistency();
- ASSERT(m_liveBytes <= CopiedBlock::blockSize);
-
- if (isPinned())
- return;
-
- if (!shouldEvacuate()) {
- pin();
- return;
- }
-
- if (!m_workList)
- m_workList = adoptPtr(new CopyWorkList(Heap::heap(owner)->blockAllocator()));
-
- m_workList->append(CopyWorklistItem(owner, token));
-}
-
-inline void CopiedBlock::reportLiveBytesDuringCopying(unsigned bytes)
-{
- checkConsistency();
- // This doesn't need to be locked because the thread that calls this function owns the current block.
- m_isOld = true;
-#ifndef NDEBUG
- m_liveObjects++;
-#endif
- m_liveBytes += bytes;
- checkConsistency();
- ASSERT(m_liveBytes <= CopiedBlock::blockSize);
-}
-
-} // namespace JSC
-
-#endif // CopiedBlockInlines_h
diff --git a/Source/JavaScriptCore/heap/CopiedSpace.cpp b/Source/JavaScriptCore/heap/CopiedSpace.cpp
deleted file mode 100644
index eb294214f..000000000
--- a/Source/JavaScriptCore/heap/CopiedSpace.cpp
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "CopiedSpace.h"
-
-#include "CopiedSpaceInlines.h"
-#include "GCActivityCallback.h"
-#include "Operations.h"
-#include "Options.h"
-
-namespace JSC {
-
-CopiedSpace::CopiedSpace(Heap* heap)
- : m_heap(heap)
- , m_inCopyingPhase(false)
- , m_shouldDoCopyPhase(false)
- , m_numberOfLoanedBlocks(0)
-{
- m_toSpaceLock.Init();
-}
-
-CopiedSpace::~CopiedSpace()
-{
- while (!m_oldGen.toSpace->isEmpty())
- m_heap->blockAllocator().deallocate(CopiedBlock::destroy(m_oldGen.toSpace->removeHead()));
-
- while (!m_oldGen.fromSpace->isEmpty())
- m_heap->blockAllocator().deallocate(CopiedBlock::destroy(m_oldGen.fromSpace->removeHead()));
-
- while (!m_oldGen.oversizeBlocks.isEmpty())
- m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(m_oldGen.oversizeBlocks.removeHead()));
-
- while (!m_newGen.toSpace->isEmpty())
- m_heap->blockAllocator().deallocate(CopiedBlock::destroy(m_newGen.toSpace->removeHead()));
-
- while (!m_newGen.fromSpace->isEmpty())
- m_heap->blockAllocator().deallocate(CopiedBlock::destroy(m_newGen.fromSpace->removeHead()));
-
- while (!m_newGen.oversizeBlocks.isEmpty())
- m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(m_newGen.oversizeBlocks.removeHead()));
-
- ASSERT(m_oldGen.toSpace->isEmpty());
- ASSERT(m_oldGen.fromSpace->isEmpty());
- ASSERT(m_oldGen.oversizeBlocks.isEmpty());
- ASSERT(m_newGen.toSpace->isEmpty());
- ASSERT(m_newGen.fromSpace->isEmpty());
- ASSERT(m_newGen.oversizeBlocks.isEmpty());
-}
-
-void CopiedSpace::init()
-{
- m_oldGen.toSpace = &m_oldGen.blocks1;
- m_oldGen.fromSpace = &m_oldGen.blocks2;
-
- m_newGen.toSpace = &m_newGen.blocks1;
- m_newGen.fromSpace = &m_newGen.blocks2;
-
- allocateBlock();
-}
-
-CheckedBoolean CopiedSpace::tryAllocateSlowCase(size_t bytes, void** outPtr)
-{
- if (isOversize(bytes))
- return tryAllocateOversize(bytes, outPtr);
-
- ASSERT(m_heap->vm()->currentThreadIsHoldingAPILock());
- m_heap->didAllocate(m_allocator.currentCapacity());
-
- allocateBlock();
-
- *outPtr = m_allocator.forceAllocate(bytes);
- return true;
-}
-
-CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr)
-{
- ASSERT(isOversize(bytes));
-
- CopiedBlock* block = CopiedBlock::create(m_heap->blockAllocator().allocateCustomSize(sizeof(CopiedBlock) + bytes, CopiedBlock::blockSize));
- m_newGen.oversizeBlocks.push(block);
- m_newGen.blockFilter.add(reinterpret_cast<Bits>(block));
- m_blockSet.add(block);
- ASSERT(!block->isOld());
-
- CopiedAllocator allocator;
- allocator.setCurrentBlock(block);
- *outPtr = allocator.forceAllocate(bytes);
- allocator.resetCurrentBlock();
-
- m_heap->didAllocate(block->region()->blockSize());
-
- return true;
-}
-
-CheckedBoolean CopiedSpace::tryReallocate(void** ptr, size_t oldSize, size_t newSize)
-{
- if (oldSize >= newSize)
- return true;
-
- void* oldPtr = *ptr;
- ASSERT(!m_heap->vm()->isInitializingObject());
-
- if (CopiedSpace::blockFor(oldPtr)->isOversize() || isOversize(newSize))
- return tryReallocateOversize(ptr, oldSize, newSize);
-
- if (m_allocator.tryReallocate(oldPtr, oldSize, newSize))
- return true;
-
- void* result = 0;
- if (!tryAllocate(newSize, &result)) {
- *ptr = 0;
- return false;
- }
- memcpy(result, oldPtr, oldSize);
- *ptr = result;
- return true;
-}
-
-CheckedBoolean CopiedSpace::tryReallocateOversize(void** ptr, size_t oldSize, size_t newSize)
-{
- ASSERT(isOversize(oldSize) || isOversize(newSize));
- ASSERT(newSize > oldSize);
-
- void* oldPtr = *ptr;
-
- void* newPtr = 0;
- if (!tryAllocateOversize(newSize, &newPtr)) {
- *ptr = 0;
- return false;
- }
-
- memcpy(newPtr, oldPtr, oldSize);
-
- CopiedBlock* oldBlock = CopiedSpace::blockFor(oldPtr);
- if (oldBlock->isOversize()) {
- if (oldBlock->isOld())
- m_oldGen.oversizeBlocks.remove(oldBlock);
- else
- m_newGen.oversizeBlocks.remove(oldBlock);
- m_blockSet.remove(oldBlock);
- m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(oldBlock));
- }
-
- *ptr = newPtr;
- return true;
-}
-
-void CopiedSpace::doneFillingBlock(CopiedBlock* block, CopiedBlock** exchange)
-{
- ASSERT(m_inCopyingPhase);
-
- if (exchange)
- *exchange = allocateBlockForCopyingPhase();
-
- if (!block)
- return;
-
- if (!block->dataSize()) {
- recycleBorrowedBlock(block);
- return;
- }
-
- block->zeroFillWilderness();
-
- {
- // Always put the block into the old gen because it's being promoted!
- SpinLockHolder locker(&m_toSpaceLock);
- m_oldGen.toSpace->push(block);
- m_blockSet.add(block);
- m_oldGen.blockFilter.add(reinterpret_cast<Bits>(block));
- }
-
- {
- MutexLocker locker(m_loanedBlocksLock);
- ASSERT(m_numberOfLoanedBlocks > 0);
- ASSERT(m_inCopyingPhase);
- m_numberOfLoanedBlocks--;
- if (!m_numberOfLoanedBlocks)
- m_loanedBlocksCondition.signal();
- }
-}
-
-void CopiedSpace::didStartFullCollection()
-{
- ASSERT(heap()->operationInProgress() == FullCollection);
- ASSERT(m_oldGen.fromSpace->isEmpty());
- ASSERT(m_newGen.fromSpace->isEmpty());
-
-#ifndef NDEBUG
- for (CopiedBlock* block = m_newGen.toSpace->head(); block; block = block->next())
- ASSERT(!block->liveBytes());
-
- for (CopiedBlock* block = m_newGen.oversizeBlocks.head(); block; block = block->next())
- ASSERT(!block->liveBytes());
-#endif
-
- for (CopiedBlock* block = m_oldGen.toSpace->head(); block; block = block->next())
- block->didSurviveGC();
-
- for (CopiedBlock* block = m_oldGen.oversizeBlocks.head(); block; block = block->next())
- block->didSurviveGC();
-}
-
-void CopiedSpace::doneCopying()
-{
- {
- MutexLocker locker(m_loanedBlocksLock);
- while (m_numberOfLoanedBlocks > 0)
- m_loanedBlocksCondition.wait(m_loanedBlocksLock);
- }
-
- ASSERT(m_inCopyingPhase == m_shouldDoCopyPhase);
- m_inCopyingPhase = false;
-
- DoublyLinkedList<CopiedBlock>* toSpace;
- DoublyLinkedList<CopiedBlock>* fromSpace;
- TinyBloomFilter* blockFilter;
- if (heap()->operationInProgress() == FullCollection) {
- toSpace = m_oldGen.toSpace;
- fromSpace = m_oldGen.fromSpace;
- blockFilter = &m_oldGen.blockFilter;
- } else {
- toSpace = m_newGen.toSpace;
- fromSpace = m_newGen.fromSpace;
- blockFilter = &m_newGen.blockFilter;
- }
-
- while (!fromSpace->isEmpty()) {
- CopiedBlock* block = fromSpace->removeHead();
- // We don't add the block to the blockSet because it was never removed.
- ASSERT(m_blockSet.contains(block));
- blockFilter->add(reinterpret_cast<Bits>(block));
- toSpace->push(block);
- }
-
- if (heap()->operationInProgress() == EdenCollection) {
- m_oldGen.toSpace->append(*m_newGen.toSpace);
- m_oldGen.oversizeBlocks.append(m_newGen.oversizeBlocks);
- m_oldGen.blockFilter.add(m_newGen.blockFilter);
- m_newGen.blockFilter.reset();
- }
-
- ASSERT(m_newGen.toSpace->isEmpty());
- ASSERT(m_newGen.fromSpace->isEmpty());
- ASSERT(m_newGen.oversizeBlocks.isEmpty());
-
- allocateBlock();
-
- m_shouldDoCopyPhase = false;
-}
-
-size_t CopiedSpace::size()
-{
- size_t calculatedSize = 0;
-
- for (CopiedBlock* block = m_oldGen.toSpace->head(); block; block = block->next())
- calculatedSize += block->size();
-
- for (CopiedBlock* block = m_oldGen.fromSpace->head(); block; block = block->next())
- calculatedSize += block->size();
-
- for (CopiedBlock* block = m_oldGen.oversizeBlocks.head(); block; block = block->next())
- calculatedSize += block->size();
-
- for (CopiedBlock* block = m_newGen.toSpace->head(); block; block = block->next())
- calculatedSize += block->size();
-
- for (CopiedBlock* block = m_newGen.fromSpace->head(); block; block = block->next())
- calculatedSize += block->size();
-
- for (CopiedBlock* block = m_newGen.oversizeBlocks.head(); block; block = block->next())
- calculatedSize += block->size();
-
- return calculatedSize;
-}
-
-size_t CopiedSpace::capacity()
-{
- size_t calculatedCapacity = 0;
-
- for (CopiedBlock* block = m_oldGen.toSpace->head(); block; block = block->next())
- calculatedCapacity += block->capacity();
-
- for (CopiedBlock* block = m_oldGen.fromSpace->head(); block; block = block->next())
- calculatedCapacity += block->capacity();
-
- for (CopiedBlock* block = m_oldGen.oversizeBlocks.head(); block; block = block->next())
- calculatedCapacity += block->capacity();
-
- for (CopiedBlock* block = m_newGen.toSpace->head(); block; block = block->next())
- calculatedCapacity += block->capacity();
-
- for (CopiedBlock* block = m_newGen.fromSpace->head(); block; block = block->next())
- calculatedCapacity += block->capacity();
-
- for (CopiedBlock* block = m_newGen.oversizeBlocks.head(); block; block = block->next())
- calculatedCapacity += block->capacity();
-
- return calculatedCapacity;
-}
-
-static bool isBlockListPagedOut(double deadline, DoublyLinkedList<CopiedBlock>* list)
-{
- unsigned itersSinceLastTimeCheck = 0;
- CopiedBlock* current = list->head();
- while (current) {
- current = current->next();
- ++itersSinceLastTimeCheck;
- if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) {
- double currentTime = WTF::monotonicallyIncreasingTime();
- if (currentTime > deadline)
- return true;
- itersSinceLastTimeCheck = 0;
- }
- }
-
- return false;
-}
-
-bool CopiedSpace::isPagedOut(double deadline)
-{
- return isBlockListPagedOut(deadline, m_oldGen.toSpace)
- || isBlockListPagedOut(deadline, m_oldGen.fromSpace)
- || isBlockListPagedOut(deadline, &m_oldGen.oversizeBlocks)
- || isBlockListPagedOut(deadline, m_newGen.toSpace)
- || isBlockListPagedOut(deadline, m_newGen.fromSpace)
- || isBlockListPagedOut(deadline, &m_newGen.oversizeBlocks);
-}
-
-} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/CopiedSpace.h b/Source/JavaScriptCore/heap/CopiedSpace.h
deleted file mode 100644
index c0a59a27e..000000000
--- a/Source/JavaScriptCore/heap/CopiedSpace.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef CopiedSpace_h
-#define CopiedSpace_h
-
-#include "CopiedAllocator.h"
-#include "HeapBlock.h"
-#include "HeapOperation.h"
-#include "TinyBloomFilter.h"
-#include <wtf/Assertions.h>
-#include <wtf/CheckedBoolean.h>
-#include <wtf/DoublyLinkedList.h>
-#include <wtf/HashSet.h>
-#include <wtf/OSAllocator.h>
-#include <wtf/PageAllocationAligned.h>
-#include <wtf/PageBlock.h>
-#include <wtf/StdLibExtras.h>
-#include <wtf/TCSpinLock.h>
-#include <wtf/ThreadingPrimitives.h>
-
-namespace JSC {
-
-class Heap;
-class CopiedBlock;
-
-class CopiedSpace {
- friend class CopyVisitor;
- friend class GCThreadSharedData;
- friend class SlotVisitor;
- friend class JIT;
-public:
- CopiedSpace(Heap*);
- ~CopiedSpace();
- void init();
-
- CheckedBoolean tryAllocate(size_t, void**);
- CheckedBoolean tryReallocate(void**, size_t, size_t);
-
- CopiedAllocator& allocator() { return m_allocator; }
-
- void didStartFullCollection();
-
- template <HeapOperation collectionType>
- void startedCopying();
- void startedEdenCopy();
- void startedFullCopy();
- void doneCopying();
- bool isInCopyPhase() { return m_inCopyingPhase; }
-
- void pin(CopiedBlock*);
- bool isPinned(void*);
-
- bool contains(CopiedBlock*);
- bool contains(void*, CopiedBlock*&);
-
- void pinIfNecessary(void* pointer);
-
- size_t size();
- size_t capacity();
-
- bool isPagedOut(double deadline);
- bool shouldDoCopyPhase() { return m_shouldDoCopyPhase; }
-
- static CopiedBlock* blockFor(void*);
-
- Heap* heap() const { return m_heap; }
-
-private:
- static bool isOversize(size_t);
-
- JS_EXPORT_PRIVATE CheckedBoolean tryAllocateSlowCase(size_t, void**);
- CheckedBoolean tryAllocateOversize(size_t, void**);
- CheckedBoolean tryReallocateOversize(void**, size_t, size_t);
-
- void allocateBlock();
- CopiedBlock* allocateBlockForCopyingPhase();
-
- void doneFillingBlock(CopiedBlock*, CopiedBlock**);
- void recycleEvacuatedBlock(CopiedBlock*, HeapOperation collectionType);
- void recycleBorrowedBlock(CopiedBlock*);
-
- Heap* m_heap;
-
- CopiedAllocator m_allocator;
-
- HashSet<CopiedBlock*> m_blockSet;
-
- SpinLock m_toSpaceLock;
-
- struct CopiedGeneration {
- CopiedGeneration()
- : toSpace(0)
- , fromSpace(0)
- {
- }
-
- DoublyLinkedList<CopiedBlock>* toSpace;
- DoublyLinkedList<CopiedBlock>* fromSpace;
-
- DoublyLinkedList<CopiedBlock> blocks1;
- DoublyLinkedList<CopiedBlock> blocks2;
- DoublyLinkedList<CopiedBlock> oversizeBlocks;
-
- TinyBloomFilter blockFilter;
- };
-
- CopiedGeneration m_oldGen;
- CopiedGeneration m_newGen;
-
- bool m_inCopyingPhase;
- bool m_shouldDoCopyPhase;
-
- Mutex m_loanedBlocksLock;
- ThreadCondition m_loanedBlocksCondition;
- size_t m_numberOfLoanedBlocks;
-
- static const size_t s_maxAllocationSize = CopiedBlock::blockSize / 2;
- static const size_t s_initialBlockNum = 16;
- static const size_t s_blockMask = ~(CopiedBlock::blockSize - 1);
-};
-
-} // namespace JSC
-
-#endif
diff --git a/Source/JavaScriptCore/heap/CopiedSpaceInlines.h b/Source/JavaScriptCore/heap/CopiedSpaceInlines.h
deleted file mode 100644
index ec33f582f..000000000
--- a/Source/JavaScriptCore/heap/CopiedSpaceInlines.h
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef CopiedSpaceInlines_h
-#define CopiedSpaceInlines_h
-
-#include "CopiedBlock.h"
-#include "CopiedSpace.h"
-#include "Heap.h"
-#include "HeapBlock.h"
-#include "VM.h"
-#include <wtf/CheckedBoolean.h>
-
-namespace JSC {
-
-inline bool CopiedSpace::contains(CopiedBlock* block)
-{
- return (!m_newGen.blockFilter.ruleOut(reinterpret_cast<Bits>(block)) || !m_oldGen.blockFilter.ruleOut(reinterpret_cast<Bits>(block)))
- && m_blockSet.contains(block);
-}
-
-inline bool CopiedSpace::contains(void* ptr, CopiedBlock*& result)
-{
- CopiedBlock* block = blockFor(ptr);
- if (contains(block)) {
- result = block;
- return true;
- }
- result = 0;
- return false;
-}
-
-inline void CopiedSpace::pin(CopiedBlock* block)
-{
- block->pin();
-}
-
-inline void CopiedSpace::pinIfNecessary(void* opaquePointer)
-{
- // Pointers into the copied space come in the following varieties:
- // 1) Pointers to the start of a span of memory. This is the most
- // natural though not necessarily the most common.
- // 2) Pointers to one value-sized (8 byte) word past the end of
- // a span of memory. This currently occurs with semi-butterflies
- // and should be fixed soon, once the other half of the
- // butterfly lands.
- // 3) Pointers to the innards arising from loop induction variable
- // optimizations (either manual ones or automatic, by the
- // compiler).
- // 4) Pointers to the end of a span of memory in arising from
- // induction variable optimizations combined with the
- // GC-to-compiler contract laid out in the C spec: a pointer to
- // the end of a span of memory must be considered to be a
- // pointer to that memory.
-
- EncodedJSValue* pointer = reinterpret_cast<EncodedJSValue*>(opaquePointer);
- CopiedBlock* block;
-
- // Handle (1) and (3).
- if (contains(pointer, block))
- pin(block);
-
- // Handle (4). We don't have to explicitly check and pin the block under this
- // pointer because it cannot possibly point to something that cases (1) and
- // (3) above or case (2) below wouldn't already catch.
- pointer--;
-
- // Handle (2)
- pointer--;
- if (contains(pointer, block))
- pin(block);
-}
-
-inline void CopiedSpace::recycleEvacuatedBlock(CopiedBlock* block, HeapOperation collectionType)
-{
- ASSERT(block);
- ASSERT(block->canBeRecycled());
- ASSERT(!block->m_isPinned);
- {
- SpinLockHolder locker(&m_toSpaceLock);
- m_blockSet.remove(block);
- if (collectionType == EdenCollection)
- m_newGen.fromSpace->remove(block);
- else
- m_oldGen.fromSpace->remove(block);
- }
- m_heap->blockAllocator().deallocate(CopiedBlock::destroy(block));
-}
-
-inline void CopiedSpace::recycleBorrowedBlock(CopiedBlock* block)
-{
- m_heap->blockAllocator().deallocate(CopiedBlock::destroy(block));
-
- {
- MutexLocker locker(m_loanedBlocksLock);
- ASSERT(m_numberOfLoanedBlocks > 0);
- ASSERT(m_inCopyingPhase);
- m_numberOfLoanedBlocks--;
- if (!m_numberOfLoanedBlocks)
- m_loanedBlocksCondition.signal();
- }
-}
-
-inline CopiedBlock* CopiedSpace::allocateBlockForCopyingPhase()
-{
- ASSERT(m_inCopyingPhase);
- CopiedBlock* block = CopiedBlock::createNoZeroFill(m_heap->blockAllocator().allocate<CopiedBlock>());
-
- {
- MutexLocker locker(m_loanedBlocksLock);
- m_numberOfLoanedBlocks++;
- }
-
- ASSERT(!block->dataSize());
- return block;
-}
-
-inline void CopiedSpace::allocateBlock()
-{
- m_heap->collectIfNecessaryOrDefer();
-
- m_allocator.resetCurrentBlock();
-
- CopiedBlock* block = CopiedBlock::create(m_heap->blockAllocator().allocate<CopiedBlock>());
-
- m_newGen.toSpace->push(block);
- m_newGen.blockFilter.add(reinterpret_cast<Bits>(block));
- m_blockSet.add(block);
- m_allocator.setCurrentBlock(block);
-}
-
-inline CheckedBoolean CopiedSpace::tryAllocate(size_t bytes, void** outPtr)
-{
- ASSERT(!m_heap->vm()->isInitializingObject());
- ASSERT(bytes);
-
- if (!m_allocator.tryAllocate(bytes, outPtr))
- return tryAllocateSlowCase(bytes, outPtr);
-
- ASSERT(*outPtr);
- return true;
-}
-
-inline bool CopiedSpace::isOversize(size_t bytes)
-{
- return bytes > s_maxAllocationSize;
-}
-
-inline bool CopiedSpace::isPinned(void* ptr)
-{
- return blockFor(ptr)->m_isPinned;
-}
-
-inline CopiedBlock* CopiedSpace::blockFor(void* ptr)
-{
- return reinterpret_cast<CopiedBlock*>(reinterpret_cast<size_t>(ptr) & s_blockMask);
-}
-
-template <HeapOperation collectionType>
-inline void CopiedSpace::startedCopying()
-{
- DoublyLinkedList<CopiedBlock>* fromSpace;
- DoublyLinkedList<CopiedBlock>* oversizeBlocks;
- TinyBloomFilter* blockFilter;
- if (collectionType == FullCollection) {
- ASSERT(m_oldGen.fromSpace->isEmpty());
- ASSERT(m_newGen.fromSpace->isEmpty());
-
- m_oldGen.toSpace->append(*m_newGen.toSpace);
- m_oldGen.oversizeBlocks.append(m_newGen.oversizeBlocks);
-
- ASSERT(m_newGen.toSpace->isEmpty());
- ASSERT(m_newGen.fromSpace->isEmpty());
- ASSERT(m_newGen.oversizeBlocks.isEmpty());
-
- std::swap(m_oldGen.fromSpace, m_oldGen.toSpace);
- fromSpace = m_oldGen.fromSpace;
- oversizeBlocks = &m_oldGen.oversizeBlocks;
- blockFilter = &m_oldGen.blockFilter;
- } else {
- std::swap(m_newGen.fromSpace, m_newGen.toSpace);
- fromSpace = m_newGen.fromSpace;
- oversizeBlocks = &m_newGen.oversizeBlocks;
- blockFilter = &m_newGen.blockFilter;
- }
-
- blockFilter->reset();
- m_allocator.resetCurrentBlock();
-
- CopiedBlock* next = 0;
- size_t totalLiveBytes = 0;
- size_t totalUsableBytes = 0;
- for (CopiedBlock* block = fromSpace->head(); block; block = next) {
- next = block->next();
- if (!block->isPinned() && block->canBeRecycled()) {
- recycleEvacuatedBlock(block, collectionType);
- continue;
- }
- ASSERT(block->liveBytes() <= CopiedBlock::blockSize);
- totalLiveBytes += block->liveBytes();
- totalUsableBytes += block->payloadCapacity();
- block->didPromote();
- }
-
- CopiedBlock* block = oversizeBlocks->head();
- while (block) {
- CopiedBlock* next = block->next();
- if (block->isPinned()) {
- blockFilter->add(reinterpret_cast<Bits>(block));
- totalLiveBytes += block->payloadCapacity();
- totalUsableBytes += block->payloadCapacity();
- block->didPromote();
- } else {
- oversizeBlocks->remove(block);
- m_blockSet.remove(block);
- m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(block));
- }
- block = next;
- }
-
- double markedSpaceBytes = m_heap->objectSpace().capacity();
- double totalFragmentation = static_cast<double>(totalLiveBytes + markedSpaceBytes) / static_cast<double>(totalUsableBytes + markedSpaceBytes);
- m_shouldDoCopyPhase = m_heap->operationInProgress() == EdenCollection || totalFragmentation <= Options::minHeapUtilization();
- if (!m_shouldDoCopyPhase) {
- if (Options::logGC())
- dataLog("Skipped copying, ");
- return;
- }
-
- if (Options::logGC())
- dataLogF("Did copy, ");
- ASSERT(m_shouldDoCopyPhase);
- ASSERT(!m_numberOfLoanedBlocks);
- ASSERT(!m_inCopyingPhase);
- m_inCopyingPhase = true;
-}
-
-} // namespace JSC
-
-#endif // CopiedSpaceInlines_h
-
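To make the heuristic at the end of the deleted startedCopying() above concrete: despite its name, totalFragmentation is a utilization ratio, and a full collection runs the copy phase only when that ratio has dropped to or below Options::minHeapUtilization() (eden collections always copy). Below is a minimal standalone sketch, with assumed sizes and an assumed threshold of 0.8; none of these numbers come from this patch.

    #include <cstdio>

    int main()
    {
        constexpr double MB = 1024.0 * 1024.0;
        double totalLiveBytes = 8 * MB;    // bytes still live in the surviving from-space blocks
        double totalUsableBytes = 32 * MB; // payload capacity of those same blocks
        double markedSpaceBytes = 64 * MB; // stands in for objectSpace().capacity()

        double totalFragmentation = (totalLiveBytes + markedSpaceBytes)
            / (totalUsableBytes + markedSpaceBytes); // (8 + 64) / (32 + 64) = 0.75

        bool shouldDoCopyPhase = totalFragmentation <= 0.8; // assumed minHeapUtilization
        std::printf("utilization %.2f -> copy phase: %s\n",
            totalFragmentation, shouldDoCopyPhase ? "yes" : "no");
        return 0;
    }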
diff --git a/Source/JavaScriptCore/heap/CopyVisitorInlines.h b/Source/JavaScriptCore/heap/CopyVisitorInlines.h
deleted file mode 100644
index 6e197fca4..000000000
--- a/Source/JavaScriptCore/heap/CopyVisitorInlines.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef CopyVisitorInlines_h
-#define CopyVisitorInlines_h
-
-#include "ClassInfo.h"
-#include "CopyVisitor.h"
-#include "GCThreadSharedData.h"
-#include "JSCell.h"
-#include "JSDestructibleObject.h"
-
-namespace JSC {
-
-inline void CopyVisitor::visitItem(CopyWorklistItem item)
-{
- if (item.token() == ButterflyCopyToken) {
- JSObject::copyBackingStore(item.cell(), *this, ButterflyCopyToken);
- return;
- }
-
- item.cell()->methodTable()->copyBackingStore(item.cell(), *this, item.token());
-}
-
-inline bool CopyVisitor::checkIfShouldCopy(void* oldPtr)
-{
- CopiedBlock* block = CopiedSpace::blockFor(oldPtr);
- if (block->isOversize() || block->isPinned())
- return false;
- return true;
-}
-
-inline void* CopyVisitor::allocateNewSpace(size_t bytes)
-{
- void* result = 0; // Compilers don't realize that this will be assigned.
- if (LIKELY(m_copiedAllocator.tryAllocateDuringCopying(bytes, &result)))
- return result;
-
- result = allocateNewSpaceSlow(bytes);
- ASSERT(result);
- return result;
-}
-
-inline void* CopyVisitor::allocateNewSpaceSlow(size_t bytes)
-{
- CopiedBlock* newBlock = 0;
- m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock(), &newBlock);
- m_copiedAllocator.setCurrentBlock(newBlock);
-
- void* result = 0;
- CheckedBoolean didSucceed = m_copiedAllocator.tryAllocateDuringCopying(bytes, &result);
- ASSERT(didSucceed);
- return result;
-}
-
-inline void CopyVisitor::startCopying()
-{
- ASSERT(!m_copiedAllocator.isValid());
- CopiedBlock* block = 0;
- m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock(), &block);
- m_copiedAllocator.setCurrentBlock(block);
-}
-
-inline void CopyVisitor::doneCopying()
-{
- if (!m_copiedAllocator.isValid())
- return;
-
- m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock(), 0);
-}
-
-inline void CopyVisitor::didCopy(void* ptr, size_t bytes)
-{
- CopiedBlock* block = CopiedSpace::blockFor(ptr);
- ASSERT(!block->isOversize());
- ASSERT(!block->isPinned());
-
- block->didEvacuateBytes(bytes);
-}
-
-} // namespace JSC
-
-#endif // CopyVisitorInlines_h
-
diff --git a/Source/JavaScriptCore/heap/CopyWorkList.h b/Source/JavaScriptCore/heap/CopyWorkList.h
deleted file mode 100644
index c79063b97..000000000
--- a/Source/JavaScriptCore/heap/CopyWorkList.h
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef CopyWorkList_h
-#define CopyWorkList_h
-
-#include "CopyToken.h"
-#include <wtf/Vector.h>
-
-namespace JSC {
-
-class JSCell;
-
-class CopyWorklistItem {
-public:
- CopyWorklistItem()
- : m_value(0)
- {
- }
-
- CopyWorklistItem(JSCell* cell, CopyToken token)
- : m_value(bitwise_cast<uintptr_t>(cell) | static_cast<uintptr_t>(token))
- {
- ASSERT(!(bitwise_cast<uintptr_t>(cell) & static_cast<uintptr_t>(mask)));
- ASSERT(static_cast<uintptr_t>(token) <= mask);
- }
-
- JSCell* cell() const { return bitwise_cast<JSCell*>(m_value & ~static_cast<uintptr_t>(mask)); }
- CopyToken token() const { return static_cast<CopyToken>(m_value & mask); }
-
-private:
- static const unsigned requiredAlignment = 8;
- static const unsigned mask = requiredAlignment - 1;
-
- uintptr_t m_value;
-};
-
-class CopyWorkListSegment : public HeapBlock<CopyWorkListSegment> {
-public:
- static CopyWorkListSegment* create(DeadBlock* block)
- {
- return new (NotNull, block) CopyWorkListSegment(block->region());
- }
-
- size_t size() { return m_size; }
- bool isFull() { return reinterpret_cast<char*>(&data()[size()]) >= endOfBlock(); }
- CopyWorklistItem get(size_t index) { return data()[index]; }
-
- void append(CopyWorklistItem item)
- {
- ASSERT(!isFull());
- data()[m_size] = item;
- m_size += 1;
- }
-
- static const size_t blockSize = 512;
-
-private:
- CopyWorkListSegment(Region* region)
- : HeapBlock<CopyWorkListSegment>(region)
- , m_size(0)
- {
- }
-
- CopyWorklistItem* data() { return reinterpret_cast<CopyWorklistItem*>(this + 1); }
- char* endOfBlock() { return reinterpret_cast<char*>(this) + blockSize; }
-
- size_t m_size;
-};
-
-class CopyWorkListIterator {
- friend class CopyWorkList;
-public:
- CopyWorklistItem get() { return m_currentSegment->get(m_currentIndex); }
- CopyWorklistItem operator*() { return get(); }
- CopyWorklistItem operator->() { return get(); }
-
- CopyWorkListIterator& operator++()
- {
- m_currentIndex++;
-
- if (m_currentIndex >= m_currentSegment->size()) {
- m_currentIndex = 0;
- m_currentSegment = m_currentSegment->next();
-
- ASSERT(!m_currentSegment || m_currentSegment->size());
- }
-
- return *this;
- }
-
- bool operator==(const CopyWorkListIterator& other) const
- {
- return m_currentSegment == other.m_currentSegment && m_currentIndex == other.m_currentIndex;
- }
-
- bool operator!=(const CopyWorkListIterator& other) const
- {
- return !(*this == other);
- }
-
- CopyWorkListIterator()
- : m_currentSegment(0)
- , m_currentIndex(0)
- {
- }
-
-private:
- CopyWorkListIterator(CopyWorkListSegment* startSegment, size_t startIndex)
- : m_currentSegment(startSegment)
- , m_currentIndex(startIndex)
- {
- }
-
- CopyWorkListSegment* m_currentSegment;
- size_t m_currentIndex;
-};
-
-class CopyWorkList {
-public:
- typedef CopyWorkListIterator iterator;
-
- CopyWorkList(BlockAllocator&);
- ~CopyWorkList();
-
- void append(CopyWorklistItem);
- iterator begin();
- iterator end();
-
-private:
- DoublyLinkedList<CopyWorkListSegment> m_segments;
- BlockAllocator& m_blockAllocator;
-};
-
-inline CopyWorkList::CopyWorkList(BlockAllocator& blockAllocator)
- : m_blockAllocator(blockAllocator)
-{
-}
-
-inline CopyWorkList::~CopyWorkList()
-{
- while (!m_segments.isEmpty())
- m_blockAllocator.deallocate(CopyWorkListSegment::destroy(m_segments.removeHead()));
-}
-
-inline void CopyWorkList::append(CopyWorklistItem item)
-{
- if (m_segments.isEmpty() || m_segments.tail()->isFull())
- m_segments.append(CopyWorkListSegment::create(m_blockAllocator.allocate<CopyWorkListSegment>()));
-
- ASSERT(!m_segments.tail()->isFull());
-
- m_segments.tail()->append(item);
-}
-
-inline CopyWorkList::iterator CopyWorkList::begin()
-{
- return CopyWorkListIterator(m_segments.head(), 0);
-}
-
-inline CopyWorkList::iterator CopyWorkList::end()
-{
- return CopyWorkListIterator();
-}
-
-} // namespace JSC
-
-#endif // CopyWorkList_h
diff --git a/Source/JavaScriptCore/heap/DeferGC.cpp b/Source/JavaScriptCore/heap/DeferGC.cpp
index 72b3dc934..dd66c6384 100644
--- a/Source/JavaScriptCore/heap/DeferGC.cpp
+++ b/Source/JavaScriptCore/heap/DeferGC.cpp
@@ -26,6 +26,8 @@
#include "config.h"
#include "DeferGC.h"
+#include "JSCInlines.h"
+
namespace JSC {
#ifndef NDEBUG
diff --git a/Source/JavaScriptCore/heap/DeferGC.h b/Source/JavaScriptCore/heap/DeferGC.h
index d29eec854..2c0336ea4 100644
--- a/Source/JavaScriptCore/heap/DeferGC.h
+++ b/Source/JavaScriptCore/heap/DeferGC.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DeferGC_h
-#define DeferGC_h
+#pragma once
#include "Heap.h"
#include <wtf/Noncopyable.h>
@@ -68,34 +67,41 @@ private:
Heap& m_heap;
};
-#ifndef NDEBUG
class DisallowGC {
WTF_MAKE_NONCOPYABLE(DisallowGC);
public:
DisallowGC()
{
+#ifndef NDEBUG
WTF::threadSpecificSet(s_isGCDisallowedOnCurrentThread, reinterpret_cast<void*>(true));
+#endif
}
~DisallowGC()
{
+#ifndef NDEBUG
WTF::threadSpecificSet(s_isGCDisallowedOnCurrentThread, reinterpret_cast<void*>(false));
+#endif
}
static bool isGCDisallowedOnCurrentThread()
{
+#ifndef NDEBUG
return !!WTF::threadSpecificGet(s_isGCDisallowedOnCurrentThread);
+#else
+ return false;
+#endif
}
static void initialize()
{
+#ifndef NDEBUG
WTF::threadSpecificKeyCreate(&s_isGCDisallowedOnCurrentThread, 0);
+#endif
}
+#ifndef NDEBUG
JS_EXPORT_PRIVATE static WTF::ThreadSpecificKey s_isGCDisallowedOnCurrentThread;
+#endif
};
-#endif // NDEBUG
} // namespace JSC
-
-#endif // DeferGC_h
-
diff --git a/Source/JavaScriptCore/heap/DelayedReleaseScope.h b/Source/JavaScriptCore/heap/DelayedReleaseScope.h
deleted file mode 100644
index bbb4724b9..000000000
--- a/Source/JavaScriptCore/heap/DelayedReleaseScope.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DelayedReleaseScope_h
-#define DelayedReleaseScope_h
-
-#include "APIShims.h"
-#include "MarkedSpace.h"
-
-namespace JSC {
-
-#if USE(CF)
-
-class DelayedReleaseScope {
-public:
- DelayedReleaseScope(MarkedSpace& markedSpace)
- : m_markedSpace(markedSpace)
- {
- ASSERT(!m_markedSpace.m_currentDelayedReleaseScope);
- m_markedSpace.m_currentDelayedReleaseScope = this;
- }
-
- ~DelayedReleaseScope()
- {
- ASSERT(m_markedSpace.m_currentDelayedReleaseScope == this);
- m_markedSpace.m_currentDelayedReleaseScope = nullptr;
-
- HeapOperation operationInProgress = NoOperation;
- std::swap(operationInProgress, m_markedSpace.m_heap->m_operationInProgress);
-
- APICallbackShim callbackShim(*m_markedSpace.m_heap->vm());
- m_delayedReleaseObjects.clear();
-
- std::swap(operationInProgress, m_markedSpace.m_heap->m_operationInProgress);
- }
-
- template <typename T>
- void releaseSoon(RetainPtr<T>&& object)
- {
- m_delayedReleaseObjects.append(std::move(object));
- }
-
- static bool isInEffectFor(MarkedSpace& markedSpace)
- {
- return markedSpace.m_currentDelayedReleaseScope;
- }
-
-private:
- MarkedSpace& m_markedSpace;
- Vector<RetainPtr<CFTypeRef>> m_delayedReleaseObjects;
-};
-
-template <typename T>
-inline void MarkedSpace::releaseSoon(RetainPtr<T>&& object)
-{
- ASSERT(m_currentDelayedReleaseScope);
- m_currentDelayedReleaseScope->releaseSoon(std::move(object));
-}
-
-#else // USE(CF)
-
-class DelayedReleaseScope {
-public:
- DelayedReleaseScope(MarkedSpace&)
- {
- }
-
- static bool isInEffectFor(MarkedSpace&)
- {
- return true;
- }
-};
-
-#endif // USE(CF)
-
-} // namespace JSC
-
-#endif // DelayedReleaseScope_h
diff --git a/Source/JavaScriptCore/heap/DeleteAllCodeEffort.h b/Source/JavaScriptCore/heap/DeleteAllCodeEffort.h
new file mode 100644
index 000000000..cb854aead
--- /dev/null
+++ b/Source/JavaScriptCore/heap/DeleteAllCodeEffort.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace JSC {
+
+enum DeleteAllCodeEffort {
+ PreventCollectionAndDeleteAllCode,
+ DeleteAllCodeIfNotCollecting
+};
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/DestructionMode.cpp b/Source/JavaScriptCore/heap/DestructionMode.cpp
new file mode 100644
index 000000000..bf7d93a86
--- /dev/null
+++ b/Source/JavaScriptCore/heap/DestructionMode.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DestructionMode.h"
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, DestructionMode mode)
+{
+ switch (mode) {
+ case NeedsDestruction:
+ out.print("NeedsDestruction");
+ return;
+ case DoesNotNeedDestruction:
+ out.print("DoesNotNeedDestruction");
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/heap/DestructionMode.h b/Source/JavaScriptCore/heap/DestructionMode.h
new file mode 100644
index 000000000..08d21bccb
--- /dev/null
+++ b/Source/JavaScriptCore/heap/DestructionMode.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace JSC {
+
+enum DestructionMode : int8_t {
+ DoesNotNeedDestruction,
+ NeedsDestruction
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::DestructionMode);
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/heap/EdenGCActivityCallback.cpp b/Source/JavaScriptCore/heap/EdenGCActivityCallback.cpp
new file mode 100644
index 000000000..07e6c7a3f
--- /dev/null
+++ b/Source/JavaScriptCore/heap/EdenGCActivityCallback.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2014-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "EdenGCActivityCallback.h"
+#include "HeapInlines.h"
+
+#include "VM.h"
+
+namespace JSC {
+
+#if USE(CF) || USE(GLIB)
+
+EdenGCActivityCallback::EdenGCActivityCallback(Heap* heap)
+ : GCActivityCallback(heap)
+{
+}
+
+void EdenGCActivityCallback::doCollection()
+{
+ m_vm->heap.collectAsync(CollectionScope::Eden);
+}
+
+double EdenGCActivityCallback::lastGCLength()
+{
+ return m_vm->heap.lastEdenGCLength().seconds();
+}
+
+double EdenGCActivityCallback::deathRate()
+{
+ Heap* heap = &m_vm->heap;
+ size_t sizeBefore = heap->sizeBeforeLastEdenCollection();
+ size_t sizeAfter = heap->sizeAfterLastEdenCollection();
+ if (!sizeBefore)
+ return 1.0;
+ if (sizeAfter > sizeBefore) {
+ // GC caused the heap to grow(!)
+ // This could happen if we visited more extra memory than was reported allocated.
+ // We don't return a negative death rate, since that would schedule the next GC in the past.
+ return 0;
+ }
+ return static_cast<double>(sizeBefore - sizeAfter) / static_cast<double>(sizeBefore);
+}
+
+double EdenGCActivityCallback::gcTimeSlice(size_t bytes)
+{
+ return std::min((static_cast<double>(bytes) / MB) * Options::percentCPUPerMBForEdenTimer(), Options::collectionTimerMaxPercentCPU());
+}
+
+#else
+
+EdenGCActivityCallback::EdenGCActivityCallback(Heap* heap)
+ : GCActivityCallback(heap->vm())
+{
+}
+
+void EdenGCActivityCallback::doCollection()
+{
+}
+
+double EdenGCActivityCallback::lastGCLength()
+{
+ return 0;
+}
+
+double EdenGCActivityCallback::deathRate()
+{
+ return 0;
+}
+
+double EdenGCActivityCallback::gcTimeSlice(size_t)
+{
+ return 0;
+}
+
+#endif // USE(CF) || USE(GLIB)
+
+} // namespace JSC
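As a worked example of the death-rate computation above (assumed sizes, not measurements): if sizeBeforeLastEdenCollection were 96 MB and sizeAfterLastEdenCollection were 80 MB, deathRate() would return (96 - 80) / 96 ≈ 0.17, meaning roughly 17% of the bytes died; a shrink to 48 MB would give 0.5. The two guard cases return 1.0 when sizeBefore is zero and 0 when the heap grew across the collection.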
diff --git a/Source/JavaScriptCore/heap/SuperRegion.h b/Source/JavaScriptCore/heap/EdenGCActivityCallback.h
index e21526b7a..cc4e93fcb 100644
--- a/Source/JavaScriptCore/heap/SuperRegion.h
+++ b/Source/JavaScriptCore/heap/EdenGCActivityCallback.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,36 +23,27 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef SuperRegion_h
-#define SuperRegion_h
+#pragma once
-#include <wtf/MetaAllocator.h>
-#include <wtf/PageBlock.h>
-#include <wtf/PageReservation.h>
+#include "GCActivityCallback.h"
namespace JSC {
-class VM;
-
-class SuperRegion : public WTF::MetaAllocator {
+class JS_EXPORT_PRIVATE EdenGCActivityCallback : public GCActivityCallback {
public:
- SuperRegion();
- virtual ~SuperRegion();
-
-protected:
- virtual void* allocateNewSpace(size_t&) override;
- virtual void notifyNeedPage(void*) override;
- virtual void notifyPageIsFree(void*) override;
-
-private:
- static const uint64_t s_fixedHeapMemoryPoolSize;
+ EdenGCActivityCallback(Heap*);
- static void* getAlignedBase(PageReservation&);
+ void doCollection() override;
- PageReservation m_reservation;
- void* m_reservationBase;
+protected:
+ double lastGCLength() override;
+ double gcTimeSlice(size_t bytes) override;
+ double deathRate() override;
};
-} // namespace JSC
+inline RefPtr<GCActivityCallback> GCActivityCallback::createEdenTimer(Heap* heap)
+{
+ return s_shouldCreateGCTimer ? adoptRef(new EdenGCActivityCallback(heap)) : nullptr;
+}
-#endif // SuperRegion_h
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/FreeList.cpp b/Source/JavaScriptCore/heap/FreeList.cpp
new file mode 100644
index 000000000..43bc7aebe
--- /dev/null
+++ b/Source/JavaScriptCore/heap/FreeList.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FreeList.h"
+
+namespace JSC {
+
+void FreeList::dump(PrintStream& out) const
+{
+ out.print("{head = ", RawPointer(head), ", payloadEnd = ", RawPointer(payloadEnd), ", remaining = ", remaining, ", originalSize = ", originalSize, "}");
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/FreeList.h b/Source/JavaScriptCore/heap/FreeList.h
new file mode 100644
index 000000000..842caa6f6
--- /dev/null
+++ b/Source/JavaScriptCore/heap/FreeList.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+struct FreeCell {
+ FreeCell* next;
+};
+
+// This representation of a FreeList is convenient for the MarkedAllocator.
+
+struct FreeList {
+ FreeCell* head { nullptr };
+ char* payloadEnd { nullptr };
+ unsigned remaining { 0 };
+ unsigned originalSize { 0 };
+
+ FreeList()
+ {
+ }
+
+ static FreeList list(FreeCell* head, unsigned bytes)
+ {
+ FreeList result;
+ result.head = head;
+ result.remaining = 0;
+ result.originalSize = bytes;
+ return result;
+ }
+
+ static FreeList bump(char* payloadEnd, unsigned remaining)
+ {
+ FreeList result;
+ result.payloadEnd = payloadEnd;
+ result.remaining = remaining;
+ result.originalSize = remaining;
+ return result;
+ }
+
+ bool operator==(const FreeList& other) const
+ {
+ return head == other.head
+ && payloadEnd == other.payloadEnd
+ && remaining == other.remaining
+ && originalSize == other.originalSize;
+ }
+
+ bool operator!=(const FreeList& other) const
+ {
+ return !(*this == other);
+ }
+
+ explicit operator bool() const
+ {
+ return *this != FreeList();
+ }
+
+ bool allocationWillFail() const { return !head && !remaining; }
+ bool allocationWillSucceed() const { return !allocationWillFail(); }
+
+ void dump(PrintStream&) const;
+};
+
+} // namespace JSC
+
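To make the head/payloadEnd/remaining split concrete, here is a hedged sketch (not the actual MarkedAllocator code) of how an allocator could consume such a free list: the bump path carves cells out of the `remaining` bytes that end at payloadEnd, counting `remaining` down as it goes, and the list path pops cells off `head`. The struct is restated so the sketch stands alone.

    struct FreeCell { FreeCell* next; };

    struct FreeList {
        FreeCell* head { nullptr };
        char* payloadEnd { nullptr };
        unsigned remaining { 0 };
        unsigned originalSize { 0 };
    };

    // Assumes cellSize evenly divides the bump region, as a real allocator would arrange.
    inline void* tryAllocateFrom(FreeList& freeList, unsigned cellSize)
    {
        if (unsigned remaining = freeList.remaining) {
            // Bump path: hand out the lowest unused cell in the region ending at payloadEnd.
            freeList.remaining = remaining - cellSize;
            return freeList.payloadEnd - remaining;
        }
        if (FreeCell* cell = freeList.head) {
            // List path: pop the next interior free cell.
            freeList.head = cell->next;
            return cell;
        }
        return nullptr; // Matches allocationWillFail(): no head and no remaining bytes.
    }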
diff --git a/Source/JavaScriptCore/heap/FullGCActivityCallback.cpp b/Source/JavaScriptCore/heap/FullGCActivityCallback.cpp
new file mode 100644
index 000000000..e57dd3a1c
--- /dev/null
+++ b/Source/JavaScriptCore/heap/FullGCActivityCallback.cpp
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2014-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FullGCActivityCallback.h"
+
+#include "VM.h"
+
+namespace JSC {
+
+#if USE(CF) || USE(GLIB)
+
+#if !PLATFORM(IOS)
+const double pagingTimeOut = 0.1; // Time in seconds to allow opportunistic timer to iterate over all blocks to see if the Heap is paged out.
+#endif
+
+FullGCActivityCallback::FullGCActivityCallback(Heap* heap)
+ : GCActivityCallback(heap)
+{
+}
+
+void FullGCActivityCallback::doCollection()
+{
+ Heap& heap = m_vm->heap;
+ m_didSyncGCRecently = false;
+
+#if !PLATFORM(IOS)
+ double startTime = WTF::monotonicallyIncreasingTime();
+ if (heap.isPagedOut(startTime + pagingTimeOut)) {
+ cancel();
+ heap.increaseLastFullGCLength(Seconds(pagingTimeOut));
+ return;
+ }
+#endif
+
+ heap.collectAsync(CollectionScope::Full);
+}
+
+double FullGCActivityCallback::lastGCLength()
+{
+ return m_vm->heap.lastFullGCLength().seconds();
+}
+
+double FullGCActivityCallback::deathRate()
+{
+ Heap* heap = &m_vm->heap;
+ size_t sizeBefore = heap->sizeBeforeLastFullCollection();
+ size_t sizeAfter = heap->sizeAfterLastFullCollection();
+ if (!sizeBefore)
+ return 1.0;
+ if (sizeAfter > sizeBefore) {
+ // GC caused the heap to grow(!)
+ // This could happen if we visited more extra memory than was reported allocated.
+ // We don't return a negative death rate, since that would schedule the next GC in the past.
+ return 0;
+ }
+ return static_cast<double>(sizeBefore - sizeAfter) / static_cast<double>(sizeBefore);
+}
+
+double FullGCActivityCallback::gcTimeSlice(size_t bytes)
+{
+ return std::min((static_cast<double>(bytes) / MB) * Options::percentCPUPerMBForFullTimer(), Options::collectionTimerMaxPercentCPU());
+}
+
+#else
+
+FullGCActivityCallback::FullGCActivityCallback(Heap* heap)
+ : GCActivityCallback(heap)
+{
+}
+
+void FullGCActivityCallback::doCollection()
+{
+}
+
+double FullGCActivityCallback::lastGCLength()
+{
+ return 0;
+}
+
+double FullGCActivityCallback::deathRate()
+{
+ return 0;
+}
+
+double FullGCActivityCallback::gcTimeSlice(size_t)
+{
+ return 0;
+}
+
+#endif // USE(CF) || USE(GLIB)
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/FullGCActivityCallback.h b/Source/JavaScriptCore/heap/FullGCActivityCallback.h
new file mode 100644
index 000000000..5106ee798
--- /dev/null
+++ b/Source/JavaScriptCore/heap/FullGCActivityCallback.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "GCActivityCallback.h"
+
+namespace JSC {
+
+class JS_EXPORT_PRIVATE FullGCActivityCallback : public GCActivityCallback {
+public:
+ FullGCActivityCallback(Heap*);
+
+ void doCollection() override;
+
+ bool didSyncGCRecently() const { return m_didSyncGCRecently; }
+ void setDidSyncGCRecently() { m_didSyncGCRecently = true; }
+
+protected:
+ double lastGCLength() override;
+ double gcTimeSlice(size_t bytes) override;
+ double deathRate() override;
+
+ bool m_didSyncGCRecently { false };
+};
+
+inline RefPtr<FullGCActivityCallback> GCActivityCallback::createFullTimer(Heap* heap)
+{
+ return s_shouldCreateGCTimer ? adoptRef(new FullGCActivityCallback(heap)) : nullptr;
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/GCActivityCallback.cpp b/Source/JavaScriptCore/heap/GCActivityCallback.cpp
new file mode 100644
index 000000000..380d91c7c
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCActivityCallback.cpp
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2010-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "GCActivityCallback.h"
+
+#include "HeapInlines.h"
+#include "JSLock.h"
+#include "JSObject.h"
+#include "VM.h"
+
+#if USE(GLIB)
+#include <glib.h>
+#endif
+
+namespace JSC {
+
+bool GCActivityCallback::s_shouldCreateGCTimer = true;
+
+#if USE(CF) || USE(GLIB)
+
+const double timerSlop = 2.0; // Fudge factor to avoid performance cost of resetting timer.
+
+#if USE(CF)
+GCActivityCallback::GCActivityCallback(Heap* heap)
+ : GCActivityCallback(heap->vm())
+{
+}
+#elif USE(GLIB)
+GCActivityCallback::GCActivityCallback(Heap* heap)
+ : GCActivityCallback(heap->vm())
+{
+ g_source_set_ready_time(m_timer.get(), g_get_monotonic_time() + s_decade * G_USEC_PER_SEC);
+}
+#endif
+
+void GCActivityCallback::doWork()
+{
+ Heap* heap = &m_vm->heap;
+ if (!isEnabled())
+ return;
+
+ JSLockHolder locker(m_vm);
+ if (heap->isDeferred()) {
+ scheduleTimer(0);
+ return;
+ }
+
+ doCollection();
+}
+
+#if USE(CF)
+void GCActivityCallback::scheduleTimer(double newDelay)
+{
+ if (newDelay * timerSlop > m_delay)
+ return;
+ double delta = m_delay - newDelay;
+ m_delay = newDelay;
+ m_nextFireTime = WTF::currentTime() + newDelay;
+ CFRunLoopTimerSetNextFireDate(m_timer.get(), CFRunLoopTimerGetNextFireDate(m_timer.get()) - delta);
+}
+
+void GCActivityCallback::cancelTimer()
+{
+ m_delay = s_decade;
+ m_nextFireTime = 0;
+ CFRunLoopTimerSetNextFireDate(m_timer.get(), CFAbsoluteTimeGetCurrent() + s_decade);
+}
+#elif USE(GLIB)
+void GCActivityCallback::scheduleTimer(double newDelay)
+{
+ ASSERT(newDelay >= 0);
+ if (newDelay * timerSlop > m_delay)
+ return;
+
+ double delta = m_delay - newDelay;
+ m_delay = newDelay;
+ m_nextFireTime = WTF::currentTime() + newDelay;
+
+ gint64 readyTime = g_source_get_ready_time(m_timer.get());
+ g_source_set_ready_time(m_timer.get(), readyTime - delta * G_USEC_PER_SEC);
+}
+
+void GCActivityCallback::cancelTimer()
+{
+ m_delay = s_decade;
+ m_nextFireTime = 0;
+ g_source_set_ready_time(m_timer.get(), g_get_monotonic_time() + s_decade * G_USEC_PER_SEC);
+}
+#endif
+
+void GCActivityCallback::didAllocate(size_t bytes)
+{
+ // The first byte allocated in an allocation cycle will report 0 bytes to didAllocate.
+ // We pretend it's one byte so that we don't ignore this allocation entirely.
+ if (!bytes)
+ bytes = 1;
+ double bytesExpectedToReclaim = static_cast<double>(bytes) * deathRate();
+ double newDelay = lastGCLength() / gcTimeSlice(bytesExpectedToReclaim);
+ scheduleTimer(newDelay);
+}
+
+void GCActivityCallback::willCollect()
+{
+ cancelTimer();
+}
+
+void GCActivityCallback::cancel()
+{
+ cancelTimer();
+}
+
+#else
+
+GCActivityCallback::GCActivityCallback(Heap* heap)
+ : GCActivityCallback(heap->vm())
+{
+}
+
+void GCActivityCallback::doWork()
+{
+}
+
+void GCActivityCallback::didAllocate(size_t)
+{
+}
+
+void GCActivityCallback::willCollect()
+{
+}
+
+void GCActivityCallback::cancel()
+{
+}
+
+#endif
+
+}
+
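A worked example (all numbers assumed, not Option defaults) of the scheduling arithmetic in didAllocate() above: the next timer-driven collection is placed far enough out that collecting costs roughly the CPU share allowed by gcTimeSlice().

    #include <cstdio>

    int main()
    {
        double bytesAllocated = 16.0 * 1024 * 1024;   // reported to didAllocate since the last GC
        double deathRate = 0.5;                       // assume half of it is expected to be garbage
        double bytesExpectedToReclaim = bytesAllocated * deathRate; // 8 MB
        double gcTimeSlice = 0.02;                    // assume gcTimeSlice(8 MB) allows ~2% of CPU
        double lastGCLength = 0.01;                   // the previous collection took 10 ms
        double newDelay = lastGCLength / gcTimeSlice; // 0.01 / 0.02 = 0.5 s until the next timer fire
        std::printf("expect to reclaim %.0f bytes; schedule the next GC in %.1f s\n",
            bytesExpectedToReclaim, newDelay);
        return 0;
    }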
diff --git a/Source/JavaScriptCore/heap/GCActivityCallback.h b/Source/JavaScriptCore/heap/GCActivityCallback.h
new file mode 100644
index 000000000..ddee2583e
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCActivityCallback.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2010, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "HeapTimer.h"
+#include <wtf/RefPtr.h>
+
+#if USE(CF)
+#include <CoreFoundation/CoreFoundation.h>
+#endif
+
+namespace JSC {
+
+class FullGCActivityCallback;
+class Heap;
+
+class JS_EXPORT_PRIVATE GCActivityCallback : public HeapTimer {
+public:
+ static RefPtr<FullGCActivityCallback> createFullTimer(Heap*);
+ static RefPtr<GCActivityCallback> createEdenTimer(Heap*);
+
+ GCActivityCallback(Heap*);
+
+ void doWork() override;
+
+ virtual void doCollection() = 0;
+
+ virtual void didAllocate(size_t);
+ virtual void willCollect();
+ virtual void cancel();
+ bool isEnabled() const { return m_enabled; }
+ void setEnabled(bool enabled) { m_enabled = enabled; }
+
+ static bool s_shouldCreateGCTimer;
+
+#if USE(CF) || USE(GLIB)
+ double nextFireTime() const { return m_nextFireTime; }
+#endif
+
+protected:
+ virtual double lastGCLength() = 0;
+ virtual double gcTimeSlice(size_t bytes) = 0;
+ virtual double deathRate() = 0;
+
+#if USE(CF)
+ GCActivityCallback(VM* vm)
+ : HeapTimer(vm)
+ , m_enabled(true)
+ , m_delay(s_decade)
+ {
+ }
+#elif USE(GLIB)
+ GCActivityCallback(VM* vm)
+ : HeapTimer(vm)
+ , m_enabled(true)
+ , m_delay(s_decade)
+ {
+ }
+#else
+ GCActivityCallback(VM* vm)
+ : HeapTimer(vm)
+ , m_enabled(true)
+ {
+ }
+#endif
+
+ bool m_enabled;
+
+#if USE(CF) || USE(GLIB)
+protected:
+ void cancelTimer();
+ void scheduleTimer(double);
+
+private:
+ double m_delay;
+ double m_nextFireTime { 0 };
+#endif
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/GCAssertions.h b/Source/JavaScriptCore/heap/GCAssertions.h
index b0676bfee..145a30eb8 100644
--- a/Source/JavaScriptCore/heap/GCAssertions.h
+++ b/Source/JavaScriptCore/heap/GCAssertions.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2015 Igalia S.L.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,8 +24,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef GCAssertions_h
-#define GCAssertions_h
+#pragma once
#include <type_traits>
#include <wtf/Assertions.h>
@@ -32,27 +32,29 @@
#if ENABLE(GC_VALIDATION)
#define ASSERT_GC_OBJECT_LOOKS_VALID(cell) do { \
RELEASE_ASSERT(cell);\
- RELEASE_ASSERT(cell->unvalidatedStructure()->unvalidatedStructure() == cell->unvalidatedStructure()->unvalidatedStructure()->unvalidatedStructure()); \
+ RELEASE_ASSERT(cell->structure()->structure() == cell->structure()->structure()->structure()); \
} while (0)
#define ASSERT_GC_OBJECT_INHERITS(object, classInfo) do {\
ASSERT_GC_OBJECT_LOOKS_VALID(object); \
- RELEASE_ASSERT(object->inherits(classInfo)); \
+ RELEASE_ASSERT(object->inherits(*object->JSC::JSCell::vm(), classInfo)); \
+} while (0)
+
+// Used to avoid triggering -Wundefined-bool-conversion.
+#define ASSERT_THIS_GC_OBJECT_LOOKS_VALID() do { \
+ RELEASE_ASSERT(this->structure()->structure() == this->structure()->structure()->structure()); \
+} while (0)
+
+#define ASSERT_THIS_GC_OBJECT_INHERITS(classInfo) do {\
+ ASSERT_THIS_GC_OBJECT_LOOKS_VALID(); \
+ RELEASE_ASSERT(this->inherits(*this->vm(), classInfo)); \
} while (0)
#else
#define ASSERT_GC_OBJECT_LOOKS_VALID(cell) do { (void)cell; } while (0)
#define ASSERT_GC_OBJECT_INHERITS(object, classInfo) do { (void)object; (void)classInfo; } while (0)
+#define ASSERT_THIS_GC_OBJECT_LOOKS_VALID()
+#define ASSERT_THIS_GC_OBJECT_INHERITS(classInfo) do { (void)classInfo; } while (0)
#endif
-#if COMPILER(CLANG)
#define STATIC_ASSERT_IS_TRIVIALLY_DESTRUCTIBLE(klass) static_assert(std::is_trivially_destructible<klass>::value, #klass " must have a trivial destructor")
-#elif COMPILER(MSVC)
-// An earlier verison of the C++11 spec used to call this type trait std::has_trivial_destructor, and that's what MSVC uses.
-#define STATIC_ASSERT_IS_TRIVIALLY_DESTRUCTIBLE(klass) static_assert(std::has_trivial_destructor<klass>::value, #klass " must have a trivial destructor")
-#else
-// This is not enabled on GCC due to http://gcc.gnu.org/bugzilla/show_bug.cgi?id=52702
-#define STATIC_ASSERT_IS_TRIVIALLY_DESTRUCTIBLE(klass)
-#endif
-
-#endif // GCAssertions_h
diff --git a/Source/JavaScriptCore/heap/GCConductor.cpp b/Source/JavaScriptCore/heap/GCConductor.cpp
new file mode 100644
index 000000000..4d2443671
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCConductor.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "GCConductor.h"
+
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+const char* gcConductorShortName(GCConductor conn)
+{
+ switch (conn) {
+ case GCConductor::Mutator:
+ return "M";
+ case GCConductor::Collector:
+ return "C";
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, GCConductor conn)
+{
+ switch (conn) {
+ case GCConductor::Mutator:
+ out.print("Mutator");
+ return;
+ case GCConductor::Collector:
+ out.print("Collector");
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/heap/GCConductor.h b/Source/JavaScriptCore/heap/GCConductor.h
new file mode 100644
index 000000000..bdc18cade
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCConductor.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace JSC {
+
+// Either the mutator has the conn (https://en.wikipedia.org/wiki/Conn_(nautical)), meaning that the
+// mutator will incrementally drive the collector when it calls into slow paths; or the collector has the
+// conn, meaning that the collector thread will drive the collector.
+enum class GCConductor : uint8_t {
+ Mutator,
+ Collector
+};
+
+const char* gcConductorShortName(GCConductor officer);
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::GCConductor);
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/heap/GCDeferralContext.h b/Source/JavaScriptCore/heap/GCDeferralContext.h
new file mode 100644
index 000000000..b4d151ea3
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCDeferralContext.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace JSC {
+
+class Heap;
+class MarkedAllocator;
+
+class GCDeferralContext {
+ friend class Heap;
+ friend class MarkedAllocator;
+public:
+ inline GCDeferralContext(Heap&);
+ inline ~GCDeferralContext();
+
+private:
+ Heap& m_heap;
+ bool m_shouldGC { false };
+};
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/GCDeferralContextInlines.h b/Source/JavaScriptCore/heap/GCDeferralContextInlines.h
new file mode 100644
index 000000000..cd8c1e7ad
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCDeferralContextInlines.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "GCDeferralContext.h"
+#include "Heap.h"
+
+namespace JSC {
+
+ALWAYS_INLINE GCDeferralContext::GCDeferralContext(Heap& heap)
+ : m_heap(heap)
+{
+}
+
+ALWAYS_INLINE GCDeferralContext::~GCDeferralContext()
+{
+ ASSERT(!DisallowGC::isGCDisallowedOnCurrentThread());
+#if ENABLE(GC_VALIDATION)
+ ASSERT(!m_heap.vm()->isInitializingObject());
+#endif
+ if (UNLIKELY(m_shouldGC))
+ m_heap.collectIfNecessaryOrDefer();
+}
+
+} // namespace JSC
+
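A self-contained toy (not JSC code; the Toy* names are invented) showing the RAII pattern the two files above implement: allocation slow paths flag the context instead of collecting mid-batch, and the destructor performs at most one deferred collection for the whole scope.

    #include <cstdio>

    struct ToyHeap {
        void collectIfNecessaryOrDefer() { std::printf("one deferred collection\n"); }
    };

    struct ToyDeferralContext {
        explicit ToyDeferralContext(ToyHeap& heap) : m_heap(heap) { }
        ~ToyDeferralContext()
        {
            if (m_shouldGC)
                m_heap.collectIfNecessaryOrDefer();
        }
        ToyHeap& m_heap;
        bool m_shouldGC { false };
    };

    int main()
    {
        ToyHeap heap;
        {
            ToyDeferralContext context(heap);
            for (int i = 0; i < 3; ++i) {
                // A real allocator would set context.m_shouldGC = true on its slow
                // path instead of starting a collection in the middle of the batch.
                context.m_shouldGC = true;
            }
        } // ~ToyDeferralContext() runs the single deferred collection here.
        return 0;
    }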
diff --git a/Source/JavaScriptCore/heap/GCIncomingRefCounted.h b/Source/JavaScriptCore/heap/GCIncomingRefCounted.h
index 3854d0a14..6c8b73e98 100644
--- a/Source/JavaScriptCore/heap/GCIncomingRefCounted.h
+++ b/Source/JavaScriptCore/heap/GCIncomingRefCounted.h
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef GCIncomingRefCounted_h
-#define GCIncomingRefCounted_h
+#pragma once
#include <wtf/DeferrableRefCounted.h>
#include <wtf/Vector.h>
@@ -110,6 +109,3 @@ private:
};
} // namespace JSC
-
-#endif // GCIncomingRefCounted_h
-
diff --git a/Source/JavaScriptCore/heap/GCIncomingRefCountedInlines.h b/Source/JavaScriptCore/heap/GCIncomingRefCountedInlines.h
index 41330ecb9..a1a91e203 100644
--- a/Source/JavaScriptCore/heap/GCIncomingRefCountedInlines.h
+++ b/Source/JavaScriptCore/heap/GCIncomingRefCountedInlines.h
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef GCIncomingRefCountedInlines_h
-#define GCIncomingRefCountedInlines_h
+#pragma once
#include "GCIncomingRefCounted.h"
#include "Heap.h"
@@ -125,6 +124,3 @@ bool GCIncomingRefCounted<T>::filterIncomingReferences(FilterFunctionType& filte
}
} // namespace JSC
-
-#endif // GCIncomingRefCountedInlines_h
-
diff --git a/Source/JavaScriptCore/heap/GCIncomingRefCountedSet.h b/Source/JavaScriptCore/heap/GCIncomingRefCountedSet.h
index 5e7254a2c..0b435195c 100644
--- a/Source/JavaScriptCore/heap/GCIncomingRefCountedSet.h
+++ b/Source/JavaScriptCore/heap/GCIncomingRefCountedSet.h
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef GCIncomingRefCountedSet_h
-#define GCIncomingRefCountedSet_h
+#pragma once
#include "GCIncomingRefCounted.h"
@@ -36,8 +35,9 @@ template<typename T>
class GCIncomingRefCountedSet {
public:
GCIncomingRefCountedSet();
- ~GCIncomingRefCountedSet();
-
+
+ void lastChanceToFinalize();
+
// Returns true if the native object is new to this set.
bool addReference(JSCell*, T*);
@@ -54,6 +54,3 @@ private:
};
} // namespace JSC
-
-#endif // GCIncomingRefCountedSet_h
-
diff --git a/Source/JavaScriptCore/heap/GCIncomingRefCountedSetInlines.h b/Source/JavaScriptCore/heap/GCIncomingRefCountedSetInlines.h
index 52c55e2d1..b10972da0 100644
--- a/Source/JavaScriptCore/heap/GCIncomingRefCountedSetInlines.h
+++ b/Source/JavaScriptCore/heap/GCIncomingRefCountedSetInlines.h
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef GCIncomingRefCountedSetInlines_h
-#define GCIncomingRefCountedSetInlines_h
+#pragma once
#include "GCIncomingRefCountedSet.h"
#include "VM.h"
@@ -38,7 +37,7 @@ GCIncomingRefCountedSet<T>::GCIncomingRefCountedSet()
}
template<typename T>
-GCIncomingRefCountedSet<T>::~GCIncomingRefCountedSet()
+void GCIncomingRefCountedSet<T>::lastChanceToFinalize()
{
for (size_t i = m_vector.size(); i--;)
m_vector[i]->filterIncomingReferences(removeAll);
@@ -88,5 +87,3 @@ bool GCIncomingRefCountedSet<T>::removeDead(JSCell* cell)
}
} // namespace JSC
-
-#endif // GCIncomingRefCountedSetInlines_h
diff --git a/Source/JavaScriptCore/heap/WriteBarrierBuffer.cpp b/Source/JavaScriptCore/heap/GCLogging.cpp
index 5314b5112..688de602c 100644
--- a/Source/JavaScriptCore/heap/WriteBarrierBuffer.cpp
+++ b/Source/JavaScriptCore/heap/GCLogging.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,46 +24,53 @@
*/
#include "config.h"
-#include "WriteBarrierBuffer.h"
+#include "GCLogging.h"
-#include "GCAssertions.h"
+#include "ClassInfo.h"
#include "Heap.h"
+#include "HeapIterationScope.h"
#include "JSCell.h"
-#include "Structure.h"
+#include "JSCellInlines.h"
+#include <wtf/PrintStream.h>
namespace JSC {
-WriteBarrierBuffer::WriteBarrierBuffer(unsigned capacity)
- : m_currentIndex(0)
- , m_capacity(capacity)
- , m_buffer(static_cast<JSCell**>(fastMalloc(sizeof(JSCell*) * capacity)))
+const char* GCLogging::levelAsString(Level level)
{
+ switch (level) {
+ case None:
+ return "None";
+ case Basic:
+ return "Basic";
+ case Verbose:
+ return "Verbose";
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return "";
+ }
}
-WriteBarrierBuffer::~WriteBarrierBuffer()
-{
- fastFree(m_buffer);
- m_buffer = 0;
-}
+} // namespace JSC
-void WriteBarrierBuffer::flush(Heap& heap)
-{
- ASSERT(m_currentIndex <= m_capacity);
- for (size_t i = 0; i < m_currentIndex; ++i)
- heap.writeBarrier(m_buffer[i]);
- m_currentIndex = 0;
-}
+namespace WTF {
-void WriteBarrierBuffer::reset()
+void printInternal(PrintStream& out, JSC::GCLogging::Level level)
{
- m_currentIndex = 0;
+ switch (level) {
+ case JSC::GCLogging::Level::None:
+ out.print("None");
+ return;
+ case JSC::GCLogging::Level::Basic:
+ out.print("Basic");
+ return;
+ case JSC::GCLogging::Level::Verbose:
+ out.print("Verbose");
+ return;
+ default:
+ out.print("Level=", level - JSC::GCLogging::Level::None);
+ return;
+ }
}
-void WriteBarrierBuffer::add(JSCell* cell)
-{
- ASSERT_GC_OBJECT_LOOKS_VALID(cell);
- ASSERT(m_currentIndex < m_capacity);
- m_buffer[m_currentIndex++] = cell;
-}
+} // namespace WTF
-} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/GCLogging.h b/Source/JavaScriptCore/heap/GCLogging.h
new file mode 100644
index 000000000..712addc18
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCLogging.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/Assertions.h>
+
+namespace JSC {
+
+class Heap;
+
+class GCLogging {
+public:
+ enum Level : uint8_t {
+ None = 0,
+ Basic,
+ Verbose
+ };
+
+ static const char* levelAsString(Level);
+ static void dumpObjectGraph(Heap*);
+};
+
+typedef GCLogging::Level gcLogLevel;
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::GCLogging::Level);
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/heap/GCSegmentedArray.h b/Source/JavaScriptCore/heap/GCSegmentedArray.h
new file mode 100644
index 000000000..176c0849e
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCSegmentedArray.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/DoublyLinkedList.h>
+#include <wtf/ListDump.h>
+#include <wtf/PrintStream.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+template <typename T>
+class GCArraySegment : public DoublyLinkedListNode<GCArraySegment<T>> {
+ friend class WTF::DoublyLinkedListNode<GCArraySegment<T>>;
+public:
+ GCArraySegment()
+ : DoublyLinkedListNode<GCArraySegment<T>>()
+#if !ASSERT_DISABLED
+ , m_top(0)
+#endif
+ {
+ }
+
+ static GCArraySegment* create();
+ static void destroy(GCArraySegment*);
+
+ T* data()
+ {
+ return bitwise_cast<T*>(this + 1);
+ }
+
+ static const size_t blockSize = 4 * KB;
+
+ GCArraySegment* m_prev;
+ GCArraySegment* m_next;
+#if !ASSERT_DISABLED
+ size_t m_top;
+#endif
+};
+
+template <typename T> class GCSegmentedArrayIterator;
+
+template <typename T>
+class GCSegmentedArray {
+ friend class GCSegmentedArrayIterator<T>;
+ friend class GCSegmentedArrayIterator<const T>;
+public:
+ GCSegmentedArray();
+ ~GCSegmentedArray();
+
+ void append(T);
+
+ bool canRemoveLast();
+ const T removeLast();
+ bool refill();
+
+ size_t size();
+ bool isEmpty();
+
+ void fillVector(Vector<T>&);
+ void clear();
+
+ typedef GCSegmentedArrayIterator<T> iterator;
+ iterator begin() const { return GCSegmentedArrayIterator<T>(m_segments.head(), m_top); }
+ iterator end() const { return GCSegmentedArrayIterator<T>(); }
+
+protected:
+ template <size_t size> struct CapacityFromSize {
+ static const size_t value = (size - sizeof(GCArraySegment<T>)) / sizeof(T);
+ };
+
+ void expand();
+
+ size_t postIncTop();
+ size_t preDecTop();
+ void setTopForFullSegment();
+ void setTopForEmptySegment();
+ size_t top();
+
+ void validatePrevious();
+
+ DoublyLinkedList<GCArraySegment<T>> m_segments;
+
+ JS_EXPORT_PRIVATE static const size_t s_segmentCapacity = CapacityFromSize<GCArraySegment<T>::blockSize>::value;
+ size_t m_top;
+ size_t m_numberOfSegments;
+};
+
+template <typename T>
+class GCSegmentedArrayIterator {
+ friend class GCSegmentedArray<T>;
+public:
+ GCSegmentedArrayIterator()
+ : m_currentSegment(0)
+ , m_currentOffset(0)
+ {
+ }
+
+ T& get() { return m_currentSegment->data()[m_currentOffset]; }
+ T& operator*() { return get(); }
+ T& operator->() { return get(); }
+
+ bool operator==(const GCSegmentedArrayIterator& other)
+ {
+ return m_currentSegment == other.m_currentSegment && m_currentOffset == other.m_currentOffset;
+ }
+
+ bool operator!=(const GCSegmentedArrayIterator& other)
+ {
+ return !(*this == other);
+ }
+
+ GCSegmentedArrayIterator& operator++()
+ {
+ ASSERT(m_currentSegment);
+
+ m_currentOffset++;
+
+ if (m_currentOffset >= m_offsetLimit) {
+ m_currentSegment = m_currentSegment->next();
+ m_currentOffset = 0;
+ m_offsetLimit = GCSegmentedArray<T>::s_segmentCapacity;
+ }
+
+ return *this;
+ }
+
+private:
+ GCSegmentedArrayIterator(GCArraySegment<T>* start, size_t top)
+ : m_currentSegment(start)
+ , m_currentOffset(0)
+ , m_offsetLimit(top)
+ {
+ if (!m_offsetLimit)
+ m_currentSegment = nullptr;
+ }
+
+ GCArraySegment<T>* m_currentSegment;
+ size_t m_currentOffset;
+ size_t m_offsetLimit;
+};
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/GCSegmentedArrayInlines.h b/Source/JavaScriptCore/heap/GCSegmentedArrayInlines.h
new file mode 100644
index 000000000..19aca9bb0
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCSegmentedArrayInlines.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "GCSegmentedArray.h"
+
+namespace JSC {
+
+template <typename T>
+GCSegmentedArray<T>::GCSegmentedArray()
+ : m_top(0)
+ , m_numberOfSegments(0)
+{
+ m_segments.push(GCArraySegment<T>::create());
+ m_numberOfSegments++;
+}
+
+template <typename T>
+GCSegmentedArray<T>::~GCSegmentedArray()
+{
+ ASSERT(m_numberOfSegments == 1);
+ ASSERT(m_segments.size() == 1);
+ GCArraySegment<T>::destroy(m_segments.removeHead());
+ m_numberOfSegments--;
+ ASSERT(!m_numberOfSegments);
+ ASSERT(!m_segments.size());
+}
+
+template <typename T>
+void GCSegmentedArray<T>::clear()
+{
+ if (!m_segments.head())
+ return;
+ GCArraySegment<T>* next;
+ for (GCArraySegment<T>* current = m_segments.head(); current->next(); current = next) {
+ next = current->next();
+ m_segments.remove(current);
+ GCArraySegment<T>::destroy(current);
+ }
+ m_top = 0;
+ m_numberOfSegments = 1;
+#if !ASSERT_DISABLED
+ m_segments.head()->m_top = 0;
+#endif
+}
+
+template <typename T>
+void GCSegmentedArray<T>::expand()
+{
+ ASSERT(m_segments.head()->m_top == s_segmentCapacity);
+
+ GCArraySegment<T>* nextSegment = GCArraySegment<T>::create();
+ m_numberOfSegments++;
+
+#if !ASSERT_DISABLED
+ nextSegment->m_top = 0;
+#endif
+
+ m_segments.push(nextSegment);
+ setTopForEmptySegment();
+ validatePrevious();
+}
+
+template <typename T>
+bool GCSegmentedArray<T>::refill()
+{
+ validatePrevious();
+ if (top())
+ return true;
+ GCArraySegment<T>::destroy(m_segments.removeHead());
+ ASSERT(m_numberOfSegments > 1);
+ m_numberOfSegments--;
+ setTopForFullSegment();
+ validatePrevious();
+ return true;
+}
+
+template <typename T>
+void GCSegmentedArray<T>::fillVector(Vector<T>& vector)
+{
+ ASSERT(vector.size() == size());
+
+ GCArraySegment<T>* currentSegment = m_segments.head();
+ if (!currentSegment)
+ return;
+
+ unsigned count = 0;
+ for (unsigned i = 0; i < m_top; ++i) {
+ ASSERT(currentSegment->data()[i]);
+ vector[count++] = currentSegment->data()[i];
+ }
+
+ currentSegment = currentSegment->next();
+ while (currentSegment) {
+ for (unsigned i = 0; i < s_segmentCapacity; ++i) {
+ ASSERT(currentSegment->data()[i]);
+ vector[count++] = currentSegment->data()[i];
+ }
+ currentSegment = currentSegment->next();
+ }
+}
+
+template <typename T>
+inline GCArraySegment<T>* GCArraySegment<T>::create()
+{
+ return new (NotNull, fastMalloc(blockSize)) GCArraySegment<T>();
+}
+
+template <typename T>
+inline void GCArraySegment<T>::destroy(GCArraySegment* segment)
+{
+ segment->~GCArraySegment();
+ fastFree(segment);
+}
+
+template <typename T>
+inline size_t GCSegmentedArray<T>::postIncTop()
+{
+ size_t result = m_top++;
+ ASSERT(result == m_segments.head()->m_top++);
+ return result;
+}
+
+template <typename T>
+inline size_t GCSegmentedArray<T>::preDecTop()
+{
+ size_t result = --m_top;
+ ASSERT(result == --m_segments.head()->m_top);
+ return result;
+}
+
+template <typename T>
+inline void GCSegmentedArray<T>::setTopForFullSegment()
+{
+ ASSERT(m_segments.head()->m_top == s_segmentCapacity);
+ m_top = s_segmentCapacity;
+}
+
+template <typename T>
+inline void GCSegmentedArray<T>::setTopForEmptySegment()
+{
+ ASSERT(!m_segments.head()->m_top);
+ m_top = 0;
+}
+
+template <typename T>
+inline size_t GCSegmentedArray<T>::top()
+{
+ ASSERT(m_top == m_segments.head()->m_top);
+ return m_top;
+}
+
+template <typename T>
+#if ASSERT_DISABLED
+inline void GCSegmentedArray<T>::validatePrevious() { }
+#else
+inline void GCSegmentedArray<T>::validatePrevious()
+{
+ unsigned count = 0;
+ for (GCArraySegment<T>* current = m_segments.head(); current; current = current->next())
+ count++;
+ ASSERT(m_segments.size() == m_numberOfSegments);
+}
+#endif
+
+template <typename T>
+inline void GCSegmentedArray<T>::append(T value)
+{
+ if (m_top == s_segmentCapacity)
+ expand();
+ m_segments.head()->data()[postIncTop()] = value;
+}
+
+template <typename T>
+inline bool GCSegmentedArray<T>::canRemoveLast()
+{
+ return !!m_top;
+}
+
+template <typename T>
+inline const T GCSegmentedArray<T>::removeLast()
+{
+ return m_segments.head()->data()[preDecTop()];
+}
+
+template <typename T>
+inline bool GCSegmentedArray<T>::isEmpty()
+{
+ if (m_top)
+ return false;
+ if (m_segments.head()->next()) {
+ ASSERT(m_segments.head()->next()->m_top == s_segmentCapacity);
+ return false;
+ }
+ return true;
+}
+
+template <typename T>
+inline size_t GCSegmentedArray<T>::size()
+{
+ return m_top + s_segmentCapacity * (m_numberOfSegments - 1);
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/GCThread.cpp b/Source/JavaScriptCore/heap/GCThread.cpp
deleted file mode 100644
index 50f02ce19..000000000
--- a/Source/JavaScriptCore/heap/GCThread.cpp
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "GCThread.h"
-
-#include "CopyVisitor.h"
-#include "CopyVisitorInlines.h"
-#include "GCThreadSharedData.h"
-#include "SlotVisitor.h"
-#include <wtf/MainThread.h>
-#include <wtf/PassOwnPtr.h>
-
-namespace JSC {
-
-GCThread::GCThread(GCThreadSharedData& shared, SlotVisitor* slotVisitor, CopyVisitor* copyVisitor)
- : m_threadID(0)
- , m_shared(shared)
- , m_slotVisitor(WTF::adoptPtr(slotVisitor))
- , m_copyVisitor(WTF::adoptPtr(copyVisitor))
-{
-}
-
-ThreadIdentifier GCThread::threadID()
-{
- ASSERT(m_threadID);
- return m_threadID;
-}
-
-void GCThread::initializeThreadID(ThreadIdentifier threadID)
-{
- ASSERT(!m_threadID);
- m_threadID = threadID;
-}
-
-SlotVisitor* GCThread::slotVisitor()
-{
- ASSERT(m_slotVisitor);
- return m_slotVisitor.get();
-}
-
-CopyVisitor* GCThread::copyVisitor()
-{
- ASSERT(m_copyVisitor);
- return m_copyVisitor.get();
-}
-
-GCPhase GCThread::waitForNextPhase()
-{
- std::unique_lock<std::mutex> lock(m_shared.m_phaseMutex);
- m_shared.m_phaseConditionVariable.wait(lock, [this] { return !m_shared.m_gcThreadsShouldWait; });
-
- m_shared.m_numberOfActiveGCThreads--;
- if (!m_shared.m_numberOfActiveGCThreads)
- m_shared.m_activityConditionVariable.notify_one();
-
- m_shared.m_phaseConditionVariable.wait(lock, [this] { return m_shared.m_currentPhase != NoPhase; });
- m_shared.m_numberOfActiveGCThreads++;
- return m_shared.m_currentPhase;
-}
-
-void GCThread::gcThreadMain()
-{
- GCPhase currentPhase;
-#if ENABLE(PARALLEL_GC)
- WTF::registerGCThread();
-#endif
- // Wait for the main thread to finish creating and initializing us. The main thread grabs this lock before
- // creating this thread. We aren't guaranteed to have a valid threadID until the main thread releases this lock.
- {
- std::lock_guard<std::mutex> lock(m_shared.m_phaseMutex);
- }
- {
- ParallelModeEnabler enabler(*m_slotVisitor);
- while ((currentPhase = waitForNextPhase()) != Exit) {
- // Note: Each phase is responsible for its own termination conditions. The comments below describe
- // how each phase reaches termination.
- switch (currentPhase) {
- case Mark:
- m_slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
- // GCThreads only return from drainFromShared() if the main thread sets the m_parallelMarkersShouldExit
- // flag in the GCThreadSharedData. The only way the main thread sets that flag is if it realizes
- // that all of the various subphases in Heap::markRoots() have been fully finished and there is
- // no more marking work to do and all of the GCThreads are idle, meaning no more work can be generated.
- break;
- case Copy:
- // We don't have to call startCopying() because it's called for us on the main thread to avoid a
- // race condition.
- m_copyVisitor->copyFromShared();
- // We know we're done copying when we return from copyFromShared() because we would
- // only do so if there were no more chunks of copying work left to do. When there is no
- // more copying work to do, the main thread will wait in CopiedSpace::doneCopying() until
- // all of the blocks that the GCThreads borrowed have been returned. doneCopying()
- // returns our borrowed CopiedBlock, allowing the copying phase to finish.
- m_copyVisitor->doneCopying();
- break;
- case NoPhase:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- case Exit:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- }
- }
-}
-
-void GCThread::gcThreadStartFunc(void* data)
-{
- GCThread* thread = static_cast<GCThread*>(data);
- thread->gcThreadMain();
-}
-
-} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/GCThreadSharedData.cpp b/Source/JavaScriptCore/heap/GCThreadSharedData.cpp
deleted file mode 100644
index 09143a15f..000000000
--- a/Source/JavaScriptCore/heap/GCThreadSharedData.cpp
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "GCThreadSharedData.h"
-
-#include "CopyVisitor.h"
-#include "CopyVisitorInlines.h"
-#include "GCThread.h"
-#include "VM.h"
-#include "MarkStack.h"
-#include "SlotVisitor.h"
-#include "SlotVisitorInlines.h"
-
-namespace JSC {
-
-#if ENABLE(PARALLEL_GC)
-void GCThreadSharedData::resetChildren()
-{
- for (size_t i = 0; i < m_gcThreads.size(); ++i)
- m_gcThreads[i]->slotVisitor()->reset();
-}
-
-size_t GCThreadSharedData::childVisitCount()
-{
- unsigned long result = 0;
- for (unsigned i = 0; i < m_gcThreads.size(); ++i)
- result += m_gcThreads[i]->slotVisitor()->visitCount();
- return result;
-}
-
-size_t GCThreadSharedData::childBytesVisited()
-{
- size_t result = 0;
- for (unsigned i = 0; i < m_gcThreads.size(); ++i)
- result += m_gcThreads[i]->slotVisitor()->bytesVisited();
- return result;
-}
-
-size_t GCThreadSharedData::childBytesCopied()
-{
- size_t result = 0;
- for (unsigned i = 0; i < m_gcThreads.size(); ++i)
- result += m_gcThreads[i]->slotVisitor()->bytesCopied();
- return result;
-}
-#endif
-
-GCThreadSharedData::GCThreadSharedData(VM* vm)
- : m_vm(vm)
- , m_copiedSpace(&vm->heap.m_storageSpace)
- , m_shouldHashCons(false)
- , m_sharedMarkStack(vm->heap.blockAllocator())
- , m_numberOfActiveParallelMarkers(0)
- , m_parallelMarkersShouldExit(false)
- , m_copyIndex(0)
- , m_numberOfActiveGCThreads(0)
- , m_gcThreadsShouldWait(false)
- , m_currentPhase(NoPhase)
-{
- m_copyLock.Init();
-#if ENABLE(PARALLEL_GC)
- // Grab the lock so the new GC threads can be properly initialized before they start running.
- std::unique_lock<std::mutex> lock(m_phaseMutex);
- for (unsigned i = 1; i < Options::numberOfGCMarkers(); ++i) {
- m_numberOfActiveGCThreads++;
- SlotVisitor* slotVisitor = new SlotVisitor(*this);
- CopyVisitor* copyVisitor = new CopyVisitor(*this);
- GCThread* newThread = new GCThread(*this, slotVisitor, copyVisitor);
- ThreadIdentifier threadID = createThread(GCThread::gcThreadStartFunc, newThread, "JavaScriptCore::Marking");
- newThread->initializeThreadID(threadID);
- m_gcThreads.append(newThread);
- }
-
- // Wait for all the GCThreads to get to the right place.
- m_activityConditionVariable.wait(lock, [this] { return !m_numberOfActiveGCThreads; });
-#endif
-}
-
-GCThreadSharedData::~GCThreadSharedData()
-{
-#if ENABLE(PARALLEL_GC)
- // Destroy our marking threads.
- {
- std::lock_guard<std::mutex> markingLock(m_markingMutex);
- std::lock_guard<std::mutex> phaseLock(m_phaseMutex);
- ASSERT(m_currentPhase == NoPhase);
- m_parallelMarkersShouldExit = true;
- m_gcThreadsShouldWait = false;
- m_currentPhase = Exit;
- m_phaseConditionVariable.notify_all();
- }
- for (unsigned i = 0; i < m_gcThreads.size(); ++i) {
- waitForThreadCompletion(m_gcThreads[i]->threadID());
- delete m_gcThreads[i];
- }
-#endif
-}
-
-void GCThreadSharedData::reset()
-{
- ASSERT(m_sharedMarkStack.isEmpty());
-
-#if ENABLE(PARALLEL_GC)
- m_opaqueRoots.clear();
-#else
- ASSERT(m_opaqueRoots.isEmpty());
-#endif
- m_weakReferenceHarvesters.removeAll();
-
- if (m_shouldHashCons) {
- m_vm->resetNewStringsSinceLastHashCons();
- m_shouldHashCons = false;
- }
-}
-
-void GCThreadSharedData::startNextPhase(GCPhase phase)
-{
- std::lock_guard<std::mutex> lock(m_phaseMutex);
- ASSERT(!m_gcThreadsShouldWait);
- ASSERT(m_currentPhase == NoPhase);
- m_gcThreadsShouldWait = true;
- m_currentPhase = phase;
- m_phaseConditionVariable.notify_all();
-}
-
-void GCThreadSharedData::endCurrentPhase()
-{
- ASSERT(m_gcThreadsShouldWait);
- std::unique_lock<std::mutex> lock(m_phaseMutex);
- m_currentPhase = NoPhase;
- m_gcThreadsShouldWait = false;
- m_phaseConditionVariable.notify_all();
- m_activityConditionVariable.wait(lock, [this] { return !m_numberOfActiveGCThreads; });
-}
-
-void GCThreadSharedData::didStartMarking()
-{
- std::lock_guard<std::mutex> lock(m_markingMutex);
- m_parallelMarkersShouldExit = false;
- startNextPhase(Mark);
-}
-
-void GCThreadSharedData::didFinishMarking()
-{
- {
- std::lock_guard<std::mutex> lock(m_markingMutex);
- m_parallelMarkersShouldExit = true;
- m_markingConditionVariable.notify_all();
- }
-
- ASSERT(m_currentPhase == Mark);
- endCurrentPhase();
-}
-
-void GCThreadSharedData::didStartCopying()
-{
- {
- SpinLockHolder locker(&m_copyLock);
- if (m_vm->heap.operationInProgress() == EdenCollection) {
- // Reset the vector to be empty, but don't throw away the backing store.
- m_blocksToCopy.shrink(0);
- for (CopiedBlock* block = m_copiedSpace->m_newGen.fromSpace->head(); block; block = block->next())
- m_blocksToCopy.append(block);
- } else {
- ASSERT(m_vm->heap.operationInProgress() == FullCollection);
- WTF::copyToVector(m_copiedSpace->m_blockSet, m_blocksToCopy);
- }
- m_copyIndex = 0;
- }
-
- // We do this here so that we avoid a race condition where the main thread can
- // blow through all of the copying work before the GCThreads fully wake up.
- // The GCThreads then request a block from the CopiedSpace when the copying phase
- // has completed, which isn't allowed.
- for (size_t i = 0; i < m_gcThreads.size(); i++)
- m_gcThreads[i]->copyVisitor()->startCopying();
-
- startNextPhase(Copy);
-}
-
-void GCThreadSharedData::didFinishCopying()
-{
- ASSERT(m_currentPhase == Copy);
- endCurrentPhase();
-}
-
-} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/GCThreadSharedData.h b/Source/JavaScriptCore/heap/GCThreadSharedData.h
deleted file mode 100644
index 915c2c991..000000000
--- a/Source/JavaScriptCore/heap/GCThreadSharedData.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef GCThreadSharedData_h
-#define GCThreadSharedData_h
-
-#include "ListableHandler.h"
-#include "MarkStack.h"
-#include "MarkedBlock.h"
-#include "UnconditionalFinalizer.h"
-#include "WeakReferenceHarvester.h"
-#include <condition_variable>
-#include <wtf/HashSet.h>
-#include <wtf/TCSpinLock.h>
-#include <wtf/Vector.h>
-
-namespace JSC {
-
-class GCThread;
-class VM;
-class CopiedSpace;
-class CopyVisitor;
-
-enum GCPhase {
- NoPhase,
- Mark,
- Copy,
- Exit
-};
-
-class GCThreadSharedData {
-public:
- GCThreadSharedData(VM*);
- ~GCThreadSharedData();
-
- void reset();
-
- void didStartMarking();
- void didFinishMarking();
- void didStartCopying();
- void didFinishCopying();
-
-#if ENABLE(PARALLEL_GC)
- void resetChildren();
- size_t childVisitCount();
- size_t childBytesVisited();
- size_t childBytesCopied();
- size_t childDupStrings();
-#endif
-
-private:
- friend class GCThread;
- friend class SlotVisitor;
- friend class CopyVisitor;
-
- void getNextBlocksToCopy(size_t&, size_t&);
- void startNextPhase(GCPhase);
- void endCurrentPhase();
-
- VM* m_vm;
- CopiedSpace* m_copiedSpace;
-
- bool m_shouldHashCons;
-
- Vector<GCThread*> m_gcThreads;
-
- std::mutex m_markingMutex;
- std::condition_variable m_markingConditionVariable;
- MarkStackArray m_sharedMarkStack;
- unsigned m_numberOfActiveParallelMarkers;
- bool m_parallelMarkersShouldExit;
-
- Mutex m_opaqueRootsLock;
- HashSet<void*> m_opaqueRoots;
-
- SpinLock m_copyLock;
- Vector<CopiedBlock*> m_blocksToCopy;
- size_t m_copyIndex;
- static const size_t s_blockFragmentLength = 32;
-
- std::mutex m_phaseMutex;
- std::condition_variable m_phaseConditionVariable;
- std::condition_variable m_activityConditionVariable;
- unsigned m_numberOfActiveGCThreads;
- bool m_gcThreadsShouldWait;
- GCPhase m_currentPhase;
-
- ListableHandler<WeakReferenceHarvester>::List m_weakReferenceHarvesters;
- ListableHandler<UnconditionalFinalizer>::List m_unconditionalFinalizers;
-};
-
-inline void GCThreadSharedData::getNextBlocksToCopy(size_t& start, size_t& end)
-{
- SpinLockHolder locker(&m_copyLock);
- start = m_copyIndex;
- end = std::min(m_blocksToCopy.size(), m_copyIndex + s_blockFragmentLength);
- m_copyIndex = end;
-}
-
-} // namespace JSC
-
-#endif
diff --git a/Source/JavaScriptCore/heap/GCTypeMap.h b/Source/JavaScriptCore/heap/GCTypeMap.h
new file mode 100644
index 000000000..9e19e6773
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCTypeMap.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CollectionScope.h"
+#include <wtf/Assertions.h>
+
+namespace JSC {
+
+template<typename T>
+struct GCTypeMap {
+ T eden;
+ T full;
+
+ T& operator[](CollectionScope scope)
+ {
+ switch (scope) {
+ case CollectionScope::Full:
+ return full;
+ case CollectionScope::Eden:
+ return eden;
+ }
+ ASSERT_NOT_REACHED();
+ return full;
+ }
+
+ const T& operator[](CollectionScope scope) const
+ {
+ switch (scope) {
+ case CollectionScope::Full:
+ return full;
+ case CollectionScope::Eden:
+ return eden;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return full;
+ }
+};
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/Handle.h b/Source/JavaScriptCore/heap/Handle.h
index 28ac30cd9..ba4161c25 100644
--- a/Source/JavaScriptCore/heap/Handle.h
+++ b/Source/JavaScriptCore/heap/Handle.h
@@ -23,8 +23,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef Handle_h
-#define Handle_h
+#pragma once
#include "HandleTypes.h"
@@ -52,9 +51,7 @@ class HandleBase {
public:
bool operator!() const { return !m_slot || !*m_slot; }
- // This conversion operator allows implicit conversion to bool but not to other integer types.
- typedef JSValue (HandleBase::*UnspecifiedBoolType);
- operator UnspecifiedBoolType*() const { return (m_slot && *m_slot) ? reinterpret_cast<UnspecifiedBoolType*>(1) : 0; }
+ explicit operator bool() const { return m_slot && *m_slot; }
HandleSlot slot() const { return m_slot; }
@@ -184,6 +181,4 @@ template <typename T, typename U> inline bool operator!=(JSValue a, const Handle
return a != b.get();
}
-}
-
-#endif
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/HandleBlock.h b/Source/JavaScriptCore/heap/HandleBlock.h
index 962d37c5e..ef64433bb 100644
--- a/Source/JavaScriptCore/heap/HandleBlock.h
+++ b/Source/JavaScriptCore/heap/HandleBlock.h
@@ -23,20 +23,20 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef HandleBlock_h
-#define HandleBlock_h
+#pragma once
-#include "HeapBlock.h"
+#include <wtf/DoublyLinkedList.h>
namespace JSC {
-class DeadBlock;
class HandleSet;
class HandleNode;
-class HandleBlock : public HeapBlock<HandleBlock> {
+class HandleBlock : public DoublyLinkedListNode<HandleBlock> {
+ friend class WTF::DoublyLinkedListNode<HandleBlock>;
public:
- static HandleBlock* create(DeadBlock*, HandleSet*);
+ static HandleBlock* create(HandleSet*);
+ static void destroy(HandleBlock*);
static HandleBlock* blockFor(HandleNode*);
static const size_t blockSize = 4 * KB;
@@ -48,13 +48,15 @@ public:
unsigned nodeCapacity();
private:
- HandleBlock(Region*, HandleSet*);
+ HandleBlock(HandleSet*);
char* payload();
char* payloadEnd();
static const size_t s_blockMask = ~(blockSize - 1);
+ HandleBlock* m_prev;
+ HandleBlock* m_next;
HandleSet* m_handleSet;
};
@@ -69,5 +71,3 @@ inline HandleSet* HandleBlock::handleSet()
}
} // namespace JSC
-
-#endif // HandleBlock_h
diff --git a/Source/JavaScriptCore/heap/HandleBlockInlines.h b/Source/JavaScriptCore/heap/HandleBlockInlines.h
index 7c771935e..c87481f47 100644
--- a/Source/JavaScriptCore/heap/HandleBlockInlines.h
+++ b/Source/JavaScriptCore/heap/HandleBlockInlines.h
@@ -23,29 +23,33 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef HandleBlockInlines_h
-#define HandleBlockInlines_h
+#pragma once
-#include "BlockAllocator.h"
#include "HandleBlock.h"
+#include <wtf/FastMalloc.h>
namespace JSC {
-inline HandleBlock* HandleBlock::create(DeadBlock* block, HandleSet* handleSet)
+inline HandleBlock* HandleBlock::create(HandleSet* handleSet)
{
- Region* region = block->region();
- return new (NotNull, block) HandleBlock(region, handleSet);
+ return new (NotNull, fastAlignedMalloc(blockSize, blockSize)) HandleBlock(handleSet);
}
-inline HandleBlock::HandleBlock(Region* region, HandleSet* handleSet)
- : HeapBlock<HandleBlock>(region)
+inline void HandleBlock::destroy(HandleBlock* block)
+{
+ block->~HandleBlock();
+ fastAlignedFree(block);
+}
+
+inline HandleBlock::HandleBlock(HandleSet* handleSet)
+ : DoublyLinkedListNode<HandleBlock>()
, m_handleSet(handleSet)
{
}
inline char* HandleBlock::payloadEnd()
{
- return reinterpret_cast<char*>(this) + region()->blockSize();
+ return reinterpret_cast<char*>(this) + blockSize;
}
inline char* HandleBlock::payload()
@@ -70,5 +74,3 @@ inline unsigned HandleBlock::nodeCapacity()
}
} // namespace JSC
-
-#endif // HandleBlockInlines_h
diff --git a/Source/JavaScriptCore/heap/HandleSet.cpp b/Source/JavaScriptCore/heap/HandleSet.cpp
index fdb554448..9d7d5c631 100644
--- a/Source/JavaScriptCore/heap/HandleSet.cpp
+++ b/Source/JavaScriptCore/heap/HandleSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,16 +28,14 @@
#include "HandleBlock.h"
#include "HandleBlockInlines.h"
-#include "HeapRootVisitor.h"
#include "JSObject.h"
-#include "Operations.h"
+#include "JSCInlines.h"
#include <wtf/DataLog.h>
namespace JSC {
HandleSet::HandleSet(VM* vm)
: m_vm(vm)
- , m_nextToFinalize(0)
{
grow();
}
@@ -45,12 +43,12 @@ HandleSet::HandleSet(VM* vm)
HandleSet::~HandleSet()
{
while (!m_blockList.isEmpty())
- m_vm->heap.blockAllocator().deallocate(HandleBlock::destroy(m_blockList.removeHead()));
+ HandleBlock::destroy(m_blockList.removeHead());
}
void HandleSet::grow()
{
- HandleBlock* newBlock = HandleBlock::create(m_vm->heap.blockAllocator().allocate<HandleBlock>(), this);
+ HandleBlock* newBlock = HandleBlock::create(this);
m_blockList.append(newBlock);
for (int i = newBlock->nodeCapacity() - 1; i >= 0; --i) {
@@ -60,23 +58,19 @@ void HandleSet::grow()
}
}
-void HandleSet::visitStrongHandles(HeapRootVisitor& heapRootVisitor)
+void HandleSet::visitStrongHandles(SlotVisitor& visitor)
{
Node* end = m_strongList.end();
for (Node* node = m_strongList.begin(); node != end; node = node->next()) {
#if ENABLE(GC_VALIDATION)
RELEASE_ASSERT(isLiveNode(node));
#endif
- heapRootVisitor.visit(node->slot());
+ visitor.appendUnbarriered(*node->slot());
}
}
void HandleSet::writeBarrier(HandleSlot slot, const JSValue& value)
{
- // Forbid assignment to handles during the finalization phase, since it would violate many GC invariants.
- // File a bug with stack trace if you hit this.
- RELEASE_ASSERT(!m_nextToFinalize);
-
if (!value == !*slot && slot->isCell() == value.isCell())
return;
diff --git a/Source/JavaScriptCore/heap/HandleSet.h b/Source/JavaScriptCore/heap/HandleSet.h
index 58251f66a..458daeb4d 100644
--- a/Source/JavaScriptCore/heap/HandleSet.h
+++ b/Source/JavaScriptCore/heap/HandleSet.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,11 +23,11 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef HandleSet_h
-#define HandleSet_h
+#pragma once
#include "Handle.h"
#include "HandleBlock.h"
+#include "HeapCell.h"
#include <wtf/DoublyLinkedList.h>
#include <wtf/HashCountedSet.h>
#include <wtf/SentinelLinkedList.h>
@@ -35,9 +35,7 @@
namespace JSC {
-class HandleBlock;
class HandleSet;
-class HeapRootVisitor;
class VM;
class JSValue;
class SlotVisitor;
@@ -75,13 +73,13 @@ public:
HandleSlot allocate();
void deallocate(HandleSlot);
- void visitStrongHandles(HeapRootVisitor&);
+ void visitStrongHandles(SlotVisitor&);
JS_EXPORT_PRIVATE void writeBarrier(HandleSlot, const JSValue&);
unsigned protectedGlobalObjectCount();
- template<typename Functor> void forEachStrongHandle(Functor&, const HashCountedSet<JSCell*>& skipSet);
+ template<typename Functor> void forEachStrongHandle(const Functor&, const HashCountedSet<JSCell*>& skipSet);
private:
typedef HandleNode Node;
@@ -100,7 +98,6 @@ private:
SentinelLinkedList<Node> m_strongList;
SentinelLinkedList<Node> m_immediateList;
SinglyLinkedList<Node> m_freeList;
- Node* m_nextToFinalize;
};
inline HandleSet* HandleSet::heapFor(HandleSlot handle)
@@ -125,10 +122,6 @@ inline HandleSet::Node* HandleSet::toNode(HandleSlot handle)
inline HandleSlot HandleSet::allocate()
{
- // Forbid assignment to handles during the finalization phase, since it would violate many GC invariants.
- // File a bug with stack trace if you hit this.
- RELEASE_ASSERT(!m_nextToFinalize);
-
if (m_freeList.isEmpty())
grow();
@@ -141,11 +134,6 @@ inline HandleSlot HandleSet::allocate()
inline void HandleSet::deallocate(HandleSlot handle)
{
HandleSet::Node* node = toNode(handle);
- if (node == m_nextToFinalize) {
- ASSERT(m_nextToFinalize->next());
- m_nextToFinalize = m_nextToFinalize->next();
- }
-
SentinelLinkedList<HandleSet::Node>::remove(node);
m_freeList.push(node);
}
@@ -192,7 +180,7 @@ inline HandleNode* HandleNode::next()
return m_next;
}
-template<typename Functor> void HandleSet::forEachStrongHandle(Functor& functor, const HashCountedSet<JSCell*>& skipSet)
+template<typename Functor> void HandleSet::forEachStrongHandle(const Functor& functor, const HashCountedSet<JSCell*>& skipSet)
{
HandleSet::Node* end = m_strongList.end();
for (HandleSet::Node* node = m_strongList.begin(); node != end; node = node->next()) {
@@ -205,6 +193,4 @@ template<typename Functor> void HandleSet::forEachStrongHandle(Functor& functor,
}
}
-}
-
-#endif
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/HandleStack.cpp b/Source/JavaScriptCore/heap/HandleStack.cpp
index 41b2ada5f..30c0d1f88 100644
--- a/Source/JavaScriptCore/heap/HandleStack.cpp
+++ b/Source/JavaScriptCore/heap/HandleStack.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2010-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,9 +26,8 @@
#include "config.h"
#include "HandleStack.h"
-#include "HeapRootVisitor.h"
#include "JSObject.h"
-#include "Operations.h"
+#include "JSCInlines.h"
namespace JSC {
@@ -40,7 +39,7 @@ HandleStack::HandleStack()
grow();
}
-void HandleStack::visit(HeapRootVisitor& heapRootVisitor)
+void HandleStack::visit(SlotVisitor& visitor)
{
const Vector<HandleSlot>& blocks = m_blockStack.blocks();
size_t blockLength = m_blockStack.blockLength;
@@ -48,10 +47,10 @@ void HandleStack::visit(HeapRootVisitor& heapRootVisitor)
int end = blocks.size() - 1;
for (int i = 0; i < end; ++i) {
HandleSlot block = blocks[i];
- heapRootVisitor.visit(block, blockLength);
+ visitor.appendUnbarriered(block, blockLength);
}
HandleSlot block = blocks[end];
- heapRootVisitor.visit(block, m_frame.m_next - block);
+ visitor.appendUnbarriered(block, m_frame.m_next - block);
}
void HandleStack::grow()
diff --git a/Source/JavaScriptCore/heap/HandleStack.h b/Source/JavaScriptCore/heap/HandleStack.h
index a7ce97650..dc1486417 100644
--- a/Source/JavaScriptCore/heap/HandleStack.h
+++ b/Source/JavaScriptCore/heap/HandleStack.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2010-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,8 +23,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef HandleStack_h
-#define HandleStack_h
+#pragma once
#include <wtf/Assertions.h>
#include <wtf/BlockStack.h>
@@ -33,7 +32,7 @@
namespace JSC {
class LocalScope;
-class HeapRootVisitor;
+class SlotVisitor;
class HandleStack {
public:
@@ -50,10 +49,10 @@ public:
HandleSlot push();
- void visit(HeapRootVisitor&);
+ void visit(SlotVisitor&);
private:
- void grow();
+ JS_EXPORT_PRIVATE void grow();
void zapTo(Frame&);
HandleSlot findFirstAfter(HandleSlot);
@@ -122,6 +121,4 @@ inline HandleSlot HandleStack::push()
return m_frame.m_next++;
}
-}
-
-#endif
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/HandleTypes.h b/Source/JavaScriptCore/heap/HandleTypes.h
index 42a267e57..6cf11df30 100644
--- a/Source/JavaScriptCore/heap/HandleTypes.h
+++ b/Source/JavaScriptCore/heap/HandleTypes.h
@@ -23,8 +23,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef HandleTypes_h
-#define HandleTypes_h
+#pragma once
#include "JSCJSValue.h"
@@ -48,5 +47,3 @@ template<> struct HandleTypes<Unknown> {
};
} // namespace JSC
-
-#endif // HandleTypes_h
diff --git a/Source/JavaScriptCore/heap/Heap.cpp b/Source/JavaScriptCore/heap/Heap.cpp
index 26ec23980..f8f957b22 100644
--- a/Source/JavaScriptCore/heap/Heap.cpp
+++ b/Source/JavaScriptCore/heap/Heap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2003-2017 Apple Inc. All rights reserved.
* Copyright (C) 2007 Eric Seidel <eric@webkit.org>
*
* This library is free software; you can redistribute it and/or
@@ -22,163 +22,107 @@
#include "Heap.h"
#include "CodeBlock.h"
+#include "CodeBlockSetInlines.h"
+#include "CollectingScope.h"
#include "ConservativeRoots.h"
-#include "CopiedSpace.h"
-#include "CopiedSpaceInlines.h"
-#include "CopyVisitorInlines.h"
-#include "DFGWorklist.h"
-#include "DelayedReleaseScope.h"
+#include "DFGWorklistInlines.h"
+#include "EdenGCActivityCallback.h"
+#include "Exception.h"
+#include "FullGCActivityCallback.h"
#include "GCActivityCallback.h"
#include "GCIncomingRefCountedSetInlines.h"
+#include "GCSegmentedArrayInlines.h"
+#include "GCTypeMap.h"
+#include "HasOwnPropertyCache.h"
+#include "HeapHelperPool.h"
#include "HeapIterationScope.h"
-#include "HeapRootVisitor.h"
-#include "HeapStatistics.h"
+#include "HeapProfiler.h"
+#include "HeapSnapshot.h"
+#include "HeapVerifier.h"
#include "IncrementalSweeper.h"
#include "Interpreter.h"
+#include "JITStubRoutineSet.h"
+#include "JITWorklist.h"
+#include "JSCInlines.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
-#include "JSONObject.h"
-#include "Operations.h"
-#include "RecursiveAllocationScope.h"
-#include "Tracing.h"
+#include "JSVirtualMachineInternal.h"
+#include "MachineStackMarker.h"
+#include "MarkedSpaceInlines.h"
+#include "MarkingConstraintSet.h"
+#include "PreventCollectionScope.h"
+#include "SamplingProfiler.h"
+#include "ShadowChicken.h"
+#include "SpaceTimeMutatorScheduler.h"
+#include "SuperSampler.h"
+#include "StochasticSpaceTimeMutatorScheduler.h"
+#include "StopIfNecessaryTimer.h"
+#include "SweepingScope.h"
+#include "SynchronousStopTheWorldMutatorScheduler.h"
+#include "TypeProfilerLog.h"
#include "UnlinkedCodeBlock.h"
#include "VM.h"
#include "WeakSetInlines.h"
#include <algorithm>
-#include <wtf/RAMSize.h>
#include <wtf/CurrentTime.h>
+#include <wtf/MainThread.h>
+#include <wtf/ParallelVectorIterator.h>
+#include <wtf/ProcessID.h>
+#include <wtf/RAMSize.h>
+#include <wtf/SimpleStats.h>
+
+#if USE(FOUNDATION)
+#if __has_include(<objc/objc-internal.h>)
+#include <objc/objc-internal.h>
+#else
+extern "C" void* objc_autoreleasePoolPush(void);
+extern "C" void objc_autoreleasePoolPop(void *context);
+#endif
+#endif // USE(FOUNDATION)
using namespace std;
-using namespace JSC;
namespace JSC {
-namespace {
-
-static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
-static const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.
-
-#define ENABLE_GC_LOGGING 0
-
-#if ENABLE(GC_LOGGING)
-#if COMPILER(CLANG)
-#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
-_Pragma("clang diagnostic push") \
-_Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
-_Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
-static type name arguments; \
-_Pragma("clang diagnostic pop")
-#else
-#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
-static type name arguments;
-#endif // COMPILER(CLANG)
-
-struct GCTimer {
- GCTimer(const char* name)
- : m_time(0)
- , m_min(100000000)
- , m_max(0)
- , m_count(0)
- , m_name(name)
- {
- }
- ~GCTimer()
- {
- dataLogF("%s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf)\n", m_name, m_time * 1000, m_time * 1000 / m_count, m_min*1000, m_max*1000);
- }
- double m_time;
- double m_min;
- double m_max;
- size_t m_count;
- const char* m_name;
-};
-
-struct GCTimerScope {
- GCTimerScope(GCTimer* timer)
- : m_timer(timer)
- , m_start(WTF::monotonicallyIncreasingTime())
- {
- }
- ~GCTimerScope()
- {
- double delta = WTF::monotonicallyIncreasingTime() - m_start;
- if (delta < m_timer->m_min)
- m_timer->m_min = delta;
- if (delta > m_timer->m_max)
- m_timer->m_max = delta;
- m_timer->m_count++;
- m_timer->m_time += delta;
- }
- GCTimer* m_timer;
- double m_start;
-};
-
-struct GCCounter {
- GCCounter(const char* name)
- : m_name(name)
- , m_count(0)
- , m_total(0)
- , m_min(10000000)
- , m_max(0)
- {
- }
-
- void count(size_t amount)
- {
- m_count++;
- m_total += amount;
- if (amount < m_min)
- m_min = amount;
- if (amount > m_max)
- m_max = amount;
- }
- ~GCCounter()
- {
- dataLogF("%s: %zu values (avg. %zu, min. %zu, max. %zu)\n", m_name, m_total, m_total / m_count, m_min, m_max);
- }
- const char* m_name;
- size_t m_count;
- size_t m_total;
- size_t m_min;
- size_t m_max;
-};
+namespace {
-#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer)
-#define COND_GCPHASE(cond, name1, name2) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name1##Timer, (#name1)); DEFINE_GC_LOGGING_GLOBAL(GCTimer, name2##Timer, (#name2)); GCTimerScope name1##CondTimerScope(cond ? &name1##Timer : &name2##Timer)
-#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)
-
-#else
+bool verboseStop = false;
-#define GCPHASE(name) do { } while (false)
-#define COND_GCPHASE(cond, name1, name2) do { } while (false)
-#define GCCOUNTER(name, value) do { } while (false)
-#endif
+double maxPauseMS(double thisPauseMS)
+{
+ static double maxPauseMS;
+ maxPauseMS = std::max(thisPauseMS, maxPauseMS);
+ return maxPauseMS;
+}
-static inline size_t minHeapSize(HeapType heapType, size_t ramSize)
+size_t minHeapSize(HeapType heapType, size_t ramSize)
{
- if (heapType == LargeHeap)
- return min(largeHeapSize, ramSize / 4);
- return smallHeapSize;
+ if (heapType == LargeHeap) {
+ double result = min(
+ static_cast<double>(Options::largeHeapSize()),
+ ramSize * Options::smallHeapRAMFraction());
+ return static_cast<size_t>(result);
+ }
+ return Options::smallHeapSize();
}
-static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
+size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
{
- // Try to stay under 1/2 RAM size to leave room for the DOM, rendering, networking, etc.
- if (heapSize < ramSize / 4)
- return 2 * heapSize;
- if (heapSize < ramSize / 2)
- return 1.5 * heapSize;
- return 1.25 * heapSize;
+ if (heapSize < ramSize * Options::smallHeapRAMFraction())
+ return Options::smallHeapGrowthFactor() * heapSize;
+ if (heapSize < ramSize * Options::mediumHeapRAMFraction())
+ return Options::mediumHeapGrowthFactor() * heapSize;
+ return Options::largeHeapGrowthFactor() * heapSize;
}
-static inline bool isValidSharedInstanceThreadState(VM* vm)
+bool isValidSharedInstanceThreadState(VM* vm)
{
return vm->currentThreadIsHoldingAPILock();
}
-static inline bool isValidThreadState(VM* vm)
+bool isValidThreadState(VM* vm)
{
- if (vm->identifierTable != wtfThreadData().currentIdentifierTable())
+ if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
return false;
if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
@@ -187,155 +131,356 @@ static inline bool isValidThreadState(VM* vm)
return true;
}
-struct MarkObject : public MarkedBlock::VoidFunctor {
- void operator()(JSCell* cell)
- {
- if (cell->isZapped())
- return;
- Heap::heap(cell)->setMarked(cell);
- }
-};
-
-struct Count : public MarkedBlock::CountFunctor {
- void operator()(JSCell*) { count(1); }
-};
-
-struct CountIfGlobalObject : MarkedBlock::CountFunctor {
- void operator()(JSCell* cell) {
- if (!cell->isObject())
- return;
- if (!asObject(cell)->isGlobalObject())
- return;
- count(1);
- }
-};
-
-class RecordType {
-public:
- typedef PassOwnPtr<TypeCountSet> ReturnType;
-
- RecordType();
- void operator()(JSCell*);
- ReturnType returnValue();
-
-private:
- const char* typeName(JSCell*);
- OwnPtr<TypeCountSet> m_typeCountSet;
-};
-
-inline RecordType::RecordType()
- : m_typeCountSet(adoptPtr(new TypeCountSet))
+void recordType(VM& vm, TypeCountSet& set, JSCell* cell)
{
+ const char* typeName = "[unknown]";
+ const ClassInfo* info = cell->classInfo(vm);
+ if (info && info->className)
+ typeName = info->className;
+ set.add(typeName);
}
-inline const char* RecordType::typeName(JSCell* cell)
+bool measurePhaseTiming()
{
- const ClassInfo* info = cell->classInfo();
- if (!info || !info->className)
- return "[unknown]";
- return info->className;
+ return false;
}
-inline void RecordType::operator()(JSCell* cell)
+HashMap<const char*, GCTypeMap<SimpleStats>>& timingStats()
{
- m_typeCountSet->add(typeName(cell));
+ static HashMap<const char*, GCTypeMap<SimpleStats>>* result;
+ static std::once_flag once;
+ std::call_once(
+ once,
+ [] {
+ result = new HashMap<const char*, GCTypeMap<SimpleStats>>();
+ });
+ return *result;
}
-inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
+SimpleStats& timingStats(const char* name, CollectionScope scope)
{
- return m_typeCountSet.release();
+ return timingStats().add(name, GCTypeMap<SimpleStats>()).iterator->value[scope];
}
+class TimingScope {
+public:
+ TimingScope(std::optional<CollectionScope> scope, const char* name)
+ : m_scope(scope)
+ , m_name(name)
+ {
+ if (measurePhaseTiming())
+ m_before = monotonicallyIncreasingTimeMS();
+ }
+
+ TimingScope(Heap& heap, const char* name)
+ : TimingScope(heap.collectionScope(), name)
+ {
+ }
+
+ void setScope(std::optional<CollectionScope> scope)
+ {
+ m_scope = scope;
+ }
+
+ void setScope(Heap& heap)
+ {
+ setScope(heap.collectionScope());
+ }
+
+ ~TimingScope()
+ {
+ if (measurePhaseTiming()) {
+ double after = monotonicallyIncreasingTimeMS();
+ double timing = after - m_before;
+ SimpleStats& stats = timingStats(m_name, *m_scope);
+ stats.add(timing);
+ dataLog("[GC:", *m_scope, "] ", m_name, " took: ", timing, "ms (average ", stats.mean(), "ms).\n");
+ }
+ }
+private:
+ std::optional<CollectionScope> m_scope;
+ double m_before;
+ const char* m_name;
+};
+
} // anonymous namespace
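TimingScope above is an RAII phase timer: the constructor records a start time and the destructor logs the elapsed milliseconds against the collection scope, but only when measurePhaseTiming() is compiled to return true. A minimal usage sketch, mirroring how beginMarking() uses it later in this patch (the phase body here is hypothetical):

void Heap::someMarkingPhase() // hypothetical phase, for illustration only
{
    TimingScope timingScope(*this, "Heap::someMarkingPhase");
    // ... phase work goes here ...
    // When timingScope is destroyed, the elapsed time is added to the per-scope
    // timingStats entry for "Heap::someMarkingPhase" and logged, provided
    // measurePhaseTiming() returns true.
}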
+class Heap::Thread : public AutomaticThread {
+public:
+ Thread(const AbstractLocker& locker, Heap& heap)
+ : AutomaticThread(locker, heap.m_threadLock, heap.m_threadCondition)
+ , m_heap(heap)
+ {
+ }
+
+protected:
+ PollResult poll(const AbstractLocker& locker) override
+ {
+ if (m_heap.m_threadShouldStop) {
+ m_heap.notifyThreadStopping(locker);
+ return PollResult::Stop;
+ }
+ if (m_heap.shouldCollectInCollectorThread(locker))
+ return PollResult::Work;
+ return PollResult::Wait;
+ }
+
+ WorkResult work() override
+ {
+ m_heap.collectInCollectorThread();
+ return WorkResult::Continue;
+ }
+
+ void threadDidStart() override
+ {
+ WTF::registerGCThread(GCThreadType::Main);
+ }
+
+private:
+ Heap& m_heap;
+};
+
Heap::Heap(VM* vm, HeapType heapType)
: m_heapType(heapType)
- , m_ramSize(ramSize())
+ , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
, m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
, m_sizeAfterLastCollect(0)
+ , m_sizeAfterLastFullCollect(0)
+ , m_sizeBeforeLastFullCollect(0)
+ , m_sizeAfterLastEdenCollect(0)
+ , m_sizeBeforeLastEdenCollect(0)
, m_bytesAllocatedThisCycle(0)
- , m_bytesAbandonedThisCycle(0)
+ , m_bytesAbandonedSinceLastFullCollect(0)
, m_maxEdenSize(m_minBytesPerCycle)
, m_maxHeapSize(m_minBytesPerCycle)
, m_shouldDoFullCollection(false)
, m_totalBytesVisited(0)
- , m_totalBytesCopied(0)
- , m_operationInProgress(NoOperation)
- , m_blockAllocator()
, m_objectSpace(this)
- , m_storageSpace(this)
- , m_extraMemoryUsage(0)
- , m_machineThreads(this)
- , m_sharedData(vm)
- , m_slotVisitor(m_sharedData)
- , m_copyVisitor(m_sharedData)
+ , m_extraMemorySize(0)
+ , m_deprecatedExtraMemorySize(0)
+ , m_machineThreads(std::make_unique<MachineThreads>(this))
+ , m_collectorSlotVisitor(std::make_unique<SlotVisitor>(*this, "C"))
+ , m_mutatorSlotVisitor(std::make_unique<SlotVisitor>(*this, "M"))
+ , m_mutatorMarkStack(std::make_unique<MarkStackArray>())
+ , m_raceMarkStack(std::make_unique<MarkStackArray>())
+ , m_constraintSet(std::make_unique<MarkingConstraintSet>())
, m_handleSet(vm)
+ , m_codeBlocks(std::make_unique<CodeBlockSet>())
+ , m_jitStubRoutines(std::make_unique<JITStubRoutineSet>())
, m_isSafeToCollect(false)
- , m_writeBarrierBuffer(256)
, m_vm(vm)
- , m_lastGCLength(0)
- , m_lastCodeDiscardTime(WTF::monotonicallyIncreasingTime())
- , m_activityCallback(DefaultGCActivityCallback::create(this))
- , m_sweeper(IncrementalSweeper::create(this))
+ // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously
+ // schedule the timer if we've never done a collection.
+ , m_lastFullGCLength(0.01)
+ , m_lastEdenGCLength(0.01)
+#if USE(CF)
+ , m_runLoop(CFRunLoopGetCurrent())
+#endif // USE(CF)
+ , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
+ , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
+ , m_sweeper(adoptRef(new IncrementalSweeper(this)))
+ , m_stopIfNecessaryTimer(adoptRef(new StopIfNecessaryTimer(vm)))
, m_deferralDepth(0)
+#if USE(FOUNDATION)
+ , m_delayedReleaseRecursionCount(0)
+#endif
+ , m_sharedCollectorMarkStack(std::make_unique<MarkStackArray>())
+ , m_sharedMutatorMarkStack(std::make_unique<MarkStackArray>())
+ , m_helperClient(&heapHelperPool())
+ , m_threadLock(Box<Lock>::create())
+ , m_threadCondition(AutomaticThreadCondition::create())
{
- m_storageSpace.init();
+ m_worldState.store(0);
+
+ if (Options::useConcurrentGC()) {
+ if (Options::useStochasticMutatorScheduler())
+ m_scheduler = std::make_unique<StochasticSpaceTimeMutatorScheduler>(*this);
+ else
+ m_scheduler = std::make_unique<SpaceTimeMutatorScheduler>(*this);
+ } else {
+ // We simulate turning off concurrent GC by making the scheduler say that the world
+ // should always be stopped when the collector is running.
+ m_scheduler = std::make_unique<SynchronousStopTheWorldMutatorScheduler>();
+ }
+
+ if (Options::verifyHeap())
+ m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
+
+ m_collectorSlotVisitor->optimizeForStoppedMutator();
+
+ LockHolder locker(*m_threadLock);
+ m_thread = adoptRef(new Thread(locker, *this));
}
Heap::~Heap()
{
+ forEachSlotVisitor(
+ [&] (SlotVisitor& visitor) {
+ visitor.clearMarkStacks();
+ });
+ m_mutatorMarkStack->clear();
+ m_raceMarkStack->clear();
+
+ for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
+ WeakBlock::destroy(*this, block);
}
bool Heap::isPagedOut(double deadline)
{
- return m_objectSpace.isPagedOut(deadline) || m_storageSpace.isPagedOut(deadline);
+ return m_objectSpace.isPagedOut(deadline);
}
// The VM is being destroyed and the collector will never run again.
// Run all pending finalizers now because we won't get another chance.
void Heap::lastChanceToFinalize()
{
+ MonotonicTime before;
+ if (Options::logGC()) {
+ before = MonotonicTime::now();
+ dataLog("[GC<", RawPointer(this), ">: shutdown ");
+ }
+
RELEASE_ASSERT(!m_vm->entryScope);
- RELEASE_ASSERT(m_operationInProgress == NoOperation);
+ RELEASE_ASSERT(m_mutatorState == MutatorState::Running);
+
+ if (m_collectContinuouslyThread) {
+ {
+ LockHolder locker(m_collectContinuouslyLock);
+ m_shouldStopCollectingContinuously = true;
+ m_collectContinuouslyCondition.notifyOne();
+ }
+ waitForThreadCompletion(m_collectContinuouslyThread);
+ }
+
+ if (Options::logGC())
+ dataLog("1");
+
+ // Prevent new collections from being started. This is probably not even necessary, since we're not
+ // going to call into anything that starts collections. Still, this makes the algorithm more
+ // obviously sound.
+ m_isSafeToCollect = false;
+
+ if (Options::logGC())
+ dataLog("2");
+
+ bool isCollecting;
+ {
+ auto locker = holdLock(*m_threadLock);
+ RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
+ isCollecting = m_lastServedTicket < m_lastGrantedTicket;
+ }
+ if (isCollecting) {
+ if (Options::logGC())
+ dataLog("...]\n");
+
+ // Wait for the current collection to finish.
+ waitForCollector(
+ [&] (const AbstractLocker&) -> bool {
+ RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
+ return m_lastServedTicket == m_lastGrantedTicket;
+ });
+
+ if (Options::logGC())
+ dataLog("[GC<", RawPointer(this), ">: shutdown ");
+ }
+ if (Options::logGC())
+ dataLog("3");
+ RELEASE_ASSERT(m_requests.isEmpty());
+ RELEASE_ASSERT(m_lastServedTicket == m_lastGrantedTicket);
+
+ // Carefully bring the thread down.
+ bool stopped = false;
+ {
+ LockHolder locker(*m_threadLock);
+ stopped = m_thread->tryStop(locker);
+ m_threadShouldStop = true;
+ if (!stopped)
+ m_threadCondition->notifyOne(locker);
+ }
+
+ if (Options::logGC())
+ dataLog("4");
+
+ if (!stopped)
+ m_thread->join();
+
+ if (Options::logGC())
+ dataLog("5 ");
+
+ m_arrayBuffers.lastChanceToFinalize();
+ m_codeBlocks->lastChanceToFinalize(*m_vm);
+ m_objectSpace.stopAllocating();
m_objectSpace.lastChanceToFinalize();
+ releaseDelayedReleasedObjects();
+
+ sweepAllLogicallyEmptyWeakBlocks();
+
+ if (Options::logGC())
+ dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
}
-void Heap::reportExtraMemoryCostSlowCase(size_t cost)
+void Heap::releaseDelayedReleasedObjects()
{
- // Our frequency of garbage collection tries to balance memory use against speed
- // by collecting based on the number of newly created values. However, for values
- // that hold on to a great deal of memory that's not in the form of other JS values,
- // that is not good enough - in some cases a lot of those objects can pile up and
- // use crazy amounts of memory without a GC happening. So we track these extra
- // memory costs. Only unusually large objects are noted, and we only keep track
- // of this extra cost until the next GC. In garbage collected languages, most values
- // are either very short lived temporaries, or have extremely long lifetimes. So
- // if a large value survives one garbage collection, there is not much point to
- // collecting more frequently as long as it stays alive.
+#if USE(FOUNDATION)
+ // We need to guard against the case that releasing an object can create more objects due to the
+ // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up
+ // back here and could try to recursively release objects. We guard that with a recursive entry
+    // count. Only the initial call will release objects; recursive calls simply return and let
+    // the initial call to the function take care of any objects created during release time.
+ // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
+ // and use a temp Vector for the actual releasing.
+ if (!m_delayedReleaseRecursionCount++) {
+ while (!m_delayedReleaseObjects.isEmpty()) {
+ ASSERT(m_vm->currentThreadIsHoldingAPILock());
+
+ Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTFMove(m_delayedReleaseObjects);
+
+ {
+ // We need to drop locks before calling out to arbitrary code.
+ JSLock::DropAllLocks dropAllLocks(m_vm);
+
+ void* context = objc_autoreleasePoolPush();
+ objectsToRelease.clear();
+ objc_autoreleasePoolPop(context);
+ }
+ }
+ }
+ m_delayedReleaseRecursionCount--;
+#endif
+}
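The recursion counter in releaseDelayedReleasedObjects() is a standard re-entrancy guard: only the outermost call drains the queue, and it keeps looping because releasing an object can enqueue more objects (and can re-enter via JS). A distilled sketch of the same pattern, with hypothetical names and std types:

#include <functional>
#include <utility>
#include <vector>

// Hypothetical illustration of the re-entrancy guard used above: only the
// outermost call drains the queue, and it loops because running a release
// action may enqueue further actions (possibly re-entering releaseAll()).
class DelayedReleaser {
public:
    void add(std::function<void()> action) { m_pending.push_back(std::move(action)); }

    void releaseAll()
    {
        if (m_recursionCount++) {
            m_recursionCount--;   // re-entrant call: let the outermost call do the work
            return;
        }
        while (!m_pending.empty()) {
            std::vector<std::function<void()>> batch = std::move(m_pending);
            m_pending.clear();
            for (auto& action : batch)
                action();         // may call back into releaseAll(); the guard makes that a no-op
        }
        m_recursionCount--;
    }

private:
    unsigned m_recursionCount { 0 };
    std::vector<std::function<void()>> m_pending;
};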
- didAllocate(cost);
+void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
+{
+ didAllocate(size);
collectIfNecessaryOrDefer();
}
+void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
+{
+ // FIXME: Change this to use SaturatedArithmetic when available.
+ // https://bugs.webkit.org/show_bug.cgi?id=170411
+ Checked<size_t, RecordOverflow> checkedNewSize = m_deprecatedExtraMemorySize;
+ checkedNewSize += size;
+ m_deprecatedExtraMemorySize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
+ reportExtraMemoryAllocatedSlowCase(size);
+}
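The Checked<size_t, RecordOverflow> dance above (and again in extraMemorySize() below) is just a saturating add, clamping to the maximum size_t on overflow instead of wrapping, as the SaturatedArithmetic FIXME notes. A minimal equivalent:

#include <cstddef>
#include <limits>

// Saturating add: on unsigned overflow, clamp to the maximum rather than wrapping.
static inline size_t saturatedAddSketch(size_t a, size_t b)
{
    size_t sum = a + b;
    if (sum < a) // wrapped around
        return std::numeric_limits<size_t>::max();
    return sum;
}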
+
void Heap::reportAbandonedObjectGraph()
{
// Our clients don't know exactly how much memory they
// are abandoning so we just guess for them.
- double abandonedBytes = 0.10 * m_sizeAfterLastCollect;
+ size_t abandonedBytes = static_cast<size_t>(0.1 * capacity());
// We want to accelerate the next collection. Because memory has just
// been abandoned, the next collection has the potential to
// be more profitable. Since allocation is the trigger for collection,
// we hasten the next collection by pretending that we've allocated more memory.
- didAbandon(abandonedBytes);
-}
-
-void Heap::didAbandon(size_t bytes)
-{
- if (m_activityCallback)
- m_activityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedThisCycle);
- m_bytesAbandonedThisCycle += bytes;
+ if (m_fullActivityCallback) {
+ m_fullActivityCallback->didAllocate(
+ m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
+ }
+ m_bytesAbandonedSinceLastFullCollect += abandonedBytes;
}
void Heap::protect(JSValue k)
@@ -368,290 +513,194 @@ void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
}
}
-void Heap::markProtectedObjects(HeapRootVisitor& heapRootVisitor)
+void Heap::finalizeUnconditionalFinalizers()
{
- ProtectCountSet::iterator end = m_protectedValues.end();
- for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
- heapRootVisitor.visit(&it->key);
+ while (m_unconditionalFinalizers.hasNext()) {
+ UnconditionalFinalizer* finalizer = m_unconditionalFinalizers.removeNext();
+ finalizer->finalizeUnconditionally();
+ }
}
-void Heap::pushTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
+void Heap::willStartIterating()
{
- m_tempSortingVectors.append(tempVector);
+ m_objectSpace.willStartIterating();
}
-void Heap::popTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
+void Heap::didFinishIterating()
{
- ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
- m_tempSortingVectors.removeLast();
+ m_objectSpace.didFinishIterating();
}
-void Heap::markTempSortVectors(HeapRootVisitor& heapRootVisitor)
+void Heap::completeAllJITPlans()
{
- typedef Vector<Vector<ValueStringPair, 0, UnsafeVectorOverflow>* > VectorOfValueStringVectors;
-
- VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
- for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
- Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempSortingVector = *it;
-
- Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
- for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt) {
- if (vectorIt->first)
- heapRootVisitor.visit(&vectorIt->first);
- }
- }
+#if ENABLE(JIT)
+ JITWorklist::instance()->completeAllForVM(*m_vm);
+#endif // ENABLE(JIT)
+ DFG::completeAllPlansForVM(*m_vm);
}
-void Heap::harvestWeakReferences()
+template<typename Func>
+void Heap::iterateExecutingAndCompilingCodeBlocks(const Func& func)
{
- m_slotVisitor.harvestWeakReferences();
+ m_codeBlocks->iterateCurrentlyExecuting(func);
+ DFG::iterateCodeBlocksForGC(*m_vm, func);
}
-void Heap::finalizeUnconditionalFinalizers()
+template<typename Func>
+void Heap::iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func& func)
{
- m_slotVisitor.finalizeUnconditionalFinalizers();
+ Vector<CodeBlock*, 256> codeBlocks;
+ iterateExecutingAndCompilingCodeBlocks(
+ [&] (CodeBlock* codeBlock) {
+ codeBlocks.append(codeBlock);
+ });
+ for (CodeBlock* codeBlock : codeBlocks)
+ func(codeBlock);
}
-inline JSStack& Heap::stack()
+void Heap::assertSharedMarkStacksEmpty()
{
- return m_vm->interpreter->stack();
+ bool ok = true;
+
+ if (!m_sharedCollectorMarkStack->isEmpty()) {
+ dataLog("FATAL: Shared collector mark stack not empty! It has ", m_sharedCollectorMarkStack->size(), " elements.\n");
+ ok = false;
+ }
+
+ if (!m_sharedMutatorMarkStack->isEmpty()) {
+ dataLog("FATAL: Shared mutator mark stack not empty! It has ", m_sharedMutatorMarkStack->size(), " elements.\n");
+ ok = false;
+ }
+
+ RELEASE_ASSERT(ok);
}
-void Heap::willStartIterating()
+void Heap::gatherStackRoots(ConservativeRoots& roots)
{
- m_objectSpace.willStartIterating();
+ m_machineThreads->gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks, m_currentThreadState);
}
-void Heap::didFinishIterating()
+void Heap::gatherJSStackRoots(ConservativeRoots& roots)
{
- m_objectSpace.didFinishIterating();
+#if !ENABLE(JIT)
+ m_vm->interpreter->cloopStack().gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks);
+#else
+ UNUSED_PARAM(roots);
+#endif
}
-void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
+void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
{
- ASSERT(isValidThreadState(m_vm));
- ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
- stack().gatherConservativeRoots(stackRoots);
- size_t stackRootCount = stackRoots.size();
- JSCell** registerRoots = stackRoots.roots();
- for (size_t i = 0; i < stackRootCount; i++) {
- setMarked(registerRoots[i]);
- roots.add(registerRoots[i]);
- }
+#if ENABLE(DFG_JIT)
+ m_vm->gatherConservativeRoots(roots);
+#else
+ UNUSED_PARAM(roots);
+#endif
}
-void Heap::markRoots()
+void Heap::beginMarking()
{
- SamplingRegion samplingRegion("Garbage Collection: Tracing");
-
- GCPHASE(MarkRoots);
- ASSERT(isValidThreadState(m_vm));
+ TimingScope timingScope(*this, "Heap::beginMarking");
+ if (m_collectionScope == CollectionScope::Full)
+ m_codeBlocks->clearMarksForFullCollection();
+ m_jitStubRoutines->clearMarks();
+ m_objectSpace.beginMarking();
+ setMutatorShouldBeFenced(true);
+}
-#if ENABLE(OBJECT_MARK_LOGGING)
- double gcStartTime = WTF::monotonicallyIncreasingTime();
+void Heap::removeDeadCompilerWorklistEntries()
+{
+#if ENABLE(DFG_JIT)
+ for (unsigned i = DFG::numberOfWorklists(); i--;)
+ DFG::existingWorklistForIndex(i).removeDeadPlans(*m_vm);
#endif
+}
- void* dummy;
-
- // We gather conservative roots before clearing mark bits because conservative
- // gathering uses the mark bits to determine whether a reference is valid.
- ConservativeRoots machineThreadRoots(&m_objectSpace.blocks(), &m_storageSpace);
- m_jitStubRoutines.clearMarks();
- {
- GCPHASE(GatherConservativeRoots);
- m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
- }
+bool Heap::isHeapSnapshotting() const
+{
+ HeapProfiler* heapProfiler = m_vm->heapProfiler();
+ if (UNLIKELY(heapProfiler))
+ return heapProfiler->activeSnapshotBuilder();
+ return false;
+}
- ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
- m_codeBlocks.clearMarks();
+struct GatherHeapSnapshotData : MarkedBlock::CountFunctor {
+ GatherHeapSnapshotData(HeapSnapshotBuilder& builder)
+ : m_builder(builder)
{
- GCPHASE(GatherStackRoots);
- stack().gatherConservativeRoots(stackRoots, m_jitStubRoutines, m_codeBlocks);
}
-#if ENABLE(DFG_JIT)
- ConservativeRoots scratchBufferRoots(&m_objectSpace.blocks(), &m_storageSpace);
+ IterationStatus operator()(HeapCell* heapCell, HeapCell::Kind kind) const
{
- GCPHASE(GatherScratchBufferRoots);
- m_vm->gatherConservativeRoots(scratchBufferRoots);
- }
-#endif
-
- {
- GCPHASE(ClearLivenessData);
- m_objectSpace.clearNewlyAllocated();
- m_objectSpace.clearMarks();
+ if (kind == HeapCell::JSCell) {
+ JSCell* cell = static_cast<JSCell*>(heapCell);
+ cell->methodTable()->heapSnapshot(cell, m_builder);
+ }
+ return IterationStatus::Continue;
}
- m_sharedData.didStartMarking();
- SlotVisitor& visitor = m_slotVisitor;
- visitor.setup();
- HeapRootVisitor heapRootVisitor(visitor);
+ HeapSnapshotBuilder& m_builder;
+};
-#if ENABLE(GGC)
- Vector<const JSCell*> rememberedSet(m_slotVisitor.markStack().size());
- m_slotVisitor.markStack().fillVector(rememberedSet);
-#endif
+void Heap::gatherExtraHeapSnapshotData(HeapProfiler& heapProfiler)
+{
+ if (HeapSnapshotBuilder* builder = heapProfiler.activeSnapshotBuilder()) {
+ HeapIterationScope heapIterationScope(*this);
+ GatherHeapSnapshotData functor(*builder);
+ m_objectSpace.forEachLiveCell(heapIterationScope, functor);
+ }
+}
+struct RemoveDeadHeapSnapshotNodes : MarkedBlock::CountFunctor {
+ RemoveDeadHeapSnapshotNodes(HeapSnapshot& snapshot)
+ : m_snapshot(snapshot)
{
- ParallelModeEnabler enabler(visitor);
-
- m_vm->smallStrings.visitStrongReferences(visitor);
-
- {
- GCPHASE(VisitMachineRoots);
- MARK_LOG_ROOT(visitor, "C++ Stack");
- visitor.append(machineThreadRoots);
- visitor.donateAndDrain();
- }
- {
- GCPHASE(VisitStackRoots);
- MARK_LOG_ROOT(visitor, "Stack");
- visitor.append(stackRoots);
- visitor.donateAndDrain();
- }
-#if ENABLE(DFG_JIT)
- {
- GCPHASE(VisitScratchBufferRoots);
- MARK_LOG_ROOT(visitor, "Scratch Buffers");
- visitor.append(scratchBufferRoots);
- visitor.donateAndDrain();
- }
-#endif
- {
- GCPHASE(VisitProtectedObjects);
- MARK_LOG_ROOT(visitor, "Protected Objects");
- markProtectedObjects(heapRootVisitor);
- visitor.donateAndDrain();
- }
- {
- GCPHASE(VisitTempSortVectors);
- MARK_LOG_ROOT(visitor, "Temp Sort Vectors");
- markTempSortVectors(heapRootVisitor);
- visitor.donateAndDrain();
- }
-
- {
- GCPHASE(MarkingArgumentBuffers);
- if (m_markListSet && m_markListSet->size()) {
- MARK_LOG_ROOT(visitor, "Argument Buffers");
- MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
- visitor.donateAndDrain();
- }
- }
- if (m_vm->exception()) {
- GCPHASE(MarkingException);
- MARK_LOG_ROOT(visitor, "Exceptions");
- heapRootVisitor.visit(m_vm->addressOfException());
- visitor.donateAndDrain();
- }
-
- {
- GCPHASE(VisitStrongHandles);
- MARK_LOG_ROOT(visitor, "Strong Handles");
- m_handleSet.visitStrongHandles(heapRootVisitor);
- visitor.donateAndDrain();
- }
-
- {
- GCPHASE(HandleStack);
- MARK_LOG_ROOT(visitor, "Handle Stack");
- m_handleStack.visit(heapRootVisitor);
- visitor.donateAndDrain();
- }
-
- {
- GCPHASE(TraceCodeBlocksAndJITStubRoutines);
- MARK_LOG_ROOT(visitor, "Trace Code Blocks and JIT Stub Routines");
- m_codeBlocks.traceMarked(visitor);
- m_jitStubRoutines.traceMarkedStubRoutines(visitor);
- visitor.donateAndDrain();
- }
-
-#if ENABLE(PARALLEL_GC)
- {
- GCPHASE(Convergence);
- visitor.drainFromShared(SlotVisitor::MasterDrain);
- }
-#endif
}
- // Weak references must be marked last because their liveness depends on
- // the liveness of the rest of the object graph.
+ IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const
{
- GCPHASE(VisitingLiveWeakHandles);
- MARK_LOG_ROOT(visitor, "Live Weak Handles");
- while (true) {
- m_objectSpace.visitWeakSets(heapRootVisitor);
- harvestWeakReferences();
- if (visitor.isEmpty())
- break;
- {
- ParallelModeEnabler enabler(visitor);
- visitor.donateAndDrain();
-#if ENABLE(PARALLEL_GC)
- visitor.drainFromShared(SlotVisitor::MasterDrain);
-#endif
- }
- }
+ if (kind == HeapCell::JSCell)
+ m_snapshot.sweepCell(static_cast<JSCell*>(cell));
+ return IterationStatus::Continue;
}
-#if ENABLE(GGC)
- {
- GCPHASE(ClearRememberedSet);
- for (unsigned i = 0; i < rememberedSet.size(); ++i) {
- const JSCell* cell = rememberedSet[i];
- MarkedBlock::blockFor(cell)->clearRemembered(cell);
- }
+ HeapSnapshot& m_snapshot;
+};
+
+void Heap::removeDeadHeapSnapshotNodes(HeapProfiler& heapProfiler)
+{
+ if (HeapSnapshot* snapshot = heapProfiler.mostRecentSnapshot()) {
+ HeapIterationScope heapIterationScope(*this);
+ RemoveDeadHeapSnapshotNodes functor(*snapshot);
+ m_objectSpace.forEachDeadCell(heapIterationScope, functor);
+ snapshot->shrinkToFit();
}
-#endif
+}
- GCCOUNTER(VisitedValueCount, visitor.visitCount());
+void Heap::updateObjectCounts()
+{
+ if (m_collectionScope == CollectionScope::Full)
+ m_totalBytesVisited = 0;
- m_sharedData.didFinishMarking();
-#if ENABLE(OBJECT_MARK_LOGGING)
- size_t visitCount = visitor.visitCount();
-#if ENABLE(PARALLEL_GC)
- visitCount += m_sharedData.childVisitCount();
-#endif
- MARK_LOG_MESSAGE2("\nNumber of live Objects after full GC %lu, took %.6f secs\n", visitCount, WTF::monotonicallyIncreasingTime() - gcStartTime);
-#endif
+ m_totalBytesVisitedThisCycle = bytesVisited();
+
+ m_totalBytesVisited += m_totalBytesVisitedThisCycle;
+}
- if (m_operationInProgress == EdenCollection) {
- m_totalBytesVisited += visitor.bytesVisited();
- m_totalBytesCopied += visitor.bytesCopied();
- } else {
- ASSERT(m_operationInProgress == FullCollection);
- m_totalBytesVisited = visitor.bytesVisited();
- m_totalBytesCopied = visitor.bytesCopied();
- }
-#if ENABLE(PARALLEL_GC)
- m_totalBytesVisited += m_sharedData.childBytesVisited();
- m_totalBytesCopied += m_sharedData.childBytesCopied();
-#endif
+void Heap::endMarking()
+{
+ forEachSlotVisitor(
+ [&] (SlotVisitor& visitor) {
+ visitor.reset();
+ });
- visitor.reset();
-#if ENABLE(PARALLEL_GC)
- m_sharedData.resetChildren();
-#endif
- m_sharedData.reset();
-}
-
-template <HeapOperation collectionType>
-void Heap::copyBackingStores()
-{
- m_storageSpace.startedCopying<collectionType>();
- if (m_storageSpace.shouldDoCopyPhase()) {
- m_sharedData.didStartCopying();
- m_copyVisitor.startCopying();
- m_copyVisitor.copyFromShared();
- m_copyVisitor.doneCopying();
- // We need to wait for everybody to finish and return their CopiedBlocks
- // before signaling that the phase is complete.
- m_storageSpace.doneCopying();
- m_sharedData.didFinishCopying();
- } else
- m_storageSpace.doneCopying();
+ assertSharedMarkStacksEmpty();
+ m_weakReferenceHarvesters.removeAll();
+
+ RELEASE_ASSERT(m_raceMarkStack->isEmpty());
+
+ m_objectSpace.endMarking();
+ setMutatorShouldBeFenced(Options::forceFencedBarrier());
}
size_t Heap::objectCount()
@@ -659,327 +708,1500 @@ size_t Heap::objectCount()
return m_objectSpace.objectCount();
}
-size_t Heap::extraSize()
+size_t Heap::extraMemorySize()
{
- return m_extraMemoryUsage + m_arrayBuffers.size();
+ // FIXME: Change this to use SaturatedArithmetic when available.
+ // https://bugs.webkit.org/show_bug.cgi?id=170411
+ Checked<size_t, RecordOverflow> checkedTotal = m_extraMemorySize;
+ checkedTotal += m_deprecatedExtraMemorySize;
+ checkedTotal += m_arrayBuffers.size();
+ size_t total = UNLIKELY(checkedTotal.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedTotal.unsafeGet();
+
+ ASSERT(m_objectSpace.capacity() >= m_objectSpace.size());
+ return std::min(total, std::numeric_limits<size_t>::max() - m_objectSpace.capacity());
}
size_t Heap::size()
{
- return m_objectSpace.size() + m_storageSpace.size() + extraSize();
+ return m_objectSpace.size() + extraMemorySize();
}
size_t Heap::capacity()
{
- return m_objectSpace.capacity() + m_storageSpace.capacity() + extraSize();
-}
-
-size_t Heap::sizeAfterCollect()
-{
- // The result here may not agree with the normal Heap::size().
- // This is due to the fact that we only count live copied bytes
- // rather than all used (including dead) copied bytes, thus it's
- // always the case that m_totalBytesCopied <= m_storageSpace.size().
- ASSERT(m_totalBytesCopied <= m_storageSpace.size());
- return m_totalBytesVisited + m_totalBytesCopied + extraSize();
+ return m_objectSpace.capacity() + extraMemorySize();
}
size_t Heap::protectedGlobalObjectCount()
{
- return forEachProtectedCell<CountIfGlobalObject>();
+ size_t result = 0;
+ forEachProtectedCell(
+ [&] (JSCell* cell) {
+ if (cell->isObject() && asObject(cell)->isGlobalObject())
+ result++;
+ });
+ return result;
}
size_t Heap::globalObjectCount()
{
HeapIterationScope iterationScope(*this);
- return m_objectSpace.forEachLiveCell<CountIfGlobalObject>(iterationScope);
+ size_t result = 0;
+ m_objectSpace.forEachLiveCell(
+ iterationScope,
+ [&] (HeapCell* heapCell, HeapCell::Kind kind) -> IterationStatus {
+ if (kind != HeapCell::JSCell)
+ return IterationStatus::Continue;
+ JSCell* cell = static_cast<JSCell*>(heapCell);
+ if (cell->isObject() && asObject(cell)->isGlobalObject())
+ result++;
+ return IterationStatus::Continue;
+ });
+ return result;
}
size_t Heap::protectedObjectCount()
{
- return forEachProtectedCell<Count>();
+ size_t result = 0;
+ forEachProtectedCell(
+ [&] (JSCell*) {
+ result++;
+ });
+ return result;
}
-PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
+std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
- return forEachProtectedCell<RecordType>();
+ std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
+ forEachProtectedCell(
+ [&] (JSCell* cell) {
+ recordType(*vm(), *result, cell);
+ });
+ return result;
}
-PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
+std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
{
+ std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
HeapIterationScope iterationScope(*this);
- return m_objectSpace.forEachLiveCell<RecordType>(iterationScope);
+ m_objectSpace.forEachLiveCell(
+ iterationScope,
+ [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
+ if (kind == HeapCell::JSCell)
+ recordType(*vm(), *result, static_cast<JSCell*>(cell));
+ return IterationStatus::Continue;
+ });
+ return result;
}
-void Heap::deleteAllCompiledCode()
+void Heap::deleteAllCodeBlocks(DeleteAllCodeEffort effort)
{
- // If JavaScript is running, it's not safe to delete code, since we'll end
- // up deleting code that is live on the stack.
- if (m_vm->entryScope)
+ if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
return;
+
+ PreventCollectionScope preventCollectionScope(*this);
+
+ // If JavaScript is running, it's not safe to delete all JavaScript code, since
+ // we'll end up returning to deleted code.
+ RELEASE_ASSERT(!m_vm->entryScope);
+ RELEASE_ASSERT(!m_collectionScope);
+
+ completeAllJITPlans();
+
+ for (ExecutableBase* executable : m_executables)
+ executable->clearCode();
+}
+
+void Heap::deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort effort)
+{
+ if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
+ return;
+
+ PreventCollectionScope preventCollectionScope(*this);
- for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
+ RELEASE_ASSERT(!m_collectionScope);
+
+ for (ExecutableBase* current : m_executables) {
if (!current->isFunctionExecutable())
continue;
- static_cast<FunctionExecutable*>(current)->clearCodeIfNotCompiling();
+ static_cast<FunctionExecutable*>(current)->unlinkedExecutable()->clearCode();
}
-
- m_codeBlocks.clearMarks();
- m_codeBlocks.deleteUnmarkedAndUnreferenced();
}
-void Heap::deleteUnmarkedCompiledCode()
+void Heap::clearUnmarkedExecutables()
{
- ExecutableBase* next;
- for (ExecutableBase* current = m_compiledCode.head(); current; current = next) {
- next = current->next();
+ for (unsigned i = m_executables.size(); i--;) {
+ ExecutableBase* current = m_executables[i];
if (isMarked(current))
continue;
- // We do this because executable memory is limited on some platforms and because
- // CodeBlock requires eager finalization.
- ExecutableBase::clearCodeVirtual(current);
- m_compiledCode.remove(current);
+ // Eagerly dereference the Executable's JITCode in order to run watchpoint
+ // destructors. Otherwise, watchpoints might fire for deleted CodeBlocks.
+ current->clearCode();
+ std::swap(m_executables[i], m_executables.last());
+ m_executables.removeLast();
}
- m_codeBlocks.deleteUnmarkedAndUnreferenced();
- m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines();
+ m_executables.shrinkToFit();
+}
+
+void Heap::deleteUnmarkedCompiledCode()
+{
+ clearUnmarkedExecutables();
+ m_codeBlocks->deleteUnmarkedAndUnreferenced(*m_vm, *m_lastCollectionScope);
+ m_jitStubRoutines->deleteUnmarkedJettisonedStubRoutines();
}
-void Heap::addToRememberedSet(const JSCell* cell)
+void Heap::addToRememberedSet(const JSCell* constCell)
{
+ JSCell* cell = const_cast<JSCell*>(constCell);
ASSERT(cell);
- ASSERT(!Options::enableConcurrentJIT() || !isCompilationThread());
- if (isInRememberedSet(cell))
- return;
- MarkedBlock::blockFor(cell)->setRemembered(cell);
- m_slotVisitor.unconditionallyAppend(const_cast<JSCell*>(cell));
+ ASSERT(!Options::useConcurrentJIT() || !isCompilationThread());
+ m_barriersExecuted++;
+ if (m_mutatorShouldBeFenced) {
+ WTF::loadLoadFence();
+ if (!isMarkedConcurrently(cell)) {
+            // During a full collection, a store into an unmarked object that had survived past
+ // collections will manifest as a store to an unmarked PossiblyBlack object. If the
+ // object gets marked at some time after this then it will go down the normal marking
+ // path. So, we don't have to remember this object. We could return here. But we go
+ // further and attempt to re-white the object.
+
+ RELEASE_ASSERT(m_collectionScope == CollectionScope::Full);
+
+ if (cell->atomicCompareExchangeCellStateStrong(CellState::PossiblyBlack, CellState::DefinitelyWhite) == CellState::PossiblyBlack) {
+ // Now we protect against this race:
+ //
+ // 1) Object starts out black + unmarked.
+ // --> We do isMarkedConcurrently here.
+ // 2) Object is marked and greyed.
+ // 3) Object is scanned and blacked.
+ // --> We do atomicCompareExchangeCellStateStrong here.
+ //
+ // In this case we would have made the object white again, even though it should
+ // be black. This check lets us correct our mistake. This relies on the fact that
+ // isMarkedConcurrently converges monotonically to true.
+ if (isMarkedConcurrently(cell)) {
+ // It's difficult to work out whether the object should be grey or black at
+ // this point. We say black conservatively.
+ cell->setCellState(CellState::PossiblyBlack);
+ }
+
+ // Either way, we can return. Most likely, the object was not marked, and so the
+ // object is now labeled white. This means that future barrier executions will not
+ // fire. In the unlikely event that the object had become marked, we can still
+ // return anyway, since we proved that the object was not marked at the time that
+ // we executed this slow path.
+ }
+
+ return;
+ }
+ } else
+ ASSERT(Heap::isMarkedConcurrently(cell));
+ // It could be that the object was *just* marked. This means that the collector may set the
+ // state to DefinitelyGrey and then to PossiblyOldOrBlack at any time. It's OK for us to
+ // race with the collector here. If we win then this is accurate because the object _will_
+ // get scanned again. If we lose then someone else will barrier the object again. That would
+ // be unfortunate but not the end of the world.
+ cell->setCellState(CellState::PossiblyGrey);
+ m_mutatorMarkStack->append(cell);
+}
+
+void Heap::sweepSynchronously()
+{
+ double before = 0;
+ if (Options::logGC()) {
+ dataLog("Full sweep: ", capacity() / 1024, "kb ");
+ before = currentTimeMS();
+ }
+ m_objectSpace.sweep();
+ m_objectSpace.shrink();
+ if (Options::logGC()) {
+ double after = currentTimeMS();
+ dataLog("=> ", capacity() / 1024, "kb, ", after - before, "ms");
+ }
}
void Heap::collectAllGarbage()
{
if (!m_isSafeToCollect)
return;
+
+ collectSync(CollectionScope::Full);
- m_shouldDoFullCollection = true;
- collect();
+ DeferGCForAWhile deferGC(*this);
+ if (UNLIKELY(Options::useImmortalObjects()))
+ sweeper()->stopSweeping();
- SamplingRegion samplingRegion("Garbage Collection: Sweeping");
- DelayedReleaseScope delayedReleaseScope(m_objectSpace);
- m_objectSpace.sweep();
- m_objectSpace.shrink();
+ bool alreadySweptInCollectSync = Options::sweepSynchronously();
+ if (!alreadySweptInCollectSync) {
+ if (Options::logGC())
+ dataLog("[GC<", RawPointer(this), ">: ");
+ sweepSynchronously();
+ if (Options::logGC())
+ dataLog("]\n");
+ }
+ m_objectSpace.assertNoUnswept();
+
+ sweepAllLogicallyEmptyWeakBlocks();
}
-static double minute = 60.0;
+void Heap::collectAsync(std::optional<CollectionScope> scope)
+{
+ if (!m_isSafeToCollect)
+ return;
-void Heap::collect()
+ bool alreadyRequested = false;
+ {
+ LockHolder locker(*m_threadLock);
+ for (std::optional<CollectionScope> request : m_requests) {
+ if (scope) {
+ if (scope == CollectionScope::Eden) {
+ alreadyRequested = true;
+ break;
+ } else {
+ RELEASE_ASSERT(scope == CollectionScope::Full);
+ if (request == CollectionScope::Full) {
+ alreadyRequested = true;
+ break;
+ }
+ }
+ } else {
+ if (!request || request == CollectionScope::Full) {
+ alreadyRequested = true;
+ break;
+ }
+ }
+ }
+ }
+ if (alreadyRequested)
+ return;
+
+ requestCollection(scope);
+}
+
+void Heap::collectSync(std::optional<CollectionScope> scope)
{
-#if ENABLE(ALLOCATION_LOGGING)
- dataLogF("JSC GC starting collection.\n");
-#endif
+ if (!m_isSafeToCollect)
+ return;
- double before = 0;
- if (Options::logGC()) {
- dataLog("[GC: ");
- before = currentTimeMS();
+ waitForCollection(requestCollection(scope));
+}
+
+bool Heap::shouldCollectInCollectorThread(const AbstractLocker&)
+{
+ RELEASE_ASSERT(m_requests.isEmpty() == (m_lastServedTicket == m_lastGrantedTicket));
+ RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
+
+ if (false)
+ dataLog("Mutator has the conn = ", !!(m_worldState.load() & mutatorHasConnBit), "\n");
+
+ return !m_requests.isEmpty() && !(m_worldState.load() & mutatorHasConnBit);
+}
+
+void Heap::collectInCollectorThread()
+{
+ for (;;) {
+ RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Collector, nullptr);
+ switch (result) {
+ case RunCurrentPhaseResult::Finished:
+ return;
+ case RunCurrentPhaseResult::Continue:
+ break;
+ case RunCurrentPhaseResult::NeedCurrentThreadState:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+}
+
+void Heap::checkConn(GCConductor conn)
+{
+ switch (conn) {
+ case GCConductor::Mutator:
+ RELEASE_ASSERT(m_worldState.load() & mutatorHasConnBit);
+ return;
+ case GCConductor::Collector:
+ RELEASE_ASSERT(!(m_worldState.load() & mutatorHasConnBit));
+ return;
}
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+auto Heap::runCurrentPhase(GCConductor conn, CurrentThreadState* currentThreadState) -> RunCurrentPhaseResult
+{
+ checkConn(conn);
+ m_currentThreadState = currentThreadState;
- SamplingRegion samplingRegion("Garbage Collection");
+ // If the collector transfers the conn to the mutator, it leaves us in between phases.
+ if (!finishChangingPhase(conn)) {
+        // A mischievous mutator could repeatedly relinquish the conn back to us. We try to avoid
+        // doing this, but it's probably not the end of the world if it does happen.
+ if (false)
+ dataLog("Conn bounce-back.\n");
+ return RunCurrentPhaseResult::Finished;
+ }
- RELEASE_ASSERT(!m_deferralDepth);
- GCPHASE(Collect);
- ASSERT(vm()->currentThreadIsHoldingAPILock());
- RELEASE_ASSERT(vm()->identifierTable == wtfThreadData().currentIdentifierTable());
- ASSERT(m_isSafeToCollect);
- JAVASCRIPTCORE_GC_BEGIN();
- RELEASE_ASSERT(m_operationInProgress == NoOperation);
+ bool result = false;
+ switch (m_currentPhase) {
+ case CollectorPhase::NotRunning:
+ result = runNotRunningPhase(conn);
+ break;
+
+ case CollectorPhase::Begin:
+ result = runBeginPhase(conn);
+ break;
+
+ case CollectorPhase::Fixpoint:
+ if (!currentThreadState && conn == GCConductor::Mutator)
+ return RunCurrentPhaseResult::NeedCurrentThreadState;
+
+ result = runFixpointPhase(conn);
+ break;
+
+ case CollectorPhase::Concurrent:
+ result = runConcurrentPhase(conn);
+ break;
+
+ case CollectorPhase::Reloop:
+ result = runReloopPhase(conn);
+ break;
+
+ case CollectorPhase::End:
+ result = runEndPhase(conn);
+ break;
+ }
+
+ return result ? RunCurrentPhaseResult::Continue : RunCurrentPhaseResult::Finished;
+}
+
+NEVER_INLINE bool Heap::runNotRunningPhase(GCConductor conn)
+{
+ // Check m_requests since the mutator calls this to poll what's going on.
+ {
+ auto locker = holdLock(*m_threadLock);
+ if (m_requests.isEmpty())
+ return false;
+ }
+ return changePhase(conn, CollectorPhase::Begin);
+}
+
+NEVER_INLINE bool Heap::runBeginPhase(GCConductor conn)
+{
+ m_currentGCStartTime = MonotonicTime::now();
+
+ std::optional<CollectionScope> scope;
{
- RecursiveAllocationScope scope(*this);
- m_vm->prepareToDiscardCode();
+ LockHolder locker(*m_threadLock);
+ RELEASE_ASSERT(!m_requests.isEmpty());
+ scope = m_requests.first();
}
+
+ if (Options::logGC())
+ dataLog("[GC<", RawPointer(this), ">: START ", gcConductorShortName(conn), " ", capacity() / 1024, "kb ");
- bool isFullCollection = m_shouldDoFullCollection;
- if (isFullCollection) {
- m_operationInProgress = FullCollection;
- m_slotVisitor.clearMarkStack();
- m_shouldDoFullCollection = false;
- if (Options::logGC())
- dataLog("FullCollection, ");
- } else {
-#if ENABLE(GGC)
- m_operationInProgress = EdenCollection;
- if (Options::logGC())
- dataLog("EdenCollection, ");
-#else
- m_operationInProgress = FullCollection;
- m_slotVisitor.clearMarkStack();
- if (Options::logGC())
- dataLog("FullCollection, ");
-#endif
+ m_beforeGC = MonotonicTime::now();
+
+ if (m_collectionScope) {
+ dataLog("Collection scope already set during GC: ", *m_collectionScope, "\n");
+ RELEASE_ASSERT_NOT_REACHED();
}
- if (m_operationInProgress == FullCollection)
- m_extraMemoryUsage = 0;
+
+ willStartCollection(scope);
+
+ if (m_verifier) {
+ // Verify that live objects from the last GC cycle haven't been corrupted by
+ // mutators before we begin this new GC cycle.
+ m_verifier->verify(HeapVerifier::Phase::BeforeGC);
+
+ m_verifier->initializeGCCycle();
+ m_verifier->gatherLiveObjects(HeapVerifier::Phase::BeforeMarking);
+ }
+
+ prepareForMarking();
+
+ if (m_collectionScope == CollectionScope::Full) {
+ m_opaqueRoots.clear();
+ m_collectorSlotVisitor->clearMarkStacks();
+ m_mutatorMarkStack->clear();
+ }
+
+ RELEASE_ASSERT(m_raceMarkStack->isEmpty());
+
+ beginMarking();
+
+ forEachSlotVisitor(
+ [&] (SlotVisitor& visitor) {
+ visitor.didStartMarking();
+ });
+
+ m_parallelMarkersShouldExit = false;
+
+ m_helperClient.setFunction(
+ [this] () {
+ SlotVisitor* slotVisitor;
+ {
+ LockHolder locker(m_parallelSlotVisitorLock);
+ if (m_availableParallelSlotVisitors.isEmpty()) {
+ std::unique_ptr<SlotVisitor> newVisitor = std::make_unique<SlotVisitor>(
+ *this, toCString("P", m_parallelSlotVisitors.size() + 1));
+
+ if (Options::optimizeParallelSlotVisitorsForStoppedMutator())
+ newVisitor->optimizeForStoppedMutator();
+
+ newVisitor->didStartMarking();
+
+ slotVisitor = newVisitor.get();
+ m_parallelSlotVisitors.append(WTFMove(newVisitor));
+ } else
+ slotVisitor = m_availableParallelSlotVisitors.takeLast();
+ }
+
+ WTF::registerGCThread(GCThreadType::Helper);
+
+ {
+ ParallelModeEnabler parallelModeEnabler(*slotVisitor);
+ slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
+ }
+
+ {
+ LockHolder locker(m_parallelSlotVisitorLock);
+ m_availableParallelSlotVisitors.append(slotVisitor);
+ }
+ });
- if (m_activityCallback)
- m_activityCallback->willCollect();
+ SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
- double lastGCStartTime = WTF::monotonicallyIncreasingTime();
- if (lastGCStartTime - m_lastCodeDiscardTime > minute) {
- deleteAllCompiledCode();
- m_lastCodeDiscardTime = WTF::monotonicallyIncreasingTime();
+ m_constraintSet->didStartMarking();
+
+ m_scheduler->beginCollection();
+ if (Options::logGC())
+ m_scheduler->log();
+
+ // After this, we will almost certainly fall through all of the "slotVisitor.isEmpty()"
+ // checks because bootstrap would have put things into the visitor. So, we should fall
+ // through to draining.
+
+ if (!slotVisitor.didReachTermination()) {
+ dataLog("Fatal: SlotVisitor should think that GC should terminate before constraint solving, but it does not think this.\n");
+ dataLog("slotVisitor.isEmpty(): ", slotVisitor.isEmpty(), "\n");
+ dataLog("slotVisitor.collectorMarkStack().isEmpty(): ", slotVisitor.collectorMarkStack().isEmpty(), "\n");
+ dataLog("slotVisitor.mutatorMarkStack().isEmpty(): ", slotVisitor.mutatorMarkStack().isEmpty(), "\n");
+ dataLog("m_numberOfActiveParallelMarkers: ", m_numberOfActiveParallelMarkers, "\n");
+ dataLog("m_sharedCollectorMarkStack->isEmpty(): ", m_sharedCollectorMarkStack->isEmpty(), "\n");
+ dataLog("m_sharedMutatorMarkStack->isEmpty(): ", m_sharedMutatorMarkStack->isEmpty(), "\n");
+ dataLog("slotVisitor.didReachTermination(): ", slotVisitor.didReachTermination(), "\n");
+ RELEASE_ASSERT_NOT_REACHED();
}
+
+ return changePhase(conn, CollectorPhase::Fixpoint);
+}
+NEVER_INLINE bool Heap::runFixpointPhase(GCConductor conn)
+{
+ RELEASE_ASSERT(conn == GCConductor::Collector || m_currentThreadState);
+
+ SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
+
+ if (Options::logGC()) {
+ HashMap<const char*, size_t> visitMap;
+ forEachSlotVisitor(
+ [&] (SlotVisitor& slotVisitor) {
+ visitMap.add(slotVisitor.codeName(), slotVisitor.bytesVisited() / 1024);
+ });
+
+ auto perVisitorDump = sortedMapDump(
+ visitMap,
+ [] (const char* a, const char* b) -> bool {
+ return strcmp(a, b) < 0;
+ },
+ ":", " ");
+
+ dataLog("v=", bytesVisited() / 1024, "kb (", perVisitorDump, ") o=", m_opaqueRoots.size(), " b=", m_barriersExecuted, " ");
+ }
+
+ if (slotVisitor.didReachTermination()) {
+ m_scheduler->didReachTermination();
+
+ assertSharedMarkStacksEmpty();
+
+ slotVisitor.mergeIfNecessary();
+ for (auto& parallelVisitor : m_parallelSlotVisitors)
+ parallelVisitor->mergeIfNecessary();
+
+ // FIXME: Take m_mutatorDidRun into account when scheduling constraints. Most likely,
+ // we don't have to execute root constraints again unless the mutator did run. At a
+ // minimum, we could use this for work estimates - but it's probably more than just an
+ // estimate.
+ // https://bugs.webkit.org/show_bug.cgi?id=166828
+
+ // FIXME: We should take advantage of the fact that we could timeout. This only comes
+ // into play if we're executing constraints for the first time. But that will matter
+ // when we have deep stacks or a lot of DOM stuff.
+ // https://bugs.webkit.org/show_bug.cgi?id=166831
+
+ // Wondering what this does? Look at Heap::addCoreConstraints(). The DOM and others can also
+ // add their own using Heap::addMarkingConstraint().
+ bool converged =
+ m_constraintSet->executeConvergence(slotVisitor, MonotonicTime::infinity());
+ if (converged && slotVisitor.isEmpty()) {
+ assertSharedMarkStacksEmpty();
+ return changePhase(conn, CollectorPhase::End);
+ }
+
+ m_scheduler->didExecuteConstraints();
+ }
+
+ if (Options::logGC())
+ dataLog(slotVisitor.collectorMarkStack().size(), "+", m_mutatorMarkStack->size() + slotVisitor.mutatorMarkStack().size(), " ");
+
{
- GCPHASE(StopAllocation);
- m_objectSpace.stopAllocating();
- if (m_operationInProgress == FullCollection)
- m_storageSpace.didStartFullCollection();
+ ParallelModeEnabler enabler(slotVisitor);
+ slotVisitor.drainInParallel(m_scheduler->timeToResume());
}
+
+ m_scheduler->synchronousDrainingDidStall();
- {
- GCPHASE(FlushWriteBarrierBuffer);
- if (m_operationInProgress == EdenCollection)
- m_writeBarrierBuffer.flush(*this);
- else
- m_writeBarrierBuffer.reset();
+ if (slotVisitor.didReachTermination())
+        return true; // This is like relooping to the top of runFixpointPhase().
+
+ if (!m_scheduler->shouldResume())
+ return true;
+
+ m_scheduler->willResume();
+
+ if (Options::logGC()) {
+ double thisPauseMS = (MonotonicTime::now() - m_stopTime).milliseconds();
+ dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), ")...]\n");
}
- markRoots();
+ // Forgive the mutator for its past failures to keep up.
+ // FIXME: Figure out if moving this to different places results in perf changes.
+ m_incrementBalance = 0;
+
+ return changePhase(conn, CollectorPhase::Concurrent);
+}
+
+NEVER_INLINE bool Heap::runConcurrentPhase(GCConductor conn)
+{
+ SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
+
+ switch (conn) {
+ case GCConductor::Mutator: {
+        // When the mutator has the conn, we poll runConcurrentPhase() every time someone calls
+        // stopIfNecessary(), i.e. on every allocation slow path. When that happens, we check whether
+        // it's time to stop and do some work.
+ if (slotVisitor.didReachTermination()
+ || m_scheduler->shouldStop())
+ return changePhase(conn, CollectorPhase::Reloop);
+
+ // We could be coming from a collector phase that stuffed our SlotVisitor, so make sure we donate
+ // everything. This is super cheap if the SlotVisitor is already empty.
+ slotVisitor.donateAll();
+ return false;
+ }
+ case GCConductor::Collector: {
+ {
+ ParallelModeEnabler enabler(slotVisitor);
+ slotVisitor.drainInParallelPassively(m_scheduler->timeToStop());
+ }
+ return changePhase(conn, CollectorPhase::Reloop);
+ } }
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+}
+
+NEVER_INLINE bool Heap::runReloopPhase(GCConductor conn)
+{
+ if (Options::logGC())
+ dataLog("[GC<", RawPointer(this), ">: ", gcConductorShortName(conn), " ");
+
+ m_scheduler->didStop();
+
+ if (Options::logGC())
+ m_scheduler->log();
+
+ return changePhase(conn, CollectorPhase::Fixpoint);
+}
+
+NEVER_INLINE bool Heap::runEndPhase(GCConductor conn)
+{
+ m_scheduler->endCollection();
+
{
- GCPHASE(ReapingWeakHandles);
- m_objectSpace.reapWeakSets();
+ auto locker = holdLock(m_markingMutex);
+ m_parallelMarkersShouldExit = true;
+ m_markingConditionVariable.notifyAll();
+ }
+ m_helperClient.finish();
+
+ iterateExecutingAndCompilingCodeBlocks(
+ [&] (CodeBlock* codeBlock) {
+ writeBarrier(codeBlock);
+ });
+
+ updateObjectCounts();
+ endMarking();
+
+ if (m_verifier) {
+ m_verifier->gatherLiveObjects(HeapVerifier::Phase::AfterMarking);
+ m_verifier->verify(HeapVerifier::Phase::AfterMarking);
+ }
+
+ if (vm()->typeProfiler())
+ vm()->typeProfiler()->invalidateTypeSetCache();
+
+ reapWeakHandles();
+ pruneStaleEntriesFromWeakGCMaps();
+ sweepArrayBuffers();
+ snapshotUnswept();
+ finalizeUnconditionalFinalizers();
+ removeDeadCompilerWorklistEntries();
+ notifyIncrementalSweeper();
+
+ m_codeBlocks->iterateCurrentlyExecuting(
+ [&] (CodeBlock* codeBlock) {
+ writeBarrier(codeBlock);
+ });
+ m_codeBlocks->clearCurrentlyExecuting();
+
+ m_objectSpace.prepareForAllocation();
+ updateAllocationLimits();
+
+ didFinishCollection();
+
+ if (m_verifier) {
+ m_verifier->trimDeadObjects();
+ m_verifier->verify(HeapVerifier::Phase::AfterGC);
}
- JAVASCRIPTCORE_GC_MARKED();
+ if (false) {
+ dataLog("Heap state after GC:\n");
+ m_objectSpace.dumpBits();
+ }
+
+ if (Options::logGC()) {
+ double thisPauseMS = (m_afterGC - m_stopTime).milliseconds();
+ dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), "), cycle ", (m_afterGC - m_beforeGC).milliseconds(), "ms END]\n");
+ }
{
- GCPHASE(SweepingArrayBuffers);
- m_arrayBuffers.sweep();
+ auto locker = holdLock(*m_threadLock);
+ m_requests.removeFirst();
+ m_lastServedTicket++;
+ clearMutatorWaiting();
+ }
+ ParkingLot::unparkAll(&m_worldState);
+
+ if (false)
+ dataLog("GC END!\n");
+
+ setNeedFinalize();
+
+ m_lastGCStartTime = m_currentGCStartTime;
+ m_lastGCEndTime = MonotonicTime::now();
+
+ return changePhase(conn, CollectorPhase::NotRunning);
+}
+
+bool Heap::changePhase(GCConductor conn, CollectorPhase nextPhase)
+{
+ checkConn(conn);
+
+ m_nextPhase = nextPhase;
+
+ return finishChangingPhase(conn);
+}
+
+NEVER_INLINE bool Heap::finishChangingPhase(GCConductor conn)
+{
+ checkConn(conn);
+
+ if (m_nextPhase == m_currentPhase)
+ return true;
+
+ if (false)
+ dataLog(conn, ": Going to phase: ", m_nextPhase, " (from ", m_currentPhase, ")\n");
+
+ bool suspendedBefore = worldShouldBeSuspended(m_currentPhase);
+ bool suspendedAfter = worldShouldBeSuspended(m_nextPhase);
+
+ if (suspendedBefore != suspendedAfter) {
+ if (suspendedBefore) {
+ RELEASE_ASSERT(!suspendedAfter);
+
+ resumeThePeriphery();
+ if (conn == GCConductor::Collector)
+ resumeTheMutator();
+ else
+ handleNeedFinalize();
+ } else {
+ RELEASE_ASSERT(!suspendedBefore);
+ RELEASE_ASSERT(suspendedAfter);
+
+ if (conn == GCConductor::Collector) {
+ waitWhileNeedFinalize();
+ if (!stopTheMutator()) {
+ if (false)
+ dataLog("Returning false.\n");
+ return false;
+ }
+ } else {
+ sanitizeStackForVM(m_vm);
+ handleNeedFinalize();
+ }
+ stopThePeriphery(conn);
+ }
}
+
+ m_currentPhase = m_nextPhase;
+ return true;
+}
- if (m_operationInProgress == FullCollection) {
- m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
- MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
- m_objectSpace.forEachBlock(functor);
+void Heap::stopThePeriphery(GCConductor conn)
+{
+ if (m_collectorBelievesThatTheWorldIsStopped) {
+ dataLog("FATAL: world already stopped.\n");
+ RELEASE_ASSERT_NOT_REACHED();
}
+
+ if (m_mutatorDidRun)
+ m_mutatorExecutionVersion++;
+
+ m_mutatorDidRun = false;
- if (m_operationInProgress == FullCollection)
- copyBackingStores<FullCollection>();
- else
- copyBackingStores<EdenCollection>();
+ suspendCompilerThreads();
+ m_collectorBelievesThatTheWorldIsStopped = true;
+
+ forEachSlotVisitor(
+ [&] (SlotVisitor& slotVisitor) {
+ slotVisitor.updateMutatorIsStopped(NoLockingNecessary);
+ });
+#if ENABLE(JIT)
{
- GCPHASE(FinalizeUnconditionalFinalizers);
- finalizeUnconditionalFinalizers();
+ DeferGCForAWhile awhile(*this);
+ if (JITWorklist::instance()->completeAllForVM(*m_vm)
+ && conn == GCConductor::Collector)
+ setGCDidJIT();
+ }
+#else
+ UNUSED_PARAM(conn);
+#endif // ENABLE(JIT)
+
+ vm()->shadowChicken().update(*vm(), vm()->topCallFrame);
+
+ m_structureIDTable.flushOldTables();
+ m_objectSpace.stopAllocating();
+
+ m_stopTime = MonotonicTime::now();
+}
+
+NEVER_INLINE void Heap::resumeThePeriphery()
+{
+ // Calling resumeAllocating does the Right Thing depending on whether this is the end of a
+ // collection cycle or this is just a concurrent phase within a collection cycle:
+ // - At end of collection cycle: it's a no-op because prepareForAllocation already cleared the
+ // last active block.
+ // - During collection cycle: it reinstates the last active block.
+ m_objectSpace.resumeAllocating();
+
+ m_barriersExecuted = 0;
+
+ if (!m_collectorBelievesThatTheWorldIsStopped) {
+ dataLog("Fatal: collector does not believe that the world is stopped.\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ m_collectorBelievesThatTheWorldIsStopped = false;
+
+ // FIXME: This could be vastly improved: we want to grab the locks in the order in which they
+ // become available. We basically want a lockAny() method that will lock whatever lock is available
+ // and tell you which one it locked. That would require teaching ParkingLot how to park on multiple
+ // queues at once, which is totally achievable - it would just require memory allocation, which is
+ // suboptimal but not a disaster. Alternatively, we could replace the SlotVisitor rightToRun lock
+ // with a DLG-style handshake mechanism, but that seems not as general.
+ Vector<SlotVisitor*, 8> slotVisitorsToUpdate;
+
+ forEachSlotVisitor(
+ [&] (SlotVisitor& slotVisitor) {
+ slotVisitorsToUpdate.append(&slotVisitor);
+ });
+
+ for (unsigned countdown = 40; !slotVisitorsToUpdate.isEmpty() && countdown--;) {
+ for (unsigned index = 0; index < slotVisitorsToUpdate.size(); ++index) {
+ SlotVisitor& slotVisitor = *slotVisitorsToUpdate[index];
+ bool remove = false;
+ if (slotVisitor.hasAcknowledgedThatTheMutatorIsResumed())
+ remove = true;
+ else if (auto locker = tryHoldLock(slotVisitor.rightToRun())) {
+ slotVisitor.updateMutatorIsStopped(locker);
+ remove = true;
+ }
+ if (remove) {
+ slotVisitorsToUpdate[index--] = slotVisitorsToUpdate.last();
+ slotVisitorsToUpdate.takeLast();
+ }
+ }
+ std::this_thread::yield();
+ }
+
+ for (SlotVisitor* slotVisitor : slotVisitorsToUpdate)
+ slotVisitor->updateMutatorIsStopped();
+
+ resumeCompilerThreads();
+}
+
+bool Heap::stopTheMutator()
+{
+ for (;;) {
+ unsigned oldState = m_worldState.load();
+ if (oldState & stoppedBit) {
+ RELEASE_ASSERT(!(oldState & hasAccessBit));
+ RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
+ RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
+ return true;
+ }
+
+ if (oldState & mutatorHasConnBit) {
+ RELEASE_ASSERT(!(oldState & hasAccessBit));
+ RELEASE_ASSERT(!(oldState & stoppedBit));
+ return false;
+ }
+
+ if (!(oldState & hasAccessBit)) {
+ RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
+ RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
+ // We can stop the world instantly.
+ if (m_worldState.compareExchangeWeak(oldState, oldState | stoppedBit))
+ return true;
+ continue;
+ }
+
+ // Transfer the conn to the mutator and bail.
+ RELEASE_ASSERT(oldState & hasAccessBit);
+ RELEASE_ASSERT(!(oldState & stoppedBit));
+ unsigned newState = (oldState | mutatorHasConnBit) & ~mutatorWaitingBit;
+ if (m_worldState.compareExchangeWeak(oldState, newState)) {
+ if (false)
+ dataLog("Handed off the conn.\n");
+ m_stopIfNecessaryTimer->scheduleSoon();
+ ParkingLot::unparkAll(&m_worldState);
+ return false;
+ }
+ }
+}
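stopTheMutator() and the other slow paths below all use the same lock-free idiom on m_worldState: load the packed state word, decide what the successor state should be, and retry the compare-exchange until it sticks or a different branch applies. A generic sketch of that idiom with std::atomic and hypothetical bit names (not the actual Heap state machine):

#include <atomic>
#include <cstdint>

// Hypothetical state bits, analogous in spirit to hasAccessBit / stoppedBit above.
constexpr uint32_t exampleAccessBit  = 1u << 0;
constexpr uint32_t exampleStoppedBit = 1u << 1;

// Try to set the "stopped" bit, but only if no thread currently has access.
// Returns true on success, false if the state said someone has access.
bool tryStopSketch(std::atomic<uint32_t>& state)
{
    for (;;) {
        uint32_t oldState = state.load();
        if (oldState & exampleAccessBit)
            return false;                       // someone has access; caller must hand off instead
        if (oldState & exampleStoppedBit)
            return true;                        // already stopped
        uint32_t newState = oldState | exampleStoppedBit;
        // compare_exchange_weak may fail spuriously; the loop retries, mirroring
        // the compareExchangeWeak retry loops in the code above.
        if (state.compare_exchange_weak(oldState, newState))
            return true;
    }
}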
+
+NEVER_INLINE void Heap::resumeTheMutator()
+{
+ if (false)
+ dataLog("Resuming the mutator.\n");
+ for (;;) {
+ unsigned oldState = m_worldState.load();
+ if (!!(oldState & hasAccessBit) != !(oldState & stoppedBit)) {
+ dataLog("Fatal: hasAccess = ", !!(oldState & hasAccessBit), ", stopped = ", !!(oldState & stoppedBit), "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ if (oldState & mutatorHasConnBit) {
+ dataLog("Fatal: mutator has the conn.\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ if (!(oldState & stoppedBit)) {
+ if (false)
+ dataLog("Returning because not stopped.\n");
+ return;
+ }
+
+ if (m_worldState.compareExchangeWeak(oldState, oldState & ~stoppedBit)) {
+ if (false)
+ dataLog("CASing and returning.\n");
+ ParkingLot::unparkAll(&m_worldState);
+ return;
+ }
+ }
+}
+
+void Heap::stopIfNecessarySlow()
+{
+ while (stopIfNecessarySlow(m_worldState.load())) { }
+
+ RELEASE_ASSERT(m_worldState.load() & hasAccessBit);
+ RELEASE_ASSERT(!(m_worldState.load() & stoppedBit));
+
+ handleGCDidJIT();
+ handleNeedFinalize();
+ m_mutatorDidRun = true;
+}
+
+bool Heap::stopIfNecessarySlow(unsigned oldState)
+{
+ RELEASE_ASSERT(oldState & hasAccessBit);
+ RELEASE_ASSERT(!(oldState & stoppedBit));
+
+ // It's possible for us to wake up with finalization already requested but the world not yet
+ // resumed. If that happens, we can't run finalization yet.
+ if (handleNeedFinalize(oldState))
+ return true;
+
+ // FIXME: When entering the concurrent phase, we could arrange for this branch not to fire, and then
+ // have the SlotVisitor do things to the m_worldState to make this branch fire again. That would
+ // prevent us from polling this so much. Ideally, stopIfNecessary would ignore the mutatorHasConnBit
+ // and there would be some other bit indicating whether we were in some GC phase other than the
+ // NotRunning or Concurrent ones.
+ if (oldState & mutatorHasConnBit)
+ collectInMutatorThread();
+
+ return false;
+}
+
+NEVER_INLINE void Heap::collectInMutatorThread()
+{
+ CollectingScope collectingScope(*this);
+ for (;;) {
+ RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, nullptr);
+ switch (result) {
+ case RunCurrentPhaseResult::Finished:
+ return;
+ case RunCurrentPhaseResult::Continue:
+ break;
+ case RunCurrentPhaseResult::NeedCurrentThreadState:
+ sanitizeStackForVM(m_vm);
+ auto lambda = [&] (CurrentThreadState& state) {
+ for (;;) {
+ RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, &state);
+ switch (result) {
+ case RunCurrentPhaseResult::Finished:
+ return;
+ case RunCurrentPhaseResult::Continue:
+ break;
+ case RunCurrentPhaseResult::NeedCurrentThreadState:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+ };
+ callWithCurrentThreadState(scopedLambda<void(CurrentThreadState&)>(WTFMove(lambda)));
+ return;
+ }
+ }
+}
+
+template<typename Func>
+void Heap::waitForCollector(const Func& func)
+{
+ for (;;) {
+ bool done;
+ {
+ LockHolder locker(*m_threadLock);
+ done = func(locker);
+ if (!done) {
+ setMutatorWaiting();
+
+ // At this point, the collector knows that we intend to wait, and it will clear the
+ // waiting bit and then unparkAll when the GC cycle finishes. Clearing the bit
+ // prevents us from parking unless the world is also being stopped. Unparking after
+ // clearing means that if the clearing happens after we park, then we will still unpark.
+ }
+ }
+
+ // If we're in a stop-the-world scenario, we need to wait for that even if done is true.
+ unsigned oldState = m_worldState.load();
+ if (stopIfNecessarySlow(oldState))
+ continue;
+
+ // FIXME: We wouldn't need this if stopIfNecessarySlow() had a mode where it knew to just
+ // do the collection.
+ relinquishConn();
+
+ if (done) {
+ clearMutatorWaiting(); // Clean up just in case.
+ return;
+ }
+
+ // If mutatorWaitingBit is still set then we want to wait.
+ ParkingLot::compareAndPark(&m_worldState, oldState | mutatorWaitingBit);
+ }
+}
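The comment above depends on ordering: the mutator publishes mutatorWaitingBit, the collector clears that bit and only then unparks, and the mutator only parks if the state word still holds the value it last observed. A minimal condition-variable sketch of that compare-and-park idiom (ParkingLot is WTF machinery; the mutex and condition variable here are stand-ins):

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    std::mutex parkLock;
    std::condition_variable parkCondition;

    // Park only while `word` still holds `expected`; any change to the word lets us through.
    void compareAndPark(std::atomic<unsigned>& word, unsigned expected)
    {
        std::unique_lock<std::mutex> lock(parkLock);
        parkCondition.wait(lock, [&] { return word.load() != expected; });
    }

    // The waker mutates the word first (e.g. clears the waiting bit) and then notifies under the
    // same mutex, so a waiter either sees the new value and never parks, or parks and is woken.
    void unparkAll()
    {
        std::lock_guard<std::mutex> lock(parkLock);
        parkCondition.notify_all();
    }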
+
+void Heap::acquireAccessSlow()
+{
+ for (;;) {
+ unsigned oldState = m_worldState.load();
+ RELEASE_ASSERT(!(oldState & hasAccessBit));
+
+ if (oldState & stoppedBit) {
+ if (verboseStop) {
+ dataLog("Stopping in acquireAccess!\n");
+ WTFReportBacktrace();
+ }
+ // Wait until we're not stopped anymore.
+ ParkingLot::compareAndPark(&m_worldState, oldState);
+ continue;
+ }
+
+ RELEASE_ASSERT(!(oldState & stoppedBit));
+ unsigned newState = oldState | hasAccessBit;
+ if (m_worldState.compareExchangeWeak(oldState, newState)) {
+ handleGCDidJIT();
+ handleNeedFinalize();
+ m_mutatorDidRun = true;
+ stopIfNecessary();
+ return;
+ }
+ }
+}
+
+void Heap::releaseAccessSlow()
+{
+ for (;;) {
+ unsigned oldState = m_worldState.load();
+ if (!(oldState & hasAccessBit)) {
+ dataLog("FATAL: Attempting to release access but the mutator does not have access.\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ if (oldState & stoppedBit) {
+ dataLog("FATAL: Attempting to release access but the mutator is stopped.\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ if (handleNeedFinalize(oldState))
+ continue;
+
+ unsigned newState = oldState & ~(hasAccessBit | mutatorHasConnBit);
+
+ if ((oldState & mutatorHasConnBit)
+ && m_nextPhase != m_currentPhase) {
+ // This means that the collector thread had given us the conn so that we would do something
+ // for it. Stop ourselves as we release access. This ensures that acquireAccess blocks. In
+ // the meantime, since we're handing the conn over, the collector will be awoken and it is
+ // sure to have work to do.
+ newState |= stoppedBit;
+ }
+
+ if (m_worldState.compareExchangeWeak(oldState, newState)) {
+ if (oldState & mutatorHasConnBit)
+ finishRelinquishingConn();
+ return;
+ }
+ }
+}
+
+bool Heap::relinquishConn(unsigned oldState)
+{
+ RELEASE_ASSERT(oldState & hasAccessBit);
+ RELEASE_ASSERT(!(oldState & stoppedBit));
+
+ if (!(oldState & mutatorHasConnBit))
+ return false; // Done.
+
+ if (m_threadShouldStop)
+ return false;
+
+ if (!m_worldState.compareExchangeWeak(oldState, oldState & ~mutatorHasConnBit))
+ return true; // Loop around.
+
+ finishRelinquishingConn();
+ return true;
+}
+
+void Heap::finishRelinquishingConn()
+{
+ if (false)
+ dataLog("Relinquished the conn.\n");
+
+ sanitizeStackForVM(m_vm);
+
+ auto locker = holdLock(*m_threadLock);
+ if (!m_requests.isEmpty())
+ m_threadCondition->notifyOne(locker);
+ ParkingLot::unparkAll(&m_worldState);
+}
+
+void Heap::relinquishConn()
+{
+ while (relinquishConn(m_worldState.load())) { }
+}
+
+bool Heap::handleGCDidJIT(unsigned oldState)
+{
+ RELEASE_ASSERT(oldState & hasAccessBit);
+ if (!(oldState & gcDidJITBit))
+ return false;
+ if (m_worldState.compareExchangeWeak(oldState, oldState & ~gcDidJITBit)) {
+ WTF::crossModifyingCodeFence();
+ return true;
+ }
+ return true;
+}
+
+NEVER_INLINE bool Heap::handleNeedFinalize(unsigned oldState)
+{
+ RELEASE_ASSERT(oldState & hasAccessBit);
+ RELEASE_ASSERT(!(oldState & stoppedBit));
+
+ if (!(oldState & needFinalizeBit))
+ return false;
+ if (m_worldState.compareExchangeWeak(oldState, oldState & ~needFinalizeBit)) {
+ finalize();
+ // Wake up anyone waiting for us to finalize. Note that they may have woken up already, in
+ // which case they would be waiting for us to release heap access.
+ ParkingLot::unparkAll(&m_worldState);
+ return true;
+ }
+ return true;
+}
+
+void Heap::handleGCDidJIT()
+{
+ while (handleGCDidJIT(m_worldState.load())) { }
+}
+
+void Heap::handleNeedFinalize()
+{
+ while (handleNeedFinalize(m_worldState.load())) { }
+}
+
+void Heap::setGCDidJIT()
+{
+ m_worldState.transaction(
+ [&] (unsigned& state) {
+ RELEASE_ASSERT(state & stoppedBit);
+ state |= gcDidJITBit;
+ });
+}
+
+void Heap::setNeedFinalize()
+{
+ m_worldState.exchangeOr(needFinalizeBit);
+ ParkingLot::unparkAll(&m_worldState);
+ m_stopIfNecessaryTimer->scheduleSoon();
+}
+
+void Heap::waitWhileNeedFinalize()
+{
+ for (;;) {
+ unsigned oldState = m_worldState.load();
+ if (!(oldState & needFinalizeBit)) {
+ // This means that either there was no finalize request or the main thread will finalize
+ // with heap access, so a subsequent call to stopTheWorld() will return only when
+ // finalize finishes.
+ return;
+ }
+ ParkingLot::compareAndPark(&m_worldState, oldState);
}
+}
+
+void Heap::setMutatorWaiting()
+{
+ m_worldState.exchangeOr(mutatorWaitingBit);
+}
+
+void Heap::clearMutatorWaiting()
+{
+ m_worldState.exchangeAnd(~mutatorWaitingBit);
+}
+
+void Heap::notifyThreadStopping(const AbstractLocker&)
+{
+ m_threadIsStopping = true;
+ clearMutatorWaiting();
+ ParkingLot::unparkAll(&m_worldState);
+}
+
+void Heap::finalize()
+{
+ MonotonicTime before;
+ if (Options::logGC()) {
+ before = MonotonicTime::now();
+ dataLog("[GC<", RawPointer(this), ">: finalize ");
+ }
+
{
- GCPHASE(DeleteCodeBlocks);
+ SweepingScope helpingGCScope(*this);
deleteUnmarkedCompiledCode();
+ deleteSourceProviderCaches();
+ sweepLargeAllocations();
}
+
+ if (HasOwnPropertyCache* cache = vm()->hasOwnPropertyCache())
+ cache->clear();
+
+ if (Options::sweepSynchronously())
+ sweepSynchronously();
- {
- GCPHASE(DeleteSourceProviderCaches);
- m_vm->clearSourceProviderCaches();
+ if (Options::logGC()) {
+ MonotonicTime after = MonotonicTime::now();
+ dataLog((after - before).milliseconds(), "ms]\n");
}
+}
- if (m_operationInProgress == FullCollection)
- m_sweeper->startSweeping(m_blockSnapshot);
+Heap::Ticket Heap::requestCollection(std::optional<CollectionScope> scope)
+{
+ stopIfNecessary();
+
+ ASSERT(vm()->currentThreadIsHoldingAPILock());
+ RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
+
+ LockHolder locker(*m_threadLock);
+ // We may be able to steal the conn. That only works if the collector is definitely not running
+ // right now. This is an optimization that prevents the collector thread from ever starting in most
+ // cases.
+ ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
+ if ((m_lastServedTicket == m_lastGrantedTicket) && (m_currentPhase == CollectorPhase::NotRunning)) {
+ if (false)
+ dataLog("Taking the conn.\n");
+ m_worldState.exchangeOr(mutatorHasConnBit);
+ }
+
+ m_requests.append(scope);
+ m_lastGrantedTicket++;
+ if (!(m_worldState.load() & mutatorHasConnBit))
+ m_threadCondition->notifyOne(locker);
+ return m_lastGrantedTicket;
+}
- {
- GCPHASE(AddCurrentlyExecutingCodeBlocksToRememberedSet);
- m_codeBlocks.rememberCurrentlyExecutingCodeBlocks(this);
+void Heap::waitForCollection(Ticket ticket)
+{
+ waitForCollector(
+ [&] (const AbstractLocker&) -> bool {
+ return m_lastServedTicket >= ticket;
+ });
+}
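requestCollection() and waitForCollection() amount to a ticket protocol: each request bumps m_lastGrantedTicket, the collector bumps m_lastServedTicket as it retires requests, and a waiter is done once its ticket has been served. A self-contained sketch of just that bookkeeping, leaving out the conn-stealing fast path and the WTF locking classes:

    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    struct TicketedCollector {
        std::mutex lock;
        std::condition_variable condition;
        uint64_t lastGrantedTicket { 0 };
        uint64_t lastServedTicket { 0 };

        // Mutator side: queue a request and get a ticket to wait on.
        uint64_t request()
        {
            std::lock_guard<std::mutex> locker(lock);
            return ++lastGrantedTicket;
        }

        // Mutator side: block until the collector has retired our request.
        void waitFor(uint64_t ticket)
        {
            std::unique_lock<std::mutex> locker(lock);
            condition.wait(locker, [&] { return lastServedTicket >= ticket; });
        }

        // Collector side: retire the oldest outstanding request and wake any waiters.
        void serveOne()
        {
            std::lock_guard<std::mutex> locker(lock);
            if (lastServedTicket < lastGrantedTicket)
                ++lastServedTicket;
            condition.notify_all();
        }
    };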
+
+void Heap::sweepLargeAllocations()
+{
+ m_objectSpace.sweepLargeAllocations();
+}
+
+void Heap::suspendCompilerThreads()
+{
+#if ENABLE(DFG_JIT)
+ // We eagerly create the worklists here so that the mutator cannot start a new worklist
+ // after we have suspended the ones it had already started. That's not very expensive since
+ // the worklists use AutomaticThreads anyway.
+ for (unsigned i = DFG::numberOfWorklists(); i--;)
+ DFG::ensureWorklistForIndex(i).suspendAllThreads();
+#endif
+}
+
+void Heap::willStartCollection(std::optional<CollectionScope> scope)
+{
+ if (Options::logGC())
+ dataLog("=> ");
+
+ if (shouldDoFullCollection(scope)) {
+ m_collectionScope = CollectionScope::Full;
+ m_shouldDoFullCollection = false;
+ if (Options::logGC())
+ dataLog("FullCollection, ");
+ if (false)
+ dataLog("Full collection!\n");
+ } else {
+ m_collectionScope = CollectionScope::Eden;
+ if (Options::logGC())
+ dataLog("EdenCollection, ");
+ if (false)
+ dataLog("Eden collection!\n");
+ }
+ if (m_collectionScope == CollectionScope::Full) {
+ m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
+ m_extraMemorySize = 0;
+ m_deprecatedExtraMemorySize = 0;
+#if ENABLE(RESOURCE_USAGE)
+ m_externalMemorySize = 0;
+#endif
+
+ if (m_fullActivityCallback)
+ m_fullActivityCallback->willCollect();
+ } else {
+ ASSERT(m_collectionScope == CollectionScope::Eden);
+ m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
}
- m_bytesAbandonedThisCycle = 0;
+ if (m_edenActivityCallback)
+ m_edenActivityCallback->willCollect();
- {
- GCPHASE(ResetAllocators);
- m_objectSpace.resetAllocators();
+ for (auto* observer : m_observers)
+ observer->willGarbageCollect();
+}
+
+void Heap::prepareForMarking()
+{
+ m_objectSpace.prepareForMarking();
+}
+
+void Heap::reapWeakHandles()
+{
+ m_objectSpace.reapWeakSets();
+}
+
+void Heap::pruneStaleEntriesFromWeakGCMaps()
+{
+ if (m_collectionScope != CollectionScope::Full)
+ return;
+ for (auto& pruneCallback : m_weakGCMaps.values())
+ pruneCallback();
+}
+
+void Heap::sweepArrayBuffers()
+{
+ m_arrayBuffers.sweep();
+}
+
+void Heap::snapshotUnswept()
+{
+ TimingScope timingScope(*this, "Heap::snapshotUnswept");
+ m_objectSpace.snapshotUnswept();
+}
+
+void Heap::deleteSourceProviderCaches()
+{
+ if (*m_lastCollectionScope == CollectionScope::Full)
+ m_vm->clearSourceProviderCaches();
+}
+
+void Heap::notifyIncrementalSweeper()
+{
+ if (m_collectionScope == CollectionScope::Full) {
+ if (!m_logicallyEmptyWeakBlocks.isEmpty())
+ m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
+ }
+
+ m_sweeper->startSweeping();
+}
+
+void Heap::updateAllocationLimits()
+{
+ static const bool verbose = false;
+
+ if (verbose) {
+ dataLog("\n");
+ dataLog("bytesAllocatedThisCycle = ", m_bytesAllocatedThisCycle, "\n");
}
- size_t currentHeapSize = sizeAfterCollect();
- if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
- HeapStatistics::exitWithFailure();
+ // Calculate our current heap size threshold for the purpose of figuring out when we should
+ // run another collection. This isn't the same as either size() or capacity(), though it should
+ // be somewhere between the two. The key is to match the size calculations involved in calls to
+ // didAllocate(), while never dangerously underestimating capacity(). In extreme cases of
+ // fragmentation, we may have size() much smaller than capacity().
+ size_t currentHeapSize = 0;
+
+ // For marked space, we use the total number of bytes visited. This matches the logic for
+ // MarkedAllocator's calls to didAllocate(), which effectively accounts for the total size of
+ // objects allocated rather than blocks used. This will underestimate capacity(), and in case
+ // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because
+ // cells usually have a narrow range of sizes. So, the underestimation is probably OK.
+ currentHeapSize += m_totalBytesVisited;
+ if (verbose)
+ dataLog("totalBytesVisited = ", m_totalBytesVisited, ", currentHeapSize = ", currentHeapSize, "\n");
+
+ // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time
+ // extra memory reporting.
+ currentHeapSize += extraMemorySize();
+ if (!ASSERT_DISABLED) {
+ Checked<size_t, RecordOverflow> checkedCurrentHeapSize = m_totalBytesVisited;
+ checkedCurrentHeapSize += extraMemorySize();
+ ASSERT(!checkedCurrentHeapSize.hasOverflowed() && checkedCurrentHeapSize.unsafeGet() == currentHeapSize);
+ }
- if (m_operationInProgress == FullCollection) {
+ if (verbose)
+ dataLog("extraMemorySize() = ", extraMemorySize(), ", currentHeapSize = ", currentHeapSize, "\n");
+
+ if (m_collectionScope == CollectionScope::Full) {
// To avoid pathological GC churn in very small and very large heaps, we set
// the new allocation limit based on the current size of the heap, with a
// fixed minimum.
m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
+ if (verbose)
+ dataLog("Full: maxHeapSize = ", m_maxHeapSize, "\n");
m_maxEdenSize = m_maxHeapSize - currentHeapSize;
+ if (verbose)
+ dataLog("Full: maxEdenSize = ", m_maxEdenSize, "\n");
+ m_sizeAfterLastFullCollect = currentHeapSize;
+ if (verbose)
+ dataLog("Full: sizeAfterLastFullCollect = ", currentHeapSize, "\n");
+ m_bytesAbandonedSinceLastFullCollect = 0;
+ if (verbose)
+ dataLog("Full: bytesAbandonedSinceLastFullCollect = ", 0, "\n");
} else {
ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
- m_maxEdenSize = m_maxHeapSize - currentHeapSize;
+ // Theoretically, we shouldn't ever scan more memory than the heap size we planned to have.
+ // But we are sloppy, so we have to defend against the overflow.
+ m_maxEdenSize = currentHeapSize > m_maxHeapSize ? 0 : m_maxHeapSize - currentHeapSize;
+ if (verbose)
+ dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
+ m_sizeAfterLastEdenCollect = currentHeapSize;
+ if (verbose)
+ dataLog("Eden: sizeAfterLastEdenCollect = ", currentHeapSize, "\n");
double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
double minEdenToOldGenerationRatio = 1.0 / 3.0;
if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
m_shouldDoFullCollection = true;
+ // This seems suspect at first, but what it does is ensure that the nursery size is fixed.
m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
+ if (verbose)
+ dataLog("Eden: maxHeapSize = ", m_maxHeapSize, "\n");
m_maxEdenSize = m_maxHeapSize - currentHeapSize;
+ if (verbose)
+ dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
+ if (m_fullActivityCallback) {
+ ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
+ m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
+ }
}
m_sizeAfterLastCollect = currentHeapSize;
-
+ if (verbose)
+ dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n");
m_bytesAllocatedThisCycle = 0;
- double lastGCEndTime = WTF::monotonicallyIncreasingTime();
- m_lastGCLength = lastGCEndTime - lastGCStartTime;
-
- if (Options::recordGCPauseTimes())
- HeapStatistics::recordGCPauseTime(lastGCStartTime, lastGCEndTime);
- RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection);
- m_operationInProgress = NoOperation;
- JAVASCRIPTCORE_GC_END();
+ if (Options::logGC())
+ dataLog("=> ", currentHeapSize / 1024, "kb, ");
+}
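After an Eden collection the code above keeps the nursery roughly fixed: it checks whether eden has shrunk below a third of the total budget, then grows m_maxHeapSize by the bytes that survived this cycle. A small standalone sketch of just that arithmetic, leaving out the minHeapSize()/proportionalHeapSize() policy used on the full-collection path:

    #include <cstddef>

    struct EdenLimits {
        size_t maxHeapSize;
        size_t maxEdenSize;
        bool shouldDoFullCollection;
    };

    // Assumes currentHeapSize >= sizeAfterLastCollect, as the ASSERT above does.
    EdenLimits updateAfterEdenCollection(size_t currentHeapSize, size_t sizeAfterLastCollect, size_t maxHeapSize)
    {
        EdenLimits limits { maxHeapSize, 0, false };

        // Defend against having scanned more than we planned for, as the code above does.
        limits.maxEdenSize = currentHeapSize > limits.maxHeapSize ? 0 : limits.maxHeapSize - currentHeapSize;

        // If eden is down to less than a third of the whole budget, ask for a full collection.
        double edenToOldGenerationRatio = static_cast<double>(limits.maxEdenSize) / static_cast<double>(limits.maxHeapSize);
        if (edenToOldGenerationRatio < 1.0 / 3.0)
            limits.shouldDoFullCollection = true;

        // Survivors of this eden cycle (currentHeapSize - sizeAfterLastCollect) join the old
        // generation, so grow the total budget by that amount; this keeps the nursery size fixed.
        limits.maxHeapSize += currentHeapSize - sizeAfterLastCollect;
        limits.maxEdenSize = limits.maxHeapSize - currentHeapSize;
        return limits;
    }

    // Example: a 16MB budget with 12MB live after this eden cycle (10MB after the previous one)
    // leaves a 4MB eden (ratio 0.25 < 1/3), so a full collection is requested; the budget still
    // grows to 18MB, restoring a 6MB nursery.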
- if (Options::useZombieMode())
- zombifyDeadObjects();
+void Heap::didFinishCollection()
+{
+ m_afterGC = MonotonicTime::now();
+ CollectionScope scope = *m_collectionScope;
+ if (scope == CollectionScope::Full)
+ m_lastFullGCLength = m_afterGC - m_beforeGC;
+ else
+ m_lastEdenGCLength = m_afterGC - m_beforeGC;
- if (Options::objectsAreImmortal())
- markDeadObjects();
+#if ENABLE(RESOURCE_USAGE)
+ ASSERT(externalMemorySize() <= extraMemorySize());
+#endif
- if (Options::showObjectStatistics())
- HeapStatistics::showObjectStatistics(this);
-
- if (Options::logGC()) {
- double after = currentTimeMS();
- dataLog(after - before, " ms, ", currentHeapSize / 1024, " kb]\n");
+ if (HeapProfiler* heapProfiler = m_vm->heapProfiler()) {
+ gatherExtraHeapSnapshotData(*heapProfiler);
+ removeDeadHeapSnapshotNodes(*heapProfiler);
}
-}
-bool Heap::collectIfNecessaryOrDefer()
-{
- if (m_deferralDepth)
- return false;
-
- if (!shouldCollect())
- return false;
-
- collect();
- return true;
-}
+ RELEASE_ASSERT(m_collectionScope);
+ m_lastCollectionScope = m_collectionScope;
+ m_collectionScope = std::nullopt;
-void Heap::markDeadObjects()
-{
- HeapIterationScope iterationScope(*this);
- m_objectSpace.forEachDeadCell<MarkObject>(iterationScope);
+ for (auto* observer : m_observers)
+ observer->didGarbageCollect(scope);
}
-void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback)
+void Heap::resumeCompilerThreads()
{
- m_activityCallback = activityCallback;
+#if ENABLE(DFG_JIT)
+ for (unsigned i = DFG::numberOfWorklists(); i--;)
+ DFG::existingWorklistForIndex(i).resumeAllThreads();
+#endif
}
-GCActivityCallback* Heap::activityCallback()
+GCActivityCallback* Heap::fullActivityCallback()
{
- return m_activityCallback.get();
+ return m_fullActivityCallback.get();
}
-void Heap::setIncrementalSweeper(PassOwnPtr<IncrementalSweeper> sweeper)
+GCActivityCallback* Heap::edenActivityCallback()
{
- m_sweeper = sweeper;
+ return m_edenActivityCallback.get();
}
IncrementalSweeper* Heap::sweeper()
@@ -989,15 +2211,18 @@ IncrementalSweeper* Heap::sweeper()
void Heap::setGarbageCollectionTimerEnabled(bool enable)
{
- if (m_activityCallback)
- m_activityCallback->setEnabled(enable);
+ if (m_fullActivityCallback)
+ m_fullActivityCallback->setEnabled(enable);
+ if (m_edenActivityCallback)
+ m_edenActivityCallback->setEnabled(enable);
}
void Heap::didAllocate(size_t bytes)
{
- if (m_activityCallback)
- m_activityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedThisCycle);
+ if (m_edenActivityCallback)
+ m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
m_bytesAllocatedThisCycle += bytes;
+ performIncrement(bytes);
}
bool Heap::isValidAllocation(size_t)
@@ -1005,7 +2230,7 @@ bool Heap::isValidAllocation(size_t)
if (!isValidThreadState(m_vm))
return false;
- if (m_operationInProgress != NoOperation)
+ if (isCurrentThreadBusy())
return false;
return true;
@@ -1024,77 +2249,512 @@ void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
}
-void Heap::addCompiledCode(ExecutableBase* executable)
+void Heap::addExecutable(ExecutableBase* executable)
{
- m_compiledCode.append(executable);
+ m_executables.append(executable);
}
-class Zombify : public MarkedBlock::VoidFunctor {
-public:
- void operator()(JSCell* cell)
- {
- void** current = reinterpret_cast<void**>(cell);
+void Heap::collectAllGarbageIfNotDoneRecently()
+{
+ if (!m_fullActivityCallback) {
+ collectAllGarbage();
+ return;
+ }
+
+ if (m_fullActivityCallback->didSyncGCRecently()) {
+ // A synchronous GC was already requested recently, so we merely accelerate the next collection.
+ reportAbandonedObjectGraph();
+ return;
+ }
+
+ m_fullActivityCallback->setDidSyncGCRecently();
+ collectAllGarbage();
+}
+
+bool Heap::shouldDoFullCollection(std::optional<CollectionScope> scope) const
+{
+ if (!Options::useGenerationalGC())
+ return true;
+
+ if (!scope)
+ return m_shouldDoFullCollection;
+ return *scope == CollectionScope::Full;
+}
+
+void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
+{
+ m_logicallyEmptyWeakBlocks.append(block);
+}
+
+void Heap::sweepAllLogicallyEmptyWeakBlocks()
+{
+ if (m_logicallyEmptyWeakBlocks.isEmpty())
+ return;
+
+ m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
+ while (sweepNextLogicallyEmptyWeakBlock()) { }
+}
+
+bool Heap::sweepNextLogicallyEmptyWeakBlock()
+{
+ if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
+ return false;
- // We want to maintain zapped-ness because that's how we know if we've called
- // the destructor.
- if (cell->isZapped())
- current++;
+ WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
- void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + MarkedBlock::blockFor(cell)->cellSize());
- for (; current < limit; current++)
- *current = reinterpret_cast<void*>(0xbbadbeef);
+ block->sweep();
+ if (block->isEmpty()) {
+ std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
+ m_logicallyEmptyWeakBlocks.removeLast();
+ WeakBlock::destroy(*this, block);
+ } else
+ m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
+
+ if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
+ m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
+ return false;
}
-};
-void Heap::zombifyDeadObjects()
+ return true;
+}
+
+size_t Heap::visitCount()
{
- // Sweep now because destructors will crash once we're zombified.
- m_objectSpace.sweep();
- HeapIterationScope iterationScope(*this);
- m_objectSpace.forEachDeadCell<Zombify>(iterationScope);
+ size_t result = 0;
+ forEachSlotVisitor(
+ [&] (SlotVisitor& visitor) {
+ result += visitor.visitCount();
+ });
+ return result;
+}
+
+size_t Heap::bytesVisited()
+{
+ size_t result = 0;
+ forEachSlotVisitor(
+ [&] (SlotVisitor& visitor) {
+ result += visitor.bytesVisited();
+ });
+ return result;
}
-void Heap::incrementDeferralDepth()
+void Heap::forEachCodeBlockImpl(const ScopedLambda<bool(CodeBlock*)>& func)
{
- RELEASE_ASSERT(m_deferralDepth < 100); // Sanity check to make sure this doesn't get ridiculous.
+ // We don't know the full set of CodeBlocks until compilation has terminated.
+ completeAllJITPlans();
+
+ return m_codeBlocks->iterate(func);
+}
+
+void Heap::forEachCodeBlockIgnoringJITPlansImpl(const ScopedLambda<bool(CodeBlock*)>& func)
+{
+ return m_codeBlocks->iterate(func);
+}
+
+void Heap::writeBarrierSlowPath(const JSCell* from)
+{
+ if (UNLIKELY(mutatorShouldBeFenced())) {
+ // In this case, the barrierThreshold is the tautological threshold, so from might not
+ // actually be black. We can't know for sure until we fire off a fence.
+ WTF::storeLoadFence();
+ if (from->cellState() != CellState::PossiblyBlack)
+ return;
+ }
- m_deferralDepth++;
+ addToRememberedSet(from);
+}
+
+bool Heap::isCurrentThreadBusy()
+{
+ return mayBeGCThread() || mutatorState() != MutatorState::Running;
}
-void Heap::decrementDeferralDepth()
+void Heap::reportExtraMemoryVisited(size_t size)
{
- RELEASE_ASSERT(m_deferralDepth >= 1);
+ size_t* counter = &m_extraMemorySize;
- m_deferralDepth--;
+ for (;;) {
+ size_t oldSize = *counter;
+ // FIXME: Change this to use SaturatedArithmetic when available.
+ // https://bugs.webkit.org/show_bug.cgi?id=170411
+ Checked<size_t, RecordOverflow> checkedNewSize = oldSize;
+ checkedNewSize += size;
+ size_t newSize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
+ if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, newSize))
+ return;
+ }
}
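reportExtraMemoryVisited() is a lock-free saturating add: the compare-and-swap is retried until it publishes a consistent value, and on overflow the counter clamps to SIZE_MAX instead of wrapping. The same idea expressed with std::atomic (the WTF atomics and the Checked<> helper are replaced by standard equivalents):

    #include <atomic>
    #include <cstddef>
    #include <limits>

    void addSaturating(std::atomic<size_t>& counter, size_t size)
    {
        size_t oldSize = counter.load(std::memory_order_relaxed);
        for (;;) {
            // Clamp instead of wrapping if oldSize + size would overflow.
            size_t newSize = oldSize > std::numeric_limits<size_t>::max() - size
                ? std::numeric_limits<size_t>::max()
                : oldSize + size;
            if (counter.compare_exchange_weak(oldSize, newSize, std::memory_order_relaxed))
                return;
            // On failure, oldSize has been reloaded with the current value; try again.
        }
    }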
-void Heap::decrementDeferralDepthAndGCIfNeeded()
+#if ENABLE(RESOURCE_USAGE)
+void Heap::reportExternalMemoryVisited(size_t size)
{
- decrementDeferralDepth();
- collectIfNecessaryOrDefer();
+ size_t* counter = &m_externalMemorySize;
+
+ for (;;) {
+ size_t oldSize = *counter;
+ if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size))
+ return;
+ }
+}
+#endif
+
+void Heap::collectIfNecessaryOrDefer(GCDeferralContext* deferralContext)
+{
+ ASSERT(!DisallowGC::isGCDisallowedOnCurrentThread());
+
+ if (!m_isSafeToCollect)
+ return;
+ switch (mutatorState()) {
+ case MutatorState::Running:
+ case MutatorState::Allocating:
+ break;
+ case MutatorState::Sweeping:
+ case MutatorState::Collecting:
+ return;
+ }
+ if (!Options::useGC())
+ return;
+
+ if (mayNeedToStop()) {
+ if (deferralContext)
+ deferralContext->m_shouldGC = true;
+ else if (isDeferred())
+ m_didDeferGCWork = true;
+ else
+ stopIfNecessary();
+ }
+
+ if (UNLIKELY(Options::gcMaxHeapSize())) {
+ if (m_bytesAllocatedThisCycle <= Options::gcMaxHeapSize())
+ return;
+ } else {
+ if (m_bytesAllocatedThisCycle <= m_maxEdenSize)
+ return;
+ }
+
+ if (deferralContext)
+ deferralContext->m_shouldGC = true;
+ else if (isDeferred())
+ m_didDeferGCWork = true;
+ else {
+ collectAsync();
+ stopIfNecessary(); // This will immediately start the collection if we have the conn.
+ }
}
-void Heap::writeBarrier(const JSCell* from)
+void Heap::decrementDeferralDepthAndGCIfNeededSlow()
{
-#if ENABLE(GGC)
- ASSERT_GC_OBJECT_LOOKS_VALID(const_cast<JSCell*>(from));
- if (!from || !isMarked(from))
+ // Can't do anything if we're still deferred.
+ if (m_deferralDepth)
return;
- Heap* heap = Heap::heap(from);
- heap->addToRememberedSet(from);
+
+ ASSERT(!isDeferred());
+
+ m_didDeferGCWork = false;
+ // FIXME: Bring back something like the DeferGCProbability mode.
+ // https://bugs.webkit.org/show_bug.cgi?id=166627
+ collectIfNecessaryOrDefer();
+}
+
+void Heap::registerWeakGCMap(void* weakGCMap, std::function<void()> pruningCallback)
+{
+ m_weakGCMaps.add(weakGCMap, WTFMove(pruningCallback));
+}
+
+void Heap::unregisterWeakGCMap(void* weakGCMap)
+{
+ m_weakGCMaps.remove(weakGCMap);
+}
+
+void Heap::didAllocateBlock(size_t capacity)
+{
+#if ENABLE(RESOURCE_USAGE)
+ m_blockBytesAllocated += capacity;
#else
- UNUSED_PARAM(from);
+ UNUSED_PARAM(capacity);
#endif
}
-void Heap::flushWriteBarrierBuffer(JSCell* cell)
+void Heap::didFreeBlock(size_t capacity)
{
-#if ENABLE(GGC)
- m_writeBarrierBuffer.flush(*this);
- m_writeBarrierBuffer.add(cell);
+#if ENABLE(RESOURCE_USAGE)
+ m_blockBytesAllocated -= capacity;
#else
- UNUSED_PARAM(cell);
+ UNUSED_PARAM(capacity);
+#endif
+}
+
+#if USE(CF)
+void Heap::setRunLoop(CFRunLoopRef runLoop)
+{
+ m_runLoop = runLoop;
+ m_fullActivityCallback->setRunLoop(runLoop);
+ m_edenActivityCallback->setRunLoop(runLoop);
+ m_sweeper->setRunLoop(runLoop);
+}
+#endif // USE(CF)
+
+void Heap::addCoreConstraints()
+{
+ m_constraintSet->add(
+ "Cs", "Conservative Scan",
+ [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
+ TimingScope preConvergenceTimingScope(*this, "Constraint: conservative scan");
+ m_objectSpace.prepareForConservativeScan();
+ ConservativeRoots conservativeRoots(*this);
+ SuperSamplerScope superSamplerScope(false);
+ gatherStackRoots(conservativeRoots);
+ gatherJSStackRoots(conservativeRoots);
+ gatherScratchBufferRoots(conservativeRoots);
+ slotVisitor.append(conservativeRoots);
+ },
+ ConstraintVolatility::GreyedByExecution);
+
+ m_constraintSet->add(
+ "Msr", "Misc Small Roots",
+ [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
+#if JSC_OBJC_API_ENABLED
+ scanExternalRememberedSet(*m_vm, slotVisitor);
+#endif
+
+ if (m_vm->smallStrings.needsToBeVisited(*m_collectionScope))
+ m_vm->smallStrings.visitStrongReferences(slotVisitor);
+
+ for (auto& pair : m_protectedValues)
+ slotVisitor.appendUnbarriered(pair.key);
+
+ if (m_markListSet && m_markListSet->size())
+ MarkedArgumentBuffer::markLists(slotVisitor, *m_markListSet);
+
+ slotVisitor.appendUnbarriered(m_vm->exception());
+ slotVisitor.appendUnbarriered(m_vm->lastException());
+ },
+ ConstraintVolatility::GreyedByExecution);
+
+ m_constraintSet->add(
+ "Sh", "Strong Handles",
+ [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
+ m_handleSet.visitStrongHandles(slotVisitor);
+ m_handleStack.visit(slotVisitor);
+ },
+ ConstraintVolatility::GreyedByExecution);
+
+ m_constraintSet->add(
+ "D", "Debugger",
+ [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
+#if ENABLE(SAMPLING_PROFILER)
+ if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
+ LockHolder locker(samplingProfiler->getLock());
+ samplingProfiler->processUnverifiedStackTraces();
+ samplingProfiler->visit(slotVisitor);
+ if (Options::logGC() == GCLogging::Verbose)
+ dataLog("Sampling Profiler data:\n", slotVisitor);
+ }
+#endif // ENABLE(SAMPLING_PROFILER)
+
+ if (m_vm->typeProfiler())
+ m_vm->typeProfilerLog()->visit(slotVisitor);
+
+ m_vm->shadowChicken().visitChildren(slotVisitor);
+ },
+ ConstraintVolatility::GreyedByExecution);
+
+ m_constraintSet->add(
+ "Jsr", "JIT Stub Routines",
+ [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
+ m_jitStubRoutines->traceMarkedStubRoutines(slotVisitor);
+ },
+ ConstraintVolatility::GreyedByExecution);
+
+ m_constraintSet->add(
+ "Ws", "Weak Sets",
+ [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
+ m_objectSpace.visitWeakSets(slotVisitor);
+ },
+ ConstraintVolatility::GreyedByMarking);
+
+ m_constraintSet->add(
+ "Wrh", "Weak Reference Harvesters",
+ [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
+ for (WeakReferenceHarvester* current = m_weakReferenceHarvesters.head(); current; current = current->next())
+ current->visitWeakReferences(slotVisitor);
+ },
+ ConstraintVolatility::GreyedByMarking);
+
+#if ENABLE(DFG_JIT)
+ m_constraintSet->add(
+ "Dw", "DFG Worklists",
+ [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
+ for (unsigned i = DFG::numberOfWorklists(); i--;)
+ DFG::existingWorklistForIndex(i).visitWeakReferences(slotVisitor);
+
+ // FIXME: This is almost certainly unnecessary.
+ // https://bugs.webkit.org/show_bug.cgi?id=166829
+ DFG::iterateCodeBlocksForGC(
+ *m_vm,
+ [&] (CodeBlock* codeBlock) {
+ slotVisitor.appendUnbarriered(codeBlock);
+ });
+
+ if (Options::logGC() == GCLogging::Verbose)
+ dataLog("DFG Worklists:\n", slotVisitor);
+ },
+ ConstraintVolatility::GreyedByMarking);
#endif
+
+ m_constraintSet->add(
+ "Cb", "CodeBlocks",
+ [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
+ iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(
+ [&] (CodeBlock* codeBlock) {
+ // Visit the CodeBlock as a constraint only if it's black.
+ if (Heap::isMarked(codeBlock)
+ && codeBlock->cellState() == CellState::PossiblyBlack)
+ slotVisitor.visitAsConstraint(codeBlock);
+ });
+ },
+ ConstraintVolatility::SeldomGreyed);
+
+ m_constraintSet->add(
+ "Mrms", "Mutator+Race Mark Stack",
+ [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
+ // Indicate to the fixpoint that we introduced work!
+ size_t size = m_mutatorMarkStack->size() + m_raceMarkStack->size();
+ slotVisitor.addToVisitCount(size);
+
+ if (Options::logGC())
+ dataLog("(", size, ")");
+
+ m_mutatorMarkStack->transferTo(slotVisitor.mutatorMarkStack());
+ m_raceMarkStack->transferTo(slotVisitor.mutatorMarkStack());
+ },
+ [this] (SlotVisitor&) -> double {
+ return m_mutatorMarkStack->size() + m_raceMarkStack->size();
+ },
+ ConstraintVolatility::GreyedByExecution);
+}
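Each registration above pairs a short tag, a human-readable name, a lambda that feeds roots to a SlotVisitor, and a ConstraintVolatility describing how often the constraint can introduce new work. A hedged sketch of what one more constraint would look like, reusing only the add() shape shown in this patch (the myEmbedderRoots member and visitRoots() call are made-up placeholders):

    // Hypothetical extra constraint, mirroring the registrations in addCoreConstraints() above.
    m_constraintSet->add(
        "Em", "Embedder Roots",
        [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
            // myEmbedderRoots/visitRoots() stand in for whatever out-of-heap structure the
            // embedder wants scanned on every marking pass.
            myEmbedderRoots->visitRoots(slotVisitor);
        },
        ConstraintVolatility::GreyedByExecution);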
+
+void Heap::addMarkingConstraint(std::unique_ptr<MarkingConstraint> constraint)
+{
+ PreventCollectionScope preventCollectionScope(*this);
+ m_constraintSet->add(WTFMove(constraint));
+}
+
+void Heap::notifyIsSafeToCollect()
+{
+ MonotonicTime before;
+ if (Options::logGC()) {
+ before = MonotonicTime::now();
+ dataLog("[GC<", RawPointer(this), ">: starting ");
+ }
+
+ addCoreConstraints();
+
+ m_isSafeToCollect = true;
+
+ if (Options::collectContinuously()) {
+ m_collectContinuouslyThread = createThread(
+ "JSC DEBUG Continuous GC",
+ [this] () {
+ MonotonicTime initialTime = MonotonicTime::now();
+ Seconds period = Seconds::fromMilliseconds(Options::collectContinuouslyPeriodMS());
+ while (!m_shouldStopCollectingContinuously) {
+ {
+ LockHolder locker(*m_threadLock);
+ if (m_requests.isEmpty()) {
+ m_requests.append(std::nullopt);
+ m_lastGrantedTicket++;
+ m_threadCondition->notifyOne(locker);
+ }
+ }
+
+ {
+ LockHolder locker(m_collectContinuouslyLock);
+ Seconds elapsed = MonotonicTime::now() - initialTime;
+ Seconds elapsedInPeriod = elapsed % period;
+ MonotonicTime timeToWakeUp =
+ initialTime + elapsed - elapsedInPeriod + period;
+ while (!hasElapsed(timeToWakeUp) && !m_shouldStopCollectingContinuously) {
+ m_collectContinuouslyCondition.waitUntil(
+ m_collectContinuouslyLock, timeToWakeUp);
+ }
+ }
+ }
+ });
+ }
+
+ if (Options::logGC())
+ dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
+}
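The continuous-collection thread above sleeps until the next multiple of the configured period after initialTime, rather than simply "now plus period", so time spent inside a collection does not push every subsequent wake-up later. The deadline computation on its own, with std::chrono standing in for MonotonicTime/Seconds:

    #include <chrono>

    using Clock = std::chrono::steady_clock;

    // Round the deadline up to the next period boundary measured from initialTime.
    Clock::time_point nextWakeUp(Clock::time_point initialTime, Clock::duration period, Clock::time_point now)
    {
        auto elapsed = now - initialTime;
        auto elapsedInPeriod = elapsed % period;                 // where we are inside the current period
        return initialTime + elapsed - elapsedInPeriod + period; // start of the next period
    }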
+
+void Heap::preventCollection()
+{
+ if (!m_isSafeToCollect)
+ return;
+
+ // This prevents the collectContinuously thread from starting a collection.
+ m_collectContinuouslyLock.lock();
+
+ // Wait for all collections to finish.
+ waitForCollector(
+ [&] (const AbstractLocker&) -> bool {
+ ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
+ return m_lastServedTicket == m_lastGrantedTicket;
+ });
+
+ // Now a collection can only start if this thread starts it.
+ RELEASE_ASSERT(!m_collectionScope);
+}
+
+void Heap::allowCollection()
+{
+ if (!m_isSafeToCollect)
+ return;
+
+ m_collectContinuouslyLock.unlock();
+}
+
+template<typename Func>
+void Heap::forEachSlotVisitor(const Func& func)
+{
+ auto locker = holdLock(m_parallelSlotVisitorLock);
+ func(*m_collectorSlotVisitor);
+ func(*m_mutatorSlotVisitor);
+ for (auto& slotVisitor : m_parallelSlotVisitors)
+ func(*slotVisitor);
+}
+
+void Heap::setMutatorShouldBeFenced(bool value)
+{
+ m_mutatorShouldBeFenced = value;
+ m_barrierThreshold = value ? tautologicalThreshold : blackThreshold;
+}
+
+void Heap::performIncrement(size_t bytes)
+{
+ if (!m_objectSpace.isMarking())
+ return;
+
+ m_incrementBalance += bytes * Options::gcIncrementScale();
+
+ // Save ourselves from crazy. Since this is an optimization, it's OK to go back to any consistent
+ // state when the double goes wild.
+ if (std::isnan(m_incrementBalance) || std::isinf(m_incrementBalance))
+ m_incrementBalance = 0;
+
+ if (m_incrementBalance < static_cast<double>(Options::gcIncrementBytes()))
+ return;
+
+ double targetBytes = m_incrementBalance;
+ if (targetBytes <= 0)
+ return;
+ targetBytes = std::min(targetBytes, Options::gcIncrementMaxBytes());
+
+ SlotVisitor& slotVisitor = *m_mutatorSlotVisitor;
+ ParallelModeEnabler parallelModeEnabler(slotVisitor);
+ size_t bytesVisited = slotVisitor.performIncrementOfDraining(static_cast<size_t>(targetBytes));
+ // incrementBalance may go negative here because it'll remember how many bytes we overshot.
+ m_incrementBalance -= bytesVisited;
}
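performIncrement() is a pay-as-you-go scheme: each allocation deposits bytes * gcIncrementScale into a balance, and once the balance reaches gcIncrementBytes the mutator drains that much marking work, capped at gcIncrementMaxBytes, possibly driving the balance negative when it overshoots. The bookkeeping in isolation (the option values are illustrative and drain() stands in for SlotVisitor::performIncrementOfDraining()):

    #include <algorithm>
    #include <cmath>
    #include <cstddef>

    struct IncrementalMarker {
        double balance { 0 };
        double scale { 2.0 };            // illustrative stand-ins for the gcIncrement* options
        double minIncrement { 1 << 16 };
        double maxIncrement { 1 << 20 };

        template<typename DrainFunc>
        void didAllocate(size_t bytes, const DrainFunc& drain)
        {
            balance += bytes * scale;
            if (std::isnan(balance) || std::isinf(balance))
                balance = 0;             // reset to a consistent state if the accumulator goes wild
            if (balance < minIncrement)
                return;                  // not enough credit yet; stay on the fast path

            double target = std::min(balance, maxIncrement);
            size_t visited = drain(static_cast<size_t>(target));
            balance -= visited;          // may go negative: remembers how far we overshot
        }
    };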
} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/Heap.h b/Source/JavaScriptCore/heap/Heap.h
index 90988a3fc..8b23b9ea2 100644
--- a/Source/JavaScriptCore/heap/Heap.h
+++ b/Source/JavaScriptCore/heap/Heap.h
@@ -1,7 +1,7 @@
/*
* Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
* Copyright (C) 2001 Peter Kelly (pmk@post.com)
- * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2003-2017 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -19,501 +19,660 @@
*
*/
-#ifndef Heap_h
-#define Heap_h
+#pragma once
#include "ArrayBuffer.h"
-#include "BlockAllocator.h"
-#include "CodeBlockSet.h"
-#include "CopyVisitor.h"
+#include "CellState.h"
+#include "CollectionScope.h"
+#include "CollectorPhase.h"
+#include "DeleteAllCodeEffort.h"
+#include "GCConductor.h"
#include "GCIncomingRefCountedSet.h"
-#include "GCThreadSharedData.h"
#include "HandleSet.h"
#include "HandleStack.h"
-#include "HeapOperation.h"
-#include "JITStubRoutineSet.h"
-#include "MarkedAllocator.h"
+#include "HeapObserver.h"
+#include "ListableHandler.h"
#include "MarkedBlock.h"
#include "MarkedBlockSet.h"
#include "MarkedSpace.h"
+#include "MutatorState.h"
#include "Options.h"
-#include "SlotVisitor.h"
+#include "StructureIDTable.h"
+#include "TinyBloomFilter.h"
+#include "UnconditionalFinalizer.h"
+#include "VisitRaceKey.h"
#include "WeakHandleOwner.h"
-#include "WriteBarrierBuffer.h"
+#include "WeakReferenceHarvester.h"
#include "WriteBarrierSupport.h"
+#include <wtf/AutomaticThread.h>
+#include <wtf/Deque.h>
#include <wtf/HashCountedSet.h>
#include <wtf/HashSet.h>
-
-#define COLLECT_ON_EVERY_ALLOCATION 0
+#include <wtf/ParallelHelperPool.h>
namespace JSC {
- class CopiedSpace;
- class CodeBlock;
- class ExecutableBase;
- class GCActivityCallback;
- class GCAwareJITStubRoutine;
- class GlobalCodeBlock;
- class Heap;
- class HeapRootVisitor;
- class IncrementalSweeper;
- class JITStubRoutine;
- class JSCell;
- class VM;
- class JSStack;
- class JSValue;
- class LiveObjectIterator;
- class LLIntOffsetsExtractor;
- class MarkedArgumentBuffer;
- class WeakGCHandlePool;
- class SlotVisitor;
-
- typedef std::pair<JSValue, WTF::String> ValueStringPair;
- typedef HashCountedSet<JSCell*> ProtectCountSet;
- typedef HashCountedSet<const char*> TypeCountSet;
-
- enum HeapType { SmallHeap, LargeHeap };
-
- class Heap {
- WTF_MAKE_NONCOPYABLE(Heap);
- public:
- friend class JIT;
- friend class DFG::SpeculativeJIT;
- friend class GCThreadSharedData;
- static Heap* heap(const JSValue); // 0 for immediate values
- static Heap* heap(const JSCell*);
-
- // This constant determines how many blocks we iterate between checks of our
- // deadline when calling Heap::isPagedOut. Decreasing it will cause us to detect
- // overstepping our deadline more quickly, while increasing it will cause
- // our scan to run faster.
- static const unsigned s_timeCheckResolution = 16;
-
- static bool isLive(const void*);
- static bool isMarked(const void*);
- static bool testAndSetMarked(const void*);
- static void setMarked(const void*);
-
- JS_EXPORT_PRIVATE void addToRememberedSet(const JSCell*);
- bool isInRememberedSet(const JSCell* cell) const
- {
- ASSERT(cell);
- ASSERT(!Options::enableConcurrentJIT() || !isCompilationThread());
- return MarkedBlock::blockFor(cell)->isRemembered(cell);
- }
- static bool isWriteBarrierEnabled();
- JS_EXPORT_PRIVATE static void writeBarrier(const JSCell*);
- static void writeBarrier(const JSCell*, JSValue);
- static void writeBarrier(const JSCell*, JSCell*);
-
- WriteBarrierBuffer& writeBarrierBuffer() { return m_writeBarrierBuffer; }
- void flushWriteBarrierBuffer(JSCell*);
-
- Heap(VM*, HeapType);
- ~Heap();
- JS_EXPORT_PRIVATE void lastChanceToFinalize();
-
- VM* vm() const { return m_vm; }
- MarkedSpace& objectSpace() { return m_objectSpace; }
- MachineThreads& machineThreads() { return m_machineThreads; }
-
- JS_EXPORT_PRIVATE GCActivityCallback* activityCallback();
- JS_EXPORT_PRIVATE void setActivityCallback(PassOwnPtr<GCActivityCallback>);
- JS_EXPORT_PRIVATE void setGarbageCollectionTimerEnabled(bool);
-
- JS_EXPORT_PRIVATE IncrementalSweeper* sweeper();
- JS_EXPORT_PRIVATE void setIncrementalSweeper(PassOwnPtr<IncrementalSweeper>);
-
- // true if collection is in progress
- inline bool isCollecting();
- inline HeapOperation operationInProgress() { return m_operationInProgress; }
- // true if an allocation or collection is in progress
- inline bool isBusy();
-
- MarkedAllocator& allocatorForObjectWithoutDestructor(size_t bytes) { return m_objectSpace.allocatorFor(bytes); }
- MarkedAllocator& allocatorForObjectWithNormalDestructor(size_t bytes) { return m_objectSpace.normalDestructorAllocatorFor(bytes); }
- MarkedAllocator& allocatorForObjectWithImmortalStructureDestructor(size_t bytes) { return m_objectSpace.immortalStructureDestructorAllocatorFor(bytes); }
- CopiedAllocator& storageAllocator() { return m_storageSpace.allocator(); }
- CheckedBoolean tryAllocateStorage(JSCell* intendedOwner, size_t, void**);
- CheckedBoolean tryReallocateStorage(JSCell* intendedOwner, void**, size_t, size_t);
- void ascribeOwner(JSCell* intendedOwner, void*);
-
- typedef void (*Finalizer)(JSCell*);
- JS_EXPORT_PRIVATE void addFinalizer(JSCell*, Finalizer);
- void addCompiledCode(ExecutableBase*);
-
- void notifyIsSafeToCollect() { m_isSafeToCollect = true; }
- bool isSafeToCollect() const { return m_isSafeToCollect; }
-
- JS_EXPORT_PRIVATE void collectAllGarbage();
- bool shouldCollect();
- void collect();
- bool collectIfNecessaryOrDefer(); // Returns true if it did collect.
-
- void reportExtraMemoryCost(size_t cost);
- JS_EXPORT_PRIVATE void reportAbandonedObjectGraph();
-
- JS_EXPORT_PRIVATE void protect(JSValue);
- JS_EXPORT_PRIVATE bool unprotect(JSValue); // True when the protect count drops to 0.
-
- size_t extraSize(); // extra memory usage outside of pages allocated by the heap
- JS_EXPORT_PRIVATE size_t size();
- JS_EXPORT_PRIVATE size_t capacity();
- JS_EXPORT_PRIVATE size_t objectCount();
- JS_EXPORT_PRIVATE size_t globalObjectCount();
- JS_EXPORT_PRIVATE size_t protectedObjectCount();
- JS_EXPORT_PRIVATE size_t protectedGlobalObjectCount();
- JS_EXPORT_PRIVATE PassOwnPtr<TypeCountSet> protectedObjectTypeCounts();
- JS_EXPORT_PRIVATE PassOwnPtr<TypeCountSet> objectTypeCounts();
- void showStatistics();
-
- void pushTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>*);
- void popTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>*);
-
- HashSet<MarkedArgumentBuffer*>& markListSet() { if (!m_markListSet) m_markListSet = adoptPtr(new HashSet<MarkedArgumentBuffer*>); return *m_markListSet; }
-
- template<typename Functor> typename Functor::ReturnType forEachProtectedCell(Functor&);
- template<typename Functor> typename Functor::ReturnType forEachProtectedCell();
- template<typename Functor> inline void forEachCodeBlock(Functor&);
-
- HandleSet* handleSet() { return &m_handleSet; }
- HandleStack* handleStack() { return &m_handleStack; }
-
- void willStartIterating();
- void didFinishIterating();
- void getConservativeRegisterRoots(HashSet<JSCell*>& roots);
-
- double lastGCLength() { return m_lastGCLength; }
- void increaseLastGCLength(double amount) { m_lastGCLength += amount; }
-
- JS_EXPORT_PRIVATE void deleteAllCompiledCode();
-
- void didAllocate(size_t);
- void didAbandon(size_t);
-
- bool isPagedOut(double deadline);
-
- const JITStubRoutineSet& jitStubRoutines() { return m_jitStubRoutines; }
-
- void addReference(JSCell*, ArrayBuffer*);
-
- bool isDeferred() const { return !!m_deferralDepth; }
+class CodeBlock;
+class CodeBlockSet;
+class CollectingScope;
+class ConservativeRoots;
+class GCDeferralContext;
+class EdenGCActivityCallback;
+class ExecutableBase;
+class FullGCActivityCallback;
+class GCActivityCallback;
+class GCAwareJITStubRoutine;
+class Heap;
+class HeapProfiler;
+class HeapVerifier;
+class IncrementalSweeper;
+class JITStubRoutine;
+class JITStubRoutineSet;
+class JSCell;
+class JSValue;
+class LLIntOffsetsExtractor;
+class MachineThreads;
+class MarkStackArray;
+class MarkedAllocator;
+class MarkedArgumentBuffer;
+class MarkingConstraint;
+class MarkingConstraintSet;
+class MutatorScheduler;
+class RunningScope;
+class SlotVisitor;
+class SpaceTimeMutatorScheduler;
+class StopIfNecessaryTimer;
+class SweepingScope;
+class VM;
+struct CurrentThreadState;
+
+namespace DFG {
+class SpeculativeJIT;
+class Worklist;
+}
+
+typedef HashCountedSet<JSCell*> ProtectCountSet;
+typedef HashCountedSet<const char*> TypeCountSet;
+
+enum HeapType { SmallHeap, LargeHeap };
+
+class HeapUtil;
+
+class Heap {
+ WTF_MAKE_NONCOPYABLE(Heap);
+public:
+ friend class JIT;
+ friend class DFG::SpeculativeJIT;
+ static Heap* heap(const JSValue); // 0 for immediate values
+ static Heap* heap(const HeapCell*);
+
+ // This constant determines how many blocks we iterate between checks of our
+ // deadline when calling Heap::isPagedOut. Decreasing it will cause us to detect
+ // overstepping our deadline more quickly, while increasing it will cause
+ // our scan to run faster.
+ static const unsigned s_timeCheckResolution = 16;
+
+ static bool isMarked(const void*);
+ static bool isMarkedConcurrently(const void*);
+ static bool testAndSetMarked(HeapVersion, const void*);
+
+ static size_t cellSize(const void*);
-#if USE(CF)
- template<typename T> void releaseSoon(RetainPtr<T>&&);
-#endif
+ void writeBarrier(const JSCell* from);
+ void writeBarrier(const JSCell* from, JSValue to);
+ void writeBarrier(const JSCell* from, JSCell* to);
+
+ void writeBarrierWithoutFence(const JSCell* from);
+
+ void mutatorFence();
+
+ // Take this if you know that from->cellState() < barrierThreshold.
+ JS_EXPORT_PRIVATE void writeBarrierSlowPath(const JSCell* from);
- private:
- friend class CodeBlock;
- friend class CopiedBlock;
- friend class DeferGC;
- friend class DeferGCForAWhile;
- friend class DelayedReleaseScope;
- friend class GCAwareJITStubRoutine;
- friend class HandleSet;
- friend class JITStubRoutine;
- friend class LLIntOffsetsExtractor;
- friend class MarkedSpace;
- friend class MarkedAllocator;
- friend class MarkedBlock;
- friend class CopiedSpace;
- friend class CopyVisitor;
- friend class RecursiveAllocationScope;
- friend class SlotVisitor;
- friend class SuperRegion;
- friend class IncrementalSweeper;
- friend class HeapStatistics;
- friend class VM;
- friend class WeakSet;
- template<typename T> friend void* allocateCell(Heap&);
- template<typename T> friend void* allocateCell(Heap&, size_t);
-
- void* allocateWithImmortalStructureDestructor(size_t); // For use with special objects whose Structures never die.
- void* allocateWithNormalDestructor(size_t); // For use with objects that inherit directly or indirectly from JSDestructibleObject.
- void* allocateWithoutDestructor(size_t); // For use with objects without destructors.
-
- static const size_t minExtraCost = 256;
- static const size_t maxExtraCost = 1024 * 1024;
-
- class FinalizerOwner : public WeakHandleOwner {
- virtual void finalize(Handle<Unknown>, void* context) override;
- };
-
- JS_EXPORT_PRIVATE bool isValidAllocation(size_t);
- JS_EXPORT_PRIVATE void reportExtraMemoryCostSlowCase(size_t);
-
- void markRoots();
- void markProtectedObjects(HeapRootVisitor&);
- void markTempSortVectors(HeapRootVisitor&);
- template <HeapOperation collectionType>
- void copyBackingStores();
- void harvestWeakReferences();
- void finalizeUnconditionalFinalizers();
- void deleteUnmarkedCompiledCode();
- void zombifyDeadObjects();
- void markDeadObjects();
-
- size_t sizeAfterCollect();
-
- JSStack& stack();
- BlockAllocator& blockAllocator();
-
- JS_EXPORT_PRIVATE void incrementDeferralDepth();
- void decrementDeferralDepth();
- JS_EXPORT_PRIVATE void decrementDeferralDepthAndGCIfNeeded();
-
- const HeapType m_heapType;
- const size_t m_ramSize;
- const size_t m_minBytesPerCycle;
- size_t m_sizeAfterLastCollect;
-
- size_t m_bytesAllocatedThisCycle;
- size_t m_bytesAbandonedThisCycle;
- size_t m_maxEdenSize;
- size_t m_maxHeapSize;
- bool m_shouldDoFullCollection;
- size_t m_totalBytesVisited;
- size_t m_totalBytesCopied;
-
- HeapOperation m_operationInProgress;
- BlockAllocator m_blockAllocator;
- MarkedSpace m_objectSpace;
- CopiedSpace m_storageSpace;
- GCIncomingRefCountedSet<ArrayBuffer> m_arrayBuffers;
- size_t m_extraMemoryUsage;
-
- HashSet<const JSCell*> m_copyingRememberedSet;
-
- ProtectCountSet m_protectedValues;
- Vector<Vector<ValueStringPair, 0, UnsafeVectorOverflow>* > m_tempSortingVectors;
- OwnPtr<HashSet<MarkedArgumentBuffer*>> m_markListSet;
-
- MachineThreads m_machineThreads;
-
- GCThreadSharedData m_sharedData;
- SlotVisitor m_slotVisitor;
- CopyVisitor m_copyVisitor;
-
- HandleSet m_handleSet;
- HandleStack m_handleStack;
- CodeBlockSet m_codeBlocks;
- JITStubRoutineSet m_jitStubRoutines;
- FinalizerOwner m_finalizerOwner;
-
- bool m_isSafeToCollect;
-
- WriteBarrierBuffer m_writeBarrierBuffer;
-
- VM* m_vm;
- double m_lastGCLength;
- double m_lastCodeDiscardTime;
-
- DoublyLinkedList<ExecutableBase> m_compiledCode;
-
- OwnPtr<GCActivityCallback> m_activityCallback;
- OwnPtr<IncrementalSweeper> m_sweeper;
- Vector<MarkedBlock*> m_blockSnapshot;
-
- unsigned m_deferralDepth;
- };
+ Heap(VM*, HeapType);
+ ~Heap();
+ void lastChanceToFinalize();
+ void releaseDelayedReleasedObjects();
+
+ VM* vm() const;
+
+ MarkedSpace& objectSpace() { return m_objectSpace; }
+ MachineThreads& machineThreads() { return *m_machineThreads; }
+
+ SlotVisitor& collectorSlotVisitor() { return *m_collectorSlotVisitor; }
+
+ JS_EXPORT_PRIVATE GCActivityCallback* fullActivityCallback();
+ JS_EXPORT_PRIVATE GCActivityCallback* edenActivityCallback();
+ JS_EXPORT_PRIVATE void setGarbageCollectionTimerEnabled(bool);
+
+ JS_EXPORT_PRIVATE IncrementalSweeper* sweeper();
+
+ void addObserver(HeapObserver* observer) { m_observers.append(observer); }
+ void removeObserver(HeapObserver* observer) { m_observers.removeFirst(observer); }
+
+ MutatorState mutatorState() const { return m_mutatorState; }
+ std::optional<CollectionScope> collectionScope() const { return m_collectionScope; }
+ bool hasHeapAccess() const;
+ bool collectorBelievesThatTheWorldIsStopped() const;
+
+ // We're always busy on the collection threads. On the main thread, this returns true if we're
+ // helping the heap.
+ JS_EXPORT_PRIVATE bool isCurrentThreadBusy();
+
+ typedef void (*Finalizer)(JSCell*);
+ JS_EXPORT_PRIVATE void addFinalizer(JSCell*, Finalizer);
+ void addExecutable(ExecutableBase*);
+
+ void notifyIsSafeToCollect();
+ bool isSafeToCollect() const { return m_isSafeToCollect; }
+
+ JS_EXPORT_PRIVATE bool isHeapSnapshotting() const;
+
+ JS_EXPORT_PRIVATE void collectAllGarbageIfNotDoneRecently();
+ JS_EXPORT_PRIVATE void collectAllGarbage();
+ JS_EXPORT_PRIVATE void sweepSynchronously();
- struct MarkedBlockSnapshotFunctor : public MarkedBlock::VoidFunctor {
- MarkedBlockSnapshotFunctor(Vector<MarkedBlock*>& blocks)
- : m_index(0)
- , m_blocks(blocks)
- {
- }
+ bool shouldCollectHeuristic();
- void operator()(MarkedBlock* block) { m_blocks[m_index++] = block; }
+ // Queue up a collection. Returns immediately. This will not queue a collection if a collection
+ // of equal or greater strength exists. Full collections are stronger than std::nullopt collections
+ // and std::nullopt collections are stronger than Eden collections. std::nullopt means that the GC can
+ // choose Eden or Full. This implies that if you request a GC while that GC is ongoing, nothing
+ // will happen.
+ JS_EXPORT_PRIVATE void collectAsync(std::optional<CollectionScope> = std::nullopt);
- size_t m_index;
- Vector<MarkedBlock*>& m_blocks;
- };
+ // Queue up a collection and wait for it to complete. This won't return until you get your own
+ // complete collection. For example, if there was an ongoing asynchronous collection at the time
+ // you called this, then this would wait for that one to complete and then trigger your
+ // collection and then return. In weird cases, there could be multiple GC requests in the backlog
+ // and this will wait for that backlog before running its GC and returning.
+ JS_EXPORT_PRIVATE void collectSync(std::optional<CollectionScope> = std::nullopt);
+
+ void collectIfNecessaryOrDefer(GCDeferralContext* = nullptr);
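In terms of the declarations above, a short usage sketch from the embedder's side (the vm pointer is assumed, and the request path asserts that the caller holds the API lock, as requestCollection() does in the .cpp part of this patch):

    // Fire-and-forget: queue a GC of whatever scope the heap prefers and return immediately.
    vm->heap.collectAsync();

    // Ask specifically for an Eden collection, still without waiting.
    vm->heap.collectAsync(CollectionScope::Eden);

    // Request a full collection and block until a complete full cycle has finished.
    vm->heap.collectSync(CollectionScope::Full);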
- inline bool Heap::shouldCollect()
- {
- if (isDeferred())
- return false;
- if (Options::gcMaxHeapSize())
- return m_bytesAllocatedThisCycle > Options::gcMaxHeapSize() && m_isSafeToCollect && m_operationInProgress == NoOperation;
- return m_bytesAllocatedThisCycle > m_maxEdenSize && m_isSafeToCollect && m_operationInProgress == NoOperation;
- }
-
- bool Heap::isBusy()
- {
- return m_operationInProgress != NoOperation;
- }
-
- bool Heap::isCollecting()
- {
- return m_operationInProgress == FullCollection || m_operationInProgress == EdenCollection;
- }
-
- inline Heap* Heap::heap(const JSCell* cell)
- {
- return MarkedBlock::blockFor(cell)->heap();
- }
-
- inline Heap* Heap::heap(const JSValue v)
- {
- if (!v.isCell())
- return 0;
- return heap(v.asCell());
- }
-
- inline bool Heap::isLive(const void* cell)
- {
- return MarkedBlock::blockFor(cell)->isLiveCell(cell);
- }
-
- inline bool Heap::isMarked(const void* cell)
- {
- return MarkedBlock::blockFor(cell)->isMarked(cell);
- }
-
- inline bool Heap::testAndSetMarked(const void* cell)
- {
- return MarkedBlock::blockFor(cell)->testAndSetMarked(cell);
- }
-
- inline void Heap::setMarked(const void* cell)
- {
- MarkedBlock::blockFor(cell)->setMarked(cell);
- }
-
- inline bool Heap::isWriteBarrierEnabled()
- {
-#if ENABLE(WRITE_BARRIER_PROFILING) || ENABLE(GGC)
- return true;
-#else
- return false;
+ void completeAllJITPlans();
+
+ // Use this API to report non-GC memory referenced by GC objects. Be sure to
+ // call both of these functions: Calling only one may trigger catastrophic
+ // memory growth.
+ void reportExtraMemoryAllocated(size_t);
+ JS_EXPORT_PRIVATE void reportExtraMemoryVisited(size_t);
+
+#if ENABLE(RESOURCE_USAGE)
+ // Use this API to report the subset of extra memory that lives outside this process.
+ JS_EXPORT_PRIVATE void reportExternalMemoryVisited(size_t);
+ size_t externalMemorySize() { return m_externalMemorySize; }
#endif
- }
- inline void Heap::writeBarrier(const JSCell* from, JSCell* to)
- {
-#if ENABLE(WRITE_BARRIER_PROFILING)
- WriteBarrierCounters::countWriteBarrier();
-#endif
- if (!from || !isMarked(from))
- return;
- if (!to || isMarked(to))
- return;
- Heap::heap(from)->addToRememberedSet(from);
- }
-
- inline void Heap::writeBarrier(const JSCell* from, JSValue to)
- {
-#if ENABLE(WRITE_BARRIER_PROFILING)
- WriteBarrierCounters::countWriteBarrier();
-#endif
- if (!to.isCell())
- return;
- writeBarrier(from, to.asCell());
- }
-
- inline void Heap::reportExtraMemoryCost(size_t cost)
- {
- if (cost > minExtraCost)
- reportExtraMemoryCostSlowCase(cost);
- }
-
- template<typename Functor> inline typename Functor::ReturnType Heap::forEachProtectedCell(Functor& functor)
- {
- ProtectCountSet::iterator end = m_protectedValues.end();
- for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
- functor(it->key);
- m_handleSet.forEachStrongHandle(functor, m_protectedValues);
-
- return functor.returnValue();
- }
-
- template<typename Functor> inline typename Functor::ReturnType Heap::forEachProtectedCell()
- {
- Functor functor;
- return forEachProtectedCell(functor);
- }
-
- template<typename Functor> inline void Heap::forEachCodeBlock(Functor& functor)
- {
- return m_codeBlocks.iterate<Functor>(functor);
- }
-
- inline void* Heap::allocateWithNormalDestructor(size_t bytes)
- {
-#if ENABLE(ALLOCATION_LOGGING)
- dataLogF("JSC GC allocating %lu bytes with normal destructor.\n", bytes);
-#endif
- ASSERT(isValidAllocation(bytes));
- return m_objectSpace.allocateWithNormalDestructor(bytes);
- }
-
- inline void* Heap::allocateWithImmortalStructureDestructor(size_t bytes)
- {
-#if ENABLE(ALLOCATION_LOGGING)
- dataLogF("JSC GC allocating %lu bytes with immortal structure destructor.\n", bytes);
-#endif
- ASSERT(isValidAllocation(bytes));
- return m_objectSpace.allocateWithImmortalStructureDestructor(bytes);
- }
-
- inline void* Heap::allocateWithoutDestructor(size_t bytes)
- {
-#if ENABLE(ALLOCATION_LOGGING)
- dataLogF("JSC GC allocating %lu bytes without destructor.\n", bytes);
-#endif
- ASSERT(isValidAllocation(bytes));
- return m_objectSpace.allocateWithoutDestructor(bytes);
- }
-
- inline CheckedBoolean Heap::tryAllocateStorage(JSCell* intendedOwner, size_t bytes, void** outPtr)
- {
- CheckedBoolean result = m_storageSpace.tryAllocate(bytes, outPtr);
-#if ENABLE(ALLOCATION_LOGGING)
- dataLogF("JSC GC allocating %lu bytes of storage for %p: %p.\n", bytes, intendedOwner, *outPtr);
-#else
- UNUSED_PARAM(intendedOwner);
-#endif
- return result;
- }
+ // Use this API to report non-GC memory if you can't use the better API above.
+ void deprecatedReportExtraMemory(size_t);
+
+ JS_EXPORT_PRIVATE void reportAbandonedObjectGraph();
+
+ JS_EXPORT_PRIVATE void protect(JSValue);
+ JS_EXPORT_PRIVATE bool unprotect(JSValue); // True when the protect count drops to 0.
- inline CheckedBoolean Heap::tryReallocateStorage(JSCell* intendedOwner, void** ptr, size_t oldSize, size_t newSize)
- {
-#if ENABLE(ALLOCATION_LOGGING)
- void* oldPtr = *ptr;
-#endif
- CheckedBoolean result = m_storageSpace.tryReallocate(ptr, oldSize, newSize);
-#if ENABLE(ALLOCATION_LOGGING)
- dataLogF("JSC GC reallocating %lu -> %lu bytes of storage for %p: %p -> %p.\n", oldSize, newSize, intendedOwner, oldPtr, *ptr);
-#else
- UNUSED_PARAM(intendedOwner);
+ JS_EXPORT_PRIVATE size_t extraMemorySize(); // Non-GC memory referenced by GC objects.
+ JS_EXPORT_PRIVATE size_t size();
+ JS_EXPORT_PRIVATE size_t capacity();
+ JS_EXPORT_PRIVATE size_t objectCount();
+ JS_EXPORT_PRIVATE size_t globalObjectCount();
+ JS_EXPORT_PRIVATE size_t protectedObjectCount();
+ JS_EXPORT_PRIVATE size_t protectedGlobalObjectCount();
+ JS_EXPORT_PRIVATE std::unique_ptr<TypeCountSet> protectedObjectTypeCounts();
+ JS_EXPORT_PRIVATE std::unique_ptr<TypeCountSet> objectTypeCounts();
+
+ HashSet<MarkedArgumentBuffer*>& markListSet();
+
+ template<typename Functor> void forEachProtectedCell(const Functor&);
+ template<typename Functor> void forEachCodeBlock(const Functor&);
+ template<typename Functor> void forEachCodeBlockIgnoringJITPlans(const Functor&);
+
+ HandleSet* handleSet() { return &m_handleSet; }
+ HandleStack* handleStack() { return &m_handleStack; }
+
+ void willStartIterating();
+ void didFinishIterating();
+
+ Seconds lastFullGCLength() const { return m_lastFullGCLength; }
+ Seconds lastEdenGCLength() const { return m_lastEdenGCLength; }
+ void increaseLastFullGCLength(Seconds amount) { m_lastFullGCLength += amount; }
+
+ size_t sizeBeforeLastEdenCollection() const { return m_sizeBeforeLastEdenCollect; }
+ size_t sizeAfterLastEdenCollection() const { return m_sizeAfterLastEdenCollect; }
+ size_t sizeBeforeLastFullCollection() const { return m_sizeBeforeLastFullCollect; }
+ size_t sizeAfterLastFullCollection() const { return m_sizeAfterLastFullCollect; }
+
+ void deleteAllCodeBlocks(DeleteAllCodeEffort);
+ void deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort);
+
+ void didAllocate(size_t);
+ bool isPagedOut(double deadline);
+
+ const JITStubRoutineSet& jitStubRoutines() { return *m_jitStubRoutines; }
+
+ void addReference(JSCell*, ArrayBuffer*);
+
+ bool isDeferred() const { return !!m_deferralDepth; }
+
+ StructureIDTable& structureIDTable() { return m_structureIDTable; }
+
+ CodeBlockSet& codeBlockSet() { return *m_codeBlocks; }
+
+#if USE(FOUNDATION)
+ template<typename T> void releaseSoon(RetainPtr<T>&&);
#endif
- return result;
- }
-
- inline void Heap::ascribeOwner(JSCell* intendedOwner, void* storage)
- {
-#if ENABLE(ALLOCATION_LOGGING)
- dataLogF("JSC GC ascribing %p as owner of storage %p.\n", intendedOwner, storage);
-#else
- UNUSED_PARAM(intendedOwner);
- UNUSED_PARAM(storage);
+
+ JS_EXPORT_PRIVATE void registerWeakGCMap(void* weakGCMap, std::function<void()> pruningCallback);
+ JS_EXPORT_PRIVATE void unregisterWeakGCMap(void* weakGCMap);
+
+ void addLogicallyEmptyWeakBlock(WeakBlock*);
+
+#if ENABLE(RESOURCE_USAGE)
+ size_t blockBytesAllocated() const { return m_blockBytesAllocated; }
#endif
- }
- inline BlockAllocator& Heap::blockAllocator()
- {
- return m_blockAllocator;
- }
+ void didAllocateBlock(size_t capacity);
+ void didFreeBlock(size_t capacity);
+
+ bool mutatorShouldBeFenced() const { return m_mutatorShouldBeFenced; }
+ const bool* addressOfMutatorShouldBeFenced() const { return &m_mutatorShouldBeFenced; }
+
+ unsigned barrierThreshold() const { return m_barrierThreshold; }
+ const unsigned* addressOfBarrierThreshold() const { return &m_barrierThreshold; }
+
+ // If true, the GC believes that the mutator is currently messing with the heap. We call this
+ // "having heap access". The GC may block if the mutator is in this state. If false, the GC may
+ // currently be doing things to the heap that make the heap unsafe to access for the mutator.
+ bool hasAccess() const;
+
+ // If the mutator does not currently have heap access, this function will acquire it. If the GC
+ // is currently using the lack of heap access to do dangerous things to the heap then this
+ // function will block, waiting for the GC to finish. It's not valid to call this if the mutator
+ // already has heap access. The mutator is required to precisely track whether or not it has
+ // heap access.
+ //
+ // It's totally fine to acquireAccess() upon VM instantiation and keep it that way. This is how
+ // WebCore uses us. For most other clients, JSLock does acquireAccess()/releaseAccess() for you.
+ void acquireAccess();
+
+ // Releases heap access. If the GC is blocking waiting to do bad things to the heap, it will be
+ // allowed to run now.
+ //
+ // Ordinarily, you should use the ReleaseHeapAccessScope to release and then reacquire heap
+ // access. You should do this anytime you're about to perform a blocking operation, like waiting
+ // on the ParkingLot.
+ void releaseAccess();
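+ //
+ // Illustrative sketch (not part of this patch), assuming ReleaseHeapAccessScope is the RAII
+ // helper mentioned above and is constructed with the Heap:
+ //
+ //     {
+ //         ReleaseHeapAccessScope releaseScope(vm.heap); // releaseAccess()
+ //         condition.wait(lock); // the GC is free to run while we block
+ //     } // heap access is reacquired when the scope ends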
+
+ // This is like a super optimized way of saying:
+ //
+ // releaseAccess()
+ // acquireAccess()
+ //
+ // The fast path is an inlined relaxed load and branch. The slow path will block the mutator if
+ // the GC wants to do bad things to the heap.
+ //
+ // All allocations logically call this. As an optimization to improve GC progress, you can call
+ // this anywhere that you can afford a load-branch and where an object allocation would have been
+ // safe.
+ //
+ // The GC will also push a stopIfNecessary() event onto the runloop of the thread that
+ // instantiated the VM whenever it wants the mutator to stop. This means that if you never block
+ // but instead use the runloop to wait for events, then you could safely run in a mode where the
+ // mutator has permanent heap access (like the DOM does). If you have good event handling
+ // discipline (i.e. you don't block the runloop) then you can be sure that stopIfNecessary() will
+ // already be called for you at the right times.
+ void stopIfNecessary();
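+ //
+ // Illustrative sketch (not part of this patch): a long-running loop that never allocates can
+ // still let the GC make progress by polling; the loop body is hypothetical:
+ //
+ //     for (auto& item : workList) {
+ //         process(item);
+ //         vm.heap.stopIfNecessary(); // cheap load-and-branch; blocks only if the GC asked us to stop
+ //     }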
+
+ // This gives the conn to the collector.
+ void relinquishConn();
+
+ bool mayNeedToStop();
+
+ void performIncrement(size_t bytes);
+
+ // This is a much stronger way of stopping the collector, and it may require waiting for a
+ // while. This is meant to be a legacy API for clients of collectAllGarbage that expect that there
+ // is no GC before or after that function call. After calling this, you are free to start GCs
+ // yourself but you can be sure that none are running.
+ //
+ // This both prevents new collections from being started asynchronously and waits for any
+ // outstanding collections to complete.
+ void preventCollection();
+ void allowCollection();
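+ //
+ // Illustrative sketch (not part of this patch); the heap-inspection call is hypothetical, and
+ // PreventCollectionScope (used by HeapSnapshotBuilder) is the RAII equivalent:
+ //
+ //     vm.heap.preventCollection();
+ //     walkTheHeapWithoutRacingTheCollector();
+ //     vm.heap.allowCollection();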
+
+ uint64_t mutatorExecutionVersion() const { return m_mutatorExecutionVersion; }
+
+ JS_EXPORT_PRIVATE void addMarkingConstraint(std::unique_ptr<MarkingConstraint>);
+
+ size_t numOpaqueRoots() const { return m_opaqueRoots.size(); }
+
+#if USE(CF)
+ CFRunLoopRef runLoop() const { return m_runLoop.get(); }
+ JS_EXPORT_PRIVATE void setRunLoop(CFRunLoopRef);
+#endif // USE(CF)
+
+private:
+ friend class AllocatingScope;
+ friend class CodeBlock;
+ friend class CollectingScope;
+ friend class DeferGC;
+ friend class DeferGCForAWhile;
+ friend class GCAwareJITStubRoutine;
+ friend class GCLogging;
+ friend class GCThread;
+ friend class HandleSet;
+ friend class HeapUtil;
+ friend class HeapVerifier;
+ friend class JITStubRoutine;
+ friend class LLIntOffsetsExtractor;
+ friend class MarkedSpace;
+ friend class MarkedAllocator;
+ friend class MarkedBlock;
+ friend class RunningScope;
+ friend class SlotVisitor;
+ friend class SpaceTimeMutatorScheduler;
+ friend class StochasticSpaceTimeMutatorScheduler;
+ friend class SweepingScope;
+ friend class IncrementalSweeper;
+ friend class HeapStatistics;
+ friend class VM;
+ friend class WeakSet;
+
+ class Thread;
+ friend class Thread;
+
+ static const size_t minExtraMemory = 256;
+
+ class FinalizerOwner : public WeakHandleOwner {
+ void finalize(Handle<Unknown>, void* context) override;
+ };
+
+ JS_EXPORT_PRIVATE bool isValidAllocation(size_t);
+ JS_EXPORT_PRIVATE void reportExtraMemoryAllocatedSlowCase(size_t);
+ JS_EXPORT_PRIVATE void deprecatedReportExtraMemorySlowCase(size_t);
+
+ bool shouldCollectInCollectorThread(const AbstractLocker&);
+ void collectInCollectorThread();
+
+ void checkConn(GCConductor);
+
+ enum class RunCurrentPhaseResult {
+ Finished,
+ Continue,
+ NeedCurrentThreadState
+ };
+ RunCurrentPhaseResult runCurrentPhase(GCConductor, CurrentThreadState*);
+
+ // Returns true if we should keep doing things.
+ bool runNotRunningPhase(GCConductor);
+ bool runBeginPhase(GCConductor);
+ bool runFixpointPhase(GCConductor);
+ bool runConcurrentPhase(GCConductor);
+ bool runReloopPhase(GCConductor);
+ bool runEndPhase(GCConductor);
+ bool changePhase(GCConductor, CollectorPhase);
+ bool finishChangingPhase(GCConductor);
+
+ void collectInMutatorThread();
+
+ void stopThePeriphery(GCConductor);
+ void resumeThePeriphery();
+
+ // Returns true if the mutator is stopped, false if the mutator has the conn now.
+ bool stopTheMutator();
+ void resumeTheMutator();
+
+ void stopIfNecessarySlow();
+ bool stopIfNecessarySlow(unsigned extraStateBits);
+
+ template<typename Func>
+ void waitForCollector(const Func&);
+
+ JS_EXPORT_PRIVATE void acquireAccessSlow();
+ JS_EXPORT_PRIVATE void releaseAccessSlow();
+
+ bool handleGCDidJIT(unsigned);
+ void handleGCDidJIT();
+
+ bool handleNeedFinalize(unsigned);
+ void handleNeedFinalize();
+
+ bool relinquishConn(unsigned);
+ void finishRelinquishingConn();
+
+ void setGCDidJIT();
+ void setNeedFinalize();
+ void waitWhileNeedFinalize();
+
+ void setMutatorWaiting();
+ void clearMutatorWaiting();
+ void notifyThreadStopping(const AbstractLocker&);
+
+ typedef uint64_t Ticket;
+ Ticket requestCollection(std::optional<CollectionScope>);
+ void waitForCollection(Ticket);
+
+ void suspendCompilerThreads();
+ void willStartCollection(std::optional<CollectionScope>);
+ void prepareForMarking();
+
+ void gatherStackRoots(ConservativeRoots&);
+ void gatherJSStackRoots(ConservativeRoots&);
+ void gatherScratchBufferRoots(ConservativeRoots&);
+ void beginMarking();
+ void visitCompilerWorklistWeakReferences();
+ void removeDeadCompilerWorklistEntries();
+ void updateObjectCounts();
+ void endMarking();
+
+ void reapWeakHandles();
+ void pruneStaleEntriesFromWeakGCMaps();
+ void sweepArrayBuffers();
+ void snapshotUnswept();
+ void deleteSourceProviderCaches();
+ void notifyIncrementalSweeper();
+ void harvestWeakReferences();
+ void finalizeUnconditionalFinalizers();
+ void clearUnmarkedExecutables();
+ void deleteUnmarkedCompiledCode();
+ JS_EXPORT_PRIVATE void addToRememberedSet(const JSCell*);
+ void updateAllocationLimits();
+ void didFinishCollection();
+ void resumeCompilerThreads();
+ void gatherExtraHeapSnapshotData(HeapProfiler&);
+ void removeDeadHeapSnapshotNodes(HeapProfiler&);
+ void finalize();
+ void sweepLargeAllocations();
+
+ void sweepAllLogicallyEmptyWeakBlocks();
+ bool sweepNextLogicallyEmptyWeakBlock();
+
+ bool shouldDoFullCollection(std::optional<CollectionScope> requestedCollectionScope) const;
+
+ void incrementDeferralDepth();
+ void decrementDeferralDepth();
+ void decrementDeferralDepthAndGCIfNeeded();
+ JS_EXPORT_PRIVATE void decrementDeferralDepthAndGCIfNeededSlow();
+
+ size_t visitCount();
+ size_t bytesVisited();
+
+ void forEachCodeBlockImpl(const ScopedLambda<bool(CodeBlock*)>&);
+ void forEachCodeBlockIgnoringJITPlansImpl(const ScopedLambda<bool(CodeBlock*)>&);
+
+ void setMutatorShouldBeFenced(bool value);
+
+ void addCoreConstraints();
+
+ template<typename Func>
+ void iterateExecutingAndCompilingCodeBlocks(const Func&);
+
+ template<typename Func>
+ void iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func&);
+
+ void assertSharedMarkStacksEmpty();
+
+ const HeapType m_heapType;
+ const size_t m_ramSize;
+ const size_t m_minBytesPerCycle;
+ size_t m_sizeAfterLastCollect;
+ size_t m_sizeAfterLastFullCollect;
+ size_t m_sizeBeforeLastFullCollect;
+ size_t m_sizeAfterLastEdenCollect;
+ size_t m_sizeBeforeLastEdenCollect;
+
+ size_t m_bytesAllocatedThisCycle;
+ size_t m_bytesAbandonedSinceLastFullCollect;
+ size_t m_maxEdenSize;
+ size_t m_maxHeapSize;
+ bool m_shouldDoFullCollection;
+ size_t m_totalBytesVisited;
+ size_t m_totalBytesVisitedThisCycle;
+ double m_incrementBalance { 0 };
+
+ std::optional<CollectionScope> m_collectionScope;
+ std::optional<CollectionScope> m_lastCollectionScope;
+ MutatorState m_mutatorState { MutatorState::Running };
+ StructureIDTable m_structureIDTable;
+ MarkedSpace m_objectSpace;
+ GCIncomingRefCountedSet<ArrayBuffer> m_arrayBuffers;
+ size_t m_extraMemorySize;
+ size_t m_deprecatedExtraMemorySize;
+
+ HashSet<const JSCell*> m_copyingRememberedSet;
+
+ ProtectCountSet m_protectedValues;
+ std::unique_ptr<HashSet<MarkedArgumentBuffer*>> m_markListSet;
+
+ std::unique_ptr<MachineThreads> m_machineThreads;
+
+ std::unique_ptr<SlotVisitor> m_collectorSlotVisitor;
+ std::unique_ptr<SlotVisitor> m_mutatorSlotVisitor;
+ std::unique_ptr<MarkStackArray> m_mutatorMarkStack;
+
+ Lock m_raceMarkStackLock;
+ std::unique_ptr<MarkStackArray> m_raceMarkStack;
+
+ std::unique_ptr<MarkingConstraintSet> m_constraintSet;
+
+ // We pool the slot visitors used by parallel marking threads. It's useful to be able to
+ // enumerate over them, and it's useful to have them cache some small amount of memory from
+ // one GC to the next. GC marking threads claim these at the start of marking, and return
+ // them at the end.
+ Vector<std::unique_ptr<SlotVisitor>> m_parallelSlotVisitors;
+ Vector<SlotVisitor*> m_availableParallelSlotVisitors;
+ Lock m_parallelSlotVisitorLock;
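+
+ // Illustrative sketch (not part of this patch) of the claim/return protocol described above;
+ // the surrounding marking-thread function is hypothetical:
+ //
+ //     SlotVisitor* visitor;
+ //     {
+ //         LockHolder locker(m_parallelSlotVisitorLock);
+ //         visitor = m_availableParallelSlotVisitors.takeLast();
+ //     }
+ //     ... drain mark stacks with *visitor ...
+ //     {
+ //         LockHolder locker(m_parallelSlotVisitorLock);
+ //         m_availableParallelSlotVisitors.append(visitor);
+ //     }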
+
+ template<typename Func>
+ void forEachSlotVisitor(const Func&);
+
+ HandleSet m_handleSet;
+ HandleStack m_handleStack;
+ std::unique_ptr<CodeBlockSet> m_codeBlocks;
+ std::unique_ptr<JITStubRoutineSet> m_jitStubRoutines;
+ FinalizerOwner m_finalizerOwner;
+
+ bool m_isSafeToCollect;
+
+ bool m_mutatorShouldBeFenced { Options::forceFencedBarrier() };
+ unsigned m_barrierThreshold { Options::forceFencedBarrier() ? tautologicalThreshold : blackThreshold };
+ VM* m_vm;
+ Seconds m_lastFullGCLength;
+ Seconds m_lastEdenGCLength;
+
+ Vector<ExecutableBase*> m_executables;
+
+ Vector<WeakBlock*> m_logicallyEmptyWeakBlocks;
+ size_t m_indexOfNextLogicallyEmptyWeakBlockToSweep { WTF::notFound };
+
#if USE(CF)
- template <typename T>
- inline void Heap::releaseSoon(RetainPtr<T>&& object)
- {
- m_objectSpace.releaseSoon(std::move(object));
- }
+ RetainPtr<CFRunLoopRef> m_runLoop;
+#endif // USE(CF)
+ RefPtr<FullGCActivityCallback> m_fullActivityCallback;
+ RefPtr<GCActivityCallback> m_edenActivityCallback;
+ RefPtr<IncrementalSweeper> m_sweeper;
+ RefPtr<StopIfNecessaryTimer> m_stopIfNecessaryTimer;
+
+ Vector<HeapObserver*> m_observers;
+
+ unsigned m_deferralDepth;
+ bool m_didDeferGCWork { false };
+
+ std::unique_ptr<HeapVerifier> m_verifier;
+
+#if USE(FOUNDATION)
+ Vector<RetainPtr<CFTypeRef>> m_delayedReleaseObjects;
+ unsigned m_delayedReleaseRecursionCount;
#endif
-} // namespace JSC
+ HashMap<void*, std::function<void()>> m_weakGCMaps;
+
+ Lock m_visitRaceLock;
+
+ Lock m_markingMutex;
+ Condition m_markingConditionVariable;
+ std::unique_ptr<MarkStackArray> m_sharedCollectorMarkStack;
+ std::unique_ptr<MarkStackArray> m_sharedMutatorMarkStack;
+ unsigned m_numberOfActiveParallelMarkers { 0 };
+ unsigned m_numberOfWaitingParallelMarkers { 0 };
+ bool m_parallelMarkersShouldExit { false };
+
+ Lock m_opaqueRootsMutex;
+ HashSet<const void*> m_opaqueRoots;
+
+ static const size_t s_blockFragmentLength = 32;
+
+ ListableHandler<WeakReferenceHarvester>::List m_weakReferenceHarvesters;
+ ListableHandler<UnconditionalFinalizer>::List m_unconditionalFinalizers;
+
+ ParallelHelperClient m_helperClient;
+
+#if ENABLE(RESOURCE_USAGE)
+ size_t m_blockBytesAllocated { 0 };
+ size_t m_externalMemorySize { 0 };
+#endif
+
+ std::unique_ptr<MutatorScheduler> m_scheduler;
+
+ static const unsigned mutatorHasConnBit = 1u << 0u; // Must also be protected by threadLock.
+ static const unsigned stoppedBit = 1u << 1u; // Only set when !hasAccessBit
+ static const unsigned hasAccessBit = 1u << 2u;
+ static const unsigned gcDidJITBit = 1u << 3u; // Set when the GC did some JITing, so on resume we need to cpuid.
+ static const unsigned needFinalizeBit = 1u << 4u;
+ static const unsigned mutatorWaitingBit = 1u << 5u; // Allows the mutator to use this as a condition variable.
+ Atomic<unsigned> m_worldState;
+ bool m_collectorBelievesThatTheWorldIsStopped { false };
+ MonotonicTime m_beforeGC;
+ MonotonicTime m_afterGC;
+ MonotonicTime m_stopTime;
+
+ Deque<std::optional<CollectionScope>> m_requests;
+ Ticket m_lastServedTicket { 0 };
+ Ticket m_lastGrantedTicket { 0 };
+ CollectorPhase m_currentPhase { CollectorPhase::NotRunning };
+ CollectorPhase m_nextPhase { CollectorPhase::NotRunning };
+ bool m_threadShouldStop { false };
+ bool m_threadIsStopping { false };
+ bool m_mutatorDidRun { true };
+ uint64_t m_mutatorExecutionVersion { 0 };
+ Box<Lock> m_threadLock;
+ RefPtr<AutomaticThreadCondition> m_threadCondition; // The mutator must not wait on this. It would cause a deadlock.
+ RefPtr<AutomaticThread> m_thread;
+
+ Lock m_collectContinuouslyLock;
+ Condition m_collectContinuouslyCondition;
+ bool m_shouldStopCollectingContinuously { false };
+ ThreadIdentifier m_collectContinuouslyThread { 0 };
+
+ MonotonicTime m_lastGCStartTime;
+ MonotonicTime m_lastGCEndTime;
+ MonotonicTime m_currentGCStartTime;
+
+ uintptr_t m_barriersExecuted { 0 };
+
+ CurrentThreadState* m_currentThreadState { nullptr };
+};
-#endif // Heap_h
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/HeapCell.cpp b/Source/JavaScriptCore/heap/HeapCell.cpp
new file mode 100644
index 000000000..edaf50ea5
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapCell.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "HeapCell.h"
+
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+#if !COMPILER(GCC_OR_CLANG)
+void HeapCell::use() const
+{
+}
+#endif
+
+} // namespace JSC
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, HeapCell::Kind kind)
+{
+ switch (kind) {
+ case HeapCell::JSCell:
+ out.print("JSCell");
+ return;
+ case HeapCell::Auxiliary:
+ out.print("Auxiliary");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/heap/HeapCell.h b/Source/JavaScriptCore/heap/HeapCell.h
new file mode 100644
index 000000000..ef5e54048
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapCell.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "DestructionMode.h"
+
+namespace JSC {
+
+class CellContainer;
+class Heap;
+class LargeAllocation;
+class MarkedBlock;
+class VM;
+struct AllocatorAttributes;
+
+class HeapCell {
+public:
+ enum Kind : int8_t {
+ JSCell,
+ Auxiliary
+ };
+
+ HeapCell() { }
+
+ void zap() { *reinterpret_cast_ptr<uintptr_t**>(this) = 0; }
+ bool isZapped() const { return !*reinterpret_cast_ptr<uintptr_t* const*>(this); }
+
+ bool isLargeAllocation() const;
+ CellContainer cellContainer() const;
+ MarkedBlock& markedBlock() const;
+ LargeAllocation& largeAllocation() const;
+
+ // If you want performance and you know that your cell is small, you can do this instead:
+ // ASSERT(!cell->isLargeAllocation());
+ // cell->markedBlock().vm()
+ // We currently only use this hack for callees to make ExecState::vm() fast. It's not
+ // recommended to use it for too many other things, since the large allocation cutoff is
+ // a runtime option and its default value is small (400 bytes).
+ Heap* heap() const;
+ VM* vm() const;
+
+ size_t cellSize() const;
+ AllocatorAttributes allocatorAttributes() const;
+ DestructionMode destructionMode() const;
+ Kind cellKind() const;
+
+ // Call use() after the last point where you need the `this` pointer to be kept alive. You usually don't
+ // need to use this, but it might be necessary if you're otherwise referring to an object's innards
+ // but not the object itself.
+#if COMPILER(GCC_OR_CLANG)
+ void use() const
+ {
+ asm volatile ("" : : "r"(this) : "memory");
+ }
+#else
+ void use() const;
+#endif
+};
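+
+ // Illustrative sketch (not part of this patch): keeping a cell alive while only its innards are
+ // referenced; bufferOf() and consume() are hypothetical:
+ //
+ //     HeapCell* cell = ...;
+ //     char* innards = bufferOf(cell); // a pointer into the cell's storage
+ //     consume(innards);               // no longer mentions `cell` itself
+ //     cell->use();                    // keeps `cell` formally alive up to this point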
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::HeapCell::Kind);
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/heap/HeapCellInlines.h b/Source/JavaScriptCore/heap/HeapCellInlines.h
new file mode 100644
index 000000000..c2d909f10
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapCellInlines.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CellContainer.h"
+#include "HeapCell.h"
+#include "LargeAllocation.h"
+#include "MarkedBlock.h"
+#include "VM.h"
+
+namespace JSC {
+
+ALWAYS_INLINE bool HeapCell::isLargeAllocation() const
+{
+ return LargeAllocation::isLargeAllocation(const_cast<HeapCell*>(this));
+}
+
+ALWAYS_INLINE CellContainer HeapCell::cellContainer() const
+{
+ if (isLargeAllocation())
+ return largeAllocation();
+ return markedBlock();
+}
+
+ALWAYS_INLINE MarkedBlock& HeapCell::markedBlock() const
+{
+ return *MarkedBlock::blockFor(this);
+}
+
+ALWAYS_INLINE LargeAllocation& HeapCell::largeAllocation() const
+{
+ return *LargeAllocation::fromCell(const_cast<HeapCell*>(this));
+}
+
+ALWAYS_INLINE Heap* HeapCell::heap() const
+{
+ return &vm()->heap;
+}
+
+ALWAYS_INLINE VM* HeapCell::vm() const
+{
+ if (isLargeAllocation())
+ return largeAllocation().vm();
+ return markedBlock().vm();
+}
+
+ALWAYS_INLINE size_t HeapCell::cellSize() const
+{
+ if (isLargeAllocation())
+ return largeAllocation().cellSize();
+ return markedBlock().cellSize();
+}
+
+ALWAYS_INLINE AllocatorAttributes HeapCell::allocatorAttributes() const
+{
+ if (isLargeAllocation())
+ return largeAllocation().attributes();
+ return markedBlock().attributes();
+}
+
+ALWAYS_INLINE DestructionMode HeapCell::destructionMode() const
+{
+ return allocatorAttributes().destruction;
+}
+
+ALWAYS_INLINE HeapCell::Kind HeapCell::cellKind() const
+{
+ return allocatorAttributes().cellKind;
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/HeapHelperPool.cpp b/Source/JavaScriptCore/heap/HeapHelperPool.cpp
new file mode 100644
index 000000000..791aa756f
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapHelperPool.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "HeapHelperPool.h"
+
+#include <mutex>
+#include "Options.h"
+
+namespace JSC {
+
+ParallelHelperPool& heapHelperPool()
+{
+ static std::once_flag initializeHelperPoolOnceFlag;
+ static ParallelHelperPool* helperPool;
+ std::call_once(
+ initializeHelperPoolOnceFlag,
+ [] {
+ helperPool = new ParallelHelperPool();
+ helperPool->ensureThreads(Options::numberOfGCMarkers() - 1);
+ });
+ return *helperPool;
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/HeapHelperPool.h b/Source/JavaScriptCore/heap/HeapHelperPool.h
new file mode 100644
index 000000000..098d9e571
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapHelperPool.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/ParallelHelperPool.h>
+
+namespace JSC {
+
+ParallelHelperPool& heapHelperPool();
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/HeapInlines.h b/Source/JavaScriptCore/heap/HeapInlines.h
new file mode 100644
index 000000000..620efc32a
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapInlines.h
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2014-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "GCDeferralContext.h"
+#include "Heap.h"
+#include "HeapCellInlines.h"
+#include "IndexingHeader.h"
+#include "JSCallee.h"
+#include "JSCell.h"
+#include "Structure.h"
+#include <type_traits>
+#include <wtf/Assertions.h>
+#include <wtf/MainThread.h>
+#include <wtf/RandomNumber.h>
+
+namespace JSC {
+
+ALWAYS_INLINE VM* Heap::vm() const
+{
+ return bitwise_cast<VM*>(bitwise_cast<uintptr_t>(this) - OBJECT_OFFSETOF(VM, heap));
+}
+
+ALWAYS_INLINE Heap* Heap::heap(const HeapCell* cell)
+{
+ if (!cell)
+ return nullptr;
+ return cell->heap();
+}
+
+inline Heap* Heap::heap(const JSValue v)
+{
+ if (!v.isCell())
+ return nullptr;
+ return heap(v.asCell());
+}
+
+inline bool Heap::hasHeapAccess() const
+{
+ return m_worldState.load() & hasAccessBit;
+}
+
+inline bool Heap::collectorBelievesThatTheWorldIsStopped() const
+{
+ return m_collectorBelievesThatTheWorldIsStopped;
+}
+
+ALWAYS_INLINE bool Heap::isMarked(const void* rawCell)
+{
+ ASSERT(mayBeGCThread() != GCThreadType::Helper);
+ HeapCell* cell = bitwise_cast<HeapCell*>(rawCell);
+ if (cell->isLargeAllocation())
+ return cell->largeAllocation().isMarked();
+ MarkedBlock& block = cell->markedBlock();
+ return block.isMarked(
+ block.vm()->heap.objectSpace().markingVersion(), cell);
+}
+
+ALWAYS_INLINE bool Heap::isMarkedConcurrently(const void* rawCell)
+{
+ HeapCell* cell = bitwise_cast<HeapCell*>(rawCell);
+ if (cell->isLargeAllocation())
+ return cell->largeAllocation().isMarked();
+ MarkedBlock& block = cell->markedBlock();
+ return block.isMarkedConcurrently(
+ block.vm()->heap.objectSpace().markingVersion(), cell);
+}
+
+ALWAYS_INLINE bool Heap::testAndSetMarked(HeapVersion markingVersion, const void* rawCell)
+{
+ HeapCell* cell = bitwise_cast<HeapCell*>(rawCell);
+ if (cell->isLargeAllocation())
+ return cell->largeAllocation().testAndSetMarked();
+ MarkedBlock& block = cell->markedBlock();
+ block.aboutToMark(markingVersion);
+ return block.testAndSetMarked(cell);
+}
+
+ALWAYS_INLINE size_t Heap::cellSize(const void* rawCell)
+{
+ return bitwise_cast<HeapCell*>(rawCell)->cellSize();
+}
+
+inline void Heap::writeBarrier(const JSCell* from, JSValue to)
+{
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ WriteBarrierCounters::countWriteBarrier();
+#endif
+ if (!to.isCell())
+ return;
+ writeBarrier(from, to.asCell());
+}
+
+inline void Heap::writeBarrier(const JSCell* from, JSCell* to)
+{
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ WriteBarrierCounters::countWriteBarrier();
+#endif
+ if (!from)
+ return;
+ if (!isWithinThreshold(from->cellState(), barrierThreshold()))
+ return;
+ if (LIKELY(!to))
+ return;
+ writeBarrierSlowPath(from);
+}
+
+inline void Heap::writeBarrier(const JSCell* from)
+{
+ ASSERT_GC_OBJECT_LOOKS_VALID(const_cast<JSCell*>(from));
+ if (!from)
+ return;
+ if (UNLIKELY(isWithinThreshold(from->cellState(), barrierThreshold())))
+ writeBarrierSlowPath(from);
+}
+
+inline void Heap::writeBarrierWithoutFence(const JSCell* from)
+{
+ ASSERT_GC_OBJECT_LOOKS_VALID(const_cast<JSCell*>(from));
+ if (!from)
+ return;
+ if (UNLIKELY(isWithinThreshold(from->cellState(), blackThreshold)))
+ addToRememberedSet(from);
+}
+
+inline void Heap::mutatorFence()
+{
+ if (isX86() || UNLIKELY(mutatorShouldBeFenced()))
+ WTF::storeStoreFence();
+}
+
+template<typename Functor> inline void Heap::forEachCodeBlock(const Functor& func)
+{
+ forEachCodeBlockImpl(scopedLambdaRef<bool(CodeBlock*)>(func));
+}
+
+template<typename Functor> inline void Heap::forEachCodeBlockIgnoringJITPlans(const Functor& func)
+{
+ forEachCodeBlockIgnoringJITPlansImpl(scopedLambdaRef<bool(CodeBlock*)>(func));
+}
+
+template<typename Functor> inline void Heap::forEachProtectedCell(const Functor& functor)
+{
+ for (auto& pair : m_protectedValues)
+ functor(pair.key);
+ m_handleSet.forEachStrongHandle(functor, m_protectedValues);
+}
+
+#if USE(FOUNDATION)
+template <typename T>
+inline void Heap::releaseSoon(RetainPtr<T>&& object)
+{
+ m_delayedReleaseObjects.append(WTFMove(object));
+}
+#endif
+
+inline void Heap::incrementDeferralDepth()
+{
+ ASSERT(!mayBeGCThread() || m_collectorBelievesThatTheWorldIsStopped);
+ m_deferralDepth++;
+}
+
+inline void Heap::decrementDeferralDepth()
+{
+ ASSERT(!mayBeGCThread() || m_collectorBelievesThatTheWorldIsStopped);
+ m_deferralDepth--;
+}
+
+inline void Heap::decrementDeferralDepthAndGCIfNeeded()
+{
+ ASSERT(!mayBeGCThread() || m_collectorBelievesThatTheWorldIsStopped);
+ m_deferralDepth--;
+
+ if (UNLIKELY(m_didDeferGCWork)) {
+ decrementDeferralDepthAndGCIfNeededSlow();
+
+ // Here are the possible relationships between m_deferralDepth and m_didDeferGCWork.
+ // Note that prior to the call to decrementDeferralDepthAndGCIfNeededSlow,
+ // m_didDeferGCWork had to have been true. Now it can be either false or true. There is
+ // nothing we can reliably assert.
+ //
+ // Possible arrangements of m_didDeferGCWork and !!m_deferralDepth:
+ //
+ // Both false: We popped out of all DeferGCs and we did whatever work was deferred.
+ //
+ // Only m_didDeferGCWork is true: We stopped for GC and the GC did DeferGC. This is
+ // possible because of how we handle the baseline JIT's worklist. It's also perfectly
+ // safe because it only protects reportExtraMemory. We can just ignore this.
+ //
+ // Only !!m_deferralDepth is true: m_didDeferGCWork had been set spuriously. It is only
+ // cleared by decrementDeferralDepthAndGCIfNeededSlow(). So, if we had deferred work but
+ // then decrementDeferralDepth()'d, then we might have the bit set even if we GC'd since
+ // then.
+ //
+ // Both true: We're in a recursive ~DeferGC. We wanted to do something about the
+ // deferred work, but were unable to.
+ }
+}
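+
+ // Illustrative sketch (not part of this patch), assuming the usual DeferGC RAII pattern in which
+ // the constructor calls incrementDeferralDepth() and the destructor calls
+ // decrementDeferralDepthAndGCIfNeeded():
+ //
+ //     {
+ //         DeferGC deferGC(vm.heap);
+ //         doSomethingThatMustNotTriggerGC(); // hypothetical
+ //     } // any GC work deferred inside the scope happens here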
+
+inline HashSet<MarkedArgumentBuffer*>& Heap::markListSet()
+{
+ if (!m_markListSet)
+ m_markListSet = std::make_unique<HashSet<MarkedArgumentBuffer*>>();
+ return *m_markListSet;
+}
+
+inline void Heap::reportExtraMemoryAllocated(size_t size)
+{
+ if (size > minExtraMemory)
+ reportExtraMemoryAllocatedSlowCase(size);
+}
+
+inline void Heap::deprecatedReportExtraMemory(size_t size)
+{
+ if (size > minExtraMemory)
+ deprecatedReportExtraMemorySlowCase(size);
+}
+
+inline void Heap::acquireAccess()
+{
+ if (m_worldState.compareExchangeWeak(0, hasAccessBit))
+ return;
+ acquireAccessSlow();
+}
+
+inline bool Heap::hasAccess() const
+{
+ return m_worldState.loadRelaxed() & hasAccessBit;
+}
+
+inline void Heap::releaseAccess()
+{
+ if (m_worldState.compareExchangeWeak(hasAccessBit, 0))
+ return;
+ releaseAccessSlow();
+}
+
+inline bool Heap::mayNeedToStop()
+{
+ return m_worldState.loadRelaxed() != hasAccessBit;
+}
+
+inline void Heap::stopIfNecessary()
+{
+ if (mayNeedToStop())
+ stopIfNecessarySlow();
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/HeapIterationScope.h b/Source/JavaScriptCore/heap/HeapIterationScope.h
index 382661c60..c70fa99a9 100644
--- a/Source/JavaScriptCore/heap/HeapIterationScope.h
+++ b/Source/JavaScriptCore/heap/HeapIterationScope.h
@@ -23,8 +23,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef HeapIterationScope_h
-#define HeapIterationScope_h
+#pragma once
#include "Heap.h"
#include <wtf/Noncopyable.h>
@@ -53,6 +52,3 @@ inline HeapIterationScope::~HeapIterationScope()
}
} // namespace JSC
-
-
-#endif // HeapIterationScope_h
diff --git a/Source/JavaScriptCore/heap/CopyToken.h b/Source/JavaScriptCore/heap/HeapObserver.h
index 35e0e67e6..ccbb7e6a6 100644
--- a/Source/JavaScriptCore/heap/CopyToken.h
+++ b/Source/JavaScriptCore/heap/HeapObserver.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,18 +23,17 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CopyToken_h
-#define CopyToken_h
+#pragma once
+
+#include "CollectionScope.h"
namespace JSC {
-enum CopyToken {
- ButterflyCopyToken,
- TypedArrayVectorCopyToken,
- MapBackingStoreCopyToken
+class HeapObserver {
+public:
+ virtual ~HeapObserver() { }
+ virtual void willGarbageCollect() = 0;
+ virtual void didGarbageCollect(CollectionScope) = 0;
};
} // namespace JSC
-
-#endif // CopyToken_h
-
diff --git a/Source/JavaScriptCore/heap/HeapProfiler.cpp b/Source/JavaScriptCore/heap/HeapProfiler.cpp
new file mode 100644
index 000000000..d4681b52c
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapProfiler.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "HeapProfiler.h"
+
+#include "HeapSnapshot.h"
+#include "VM.h"
+
+namespace JSC {
+
+HeapProfiler::HeapProfiler(VM& vm)
+ : m_vm(vm)
+{
+}
+
+HeapProfiler::~HeapProfiler()
+{
+}
+
+HeapSnapshot* HeapProfiler::mostRecentSnapshot()
+{
+ if (m_snapshots.isEmpty())
+ return nullptr;
+ return m_snapshots.last().get();
+}
+
+void HeapProfiler::appendSnapshot(std::unique_ptr<HeapSnapshot> snapshot)
+{
+ m_snapshots.append(WTFMove(snapshot));
+}
+
+void HeapProfiler::clearSnapshots()
+{
+ m_snapshots.clear();
+}
+
+void HeapProfiler::setActiveSnapshotBuilder(HeapSnapshotBuilder* builder)
+{
+ ASSERT(!!m_activeBuilder != !!builder);
+ m_activeBuilder = builder;
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/HeapProfiler.h b/Source/JavaScriptCore/heap/HeapProfiler.h
new file mode 100644
index 000000000..0a068fa43
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapProfiler.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class HeapSnapshot;
+class HeapSnapshotBuilder;
+class VM;
+
+class HeapProfiler {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ HeapProfiler(VM&);
+ ~HeapProfiler();
+
+ VM& vm() const { return m_vm; }
+
+ HeapSnapshot* mostRecentSnapshot();
+ void appendSnapshot(std::unique_ptr<HeapSnapshot>);
+ void clearSnapshots();
+
+ HeapSnapshotBuilder* activeSnapshotBuilder() const { return m_activeBuilder; }
+ void setActiveSnapshotBuilder(HeapSnapshotBuilder*);
+
+private:
+ VM& m_vm;
+ Vector<std::unique_ptr<HeapSnapshot>> m_snapshots;
+ HeapSnapshotBuilder* m_activeBuilder { nullptr };
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/HeapRootVisitor.h b/Source/JavaScriptCore/heap/HeapRootVisitor.h
deleted file mode 100644
index 5b11a5ead..000000000
--- a/Source/JavaScriptCore/heap/HeapRootVisitor.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef HeapRootVisitor_h
-#define HeapRootVisitor_h
-
-#include "SlotVisitor.h"
-#include "SlotVisitorInlines.h"
-
-namespace JSC {
-
- // Privileged class for marking JSValues directly. It is only safe to use
- // this class to mark direct heap roots that are marked during every GC pass.
- // All other references should be wrapped in WriteBarriers.
- class HeapRootVisitor {
- private:
- friend class Heap;
- HeapRootVisitor(SlotVisitor&);
-
- public:
- void visit(JSValue*);
- void visit(JSValue*, size_t);
- void visit(JSString**);
- void visit(JSCell**);
-
- SlotVisitor& visitor();
-
- private:
- SlotVisitor& m_visitor;
- };
-
- inline HeapRootVisitor::HeapRootVisitor(SlotVisitor& visitor)
- : m_visitor(visitor)
- {
- }
-
- inline void HeapRootVisitor::visit(JSValue* slot)
- {
- m_visitor.append(slot);
- }
-
- inline void HeapRootVisitor::visit(JSValue* slot, size_t count)
- {
- m_visitor.append(slot, count);
- }
-
- inline void HeapRootVisitor::visit(JSString** slot)
- {
- m_visitor.append(reinterpret_cast<JSCell**>(slot));
- }
-
- inline void HeapRootVisitor::visit(JSCell** slot)
- {
- m_visitor.append(slot);
- }
-
- inline SlotVisitor& HeapRootVisitor::visitor()
- {
- return m_visitor;
- }
-
-} // namespace JSC
-
-#endif // HeapRootVisitor_h
diff --git a/Source/JavaScriptCore/heap/HeapSnapshot.cpp b/Source/JavaScriptCore/heap/HeapSnapshot.cpp
new file mode 100644
index 000000000..34db12c3d
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapSnapshot.cpp
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "HeapSnapshot.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+HeapSnapshot::HeapSnapshot(HeapSnapshot* previousSnapshot)
+ : m_previous(previousSnapshot)
+{
+}
+
+HeapSnapshot::~HeapSnapshot()
+{
+}
+
+void HeapSnapshot::appendNode(const HeapSnapshotNode& node)
+{
+ ASSERT(!m_finalized);
+ ASSERT(!m_previous || !m_previous->nodeForCell(node.cell));
+
+ m_nodes.append(node);
+ m_filter.add(bitwise_cast<uintptr_t>(node.cell));
+}
+
+void HeapSnapshot::sweepCell(JSCell* cell)
+{
+ ASSERT(cell);
+
+ if (m_finalized && !m_filter.ruleOut(bitwise_cast<uintptr_t>(cell))) {
+ ASSERT_WITH_MESSAGE(!isEmpty(), "Our filter should have ruled us out if we are empty.");
+ unsigned start = 0;
+ unsigned end = m_nodes.size();
+ while (start != end) {
+ unsigned middle = start + ((end - start) / 2);
+ HeapSnapshotNode& node = m_nodes[middle];
+ if (cell == node.cell) {
+ // Cells should always have 0 as low bits.
+ // Mark this cell for removal by setting the low bit.
+ ASSERT(!(reinterpret_cast<intptr_t>(node.cell) & CellToSweepTag));
+ node.cell = reinterpret_cast<JSCell*>(reinterpret_cast<intptr_t>(node.cell) | CellToSweepTag);
+ m_hasCellsToSweep = true;
+ return;
+ }
+ if (cell < node.cell)
+ end = middle;
+ else
+ start = middle + 1;
+ }
+ }
+
+ if (m_previous)
+ m_previous->sweepCell(cell);
+}
+
+void HeapSnapshot::shrinkToFit()
+{
+ if (m_finalized && m_hasCellsToSweep) {
+ m_filter.reset();
+ m_nodes.removeAllMatching(
+ [&] (const HeapSnapshotNode& node) -> bool {
+ bool willRemoveCell = bitwise_cast<intptr_t>(node.cell) & CellToSweepTag;
+ if (!willRemoveCell)
+ m_filter.add(bitwise_cast<uintptr_t>(node.cell));
+ return willRemoveCell;
+ });
+ m_nodes.shrinkToFit();
+ m_hasCellsToSweep = false;
+ }
+
+ if (m_previous)
+ m_previous->shrinkToFit();
+}
+
+void HeapSnapshot::finalize()
+{
+ ASSERT(!m_finalized);
+ m_finalized = true;
+
+ // Nodes are appended to the snapshot in identifier order.
+ // Now that we have the complete list of nodes we will sort
+ // them in a different order. Remember the range of identifiers
+ // in this snapshot.
+ if (!isEmpty()) {
+ m_firstObjectIdentifier = m_nodes.first().identifier;
+ m_lastObjectIdentifier = m_nodes.last().identifier;
+ }
+
+ std::sort(m_nodes.begin(), m_nodes.end(), [] (const HeapSnapshotNode& a, const HeapSnapshotNode& b) {
+ return a.cell < b.cell;
+ });
+
+#ifndef NDEBUG
+ // Assert there are no duplicates or nullptr cells.
+ JSCell* previousCell = nullptr;
+ for (auto& node : m_nodes) {
+ ASSERT(node.cell);
+ ASSERT(!(reinterpret_cast<intptr_t>(node.cell) & CellToSweepTag));
+ if (node.cell == previousCell) {
+ dataLog("Seeing same cell twice: ", RawPointer(previousCell), "\n");
+ ASSERT(node.cell != previousCell);
+ }
+ previousCell = node.cell;
+ }
+#endif
+}
+
+std::optional<HeapSnapshotNode> HeapSnapshot::nodeForCell(JSCell* cell)
+{
+ ASSERT(m_finalized);
+
+ if (!m_filter.ruleOut(bitwise_cast<uintptr_t>(cell))) {
+ ASSERT_WITH_MESSAGE(!isEmpty(), "Our filter should have ruled us out if we are empty.");
+ unsigned start = 0;
+ unsigned end = m_nodes.size();
+ while (start != end) {
+ unsigned middle = start + ((end - start) / 2);
+ HeapSnapshotNode& node = m_nodes[middle];
+ if (cell == node.cell)
+ return std::optional<HeapSnapshotNode>(node);
+ if (cell < node.cell)
+ end = middle;
+ else
+ start = middle + 1;
+ }
+ }
+
+ if (m_previous)
+ return m_previous->nodeForCell(cell);
+
+ return std::nullopt;
+}
+
+std::optional<HeapSnapshotNode> HeapSnapshot::nodeForObjectIdentifier(unsigned objectIdentifier)
+{
+ if (isEmpty()) {
+ if (m_previous)
+ return m_previous->nodeForObjectIdentifier(objectIdentifier);
+ return std::nullopt;
+ }
+
+ if (objectIdentifier > m_lastObjectIdentifier)
+ return std::nullopt;
+
+ if (objectIdentifier < m_firstObjectIdentifier) {
+ if (m_previous)
+ return m_previous->nodeForObjectIdentifier(objectIdentifier);
+ return std::nullopt;
+ }
+
+ for (auto& node : m_nodes) {
+ if (node.identifier == objectIdentifier)
+ return std::optional<HeapSnapshotNode>(node);
+ }
+
+ return std::nullopt;
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/HeapSnapshot.h b/Source/JavaScriptCore/heap/HeapSnapshot.h
new file mode 100644
index 000000000..fdd3de3b5
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapSnapshot.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "HeapSnapshotBuilder.h"
+#include "TinyBloomFilter.h"
+#include <wtf/Optional.h>
+
+namespace JSC {
+
+class HeapSnapshot {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ HeapSnapshot(HeapSnapshot*);
+ ~HeapSnapshot();
+
+ HeapSnapshot* previous() const { return m_previous; }
+
+ void appendNode(const HeapSnapshotNode&);
+ void sweepCell(JSCell*);
+ void shrinkToFit();
+ void finalize();
+
+ bool isEmpty() const { return m_nodes.isEmpty(); }
+ std::optional<HeapSnapshotNode> nodeForCell(JSCell*);
+ std::optional<HeapSnapshotNode> nodeForObjectIdentifier(unsigned objectIdentifier);
+
+private:
+ friend class HeapSnapshotBuilder;
+ static const intptr_t CellToSweepTag = 1;
+
+ Vector<HeapSnapshotNode> m_nodes;
+ TinyBloomFilter m_filter;
+ HeapSnapshot* m_previous { nullptr };
+ unsigned m_firstObjectIdentifier { 0 };
+ unsigned m_lastObjectIdentifier { 0 };
+ bool m_finalized { false };
+ bool m_hasCellsToSweep { false };
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/HeapSnapshotBuilder.cpp b/Source/JavaScriptCore/heap/HeapSnapshotBuilder.cpp
new file mode 100644
index 000000000..5e5947592
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapSnapshotBuilder.cpp
@@ -0,0 +1,393 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "HeapSnapshotBuilder.h"
+
+#include "DeferGC.h"
+#include "Heap.h"
+#include "HeapProfiler.h"
+#include "HeapSnapshot.h"
+#include "JSCInlines.h"
+#include "JSCell.h"
+#include "PreventCollectionScope.h"
+#include "VM.h"
+#include <wtf/text/StringBuilder.h>
+
+namespace JSC {
+
+unsigned HeapSnapshotBuilder::nextAvailableObjectIdentifier = 1;
+unsigned HeapSnapshotBuilder::getNextObjectIdentifier() { return nextAvailableObjectIdentifier++; }
+void HeapSnapshotBuilder::resetNextAvailableObjectIdentifier() { HeapSnapshotBuilder::nextAvailableObjectIdentifier = 1; }
+
+HeapSnapshotBuilder::HeapSnapshotBuilder(HeapProfiler& profiler)
+ : m_profiler(profiler)
+{
+}
+
+HeapSnapshotBuilder::~HeapSnapshotBuilder()
+{
+}
+
+void HeapSnapshotBuilder::buildSnapshot()
+{
+ PreventCollectionScope preventCollectionScope(m_profiler.vm().heap);
+
+ m_snapshot = std::make_unique<HeapSnapshot>(m_profiler.mostRecentSnapshot());
+ {
+ m_profiler.setActiveSnapshotBuilder(this);
+ m_profiler.vm().heap.collectAllGarbage();
+ m_profiler.setActiveSnapshotBuilder(nullptr);
+ }
+ m_snapshot->finalize();
+
+ m_profiler.appendSnapshot(WTFMove(m_snapshot));
+}
+
+void HeapSnapshotBuilder::appendNode(JSCell* cell)
+{
+ ASSERT(m_profiler.activeSnapshotBuilder() == this);
+ ASSERT(Heap::isMarkedConcurrently(cell));
+
+ if (hasExistingNodeForCell(cell))
+ return;
+
+ std::lock_guard<Lock> lock(m_buildingNodeMutex);
+
+ m_snapshot->appendNode(HeapSnapshotNode(cell, getNextObjectIdentifier()));
+}
+
+void HeapSnapshotBuilder::appendEdge(JSCell* from, JSCell* to)
+{
+ ASSERT(m_profiler.activeSnapshotBuilder() == this);
+ ASSERT(to);
+
+ // Avoid trivial edges.
+ if (from == to)
+ return;
+
+ std::lock_guard<Lock> lock(m_buildingEdgeMutex);
+
+ m_edges.append(HeapSnapshotEdge(from, to));
+}
+
+void HeapSnapshotBuilder::appendPropertyNameEdge(JSCell* from, JSCell* to, UniquedStringImpl* propertyName)
+{
+ ASSERT(m_profiler.activeSnapshotBuilder() == this);
+ ASSERT(to);
+
+ std::lock_guard<Lock> lock(m_buildingEdgeMutex);
+
+ m_edges.append(HeapSnapshotEdge(from, to, EdgeType::Property, propertyName));
+}
+
+void HeapSnapshotBuilder::appendVariableNameEdge(JSCell* from, JSCell* to, UniquedStringImpl* variableName)
+{
+ ASSERT(m_profiler.activeSnapshotBuilder() == this);
+ ASSERT(to);
+
+ std::lock_guard<Lock> lock(m_buildingEdgeMutex);
+
+ m_edges.append(HeapSnapshotEdge(from, to, EdgeType::Variable, variableName));
+}
+
+void HeapSnapshotBuilder::appendIndexEdge(JSCell* from, JSCell* to, uint32_t index)
+{
+ ASSERT(m_profiler.activeSnapshotBuilder() == this);
+ ASSERT(to);
+
+ std::lock_guard<Lock> lock(m_buildingEdgeMutex);
+
+ m_edges.append(HeapSnapshotEdge(from, to, index));
+}
+
+bool HeapSnapshotBuilder::hasExistingNodeForCell(JSCell* cell)
+{
+ if (!m_snapshot->previous())
+ return false;
+
+ return !!m_snapshot->previous()->nodeForCell(cell);
+}
+
+
+// Heap Snapshot JSON Format:
+//
+// {
+// "version": 1.0,
+// "nodes": [
+// <nodeId>, <sizeInBytes>, <nodeClassNameIndex>, <internal>,
+// <nodeId>, <sizeInBytes>, <nodeClassNameIndex>, <internal>,
+// ...
+// ],
+// "nodeClassNames": [
+// "string", "Structure", "Object", ...
+// ],
+// "edges": [
+// <fromNodeId>, <toNodeId>, <edgeTypeIndex>, <edgeExtraData>,
+// <fromNodeId>, <toNodeId>, <edgeTypeIndex>, <edgeExtraData>,
+// ...
+// ],
+// "edgeTypes": [
+// "Internal", "Property", "Index", "Variable"
+// ],
+// "edgeNames": [
+// "propertyName", "variableName", ...
+// ]
+// }
+//
+// Notes:
+//
+// <nodeClassNameIndex>
+// - index into the "nodeClassNames" list.
+//
+// <internal>
+// - 0 = false, 1 = true.
+//
+// <edgeTypeIndex>
+// - index into the "edgeTypes" list.
+//
+// <edgeExtraData>
+// - for Internal edges this should be ignored (0).
+// - for Index edges this is the index value.
+// - for Property or Variable edges this is an index into the "edgeNames" list.
+
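+// Illustrative (hypothetical) instance of the format above: two heap nodes plus
+// the synthetic <root> node, and one Property edge named "foo". Real output is
+// emitted as a single unformatted line and is far larger.
+//
+// {
+//   "version": 1,
+//   "nodes": [0,0,0,0, 1,16,1,0, 2,32,2,0],
+//   "nodeClassNames": ["<root>", "string", "Object"],
+//   "edges": [0,2,0,0, 2,1,1,0],
+//   "edgeTypes": ["Internal", "Property", "Index", "Variable"],
+//   "edgeNames": ["foo"]
+// }
+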
+static uint8_t edgeTypeToNumber(EdgeType type)
+{
+ return static_cast<uint8_t>(type);
+}
+
+static const char* edgeTypeToString(EdgeType type)
+{
+ switch (type) {
+ case EdgeType::Internal:
+ return "Internal";
+ case EdgeType::Property:
+ return "Property";
+ case EdgeType::Index:
+ return "Index";
+ case EdgeType::Variable:
+ return "Variable";
+ }
+ ASSERT_NOT_REACHED();
+ return "Internal";
+}
+
+String HeapSnapshotBuilder::json()
+{
+ return json([] (const HeapSnapshotNode&) { return true; });
+}
+
+String HeapSnapshotBuilder::json(std::function<bool (const HeapSnapshotNode&)> allowNodeCallback)
+{
+ VM& vm = m_profiler.vm();
+ DeferGCForAWhile deferGC(vm.heap);
+
+ // Build a node to identifier map of allowed nodes to use when serializing edges.
+ HashMap<JSCell*, unsigned> allowedNodeIdentifiers;
+
+ // Build a list of used class names.
+ HashMap<const char*, unsigned> classNameIndexes;
+ classNameIndexes.set("<root>", 0);
+ unsigned nextClassNameIndex = 1;
+
+ // Build a list of used edge names.
+ HashMap<UniquedStringImpl*, unsigned> edgeNameIndexes;
+ unsigned nextEdgeNameIndex = 0;
+
+ StringBuilder json;
+
+ auto appendNodeJSON = [&] (const HeapSnapshotNode& node) {
+ // Let the client decide if they want to allow or disallow certain nodes.
+ if (!allowNodeCallback(node))
+ return;
+
+ allowedNodeIdentifiers.set(node.cell, node.identifier);
+
+ auto result = classNameIndexes.add(node.cell->classInfo(vm)->className, nextClassNameIndex);
+ if (result.isNewEntry)
+ nextClassNameIndex++;
+ unsigned classNameIndex = result.iterator->value;
+
+ bool isInternal = false;
+ if (!node.cell->isString()) {
+ Structure* structure = node.cell->structure(vm);
+ isInternal = !structure || !structure->globalObject();
+ }
+
+        // <nodeId>, <sizeInBytes>, <nodeClassNameIndex>, <internal>
+ json.append(',');
+ json.appendNumber(node.identifier);
+ json.append(',');
+ json.appendNumber(node.cell->estimatedSizeInBytes());
+ json.append(',');
+ json.appendNumber(classNameIndex);
+ json.append(',');
+ json.append(isInternal ? '1' : '0');
+ };
+
+ bool firstEdge = true;
+ auto appendEdgeJSON = [&] (const HeapSnapshotEdge& edge) {
+ if (!firstEdge)
+ json.append(',');
+ firstEdge = false;
+
+ // <fromNodeId>, <toNodeId>, <edgeTypeIndex>, <edgeExtraData>
+ json.appendNumber(edge.from.identifier);
+ json.append(',');
+ json.appendNumber(edge.to.identifier);
+ json.append(',');
+ json.appendNumber(edgeTypeToNumber(edge.type));
+ json.append(',');
+ switch (edge.type) {
+ case EdgeType::Property:
+ case EdgeType::Variable: {
+ auto result = edgeNameIndexes.add(edge.u.name, nextEdgeNameIndex);
+ if (result.isNewEntry)
+ nextEdgeNameIndex++;
+ unsigned edgeNameIndex = result.iterator->value;
+ json.appendNumber(edgeNameIndex);
+ break;
+ }
+ case EdgeType::Index:
+ json.appendNumber(edge.u.index);
+ break;
+ default:
+ // No data for this edge type.
+ json.append('0');
+ break;
+ }
+ };
+
+ json.append('{');
+
+ // version
+ json.appendLiteral("\"version\":1");
+
+ // nodes
+ json.append(',');
+ json.appendLiteral("\"nodes\":");
+ json.append('[');
+ json.appendLiteral("0,0,0,0"); // <root>
+ for (HeapSnapshot* snapshot = m_profiler.mostRecentSnapshot(); snapshot; snapshot = snapshot->previous()) {
+ for (auto& node : snapshot->m_nodes)
+ appendNodeJSON(node);
+ }
+ json.append(']');
+
+ // node class names
+ json.append(',');
+ json.appendLiteral("\"nodeClassNames\":");
+ json.append('[');
+    Vector<const char*> orderedClassNames(classNameIndexes.size());
+ for (auto& entry : classNameIndexes)
+ orderedClassNames[entry.value] = entry.key;
+ classNameIndexes.clear();
+ bool firstClassName = true;
+ for (auto& className : orderedClassNames) {
+ if (!firstClassName)
+ json.append(',');
+ firstClassName = false;
+ json.appendQuotedJSONString(className);
+ }
+ orderedClassNames.clear();
+ json.append(']');
+
+ // Process edges.
+ // Replace pointers with identifiers.
+ // Remove any edges that we won't need.
+ m_edges.removeAllMatching([&] (HeapSnapshotEdge& edge) {
+ // If the from cell is null, this means a <root> edge.
+ if (!edge.from.cell)
+ edge.from.identifier = 0;
+ else {
+ auto fromLookup = allowedNodeIdentifiers.find(edge.from.cell);
+ if (fromLookup == allowedNodeIdentifiers.end())
+ return true;
+ edge.from.identifier = fromLookup->value;
+ }
+
+ if (!edge.to.cell)
+ edge.to.identifier = 0;
+ else {
+ auto toLookup = allowedNodeIdentifiers.find(edge.to.cell);
+ if (toLookup == allowedNodeIdentifiers.end())
+ return true;
+ edge.to.identifier = toLookup->value;
+ }
+
+ return false;
+ });
+ allowedNodeIdentifiers.clear();
+ m_edges.shrinkToFit();
+
+ // Sort edges based on from identifier.
+ std::sort(m_edges.begin(), m_edges.end(), [&] (const HeapSnapshotEdge& a, const HeapSnapshotEdge& b) {
+ return a.from.identifier < b.from.identifier;
+ });
+
+ // edges
+ json.append(',');
+ json.appendLiteral("\"edges\":");
+ json.append('[');
+ for (auto& edge : m_edges)
+ appendEdgeJSON(edge);
+ json.append(']');
+
+ // edge types
+ json.append(',');
+ json.appendLiteral("\"edgeTypes\":");
+ json.append('[');
+ json.appendQuotedJSONString(edgeTypeToString(EdgeType::Internal));
+ json.append(',');
+ json.appendQuotedJSONString(edgeTypeToString(EdgeType::Property));
+ json.append(',');
+ json.appendQuotedJSONString(edgeTypeToString(EdgeType::Index));
+ json.append(',');
+ json.appendQuotedJSONString(edgeTypeToString(EdgeType::Variable));
+ json.append(']');
+
+ // edge names
+ json.append(',');
+ json.appendLiteral("\"edgeNames\":");
+ json.append('[');
+ Vector<UniquedStringImpl*> orderedEdgeNames(edgeNameIndexes.size());
+ for (auto& entry : edgeNameIndexes)
+ orderedEdgeNames[entry.value] = entry.key;
+ edgeNameIndexes.clear();
+ bool firstEdgeName = true;
+ for (auto& edgeName : orderedEdgeNames) {
+ if (!firstEdgeName)
+ json.append(',');
+ firstEdgeName = false;
+ json.appendQuotedJSONString(edgeName);
+ }
+ orderedEdgeNames.clear();
+ json.append(']');
+
+ json.append('}');
+ return json.toString();
+}
+
+} // namespace JSC
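A minimal usage sketch, not part of the patch itself: given a HeapProfiler, a
client can drive the builder defined above and filter what gets serialized. The
helper name is illustrative; the calls match the API declared in
HeapSnapshotBuilder.h below.

    static String snapshotWithoutStrings(HeapProfiler& profiler)
    {
        HeapSnapshotBuilder builder(profiler);
        builder.buildSnapshot(); // runs a full GC that records nodes and edges
        return builder.json([] (const HeapSnapshotNode& node) {
            return !node.cell->isString(); // drop string cells from the output
        });
    }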
diff --git a/Source/JavaScriptCore/heap/HeapSnapshotBuilder.h b/Source/JavaScriptCore/heap/HeapSnapshotBuilder.h
new file mode 100644
index 000000000..0ed85d19f
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapSnapshotBuilder.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <functional>
+#include <wtf/Lock.h>
+#include <wtf/Vector.h>
+#include <wtf/text/UniquedStringImpl.h>
+#include <wtf/text/WTFString.h>
+
+namespace JSC {
+
+class HeapProfiler;
+class HeapSnapshot;
+class JSCell;
+
+struct HeapSnapshotNode {
+ HeapSnapshotNode(JSCell* cell, unsigned identifier)
+ : cell(cell)
+ , identifier(identifier)
+ { }
+
+ JSCell* cell;
+ unsigned identifier;
+};
+
+enum class EdgeType : uint8_t {
+ Internal, // Normal strong reference. No name.
+    Property, // Named property. In `object.property` the name is "property".
+    Index, // Indexed property. In `array[0]` the name is the index "0".
+    Variable, // Variable held by a scope. In `let x, f=() => x++` the name is "x" in f's captured scope.
+ // FIXME: <https://webkit.org/b/154934> Heap Snapshot should include "Weak" edges
+};
+
+struct HeapSnapshotEdge {
+ HeapSnapshotEdge(JSCell* fromCell, JSCell* toCell)
+ : type(EdgeType::Internal)
+ {
+ from.cell = fromCell;
+ to.cell = toCell;
+ }
+
+ HeapSnapshotEdge(JSCell* fromCell, JSCell* toCell, EdgeType type, UniquedStringImpl* name)
+ : type(type)
+ {
+ ASSERT(type == EdgeType::Property || type == EdgeType::Variable);
+ from.cell = fromCell;
+ to.cell = toCell;
+ u.name = name;
+ }
+
+ HeapSnapshotEdge(JSCell* fromCell, JSCell* toCell, uint32_t index)
+ : type(EdgeType::Index)
+ {
+ from.cell = fromCell;
+ to.cell = toCell;
+ u.index = index;
+ }
+
+ union {
+        JSCell* cell;
+ unsigned identifier;
+ } from;
+
+ union {
+        JSCell* cell;
+ unsigned identifier;
+ } to;
+
+ union {
+ UniquedStringImpl* name;
+ uint32_t index;
+ } u;
+
+ EdgeType type;
+};
+
+class JS_EXPORT_PRIVATE HeapSnapshotBuilder {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ HeapSnapshotBuilder(HeapProfiler&);
+ ~HeapSnapshotBuilder();
+
+ static unsigned nextAvailableObjectIdentifier;
+ static unsigned getNextObjectIdentifier();
+ static void resetNextAvailableObjectIdentifier();
+
+ // Performs a garbage collection that builds a snapshot of all live cells.
+ void buildSnapshot();
+
+    // Called for each marked (live) cell encountered during the snapshot's collection.
+ void appendNode(JSCell*);
+
+ // A reference from one cell to another.
+ void appendEdge(JSCell* from, JSCell* to);
+ void appendPropertyNameEdge(JSCell* from, JSCell* to, UniquedStringImpl* propertyName);
+ void appendVariableNameEdge(JSCell* from, JSCell* to, UniquedStringImpl* variableName);
+ void appendIndexEdge(JSCell* from, JSCell* to, uint32_t index);
+
+ String json();
+ String json(std::function<bool (const HeapSnapshotNode&)> allowNodeCallback);
+
+private:
+    // Finalized snapshots are not modified while a new snapshot is being built,
+    // so they can be searched for an existing node concurrently without a lock.
+ bool hasExistingNodeForCell(JSCell*);
+
+ HeapProfiler& m_profiler;
+
+    // SlotVisitors run in parallel, so node and edge appends are guarded by these locks.
+ Lock m_buildingNodeMutex;
+ std::unique_ptr<HeapSnapshot> m_snapshot;
+ Lock m_buildingEdgeMutex;
+ Vector<HeapSnapshotEdge> m_edges;
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/HeapStatistics.cpp b/Source/JavaScriptCore/heap/HeapStatistics.cpp
deleted file mode 100644
index f23def711..000000000
--- a/Source/JavaScriptCore/heap/HeapStatistics.cpp
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "HeapStatistics.h"
-
-#include "Heap.h"
-#include "HeapIterationScope.h"
-#include "JSObject.h"
-#include "Operations.h"
-#include "Options.h"
-#include <stdlib.h>
-#if OS(UNIX)
-#include <sys/resource.h>
-#endif
-#include <wtf/CurrentTime.h>
-#include <wtf/DataLog.h>
-#include <wtf/Deque.h>
-
-namespace JSC {
-
-double HeapStatistics::s_startTime = 0.0;
-double HeapStatistics::s_endTime = 0.0;
-Vector<double>* HeapStatistics::s_pauseTimeStarts = 0;
-Vector<double>* HeapStatistics::s_pauseTimeEnds = 0;
-
-#if OS(UNIX)
-
-void HeapStatistics::initialize()
-{
- ASSERT(Options::recordGCPauseTimes());
- s_startTime = WTF::monotonicallyIncreasingTime();
- s_pauseTimeStarts = new Vector<double>();
- s_pauseTimeEnds = new Vector<double>();
-}
-
-void HeapStatistics::recordGCPauseTime(double start, double end)
-{
- ASSERT(Options::recordGCPauseTimes());
- ASSERT(s_pauseTimeStarts);
- ASSERT(s_pauseTimeEnds);
- s_pauseTimeStarts->append(start);
- s_pauseTimeEnds->append(end);
-}
-
-void HeapStatistics::logStatistics()
-{
- struct rusage usage;
- getrusage(RUSAGE_SELF, &usage);
-#if USE(CF) || OS(UNIX)
- char* vmName = getenv("JSVMName");
- char* suiteName = getenv("JSSuiteName");
- char* benchmarkName = getenv("JSBenchmarkName");
-#else
-#error "The HeapStatistics module is not supported on this platform."
-#endif
- if (!vmName || !suiteName || !benchmarkName)
- dataLogF("HeapStatistics: {\"max_rss\": %ld", usage.ru_maxrss);
- else
- dataLogF("HeapStatistics: {\"max_rss\": %ld, \"vm_name\": \"%s\", \"suite_name\": \"%s\", \"benchmark_name\": \"%s\"",
- usage.ru_maxrss, vmName, suiteName, benchmarkName);
-
- if (Options::recordGCPauseTimes()) {
- dataLogF(", \"pause_times\": [");
- Vector<double>::iterator startIt = s_pauseTimeStarts->begin();
- Vector<double>::iterator endIt = s_pauseTimeEnds->begin();
- if (startIt != s_pauseTimeStarts->end() && endIt != s_pauseTimeEnds->end()) {
- dataLogF("[%f, %f]", *startIt, *endIt);
- ++startIt;
- ++endIt;
- }
- while (startIt != s_pauseTimeStarts->end() && endIt != s_pauseTimeEnds->end()) {
- dataLogF(", [%f, %f]", *startIt, *endIt);
- ++startIt;
- ++endIt;
- }
- dataLogF("], \"start_time\": %f, \"end_time\": %f", s_startTime, s_endTime);
- }
- dataLogF("}\n");
-}
-
-void HeapStatistics::exitWithFailure()
-{
- ASSERT(Options::logHeapStatisticsAtExit());
- s_endTime = WTF::monotonicallyIncreasingTime();
- logStatistics();
- exit(-1);
-}
-
-void HeapStatistics::reportSuccess()
-{
- ASSERT(Options::logHeapStatisticsAtExit());
- s_endTime = WTF::monotonicallyIncreasingTime();
- logStatistics();
-}
-
-#else
-
-void HeapStatistics::initialize()
-{
-}
-
-void HeapStatistics::recordGCPauseTime(double, double)
-{
-}
-
-void HeapStatistics::logStatistics()
-{
-}
-
-void HeapStatistics::exitWithFailure()
-{
-}
-
-void HeapStatistics::reportSuccess()
-{
-}
-
-#endif // OS(UNIX)
-
-size_t HeapStatistics::parseMemoryAmount(char* s)
-{
- size_t multiplier = 1;
- char* afterS;
- size_t value = strtol(s, &afterS, 10);
- char next = afterS[0];
- switch (next) {
- case 'K':
- multiplier = KB;
- break;
- case 'M':
- multiplier = MB;
- break;
- case 'G':
- multiplier = GB;
- break;
- default:
- break;
- }
- return value * multiplier;
-}
-
-class StorageStatistics : public MarkedBlock::VoidFunctor {
-public:
- StorageStatistics();
-
- void operator()(JSCell*);
-
- size_t objectWithOutOfLineStorageCount();
- size_t objectCount();
-
- size_t storageSize();
- size_t storageCapacity();
-
-private:
- size_t m_objectWithOutOfLineStorageCount;
- size_t m_objectCount;
- size_t m_storageSize;
- size_t m_storageCapacity;
-};
-
-inline StorageStatistics::StorageStatistics()
- : m_objectWithOutOfLineStorageCount(0)
- , m_objectCount(0)
- , m_storageSize(0)
- , m_storageCapacity(0)
-{
-}
-
-inline void StorageStatistics::operator()(JSCell* cell)
-{
- if (!cell->isObject())
- return;
-
- JSObject* object = jsCast<JSObject*>(cell);
- if (hasIndexedProperties(object->structure()->indexingType()))
- return;
-
- if (object->structure()->isUncacheableDictionary())
- return;
-
- ++m_objectCount;
- if (!object->hasInlineStorage())
- ++m_objectWithOutOfLineStorageCount;
- m_storageSize += object->structure()->totalStorageSize() * sizeof(WriteBarrierBase<Unknown>);
- m_storageCapacity += object->structure()->totalStorageCapacity() * sizeof(WriteBarrierBase<Unknown>);
-}
-
-inline size_t StorageStatistics::objectWithOutOfLineStorageCount()
-{
- return m_objectWithOutOfLineStorageCount;
-}
-
-inline size_t StorageStatistics::objectCount()
-{
- return m_objectCount;
-}
-
-inline size_t StorageStatistics::storageSize()
-{
- return m_storageSize;
-}
-
-inline size_t StorageStatistics::storageCapacity()
-{
- return m_storageCapacity;
-}
-
-void HeapStatistics::showObjectStatistics(Heap* heap)
-{
- dataLogF("\n=== Heap Statistics: ===\n");
- dataLogF("size: %ldkB\n", static_cast<long>(heap->m_sizeAfterLastCollect / KB));
- dataLogF("capacity: %ldkB\n", static_cast<long>(heap->capacity() / KB));
- dataLogF("pause time: %lfms\n\n", heap->m_lastGCLength);
-
- StorageStatistics storageStatistics;
- {
- HeapIterationScope iterationScope(*heap);
- heap->m_objectSpace.forEachLiveCell(iterationScope, storageStatistics);
- }
- dataLogF("wasted .property storage: %ldkB (%ld%%)\n",
- static_cast<long>(
- (storageStatistics.storageCapacity() - storageStatistics.storageSize()) / KB),
- static_cast<long>(
- (storageStatistics.storageCapacity() - storageStatistics.storageSize()) * 100
- / storageStatistics.storageCapacity()));
- dataLogF("objects with out-of-line .property storage: %ld (%ld%%)\n",
- static_cast<long>(
- storageStatistics.objectWithOutOfLineStorageCount()),
- static_cast<long>(
- storageStatistics.objectWithOutOfLineStorageCount() * 100
- / storageStatistics.objectCount()));
-}
-
-} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/HeapStatistics.h b/Source/JavaScriptCore/heap/HeapStatistics.h
deleted file mode 100644
index 13a29efbe..000000000
--- a/Source/JavaScriptCore/heap/HeapStatistics.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef HeapStatistics_h
-#define HeapStatistics_h
-
-#include "JSExportMacros.h"
-#include <wtf/Deque.h>
-
-namespace JSC {
-
-class Heap;
-
-class HeapStatistics {
-public:
- NO_RETURN static void exitWithFailure();
- JS_EXPORT_PRIVATE static void reportSuccess();
-
- static void initialize();
- static void recordGCPauseTime(double start, double end);
- static size_t parseMemoryAmount(char*);
-
- static void showObjectStatistics(Heap*);
-
- static const size_t KB = 1024;
- static const size_t MB = 1024 * KB;
- static const size_t GB = 1024 * MB;
-
-private:
- static void logStatistics();
- static Vector<double>* s_pauseTimeStarts;
- static Vector<double>* s_pauseTimeEnds;
- static double s_startTime;
- static double s_endTime;
-};
-
-} // namespace JSC
-
-#endif
diff --git a/Source/JavaScriptCore/heap/HeapTimer.cpp b/Source/JavaScriptCore/heap/HeapTimer.cpp
index 1331b0ac0..9c8b30252 100644
--- a/Source/JavaScriptCore/heap/HeapTimer.cpp
+++ b/Source/JavaScriptCore/heap/HeapTimer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,116 +26,137 @@
#include "config.h"
#include "HeapTimer.h"
-#include "APIShims.h"
+#include "GCActivityCallback.h"
+#include "IncrementalSweeper.h"
#include "JSObject.h"
#include "JSString.h"
-
+#include "JSCInlines.h"
#include <wtf/MainThread.h>
#include <wtf/Threading.h>
-#if PLATFORM(EFL)
-#include <Ecore.h>
+#if USE(GLIB)
+#include <glib.h>
#endif
namespace JSC {
+void HeapTimer::timerDidFire()
+{
+ m_apiLock->lock();
+
+ RefPtr<VM> vm = m_apiLock->vm();
+ if (!vm) {
+ // The VM has been destroyed, so we should just give up.
+ m_apiLock->unlock();
+ return;
+ }
+
+ {
+ JSLockHolder locker(vm.get());
+ doWork();
+ }
+
+ m_apiLock->unlock();
+}
+
#if USE(CF)
const CFTimeInterval HeapTimer::s_decade = 60 * 60 * 24 * 365 * 10;
-static const void* retainAPILock(const void* info)
+HeapTimer::HeapTimer(VM* vm)
+ : m_vm(vm)
+ , m_apiLock(&vm->apiLock())
{
- static_cast<JSLock*>(const_cast<void*>(info))->ref();
- return info;
+ setRunLoop(vm->heap.runLoop());
}
-static void releaseAPILock(const void* info)
+void HeapTimer::setRunLoop(CFRunLoopRef runLoop)
{
- static_cast<JSLock*>(const_cast<void*>(info))->deref();
+ if (m_runLoop) {
+ CFRunLoopRemoveTimer(m_runLoop.get(), m_timer.get(), kCFRunLoopCommonModes);
+ CFRunLoopTimerInvalidate(m_timer.get());
+ m_runLoop.clear();
+ m_timer.clear();
+ }
+
+ if (runLoop) {
+ m_runLoop = runLoop;
+ memset(&m_context, 0, sizeof(CFRunLoopTimerContext));
+ m_context.info = this;
+ m_timer = adoptCF(CFRunLoopTimerCreate(kCFAllocatorDefault, s_decade, s_decade, 0, 0, HeapTimer::timerDidFireCallback, &m_context));
+ CFRunLoopAddTimer(m_runLoop.get(), m_timer.get(), kCFRunLoopCommonModes);
+ }
}
-HeapTimer::HeapTimer(VM* vm, CFRunLoopRef runLoop)
- : m_vm(vm)
- , m_runLoop(runLoop)
+HeapTimer::~HeapTimer()
{
- memset(&m_context, 0, sizeof(CFRunLoopTimerContext));
- m_context.info = &vm->apiLock();
- m_context.retain = retainAPILock;
- m_context.release = releaseAPILock;
- m_timer = adoptCF(CFRunLoopTimerCreate(0, s_decade, s_decade, 0, 0, HeapTimer::timerDidFire, &m_context));
- CFRunLoopAddTimer(m_runLoop.get(), m_timer.get(), kCFRunLoopCommonModes);
+ setRunLoop(0);
}
-HeapTimer::~HeapTimer()
+void HeapTimer::timerDidFireCallback(CFRunLoopTimerRef, void* contextPtr)
{
- CFRunLoopRemoveTimer(m_runLoop.get(), m_timer.get(), kCFRunLoopCommonModes);
- CFRunLoopTimerInvalidate(m_timer.get());
+ static_cast<JSRunLoopTimer*>(contextPtr)->timerDidFire();
}
-void HeapTimer::timerDidFire(CFRunLoopTimerRef timer, void* context)
+void HeapTimer::scheduleTimer(double intervalInSeconds)
{
- JSLock* apiLock = static_cast<JSLock*>(context);
- apiLock->lock();
-
- VM* vm = apiLock->vm();
- // The VM has been destroyed, so we should just give up.
- if (!vm) {
- apiLock->unlock();
- return;
- }
+ CFRunLoopTimerSetNextFireDate(m_timer.get(), CFAbsoluteTimeGetCurrent() + intervalInSeconds);
+ m_isScheduled = true;
+}
- HeapTimer* heapTimer = 0;
- if (vm->heap.activityCallback() && vm->heap.activityCallback()->m_timer.get() == timer)
- heapTimer = vm->heap.activityCallback();
- else if (vm->heap.sweeper()->m_timer.get() == timer)
- heapTimer = vm->heap.sweeper();
- else
- RELEASE_ASSERT_NOT_REACHED();
+void HeapTimer::cancelTimer()
+{
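+    // "Cancelling" just pushes the fire date roughly a decade into the future;
+    // the timer stays installed on the run loop, and scheduleTimer() simply
+    // pulls the fire date back in.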
+ CFRunLoopTimerSetNextFireDate(m_timer.get(), CFAbsoluteTimeGetCurrent() + s_decade);
+ m_isScheduled = false;
+}
- {
- APIEntryShim shim(vm);
- heapTimer->doWork();
- }
+#elif USE(GLIB)
- apiLock->unlock();
-}
+const long HeapTimer::s_decade = 60 * 60 * 24 * 365 * 10;
-#elif PLATFORM(EFL)
+static GSourceFuncs heapTimerSourceFunctions = {
+ nullptr, // prepare
+ nullptr, // check
+ // dispatch
+ [](GSource*, GSourceFunc callback, gpointer userData) -> gboolean
+ {
+ return callback(userData);
+ },
+ nullptr, // finalize
+ nullptr, // closure_callback
+ nullptr, // closure_marshall
+};
HeapTimer::HeapTimer(VM* vm)
: m_vm(vm)
- , m_timer(0)
+ , m_apiLock(&vm->apiLock())
+ , m_timer(adoptGRef(g_source_new(&heapTimerSourceFunctions, sizeof(GSource))))
{
+ g_source_set_name(m_timer.get(), "[JavaScriptCore] HeapTimer");
+ g_source_set_callback(m_timer.get(), [](gpointer userData) -> gboolean {
+ auto& heapTimer = *static_cast<HeapTimer*>(userData);
+ g_source_set_ready_time(heapTimer.m_timer.get(), g_get_monotonic_time() + HeapTimer::s_decade * G_USEC_PER_SEC);
+ heapTimer.timerDidFire();
+ return G_SOURCE_CONTINUE;
+ }, this, nullptr);
+ g_source_attach(m_timer.get(), g_main_context_get_thread_default());
}
HeapTimer::~HeapTimer()
{
- stop();
+ g_source_destroy(m_timer.get());
}
-Ecore_Timer* HeapTimer::add(double delay, void* agent)
+void HeapTimer::scheduleTimer(double intervalInSeconds)
{
- return ecore_timer_add(delay, reinterpret_cast<Ecore_Task_Cb>(timerEvent), agent);
-}
-
-void HeapTimer::stop()
-{
- if (!m_timer)
- return;
-
- ecore_timer_del(m_timer);
- m_timer = 0;
+ g_source_set_ready_time(m_timer.get(), g_get_monotonic_time() + intervalInSeconds * G_USEC_PER_SEC);
+ m_isScheduled = true;
}
-bool HeapTimer::timerEvent(void* info)
+void HeapTimer::cancelTimer()
{
- HeapTimer* agent = static_cast<HeapTimer*>(info);
-
- APIEntryShim shim(agent->m_vm);
- agent->doWork();
- agent->m_timer = 0;
-
- return ECORE_CALLBACK_CANCEL;
+ g_source_set_ready_time(m_timer.get(), g_get_monotonic_time() + s_decade * G_USEC_PER_SEC);
+ m_isScheduled = false;
}
#else
HeapTimer::HeapTimer(VM* vm)
@@ -147,10 +168,13 @@ HeapTimer::~HeapTimer()
{
}
-void HeapTimer::invalidate()
+void HeapTimer::scheduleTimer(double)
{
}
+void HeapTimer::cancelTimer()
+{
+}
#endif
diff --git a/Source/JavaScriptCore/heap/HeapTimer.h b/Source/JavaScriptCore/heap/HeapTimer.h
index 760405c79..48c29a43a 100644
--- a/Source/JavaScriptCore/heap/HeapTimer.h
+++ b/Source/JavaScriptCore/heap/HeapTimer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2015-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,60 +23,66 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef HeapTimer_h
-#define HeapTimer_h
+#pragma once
+#include <wtf/Lock.h>
+#include <wtf/RefPtr.h>
#include <wtf/RetainPtr.h>
+#include <wtf/ThreadSafeRefCounted.h>
#include <wtf/Threading.h>
#if USE(CF)
#include <CoreFoundation/CoreFoundation.h>
-#elif PLATFORM(EFL)
-#if USE(EO)
-typedef struct _Eo_Opaque Ecore_Timer;
-#else
-typedef struct _Ecore_Timer Ecore_Timer;
#endif
+
+#if USE(GLIB)
+#include <wtf/glib/GRefPtr.h>
#endif
namespace JSC {
+class JSLock;
class VM;
-class HeapTimer {
+class HeapTimer : public ThreadSafeRefCounted<HeapTimer> {
public:
-#if USE(CF)
- HeapTimer(VM*, CFRunLoopRef);
- static void timerDidFire(CFRunLoopTimerRef, void*);
-#else
HeapTimer(VM*);
+#if USE(CF)
+ static void timerDidFireCallback(CFRunLoopTimerRef, void*);
#endif
JS_EXPORT_PRIVATE virtual ~HeapTimer();
virtual void doWork() = 0;
+
+ void scheduleTimer(double intervalInSeconds);
+ void cancelTimer();
+ bool isScheduled() const { return m_isScheduled; }
+
+#if USE(CF)
+ JS_EXPORT_PRIVATE void setRunLoop(CFRunLoopRef);
+#endif // USE(CF)
protected:
VM* m_vm;
+ RefPtr<JSLock> m_apiLock;
+ bool m_isScheduled { false };
#if USE(CF)
static const CFTimeInterval s_decade;
RetainPtr<CFRunLoopTimerRef> m_timer;
RetainPtr<CFRunLoopRef> m_runLoop;
+
CFRunLoopTimerContext m_context;
- Mutex m_shutdownMutex;
-#elif PLATFORM(EFL)
- static bool timerEvent(void*);
- Ecore_Timer* add(double delay, void* agent);
- void stop();
- Ecore_Timer* m_timer;
+ Lock m_shutdownMutex;
+#elif USE(GLIB)
+ static const long s_decade;
+ GRefPtr<GSource> m_timer;
#endif
private:
- void invalidate();
+ void timerDidFire();
};
} // namespace JSC
-
-#endif
diff --git a/Source/JavaScriptCore/heap/HeapUtil.h b/Source/JavaScriptCore/heap/HeapUtil.h
new file mode 100644
index 000000000..44d14baff
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapUtil.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace JSC {
+
+// Are you tired of waiting for all of WebKit to build because you changed the implementation of a
+// function in HeapInlines.h? Does it bother you that you're waiting on rebuilding the JS DOM
+// bindings even though your change is in a function called from only 2 .cpp files? Then HeapUtil.h
+// is for you! Everything in this class should be a static method that takes a Heap& if needed.
+// This is a friend of Heap, so you can access all of Heap's privates.
+//
+// This ends up being an issue because Heap exposes a lot of methods that ought to be inline for
+// performance or that must be inline because they are templates. This class ought to contain
+// methods that are used for the implementation of the collector, or for unusual clients that need
+// to reach deep into the collector for some reason. Don't put things in here that would cause you
+// to have to include it from more than a handful of places, since that would defeat the purpose.
+// This class isn't here to look pretty. It's to let us hack the GC more easily!
+
+class HeapUtil {
+public:
+    // To be accurate, this function must be run after stopAllocation() is
+    // called and before liveness data is cleared.
+ template<typename Func>
+ static void findGCObjectPointersForMarking(
+ Heap& heap, HeapVersion markingVersion, TinyBloomFilter filter, void* passedPointer,
+ const Func& func)
+ {
+ const HashSet<MarkedBlock*>& set = heap.objectSpace().blocks().set();
+
+ ASSERT(heap.objectSpace().isMarking());
+ static const bool isMarking = true;
+
+ char* pointer = static_cast<char*>(passedPointer);
+
+ // It could point to a large allocation.
+ if (heap.objectSpace().largeAllocationsForThisCollectionSize()) {
+ if (heap.objectSpace().largeAllocationsForThisCollectionBegin()[0]->aboveLowerBound(pointer)
+ && heap.objectSpace().largeAllocationsForThisCollectionEnd()[-1]->belowUpperBound(pointer)) {
+ LargeAllocation** result = approximateBinarySearch<LargeAllocation*>(
+ heap.objectSpace().largeAllocationsForThisCollectionBegin(),
+ heap.objectSpace().largeAllocationsForThisCollectionSize(),
+ LargeAllocation::fromCell(pointer),
+ [] (LargeAllocation** ptr) -> LargeAllocation* { return *ptr; });
+ if (result) {
+ if (result > heap.objectSpace().largeAllocationsForThisCollectionBegin()
+ && result[-1]->contains(pointer))
+ func(result[-1]->cell());
+ if (result[0]->contains(pointer))
+ func(result[0]->cell());
+ if (result + 1 < heap.objectSpace().largeAllocationsForThisCollectionEnd()
+ && result[1]->contains(pointer))
+ func(result[1]->cell());
+ }
+ }
+ }
+
+ MarkedBlock* candidate = MarkedBlock::blockFor(pointer);
+ // It's possible for a butterfly pointer to point past the end of a butterfly. Check this now.
+ if (pointer <= bitwise_cast<char*>(candidate) + sizeof(IndexingHeader)) {
+ // We may be interested in the last cell of the previous MarkedBlock.
+ char* previousPointer = pointer - sizeof(IndexingHeader) - 1;
+ MarkedBlock* previousCandidate = MarkedBlock::blockFor(previousPointer);
+ if (!filter.ruleOut(bitwise_cast<Bits>(previousCandidate))
+ && set.contains(previousCandidate)
+ && previousCandidate->handle().cellKind() == HeapCell::Auxiliary) {
+ previousPointer = static_cast<char*>(previousCandidate->handle().cellAlign(previousPointer));
+ if (previousCandidate->handle().isLiveCell(markingVersion, isMarking, previousPointer))
+ func(previousPointer);
+ }
+ }
+
+ if (filter.ruleOut(bitwise_cast<Bits>(candidate))) {
+ ASSERT(!candidate || !set.contains(candidate));
+ return;
+ }
+
+ if (!set.contains(candidate))
+ return;
+
+ auto tryPointer = [&] (void* pointer) {
+ if (candidate->handle().isLiveCell(markingVersion, isMarking, pointer))
+ func(pointer);
+ };
+
+ if (candidate->handle().cellKind() == HeapCell::JSCell) {
+ if (!MarkedBlock::isAtomAligned(pointer))
+ return;
+
+ tryPointer(pointer);
+ return;
+ }
+
+ // A butterfly could point into the middle of an object.
+ char* alignedPointer = static_cast<char*>(candidate->handle().cellAlign(pointer));
+ tryPointer(alignedPointer);
+
+ // Also, a butterfly could point at the end of an object plus sizeof(IndexingHeader). In that
+ // case, this is pointing to the object to the right of the one we should be marking.
+ if (candidate->atomNumber(alignedPointer) > MarkedBlock::firstAtom()
+ && pointer <= alignedPointer + sizeof(IndexingHeader))
+ tryPointer(alignedPointer - candidate->cellSize());
+ }
+
+ static bool isPointerGCObjectJSCell(
+ Heap& heap, TinyBloomFilter filter, const void* pointer)
+ {
+ // It could point to a large allocation.
+ const Vector<LargeAllocation*>& largeAllocations = heap.objectSpace().largeAllocations();
+ if (!largeAllocations.isEmpty()) {
+ if (largeAllocations[0]->aboveLowerBound(pointer)
+ && largeAllocations.last()->belowUpperBound(pointer)) {
+ LargeAllocation*const* result = approximateBinarySearch<LargeAllocation*const>(
+ largeAllocations.begin(), largeAllocations.size(),
+ LargeAllocation::fromCell(pointer),
+ [] (LargeAllocation*const* ptr) -> LargeAllocation* { return *ptr; });
+ if (result) {
+ if (result > largeAllocations.begin()
+ && result[-1]->cell() == pointer
+ && result[-1]->attributes().cellKind == HeapCell::JSCell)
+ return true;
+ if (result[0]->cell() == pointer
+ && result[0]->attributes().cellKind == HeapCell::JSCell)
+ return true;
+ if (result + 1 < largeAllocations.end()
+ && result[1]->cell() == pointer
+ && result[1]->attributes().cellKind == HeapCell::JSCell)
+ return true;
+ }
+ }
+ }
+
+ const HashSet<MarkedBlock*>& set = heap.objectSpace().blocks().set();
+
+ MarkedBlock* candidate = MarkedBlock::blockFor(pointer);
+ if (filter.ruleOut(bitwise_cast<Bits>(candidate))) {
+ ASSERT(!candidate || !set.contains(candidate));
+ return false;
+ }
+
+ if (!MarkedBlock::isAtomAligned(pointer))
+ return false;
+
+ if (!set.contains(candidate))
+ return false;
+
+ if (candidate->handle().cellKind() != HeapCell::JSCell)
+ return false;
+
+ if (!candidate->handle().isLiveCell(pointer))
+ return false;
+
+ return true;
+ }
+
+ static bool isValueGCObject(
+ Heap& heap, TinyBloomFilter filter, JSValue value)
+ {
+ if (!value.isCell())
+ return false;
+ return isPointerGCObjectJSCell(heap, filter, static_cast<void*>(value.asCell()));
+ }
+};
+
+} // namespace JSC
+
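A sketch, not part of the patch, of how a conservative scanner might use
findGCObjectPointersForMarking() above. The helper name and the Vector output
are illustrative assumptions; the call itself matches the signature declared in
this file.

    static void scanRangeConservatively(Heap& heap, HeapVersion markingVersion,
        TinyBloomFilter filter, void** begin, void** end, Vector<HeapCell*>& roots)
    {
        for (void** current = begin; current != end; ++current) {
            HeapUtil::findGCObjectPointersForMarking(heap, markingVersion, filter, *current,
                [&] (void* cell) {
                    // Record every candidate that the heap recognizes as a live cell.
                    roots.append(static_cast<HeapCell*>(cell));
                });
        }
    }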
diff --git a/Source/JavaScriptCore/heap/HeapVerifier.cpp b/Source/JavaScriptCore/heap/HeapVerifier.cpp
new file mode 100644
index 000000000..fada794cd
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapVerifier.cpp
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2014, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "HeapVerifier.h"
+
+#include "ButterflyInlines.h"
+#include "HeapIterationScope.h"
+#include "JSCInlines.h"
+#include "JSObject.h"
+#include "MarkedSpaceInlines.h"
+
+namespace JSC {
+
+HeapVerifier::HeapVerifier(Heap* heap, unsigned numberOfGCCyclesToRecord)
+ : m_heap(heap)
+ , m_currentCycle(0)
+ , m_numberOfCycles(numberOfGCCyclesToRecord)
+{
+ RELEASE_ASSERT(m_numberOfCycles > 0);
+ m_cycles = std::make_unique<GCCycle[]>(m_numberOfCycles);
+}
+
+const char* HeapVerifier::phaseName(HeapVerifier::Phase phase)
+{
+ switch (phase) {
+ case Phase::BeforeGC:
+ return "BeforeGC";
+ case Phase::BeforeMarking:
+ return "BeforeMarking";
+ case Phase::AfterMarking:
+ return "AfterMarking";
+ case Phase::AfterGC:
+ return "AfterGC";
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr; // Silencing a compiler warning.
+}
+
+void HeapVerifier::initializeGCCycle()
+{
+ Heap* heap = m_heap;
+ incrementCycle();
+ currentCycle().scope = *heap->collectionScope();
+}
+
+struct GatherLiveObjFunctor : MarkedBlock::CountFunctor {
+ GatherLiveObjFunctor(LiveObjectList& list)
+ : m_list(list)
+ {
+ ASSERT(!list.liveObjects.size());
+ }
+
+ inline void visit(JSCell* cell)
+ {
+ if (!cell->isObject())
+ return;
+ LiveObjectData data(asObject(cell));
+ m_list.liveObjects.append(data);
+ }
+
+ IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const
+ {
+ if (kind == HeapCell::JSCell) {
+ // FIXME: This const_cast exists because this isn't a C++ lambda.
+ // https://bugs.webkit.org/show_bug.cgi?id=159644
+ const_cast<GatherLiveObjFunctor*>(this)->visit(static_cast<JSCell*>(cell));
+ }
+ return IterationStatus::Continue;
+ }
+
+ LiveObjectList& m_list;
+};
+
+void HeapVerifier::gatherLiveObjects(HeapVerifier::Phase phase)
+{
+ Heap* heap = m_heap;
+ LiveObjectList& list = *liveObjectListForGathering(phase);
+
+ HeapIterationScope iterationScope(*heap);
+ list.reset();
+ GatherLiveObjFunctor functor(list);
+ heap->m_objectSpace.forEachLiveCell(iterationScope, functor);
+}
+
+LiveObjectList* HeapVerifier::liveObjectListForGathering(HeapVerifier::Phase phase)
+{
+ switch (phase) {
+ case Phase::BeforeMarking:
+ return &currentCycle().before;
+ case Phase::AfterMarking:
+ return &currentCycle().after;
+ case Phase::BeforeGC:
+ case Phase::AfterGC:
+ // We should not be gathering live objects during these phases.
+ break;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr; // Silencing a compiler warning.
+}
+
+static void trimDeadObjectsFromList(HashSet<JSObject*>& knownLiveSet, LiveObjectList& list)
+{
+ if (!list.hasLiveObjects)
+ return;
+
+ size_t liveObjectsFound = 0;
+ for (auto& objData : list.liveObjects) {
+ if (objData.isConfirmedDead)
+ continue; // Don't "resurrect" known dead objects.
+ if (!knownLiveSet.contains(objData.obj)) {
+ objData.isConfirmedDead = true;
+ continue;
+ }
+ liveObjectsFound++;
+ }
+ list.hasLiveObjects = !!liveObjectsFound;
+}
+
+void HeapVerifier::trimDeadObjects()
+{
+ HashSet<JSObject*> knownLiveSet;
+
+ LiveObjectList& after = currentCycle().after;
+ for (auto& objData : after.liveObjects)
+ knownLiveSet.add(objData.obj);
+
+ trimDeadObjectsFromList(knownLiveSet, currentCycle().before);
+
+ for (int i = -1; i > -m_numberOfCycles; i--) {
+ trimDeadObjectsFromList(knownLiveSet, cycleForIndex(i).before);
+ trimDeadObjectsFromList(knownLiveSet, cycleForIndex(i).after);
+ }
+}
+
+bool HeapVerifier::verifyButterflyIsInStorageSpace(Phase, LiveObjectList&)
+{
+ // FIXME: Make this work again. https://bugs.webkit.org/show_bug.cgi?id=161752
+ return true;
+}
+
+void HeapVerifier::verify(HeapVerifier::Phase phase)
+{
+ bool beforeVerified = verifyButterflyIsInStorageSpace(phase, currentCycle().before);
+ bool afterVerified = verifyButterflyIsInStorageSpace(phase, currentCycle().after);
+ RELEASE_ASSERT(beforeVerified && afterVerified);
+}
+
+void HeapVerifier::reportObject(LiveObjectData& objData, int cycleIndex, HeapVerifier::GCCycle& cycle, LiveObjectList& list)
+{
+ JSObject* obj = objData.obj;
+
+ if (objData.isConfirmedDead) {
+ dataLogF("FOUND dead obj %p in GC[%d] %s list '%s'\n",
+ obj, cycleIndex, collectionScopeName(cycle.scope), list.name);
+ return;
+ }
+
+ Structure* structure = obj->structure();
+ Butterfly* butterfly = obj->butterfly();
+ void* butterflyBase = butterfly->base(structure);
+
+ dataLogF("FOUND obj %p type '%s' butterfly %p (base %p) in GC[%d] %s list '%s'\n",
+ obj, structure->classInfo()->className,
+ butterfly, butterflyBase,
+ cycleIndex, collectionScopeName(cycle.scope), list.name);
+}
+
+void HeapVerifier::checkIfRecorded(JSObject* obj)
+{
+ bool found = false;
+
+ for (int cycleIndex = 0; cycleIndex > -m_numberOfCycles; cycleIndex--) {
+ GCCycle& cycle = cycleForIndex(cycleIndex);
+ LiveObjectList& beforeList = cycle.before;
+ LiveObjectList& afterList = cycle.after;
+
+ LiveObjectData* objData;
+ objData = beforeList.findObject(obj);
+ if (objData) {
+ reportObject(*objData, cycleIndex, cycle, beforeList);
+ found = true;
+ }
+ objData = afterList.findObject(obj);
+ if (objData) {
+ reportObject(*objData, cycleIndex, cycle, afterList);
+ found = true;
+ }
+ }
+
+ if (!found)
+ dataLogF("obj %p NOT FOUND\n", obj);
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/HeapVerifier.h b/Source/JavaScriptCore/heap/HeapVerifier.h
new file mode 100644
index 000000000..d736b4714
--- /dev/null
+++ b/Source/JavaScriptCore/heap/HeapVerifier.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2014-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "Heap.h"
+#include "LiveObjectList.h"
+
+namespace JSC {
+
+class JSObject;
+class MarkedBlock;
+
+class HeapVerifier {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ enum class Phase {
+ BeforeGC,
+ BeforeMarking,
+ AfterMarking,
+ AfterGC
+ };
+
+ HeapVerifier(Heap*, unsigned numberOfGCCyclesToRecord);
+
+ void initializeGCCycle();
+ void gatherLiveObjects(Phase);
+ void trimDeadObjects();
+ void verify(Phase);
+
+ // Scans all previously recorded LiveObjectLists and checks if the specified
+ // object was in any of those lists.
+ JS_EXPORT_PRIVATE void checkIfRecorded(JSObject*);
+
+ static const char* phaseName(Phase);
+
+private:
+ struct GCCycle {
+ GCCycle()
+ : before("Before Marking")
+ , after("After Marking")
+ {
+ }
+
+ CollectionScope scope;
+ LiveObjectList before;
+ LiveObjectList after;
+ };
+
+ void incrementCycle() { m_currentCycle = (m_currentCycle + 1) % m_numberOfCycles; }
+ GCCycle& currentCycle() { return m_cycles[m_currentCycle]; }
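+    // cycleIndex is relative: 0 is the current cycle, -1 the one before it, and
+    // so on. For example, with m_numberOfCycles == 3 and m_currentCycle == 0,
+    // cycleForIndex(-1) wraps around to m_cycles[2].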
+ GCCycle& cycleForIndex(int cycleIndex)
+ {
+ ASSERT(cycleIndex <= 0 && cycleIndex > -m_numberOfCycles);
+ cycleIndex += m_currentCycle;
+ if (cycleIndex < 0)
+ cycleIndex += m_numberOfCycles;
+ ASSERT(cycleIndex < m_numberOfCycles);
+ return m_cycles[cycleIndex];
+ }
+
+ LiveObjectList* liveObjectListForGathering(Phase);
+ bool verifyButterflyIsInStorageSpace(Phase, LiveObjectList&);
+
+ static void reportObject(LiveObjectData&, int cycleIndex, HeapVerifier::GCCycle&, LiveObjectList&);
+
+ Heap* m_heap;
+ int m_currentCycle;
+ int m_numberOfCycles;
+ std::unique_ptr<GCCycle[]> m_cycles;
+};
+
+} // namespace JSC
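A hedged sketch of how a collector might drive the verifier over one GC cycle.
The real call sites live in Heap.cpp, which is outside this hunk, so the
ordering below is only inferred from the Phase names.

    static void verifyOneCycle(HeapVerifier& verifier)
    {
        verifier.initializeGCCycle();
        verifier.gatherLiveObjects(HeapVerifier::Phase::BeforeMarking);
        // ... marking runs here ...
        verifier.gatherLiveObjects(HeapVerifier::Phase::AfterMarking);
        verifier.trimDeadObjects();
        verifier.verify(HeapVerifier::Phase::AfterGC);
    }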
diff --git a/Source/JavaScriptCore/heap/IncrementalSweeper.cpp b/Source/JavaScriptCore/heap/IncrementalSweeper.cpp
index 2852266aa..cfe89b870 100644
--- a/Source/JavaScriptCore/heap/IncrementalSweeper.cpp
+++ b/Source/JavaScriptCore/heap/IncrementalSweeper.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,44 +26,28 @@
#include "config.h"
#include "IncrementalSweeper.h"
-#include "APIShims.h"
-#include "DelayedReleaseScope.h"
#include "Heap.h"
#include "JSObject.h"
#include "JSString.h"
#include "MarkedBlock.h"
-
-#include <wtf/HashSet.h>
-#include <wtf/WTFThreadData.h>
+#include "JSCInlines.h"
+#include <wtf/CurrentTime.h>
namespace JSC {
-#if USE(CF)
-
static const double sweepTimeSlice = .01; // seconds
static const double sweepTimeTotal = .10;
static const double sweepTimeMultiplier = 1.0 / sweepTimeTotal;
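// Each timer fire sweeps for at most sweepTimeSlice seconds, then re-arms the
// timer sweepTimeSlice * sweepTimeMultiplier (0.1s) later, so sweeping consumes
// roughly 10% of wall-clock time while unswept blocks remain.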
-IncrementalSweeper::IncrementalSweeper(Heap* heap, CFRunLoopRef runLoop)
- : HeapTimer(heap->vm(), runLoop)
- , m_currentBlockToSweepIndex(0)
- , m_blocksToSweep(heap->m_blockSnapshot)
-{
-}
-
-PassOwnPtr<IncrementalSweeper> IncrementalSweeper::create(Heap* heap)
-{
- return adoptPtr(new IncrementalSweeper(heap, CFRunLoopGetCurrent()));
-}
-
void IncrementalSweeper::scheduleTimer()
{
- CFRunLoopTimerSetNextFireDate(m_timer.get(), CFAbsoluteTimeGetCurrent() + (sweepTimeSlice * sweepTimeMultiplier));
+ HeapTimer::scheduleTimer(sweepTimeSlice * sweepTimeMultiplier);
}
-void IncrementalSweeper::cancelTimer()
+IncrementalSweeper::IncrementalSweeper(Heap* heap)
+ : HeapTimer(heap->vm())
+ , m_currentAllocator(nullptr)
{
- CFRunLoopTimerSetNextFireDate(m_timer.get(), CFAbsoluteTimeGetCurrent() + s_decade);
}
void IncrementalSweeper::doWork()
@@ -73,10 +57,7 @@ void IncrementalSweeper::doWork()
void IncrementalSweeper::doSweep(double sweepBeginTime)
{
- DelayedReleaseScope scope(m_vm->heap.m_objectSpace);
- while (m_currentBlockToSweepIndex < m_blocksToSweep.size()) {
- sweepNextBlock();
-
+ while (sweepNextBlock()) {
double elapsedTime = WTF::monotonicallyIncreasingTime() - sweepBeginTime;
if (elapsedTime < sweepTimeSlice)
continue;
@@ -85,67 +66,42 @@ void IncrementalSweeper::doSweep(double sweepBeginTime)
return;
}
- m_blocksToSweep.clear();
cancelTimer();
}
-void IncrementalSweeper::sweepNextBlock()
+bool IncrementalSweeper::sweepNextBlock()
{
- while (m_currentBlockToSweepIndex < m_blocksToSweep.size()) {
- MarkedBlock* block = m_blocksToSweep[m_currentBlockToSweepIndex++];
-
- if (!block->needsSweeping())
- continue;
-
+ m_vm->heap.stopIfNecessary();
+
+ MarkedBlock::Handle* block = nullptr;
+
+ for (; m_currentAllocator; m_currentAllocator = m_currentAllocator->nextAllocator()) {
+ block = m_currentAllocator->findBlockToSweep();
+ if (block)
+ break;
+ }
+
+ if (block) {
+ DeferGCForAWhile deferGC(m_vm->heap);
block->sweep();
m_vm->heap.objectSpace().freeOrShrinkBlock(block);
- return;
+ return true;
}
+
+ return m_vm->heap.sweepNextLogicallyEmptyWeakBlock();
}
-void IncrementalSweeper::startSweeping(Vector<MarkedBlock*>& blockSnapshot)
+void IncrementalSweeper::startSweeping()
{
- m_blocksToSweep = blockSnapshot;
- m_currentBlockToSweepIndex = 0;
scheduleTimer();
+ m_currentAllocator = m_vm->heap.objectSpace().firstAllocator();
}
-void IncrementalSweeper::willFinishSweeping()
+void IncrementalSweeper::stopSweeping()
{
- m_currentBlockToSweepIndex = 0;
- m_blocksToSweep.clear();
+ m_currentAllocator = nullptr;
if (m_vm)
cancelTimer();
}
-#else
-
-IncrementalSweeper::IncrementalSweeper(VM* vm)
- : HeapTimer(vm)
-{
-}
-
-void IncrementalSweeper::doWork()
-{
-}
-
-PassOwnPtr<IncrementalSweeper> IncrementalSweeper::create(Heap* heap)
-{
- return adoptPtr(new IncrementalSweeper(heap->vm()));
-}
-
-void IncrementalSweeper::startSweeping(Vector<MarkedBlock*>&)
-{
-}
-
-void IncrementalSweeper::willFinishSweeping()
-{
-}
-
-void IncrementalSweeper::sweepNextBlock()
-{
-}
-
-#endif
-
} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/IncrementalSweeper.h b/Source/JavaScriptCore/heap/IncrementalSweeper.h
index 0ac145cbd..34fa88cc2 100644
--- a/Source/JavaScriptCore/heap/IncrementalSweeper.h
+++ b/Source/JavaScriptCore/heap/IncrementalSweeper.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,44 +23,31 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef IncrementalSweeper_h
-#define IncrementalSweeper_h
+#pragma once
#include "HeapTimer.h"
-#include <wtf/PassOwnPtr.h>
#include <wtf/Vector.h>
namespace JSC {
class Heap;
-class MarkedBlock;
+class MarkedAllocator;
class IncrementalSweeper : public HeapTimer {
public:
- static PassOwnPtr<IncrementalSweeper> create(Heap*);
- void startSweeping(Vector<MarkedBlock*>&);
- JS_EXPORT_PRIVATE virtual void doWork() override;
- void sweepNextBlock();
- void willFinishSweeping();
+ JS_EXPORT_PRIVATE explicit IncrementalSweeper(Heap*);
-protected:
-#if USE(CF)
- JS_EXPORT_PRIVATE IncrementalSweeper(Heap*, CFRunLoopRef);
-#else
- IncrementalSweeper(VM*);
-#endif
+ JS_EXPORT_PRIVATE void startSweeping();
+
+ JS_EXPORT_PRIVATE void doWork() override;
+ bool sweepNextBlock();
+ JS_EXPORT_PRIVATE void stopSweeping();
-#if USE(CF)
private:
void doSweep(double startTime);
void scheduleTimer();
- void cancelTimer();
- unsigned m_currentBlockToSweepIndex;
- Vector<MarkedBlock*>& m_blocksToSweep;
-#endif
+ MarkedAllocator* m_currentAllocator;
};
} // namespace JSC
-
-#endif
diff --git a/Source/JavaScriptCore/heap/JITStubRoutineSet.cpp b/Source/JavaScriptCore/heap/JITStubRoutineSet.cpp
index a37dc6f5c..ae8059532 100644
--- a/Source/JavaScriptCore/heap/JITStubRoutineSet.cpp
+++ b/Source/JavaScriptCore/heap/JITStubRoutineSet.cpp
@@ -29,7 +29,7 @@
#if ENABLE(JIT)
#include "GCAwareJITStubRoutine.h"
-
+#include "JSCInlines.h"
#include "SlotVisitor.h"
namespace JSC {
diff --git a/Source/JavaScriptCore/heap/JITStubRoutineSet.h b/Source/JavaScriptCore/heap/JITStubRoutineSet.h
index 29f0a4bff..1f9cd2aee 100644
--- a/Source/JavaScriptCore/heap/JITStubRoutineSet.h
+++ b/Source/JavaScriptCore/heap/JITStubRoutineSet.h
@@ -23,10 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef JITStubRoutineSet_h
-#define JITStubRoutineSet_h
-
-#include <wtf/Platform.h>
+#pragma once
#include "JITStubRoutine.h"
#include <wtf/FastMalloc.h>
@@ -95,6 +92,3 @@ public:
#endif // !ENABLE(JIT)
} // namespace JSC
-
-#endif // JITStubRoutineSet_h
-
diff --git a/Source/JavaScriptCore/heap/LargeAllocation.cpp b/Source/JavaScriptCore/heap/LargeAllocation.cpp
new file mode 100644
index 000000000..839c61609
--- /dev/null
+++ b/Source/JavaScriptCore/heap/LargeAllocation.cpp
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LargeAllocation.h"
+
+#include "Heap.h"
+#include "JSCInlines.h"
+#include "Operations.h"
+
+namespace JSC {
+
+LargeAllocation* LargeAllocation::tryCreate(Heap& heap, size_t size, Subspace* subspace)
+{
+ void* space = tryFastAlignedMalloc(alignment, headerSize() + size);
+ if (!space)
+ return nullptr;
+ if (scribbleFreeCells())
+ scribble(space, size);
+ return new (NotNull, space) LargeAllocation(heap, size, subspace);
+}
+
+LargeAllocation::LargeAllocation(Heap& heap, size_t size, Subspace* subspace)
+ : m_cellSize(size)
+ , m_isNewlyAllocated(true)
+ , m_hasValidCell(true)
+ , m_attributes(subspace->attributes())
+ , m_subspace(subspace)
+ , m_weakSet(heap.vm(), *this)
+{
+ m_isMarked.store(0);
+}
+
+LargeAllocation::~LargeAllocation()
+{
+ if (isOnList())
+ remove();
+}
+
+void LargeAllocation::lastChanceToFinalize()
+{
+ m_weakSet.lastChanceToFinalize();
+ clearMarked();
+ clearNewlyAllocated();
+ sweep();
+}
+
+void LargeAllocation::shrink()
+{
+ m_weakSet.shrink();
+}
+
+void LargeAllocation::visitWeakSet(SlotVisitor& visitor)
+{
+ m_weakSet.visit(visitor);
+}
+
+void LargeAllocation::reapWeakSet()
+{
+ return m_weakSet.reap();
+}
+
+void LargeAllocation::flip()
+{
+ ASSERT(heap()->collectionScope() == CollectionScope::Full);
+ clearMarked();
+}
+
+bool LargeAllocation::isEmpty()
+{
+ return !isMarked() && m_weakSet.isEmpty() && !isNewlyAllocated();
+}
+
+void LargeAllocation::sweep()
+{
+ m_weakSet.sweep();
+
+ if (m_hasValidCell && !isLive()) {
+ if (m_attributes.destruction == NeedsDestruction)
+ m_subspace->destroy(*vm(), static_cast<JSCell*>(cell()));
+ m_hasValidCell = false;
+ }
+}
+
+void LargeAllocation::destroy()
+{
+ this->~LargeAllocation();
+ fastAlignedFree(this);
+}
+
+void LargeAllocation::dump(PrintStream& out) const
+{
+ out.print(RawPointer(this), ":(cell at ", RawPointer(cell()), " with size ", m_cellSize, " and attributes ", m_attributes, ")");
+}
+
+#if !ASSERT_DISABLED
+void LargeAllocation::assertValidCell(VM& vm, HeapCell* cell) const
+{
+ ASSERT(&vm == this->vm());
+ ASSERT(cell == this->cell());
+ ASSERT(m_hasValidCell);
+}
+#endif
+
+} // namespace JSC
+
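
[Editorial sketch, not part of this patch.] The liveness rule that sweep() applies above reduces to a few lines: an allocation survives a cycle if it is marked or still newly allocated; otherwise its cell is destroyed and the header is left without a valid cell.

    struct ToyLargeAllocation {
        bool isMarked = false;          // set by the collector's mark phase
        bool isNewlyAllocated = true;   // cleared once the object has survived a cycle
        bool hasValidCell = true;

        bool isLive() const { return isMarked || isNewlyAllocated; }

        void sweep()
        {
            if (hasValidCell && !isLive()) {
                // The real code runs the cell's destructor via Subspace::destroy() here.
                hasValidCell = false;
            }
        }
    };
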
diff --git a/Source/JavaScriptCore/heap/LargeAllocation.h b/Source/JavaScriptCore/heap/LargeAllocation.h
new file mode 100644
index 000000000..528575b57
--- /dev/null
+++ b/Source/JavaScriptCore/heap/LargeAllocation.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "MarkedBlock.h"
+#include "WeakSet.h"
+
+namespace JSC {
+
+class SlotVisitor;
+
+// WebKit has a good malloc that already knows what to do for large allocations. The GC shouldn't
+// have to think about such things. That's where LargeAllocation comes in. We will allocate large
+// objects directly using malloc, and put the LargeAllocation header just before them. We can detect
+// when a HeapCell* is a LargeAllocation because it will have the MarkedBlock::atomSize / 2 bit set.
+
+class LargeAllocation : public BasicRawSentinelNode<LargeAllocation> {
+public:
+ static LargeAllocation* tryCreate(Heap&, size_t, Subspace*);
+
+ ~LargeAllocation();
+
+ static LargeAllocation* fromCell(const void* cell)
+ {
+ return bitwise_cast<LargeAllocation*>(bitwise_cast<char*>(cell) - headerSize());
+ }
+
+ HeapCell* cell() const
+ {
+ return bitwise_cast<HeapCell*>(bitwise_cast<char*>(this) + headerSize());
+ }
+
+ static bool isLargeAllocation(HeapCell* cell)
+ {
+ return bitwise_cast<uintptr_t>(cell) & halfAlignment;
+ }
+
+ void lastChanceToFinalize();
+
+ Heap* heap() const { return m_weakSet.heap(); }
+ VM* vm() const { return m_weakSet.vm(); }
+ WeakSet& weakSet() { return m_weakSet; }
+
+ void shrink();
+
+ void visitWeakSet(SlotVisitor&);
+ void reapWeakSet();
+
+ void clearNewlyAllocated() { m_isNewlyAllocated = false; }
+ void flip();
+
+ bool isNewlyAllocated() const { return m_isNewlyAllocated; }
+ ALWAYS_INLINE bool isMarked() { return m_isMarked.load(std::memory_order_relaxed); }
+ ALWAYS_INLINE bool isMarked(HeapCell*) { return m_isMarked.load(std::memory_order_relaxed); }
+ ALWAYS_INLINE bool isMarkedConcurrently(HeapVersion, HeapCell*) { return m_isMarked.load(std::memory_order_relaxed); }
+ bool isLive() { return isMarked() || isNewlyAllocated(); }
+
+ bool hasValidCell() const { return m_hasValidCell; }
+
+ bool isEmpty();
+
+ size_t cellSize() const { return m_cellSize; }
+
+ bool aboveLowerBound(const void* rawPtr)
+ {
+ char* ptr = bitwise_cast<char*>(rawPtr);
+ char* begin = bitwise_cast<char*>(cell());
+ return ptr >= begin;
+ }
+
+ bool belowUpperBound(const void* rawPtr)
+ {
+ char* ptr = bitwise_cast<char*>(rawPtr);
+ char* begin = bitwise_cast<char*>(cell());
+ char* end = begin + cellSize();
+ // We cannot #include IndexingHeader.h because reasons. The fact that IndexingHeader is 8
+ // bytes is wired deep into our engine, so this isn't so bad.
+ size_t sizeOfIndexingHeader = 8;
+ return ptr <= end + sizeOfIndexingHeader;
+ }
+
+ bool contains(const void* rawPtr)
+ {
+ return aboveLowerBound(rawPtr) && belowUpperBound(rawPtr);
+ }
+
+ const AllocatorAttributes& attributes() const { return m_attributes; }
+
+ void aboutToMark(HeapVersion) { }
+
+ ALWAYS_INLINE bool testAndSetMarked()
+ {
+ // This method is usually called when the object is already marked. This avoids us
+ // having to CAS in that case. It's profitable to reduce the total amount of CAS
+ // traffic.
+ if (isMarked())
+ return true;
+ return m_isMarked.compareExchangeStrong(false, true);
+ }
+ ALWAYS_INLINE bool testAndSetMarked(HeapCell*) { return testAndSetMarked(); }
+ void clearMarked() { m_isMarked.store(false); }
+
+ void noteMarked() { }
+
+#if ASSERT_DISABLED
+ void assertValidCell(VM&, HeapCell*) const { }
+#else
+ void assertValidCell(VM&, HeapCell*) const;
+#endif
+
+ void sweep();
+
+ void destroy();
+
+ void dump(PrintStream&) const;
+
+private:
+ LargeAllocation(Heap&, size_t, Subspace*);
+
+ static const unsigned alignment = MarkedBlock::atomSize;
+ static const unsigned halfAlignment = alignment / 2;
+
+ static unsigned headerSize();
+
+ size_t m_cellSize;
+ bool m_isNewlyAllocated;
+ bool m_hasValidCell;
+ Atomic<bool> m_isMarked;
+ AllocatorAttributes m_attributes;
+ Subspace* m_subspace;
+ WeakSet m_weakSet;
+};
+
+inline unsigned LargeAllocation::headerSize()
+{
+ return ((sizeof(LargeAllocation) + halfAlignment - 1) & ~(halfAlignment - 1)) | halfAlignment;
+}
+
+} // namespace JSC
+
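
[Editorial sketch, not part of this patch.] The header comment and headerSize() above rely on an alignment trick: MarkedBlock cells are atomSize-aligned, while a LargeAllocation's cell deliberately sits at an offset with the atomSize/2 bit set, so that single bit distinguishes the two cases. A minimal standalone illustration of the arithmetic (alignment = 16 stands in for MarkedBlock::atomSize; the constants are illustrative):

    #include <cassert>
    #include <cstdint>

    static const unsigned alignment = 16;               // stands in for MarkedBlock::atomSize
    static const unsigned halfAlignment = alignment / 2;

    // Round the header up to a halfAlignment boundary, then force the halfAlignment bit on,
    // so cell = base + headerSize() is congruent to halfAlignment modulo alignment.
    static unsigned headerSize(unsigned rawHeaderSize)
    {
        return ((rawHeaderSize + halfAlignment - 1) & ~(halfAlignment - 1)) | halfAlignment;
    }

    static bool looksLikeLargeAllocation(uintptr_t cell)
    {
        return cell & halfAlignment;
    }

    int main()
    {
        uintptr_t base = 0x1000;                         // aligned malloc returns alignment-aligned memory
        uintptr_t cell = base + headerSize(48);          // 48 rounds to 48, then |8 gives 56
        assert(looksLikeLargeAllocation(cell));          // large-allocation cells have the bit set
        assert(!looksLikeLargeAllocation(base));         // alignment-aligned MarkedBlock cells do not
        return 0;
    }
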
diff --git a/Source/JavaScriptCore/heap/ListableHandler.h b/Source/JavaScriptCore/heap/ListableHandler.h
index 16c34146c..7ee78f2d3 100644
--- a/Source/JavaScriptCore/heap/ListableHandler.h
+++ b/Source/JavaScriptCore/heap/ListableHandler.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -17,24 +17,28 @@
*
*/
-#ifndef ListableHandler_h
-#define ListableHandler_h
+#pragma once
#include <stdint.h>
+#include <wtf/Lock.h>
#include <wtf/Locker.h>
#include <wtf/Noncopyable.h>
#include <wtf/ThreadingPrimitives.h>
-#include <wtf/TCSpinLock.h>
namespace JSC {
-class MarkStack;
-class MarkStackThreadSharedData;
+class Heap;
class SlotVisitor;
template<typename T>
class ListableHandler {
WTF_MAKE_NONCOPYABLE(ListableHandler);
+
+public:
+ bool isOnList() const
+ {
+ return m_nextAndFlag & 1;
+ }
protected:
ListableHandler()
@@ -51,8 +55,7 @@ protected:
private:
// Allow these classes to use ListableHandler::List.
- friend class MarkStack;
- friend class GCThreadSharedData;
+ friend class Heap;
friend class SlotVisitor;
class List {
@@ -61,12 +64,11 @@ private:
List()
: m_first(0)
{
- m_lock.Init();
}
void addThreadSafe(T* handler)
{
- SpinLockHolder locker(&m_lock);
+ LockHolder locker(&m_lock);
addNotThreadSafe(handler);
}
@@ -104,7 +106,7 @@ private:
m_first = handler;
}
- SpinLock m_lock;
+ Lock m_lock;
T* m_first;
};
@@ -112,5 +114,3 @@ private:
};
} // namespace JSC
-
-#endif // ListableHandler_h
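
[Editorial sketch, not part of this patch.] isOnList() above reads a flag packed into the low bit of m_nextAndFlag. Handlers are at least pointer-aligned, so a real next pointer always has a zero low bit, leaving that bit free to mean "currently on a list". A minimal illustration of the idiom (names are illustrative):

    #include <cstdint>

    struct Node {
        uintptr_t nextAndFlag = 0;

        bool isOnList() const { return nextAndFlag & 1; }
        Node* next() const { return reinterpret_cast<Node*>(nextAndFlag & ~uintptr_t(1)); }

        void setNext(Node* node, bool onList)
        {
            // Pointer-aligned addresses never use the low bit, so it can carry the flag.
            nextAndFlag = reinterpret_cast<uintptr_t>(node) | (onList ? 1 : 0);
        }
    };
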
diff --git a/Source/JavaScriptCore/heap/LiveObjectData.h b/Source/JavaScriptCore/heap/LiveObjectData.h
new file mode 100644
index 000000000..6fb71979c
--- /dev/null
+++ b/Source/JavaScriptCore/heap/LiveObjectData.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace JSC {
+
+class JSObject;
+
+struct LiveObjectData {
+ LiveObjectData(JSObject* obj, bool isConfirmedDead = false)
+ : obj(obj)
+ , isConfirmedDead(isConfirmedDead)
+ {
+ }
+
+ JSObject* obj;
+ bool isConfirmedDead;
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/LiveObjectList.cpp b/Source/JavaScriptCore/heap/LiveObjectList.cpp
new file mode 100644
index 000000000..5eb7cb991
--- /dev/null
+++ b/Source/JavaScriptCore/heap/LiveObjectList.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LiveObjectList.h"
+
+namespace JSC {
+
+LiveObjectData* LiveObjectList::findObject(JSObject* obj)
+{
+ for (auto& data : liveObjects) {
+ if (obj == data.obj)
+ return &data;
+ }
+ return nullptr;
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/LiveObjectList.h b/Source/JavaScriptCore/heap/LiveObjectList.h
new file mode 100644
index 000000000..c40fd3a6e
--- /dev/null
+++ b/Source/JavaScriptCore/heap/LiveObjectList.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "LiveObjectData.h"
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+struct LiveObjectList {
+ LiveObjectList(const char* name)
+ : name(name)
+ , hasLiveObjects(true)
+ {
+ }
+
+ void reset()
+ {
+ liveObjects.clear();
+ hasLiveObjects = true; // Presume to have live objects until the list is trimmed.
+ }
+
+ LiveObjectData* findObject(JSObject*);
+
+ const char* name;
+ Vector<LiveObjectData> liveObjects;
+ bool hasLiveObjects;
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/Local.h b/Source/JavaScriptCore/heap/Local.h
index 14c4dee26..a0aadbe5b 100644
--- a/Source/JavaScriptCore/heap/Local.h
+++ b/Source/JavaScriptCore/heap/Local.h
@@ -23,8 +23,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef Local_h
-#define Local_h
+#pragma once
#include "Handle.h"
#include "VM.h"
@@ -137,7 +136,7 @@ private:
unsigned m_count;
};
-}
+} // namespace JSC
namespace WTF {
@@ -147,6 +146,4 @@ template<typename T> struct VectorTraits<JSC::Local<T>> : SimpleClassVectorTrait
static const bool canCompareWithMemcmp = false;
};
-}
-
-#endif
+} // namespace WTF
diff --git a/Source/JavaScriptCore/heap/LocalScope.h b/Source/JavaScriptCore/heap/LocalScope.h
index ab896863c..f17f9f947 100644
--- a/Source/JavaScriptCore/heap/LocalScope.h
+++ b/Source/JavaScriptCore/heap/LocalScope.h
@@ -23,8 +23,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef LocalScope_h
-#define LocalScope_h
+#pragma once
#include "HandleStack.h"
#include "Local.h"
@@ -73,6 +72,4 @@ template <typename T> Local<T> LocalScope::release(Local<T> local)
return Local<T>(slot, ptr);
}
-}
-
-#endif
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/LockDuringMarking.h b/Source/JavaScriptCore/heap/LockDuringMarking.h
new file mode 100644
index 000000000..260378777
--- /dev/null
+++ b/Source/JavaScriptCore/heap/LockDuringMarking.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <heap/Heap.h>
+#include <wtf/Locker.h>
+
+namespace JSC {
+
+// Use this lock scope like so:
+// auto locker = lockDuringMarking(heap, lock);
+template<typename LockType>
+Locker<LockType> lockDuringMarking(Heap& heap, LockType& passedLock)
+{
+ LockType* lock;
+ if (heap.mutatorShouldBeFenced())
+ lock = &passedLock;
+ else
+ lock = nullptr;
+ return Locker<LockType>(lock);
+}
+
+} // namespace JSC
+
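
[Editorial sketch, not part of this patch.] lockDuringMarking() above leans on the fact that constructing a WTF::Locker from a null pointer is a no-op, so the lock is only taken when the mutator needs fencing (i.e. while concurrent marking can race with it). A rough standalone equivalent of the idiom using the standard library; MaybeLocker and lockIfConcurrent are illustrative names, not WebKit API.

    #include <mutex>

    // A locker that does nothing when given no lock, mirroring Locker<LockType>(nullptr).
    template<typename LockType>
    class MaybeLocker {
    public:
        explicit MaybeLocker(LockType* lock)
            : m_lock(lock)
        {
            if (m_lock)
                m_lock->lock();
        }
        ~MaybeLocker()
        {
            if (m_lock)
                m_lock->unlock();
        }
        MaybeLocker(const MaybeLocker&) = delete;
        MaybeLocker& operator=(const MaybeLocker&) = delete;

    private:
        LockType* m_lock;
    };

    template<typename LockType>
    MaybeLocker<LockType> lockIfConcurrent(bool concurrentMarkingEnabled, LockType& lock)
    {
        // Only pay for the lock when a concurrent marker might be racing with us.
        // (C++17 guaranteed copy elision lets the non-copyable locker be returned by value.)
        return MaybeLocker<LockType>(concurrentMarkingEnabled ? &lock : nullptr);
    }

    // Usage: auto locker = lockIfConcurrent(markingIsConcurrent, someMutex);
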
diff --git a/Source/JavaScriptCore/heap/MachineStackMarker.cpp b/Source/JavaScriptCore/heap/MachineStackMarker.cpp
index f546cb38b..4d4e8bb22 100644
--- a/Source/JavaScriptCore/heap/MachineStackMarker.cpp
+++ b/Source/JavaScriptCore/heap/MachineStackMarker.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2003-2017 Apple Inc. All rights reserved.
* Copyright (C) 2007 Eric Seidel <eric@webkit.org>
* Copyright (C) 2009 Acision BV. All rights reserved.
*
@@ -23,11 +23,17 @@
#include "MachineStackMarker.h"
#include "ConservativeRoots.h"
+#include "GPRInfo.h"
#include "Heap.h"
#include "JSArray.h"
+#include "JSCInlines.h"
+#include "LLIntPCRanges.h"
+#include "MacroAssembler.h"
#include "VM.h"
#include <setjmp.h>
#include <stdlib.h>
+#include <wtf/MainThread.h>
+#include <wtf/NeverDestroyed.h>
#include <wtf/StdLibExtras.h>
#if OS(DARWIN)
@@ -60,88 +66,149 @@
#if USE(PTHREADS) && !OS(WINDOWS) && !OS(DARWIN)
#include <signal.h>
-#endif
-
-#endif
-using namespace WTF;
-
-namespace JSC {
+// We use SIGUSR2 to suspend and resume machine threads in JavaScriptCore.
+static const int SigThreadSuspendResume = SIGUSR2;
+static StaticLock globalSignalLock;
+thread_local static std::atomic<JSC::MachineThreads::ThreadData*> threadLocalCurrentThread { nullptr };
-static inline void swapIfBackwards(void*& begin, void*& end)
+static void pthreadSignalHandlerSuspendResume(int, siginfo_t*, void* ucontext)
{
-#if OS(WINCE)
- if (begin <= end)
+ // Touching thread local atomic types from signal handlers is allowed.
+ JSC::MachineThreads::ThreadData* threadData = threadLocalCurrentThread.load();
+
+ if (threadData->suspended.load(std::memory_order_acquire)) {
+ // This signal handler invocation is the one intended to resume sigsuspend,
+ // so this handler invocation itself should not do any processing.
+ //
+ // When the signal arrives, the system first calls the signal handler, and only afterwards does sigsuspend return. The handler invocation always comes first.
+ // So there is no possibility that suspended.store(true, ...) will be executed before the handler is called.
+ // http://pubs.opengroup.org/onlinepubs/009695399/functions/sigsuspend.html
return;
- std::swap(begin, end);
+ }
+
+ ucontext_t* userContext = static_cast<ucontext_t*>(ucontext);
+#if CPU(PPC)
+ threadData->suspendedMachineContext = *userContext->uc_mcontext.uc_regs;
#else
-UNUSED_PARAM(begin);
-UNUSED_PARAM(end);
+ threadData->suspendedMachineContext = userContext->uc_mcontext;
#endif
-}
-#if OS(DARWIN)
-typedef mach_port_t PlatformThread;
-#elif OS(WINDOWS)
-typedef HANDLE PlatformThread;
-#elif USE(PTHREADS)
-typedef pthread_t PlatformThread;
-static const int SigThreadSuspendResume = SIGUSR2;
-
-#if defined(SA_RESTART)
-static void pthreadSignalHandlerSuspendResume(int)
-{
- sigset_t signalSet;
- sigemptyset(&signalSet);
- sigaddset(&signalSet, SigThreadSuspendResume);
- sigsuspend(&signalSet);
+ // Allow suspend caller to see that this thread is suspended.
+ // sem_post is an async-signal-safe function, which means we can call it from a signal handler.
+ // http://pubs.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html#tag_02_04_03
+ //
+ // sem_post also emits a memory barrier, which ensures that suspendedMachineContext is correctly saved.
+ // http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_11
+ sem_post(&threadData->semaphoreForSuspendResume);
+
+ // By the time we reach here, SigThreadSuspendResume is blocked in this handler (configured via sigaction's sa_mask).
+ // So until sigsuspend is called, any SigThreadSuspendResume sent to this thread is deferred, which ensures that the handler is not executed recursively.
+ sigset_t blockedSignalSet;
+ sigfillset(&blockedSignalSet);
+ sigdelset(&blockedSignalSet, SigThreadSuspendResume);
+ sigsuspend(&blockedSignalSet);
+
+ // Allow resume caller to see that this thread is resumed.
+ sem_post(&threadData->semaphoreForSuspendResume);
}
+#endif // USE(PTHREADS) && !OS(WINDOWS) && !OS(DARWIN)
+
#endif
-#endif
-class MachineThreads::Thread {
- WTF_MAKE_FAST_ALLOCATED;
+using namespace WTF;
+
+namespace JSC {
+
+using Thread = MachineThreads::Thread;
+
+class ActiveMachineThreadsManager;
+static ActiveMachineThreadsManager& activeMachineThreadsManager();
+
+class ActiveMachineThreadsManager {
+ WTF_MAKE_NONCOPYABLE(ActiveMachineThreadsManager);
public:
- Thread(const PlatformThread& platThread, void* base)
- : platformThread(platThread)
- , stackBase(base)
+
+ class Locker {
+ public:
+ Locker(ActiveMachineThreadsManager& manager)
+ : m_locker(manager.m_lock)
+ {
+ }
+
+ private:
+ LockHolder m_locker;
+ };
+
+ void add(MachineThreads* machineThreads)
{
-#if USE(PTHREADS) && !OS(WINDOWS) && !OS(DARWIN) && defined(SA_RESTART)
- // if we have SA_RESTART, enable SIGUSR2 debugging mechanism
- struct sigaction action;
- action.sa_handler = pthreadSignalHandlerSuspendResume;
- sigemptyset(&action.sa_mask);
- action.sa_flags = SA_RESTART;
- sigaction(SigThreadSuspendResume, &action, 0);
+ LockHolder managerLock(m_lock);
+ m_set.add(machineThreads);
+ }
- sigset_t mask;
- sigemptyset(&mask);
- sigaddset(&mask, SigThreadSuspendResume);
- pthread_sigmask(SIG_UNBLOCK, &mask, 0);
-#endif
+ void THREAD_SPECIFIC_CALL remove(MachineThreads* machineThreads)
+ {
+ LockHolder managerLock(m_lock);
+ auto recordedMachineThreads = m_set.take(machineThreads);
+ RELEASE_ASSERT(recordedMachineThreads == machineThreads);
}
- Thread* next;
- PlatformThread platformThread;
- void* stackBase;
+ bool contains(MachineThreads* machineThreads)
+ {
+ return m_set.contains(machineThreads);
+ }
+
+private:
+ typedef HashSet<MachineThreads*> MachineThreadsSet;
+
+ ActiveMachineThreadsManager() { }
+
+ Lock m_lock;
+ MachineThreadsSet m_set;
+
+ friend ActiveMachineThreadsManager& activeMachineThreadsManager();
};
+static ActiveMachineThreadsManager& activeMachineThreadsManager()
+{
+ static std::once_flag initializeManagerOnceFlag;
+ static ActiveMachineThreadsManager* manager = nullptr;
+
+ std::call_once(initializeManagerOnceFlag, [] {
+ manager = new ActiveMachineThreadsManager();
+ });
+ return *manager;
+}
+
+static inline PlatformThread getCurrentPlatformThread()
+{
+#if OS(DARWIN)
+ return pthread_mach_thread_np(pthread_self());
+#elif OS(WINDOWS)
+ return GetCurrentThreadId();
+#elif USE(PTHREADS)
+ return pthread_self();
+#endif
+}
+
MachineThreads::MachineThreads(Heap* heap)
: m_registeredThreads(0)
- , m_threadSpecific(0)
+ , m_threadSpecificForMachineThreads(0)
#if !ASSERT_DISABLED
, m_heap(heap)
#endif
{
UNUSED_PARAM(heap);
+ threadSpecificKeyCreate(&m_threadSpecificForMachineThreads, removeThread);
+ activeMachineThreadsManager().add(this);
}
MachineThreads::~MachineThreads()
{
- if (m_threadSpecific)
- threadSpecificKeyDelete(m_threadSpecific);
+ activeMachineThreadsManager().remove(this);
+ threadSpecificKeyDelete(m_threadSpecificForMachineThreads);
- MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
+ LockHolder registeredThreadsLock(m_registeredThreadsMutex);
for (Thread* t = m_registeredThreads; t;) {
Thread* next = t->next;
delete t;
@@ -149,171 +216,247 @@ MachineThreads::~MachineThreads()
}
}
-static inline PlatformThread getCurrentPlatformThread()
+static MachineThreads::ThreadData* threadData()
{
-#if OS(DARWIN)
- return pthread_mach_thread_np(pthread_self());
-#elif OS(WINDOWS)
- return GetCurrentThread();
-#elif USE(PTHREADS)
- return pthread_self();
-#endif
+ static NeverDestroyed<ThreadSpecific<MachineThreads::ThreadData, CanBeGCThread::True>> threadData;
+ return threadData.get();
+}
+
+MachineThreads::Thread::Thread(ThreadData* threadData)
+ : data(threadData)
+{
+ ASSERT(threadData);
}
-static inline bool equalThread(const PlatformThread& first, const PlatformThread& second)
+Thread* MachineThreads::Thread::createForCurrentThread()
+{
+ return new Thread(threadData());
+}
+
+bool MachineThreads::Thread::operator==(const PlatformThread& other) const
{
#if OS(DARWIN) || OS(WINDOWS)
- return first == second;
+ return data->platformThread == other;
#elif USE(PTHREADS)
- return !!pthread_equal(first, second);
+ return !!pthread_equal(data->platformThread, other);
#else
#error Need a way to compare threads on this platform
#endif
}
-void MachineThreads::makeUsableFromMultipleThreads()
-{
- if (m_threadSpecific)
- return;
-
- threadSpecificKeyCreate(&m_threadSpecific, removeThread);
-}
-
void MachineThreads::addCurrentThread()
{
- ASSERT(!m_heap->vm()->exclusiveThread || m_heap->vm()->exclusiveThread == currentThread());
+ ASSERT(!m_heap->vm()->hasExclusiveThread() || m_heap->vm()->exclusiveThread() == std::this_thread::get_id());
- if (!m_threadSpecific || threadSpecificGet(m_threadSpecific))
+ if (threadSpecificGet(m_threadSpecificForMachineThreads)) {
+#ifndef NDEBUG
+ LockHolder lock(m_registeredThreadsMutex);
+ ASSERT(threadSpecificGet(m_threadSpecificForMachineThreads) == this);
+#endif
return;
+ }
- threadSpecificSet(m_threadSpecific, this);
- Thread* thread = new Thread(getCurrentPlatformThread(), wtfThreadData().stack().origin());
+ Thread* thread = Thread::createForCurrentThread();
+ threadSpecificSet(m_threadSpecificForMachineThreads, this);
- MutexLocker lock(m_registeredThreadsMutex);
+ LockHolder lock(m_registeredThreadsMutex);
thread->next = m_registeredThreads;
m_registeredThreads = thread;
}
-void MachineThreads::removeThread(void* p)
+Thread* MachineThreads::machineThreadForCurrentThread()
{
- if (p)
- static_cast<MachineThreads*>(p)->removeCurrentThread();
+ LockHolder lock(m_registeredThreadsMutex);
+ PlatformThread platformThread = getCurrentPlatformThread();
+ for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
+ if (*thread == platformThread)
+ return thread;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr;
}
-void MachineThreads::removeCurrentThread()
+void THREAD_SPECIFIC_CALL MachineThreads::removeThread(void* p)
{
- PlatformThread currentPlatformThread = getCurrentPlatformThread();
+ auto& manager = activeMachineThreadsManager();
+ ActiveMachineThreadsManager::Locker lock(manager);
+ auto machineThreads = static_cast<MachineThreads*>(p);
+ if (manager.contains(machineThreads)) {
+ // There's a chance that the MachineThreads registry that this thread
+ // was registered with was already destructed, and another one happened
+ // to be instantiated at the same address. Hence, this thread may or
+ // may not be found in this MachineThreads registry. We only need to
+ // do a removal if this thread is found in it.
+
+#if PLATFORM(WIN)
+ // On Windows the thread specific destructor is also called when the
+ // main thread is exiting. This may lead to the main thread waiting
+ // forever for the machine thread lock when exiting, if the sampling
+ // profiler thread was terminated by the system while holding the
+ // machine thread lock.
+ if (WTF::isMainThread())
+ return;
+#endif
- MutexLocker lock(m_registeredThreadsMutex);
+ machineThreads->removeThreadIfFound(getCurrentPlatformThread());
+ }
+}
- if (equalThread(currentPlatformThread, m_registeredThreads->platformThread)) {
- Thread* t = m_registeredThreads;
+template<typename PlatformThread>
+void MachineThreads::removeThreadIfFound(PlatformThread platformThread)
+{
+ LockHolder lock(m_registeredThreadsMutex);
+ Thread* t = m_registeredThreads;
+ if (*t == platformThread) {
m_registeredThreads = m_registeredThreads->next;
delete t;
} else {
Thread* last = m_registeredThreads;
- Thread* t;
for (t = m_registeredThreads->next; t; t = t->next) {
- if (equalThread(t->platformThread, currentPlatformThread)) {
+ if (*t == platformThread) {
last->next = t->next;
break;
}
last = t;
}
- ASSERT(t); // If t is NULL, we never found ourselves in the list.
delete t;
}
}
-#if COMPILER(GCC)
-#define REGISTER_BUFFER_ALIGNMENT __attribute__ ((aligned (sizeof(void*))))
-#else
-#define REGISTER_BUFFER_ALIGNMENT
-#endif
+SUPPRESS_ASAN
+void MachineThreads::gatherFromCurrentThread(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks, CurrentThreadState& currentThreadState)
+{
+ if (currentThreadState.registerState) {
+ void* registersBegin = currentThreadState.registerState;
+ void* registersEnd = reinterpret_cast<void*>(roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(currentThreadState.registerState + 1)));
+ conservativeRoots.add(registersBegin, registersEnd, jitStubRoutines, codeBlocks);
+ }
-void MachineThreads::gatherFromCurrentThread(ConservativeRoots& conservativeRoots, void* stackCurrent)
+ conservativeRoots.add(currentThreadState.stackTop, currentThreadState.stackOrigin, jitStubRoutines, codeBlocks);
+}
+
+MachineThreads::ThreadData::ThreadData()
{
- // setjmp forces volatile registers onto the stack
- jmp_buf registers REGISTER_BUFFER_ALIGNMENT;
-#if COMPILER(MSVC)
-#pragma warning(push)
-#pragma warning(disable: 4611)
-#endif
- setjmp(registers);
-#if COMPILER(MSVC)
-#pragma warning(pop)
-#endif
+ auto stackBounds = wtfThreadData().stack();
+ platformThread = getCurrentPlatformThread();
+ stackBase = stackBounds.origin();
+ stackEnd = stackBounds.end();
+
+#if OS(WINDOWS)
+ ASSERT(platformThread == GetCurrentThreadId());
+ bool isSuccessful =
+ DuplicateHandle(GetCurrentProcess(), GetCurrentThread(), GetCurrentProcess(),
+ &platformThreadHandle, 0, FALSE, DUPLICATE_SAME_ACCESS);
+ RELEASE_ASSERT(isSuccessful);
+#elif USE(PTHREADS) && !OS(DARWIN)
+ threadLocalCurrentThread.store(this);
+
+ // Signal handlers are process global configuration.
+ static std::once_flag initializeSignalHandler;
+ std::call_once(initializeSignalHandler, [] {
+ // Intentionally block SigThreadSuspendResume in the handler.
+ // SigThreadSuspendResume will be allowed in the handler by sigsuspend.
+ struct sigaction action;
+ sigemptyset(&action.sa_mask);
+ sigaddset(&action.sa_mask, SigThreadSuspendResume);
+
+ action.sa_sigaction = pthreadSignalHandlerSuspendResume;
+ action.sa_flags = SA_RESTART | SA_SIGINFO;
+ sigaction(SigThreadSuspendResume, &action, 0);
+ });
- void* registersBegin = &registers;
- void* registersEnd = reinterpret_cast<void*>(roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(&registers + 1)));
- swapIfBackwards(registersBegin, registersEnd);
- conservativeRoots.add(registersBegin, registersEnd);
+ sigset_t mask;
+ sigemptyset(&mask);
+ sigaddset(&mask, SigThreadSuspendResume);
+ pthread_sigmask(SIG_UNBLOCK, &mask, 0);
- void* stackBegin = stackCurrent;
- void* stackEnd = wtfThreadData().stack().origin();
- swapIfBackwards(stackBegin, stackEnd);
- conservativeRoots.add(stackBegin, stackEnd);
+ sem_init(&semaphoreForSuspendResume, /* Only available in this process. */ 0, /* Initial value for the semaphore. */ 0);
+#endif
+}
+
+MachineThreads::ThreadData::~ThreadData()
+{
+#if OS(WINDOWS)
+ CloseHandle(platformThreadHandle);
+#elif USE(PTHREADS) && !OS(DARWIN)
+ sem_destroy(&semaphoreForSuspendResume);
+#endif
}
-static inline void suspendThread(const PlatformThread& platformThread)
+bool MachineThreads::ThreadData::suspend()
{
#if OS(DARWIN)
- thread_suspend(platformThread);
+ kern_return_t result = thread_suspend(platformThread);
+ return result == KERN_SUCCESS;
#elif OS(WINDOWS)
- SuspendThread(platformThread);
+ bool threadIsSuspended = (SuspendThread(platformThreadHandle) != (DWORD)-1);
+ ASSERT(threadIsSuspended);
+ return threadIsSuspended;
#elif USE(PTHREADS)
- pthread_kill(platformThread, SigThreadSuspendResume);
+ ASSERT_WITH_MESSAGE(getCurrentPlatformThread() != platformThread, "Currently we don't support suspending the current thread itself.");
+ {
+ // While suspending, neither suspend nor resume should be executed from other threads.
+ // We use a global lock instead of a per-thread lock.
+ // Consider the following case: there are threads A and B, and A attempts to suspend B while B attempts to suspend A.
+ // Both send signals, and later the signals are delivered to A and B.
+ // In that case, both threads would end up suspended.
+ LockHolder lock(globalSignalLock);
+ if (!suspendCount) {
+ // Ideally, we would like to use pthread_sigqueue, which allows us to pass an argument to the signal handler.
+ // But it is only available on a few platforms, such as Linux.
+ // Instead, we pass the ThreadData* to the signal handler through thread local storage.
+ if (pthread_kill(platformThread, SigThreadSuspendResume) == ESRCH)
+ return false;
+ sem_wait(&semaphoreForSuspendResume);
+ // The release barrier ensures that this store happens after all the above processing is done.
+ suspended.store(true, std::memory_order_release);
+ }
+ ++suspendCount;
+ }
+ return true;
#else
#error Need a way to suspend threads on this platform
#endif
}
-static inline void resumeThread(const PlatformThread& platformThread)
+void MachineThreads::ThreadData::resume()
{
#if OS(DARWIN)
thread_resume(platformThread);
#elif OS(WINDOWS)
- ResumeThread(platformThread);
+ ResumeThread(platformThreadHandle);
#elif USE(PTHREADS)
- pthread_kill(platformThread, SigThreadSuspendResume);
+ {
+ // While resuming, neither suspend nor resume should be executed from other threads.
+ LockHolder lock(globalSignalLock);
+ if (suspendCount == 1) {
+ // When sigsuspend unblocks SigThreadSuspendResume inside the signal handler and SigThreadSuspendResume is actually delivered,
+ // the signal handler itself will be invoked once again.
+ // There are several ways to distinguish a handler invocation for suspend from one for resume.
+ // 1. Use different signal numbers, and check the signal number in the handler.
+ // 2. Pass an argument to the handler that distinguishes suspend from resume. If pthread_sigqueue could be used, we could take this approach.
+ // 3. Use thread local storage with atomic variables in the signal handler.
+ // In this implementation, we take (3): the suspended flag is used to distinguish the two.
+ if (pthread_kill(platformThread, SigThreadSuspendResume) == ESRCH)
+ return;
+ sem_wait(&semaphoreForSuspendResume);
+ // The release barrier ensures that this store happens after all the above processing is done.
+ suspended.store(false, std::memory_order_release);
+ }
+ --suspendCount;
+ }
#else
#error Need a way to resume threads on this platform
#endif
}
-typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit
-
-#if OS(DARWIN)
-
-#if CPU(X86)
-typedef i386_thread_state_t PlatformThreadRegisters;
-#elif CPU(X86_64)
-typedef x86_thread_state64_t PlatformThreadRegisters;
-#elif CPU(PPC)
-typedef ppc_thread_state_t PlatformThreadRegisters;
-#elif CPU(PPC64)
-typedef ppc_thread_state64_t PlatformThreadRegisters;
-#elif CPU(ARM)
-typedef arm_thread_state_t PlatformThreadRegisters;
-#elif CPU(ARM64)
-typedef arm_thread_state64_t PlatformThreadRegisters;
-#else
-#error Unknown Architecture
-#endif
-
-#elif OS(WINDOWS)
-typedef CONTEXT PlatformThreadRegisters;
-#elif USE(PTHREADS)
-typedef pthread_attr_t PlatformThreadRegisters;
-#else
-#error Need a thread register struct for this platform
-#endif
-
-static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
+size_t MachineThreads::ThreadData::getRegisters(ThreadData::Registers& registers)
{
+ ThreadData::Registers::PlatformRegisters& regs = registers.regs;
#if OS(DARWIN)
-
#if CPU(X86)
unsigned user_count = sizeof(regs)/sizeof(int);
thread_state_flavor_t flavor = i386_THREAD_STATE;
@@ -342,29 +485,32 @@ static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, P
"JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
CRASH();
}
- return user_count * sizeof(usword_t);
+ return user_count * sizeof(uintptr_t);
// end OS(DARWIN)
#elif OS(WINDOWS)
regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL;
- GetThreadContext(platformThread, &regs);
+ GetThreadContext(platformThreadHandle, &regs);
return sizeof(CONTEXT);
#elif USE(PTHREADS)
- pthread_attr_init(&regs);
+ pthread_attr_init(&regs.attribute);
#if HAVE(PTHREAD_NP_H) || OS(NETBSD)
+#if !OS(OPENBSD)
// e.g. on FreeBSD 5.4, neundorf@kde.org
- pthread_attr_get_np(platformThread, &regs);
+ pthread_attr_get_np(platformThread, &regs.attribute);
+#endif
#else
// FIXME: this function is non-portable; other POSIX systems may have different np alternatives
- pthread_getattr_np(platformThread, &regs);
+ pthread_getattr_np(platformThread, &regs.attribute);
#endif
+ regs.machineContext = suspendedMachineContext;
return 0;
#else
#error Need a way to get thread registers on this platform
#endif
}
-static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
+void* MachineThreads::ThreadData::Registers::stackPointer() const
{
#if OS(DARWIN)
@@ -414,77 +560,534 @@ static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
#endif
#elif USE(PTHREADS)
+
+#if OS(FREEBSD) && ENABLE(JIT)
+
+#if CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_esp);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_rsp);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.__gregs[_REG_SP]);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_gpregs.gp_sp);
+#elif CPU(MIPS)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_regs[29]);
+#else
+#error Unknown Architecture
+#endif
+
+#elif defined(__GLIBC__) && ENABLE(JIT)
+
+#if CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_ESP]);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_RSP]);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.arm_sp);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.sp);
+#elif CPU(MIPS)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[29]);
+#else
+#error Unknown Architecture
+#endif
+
+#else
void* stackBase = 0;
size_t stackSize = 0;
- int rc = pthread_attr_getstack(&regs, &stackBase, &stackSize);
+#if OS(OPENBSD)
+ stack_t ss;
+ int rc = pthread_stackseg_np(pthread_self(), &ss);
+ stackBase = (void*)((size_t) ss.ss_sp - ss.ss_size);
+ stackSize = ss.ss_size;
+#else
+ int rc = pthread_attr_getstack(&regs.attribute, &stackBase, &stackSize);
+#endif
(void)rc; // FIXME: Deal with error code somehow? Seems fatal.
ASSERT(stackBase);
return static_cast<char*>(stackBase) + stackSize;
+#endif
+
#else
#error Need a way to get the stack pointer for another thread on this platform
#endif
}
-static void freePlatformThreadRegisters(PlatformThreadRegisters& regs)
+#if ENABLE(SAMPLING_PROFILER)
+void* MachineThreads::ThreadData::Registers::framePointer() const
{
+#if OS(DARWIN)
+
+#if __DARWIN_UNIX03
+
+#if CPU(X86)
+ return reinterpret_cast<void*>(regs.__ebp);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>(regs.__rbp);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>(regs.__r[11]);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>(regs.__x[29]);
+#else
+#error Unknown Architecture
+#endif
+
+#else // !__DARWIN_UNIX03
+
+#if CPU(X86)
+ return reinterpret_cast<void*>(regs.esp);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>(regs.rsp);
+#else
+#error Unknown Architecture
+#endif
+
+#endif // __DARWIN_UNIX03
+
+// end OS(DARWIN)
+#elif OS(WINDOWS)
+
+#if CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.R11);
+#elif CPU(MIPS)
+#error Dont know what to do with mips. Do we even need this?
+#elif CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.Ebp);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.Rbp);
+#else
+#error Unknown Architecture
+#endif
+
+#elif OS(FREEBSD)
+
+#if CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_ebp);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_rbp);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.__gregs[_REG_FP]);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_gpregs.gp_x[29]);
+#elif CPU(MIPS)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_regs[30]);
+#else
+#error Unknown Architecture
+#endif
+
+#elif defined(__GLIBC__)
+
+// The following sequence depends on glibc's sys/ucontext.h.
+#if CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_EBP]);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_RBP]);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.arm_fp);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.regs[29]);
+#elif CPU(MIPS)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[30]);
+#else
+#error Unknown Architecture
+#endif
+
+#else
+#error Need a way to get the frame pointer for another thread on this platform
+#endif
+}
+
+void* MachineThreads::ThreadData::Registers::instructionPointer() const
+{
+#if OS(DARWIN)
+
+#if __DARWIN_UNIX03
+
+#if CPU(X86)
+ return reinterpret_cast<void*>(regs.__eip);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>(regs.__rip);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>(regs.__pc);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>(regs.__pc);
+#else
+#error Unknown Architecture
+#endif
+
+#else // !__DARWIN_UNIX03
+#if CPU(X86)
+ return reinterpret_cast<void*>(regs.eip);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>(regs.rip);
+#else
+#error Unknown Architecture
+#endif
+
+#endif // __DARWIN_UNIX03
+
+// end OS(DARWIN)
+#elif OS(WINDOWS)
+
+#if CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.Pc);
+#elif CPU(MIPS)
+#error Dont know what to do with mips. Do we even need this?
+#elif CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.Eip);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.Rip);
+#else
+#error Unknown Architecture
+#endif
+
+#elif OS(FREEBSD)
+
+#if CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_eip);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_rip);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.__gregs[_REG_PC]);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_gpregs.gp_elr);
+#elif CPU(MIPS)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_pc);
+#else
+#error Unknown Architecture
+#endif
+
+#elif defined(__GLIBC__)
+
+// The following sequence depends on glibc's sys/ucontext.h.
+#if CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_EIP]);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_RIP]);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.arm_pc);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.pc);
+#elif CPU(MIPS)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.pc);
+#else
+#error Unknown Architecture
+#endif
+
+#else
+#error Need a way to get the instruction pointer for another thread on this platform
+#endif
+}
+
+void* MachineThreads::ThreadData::Registers::llintPC() const
+{
+ // LLInt uses regT4 as PC.
+#if OS(DARWIN)
+
+#if __DARWIN_UNIX03
+
+#if CPU(X86)
+ static_assert(LLInt::LLIntPC == X86Registers::esi, "Wrong LLInt PC.");
+ return reinterpret_cast<void*>(regs.__esi);
+#elif CPU(X86_64)
+ static_assert(LLInt::LLIntPC == X86Registers::r8, "Wrong LLInt PC.");
+ return reinterpret_cast<void*>(regs.__r8);
+#elif CPU(ARM)
+ static_assert(LLInt::LLIntPC == ARMRegisters::r8, "Wrong LLInt PC.");
+ return reinterpret_cast<void*>(regs.__r[8]);
+#elif CPU(ARM64)
+ static_assert(LLInt::LLIntPC == ARM64Registers::x4, "Wrong LLInt PC.");
+ return reinterpret_cast<void*>(regs.__x[4]);
+#else
+#error Unknown Architecture
+#endif
+
+#else // !__DARWIN_UNIX03
+#if CPU(X86)
+ static_assert(LLInt::LLIntPC == X86Registers::esi, "Wrong LLInt PC.");
+ return reinterpret_cast<void*>(regs.esi);
+#elif CPU(X86_64)
+ static_assert(LLInt::LLIntPC == X86Registers::r8, "Wrong LLInt PC.");
+ return reinterpret_cast<void*>(regs.r8);
+#else
+#error Unknown Architecture
+#endif
+
+#endif // __DARWIN_UNIX03
+
+// end OS(DARWIN)
+#elif OS(WINDOWS)
+
+#if CPU(ARM)
+ static_assert(LLInt::LLIntPC == ARMRegisters::r8, "Wrong LLInt PC.");
+ return reinterpret_cast<void*>((uintptr_t) regs.R8);
+#elif CPU(MIPS)
+#error Dont know what to do with mips. Do we even need this?
+#elif CPU(X86)
+ static_assert(LLInt::LLIntPC == X86Registers::esi, "Wrong LLInt PC.");
+ return reinterpret_cast<void*>((uintptr_t) regs.Esi);
+#elif CPU(X86_64)
+ static_assert(LLInt::LLIntPC == X86Registers::r10, "Wrong LLInt PC.");
+ return reinterpret_cast<void*>((uintptr_t) regs.R10);
+#else
+#error Unknown Architecture
+#endif
+
+#elif OS(FREEBSD)
+
+#if CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_esi);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_r8);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.__gregs[_REG_R8]);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_gpregs.gp_x[4]);
+#elif CPU(MIPS)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.mc_regs[12]);
+#else
+#error Unknown Architecture
+#endif
+
+#elif defined(__GLIBC__)
+
+// The following sequence depends on glibc's sys/ucontext.h.
+#if CPU(X86)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_ESI]);
+#elif CPU(X86_64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[REG_R8]);
+#elif CPU(ARM)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.arm_r8);
+#elif CPU(ARM64)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.regs[4]);
+#elif CPU(MIPS)
+ return reinterpret_cast<void*>((uintptr_t) regs.machineContext.gregs[12]);
+#else
+#error Unknown Architecture
+#endif
+
+#else
+#error Need a way to get the LLIntPC for another thread on this platform
+#endif
+}
+#endif // ENABLE(SAMPLING_PROFILER)
+
+void MachineThreads::ThreadData::freeRegisters(ThreadData::Registers& registers)
+{
+ ThreadData::Registers::PlatformRegisters& regs = registers.regs;
#if USE(PTHREADS) && !OS(WINDOWS) && !OS(DARWIN)
- pthread_attr_destroy(&regs);
+ pthread_attr_destroy(&regs.attribute);
#else
UNUSED_PARAM(regs);
#endif
}
-void MachineThreads::gatherFromOtherThread(ConservativeRoots& conservativeRoots, Thread* thread)
+static inline int osRedZoneAdjustment()
+{
+ int redZoneAdjustment = 0;
+#if !OS(WINDOWS)
+#if CPU(X86_64)
+ // See http://people.freebsd.org/~obrien/amd64-elf-abi.pdf Section 3.2.2.
+ redZoneAdjustment = -128;
+#elif CPU(ARM64)
+ // See https://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARM64FunctionCallingConventions.html#//apple_ref/doc/uid/TP40013702-SW7
+ redZoneAdjustment = -128;
+#endif
+#endif // !OS(WINDOWS)
+ return redZoneAdjustment;
+}
+
+std::pair<void*, size_t> MachineThreads::ThreadData::captureStack(void* stackTop)
{
- PlatformThreadRegisters regs;
- size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);
+ char* begin = reinterpret_cast_ptr<char*>(stackBase);
+ char* end = bitwise_cast<char*>(WTF::roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(stackTop)));
+ ASSERT(begin >= end);
- conservativeRoots.add(static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));
+ char* endWithRedZone = end + osRedZoneAdjustment();
+ ASSERT(WTF::roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(endWithRedZone)) == reinterpret_cast<uintptr_t>(endWithRedZone));
- void* stackPointer = otherThreadStackPointer(regs);
- void* stackBase = thread->stackBase;
- swapIfBackwards(stackPointer, stackBase);
- conservativeRoots.add(stackPointer, stackBase);
+ if (endWithRedZone < stackEnd)
+ endWithRedZone = reinterpret_cast_ptr<char*>(stackEnd);
- freePlatformThreadRegisters(regs);
+ std::swap(begin, endWithRedZone);
+ return std::make_pair(begin, endWithRedZone - begin);
}
-void MachineThreads::gatherConservativeRoots(ConservativeRoots& conservativeRoots, void* stackCurrent)
+SUPPRESS_ASAN
+static void copyMemory(void* dst, const void* src, size_t size)
+{
+ size_t dstAsSize = reinterpret_cast<size_t>(dst);
+ size_t srcAsSize = reinterpret_cast<size_t>(src);
+ RELEASE_ASSERT(dstAsSize == WTF::roundUpToMultipleOf<sizeof(intptr_t)>(dstAsSize));
+ RELEASE_ASSERT(srcAsSize == WTF::roundUpToMultipleOf<sizeof(intptr_t)>(srcAsSize));
+ RELEASE_ASSERT(size == WTF::roundUpToMultipleOf<sizeof(intptr_t)>(size));
+
+ intptr_t* dstPtr = reinterpret_cast<intptr_t*>(dst);
+ const intptr_t* srcPtr = reinterpret_cast<const intptr_t*>(src);
+ size /= sizeof(intptr_t);
+ while (size--)
+ *dstPtr++ = *srcPtr++;
+}
+
+
+
+// This function must not call malloc(), free(), or any other function that might
+// acquire a lock. Since 'thread' is suspended, trying to acquire a lock
+// will deadlock if 'thread' holds that lock.
+// This function, specifically the memory copying, was causing problems with Address Sanitizer in
+// apps. Since we cannot blacklist the system memcpy, we must use our own naive implementation,
+// copyMemory, for ASan to work on both instrumented and non-instrumented builds. This is not a
+// significant performance loss as tryCopyOtherThreadStack is only called as part of an O(heapsize)
+// operation. As the heap is generally much larger than the stack the performance hit is minimal.
+// See: https://bugs.webkit.org/show_bug.cgi?id=146297
+void MachineThreads::tryCopyOtherThreadStack(Thread* thread, void* buffer, size_t capacity, size_t* size)
{
- gatherFromCurrentThread(conservativeRoots, stackCurrent);
+ Thread::Registers registers;
+ size_t registersSize = thread->getRegisters(registers);
+
+ // This is a workaround for <rdar://problem/27607384>. During thread initialization,
+ // for some target platforms, thread state is momentarily set to 0 before being
+ // filled in with the target thread's real register values. As a result, there's
+ // a race condition that may result in us getting a null stackPointer.
+ // This issue may manifest with workqueue threads where the OS may choose to recycle
+ // a thread for an expired task.
+ //
+ // The workaround is simply to indicate that there's nothing to copy and return.
+ // This is correct because we will only ever observe a null pointer during thread
+ // initialization. Hence, by definition, there's nothing there that we need to scan
+ // yet, and therefore, nothing that needs to be copied.
+ if (UNLIKELY(!registers.stackPointer())) {
+ *size = 0;
+ return;
+ }
- if (m_threadSpecific) {
- PlatformThread currentPlatformThread = getCurrentPlatformThread();
+ std::pair<void*, size_t> stack = thread->captureStack(registers.stackPointer());
- MutexLocker lock(m_registeredThreadsMutex);
+ bool canCopy = *size + registersSize + stack.second <= capacity;
-#ifndef NDEBUG
- // Forbid malloc during the gather phase. The gather phase suspends
- // threads, so a malloc during gather would risk a deadlock with a
- // thread that had been suspended while holding the malloc lock.
- fastMallocForbid();
-#endif
- for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
- if (!equalThread(thread->platformThread, currentPlatformThread))
- suspendThread(thread->platformThread);
- }
+ if (canCopy)
+ copyMemory(static_cast<char*>(buffer) + *size, &registers, registersSize);
+ *size += registersSize;
- // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
- // and since this is a shared heap, they are real locks.
- for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
- if (!equalThread(thread->platformThread, currentPlatformThread))
- gatherFromOtherThread(conservativeRoots, thread);
- }
+ if (canCopy)
+ copyMemory(static_cast<char*>(buffer) + *size, stack.first, stack.second);
+ *size += stack.second;
- for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
- if (!equalThread(thread->platformThread, currentPlatformThread))
- resumeThread(thread->platformThread);
- }
+ thread->freeRegisters(registers);
+}
-#ifndef NDEBUG
- fastMallocAllow();
+bool MachineThreads::tryCopyOtherThreadStacks(LockHolder&, void* buffer, size_t capacity, size_t* size)
+{
+ // Prevent two VMs from suspending each other's threads at the same time,
+ // which can cause deadlock: <rdar://problem/20300842>.
+ static StaticLock mutex;
+ std::lock_guard<StaticLock> lock(mutex);
+
+ *size = 0;
+
+ PlatformThread currentPlatformThread = getCurrentPlatformThread();
+ int numberOfThreads = 0; // Using 0 to denote that we haven't counted the number of threads yet.
+ int index = 1;
+ Thread* threadsToBeDeleted = nullptr;
+
+ Thread* previousThread = nullptr;
+ for (Thread* thread = m_registeredThreads; thread; index++) {
+ if (*thread != currentPlatformThread) {
+ bool success = thread->suspend();
+#if OS(DARWIN)
+ if (!success) {
+ if (!numberOfThreads) {
+ for (Thread* countedThread = m_registeredThreads; countedThread; countedThread = countedThread->next)
+ numberOfThreads++;
+ }
+
+ // Re-do the suspension to get the actual failure result for logging.
+ kern_return_t error = thread_suspend(thread->platformThread());
+ ASSERT(error != KERN_SUCCESS);
+
+ WTFReportError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
+ "JavaScript garbage collection encountered an invalid thread (err 0x%x): Thread [%d/%d: %p] platformThread %p.",
+ error, index, numberOfThreads, thread, reinterpret_cast<void*>(thread->platformThread()));
+
+ // Put the invalid thread on the threadsToBeDeleted list.
+ // We can't just delete it here because we have suspended other
+ // threads, and they may still be holding the C heap lock which
+ // we need for deleting the invalid thread. Hence, we need to
+ // defer the deletion till after we have resumed all threads.
+ Thread* nextThread = thread->next;
+ thread->next = threadsToBeDeleted;
+ threadsToBeDeleted = thread;
+
+ if (previousThread)
+ previousThread->next = nextThread;
+ else
+ m_registeredThreads = nextThread;
+ thread = nextThread;
+ continue;
+ }
+#else
+ UNUSED_PARAM(numberOfThreads);
+ UNUSED_PARAM(previousThread);
+ ASSERT_UNUSED(success, success);
#endif
+ }
+ previousThread = thread;
+ thread = thread->next;
+ }
+
+ for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
+ if (*thread != currentPlatformThread)
+ tryCopyOtherThreadStack(thread, buffer, capacity, size);
+ }
+
+ for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
+ if (*thread != currentPlatformThread)
+ thread->resume();
}
+
+ for (Thread* thread = threadsToBeDeleted; thread; ) {
+ Thread* nextThread = thread->next;
+ delete thread;
+ thread = nextThread;
+ }
+
+ return *size <= capacity;
+}
+
+static void growBuffer(size_t size, void** buffer, size_t* capacity)
+{
+ if (*buffer)
+ fastFree(*buffer);
+
+ *capacity = WTF::roundUpToMultipleOf(WTF::pageSize(), size * 2);
+ *buffer = fastMalloc(*capacity);
+}
+
+void MachineThreads::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks, CurrentThreadState* currentThreadState)
+{
+ if (currentThreadState)
+ gatherFromCurrentThread(conservativeRoots, jitStubRoutines, codeBlocks, *currentThreadState);
+
+ size_t size;
+ size_t capacity = 0;
+ void* buffer = nullptr;
+ LockHolder lock(m_registeredThreadsMutex);
+ while (!tryCopyOtherThreadStacks(lock, buffer, capacity, &size))
+ growBuffer(size, &buffer, &capacity);
+
+ if (!buffer)
+ return;
+
+ conservativeRoots.add(buffer, static_cast<char*>(buffer) + size, jitStubRoutines, codeBlocks);
+ fastFree(buffer);
+}
+
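The loop in gatherConservativeRoots() above is a grow-and-retry protocol: tryCopyOtherThreadStacks() always reports the size it would have needed, and the caller doubles that (rounded up to a page, via growBuffer()) before trying again, so a second pass almost always succeeds. Below is a minimal standalone sketch of the same idiom, not part of this patch, with a hypothetical tryCopyAll() producer standing in for the real thread-stack copier.

#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <vector>

static constexpr size_t kPageSize = 4096; // Stand-in for WTF::pageSize().

static size_t roundUpToMultipleOf(size_t divisor, size_t x)
{
    return (x + divisor - 1) / divisor * divisor;
}

// Hypothetical producer: always reports how many bytes it needs, but only
// copies when everything fits, mirroring tryCopyOtherThreadStacks().
static bool tryCopyAll(const std::vector<char>& source, void* buffer, size_t capacity, size_t* size)
{
    *size = source.size();
    if (*size > capacity)
        return false; // Caller must grow the buffer and retry.
    std::copy(source.begin(), source.end(), static_cast<char*>(buffer));
    return true;
}

// Mirrors the shape of gatherConservativeRoots(): retry until the copy fits.
void* copyWithRetry(const std::vector<char>& source, size_t* outSize)
{
    size_t size = 0;
    size_t capacity = 0;
    void* buffer = nullptr;
    while (!tryCopyAll(source, buffer, capacity, &size)) {
        free(buffer);
        capacity = roundUpToMultipleOf(kPageSize, size * 2); // Overshoot, as growBuffer() does.
        buffer = malloc(capacity);
    }
    *outSize = size;
    return buffer; // Caller owns and eventually frees the buffer.
}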
+NEVER_INLINE int callWithCurrentThreadState(const ScopedLambda<void(CurrentThreadState&)>& lambda)
+{
+ DECLARE_AND_COMPUTE_CURRENT_THREAD_STATE(state);
+ lambda(state);
+ return 42; // Suppress tail call optimization.
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/MachineStackMarker.h b/Source/JavaScriptCore/heap/MachineStackMarker.h
index 49823d43e..da979c582 100644
--- a/Source/JavaScriptCore/heap/MachineStackMarker.h
+++ b/Source/JavaScriptCore/heap/MachineStackMarker.h
@@ -1,7 +1,7 @@
/*
* Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
* Copyright (C) 2001 Peter Kelly (pmk@post.com)
- * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2003-2017 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -19,47 +19,185 @@
*
*/
-#ifndef MachineThreads_h
-#define MachineThreads_h
+#pragma once
+#include "RegisterState.h"
+#include <wtf/Lock.h>
#include <wtf/Noncopyable.h>
+#include <wtf/ScopedLambda.h>
#include <wtf/ThreadSpecific.h>
-#include <wtf/ThreadingPrimitives.h>
+
+#if OS(DARWIN)
+#include <mach/thread_act.h>
+#endif
+
+#if USE(PTHREADS) && !OS(WINDOWS) && !OS(DARWIN)
+#include <semaphore.h>
+#include <signal.h>
+// Including signal.h alone does not make mcontext_t and ucontext_t available on FreeBSD.
+// This bug has been fixed in FreeBSD 11.0-CURRENT, so this workaround can be
+// removed after FreeBSD 10.x goes EOL.
+// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=207079
+#if OS(FREEBSD)
+#include <ucontext.h>
+#endif
+#endif
+
+#if OS(DARWIN)
+typedef mach_port_t PlatformThread;
+#elif OS(WINDOWS)
+typedef DWORD PlatformThread;
+#elif USE(PTHREADS)
+typedef pthread_t PlatformThread;
+#endif // OS(DARWIN)
namespace JSC {
- class ConservativeRoots;
- class Heap;
+class CodeBlockSet;
+class ConservativeRoots;
+class Heap;
+class JITStubRoutineSet;
+
+struct CurrentThreadState {
+ void* stackOrigin { nullptr };
+ void* stackTop { nullptr };
+ RegisterState* registerState { nullptr };
+};
+
+class MachineThreads {
+ WTF_MAKE_NONCOPYABLE(MachineThreads);
+public:
+ MachineThreads(Heap*);
+ ~MachineThreads();
+
+ void gatherConservativeRoots(ConservativeRoots&, JITStubRoutineSet&, CodeBlockSet&, CurrentThreadState*);
- class MachineThreads {
- WTF_MAKE_NONCOPYABLE(MachineThreads);
+ JS_EXPORT_PRIVATE void addCurrentThread(); // Only needs to be called by clients that can use the same heap from multiple threads.
+
+ class ThreadData {
+ WTF_MAKE_FAST_ALLOCATED;
public:
- MachineThreads(Heap*);
- ~MachineThreads();
+ ThreadData();
+ ~ThreadData();
- void gatherConservativeRoots(ConservativeRoots&, void* stackCurrent);
+ static ThreadData* createForCurrentThread();
- JS_EXPORT_PRIVATE void makeUsableFromMultipleThreads();
- JS_EXPORT_PRIVATE void addCurrentThread(); // Only needs to be called by clients that can use the same heap from multiple threads.
+ struct Registers {
+ void* stackPointer() const;
+#if ENABLE(SAMPLING_PROFILER)
+ void* framePointer() const;
+ void* instructionPointer() const;
+ void* llintPC() const;
+#endif // ENABLE(SAMPLING_PROFILER)
+
+#if OS(DARWIN)
+#if CPU(X86)
+ typedef i386_thread_state_t PlatformRegisters;
+#elif CPU(X86_64)
+ typedef x86_thread_state64_t PlatformRegisters;
+#elif CPU(PPC)
+ typedef ppc_thread_state_t PlatformRegisters;
+#elif CPU(PPC64)
+ typedef ppc_thread_state64_t PlatformRegisters;
+#elif CPU(ARM)
+ typedef arm_thread_state_t PlatformRegisters;
+#elif CPU(ARM64)
+ typedef arm_thread_state64_t PlatformRegisters;
+#else
+#error Unknown Architecture
+#endif
+
+#elif OS(WINDOWS)
+ typedef CONTEXT PlatformRegisters;
+#elif USE(PTHREADS)
+ struct PlatformRegisters {
+ pthread_attr_t attribute;
+ mcontext_t machineContext;
+ };
+#else
+#error Need a thread register struct for this platform
+#endif
- private:
- void gatherFromCurrentThread(ConservativeRoots&, void* stackCurrent);
+ PlatformRegisters regs;
+ };
- class Thread;
+ bool suspend();
+ void resume();
+ size_t getRegisters(Registers&);
+ void freeRegisters(Registers&);
+ std::pair<void*, size_t> captureStack(void* stackTop);
- static void removeThread(void*);
- void removeCurrentThread();
+ PlatformThread platformThread;
+ void* stackBase;
+ void* stackEnd;
+#if OS(WINDOWS)
+ HANDLE platformThreadHandle;
+#elif USE(PTHREADS) && !OS(DARWIN)
+ sem_t semaphoreForSuspendResume;
+ mcontext_t suspendedMachineContext;
+ int suspendCount { 0 };
+ std::atomic<bool> suspended { false };
+#endif
+ };
+
+ class Thread {
+ WTF_MAKE_FAST_ALLOCATED;
+ Thread(ThreadData*);
- void gatherFromOtherThread(ConservativeRoots&, Thread*);
+ public:
+ using Registers = ThreadData::Registers;
- Mutex m_registeredThreadsMutex;
- Thread* m_registeredThreads;
- WTF::ThreadSpecificKey m_threadSpecific;
+ static Thread* createForCurrentThread();
+
+ bool operator==(const PlatformThread& other) const;
+ bool operator!=(const PlatformThread& other) const { return !(*this == other); }
+
+ bool suspend() { return data->suspend(); }
+ void resume() { data->resume(); }
+ size_t getRegisters(Registers& regs) { return data->getRegisters(regs); }
+ void freeRegisters(Registers& regs) { data->freeRegisters(regs); }
+ std::pair<void*, size_t> captureStack(void* stackTop) { return data->captureStack(stackTop); }
+
+ const PlatformThread& platformThread() { return data->platformThread; }
+ void* stackBase() const { return data->stackBase; }
+ void* stackEnd() const { return data->stackEnd; }
+
+ Thread* next;
+ ThreadData* data;
+ };
+
+ Lock& getLock() { return m_registeredThreadsMutex; }
+ Thread* threadsListHead(const LockHolder&) const { ASSERT(m_registeredThreadsMutex.isLocked()); return m_registeredThreads; }
+ Thread* machineThreadForCurrentThread();
+
+private:
+ void gatherFromCurrentThread(ConservativeRoots&, JITStubRoutineSet&, CodeBlockSet&, CurrentThreadState&);
+
+ void tryCopyOtherThreadStack(Thread*, void*, size_t capacity, size_t*);
+ bool tryCopyOtherThreadStacks(LockHolder&, void*, size_t capacity, size_t*);
+
+ static void THREAD_SPECIFIC_CALL removeThread(void*);
+
+ template<typename PlatformThread>
+ void removeThreadIfFound(PlatformThread);
+
+ Lock m_registeredThreadsMutex;
+ Thread* m_registeredThreads;
+ WTF::ThreadSpecificKey m_threadSpecificForMachineThreads;
#if !ASSERT_DISABLED
- Heap* m_heap;
+ Heap* m_heap;
#endif
- };
+};
+
+#define DECLARE_AND_COMPUTE_CURRENT_THREAD_STATE(stateName) \
+ CurrentThreadState stateName; \
+ stateName.stackTop = &stateName; \
+ stateName.stackOrigin = wtfThreadData().stack().origin(); \
+ ALLOCATE_AND_GET_REGISTER_STATE(stateName ## _registerState); \
+ stateName.registerState = &stateName ## _registerState
+
+// The return value is meaningless. We just use it to suppress tail call optimization.
+int callWithCurrentThreadState(const ScopedLambda<void(CurrentThreadState&)>&);
} // namespace JSC
-#endif // MachineThreads_h
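A usage sketch, not part of this patch: callers wrap their work in callWithCurrentThreadState() so the lambda runs above a frame holding a freshly captured CurrentThreadState, and the otherwise meaningless return value suppresses tail-call optimization so that frame stays live while the lambda runs. The wrapper function below is hypothetical; scopedLambda is WTF's non-allocating lambda adapter.

#include <wtf/ScopedLambda.h>

// Hypothetical helper showing how a caller would thread CurrentThreadState
// through to gatherConservativeRoots().
void scanCallingThread(JSC::MachineThreads& machineThreads, JSC::ConservativeRoots& roots,
    JSC::JITStubRoutineSet& jitStubRoutines, JSC::CodeBlockSet& codeBlocks)
{
    JSC::callWithCurrentThreadState(WTF::scopedLambda<void(JSC::CurrentThreadState&)>(
        [&] (JSC::CurrentThreadState& state) {
            // state.stackOrigin, state.stackTop, and state.registerState now
            // describe the calling thread; hand them to the conservative scan.
            machineThreads.gatherConservativeRoots(roots, jitStubRoutines, codeBlocks, &state);
        }));
}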
diff --git a/Source/JavaScriptCore/heap/MarkStack.cpp b/Source/JavaScriptCore/heap/MarkStack.cpp
index 688de42e3..871b30180 100644
--- a/Source/JavaScriptCore/heap/MarkStack.cpp
+++ b/Source/JavaScriptCore/heap/MarkStack.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2009-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -25,90 +25,57 @@
#include "config.h"
#include "MarkStack.h"
-#include "MarkStackInlines.h"
-
-#include "ConservativeRoots.h"
-#include "CopiedSpace.h"
-#include "CopiedSpaceInlines.h"
-#include "Heap.h"
-#include "JSArray.h"
-#include "JSCell.h"
-#include "JSObject.h"
-
-#include "SlotVisitorInlines.h"
-#include "Structure.h"
-#include "WriteBarrier.h"
-#include <wtf/Atomics.h>
-#include <wtf/DataLog.h>
-#include <wtf/MainThread.h>
-namespace JSC {
+#include "GCSegmentedArrayInlines.h"
+#include "JSCInlines.h"
-COMPILE_ASSERT(MarkStackSegment::blockSize == WeakBlock::blockSize, blockSizeMatch);
+namespace JSC {
-MarkStackArray::MarkStackArray(BlockAllocator& blockAllocator)
- : m_blockAllocator(blockAllocator)
- , m_top(0)
- , m_numberOfSegments(0)
+MarkStackArray::MarkStackArray()
+ : GCSegmentedArray<const JSCell*>()
{
- m_segments.push(MarkStackSegment::create(m_blockAllocator.allocate<MarkStackSegment>()));
- m_numberOfSegments++;
}
-MarkStackArray::~MarkStackArray()
+void MarkStackArray::transferTo(MarkStackArray& other)
{
- ASSERT(m_numberOfSegments == 1);
- ASSERT(m_segments.size() == 1);
- m_blockAllocator.deallocate(MarkStackSegment::destroy(m_segments.removeHead()));
+ RELEASE_ASSERT(this != &other);
+
+ // Remove our head and the head of the other list.
+ GCArraySegment<const JSCell*>* myHead = m_segments.removeHead();
+ GCArraySegment<const JSCell*>* otherHead = other.m_segments.removeHead();
m_numberOfSegments--;
- ASSERT(!m_numberOfSegments);
- ASSERT(!m_segments.size());
-}
-
-void MarkStackArray::clear()
-{
- if (!m_segments.head())
- return;
- MarkStackSegment* next;
- for (MarkStackSegment* current = m_segments.head(); current->next(); current = next) {
- next = current->next();
- m_segments.remove(current);
- m_blockAllocator.deallocate(MarkStackSegment::destroy(current));
- }
- m_top = 0;
- m_numberOfSegments = 1;
-#if !ASSERT_DISABLED
- m_segments.head()->m_top = 0;
-#endif
-}
-
-void MarkStackArray::expand()
-{
- ASSERT(m_segments.head()->m_top == s_segmentCapacity);
+ other.m_numberOfSegments--;
+
+ other.m_segments.append(m_segments);
- MarkStackSegment* nextSegment = MarkStackSegment::create(m_blockAllocator.allocate<MarkStackSegment>());
+ other.m_numberOfSegments += m_numberOfSegments;
+ m_numberOfSegments = 0;
+
+ // Put the original heads back in their places.
+ m_segments.push(myHead);
+ other.m_segments.push(otherHead);
m_numberOfSegments++;
+ other.m_numberOfSegments++;
-#if !ASSERT_DISABLED
- nextSegment->m_top = 0;
-#endif
-
- m_segments.push(nextSegment);
- setTopForEmptySegment();
- validatePrevious();
+ while (!isEmpty()) {
+ refill();
+ while (canRemoveLast())
+ other.append(removeLast());
+ }
}
-bool MarkStackArray::refill()
+size_t MarkStackArray::transferTo(MarkStackArray& other, size_t limit)
{
- validatePrevious();
- if (top())
- return true;
- m_blockAllocator.deallocate(MarkStackSegment::destroy(m_segments.removeHead()));
- ASSERT(m_numberOfSegments > 1);
- m_numberOfSegments--;
- setTopForFullSegment();
- validatePrevious();
- return true;
+ size_t count = 0;
+ while (count < limit && !isEmpty()) {
+ refill();
+ while (count < limit && canRemoveLast()) {
+ other.append(removeLast());
+ count++;
+ }
+ }
+ RELEASE_ASSERT(count <= limit);
+ return count;
}
void MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
@@ -133,11 +100,11 @@ void MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
// Remove our head and the head of the other list before we start moving segments around.
// We'll add them back on once we're done donating.
- MarkStackSegment* myHead = m_segments.removeHead();
- MarkStackSegment* otherHead = other.m_segments.removeHead();
+ GCArraySegment<const JSCell*>* myHead = m_segments.removeHead();
+ GCArraySegment<const JSCell*>* otherHead = other.m_segments.removeHead();
while (segmentsToDonate--) {
- MarkStackSegment* current = m_segments.removeHead();
+ GCArraySegment<const JSCell*>* current = m_segments.removeHead();
ASSERT(current);
ASSERT(m_numberOfSegments > 1);
other.m_segments.push(current);
@@ -165,8 +132,8 @@ void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other, size_t idleThread
// If other has an entire segment, steal it and return.
if (other.m_numberOfSegments > 1) {
// Move the heads of the lists aside. We'll push them back on after.
- MarkStackSegment* otherHead = other.m_segments.removeHead();
- MarkStackSegment* myHead = m_segments.removeHead();
+ GCArraySegment<const JSCell*>* otherHead = other.m_segments.removeHead();
+ GCArraySegment<const JSCell*>* myHead = m_segments.removeHead();
ASSERT(other.m_segments.head()->m_top == s_segmentCapacity);
@@ -183,33 +150,10 @@ void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other, size_t idleThread
return;
}
- size_t numberOfCellsToSteal = (other.size() + idleThreadCount - 1) / idleThreadCount; // Round up to steal 1 / 1.
+ // Steal ceil(other.size() / idleThreadCount) things.
+ size_t numberOfCellsToSteal = (other.size() + idleThreadCount - 1) / idleThreadCount;
while (numberOfCellsToSteal-- > 0 && other.canRemoveLast())
append(other.removeLast());
}
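The expression above is the standard ceiling-division trick in integer arithmetic: for example, with other.size() == 10 and idleThreadCount == 4, (10 + 4 - 1) / 4 == 3, so this thread steals 3 cells rather than the 2 that truncating division would give. A minimal sketch of the computation follows; the helper name is illustrative, not from the patch.

#include <cstddef>

// ceilDiv(n, d) is the smallest integer >= n / d, for d > 0.
static size_t ceilDiv(size_t n, size_t d)
{
    return (n + d - 1) / d;
}

// Each of idleThreadCount idle threads claims an equal-ish share of the donor's
// cells; rounding up guarantees at least one cell is stolen whenever any exist.
size_t cellsToSteal(size_t donorSize, size_t idleThreadCount)
{
    return ceilDiv(donorSize, idleThreadCount);
}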
-void MarkStackArray::fillVector(Vector<const JSCell*>& vector)
-{
- ASSERT(vector.size() == size());
-
- MarkStackSegment* currentSegment = m_segments.head();
- if (!currentSegment)
- return;
-
- unsigned count = 0;
- for (unsigned i = 0; i < m_top; ++i) {
- ASSERT(currentSegment->data()[i]);
- vector[count++] = currentSegment->data()[i];
- }
-
- currentSegment = currentSegment->next();
- while (currentSegment) {
- for (unsigned i = 0; i < s_segmentCapacity; ++i) {
- ASSERT(currentSegment->data()[i]);
- vector[count++] = currentSegment->data()[i];
- }
- currentSegment = currentSegment->next();
- }
-}
-
} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/MarkStack.h b/Source/JavaScriptCore/heap/MarkStack.h
index 6729bad22..89edeb711 100644
--- a/Source/JavaScriptCore/heap/MarkStack.h
+++ b/Source/JavaScriptCore/heap/MarkStack.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2009-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,111 +23,22 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef MarkStack_h
-#define MarkStack_h
+#pragma once
-#if ENABLE(OBJECT_MARK_LOGGING)
-#define MARK_LOG_MESSAGE0(message) dataLogF(message)
-#define MARK_LOG_MESSAGE1(message, arg1) dataLogF(message, arg1)
-#define MARK_LOG_MESSAGE2(message, arg1, arg2) dataLogF(message, arg1, arg2)
-#define MARK_LOG_ROOT(visitor, rootName) \
- dataLogF("\n%s: ", rootName); \
- (visitor).resetChildCount()
-#define MARK_LOG_PARENT(visitor, parent) \
- dataLogF("\n%p (%s): ", parent, parent->className() ? parent->className() : "unknown"); \
- (visitor).resetChildCount()
-#define MARK_LOG_CHILD(visitor, child) \
- if ((visitor).childCount()) \
- dataLogFString(", "); \
- dataLogF("%p", child); \
- (visitor).incrementChildCount()
-#else
-#define MARK_LOG_MESSAGE0(message) do { } while (false)
-#define MARK_LOG_MESSAGE1(message, arg1) do { } while (false)
-#define MARK_LOG_MESSAGE2(message, arg1, arg2) do { } while (false)
-#define MARK_LOG_ROOT(visitor, rootName) do { } while (false)
-#define MARK_LOG_PARENT(visitor, parent) do { } while (false)
-#define MARK_LOG_CHILD(visitor, child) do { } while (false)
-#endif
-
-#include "HeapBlock.h"
-#include <wtf/StdLibExtras.h>
-#include <wtf/Vector.h>
+#include "GCSegmentedArray.h"
namespace JSC {
-class BlockAllocator;
-class DeadBlock;
class JSCell;
-class MarkStackSegment : public HeapBlock<MarkStackSegment> {
-public:
- MarkStackSegment(Region* region)
- : HeapBlock<MarkStackSegment>(region)
-#if !ASSERT_DISABLED
- , m_top(0)
-#endif
- {
- }
-
- static MarkStackSegment* create(DeadBlock*);
-
- const JSCell** data()
- {
- return bitwise_cast<const JSCell**>(this + 1);
- }
-
- static const size_t blockSize = 4 * KB;
-
-#if !ASSERT_DISABLED
- size_t m_top;
-#endif
-};
-
-class MarkStackArray {
+class MarkStackArray : public GCSegmentedArray<const JSCell*> {
public:
- MarkStackArray(BlockAllocator&);
- ~MarkStackArray();
+ MarkStackArray();
- void append(const JSCell*);
-
- bool canRemoveLast();
- const JSCell* removeLast();
- bool refill();
-
- void donateSomeCellsTo(MarkStackArray& other);
- void stealSomeCellsFrom(MarkStackArray& other, size_t idleThreadCount);
-
- size_t size();
- bool isEmpty();
-
- void fillVector(Vector<const JSCell*>&);
- void clear();
-
-private:
- template <size_t size> struct CapacityFromSize {
- static const size_t value = (size - sizeof(MarkStackSegment)) / sizeof(const JSCell*);
- };
-
- JS_EXPORT_PRIVATE void expand();
-
- size_t postIncTop();
- size_t preDecTop();
- void setTopForFullSegment();
- void setTopForEmptySegment();
- size_t top();
-
- void validatePrevious();
-
- DoublyLinkedList<MarkStackSegment> m_segments;
- BlockAllocator& m_blockAllocator;
-
- JS_EXPORT_PRIVATE static const size_t s_segmentCapacity = CapacityFromSize<MarkStackSegment::blockSize>::value;
- size_t m_top;
- size_t m_numberOfSegments;
-
+ void transferTo(MarkStackArray&);
+ size_t transferTo(MarkStackArray&, size_t limit); // Optimized for when `limit` is small.
+ void donateSomeCellsTo(MarkStackArray&);
+ void stealSomeCellsFrom(MarkStackArray&, size_t idleThreadCount);
};
} // namespace JSC
-
-#endif
diff --git a/Source/JavaScriptCore/heap/MarkStackInlines.h b/Source/JavaScriptCore/heap/MarkStackInlines.h
deleted file mode 100644
index c577de602..000000000
--- a/Source/JavaScriptCore/heap/MarkStackInlines.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef MarkStackInlines_h
-#define MarkStackInlines_h
-
-#include "GCThreadSharedData.h"
-#include "MarkStack.h"
-
-namespace JSC {
-
-inline MarkStackSegment* MarkStackSegment::create(DeadBlock* block)
-{
- return new (NotNull, block) MarkStackSegment(block->region());
-}
-
-inline size_t MarkStackArray::postIncTop()
-{
- size_t result = m_top++;
- ASSERT(result == m_segments.head()->m_top++);
- return result;
-}
-
-inline size_t MarkStackArray::preDecTop()
-{
- size_t result = --m_top;
- ASSERT(result == --m_segments.head()->m_top);
- return result;
-}
-
-inline void MarkStackArray::setTopForFullSegment()
-{
- ASSERT(m_segments.head()->m_top == s_segmentCapacity);
- m_top = s_segmentCapacity;
-}
-
-inline void MarkStackArray::setTopForEmptySegment()
-{
- ASSERT(!m_segments.head()->m_top);
- m_top = 0;
-}
-
-inline size_t MarkStackArray::top()
-{
- ASSERT(m_top == m_segments.head()->m_top);
- return m_top;
-}
-
-#if ASSERT_DISABLED
-inline void MarkStackArray::validatePrevious() { }
-#else
-inline void MarkStackArray::validatePrevious()
-{
- unsigned count = 0;
- for (MarkStackSegment* current = m_segments.head(); current; current = current->next())
- count++;
- ASSERT(m_segments.size() == m_numberOfSegments);
-}
-#endif
-
-inline void MarkStackArray::append(const JSCell* cell)
-{
- if (m_top == s_segmentCapacity)
- expand();
- m_segments.head()->data()[postIncTop()] = cell;
-}
-
-inline bool MarkStackArray::canRemoveLast()
-{
- return !!m_top;
-}
-
-inline const JSCell* MarkStackArray::removeLast()
-{
- return m_segments.head()->data()[preDecTop()];
-}
-
-inline bool MarkStackArray::isEmpty()
-{
- if (m_top)
- return false;
- if (m_segments.head()->next()) {
- ASSERT(m_segments.head()->next()->m_top == s_segmentCapacity);
- return false;
- }
- return true;
-}
-
-inline size_t MarkStackArray::size()
-{
- return m_top + s_segmentCapacity * (m_numberOfSegments - 1);
-}
-
-} // namespace JSC
-
-#endif // MarkStackInlines_h
-
diff --git a/Source/JavaScriptCore/heap/MarkedAllocator.cpp b/Source/JavaScriptCore/heap/MarkedAllocator.cpp
index c2b0f72de..5ee544a3c 100644
--- a/Source/JavaScriptCore/heap/MarkedAllocator.cpp
+++ b/Source/JavaScriptCore/heap/MarkedAllocator.cpp
@@ -1,21 +1,60 @@
+/*
+ * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#include "config.h"
#include "MarkedAllocator.h"
-#include "DelayedReleaseScope.h"
+#include "AllocatingScope.h"
#include "GCActivityCallback.h"
#include "Heap.h"
#include "IncrementalSweeper.h"
+#include "JSCInlines.h"
+#include "MarkedAllocatorInlines.h"
+#include "MarkedBlockInlines.h"
+#include "SuperSampler.h"
#include "VM.h"
#include <wtf/CurrentTime.h>
namespace JSC {
-static bool isListPagedOut(double deadline, DoublyLinkedList<MarkedBlock>& list)
+MarkedAllocator::MarkedAllocator(Heap* heap, Subspace* subspace, size_t cellSize)
+ : m_currentBlock(0)
+ , m_lastActiveBlock(0)
+ , m_cellSize(static_cast<unsigned>(cellSize))
+ , m_attributes(subspace->attributes())
+ , m_heap(heap)
+ , m_subspace(subspace)
+{
+}
+
+bool MarkedAllocator::isPagedOut(double deadline)
{
unsigned itersSinceLastTimeCheck = 0;
- MarkedBlock* block = list.head();
- while (block) {
- block = block->next();
+ for (auto* block : m_blocks) {
+ if (block)
+ block->block().updateNeedsDestruction();
++itersSinceLastTimeCheck;
if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) {
double currentTime = WTF::monotonicallyIncreasingTime();
@@ -27,162 +66,439 @@ static bool isListPagedOut(double deadline, DoublyLinkedList<MarkedBlock>& list)
return false;
}
-bool MarkedAllocator::isPagedOut(double deadline)
+bool MarkedAllocator::shouldStealEmptyBlocksFromOtherAllocators() const
{
- if (isListPagedOut(deadline, m_blockList))
- return true;
- return false;
+ return !needsDestruction();
}
-inline void* MarkedAllocator::tryAllocateHelper(size_t bytes)
+MarkedBlock::Handle* MarkedAllocator::findEmptyBlockToSteal()
{
- // We need a while loop to check the free list because the DelayedReleaseScope
- // could cause arbitrary code to execute and exhaust the free list that we
- // thought had elements in it.
- while (!m_freeList.head) {
- DelayedReleaseScope delayedReleaseScope(*m_markedSpace);
- if (m_currentBlock) {
- ASSERT(m_currentBlock == m_nextBlockToSweep);
- m_currentBlock->didConsumeFreeList();
- m_nextBlockToSweep = m_currentBlock->next();
- }
+ // Don't allow others to steal from us, if we wouldn't steal from others.
+ if (!shouldStealEmptyBlocksFromOtherAllocators())
+ return nullptr;
+
+ m_emptyCursor = m_empty.findBit(m_emptyCursor, true);
+ if (m_emptyCursor >= m_blocks.size())
+ return nullptr;
+ return m_blocks[m_emptyCursor];
+}
- MarkedBlock* next;
- for (MarkedBlock*& block = m_nextBlockToSweep; block; block = next) {
- next = block->next();
+void MarkedAllocator::didConsumeFreeList()
+{
+ if (m_currentBlock)
+ m_currentBlock->didConsumeFreeList();
+
+ setFreeList(FreeList());
+ m_currentBlock = nullptr;
+}
- MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);
-
- if (!freeList.head) {
- block->didConsumeEmptyFreeList();
- m_blockList.remove(block);
- m_blockList.push(block);
- if (!m_lastFullBlock)
- m_lastFullBlock = block;
- continue;
- }
-
- if (bytes > block->cellSize()) {
- block->stopAllocating(freeList);
- continue;
- }
-
- m_currentBlock = block;
- m_freeList = freeList;
+void* MarkedAllocator::tryAllocateWithoutCollecting()
+{
+ SuperSamplerScope superSamplerScope(false);
+
+ ASSERT(!m_currentBlock);
+ ASSERT(!m_freeList);
+
+ for (;;) {
+ m_allocationCursor = (m_canAllocateButNotEmpty | m_empty).findBit(m_allocationCursor, true);
+ if (m_allocationCursor >= m_blocks.size())
break;
- }
- if (!m_freeList.head) {
- m_currentBlock = 0;
- return 0;
+ setIsCanAllocateButNotEmpty(NoLockingNecessary, m_allocationCursor, false);
+
+ if (void* result = tryAllocateIn(m_blocks[m_allocationCursor]))
+ return result;
+ }
+
+ if (Options::stealEmptyBlocksFromOtherAllocators()
+ && shouldStealEmptyBlocksFromOtherAllocators()) {
+ if (MarkedBlock::Handle* block = markedSpace().findEmptyBlockToSteal()) {
+ block->sweep();
+
+ // It's good that this clears canAllocateButNotEmpty as well as all other bits,
+ // because there is a remote chance that a block may have both canAllocateButNotEmpty
+ // and empty set at the same time.
+ block->removeFromAllocator();
+ addBlock(block);
+ return allocateIn(block);
}
}
-
- ASSERT(m_freeList.head);
- MarkedBlock::FreeCell* head = m_freeList.head;
- m_freeList.head = head->next;
- ASSERT(head);
- m_markedSpace->didAllocateInBlock(m_currentBlock);
- return head;
-}
-inline void* MarkedAllocator::tryAllocate(size_t bytes)
+ return nullptr;
+}
+
+void* MarkedAllocator::allocateIn(MarkedBlock::Handle* block)
{
- ASSERT(!m_heap->isBusy());
- m_heap->m_operationInProgress = Allocation;
- void* result = tryAllocateHelper(bytes);
- m_heap->m_operationInProgress = NoOperation;
+ void* result = tryAllocateIn(block);
+ RELEASE_ASSERT(result);
return result;
}
+
+void* MarkedAllocator::tryAllocateIn(MarkedBlock::Handle* block)
+{
+ ASSERT(block);
+ ASSERT(!block->isFreeListed());
+
+ FreeList freeList = block->sweep(MarkedBlock::Handle::SweepToFreeList);
+
+ // It's possible to stumble on a completely full block. Marking tries to retire these, but
+ // that algorithm is racy and may forget to do it sometimes.
+ if (freeList.allocationWillFail()) {
+ ASSERT(block->isFreeListed());
+ block->unsweepWithNoNewlyAllocated();
+ ASSERT(!block->isFreeListed());
+ ASSERT(!isEmpty(NoLockingNecessary, block));
+ ASSERT(!isCanAllocateButNotEmpty(NoLockingNecessary, block));
+ return nullptr;
+ }
+
+ m_currentBlock = block;
+ setFreeList(freeList);
-void* MarkedAllocator::allocateSlowCase(size_t bytes)
+ void* result;
+ if (m_freeList.remaining) {
+ unsigned cellSize = m_cellSize;
+ m_freeList.remaining -= cellSize;
+ result = m_freeList.payloadEnd - m_freeList.remaining - cellSize;
+ } else {
+ FreeCell* head = m_freeList.head;
+ m_freeList.head = head->next;
+ result = head;
+ }
+ RELEASE_ASSERT(result);
+ setIsEden(NoLockingNecessary, m_currentBlock, true);
+ markedSpace().didAllocateInBlock(m_currentBlock);
+ return result;
+}
+
+ALWAYS_INLINE void MarkedAllocator::doTestCollectionsIfNeeded(GCDeferralContext* deferralContext)
+{
+ if (!Options::slowPathAllocsBetweenGCs())
+ return;
+
+ static unsigned allocationCount = 0;
+ if (!allocationCount) {
+ if (!m_heap->isDeferred()) {
+ if (deferralContext)
+ deferralContext->m_shouldGC = true;
+ else
+ m_heap->collectAllGarbage();
+ }
+ }
+ if (++allocationCount >= Options::slowPathAllocsBetweenGCs())
+ allocationCount = 0;
+}
+
+void* MarkedAllocator::allocateSlowCase(GCDeferralContext* deferralContext)
{
+ bool crashOnFailure = true;
+ return allocateSlowCaseImpl(deferralContext, crashOnFailure);
+}
+
+void* MarkedAllocator::tryAllocateSlowCase(GCDeferralContext* deferralContext)
+{
+ bool crashOnFailure = false;
+ return allocateSlowCaseImpl(deferralContext, crashOnFailure);
+}
+
+void* MarkedAllocator::allocateSlowCaseImpl(GCDeferralContext* deferralContext, bool crashOnFailure)
+{
+ SuperSamplerScope superSamplerScope(false);
ASSERT(m_heap->vm()->currentThreadIsHoldingAPILock());
-#if COLLECT_ON_EVERY_ALLOCATION
- if (!m_heap->isDeferred())
- m_heap->collectAllGarbage();
- ASSERT(m_heap->m_operationInProgress == NoOperation);
-#endif
+ doTestCollectionsIfNeeded(deferralContext);
+
+ ASSERT(!markedSpace().isIterating());
+ m_heap->didAllocate(m_freeList.originalSize);
- ASSERT(!m_markedSpace->isIterating());
- ASSERT(!m_freeList.head);
- m_heap->didAllocate(m_freeList.bytes);
+ didConsumeFreeList();
- void* result = tryAllocate(bytes);
+ AllocatingScope helpingHeap(*m_heap);
+
+ m_heap->collectIfNecessaryOrDefer(deferralContext);
+
+ // Goofy corner case: the GC called a callback and now this allocator has a currentBlock. This only
+ // happens when running WebKit tests, which inject a callback into the GC's finalization.
+ if (UNLIKELY(m_currentBlock)) {
+ if (crashOnFailure)
+ return allocate(deferralContext);
+ return tryAllocate(deferralContext);
+ }
+
+ void* result = tryAllocateWithoutCollecting();
if (LIKELY(result != 0))
return result;
- if (m_heap->collectIfNecessaryOrDefer()) {
- result = tryAllocate(bytes);
- if (result)
- return result;
+ MarkedBlock::Handle* block = tryAllocateBlock();
+ if (!block) {
+ if (crashOnFailure)
+ RELEASE_ASSERT_NOT_REACHED();
+ else
+ return nullptr;
}
-
- ASSERT(!m_heap->shouldCollect());
-
- MarkedBlock* block = allocateBlock(bytes);
- ASSERT(block);
addBlock(block);
-
- result = tryAllocate(bytes);
+ result = allocateIn(block);
ASSERT(result);
return result;
}
-MarkedBlock* MarkedAllocator::allocateBlock(size_t bytes)
+static size_t blockHeaderSize()
+{
+ return WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(sizeof(MarkedBlock));
+}
+
+size_t MarkedAllocator::blockSizeForBytes(size_t bytes)
{
size_t minBlockSize = MarkedBlock::blockSize;
- size_t minAllocationSize = WTF::roundUpToMultipleOf(WTF::pageSize(), sizeof(MarkedBlock) + bytes);
- size_t blockSize = std::max(minBlockSize, minAllocationSize);
+ size_t minAllocationSize = blockHeaderSize() + WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(bytes);
+ minAllocationSize = WTF::roundUpToMultipleOf(WTF::pageSize(), minAllocationSize);
+ return std::max(minBlockSize, minAllocationSize);
+}
- size_t cellSize = m_cellSize ? m_cellSize : WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(bytes);
+MarkedBlock::Handle* MarkedAllocator::tryAllocateBlock()
+{
+ SuperSamplerScope superSamplerScope(false);
+
+ MarkedBlock::Handle* handle = MarkedBlock::tryCreate(*m_heap);
+ if (!handle)
+ return nullptr;
+
+ markedSpace().didAddBlock(handle);
+
+ return handle;
+}
- if (blockSize == MarkedBlock::blockSize)
- return MarkedBlock::create(m_heap->blockAllocator().allocate<MarkedBlock>(), this, cellSize, m_destructorType);
- return MarkedBlock::create(m_heap->blockAllocator().allocateCustomSize(blockSize, MarkedBlock::blockSize), this, cellSize, m_destructorType);
+void MarkedAllocator::addBlock(MarkedBlock::Handle* block)
+{
+ size_t index;
+ if (m_freeBlockIndices.isEmpty()) {
+ index = m_blocks.size();
+
+ size_t oldCapacity = m_blocks.capacity();
+ m_blocks.append(block);
+ if (m_blocks.capacity() != oldCapacity) {
+ forEachBitVector(
+ NoLockingNecessary,
+ [&] (FastBitVector& vector) {
+ ASSERT_UNUSED(vector, vector.numBits() == oldCapacity);
+ });
+
+ ASSERT(m_blocks.capacity() > oldCapacity);
+
+ LockHolder locker(m_bitvectorLock);
+ forEachBitVector(
+ locker,
+ [&] (FastBitVector& vector) {
+ vector.resize(m_blocks.capacity());
+ });
+ }
+ } else {
+ index = m_freeBlockIndices.takeLast();
+ ASSERT(!m_blocks[index]);
+ m_blocks[index] = block;
+ }
+
+ forEachBitVector(
+ NoLockingNecessary,
+ [&] (FastBitVector& vector) {
+ ASSERT_UNUSED(vector, !vector[index]);
+ });
+
+ // This is the point at which the block learns of its cellSize() and attributes().
+ block->didAddToAllocator(this, index);
+
+ setIsLive(NoLockingNecessary, index, true);
+ setIsEmpty(NoLockingNecessary, index, true);
}
-void MarkedAllocator::addBlock(MarkedBlock* block)
+void MarkedAllocator::removeBlock(MarkedBlock::Handle* block)
{
- // Satisfy the ASSERT in MarkedBlock::sweep.
- DelayedReleaseScope delayedReleaseScope(*m_markedSpace);
- ASSERT(!m_currentBlock);
- ASSERT(!m_freeList.head);
+ ASSERT(block->allocator() == this);
+ ASSERT(m_blocks[block->index()] == block);
+
+ m_blocks[block->index()] = nullptr;
+ m_freeBlockIndices.append(block->index());
+
+ forEachBitVector(
+ holdLock(m_bitvectorLock),
+ [&] (FastBitVector& vector) {
+ vector[block->index()] = false;
+ });
- m_blockList.append(block);
- m_nextBlockToSweep = m_currentBlock = block;
- m_freeList = block->sweep(MarkedBlock::SweepToFreeList);
- m_markedSpace->didAddBlock(block);
+ block->didRemoveFromAllocator();
}
-void MarkedAllocator::removeBlock(MarkedBlock* block)
+void MarkedAllocator::stopAllocating()
{
- if (m_currentBlock == block) {
- m_currentBlock = m_currentBlock->next();
- m_freeList = MarkedBlock::FreeList();
+ if (false)
+ dataLog(RawPointer(this), ": MarkedAllocator::stopAllocating!\n");
+ ASSERT(!m_lastActiveBlock);
+ if (!m_currentBlock) {
+ ASSERT(!m_freeList);
+ return;
}
- if (m_nextBlockToSweep == block)
- m_nextBlockToSweep = m_nextBlockToSweep->next();
+
+ m_currentBlock->stopAllocating(m_freeList);
+ m_lastActiveBlock = m_currentBlock;
+ m_currentBlock = 0;
+ m_freeList = FreeList();
+}
+
+void MarkedAllocator::prepareForAllocation()
+{
+ m_lastActiveBlock = nullptr;
+ m_currentBlock = nullptr;
+ setFreeList(FreeList());
- if (block == m_lastFullBlock)
- m_lastFullBlock = m_lastFullBlock->prev();
+ m_allocationCursor = 0;
+ m_emptyCursor = 0;
+ m_unsweptCursor = 0;
- m_blockList.remove(block);
+ m_eden.clearAll();
+
+ if (UNLIKELY(Options::useImmortalObjects())) {
+ // FIXME: Make this work again.
+ // https://bugs.webkit.org/show_bug.cgi?id=162296
+ RELEASE_ASSERT_NOT_REACHED();
+ }
}
-void MarkedAllocator::reset()
+void MarkedAllocator::lastChanceToFinalize()
{
- m_lastActiveBlock = 0;
- m_currentBlock = 0;
- m_freeList = MarkedBlock::FreeList();
- if (m_heap->operationInProgress() == FullCollection)
- m_lastFullBlock = 0;
+ forEachBlock(
+ [&] (MarkedBlock::Handle* block) {
+ block->lastChanceToFinalize();
+ });
+}
- if (m_lastFullBlock)
- m_nextBlockToSweep = m_lastFullBlock->next() ? m_lastFullBlock->next() : m_lastFullBlock;
- else
- m_nextBlockToSweep = m_blockList.head();
+void MarkedAllocator::setFreeList(const FreeList& freeList)
+{
+ m_freeList = freeList;
+}
+
+void MarkedAllocator::resumeAllocating()
+{
+ if (!m_lastActiveBlock)
+ return;
+
+ m_freeList = m_lastActiveBlock->resumeAllocating();
+ m_currentBlock = m_lastActiveBlock;
+ m_lastActiveBlock = nullptr;
+}
+
+void MarkedAllocator::beginMarkingForFullCollection()
+{
+ // Mark bits are sticky and so is our summary of mark bits. We only clear these during full
+ // collections, so if you survived the last collection you will survive the next one so long
+ // as the next one is an eden collection.
+ m_markingNotEmpty.clearAll();
+ m_markingRetired.clearAll();
+}
+
+void MarkedAllocator::endMarking()
+{
+ m_allocated.clearAll();
+
+ // It's surprising and frustrating to comprehend, but the end-of-marking flip does not need to
+ // know what kind of collection it is. That knowledge is already encoded in the m_markingXYZ
+ // vectors.
+
+ if (needsDestruction()) {
+ // If blocks need destruction then nothing is empty! This is a correct assertion but may
+ // become wrong once we go full concurrent: when we create a new block, it will flicker
+ // into the empty set for a tiny moment. On the other hand, this code is likely to be run
+ // in stopTheWorld.
+ ASSERT(m_empty.isEmpty());
+ m_canAllocateButNotEmpty = m_live & ~m_markingRetired;
+ return;
+ }
+
+ m_empty = m_live & ~m_markingNotEmpty;
+ m_canAllocateButNotEmpty = m_live & m_markingNotEmpty & ~m_markingRetired;
+
+ if (false) {
+ dataLog("Bits for ", m_cellSize, ", ", m_attributes, " after endMarking:\n");
+ dumpBits(WTF::dataFile());
+ }
+}
+
+void MarkedAllocator::snapshotUnsweptForEdenCollection()
+{
+ m_unswept |= m_eden;
+}
+
+void MarkedAllocator::snapshotUnsweptForFullCollection()
+{
+ m_unswept = m_live;
+}
+
+MarkedBlock::Handle* MarkedAllocator::findBlockToSweep()
+{
+ m_unsweptCursor = m_unswept.findBit(m_unsweptCursor, true);
+ if (m_unsweptCursor >= m_blocks.size())
+ return nullptr;
+ return m_blocks[m_unsweptCursor];
+}
+
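findBlockToSweep() above (like findEmptyBlockToSteal() earlier) follows the cursor-over-bitvector idiom described by the cursor comments later in MarkedAllocator.h: the search resumes from the saved cursor, and the caller clears the bit once the block has been handled, so the cursor never has to move backwards. A minimal standalone sketch of the idiom, not part of this patch, using std::bitset in place of WTF::FastBitVector:

#include <bitset>
#include <cstddef>

// Resumes the scan at `cursor`, leaves the cursor on the hit, and returns -1
// when nothing is set at or after the cursor. The caller is expected to clear
// bits[cursor] after processing, exactly as the unswept/empty cursors do.
template<size_t N>
ptrdiff_t findNextSetBit(const std::bitset<N>& bits, size_t& cursor)
{
    for (; cursor < N; ++cursor) {
        if (bits[cursor])
            return static_cast<ptrdiff_t>(cursor);
    }
    return -1;
}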
+void MarkedAllocator::sweep()
+{
+ m_unswept.forEachSetBit(
+ [&] (size_t index) {
+ MarkedBlock::Handle* block = m_blocks[index];
+ block->sweep();
+ });
+}
+
+void MarkedAllocator::shrink()
+{
+ m_empty.forEachSetBit(
+ [&] (size_t index) {
+ markedSpace().freeBlock(m_blocks[index]);
+ });
+}
+
+void MarkedAllocator::assertNoUnswept()
+{
+ if (ASSERT_DISABLED)
+ return;
+
+ if (m_unswept.isEmpty())
+ return;
+
+ dataLog("Assertion failed: unswept not empty in ", *this, ".\n");
+ dumpBits();
+ ASSERT_NOT_REACHED();
+}
+
+void MarkedAllocator::dump(PrintStream& out) const
+{
+ out.print(RawPointer(this), ":", m_cellSize, "/", m_attributes);
+}
+
+void MarkedAllocator::dumpBits(PrintStream& out)
+{
+ unsigned maxNameLength = 0;
+ forEachBitVectorWithName(
+ NoLockingNecessary,
+ [&] (FastBitVector&, const char* name) {
+ unsigned length = strlen(name);
+ maxNameLength = std::max(maxNameLength, length);
+ });
+
+ forEachBitVectorWithName(
+ NoLockingNecessary,
+ [&] (FastBitVector& vector, const char* name) {
+ out.print(" ", name, ": ");
+ for (unsigned i = maxNameLength - strlen(name); i--;)
+ out.print(" ");
+ out.print(vector, "\n");
+ });
+}
+
+MarkedSpace& MarkedAllocator::markedSpace() const
+{
+ return m_subspace->space();
}
} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/MarkedAllocator.h b/Source/JavaScriptCore/heap/MarkedAllocator.h
index e0d3e8902..09903e452 100644
--- a/Source/JavaScriptCore/heap/MarkedAllocator.h
+++ b/Source/JavaScriptCore/heap/MarkedAllocator.h
@@ -1,142 +1,279 @@
-#ifndef MarkedAllocator_h
-#define MarkedAllocator_h
+/*
+ * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#pragma once
+
+#include "AllocatorAttributes.h"
+#include "FreeList.h"
#include "MarkedBlock.h"
-#include <wtf/DoublyLinkedList.h>
+#include <wtf/FastBitVector.h>
+#include <wtf/SentinelLinkedList.h>
+#include <wtf/Vector.h>
namespace JSC {
+class GCDeferralContext;
class Heap;
class MarkedSpace;
class LLIntOffsetsExtractor;
-namespace DFG {
-class SpeculativeJIT;
-}
+#define FOR_EACH_MARKED_ALLOCATOR_BIT(macro) \
+ macro(live, Live) /* The set of block indices that have actual blocks. */\
+ macro(empty, Empty) /* The set of all blocks that have no live objects and nothing to destroy. */ \
+ macro(allocated, Allocated) /* The set of all blocks that are full of live objects. */\
+ macro(canAllocateButNotEmpty, CanAllocateButNotEmpty) /* The set of all blocks that are neither empty nor retired (a block is retired once it is more than minMarkedBlockUtilization full). */ \
+ macro(eden, Eden) /* The set of all blocks that have new objects since the last GC. */\
+ macro(unswept, Unswept) /* The set of all blocks that could be swept by the incremental sweeper. */\
+ \
+ /* These are computed during marking. */\
+ macro(markingNotEmpty, MarkingNotEmpty) /* The set of all blocks that are not empty. */ \
+ macro(markingRetired, MarkingRetired) /* The set of all blocks that are retired. */
+
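FOR_EACH_MARKED_ALLOCATOR_BIT is an X-macro: the list of per-block bitvectors is written once and then expanded several times below to generate the accessor methods, the forEachBitVector callbacks, and the member declarations. A minimal standalone sketch of the technique, not part of this patch, with made-up flag names:

#include <cstdio>

// One list, many expansions: adding a flag here updates every expansion below.
#define FOR_EACH_FLAG(macro) \
    macro(live, Live) \
    macro(dirty, Dirty)

struct Flags {
    // Declare one bool member per flag.
#define DECLARE_FLAG(lowerName, capitalName) bool m_ ## lowerName { false };
    FOR_EACH_FLAG(DECLARE_FLAG)
#undef DECLARE_FLAG

    // Generate isLive()/setIsLive() and isDirty()/setIsDirty().
#define FLAG_ACCESSORS(lowerName, capitalName) \
    bool is ## capitalName() const { return m_ ## lowerName; } \
    void setIs ## capitalName(bool value) { m_ ## lowerName = value; }
    FOR_EACH_FLAG(FLAG_ACCESSORS)
#undef FLAG_ACCESSORS

    // Visit every flag together with its name, like forEachBitVectorWithName().
    void dump() const
    {
#define DUMP_FLAG(lowerName, capitalName) std::printf("%s: %d\n", #capitalName, static_cast<int>(m_ ## lowerName));
        FOR_EACH_FLAG(DUMP_FLAG)
#undef DUMP_FLAG
    }
};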
+// FIXME: We defined canAllocateButNotEmpty and empty to be exclusive:
+//
+// canAllocateButNotEmpty & empty == 0
+//
+// Instead of calling it canAllocate and making it inclusive:
+//
+// canAllocate & empty == empty
+//
+// The latter is probably better. I'll leave it to a future bug to fix that, since breathing on
+// this code leads to regressions for days, and it's not clear that making this change would
+// improve perf since it would not change the collector's behavior, and either way the allocator
+// has to look at both bitvectors.
+// https://bugs.webkit.org/show_bug.cgi?id=162121
+
+// Note that this collector supports overlapping allocator state with marking state, since in a
+// concurrent collector you allow allocation while marking is running. So it's best to visualize a
+// full mutable->eden collect->mutate->full collect cycle and see how the bits above get affected.
+// The example below tries to be exhaustive about what happens to the bits, but omits a lot of
+// things that happen to other state.
+//
+// Create allocator
+// - all bits are empty
+// Start allocating in some block
+// - allocate the block and set the live bit.
+// - the empty bit for the block flickers on and then gets immediately cleared by sweeping.
+// - set the eden bit.
+// Finish allocating in that block
+// - set the allocated bit.
+// Do that to a lot of blocks and then start an eden collection.
+// - beginMarking() has nothing to do.
+// - by default we have cleared markingNotEmpty/markingRetired bits.
+// - marking builds up markingNotEmpty/markingRetired bits.
+// We do endMarking()
+// - clear all allocated bits.
+// - for destructor blocks: fragmented = live & ~markingRetired
+// - for non-destructor blocks:
+// empty = live & ~markingNotEmpty
+// fragmented = live & markingNotEmpty & ~markingRetired
+// Snapshotting.
+// - unswept |= eden
+// Prepare for allocation.
+// - clear eden
+// Finish collection.
+// Allocate in some block that had some free and some live objects.
+// - clear the canAllocateButNotEmpty bit
+// - clear the unswept bit
+// - set the eden bit
+// Finish allocating (set the allocated bit).
+// Allocate in some block that was completely empty.
+// - clear the empty bit
+// - clear the unswept bit
+// - set the eden bit.
+// Finish allocating (set the allocated bit).
+// Allocate in some block that was completely empty in another allocator.
+// - clear the empty bit
+// - clear all bits in that allocator
+// - set the live bit in another allocator and the empty bit.
+// - clear the empty, unswept bits.
+// - set the eden bit.
+// Finish allocating (set the allocated bit).
+// Start a full collection.
+// - beginMarking() clears markingNotEmpty, markingRetired
+// - the heap version is incremented
+// - marking rebuilds markingNotEmpty/markingretired bits.
+// We do endMarking()
+// - clear all allocated bits.
+// - set canAllocateButNotEmpty/empty the same way as in eden collection.
+// Snapshotting.
+// - unswept = live
+// prepare for allocation.
+// - clear eden.
+// Finish collection.
+//
+// Notice how in this scheme, the empty/canAllocateButNotEmpty state stays separate from the
+// markingNotEmpty/markingRetired state. This is one step towards having separated allocation and
+// marking state.
class MarkedAllocator {
friend class LLIntOffsetsExtractor;
public:
- static ptrdiff_t offsetOfFreeListHead();
+ static ptrdiff_t offsetOfFreeList();
+ static ptrdiff_t offsetOfCellSize();
- MarkedAllocator();
- void reset();
+ MarkedAllocator(Heap*, Subspace*, size_t cellSize);
+ void lastChanceToFinalize();
+ void prepareForAllocation();
void stopAllocating();
void resumeAllocating();
- size_t cellSize() { return m_cellSize; }
- MarkedBlock::DestructorType destructorType() { return m_destructorType; }
- void* allocate(size_t);
+ void beginMarkingForFullCollection();
+ void endMarking();
+ void snapshotUnsweptForEdenCollection();
+ void snapshotUnsweptForFullCollection();
+ void sweep();
+ void shrink();
+ void assertNoUnswept();
+ size_t cellSize() const { return m_cellSize; }
+ const AllocatorAttributes& attributes() const { return m_attributes; }
+ bool needsDestruction() const { return m_attributes.destruction == NeedsDestruction; }
+ DestructionMode destruction() const { return m_attributes.destruction; }
+ HeapCell::Kind cellKind() const { return m_attributes.cellKind; }
+ void* allocate(GCDeferralContext* = nullptr);
+ void* tryAllocate(GCDeferralContext* = nullptr);
Heap* heap() { return m_heap; }
- MarkedBlock* takeLastActiveBlock()
+ MarkedBlock::Handle* takeLastActiveBlock()
{
- MarkedBlock* block = m_lastActiveBlock;
+ MarkedBlock::Handle* block = m_lastActiveBlock;
m_lastActiveBlock = 0;
return block;
}
- template<typename Functor> void forEachBlock(Functor&);
+ template<typename Functor> void forEachBlock(const Functor&);
+ template<typename Functor> void forEachNotEmptyBlock(const Functor&);
- void addBlock(MarkedBlock*);
- void removeBlock(MarkedBlock*);
- void init(Heap*, MarkedSpace*, size_t cellSize, MarkedBlock::DestructorType);
+ void addBlock(MarkedBlock::Handle*);
+ void removeBlock(MarkedBlock::Handle*);
bool isPagedOut(double deadline);
+
+ static size_t blockSizeForBytes(size_t);
+
+ Lock& bitvectorLock() { return m_bitvectorLock; }
-private:
- JS_EXPORT_PRIVATE void* allocateSlowCase(size_t);
- void* tryAllocate(size_t);
- void* tryAllocateHelper(size_t);
- MarkedBlock* allocateBlock(size_t);
-
- MarkedBlock::FreeList m_freeList;
- MarkedBlock* m_currentBlock;
- MarkedBlock* m_lastActiveBlock;
- MarkedBlock* m_nextBlockToSweep;
- MarkedBlock* m_lastFullBlock;
- DoublyLinkedList<MarkedBlock> m_blockList;
- size_t m_cellSize;
- MarkedBlock::DestructorType m_destructorType;
- Heap* m_heap;
- MarkedSpace* m_markedSpace;
-};
-
-inline ptrdiff_t MarkedAllocator::offsetOfFreeListHead()
-{
- return OBJECT_OFFSETOF(MarkedAllocator, m_freeList) + OBJECT_OFFSETOF(MarkedBlock::FreeList, head);
-}
+#define MARKED_ALLOCATOR_BIT_ACCESSORS(lowerBitName, capitalBitName) \
+ bool is ## capitalBitName(const AbstractLocker&, size_t index) const { return m_ ## lowerBitName[index]; } \
+ bool is ## capitalBitName(const AbstractLocker& locker, MarkedBlock::Handle* block) const { return is ## capitalBitName(locker, block->index()); } \
+ void setIs ## capitalBitName(const AbstractLocker&, size_t index, bool value) { m_ ## lowerBitName[index] = value; } \
+ void setIs ## capitalBitName(const AbstractLocker& locker, MarkedBlock::Handle* block, bool value) { setIs ## capitalBitName(locker, block->index(), value); }
+ FOR_EACH_MARKED_ALLOCATOR_BIT(MARKED_ALLOCATOR_BIT_ACCESSORS)
+#undef MARKED_ALLOCATOR_BIT_ACCESSORS
-inline MarkedAllocator::MarkedAllocator()
- : m_currentBlock(0)
- , m_lastActiveBlock(0)
- , m_nextBlockToSweep(0)
- , m_lastFullBlock(0)
- , m_cellSize(0)
- , m_destructorType(MarkedBlock::None)
- , m_heap(0)
- , m_markedSpace(0)
-{
-}
-
-inline void MarkedAllocator::init(Heap* heap, MarkedSpace* markedSpace, size_t cellSize, MarkedBlock::DestructorType destructorType)
-{
- m_heap = heap;
- m_markedSpace = markedSpace;
- m_cellSize = cellSize;
- m_destructorType = destructorType;
-}
-
-inline void* MarkedAllocator::allocate(size_t bytes)
-{
- MarkedBlock::FreeCell* head = m_freeList.head;
- if (UNLIKELY(!head)) {
- void* result = allocateSlowCase(bytes);
-#ifndef NDEBUG
- memset(result, 0xCD, bytes);
-#endif
- return result;
+ template<typename Func>
+ void forEachBitVector(const AbstractLocker&, const Func& func)
+ {
+#define MARKED_ALLOCATOR_BIT_CALLBACK(lowerBitName, capitalBitName) \
+ func(m_ ## lowerBitName);
+ FOR_EACH_MARKED_ALLOCATOR_BIT(MARKED_ALLOCATOR_BIT_CALLBACK);
+#undef MARKED_ALLOCATOR_BIT_CALLBACK
}
- m_freeList.head = head->next;
-#ifndef NDEBUG
- memset(head, 0xCD, bytes);
-#endif
- return head;
-}
-
-inline void MarkedAllocator::stopAllocating()
-{
- ASSERT(!m_lastActiveBlock);
- if (!m_currentBlock) {
- ASSERT(!m_freeList.head);
- return;
+ template<typename Func>
+ void forEachBitVectorWithName(const AbstractLocker&, const Func& func)
+ {
+#define MARKED_ALLOCATOR_BIT_CALLBACK(lowerBitName, capitalBitName) \
+ func(m_ ## lowerBitName, #capitalBitName);
+ FOR_EACH_MARKED_ALLOCATOR_BIT(MARKED_ALLOCATOR_BIT_CALLBACK);
+#undef MARKED_ALLOCATOR_BIT_CALLBACK
}
- m_currentBlock->stopAllocating(m_freeList);
- m_lastActiveBlock = m_currentBlock;
- m_currentBlock = 0;
- m_freeList = MarkedBlock::FreeList();
-}
+ MarkedAllocator* nextAllocator() const { return m_nextAllocator; }
+ MarkedAllocator* nextAllocatorInSubspace() const { return m_nextAllocatorInSubspace; }
+
+ void setNextAllocator(MarkedAllocator* allocator) { m_nextAllocator = allocator; }
+ void setNextAllocatorInSubspace(MarkedAllocator* allocator) { m_nextAllocatorInSubspace = allocator; }
+
+ MarkedBlock::Handle* findEmptyBlockToSteal();
+
+ MarkedBlock::Handle* findBlockToSweep();
+
+ Subspace* subspace() const { return m_subspace; }
+ MarkedSpace& markedSpace() const;
+
+ void dump(PrintStream&) const;
+ void dumpBits(PrintStream& = WTF::dataFile());
+
+private:
+ friend class MarkedBlock;
+
+ bool shouldStealEmptyBlocksFromOtherAllocators() const;
+
+ JS_EXPORT_PRIVATE void* allocateSlowCase(GCDeferralContext*);
+ JS_EXPORT_PRIVATE void* tryAllocateSlowCase(GCDeferralContext*);
+ void* allocateSlowCaseImpl(GCDeferralContext*, bool crashOnFailure);
+ void didConsumeFreeList();
+ void* tryAllocateWithoutCollecting();
+ MarkedBlock::Handle* tryAllocateBlock();
+ void* tryAllocateIn(MarkedBlock::Handle*);
+ void* allocateIn(MarkedBlock::Handle*);
+ ALWAYS_INLINE void doTestCollectionsIfNeeded(GCDeferralContext*);
+
+ void setFreeList(const FreeList&);
+
+ FreeList m_freeList;
+
+ Vector<MarkedBlock::Handle*> m_blocks;
+ Vector<unsigned> m_freeBlockIndices;
-inline void MarkedAllocator::resumeAllocating()
-{
- if (!m_lastActiveBlock)
- return;
+ // The mutator uses this to guard resizing the bitvectors. Anything in the GC that may run
+ // concurrently with the mutator must hold this lock when accessing the bitvectors.
+ Lock m_bitvectorLock;
+#define MARKED_ALLOCATOR_BIT_DECLARATION(lowerBitName, capitalBitName) \
+ FastBitVector m_ ## lowerBitName;
+ FOR_EACH_MARKED_ALLOCATOR_BIT(MARKED_ALLOCATOR_BIT_DECLARATION)
+#undef MARKED_ALLOCATOR_BIT_DECLARATION
+
+ // After you do something to a block based on one of these cursors, you clear the bit in the
+ // corresponding bitvector and leave the cursor where it was.
+ size_t m_allocationCursor { 0 }; // Points to the next block that is a candidate for allocation.
+ size_t m_emptyCursor { 0 }; // Points to the next block that is a candidate for empty allocation (allocating in empty blocks).
+ size_t m_unsweptCursor { 0 }; // Points to the next block that is a candidate for incremental sweeping.
+
+ MarkedBlock::Handle* m_currentBlock;
+ MarkedBlock::Handle* m_lastActiveBlock;
- m_freeList = m_lastActiveBlock->resumeAllocating();
- m_currentBlock = m_lastActiveBlock;
- m_lastActiveBlock = 0;
+ Lock m_lock;
+ unsigned m_cellSize;
+ AllocatorAttributes m_attributes;
+ // FIXME: All of these should probably be references.
+ // https://bugs.webkit.org/show_bug.cgi?id=166988
+ Heap* m_heap;
+ Subspace* m_subspace;
+ MarkedAllocator* m_nextAllocator { nullptr };
+ MarkedAllocator* m_nextAllocatorInSubspace { nullptr };
+};
+
+inline ptrdiff_t MarkedAllocator::offsetOfFreeList()
+{
+ return OBJECT_OFFSETOF(MarkedAllocator, m_freeList);
}
-template <typename Functor> inline void MarkedAllocator::forEachBlock(Functor& functor)
+inline ptrdiff_t MarkedAllocator::offsetOfCellSize()
{
- MarkedBlock* next;
- for (MarkedBlock* block = m_blockList.head(); block; block = next) {
- next = block->next();
- functor(block);
- }
+ return OBJECT_OFFSETOF(MarkedAllocator, m_cellSize);
}
} // namespace JSC
-
-#endif
diff --git a/Source/JavaScriptCore/heap/MarkedAllocatorInlines.h b/Source/JavaScriptCore/heap/MarkedAllocatorInlines.h
new file mode 100644
index 000000000..bd9d70729
--- /dev/null
+++ b/Source/JavaScriptCore/heap/MarkedAllocatorInlines.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "MarkedAllocator.h"
+
+namespace JSC {
+
+ALWAYS_INLINE void* MarkedAllocator::tryAllocate(GCDeferralContext* deferralContext)
+{
+ unsigned remaining = m_freeList.remaining;
+ if (remaining) {
+ unsigned cellSize = m_cellSize;
+ remaining -= cellSize;
+ m_freeList.remaining = remaining;
+ return m_freeList.payloadEnd - remaining - cellSize;
+ }
+
+ FreeCell* head = m_freeList.head;
+ if (UNLIKELY(!head))
+ return tryAllocateSlowCase(deferralContext);
+
+ m_freeList.head = head->next;
+ return head;
+}
+
+ALWAYS_INLINE void* MarkedAllocator::allocate(GCDeferralContext* deferralContext)
+{
+ unsigned remaining = m_freeList.remaining;
+ if (remaining) {
+ unsigned cellSize = m_cellSize;
+ remaining -= cellSize;
+ m_freeList.remaining = remaining;
+ return m_freeList.payloadEnd - remaining - cellSize;
+ }
+
+ FreeCell* head = m_freeList.head;
+ if (UNLIKELY(!head))
+ return allocateSlowCase(deferralContext);
+
+ m_freeList.head = head->next;
+ return head;
+}
+
+template <typename Functor> inline void MarkedAllocator::forEachBlock(const Functor& functor)
+{
+ m_live.forEachSetBit(
+ [&] (size_t index) {
+ functor(m_blocks[index]);
+ });
+}
+
+template <typename Functor> inline void MarkedAllocator::forEachNotEmptyBlock(const Functor& functor)
+{
+ m_markingNotEmpty.forEachSetBit(
+ [&] (size_t index) {
+ functor(m_blocks[index]);
+ });
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/MarkedBlock.cpp b/Source/JavaScriptCore/heap/MarkedBlock.cpp
index 674f45636..3e4aca2d3 100644
--- a/Source/JavaScriptCore/heap/MarkedBlock.cpp
+++ b/Source/JavaScriptCore/heap/MarkedBlock.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,236 +26,477 @@
#include "config.h"
#include "MarkedBlock.h"
-#include "DelayedReleaseScope.h"
-#include "IncrementalSweeper.h"
#include "JSCell.h"
#include "JSDestructibleObject.h"
-#include "Operations.h"
+#include "JSCInlines.h"
+#include "MarkedBlockInlines.h"
+#include "SuperSampler.h"
+#include "SweepingScope.h"
namespace JSC {
-MarkedBlock* MarkedBlock::create(DeadBlock* block, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
+const size_t MarkedBlock::blockSize;
+
+static const bool computeBalance = false;
+static size_t balance;
+
+MarkedBlock::Handle* MarkedBlock::tryCreate(Heap& heap)
{
- ASSERT(reinterpret_cast<size_t>(block) == (reinterpret_cast<size_t>(block) & blockMask));
- Region* region = block->region();
- return new (NotNull, block) MarkedBlock(region, allocator, cellSize, destructorType);
+ if (computeBalance) {
+ balance++;
+ if (!(balance % 10))
+ dataLog("MarkedBlock Balance: ", balance, "\n");
+ }
+ void* blockSpace = tryFastAlignedMalloc(blockSize, blockSize);
+ if (!blockSpace)
+ return nullptr;
+ if (scribbleFreeCells())
+ scribble(blockSpace, blockSize);
+ return new Handle(heap, blockSpace);
}
-MarkedBlock::MarkedBlock(Region* region, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
- : HeapBlock<MarkedBlock>(region)
- , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
- , m_endAtom((allocator->cellSize() ? atomsPerBlock : region->blockSize() / atomSize) - m_atomsPerCell + 1)
- , m_destructorType(destructorType)
- , m_allocator(allocator)
- , m_state(New) // All cells start out unmarked.
- , m_weakSet(allocator->heap()->vm())
+MarkedBlock::Handle::Handle(Heap& heap, void* blockSpace)
+ : m_weakSet(heap.vm(), CellContainer())
+ , m_newlyAllocatedVersion(MarkedSpace::nullVersion)
{
- ASSERT(allocator);
- HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+ m_block = new (NotNull, blockSpace) MarkedBlock(*heap.vm(), *this);
+
+ m_weakSet.setContainer(*m_block);
+
+ heap.didAllocateBlock(blockSize);
}
-inline void MarkedBlock::callDestructor(JSCell* cell)
+MarkedBlock::Handle::~Handle()
{
- // A previous eager sweep may already have run cell's destructor.
- if (cell->isZapped())
- return;
+ Heap& heap = *this->heap();
+ if (computeBalance) {
+ balance--;
+ if (!(balance % 10))
+ dataLog("MarkedBlock Balance: ", balance, "\n");
+ }
+ removeFromAllocator();
+ m_block->~MarkedBlock();
+ fastAlignedFree(m_block);
+ heap.didFreeBlock(blockSize);
+}
- cell->methodTableForDestruction()->destroy(cell);
- cell->zap();
+MarkedBlock::MarkedBlock(VM& vm, Handle& handle)
+ : m_markingVersion(MarkedSpace::nullVersion)
+ , m_handle(handle)
+ , m_vm(&vm)
+{
+ if (false)
+ dataLog(RawPointer(this), ": Allocated.\n");
}
-template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, MarkedBlock::DestructorType dtorType>
-MarkedBlock::FreeList MarkedBlock::specializedSweep()
+void MarkedBlock::Handle::unsweepWithNoNewlyAllocated()
{
- ASSERT(blockState != Allocated && blockState != FreeListed);
- ASSERT(!(dtorType == MarkedBlock::None && sweepMode == SweepOnly));
+ RELEASE_ASSERT(m_isFreeListed);
+ m_isFreeListed = false;
+}
- // This produces a free list that is ordered in reverse through the block.
- // This is fine, since the allocation code makes no assumptions about the
- // order of the free list.
- FreeCell* head = 0;
- size_t count = 0;
- for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
- if (blockState == Marked && (m_marks.get(i) || (m_newlyAllocated && m_newlyAllocated->get(i))))
- continue;
+void MarkedBlock::Handle::setIsFreeListed()
+{
+ m_allocator->setIsEmpty(NoLockingNecessary, this, false);
+ m_isFreeListed = true;
+}
- JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
+void MarkedBlock::Handle::stopAllocating(const FreeList& freeList)
+{
+ auto locker = holdLock(block().m_lock);
+
+ if (false)
+ dataLog(RawPointer(this), ": MarkedBlock::Handle::stopAllocating!\n");
+ ASSERT(!allocator()->isAllocated(NoLockingNecessary, this));
+
+ if (!isFreeListed()) {
+ if (false)
+ dataLog("There ain't no newly allocated.\n");
+ // This means that we either didn't use this block at all for allocation since last GC,
+ // or someone had already done stopAllocating() before.
+ ASSERT(freeList.allocationWillFail());
+ return;
+ }
+
+ if (false)
+ dataLog("Free list: ", freeList, "\n");
+
+ // Roll back to a coherent state for Heap introspection. Cells newly
+ // allocated from our free list are not currently marked, so we need another
+ // way to tell what's live vs dead.
+
+ m_newlyAllocated.clearAll();
+ m_newlyAllocatedVersion = heap()->objectSpace().newlyAllocatedVersion();
+
+ forEachCell(
+ [&] (HeapCell* cell, HeapCell::Kind) -> IterationStatus {
+ setNewlyAllocated(cell);
+ return IterationStatus::Continue;
+ });
+
+ forEachFreeCell(
+ freeList,
+ [&] (HeapCell* cell) {
+ if (false)
+ dataLog("Free cell: ", RawPointer(cell), "\n");
+ if (m_attributes.destruction == NeedsDestruction)
+ cell->zap();
+ clearNewlyAllocated(cell);
+ });
+
+ m_isFreeListed = false;
+}
- if (dtorType != MarkedBlock::None && blockState != New)
- callDestructor(cell);
+void MarkedBlock::Handle::lastChanceToFinalize()
+{
+ allocator()->setIsAllocated(NoLockingNecessary, this, false);
+ m_block->m_marks.clearAll();
+ m_block->clearHasAnyMarked();
+ m_block->m_markingVersion = heap()->objectSpace().markingVersion();
+ m_weakSet.lastChanceToFinalize();
+ m_newlyAllocated.clearAll();
+ m_newlyAllocatedVersion = heap()->objectSpace().newlyAllocatedVersion();
+ sweep();
+}
- if (sweepMode == SweepToFreeList) {
- FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
- freeCell->next = head;
- head = freeCell;
- ++count;
+FreeList MarkedBlock::Handle::resumeAllocating()
+{
+ {
+ auto locker = holdLock(block().m_lock);
+
+ if (false)
+ dataLog(RawPointer(this), ": MarkedBlock::Handle::resumeAllocating!\n");
+ ASSERT(!allocator()->isAllocated(NoLockingNecessary, this));
+ ASSERT(!isFreeListed());
+
+ if (!hasAnyNewlyAllocated()) {
+ if (false)
+ dataLog("There ain't no newly allocated.\n");
+ // This means we had already exhausted the block when we stopped allocation.
+ return FreeList();
}
}
- // We only want to discard the newlyAllocated bits if we're creating a FreeList,
- // otherwise we would lose information on what's currently alive.
- if (sweepMode == SweepToFreeList && m_newlyAllocated)
- m_newlyAllocated.clear();
+ // Re-create our free list from before stopping allocation. Note that this may return an empty
+ // freelist, in which case the block will still be Marked!
+ return sweep(SweepToFreeList);
+}
- m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Marked);
- return FreeList(head, count * cellSize());
+void MarkedBlock::Handle::zap(const FreeList& freeList)
+{
+ forEachFreeCell(
+ freeList,
+ [&] (HeapCell* cell) {
+ if (m_attributes.destruction == NeedsDestruction)
+ cell->zap();
+ });
}
-MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
+template<typename Func>
+void MarkedBlock::Handle::forEachFreeCell(const FreeList& freeList, const Func& func)
{
- ASSERT(DelayedReleaseScope::isInEffectFor(heap()->m_objectSpace));
- HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+ if (freeList.remaining) {
+ for (unsigned remaining = freeList.remaining; remaining; remaining -= cellSize())
+ func(bitwise_cast<HeapCell*>(freeList.payloadEnd - remaining));
+ } else {
+ for (FreeCell* current = freeList.head; current;) {
+ FreeCell* next = current->next;
+ func(bitwise_cast<HeapCell*>(current));
+ current = next;
+ }
+ }
+}
- m_weakSet.sweep();
+void MarkedBlock::aboutToMarkSlow(HeapVersion markingVersion)
+{
+ ASSERT(vm()->heap.objectSpace().isMarking());
+ LockHolder locker(m_lock);
+
+ if (!areMarksStale(markingVersion))
+ return;
+
+ MarkedAllocator* allocator = handle().allocator();
+
+ if (handle().allocator()->isAllocated(holdLock(allocator->bitvectorLock()), &handle())
+ || !marksConveyLivenessDuringMarking(markingVersion)) {
+ if (false)
+ dataLog(RawPointer(this), ": Clearing marks without doing anything else.\n");
+ // We already know that the block is full and is already recognized as such, or that the
+ // block did not survive the previous GC. So, we can clear mark bits the old fashioned
+ // way. Note that it's possible for such a block to have newlyAllocated with an up-to-
+ // date version! If it does, then we want to leave the newlyAllocated alone, since that
+ // means that we had allocated in this previously empty block but did not fill it up, so
+ // we created a newlyAllocated.
+ m_marks.clearAll();
+ } else {
+ if (false)
+ dataLog(RawPointer(this), ": Doing things.\n");
+ HeapVersion newlyAllocatedVersion = space()->newlyAllocatedVersion();
+ if (handle().m_newlyAllocatedVersion == newlyAllocatedVersion) {
+ // Merge the contents of marked into newlyAllocated. If we get the full set of bits
+ // then invalidate newlyAllocated and set allocated.
+ handle().m_newlyAllocated.mergeAndClear(m_marks);
+ } else {
+ // Replace the contents of newlyAllocated with marked. If we get the full set of
+ // bits then invalidate newlyAllocated and set allocated.
+ handle().m_newlyAllocated.setAndClear(m_marks);
+ }
+ handle().m_newlyAllocatedVersion = newlyAllocatedVersion;
+ }
+ clearHasAnyMarked();
+ WTF::storeStoreFence();
+ m_markingVersion = markingVersion;
+
+ // This means we're the first ones to mark any object in this block.
+ allocator->setIsMarkingNotEmpty(holdLock(allocator->bitvectorLock()), &handle(), true);
+}
- if (sweepMode == SweepOnly && m_destructorType == MarkedBlock::None)
- return FreeList();
+void MarkedBlock::Handle::resetAllocated()
+{
+ m_newlyAllocated.clearAll();
+ m_newlyAllocatedVersion = MarkedSpace::nullVersion;
+}
- if (m_destructorType == MarkedBlock::ImmortalStructure)
- return sweepHelper<MarkedBlock::ImmortalStructure>(sweepMode);
- if (m_destructorType == MarkedBlock::Normal)
- return sweepHelper<MarkedBlock::Normal>(sweepMode);
- return sweepHelper<MarkedBlock::None>(sweepMode);
+void MarkedBlock::resetMarks()
+{
+ // We want aboutToMarkSlow() to see what the mark bits were after the last collection. It uses
+ // the version number to distinguish between the marks having already been stale before
+ // beginMarking(), or just stale now that beginMarking() bumped the version. If we have a version
+ // wraparound, then we will call this method before resetting the version to null. When the
+ // version is null, aboutToMarkSlow() will assume that the marks were not stale as of before
+ // beginMarking(). Hence the need to whip the marks into shape.
+ if (areMarksStale())
+ m_marks.clearAll();
+ m_markingVersion = MarkedSpace::nullVersion;
}
-template<MarkedBlock::DestructorType dtorType>
-MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
+#if !ASSERT_DISABLED
+void MarkedBlock::assertMarksNotStale()
{
- switch (m_state) {
- case New:
- ASSERT(sweepMode == SweepToFreeList);
- return specializedSweep<New, SweepToFreeList, dtorType>();
- case FreeListed:
- // Happens when a block transitions to fully allocated.
- ASSERT(sweepMode == SweepToFreeList);
- return FreeList();
- case Allocated:
- RELEASE_ASSERT_NOT_REACHED();
- return FreeList();
- case Marked:
- return sweepMode == SweepToFreeList
- ? specializedSweep<Marked, SweepToFreeList, dtorType>()
- : specializedSweep<Marked, SweepOnly, dtorType>();
- }
+ ASSERT(m_markingVersion == vm()->heap.objectSpace().markingVersion());
+}
+#endif // !ASSERT_DISABLED
- RELEASE_ASSERT_NOT_REACHED();
- return FreeList();
+bool MarkedBlock::areMarksStale()
+{
+ return areMarksStale(vm()->heap.objectSpace().markingVersion());
}
-class SetNewlyAllocatedFunctor : public MarkedBlock::VoidFunctor {
-public:
- SetNewlyAllocatedFunctor(MarkedBlock* block)
- : m_block(block)
- {
- }
+bool MarkedBlock::Handle::areMarksStale()
+{
+ return m_block->areMarksStale();
+}
- void operator()(JSCell* cell)
- {
- ASSERT(MarkedBlock::blockFor(cell) == m_block);
- m_block->setNewlyAllocated(cell);
- }
+bool MarkedBlock::isMarked(const void* p)
+{
+ return isMarked(vm()->heap.objectSpace().markingVersion(), p);
+}
-private:
- MarkedBlock* m_block;
-};
+void MarkedBlock::Handle::didConsumeFreeList()
+{
+ auto locker = holdLock(block().m_lock);
+ if (false)
+ dataLog(RawPointer(this), ": MarkedBlock::Handle::didConsumeFreeList!\n");
+ ASSERT(isFreeListed());
+ m_isFreeListed = false;
+ allocator()->setIsAllocated(NoLockingNecessary, this, true);
+}
-void MarkedBlock::stopAllocating(const FreeList& freeList)
+size_t MarkedBlock::markCount()
{
- HEAP_LOG_BLOCK_STATE_TRANSITION(this);
- FreeCell* head = freeList.head;
+ return areMarksStale() ? 0 : m_marks.count();
+}
- if (m_state == Marked) {
- // If the block is in the Marked state then we know that:
- // 1) It was not used for allocation during the previous allocation cycle.
- // 2) It may have dead objects, and we only know them to be dead by the
- // fact that their mark bits are unset.
- // Hence if the block is Marked we need to leave it Marked.
-
- ASSERT(!head);
+bool MarkedBlock::Handle::isEmpty()
+{
+ return m_allocator->isEmpty(NoLockingNecessary, this);
+}
+
+void MarkedBlock::clearHasAnyMarked()
+{
+ m_biasedMarkCount = m_markCountBias;
+}
+
+void MarkedBlock::noteMarkedSlow()
+{
+ MarkedAllocator* allocator = handle().allocator();
+ allocator->setIsMarkingRetired(holdLock(allocator->bitvectorLock()), &handle(), true);
+}
+
+void MarkedBlock::Handle::removeFromAllocator()
+{
+ if (!m_allocator)
return;
- }
-
- ASSERT(m_state == FreeListed);
-
- // Roll back to a coherent state for Heap introspection. Cells newly
- // allocated from our free list are not currently marked, so we need another
- // way to tell what's live vs dead.
- ASSERT(!m_newlyAllocated);
- m_newlyAllocated = adoptPtr(new WTF::Bitmap<atomsPerBlock>());
+ m_allocator->removeBlock(this);
+}
- SetNewlyAllocatedFunctor functor(this);
- forEachCell(functor);
+void MarkedBlock::updateNeedsDestruction()
+{
+ m_needsDestruction = handle().needsDestruction();
+}
- FreeCell* next;
- for (FreeCell* current = head; current; current = next) {
- next = current->next;
- reinterpret_cast<JSCell*>(current)->zap();
- clearNewlyAllocated(current);
- }
+void MarkedBlock::Handle::didAddToAllocator(MarkedAllocator* allocator, size_t index)
+{
+ ASSERT(m_index == std::numeric_limits<size_t>::max());
+ ASSERT(!m_allocator);
+
+ m_index = index;
+ m_allocator = allocator;
+
+ size_t cellSize = allocator->cellSize();
+ m_atomsPerCell = (cellSize + atomSize - 1) / atomSize;
+ m_endAtom = atomsPerBlock - m_atomsPerCell + 1;
- m_state = Marked;
+ m_attributes = allocator->attributes();
+
+ if (m_attributes.cellKind != HeapCell::JSCell)
+ RELEASE_ASSERT(m_attributes.destruction == DoesNotNeedDestruction);
+
+ block().updateNeedsDestruction();
+
+ double markCountBias = -(Options::minMarkedBlockUtilization() * cellsPerBlock());
+
+ // The mark count bias should be comfortably within this range.
+ RELEASE_ASSERT(markCountBias > static_cast<double>(std::numeric_limits<int16_t>::min()));
+ RELEASE_ASSERT(markCountBias < 0);
+
+ // This means we haven't marked anything yet.
+ block().m_biasedMarkCount = block().m_markCountBias = static_cast<int16_t>(markCountBias);
}
-void MarkedBlock::clearMarks()
+void MarkedBlock::Handle::didRemoveFromAllocator()
{
-#if ENABLE(GGC)
- if (heap()->operationInProgress() == JSC::EdenCollection)
- this->clearMarksWithCollectionType<EdenCollection>();
- else
- this->clearMarksWithCollectionType<FullCollection>();
-#else
- this->clearMarksWithCollectionType<FullCollection>();
-#endif
+ ASSERT(m_index != std::numeric_limits<size_t>::max());
+ ASSERT(m_allocator);
+
+ m_index = std::numeric_limits<size_t>::max();
+ m_allocator = nullptr;
}
-void MarkedBlock::clearRememberedSet()
+bool MarkedBlock::Handle::isLive(const HeapCell* cell)
{
- m_rememberedSet.clearAll();
+ return isLive(space()->markingVersion(), space()->isMarking(), cell);
}
-template <HeapOperation collectionType>
-void MarkedBlock::clearMarksWithCollectionType()
+bool MarkedBlock::Handle::isLiveCell(const void* p)
{
- ASSERT(collectionType == FullCollection || collectionType == EdenCollection);
- HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+ return isLiveCell(space()->markingVersion(), space()->isMarking(), p);
+}
- ASSERT(m_state != New && m_state != FreeListed);
- if (collectionType == FullCollection) {
- m_marks.clearAll();
-#if ENABLE(GGC)
- m_rememberedSet.clearAll();
+#if !ASSERT_DISABLED
+void MarkedBlock::assertValidCell(VM& vm, HeapCell* cell) const
+{
+ RELEASE_ASSERT(&vm == this->vm());
+ RELEASE_ASSERT(const_cast<MarkedBlock*>(this)->handle().cellAlign(cell) == cell);
+}
#endif
- }
- // This will become true at the end of the mark phase. We set it now to
- // avoid an extra pass to do so later.
- m_state = Marked;
+void MarkedBlock::Handle::dumpState(PrintStream& out)
+{
+ CommaPrinter comma;
+ allocator()->forEachBitVectorWithName(
+ holdLock(allocator()->bitvectorLock()),
+ [&] (FastBitVector& bitvector, const char* name) {
+ out.print(comma, name, ":", bitvector[index()] ? "YES" : "no");
+ });
}
-void MarkedBlock::lastChanceToFinalize()
+Subspace* MarkedBlock::Handle::subspace() const
{
- m_weakSet.lastChanceToFinalize();
-
- clearNewlyAllocated();
- clearMarksWithCollectionType<FullCollection>();
- sweep();
+ return allocator()->subspace();
}
-MarkedBlock::FreeList MarkedBlock::resumeAllocating()
+FreeList MarkedBlock::Handle::sweep(SweepMode sweepMode)
{
- HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+ SweepingScope sweepingScope(*heap());
+
+ m_allocator->setIsUnswept(NoLockingNecessary, this, false);
+
+ m_weakSet.sweep();
- ASSERT(m_state == Marked);
+ if (sweepMode == SweepOnly && m_attributes.destruction == DoesNotNeedDestruction)
+ return FreeList();
- if (!m_newlyAllocated) {
- // We didn't have to create a "newly allocated" bitmap. That means we were already Marked
- // when we last stopped allocation, so return an empty free list and stay in the Marked state.
+ if (UNLIKELY(m_isFreeListed)) {
+ RELEASE_ASSERT(sweepMode == SweepToFreeList);
return FreeList();
}
+
+ ASSERT(!m_allocator->isAllocated(NoLockingNecessary, this));
+
+ if (space()->isMarking())
+ block().m_lock.lock();
+
+ if (m_attributes.destruction == NeedsDestruction)
+ return subspace()->finishSweep(*this, sweepMode);
+
+ // Handle the no-destructor specializations here, since we have the most of those. This
+ // ensures that they don't get re-specialized for every destructor space.
+
+ EmptyMode emptyMode = this->emptyMode();
+ ScribbleMode scribbleMode = this->scribbleMode();
+ NewlyAllocatedMode newlyAllocatedMode = this->newlyAllocatedMode();
+ MarksMode marksMode = this->marksMode();
+
+ FreeList result;
+ auto trySpecialized = [&] () -> bool {
+ if (sweepMode != SweepToFreeList)
+ return false;
+ if (scribbleMode != DontScribble)
+ return false;
+ if (newlyAllocatedMode != DoesNotHaveNewlyAllocated)
+ return false;
+
+ switch (emptyMode) {
+ case IsEmpty:
+ switch (marksMode) {
+ case MarksNotStale:
+ result = specializedSweep<true, IsEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale>(IsEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale, [] (VM&, JSCell*) { });
+ return true;
+ case MarksStale:
+ result = specializedSweep<true, IsEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale>(IsEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale, [] (VM&, JSCell*) { });
+ return true;
+ }
+ break;
+ case NotEmpty:
+ switch (marksMode) {
+ case MarksNotStale:
+                result = specializedSweep<true, NotEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale>(NotEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale, [] (VM&, JSCell*) { });
+ return true;
+ case MarksStale:
+                result = specializedSweep<true, NotEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale>(NotEmpty, SweepToFreeList, BlockHasNoDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale, [] (VM&, JSCell*) { });
+ return true;
+ }
+ break;
+ }
+
+ return false;
+ };
+
+ if (trySpecialized())
+ return result;
- // Re-create our free list from before stopping allocation.
- return sweep(SweepToFreeList);
+ // The template arguments don't matter because the first one is false.
+ return specializedSweep<false, IsEmpty, SweepOnly, BlockHasNoDestructors, DontScribble, HasNewlyAllocated, MarksStale>(emptyMode, sweepMode, BlockHasNoDestructors, scribbleMode, newlyAllocatedMode, marksMode, [] (VM&, JSCell*) { });
}
} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::MarkedBlock::Handle::SweepMode mode)
+{
+ switch (mode) {
+ case JSC::MarkedBlock::Handle::SweepToFreeList:
+ out.print("SweepToFreeList");
+ return;
+ case JSC::MarkedBlock::Handle::SweepOnly:
+ out.print("SweepOnly");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/heap/MarkedBlock.h b/Source/JavaScriptCore/heap/MarkedBlock.h
index 73f56cd72..b10594173 100644
--- a/Source/JavaScriptCore/heap/MarkedBlock.h
+++ b/Source/JavaScriptCore/heap/MarkedBlock.h
@@ -1,7 +1,7 @@
/*
* Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
* Copyright (C) 2001 Peter Kelly (pmk@post.com)
- * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2003-2017 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -19,473 +19,603 @@
*
*/
-#ifndef MarkedBlock_h
-#define MarkedBlock_h
+#pragma once
-#include "BlockAllocator.h"
-#include "HeapBlock.h"
-
-#include "HeapOperation.h"
+#include "AllocatorAttributes.h"
+#include "DestructionMode.h"
+#include "FreeList.h"
+#include "HeapCell.h"
+#include "IterationStatus.h"
#include "WeakSet.h"
+#include <wtf/Atomics.h>
#include <wtf/Bitmap.h>
#include <wtf/DataLog.h>
#include <wtf/DoublyLinkedList.h>
#include <wtf/HashFunctions.h>
-#include <wtf/PageAllocationAligned.h>
#include <wtf/StdLibExtras.h>
-#include <wtf/Vector.h>
-
-// Set to log state transitions of blocks.
-#define HEAP_LOG_BLOCK_STATE_TRANSITIONS 0
-
-#if HEAP_LOG_BLOCK_STATE_TRANSITIONS
-#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do { \
- dataLogF( \
- "%s:%d %s: block %s = %p, %d\n", \
- __FILE__, __LINE__, __FUNCTION__, \
- #block, (block), (block)->m_state); \
- } while (false)
-#else
-#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) ((void)0)
-#endif
namespace JSC {
- class Heap;
- class JSCell;
- class MarkedAllocator;
-
- typedef uintptr_t Bits;
-
- static const size_t MB = 1024 * 1024;
+class Heap;
+class JSCell;
+class MarkedAllocator;
+class MarkedSpace;
+class SlotVisitor;
+class Subspace;
+
+typedef uintptr_t Bits;
+typedef uint32_t HeapVersion;
+
+// A marked block is a page-aligned container for heap-allocated objects.
+// Objects are allocated within cells of the marked block. For a given
+// marked block, all cells have the same size. Objects smaller than the
+// cell size may be allocated in the marked block, in which case the
+// allocation suffers from internal fragmentation: wasted space whose
+// size is equal to the difference between the cell size and the object
+// size.
+
+class MarkedBlock {
+ WTF_MAKE_NONCOPYABLE(MarkedBlock);
+ friend class LLIntOffsetsExtractor;
+ friend struct VerifyMarked;
+
+public:
+ class Handle;
+private:
+ friend class Handle;
+public:
+ static const size_t atomSize = 16; // bytes
+ static const size_t blockSize = 16 * KB;
+ static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.
+
+ static const size_t atomsPerBlock = blockSize / atomSize;
+
+ static_assert(!(MarkedBlock::atomSize & (MarkedBlock::atomSize - 1)), "MarkedBlock::atomSize must be a power of two.");
+ static_assert(!(MarkedBlock::blockSize & (MarkedBlock::blockSize - 1)), "MarkedBlock::blockSize must be a power of two.");
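As a concrete instance of the internal-fragmentation note in the class comment above (the 40-byte object size is only an example; the rounding mirrors how atoms per cell is computed elsewhere in this patch):

    #include <cstddef>

    static const size_t exampleAtomSize = 16;   // same value as MarkedBlock::atomSize
    static const size_t exampleObjectSize = 40; // hypothetical object size

    // Cells are whole atoms: a 40-byte object is placed in a 3-atom (48-byte) cell,
    // so each such object wastes 48 - 40 = 8 bytes to internal fragmentation.
    static const size_t exampleAtomsPerCell = (exampleObjectSize + exampleAtomSize - 1) / exampleAtomSize; // 3
    static const size_t exampleCellSize = exampleAtomsPerCell * exampleAtomSize;                           // 48
    static const size_t exampleWastePerObject = exampleCellSize - exampleObjectSize;                       // 8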
- bool isZapped(const JSCell*);
+ struct VoidFunctor {
+ typedef void ReturnType;
+ void returnValue() { }
+ };
- // A marked block is a page-aligned container for heap-allocated objects.
- // Objects are allocated within cells of the marked block. For a given
- // marked block, all cells have the same size. Objects smaller than the
- // cell size may be allocated in the marked block, in which case the
- // allocation suffers from internal fragmentation: wasted space whose
- // size is equal to the difference between the cell size and the object
- // size.
-
- class MarkedBlock : public HeapBlock<MarkedBlock> {
- friend class LLIntOffsetsExtractor;
-
+ class CountFunctor {
public:
- static const size_t atomSize = 16; // bytes
- static const size_t atomShiftAmount = 4; // log_2(atomSize) FIXME: Change atomSize to 16.
- static const size_t blockSize = 64 * KB;
- static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.
-
- static const size_t atomsPerBlock = blockSize / atomSize;
- static const size_t atomMask = atomsPerBlock - 1;
+ typedef size_t ReturnType;
- static const size_t markByteShiftAmount = 3; // log_2(word size for m_marks) FIXME: Change word size for m_marks to uint8_t.
+ CountFunctor() : m_count(0) { }
+ void count(size_t count) const { m_count += count; }
+ ReturnType returnValue() const { return m_count; }
- struct FreeCell {
- FreeCell* next;
- };
+ private:
+ // FIXME: This is mutable because we're using a functor rather than C++ lambdas.
+ // https://bugs.webkit.org/show_bug.cgi?id=159644
+ mutable ReturnType m_count;
+ };
- struct FreeList {
- FreeCell* head;
- size_t bytes;
-
- FreeList();
- FreeList(FreeCell*, size_t);
- };
-
- struct VoidFunctor {
- typedef void ReturnType;
- void returnValue() { }
- };
-
- class CountFunctor {
- public:
- typedef size_t ReturnType;
-
- CountFunctor() : m_count(0) { }
- void count(size_t count) { m_count += count; }
- ReturnType returnValue() { return m_count; }
-
- private:
- ReturnType m_count;
- };
-
- enum DestructorType { None, ImmortalStructure, Normal };
- static MarkedBlock* create(DeadBlock*, MarkedAllocator*, size_t cellSize, DestructorType);
+ class Handle {
+ WTF_MAKE_NONCOPYABLE(Handle);
+ WTF_MAKE_FAST_ALLOCATED;
+ friend class LLIntOffsetsExtractor;
+ friend class MarkedBlock;
+ friend struct VerifyMarked;
+ public:
+
+ ~Handle();
+
+ MarkedBlock& block();
+
+ void* cellAlign(void*);
+
+ bool isEmpty();
- static bool isAtomAligned(const void*);
- static MarkedBlock* blockFor(const void*);
- static size_t firstAtom();
-
void lastChanceToFinalize();
MarkedAllocator* allocator() const;
+ Subspace* subspace() const;
Heap* heap() const;
+ inline MarkedSpace* space() const;
VM* vm() const;
WeakSet& weakSet();
-
+
+ // Sweeping ensures that destructors get called and removes the block from the unswept
+ // set. Sweeping to free list also removes the block from the empty set, if it was in that
+ // set. Sweeping with SweepOnly may add this block to the empty set, if the block is found
+ // to be empty.
+ //
+ // Note that you need to make sure that the empty bit reflects reality. If it's not set
+ // and the block is freshly created, then we'll make the mistake of running destructors in
+ // the block. If it's not set and the block has nothing marked, then we'll make the
+ // mistake of making a pop freelist rather than a bump freelist.
enum SweepMode { SweepOnly, SweepToFreeList };
FreeList sweep(SweepMode = SweepOnly);
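The "bump freelist" versus "pop freelist" distinction in the comment above corresponds to the two fast paths of MarkedAllocator::tryAllocate() in this patch. Roughly, ignoring the slow path and using simplified types rather than the actual FreeList class:

    struct ExampleFreeCell { ExampleFreeCell* next; };

    struct ExampleFreeList {
        ExampleFreeCell* head { nullptr }; // pop representation: chained dead cells
        char* payloadEnd { nullptr };      // bump representation: contiguous empty payload
        unsigned remaining { 0 };          // bytes still available for bump allocation

        void* tryAllocate(unsigned cellSize)
        {
            if (remaining) { // bump: the block was empty, so cells are handed out contiguously
                remaining -= cellSize;
                return payloadEnd - remaining - cellSize;
            }
            if (!head)       // exhausted; the real allocator falls back to a slow case here
                return nullptr;
            void* result = head; // pop: follow the chain built by sweeping a non-empty block
            head = head->next;
            return result;
        }
    };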
-
+
+ // This is to be called by Subspace.
+ template<typename DestroyFunc>
+ FreeList finishSweepKnowingSubspace(SweepMode, const DestroyFunc&);
+
+ void unsweepWithNoNewlyAllocated();
+
+ void zap(const FreeList&);
+
void shrink();
-
- void visitWeakSet(HeapRootVisitor&);
+
+ void visitWeakSet(SlotVisitor&);
void reapWeakSet();
-
+
// While allocating from a free list, MarkedBlock temporarily has bogus
// cell liveness data. To restore accurate cell liveness data, call one
// of these functions:
void didConsumeFreeList(); // Call this once you've allocated all the items in the free list.
void stopAllocating(const FreeList&);
FreeList resumeAllocating(); // Call this if you canonicalized a block for some non-collection related purpose.
- void didConsumeEmptyFreeList(); // Call this if you sweep a block, but the returned FreeList is empty.
- void didSweepToNoAvail(); // Call this if you sweep a block and get an empty free list back.
-
- // Returns true if the "newly allocated" bitmap was non-null
- // and was successfully cleared and false otherwise.
- bool clearNewlyAllocated();
- void clearMarks();
- void clearRememberedSet();
- template <HeapOperation collectionType>
- void clearMarksWithCollectionType();
-
- size_t markCount();
- bool isEmpty();
-
+
size_t cellSize();
- DestructorType destructorType();
-
+ inline unsigned cellsPerBlock();
+
+ const AllocatorAttributes& attributes() const;
+ DestructionMode destruction() const;
+ bool needsDestruction() const;
+ HeapCell::Kind cellKind() const;
+
+ size_t markCount();
size_t size();
- size_t capacity();
+
+ inline bool isLive(HeapVersion markingVersion, bool isMarking, const HeapCell*);
+ inline bool isLiveCell(HeapVersion markingVersion, bool isMarking, const void*);
- bool isMarked(const void*);
- bool testAndSetMarked(const void*);
- bool isLive(const JSCell*);
+ bool isLive(const HeapCell*);
bool isLiveCell(const void*);
- void setMarked(const void*);
- void clearMarked(const void*);
-
- void setRemembered(const void*);
- void clearRemembered(const void*);
- void atomicClearRemembered(const void*);
- bool isRemembered(const void*);
bool isNewlyAllocated(const void*);
void setNewlyAllocated(const void*);
void clearNewlyAllocated(const void*);
-
- bool needsSweeping();
-
- template <typename Functor> void forEachCell(Functor&);
- template <typename Functor> void forEachLiveCell(Functor&);
- template <typename Functor> void forEachDeadCell(Functor&);
-
- static ptrdiff_t offsetOfMarks() { return OBJECT_OFFSETOF(MarkedBlock, m_marks); }
-
+
+ HeapVersion newlyAllocatedVersion() const { return m_newlyAllocatedVersion; }
+
+ inline bool isNewlyAllocatedStale() const;
+
+ inline bool hasAnyNewlyAllocated();
+ void resetAllocated();
+
+ template <typename Functor> IterationStatus forEachCell(const Functor&);
+ template <typename Functor> inline IterationStatus forEachLiveCell(const Functor&);
+ template <typename Functor> inline IterationStatus forEachDeadCell(const Functor&);
+ template <typename Functor> inline IterationStatus forEachMarkedCell(const Functor&);
+
+ JS_EXPORT_PRIVATE bool areMarksStale();
+
+ void assertMarksNotStale();
+
+ bool isFreeListed() const { return m_isFreeListed; }
+
+ size_t index() const { return m_index; }
+
+ void removeFromAllocator();
+
+ void didAddToAllocator(MarkedAllocator*, size_t index);
+ void didRemoveFromAllocator();
+
+ void dumpState(PrintStream&);
+
private:
- static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two.
-
- enum BlockState { New, FreeListed, Allocated, Marked };
- template<DestructorType> FreeList sweepHelper(SweepMode = SweepOnly);
-
- typedef char Atom[atomSize];
+ Handle(Heap&, void*);
+
+ enum SweepDestructionMode { BlockHasNoDestructors, BlockHasDestructors, BlockHasDestructorsAndCollectorIsRunning };
+ enum ScribbleMode { DontScribble, Scribble };
+ enum EmptyMode { IsEmpty, NotEmpty };
+ enum NewlyAllocatedMode { HasNewlyAllocated, DoesNotHaveNewlyAllocated };
+ enum MarksMode { MarksStale, MarksNotStale };
+
+ SweepDestructionMode sweepDestructionMode();
+ EmptyMode emptyMode();
+ ScribbleMode scribbleMode();
+ NewlyAllocatedMode newlyAllocatedMode();
+ MarksMode marksMode();
+
+ template<bool, EmptyMode, SweepMode, SweepDestructionMode, ScribbleMode, NewlyAllocatedMode, MarksMode, typename DestroyFunc>
+ FreeList specializedSweep(EmptyMode, SweepMode, SweepDestructionMode, ScribbleMode, NewlyAllocatedMode, MarksMode, const DestroyFunc&);
+
+ template<typename Func>
+ void forEachFreeCell(const FreeList&, const Func&);
+
+ void setIsFreeListed();
+
+ MarkedBlock::Handle* m_prev;
+ MarkedBlock::Handle* m_next;
+
+ size_t m_atomsPerCell { std::numeric_limits<size_t>::max() };
+ size_t m_endAtom { std::numeric_limits<size_t>::max() }; // This is a fuzzy end. Always test for < m_endAtom.
+
+ WTF::Bitmap<atomsPerBlock> m_newlyAllocated;
+
+ AllocatorAttributes m_attributes;
+ bool m_isFreeListed { false };
+
+ MarkedAllocator* m_allocator { nullptr };
+ size_t m_index { std::numeric_limits<size_t>::max() };
+ WeakSet m_weakSet;
+
+ HeapVersion m_newlyAllocatedVersion;
+
+ MarkedBlock* m_block { nullptr };
+ };
+
+ static MarkedBlock::Handle* tryCreate(Heap&);
+
+ Handle& handle();
+
+ VM* vm() const;
+ inline Heap* heap() const;
+ inline MarkedSpace* space() const;
+
+ static bool isAtomAligned(const void*);
+ static MarkedBlock* blockFor(const void*);
+ static size_t firstAtom();
+ size_t atomNumber(const void*);
+
+ size_t markCount();
- MarkedBlock(Region*, MarkedAllocator*, size_t cellSize, DestructorType);
- Atom* atoms();
- size_t atomNumber(const void*);
- void callDestructor(JSCell*);
- template<BlockState, SweepMode, DestructorType> FreeList specializedSweep();
+ bool isMarked(const void*);
+ bool isMarked(HeapVersion markingVersion, const void*);
+ bool isMarkedConcurrently(HeapVersion markingVersion, const void*);
+ bool testAndSetMarked(const void*);
- size_t m_atomsPerCell;
- size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
-#if ENABLE(PARALLEL_GC)
- WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic, uint8_t> m_marks;
- WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic, uint8_t> m_rememberedSet;
+ bool isAtom(const void*);
+ void clearMarked(const void*);
+
+ size_t cellSize();
+ const AllocatorAttributes& attributes() const;
+
+ bool hasAnyMarked() const;
+ void noteMarked();
+#if ASSERT_DISABLED
+ void assertValidCell(VM&, HeapCell*) const { }
#else
- WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic, uint8_t> m_marks;
- WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic, uint8_t> m_rememberedSet;
+ void assertValidCell(VM&, HeapCell*) const;
#endif
- OwnPtr<WTF::Bitmap<atomsPerBlock>> m_newlyAllocated;
+
+ WeakSet& weakSet();
- DestructorType m_destructorType;
- MarkedAllocator* m_allocator;
- BlockState m_state;
- WeakSet m_weakSet;
+ JS_EXPORT_PRIVATE bool areMarksStale();
+ bool areMarksStale(HeapVersion markingVersion);
+ struct MarksWithDependency {
+ bool areStale;
+ ConsumeDependency dependency;
};
+ MarksWithDependency areMarksStaleWithDependency(HeapVersion markingVersion);
+
+ void aboutToMark(HeapVersion markingVersion);
+
+ void assertMarksNotStale();
+
+ bool needsDestruction() const { return m_needsDestruction; }
+
+ // This is usually a no-op, and we use it as a no-op that touches the page in isPagedOut().
+ void updateNeedsDestruction();
+
+ void resetMarks();
+
+ bool isMarkedRaw(const void* p);
+ HeapVersion markingVersion() const { return m_markingVersion; }
+
+private:
+ static const size_t atomAlignmentMask = atomSize - 1;
- inline MarkedBlock::FreeList::FreeList()
- : head(0)
- , bytes(0)
- {
- }
-
- inline MarkedBlock::FreeList::FreeList(FreeCell* head, size_t bytes)
- : head(head)
- , bytes(bytes)
- {
- }
-
- inline size_t MarkedBlock::firstAtom()
- {
- return WTF::roundUpToMultipleOf<atomSize>(sizeof(MarkedBlock)) / atomSize;
- }
-
- inline MarkedBlock::Atom* MarkedBlock::atoms()
- {
- return reinterpret_cast<Atom*>(this);
- }
-
- inline bool MarkedBlock::isAtomAligned(const void* p)
- {
- return !(reinterpret_cast<Bits>(p) & atomAlignmentMask);
- }
-
- inline MarkedBlock* MarkedBlock::blockFor(const void* p)
- {
- return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask);
- }
-
- inline MarkedAllocator* MarkedBlock::allocator() const
- {
- return m_allocator;
- }
-
- inline Heap* MarkedBlock::heap() const
- {
- return m_weakSet.heap();
- }
-
- inline VM* MarkedBlock::vm() const
- {
- return m_weakSet.vm();
- }
-
- inline WeakSet& MarkedBlock::weakSet()
- {
- return m_weakSet;
- }
-
- inline void MarkedBlock::shrink()
- {
- m_weakSet.shrink();
- }
-
- inline void MarkedBlock::visitWeakSet(HeapRootVisitor& heapRootVisitor)
- {
- m_weakSet.visit(heapRootVisitor);
- }
-
- inline void MarkedBlock::reapWeakSet()
- {
- m_weakSet.reap();
- }
-
- inline void MarkedBlock::didConsumeFreeList()
- {
- HEAP_LOG_BLOCK_STATE_TRANSITION(this);
-
- ASSERT(m_state == FreeListed);
- m_state = Allocated;
- }
-
- inline void MarkedBlock::didConsumeEmptyFreeList()
- {
- HEAP_LOG_BLOCK_STATE_TRANSITION(this);
-
- ASSERT(!m_newlyAllocated);
- ASSERT(m_state == FreeListed);
- m_state = Marked;
- }
-
- inline size_t MarkedBlock::markCount()
- {
- return m_marks.count();
- }
-
- inline bool MarkedBlock::isEmpty()
- {
- return m_marks.isEmpty() && m_weakSet.isEmpty() && (!m_newlyAllocated || m_newlyAllocated->isEmpty());
- }
-
- inline size_t MarkedBlock::cellSize()
- {
- return m_atomsPerCell * atomSize;
- }
-
- inline MarkedBlock::DestructorType MarkedBlock::destructorType()
- {
- return m_destructorType;
- }
-
- inline size_t MarkedBlock::size()
- {
- return markCount() * cellSize();
- }
-
- inline size_t MarkedBlock::capacity()
- {
- return region()->blockSize();
- }
-
- inline size_t MarkedBlock::atomNumber(const void* p)
- {
- return (reinterpret_cast<Bits>(p) - reinterpret_cast<Bits>(this)) / atomSize;
- }
-
- inline void MarkedBlock::setRemembered(const void* p)
- {
- m_rememberedSet.set(atomNumber(p));
- }
-
- inline void MarkedBlock::clearRemembered(const void* p)
- {
- m_rememberedSet.clear(atomNumber(p));
- }
-
- inline void MarkedBlock::atomicClearRemembered(const void* p)
- {
- m_rememberedSet.concurrentTestAndClear(atomNumber(p));
- }
-
- inline bool MarkedBlock::isRemembered(const void* p)
- {
- return m_rememberedSet.get(atomNumber(p));
- }
-
- inline bool MarkedBlock::isMarked(const void* p)
- {
- return m_marks.get(atomNumber(p));
- }
-
- inline bool MarkedBlock::testAndSetMarked(const void* p)
- {
- return m_marks.concurrentTestAndSet(atomNumber(p));
- }
-
- inline void MarkedBlock::setMarked(const void* p)
- {
- m_marks.set(atomNumber(p));
- }
-
- inline void MarkedBlock::clearMarked(const void* p)
- {
- ASSERT(m_marks.get(atomNumber(p)));
- m_marks.clear(atomNumber(p));
- }
-
- inline bool MarkedBlock::isNewlyAllocated(const void* p)
- {
- return m_newlyAllocated->get(atomNumber(p));
- }
-
- inline void MarkedBlock::setNewlyAllocated(const void* p)
- {
- m_newlyAllocated->set(atomNumber(p));
- }
+ typedef char Atom[atomSize];
- inline void MarkedBlock::clearNewlyAllocated(const void* p)
- {
- m_newlyAllocated->clear(atomNumber(p));
- }
+ MarkedBlock(VM&, Handle&);
+ Atom* atoms();
+
+ void aboutToMarkSlow(HeapVersion markingVersion);
+ void clearHasAnyMarked();
+
+ void noteMarkedSlow();
+
+ inline bool marksConveyLivenessDuringMarking(HeapVersion markingVersion);
+
+ WTF::Bitmap<atomsPerBlock> m_marks;
- inline bool MarkedBlock::clearNewlyAllocated()
- {
- if (m_newlyAllocated) {
- m_newlyAllocated.clear();
- return true;
- }
+ bool m_needsDestruction;
+ Lock m_lock;
+
+ // The actual mark count can be computed by doing: m_biasedMarkCount - m_markCountBias. Note
+ // that this count is racy. It will accurately detect whether or not exactly zero things were
+ // marked, but if N things got marked, then this may report anything in the range [1, N] (or
+    // before unbiasing, it would be [1 + m_markCountBias, N + m_markCountBias].)
+ int16_t m_biasedMarkCount;
+
+ // We bias the mark count so that if m_biasedMarkCount >= 0 then the block should be retired.
+ // We go to all this trouble to make marking a bit faster: this way, marking knows when to
+ // retire a block using a js/jns on m_biasedMarkCount.
+ //
+ // For example, if a block has room for 100 objects and retirement happens whenever 90% are
+ // live, then m_markCountBias will be -90. This way, when marking begins, this will cause us to
+ // set m_biasedMarkCount to -90 as well, since:
+ //
+ // m_biasedMarkCount = actualMarkCount + m_markCountBias.
+ //
+ // Marking an object will increment m_biasedMarkCount. Once 90 objects get marked, we will have
+ // m_biasedMarkCount = 0, which will trigger retirement. In other words, we want to set
+ // m_markCountBias like so:
+ //
+ // m_markCountBias = -(minMarkedBlockUtilization * cellsPerBlock)
+ //
+ // All of this also means that you can detect if any objects are marked by doing:
+ //
+ // m_biasedMarkCount != m_markCountBias
+ int16_t m_markCountBias;
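Plugging illustrative numbers into the formulas above (100 cells per block and a 90% retirement threshold are assumptions for this example, not values taken from the patch):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        int16_t markCountBias = -90;              // -(0.9 * 100), as computed in didAddToAllocator()
        int16_t biasedMarkCount = markCountBias;  // what clearHasAnyMarked() establishes

        for (int marked = 0; marked < 90; ++marked)
            ++biasedMarkCount;                    // noteMarked(), once per marked object

        assert(biasedMarkCount == 0);             // crosses zero exactly at the retirement threshold
        assert(biasedMarkCount != markCountBias); // the "any objects marked" test from the comment
        return 0;
    }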
+
+ HeapVersion m_markingVersion;
+
+ Handle& m_handle;
+ VM* m_vm;
+};
+
+inline MarkedBlock::Handle& MarkedBlock::handle()
+{
+ return m_handle;
+}
+
+inline MarkedBlock& MarkedBlock::Handle::block()
+{
+ return *m_block;
+}
+
+inline size_t MarkedBlock::firstAtom()
+{
+ return WTF::roundUpToMultipleOf<atomSize>(sizeof(MarkedBlock)) / atomSize;
+}
+
+inline MarkedBlock::Atom* MarkedBlock::atoms()
+{
+ return reinterpret_cast<Atom*>(this);
+}
+
+inline bool MarkedBlock::isAtomAligned(const void* p)
+{
+ return !(reinterpret_cast<Bits>(p) & atomAlignmentMask);
+}
+
+inline void* MarkedBlock::Handle::cellAlign(void* p)
+{
+ Bits base = reinterpret_cast<Bits>(block().atoms() + firstAtom());
+ Bits bits = reinterpret_cast<Bits>(p);
+ bits -= base;
+ bits -= bits % cellSize();
+ bits += base;
+ return reinterpret_cast<void*>(bits);
+}
+
+inline MarkedBlock* MarkedBlock::blockFor(const void* p)
+{
+ return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask);
+}
+
+inline MarkedAllocator* MarkedBlock::Handle::allocator() const
+{
+ return m_allocator;
+}
+
+inline Heap* MarkedBlock::Handle::heap() const
+{
+ return m_weakSet.heap();
+}
+
+inline VM* MarkedBlock::Handle::vm() const
+{
+ return m_weakSet.vm();
+}
+
+inline VM* MarkedBlock::vm() const
+{
+ return m_vm;
+}
+
+inline WeakSet& MarkedBlock::Handle::weakSet()
+{
+ return m_weakSet;
+}
+
+inline WeakSet& MarkedBlock::weakSet()
+{
+ return m_handle.weakSet();
+}
+
+inline void MarkedBlock::Handle::shrink()
+{
+ m_weakSet.shrink();
+}
+
+inline void MarkedBlock::Handle::visitWeakSet(SlotVisitor& visitor)
+{
+ return m_weakSet.visit(visitor);
+}
+
+inline void MarkedBlock::Handle::reapWeakSet()
+{
+ m_weakSet.reap();
+}
+
+inline size_t MarkedBlock::Handle::cellSize()
+{
+ return m_atomsPerCell * atomSize;
+}
+
+inline size_t MarkedBlock::cellSize()
+{
+ return m_handle.cellSize();
+}
+
+inline const AllocatorAttributes& MarkedBlock::Handle::attributes() const
+{
+ return m_attributes;
+}
+
+inline const AllocatorAttributes& MarkedBlock::attributes() const
+{
+ return m_handle.attributes();
+}
+
+inline bool MarkedBlock::Handle::needsDestruction() const
+{
+ return m_attributes.destruction == NeedsDestruction;
+}
+
+inline DestructionMode MarkedBlock::Handle::destruction() const
+{
+ return m_attributes.destruction;
+}
+
+inline HeapCell::Kind MarkedBlock::Handle::cellKind() const
+{
+ return m_attributes.cellKind;
+}
+
+inline size_t MarkedBlock::Handle::markCount()
+{
+ return m_block->markCount();
+}
+
+inline size_t MarkedBlock::Handle::size()
+{
+ return markCount() * cellSize();
+}
+
+inline size_t MarkedBlock::atomNumber(const void* p)
+{
+ return (reinterpret_cast<Bits>(p) - reinterpret_cast<Bits>(this)) / atomSize;
+}
+
+inline bool MarkedBlock::areMarksStale(HeapVersion markingVersion)
+{
+ return markingVersion != m_markingVersion;
+}
+
+ALWAYS_INLINE MarkedBlock::MarksWithDependency MarkedBlock::areMarksStaleWithDependency(HeapVersion markingVersion)
+{
+ auto consumed = consumeLoad(&m_markingVersion);
+ MarksWithDependency ret;
+ ret.areStale = consumed.value != markingVersion;
+ ret.dependency = consumed.dependency;
+ return ret;
+}
+
+inline void MarkedBlock::aboutToMark(HeapVersion markingVersion)
+{
+ if (UNLIKELY(areMarksStale(markingVersion)))
+ aboutToMarkSlow(markingVersion);
+ WTF::loadLoadFence();
+}
+
+#if ASSERT_DISABLED
+inline void MarkedBlock::assertMarksNotStale()
+{
+}
+#endif // ASSERT_DISABLED
+
+inline void MarkedBlock::Handle::assertMarksNotStale()
+{
+ block().assertMarksNotStale();
+}
+
+inline bool MarkedBlock::isMarkedRaw(const void* p)
+{
+ return m_marks.get(atomNumber(p));
+}
+
+inline bool MarkedBlock::isMarked(HeapVersion markingVersion, const void* p)
+{
+ return areMarksStale(markingVersion) ? false : isMarkedRaw(p);
+}
+
+inline bool MarkedBlock::isMarkedConcurrently(HeapVersion markingVersion, const void* p)
+{
+ auto marksWithDependency = areMarksStaleWithDependency(markingVersion);
+ if (marksWithDependency.areStale)
return false;
- }
-
- inline bool MarkedBlock::isLive(const JSCell* cell)
- {
- switch (m_state) {
- case Allocated:
- return true;
-
- case Marked:
- return m_marks.get(atomNumber(cell)) || (m_newlyAllocated && isNewlyAllocated(cell));
-
- case New:
- case FreeListed:
- RELEASE_ASSERT_NOT_REACHED();
- return false;
- }
-
- RELEASE_ASSERT_NOT_REACHED();
+ return m_marks.get(atomNumber(p) + marksWithDependency.dependency);
+}
+
+inline bool MarkedBlock::testAndSetMarked(const void* p)
+{
+ assertMarksNotStale();
+ return m_marks.concurrentTestAndSet(atomNumber(p));
+}
+
+inline bool MarkedBlock::Handle::isNewlyAllocated(const void* p)
+{
+ return m_newlyAllocated.get(m_block->atomNumber(p));
+}
+
+inline void MarkedBlock::Handle::setNewlyAllocated(const void* p)
+{
+ m_newlyAllocated.set(m_block->atomNumber(p));
+}
+
+inline void MarkedBlock::Handle::clearNewlyAllocated(const void* p)
+{
+ m_newlyAllocated.clear(m_block->atomNumber(p));
+}
+
+inline bool MarkedBlock::isAtom(const void* p)
+{
+ ASSERT(MarkedBlock::isAtomAligned(p));
+ size_t atomNumber = this->atomNumber(p);
+ size_t firstAtom = MarkedBlock::firstAtom();
+ if (atomNumber < firstAtom) // Filters pointers into MarkedBlock metadata.
return false;
- }
-
- inline bool MarkedBlock::isLiveCell(const void* p)
- {
- ASSERT(MarkedBlock::isAtomAligned(p));
- size_t atomNumber = this->atomNumber(p);
- size_t firstAtom = this->firstAtom();
- if (atomNumber < firstAtom) // Filters pointers into MarkedBlock metadata.
- return false;
- if ((atomNumber - firstAtom) % m_atomsPerCell) // Filters pointers into cell middles.
- return false;
- if (atomNumber >= m_endAtom) // Filters pointers into invalid cells out of the range.
- return false;
-
- return isLive(static_cast<const JSCell*>(p));
- }
-
- template <typename Functor> inline void MarkedBlock::forEachCell(Functor& functor)
- {
- for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
- JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
- functor(cell);
- }
- }
-
- template <typename Functor> inline void MarkedBlock::forEachLiveCell(Functor& functor)
- {
- for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
- JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
- if (!isLive(cell))
- continue;
-
- functor(cell);
- }
- }
+ if ((atomNumber - firstAtom) % m_handle.m_atomsPerCell) // Filters pointers into cell middles.
+ return false;
+ if (atomNumber >= m_handle.m_endAtom) // Filters pointers into invalid cells out of the range.
+ return false;
+ return true;
+}
+
+template <typename Functor>
+inline IterationStatus MarkedBlock::Handle::forEachCell(const Functor& functor)
+{
+ HeapCell::Kind kind = m_attributes.cellKind;
+ for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
+ HeapCell* cell = reinterpret_cast_ptr<HeapCell*>(&m_block->atoms()[i]);
+ if (functor(cell, kind) == IterationStatus::Done)
+ return IterationStatus::Done;
+ }
+ return IterationStatus::Continue;
+}
+
+inline bool MarkedBlock::hasAnyMarked() const
+{
+ return m_biasedMarkCount != m_markCountBias;
+}
+
+inline void MarkedBlock::noteMarked()
+{
+ // This is racy by design. We don't want to pay the price of an atomic increment!
+ int16_t biasedMarkCount = m_biasedMarkCount;
+ ++biasedMarkCount;
+ m_biasedMarkCount = biasedMarkCount;
+ if (UNLIKELY(!biasedMarkCount))
+ noteMarkedSlow();
+}
- template <typename Functor> inline void MarkedBlock::forEachDeadCell(Functor& functor)
- {
- for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
- JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
- if (isLive(cell))
- continue;
+} // namespace JSC
- functor(cell);
- }
- }
+namespace WTF {
- inline bool MarkedBlock::needsSweeping()
+struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> {
+ static unsigned hash(JSC::MarkedBlock* const& key)
{
- return m_state == Marked;
+ // Aligned VM regions tend to be monotonically increasing integers,
+ // which is a great hash function, but we have to remove the low bits,
+ // since they're always zero, which is a terrible hash function!
+ return reinterpret_cast<JSC::Bits>(key) / JSC::MarkedBlock::blockSize;
}
+};
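Worked numbers for the low-bit observation above, using the 16 KB blockSize from this patch (the addresses are made up for illustration):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uintptr_t blockSize = 16 * 1024;
        const uintptr_t blockMask = ~(blockSize - 1);

        uintptr_t block = 0x7f2a4000u & blockMask; // hypothetical block-aligned address
        uintptr_t cell = block + 0x230;            // a cell somewhere inside that block

        assert((cell & blockMask) == block);       // what MarkedBlock::blockFor() computes
        assert(block % blockSize == 0);            // the low log2(blockSize) bits are always zero
        unsigned hash = static_cast<unsigned>(block / blockSize); // MarkedBlockHash::hash()
        (void)hash;
        return 0;
    }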
-} // namespace JSC
+template<> struct DefaultHash<JSC::MarkedBlock*> {
+ typedef MarkedBlockHash Hash;
+};
-namespace WTF {
-
- struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> {
- static unsigned hash(JSC::MarkedBlock* const& key)
- {
- // Aligned VM regions tend to be monotonically increasing integers,
- // which is a great hash function, but we have to remove the low bits,
- // since they're always zero, which is a terrible hash function!
- return reinterpret_cast<JSC::Bits>(key) / JSC::MarkedBlock::blockSize;
- }
- };
-
- template<> struct DefaultHash<JSC::MarkedBlock*> {
- typedef MarkedBlockHash Hash;
- };
+void printInternal(PrintStream& out, JSC::MarkedBlock::Handle::SweepMode);
} // namespace WTF
-
-#endif // MarkedBlock_h
diff --git a/Source/JavaScriptCore/heap/MarkedBlockInlines.h b/Source/JavaScriptCore/heap/MarkedBlockInlines.h
new file mode 100644
index 000000000..78379f3c7
--- /dev/null
+++ b/Source/JavaScriptCore/heap/MarkedBlockInlines.h
@@ -0,0 +1,394 @@
+/*
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "JSCell.h"
+#include "MarkedAllocator.h"
+#include "MarkedBlock.h"
+#include "MarkedSpace.h"
+#include "Operations.h"
+#include "SuperSampler.h"
+#include "VM.h"
+
+namespace JSC {
+
+inline unsigned MarkedBlock::Handle::cellsPerBlock()
+{
+ return MarkedSpace::blockPayload / cellSize();
+}
+
+inline bool MarkedBlock::Handle::isNewlyAllocatedStale() const
+{
+ return m_newlyAllocatedVersion != space()->newlyAllocatedVersion();
+}
+
+inline bool MarkedBlock::Handle::hasAnyNewlyAllocated()
+{
+ return !isNewlyAllocatedStale();
+}
+
+inline Heap* MarkedBlock::heap() const
+{
+ return &vm()->heap;
+}
+
+inline MarkedSpace* MarkedBlock::space() const
+{
+ return &heap()->objectSpace();
+}
+
+inline MarkedSpace* MarkedBlock::Handle::space() const
+{
+ return &heap()->objectSpace();
+}
+
+inline bool MarkedBlock::marksConveyLivenessDuringMarking(HeapVersion markingVersion)
+{
+ // This returns true if any of these is true:
+ // - We just created the block and so the bits are clear already.
+ // - This block has objects marked during the last GC, and so its version was up-to-date just
+ // before the current collection did beginMarking(). This means that any objects that have
+ // their mark bit set are valid objects that were never deleted, and so are candidates for
+ // marking in any conservative scan. Using our jargon, they are "live".
+ // - We did ~2^32 collections and rotated the version back to null, so we needed to hard-reset
+ // everything. If the marks had been stale, we would have cleared them. So, we can be sure that
+ // any set mark bit reflects objects marked during last GC, i.e. "live" objects.
+ // It would be absurd to use this method when not collecting, since this special "one version
+ // back" state only makes sense when we're in a concurrent collection and have to be
+ // conservative.
+ ASSERT(space()->isMarking());
+ if (heap()->collectionScope() != CollectionScope::Full)
+ return false;
+ return m_markingVersion == MarkedSpace::nullVersion
+ || MarkedSpace::nextVersion(m_markingVersion) == markingVersion;
+}
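The version arithmetic that areMarksStale() and this function rely on can be condensed into a small model. This is a simplification with invented names; wraparound and the special handling of the null version are glossed over:

    #include <cstdint>

    typedef uint32_t ExampleHeapVersion;
    static const ExampleHeapVersion exampleNullVersion = 0;

    struct ExampleBlock {
        ExampleHeapVersion markingVersion { exampleNullVersion };

        // Bumping one heap-global version at the start of marking implicitly invalidates every
        // block's mark bits; a block only pays to clear them when it is next touched.
        bool areMarksStale(ExampleHeapVersion heapMarkingVersion) const
        {
            return markingVersion != heapMarkingVersion;
        }

        // During a full collection, marks that are exactly one version old (or that belong to a
        // block that has never been marked) still identify objects that survived the previous GC.
        bool marksConveyLivenessDuringMarking(ExampleHeapVersion heapMarkingVersion, bool isFullCollection) const
        {
            if (!isFullCollection)
                return false;
            return markingVersion == exampleNullVersion
                || markingVersion + 1 == heapMarkingVersion;
        }
    };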
+
+inline bool MarkedBlock::Handle::isLive(HeapVersion markingVersion, bool isMarking, const HeapCell* cell)
+{
+ ASSERT(!isFreeListed());
+
+ if (UNLIKELY(hasAnyNewlyAllocated())) {
+ if (isNewlyAllocated(cell))
+ return true;
+ }
+
+ if (allocator()->isAllocated(NoLockingNecessary, this))
+ return true;
+
+ MarkedBlock& block = this->block();
+
+ if (block.areMarksStale()) {
+ if (!isMarking)
+ return false;
+ if (!block.marksConveyLivenessDuringMarking(markingVersion))
+ return false;
+ }
+
+ return block.m_marks.get(block.atomNumber(cell));
+}
+
+inline bool MarkedBlock::Handle::isLiveCell(HeapVersion markingVersion, bool isMarking, const void* p)
+{
+ if (!m_block->isAtom(p))
+ return false;
+ return isLive(markingVersion, isMarking, static_cast<const HeapCell*>(p));
+}
+
+// The following has to be true for specialization to kick in:
+//
+// sweepMode == SweepToFreeList
+// scribbleMode == DontScribble
+// newlyAllocatedMode == DoesNotHaveNewlyAllocated
+// destructionMode != BlockHasDestructorsAndCollectorIsRunning
+//
+// emptyMode = IsEmpty
+// destructionMode = DoesNotNeedDestruction
+// marksMode = MarksNotStale (1)
+// marksMode = MarksStale (2)
+// emptyMode = NotEmpty
+// destructionMode = DoesNotNeedDestruction
+// marksMode = MarksNotStale (3)
+// marksMode = MarksStale (4)
+// destructionMode = NeedsDestruction
+// marksMode = MarksNotStale (5)
+// marksMode = MarksStale (6)
+//
+// Only the DoesNotNeedDestruction one should be specialized by MarkedBlock.
+
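The dispatch pattern used by specializedSweep() below boils down to the following standalone sketch: when the leading template parameter is true, the runtime arguments are overwritten with compile-time constants so the optimizer can fold every mode branch away, while a single specialize == false instantiation serves all remaining combinations. The names here are invented for illustration:

    enum ExampleMode { ExampleFast, ExampleSlow };

    template<bool specialize, ExampleMode specializedMode>
    int exampleSweep(ExampleMode mode)
    {
        if (specialize)
            mode = specializedMode; // effectively a compile-time constant on this path

        if (mode == ExampleFast)    // folds to an unconditional path when specialize == true
            return 1;
        return 2;
    }

    int exampleDispatch(ExampleMode mode)
    {
        if (mode == ExampleFast)
            return exampleSweep<true, ExampleFast>(mode);
        // The template arguments don't matter when the first one is false.
        return exampleSweep<false, ExampleFast>(mode);
    }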
+template<bool specialize, MarkedBlock::Handle::EmptyMode specializedEmptyMode, MarkedBlock::Handle::SweepMode specializedSweepMode, MarkedBlock::Handle::SweepDestructionMode specializedDestructionMode, MarkedBlock::Handle::ScribbleMode specializedScribbleMode, MarkedBlock::Handle::NewlyAllocatedMode specializedNewlyAllocatedMode, MarkedBlock::Handle::MarksMode specializedMarksMode, typename DestroyFunc>
+FreeList MarkedBlock::Handle::specializedSweep(MarkedBlock::Handle::EmptyMode emptyMode, MarkedBlock::Handle::SweepMode sweepMode, MarkedBlock::Handle::SweepDestructionMode destructionMode, MarkedBlock::Handle::ScribbleMode scribbleMode, MarkedBlock::Handle::NewlyAllocatedMode newlyAllocatedMode, MarkedBlock::Handle::MarksMode marksMode, const DestroyFunc& destroyFunc)
+{
+ if (specialize) {
+ emptyMode = specializedEmptyMode;
+ sweepMode = specializedSweepMode;
+ destructionMode = specializedDestructionMode;
+ scribbleMode = specializedScribbleMode;
+ newlyAllocatedMode = specializedNewlyAllocatedMode;
+ marksMode = specializedMarksMode;
+ }
+
+ RELEASE_ASSERT(!(destructionMode == BlockHasNoDestructors && sweepMode == SweepOnly));
+
+ SuperSamplerScope superSamplerScope(false);
+
+ MarkedBlock& block = this->block();
+
+ if (false)
+ dataLog(RawPointer(this), "/", RawPointer(&block), ": MarkedBlock::Handle::specializedSweep!\n");
+
+ if (Options::useBumpAllocator()
+ && emptyMode == IsEmpty
+ && newlyAllocatedMode == DoesNotHaveNewlyAllocated) {
+
+ // This is an incredibly powerful assertion that checks the sanity of our block bits.
+ if (marksMode == MarksNotStale && !block.m_marks.isEmpty()) {
+ WTF::dataFile().atomically(
+ [&] (PrintStream& out) {
+ out.print("Block ", RawPointer(&block), ": marks not empty!\n");
+ out.print("Block lock is held: ", block.m_lock.isHeld(), "\n");
+ out.print("Marking version of block: ", block.m_markingVersion, "\n");
+ out.print("Marking version of heap: ", space()->markingVersion(), "\n");
+ UNREACHABLE_FOR_PLATFORM();
+ });
+ }
+
+ char* startOfLastCell = static_cast<char*>(cellAlign(block.atoms() + m_endAtom - 1));
+ char* payloadEnd = startOfLastCell + cellSize();
+ RELEASE_ASSERT(payloadEnd - MarkedBlock::blockSize <= bitwise_cast<char*>(&block));
+ char* payloadBegin = bitwise_cast<char*>(block.atoms() + firstAtom());
+ if (scribbleMode == Scribble)
+ scribble(payloadBegin, payloadEnd - payloadBegin);
+ if (sweepMode == SweepToFreeList)
+ setIsFreeListed();
+ else
+ m_allocator->setIsEmpty(NoLockingNecessary, this, true);
+ if (space()->isMarking())
+ block.m_lock.unlock();
+ FreeList result = FreeList::bump(payloadEnd, payloadEnd - payloadBegin);
+ if (false)
+ dataLog("Quickly swept block ", RawPointer(this), " with cell size ", cellSize(), " and attributes ", m_attributes, ": ", result, "\n");
+ return result;
+ }
+
+ // This produces a free list that is ordered in reverse through the block.
+ // This is fine, since the allocation code makes no assumptions about the
+ // order of the free list.
+ FreeCell* head = 0;
+ size_t count = 0;
+ bool isEmpty = true;
+ Vector<size_t> deadCells;
+ VM& vm = *this->vm();
+ auto handleDeadCell = [&] (size_t i) {
+ HeapCell* cell = reinterpret_cast_ptr<HeapCell*>(&block.atoms()[i]);
+
+ if (destructionMode != BlockHasNoDestructors && emptyMode == NotEmpty) {
+ JSCell* jsCell = static_cast<JSCell*>(cell);
+ if (!jsCell->isZapped()) {
+ destroyFunc(vm, jsCell);
+ jsCell->zap();
+ }
+ }
+
+ if (sweepMode == SweepToFreeList) {
+ FreeCell* freeCell = reinterpret_cast_ptr<FreeCell*>(cell);
+ if (scribbleMode == Scribble)
+ scribble(freeCell, cellSize());
+ freeCell->next = head;
+ head = freeCell;
+ ++count;
+ }
+ };
+ for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
+ if (emptyMode == NotEmpty
+ && ((marksMode == MarksNotStale && block.m_marks.get(i))
+ || (newlyAllocatedMode == HasNewlyAllocated && m_newlyAllocated.get(i)))) {
+ isEmpty = false;
+ continue;
+ }
+
+ if (destructionMode == BlockHasDestructorsAndCollectorIsRunning)
+ deadCells.append(i);
+ else
+ handleDeadCell(i);
+ }
+
+ // We only want to discard the newlyAllocated bits if we're creating a FreeList,
+ // otherwise we would lose information on what's currently alive.
+ if (sweepMode == SweepToFreeList && newlyAllocatedMode == HasNewlyAllocated)
+ m_newlyAllocatedVersion = MarkedSpace::nullVersion;
+
+ if (space()->isMarking())
+ block.m_lock.unlock();
+
+ if (destructionMode == BlockHasDestructorsAndCollectorIsRunning) {
+ for (size_t i : deadCells)
+ handleDeadCell(i);
+ }
+
+ FreeList result = FreeList::list(head, count * cellSize());
+ if (sweepMode == SweepToFreeList)
+ setIsFreeListed();
+ else if (isEmpty)
+ m_allocator->setIsEmpty(NoLockingNecessary, this, true);
+ if (false)
+ dataLog("Slowly swept block ", RawPointer(&block), " with cell size ", cellSize(), " and attributes ", m_attributes, ": ", result, "\n");
+ return result;
+}
+
+template<typename DestroyFunc>
+FreeList MarkedBlock::Handle::finishSweepKnowingSubspace(SweepMode sweepMode, const DestroyFunc& destroyFunc)
+{
+ SweepDestructionMode destructionMode = this->sweepDestructionMode();
+ EmptyMode emptyMode = this->emptyMode();
+ ScribbleMode scribbleMode = this->scribbleMode();
+ NewlyAllocatedMode newlyAllocatedMode = this->newlyAllocatedMode();
+ MarksMode marksMode = this->marksMode();
+
+ FreeList result;
+ auto trySpecialized = [&] () -> bool {
+ if (sweepMode != SweepToFreeList)
+ return false;
+ if (scribbleMode != DontScribble)
+ return false;
+ if (newlyAllocatedMode != DoesNotHaveNewlyAllocated)
+ return false;
+ if (destructionMode != BlockHasDestructors)
+ return false;
+ if (emptyMode == IsEmpty)
+ return false;
+
+ switch (marksMode) {
+ case MarksNotStale:
+ result = specializedSweep<true, NotEmpty, SweepToFreeList, BlockHasDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale>(IsEmpty, SweepToFreeList, BlockHasDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksNotStale, destroyFunc);
+ return true;
+ case MarksStale:
+ result = specializedSweep<true, NotEmpty, SweepToFreeList, BlockHasDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale>(IsEmpty, SweepToFreeList, BlockHasDestructors, DontScribble, DoesNotHaveNewlyAllocated, MarksStale, destroyFunc);
+ return true;
+ }
+
+ return false;
+ };
+
+ if (trySpecialized())
+ return result;
+
+ // The template arguments don't matter because the first one is false.
+ return specializedSweep<false, IsEmpty, SweepOnly, BlockHasNoDestructors, DontScribble, HasNewlyAllocated, MarksStale>(emptyMode, sweepMode, destructionMode, scribbleMode, newlyAllocatedMode, marksMode, destroyFunc);
+}
+
+inline MarkedBlock::Handle::SweepDestructionMode MarkedBlock::Handle::sweepDestructionMode()
+{
+ if (m_attributes.destruction == NeedsDestruction) {
+ if (space()->isMarking())
+ return BlockHasDestructorsAndCollectorIsRunning;
+ return BlockHasDestructors;
+ }
+ return BlockHasNoDestructors;
+}
+
+inline MarkedBlock::Handle::EmptyMode MarkedBlock::Handle::emptyMode()
+{
+ // It's not obvious, but this is the only way to know if the block is empty. It's the only
+ // bit that captures these caveats:
+ // - It's true when the block is freshly allocated.
+ // - It's true if the block had been swept in the past, all destructors were called, and that
+ // sweep proved that the block is empty.
+ // - It's false if there are any destructors that need to be called, even if the block has no
+ // live objects.
+ return m_allocator->isEmpty(NoLockingNecessary, this) ? IsEmpty : NotEmpty;
+}
+
+inline MarkedBlock::Handle::ScribbleMode MarkedBlock::Handle::scribbleMode()
+{
+ return scribbleFreeCells() ? Scribble : DontScribble;
+}
+
+inline MarkedBlock::Handle::NewlyAllocatedMode MarkedBlock::Handle::newlyAllocatedMode()
+{
+ return hasAnyNewlyAllocated() ? HasNewlyAllocated : DoesNotHaveNewlyAllocated;
+}
+
+inline MarkedBlock::Handle::MarksMode MarkedBlock::Handle::marksMode()
+{
+ HeapVersion markingVersion = space()->markingVersion();
+ bool marksAreUseful = !block().areMarksStale(markingVersion);
+ if (space()->isMarking())
+ marksAreUseful |= block().marksConveyLivenessDuringMarking(markingVersion);
+ return marksAreUseful ? MarksNotStale : MarksStale;
+}
+
+template <typename Functor>
+inline IterationStatus MarkedBlock::Handle::forEachLiveCell(const Functor& functor)
+{
+ HeapCell::Kind kind = m_attributes.cellKind;
+ for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
+ HeapCell* cell = reinterpret_cast_ptr<HeapCell*>(&m_block->atoms()[i]);
+ if (!isLive(cell))
+ continue;
+
+ if (functor(cell, kind) == IterationStatus::Done)
+ return IterationStatus::Done;
+ }
+ return IterationStatus::Continue;
+}
+
+template <typename Functor>
+inline IterationStatus MarkedBlock::Handle::forEachDeadCell(const Functor& functor)
+{
+ HeapCell::Kind kind = m_attributes.cellKind;
+ for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
+ HeapCell* cell = reinterpret_cast_ptr<HeapCell*>(&m_block->atoms()[i]);
+ if (isLive(cell))
+ continue;
+
+ if (functor(cell, kind) == IterationStatus::Done)
+ return IterationStatus::Done;
+ }
+ return IterationStatus::Continue;
+}
+
+template <typename Functor>
+inline IterationStatus MarkedBlock::Handle::forEachMarkedCell(const Functor& functor)
+{
+ HeapCell::Kind kind = m_attributes.cellKind;
+ MarkedBlock& block = this->block();
+ bool areMarksStale = block.areMarksStale();
+ WTF::loadLoadFence();
+ if (areMarksStale)
+ return IterationStatus::Continue;
+ for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
+ HeapCell* cell = reinterpret_cast_ptr<HeapCell*>(&m_block->atoms()[i]);
+ if (!block.isMarkedRaw(cell))
+ continue;
+
+ if (functor(cell, kind) == IterationStatus::Done)
+ return IterationStatus::Done;
+ }
+ return IterationStatus::Continue;
+}
+
+} // namespace JSC
+
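The version-based liveness checks above can be modeled in isolation. Below is a minimal standalone sketch, not part of this patch, that mirrors MarkedSpace::nextVersion and the "one version back" rule with plain integers; the function names match the patch, but the constants shown and the main() driver are illustrative only.

    #include <cassert>
    #include <cstdint>

    typedef uint32_t HeapVersion;

    static const HeapVersion nullVersion = 0;    // version of freshly created blocks
    static const HeapVersion initialVersion = 2; // chosen so nextVersion(nullVersion) != initialVersion

    static HeapVersion nextVersion(HeapVersion version)
    {
        version++;
        if (version == nullVersion) // wrapped around after ~2^32 collections
            version = initialVersion;
        return version;
    }

    // A block's marks are stale unless its version matches the heap's marking version.
    static bool areMarksStale(HeapVersion blockVersion, HeapVersion heapVersion)
    {
        return blockVersion != heapVersion;
    }

    // During a full collection, stale marks still convey liveness if the block is exactly
    // one version behind (it was marked during the previous GC) or was never marked at all.
    static bool marksConveyLivenessDuringMarking(HeapVersion blockVersion, HeapVersion heapVersion)
    {
        return blockVersion == nullVersion || nextVersion(blockVersion) == heapVersion;
    }

    int main()
    {
        HeapVersion heap = initialVersion;
        HeapVersion block = heap;     // block marked during this collection
        heap = nextVersion(heap);     // the next collection begins marking
        assert(areMarksStale(block, heap));
        assert(marksConveyLivenessDuringMarking(block, heap));
        assert(marksConveyLivenessDuringMarking(nullVersion, heap));
        return 0;
    }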
diff --git a/Source/JavaScriptCore/heap/MarkedBlockSet.h b/Source/JavaScriptCore/heap/MarkedBlockSet.h
index 022a17389..d54e901a6 100644
--- a/Source/JavaScriptCore/heap/MarkedBlockSet.h
+++ b/Source/JavaScriptCore/heap/MarkedBlockSet.h
@@ -23,8 +23,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef MarkedBlockSet_h
-#define MarkedBlockSet_h
+#pragma once
#include "MarkedBlock.h"
#include "TinyBloomFilter.h"
@@ -57,7 +56,7 @@ inline void MarkedBlockSet::add(MarkedBlock* block)
inline void MarkedBlockSet::remove(MarkedBlock* block)
{
- int oldCapacity = m_set.capacity();
+ unsigned oldCapacity = m_set.capacity();
m_set.remove(block);
if (m_set.capacity() != oldCapacity) // Indicates we've removed a lot of blocks.
recomputeFilter();
@@ -82,5 +81,3 @@ inline const HashSet<MarkedBlock*>& MarkedBlockSet::set() const
}
} // namespace JSC
-
-#endif // MarkedBlockSet_h
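The specializedSweep()/finishSweepKnowingSubspace() pair in MarkedBlockInlines.h above dispatches runtime sweep modes to compile-time template parameters, so the hot sweep loop compiles with its cold branches folded away. The following is a minimal sketch of that dispatch pattern, not part of this patch; the Mode enum and function names are made up for illustration.

    #include <cstdio>

    enum Mode { Fast, Slow };

    template<bool specialize, Mode specializedMode>
    void doWork(Mode mode)
    {
        if (specialize)
            mode = specializedMode; // now effectively a compile-time constant, so dead branches fold away
        if (mode == Fast)
            std::printf("fast path\n");
        else
            std::printf("slow path\n");
    }

    void dispatch(Mode mode)
    {
        switch (mode) {
        case Fast:
            doWork<true, Fast>(mode);
            return;
        case Slow:
            doWork<true, Slow>(mode);
            return;
        }
        // Fallback: the template arguments don't matter because the first one is false.
        doWork<false, Fast>(mode);
    }

    int main()
    {
        dispatch(Fast);
        dispatch(Slow);
        return 0;
    }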
diff --git a/Source/JavaScriptCore/heap/MarkedSpace.cpp b/Source/JavaScriptCore/heap/MarkedSpace.cpp
index e005337a6..0dee44eba 100644
--- a/Source/JavaScriptCore/heap/MarkedSpace.cpp
+++ b/Source/JavaScriptCore/heap/MarkedSpace.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2003-2017 Apple Inc. All rights reserved.
* Copyright (C) 2007 Eric Seidel <eric@webkit.org>
*
* This library is free software; you can redistribute it and/or
@@ -21,239 +21,346 @@
#include "config.h"
#include "MarkedSpace.h"
-#include "DelayedReleaseScope.h"
+#include "FunctionCodeBlock.h"
#include "IncrementalSweeper.h"
-#include "JSGlobalObject.h"
-#include "JSLock.h"
#include "JSObject.h"
-
+#include "JSCInlines.h"
+#include "MarkedAllocatorInlines.h"
+#include "MarkedBlockInlines.h"
+#include <wtf/ListDump.h>
namespace JSC {
-class Structure;
-
-class Free {
-public:
- typedef MarkedBlock* ReturnType;
+std::array<size_t, MarkedSpace::numSizeClasses> MarkedSpace::s_sizeClassForSizeStep;
- enum FreeMode { FreeOrShrink, FreeAll };
-
- Free(FreeMode, MarkedSpace*);
- void operator()(MarkedBlock*);
- ReturnType returnValue();
-
-private:
- FreeMode m_freeMode;
- MarkedSpace* m_markedSpace;
- DoublyLinkedList<MarkedBlock> m_blocks;
-};
+namespace {
-inline Free::Free(FreeMode freeMode, MarkedSpace* newSpace)
- : m_freeMode(freeMode)
- , m_markedSpace(newSpace)
+const Vector<size_t>& sizeClasses()
{
+ static Vector<size_t>* result;
+ static std::once_flag once;
+ std::call_once(
+ once,
+ [] {
+ result = new Vector<size_t>();
+
+ auto add = [&] (size_t sizeClass) {
+ sizeClass = WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(sizeClass);
+ if (Options::dumpSizeClasses())
+ dataLog("Adding JSC MarkedSpace size class: ", sizeClass, "\n");
+ // Perform some validation as we go.
+ RELEASE_ASSERT(!(sizeClass % MarkedSpace::sizeStep));
+ if (result->isEmpty())
+ RELEASE_ASSERT(sizeClass == MarkedSpace::sizeStep);
+ result->append(sizeClass);
+ };
+
+ // This is a definition of the size classes in our GC. It must define all of the
+ // size classes from sizeStep up to largeCutoff.
+
+ // Have very precise size classes for the small stuff. This is a loop to make it easy to reduce
+ // atomSize.
+ for (size_t size = MarkedSpace::sizeStep; size < MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep)
+ add(size);
+
+ // We want to make sure that the remaining size classes minimize internal fragmentation (i.e.
+ // the wasted space at the tail end of a MarkedBlock) while proceeding roughly in an exponential
+ // way starting at just above the precise size classes to four cells per block.
+
+ if (Options::dumpSizeClasses())
+ dataLog(" Marked block payload size: ", static_cast<size_t>(MarkedSpace::blockPayload), "\n");
+
+ for (unsigned i = 0; ; ++i) {
+ double approximateSize = MarkedSpace::preciseCutoff * pow(Options::sizeClassProgression(), i);
+
+ if (Options::dumpSizeClasses())
+ dataLog(" Next size class as a double: ", approximateSize, "\n");
+
+ size_t approximateSizeInBytes = static_cast<size_t>(approximateSize);
+
+ if (Options::dumpSizeClasses())
+ dataLog(" Next size class as bytes: ", approximateSizeInBytes, "\n");
+
+ // Make sure that the computer did the math correctly.
+ RELEASE_ASSERT(approximateSizeInBytes >= MarkedSpace::preciseCutoff);
+
+ if (approximateSizeInBytes > MarkedSpace::largeCutoff)
+ break;
+
+ size_t sizeClass =
+ WTF::roundUpToMultipleOf<MarkedSpace::sizeStep>(approximateSizeInBytes);
+
+ if (Options::dumpSizeClasses())
+ dataLog(" Size class: ", sizeClass, "\n");
+
+ // Optimize the size class so that there isn't any slop at the end of the block's
+ // payload.
+ unsigned cellsPerBlock = MarkedSpace::blockPayload / sizeClass;
+ size_t possiblyBetterSizeClass = (MarkedSpace::blockPayload / cellsPerBlock) & ~(MarkedSpace::sizeStep - 1);
+
+ if (Options::dumpSizeClasses())
+ dataLog(" Possibly better size class: ", possiblyBetterSizeClass, "\n");
+
+ // The size class we just came up with is better than the other one if it reduces
+ // total wastage assuming we only allocate cells of that size.
+ size_t originalWastage = MarkedSpace::blockPayload - cellsPerBlock * sizeClass;
+ size_t newWastage = (possiblyBetterSizeClass - sizeClass) * cellsPerBlock;
+
+ if (Options::dumpSizeClasses())
+ dataLog(" Original wastage: ", originalWastage, ", new wastage: ", newWastage, "\n");
+
+ size_t betterSizeClass;
+ if (newWastage > originalWastage)
+ betterSizeClass = sizeClass;
+ else
+ betterSizeClass = possiblyBetterSizeClass;
+
+ if (Options::dumpSizeClasses())
+ dataLog(" Choosing size class: ", betterSizeClass, "\n");
+
+ if (betterSizeClass == result->last()) {
+ // Defense for when the exponential step (Options::sizeClassProgression()) is small.
+ continue;
+ }
+
+ // This is usually how we get out of the loop.
+ if (betterSizeClass > MarkedSpace::largeCutoff
+ || betterSizeClass > Options::largeAllocationCutoff())
+ break;
+
+ add(betterSizeClass);
+ }
+
+ // Manually inject size classes for objects we know will be allocated in high volume.
+ add(sizeof(UnlinkedFunctionExecutable));
+ add(sizeof(UnlinkedFunctionCodeBlock));
+ add(sizeof(FunctionExecutable));
+ add(sizeof(FunctionCodeBlock));
+ add(sizeof(JSString));
+ add(sizeof(JSFunction));
+ add(sizeof(PropertyTable));
+ add(sizeof(Structure));
+
+ {
+ // Sort and deduplicate.
+ std::sort(result->begin(), result->end());
+ auto it = std::unique(result->begin(), result->end());
+ result->shrinkCapacity(it - result->begin());
+ }
+
+ if (Options::dumpSizeClasses())
+ dataLog("JSC Heap MarkedSpace size class dump: ", listDump(*result), "\n");
+
+ // We have an optimization in MarkedSpace::optimalSizeFor() that assumes things about
+ // the size class table. This checks our results against that function's assumptions.
+ for (size_t size = MarkedSpace::sizeStep, i = 0; size <= MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep, i++)
+ RELEASE_ASSERT(result->at(i) == size);
+ });
+ return *result;
}
-inline void Free::operator()(MarkedBlock* block)
+template<typename TableType, typename SizeClassCons, typename DefaultCons>
+void buildSizeClassTable(TableType& table, const SizeClassCons& cons, const DefaultCons& defaultCons)
{
- if (m_freeMode == FreeOrShrink)
- m_markedSpace->freeOrShrinkBlock(block);
- else
- m_markedSpace->freeBlock(block);
+ size_t nextIndex = 0;
+ for (size_t sizeClass : sizeClasses()) {
+ auto entry = cons(sizeClass);
+ size_t index = MarkedSpace::sizeClassToIndex(sizeClass);
+ for (size_t i = nextIndex; i <= index; ++i)
+ table[i] = entry;
+ nextIndex = index + 1;
+ }
+ for (size_t i = nextIndex; i < MarkedSpace::numSizeClasses; ++i)
+ table[i] = defaultCons(MarkedSpace::indexToSizeClass(i));
}
-inline Free::ReturnType Free::returnValue()
+} // anonymous namespace
+
+void MarkedSpace::initializeSizeClassForStepSize()
{
- return m_blocks.head();
+ static std::once_flag flag;
+ std::call_once(
+ flag,
+ [] {
+ buildSizeClassTable(
+ s_sizeClassForSizeStep,
+ [&] (size_t sizeClass) -> size_t {
+ return sizeClass;
+ },
+ [&] (size_t sizeClass) -> size_t {
+ return sizeClass;
+ });
+ });
}
-struct VisitWeakSet : MarkedBlock::VoidFunctor {
- VisitWeakSet(HeapRootVisitor& heapRootVisitor) : m_heapRootVisitor(heapRootVisitor) { }
- void operator()(MarkedBlock* block) { block->visitWeakSet(m_heapRootVisitor); }
-private:
- HeapRootVisitor& m_heapRootVisitor;
-};
-
-struct ReapWeakSet : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block) { block->reapWeakSet(); }
-};
-
MarkedSpace::MarkedSpace(Heap* heap)
: m_heap(heap)
, m_capacity(0)
, m_isIterating(false)
- , m_currentDelayedReleaseScope(nullptr)
{
- for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
- allocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::None);
- normalDestructorAllocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::Normal);
- immortalStructureDestructorAllocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::ImmortalStructure);
- }
-
- for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
- allocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::None);
- normalDestructorAllocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::Normal);
- immortalStructureDestructorAllocatorFor(cellSize).init(heap, this, cellSize, MarkedBlock::ImmortalStructure);
- }
-
- m_normalSpace.largeAllocator.init(heap, this, 0, MarkedBlock::None);
- m_normalDestructorSpace.largeAllocator.init(heap, this, 0, MarkedBlock::Normal);
- m_immortalStructureDestructorSpace.largeAllocator.init(heap, this, 0, MarkedBlock::ImmortalStructure);
+ initializeSizeClassForStepSize();
}
MarkedSpace::~MarkedSpace()
{
- Free free(Free::FreeAll, this);
- forEachBlock(free);
+ forEachBlock(
+ [&] (MarkedBlock::Handle* block) {
+ freeBlock(block);
+ });
+ for (LargeAllocation* allocation : m_largeAllocations)
+ allocation->destroy();
ASSERT(!m_blocks.set().size());
}
-struct LastChanceToFinalize : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block) { block->lastChanceToFinalize(); }
-};
-
void MarkedSpace::lastChanceToFinalize()
{
- DelayedReleaseScope delayedReleaseScope(*this);
- stopAllocating();
- forEachBlock<LastChanceToFinalize>();
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.lastChanceToFinalize();
+ return IterationStatus::Continue;
+ });
+ for (LargeAllocation* allocation : m_largeAllocations)
+ allocation->lastChanceToFinalize();
}
void MarkedSpace::sweep()
{
- if (Options::logGC())
- dataLog("Eagerly sweeping...");
- m_heap->sweeper()->willFinishSweeping();
- forEachBlock<Sweep>();
+ m_heap->sweeper()->stopSweeping();
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.sweep();
+ return IterationStatus::Continue;
+ });
}
-void MarkedSpace::resetAllocators()
+void MarkedSpace::sweepLargeAllocations()
{
- for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
- allocatorFor(cellSize).reset();
- normalDestructorAllocatorFor(cellSize).reset();
- immortalStructureDestructorAllocatorFor(cellSize).reset();
- }
-
- for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
- allocatorFor(cellSize).reset();
- normalDestructorAllocatorFor(cellSize).reset();
- immortalStructureDestructorAllocatorFor(cellSize).reset();
+ RELEASE_ASSERT(m_largeAllocationsNurseryOffset == m_largeAllocations.size());
+ unsigned srcIndex = m_largeAllocationsNurseryOffsetForSweep;
+ unsigned dstIndex = srcIndex;
+ while (srcIndex < m_largeAllocations.size()) {
+ LargeAllocation* allocation = m_largeAllocations[srcIndex++];
+ allocation->sweep();
+ if (allocation->isEmpty()) {
+ m_capacity -= allocation->cellSize();
+ allocation->destroy();
+ continue;
+ }
+ m_largeAllocations[dstIndex++] = allocation;
}
+ m_largeAllocations.resize(dstIndex);
+ m_largeAllocationsNurseryOffset = m_largeAllocations.size();
+}
- m_normalSpace.largeAllocator.reset();
- m_normalDestructorSpace.largeAllocator.reset();
- m_immortalStructureDestructorSpace.largeAllocator.reset();
+void MarkedSpace::prepareForAllocation()
+{
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.prepareForAllocation();
+ return IterationStatus::Continue;
+ });
-#if ENABLE(GGC)
- m_blocksWithNewObjects.clear();
-#endif
+ m_activeWeakSets.takeFrom(m_newActiveWeakSets);
+
+ if (m_heap->collectionScope() == CollectionScope::Eden)
+ m_largeAllocationsNurseryOffsetForSweep = m_largeAllocationsNurseryOffset;
+ else
+ m_largeAllocationsNurseryOffsetForSweep = 0;
+ m_largeAllocationsNurseryOffset = m_largeAllocations.size();
+
+ m_allocatorForEmptyAllocation = m_firstAllocator;
}
-void MarkedSpace::visitWeakSets(HeapRootVisitor& heapRootVisitor)
+void MarkedSpace::visitWeakSets(SlotVisitor& visitor)
{
- VisitWeakSet visitWeakSet(heapRootVisitor);
- if (m_heap->operationInProgress() == EdenCollection) {
- for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
- visitWeakSet(m_blocksWithNewObjects[i]);
- } else
- forEachBlock(visitWeakSet);
+ auto visit = [&] (WeakSet* weakSet) {
+ weakSet->visit(visitor);
+ };
+
+ m_newActiveWeakSets.forEach(visit);
+
+ if (m_heap->collectionScope() == CollectionScope::Full)
+ m_activeWeakSets.forEach(visit);
}
void MarkedSpace::reapWeakSets()
{
- if (m_heap->operationInProgress() == EdenCollection) {
- for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
- m_blocksWithNewObjects[i]->reapWeakSet();
- } else
- forEachBlock<ReapWeakSet>();
+ auto visit = [&] (WeakSet* weakSet) {
+ weakSet->reap();
+ };
+
+ m_newActiveWeakSets.forEach(visit);
+
+ if (m_heap->collectionScope() == CollectionScope::Full)
+ m_activeWeakSets.forEach(visit);
}
-template <typename Functor>
-void MarkedSpace::forEachAllocator()
+void MarkedSpace::stopAllocating()
{
- Functor functor;
- forEachAllocator(functor);
+ ASSERT(!isIterating());
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.stopAllocating();
+ return IterationStatus::Continue;
+ });
}
-template <typename Functor>
-void MarkedSpace::forEachAllocator(Functor& functor)
+void MarkedSpace::prepareForConservativeScan()
{
- for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
- functor(allocatorFor(cellSize));
- functor(normalDestructorAllocatorFor(cellSize));
- functor(immortalStructureDestructorAllocatorFor(cellSize));
- }
-
- for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
- functor(allocatorFor(cellSize));
- functor(normalDestructorAllocatorFor(cellSize));
- functor(immortalStructureDestructorAllocatorFor(cellSize));
- }
-
- functor(m_normalSpace.largeAllocator);
- functor(m_normalDestructorSpace.largeAllocator);
- functor(m_immortalStructureDestructorSpace.largeAllocator);
+ m_largeAllocationsForThisCollectionBegin = m_largeAllocations.begin() + m_largeAllocationsOffsetForThisCollection;
+ m_largeAllocationsForThisCollectionSize = m_largeAllocations.size() - m_largeAllocationsOffsetForThisCollection;
+ m_largeAllocationsForThisCollectionEnd = m_largeAllocations.end();
+ RELEASE_ASSERT(m_largeAllocationsForThisCollectionEnd == m_largeAllocationsForThisCollectionBegin + m_largeAllocationsForThisCollectionSize);
+
+ std::sort(
+ m_largeAllocationsForThisCollectionBegin, m_largeAllocationsForThisCollectionEnd,
+ [&] (LargeAllocation* a, LargeAllocation* b) {
+ return a < b;
+ });
}
-struct StopAllocatingFunctor {
- void operator()(MarkedAllocator& allocator) { allocator.stopAllocating(); }
-};
-
-void MarkedSpace::stopAllocating()
+void MarkedSpace::prepareForMarking()
{
- ASSERT(!isIterating());
- forEachAllocator<StopAllocatingFunctor>();
+ if (m_heap->collectionScope() == CollectionScope::Eden)
+ m_largeAllocationsOffsetForThisCollection = m_largeAllocationsNurseryOffset;
+ else
+ m_largeAllocationsOffsetForThisCollection = 0;
}
-struct ResumeAllocatingFunctor {
- void operator()(MarkedAllocator& allocator) { allocator.resumeAllocating(); }
-};
-
void MarkedSpace::resumeAllocating()
{
- ASSERT(isIterating());
- DelayedReleaseScope scope(*this);
- forEachAllocator<ResumeAllocatingFunctor>();
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.resumeAllocating();
+ return IterationStatus::Continue;
+ });
+ // Nothing to do for LargeAllocations.
}
bool MarkedSpace::isPagedOut(double deadline)
{
- for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
- if (allocatorFor(cellSize).isPagedOut(deadline)
- || normalDestructorAllocatorFor(cellSize).isPagedOut(deadline)
- || immortalStructureDestructorAllocatorFor(cellSize).isPagedOut(deadline))
- return true;
- }
-
- for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
- if (allocatorFor(cellSize).isPagedOut(deadline)
- || normalDestructorAllocatorFor(cellSize).isPagedOut(deadline)
- || immortalStructureDestructorAllocatorFor(cellSize).isPagedOut(deadline))
- return true;
- }
-
- if (m_normalSpace.largeAllocator.isPagedOut(deadline)
- || m_normalDestructorSpace.largeAllocator.isPagedOut(deadline)
- || m_immortalStructureDestructorSpace.largeAllocator.isPagedOut(deadline))
- return true;
-
- return false;
+ bool result = false;
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ if (allocator.isPagedOut(deadline)) {
+ result = true;
+ return IterationStatus::Done;
+ }
+ return IterationStatus::Continue;
+ });
+ // FIXME: Consider taking LargeAllocations into account here.
+ return result;
}
-void MarkedSpace::freeBlock(MarkedBlock* block)
+void MarkedSpace::freeBlock(MarkedBlock::Handle* block)
{
block->allocator()->removeBlock(block);
- m_capacity -= block->capacity();
- m_blocks.remove(block);
- if (block->capacity() == MarkedBlock::blockSize) {
- m_heap->blockAllocator().deallocate(MarkedBlock::destroy(block));
- return;
- }
- m_heap->blockAllocator().deallocateCustomSize(MarkedBlock::destroy(block));
+ m_capacity -= MarkedBlock::blockSize;
+ m_blocks.remove(&block->block());
+ delete block;
}
-void MarkedSpace::freeOrShrinkBlock(MarkedBlock* block)
+void MarkedSpace::freeOrShrinkBlock(MarkedBlock::Handle* block)
{
if (!block->isEmpty()) {
block->shrink();
@@ -263,77 +370,75 @@ void MarkedSpace::freeOrShrinkBlock(MarkedBlock* block)
freeBlock(block);
}
-struct Shrink : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block) { block->shrink(); }
-};
-
void MarkedSpace::shrink()
{
- Free freeOrShrink(Free::FreeOrShrink, this);
- forEachBlock(freeOrShrink);
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.shrink();
+ return IterationStatus::Continue;
+ });
}
-static void clearNewlyAllocatedInBlock(MarkedBlock* block)
+void MarkedSpace::beginMarking()
{
- if (!block)
- return;
- block->clearNewlyAllocated();
-}
-
-struct ClearNewlyAllocated : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block) { block->clearNewlyAllocated(); }
-};
+ if (m_heap->collectionScope() == CollectionScope::Full) {
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.beginMarkingForFullCollection();
+ return IterationStatus::Continue;
+ });
+
+ if (UNLIKELY(nextVersion(m_markingVersion) == initialVersion)) {
+ forEachBlock(
+ [&] (MarkedBlock::Handle* handle) {
+ handle->block().resetMarks();
+ });
+ }
+
+ m_markingVersion = nextVersion(m_markingVersion);
+
+ for (LargeAllocation* allocation : m_largeAllocations)
+ allocation->flip();
+ }
-#ifndef NDEBUG
-struct VerifyNewlyAllocated : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block) { ASSERT(!block->clearNewlyAllocated()); }
-};
-#endif
+ if (!ASSERT_DISABLED) {
+ forEachBlock(
+ [&] (MarkedBlock::Handle* block) {
+ if (block->areMarksStale())
+ return;
+ ASSERT(!block->isFreeListed());
+ });
+ }
+
+ m_isMarking = true;
+}
-void MarkedSpace::clearNewlyAllocated()
+void MarkedSpace::endMarking()
{
- for (size_t i = 0; i < preciseCount; ++i) {
- clearNewlyAllocatedInBlock(m_normalSpace.preciseAllocators[i].takeLastActiveBlock());
- clearNewlyAllocatedInBlock(m_normalDestructorSpace.preciseAllocators[i].takeLastActiveBlock());
- clearNewlyAllocatedInBlock(m_immortalStructureDestructorSpace.preciseAllocators[i].takeLastActiveBlock());
+ if (UNLIKELY(nextVersion(m_newlyAllocatedVersion) == initialVersion)) {
+ forEachBlock(
+ [&] (MarkedBlock::Handle* handle) {
+ handle->resetAllocated();
+ });
}
+
+ m_newlyAllocatedVersion = nextVersion(m_newlyAllocatedVersion);
+
+ for (unsigned i = m_largeAllocationsOffsetForThisCollection; i < m_largeAllocations.size(); ++i)
+ m_largeAllocations[i]->clearNewlyAllocated();
- for (size_t i = 0; i < impreciseCount; ++i) {
- clearNewlyAllocatedInBlock(m_normalSpace.impreciseAllocators[i].takeLastActiveBlock());
- clearNewlyAllocatedInBlock(m_normalDestructorSpace.impreciseAllocators[i].takeLastActiveBlock());
- clearNewlyAllocatedInBlock(m_immortalStructureDestructorSpace.impreciseAllocators[i].takeLastActiveBlock());
+ if (!ASSERT_DISABLED) {
+ for (LargeAllocation* allocation : m_largeAllocations)
+ ASSERT_UNUSED(allocation, !allocation->isNewlyAllocated());
}
- // We have to iterate all of the blocks in the large allocators because they are
- // canonicalized as they are used up (see MarkedAllocator::tryAllocateHelper)
- // which creates the m_newlyAllocated bitmap.
- ClearNewlyAllocated functor;
- m_normalSpace.largeAllocator.forEachBlock(functor);
- m_normalDestructorSpace.largeAllocator.forEachBlock(functor);
- m_immortalStructureDestructorSpace.largeAllocator.forEachBlock(functor);
-
-#ifndef NDEBUG
- VerifyNewlyAllocated verifyFunctor;
- forEachBlock(verifyFunctor);
-#endif
-}
-
-#ifndef NDEBUG
-struct VerifyMarked : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block) { ASSERT(block->needsSweeping()); }
-};
-#endif
-
-void MarkedSpace::clearMarks()
-{
- if (m_heap->operationInProgress() == EdenCollection) {
- for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
- m_blocksWithNewObjects[i]->clearMarks();
- } else
- forEachBlock<ClearMarks>();
-#ifndef NDEBUG
- forEachBlock<VerifyMarked>();
-#endif
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.endMarking();
+ return IterationStatus::Continue;
+ });
+
+ m_isMarking = false;
}
void MarkedSpace::willStartIterating()
@@ -350,4 +455,130 @@ void MarkedSpace::didFinishIterating()
m_isIterating = false;
}
+size_t MarkedSpace::objectCount()
+{
+ size_t result = 0;
+ forEachBlock(
+ [&] (MarkedBlock::Handle* block) {
+ result += block->markCount();
+ });
+ for (LargeAllocation* allocation : m_largeAllocations) {
+ if (allocation->isMarked())
+ result++;
+ }
+ return result;
+}
+
+size_t MarkedSpace::size()
+{
+ size_t result = 0;
+ forEachBlock(
+ [&] (MarkedBlock::Handle* block) {
+ result += block->markCount() * block->cellSize();
+ });
+ for (LargeAllocation* allocation : m_largeAllocations) {
+ if (allocation->isMarked())
+ result += allocation->cellSize();
+ }
+ return result;
+}
+
+size_t MarkedSpace::capacity()
+{
+ return m_capacity;
+}
+
+void MarkedSpace::addActiveWeakSet(WeakSet* weakSet)
+{
+ // We conservatively assume that the WeakSet should belong in the new set. In fact, some weak
+ // sets might contain new weak handles even though they are tied to old objects. This slightly
+ // increases the amount of scanning that an eden collection would have to do, but the effect
+ // ought to be small.
+ m_newActiveWeakSets.append(weakSet);
+}
+
+void MarkedSpace::didAddBlock(MarkedBlock::Handle* block)
+{
+ // WARNING: This function is called before block is fully initialized. The block will not know
+ // its cellSize() or attributes(). The latter implies that you can't ask things like
+ // needsDestruction().
+ m_capacity += MarkedBlock::blockSize;
+ m_blocks.add(&block->block());
+}
+
+void MarkedSpace::didAllocateInBlock(MarkedBlock::Handle* block)
+{
+ if (block->weakSet().isOnList()) {
+ block->weakSet().remove();
+ m_newActiveWeakSets.append(&block->weakSet());
+ }
+}
+
+MarkedBlock::Handle* MarkedSpace::findEmptyBlockToSteal()
+{
+ for (; m_allocatorForEmptyAllocation; m_allocatorForEmptyAllocation = m_allocatorForEmptyAllocation->nextAllocator()) {
+ if (MarkedBlock::Handle* block = m_allocatorForEmptyAllocation->findEmptyBlockToSteal())
+ return block;
+ }
+ return nullptr;
+}
+
+void MarkedSpace::snapshotUnswept()
+{
+ if (m_heap->collectionScope() == CollectionScope::Eden) {
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.snapshotUnsweptForEdenCollection();
+ return IterationStatus::Continue;
+ });
+ } else {
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.snapshotUnsweptForFullCollection();
+ return IterationStatus::Continue;
+ });
+ }
+}
+
+void MarkedSpace::assertNoUnswept()
+{
+ if (ASSERT_DISABLED)
+ return;
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.assertNoUnswept();
+ return IterationStatus::Continue;
+ });
+}
+
+void MarkedSpace::dumpBits(PrintStream& out)
+{
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ out.print("Bits for ", allocator, ":\n");
+ allocator.dumpBits(out);
+ return IterationStatus::Continue;
+ });
+}
+
+MarkedAllocator* MarkedSpace::addMarkedAllocator(
+ const AbstractLocker&, Subspace* subspace, size_t sizeClass)
+{
+ MarkedAllocator* allocator = m_bagOfAllocators.add(heap(), subspace, sizeClass);
+ allocator->setNextAllocator(nullptr);
+
+ WTF::storeStoreFence();
+
+ if (!m_firstAllocator) {
+ m_firstAllocator = allocator;
+ m_lastAllocator = allocator;
+ m_allocatorForEmptyAllocation = allocator;
+ } else {
+ m_lastAllocator->setNextAllocator(allocator);
+ m_lastAllocator = allocator;
+ }
+
+ return allocator;
+}
+
} // namespace JSC
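The size-class construction in sizeClasses() above combines a dense run of small classes with an exponential progression whose members are then nudged to minimize per-block slop. Below is a condensed sketch of that strategy, not part of this patch, using stand-in constants in place of the real MarkedBlock, MarkedSpace, and Options values.

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    static const size_t sizeStep = 16;        // stands in for MarkedSpace::sizeStep
    static const size_t preciseCutoff = 80;   // stands in for MarkedSpace::preciseCutoff
    static const size_t blockPayload = 16320; // stands in for MarkedSpace::blockPayload
    static const size_t largeCutoff = (blockPayload / 2) & ~(sizeStep - 1);
    static const double progression = 1.4;    // stands in for Options::sizeClassProgression()

    int main()
    {
        std::vector<size_t> classes;
        // Precise classes: one per sizeStep up to the precise cutoff.
        for (size_t size = sizeStep; size < preciseCutoff; size += sizeStep)
            classes.push_back(size);
        // Exponential classes: round each candidate to sizeStep (a power of two),
        // then shrink the block-tail slop if that reduces total wastage.
        for (unsigned i = 0; ; ++i) {
            size_t approx = static_cast<size_t>(preciseCutoff * std::pow(progression, i));
            if (approx > largeCutoff)
                break;
            size_t sizeClass = (approx + sizeStep - 1) & ~(sizeStep - 1);
            size_t cellsPerBlock = blockPayload / sizeClass;
            size_t better = (blockPayload / cellsPerBlock) & ~(sizeStep - 1);
            size_t originalWastage = blockPayload - cellsPerBlock * sizeClass;
            size_t newWastage = (better - sizeClass) * cellsPerBlock;
            size_t chosen = newWastage > originalWastage ? sizeClass : better;
            if (!classes.empty() && chosen == classes.back())
                continue; // defense for a small progression factor
            if (chosen > largeCutoff)
                break;
            classes.push_back(chosen);
        }
        std::sort(classes.begin(), classes.end());
        classes.erase(std::unique(classes.begin(), classes.end()), classes.end());
        for (size_t sizeClass : classes)
            std::printf("%zu\n", sizeClass);
        return 0;
    }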
diff --git a/Source/JavaScriptCore/heap/MarkedSpace.h b/Source/JavaScriptCore/heap/MarkedSpace.h
index e853d6674..26be5e3df 100644
--- a/Source/JavaScriptCore/heap/MarkedSpace.h
+++ b/Source/JavaScriptCore/heap/MarkedSpace.h
@@ -1,7 +1,7 @@
/*
* Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
* Copyright (C) 2001 Peter Kelly (pmk@post.com)
- * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2003-2017 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -19,291 +19,227 @@
*
*/
-#ifndef MarkedSpace_h
-#define MarkedSpace_h
+#pragma once
-#include "MachineStackMarker.h"
+#include "IterationStatus.h"
+#include "LargeAllocation.h"
#include "MarkedAllocator.h"
#include "MarkedBlock.h"
#include "MarkedBlockSet.h"
#include <array>
-#include <wtf/PageAllocationAligned.h>
-#include <wtf/Bitmap.h>
-#include <wtf/DoublyLinkedList.h>
+#include <wtf/Bag.h>
#include <wtf/HashSet.h>
#include <wtf/Noncopyable.h>
+#include <wtf/RetainPtr.h>
+#include <wtf/SentinelLinkedList.h>
#include <wtf/Vector.h>
namespace JSC {
-class DelayedReleaseScope;
class Heap;
class HeapIterationScope;
-class JSCell;
-class LiveObjectIterator;
class LLIntOffsetsExtractor;
-class WeakGCHandle;
-class SlotVisitor;
+class Subspace;
+class WeakSet;
-struct ClearMarks : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block)
- {
- block->clearMarks();
- }
-};
-
-struct ClearRememberedSet : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block)
- {
- block->clearRememberedSet();
- }
-};
-
-struct Sweep : MarkedBlock::VoidFunctor {
- void operator()(MarkedBlock* block) { block->sweep(); }
-};
-
-struct MarkCount : MarkedBlock::CountFunctor {
- void operator()(MarkedBlock* block) { count(block->markCount()); }
-};
-
-struct Size : MarkedBlock::CountFunctor {
- void operator()(MarkedBlock* block) { count(block->markCount() * block->cellSize()); }
-};
+typedef uint32_t HeapVersion;
class MarkedSpace {
WTF_MAKE_NONCOPYABLE(MarkedSpace);
public:
+ // sizeStep is really a synonym for atomSize; it's no accident that they are the same.
+ static const size_t sizeStep = MarkedBlock::atomSize;
+
+ // Sizes up to this amount get a size class for each size step.
+ static const size_t preciseCutoff = 80;
+
+ // The amount of available payload in a block is the block's size minus the header. But the
+ // header size might not be atom size aligned, so we round down the result accordingly.
+ static const size_t blockPayload = (MarkedBlock::blockSize - sizeof(MarkedBlock)) & ~(MarkedBlock::atomSize - 1);
+
+ // The largest cell we're willing to allocate in a MarkedBlock the "normal way" (i.e. using size
+ // classes, rather than a large allocation) is half the size of the payload, rounded down. This
+ // ensures that we only use the size class approach if it means being able to pack two things
+ // into one block.
+ static const size_t largeCutoff = (blockPayload / 2) & ~(sizeStep - 1);
+
+ static const size_t numSizeClasses = largeCutoff / sizeStep;
+
+ static const HeapVersion nullVersion = 0; // The version of freshly allocated blocks.
+ static const HeapVersion initialVersion = 2; // The version that the heap starts out with. Set to make sure that nextVersion(nullVersion) != initialVersion.
+
+ static HeapVersion nextVersion(HeapVersion version)
+ {
+ version++;
+ if (version == nullVersion)
+ version = initialVersion;
+ return version;
+ }
+
+ static size_t sizeClassToIndex(size_t size)
+ {
+ ASSERT(size);
+ return (size + sizeStep - 1) / sizeStep - 1;
+ }
+
+ static size_t indexToSizeClass(size_t index)
+ {
+ return (index + 1) * sizeStep;
+ }
+
MarkedSpace(Heap*);
~MarkedSpace();
- void lastChanceToFinalize();
+
+ Heap* heap() const { return m_heap; }
+
+ void lastChanceToFinalize(); // You must call stopAllocating before you call this.
- MarkedAllocator& firstAllocator();
- MarkedAllocator& allocatorFor(size_t);
- MarkedAllocator& immortalStructureDestructorAllocatorFor(size_t);
- MarkedAllocator& normalDestructorAllocatorFor(size_t);
- void* allocateWithNormalDestructor(size_t);
- void* allocateWithImmortalStructureDestructor(size_t);
- void* allocateWithoutDestructor(size_t);
-
- void resetAllocators();
+ static size_t optimalSizeFor(size_t);
+
+ void prepareForAllocation();
- void visitWeakSets(HeapRootVisitor&);
+ void visitWeakSets(SlotVisitor&);
void reapWeakSets();
MarkedBlockSet& blocks() { return m_blocks; }
void willStartIterating();
- bool isIterating() { return m_isIterating; }
+ bool isIterating() const { return m_isIterating; }
void didFinishIterating();
void stopAllocating();
void resumeAllocating(); // If we just stopped allocation but we didn't do a collection, we need to resume allocation.
-
- typedef HashSet<MarkedBlock*>::iterator BlockIterator;
- template<typename Functor> typename Functor::ReturnType forEachLiveCell(HeapIterationScope&, Functor&);
- template<typename Functor> typename Functor::ReturnType forEachLiveCell(HeapIterationScope&);
- template<typename Functor> typename Functor::ReturnType forEachDeadCell(HeapIterationScope&, Functor&);
- template<typename Functor> typename Functor::ReturnType forEachDeadCell(HeapIterationScope&);
- template<typename Functor> typename Functor::ReturnType forEachBlock(Functor&);
- template<typename Functor> typename Functor::ReturnType forEachBlock();
+ void prepareForMarking();
+ void prepareForConservativeScan();
+
+ typedef HashSet<MarkedBlock*>::iterator BlockIterator;
+
+ template<typename Functor> void forEachLiveCell(HeapIterationScope&, const Functor&);
+ template<typename Functor> void forEachDeadCell(HeapIterationScope&, const Functor&);
+ template<typename Functor> void forEachBlock(const Functor&);
+
void shrink();
- void freeBlock(MarkedBlock*);
- void freeOrShrinkBlock(MarkedBlock*);
+ void freeBlock(MarkedBlock::Handle*);
+ void freeOrShrinkBlock(MarkedBlock::Handle*);
- void didAddBlock(MarkedBlock*);
- void didConsumeFreeList(MarkedBlock*);
- void didAllocateInBlock(MarkedBlock*);
+ void didAddBlock(MarkedBlock::Handle*);
+ void didConsumeFreeList(MarkedBlock::Handle*);
+ void didAllocateInBlock(MarkedBlock::Handle*);
- void clearMarks();
- void clearRememberedSet();
+ void beginMarking();
+ void endMarking();
+ void snapshotUnswept();
void clearNewlyAllocated();
void sweep();
+ void sweepLargeAllocations();
+ void assertNoUnswept();
size_t objectCount();
size_t size();
size_t capacity();
bool isPagedOut(double deadline);
+
+ HeapVersion markingVersion() const { return m_markingVersion; }
+ HeapVersion newlyAllocatedVersion() const { return m_newlyAllocatedVersion; }
-#if USE(CF)
- template<typename T> void releaseSoon(RetainPtr<T>&&);
-#endif
-
+ const Vector<LargeAllocation*>& largeAllocations() const { return m_largeAllocations; }
+ unsigned largeAllocationsNurseryOffset() const { return m_largeAllocationsNurseryOffset; }
+ unsigned largeAllocationsOffsetForThisCollection() const { return m_largeAllocationsOffsetForThisCollection; }
+
+ // These are cached pointers and offsets for quickly searching the large allocations that are
+ // relevant to this collection.
+ LargeAllocation** largeAllocationsForThisCollectionBegin() const { return m_largeAllocationsForThisCollectionBegin; }
+ LargeAllocation** largeAllocationsForThisCollectionEnd() const { return m_largeAllocationsForThisCollectionEnd; }
+ unsigned largeAllocationsForThisCollectionSize() const { return m_largeAllocationsForThisCollectionSize; }
+
+ MarkedAllocator* firstAllocator() const { return m_firstAllocator; }
+ MarkedAllocator* allocatorForEmptyAllocation() const { return m_allocatorForEmptyAllocation; }
+
+ MarkedBlock::Handle* findEmptyBlockToSteal();
+
+ Lock& allocatorLock() { return m_allocatorLock; }
+ MarkedAllocator* addMarkedAllocator(const AbstractLocker&, Subspace*, size_t cellSize);
+
+ // When this is true it means that we have flipped but the mark bits haven't converged yet.
+ bool isMarking() const { return m_isMarking; }
+
+ void dumpBits(PrintStream& = WTF::dataFile());
+
+ JS_EXPORT_PRIVATE static std::array<size_t, numSizeClasses> s_sizeClassForSizeStep;
+
private:
- friend class DelayedReleaseScope;
friend class LLIntOffsetsExtractor;
+ friend class JIT;
+ friend class WeakSet;
+ friend class Subspace;
+
+ void* allocateSlow(Subspace&, GCDeferralContext*, size_t);
+ void* tryAllocateSlow(Subspace&, GCDeferralContext*, size_t);
- template<typename Functor> void forEachAllocator(Functor&);
- template<typename Functor> void forEachAllocator();
-
- // [ 32... 128 ]
- static const size_t preciseStep = MarkedBlock::atomSize;
- static const size_t preciseCutoff = 128;
- static const size_t preciseCount = preciseCutoff / preciseStep;
+ static void initializeSizeClassForStepSize();
+
+ void initializeSubspace(Subspace&);
- // [ 1024... blockSize ]
- static const size_t impreciseStep = 2 * preciseCutoff;
- static const size_t impreciseCutoff = MarkedBlock::blockSize / 2;
- static const size_t impreciseCount = impreciseCutoff / impreciseStep;
+ template<typename Functor> inline void forEachAllocator(const Functor&);
+
+ void addActiveWeakSet(WeakSet*);
- struct Subspace {
- std::array<MarkedAllocator, preciseCount> preciseAllocators;
- std::array<MarkedAllocator, impreciseCount> impreciseAllocators;
- MarkedAllocator largeAllocator;
- };
+ Vector<Subspace*> m_subspaces;
- Subspace m_normalDestructorSpace;
- Subspace m_immortalStructureDestructorSpace;
- Subspace m_normalSpace;
+ Vector<LargeAllocation*> m_largeAllocations;
+ unsigned m_largeAllocationsNurseryOffset { 0 };
+ unsigned m_largeAllocationsOffsetForThisCollection { 0 };
+ unsigned m_largeAllocationsNurseryOffsetForSweep { 0 };
+ LargeAllocation** m_largeAllocationsForThisCollectionBegin { nullptr };
+ LargeAllocation** m_largeAllocationsForThisCollectionEnd { nullptr };
+ unsigned m_largeAllocationsForThisCollectionSize { 0 };
Heap* m_heap;
+ HeapVersion m_markingVersion { initialVersion };
+ HeapVersion m_newlyAllocatedVersion { initialVersion };
size_t m_capacity;
bool m_isIterating;
+ bool m_isMarking { false };
MarkedBlockSet m_blocks;
- Vector<MarkedBlock*> m_blocksWithNewObjects;
-
- DelayedReleaseScope* m_currentDelayedReleaseScope;
+
+ SentinelLinkedList<WeakSet, BasicRawSentinelNode<WeakSet>> m_activeWeakSets;
+ SentinelLinkedList<WeakSet, BasicRawSentinelNode<WeakSet>> m_newActiveWeakSets;
+
+ Lock m_allocatorLock;
+ Bag<MarkedAllocator> m_bagOfAllocators;
+ MarkedAllocator* m_firstAllocator { nullptr };
+ MarkedAllocator* m_lastAllocator { nullptr };
+ MarkedAllocator* m_allocatorForEmptyAllocation { nullptr };
};
-template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachLiveCell(HeapIterationScope&, Functor& functor)
+template <typename Functor> inline void MarkedSpace::forEachBlock(const Functor& functor)
{
- ASSERT(isIterating());
- BlockIterator end = m_blocks.set().end();
- for (BlockIterator it = m_blocks.set().begin(); it != end; ++it)
- (*it)->forEachLiveCell(functor);
- return functor.returnValue();
+ forEachAllocator(
+ [&] (MarkedAllocator& allocator) -> IterationStatus {
+ allocator.forEachBlock(functor);
+ return IterationStatus::Continue;
+ });
}
-template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachLiveCell(HeapIterationScope& scope)
+template <typename Functor>
+void MarkedSpace::forEachAllocator(const Functor& functor)
{
- Functor functor;
- return forEachLiveCell(scope, functor);
-}
-
-template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachDeadCell(HeapIterationScope&, Functor& functor)
-{
- ASSERT(isIterating());
- BlockIterator end = m_blocks.set().end();
- for (BlockIterator it = m_blocks.set().begin(); it != end; ++it)
- (*it)->forEachDeadCell(functor);
- return functor.returnValue();
-}
-
-template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachDeadCell(HeapIterationScope& scope)
-{
- Functor functor;
- return forEachDeadCell(scope, functor);
-}
-
-inline MarkedAllocator& MarkedSpace::allocatorFor(size_t bytes)
-{
- ASSERT(bytes);
- if (bytes <= preciseCutoff)
- return m_normalSpace.preciseAllocators[(bytes - 1) / preciseStep];
- if (bytes <= impreciseCutoff)
- return m_normalSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
- return m_normalSpace.largeAllocator;
-}
-
-inline MarkedAllocator& MarkedSpace::immortalStructureDestructorAllocatorFor(size_t bytes)
-{
- ASSERT(bytes);
- if (bytes <= preciseCutoff)
- return m_immortalStructureDestructorSpace.preciseAllocators[(bytes - 1) / preciseStep];
- if (bytes <= impreciseCutoff)
- return m_immortalStructureDestructorSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
- return m_immortalStructureDestructorSpace.largeAllocator;
+ for (MarkedAllocator* allocator = m_firstAllocator; allocator; allocator = allocator->nextAllocator()) {
+ if (functor(*allocator) == IterationStatus::Done)
+ return;
+ }
}
-inline MarkedAllocator& MarkedSpace::normalDestructorAllocatorFor(size_t bytes)
+ALWAYS_INLINE size_t MarkedSpace::optimalSizeFor(size_t bytes)
{
ASSERT(bytes);
if (bytes <= preciseCutoff)
- return m_normalDestructorSpace.preciseAllocators[(bytes - 1) / preciseStep];
- if (bytes <= impreciseCutoff)
- return m_normalDestructorSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
- return m_normalDestructorSpace.largeAllocator;
-}
-
-inline void* MarkedSpace::allocateWithoutDestructor(size_t bytes)
-{
- return allocatorFor(bytes).allocate(bytes);
-}
-
-inline void* MarkedSpace::allocateWithImmortalStructureDestructor(size_t bytes)
-{
- return immortalStructureDestructorAllocatorFor(bytes).allocate(bytes);
-}
-
-inline void* MarkedSpace::allocateWithNormalDestructor(size_t bytes)
-{
- return normalDestructorAllocatorFor(bytes).allocate(bytes);
-}
-
-template <typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachBlock(Functor& functor)
-{
- for (size_t i = 0; i < preciseCount; ++i) {
- m_normalSpace.preciseAllocators[i].forEachBlock(functor);
- m_normalDestructorSpace.preciseAllocators[i].forEachBlock(functor);
- m_immortalStructureDestructorSpace.preciseAllocators[i].forEachBlock(functor);
- }
-
- for (size_t i = 0; i < impreciseCount; ++i) {
- m_normalSpace.impreciseAllocators[i].forEachBlock(functor);
- m_normalDestructorSpace.impreciseAllocators[i].forEachBlock(functor);
- m_immortalStructureDestructorSpace.impreciseAllocators[i].forEachBlock(functor);
- }
-
- m_normalSpace.largeAllocator.forEachBlock(functor);
- m_normalDestructorSpace.largeAllocator.forEachBlock(functor);
- m_immortalStructureDestructorSpace.largeAllocator.forEachBlock(functor);
-
- return functor.returnValue();
-}
-
-template <typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachBlock()
-{
- Functor functor;
- return forEachBlock(functor);
-}
-
-inline void MarkedSpace::didAddBlock(MarkedBlock* block)
-{
- m_capacity += block->capacity();
- m_blocks.add(block);
-}
-
-inline void MarkedSpace::didAllocateInBlock(MarkedBlock* block)
-{
-#if ENABLE(GGC)
- m_blocksWithNewObjects.append(block);
-#else
- UNUSED_PARAM(block);
-#endif
-}
-
-inline void MarkedSpace::clearRememberedSet()
-{
- forEachBlock<ClearRememberedSet>();
-}
-
-inline size_t MarkedSpace::objectCount()
-{
- return forEachBlock<MarkCount>();
-}
-
-inline size_t MarkedSpace::size()
-{
- return forEachBlock<Size>();
-}
-
-inline size_t MarkedSpace::capacity()
-{
- return m_capacity;
+ return WTF::roundUpToMultipleOf<sizeStep>(bytes);
+ if (bytes <= largeCutoff)
+ return s_sizeClassForSizeStep[sizeClassToIndex(bytes)];
+ return bytes;
}
} // namespace JSC
-
-#endif // MarkedSpace_h
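The optimalSizeFor() fast path declared in MarkedSpace.h above relies on sizeClassToIndex() and indexToSizeClass() being exact inverses on size-class boundaries. Here is a small self-contained check of that mapping, not part of this patch; the sizeStep and largeCutoff constants are illustrative stand-ins.

    #include <cassert>
    #include <cstddef>

    static const size_t sizeStep = 16;
    static const size_t largeCutoff = 8160;
    static const size_t numSizeClasses = largeCutoff / sizeStep;

    static size_t sizeClassToIndex(size_t size) { return (size + sizeStep - 1) / sizeStep - 1; }
    static size_t indexToSizeClass(size_t index) { return (index + 1) * sizeStep; }

    int main()
    {
        // Round-trip: every index maps to a size class that maps back to the same index.
        for (size_t index = 0; index < numSizeClasses; ++index)
            assert(sizeClassToIndex(indexToSizeClass(index)) == index);

        // Requests of 97..112 bytes all share the 112-byte bucket.
        assert(sizeClassToIndex(97) == sizeClassToIndex(112));
        assert(indexToSizeClass(sizeClassToIndex(100)) == 112);
        return 0;
    }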
diff --git a/Source/JavaScriptCore/heap/MarkedSpaceInlines.h b/Source/JavaScriptCore/heap/MarkedSpaceInlines.h
new file mode 100644
index 000000000..e629cb094
--- /dev/null
+++ b/Source/JavaScriptCore/heap/MarkedSpaceInlines.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "MarkedBlockInlines.h"
+#include "MarkedSpace.h"
+
+namespace JSC {
+
+template<typename Functor> inline void MarkedSpace::forEachLiveCell(HeapIterationScope&, const Functor& functor)
+{
+ ASSERT(isIterating());
+ BlockIterator end = m_blocks.set().end();
+ for (BlockIterator it = m_blocks.set().begin(); it != end; ++it) {
+ if ((*it)->handle().forEachLiveCell(functor) == IterationStatus::Done)
+ return;
+ }
+ for (LargeAllocation* allocation : m_largeAllocations) {
+ if (allocation->isLive()) {
+ if (functor(allocation->cell(), allocation->attributes().cellKind) == IterationStatus::Done)
+ return;
+ }
+ }
+}
+
+template<typename Functor> inline void MarkedSpace::forEachDeadCell(HeapIterationScope&, const Functor& functor)
+{
+ ASSERT(isIterating());
+ BlockIterator end = m_blocks.set().end();
+ for (BlockIterator it = m_blocks.set().begin(); it != end; ++it) {
+ if ((*it)->handle().forEachDeadCell(functor) == IterationStatus::Done)
+ return;
+ }
+ for (LargeAllocation* allocation : m_largeAllocations) {
+ if (!allocation->isLive()) {
+ if (functor(allocation->cell(), allocation->attributes().cellKind) == IterationStatus::Done)
+ return;
+ }
+ }
+}
+
+} // namespace JSC
+
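With the functor-based iteration introduced above, callers pass a lambda that returns an IterationStatus so the walk can stop early. The fragment below is a hypothetical usage sketch, not part of this patch; it assumes a MarkedSpace named space and an active HeapIterationScope named scope already exist.

    size_t liveCellCount = 0;
    space.forEachLiveCell(
        scope,
        [&] (HeapCell*, HeapCell::Kind) -> IterationStatus {
            ++liveCellCount;                  // inspect or visit the cell here
            return IterationStatus::Continue; // return IterationStatus::Done to stop early
        });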
diff --git a/Source/JavaScriptCore/heap/MarkingConstraint.cpp b/Source/JavaScriptCore/heap/MarkingConstraint.cpp
new file mode 100644
index 000000000..39a308187
--- /dev/null
+++ b/Source/JavaScriptCore/heap/MarkingConstraint.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "MarkingConstraint.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+MarkingConstraint::MarkingConstraint(
+ CString abbreviatedName, CString name,
+ ::Function<void(SlotVisitor&, const VisitingTimeout&)> executeFunction,
+ ConstraintVolatility volatility)
+ : m_abbreviatedName(abbreviatedName)
+ , m_name(WTFMove(name))
+ , m_executeFunction(WTFMove(executeFunction))
+ , m_volatility(volatility)
+{
+}
+
+MarkingConstraint::MarkingConstraint(
+ CString abbreviatedName, CString name,
+ ::Function<void(SlotVisitor&, const VisitingTimeout&)> executeFunction,
+ ::Function<double(SlotVisitor&)> quickWorkEstimateFunction,
+ ConstraintVolatility volatility)
+ : m_abbreviatedName(abbreviatedName)
+ , m_name(WTFMove(name))
+ , m_executeFunction(WTFMove(executeFunction))
+ , m_quickWorkEstimateFunction(WTFMove(quickWorkEstimateFunction))
+ , m_volatility(volatility)
+{
+}
+
+MarkingConstraint::~MarkingConstraint()
+{
+}
+
+void MarkingConstraint::resetStats()
+{
+ m_lastVisitCount = 0;
+}
+
+void MarkingConstraint::execute(SlotVisitor& visitor, bool& didVisitSomething, MonotonicTime timeout)
+{
+ if (Options::logGC())
+ dataLog(abbreviatedName());
+ VisitingTimeout visitingTimeout(visitor, didVisitSomething, timeout);
+ m_executeFunction(visitor, visitingTimeout);
+ m_lastVisitCount = visitingTimeout.visitCount(visitor);
+ didVisitSomething = visitingTimeout.didVisitSomething(visitor);
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/MarkingConstraint.h b/Source/JavaScriptCore/heap/MarkingConstraint.h
new file mode 100644
index 000000000..4c57419de
--- /dev/null
+++ b/Source/JavaScriptCore/heap/MarkingConstraint.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "ConstraintVolatility.h"
+#include "VisitingTimeout.h"
+#include <wtf/FastMalloc.h>
+#include <wtf/Function.h>
+#include <wtf/MonotonicTime.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/text/CString.h>
+
+namespace JSC {
+
+class MarkingConstraintSet;
+class SlotVisitor;
+
+class MarkingConstraint {
+ WTF_MAKE_NONCOPYABLE(MarkingConstraint);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ JS_EXPORT_PRIVATE MarkingConstraint(
+ CString abbreviatedName, CString name,
+ ::Function<void(SlotVisitor&, const VisitingTimeout&)>,
+ ConstraintVolatility);
+
+ JS_EXPORT_PRIVATE MarkingConstraint(
+ CString abbreviatedName, CString name,
+ ::Function<void(SlotVisitor&, const VisitingTimeout&)>,
+ ::Function<double(SlotVisitor&)>,
+ ConstraintVolatility);
+
+ JS_EXPORT_PRIVATE ~MarkingConstraint();
+
+ unsigned index() const { return m_index; }
+
+ const char* abbreviatedName() const { return m_abbreviatedName.data(); }
+ const char* name() const { return m_name.data(); }
+
+ void resetStats();
+
+ size_t lastVisitCount() const { return m_lastVisitCount; }
+
+ void execute(SlotVisitor&, bool& didVisitSomething, MonotonicTime timeout);
+
+ double quickWorkEstimate(SlotVisitor& visitor)
+ {
+ if (!m_quickWorkEstimateFunction)
+ return 0;
+ return m_quickWorkEstimateFunction(visitor);
+ }
+
+ double workEstimate(SlotVisitor& visitor)
+ {
+ return lastVisitCount() + quickWorkEstimate(visitor);
+ }
+
+ ConstraintVolatility volatility() const { return m_volatility; }
+
+private:
+ friend class MarkingConstraintSet; // So it can set m_index.
+
+ unsigned m_index { UINT_MAX };
+ CString m_abbreviatedName;
+ CString m_name;
+ ::Function<void(SlotVisitor&, const VisitingTimeout& timeout)> m_executeFunction;
+ ::Function<double(SlotVisitor&)> m_quickWorkEstimateFunction;
+ ConstraintVolatility m_volatility;
+ size_t m_lastVisitCount { 0 };
+};
+
+} // namespace JSC
+
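Usage sketch (not part of this patch): the two-argument execute form declared above can be instantiated with a lambda such as the one below. The constraint names, the helper function, and the lambda body are illustrative placeholders, not code from this change.

    #include "MarkingConstraint.h"
    #include <memory>

    static std::unique_ptr<JSC::MarkingConstraint> makeExampleConstraint()
    {
        using namespace JSC;
        // GreyedByExecution: running the constraint is what discovers new work,
        // so the constraint set re-runs it whenever it has not run in this cycle.
        return std::make_unique<MarkingConstraint>(
            "Ex", "Example roots",
            [] (SlotVisitor& visitor, const VisitingTimeout&) {
                // Re-append whatever roots this subsystem owns into the visitor.
                UNUSED_PARAM(visitor);
            },
            ConstraintVolatility::GreyedByExecution);
    }
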
diff --git a/Source/JavaScriptCore/heap/MarkingConstraintSet.cpp b/Source/JavaScriptCore/heap/MarkingConstraintSet.cpp
new file mode 100644
index 000000000..01c06e75c
--- /dev/null
+++ b/Source/JavaScriptCore/heap/MarkingConstraintSet.cpp
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "MarkingConstraintSet.h"
+
+#include "Options.h"
+#include <wtf/TimeWithDynamicClockType.h>
+
+namespace JSC {
+
+class MarkingConstraintSet::ExecutionContext {
+public:
+ ExecutionContext(MarkingConstraintSet& set, SlotVisitor& visitor, MonotonicTime timeout)
+ : m_set(set)
+ , m_visitor(visitor)
+ , m_timeout(timeout)
+ {
+ }
+
+ bool didVisitSomething() const
+ {
+ return m_didVisitSomething;
+ }
+
+ bool shouldTimeOut() const
+ {
+ return didVisitSomething() && hasElapsed(m_timeout);
+ }
+
+ // Returns false if it times out.
+ bool drain(BitVector& unexecuted)
+ {
+ for (size_t index : unexecuted) {
+ execute(index);
+ unexecuted.clear(index);
+ if (shouldTimeOut())
+ return false;
+ }
+ return true;
+ }
+
+ bool didExecute(size_t index) const { return m_executed.get(index); }
+
+ void execute(size_t index)
+ {
+ m_set.m_set[index]->execute(m_visitor, m_didVisitSomething, m_timeout);
+ m_executed.set(index);
+ }
+
+private:
+ MarkingConstraintSet& m_set;
+ SlotVisitor& m_visitor;
+ MonotonicTime m_timeout;
+ BitVector m_executed;
+ bool m_didVisitSomething { false };
+};
+
+MarkingConstraintSet::MarkingConstraintSet()
+{
+}
+
+MarkingConstraintSet::~MarkingConstraintSet()
+{
+}
+
+void MarkingConstraintSet::didStartMarking()
+{
+ m_unexecutedRoots.clearAll();
+ m_unexecutedOutgrowths.clearAll();
+ for (auto& constraint : m_set) {
+ constraint->resetStats();
+ switch (constraint->volatility()) {
+ case ConstraintVolatility::GreyedByExecution:
+ m_unexecutedRoots.set(constraint->index());
+ break;
+ case ConstraintVolatility::GreyedByMarking:
+ m_unexecutedOutgrowths.set(constraint->index());
+ break;
+ case ConstraintVolatility::SeldomGreyed:
+ break;
+ }
+ }
+ m_iteration = 1;
+}
+
+void MarkingConstraintSet::add(CString abbreviatedName, CString name, Function<void(SlotVisitor&, const VisitingTimeout&)> function, ConstraintVolatility volatility)
+{
+ add(std::make_unique<MarkingConstraint>(WTFMove(abbreviatedName), WTFMove(name), WTFMove(function), volatility));
+}
+
+void MarkingConstraintSet::add(
+ CString abbreviatedName, CString name,
+ Function<void(SlotVisitor&, const VisitingTimeout&)> executeFunction,
+ Function<double(SlotVisitor&)> quickWorkEstimateFunction,
+ ConstraintVolatility volatility)
+{
+ add(std::make_unique<MarkingConstraint>(WTFMove(abbreviatedName), WTFMove(name), WTFMove(executeFunction), WTFMove(quickWorkEstimateFunction), volatility));
+}
+
+void MarkingConstraintSet::add(
+ std::unique_ptr<MarkingConstraint> constraint)
+{
+ constraint->m_index = m_set.size();
+ m_ordered.append(constraint.get());
+ if (constraint->volatility() == ConstraintVolatility::GreyedByMarking)
+ m_outgrowths.append(constraint.get());
+ m_set.append(WTFMove(constraint));
+}
+
+bool MarkingConstraintSet::executeConvergence(SlotVisitor& visitor, MonotonicTime timeout)
+{
+ bool result = executeConvergenceImpl(visitor, timeout);
+ if (Options::logGC())
+ dataLog(" ");
+ return result;
+}
+
+bool MarkingConstraintSet::isWavefrontAdvancing(SlotVisitor& visitor)
+{
+ for (MarkingConstraint* outgrowth : m_outgrowths) {
+ if (outgrowth->workEstimate(visitor))
+ return true;
+ }
+ return false;
+}
+
+bool MarkingConstraintSet::executeConvergenceImpl(SlotVisitor& visitor, MonotonicTime timeout)
+{
+ ExecutionContext executionContext(*this, visitor, timeout);
+
+ unsigned iteration = m_iteration++;
+
+ if (Options::logGC())
+ dataLog("i#", iteration, ":");
+
+ // If there are any constraints that we have not executed at all during this cycle, then
+ // we should execute those now.
+ if (!executionContext.drain(m_unexecutedRoots))
+ return false;
+
+ // First iteration is before any visitor draining, so it's unlikely to trigger any constraints other
+ // than roots.
+ if (iteration == 1)
+ return false;
+
+ if (!executionContext.drain(m_unexecutedOutgrowths))
+ return false;
+
+ // We want to keep preferring the outgrowth constraints - the ones that need to be fixpointed
+ // even in a stop-the-world GC - until they stop producing. They have a tendency to go totally
+ // silent at some point during GC, at which point it makes sense not to run them again until
+ // the end. Outgrowths producing new information corresponds almost exactly to the wavefront
+ // advancing: it usually means that we are marking objects that should be marked based on
+ // other objects that we would have marked anyway. Once the wavefront is no longer advancing,
+ // we want to run mostly the root constraints (based on their predictions of how much work
+ // they will have) because at this point we are just trying to outpace the retreating
+ // wavefront.
+ //
+ // Note that this function (executeConvergenceImpl) only returns true if it runs all
+ // constraints. So, all we are controlling are the heuristics that say which constraints to
+ // run first. Choosing the constraints that are the most likely to produce means running fewer
+ // constraints before returning.
+ bool isWavefrontAdvancing = this->isWavefrontAdvancing(visitor);
+
+ std::sort(
+ m_ordered.begin(), m_ordered.end(),
+ [&] (MarkingConstraint* a, MarkingConstraint* b) -> bool {
+ // Remember: return true if a should come before b.
+
+ auto volatilityScore = [] (MarkingConstraint* constraint) -> unsigned {
+ return constraint->volatility() == ConstraintVolatility::GreyedByMarking ? 1 : 0;
+ };
+
+ unsigned aVolatilityScore = volatilityScore(a);
+ unsigned bVolatilityScore = volatilityScore(b);
+
+ if (aVolatilityScore != bVolatilityScore) {
+ if (isWavefrontAdvancing)
+ return aVolatilityScore > bVolatilityScore;
+ else
+ return aVolatilityScore < bVolatilityScore;
+ }
+
+ double aWorkEstimate = a->workEstimate(visitor);
+ double bWorkEstimate = b->workEstimate(visitor);
+
+ if (aWorkEstimate != bWorkEstimate)
+ return aWorkEstimate > bWorkEstimate;
+
+ // This causes us to use SeldomGreyed vs GreyedByExecution as a final tie-breaker.
+ return a->volatility() > b->volatility();
+ });
+
+ for (MarkingConstraint* constraint : m_ordered) {
+ size_t i = constraint->index();
+
+ if (executionContext.didExecute(i))
+ continue;
+ executionContext.execute(i);
+
+ // Once we're in convergence, it makes the most sense to let some marking happen anytime
+ // we find work.
+ // FIXME: Maybe this should execute all constraints until timeout? Not clear if that's
+ // better or worse. Maybe even better is this:
+ // - If the visitor is empty, keep running.
+        //   - If the visitor has at least N things, return.
+ // - Else run until timeout.
+ // https://bugs.webkit.org/show_bug.cgi?id=166832
+ if (executionContext.didVisitSomething())
+ return false;
+ }
+
+ return true;
+}
+
+void MarkingConstraintSet::executeAll(SlotVisitor& visitor)
+{
+ bool didVisitSomething = false;
+ for (auto& constraint : m_set)
+ constraint->execute(visitor, didVisitSomething, MonotonicTime::infinity());
+ if (Options::logGC())
+ dataLog(" ");
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/MarkingConstraintSet.h b/Source/JavaScriptCore/heap/MarkingConstraintSet.h
new file mode 100644
index 000000000..40e616fd0
--- /dev/null
+++ b/Source/JavaScriptCore/heap/MarkingConstraintSet.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "MarkingConstraint.h"
+#include <wtf/BitVector.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class MarkingConstraintSet {
+public:
+ MarkingConstraintSet();
+ ~MarkingConstraintSet();
+
+ void didStartMarking();
+
+ void add(
+ CString abbreviatedName,
+ CString name,
+ ::Function<void(SlotVisitor&, const VisitingTimeout&)>,
+ ConstraintVolatility);
+
+ void add(
+ CString abbreviatedName,
+ CString name,
+ ::Function<void(SlotVisitor&, const VisitingTimeout&)>,
+ ::Function<double(SlotVisitor&)>,
+ ConstraintVolatility);
+
+ void add(std::unique_ptr<MarkingConstraint>);
+
+ // Assuming that the mark stacks are all empty, this will give you a guess as to whether or
+ // not the wavefront is advancing.
+ bool isWavefrontAdvancing(SlotVisitor&);
+ bool isWavefrontRetreating(SlotVisitor& visitor) { return !isWavefrontAdvancing(visitor); }
+
+ // Returns true if this executed all constraints and none of them produced new work. This
+    // assumes that you've already visited roots and drained from there.
+ bool executeConvergence(
+ SlotVisitor&,
+ MonotonicTime timeout = MonotonicTime::infinity());
+
+ // Simply runs all constraints without any shenanigans.
+ void executeAll(SlotVisitor&);
+
+private:
+ class ExecutionContext;
+ friend class ExecutionContext;
+
+ bool executeConvergenceImpl(SlotVisitor&, MonotonicTime timeout);
+
+ bool drain(SlotVisitor&, MonotonicTime, BitVector& unexecuted, BitVector& executed, bool& didVisitSomething);
+
+ BitVector m_unexecutedRoots;
+ BitVector m_unexecutedOutgrowths;
+ Vector<std::unique_ptr<MarkingConstraint>> m_set;
+ Vector<MarkingConstraint*> m_ordered;
+ Vector<MarkingConstraint*> m_outgrowths;
+ unsigned m_iteration { 1 };
+};
+
+} // namespace JSC
+
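Usage sketch (not part of this patch): registering a constraint and driving the set to convergence. The constraint body, the drain call between convergence attempts, and the 10ms time slice are assumptions about how a caller such as Heap would use this interface, not code from this change.

    void runConstraintsToFixpoint(JSC::SlotVisitor& visitor)
    {
        using namespace JSC;

        MarkingConstraintSet constraintSet;
        constraintSet.add(
            "Cr", "Conservative roots",
            [] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
                // Scan stacks and registers into slotVisitor here.
                UNUSED_PARAM(slotVisitor);
            },
            ConstraintVolatility::GreyedByExecution);

        constraintSet.didStartMarking();
        // executeConvergence() returns true only once every constraint has run
        // and none of them produced new marking work.
        while (!constraintSet.executeConvergence(visitor, MonotonicTime::now() + Seconds(0.01)))
            visitor.drain(); // assumed: drain whatever the constraints appended
        constraintSet.executeAll(visitor); // final unconditional pass
    }
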
diff --git a/Source/JavaScriptCore/heap/MutatorScheduler.cpp b/Source/JavaScriptCore/heap/MutatorScheduler.cpp
new file mode 100644
index 000000000..305b8b6a8
--- /dev/null
+++ b/Source/JavaScriptCore/heap/MutatorScheduler.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "MutatorScheduler.h"
+
+#include <wtf/TimeWithDynamicClockType.h>
+
+namespace JSC {
+
+MutatorScheduler::MutatorScheduler()
+{
+}
+
+MutatorScheduler::~MutatorScheduler()
+{
+}
+
+void MutatorScheduler::didStop()
+{
+}
+
+void MutatorScheduler::willResume()
+{
+}
+
+void MutatorScheduler::didReachTermination()
+{
+}
+
+void MutatorScheduler::didExecuteConstraints()
+{
+}
+
+void MutatorScheduler::synchronousDrainingDidStall()
+{
+}
+
+void MutatorScheduler::log()
+{
+}
+
+bool MutatorScheduler::shouldStop()
+{
+ return hasElapsed(timeToStop());
+}
+
+bool MutatorScheduler::shouldResume()
+{
+ return hasElapsed(timeToResume());
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/MutatorScheduler.h b/Source/JavaScriptCore/heap/MutatorScheduler.h
new file mode 100644
index 000000000..63652e533
--- /dev/null
+++ b/Source/JavaScriptCore/heap/MutatorScheduler.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/FastMalloc.h>
+#include <wtf/MonotonicTime.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+class MutatorScheduler {
+ WTF_MAKE_NONCOPYABLE(MutatorScheduler);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ enum State {
+ Normal, // Not collecting.
+ Stopped,
+ Resumed
+ };
+
+ MutatorScheduler();
+ virtual ~MutatorScheduler();
+
+ virtual State state() const = 0;
+
+ virtual void beginCollection() = 0;
+
+ virtual void didStop();
+ virtual void willResume();
+
+    // At the top of an iteration, the GC may call didReachTermination.
+ virtual void didReachTermination();
+
+ // If it called didReachTermination, it will then later call didExecuteConstraints.
+ virtual void didExecuteConstraints();
+
+    // After doing that, it will do synchronous draining. When that draining stalls, whether due to
+    // timeout or simply because it ran out of work, the GC will call this.
+ virtual void synchronousDrainingDidStall();
+
+ virtual MonotonicTime timeToStop() = 0; // Call while resumed, to ask when to stop.
+ virtual MonotonicTime timeToResume() = 0; // Call while stopped, to ask when to resume.
+
+ virtual void log();
+
+ bool shouldStop(); // Call while resumed, to ask if we should stop now.
+ bool shouldResume(); // Call while stopped, to ask if we should resume now.
+
+ virtual void endCollection() = 0;
+};
+
+} // namespace JSC
+
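Sketch of a minimal concrete scheduler (illustrative only, not part of this patch): it stops the mutator for the whole collection by reporting an immediate stop time and a never-arriving resume time. The class name and state transitions are assumptions.

    #include "MutatorScheduler.h"

    class StopTheWorldScheduler final : public JSC::MutatorScheduler {
    public:
        State state() const override { return m_state; }
        void beginCollection() override { m_state = Stopped; }
        // Called while resumed: stop right away.
        MonotonicTime timeToStop() override { return MonotonicTime::now(); }
        // Called while stopped: never resume until the collection ends.
        MonotonicTime timeToResume() override { return MonotonicTime::infinity(); }
        void endCollection() override { m_state = Normal; }
    private:
        State m_state { Normal };
    };
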
diff --git a/Source/JavaScriptCore/heap/MutatorState.cpp b/Source/JavaScriptCore/heap/MutatorState.cpp
new file mode 100644
index 000000000..5a90b7b94
--- /dev/null
+++ b/Source/JavaScriptCore/heap/MutatorState.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "MutatorState.h"
+
+#include <wtf/PrintStream.h>
+
+using namespace JSC;
+
+namespace WTF {
+
+void printInternal(PrintStream& out, MutatorState state)
+{
+ switch (state) {
+ case MutatorState::Running:
+ out.print("Running");
+ return;
+ case MutatorState::Allocating:
+ out.print("Allocating");
+ return;
+ case MutatorState::Sweeping:
+ out.print("Sweeping");
+ return;
+ case MutatorState::Collecting:
+ out.print("Collecting");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/heap/MutatorState.h b/Source/JavaScriptCore/heap/MutatorState.h
new file mode 100644
index 000000000..38a1981d0
--- /dev/null
+++ b/Source/JavaScriptCore/heap/MutatorState.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace JSC {
+
+enum class MutatorState {
+ // The mutator is running when it's not inside a Heap slow path.
+ Running,
+
+ // The mutator is in an allocation slow path.
+ Allocating,
+
+ // The mutator is sweeping.
+ Sweeping,
+
+ // The mutator is collecting.
+ Collecting
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::MutatorState);
+
+} // namespace WTF
+
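Because printInternal is declared in namespace WTF, the enum can be passed straight to WTF's printing helpers. A small illustrative sketch, not part of this patch:

    #include "MutatorState.h"
    #include <wtf/DataLog.h>

    void logMutatorState(JSC::MutatorState state)
    {
        // dataLog() routes enum arguments through WTF::printInternal above,
        // so this prints, e.g., "mutator state: Allocating".
        dataLog("mutator state: ", state, "\n");
    }
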
diff --git a/Source/JavaScriptCore/heap/CopyWriteBarrier.h b/Source/JavaScriptCore/heap/OpaqueRootSet.h
index 847712666..f190989b2 100644
--- a/Source/JavaScriptCore/heap/CopyWriteBarrier.h
+++ b/Source/JavaScriptCore/heap/OpaqueRootSet.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2014-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,68 +23,69 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CopyWriteBarrier_h
-#define CopyWriteBarrier_h
+#pragma once
-#include "Heap.h"
+#include <wtf/HashSet.h>
namespace JSC {
-template <typename T>
-class CopyWriteBarrier {
+class OpaqueRootSet {
+ WTF_MAKE_NONCOPYABLE(OpaqueRootSet);
public:
- CopyWriteBarrier()
- : m_value(0)
+ OpaqueRootSet()
+ : m_lastQueriedRoot(nullptr)
+ , m_containsLastQueriedRoot(false)
{
}
-
- CopyWriteBarrier(VM& vm, const JSCell* owner, T& value)
+
+ bool contains(void* root) const
{
- this->set(vm, owner, &value);
+ if (root != m_lastQueriedRoot) {
+ m_lastQueriedRoot = root;
+ m_containsLastQueriedRoot = m_roots.contains(root);
+ }
+ return m_containsLastQueriedRoot;
}
-
- CopyWriteBarrier(VM& vm, const JSCell* owner, T* value)
+
+ bool isEmpty() const
{
- this->set(vm, owner, value);
+ return m_roots.isEmpty();
}
-
- bool operator!() const { return !m_value; }
-
- typedef T* (CopyWriteBarrier::*UnspecifiedBoolType);
- operator UnspecifiedBoolType*() const { return m_value ? reinterpret_cast<UnspecifiedBoolType*>(1) : 0; }
-
- T* get() const
+
+ void clear()
{
- return m_value;
+ m_roots.clear();
+ m_lastQueriedRoot = nullptr;
+ m_containsLastQueriedRoot = false;
}
-
- T* operator*() const
+
+ bool add(void* root)
{
- return get();
+ if (root == m_lastQueriedRoot)
+ m_containsLastQueriedRoot = true;
+ return m_roots.add(root).isNewEntry;
}
-
- T* operator->() const
+
+ int size() const
{
- return get();
+ return m_roots.size();
}
-
- void set(VM&, const JSCell* owner, T* value)
+
+ HashSet<void*>::const_iterator begin() const
{
- this->m_value = value;
- Heap::writeBarrier(owner);
+ return m_roots.begin();
}
-
- void setWithoutWriteBarrier(T* value)
+
+ HashSet<void*>::const_iterator end() const
{
- this->m_value = value;
+ return m_roots.end();
}
-
- void clear() { m_value = 0; }
+
private:
- T* m_value;
+ HashSet<void*> m_roots;
+ mutable void* m_lastQueriedRoot;
+ mutable bool m_containsLastQueriedRoot;
};
} // namespace JSC
-
-#endif // CopyWriteBarrier_h
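Usage sketch (not part of this patch) of the access pattern the single-entry cache above is aimed at: weak-reference sweeping tends to query the same opaque root many times in a row. The sentinel variable is a stand-in for a real opaque-root pointer.

    void exampleOpaqueRootQueries()
    {
        JSC::OpaqueRootSet roots;
        static int sentinel; // any non-null address can stand in for an opaque root
        void* owner = &sentinel;

        roots.add(owner);
        for (int i = 0; i < 3; ++i) {
            // Only the first contains() consults the HashSet; the repeats are
            // answered from m_lastQueriedRoot / m_containsLastQueriedRoot.
            bool isLive = roots.contains(owner);
            UNUSED_PARAM(isLive);
        }
    }
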
diff --git a/Source/JavaScriptCore/heap/HeapBlock.h b/Source/JavaScriptCore/heap/PreventCollectionScope.h
index 6f2a74c08..766cd1ff4 100644
--- a/Source/JavaScriptCore/heap/HeapBlock.h
+++ b/Source/JavaScriptCore/heap/PreventCollectionScope.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,51 +23,28 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef HeapBlock_h
-#define HeapBlock_h
+#pragma once
-#include <wtf/DoublyLinkedList.h>
-#include <wtf/StdLibExtras.h>
+#include "Heap.h"
-namespace JSC {
+namespace JSC {
-enum AllocationEffort { AllocationCanFail, AllocationMustSucceed };
-
-class Region;
-
-#if COMPILER(GCC)
-#define CLASS_IF_GCC class
-#else
-#define CLASS_IF_GCC
-#endif
-
-template<typename T>
-class HeapBlock : public DoublyLinkedListNode<T> {
- friend CLASS_IF_GCC DoublyLinkedListNode<T>;
+class PreventCollectionScope {
public:
- static HeapBlock* destroy(HeapBlock* block) WARN_UNUSED_RETURN
+ PreventCollectionScope(Heap& heap)
+ : m_heap(heap)
{
- static_cast<T*>(block)->~T();
- return block;
+ m_heap.preventCollection();
}
-
- HeapBlock(Region* region)
- : DoublyLinkedListNode<T>()
- , m_region(region)
- , m_prev(0)
- , m_next(0)
+
+ ~PreventCollectionScope()
{
- ASSERT(m_region);
+ m_heap.allowCollection();
}
- Region* region() const { return m_region; }
-
private:
- Region* m_region;
- T* m_prev;
- T* m_next;
+ Heap& m_heap;
};
} // namespace JSC
-#endif
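Usage sketch (not part of this patch): the scope pairs preventCollection() with allowCollection() via RAII, so a caller that must not observe a collection simply brackets the critical region. Here, heap is whatever Heap& the caller already holds.

    {
        JSC::PreventCollectionScope preventCollectionScope(heap);
        // Walk heap data structures, take a snapshot, etc., knowing that no
        // collection can begin until this block exits.
    } // ~PreventCollectionScope() calls heap.allowCollection()
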
diff --git a/Source/JavaScriptCore/heap/Region.h b/Source/JavaScriptCore/heap/Region.h
deleted file mode 100644
index c638059b6..000000000
--- a/Source/JavaScriptCore/heap/Region.h
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JSC_Region_h
-#define JSC_Region_h
-
-#include "HeapBlock.h"
-#include "SuperRegion.h"
-#include <wtf/DoublyLinkedList.h>
-#include <wtf/MetaAllocatorHandle.h>
-#include <wtf/PageAllocationAligned.h>
-
-#define HEAP_MEMORY_ID reinterpret_cast<void*>(static_cast<intptr_t>(-3))
-
-#define ENABLE_SUPER_REGION 0
-
-#ifndef ENABLE_SUPER_REGION
-#if USE(JSVALUE64) && !CPU(ARM64)
-#define ENABLE_SUPER_REGION 1
-#else
-#define ENABLE_SUPER_REGION 0
-#endif
-#endif
-
-namespace JSC {
-
-class DeadBlock : public HeapBlock<DeadBlock> {
-public:
- DeadBlock(Region*);
-};
-
-inline DeadBlock::DeadBlock(Region* region)
- : HeapBlock<DeadBlock>(region)
-{
-}
-
-class Region : public DoublyLinkedListNode<Region> {
- friend CLASS_IF_GCC DoublyLinkedListNode<Region>;
- friend class BlockAllocator;
-public:
- ~Region();
- static Region* create(SuperRegion*, size_t blockSize);
- static Region* createCustomSize(SuperRegion*, size_t blockSize, size_t blockAlignment);
- Region* reset(size_t blockSize);
- void destroy();
-
- size_t blockSize() const { return m_blockSize; }
- bool isFull() const { return m_blocksInUse == m_totalBlocks; }
- bool isEmpty() const { return !m_blocksInUse; }
- bool isCustomSize() const { return m_isCustomSize; }
-
- DeadBlock* allocate();
- void deallocate(void*);
-
- static const size_t s_regionSize = 64 * KB;
- static const size_t s_regionMask = ~(s_regionSize - 1);
-
-protected:
- Region(size_t blockSize, size_t totalBlocks, bool isExcess);
- void initializeBlockList();
-
- bool m_isExcess;
-
-private:
- void* base();
- size_t size();
-
- size_t m_totalBlocks;
- size_t m_blocksInUse;
- size_t m_blockSize;
- bool m_isCustomSize;
- Region* m_prev;
- Region* m_next;
- DoublyLinkedList<DeadBlock> m_deadBlocks;
-};
-
-
-class NormalRegion : public Region {
- friend class Region;
-private:
- NormalRegion(PassRefPtr<WTF::MetaAllocatorHandle>, size_t blockSize, size_t totalBlocks);
-
- static NormalRegion* tryCreate(SuperRegion*, size_t blockSize);
- static NormalRegion* tryCreateCustomSize(SuperRegion*, size_t blockSize, size_t blockAlignment);
-
- void* base() { return m_allocation->start(); }
- size_t size() { return m_allocation->sizeInBytes(); }
-
- NormalRegion* reset(size_t blockSize);
-
- RefPtr<WTF::MetaAllocatorHandle> m_allocation;
-};
-
-class ExcessRegion : public Region {
- friend class Region;
-private:
- ExcessRegion(PageAllocationAligned&, size_t blockSize, size_t totalBlocks);
-
- ~ExcessRegion();
-
- static ExcessRegion* create(size_t blockSize);
- static ExcessRegion* createCustomSize(size_t blockSize, size_t blockAlignment);
-
- void* base() { return m_allocation.base(); }
- size_t size() { return m_allocation.size(); }
-
- ExcessRegion* reset(size_t blockSize);
-
- PageAllocationAligned m_allocation;
-};
-
-inline NormalRegion::NormalRegion(PassRefPtr<WTF::MetaAllocatorHandle> allocation, size_t blockSize, size_t totalBlocks)
- : Region(blockSize, totalBlocks, false)
- , m_allocation(allocation)
-{
- initializeBlockList();
-}
-
-inline NormalRegion* NormalRegion::tryCreate(SuperRegion* superRegion, size_t blockSize)
-{
- RefPtr<WTF::MetaAllocatorHandle> allocation = superRegion->allocate(s_regionSize, HEAP_MEMORY_ID);
- if (!allocation)
- return 0;
- return new NormalRegion(allocation, blockSize, s_regionSize / blockSize);
-}
-
-inline NormalRegion* NormalRegion::tryCreateCustomSize(SuperRegion* superRegion, size_t blockSize, size_t blockAlignment)
-{
- ASSERT_UNUSED(blockAlignment, blockAlignment <= s_regionSize);
- RefPtr<WTF::MetaAllocatorHandle> allocation = superRegion->allocate(blockSize, HEAP_MEMORY_ID);
- if (!allocation)
- return 0;
- return new NormalRegion(allocation, blockSize, 1);
-}
-
-inline NormalRegion* NormalRegion::reset(size_t blockSize)
-{
- ASSERT(!m_isExcess);
- RefPtr<WTF::MetaAllocatorHandle> allocation = m_allocation.release();
- return new (NotNull, this) NormalRegion(allocation.release(), blockSize, s_regionSize / blockSize);
-}
-
-inline ExcessRegion::ExcessRegion(PageAllocationAligned& allocation, size_t blockSize, size_t totalBlocks)
- : Region(blockSize, totalBlocks, true)
- , m_allocation(allocation)
-{
- initializeBlockList();
-}
-
-inline ExcessRegion::~ExcessRegion()
-{
- m_allocation.deallocate();
-}
-
-inline ExcessRegion* ExcessRegion::create(size_t blockSize)
-{
- PageAllocationAligned allocation = PageAllocationAligned::allocate(s_regionSize, s_regionSize, OSAllocator::JSGCHeapPages);
- ASSERT(static_cast<bool>(allocation));
- return new ExcessRegion(allocation, blockSize, s_regionSize / blockSize);
-}
-
-inline ExcessRegion* ExcessRegion::createCustomSize(size_t blockSize, size_t blockAlignment)
-{
- PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, blockAlignment, OSAllocator::JSGCHeapPages);
- ASSERT(static_cast<bool>(allocation));
- return new ExcessRegion(allocation, blockSize, 1);
-}
-
-inline ExcessRegion* ExcessRegion::reset(size_t blockSize)
-{
- ASSERT(m_isExcess);
- PageAllocationAligned allocation = m_allocation;
- return new (NotNull, this) ExcessRegion(allocation, blockSize, s_regionSize / blockSize);
-}
-
-inline Region::Region(size_t blockSize, size_t totalBlocks, bool isExcess)
- : DoublyLinkedListNode<Region>()
- , m_isExcess(isExcess)
- , m_totalBlocks(totalBlocks)
- , m_blocksInUse(0)
- , m_blockSize(blockSize)
- , m_isCustomSize(false)
- , m_prev(0)
- , m_next(0)
-{
-}
-
-inline void Region::initializeBlockList()
-{
- char* start = static_cast<char*>(base());
- char* current = start;
- for (size_t i = 0; i < m_totalBlocks; i++) {
- ASSERT(current < start + size());
- m_deadBlocks.append(new (NotNull, current) DeadBlock(this));
- current += m_blockSize;
- }
-}
-
-inline Region* Region::create(SuperRegion* superRegion, size_t blockSize)
-{
-#if ENABLE(SUPER_REGION)
- ASSERT(blockSize <= s_regionSize);
- ASSERT(!(s_regionSize % blockSize));
- Region* region = NormalRegion::tryCreate(superRegion, blockSize);
- if (LIKELY(!!region))
- return region;
-#else
- UNUSED_PARAM(superRegion);
-#endif
- return ExcessRegion::create(blockSize);
-}
-
-inline Region* Region::createCustomSize(SuperRegion* superRegion, size_t blockSize, size_t blockAlignment)
-{
-#if ENABLE(SUPER_REGION)
- Region* region = NormalRegion::tryCreateCustomSize(superRegion, blockSize, blockAlignment);
- if (UNLIKELY(!region))
- region = ExcessRegion::createCustomSize(blockSize, blockAlignment);
-#else
- UNUSED_PARAM(superRegion);
- Region* region = ExcessRegion::createCustomSize(blockSize, blockAlignment);
-#endif
- region->m_isCustomSize = true;
- return region;
-}
-
-inline Region::~Region()
-{
- ASSERT(isEmpty());
-}
-
-inline void Region::destroy()
-{
-#if ENABLE(SUPER_REGION)
- if (UNLIKELY(m_isExcess))
- delete static_cast<ExcessRegion*>(this);
- else
- delete static_cast<NormalRegion*>(this);
-#else
- delete static_cast<ExcessRegion*>(this);
-#endif
-}
-
-inline Region* Region::reset(size_t blockSize)
-{
-#if ENABLE(SUPER_REGION)
- ASSERT(isEmpty());
- if (UNLIKELY(m_isExcess))
- return static_cast<ExcessRegion*>(this)->reset(blockSize);
- return static_cast<NormalRegion*>(this)->reset(blockSize);
-#else
- return static_cast<ExcessRegion*>(this)->reset(blockSize);
-#endif
-}
-
-inline DeadBlock* Region::allocate()
-{
- ASSERT(!isFull());
- m_blocksInUse++;
- return m_deadBlocks.removeHead();
-}
-
-inline void Region::deallocate(void* base)
-{
- ASSERT(base);
- ASSERT(m_blocksInUse);
- ASSERT(base >= this->base() && base < static_cast<char*>(this->base()) + size());
- DeadBlock* block = new (NotNull, base) DeadBlock(this);
- m_deadBlocks.push(block);
- m_blocksInUse--;
-}
-
-inline void* Region::base()
-{
-#if ENABLE(SUPER_REGION)
- if (UNLIKELY(m_isExcess))
- return static_cast<ExcessRegion*>(this)->ExcessRegion::base();
- return static_cast<NormalRegion*>(this)->NormalRegion::base();
-#else
- return static_cast<ExcessRegion*>(this)->ExcessRegion::base();
-#endif
-}
-
-inline size_t Region::size()
-{
-#if ENABLE(SUPER_REGION)
- if (UNLIKELY(m_isExcess))
- return static_cast<ExcessRegion*>(this)->ExcessRegion::size();
- return static_cast<NormalRegion*>(this)->NormalRegion::size();
-#else
- return static_cast<ExcessRegion*>(this)->ExcessRegion::size();
-#endif
-}
-
-} // namespace JSC
-
-#endif // JSC_Region_h
diff --git a/Source/JavaScriptCore/heap/RegisterState.h b/Source/JavaScriptCore/heap/RegisterState.h
new file mode 100644
index 000000000..6005a9143
--- /dev/null
+++ b/Source/JavaScriptCore/heap/RegisterState.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <setjmp.h>
+
+namespace JSC {
+
+#if !OS(WINDOWS)
+
+// ALLOCATE_AND_GET_REGISTER_STATE has to ensure that the GC sees callee-saves. It achieves this by
+// ensuring that the callee-saves are either spilled to the stack or saved in the RegisterState. The code
+// looks like it's achieving only the latter. However, it's possible that the compiler chooses to use
+// a callee-save for one of the caller's variables, which means that the value that we were interested in
+// got spilled. In that case, we will store something bogus into the RegisterState, and that's OK.
+
+#if CPU(X86)
+struct RegisterState {
+ uint32_t ebx;
+ uint32_t edi;
+ uint32_t esi;
+};
+
+#define SAVE_REG(regname, where) \
+ asm volatile ("movl %%" #regname ", %0" : "=m"(where) : : "memory")
+
+#define ALLOCATE_AND_GET_REGISTER_STATE(registers) \
+ RegisterState registers; \
+ SAVE_REG(ebx, registers.ebx); \
+ SAVE_REG(edi, registers.edi); \
+ SAVE_REG(esi, registers.esi)
+
+#elif CPU(X86_64)
+struct RegisterState {
+ uint64_t rbx;
+ uint64_t r12;
+ uint64_t r13;
+ uint64_t r14;
+ uint64_t r15;
+};
+
+#define SAVE_REG(regname, where) \
+ asm volatile ("movq %%" #regname ", %0" : "=m"(where) : : "memory")
+
+#define ALLOCATE_AND_GET_REGISTER_STATE(registers) \
+ RegisterState registers; \
+ SAVE_REG(rbx, registers.rbx); \
+ SAVE_REG(r12, registers.r12); \
+ SAVE_REG(r13, registers.r13); \
+ SAVE_REG(r14, registers.r14); \
+ SAVE_REG(r15, registers.r15)
+
+#elif CPU(ARM_THUMB2)
+struct RegisterState {
+ uint32_t r4;
+ uint32_t r5;
+ uint32_t r6;
+ uint32_t r8;
+ uint32_t r9;
+ uint32_t r10;
+ uint32_t r11;
+};
+
+#define SAVE_REG(regname, where) \
+ asm volatile ("str " #regname ", %0" : "=m"(where) : : "memory")
+
+#define ALLOCATE_AND_GET_REGISTER_STATE(registers) \
+ RegisterState registers; \
+ SAVE_REG(r4, registers.r4); \
+ SAVE_REG(r5, registers.r5); \
+ SAVE_REG(r6, registers.r6); \
+ SAVE_REG(r8, registers.r8); \
+ SAVE_REG(r9, registers.r9); \
+ SAVE_REG(r10, registers.r10); \
+ SAVE_REG(r11, registers.r11)
+
+#elif CPU(ARM64)
+struct RegisterState {
+ uint64_t x19;
+ uint64_t x20;
+ uint64_t x21;
+ uint64_t x22;
+ uint64_t x23;
+ uint64_t x24;
+ uint64_t x25;
+ uint64_t x26;
+ uint64_t x27;
+ uint64_t x28;
+};
+
+#define SAVE_REG(regname, where) \
+ asm volatile ("str " #regname ", %0" : "=m"(where) : : "memory")
+
+#define ALLOCATE_AND_GET_REGISTER_STATE(registers) \
+ RegisterState registers; \
+ SAVE_REG(x19, registers.x19); \
+ SAVE_REG(x20, registers.x20); \
+ SAVE_REG(x21, registers.x21); \
+ SAVE_REG(x22, registers.x22); \
+ SAVE_REG(x23, registers.x23); \
+ SAVE_REG(x24, registers.x24); \
+ SAVE_REG(x25, registers.x25); \
+ SAVE_REG(x26, registers.x26); \
+ SAVE_REG(x27, registers.x27); \
+ SAVE_REG(x28, registers.x28)
+
+#endif
+#endif // !OS(WINDOWS)
+
+#ifndef ALLOCATE_AND_GET_REGISTER_STATE
+#if COMPILER(GCC_OR_CLANG)
+#define REGISTER_BUFFER_ALIGNMENT __attribute__ ((aligned (sizeof(void*))))
+#else
+#define REGISTER_BUFFER_ALIGNMENT
+#endif
+
+typedef jmp_buf RegisterState;
+
+// ALLOCATE_AND_GET_REGISTER_STATE() is a macro so that it is always "inlined" even in debug builds.
+#if COMPILER(MSVC)
+#pragma warning(push)
+#pragma warning(disable: 4611)
+#define ALLOCATE_AND_GET_REGISTER_STATE(registers) \
+ RegisterState registers REGISTER_BUFFER_ALIGNMENT; \
+ setjmp(registers)
+#pragma warning(pop)
+#else
+#define ALLOCATE_AND_GET_REGISTER_STATE(registers) \
+ RegisterState registers REGISTER_BUFFER_ALIGNMENT; \
+ setjmp(registers)
+#endif
+#endif // ALLOCATE_AND_GET_REGISTER_STATE
+
+} // namespace JSC
+
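Usage sketch (not part of this patch): a conservative-scan entry point is expected to expand the macro into a local, then treat that local's address as the top of the stack region to scan, so the captured callee-save values are covered too. gatherStackRoots is a hypothetical placeholder for whatever the caller does next.

    void gatherStackRoots(JSC::Heap&, void* stackTop); // hypothetical helper

    NEVER_INLINE void scanCurrentThread(JSC::Heap& heap)
    {
        // Saves callee-save registers into 'registers' on this frame.
        ALLOCATE_AND_GET_REGISTER_STATE(registers);
        // Scanning the stack from &registers upward therefore also sees the
        // saved register values.
        gatherStackRoots(heap, &registers);
    }
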
diff --git a/Source/JavaScriptCore/heap/RecursiveAllocationScope.h b/Source/JavaScriptCore/heap/ReleaseHeapAccessScope.h
index e05f60d9c..e39dd1f4a 100644
--- a/Source/JavaScriptCore/heap/RecursiveAllocationScope.h
+++ b/Source/JavaScriptCore/heap/ReleaseHeapAccessScope.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,43 +23,36 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef RecursiveAllocationScope_h
-#define RecursiveAllocationScope_h
+#pragma once
#include "Heap.h"
-#include "VM.h"
namespace JSC {
-class RecursiveAllocationScope {
+// Almost all of the VM's code runs with "heap access". This means that the GC thread believes that
+// the VM is messing with the heap in a way that would be unsafe for certain phases of the collector,
+// like the weak reference fixpoint, stack scanning, and changing barrier modes. However, many long
+// running operations inside the VM don't require heap access. For example, memcpying a typed array
+// if a reference to it is on the stack is totally fine without heap access. Blocking on a futex is
+// also fine without heap access. Releasing heap access for long-running code (in the case of futex
+// wait, possibly infinitely long-running) ensures that the GC can finish a collection cycle while
+// you are waiting.
+class ReleaseHeapAccessScope {
public:
- RecursiveAllocationScope(Heap& heap)
+ ReleaseHeapAccessScope(Heap& heap)
: m_heap(heap)
-#ifndef NDEBUG
- , m_savedObjectClass(heap.vm()->m_initializingObjectClass)
-#endif
{
-#ifndef NDEBUG
- m_heap.vm()->m_initializingObjectClass = nullptr;
-#endif
- m_heap.m_deferralDepth++; // Make sure that we don't GC.
+ m_heap.releaseAccess();
}
- ~RecursiveAllocationScope()
+ ~ReleaseHeapAccessScope()
{
- m_heap.m_deferralDepth--; // Decrement deferal manually so we don't GC when we do so since we are already GCing!.
-#ifndef NDEBUG
- m_heap.vm()->m_initializingObjectClass = m_savedObjectClass;
-#endif
+ m_heap.acquireAccess();
}
private:
Heap& m_heap;
-#ifndef NDEBUG
- const ClassInfo* m_savedObjectClass;
-#endif
};
-}
+} // namespace JSC
-#endif // RecursiveAllocationScope_h
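Usage sketch (not part of this patch), matching the comment above: drop heap access around a potentially unbounded wait so the collector can finish cycles while this thread blocks. The vm, condition, and lock names are placeholders for the caller's own state.

    {
        JSC::ReleaseHeapAccessScope releaseHeapAccessScope(vm.heap);
        condition.wait(lock); // may block indefinitely; GC can run to completion meanwhile
    } // ~ReleaseHeapAccessScope() reacquires heap access before the heap is touched again
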
diff --git a/Source/JavaScriptCore/heap/RunningScope.h b/Source/JavaScriptCore/heap/RunningScope.h
new file mode 100644
index 000000000..b89d03cfb
--- /dev/null
+++ b/Source/JavaScriptCore/heap/RunningScope.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "Heap.h"
+
+namespace JSC {
+
+class RunningScope {
+public:
+ RunningScope(Heap& heap)
+ : m_heap(heap)
+ , m_oldState(m_heap.m_mutatorState)
+ {
+ m_heap.m_mutatorState = MutatorState::Running;
+ }
+
+ ~RunningScope()
+ {
+ m_heap.m_mutatorState = m_oldState;
+ }
+
+private:
+ Heap& m_heap;
+ MutatorState m_oldState;
+};
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/SlotVisitor.cpp b/Source/JavaScriptCore/heap/SlotVisitor.cpp
index 4fd0da725..f0260fc2f 100644
--- a/Source/JavaScriptCore/heap/SlotVisitor.cpp
+++ b/Source/JavaScriptCore/heap/SlotVisitor.cpp
@@ -1,29 +1,88 @@
+/*
+ * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#include "config.h"
#include "SlotVisitor.h"
-#include "SlotVisitorInlines.h"
+#include "CPU.h"
#include "ConservativeRoots.h"
-#include "CopiedSpace.h"
-#include "CopiedSpaceInlines.h"
-#include "GCThread.h"
+#include "GCSegmentedArrayInlines.h"
+#include "HeapCellInlines.h"
+#include "HeapProfiler.h"
+#include "HeapSnapshotBuilder.h"
#include "JSArray.h"
#include "JSDestructibleObject.h"
-#include "VM.h"
#include "JSObject.h"
#include "JSString.h"
-#include "Operations.h"
-#include <wtf/StackStats.h>
+#include "JSCInlines.h"
+#include "SlotVisitorInlines.h"
+#include "StopIfNecessaryTimer.h"
+#include "SuperSampler.h"
+#include "VM.h"
+#include <wtf/Lock.h>
namespace JSC {
-SlotVisitor::SlotVisitor(GCThreadSharedData& shared)
- : m_stack(shared.m_vm->heap.blockAllocator())
- , m_bytesVisited(0)
- , m_bytesCopied(0)
+#if ENABLE(GC_VALIDATION)
+static void validate(JSCell* cell)
+{
+ RELEASE_ASSERT(cell);
+
+ if (!cell->structure()) {
+ dataLogF("cell at %p has a null structure\n" , cell);
+ CRASH();
+ }
+
+    // Both the cell's structure and the cell's structure's structure should be the Structure Structure.
+ // I hate this sentence.
+ VM& vm = *cell->vm();
+ if (cell->structure()->structure()->JSCell::classInfo(vm) != cell->structure()->JSCell::classInfo(vm)) {
+ const char* parentClassName = 0;
+ const char* ourClassName = 0;
+ if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo(vm))
+ parentClassName = cell->structure()->structure()->JSCell::classInfo(vm)->className;
+ if (cell->structure()->JSCell::classInfo(vm))
+ ourClassName = cell->structure()->JSCell::classInfo(vm)->className;
+ dataLogF("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
+ cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
+ CRASH();
+ }
+
+ // Make sure we can walk the ClassInfo chain
+ const ClassInfo* info = cell->classInfo(vm);
+ do { } while ((info = info->parentClass));
+}
+#endif
+
+SlotVisitor::SlotVisitor(Heap& heap, CString codeName)
+ : m_bytesVisited(0)
, m_visitCount(0)
, m_isInParallelMode(false)
- , m_shared(shared)
- , m_shouldHashCons(false)
+ , m_markingVersion(MarkedSpace::initialVersion)
+ , m_heap(heap)
+ , m_codeName(codeName)
#if !ASSERT_DISABLED
, m_isCheckingForDefaultMarkViolation(false)
, m_isDraining(false)
@@ -33,343 +92,752 @@ SlotVisitor::SlotVisitor(GCThreadSharedData& shared)
SlotVisitor::~SlotVisitor()
{
- clearMarkStack();
+ clearMarkStacks();
}
-void SlotVisitor::setup()
+void SlotVisitor::didStartMarking()
{
- m_shared.m_shouldHashCons = m_shared.m_vm->haveEnoughNewStringsToHashCons();
- m_shouldHashCons = m_shared.m_shouldHashCons;
-#if ENABLE(PARALLEL_GC)
- for (unsigned i = 0; i < m_shared.m_gcThreads.size(); ++i)
- m_shared.m_gcThreads[i]->slotVisitor()->m_shouldHashCons = m_shared.m_shouldHashCons;
-#endif
+ if (heap()->collectionScope() == CollectionScope::Full)
+ RELEASE_ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
+ else
+ reset();
+
+ if (HeapProfiler* heapProfiler = vm().heapProfiler())
+ m_heapSnapshotBuilder = heapProfiler->activeSnapshotBuilder();
+
+ m_markingVersion = heap()->objectSpace().markingVersion();
}
void SlotVisitor::reset()
{
+ RELEASE_ASSERT(!m_opaqueRoots.size());
m_bytesVisited = 0;
- m_bytesCopied = 0;
m_visitCount = 0;
- ASSERT(m_stack.isEmpty());
-#if ENABLE(PARALLEL_GC)
- ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
-#else
- m_opaqueRoots.clear();
-#endif
- if (m_shouldHashCons) {
- m_uniqueStrings.clear();
- m_shouldHashCons = false;
- }
+ m_heapSnapshotBuilder = nullptr;
+ RELEASE_ASSERT(!m_currentCell);
}
-void SlotVisitor::clearMarkStack()
+void SlotVisitor::clearMarkStacks()
{
- m_stack.clear();
+ forEachMarkStack(
+ [&] (MarkStackArray& stack) -> IterationStatus {
+ stack.clear();
+ return IterationStatus::Continue;
+ });
}
void SlotVisitor::append(ConservativeRoots& conservativeRoots)
{
- StackStats::probe();
- JSCell** roots = conservativeRoots.roots();
+ HeapCell** roots = conservativeRoots.roots();
size_t size = conservativeRoots.size();
for (size_t i = 0; i < size; ++i)
- internalAppend(0, roots[i]);
+ appendJSCellOrAuxiliary(roots[i]);
}
-ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell)
+void SlotVisitor::appendJSCellOrAuxiliary(HeapCell* heapCell)
{
- StackStats::probe();
-
- ASSERT(Heap::isMarked(cell));
+ if (!heapCell)
+ return;
- if (isJSString(cell)) {
- JSString::visitChildren(const_cast<JSCell*>(cell), visitor);
+ ASSERT(!m_isCheckingForDefaultMarkViolation);
+
+ auto validateCell = [&] (JSCell* jsCell) {
+ StructureID structureID = jsCell->structureID();
+
+ auto die = [&] (const char* text) {
+ WTF::dataFile().atomically(
+ [&] (PrintStream& out) {
+ out.print(text);
+ out.print("GC type: ", heap()->collectionScope(), "\n");
+ out.print("Object at: ", RawPointer(jsCell), "\n");
+#if USE(JSVALUE64)
+ out.print("Structure ID: ", structureID, " (0x", format("%x", structureID), ")\n");
+ out.print("Structure ID table size: ", heap()->structureIDTable().size(), "\n");
+#else
+ out.print("Structure: ", RawPointer(structureID), "\n");
+#endif
+ out.print("Object contents:");
+ for (unsigned i = 0; i < 2; ++i)
+ out.print(" ", format("0x%016llx", bitwise_cast<uint64_t*>(jsCell)[i]));
+ out.print("\n");
+ CellContainer container = jsCell->cellContainer();
+ out.print("Is marked: ", container.isMarked(jsCell), "\n");
+ out.print("Is newly allocated: ", container.isNewlyAllocated(jsCell), "\n");
+ if (container.isMarkedBlock()) {
+ MarkedBlock& block = container.markedBlock();
+ out.print("Block: ", RawPointer(&block), "\n");
+ block.handle().dumpState(out);
+ out.print("\n");
+ out.print("Is marked raw: ", block.isMarkedRaw(jsCell), "\n");
+ out.print("Marking version: ", block.markingVersion(), "\n");
+ out.print("Heap marking version: ", heap()->objectSpace().markingVersion(), "\n");
+ out.print("Is newly allocated raw: ", block.handle().isNewlyAllocated(jsCell), "\n");
+ out.print("Newly allocated version: ", block.handle().newlyAllocatedVersion(), "\n");
+ out.print("Heap newly allocated version: ", heap()->objectSpace().newlyAllocatedVersion(), "\n");
+ }
+ UNREACHABLE_FOR_PLATFORM();
+ });
+ };
+
+ // It's not OK for the structure to be null at any GC scan point. We must not GC while
+ // an object is not fully initialized.
+ if (!structureID)
+ die("GC scan found corrupt object: structureID is zero!\n");
+
+ // It's not OK for the structure to be nuked at any GC scan point.
+ if (isNuked(structureID))
+ die("GC scan found object in bad state: structureID is nuked!\n");
+
+#if USE(JSVALUE64)
+ // This detects the worst of the badness.
+ if (structureID >= heap()->structureIDTable().size())
+ die("GC scan found corrupt object: structureID is out of bounds!\n");
+#endif
+ };
+
+ // In debug mode, we validate before marking since this makes it clearer what the problem
+ // was. It's also slower, so we don't do it normally.
+ if (!ASSERT_DISABLED && heapCell->cellKind() == HeapCell::JSCell)
+ validateCell(static_cast<JSCell*>(heapCell));
+
+ if (Heap::testAndSetMarked(m_markingVersion, heapCell))
return;
- }
+
+ switch (heapCell->cellKind()) {
+ case HeapCell::JSCell: {
+ // We have ample budget to perform validation here.
+
+ JSCell* jsCell = static_cast<JSCell*>(heapCell);
+ validateCell(jsCell);
+
+ jsCell->setCellState(CellState::PossiblyGrey);
- if (isJSFinalObject(cell)) {
- JSFinalObject::visitChildren(const_cast<JSCell*>(cell), visitor);
+ appendToMarkStack(jsCell);
return;
}
+
+ case HeapCell::Auxiliary: {
+ noteLiveAuxiliaryCell(heapCell);
+ return;
+ } }
+}
+
+void SlotVisitor::appendUnbarriered(JSValue value)
+{
+ if (!value || !value.isCell())
+ return;
+
+ if (UNLIKELY(m_heapSnapshotBuilder))
+ m_heapSnapshotBuilder->appendEdge(m_currentCell, value.asCell());
+
+ setMarkedAndAppendToMarkStack(value.asCell());
+}
+
+void SlotVisitor::appendHidden(JSValue value)
+{
+ if (!value || !value.isCell())
+ return;
+
+ setMarkedAndAppendToMarkStack(value.asCell());
+}
+
+void SlotVisitor::setMarkedAndAppendToMarkStack(JSCell* cell)
+{
+ SuperSamplerScope superSamplerScope(false);
+
+ ASSERT(!m_isCheckingForDefaultMarkViolation);
+ if (!cell)
+ return;
+
+#if ENABLE(GC_VALIDATION)
+ validate(cell);
+#endif
+
+ if (cell->isLargeAllocation())
+ setMarkedAndAppendToMarkStack(cell->largeAllocation(), cell);
+ else
+ setMarkedAndAppendToMarkStack(cell->markedBlock(), cell);
+}
+
+template<typename ContainerType>
+ALWAYS_INLINE void SlotVisitor::setMarkedAndAppendToMarkStack(ContainerType& container, JSCell* cell)
+{
+ container.aboutToMark(m_markingVersion);
+
+ if (container.testAndSetMarked(cell))
+ return;
+
+ ASSERT(cell->structure());
+
+ // Indicate that the object is grey and that:
+ // In case of concurrent GC: it's the first time it is grey in this GC cycle.
+ // In case of eden collection: it's a new object that became grey rather than an old remembered object.
+ cell->setCellState(CellState::PossiblyGrey);
+
+ appendToMarkStack(container, cell);
+}
+
+void SlotVisitor::appendToMarkStack(JSCell* cell)
+{
+ if (cell->isLargeAllocation())
+ appendToMarkStack(cell->largeAllocation(), cell);
+ else
+ appendToMarkStack(cell->markedBlock(), cell);
+}
+
+template<typename ContainerType>
+ALWAYS_INLINE void SlotVisitor::appendToMarkStack(ContainerType& container, JSCell* cell)
+{
+ ASSERT(Heap::isMarkedConcurrently(cell));
+ ASSERT(!cell->isZapped());
+
+ container.noteMarked();
+
+ m_visitCount++;
+ m_bytesVisited += container.cellSize();
+
+ m_collectorStack.append(cell);
+}
- if (isJSArray(cell)) {
- JSArray::visitChildren(const_cast<JSCell*>(cell), visitor);
+void SlotVisitor::appendToMutatorMarkStack(const JSCell* cell)
+{
+ m_mutatorStack.append(cell);
+}
+
+void SlotVisitor::markAuxiliary(const void* base)
+{
+ HeapCell* cell = bitwise_cast<HeapCell*>(base);
+
+ ASSERT(cell->heap() == heap());
+
+ if (Heap::testAndSetMarked(m_markingVersion, cell))
return;
+
+ noteLiveAuxiliaryCell(cell);
+}
+
+void SlotVisitor::noteLiveAuxiliaryCell(HeapCell* cell)
+{
+ // We get here once per GC under these circumstances:
+ //
+ // Eden collection: if the cell was allocated since the last collection and is live somehow.
+ //
+ // Full collection: if the cell is live somehow.
+
+ CellContainer container = cell->cellContainer();
+
+ container.assertValidCell(vm(), cell);
+ container.noteMarked();
+
+ m_visitCount++;
+
+ size_t cellSize = container.cellSize();
+ m_bytesVisited += cellSize;
+ m_nonCellVisitCount += cellSize;
+}
+
+class SetCurrentCellScope {
+public:
+ SetCurrentCellScope(SlotVisitor& visitor, const JSCell* cell)
+ : m_visitor(visitor)
+ {
+ ASSERT(!m_visitor.m_currentCell);
+ m_visitor.m_currentCell = const_cast<JSCell*>(cell);
+ }
+
+ ~SetCurrentCellScope()
+ {
+ ASSERT(m_visitor.m_currentCell);
+ m_visitor.m_currentCell = nullptr;
+ }
+
+private:
+ SlotVisitor& m_visitor;
+};
+
+ALWAYS_INLINE void SlotVisitor::visitChildren(const JSCell* cell)
+{
+ ASSERT(Heap::isMarkedConcurrently(cell));
+
+ SetCurrentCellScope currentCellScope(*this, cell);
+
+ if (false) {
+ dataLog("Visiting ", RawPointer(cell));
+ if (!m_isFirstVisit)
+ dataLog(" (subsequent)");
+ dataLog("\n");
+ }
+
+ // Funny story: it's possible for the object to be black already, if we barrier the object at
+ // about the same time that it's marked. That's fine. It's a gnarly and super-rare race. It's
+ // not clear to me that it would be correct or profitable to bail here if the object is already
+ // black.
+
+ cell->setCellState(CellState::PossiblyBlack);
+
+ WTF::storeLoadFence();
+
+ switch (cell->type()) {
+ case StringType:
+ JSString::visitChildren(const_cast<JSCell*>(cell), *this);
+ break;
+
+ case FinalObjectType:
+ JSFinalObject::visitChildren(const_cast<JSCell*>(cell), *this);
+ break;
+
+ case ArrayType:
+ JSArray::visitChildren(const_cast<JSCell*>(cell), *this);
+ break;
+
+ default:
+ // FIXME: This could be so much better.
+ // https://bugs.webkit.org/show_bug.cgi?id=162462
+ cell->methodTable(vm())->visitChildren(const_cast<JSCell*>(cell), *this);
+ break;
+ }
+
+ if (UNLIKELY(m_heapSnapshotBuilder)) {
+ if (m_isFirstVisit)
+ m_heapSnapshotBuilder->appendNode(const_cast<JSCell*>(cell));
}
+}
- cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
+void SlotVisitor::visitAsConstraint(const JSCell* cell)
+{
+ m_isFirstVisit = false;
+ visitChildren(cell);
}
-void SlotVisitor::donateKnownParallel()
+void SlotVisitor::donateKnownParallel(MarkStackArray& from, MarkStackArray& to)
{
- StackStats::probe();
// NOTE: Because we re-try often, we can afford to be conservative, and
// assume that donating is not profitable.
// Avoid locking when a thread reaches a dead end in the object graph.
- if (m_stack.size() < 2)
+ if (from.size() < 2)
return;
// If there's already some shared work queued up, be conservative and assume
// that donating more is not profitable.
- if (m_shared.m_sharedMarkStack.size())
+ if (to.size())
return;
// If we're contending on the lock, be conservative and assume that another
// thread is already donating.
- std::unique_lock<std::mutex> lock(m_shared.m_markingMutex, std::try_to_lock);
+ std::unique_lock<Lock> lock(m_heap.m_markingMutex, std::try_to_lock);
if (!lock.owns_lock())
return;
// Otherwise, assume that a thread will go idle soon, and donate.
- m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack);
+ from.donateSomeCellsTo(to);
- if (m_shared.m_numberOfActiveParallelMarkers < Options::numberOfGCMarkers())
- m_shared.m_markingConditionVariable.notify_all();
+ m_heap.m_markingConditionVariable.notifyAll();
}
-void SlotVisitor::drain()
+void SlotVisitor::donateKnownParallel()
{
- StackStats::probe();
- ASSERT(m_isInParallelMode);
-
-#if ENABLE(PARALLEL_GC)
- if (Options::numberOfGCMarkers() > 1) {
- while (!m_stack.isEmpty()) {
- m_stack.refill();
- for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_stack.canRemoveLast() && countdown--;)
- visitChildren(*this, m_stack.removeLast());
- donateKnownParallel();
- }
-
- mergeOpaqueRootsIfNecessary();
+ forEachMarkStack(
+ [&] (MarkStackArray& stack) -> IterationStatus {
+ donateKnownParallel(stack, correspondingGlobalStack(stack));
+ return IterationStatus::Continue;
+ });
+}
+
+void SlotVisitor::updateMutatorIsStopped(const AbstractLocker&)
+{
+ m_mutatorIsStopped = (m_heap.collectorBelievesThatTheWorldIsStopped() & m_canOptimizeForStoppedMutator);
+}
+
+void SlotVisitor::updateMutatorIsStopped()
+{
+ if (mutatorIsStoppedIsUpToDate())
return;
+ updateMutatorIsStopped(holdLock(m_rightToRun));
+}
+
+bool SlotVisitor::hasAcknowledgedThatTheMutatorIsResumed() const
+{
+ return !m_mutatorIsStopped;
+}
+
+bool SlotVisitor::mutatorIsStoppedIsUpToDate() const
+{
+ return m_mutatorIsStopped == (m_heap.collectorBelievesThatTheWorldIsStopped() & m_canOptimizeForStoppedMutator);
+}
+
+void SlotVisitor::optimizeForStoppedMutator()
+{
+ m_canOptimizeForStoppedMutator = true;
+}
+
+NEVER_INLINE void SlotVisitor::drain(MonotonicTime timeout)
+{
+ if (!m_isInParallelMode) {
+ dataLog("FATAL: attempting to drain when not in parallel mode.\n");
+ RELEASE_ASSERT_NOT_REACHED();
}
-#endif
- while (!m_stack.isEmpty()) {
- m_stack.refill();
- while (m_stack.canRemoveLast())
- visitChildren(*this, m_stack.removeLast());
+ auto locker = holdLock(m_rightToRun);
+
+ while (!hasElapsed(timeout)) {
+ updateMutatorIsStopped(locker);
+ IterationStatus status = forEachMarkStack(
+ [&] (MarkStackArray& stack) -> IterationStatus {
+ if (stack.isEmpty())
+ return IterationStatus::Continue;
+
+ stack.refill();
+
+ m_isFirstVisit = (&stack == &m_collectorStack);
+
+ for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); stack.canRemoveLast() && countdown--;)
+ visitChildren(stack.removeLast());
+ return IterationStatus::Done;
+ });
+ if (status == IterationStatus::Continue)
+ break;
+
+ m_rightToRun.safepoint();
+ donateKnownParallel();
}
+
+ mergeIfNecessary();
}
-void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
+size_t SlotVisitor::performIncrementOfDraining(size_t bytesRequested)
{
- StackStats::probe();
- ASSERT(m_isInParallelMode);
-
- ASSERT(Options::numberOfGCMarkers());
-
- bool shouldBeParallel;
+ RELEASE_ASSERT(m_isInParallelMode);
-#if ENABLE(PARALLEL_GC)
- shouldBeParallel = Options::numberOfGCMarkers() > 1;
-#else
- ASSERT(Options::numberOfGCMarkers() == 1);
- shouldBeParallel = false;
-#endif
-
- if (!shouldBeParallel) {
- // This call should be a no-op.
- ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain);
- ASSERT(m_stack.isEmpty());
- ASSERT(m_shared.m_sharedMarkStack.isEmpty());
- return;
+ size_t cellsRequested = bytesRequested / MarkedBlock::atomSize;
+ {
+ auto locker = holdLock(m_heap.m_markingMutex);
+ forEachMarkStack(
+ [&] (MarkStackArray& stack) -> IterationStatus {
+ cellsRequested -= correspondingGlobalStack(stack).transferTo(stack, cellsRequested);
+ return cellsRequested ? IterationStatus::Continue : IterationStatus::Done;
+ });
}
+
+ size_t cellBytesVisited = 0;
+ m_nonCellVisitCount = 0;
+
+ auto bytesVisited = [&] () -> size_t {
+ return cellBytesVisited + m_nonCellVisitCount;
+ };
+
+ auto isDone = [&] () -> bool {
+ return bytesVisited() >= bytesRequested;
+ };
-#if ENABLE(PARALLEL_GC)
{
- std::lock_guard<std::mutex> lock(m_shared.m_markingMutex);
- m_shared.m_numberOfActiveParallelMarkers++;
+ auto locker = holdLock(m_rightToRun);
+
+ while (!isDone()) {
+ updateMutatorIsStopped(locker);
+ IterationStatus status = forEachMarkStack(
+ [&] (MarkStackArray& stack) -> IterationStatus {
+ if (stack.isEmpty() || isDone())
+ return IterationStatus::Continue;
+
+ stack.refill();
+
+ m_isFirstVisit = (&stack == &m_collectorStack);
+
+ unsigned countdown = Options::minimumNumberOfScansBetweenRebalance();
+ while (countdown && stack.canRemoveLast() && !isDone()) {
+ const JSCell* cell = stack.removeLast();
+ cellBytesVisited += cell->cellSize();
+ visitChildren(cell);
+ countdown--;
+ }
+ return IterationStatus::Done;
+ });
+ if (status == IterationStatus::Continue)
+ break;
+ m_rightToRun.safepoint();
+ donateKnownParallel();
+ }
}
+
+ donateAll();
+ mergeIfNecessary();
+
+ return bytesVisited();
+}
+
+bool SlotVisitor::didReachTermination()
+{
+ LockHolder locker(m_heap.m_markingMutex);
+ return didReachTermination(locker);
+}
+
+bool SlotVisitor::didReachTermination(const AbstractLocker&)
+{
+ return isEmpty()
+ && !m_heap.m_numberOfActiveParallelMarkers
+ && m_heap.m_sharedCollectorMarkStack->isEmpty()
+ && m_heap.m_sharedMutatorMarkStack->isEmpty();
+}
+
+bool SlotVisitor::hasWork(const AbstractLocker&)
+{
+ return !m_heap.m_sharedCollectorMarkStack->isEmpty()
+ || !m_heap.m_sharedMutatorMarkStack->isEmpty();
+}
+
+NEVER_INLINE SlotVisitor::SharedDrainResult SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode, MonotonicTime timeout)
+{
+ ASSERT(m_isInParallelMode);
+
+ ASSERT(Options::numberOfGCMarkers());
+
+ bool isActive = false;
while (true) {
{
- std::unique_lock<std::mutex> lock(m_shared.m_markingMutex);
- m_shared.m_numberOfActiveParallelMarkers--;
+ LockHolder locker(m_heap.m_markingMutex);
+ if (isActive)
+ m_heap.m_numberOfActiveParallelMarkers--;
+ m_heap.m_numberOfWaitingParallelMarkers++;
- // How we wait differs depending on drain mode.
if (sharedDrainMode == MasterDrain) {
- // Wait until either termination is reached, or until there is some work
- // for us to do.
while (true) {
- // Did we reach termination?
- if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) {
- // Let any sleeping slaves know it's time for them to return;
- m_shared.m_markingConditionVariable.notify_all();
- return;
+ if (hasElapsed(timeout))
+ return SharedDrainResult::TimedOut;
+
+ if (didReachTermination(locker)) {
+ m_heap.m_markingConditionVariable.notifyAll();
+ return SharedDrainResult::Done;
}
- // Is there work to be done?
- if (!m_shared.m_sharedMarkStack.isEmpty())
+ if (hasWork(locker))
break;
-
- // Otherwise wait.
- m_shared.m_markingConditionVariable.wait(lock);
+
+ m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
}
} else {
ASSERT(sharedDrainMode == SlaveDrain);
+
+ if (hasElapsed(timeout))
+ return SharedDrainResult::TimedOut;
- // Did we detect termination? If so, let the master know.
- if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
- m_shared.m_markingConditionVariable.notify_all();
+ if (didReachTermination(locker)) {
+ m_heap.m_markingConditionVariable.notifyAll();
+
+ // If we're in concurrent mode, then we know that the mutator will eventually do
+ // the right thing because:
+ // - It's possible that the collector has the conn. In that case, the collector will
+ // wake up from the notification above. This will happen if the app released heap
+ // access. Native apps can spend a lot of time with heap access released.
+ // - It's possible that the mutator will allocate soon. Then it will check if we
+ // reached termination. This is the most likely outcome in programs that allocate
+ // a lot.
+ // - WebCore never releases access. But WebCore has a runloop. The runloop will check
+ // if we reached termination.
+ // So, this tells the runloop that it's got things to do.
+ m_heap.m_stopIfNecessaryTimer->scheduleSoon();
+ }
- m_shared.m_markingConditionVariable.wait(lock, [this] { return !m_shared.m_sharedMarkStack.isEmpty() || m_shared.m_parallelMarkersShouldExit; });
+ auto isReady = [&] () -> bool {
+ return hasWork(locker)
+ || m_heap.m_parallelMarkersShouldExit;
+ };
+
+ m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout, isReady);
- // Is the current phase done? If so, return from this function.
- if (m_shared.m_parallelMarkersShouldExit)
- return;
+ if (m_heap.m_parallelMarkersShouldExit)
+ return SharedDrainResult::Done;
}
-
- size_t idleThreadCount = Options::numberOfGCMarkers() - m_shared.m_numberOfActiveParallelMarkers;
- m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack, idleThreadCount);
- m_shared.m_numberOfActiveParallelMarkers++;
+
+ forEachMarkStack(
+ [&] (MarkStackArray& stack) -> IterationStatus {
+ stack.stealSomeCellsFrom(
+ correspondingGlobalStack(stack),
+ m_heap.m_numberOfWaitingParallelMarkers);
+ return IterationStatus::Continue;
+ });
+
+ m_heap.m_numberOfActiveParallelMarkers++;
+ m_heap.m_numberOfWaitingParallelMarkers--;
}
- drain();
+ drain(timeout);
+ isActive = true;
}
-#endif
}
-void SlotVisitor::mergeOpaqueRoots()
+SlotVisitor::SharedDrainResult SlotVisitor::drainInParallel(MonotonicTime timeout)
{
- StackStats::probe();
- ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
- {
- MutexLocker locker(m_shared.m_opaqueRootsLock);
- HashSet<void*>::iterator begin = m_opaqueRoots.begin();
- HashSet<void*>::iterator end = m_opaqueRoots.end();
- for (HashSet<void*>::iterator iter = begin; iter != end; ++iter)
- m_shared.m_opaqueRoots.add(*iter);
- }
- m_opaqueRoots.clear();
+ donateAndDrain(timeout);
+ return drainFromShared(MasterDrain, timeout);
}
-ALWAYS_INLINE bool JSString::tryHashConsLock()
+SlotVisitor::SharedDrainResult SlotVisitor::drainInParallelPassively(MonotonicTime timeout)
{
-#if ENABLE(PARALLEL_GC)
- unsigned currentFlags = m_flags;
-
- if (currentFlags & HashConsLock)
- return false;
-
- unsigned newFlags = currentFlags | HashConsLock;
+ ASSERT(m_isInParallelMode);
+
+ ASSERT(Options::numberOfGCMarkers());
+
+ if (Options::numberOfGCMarkers() == 1
+ || (m_heap.m_worldState.load() & Heap::mutatorWaitingBit)
+ || !m_heap.hasHeapAccess()
+ || m_heap.collectorBelievesThatTheWorldIsStopped()) {
+ // Draining passively is only an optimization over drainInParallel() when the mutator is
+ // running concurrently; in the cases above it is not profitable, so fall back.
+ return drainInParallel(timeout);
+ }
- if (!WTF::weakCompareAndSwap(&m_flags, currentFlags, newFlags))
- return false;
+ LockHolder locker(m_heap.m_markingMutex);
+ donateAll(locker);
+
+ for (;;) {
+ if (hasElapsed(timeout))
+ return SharedDrainResult::TimedOut;
+
+ if (didReachTermination(locker)) {
+ m_heap.m_markingConditionVariable.notifyAll();
+ return SharedDrainResult::Done;
+ }
+
+ m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
+ }
+}
- WTF::memoryBarrierAfterLock();
- return true;
-#else
- if (isHashConsSingleton())
- return false;
+void SlotVisitor::donateAll()
+{
+ if (isEmpty())
+ return;
+
+ donateAll(holdLock(m_heap.m_markingMutex));
+}
- m_flags |= HashConsLock;
+void SlotVisitor::donateAll(const AbstractLocker&)
+{
+ forEachMarkStack(
+ [&] (MarkStackArray& stack) -> IterationStatus {
+ stack.transferTo(correspondingGlobalStack(stack));
+ return IterationStatus::Continue;
+ });
- return true;
-#endif
+ m_heap.m_markingConditionVariable.notifyAll();
}
-ALWAYS_INLINE void JSString::releaseHashConsLock()
+void SlotVisitor::addOpaqueRoot(void* root)
{
-#if ENABLE(PARALLEL_GC)
- WTF::memoryBarrierBeforeUnlock();
-#endif
- m_flags &= ~HashConsLock;
+ if (!root)
+ return;
+
+ if (m_ignoreNewOpaqueRoots)
+ return;
+
+ if (Options::numberOfGCMarkers() == 1) {
+ // Put directly into the shared HashSet.
+ m_heap.m_opaqueRoots.add(root);
+ return;
+ }
+ // Put into the local set, but merge with the shared one every once in
+ // a while to make sure that the local sets don't grow too large.
+ mergeOpaqueRootsIfProfitable();
+ m_opaqueRoots.add(root);
}
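// A minimal sketch of the usual opaque-root pattern, with hypothetical names ("MyWrapper",
// "wrappedObject()", "MyWeakHandleOwner"); it is illustrative, not code from this patch.
// During marking, the wrapper registers the native object it wraps as an opaque root; a weak
// handle to the wrapper is then kept alive only if some visit registered that same root.
void MyWrapper::visitChildren(JSCell* cell, SlotVisitor& visitor)
{
    MyWrapper* thisObject = jsCast<MyWrapper*>(cell);
    Base::visitChildren(thisObject, visitor);
    visitor.addOpaqueRoot(thisObject->wrappedObject());
}

bool MyWeakHandleOwner::isReachableFromOpaqueRoots(Handle<Unknown> handle, void*, SlotVisitor& visitor)
{
    MyWrapper* wrapper = jsCast<MyWrapper*>(handle.slot()->asCell());
    return visitor.containsOpaqueRoot(wrapper->wrappedObject());
}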
-ALWAYS_INLINE bool JSString::shouldTryHashCons()
+bool SlotVisitor::containsOpaqueRoot(void* root) const
{
- return ((length() > 1) && !isRope() && !isHashConsSingleton());
+ if (!root)
+ return false;
+
+ ASSERT(!m_isInParallelMode);
+ return m_heap.m_opaqueRoots.contains(root);
}
-ALWAYS_INLINE void SlotVisitor::internalAppend(void* from, JSValue* slot)
+TriState SlotVisitor::containsOpaqueRootTriState(void* root) const
{
- // This internalAppend is only intended for visits to object and array backing stores.
- // as it can change the JSValue pointed to be the argument when the original JSValue
- // is a string that contains the same contents as another string.
+ if (!root)
+ return FalseTriState;
+
+ if (m_opaqueRoots.contains(root))
+ return TrueTriState;
+ std::lock_guard<Lock> lock(m_heap.m_opaqueRootsMutex);
+ if (m_heap.m_opaqueRoots.contains(root))
+ return TrueTriState;
+ return MixedTriState;
+}
- StackStats::probe();
- ASSERT(slot);
- JSValue value = *slot;
- ASSERT(value);
- if (!value.isCell())
+void SlotVisitor::mergeIfNecessary()
+{
+ if (m_opaqueRoots.isEmpty())
return;
+ mergeOpaqueRoots();
+}
- JSCell* cell = value.asCell();
- if (!cell)
+void SlotVisitor::mergeOpaqueRootsIfProfitable()
+{
+ if (static_cast<unsigned>(m_opaqueRoots.size()) < Options::opaqueRootMergeThreshold())
return;
-
- validate(cell);
-
- if (m_shouldHashCons && cell->isString()) {
- JSString* string = jsCast<JSString*>(cell);
- if (string->shouldTryHashCons() && string->tryHashConsLock()) {
- UniqueStringMap::AddResult addResult = m_uniqueStrings.add(string->string().impl(), value);
- if (addResult.isNewEntry)
- string->setHashConsSingleton();
- else {
- JSValue existingJSValue = addResult.iterator->value;
- if (value != existingJSValue)
- jsCast<JSString*>(existingJSValue.asCell())->clearHashConsSingleton();
- *slot = existingJSValue;
- string->releaseHashConsLock();
- return;
- }
- string->releaseHashConsLock();
- }
+ mergeOpaqueRoots();
+}
+
+void SlotVisitor::donate()
+{
+ if (!m_isInParallelMode) {
+ dataLog("FATAL: Attempting to donate when not in parallel mode.\n");
+ RELEASE_ASSERT_NOT_REACHED();
}
-
- internalAppend(from, cell);
+
+ if (Options::numberOfGCMarkers() == 1)
+ return;
+
+ donateKnownParallel();
}
-void SlotVisitor::harvestWeakReferences()
+void SlotVisitor::donateAndDrain(MonotonicTime timeout)
{
- StackStats::probe();
- for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
- current->visitWeakReferences(*this);
+ donate();
+ drain(timeout);
}
-void SlotVisitor::finalizeUnconditionalFinalizers()
+void SlotVisitor::mergeOpaqueRoots()
{
- StackStats::probe();
- while (m_shared.m_unconditionalFinalizers.hasNext())
- m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
+ {
+ std::lock_guard<Lock> lock(m_heap.m_opaqueRootsMutex);
+ for (auto* root : m_opaqueRoots)
+ m_heap.m_opaqueRoots.add(root);
+ }
+ m_opaqueRoots.clear();
}
-#if ENABLE(GC_VALIDATION)
-void SlotVisitor::validate(JSCell* cell)
+void SlotVisitor::addWeakReferenceHarvester(WeakReferenceHarvester* weakReferenceHarvester)
{
- RELEASE_ASSERT(cell);
+ m_heap.m_weakReferenceHarvesters.addThreadSafe(weakReferenceHarvester);
+}
- if (!cell->structure()) {
- dataLogF("cell at %p has a null structure\n" , cell);
- CRASH();
- }
+void SlotVisitor::addUnconditionalFinalizer(UnconditionalFinalizer* unconditionalFinalizer)
+{
+ m_heap.m_unconditionalFinalizers.addThreadSafe(unconditionalFinalizer);
+}
- // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
- // I hate this sentence.
- if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo()) {
- const char* parentClassName = 0;
- const char* ourClassName = 0;
- if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo())
- parentClassName = cell->structure()->structure()->JSCell::classInfo()->className;
- if (cell->structure()->JSCell::classInfo())
- ourClassName = cell->structure()->JSCell::classInfo()->className;
- dataLogF("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
- cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
- CRASH();
- }
+void SlotVisitor::didRace(const VisitRaceKey& race)
+{
+ if (Options::verboseVisitRace())
+ dataLog(toCString("GC visit race: ", race, "\n"));
+
+ auto locker = holdLock(heap()->m_raceMarkStackLock);
+ JSCell* cell = race.cell();
+ cell->setCellState(CellState::PossiblyGrey);
+ heap()->m_raceMarkStack->append(cell);
+}
- // Make sure we can walk the ClassInfo chain
- const ClassInfo* info = cell->classInfo();
- do { } while ((info = info->parentClass));
+void SlotVisitor::dump(PrintStream& out) const
+{
+ out.print("Collector: [", pointerListDump(collectorMarkStack()), "], Mutator: [", pointerListDump(mutatorMarkStack()), "]");
}
-#else
-void SlotVisitor::validate(JSCell*)
+
+MarkStackArray& SlotVisitor::correspondingGlobalStack(MarkStackArray& stack)
{
+ if (&stack == &m_collectorStack)
+ return *m_heap.m_sharedCollectorMarkStack;
+ RELEASE_ASSERT(&stack == &m_mutatorStack);
+ return *m_heap.m_sharedMutatorMarkStack;
}
-#endif
} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/SlotVisitor.h b/Source/JavaScriptCore/heap/SlotVisitor.h
index 4a8dc3e97..83479af7f 100644
--- a/Source/JavaScriptCore/heap/SlotVisitor.h
+++ b/Source/JavaScriptCore/heap/SlotVisitor.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,126 +23,215 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef SlotVisitor_h
-#define SlotVisitor_h
+#pragma once
-#include "CopyToken.h"
+#include "CellState.h"
#include "HandleTypes.h"
-#include "MarkStackInlines.h"
-
-#include <wtf/text/StringHash.h>
+#include "IterationStatus.h"
+#include "MarkStack.h"
+#include "OpaqueRootSet.h"
+#include "VisitRaceKey.h"
+#include <wtf/MonotonicTime.h>
namespace JSC {
class ConservativeRoots;
class GCThreadSharedData;
class Heap;
+class HeapCell;
+class HeapSnapshotBuilder;
+class MarkedBlock;
+class UnconditionalFinalizer;
template<typename T> class Weak;
+class WeakReferenceHarvester;
template<typename T> class WriteBarrierBase;
-template<typename T> class JITWriteBarrier;
+
+typedef uint32_t HeapVersion;
class SlotVisitor {
WTF_MAKE_NONCOPYABLE(SlotVisitor);
- friend class HeapRootVisitor; // Allowed to mark a JSValue* or JSCell** directly.
+ WTF_MAKE_FAST_ALLOCATED;
+
+ friend class SetCurrentCellScope;
+ friend class Heap;
public:
- SlotVisitor(GCThreadSharedData&);
+ SlotVisitor(Heap&, CString codeName);
~SlotVisitor();
- MarkStackArray& markStack() { return m_stack; }
-
+ MarkStackArray& collectorMarkStack() { return m_collectorStack; }
+ MarkStackArray& mutatorMarkStack() { return m_mutatorStack; }
+ const MarkStackArray& collectorMarkStack() const { return m_collectorStack; }
+ const MarkStackArray& mutatorMarkStack() const { return m_mutatorStack; }
+
+ VM& vm();
+ const VM& vm() const;
Heap* heap() const;
void append(ConservativeRoots&);
- template<typename T> void append(JITWriteBarrier<T>*);
- template<typename T> void append(WriteBarrierBase<T>*);
+ template<typename T> void append(const WriteBarrierBase<T>&);
+ template<typename T> void appendHidden(const WriteBarrierBase<T>&);
template<typename Iterator> void append(Iterator begin , Iterator end);
- void appendValues(WriteBarrierBase<Unknown>*, size_t count);
+ void appendValues(const WriteBarrierBase<Unknown>*, size_t count);
+ void appendValuesHidden(const WriteBarrierBase<Unknown>*, size_t count);
+
+ // These don't require you to prove that you have a WriteBarrier<>. That makes sense
+ // for:
+ //
+ // - roots.
+ // - sophisticated data structures that barrier through other means (like DFG::Plan and
+ // friends).
+ //
+ // If you are not a root and you don't know what kind of barrier you have, then you
+ // shouldn't call these methods.
+ JS_EXPORT_PRIVATE void appendUnbarriered(JSValue);
+ void appendUnbarriered(JSValue*, size_t);
+ void appendUnbarriered(JSCell*);
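// A minimal sketch, assuming a hypothetical cell class "MyCell" with a barriered member
// "m_value" and a hypothetical root owner holding a bare "JSCell* m_rootCell". Barriered
// members go through append(); only true roots, or structures that barrier through other
// means, should use appendUnbarriered().
void MyCell::visitChildren(JSCell* cell, SlotVisitor& visitor)
{
    MyCell* thisObject = jsCast<MyCell*>(cell);
    Base::visitChildren(thisObject, visitor);
    visitor.append(thisObject->m_value); // WriteBarrier<> member: proves a barrier exists.
}

void MyRootOwner::visitRoots(SlotVisitor& visitor)
{
    visitor.appendUnbarriered(m_rootCell); // A root with no write barrier of its own.
}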
template<typename T>
- void appendUnbarrieredPointer(T**);
- void appendUnbarrieredValue(JSValue*);
- template<typename T>
- void appendUnbarrieredWeak(Weak<T>*);
- void unconditionallyAppend(JSCell*);
+ void append(const Weak<T>& weak);
+
+ JS_EXPORT_PRIVATE void addOpaqueRoot(void*);
- void addOpaqueRoot(void*);
- bool containsOpaqueRoot(void*);
- TriState containsOpaqueRootTriState(void*);
- int opaqueRootCount();
+ JS_EXPORT_PRIVATE bool containsOpaqueRoot(void*) const;
+ TriState containsOpaqueRootTriState(void*) const;
- GCThreadSharedData& sharedData() const { return m_shared; }
- bool isEmpty() { return m_stack.isEmpty(); }
+ bool isEmpty() { return m_collectorStack.isEmpty() && m_mutatorStack.isEmpty(); }
- void setup();
+ void didStartMarking();
void reset();
- void clearMarkStack();
+ void clearMarkStacks();
size_t bytesVisited() const { return m_bytesVisited; }
- size_t bytesCopied() const { return m_bytesCopied; }
size_t visitCount() const { return m_visitCount; }
+
+ void addToVisitCount(size_t value) { m_visitCount += value; }
void donate();
- void drain();
- void donateAndDrain();
+ void drain(MonotonicTime timeout = MonotonicTime::infinity());
+ void donateAndDrain(MonotonicTime timeout = MonotonicTime::infinity());
enum SharedDrainMode { SlaveDrain, MasterDrain };
- void drainFromShared(SharedDrainMode);
+ enum class SharedDrainResult { Done, TimedOut };
+ SharedDrainResult drainFromShared(SharedDrainMode, MonotonicTime timeout = MonotonicTime::infinity());
- void harvestWeakReferences();
- void finalizeUnconditionalFinalizers();
+ SharedDrainResult drainInParallel(MonotonicTime timeout = MonotonicTime::infinity());
+ SharedDrainResult drainInParallelPassively(MonotonicTime timeout = MonotonicTime::infinity());
- void copyLater(JSCell*, CopyToken, void*, size_t);
+ // Attempts to perform an increment of draining that involves walking only `bytes` worth of data. It
+ // is likely to accidentally walk somewhat more or less than that. It will usually mark more than
+ // `bytes`; it may mark less than `bytes` if we're reaching termination or if the global worklist is
+ // empty (which may in rare cases happen temporarily even when we're not reaching termination).
+ size_t performIncrementOfDraining(size_t bytes);
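// A minimal sketch of a hypothetical driver that drains in illustrative 64KB increments;
// it assumes the visitor is already in parallel mode and is not the scheduler JSC actually
// uses. Each call visits roughly bytesPerIncrement worth of cells and returns the bytes
// actually visited.
void drainInSmallIncrements(SlotVisitor& visitor)
{
    const size_t bytesPerIncrement = 64 * 1024;
    for (;;) {
        size_t visited = visitor.performIncrementOfDraining(bytesPerIncrement);
        if (!visited)
            break; // No work could be pulled from the global worklists.
        if (visited < bytesPerIncrement)
            break; // A short increment usually means we are near termination.
    }
}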
- void reportExtraMemoryUsage(JSCell* owner, size_t);
+ JS_EXPORT_PRIVATE void mergeIfNecessary();
+
+ // This informs the GC about an auxiliary allocation of some size that we are keeping alive. If you
+ // don't do this, then the space will be freed at the end of the GC.
+ void markAuxiliary(const void* base);
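// A minimal sketch, assuming a hypothetical cell class "MyVectorCell" whose "m_buffer" points
// at storage allocated from the GC's auxiliary (butterfly-like) space. Reporting the buffer
// with markAuxiliary() keeps that storage from being reclaimed at the end of the collection.
void MyVectorCell::visitChildren(JSCell* cell, SlotVisitor& visitor)
{
    MyVectorCell* thisObject = jsCast<MyVectorCell*>(cell);
    Base::visitChildren(thisObject, visitor);
    if (void* buffer = thisObject->m_buffer)
        visitor.markAuxiliary(buffer); // Tells the GC this auxiliary allocation is live.
}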
+
+ void reportExtraMemoryVisited(size_t);
+#if ENABLE(RESOURCE_USAGE)
+ void reportExternalMemoryVisited(size_t);
+#endif
void addWeakReferenceHarvester(WeakReferenceHarvester*);
void addUnconditionalFinalizer(UnconditionalFinalizer*);
-#if ENABLE(OBJECT_MARK_LOGGING)
- inline void resetChildCount() { m_logChildCount = 0; }
- inline unsigned childCount() { return m_logChildCount; }
- inline void incrementChildCount() { m_logChildCount++; }
-#endif
+ void dump(PrintStream&) const;
+
+ bool isBuildingHeapSnapshot() const { return !!m_heapSnapshotBuilder; }
+
+ HeapVersion markingVersion() const { return m_markingVersion; }
+
+ bool mutatorIsStopped() const { return m_mutatorIsStopped; }
+
+ Lock& rightToRun() { return m_rightToRun; }
+
+ void updateMutatorIsStopped(const AbstractLocker&);
+ void updateMutatorIsStopped();
+
+ bool hasAcknowledgedThatTheMutatorIsResumed() const;
+ bool mutatorIsStoppedIsUpToDate() const;
+
+ void optimizeForStoppedMutator();
+
+ void didRace(const VisitRaceKey&);
+ void didRace(JSCell* cell, const char* reason) { didRace(VisitRaceKey(cell, reason)); }
+
+ void visitAsConstraint(const JSCell*);
+
+ bool didReachTermination();
+
+ void setIgnoreNewOpaqueRoots(bool value) { m_ignoreNewOpaqueRoots = value; }
+
+ void donateAll();
+
+ const char* codeName() const { return m_codeName.data(); }
private:
friend class ParallelModeEnabler;
- JS_EXPORT_PRIVATE static void validate(JSCell*);
+ void appendJSCellOrAuxiliary(HeapCell*);
+ void appendHidden(JSValue);
- void append(JSValue*);
- void append(JSValue*, size_t count);
- void append(JSCell**);
+ JS_EXPORT_PRIVATE void setMarkedAndAppendToMarkStack(JSCell*);
+
+ template<typename ContainerType>
+ void setMarkedAndAppendToMarkStack(ContainerType&, JSCell*);
+
+ void appendToMarkStack(JSCell*);
- void internalAppend(void* from, JSCell*);
- void internalAppend(void* from, JSValue);
- void internalAppend(void* from, JSValue*);
+ template<typename ContainerType>
+ void appendToMarkStack(ContainerType&, JSCell*);
- JS_EXPORT_PRIVATE void mergeOpaqueRoots();
- void mergeOpaqueRootsIfNecessary();
+ void appendToMutatorMarkStack(const JSCell*);
+
+ void noteLiveAuxiliaryCell(HeapCell*);
+
+ void mergeOpaqueRoots();
+
void mergeOpaqueRootsIfProfitable();
+
+ void visitChildren(const JSCell*);
void donateKnownParallel();
+ void donateKnownParallel(MarkStackArray& from, MarkStackArray& to);
+
+ void donateAll(const AbstractLocker&);
- MarkStackArray m_stack;
- HashSet<void*> m_opaqueRoots; // Handle-owning data structures not visible to the garbage collector.
+ bool hasWork(const AbstractLocker&);
+ bool didReachTermination(const AbstractLocker&);
+
+ template<typename Func>
+ IterationStatus forEachMarkStack(const Func&);
+
+ MarkStackArray& correspondingGlobalStack(MarkStackArray&);
+
+ MarkStackArray m_collectorStack;
+ MarkStackArray m_mutatorStack;
+ OpaqueRootSet m_opaqueRoots; // Handle-owning data structures not visible to the garbage collector.
+ bool m_ignoreNewOpaqueRoots { false }; // Useful as a debugging mode.
size_t m_bytesVisited;
- size_t m_bytesCopied;
size_t m_visitCount;
+ size_t m_nonCellVisitCount { 0 }; // Used for incremental draining, ignored otherwise.
bool m_isInParallelMode;
-
- GCThreadSharedData& m_shared;
-
- bool m_shouldHashCons; // Local per-thread copy of shared flag for performance reasons
- typedef HashMap<StringImpl*, JSValue> UniqueStringMap;
- UniqueStringMap m_uniqueStrings;
-#if ENABLE(OBJECT_MARK_LOGGING)
- unsigned m_logChildCount;
-#endif
+ HeapVersion m_markingVersion;
+
+ Heap& m_heap;
+ HeapSnapshotBuilder* m_heapSnapshotBuilder { nullptr };
+ JSCell* m_currentCell { nullptr };
+ bool m_isFirstVisit { false };
+ bool m_mutatorIsStopped { false };
+ bool m_canOptimizeForStoppedMutator { false };
+ Lock m_rightToRun;
+
+ CString m_codeName;
+
public:
#if !ASSERT_DISABLED
bool m_isCheckingForDefaultMarkViolation;
@@ -170,5 +259,3 @@ private:
};
} // namespace JSC
-
-#endif // SlotVisitor_h
diff --git a/Source/JavaScriptCore/heap/SlotVisitorInlines.h b/Source/JavaScriptCore/heap/SlotVisitorInlines.h
index ccd2e4ae1..06475a093 100644
--- a/Source/JavaScriptCore/heap/SlotVisitorInlines.h
+++ b/Source/JavaScriptCore/heap/SlotVisitorInlines.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,250 +23,101 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef SlotVisitorInlines_h
-#define SlotVisitorInlines_h
+#pragma once
-#include "CopiedBlockInlines.h"
-#include "CopiedSpaceInlines.h"
-#include "Options.h"
#include "SlotVisitor.h"
#include "Weak.h"
#include "WeakInlines.h"
namespace JSC {
-ALWAYS_INLINE void SlotVisitor::append(JSValue* slot, size_t count)
+inline void SlotVisitor::appendUnbarriered(JSValue* slot, size_t count)
{
- for (size_t i = 0; i < count; ++i) {
- JSValue& value = slot[i];
- internalAppend(&value, value);
- }
+ for (size_t i = count; i--;)
+ appendUnbarriered(slot[i]);
}
-template<typename T>
-inline void SlotVisitor::appendUnbarrieredPointer(T** slot)
+inline void SlotVisitor::appendUnbarriered(JSCell* cell)
{
- ASSERT(slot);
- JSCell* cell = *slot;
- internalAppend(slot, cell);
-}
-
-ALWAYS_INLINE void SlotVisitor::append(JSValue* slot)
-{
- ASSERT(slot);
- internalAppend(slot, *slot);
-}
-
-ALWAYS_INLINE void SlotVisitor::appendUnbarrieredValue(JSValue* slot)
-{
- ASSERT(slot);
- internalAppend(slot, *slot);
-}
-
-ALWAYS_INLINE void SlotVisitor::append(JSCell** slot)
-{
- ASSERT(slot);
- internalAppend(slot, *slot);
+ appendUnbarriered(JSValue(cell));
}
template<typename T>
-ALWAYS_INLINE void SlotVisitor::appendUnbarrieredWeak(Weak<T>* weak)
-{
- ASSERT(weak);
- if (weak->get())
- internalAppend(0, weak->get());
-}
-
-ALWAYS_INLINE void SlotVisitor::internalAppend(void* from, JSValue value)
+inline void SlotVisitor::append(const Weak<T>& weak)
{
- if (!value || !value.isCell())
- return;
- internalAppend(from, value.asCell());
+ appendUnbarriered(weak.get());
}
-ALWAYS_INLINE void SlotVisitor::internalAppend(void* from, JSCell* cell)
-{
- ASSERT(!m_isCheckingForDefaultMarkViolation);
- if (!cell)
- return;
-#if ENABLE(ALLOCATION_LOGGING)
- dataLogF("JSC GC noticing reference from %p to %p.\n", from, cell);
-#else
- UNUSED_PARAM(from);
-#endif
-#if ENABLE(GC_VALIDATION)
- validate(cell);
-#endif
- if (Heap::testAndSetMarked(cell) || !cell->structure())
- return;
-
- m_bytesVisited += MarkedBlock::blockFor(cell)->cellSize();
-
- MARK_LOG_CHILD(*this, cell);
-
- unconditionallyAppend(cell);
-}
-
-ALWAYS_INLINE void SlotVisitor::unconditionallyAppend(JSCell* cell)
+template<typename T>
+inline void SlotVisitor::append(const WriteBarrierBase<T>& slot)
{
- ASSERT(Heap::isMarked(cell));
- m_visitCount++;
-
- // Should never attempt to mark something that is zapped.
- ASSERT(!cell->isZapped());
-
- m_stack.append(cell);
+ appendUnbarriered(slot.get());
}
-template<typename T> inline void SlotVisitor::append(WriteBarrierBase<T>* slot)
+template<typename T>
+inline void SlotVisitor::appendHidden(const WriteBarrierBase<T>& slot)
{
- internalAppend(slot, *slot->slot());
+ appendHidden(slot.get());
}
-template<typename Iterator> inline void SlotVisitor::append(Iterator begin, Iterator end)
+template<typename Iterator>
+inline void SlotVisitor::append(Iterator begin, Iterator end)
{
for (auto it = begin; it != end; ++it)
- append(&*it);
-}
-
-ALWAYS_INLINE void SlotVisitor::appendValues(WriteBarrierBase<Unknown>* barriers, size_t count)
-{
- append(barriers->slot(), count);
+ append(*it);
}
-inline void SlotVisitor::addWeakReferenceHarvester(WeakReferenceHarvester* weakReferenceHarvester)
+inline void SlotVisitor::appendValues(const WriteBarrierBase<Unknown>* barriers, size_t count)
{
- m_shared.m_weakReferenceHarvesters.addThreadSafe(weakReferenceHarvester);
+ for (size_t i = 0; i < count; ++i)
+ append(barriers[i]);
}
-inline void SlotVisitor::addUnconditionalFinalizer(UnconditionalFinalizer* unconditionalFinalizer)
+inline void SlotVisitor::appendValuesHidden(const WriteBarrierBase<Unknown>* barriers, size_t count)
{
- m_shared.m_unconditionalFinalizers.addThreadSafe(unconditionalFinalizer);
+ for (size_t i = 0; i < count; ++i)
+ appendHidden(barriers[i]);
}
-inline void SlotVisitor::addOpaqueRoot(void* root)
+inline void SlotVisitor::reportExtraMemoryVisited(size_t size)
{
-#if ENABLE(PARALLEL_GC)
- if (Options::numberOfGCMarkers() == 1) {
- // Put directly into the shared HashSet.
- m_shared.m_opaqueRoots.add(root);
- return;
+ if (m_isFirstVisit) {
+ heap()->reportExtraMemoryVisited(size);
+ m_nonCellVisitCount += size;
}
- // Put into the local set, but merge with the shared one every once in
- // a while to make sure that the local sets don't grow too large.
- mergeOpaqueRootsIfProfitable();
- m_opaqueRoots.add(root);
-#else
- m_opaqueRoots.add(root);
-#endif
}
-inline bool SlotVisitor::containsOpaqueRoot(void* root)
+#if ENABLE(RESOURCE_USAGE)
+inline void SlotVisitor::reportExternalMemoryVisited(size_t size)
{
- ASSERT(!m_isInParallelMode);
-#if ENABLE(PARALLEL_GC)
- ASSERT(m_opaqueRoots.isEmpty());
- return m_shared.m_opaqueRoots.contains(root);
-#else
- return m_opaqueRoots.contains(root);
-#endif
+ if (m_isFirstVisit)
+ heap()->reportExternalMemoryVisited(size);
}
-
-inline TriState SlotVisitor::containsOpaqueRootTriState(void* root)
-{
- if (m_opaqueRoots.contains(root))
- return TrueTriState;
- MutexLocker locker(m_shared.m_opaqueRootsLock);
- if (m_shared.m_opaqueRoots.contains(root))
- return TrueTriState;
- return MixedTriState;
-}
-
-inline int SlotVisitor::opaqueRootCount()
-{
- ASSERT(!m_isInParallelMode);
-#if ENABLE(PARALLEL_GC)
- ASSERT(m_opaqueRoots.isEmpty());
- return m_shared.m_opaqueRoots.size();
-#else
- return m_opaqueRoots.size();
#endif
-}
-inline void SlotVisitor::mergeOpaqueRootsIfNecessary()
-{
- if (m_opaqueRoots.isEmpty())
- return;
- mergeOpaqueRoots();
-}
-
-inline void SlotVisitor::mergeOpaqueRootsIfProfitable()
-{
- if (static_cast<unsigned>(m_opaqueRoots.size()) < Options::opaqueRootMergeThreshold())
- return;
- mergeOpaqueRoots();
-}
-
-inline void SlotVisitor::donate()
+inline Heap* SlotVisitor::heap() const
{
- ASSERT(m_isInParallelMode);
- if (Options::numberOfGCMarkers() == 1)
- return;
-
- donateKnownParallel();
+ return &m_heap;
}
-inline void SlotVisitor::donateAndDrain()
+inline VM& SlotVisitor::vm()
{
- donate();
- drain();
+ return *m_heap.m_vm;
}
-inline void SlotVisitor::copyLater(JSCell* owner, CopyToken token, void* ptr, size_t bytes)
+inline const VM& SlotVisitor::vm() const
{
- ASSERT(bytes);
- CopiedBlock* block = CopiedSpace::blockFor(ptr);
- if (block->isOversize()) {
- m_shared.m_copiedSpace->pin(block);
- return;
- }
-
- SpinLockHolder locker(&block->workListLock());
- if (heap()->operationInProgress() == FullCollection || block->shouldReportLiveBytes(locker, owner)) {
- m_bytesCopied += bytes;
- block->reportLiveBytes(locker, owner, token, bytes);
- }
-}
-
-inline void SlotVisitor::reportExtraMemoryUsage(JSCell* owner, size_t size)
-{
-#if ENABLE(GGC)
- // We don't want to double-count the extra memory that was reported in previous collections.
- if (heap()->operationInProgress() == EdenCollection && MarkedBlock::blockFor(owner)->isRemembered(owner))
- return;
-#else
- UNUSED_PARAM(owner);
-#endif
-
- size_t* counter = &m_shared.m_vm->heap.m_extraMemoryUsage;
-
-#if ENABLE(COMPARE_AND_SWAP)
- for (;;) {
- size_t oldSize = *counter;
- if (WTF::weakCompareAndSwapSize(counter, oldSize, oldSize + size))
- return;
- }
-#else
- (*counter) += size;
-#endif
+ return *m_heap.m_vm;
}
-inline Heap* SlotVisitor::heap() const
+template<typename Func>
+IterationStatus SlotVisitor::forEachMarkStack(const Func& func)
{
- return &sharedData().m_vm->heap;
+ if (func(m_collectorStack) == IterationStatus::Done)
+ return IterationStatus::Done;
+ if (func(m_mutatorStack) == IterationStatus::Done)
+ return IterationStatus::Done;
+ return IterationStatus::Continue;
}
} // namespace JSC
-
-#endif // SlotVisitorInlines_h
-
diff --git a/Source/JavaScriptCore/heap/SpaceTimeMutatorScheduler.cpp b/Source/JavaScriptCore/heap/SpaceTimeMutatorScheduler.cpp
new file mode 100644
index 000000000..7281bc4b4
--- /dev/null
+++ b/Source/JavaScriptCore/heap/SpaceTimeMutatorScheduler.cpp
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "SpaceTimeMutatorScheduler.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+// The scheduler will often make decisions based on state that is in flux. It will be fine so
+// long as multiple uses of the same value all see the same value. We wouldn't get this for free,
+// since our need to modularize the calculation results in a tendency to access the same mutable
+// field in Heap multiple times, and to access the current time multiple times.
+class SpaceTimeMutatorScheduler::Snapshot {
+public:
+ Snapshot(SpaceTimeMutatorScheduler& scheduler)
+ {
+ m_now = MonotonicTime::now();
+ m_bytesAllocatedThisCycle = scheduler.bytesAllocatedThisCycleImpl();
+ }
+
+ MonotonicTime now() const { return m_now; }
+
+ double bytesAllocatedThisCycle() const { return m_bytesAllocatedThisCycle; }
+
+private:
+ MonotonicTime m_now;
+ double m_bytesAllocatedThisCycle;
+};
+
+SpaceTimeMutatorScheduler::SpaceTimeMutatorScheduler(Heap& heap)
+ : m_heap(heap)
+ , m_period(Seconds::fromMilliseconds(Options::concurrentGCPeriodMS()))
+{
+}
+
+SpaceTimeMutatorScheduler::~SpaceTimeMutatorScheduler()
+{
+}
+
+MutatorScheduler::State SpaceTimeMutatorScheduler::state() const
+{
+ return m_state;
+}
+
+void SpaceTimeMutatorScheduler::beginCollection()
+{
+ RELEASE_ASSERT(m_state == Normal);
+ m_state = Stopped;
+ m_startTime = MonotonicTime::now();
+
+ m_bytesAllocatedThisCycleAtTheBeginning = m_heap.m_bytesAllocatedThisCycle;
+ m_bytesAllocatedThisCycleAtTheEnd =
+ Options::concurrentGCMaxHeadroom() *
+ std::max<double>(m_bytesAllocatedThisCycleAtTheBeginning, m_heap.m_maxEdenSize);
+}
+
+void SpaceTimeMutatorScheduler::didStop()
+{
+ RELEASE_ASSERT(m_state == Stopped || m_state == Resumed);
+ m_state = Stopped;
+}
+
+void SpaceTimeMutatorScheduler::willResume()
+{
+ RELEASE_ASSERT(m_state == Stopped || m_state == Resumed);
+ m_state = Resumed;
+}
+
+void SpaceTimeMutatorScheduler::didExecuteConstraints()
+{
+ // If we execute constraints, we want to forgive the GC for all of the time it had stopped the
+ // world for in this increment. This hack is empirically better than every other heuristic I
+ // tried, because it just means that the GC is happy to pause for longer when it's dealing
+ // with things that don't play well with concurrency.
+ // FIXME: This feels so wrong but benchmarks so good.
+ // https://bugs.webkit.org/show_bug.cgi?id=166833
+ m_startTime = MonotonicTime::now();
+}
+
+MonotonicTime SpaceTimeMutatorScheduler::timeToStop()
+{
+ switch (m_state) {
+ case Normal:
+ return MonotonicTime::infinity();
+ case Stopped:
+ return MonotonicTime::now();
+ case Resumed: {
+ Snapshot snapshot(*this);
+ if (!shouldBeResumed(snapshot))
+ return snapshot.now();
+ return snapshot.now() - elapsedInPeriod(snapshot) + m_period;
+ } }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return MonotonicTime();
+}
+
+MonotonicTime SpaceTimeMutatorScheduler::timeToResume()
+{
+ switch (m_state) {
+ case Normal:
+ case Resumed:
+ return MonotonicTime::now();
+ case Stopped: {
+ Snapshot snapshot(*this);
+ if (shouldBeResumed(snapshot))
+ return snapshot.now();
+ return snapshot.now() - elapsedInPeriod(snapshot) + m_period * collectorUtilization(snapshot);
+ } }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return MonotonicTime();
+}
+
+void SpaceTimeMutatorScheduler::log()
+{
+ ASSERT(Options::logGC());
+ Snapshot snapshot(*this);
+ dataLog(
+ "a=", format("%.0lf", bytesSinceBeginningOfCycle(snapshot) / 1024), "kb ",
+ "hf=", format("%.3lf", headroomFullness(snapshot)), " ",
+ "mu=", format("%.3lf", mutatorUtilization(snapshot)), " ");
+}
+
+void SpaceTimeMutatorScheduler::endCollection()
+{
+ m_state = Normal;
+ m_startTime = MonotonicTime::now();
+}
+
+double SpaceTimeMutatorScheduler::bytesAllocatedThisCycleImpl()
+{
+ return m_heap.m_bytesAllocatedThisCycle;
+}
+
+double SpaceTimeMutatorScheduler::bytesSinceBeginningOfCycle(const Snapshot& snapshot)
+{
+ return snapshot.bytesAllocatedThisCycle() - m_bytesAllocatedThisCycleAtTheBeginning;
+}
+
+double SpaceTimeMutatorScheduler::maxHeadroom()
+{
+ return m_bytesAllocatedThisCycleAtTheEnd - m_bytesAllocatedThisCycleAtTheBeginning;
+}
+
+double SpaceTimeMutatorScheduler::headroomFullness(const Snapshot& snapshot)
+{
+ double result = bytesSinceBeginningOfCycle(snapshot) / maxHeadroom();
+
+ // headroomFullness can be NaN and other interesting things if
+ // bytesAllocatedThisCycleAtTheBeginning is zero. We see that in debug tests. This code
+ // defends against all floating point dragons.
+
+ if (!(result >= 0))
+ result = 0;
+ if (!(result <= 1))
+ result = 1;
+
+ return result;
+}
+
+double SpaceTimeMutatorScheduler::mutatorUtilization(const Snapshot& snapshot)
+{
+ double mutatorUtilization = 1 - headroomFullness(snapshot);
+
+ // Scale the mutator utilization into the permitted window.
+ mutatorUtilization =
+ Options::minimumMutatorUtilization() +
+ mutatorUtilization * (
+ Options::maximumMutatorUtilization() -
+ Options::minimumMutatorUtilization());
+
+ return mutatorUtilization;
+}
+
+double SpaceTimeMutatorScheduler::collectorUtilization(const Snapshot& snapshot)
+{
+ return 1 - mutatorUtilization(snapshot);
+}
+
+Seconds SpaceTimeMutatorScheduler::elapsedInPeriod(const Snapshot& snapshot)
+{
+ return (snapshot.now() - m_startTime) % m_period;
+}
+
+double SpaceTimeMutatorScheduler::phase(const Snapshot& snapshot)
+{
+ return elapsedInPeriod(snapshot) / m_period;
+}
+
+bool SpaceTimeMutatorScheduler::shouldBeResumed(const Snapshot& snapshot)
+{
+ return phase(snapshot) > collectorUtilization(snapshot);
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/SpaceTimeMutatorScheduler.h b/Source/JavaScriptCore/heap/SpaceTimeMutatorScheduler.h
new file mode 100644
index 000000000..ebded2559
--- /dev/null
+++ b/Source/JavaScriptCore/heap/SpaceTimeMutatorScheduler.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "MutatorScheduler.h"
+#include <wtf/Seconds.h>
+
+namespace JSC {
+
+class Heap;
+
+// The JSC concurrent GC sometimes stops the world in order to stay ahead of the mutator. These
+// deliberate, synthetic pauses ensure that the GC won't have to do one huge pause in order to catch
+// up to the retreating wavefront. The scheduler is called "space-time" because it links the amount
+// of time that the world is paused for to the amount of space that the world allocated since the GC
+// cycle began.
+
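// A small illustrative calculation, assuming an example 2ms period and a collector utilization
// of 0.7 (both hypothetical numbers): each period of length P is split so the world is stopped
// for the first 0.7 * P and resumed for the remaining 0.3 * P. shouldBeResumed() encodes exactly
// this split: resume once phase(snapshot) exceeds collectorUtilization(snapshot).
inline Seconds mutatorShareOfPeriod(Seconds period, double collectorUtilization)
{
    return period * (1 - collectorUtilization); // e.g. 2ms * 0.3 = 0.6ms resumed per period.
}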
+class SpaceTimeMutatorScheduler : public MutatorScheduler {
+public:
+ SpaceTimeMutatorScheduler(Heap&);
+ ~SpaceTimeMutatorScheduler();
+
+ State state() const override;
+
+ void beginCollection() override;
+
+ void didStop() override;
+ void willResume() override;
+ void didExecuteConstraints() override;
+
+ MonotonicTime timeToStop() override;
+ MonotonicTime timeToResume() override;
+
+ void log() override;
+
+ void endCollection() override;
+
+private:
+ class Snapshot;
+ friend class Snapshot;
+
+ double bytesAllocatedThisCycleImpl();
+
+ double bytesSinceBeginningOfCycle(const Snapshot&);
+ double maxHeadroom();
+ double headroomFullness(const Snapshot&);
+ double mutatorUtilization(const Snapshot&);
+ double collectorUtilization(const Snapshot&);
+ Seconds elapsedInPeriod(const Snapshot&);
+ double phase(const Snapshot&);
+ bool shouldBeResumed(const Snapshot&);
+
+ Heap& m_heap;
+ Seconds m_period;
+ State m_state { Normal };
+
+ double m_bytesAllocatedThisCycleAtTheBeginning { 0 };
+ double m_bytesAllocatedThisCycleAtTheEnd { 0 };
+ MonotonicTime m_startTime;
+};
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/StochasticSpaceTimeMutatorScheduler.cpp b/Source/JavaScriptCore/heap/StochasticSpaceTimeMutatorScheduler.cpp
new file mode 100644
index 000000000..158355ad6
--- /dev/null
+++ b/Source/JavaScriptCore/heap/StochasticSpaceTimeMutatorScheduler.cpp
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "StochasticSpaceTimeMutatorScheduler.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+// The scheduler will often make decisions based on state that is in flux. It will be fine so
+// long as multiple uses of the same value all see the same value. We wouldn't get this for free,
+// since our need to modularize the calculation results in a tendency to access the same mutable
+// field in Heap multiple times, and to access the current time multiple times.
+class StochasticSpaceTimeMutatorScheduler::Snapshot {
+public:
+ Snapshot(StochasticSpaceTimeMutatorScheduler& scheduler)
+ {
+ m_now = MonotonicTime::now();
+ m_bytesAllocatedThisCycle = scheduler.bytesAllocatedThisCycleImpl();
+ }
+
+ MonotonicTime now() const { return m_now; }
+
+ double bytesAllocatedThisCycle() const { return m_bytesAllocatedThisCycle; }
+
+private:
+ MonotonicTime m_now;
+ double m_bytesAllocatedThisCycle;
+};
+
+StochasticSpaceTimeMutatorScheduler::StochasticSpaceTimeMutatorScheduler(Heap& heap)
+ : m_heap(heap)
+ , m_minimumPause(Seconds::fromMilliseconds(Options::minimumGCPauseMS()))
+ , m_pauseScale(Options::gcPauseScale())
+{
+}
+
+StochasticSpaceTimeMutatorScheduler::~StochasticSpaceTimeMutatorScheduler()
+{
+}
+
+MutatorScheduler::State StochasticSpaceTimeMutatorScheduler::state() const
+{
+ return m_state;
+}
+
+void StochasticSpaceTimeMutatorScheduler::beginCollection()
+{
+ RELEASE_ASSERT(m_state == Normal);
+ m_state = Stopped;
+
+ m_bytesAllocatedThisCycleAtTheBeginning = m_heap.m_bytesAllocatedThisCycle;
+ m_bytesAllocatedThisCycleAtTheEnd =
+ Options::concurrentGCMaxHeadroom() *
+ std::max<double>(m_bytesAllocatedThisCycleAtTheBeginning, m_heap.m_maxEdenSize);
+
+ if (Options::logGC())
+ dataLog("ca=", m_bytesAllocatedThisCycleAtTheBeginning / 1024, "kb h=", (m_bytesAllocatedThisCycleAtTheEnd - m_bytesAllocatedThisCycleAtTheBeginning) / 1024, "kb ");
+
+ m_beforeConstraints = MonotonicTime::now();
+}
+
+void StochasticSpaceTimeMutatorScheduler::didStop()
+{
+ RELEASE_ASSERT(m_state == Stopped || m_state == Resumed);
+ m_state = Stopped;
+}
+
+void StochasticSpaceTimeMutatorScheduler::willResume()
+{
+ RELEASE_ASSERT(m_state == Stopped || m_state == Resumed);
+ m_state = Resumed;
+}
+
+void StochasticSpaceTimeMutatorScheduler::didReachTermination()
+{
+ m_beforeConstraints = MonotonicTime::now();
+}
+
+void StochasticSpaceTimeMutatorScheduler::didExecuteConstraints()
+{
+ Snapshot snapshot(*this);
+
+ Seconds constraintExecutionDuration = snapshot.now() - m_beforeConstraints;
+
+ m_targetPause = std::max(
+ constraintExecutionDuration * m_pauseScale,
+ m_minimumPause);
+
+ if (Options::logGC())
+ dataLog("tp=", m_targetPause.milliseconds(), "ms ");
+
+ m_plannedResumeTime = snapshot.now() + m_targetPause;
+}
+
+void StochasticSpaceTimeMutatorScheduler::synchronousDrainingDidStall()
+{
+ Snapshot snapshot(*this);
+
+ double resumeProbability = mutatorUtilization(snapshot);
+ if (resumeProbability < Options::epsilonMutatorUtilization()) {
+ m_plannedResumeTime = MonotonicTime::infinity();
+ return;
+ }
+
+ bool shouldResume = m_random.get() < resumeProbability;
+
+ if (shouldResume) {
+ m_plannedResumeTime = snapshot.now();
+ return;
+ }
+
+ m_plannedResumeTime = snapshot.now() + m_targetPause;
+}
+
+MonotonicTime StochasticSpaceTimeMutatorScheduler::timeToStop()
+{
+ switch (m_state) {
+ case Normal:
+ return MonotonicTime::infinity();
+ case Stopped:
+ return MonotonicTime::now();
+ case Resumed: {
+ // Once we're running, we keep going unless we run out of headroom.
+ Snapshot snapshot(*this);
+ if (mutatorUtilization(snapshot) < Options::epsilonMutatorUtilization())
+ return MonotonicTime::now();
+ return MonotonicTime::infinity();
+ } }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return MonotonicTime();
+}
+
+MonotonicTime StochasticSpaceTimeMutatorScheduler::timeToResume()
+{
+ switch (m_state) {
+ case Normal:
+ case Resumed:
+ return MonotonicTime::now();
+ case Stopped:
+ return m_plannedResumeTime;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return MonotonicTime();
+}
+
+void StochasticSpaceTimeMutatorScheduler::log()
+{
+ ASSERT(Options::logGC());
+ Snapshot snapshot(*this);
+ dataLog(
+ "a=", format("%.0lf", bytesSinceBeginningOfCycle(snapshot) / 1024), "kb ",
+ "hf=", format("%.3lf", headroomFullness(snapshot)), " ",
+ "mu=", format("%.3lf", mutatorUtilization(snapshot)), " ");
+}
+
+void StochasticSpaceTimeMutatorScheduler::endCollection()
+{
+ m_state = Normal;
+}
+
+double StochasticSpaceTimeMutatorScheduler::bytesAllocatedThisCycleImpl()
+{
+ return m_heap.m_bytesAllocatedThisCycle;
+}
+
+double StochasticSpaceTimeMutatorScheduler::bytesSinceBeginningOfCycle(const Snapshot& snapshot)
+{
+ return snapshot.bytesAllocatedThisCycle() - m_bytesAllocatedThisCycleAtTheBeginning;
+}
+
+double StochasticSpaceTimeMutatorScheduler::maxHeadroom()
+{
+ return m_bytesAllocatedThisCycleAtTheEnd - m_bytesAllocatedThisCycleAtTheBeginning;
+}
+
+double StochasticSpaceTimeMutatorScheduler::headroomFullness(const Snapshot& snapshot)
+{
+ double result = bytesSinceBeginningOfCycle(snapshot) / maxHeadroom();
+
+ // headroomFullness can be NaN and other interesting things if
+ // bytesAllocatedThisCycleAtTheBeginning is zero. We see that in debug tests. This code
+ // defends against all floating point dragons.
+
+ if (!(result >= 0))
+ result = 0;
+ if (!(result <= 1))
+ result = 1;
+
+ return result;
+}
+
+double StochasticSpaceTimeMutatorScheduler::mutatorUtilization(const Snapshot& snapshot)
+{
+ double mutatorUtilization = 1 - headroomFullness(snapshot);
+
+ // Scale the mutator utilization into the permitted window.
+ mutatorUtilization =
+ Options::minimumMutatorUtilization() +
+ mutatorUtilization * (
+ Options::maximumMutatorUtilization() -
+ Options::minimumMutatorUtilization());
+
+ return mutatorUtilization;
+}
+
+} // namespace JSC
+
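The pause/resume arithmetic above is simple enough to check by hand. The following standalone sketch mirrors headroomFullness() and mutatorUtilization() with made-up numbers; the two utilization bounds are illustrative placeholders, not JSC's shipped option defaults.

#include <cstdio>

int main()
{
    const double minimumMutatorUtilization = 0.0;  // stand-in for Options::minimumMutatorUtilization()
    const double maximumMutatorUtilization = 0.7;  // stand-in for Options::maximumMutatorUtilization()

    const double bytesAtBeginning = 8.0 * 1024 * 1024;  // allocation when the cycle began
    const double bytesAtEnd = 24.0 * 1024 * 1024;       // headroom limit for this cycle
    const double bytesNow = 16.0 * 1024 * 1024;         // allocation at decision time

    double headroomFullness = (bytesNow - bytesAtBeginning) / (bytesAtEnd - bytesAtBeginning);
    if (!(headroomFullness >= 0))  // same NaN/out-of-range defense as headroomFullness() above
        headroomFullness = 0;
    if (!(headroomFullness <= 1))
        headroomFullness = 1;

    double mutatorUtilization = 1 - headroomFullness;
    mutatorUtilization = minimumMutatorUtilization
        + mutatorUtilization * (maximumMutatorUtilization - minimumMutatorUtilization);

    // synchronousDrainingDidStall() uses this value as the probability of resuming the mutator:
    // a uniform random draw below it resumes now; otherwise the pause continues for targetPause.
    std::printf("hf=%.3f mu=%.3f\n", headroomFullness, mutatorUtilization); // hf=0.500 mu=0.350
    return 0;
}
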
diff --git a/Source/JavaScriptCore/heap/StochasticSpaceTimeMutatorScheduler.h b/Source/JavaScriptCore/heap/StochasticSpaceTimeMutatorScheduler.h
new file mode 100644
index 000000000..4eca90856
--- /dev/null
+++ b/Source/JavaScriptCore/heap/StochasticSpaceTimeMutatorScheduler.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "MutatorScheduler.h"
+#include <wtf/Seconds.h>
+#include <wtf/WeakRandom.h>
+
+namespace JSC {
+
+class Heap;
+
+// The JSC concurrent GC sometimes stops the world in order to stay ahead of it. These deliberate,
+// synthetic pauses ensure that the GC won't have to do one huge pause in order to catch up to the
+// retreating wavefront. The scheduler is called "space-time" because it links the amount of time
+// that the world is paused for to the amount of space that the world allocated since the GC cycle
+// began.
+
+class StochasticSpaceTimeMutatorScheduler : public MutatorScheduler {
+public:
+ StochasticSpaceTimeMutatorScheduler(Heap&);
+ ~StochasticSpaceTimeMutatorScheduler();
+
+ State state() const override;
+
+ void beginCollection() override;
+
+ void didStop() override;
+ void willResume() override;
+ void didReachTermination() override;
+ void didExecuteConstraints() override;
+ void synchronousDrainingDidStall() override;
+
+ MonotonicTime timeToStop() override;
+ MonotonicTime timeToResume() override;
+
+ void log() override;
+
+ void endCollection() override;
+
+private:
+ class Snapshot;
+ friend class Snapshot;
+
+ double bytesAllocatedThisCycleImpl();
+
+ double bytesSinceBeginningOfCycle(const Snapshot&);
+ double maxHeadroom();
+ double headroomFullness(const Snapshot&);
+ double mutatorUtilization(const Snapshot&);
+
+ Heap& m_heap;
+ State m_state { Normal };
+
+ WeakRandom m_random;
+
+ Seconds m_minimumPause;
+ double m_pauseScale;
+ Seconds m_targetPause;
+
+ double m_bytesAllocatedThisCycleAtTheBeginning { 0 };
+ double m_bytesAllocatedThisCycleAtTheEnd { 0 };
+
+ MonotonicTime m_beforeConstraints;
+ MonotonicTime m_plannedResumeTime;
+};
+
+} // namespace JSC
+
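For orientation, here is a hypothetical sketch of the call order implied by this interface. It is not JSC's actual collector driver; the four helpers declared at the top are placeholders, and the real Heap consults the scheduler through considerably more machinery than this.

#include "StochasticSpaceTimeMutatorScheduler.h"
#include <wtf/MonotonicTime.h>

bool collectionNotFinished();  // placeholder
void stopTheWorld();           // placeholder
void resumeTheWorld();         // placeholder
void doSomeMarking();          // placeholder

void illustrativeCollectorCycle(JSC::StochasticSpaceTimeMutatorScheduler& scheduler)
{
    scheduler.beginCollection();                 // Normal -> Stopped; headroom is computed here

    while (collectionNotFinished()) {
        if (MonotonicTime::now() >= scheduler.timeToResume()) {
            scheduler.willResume();              // Stopped -> Resumed
            resumeTheWorld();
        }
        doSomeMarking();
        if (MonotonicTime::now() >= scheduler.timeToStop()) {
            stopTheWorld();
            scheduler.didStop();                 // Resumed -> Stopped
        }
    }

    scheduler.endCollection();                   // back to Normal
}
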
diff --git a/Source/JavaScriptCore/heap/StopIfNecessaryTimer.cpp b/Source/JavaScriptCore/heap/StopIfNecessaryTimer.cpp
new file mode 100644
index 000000000..6e3176c2c
--- /dev/null
+++ b/Source/JavaScriptCore/heap/StopIfNecessaryTimer.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "StopIfNecessaryTimer.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+StopIfNecessaryTimer::StopIfNecessaryTimer(VM* vm)
+ : HeapTimer(vm)
+{
+}
+
+void StopIfNecessaryTimer::doWork()
+{
+ cancelTimer();
+ WTF::storeStoreFence();
+ m_vm->heap.stopIfNecessary();
+}
+
+void StopIfNecessaryTimer::scheduleSoon()
+{
+ if (isScheduled()) {
+ WTF::loadLoadFence();
+ return;
+ }
+ scheduleTimer(0);
+}
+
+} // namespace JSC
+
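The scheduleSoon()/doWork() pair above implements a "schedule at most once, fire as soon as possible" idiom: if the timer is already pending, the pending callback will observe the request; otherwise the timer is armed with a zero delay. The sketch below restates that shape with std::atomic for clarity; it is an assumption-level illustration, not WTF's HeapTimer, and it deliberately sidesteps the explicit fence pairing used above.

#include <atomic>
#include <functional>

class ScheduleSoonOnce {
public:
    explicit ScheduleSoonOnce(std::function<void()> work)
        : m_work(std::move(work))
    {
    }

    void scheduleSoon()
    {
        // If a callback is already pending it will run m_work shortly; nothing more to do.
        if (m_pending.exchange(true))
            return;
        armTimerForImmediateFire();   // hypothetical platform hook
    }

    void timerDidFire()
    {
        m_pending.store(false);       // analogous to cancelTimer() in doWork()
        m_work();                     // analogous to m_vm->heap.stopIfNecessary()
    }

private:
    void armTimerForImmediateFire() { /* platform-specific; omitted from this sketch */ }

    std::atomic<bool> m_pending { false };
    std::function<void()> m_work;
};
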
diff --git a/Source/JavaScriptCore/heap/StopIfNecessaryTimer.h b/Source/JavaScriptCore/heap/StopIfNecessaryTimer.h
new file mode 100644
index 000000000..a68318474
--- /dev/null
+++ b/Source/JavaScriptCore/heap/StopIfNecessaryTimer.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "HeapTimer.h"
+
+namespace JSC {
+
+class Heap;
+
+class StopIfNecessaryTimer : public HeapTimer {
+public:
+ explicit StopIfNecessaryTimer(VM*);
+
+ void doWork() override;
+
+ void scheduleSoon();
+};
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/Strong.h b/Source/JavaScriptCore/heap/Strong.h
index 27ab5d31f..264c24e25 100644
--- a/Source/JavaScriptCore/heap/Strong.h
+++ b/Source/JavaScriptCore/heap/Strong.h
@@ -23,8 +23,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef Strong_h
-#define Strong_h
+#pragma once
#include <wtf/Assertions.h>
#include "Handle.h"
@@ -84,9 +83,7 @@ public:
bool operator!() const { return !slot() || !*slot(); }
- // This conversion operator allows implicit conversion to bool but not to other integer types.
- typedef JSValue (HandleBase::*UnspecifiedBoolType);
- operator UnspecifiedBoolType*() const { return !!*this ? reinterpret_cast<UnspecifiedBoolType*>(1) : 0; }
+ explicit operator bool() const { return !!*this; }
void swap(Strong& other)
{
@@ -154,6 +151,4 @@ template<typename T> struct VectorTraits<JSC::Strong<T>> : SimpleClassVectorTrai
template<typename P> struct HashTraits<JSC::Strong<P>> : SimpleClassHashTraits<JSC::Strong<P>> { };
-}
-
-#endif // Strong_h
+} // namespace WTF
diff --git a/Source/JavaScriptCore/heap/StrongInlines.h b/Source/JavaScriptCore/heap/StrongInlines.h
index e1fbe90d2..6b8197c68 100644
--- a/Source/JavaScriptCore/heap/StrongInlines.h
+++ b/Source/JavaScriptCore/heap/StrongInlines.h
@@ -23,8 +23,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef StrongInlines_h
-#define StrongInlines_h
+#pragma once
#include "VM.h"
@@ -53,5 +52,3 @@ inline void Strong<T>::set(VM& vm, ExternalType value)
}
} // namespace JSC
-
-#endif // StrongInlines_h
diff --git a/Source/JavaScriptCore/heap/Subspace.cpp b/Source/JavaScriptCore/heap/Subspace.cpp
new file mode 100644
index 000000000..ed6c19eac
--- /dev/null
+++ b/Source/JavaScriptCore/heap/Subspace.cpp
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Subspace.h"
+
+#include "JSCInlines.h"
+#include "MarkedAllocatorInlines.h"
+#include "MarkedBlockInlines.h"
+#include "PreventCollectionScope.h"
+#include "SubspaceInlines.h"
+
+namespace JSC {
+
+namespace {
+
+// Writing it this way ensures that when you pass this as a functor, the callee is specialized for
+// this callback. If you wrote this as a normal function then the callee would be specialized for
+// the function's type and it would have indirect calls to that function. And unlike a lambda, it's
+// possible to mark this ALWAYS_INLINE.
+struct DestroyFunc {
+ ALWAYS_INLINE void operator()(VM& vm, JSCell* cell) const
+ {
+ ASSERT(cell->structureID());
+ ASSERT(cell->inlineTypeFlags() & StructureIsImmortal);
+ Structure* structure = cell->structure(vm);
+ const ClassInfo* classInfo = structure->classInfo();
+ MethodTable::DestroyFunctionPtr destroy = classInfo->methodTable.destroy;
+ destroy(cell);
+ }
+};
+
+} // anonymous namespace
+
+Subspace::Subspace(CString name, Heap& heap, AllocatorAttributes attributes)
+ : m_space(heap.objectSpace())
+ , m_name(name)
+ , m_attributes(attributes)
+{
+ // It's remotely possible that we're GCing right now even if the client is careful to only
+ // create subspaces right after VM creation, since collectContinuously (and probably other
+ // things) could cause a GC to be launched at pretty much any time and it's not 100% obvious
+ // that all clients would be able to ensure that there are zero safepoints between when they
+// create the VM and when they do this. Preventing GC while we're creating the Subspace ensures
+ // that we don't have to worry about whether it's OK for the GC to ever see a brand new
+ // subspace.
+ PreventCollectionScope preventCollectionScope(heap);
+ heap.objectSpace().m_subspaces.append(this);
+
+ for (size_t i = MarkedSpace::numSizeClasses; i--;)
+ m_allocatorForSizeStep[i] = nullptr;
+}
+
+Subspace::~Subspace()
+{
+}
+
+FreeList Subspace::finishSweep(MarkedBlock::Handle& block, MarkedBlock::Handle::SweepMode sweepMode)
+{
+ return block.finishSweepKnowingSubspace(sweepMode, DestroyFunc());
+}
+
+void Subspace::destroy(VM& vm, JSCell* cell)
+{
+ DestroyFunc()(vm, cell);
+}
+
+// The reason why we distinguish between allocate and tryAllocate is to minimize the number of
+// checks on the allocation path in both cases. Likewise, the reason why we have overloads with and
+// without deferralContext is to minimize the amount of code for calling allocate when you don't
+// need the deferralContext.
+void* Subspace::allocate(size_t size)
+{
+ if (MarkedAllocator* allocator = tryAllocatorFor(size))
+ return allocator->allocate();
+ return allocateSlow(nullptr, size);
+}
+
+void* Subspace::allocate(GCDeferralContext* deferralContext, size_t size)
+{
+ if (MarkedAllocator* allocator = tryAllocatorFor(size))
+ return allocator->allocate(deferralContext);
+ return allocateSlow(deferralContext, size);
+}
+
+void* Subspace::tryAllocate(size_t size)
+{
+ if (MarkedAllocator* allocator = tryAllocatorFor(size))
+ return allocator->tryAllocate();
+ return tryAllocateSlow(nullptr, size);
+}
+
+void* Subspace::tryAllocate(GCDeferralContext* deferralContext, size_t size)
+{
+ if (MarkedAllocator* allocator = tryAllocatorFor(size))
+ return allocator->tryAllocate(deferralContext);
+ return tryAllocateSlow(deferralContext, size);
+}
+
+MarkedAllocator* Subspace::allocatorForSlow(size_t size)
+{
+ size_t index = MarkedSpace::sizeClassToIndex(size);
+ size_t sizeClass = MarkedSpace::s_sizeClassForSizeStep[index];
+ if (!sizeClass)
+ return nullptr;
+
+ // This is written in such a way that it's OK for the JIT threads to end up here if they want
+ // to generate code that uses some allocator that hadn't been used yet. Note that a possibly-
+ // just-as-good solution would be to return null if we're in the JIT since the JIT treats null
+ // allocator as "please always take the slow path". But, that could lead to performance
+ // surprises and the algorithm here is pretty easy. Only this code has to hold the lock, to
+// prevent simultaneous MarkedAllocator creations from multiple threads. This code ensures
+// that any "forEachAllocator" traversals will only see this allocator after it's initialized
+// enough: it will have been fully set up by the time it becomes visible to a traversal.
+ auto locker = holdLock(m_space.allocatorLock());
+ if (MarkedAllocator* allocator = m_allocatorForSizeStep[index])
+ return allocator;
+
+ if (false)
+ dataLog("Creating marked allocator for ", m_name, ", ", m_attributes, ", ", sizeClass, ".\n");
+ MarkedAllocator* allocator = m_space.addMarkedAllocator(locker, this, sizeClass);
+ index = MarkedSpace::sizeClassToIndex(sizeClass);
+ for (;;) {
+ if (MarkedSpace::s_sizeClassForSizeStep[index] != sizeClass)
+ break;
+
+ m_allocatorForSizeStep[index] = allocator;
+
+ if (!index--)
+ break;
+ }
+ allocator->setNextAllocatorInSubspace(m_firstAllocator);
+ WTF::storeStoreFence();
+ m_firstAllocator = allocator;
+ return allocator;
+}
+
+void* Subspace::allocateSlow(GCDeferralContext* deferralContext, size_t size)
+{
+ void* result = tryAllocateSlow(deferralContext, size);
+ RELEASE_ASSERT(result);
+ return result;
+}
+
+void* Subspace::tryAllocateSlow(GCDeferralContext* deferralContext, size_t size)
+{
+ if (MarkedAllocator* allocator = allocatorFor(size))
+ return allocator->tryAllocate(deferralContext);
+
+ if (size <= Options::largeAllocationCutoff()
+ && size <= MarkedSpace::largeCutoff) {
+ dataLog("FATAL: attampting to allocate small object using large allocation.\n");
+ dataLog("Requested allocation size: ", size, "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ m_space.heap()->collectIfNecessaryOrDefer(deferralContext);
+
+ size = WTF::roundUpToMultipleOf<MarkedSpace::sizeStep>(size);
+ LargeAllocation* allocation = LargeAllocation::tryCreate(*m_space.m_heap, size, this);
+ if (!allocation)
+ return nullptr;
+
+ m_space.m_largeAllocations.append(allocation);
+ m_space.m_heap->didAllocate(size);
+ m_space.m_capacity += size;
+
+ m_largeAllocations.append(allocation);
+
+ return allocation->cell();
+}
+
+} // namespace JSC
+
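The DestroyFunc comment above is about template specialization of the callee. The standalone sketch below shows the difference it describes: a functor type passed to a template gets its call specialized (and typically inlined) into the loop, while a function pointer forces an indirect call on every iteration.

#include <cstdio>

struct PrintFunc {
    void operator()(int value) const { std::printf("%d\n", value); }
};

template<typename Func>
void forEachUpTo(int limit, const Func& func)
{
    // Instantiated once per Func type, so the call below can be inlined.
    for (int i = 0; i < limit; ++i)
        func(i);
}

void forEachUpToIndirect(int limit, void (*func)(int))
{
    // Same shape, but each iteration is an indirect call through the pointer.
    for (int i = 0; i < limit; ++i)
        func(i);
}

int main()
{
    forEachUpTo(3, PrintFunc());
    forEachUpToIndirect(3, +[] (int value) { std::printf("%d\n", value); });
    return 0;
}
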
diff --git a/Source/JavaScriptCore/heap/Subspace.h b/Source/JavaScriptCore/heap/Subspace.h
new file mode 100644
index 000000000..d95f71b7c
--- /dev/null
+++ b/Source/JavaScriptCore/heap/Subspace.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "MarkedBlock.h"
+#include "MarkedSpace.h"
+#include <wtf/text/CString.h>
+
+namespace JSC {
+
+// The idea of subspaces is that you can provide some custom behavior for your objects if you
+// allocate them from a custom Subspace in which you override some of the virtual methods. This
+// class is the baseclass of Subspaces and it provides a reasonable default implementation, where
+// sweeping assumes immortal structure. The common ways of overriding this are:
+//
+// - Provide customized destructor behavior. You can change how the destructor is called. You can
+// also specialize the destructor call in the loop.
+//
+// - Use the Subspace as a quick way to iterate all of the objects in that subspace.
+class Subspace {
+ WTF_MAKE_NONCOPYABLE(Subspace);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ JS_EXPORT_PRIVATE Subspace(CString name, Heap&, AllocatorAttributes);
+ JS_EXPORT_PRIVATE virtual ~Subspace();
+
+ const char *name() const { return m_name.data(); }
+ MarkedSpace& space() const { return m_space; }
+
+ const AllocatorAttributes& attributes() const { return m_attributes; }
+
+ // The purpose of overriding this is to specialize the sweep for your destructors. This won't
+ // be called for no-destructor blocks. This must call MarkedBlock::finishSweepKnowingSubspace.
+ virtual FreeList finishSweep(MarkedBlock::Handle&, MarkedBlock::Handle::SweepMode);
+
+ // These get called for large objects.
+ virtual void destroy(VM&, JSCell*);
+
+ MarkedAllocator* tryAllocatorFor(size_t);
+ MarkedAllocator* allocatorFor(size_t);
+
+ JS_EXPORT_PRIVATE void* allocate(size_t);
+ JS_EXPORT_PRIVATE void* allocate(GCDeferralContext*, size_t);
+
+ JS_EXPORT_PRIVATE void* tryAllocate(size_t);
+ JS_EXPORT_PRIVATE void* tryAllocate(GCDeferralContext*, size_t);
+
+ template<typename Func>
+ void forEachMarkedBlock(const Func&);
+
+ template<typename Func>
+ void forEachNotEmptyMarkedBlock(const Func&);
+
+ template<typename Func>
+ void forEachLargeAllocation(const Func&);
+
+ template<typename Func>
+ void forEachMarkedCell(const Func&);
+
+ static ptrdiff_t offsetOfAllocatorForSizeStep() { return OBJECT_OFFSETOF(Subspace, m_allocatorForSizeStep); }
+
+ MarkedAllocator** allocatorForSizeStep() { return &m_allocatorForSizeStep[0]; }
+
+private:
+ MarkedAllocator* allocatorForSlow(size_t);
+
+ // These slow paths are concerned with large allocations and allocator creation.
+ void* allocateSlow(GCDeferralContext*, size_t);
+ void* tryAllocateSlow(GCDeferralContext*, size_t);
+
+ MarkedSpace& m_space;
+
+ CString m_name;
+ AllocatorAttributes m_attributes;
+
+ std::array<MarkedAllocator*, MarkedSpace::numSizeClasses> m_allocatorForSizeStep;
+ MarkedAllocator* m_firstAllocator { nullptr };
+ SentinelLinkedList<LargeAllocation, BasicRawSentinelNode<LargeAllocation>> m_largeAllocations;
+};
+
+ALWAYS_INLINE MarkedAllocator* Subspace::tryAllocatorFor(size_t size)
+{
+ if (size <= MarkedSpace::largeCutoff)
+ return m_allocatorForSizeStep[MarkedSpace::sizeClassToIndex(size)];
+ return nullptr;
+}
+
+ALWAYS_INLINE MarkedAllocator* Subspace::allocatorFor(size_t size)
+{
+ if (size <= MarkedSpace::largeCutoff) {
+ if (MarkedAllocator* result = m_allocatorForSizeStep[MarkedSpace::sizeClassToIndex(size)])
+ return result;
+ return allocatorForSlow(size);
+ }
+ return nullptr;
+}
+
+} // namespace JSC
+
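As a rough illustration of the override points described in the class comment, here is a hedged sketch of a subclass that counts destroyed cells. It assumes the same JSC headers that Subspace.cpp includes above; CountingSubspace and its counter are hypothetical, and the destroy path simply mirrors the default DestroyFunc from that file.

#include "MarkedBlockInlines.h"
#include "Subspace.h"

namespace JSC {

struct CountingDestroyFunc {
    ALWAYS_INLINE void operator()(VM& vm, JSCell* cell) const
    {
        ++destroyedCellCount;
        cell->structure(vm)->classInfo()->methodTable.destroy(cell);
    }
    static size_t destroyedCellCount;   // illustration only; real code would avoid a bare global
};

size_t CountingDestroyFunc::destroyedCellCount = 0;

class CountingSubspace : public Subspace {
public:
    CountingSubspace(CString name, Heap& heap, AllocatorAttributes attributes)
        : Subspace(name, heap, attributes)
    {
    }

    FreeList finishSweep(MarkedBlock::Handle& block, MarkedBlock::Handle::SweepMode sweepMode) override
    {
        // Per the contract above, this must go through finishSweepKnowingSubspace().
        return block.finishSweepKnowingSubspace(sweepMode, CountingDestroyFunc());
    }

    void destroy(VM& vm, JSCell* cell) override
    {
        CountingDestroyFunc()(vm, cell);   // large allocations take this path
    }
};

} // namespace JSC

// Allocation then goes through the subspace: allocate() crashes if it cannot satisfy the
// request, while tryAllocate() returns nullptr instead.
//     void* cell = countingSubspace.allocate(cellSize);
//     void* maybeCell = countingSubspace.tryAllocate(cellSize);
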
diff --git a/Source/JavaScriptCore/heap/SubspaceInlines.h b/Source/JavaScriptCore/heap/SubspaceInlines.h
new file mode 100644
index 000000000..b6851c353
--- /dev/null
+++ b/Source/JavaScriptCore/heap/SubspaceInlines.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "JSCell.h"
+#include "MarkedAllocator.h"
+#include "MarkedBlock.h"
+#include "MarkedSpace.h"
+#include "Subspace.h"
+
+namespace JSC {
+
+template<typename Func>
+void Subspace::forEachMarkedBlock(const Func& func)
+{
+ for (MarkedAllocator* allocator = m_firstAllocator; allocator; allocator = allocator->nextAllocatorInSubspace())
+ allocator->forEachBlock(func);
+}
+
+template<typename Func>
+void Subspace::forEachNotEmptyMarkedBlock(const Func& func)
+{
+ for (MarkedAllocator* allocator = m_firstAllocator; allocator; allocator = allocator->nextAllocatorInSubspace())
+ allocator->forEachNotEmptyBlock(func);
+}
+
+template<typename Func>
+void Subspace::forEachLargeAllocation(const Func& func)
+{
+ for (LargeAllocation* allocation = m_largeAllocations.begin(); allocation != m_largeAllocations.end(); allocation = allocation->next())
+ func(allocation);
+}
+
+template<typename Func>
+void Subspace::forEachMarkedCell(const Func& func)
+{
+ forEachNotEmptyMarkedBlock(
+ [&] (MarkedBlock::Handle* handle) {
+ handle->forEachMarkedCell(
+ [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
+ func(cell, kind);
+ return IterationStatus::Continue;
+ });
+ });
+ forEachLargeAllocation(
+ [&] (LargeAllocation* allocation) {
+ if (allocation->isMarked())
+ func(allocation->cell(), m_attributes.cellKind);
+ });
+}
+
+} // namespace JSC
+
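The iteration helpers compose as shown in this hedged sketch, which counts the marked cells of a subspace and assumes it is called at a point where the heap is not mutating the mark state underneath us.

#include "SubspaceInlines.h"

namespace JSC {

size_t countMarkedCells(Subspace& subspace)
{
    size_t count = 0;
    subspace.forEachMarkedCell(
        [&] (HeapCell*, HeapCell::Kind) {
            ++count;
        });
    return count;
}

} // namespace JSC
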
diff --git a/Source/JavaScriptCore/heap/SuperRegion.cpp b/Source/JavaScriptCore/heap/SuperRegion.cpp
deleted file mode 100644
index d58f600b5..000000000
--- a/Source/JavaScriptCore/heap/SuperRegion.cpp
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "SuperRegion.h"
-
-#include "Region.h"
-
-namespace JSC {
-
-const uint64_t SuperRegion::s_fixedHeapMemoryPoolSize = 4 * 1024 * static_cast<uint64_t>(MB);
-
-SuperRegion::SuperRegion()
- : MetaAllocator(Region::s_regionSize, Region::s_regionSize)
- , m_reservationBase(0)
-{
-#if ENABLE(SUPER_REGION)
- // Over-allocate so that we can make sure that we're aligned to the size of Regions.
- m_reservation = PageReservation::reserve(s_fixedHeapMemoryPoolSize + Region::s_regionSize, OSAllocator::JSGCHeapPages);
- m_reservationBase = getAlignedBase(m_reservation);
- addFreshFreeSpace(m_reservationBase, s_fixedHeapMemoryPoolSize);
-#else
- UNUSED_PARAM(m_reservation);
- UNUSED_PARAM(m_reservationBase);
-#endif
-}
-
-SuperRegion::~SuperRegion()
-{
-#if ENABLE(SUPER_REGION)
- m_reservation.deallocate();
-#endif
-}
-
-void* SuperRegion::getAlignedBase(PageReservation& reservation)
-{
- for (char* current = static_cast<char*>(reservation.base()); current < static_cast<char*>(reservation.base()) + Region::s_regionSize; current += pageSize()) {
- if (!(reinterpret_cast<size_t>(current) & ~Region::s_regionMask))
- return current;
- }
- ASSERT_NOT_REACHED();
- return 0;
-}
-
-void* SuperRegion::allocateNewSpace(size_t&)
-{
- return 0;
-}
-
-void SuperRegion::notifyNeedPage(void* page)
-{
- m_reservation.commit(page, Region::s_regionSize);
-}
-
-void SuperRegion::notifyPageIsFree(void* page)
-{
- m_reservation.decommit(page, Region::s_regionSize);
-}
-
-} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/SweepingScope.h b/Source/JavaScriptCore/heap/SweepingScope.h
new file mode 100644
index 000000000..a3f686286
--- /dev/null
+++ b/Source/JavaScriptCore/heap/SweepingScope.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "Heap.h"
+
+namespace JSC {
+
+class SweepingScope {
+public:
+ SweepingScope(Heap& heap)
+ : m_heap(heap)
+ , m_oldState(m_heap.m_mutatorState)
+ {
+ m_heap.m_mutatorState = MutatorState::Sweeping;
+ }
+
+ ~SweepingScope()
+ {
+ m_heap.m_mutatorState = m_oldState;
+ }
+
+private:
+ Heap& m_heap;
+ MutatorState m_oldState;
+};
+
+} // namespace JSC
+
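A hedged usage sketch of the scope above: it flags the mutator state as Sweeping for the duration of a sweep and restores whatever state was current before, even on early return. The sweep body here is a placeholder.

namespace JSC {

void sweepSomeBlocks(Heap& heap)
{
    SweepingScope sweepingScope(heap);
    // ... sweep blocks here; the heap's mutator state reads as MutatorState::Sweeping ...
}   // destructor restores the previous mutator state

} // namespace JSC
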
diff --git a/Source/JavaScriptCore/heap/SynchronousStopTheWorldMutatorScheduler.cpp b/Source/JavaScriptCore/heap/SynchronousStopTheWorldMutatorScheduler.cpp
new file mode 100644
index 000000000..7ac95adfe
--- /dev/null
+++ b/Source/JavaScriptCore/heap/SynchronousStopTheWorldMutatorScheduler.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "SynchronousStopTheWorldMutatorScheduler.h"
+
+namespace JSC {
+
+SynchronousStopTheWorldMutatorScheduler::SynchronousStopTheWorldMutatorScheduler()
+{
+}
+
+SynchronousStopTheWorldMutatorScheduler::~SynchronousStopTheWorldMutatorScheduler()
+{
+}
+
+MutatorScheduler::State SynchronousStopTheWorldMutatorScheduler::state() const
+{
+ return m_state;
+}
+
+void SynchronousStopTheWorldMutatorScheduler::beginCollection()
+{
+ RELEASE_ASSERT(m_state == Normal);
+ m_state = Stopped;
+}
+
+MonotonicTime SynchronousStopTheWorldMutatorScheduler::timeToStop()
+{
+ return m_state == Normal ? MonotonicTime::infinity() : MonotonicTime::now();
+}
+
+MonotonicTime SynchronousStopTheWorldMutatorScheduler::timeToResume()
+{
+ return m_state == Normal ? MonotonicTime::now() : MonotonicTime::infinity();
+}
+
+void SynchronousStopTheWorldMutatorScheduler::endCollection()
+{
+ RELEASE_ASSERT(m_state == Stopped);
+ m_state = Normal;
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/SynchronousStopTheWorldMutatorScheduler.h b/Source/JavaScriptCore/heap/SynchronousStopTheWorldMutatorScheduler.h
new file mode 100644
index 000000000..46af40d73
--- /dev/null
+++ b/Source/JavaScriptCore/heap/SynchronousStopTheWorldMutatorScheduler.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "MutatorScheduler.h"
+
+namespace JSC {
+
+// The JSC concurrent GC relies on stopping the world to stay ahead of the retreating wavefront.
+// It so happens that the same API can be reused to implement a non-concurrent GC mode, which we
+// use on platforms that don't support the GC's atomicity protocols. That means anything other
+// than X86-64 and ARM64. This scheduler is a drop-in replacement for the concurrent GC's
+// SpaceTimeMutatorScheduler. It tells the GC to never resume the world once the GC cycle begins.
+
+class SynchronousStopTheWorldMutatorScheduler : public MutatorScheduler {
+public:
+ SynchronousStopTheWorldMutatorScheduler();
+ ~SynchronousStopTheWorldMutatorScheduler();
+
+ State state() const override;
+
+ void beginCollection() override;
+
+ MonotonicTime timeToStop() override;
+ MonotonicTime timeToResume() override;
+
+ void endCollection() override;
+
+private:
+ State m_state { Normal };
+};
+
+} // namespace JSC
+
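Read against the switch statements above, the contract is: a finite timeToStop() asks the collector to stop the world, and a finite timeToResume() permits resuming it. A hypothetical caller might poll it as sketched below (this is not JSC's actual driver). For this synchronous scheduler, once beginCollection() runs, shouldResumeWorldNow() stays false until endCollection() returns the state to Normal.

#include "SynchronousStopTheWorldMutatorScheduler.h"

namespace JSC {

static bool shouldStopWorldNow(MutatorScheduler& scheduler)
{
    return scheduler.timeToStop() <= MonotonicTime::now();
}

static bool shouldResumeWorldNow(MutatorScheduler& scheduler)
{
    return scheduler.timeToResume() <= MonotonicTime::now();
}

} // namespace JSC
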
diff --git a/Source/JavaScriptCore/heap/TinyBloomFilter.h b/Source/JavaScriptCore/heap/TinyBloomFilter.h
index 15a419de8..3ad7715ff 100644
--- a/Source/JavaScriptCore/heap/TinyBloomFilter.h
+++ b/Source/JavaScriptCore/heap/TinyBloomFilter.h
@@ -23,8 +23,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef TinyBloomFilter_h
-#define TinyBloomFilter_h
+#pragma once
namespace JSC {
@@ -75,5 +74,3 @@ inline void TinyBloomFilter::reset()
}
} // namespace JSC
-
-#endif // TinyBloomFilter_h
diff --git a/Source/JavaScriptCore/heap/UnconditionalFinalizer.h b/Source/JavaScriptCore/heap/UnconditionalFinalizer.h
index 26029d046..10c0cc84e 100644
--- a/Source/JavaScriptCore/heap/UnconditionalFinalizer.h
+++ b/Source/JavaScriptCore/heap/UnconditionalFinalizer.h
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef UnconditionalFinalizer_h
-#define UnconditionalFinalizer_h
+#pragma once
#include "ListableHandler.h"
@@ -35,13 +34,12 @@ namespace JSC {
// associated with each CodeBlock.
class UnconditionalFinalizer : public ListableHandler<UnconditionalFinalizer> {
-public:
+ WTF_MAKE_FAST_ALLOCATED;
+public:
virtual void finalizeUnconditionally() = 0;
protected:
virtual ~UnconditionalFinalizer() { }
};
-}
-
-#endif // UltraWeakFinalizer_h
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/VisitRaceKey.cpp b/Source/JavaScriptCore/heap/VisitRaceKey.cpp
new file mode 100644
index 000000000..28ee30b80
--- /dev/null
+++ b/Source/JavaScriptCore/heap/VisitRaceKey.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "VisitRaceKey.h"
+
+namespace JSC {
+
+const char* VisitRaceKey::m_deletedValueRaceName = "deleted value";
+
+void VisitRaceKey::dump(PrintStream& out) const
+{
+ out.print(RawPointer(m_cell), "(", m_raceName, ")");
+}
+
+} // namespace JSC
+
+
diff --git a/Source/JavaScriptCore/heap/VisitRaceKey.h b/Source/JavaScriptCore/heap/VisitRaceKey.h
new file mode 100644
index 000000000..23187a975
--- /dev/null
+++ b/Source/JavaScriptCore/heap/VisitRaceKey.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/HashTable.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+class JSCell;
+
+class VisitRaceKey {
+public:
+ VisitRaceKey() { }
+
+ VisitRaceKey(JSCell* cell, const char* raceName)
+ : m_cell(cell)
+ , m_raceName(raceName)
+ {
+ }
+
+ VisitRaceKey(WTF::HashTableDeletedValueType)
+ : m_raceName(m_deletedValueRaceName)
+ {
+ }
+
+ bool operator==(const VisitRaceKey& other) const
+ {
+ return m_cell == other.m_cell
+ && m_raceName == other.m_raceName;
+ }
+
+ bool operator!=(const VisitRaceKey& other) const
+ {
+ return !(*this == other);
+ }
+
+ explicit operator bool() const
+ {
+ return *this != VisitRaceKey();
+ }
+
+ void dump(PrintStream& out) const;
+
+ JSCell* cell() const { return m_cell; }
+ const char* raceName() const { return m_raceName; }
+
+ bool isHashTableDeletedValue() const
+ {
+ return *this == VisitRaceKey(WTF::HashTableDeletedValue);
+ }
+
+ unsigned hash() const
+ {
+ return WTF::PtrHash<JSCell*>::hash(m_cell) ^ WTF::PtrHash<const char*>::hash(m_raceName);
+ }
+
+private:
+ static const char* m_deletedValueRaceName;
+
+ JSCell* m_cell { nullptr };
+ const char* m_raceName { nullptr };
+};
+
+struct VisitRaceKeyHash {
+ static unsigned hash(const VisitRaceKey& key) { return key.hash(); }
+ static bool equal(const VisitRaceKey& a, const VisitRaceKey& b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::VisitRaceKey> {
+ typedef JSC::VisitRaceKeyHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::VisitRaceKey> : SimpleClassHashTraits<JSC::VisitRaceKey> { };
+
+} // namespace WTF
+
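Because of the DefaultHash and HashTraits specializations above, VisitRaceKey drops straight into WTF hash containers. A hedged sketch follows; the race name is a placeholder and real JSC tracks races elsewhere.

#include "VisitRaceKey.h"
#include <wtf/HashSet.h>

namespace JSC {

static bool recordVisitRace(HashSet<VisitRaceKey>& races, JSCell* cell)
{
    // Returns true the first time this (cell, race name) pair is seen.
    return races.add(VisitRaceKey(cell, "example race")).isNewEntry;
}

} // namespace JSC
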
diff --git a/Source/JavaScriptCore/heap/WriteBarrierBuffer.h b/Source/JavaScriptCore/heap/VisitingTimeout.h
index 9126bdbe9..f1ac861f3 100644
--- a/Source/JavaScriptCore/heap/WriteBarrierBuffer.h
+++ b/Source/JavaScriptCore/heap/VisitingTimeout.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,49 +23,46 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef WriteBarrierBuffer_h
-#define WriteBarrierBuffer_h
+#pragma once
-#include <wtf/FastMalloc.h>
+#include "SlotVisitor.h"
+#include <wtf/MonotonicTime.h>
namespace JSC {
-class Heap;
-class JSCell;
-
-namespace FTL { class LowerDFGToLLVM; }
-
-class WriteBarrierBuffer {
- friend class FTL::LowerDFGToLLVM;
+class VisitingTimeout {
public:
- WriteBarrierBuffer(unsigned capacity);
- ~WriteBarrierBuffer();
-
- void add(JSCell*);
- void flush(Heap&);
- void reset();
-
- static ptrdiff_t currentIndexOffset()
+ VisitingTimeout()
{
- return OBJECT_OFFSETOF(WriteBarrierBuffer, m_currentIndex);
}
-
- static ptrdiff_t capacityOffset()
+
+ VisitingTimeout(SlotVisitor& visitor, bool didVisitSomething, MonotonicTime timeout)
+ : m_didVisitSomething(didVisitSomething)
+ , m_visitCountBefore(visitor.visitCount())
+ , m_timeout(timeout)
{
- return OBJECT_OFFSETOF(WriteBarrierBuffer, m_capacity);
}
-
- static ptrdiff_t bufferOffset()
+
+ size_t visitCount(SlotVisitor& visitor) const
{
- return OBJECT_OFFSETOF(WriteBarrierBuffer, m_buffer);
+ return visitor.visitCount() - m_visitCountBefore;
}
+ bool didVisitSomething(SlotVisitor& visitor) const
+ {
+ return m_didVisitSomething || visitCount(visitor);
+ }
+
+ bool shouldTimeOut(SlotVisitor& visitor) const
+ {
+ return didVisitSomething(visitor) && hasElapsed(m_timeout);
+ }
+
private:
- unsigned m_currentIndex;
- unsigned m_capacity;
- JSCell** m_buffer;
+ bool m_didVisitSomething { false };
+ size_t m_visitCountBefore { 0 };
+ MonotonicTime m_timeout;
};
} // namespace JSC
-#endif // WriteBarrierBuffer_h
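A hedged sketch of how the helper above is meant to be used: drive visiting until some progress has been made and the deadline has passed. drainSomeWork() is a placeholder for one slice of visiting; the visitor and deadline are assumed to come from the caller.

namespace JSC {

void drainSomeWork(SlotVisitor&);   // placeholder for one slice of visiting

void drainUntilTimeout(SlotVisitor& visitor, MonotonicTime deadline)
{
    VisitingTimeout timeout(visitor, false, deadline);
    while (!timeout.shouldTimeOut(visitor))
        drainSomeWork(visitor);
    // shouldTimeOut() only fires after didVisitSomething() is true, so a visitor that has made
    // no progress keeps going even past the deadline.
}

} // namespace JSC
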
diff --git a/Source/JavaScriptCore/heap/Weak.cpp b/Source/JavaScriptCore/heap/Weak.cpp
index 3857b60d2..a30b7c085 100644
--- a/Source/JavaScriptCore/heap/Weak.cpp
+++ b/Source/JavaScriptCore/heap/Weak.cpp
@@ -26,6 +26,7 @@
#include "config.h"
#include "Weak.h"
+#include "JSCInlines.h"
#include "WeakSetInlines.h"
namespace JSC {
diff --git a/Source/JavaScriptCore/heap/Weak.h b/Source/JavaScriptCore/heap/Weak.h
index 80cdbd82c..2d7b38bb1 100644
--- a/Source/JavaScriptCore/heap/Weak.h
+++ b/Source/JavaScriptCore/heap/Weak.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2012, 2013, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,11 +23,13 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef Weak_h
-#define Weak_h
+#pragma once
+#include "JSExportMacros.h"
#include <cstddef>
+#include <wtf/HashTraits.h>
#include <wtf/Noncopyable.h>
+#include <wtf/VectorTraits.h>
namespace JSC {
@@ -50,35 +52,33 @@ public:
{
}
- Weak(T*, WeakHandleOwner* = 0, void* context = 0);
+ inline Weak(T*, WeakHandleOwner* = 0, void* context = 0);
enum HashTableDeletedValueTag { HashTableDeletedValue };
- bool isHashTableDeletedValue() const;
- Weak(HashTableDeletedValueTag);
+ inline bool isHashTableDeletedValue() const;
+ inline Weak(HashTableDeletedValueTag);
- Weak(Weak&&);
+ inline Weak(Weak&&);
~Weak()
{
clear();
}
- void swap(Weak&);
+ inline void swap(Weak&);
- Weak& operator=(Weak&&);
+ inline Weak& operator=(Weak&&);
- bool operator!() const;
- T* operator->() const;
- T& operator*() const;
- T* get() const;
+ inline bool operator!() const;
+ inline T* operator->() const;
+ inline T& operator*() const;
+ inline T* get() const;
- bool was(T*) const;
+ inline bool was(T*) const;
- // This conversion operator allows implicit conversion to bool but not to other integer types.
- typedef void* (Weak::*UnspecifiedBoolType);
- operator UnspecifiedBoolType*() const;
+ inline explicit operator bool() const;
- WeakImpl* leakImpl() WARN_UNUSED_RETURN;
+ inline WeakImpl* leakImpl() WARN_UNUSED_RETURN;
void clear()
{
if (!m_impl)
@@ -87,11 +87,28 @@ public:
}
private:
- static WeakImpl* hashTableDeletedValue();
+ static inline WeakImpl* hashTableDeletedValue();
WeakImpl* m_impl;
};
} // namespace JSC
-#endif // Weak_h
+namespace WTF {
+
+template<typename T> struct VectorTraits<JSC::Weak<T>> : SimpleClassVectorTraits {
+ static const bool canCompareWithMemcmp = false;
+};
+
+template<typename T> struct HashTraits<JSC::Weak<T>> : SimpleClassHashTraits<JSC::Weak<T>> {
+ typedef JSC::Weak<T> StorageType;
+
+ typedef std::nullptr_t EmptyValueType;
+ static EmptyValueType emptyValue() { return nullptr; }
+
+ typedef T* PeekType;
+ static PeekType peek(const StorageType& value) { return value.get(); }
+ static PeekType peek(EmptyValueType) { return PeekType(); }
+};
+
+} // namespace WTF
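A hedged illustration of what the traits above enable: Weak<T> stored directly in WTF containers, with a null empty value and pointer-style lookup. JSFoo is a hypothetical JSCell subclass defined elsewhere, and the stored cells are assumed to be live when inserted.

#include "WeakInlines.h"
#include <wtf/HashMap.h>
#include <wtf/Vector.h>
#include <wtf/text/WTFString.h>

namespace JSC {

class JSFoo;   // hypothetical JSCell subclass, defined elsewhere

struct WeakFooCache {
    HashMap<String, Weak<JSFoo>> byName;   // empty value is nullptr via HashTraits
    Vector<Weak<JSFoo>> all;               // VectorTraits disable memcmp comparison

    void remember(const String& key, JSFoo* cell)
    {
        byName.set(key, Weak<JSFoo>(cell));
        all.append(Weak<JSFoo>(cell));
    }

    JSFoo* lookup(const String& key) const
    {
        return byName.get(key);            // peek() hands back the raw pointer, or null if dead or absent
    }
};

} // namespace JSC
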
diff --git a/Source/JavaScriptCore/heap/WeakBlock.cpp b/Source/JavaScriptCore/heap/WeakBlock.cpp
index 957090569..0ac318060 100644
--- a/Source/JavaScriptCore/heap/WeakBlock.cpp
+++ b/Source/JavaScriptCore/heap/WeakBlock.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,22 +26,30 @@
#include "config.h"
#include "WeakBlock.h"
+#include "CellContainerInlines.h"
#include "Heap.h"
-#include "HeapRootVisitor.h"
+#include "JSCInlines.h"
#include "JSObject.h"
-#include "Operations.h"
-#include "Structure.h"
+#include "WeakHandleOwner.h"
namespace JSC {
-WeakBlock* WeakBlock::create(DeadBlock* block)
+WeakBlock* WeakBlock::create(Heap& heap, CellContainer container)
{
- Region* region = block->region();
- return new (NotNull, block) WeakBlock(region);
+ heap.didAllocateBlock(WeakBlock::blockSize);
+ return new (NotNull, fastMalloc(blockSize)) WeakBlock(container);
}
-WeakBlock::WeakBlock(Region* region)
- : HeapBlock<WeakBlock>(region)
+void WeakBlock::destroy(Heap& heap, WeakBlock* block)
+{
+ block->~WeakBlock();
+ fastFree(block);
+ heap.didFreeBlock(WeakBlock::blockSize);
+}
+
+WeakBlock::WeakBlock(CellContainer container)
+ : DoublyLinkedListNode<WeakBlock>()
+ , m_container(container)
{
for (size_t i = 0; i < weakImplCount(); ++i) {
WeakImpl* weakImpl = &weakImpls()[i];
@@ -76,54 +84,75 @@ void WeakBlock::sweep()
finalize(weakImpl);
if (weakImpl->state() == WeakImpl::Deallocated)
addToFreeList(&sweepResult.freeList, weakImpl);
- else
+ else {
sweepResult.blockIsFree = false;
+ if (weakImpl->state() == WeakImpl::Live)
+ sweepResult.blockIsLogicallyEmpty = false;
+ }
}
m_sweepResult = sweepResult;
ASSERT(!m_sweepResult.isNull());
}
-void WeakBlock::visit(HeapRootVisitor& heapRootVisitor)
+template<typename ContainerType>
+void WeakBlock::specializedVisit(ContainerType& container, SlotVisitor& visitor)
{
- // If a block is completely empty, a visit won't have any effect.
- if (isEmpty())
- return;
-
- SlotVisitor& visitor = heapRootVisitor.visitor();
+ HeapVersion markingVersion = visitor.markingVersion();
- for (size_t i = 0; i < weakImplCount(); ++i) {
+ size_t count = weakImplCount();
+ for (size_t i = 0; i < count; ++i) {
WeakImpl* weakImpl = &weakImpls()[i];
if (weakImpl->state() != WeakImpl::Live)
continue;
- const JSValue& jsValue = weakImpl->jsValue();
- if (Heap::isLive(jsValue.asCell()))
- continue;
-
WeakHandleOwner* weakHandleOwner = weakImpl->weakHandleOwner();
if (!weakHandleOwner)
continue;
+ JSValue jsValue = weakImpl->jsValue();
+ if (container.isMarkedConcurrently(markingVersion, jsValue.asCell()))
+ continue;
+
if (!weakHandleOwner->isReachableFromOpaqueRoots(Handle<Unknown>::wrapSlot(&const_cast<JSValue&>(jsValue)), weakImpl->context(), visitor))
continue;
- heapRootVisitor.visit(&const_cast<JSValue&>(jsValue));
+ visitor.appendUnbarriered(jsValue);
}
}
+void WeakBlock::visit(SlotVisitor& visitor)
+{
+ // If a block is completely empty, a visit won't have any effect.
+ if (isEmpty())
+ return;
+
+ // If this WeakBlock doesn't belong to a CellContainer, we won't even be here.
+ ASSERT(m_container);
+
+ if (m_container.isLargeAllocation())
+ specializedVisit(m_container.largeAllocation(), visitor);
+ else
+ specializedVisit(m_container.markedBlock(), visitor);
+}
+
void WeakBlock::reap()
{
// If a block is completely empty, a reaping won't have any effect.
if (isEmpty())
return;
+ // If this WeakBlock doesn't belong to a CellContainer, we won't even be here.
+ ASSERT(m_container);
+
+ HeapVersion markingVersion = m_container.heap()->objectSpace().markingVersion();
+
for (size_t i = 0; i < weakImplCount(); ++i) {
WeakImpl* weakImpl = &weakImpls()[i];
if (weakImpl->state() > WeakImpl::Dead)
continue;
- if (Heap::isLive(weakImpl->jsValue().asCell())) {
+ if (m_container.isMarked(markingVersion, weakImpl->jsValue().asCell())) {
ASSERT(weakImpl->state() == WeakImpl::Live);
continue;
}
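specializedVisit() above is a small template so that the per-cell liveness check is a direct call into either MarkedBlock or LargeAllocation, chosen once in visit(), rather than a branch inside the hot loop. The standalone sketch below restates that shape with stand-in types; none of these are JSC's.

struct SmallContainerStandIn {
    bool isLive(int) const { return true; }
};

struct LargeContainerStandIn {
    bool isLive(int) const { return false; }
};

template<typename ContainerType>
static int countLiveCells(ContainerType& container, const int* cells, int count)
{
    int live = 0;
    for (int i = 0; i < count; ++i) {
        if (container.isLive(cells[i]))   // direct, inlinable call; no per-cell kind check
            ++live;
    }
    return live;
}

static int visitCells(bool isLargeAllocation, const int* cells, int count)
{
    // Mirrors WeakBlock::visit(): branch on the container kind once, then run the specialized loop.
    if (isLargeAllocation) {
        LargeContainerStandIn large;
        return countLiveCells(large, cells, count);
    }
    SmallContainerStandIn small;
    return countLiveCells(small, cells, count);
}
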
diff --git a/Source/JavaScriptCore/heap/WeakBlock.h b/Source/JavaScriptCore/heap/WeakBlock.h
index b6b631e27..6d4ff0709 100644
--- a/Source/JavaScriptCore/heap/WeakBlock.h
+++ b/Source/JavaScriptCore/heap/WeakBlock.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,73 +23,71 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef WeakBlock_h
-#define WeakBlock_h
+#pragma once
-#include "HeapBlock.h"
-#include "WeakHandleOwner.h"
+#include "CellContainer.h"
#include "WeakImpl.h"
#include <wtf/DoublyLinkedList.h>
#include <wtf/StdLibExtras.h>
namespace JSC {
-class DeadBlock;
-class HeapRootVisitor;
-class JSValue;
-class WeakHandleOwner;
+class Heap;
+class SlotVisitor;
-class WeakBlock : public HeapBlock<WeakBlock> {
+class WeakBlock : public DoublyLinkedListNode<WeakBlock> {
public:
friend class WTF::DoublyLinkedListNode<WeakBlock>;
- static const size_t blockSize = 4 * KB; // 5% of MarkedBlock size
+ static const size_t blockSize = 256; // 1/16 of MarkedBlock size
struct FreeCell {
FreeCell* next;
};
struct SweepResult {
- SweepResult();
bool isNull() const;
- bool blockIsFree;
- FreeCell* freeList;
+ bool blockIsFree { true };
+ bool blockIsLogicallyEmpty { true };
+ FreeCell* freeList { nullptr };
};
- static WeakBlock* create(DeadBlock*);
+ static WeakBlock* create(Heap&, CellContainer);
+ static void destroy(Heap&, WeakBlock*);
static WeakImpl* asWeakImpl(FreeCell*);
bool isEmpty();
+ bool isLogicallyEmptyButNotFree() const;
void sweep();
SweepResult takeSweepResult();
- void visit(HeapRootVisitor&);
+ void visit(SlotVisitor&);
+
void reap();
void lastChanceToFinalize();
+ void disconnectContainer() { m_container = CellContainer(); }
private:
static FreeCell* asFreeCell(WeakImpl*);
+
+ template<typename ContainerType>
+ void specializedVisit(ContainerType&, SlotVisitor&);
- WeakBlock(Region*);
- WeakImpl* firstWeakImpl();
+ explicit WeakBlock(CellContainer);
void finalize(WeakImpl*);
WeakImpl* weakImpls();
size_t weakImplCount();
void addToFreeList(FreeCell**, WeakImpl*);
+ CellContainer m_container;
+ WeakBlock* m_prev;
+ WeakBlock* m_next;
SweepResult m_sweepResult;
};
-inline WeakBlock::SweepResult::SweepResult()
- : blockIsFree(true)
- , freeList(0)
-{
- ASSERT(isNull());
-}
-
inline bool WeakBlock::SweepResult::isNull() const
{
return blockIsFree && !freeList; // This state is impossible, so we can use it to mean null.
@@ -138,6 +136,9 @@ inline bool WeakBlock::isEmpty()
return !m_sweepResult.isNull() && m_sweepResult.blockIsFree;
}
-} // namespace JSC
+inline bool WeakBlock::isLogicallyEmptyButNotFree() const
+{
+ return !m_sweepResult.isNull() && !m_sweepResult.blockIsFree && m_sweepResult.blockIsLogicallyEmpty;
+}
-#endif // WeakBlock_h
+} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/WeakHandleOwner.cpp b/Source/JavaScriptCore/heap/WeakHandleOwner.cpp
index 67e1774df..044518f7a 100644
--- a/Source/JavaScriptCore/heap/WeakHandleOwner.cpp
+++ b/Source/JavaScriptCore/heap/WeakHandleOwner.cpp
@@ -26,6 +26,8 @@
#include "config.h"
#include "WeakHandleOwner.h"
+#include "JSCInlines.h"
+
namespace JSC {
class SlotVisitor;
diff --git a/Source/JavaScriptCore/heap/WeakHandleOwner.h b/Source/JavaScriptCore/heap/WeakHandleOwner.h
index 6304dd20b..219a9c591 100644
--- a/Source/JavaScriptCore/heap/WeakHandleOwner.h
+++ b/Source/JavaScriptCore/heap/WeakHandleOwner.h
@@ -23,8 +23,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef WeakHandleOwner_h
-#define WeakHandleOwner_h
+#pragma once
#include "Handle.h"
@@ -40,5 +39,3 @@ public:
};
} // namespace JSC
-
-#endif // WeakHandleOwner_h
diff --git a/Source/JavaScriptCore/heap/WeakImpl.h b/Source/JavaScriptCore/heap/WeakImpl.h
index ca93fb286..3f6ffbfa4 100644
--- a/Source/JavaScriptCore/heap/WeakImpl.h
+++ b/Source/JavaScriptCore/heap/WeakImpl.h
@@ -23,8 +23,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef WeakImpl_h
-#define WeakImpl_h
+#pragma once
#include "JSCJSValue.h"
@@ -52,7 +51,9 @@ public:
void setState(State);
const JSValue& jsValue();
+ static ptrdiff_t offsetOfJSValue() { return OBJECT_OFFSETOF(WeakImpl, m_jsValue); }
WeakHandleOwner* weakHandleOwner();
+ static ptrdiff_t offsetOfWeakHandleOwner() { return OBJECT_OFFSETOF(WeakImpl, m_weakHandleOwner); }
void* context();
static WeakImpl* asWeakImpl(JSValue*);
@@ -111,5 +112,3 @@ inline WeakImpl* WeakImpl::asWeakImpl(JSValue* slot)
}
} // namespace JSC
-
-#endif // WeakImpl_h
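
The new offsetOfJSValue()/offsetOfWeakHandleOwner() accessors expose field offsets, the usual pattern for letting generated code read WeakImpl fields as base-plus-constant loads instead of going through C++ accessors. Below is a minimal sketch of that access pattern using plain offsetof on an illustrative WeakImplModel; the real WeakImpl layout and the OBJECT_OFFSETOF macro are not reproduced here.

    // Sketch of offset-based field access; WeakImplModel and Owner are stand-ins.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct Owner { int tag; };

    struct WeakImplModel {
        uint64_t m_jsValue { 42 };            // stands in for the encoded JSValue
        Owner* m_weakHandleOwner { nullptr };

        static std::ptrdiff_t offsetOfJSValue() { return offsetof(WeakImplModel, m_jsValue); }
        static std::ptrdiff_t offsetOfWeakHandleOwner() { return offsetof(WeakImplModel, m_weakHandleOwner); }
    };

    int main()
    {
        WeakImplModel impl;
        auto* base = reinterpret_cast<char*>(&impl);

        // What emitted code effectively does: load at [impl + constant offset].
        auto* valueSlot = reinterpret_cast<uint64_t*>(base + WeakImplModel::offsetOfJSValue());
        assert(*valueSlot == 42);

        auto* ownerSlot = reinterpret_cast<Owner**>(base + WeakImplModel::offsetOfWeakHandleOwner());
        assert(*ownerSlot == nullptr);
    }
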
diff --git a/Source/JavaScriptCore/heap/WeakInlines.h b/Source/JavaScriptCore/heap/WeakInlines.h
index 8cfd50153..d53cc54b6 100644
--- a/Source/JavaScriptCore/heap/WeakInlines.h
+++ b/Source/JavaScriptCore/heap/WeakInlines.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2012, 2013, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,13 +23,11 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef WeakInlines_h
-#define WeakInlines_h
+#pragma once
#include "JSCell.h"
#include "WeakSetInlines.h"
#include <wtf/Assertions.h>
-#include <wtf/HashTraits.h>
namespace JSC {
@@ -65,7 +63,7 @@ template<typename T> inline void Weak<T>::swap(Weak& other)
template<typename T> inline auto Weak<T>::operator=(Weak&& other) -> Weak&
{
- Weak weak = std::move(other);
+ Weak weak = WTFMove(other);
swap(weak);
return *this;
}
@@ -73,20 +71,23 @@ template<typename T> inline auto Weak<T>::operator=(Weak&& other) -> Weak&
template<typename T> inline T* Weak<T>::operator->() const
{
ASSERT(m_impl && m_impl->state() == WeakImpl::Live);
- return jsCast<T*>(m_impl->jsValue().asCell());
+ // We can't use jsCast here since we could be called in a finalizer.
+ return static_cast<T*>(m_impl->jsValue().asCell());
}
template<typename T> inline T& Weak<T>::operator*() const
{
ASSERT(m_impl && m_impl->state() == WeakImpl::Live);
- return *jsCast<T*>(m_impl->jsValue().asCell());
+ // We can't use jsCast here since we could be called in a finalizer.
+ return *static_cast<T*>(m_impl->jsValue().asCell());
}
template<typename T> inline T* Weak<T>::get() const
{
if (!m_impl || m_impl->state() != WeakImpl::Live)
- return 0;
- return jsCast<T*>(m_impl->jsValue().asCell());
+ return nullptr;
+ // We can't use jsCast here since we could be called in a finalizer.
+ return static_cast<T*>(m_impl->jsValue().asCell());
}
template<typename T> inline bool Weak<T>::was(T* other) const
@@ -99,9 +100,9 @@ template<typename T> inline bool Weak<T>::operator!() const
return !m_impl || !m_impl->jsValue() || m_impl->state() != WeakImpl::Live;
}
-template<typename T> inline Weak<T>::operator UnspecifiedBoolType*() const
+template<typename T> inline Weak<T>::operator bool() const
{
- return reinterpret_cast<UnspecifiedBoolType*>(!!*this);
+ return !!*this;
}
template<typename T> inline WeakImpl* Weak<T>::leakImpl()
@@ -148,24 +149,3 @@ template<typename T> inline void weakClear(Weak<T>& weak, T* cell)
}
} // namespace JSC
-
-namespace WTF {
-
-template<typename T> struct VectorTraits<JSC::Weak<T>> : SimpleClassVectorTraits {
- static const bool canCompareWithMemcmp = false;
-};
-
-template<typename T> struct HashTraits<JSC::Weak<T>> : SimpleClassHashTraits<JSC::Weak<T>> {
- typedef JSC::Weak<T> StorageType;
-
- typedef std::nullptr_t EmptyValueType;
- static EmptyValueType emptyValue() { return nullptr; }
-
- typedef T* PeekType;
- static PeekType peek(const StorageType& value) { return value.get(); }
- static PeekType peek(EmptyValueType) { return PeekType(); }
-};
-
-} // namespace WTF
-
-#endif // WeakInlines_h
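
At the call-site level, the WeakInlines.h changes above amount to: Weak<T> converts to bool through an explicit conversion operator instead of the old UnspecifiedBoolType idiom, and a dead handle reads back as nullptr from get(). A usage-level sketch with a simplified WeakModel stand-in (not the real JSC::Weak<T>):

    // Simplified weak-handle model showing the boolean-conversion and get() behavior.
    #include <cassert>

    template<typename T>
    class WeakModel {
    public:
        WeakModel() = default;
        explicit WeakModel(T* cell) : m_cell(cell) { }

        explicit operator bool() const { return !!m_cell; }
        T* get() const { return m_cell; }      // nullptr once cleared
        T* operator->() const { return m_cell; }

        void clear() { m_cell = nullptr; }     // models the cell being swept

    private:
        T* m_cell { nullptr };
    };

    struct Thing { int payload { 7 }; };

    int main()
    {
        Thing thing;
        WeakModel<Thing> weak(&thing);

        if (weak)                   // explicit operator bool in a boolean context
            assert(weak->payload == 7);

        weak.clear();
        assert(!weak);              // reads naturally after the cell is gone
        assert(!weak.get());
    }
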
diff --git a/Source/JavaScriptCore/heap/WeakReferenceHarvester.h b/Source/JavaScriptCore/heap/WeakReferenceHarvester.h
index 90b4deed0..4807e0679 100644
--- a/Source/JavaScriptCore/heap/WeakReferenceHarvester.h
+++ b/Source/JavaScriptCore/heap/WeakReferenceHarvester.h
@@ -17,15 +17,12 @@
*
*/
-#ifndef WeakReferenceHarvester_h
-#define WeakReferenceHarvester_h
+#pragma once
#include "ListableHandler.h"
namespace JSC {
-class MarkStack;
-class MarkStackSharedData;
class SlotVisitor;
class WeakReferenceHarvester : public ListableHandler<WeakReferenceHarvester> {
@@ -41,5 +38,3 @@ protected:
};
} // namespace JSC
-
-#endif // WeakReferenceHarvester_h
diff --git a/Source/JavaScriptCore/heap/WeakSet.cpp b/Source/JavaScriptCore/heap/WeakSet.cpp
index e62e66eae..faae02c7e 100644
--- a/Source/JavaScriptCore/heap/WeakSet.cpp
+++ b/Source/JavaScriptCore/heap/WeakSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,26 +27,60 @@
#include "WeakSet.h"
#include "Heap.h"
+#include "JSCInlines.h"
#include "VM.h"
namespace JSC {
WeakSet::~WeakSet()
{
+ if (isOnList())
+ remove();
+
+ Heap& heap = *this->heap();
WeakBlock* next = 0;
for (WeakBlock* block = m_blocks.head(); block; block = next) {
next = block->next();
- heap()->blockAllocator().deallocate(WeakBlock::destroy(block));
+ WeakBlock::destroy(heap, block);
}
m_blocks.clear();
}
void WeakSet::sweep()
{
- for (WeakBlock* block = m_blocks.head(); block; block = block->next())
+ for (WeakBlock* block = m_blocks.head(); block;) {
+ heap()->sweepNextLogicallyEmptyWeakBlock();
+
+ WeakBlock* nextBlock = block->next();
block->sweep();
+ if (block->isLogicallyEmptyButNotFree()) {
+ // If this WeakBlock is logically empty, but still has Weaks pointing into it,
+ // we can't destroy it just yet. Detach it from the WeakSet and hand ownership
+ // to the Heap so we don't pin down the entire MarkedBlock or LargeAllocation.
+ m_blocks.remove(block);
+ heap()->addLogicallyEmptyWeakBlock(block);
+ block->disconnectContainer();
+ }
+ block = nextBlock;
+ }
+
+ resetAllocator();
+}
+
+void WeakSet::shrink()
+{
+ WeakBlock* next;
+ for (WeakBlock* block = m_blocks.head(); block; block = next) {
+ next = block->next();
+
+ if (block->isEmpty())
+ removeAllocator(block);
+ }
resetAllocator();
+
+ if (m_blocks.isEmpty() && isOnList())
+ remove();
}
WeakBlock::FreeCell* WeakSet::findAllocator()
@@ -73,7 +107,10 @@ WeakBlock::FreeCell* WeakSet::tryFindAllocator()
WeakBlock::FreeCell* WeakSet::addAllocator()
{
- WeakBlock* block = WeakBlock::create(heap()->blockAllocator().allocate<WeakBlock>());
+ if (!isOnList())
+ heap()->objectSpace().addActiveWeakSet(this);
+
+ WeakBlock* block = WeakBlock::create(*heap(), m_container);
heap()->didAllocate(WeakBlock::blockSize);
m_blocks.append(block);
WeakBlock::SweepResult sweepResult = block->takeSweepResult();
@@ -84,7 +121,7 @@ WeakBlock::FreeCell* WeakSet::addAllocator()
void WeakSet::removeAllocator(WeakBlock* block)
{
m_blocks.remove(block);
- heap()->blockAllocator().deallocate(WeakBlock::destroy(block));
+ WeakBlock::destroy(*heap(), block);
}
} // namespace JSC
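
The new sweep() loop above detaches any block that is logically empty but still referenced and hands it to the Heap via addLogicallyEmptyWeakBlock(), so the block no longer pins its MarkedBlock or LargeAllocation; heap()->sweepNextLogicallyEmptyWeakBlock() then retires such blocks incrementally. The simulation below mirrors that ownership transfer with illustrative *Model types; the Heap-side container is an assumption for illustration, since its implementation is not part of this diff.

    // Simulation of the logically-empty hand-off; all types here are stand-ins.
    #include <cassert>
    #include <list>

    struct BlockModel {
        bool logicallyEmpty { false };   // every WeakImpl is dead or deallocated
        bool fullyFree { false };        // no outstanding Weak handles at all
        bool detachedFromContainer { false };

        void sweep() { /* the real sweep finalizes dead WeakImpls here */ }
        bool isLogicallyEmptyButNotFree() const { return logicallyEmpty && !fullyFree; }
    };

    struct HeapModel {
        std::list<BlockModel*> logicallyEmptyBlocks;
        void addLogicallyEmptyWeakBlock(BlockModel* block) { logicallyEmptyBlocks.push_back(block); }
    };

    struct WeakSetModel {
        std::list<BlockModel*> blocks;
        HeapModel* heap;

        void sweep()
        {
            for (auto it = blocks.begin(); it != blocks.end();) {
                BlockModel* block = *it;
                block->sweep();
                if (block->isLogicallyEmptyButNotFree()) {
                    it = blocks.erase(it);                   // detach from the WeakSet
                    heap->addLogicallyEmptyWeakBlock(block); // the Heap now owns it
                    block->detachedFromContainer = true;     // disconnectContainer()
                } else
                    ++it;
            }
        }
    };

    int main()
    {
        HeapModel heap;
        BlockModel live, pinned { /*logicallyEmpty*/ true, /*fullyFree*/ false };
        WeakSetModel set { { &live, &pinned }, &heap };

        set.sweep();
        assert(set.blocks.size() == 1);                // only the live block stays
        assert(heap.logicallyEmptyBlocks.size() == 1); // pinned block handed off
        assert(pinned.detachedFromContainer);
    }
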
diff --git a/Source/JavaScriptCore/heap/WeakSet.h b/Source/JavaScriptCore/heap/WeakSet.h
index a5ddcaffa..ddcf743b5 100644
--- a/Source/JavaScriptCore/heap/WeakSet.h
+++ b/Source/JavaScriptCore/heap/WeakSet.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,33 +23,38 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef WeakSet_h
-#define WeakSet_h
+#pragma once
+#include "CellContainer.h"
#include "WeakBlock.h"
+#include <wtf/SentinelLinkedList.h>
namespace JSC {
class Heap;
class WeakImpl;
-class WeakSet {
+class WeakSet : public BasicRawSentinelNode<WeakSet> {
friend class LLIntOffsetsExtractor;
public:
static WeakImpl* allocate(JSValue, WeakHandleOwner* = 0, void* context = 0);
static void deallocate(WeakImpl*);
- WeakSet(VM*);
+ WeakSet(VM*, CellContainer);
~WeakSet();
void lastChanceToFinalize();
+
+ CellContainer container() const { return m_container; }
+ void setContainer(CellContainer container) { m_container = container; }
Heap* heap() const;
VM* vm() const;
bool isEmpty() const;
- void visit(HeapRootVisitor&);
+ void visit(SlotVisitor&);
+
void reap();
void sweep();
void shrink();
@@ -65,12 +70,14 @@ private:
WeakBlock* m_nextAllocator;
DoublyLinkedList<WeakBlock> m_blocks;
VM* m_vm;
+ CellContainer m_container;
};
-inline WeakSet::WeakSet(VM* vm)
+inline WeakSet::WeakSet(VM* vm, CellContainer container)
: m_allocator(0)
, m_nextAllocator(0)
, m_vm(vm)
+ , m_container(container)
{
}
@@ -100,7 +107,7 @@ inline void WeakSet::lastChanceToFinalize()
block->lastChanceToFinalize();
}
-inline void WeakSet::visit(HeapRootVisitor& visitor)
+inline void WeakSet::visit(SlotVisitor& visitor)
{
for (WeakBlock* block = m_blocks.head(); block; block = block->next())
block->visit(visitor);
@@ -112,19 +119,6 @@ inline void WeakSet::reap()
block->reap();
}
-inline void WeakSet::shrink()
-{
- WeakBlock* next;
- for (WeakBlock* block = m_blocks.head(); block; block = next) {
- next = block->next();
-
- if (block->isEmpty())
- removeAllocator(block);
- }
-
- resetAllocator();
-}
-
inline void WeakSet::resetAllocator()
{
m_allocator = 0;
@@ -132,5 +126,3 @@ inline void WeakSet::resetAllocator()
}
} // namespace JSC
-
-#endif // WeakSet_h
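
Deriving WeakSet from BasicRawSentinelNode<WeakSet> lets the heap keep an intrusive list of active weak sets: addAllocator() registers the set the first time it allocates a block (via addActiveWeakSet in the .cpp hunk above), and shrink() or the destructor removes it once it is empty. The sketch below models that lifecycle; it uses a plain std::set for membership, whereas WTF's SentinelLinkedList is an allocation-free intrusive list, and all *Model names are illustrative.

    // Lifecycle of active-weak-set registration; membership container is a stand-in.
    #include <cassert>
    #include <set>

    struct WeakSetModel;

    struct ActiveList {
        std::set<WeakSetModel*> members;
    };

    struct WeakSetModel {
        explicit WeakSetModel(ActiveList& list) : m_activeList(list) { }
        ~WeakSetModel() { if (isOnList()) remove(); }

        bool isOnList() const { return m_onList; }
        void remove() { m_activeList.members.erase(this); m_onList = false; }

        void addAllocator()            // first block allocation registers the set
        {
            if (!isOnList()) {
                m_activeList.members.insert(this);
                m_onList = true;
            }
            // ... a WeakBlock would be created and appended here ...
        }

        void shrink()                  // once no blocks remain, drop off the list
        {
            if (isOnList())
                remove();
        }

    private:
        ActiveList& m_activeList;
        bool m_onList { false };
    };

    int main()
    {
        ActiveList list;
        {
            WeakSetModel set(list);
            set.addAllocator();
            assert(list.members.count(&set) == 1);
            set.shrink();
            assert(list.members.empty());
        }
        assert(list.members.empty()); // destructor also deregisters
    }
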
diff --git a/Source/JavaScriptCore/heap/WeakSetInlines.h b/Source/JavaScriptCore/heap/WeakSetInlines.h
index f23922493..360e1f99b 100644
--- a/Source/JavaScriptCore/heap/WeakSetInlines.h
+++ b/Source/JavaScriptCore/heap/WeakSetInlines.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,16 +23,16 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef WeakSetInlines_h
-#define WeakSetInlines_h
+#pragma once
+#include "CellContainerInlines.h"
#include "MarkedBlock.h"
namespace JSC {
inline WeakImpl* WeakSet::allocate(JSValue jsValue, WeakHandleOwner* weakHandleOwner, void* context)
{
- WeakSet& weakSet = MarkedBlock::blockFor(jsValue.asCell())->weakSet();
+ WeakSet& weakSet = jsValue.asCell()->cellContainer().weakSet();
WeakBlock::FreeCell* allocator = weakSet.m_allocator;
if (UNLIKELY(!allocator))
allocator = weakSet.findAllocator();
@@ -53,5 +53,3 @@ inline void WeakBlock::finalize(WeakImpl* weakImpl)
}
} // namespace JSC
-
-#endif // WeakSetInlines_h
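
WeakSet::allocate() now asks the cell's CellContainer for its WeakSet instead of assuming a MarkedBlock, so cells living in a LargeAllocation resolve to the right weak set as well. A simplified dispatch sketch follows; the real CellContainer packs the distinction into a tagged pointer, and the types here are illustrative stand-ins.

    // Two container kinds, one weakSet() entry point; all types are stand-ins.
    #include <cassert>

    struct WeakSetModel { int id; };

    struct MarkedBlockModel { WeakSetModel weakSet { 1 }; };
    struct LargeAllocationModel { WeakSetModel weakSet { 2 }; };

    class CellContainerModel {
    public:
        explicit CellContainerModel(MarkedBlockModel& block) : m_block(&block) { }
        explicit CellContainerModel(LargeAllocationModel& large) : m_large(&large) { }

        WeakSetModel& weakSet() const
        {
            // Dispatch on which kind of container the cell lives in.
            return m_block ? m_block->weakSet : m_large->weakSet;
        }

    private:
        MarkedBlockModel* m_block { nullptr };
        LargeAllocationModel* m_large { nullptr };
    };

    int main()
    {
        MarkedBlockModel block;
        LargeAllocationModel large;

        assert(CellContainerModel(block).weakSet().id == 1);
        assert(CellContainerModel(large).weakSet().id == 2);
    }
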
diff --git a/Source/JavaScriptCore/heap/WriteBarrierSupport.cpp b/Source/JavaScriptCore/heap/WriteBarrierSupport.cpp
index 5ca33c861..984f0044b 100644
--- a/Source/JavaScriptCore/heap/WriteBarrierSupport.cpp
+++ b/Source/JavaScriptCore/heap/WriteBarrierSupport.cpp
@@ -26,6 +26,8 @@
#include "config.h"
#include "WriteBarrierSupport.h"
+#include "JSCInlines.h"
+
namespace JSC {
#if ENABLE(WRITE_BARRIER_PROFILING)
diff --git a/Source/JavaScriptCore/heap/WriteBarrierSupport.h b/Source/JavaScriptCore/heap/WriteBarrierSupport.h
index 5d7d2f6fe..570bba2b8 100644
--- a/Source/JavaScriptCore/heap/WriteBarrierSupport.h
+++ b/Source/JavaScriptCore/heap/WriteBarrierSupport.h
@@ -23,8 +23,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef WriteBarrierSupport_h
-#define WriteBarrierSupport_h
+#pragma once
#include "SamplingCounter.h"
#include <wtf/Assertions.h>
@@ -94,6 +93,3 @@ public:
};
} // namespace JSC
-
-#endif // WriteBarrierSupport_h
-