author    Simon Hausmann <simon.hausmann@nokia.com>    2012-09-11 19:54:20 +0200
committer Simon Hausmann <simon.hausmann@nokia.com>    2012-09-11 19:54:20 +0200
commit    88a04ac016f57c2d78e714682445dff2e7db4ade (patch)
tree      a48ca81ee3b29953121308168db22532d5b57fe2 /Source/JavaScriptCore/heap
parent    284837daa07b29d6a63a748544a90b1f5842ac5c (diff)
download  qtwebkit-88a04ac016f57c2d78e714682445dff2e7db4ade.tar.gz
Imported WebKit commit 42d95198c30c2d1a94a5081181aad0b2be7c316c (http://svn.webkit.org/repository/webkit/trunk@128206)
This includes the rewrite of the configure part of the build system, which should fix the QtQuick2 detection and allow for further simplifications in the future.
Diffstat (limited to 'Source/JavaScriptCore/heap')
-rw-r--r-- Source/JavaScriptCore/heap/GCThreadSharedData.cpp | 1
-rw-r--r-- Source/JavaScriptCore/heap/GCThreadSharedData.h | 8
-rw-r--r-- Source/JavaScriptCore/heap/Heap.h | 6
-rw-r--r-- Source/JavaScriptCore/heap/HeapRootVisitor.h | 1
-rw-r--r-- Source/JavaScriptCore/heap/MarkStack.cpp | 398
-rw-r--r-- Source/JavaScriptCore/heap/MarkStack.h | 339
-rw-r--r-- Source/JavaScriptCore/heap/MarkStackInlineMethods.h | 105
-rw-r--r-- Source/JavaScriptCore/heap/MarkedAllocator.cpp | 55
-rw-r--r-- Source/JavaScriptCore/heap/MarkedAllocator.h | 15
-rw-r--r-- Source/JavaScriptCore/heap/MarkedSpace.cpp | 13
-rw-r--r-- Source/JavaScriptCore/heap/MarkedSpace.h | 36
-rw-r--r-- Source/JavaScriptCore/heap/SlotVisitor.cpp | 412
-rw-r--r-- Source/JavaScriptCore/heap/SlotVisitor.h | 126
-rw-r--r-- Source/JavaScriptCore/heap/SlotVisitorInlineMethods.h | 120
14 files changed, 836 insertions, 799 deletions
diff --git a/Source/JavaScriptCore/heap/GCThreadSharedData.cpp b/Source/JavaScriptCore/heap/GCThreadSharedData.cpp
index 82c52d22e..23a6b97a1 100644
--- a/Source/JavaScriptCore/heap/GCThreadSharedData.cpp
+++ b/Source/JavaScriptCore/heap/GCThreadSharedData.cpp
@@ -29,6 +29,7 @@
#include "JSGlobalData.h"
#include "MarkStack.h"
#include "SlotVisitor.h"
+#include "SlotVisitorInlineMethods.h"
#include <wtf/MainThread.h>
namespace JSC {
diff --git a/Source/JavaScriptCore/heap/GCThreadSharedData.h b/Source/JavaScriptCore/heap/GCThreadSharedData.h
index 8868b440c..3f09a2820 100644
--- a/Source/JavaScriptCore/heap/GCThreadSharedData.h
+++ b/Source/JavaScriptCore/heap/GCThreadSharedData.h
@@ -26,7 +26,12 @@
#ifndef GCThreadSharedData_h
#define GCThreadSharedData_h
+#include "ListableHandler.h"
#include "MarkStack.h"
+#include "UnconditionalFinalizer.h"
+#include "WeakReferenceHarvester.h"
+#include <wtf/HashSet.h>
+#include <wtf/Threading.h>
#include <wtf/Vector.h>
namespace JSC {
@@ -48,7 +53,6 @@ public:
#endif
private:
- friend class MarkStack;
friend class SlotVisitor;
#if ENABLE(PARALLEL_GC)
@@ -64,7 +68,7 @@ private:
bool m_shouldHashConst;
Vector<ThreadIdentifier> m_markingThreads;
- Vector<MarkStack*> m_markingThreadsMarkStack;
+ Vector<SlotVisitor*> m_markingThreadsMarkStack;
Mutex m_markingLock;
ThreadCondition m_markingCondition;
diff --git a/Source/JavaScriptCore/heap/Heap.h b/Source/JavaScriptCore/heap/Heap.h
index 69aa97e33..e48386791 100644
--- a/Source/JavaScriptCore/heap/Heap.h
+++ b/Source/JavaScriptCore/heap/Heap.h
@@ -185,7 +185,7 @@ namespace JSC {
void* allocateWithDestructor(size_t);
void* allocateWithoutDestructor(size_t);
- void* allocateStructure();
+ void* allocateStructure(size_t);
static const size_t minExtraCost = 256;
static const size_t maxExtraCost = 1024 * 1024;
@@ -372,9 +372,9 @@ namespace JSC {
return m_objectSpace.allocateWithoutDestructor(bytes);
}
- inline void* Heap::allocateStructure()
+ inline void* Heap::allocateStructure(size_t bytes)
{
- return m_objectSpace.allocateStructure();
+ return m_objectSpace.allocateStructure(bytes);
}
inline CheckedBoolean Heap::tryAllocateStorage(size_t bytes, void** outPtr)
diff --git a/Source/JavaScriptCore/heap/HeapRootVisitor.h b/Source/JavaScriptCore/heap/HeapRootVisitor.h
index 76c97290a..9849d7c39 100644
--- a/Source/JavaScriptCore/heap/HeapRootVisitor.h
+++ b/Source/JavaScriptCore/heap/HeapRootVisitor.h
@@ -27,6 +27,7 @@
#define HeapRootVisitor_h
#include "SlotVisitor.h"
+#include "SlotVisitorInlineMethods.h"
namespace JSC {
diff --git a/Source/JavaScriptCore/heap/MarkStack.cpp b/Source/JavaScriptCore/heap/MarkStack.cpp
index 9a4a01f04..582439fd2 100644
--- a/Source/JavaScriptCore/heap/MarkStack.cpp
+++ b/Source/JavaScriptCore/heap/MarkStack.cpp
@@ -223,402 +223,4 @@ void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other, size_t idleThread
append(other.removeLast());
}
-MarkStack::MarkStack(GCThreadSharedData& shared)
- : m_stack(shared.m_segmentAllocator)
-#if !ASSERT_DISABLED
- , m_isCheckingForDefaultMarkViolation(false)
- , m_isDraining(false)
-#endif
- , m_visitCount(0)
- , m_isInParallelMode(false)
- , m_shared(shared)
- , m_shouldHashConst(false)
-{
-}
-
-MarkStack::~MarkStack()
-{
- ASSERT(m_stack.isEmpty());
-}
-
-void MarkStack::setup()
-{
- m_shared.m_shouldHashConst = m_shared.m_globalData->haveEnoughNewStringsToHashConst();
- m_shouldHashConst = m_shared.m_shouldHashConst;
-#if ENABLE(PARALLEL_GC)
- for (unsigned i = 0; i < m_shared.m_markingThreadsMarkStack.size(); ++i)
- m_shared.m_markingThreadsMarkStack[i]->m_shouldHashConst = m_shared.m_shouldHashConst;
-#endif
-}
-
-void MarkStack::reset()
-{
- m_visitCount = 0;
- ASSERT(m_stack.isEmpty());
-#if ENABLE(PARALLEL_GC)
- ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
-#else
- m_opaqueRoots.clear();
-#endif
- if (m_shouldHashConst) {
- m_uniqueStrings.clear();
- m_shouldHashConst = false;
- }
-}
-
-void MarkStack::append(ConservativeRoots& conservativeRoots)
-{
- JSCell** roots = conservativeRoots.roots();
- size_t size = conservativeRoots.size();
- for (size_t i = 0; i < size; ++i)
- internalAppend(roots[i]);
-}
-
-ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell)
-{
-#if ENABLE(SIMPLE_HEAP_PROFILING)
- visitor.m_visitedTypeCounts.count(cell);
-#endif
-
- ASSERT(Heap::isMarked(cell));
-
- if (isJSString(cell)) {
- JSString::visitChildren(const_cast<JSCell*>(cell), visitor);
- return;
- }
-
- if (isJSFinalObject(cell)) {
- JSFinalObject::visitChildren(const_cast<JSCell*>(cell), visitor);
- return;
- }
-
- if (isJSArray(cell)) {
- JSArray::visitChildren(const_cast<JSCell*>(cell), visitor);
- return;
- }
-
- cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
-}
-
-void SlotVisitor::donateKnownParallel()
-{
- // NOTE: Because we re-try often, we can afford to be conservative, and
- // assume that donating is not profitable.
-
- // Avoid locking when a thread reaches a dead end in the object graph.
- if (m_stack.size() < 2)
- return;
-
- // If there's already some shared work queued up, be conservative and assume
- // that donating more is not profitable.
- if (m_shared.m_sharedMarkStack.size())
- return;
-
- // If we're contending on the lock, be conservative and assume that another
- // thread is already donating.
- MutexTryLocker locker(m_shared.m_markingLock);
- if (!locker.locked())
- return;
-
- // Otherwise, assume that a thread will go idle soon, and donate.
- m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack);
-
- if (m_shared.m_numberOfActiveParallelMarkers < Options::numberOfGCMarkers())
- m_shared.m_markingCondition.broadcast();
-}
-
-void SlotVisitor::drain()
-{
- ASSERT(m_isInParallelMode);
-
-#if ENABLE(PARALLEL_GC)
- if (Options::numberOfGCMarkers() > 1) {
- while (!m_stack.isEmpty()) {
- m_stack.refill();
- for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_stack.canRemoveLast() && countdown--;)
- visitChildren(*this, m_stack.removeLast());
- donateKnownParallel();
- }
-
- mergeOpaqueRootsIfNecessary();
- return;
- }
-#endif
-
- while (!m_stack.isEmpty()) {
- m_stack.refill();
- while (m_stack.canRemoveLast())
- visitChildren(*this, m_stack.removeLast());
- }
-}
-
-void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
-{
- ASSERT(m_isInParallelMode);
-
- ASSERT(Options::numberOfGCMarkers());
-
- bool shouldBeParallel;
-
-#if ENABLE(PARALLEL_GC)
- shouldBeParallel = Options::numberOfGCMarkers() > 1;
-#else
- ASSERT(Options::numberOfGCMarkers() == 1);
- shouldBeParallel = false;
-#endif
-
- if (!shouldBeParallel) {
- // This call should be a no-op.
- ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain);
- ASSERT(m_stack.isEmpty());
- ASSERT(m_shared.m_sharedMarkStack.isEmpty());
- return;
- }
-
-#if ENABLE(PARALLEL_GC)
- {
- MutexLocker locker(m_shared.m_markingLock);
- m_shared.m_numberOfActiveParallelMarkers++;
- }
- while (true) {
- {
- MutexLocker locker(m_shared.m_markingLock);
- m_shared.m_numberOfActiveParallelMarkers--;
-
- // How we wait differs depending on drain mode.
- if (sharedDrainMode == MasterDrain) {
- // Wait until either termination is reached, or until there is some work
- // for us to do.
- while (true) {
- // Did we reach termination?
- if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) {
- // Let any sleeping slaves know it's time for them to give their private CopiedBlocks back
- m_shared.m_markingCondition.broadcast();
- return;
- }
-
- // Is there work to be done?
- if (!m_shared.m_sharedMarkStack.isEmpty())
- break;
-
- // Otherwise wait.
- m_shared.m_markingCondition.wait(m_shared.m_markingLock);
- }
- } else {
- ASSERT(sharedDrainMode == SlaveDrain);
-
- // Did we detect termination? If so, let the master know.
- if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
- m_shared.m_markingCondition.broadcast();
-
- while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit) {
- if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
- doneCopying();
- m_shared.m_markingCondition.wait(m_shared.m_markingLock);
- }
-
- // Is the VM exiting? If so, exit this thread.
- if (m_shared.m_parallelMarkersShouldExit) {
- doneCopying();
- return;
- }
- }
-
- size_t idleThreadCount = Options::numberOfGCMarkers() - m_shared.m_numberOfActiveParallelMarkers;
- m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack, idleThreadCount);
- m_shared.m_numberOfActiveParallelMarkers++;
- }
-
- drain();
- }
-#endif
-}
-
-void MarkStack::mergeOpaqueRoots()
-{
- ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
- {
- MutexLocker locker(m_shared.m_opaqueRootsLock);
- HashSet<void*>::iterator begin = m_opaqueRoots.begin();
- HashSet<void*>::iterator end = m_opaqueRoots.end();
- for (HashSet<void*>::iterator iter = begin; iter != end; ++iter)
- m_shared.m_opaqueRoots.add(*iter);
- }
- m_opaqueRoots.clear();
-}
-
-void SlotVisitor::startCopying()
-{
- ASSERT(!m_copiedAllocator.isValid());
-}
-
-void* SlotVisitor::allocateNewSpaceSlow(size_t bytes)
-{
- m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock());
- m_copiedAllocator.setCurrentBlock(m_shared.m_copiedSpace->allocateBlockForCopyingPhase());
-
- void* result = 0;
- CheckedBoolean didSucceed = m_copiedAllocator.tryAllocate(bytes, &result);
- ASSERT(didSucceed);
- return result;
-}
-
-void* SlotVisitor::allocateNewSpaceOrPin(void* ptr, size_t bytes)
-{
- if (!checkIfShouldCopyAndPinOtherwise(ptr, bytes))
- return 0;
-
- return allocateNewSpace(bytes);
-}
-
-ALWAYS_INLINE bool JSString::tryHashConstLock()
-{
-#if ENABLE(PARALLEL_GC)
- unsigned currentFlags = m_flags;
-
- if (currentFlags & HashConstLock)
- return false;
-
- unsigned newFlags = currentFlags | HashConstLock;
-
- if (!WTF::weakCompareAndSwap(&m_flags, currentFlags, newFlags))
- return false;
-
- WTF::memoryBarrierAfterLock();
- return true;
-#else
- if (isHashConstSingleton())
- return false;
-
- m_flags |= HashConstLock;
-
- return true;
-#endif
-}
-
-ALWAYS_INLINE void JSString::releaseHashConstLock()
-{
-#if ENABLE(PARALLEL_GC)
- WTF::memoryBarrierBeforeUnlock();
-#endif
- m_flags &= ~HashConstLock;
-}
-
-ALWAYS_INLINE bool JSString::shouldTryHashConst()
-{
- return ((length() > 1) && !isRope() && !isHashConstSingleton());
-}
-
-ALWAYS_INLINE void MarkStack::internalAppend(JSValue* slot)
-{
- // This internalAppend is only intended for visits to object and array backing stores,
- // as it can change the JSValue pointed to by the argument when the original JSValue
- // is a string that contains the same contents as another string.
-
- ASSERT(slot);
- JSValue value = *slot;
- ASSERT(value);
- if (!value.isCell())
- return;
-
- JSCell* cell = value.asCell();
- if (!cell)
- return;
-
- if (m_shouldHashConst && cell->isString()) {
- JSString* string = jsCast<JSString*>(cell);
- if (string->shouldTryHashConst() && string->tryHashConstLock()) {
- UniqueStringMap::AddResult addResult = m_uniqueStrings.add(string->string().impl(), value);
- if (addResult.isNewEntry)
- string->setHashConstSingleton();
- else {
- JSValue existingJSValue = addResult.iterator->second;
- if (value != existingJSValue)
- jsCast<JSString*>(existingJSValue.asCell())->clearHashConstSingleton();
- *slot = existingJSValue;
- string->releaseHashConstLock();
- return;
- }
- string->releaseHashConstLock();
- }
- }
-
- internalAppend(cell);
-}
-
-void SlotVisitor::copyAndAppend(void** ptr, size_t bytes, JSValue* values, unsigned length)
-{
- void* oldPtr = *ptr;
- void* newPtr = allocateNewSpaceOrPin(oldPtr, bytes);
- if (newPtr) {
- size_t jsValuesOffset = static_cast<size_t>(reinterpret_cast<char*>(values) - static_cast<char*>(oldPtr));
-
- JSValue* newValues = reinterpret_cast_ptr<JSValue*>(static_cast<char*>(newPtr) + jsValuesOffset);
- for (unsigned i = 0; i < length; i++) {
- JSValue& value = values[i];
- newValues[i] = value;
- if (!value)
- continue;
- internalAppend(&newValues[i]);
- }
-
- memcpy(newPtr, oldPtr, jsValuesOffset);
- *ptr = newPtr;
- } else
- append(values, length);
-}
-
-void SlotVisitor::doneCopying()
-{
- if (!m_copiedAllocator.isValid())
- return;
-
- m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock());
-}
-
-void SlotVisitor::harvestWeakReferences()
-{
- for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
- current->visitWeakReferences(*this);
-}
-
-void SlotVisitor::finalizeUnconditionalFinalizers()
-{
- while (m_shared.m_unconditionalFinalizers.hasNext())
- m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
-}
-
-#if ENABLE(GC_VALIDATION)
-void MarkStack::validate(JSCell* cell)
-{
- if (!cell) {
- dataLog("cell is NULL\n");
- CRASH();
- }
-
- if (!cell->structure()) {
- dataLog("cell at %p has a null structure\n" , cell);
- CRASH();
- }
-
- // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
- // I hate this sentence.
- if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo()) {
- const char* parentClassName = 0;
- const char* ourClassName = 0;
- if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo())
- parentClassName = cell->structure()->structure()->JSCell::classInfo()->className;
- if (cell->structure()->JSCell::classInfo())
- ourClassName = cell->structure()->JSCell::classInfo()->className;
- dataLog("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
- cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
- CRASH();
- }
-}
-#else
-void MarkStack::validate(JSCell*)
-{
-}
-#endif
-
} // namespace JSC
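
The drain loops removed here (and re-landed verbatim in SlotVisitor.cpp below) coordinate master and slave markers through a single invariant: marking has terminated exactly when no marker is active and the shared mark stack is empty, always evaluated under m_markingLock. A minimal sketch of that predicate (editorial illustration, not part of the patch):

    // The termination test that both MasterDrain and SlaveDrain evaluate
    // under m_markingLock before broadcasting or returning.
    bool markingTerminated(unsigned numberOfActiveParallelMarkers, size_t sharedMarkStackSize)
    {
        return !numberOfActiveParallelMarkers && !sharedMarkStackSize;
    }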
diff --git a/Source/JavaScriptCore/heap/MarkStack.h b/Source/JavaScriptCore/heap/MarkStack.h
index 54ae1cb02..0245e4be5 100644
--- a/Source/JavaScriptCore/heap/MarkStack.h
+++ b/Source/JavaScriptCore/heap/MarkStack.h
@@ -26,25 +26,6 @@
#ifndef MarkStack_h
#define MarkStack_h
-#include "CopiedSpace.h"
-#include "HandleTypes.h"
-#include "JSValue.h"
-#include "Options.h"
-#include "Register.h"
-#include "UnconditionalFinalizer.h"
-#include "VTableSpectrum.h"
-#include "WeakReferenceHarvester.h"
-#include <wtf/DataLog.h>
-#include <wtf/Forward.h>
-#include <wtf/HashMap.h>
-#include <wtf/HashSet.h>
-#include <wtf/Noncopyable.h>
-#include <wtf/OSAllocator.h>
-#include <wtf/PageBlock.h>
-#include <wtf/TCSpinLock.h>
-#include <wtf/text/StringHash.h>
-#include <wtf/Vector.h>
-
#if ENABLE(OBJECT_MARK_LOGGING)
#define MARK_LOG_MESSAGE0(message) dataLog(message)
#define MARK_LOG_MESSAGE1(message, arg1) dataLog(message, arg1)
@@ -69,276 +50,86 @@
#define MARK_LOG_CHILD(visitor, child) do { } while (false)
#endif
-namespace JSC {
-
- class ConservativeRoots;
- class JSGlobalData;
- class MarkStack;
- class GCThreadSharedData;
- class ParallelModeEnabler;
- class Register;
- class SlotVisitor;
- template<typename T> class WriteBarrierBase;
- template<typename T> class JITWriteBarrier;
-
- struct MarkStackSegment {
- MarkStackSegment* m_previous;
-#if !ASSERT_DISABLED
- size_t m_top;
-#endif
-
- const JSCell** data()
- {
- return bitwise_cast<const JSCell**>(this + 1);
- }
-
- static size_t capacityFromSize(size_t size)
- {
- return (size - sizeof(MarkStackSegment)) / sizeof(const JSCell*);
- }
-
- static size_t sizeFromCapacity(size_t capacity)
- {
- return sizeof(MarkStackSegment) + capacity * sizeof(const JSCell*);
- }
- };
-
- class MarkStackSegmentAllocator {
- public:
- MarkStackSegmentAllocator();
- ~MarkStackSegmentAllocator();
-
- MarkStackSegment* allocate();
- void release(MarkStackSegment*);
-
- void shrinkReserve();
-
- private:
- SpinLock m_lock;
- MarkStackSegment* m_nextFreeSegment;
- };
-
- class MarkStackArray {
- public:
- MarkStackArray(MarkStackSegmentAllocator&);
- ~MarkStackArray();
-
- void append(const JSCell*);
-
- bool canRemoveLast();
- const JSCell* removeLast();
- bool refill();
-
- bool isEmpty();
-
- void donateSomeCellsTo(MarkStackArray& other);
-
- void stealSomeCellsFrom(MarkStackArray& other, size_t idleThreadCount);
-
- size_t size();
-
- private:
- MarkStackSegment* m_topSegment;
-
- JS_EXPORT_PRIVATE void expand();
-
- MarkStackSegmentAllocator& m_allocator;
-
- size_t m_segmentCapacity;
- size_t m_top;
- size_t m_numberOfPreviousSegments;
-
- size_t postIncTop()
- {
- size_t result = m_top++;
- ASSERT(result == m_topSegment->m_top++);
- return result;
- }
-
- size_t preDecTop()
- {
- size_t result = --m_top;
- ASSERT(result == --m_topSegment->m_top);
- return result;
- }
-
- void setTopForFullSegment()
- {
- ASSERT(m_topSegment->m_top == m_segmentCapacity);
- m_top = m_segmentCapacity;
- }
-
- void setTopForEmptySegment()
- {
- ASSERT(!m_topSegment->m_top);
- m_top = 0;
- }
-
- size_t top()
- {
- ASSERT(m_top == m_topSegment->m_top);
- return m_top;
- }
-
-#if ASSERT_DISABLED
- void validatePrevious() { }
-#else
- void validatePrevious()
- {
- unsigned count = 0;
- for (MarkStackSegment* current = m_topSegment->m_previous; current; current = current->m_previous)
- count++;
- ASSERT(count == m_numberOfPreviousSegments);
- }
-#endif
- };
-
- class MarkStack {
- WTF_MAKE_NONCOPYABLE(MarkStack);
- friend class HeapRootVisitor; // Allowed to mark a JSValue* or JSCell** directly.
-
- public:
- MarkStack(GCThreadSharedData&);
- ~MarkStack();
-
- void append(ConservativeRoots&);
-
- template<typename T> void append(JITWriteBarrier<T>*);
- template<typename T> void append(WriteBarrierBase<T>*);
- void appendValues(WriteBarrierBase<Unknown>*, size_t count);
-
- template<typename T>
- void appendUnbarrieredPointer(T**);
- void appendUnbarrieredValue(JSValue*);
-
- void addOpaqueRoot(void*);
- bool containsOpaqueRoot(void*);
- int opaqueRootCount();
-
- GCThreadSharedData& sharedData() { return m_shared; }
- bool isEmpty() { return m_stack.isEmpty(); }
-
- void setup();
- void reset();
-
- size_t visitCount() const { return m_visitCount; }
-
-#if ENABLE(SIMPLE_HEAP_PROFILING)
- VTableSpectrum m_visitedTypeCounts;
-#endif
-
- void addWeakReferenceHarvester(WeakReferenceHarvester*);
- void addUnconditionalFinalizer(UnconditionalFinalizer*);
-
-#if ENABLE(OBJECT_MARK_LOGGING)
- inline void resetChildCount() { m_logChildCount = 0; }
- inline unsigned childCount() { return m_logChildCount; }
- inline void incrementChildCount() { m_logChildCount++; }
-#endif
+#include <wtf/StdLibExtras.h>
+#include <wtf/TCSpinLock.h>
- protected:
- JS_EXPORT_PRIVATE static void validate(JSCell*);
+namespace JSC {
- void append(JSValue*);
- void append(JSValue*, size_t count);
- void append(JSCell**);
+class JSCell;
- void internalAppend(JSCell*);
- void internalAppend(JSValue);
- void internalAppend(JSValue*);
-
- JS_EXPORT_PRIVATE void mergeOpaqueRoots();
-
- void mergeOpaqueRootsIfNecessary()
- {
- if (m_opaqueRoots.isEmpty())
- return;
- mergeOpaqueRoots();
- }
-
- void mergeOpaqueRootsIfProfitable()
- {
- if (static_cast<unsigned>(m_opaqueRoots.size()) < Options::opaqueRootMergeThreshold())
- return;
- mergeOpaqueRoots();
- }
-
- MarkStackArray m_stack;
- HashSet<void*> m_opaqueRoots; // Handle-owning data structures not visible to the garbage collector.
-
+struct MarkStackSegment {
+ MarkStackSegment* m_previous;
#if !ASSERT_DISABLED
- public:
- bool m_isCheckingForDefaultMarkViolation;
- bool m_isDraining;
+ size_t m_top;
#endif
- protected:
- friend class ParallelModeEnabler;
- size_t m_visitCount;
- bool m_isInParallelMode;
-
- GCThreadSharedData& m_shared;
-
- bool m_shouldHashConst; // Local per-thread copy of shared flag for performance reasons
- typedef HashMap<StringImpl*, JSValue> UniqueStringMap;
- UniqueStringMap m_uniqueStrings;
-
-#if ENABLE(OBJECT_MARK_LOGGING)
- unsigned m_logChildCount;
-#endif
- };
-
- inline void MarkStackArray::append(const JSCell* cell)
+ const JSCell** data()
{
- if (m_top == m_segmentCapacity)
- expand();
- m_topSegment->data()[postIncTop()] = cell;
+ return bitwise_cast<const JSCell**>(this + 1);
}
-
- inline bool MarkStackArray::canRemoveLast()
+
+ static size_t capacityFromSize(size_t size)
{
- return !!m_top;
+ return (size - sizeof(MarkStackSegment)) / sizeof(const JSCell*);
}
-
- inline const JSCell* MarkStackArray::removeLast()
+
+ static size_t sizeFromCapacity(size_t capacity)
{
- return m_topSegment->data()[preDecTop()];
+ return sizeof(MarkStackSegment) + capacity * sizeof(const JSCell*);
}
+};
- inline bool MarkStackArray::isEmpty()
- {
- if (m_top)
- return false;
- if (m_topSegment->m_previous) {
- ASSERT(m_topSegment->m_previous->m_top == m_segmentCapacity);
- return false;
- }
- return true;
- }
+class MarkStackSegmentAllocator {
+public:
+ MarkStackSegmentAllocator();
+ ~MarkStackSegmentAllocator();
+
+ MarkStackSegment* allocate();
+ void release(MarkStackSegment*);
+
+ void shrinkReserve();
+
+private:
+ SpinLock m_lock;
+ MarkStackSegment* m_nextFreeSegment;
+};
- inline size_t MarkStackArray::size()
- {
- return m_top + m_segmentCapacity * m_numberOfPreviousSegments;
- }
+class MarkStackArray {
+public:
+ MarkStackArray(MarkStackSegmentAllocator&);
+ ~MarkStackArray();
- class ParallelModeEnabler {
- public:
- ParallelModeEnabler(MarkStack& stack)
- : m_stack(stack)
- {
- ASSERT(!m_stack.m_isInParallelMode);
- m_stack.m_isInParallelMode = true;
- }
-
- ~ParallelModeEnabler()
- {
- ASSERT(m_stack.m_isInParallelMode);
- m_stack.m_isInParallelMode = false;
- }
-
- private:
- MarkStack& m_stack;
- };
+ void append(const JSCell*);
+
+ bool canRemoveLast();
+ const JSCell* removeLast();
+ bool refill();
+
+ void donateSomeCellsTo(MarkStackArray& other);
+ void stealSomeCellsFrom(MarkStackArray& other, size_t idleThreadCount);
+
+ size_t size();
+ bool isEmpty();
+
+private:
+ JS_EXPORT_PRIVATE void expand();
+
+ size_t postIncTop();
+ size_t preDecTop();
+ void setTopForFullSegment();
+ void setTopForEmptySegment();
+ size_t top();
+
+ void validatePrevious();
+
+ MarkStackSegment* m_topSegment;
+ MarkStackSegmentAllocator& m_allocator;
+
+ size_t m_segmentCapacity;
+ size_t m_top;
+ size_t m_numberOfPreviousSegments;
+
+};
} // namespace JSC
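
MarkStackSegment stores its cells immediately after the header (this + 1), so capacityFromSize() and sizeFromCapacity() are inverses, and MarkStackArray::size() is the fill of the top segment plus one full capacity per previous segment. A self-contained sketch of the same arithmetic, with a simplified header standing in for MarkStackSegment (illustration only):

    #include <cstddef>

    struct SegmentHeader { SegmentHeader* previous; }; // stand-in for MarkStackSegment

    constexpr size_t capacityFromSize(size_t size)
    {
        return (size - sizeof(SegmentHeader)) / sizeof(const void*);
    }

    constexpr size_t sizeFromCapacity(size_t capacity)
    {
        return sizeof(SegmentHeader) + capacity * sizeof(const void*);
    }

    // size() = cells in the top segment + segmentCapacity per full previous segment.
    constexpr size_t stackSize(size_t top, size_t segmentCapacity, size_t previousSegments)
    {
        return top + segmentCapacity * previousSegments;
    }

    static_assert(capacityFromSize(sizeFromCapacity(4096)) == 4096, "conversions round-trip");
    static_assert(stackSize(10, 4096, 2) == 8202, "two full segments plus ten cells");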
diff --git a/Source/JavaScriptCore/heap/MarkStackInlineMethods.h b/Source/JavaScriptCore/heap/MarkStackInlineMethods.h
index 031dfff39..d3276d7fa 100644
--- a/Source/JavaScriptCore/heap/MarkStackInlineMethods.h
+++ b/Source/JavaScriptCore/heap/MarkStackInlineMethods.h
@@ -31,94 +31,81 @@
namespace JSC {
-ALWAYS_INLINE void MarkStack::append(JSValue* slot, size_t count)
+inline size_t MarkStackArray::postIncTop()
{
- for (size_t i = 0; i < count; ++i) {
- JSValue& value = slot[i];
- internalAppend(value);
- }
+ size_t result = m_top++;
+ ASSERT(result == m_topSegment->m_top++);
+ return result;
}
-
-template<typename T>
-inline void MarkStack::appendUnbarrieredPointer(T** slot)
+
+inline size_t MarkStackArray::preDecTop()
{
- ASSERT(slot);
- JSCell* cell = *slot;
- internalAppend(cell);
+ size_t result = --m_top;
+ ASSERT(result == --m_topSegment->m_top);
+ return result;
}
-
-ALWAYS_INLINE void MarkStack::append(JSValue* slot)
+
+inline void MarkStackArray::setTopForFullSegment()
{
- ASSERT(slot);
- internalAppend(*slot);
+ ASSERT(m_topSegment->m_top == m_segmentCapacity);
+ m_top = m_segmentCapacity;
}
-ALWAYS_INLINE void MarkStack::appendUnbarrieredValue(JSValue* slot)
+inline void MarkStackArray::setTopForEmptySegment()
{
- ASSERT(slot);
- internalAppend(*slot);
+ ASSERT(!m_topSegment->m_top);
+ m_top = 0;
}
-ALWAYS_INLINE void MarkStack::append(JSCell** slot)
+inline size_t MarkStackArray::top()
{
- ASSERT(slot);
- internalAppend(*slot);
+ ASSERT(m_top == m_topSegment->m_top);
+ return m_top;
}
-ALWAYS_INLINE void MarkStack::internalAppend(JSValue value)
+#if ASSERT_DISABLED
+inline void MarkStackArray::validatePrevious() { }
+#else
+inline void MarkStackArray::validatePrevious()
{
- if (!value || !value.isCell())
- return;
- internalAppend(value.asCell());
+ unsigned count = 0;
+ for (MarkStackSegment* current = m_topSegment->m_previous; current; current = current->m_previous)
+ count++;
+ ASSERT(count == m_numberOfPreviousSegments);
}
+#endif
-inline void MarkStack::addWeakReferenceHarvester(WeakReferenceHarvester* weakReferenceHarvester)
+inline void MarkStackArray::append(const JSCell* cell)
{
- m_shared.m_weakReferenceHarvesters.addThreadSafe(weakReferenceHarvester);
+ if (m_top == m_segmentCapacity)
+ expand();
+ m_topSegment->data()[postIncTop()] = cell;
}
-inline void MarkStack::addUnconditionalFinalizer(UnconditionalFinalizer* unconditionalFinalizer)
+inline bool MarkStackArray::canRemoveLast()
{
- m_shared.m_unconditionalFinalizers.addThreadSafe(unconditionalFinalizer);
+ return !!m_top;
}
-inline void MarkStack::addOpaqueRoot(void* root)
+inline const JSCell* MarkStackArray::removeLast()
{
-#if ENABLE(PARALLEL_GC)
- if (Options::numberOfGCMarkers() == 1) {
- // Put directly into the shared HashSet.
- m_shared.m_opaqueRoots.add(root);
- return;
- }
- // Put into the local set, but merge with the shared one every once in
- // a while to make sure that the local sets don't grow too large.
- mergeOpaqueRootsIfProfitable();
- m_opaqueRoots.add(root);
-#else
- m_opaqueRoots.add(root);
-#endif
+ return m_topSegment->data()[preDecTop()];
}
-inline bool MarkStack::containsOpaqueRoot(void* root)
+inline bool MarkStackArray::isEmpty()
{
- ASSERT(!m_isInParallelMode);
-#if ENABLE(PARALLEL_GC)
- ASSERT(m_opaqueRoots.isEmpty());
- return m_shared.m_opaqueRoots.contains(root);
-#else
- return m_opaqueRoots.contains(root);
-#endif
+ if (m_top)
+ return false;
+ if (m_topSegment->m_previous) {
+ ASSERT(m_topSegment->m_previous->m_top == m_segmentCapacity);
+ return false;
+ }
+ return true;
}
-inline int MarkStack::opaqueRootCount()
+inline size_t MarkStackArray::size()
{
- ASSERT(!m_isInParallelMode);
-#if ENABLE(PARALLEL_GC)
- ASSERT(m_opaqueRoots.isEmpty());
- return m_shared.m_opaqueRoots.size();
-#else
- return m_opaqueRoots.size();
-#endif
+ return m_top + m_segmentCapacity * m_numberOfPreviousSegments;
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/MarkedAllocator.cpp b/Source/JavaScriptCore/heap/MarkedAllocator.cpp
index 20b556969..ab37ead4c 100644
--- a/Source/JavaScriptCore/heap/MarkedAllocator.cpp
+++ b/Source/JavaScriptCore/heap/MarkedAllocator.cpp
@@ -27,7 +27,7 @@ bool MarkedAllocator::isPagedOut(double deadline)
return false;
}
-inline void* MarkedAllocator::tryAllocateHelper()
+inline void* MarkedAllocator::tryAllocateHelper(size_t bytes)
{
if (!m_freeList.head) {
if (m_onlyContainsStructures && !m_heap->isSafeToSweepStructures()) {
@@ -42,12 +42,20 @@ inline void* MarkedAllocator::tryAllocateHelper()
}
for (MarkedBlock*& block = m_blocksToSweep; block; block = block->next()) {
- m_freeList = block->sweep(MarkedBlock::SweepToFreeList);
- if (m_freeList.head) {
- m_currentBlock = block;
- break;
+ MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);
+ if (!freeList.head) {
+ block->didConsumeFreeList();
+ continue;
}
- block->didConsumeFreeList();
+
+ if (bytes > block->cellSize()) {
+ block->zapFreeList(freeList);
+ continue;
+ }
+
+ m_currentBlock = block;
+ m_freeList = freeList;
+ break;
}
if (!m_freeList.head) {
@@ -62,16 +70,16 @@ inline void* MarkedAllocator::tryAllocateHelper()
return head;
}
-inline void* MarkedAllocator::tryAllocate()
+inline void* MarkedAllocator::tryAllocate(size_t bytes)
{
ASSERT(!m_heap->isBusy());
m_heap->m_operationInProgress = Allocation;
- void* result = tryAllocateHelper();
+ void* result = tryAllocateHelper(bytes);
m_heap->m_operationInProgress = NoOperation;
return result;
}
-void* MarkedAllocator::allocateSlowCase()
+void* MarkedAllocator::allocateSlowCase(size_t bytes)
{
ASSERT(m_heap->globalData()->apiLock().currentThreadIsHoldingLock());
#if COLLECT_ON_EVERY_ALLOCATION
@@ -82,7 +90,7 @@ void* MarkedAllocator::allocateSlowCase()
ASSERT(!m_freeList.head);
m_heap->didAllocate(m_freeList.bytes);
- void* result = tryAllocate();
+ void* result = tryAllocate(bytes);
if (LIKELY(result != 0))
return result;
@@ -90,27 +98,39 @@ void* MarkedAllocator::allocateSlowCase()
if (m_heap->shouldCollect()) {
m_heap->collect(Heap::DoNotSweep);
- result = tryAllocate();
+ result = tryAllocate(bytes);
if (result)
return result;
}
ASSERT(!m_heap->shouldCollect());
- MarkedBlock* block = allocateBlock();
+ MarkedBlock* block = allocateBlock(bytes);
ASSERT(block);
addBlock(block);
- result = tryAllocate();
+ result = tryAllocate(bytes);
ASSERT(result);
return result;
}
-MarkedBlock* MarkedAllocator::allocateBlock()
+MarkedBlock* MarkedAllocator::allocateBlock(size_t bytes)
{
- MarkedBlock* block = MarkedBlock::create(m_heap->blockAllocator().allocate(), m_heap, m_cellSize, m_cellsNeedDestruction, m_onlyContainsStructures);
- m_markedSpace->didAddBlock(block);
- return block;
+ size_t minBlockSize = MarkedBlock::blockSize;
+ size_t minAllocationSize = WTF::roundUpToMultipleOf(WTF::pageSize(), sizeof(MarkedBlock) + bytes);
+ size_t blockSize = std::max(minBlockSize, minAllocationSize);
+
+ size_t cellSize = m_cellSize ? m_cellSize : WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(bytes);
+
+ if (blockSize == MarkedBlock::blockSize) {
+ PageAllocationAligned allocation = m_heap->blockAllocator().allocate();
+ return MarkedBlock::create(allocation, m_heap, cellSize, m_cellsNeedDestruction, m_onlyContainsStructures);
+ }
+
+ PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, MarkedBlock::blockSize, OSAllocator::JSGCHeapPages);
+ if (!static_cast<bool>(allocation))
+ CRASH();
+ return MarkedBlock::create(allocation, m_heap, cellSize, m_cellsNeedDestruction, m_onlyContainsStructures);
}
void MarkedAllocator::addBlock(MarkedBlock* block)
@@ -121,6 +141,7 @@ void MarkedAllocator::addBlock(MarkedBlock* block)
m_blockList.append(block);
m_blocksToSweep = m_currentBlock = block;
m_freeList = block->sweep(MarkedBlock::SweepToFreeList);
+ m_markedSpace->didAddBlock(block);
}
void MarkedAllocator::removeBlock(MarkedBlock* block)
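
allocateBlock(size_t) now sizes blocks in two regimes: ordinary requests reuse the fixed MarkedBlock::blockSize served by the shared block allocator, while an oversize cell gets a dedicated block just large enough for header plus cell, rounded up to a whole page. A sketch of the sizing rule under assumed constants (the real values come from MarkedBlock and WTF::pageSize()):

    #include <algorithm>
    #include <cstddef>

    // Assumed for illustration only:
    constexpr size_t kBlockSize  = 64 * 1024; // stand-in for MarkedBlock::blockSize
    constexpr size_t kPageSize   = 4 * 1024;  // stand-in for WTF::pageSize()
    constexpr size_t kHeaderSize = 256;       // stand-in for sizeof(MarkedBlock)

    constexpr size_t roundUpToMultipleOf(size_t divisor, size_t x)
    {
        return (x + divisor - 1) / divisor * divisor;
    }

    // Normal requests share standard blocks; oversize cells get a block
    // just large enough for one cell, rounded up to a whole page.
    constexpr size_t blockSizeFor(size_t bytes)
    {
        return std::max(kBlockSize, roundUpToMultipleOf(kPageSize, kHeaderSize + bytes));
    }

    static_assert(blockSizeFor(64) == kBlockSize, "small cell: standard block");
    static_assert(blockSizeFor(100 * 1024) == 104 * 1024, "100KB cell: dedicated 104KB block");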
diff --git a/Source/JavaScriptCore/heap/MarkedAllocator.h b/Source/JavaScriptCore/heap/MarkedAllocator.h
index c1c431194..7273c13e4 100644
--- a/Source/JavaScriptCore/heap/MarkedAllocator.h
+++ b/Source/JavaScriptCore/heap/MarkedAllocator.h
@@ -25,7 +25,7 @@ public:
size_t cellSize() { return m_cellSize; }
bool cellsNeedDestruction() { return m_cellsNeedDestruction; }
bool onlyContainsStructures() { return m_onlyContainsStructures; }
- void* allocate();
+ void* allocate(size_t);
Heap* heap() { return m_heap; }
template<typename Functor> void forEachBlock(Functor&);
@@ -39,10 +39,10 @@ public:
private:
friend class LLIntOffsetsExtractor;
- JS_EXPORT_PRIVATE void* allocateSlowCase();
- void* tryAllocate();
- void* tryAllocateHelper();
- MarkedBlock* allocateBlock();
+ JS_EXPORT_PRIVATE void* allocateSlowCase(size_t);
+ void* tryAllocate(size_t);
+ void* tryAllocateHelper(size_t);
+ MarkedBlock* allocateBlock(size_t);
MarkedBlock::FreeList m_freeList;
MarkedBlock* m_currentBlock;
@@ -75,12 +75,11 @@ inline void MarkedAllocator::init(Heap* heap, MarkedSpace* markedSpace, size_t c
m_onlyContainsStructures = onlyContainsStructures;
}
-inline void* MarkedAllocator::allocate()
+inline void* MarkedAllocator::allocate(size_t bytes)
{
MarkedBlock::FreeCell* head = m_freeList.head;
- // This is a light-weight fast path to cover the most common case.
if (UNLIKELY(!head))
- return allocateSlowCase();
+ return allocateSlowCase(bytes);
m_freeList.head = head->next;
return head;
diff --git a/Source/JavaScriptCore/heap/MarkedSpace.cpp b/Source/JavaScriptCore/heap/MarkedSpace.cpp
index 68b059c36..689e5f9ab 100644
--- a/Source/JavaScriptCore/heap/MarkedSpace.cpp
+++ b/Source/JavaScriptCore/heap/MarkedSpace.cpp
@@ -90,6 +90,7 @@ MarkedSpace::MarkedSpace(Heap* heap)
destructorAllocatorFor(cellSize).init(heap, this, cellSize, true, false);
}
+ m_largeAllocator.init(heap, this, 0, true, false);
m_structureAllocator.init(heap, this, WTF::roundUpToMultipleOf(32, sizeof(Structure)), true, true);
}
@@ -127,6 +128,7 @@ void MarkedSpace::resetAllocators()
destructorAllocatorFor(cellSize).reset();
}
+ m_largeAllocator.reset();
m_structureAllocator.reset();
}
@@ -153,6 +155,7 @@ void MarkedSpace::canonicalizeCellLivenessData()
destructorAllocatorFor(cellSize).zapFreeList();
}
+ m_largeAllocator.zapFreeList();
m_structureAllocator.zapFreeList();
}
@@ -168,6 +171,9 @@ bool MarkedSpace::isPagedOut(double deadline)
return true;
}
+ if (m_largeAllocator.isPagedOut(deadline))
+ return true;
+
if (m_structureAllocator.isPagedOut(deadline))
return true;
@@ -178,7 +184,12 @@ void MarkedSpace::freeBlock(MarkedBlock* block)
{
allocatorFor(block).removeBlock(block);
m_blocks.remove(block);
- m_heap->blockAllocator().deallocate(MarkedBlock::destroy(block));
+ if (block->capacity() == MarkedBlock::blockSize) {
+ m_heap->blockAllocator().deallocate(MarkedBlock::destroy(block));
+ return;
+ }
+
+ MarkedBlock::destroy(block).deallocate();
}
void MarkedSpace::freeOrShrinkBlock(MarkedBlock* block)
diff --git a/Source/JavaScriptCore/heap/MarkedSpace.h b/Source/JavaScriptCore/heap/MarkedSpace.h
index d5dae3584..03679d9d3 100644
--- a/Source/JavaScriptCore/heap/MarkedSpace.h
+++ b/Source/JavaScriptCore/heap/MarkedSpace.h
@@ -80,7 +80,7 @@ public:
MarkedAllocator& destructorAllocatorFor(size_t);
void* allocateWithDestructor(size_t);
void* allocateWithoutDestructor(size_t);
- void* allocateStructure();
+ void* allocateStructure(size_t);
void resetAllocators();
@@ -115,15 +115,15 @@ public:
private:
friend class LLIntOffsetsExtractor;
-
- // [ 32... 256 ]
+
+ // [ 32... 512 ]
static const size_t preciseStep = MarkedBlock::atomSize;
- static const size_t preciseCutoff = 256;
+ static const size_t preciseCutoff = 512;
static const size_t preciseCount = preciseCutoff / preciseStep;
- // [ 512... 2048 ]
- static const size_t impreciseStep = preciseCutoff;
- static const size_t impreciseCutoff = maxCellSize;
+ // [ 1024... blockSize/2 ]
+ static const size_t impreciseStep = 2 * preciseCutoff;
+ static const size_t impreciseCutoff = MarkedBlock::blockSize / 2;
static const size_t impreciseCount = impreciseCutoff / impreciseStep;
struct Subspace {
@@ -133,6 +133,7 @@ private:
Subspace m_destructorSpace;
Subspace m_normalSpace;
+ MarkedAllocator m_largeAllocator;
MarkedAllocator m_structureAllocator;
Heap* m_heap;
@@ -162,10 +163,12 @@ inline MarkedAllocator& MarkedSpace::firstAllocator()
inline MarkedAllocator& MarkedSpace::allocatorFor(size_t bytes)
{
- ASSERT(bytes && bytes <= maxCellSize);
+ ASSERT(bytes);
if (bytes <= preciseCutoff)
return m_normalSpace.preciseAllocators[(bytes - 1) / preciseStep];
- return m_normalSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
+ if (bytes <= impreciseCutoff)
+ return m_normalSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
+ return m_largeAllocator;
}
inline MarkedAllocator& MarkedSpace::allocatorFor(MarkedBlock* block)
@@ -181,25 +184,27 @@ inline MarkedAllocator& MarkedSpace::allocatorFor(MarkedBlock* block)
inline MarkedAllocator& MarkedSpace::destructorAllocatorFor(size_t bytes)
{
- ASSERT(bytes && bytes <= maxCellSize);
+ ASSERT(bytes);
if (bytes <= preciseCutoff)
return m_destructorSpace.preciseAllocators[(bytes - 1) / preciseStep];
- return m_destructorSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
+ if (bytes <= impreciseCutoff)
+ return m_destructorSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
+ return m_largeAllocator;
}
inline void* MarkedSpace::allocateWithoutDestructor(size_t bytes)
{
- return allocatorFor(bytes).allocate();
+ return allocatorFor(bytes).allocate(bytes);
}
inline void* MarkedSpace::allocateWithDestructor(size_t bytes)
{
- return destructorAllocatorFor(bytes).allocate();
+ return destructorAllocatorFor(bytes).allocate(bytes);
}
-inline void* MarkedSpace::allocateStructure()
+inline void* MarkedSpace::allocateStructure(size_t bytes)
{
- return m_structureAllocator.allocate();
+ return m_structureAllocator.allocate(bytes);
}
template <typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachBlock(Functor& functor)
@@ -214,6 +219,7 @@ template <typename Functor> inline typename Functor::ReturnType MarkedSpace::for
m_destructorSpace.impreciseAllocators[i].forEachBlock(functor);
}
+ m_largeAllocator.forEachBlock(functor);
m_structureAllocator.forEachBlock(functor);
return functor.returnValue();
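
With the new cutoffs, precise classes serve requests up to 512 bytes in atomSize steps, imprecise classes serve up to half a block in 1KB steps, and anything larger routes to m_largeAllocator, which is why the old bytes <= maxCellSize assertion could be dropped. A sketch of the routing, assuming a 16-byte atom and 64KB blocks for illustration:

    #include <cstddef>

    // Assumed for illustration: 16-byte atoms and 64KB blocks.
    constexpr size_t preciseStep     = 16;                // MarkedBlock::atomSize (assumed)
    constexpr size_t preciseCutoff   = 512;
    constexpr size_t impreciseStep   = 2 * preciseCutoff; // 1024
    constexpr size_t impreciseCutoff = (64 * 1024) / 2;   // MarkedBlock::blockSize / 2 (assumed)

    enum class Route { Precise, Imprecise, Large };

    constexpr Route routeFor(size_t bytes)
    {
        return bytes <= preciseCutoff   ? Route::Precise
             : bytes <= impreciseCutoff ? Route::Imprecise
             : Route::Large;
    }

    static_assert(routeFor(48) == Route::Precise, "48 bytes: precise allocator (48 - 1) / preciseStep");
    static_assert(routeFor(4096) == Route::Imprecise, "4KB: imprecise allocator");
    static_assert(routeFor(40 * 1024) == Route::Large, "beyond blockSize / 2: m_largeAllocator");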
diff --git a/Source/JavaScriptCore/heap/SlotVisitor.cpp b/Source/JavaScriptCore/heap/SlotVisitor.cpp
new file mode 100644
index 000000000..0f003e79d
--- /dev/null
+++ b/Source/JavaScriptCore/heap/SlotVisitor.cpp
@@ -0,0 +1,412 @@
+#include "config.h"
+#include "SlotVisitor.h"
+
+#include "ConservativeRoots.h"
+#include "CopiedSpace.h"
+#include "CopiedSpaceInlineMethods.h"
+#include "JSArray.h"
+#include "JSGlobalData.h"
+#include "JSObject.h"
+#include "JSString.h"
+
+namespace JSC {
+
+SlotVisitor::SlotVisitor(GCThreadSharedData& shared)
+ : m_stack(shared.m_segmentAllocator)
+ , m_visitCount(0)
+ , m_isInParallelMode(false)
+ , m_shared(shared)
+ , m_shouldHashConst(false)
+#if !ASSERT_DISABLED
+ , m_isCheckingForDefaultMarkViolation(false)
+ , m_isDraining(false)
+#endif
+{
+}
+
+SlotVisitor::~SlotVisitor()
+{
+ ASSERT(m_stack.isEmpty());
+}
+
+void SlotVisitor::setup()
+{
+ m_shared.m_shouldHashConst = m_shared.m_globalData->haveEnoughNewStringsToHashConst();
+ m_shouldHashConst = m_shared.m_shouldHashConst;
+#if ENABLE(PARALLEL_GC)
+ for (unsigned i = 0; i < m_shared.m_markingThreadsMarkStack.size(); ++i)
+ m_shared.m_markingThreadsMarkStack[i]->m_shouldHashConst = m_shared.m_shouldHashConst;
+#endif
+}
+
+void SlotVisitor::reset()
+{
+ m_visitCount = 0;
+ ASSERT(m_stack.isEmpty());
+#if ENABLE(PARALLEL_GC)
+ ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
+#else
+ m_opaqueRoots.clear();
+#endif
+ if (m_shouldHashConst) {
+ m_uniqueStrings.clear();
+ m_shouldHashConst = false;
+ }
+}
+
+void SlotVisitor::append(ConservativeRoots& conservativeRoots)
+{
+ JSCell** roots = conservativeRoots.roots();
+ size_t size = conservativeRoots.size();
+ for (size_t i = 0; i < size; ++i)
+ internalAppend(roots[i]);
+}
+
+ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell)
+{
+#if ENABLE(SIMPLE_HEAP_PROFILING)
+ visitor.m_visitedTypeCounts.count(cell);
+#endif
+
+ ASSERT(Heap::isMarked(cell));
+
+ if (isJSString(cell)) {
+ JSString::visitChildren(const_cast<JSCell*>(cell), visitor);
+ return;
+ }
+
+ if (isJSFinalObject(cell)) {
+ JSFinalObject::visitChildren(const_cast<JSCell*>(cell), visitor);
+ return;
+ }
+
+ if (isJSArray(cell)) {
+ JSArray::visitChildren(const_cast<JSCell*>(cell), visitor);
+ return;
+ }
+
+ cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
+}
+
+void SlotVisitor::donateKnownParallel()
+{
+ // NOTE: Because we re-try often, we can afford to be conservative, and
+ // assume that donating is not profitable.
+
+ // Avoid locking when a thread reaches a dead end in the object graph.
+ if (m_stack.size() < 2)
+ return;
+
+ // If there's already some shared work queued up, be conservative and assume
+ // that donating more is not profitable.
+ if (m_shared.m_sharedMarkStack.size())
+ return;
+
+ // If we're contending on the lock, be conservative and assume that another
+ // thread is already donating.
+ MutexTryLocker locker(m_shared.m_markingLock);
+ if (!locker.locked())
+ return;
+
+ // Otherwise, assume that a thread will go idle soon, and donate.
+ m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack);
+
+ if (m_shared.m_numberOfActiveParallelMarkers < Options::numberOfGCMarkers())
+ m_shared.m_markingCondition.broadcast();
+}
+
+void SlotVisitor::drain()
+{
+ ASSERT(m_isInParallelMode);
+
+#if ENABLE(PARALLEL_GC)
+ if (Options::numberOfGCMarkers() > 1) {
+ while (!m_stack.isEmpty()) {
+ m_stack.refill();
+ for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_stack.canRemoveLast() && countdown--;)
+ visitChildren(*this, m_stack.removeLast());
+ donateKnownParallel();
+ }
+
+ mergeOpaqueRootsIfNecessary();
+ return;
+ }
+#endif
+
+ while (!m_stack.isEmpty()) {
+ m_stack.refill();
+ while (m_stack.canRemoveLast())
+ visitChildren(*this, m_stack.removeLast());
+ }
+}
+
+void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
+{
+ ASSERT(m_isInParallelMode);
+
+ ASSERT(Options::numberOfGCMarkers());
+
+ bool shouldBeParallel;
+
+#if ENABLE(PARALLEL_GC)
+ shouldBeParallel = Options::numberOfGCMarkers() > 1;
+#else
+ ASSERT(Options::numberOfGCMarkers() == 1);
+ shouldBeParallel = false;
+#endif
+
+ if (!shouldBeParallel) {
+ // This call should be a no-op.
+ ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain);
+ ASSERT(m_stack.isEmpty());
+ ASSERT(m_shared.m_sharedMarkStack.isEmpty());
+ return;
+ }
+
+#if ENABLE(PARALLEL_GC)
+ {
+ MutexLocker locker(m_shared.m_markingLock);
+ m_shared.m_numberOfActiveParallelMarkers++;
+ }
+ while (true) {
+ {
+ MutexLocker locker(m_shared.m_markingLock);
+ m_shared.m_numberOfActiveParallelMarkers--;
+
+ // How we wait differs depending on drain mode.
+ if (sharedDrainMode == MasterDrain) {
+ // Wait until either termination is reached, or until there is some work
+ // for us to do.
+ while (true) {
+ // Did we reach termination?
+ if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) {
+ // Let any sleeping slaves know it's time for them to give their private CopiedBlocks back
+ m_shared.m_markingCondition.broadcast();
+ return;
+ }
+
+ // Is there work to be done?
+ if (!m_shared.m_sharedMarkStack.isEmpty())
+ break;
+
+ // Otherwise wait.
+ m_shared.m_markingCondition.wait(m_shared.m_markingLock);
+ }
+ } else {
+ ASSERT(sharedDrainMode == SlaveDrain);
+
+ // Did we detect termination? If so, let the master know.
+ if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
+ m_shared.m_markingCondition.broadcast();
+
+ while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit) {
+ if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
+ doneCopying();
+ m_shared.m_markingCondition.wait(m_shared.m_markingLock);
+ }
+
+ // Is the VM exiting? If so, exit this thread.
+ if (m_shared.m_parallelMarkersShouldExit) {
+ doneCopying();
+ return;
+ }
+ }
+
+ size_t idleThreadCount = Options::numberOfGCMarkers() - m_shared.m_numberOfActiveParallelMarkers;
+ m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack, idleThreadCount);
+ m_shared.m_numberOfActiveParallelMarkers++;
+ }
+
+ drain();
+ }
+#endif
+}
+
+void SlotVisitor::mergeOpaqueRoots()
+{
+ ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
+ {
+ MutexLocker locker(m_shared.m_opaqueRootsLock);
+ HashSet<void*>::iterator begin = m_opaqueRoots.begin();
+ HashSet<void*>::iterator end = m_opaqueRoots.end();
+ for (HashSet<void*>::iterator iter = begin; iter != end; ++iter)
+ m_shared.m_opaqueRoots.add(*iter);
+ }
+ m_opaqueRoots.clear();
+}
+
+void SlotVisitor::startCopying()
+{
+ ASSERT(!m_copiedAllocator.isValid());
+}
+
+void* SlotVisitor::allocateNewSpaceSlow(size_t bytes)
+{
+ m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock());
+ m_copiedAllocator.setCurrentBlock(m_shared.m_copiedSpace->allocateBlockForCopyingPhase());
+
+ void* result = 0;
+ CheckedBoolean didSucceed = m_copiedAllocator.tryAllocate(bytes, &result);
+ ASSERT(didSucceed);
+ return result;
+}
+
+void* SlotVisitor::allocateNewSpaceOrPin(void* ptr, size_t bytes)
+{
+ if (!checkIfShouldCopyAndPinOtherwise(ptr, bytes))
+ return 0;
+
+ return allocateNewSpace(bytes);
+}
+
+ALWAYS_INLINE bool JSString::tryHashConstLock()
+{
+#if ENABLE(PARALLEL_GC)
+ unsigned currentFlags = m_flags;
+
+ if (currentFlags & HashConstLock)
+ return false;
+
+ unsigned newFlags = currentFlags | HashConstLock;
+
+ if (!WTF::weakCompareAndSwap(&m_flags, currentFlags, newFlags))
+ return false;
+
+ WTF::memoryBarrierAfterLock();
+ return true;
+#else
+ if (isHashConstSingleton())
+ return false;
+
+ m_flags |= HashConstLock;
+
+ return true;
+#endif
+}
+
+ALWAYS_INLINE void JSString::releaseHashConstLock()
+{
+#if ENABLE(PARALLEL_GC)
+ WTF::memoryBarrierBeforeUnlock();
+#endif
+ m_flags &= ~HashConstLock;
+}
+
+ALWAYS_INLINE bool JSString::shouldTryHashConst()
+{
+ return ((length() > 1) && !isRope() && !isHashConstSingleton());
+}
+
+ALWAYS_INLINE void SlotVisitor::internalAppend(JSValue* slot)
+{
+ // This internalAppend is only intended for visits to object and array backing stores,
+ // as it can change the JSValue pointed to by the argument when the original JSValue
+ // is a string that contains the same contents as another string.
+
+ ASSERT(slot);
+ JSValue value = *slot;
+ ASSERT(value);
+ if (!value.isCell())
+ return;
+
+ JSCell* cell = value.asCell();
+ if (!cell)
+ return;
+
+ if (m_shouldHashConst && cell->isString()) {
+ JSString* string = jsCast<JSString*>(cell);
+ if (string->shouldTryHashConst() && string->tryHashConstLock()) {
+ UniqueStringMap::AddResult addResult = m_uniqueStrings.add(string->string().impl(), value);
+ if (addResult.isNewEntry)
+ string->setHashConstSingleton();
+ else {
+ JSValue existingJSValue = addResult.iterator->second;
+ if (value != existingJSValue)
+ jsCast<JSString*>(existingJSValue.asCell())->clearHashConstSingleton();
+ *slot = existingJSValue;
+ string->releaseHashConstLock();
+ return;
+ }
+ string->releaseHashConstLock();
+ }
+ }
+
+ internalAppend(cell);
+}
+
+void SlotVisitor::copyAndAppend(void** ptr, size_t bytes, JSValue* values, unsigned length)
+{
+ void* oldPtr = *ptr;
+ void* newPtr = allocateNewSpaceOrPin(oldPtr, bytes);
+ if (newPtr) {
+ size_t jsValuesOffset = static_cast<size_t>(reinterpret_cast<char*>(values) - static_cast<char*>(oldPtr));
+
+ JSValue* newValues = reinterpret_cast_ptr<JSValue*>(static_cast<char*>(newPtr) + jsValuesOffset);
+ for (unsigned i = 0; i < length; i++) {
+ JSValue& value = values[i];
+ newValues[i] = value;
+ if (!value)
+ continue;
+ internalAppend(&newValues[i]);
+ }
+
+ memcpy(newPtr, oldPtr, jsValuesOffset);
+ *ptr = newPtr;
+ } else
+ append(values, length);
+}
+
+void SlotVisitor::doneCopying()
+{
+ if (!m_copiedAllocator.isValid())
+ return;
+
+ m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock());
+}
+
+void SlotVisitor::harvestWeakReferences()
+{
+ for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
+ current->visitWeakReferences(*this);
+}
+
+void SlotVisitor::finalizeUnconditionalFinalizers()
+{
+ while (m_shared.m_unconditionalFinalizers.hasNext())
+ m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
+}
+
+#if ENABLE(GC_VALIDATION)
+void SlotVisitor::validate(JSCell* cell)
+{
+ if (!cell) {
+ dataLog("cell is NULL\n");
+ CRASH();
+ }
+
+ if (!cell->structure()) {
+ dataLog("cell at %p has a null structure\n" , cell);
+ CRASH();
+ }
+
+ // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
+ // I hate this sentence.
+ if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo()) {
+ const char* parentClassName = 0;
+ const char* ourClassName = 0;
+ if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo())
+ parentClassName = cell->structure()->structure()->JSCell::classInfo()->className;
+ if (cell->structure()->JSCell::classInfo())
+ ourClassName = cell->structure()->JSCell::classInfo()->className;
+ dataLog("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
+ cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
+ CRASH();
+ }
+}
+#else
+void SlotVisitor::validate(JSCell*)
+{
+}
+#endif
+
+} // namespace JSC
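
The hash-const path in internalAppend(JSValue*) deduplicates strings as they are visited: the first string with given contents becomes the singleton, and later slots holding equal strings are rewritten to point at it (hence the JSValue* argument). The same idea outside the GC, as a standalone sketch with std::string standing in for JSString:

    #include <string>
    #include <unordered_map>

    // Editorial sketch of visit-time string deduplication. The map plays
    // the role of m_uniqueStrings; Slot stands in for a JSValue* slot.
    struct Slot { const std::string* value; };

    using UniqueStrings = std::unordered_map<std::string, const std::string*>;

    void visitSlot(UniqueStrings& uniqueStrings, Slot& slot)
    {
        auto result = uniqueStrings.emplace(*slot.value, slot.value);
        if (!result.second)
            slot.value = result.first->second; // redirect to the existing singleton
    }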
diff --git a/Source/JavaScriptCore/heap/SlotVisitor.h b/Source/JavaScriptCore/heap/SlotVisitor.h
index 6364b23e4..230ed3334 100644
--- a/Source/JavaScriptCore/heap/SlotVisitor.h
+++ b/Source/JavaScriptCore/heap/SlotVisitor.h
@@ -27,35 +27,52 @@
#define SlotVisitor_h
#include "CopiedSpace.h"
-#include "MarkStack.h"
+#include "HandleTypes.h"
#include "MarkStackInlineMethods.h"
+#include <wtf/text/StringHash.h>
+
namespace JSC {
-class Heap;
+class ConservativeRoots;
class GCThreadSharedData;
+class Heap;
+template<typename T> class WriteBarrierBase;
+template<typename T> class JITWriteBarrier;
+
+class SlotVisitor {
+ WTF_MAKE_NONCOPYABLE(SlotVisitor);
+ friend class HeapRootVisitor; // Allowed to mark a JSValue* or JSCell** directly.
-class SlotVisitor : public MarkStack {
- friend class HeapRootVisitor;
public:
SlotVisitor(GCThreadSharedData&);
+ ~SlotVisitor();
- void donate()
- {
- ASSERT(m_isInParallelMode);
- if (Options::numberOfGCMarkers() == 1)
- return;
-
- donateKnownParallel();
- }
+ void append(ConservativeRoots&);
- void drain();
+ template<typename T> void append(JITWriteBarrier<T>*);
+ template<typename T> void append(WriteBarrierBase<T>*);
+ void appendValues(WriteBarrierBase<Unknown>*, size_t count);
- void donateAndDrain()
- {
- donate();
- drain();
- }
+ template<typename T>
+ void appendUnbarrieredPointer(T**);
+ void appendUnbarrieredValue(JSValue*);
+
+ void addOpaqueRoot(void*);
+ bool containsOpaqueRoot(void*);
+ int opaqueRootCount();
+
+ GCThreadSharedData& sharedData() { return m_shared; }
+ bool isEmpty() { return m_stack.isEmpty(); }
+
+ void setup();
+ void reset();
+
+ size_t visitCount() const { return m_visitCount; }
+
+ void donate();
+ void drain();
+ void donateAndDrain();
enum SharedDrainMode { SlaveDrain, MasterDrain };
void drainFromShared(SharedDrainMode);
@@ -78,19 +95,84 @@ public:
void doneCopying();
+#if ENABLE(SIMPLE_HEAP_PROFILING)
+ VTableSpectrum m_visitedTypeCounts;
+#endif
+
+ void addWeakReferenceHarvester(WeakReferenceHarvester*);
+ void addUnconditionalFinalizer(UnconditionalFinalizer*);
+
+#if ENABLE(OBJECT_MARK_LOGGING)
+ inline void resetChildCount() { m_logChildCount = 0; }
+ inline unsigned childCount() { return m_logChildCount; }
+ inline void incrementChildCount() { m_logChildCount++; }
+#endif
+
private:
+ friend class ParallelModeEnabler;
+
+ JS_EXPORT_PRIVATE static void validate(JSCell*);
+
+ void append(JSValue*);
+ void append(JSValue*, size_t count);
+ void append(JSCell**);
+
+ void internalAppend(JSCell*);
+ void internalAppend(JSValue);
+ void internalAppend(JSValue*);
+
+ JS_EXPORT_PRIVATE void mergeOpaqueRoots();
+ void mergeOpaqueRootsIfNecessary();
+ void mergeOpaqueRootsIfProfitable();
+
void* allocateNewSpaceOrPin(void*, size_t);
void* allocateNewSpaceSlow(size_t);
void donateKnownParallel();
+ MarkStackArray m_stack;
+ HashSet<void*> m_opaqueRoots; // Handle-owning data structures not visible to the garbage collector.
+
+ size_t m_visitCount;
+ bool m_isInParallelMode;
+
+ GCThreadSharedData& m_shared;
+
+ bool m_shouldHashConst; // Local per-thread copy of shared flag for performance reasons
+ typedef HashMap<StringImpl*, JSValue> UniqueStringMap;
+ UniqueStringMap m_uniqueStrings;
+
+#if ENABLE(OBJECT_MARK_LOGGING)
+ unsigned m_logChildCount;
+#endif
+
CopiedAllocator m_copiedAllocator;
+
+public:
+#if !ASSERT_DISABLED
+ bool m_isCheckingForDefaultMarkViolation;
+ bool m_isDraining;
+#endif
};
-inline SlotVisitor::SlotVisitor(GCThreadSharedData& shared)
- : MarkStack(shared)
-{
-}
+class ParallelModeEnabler {
+public:
+ ParallelModeEnabler(SlotVisitor& stack)
+ : m_stack(stack)
+ {
+ ASSERT(!m_stack.m_isInParallelMode);
+ m_stack.m_isInParallelMode = true;
+ }
+
+ ~ParallelModeEnabler()
+ {
+ ASSERT(m_stack.m_isInParallelMode);
+ m_stack.m_isInParallelMode = false;
+ }
+
+private:
+ SlotVisitor& m_stack;
+};
} // namespace JSC
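
ParallelModeEnabler is an RAII guard: it sets m_isInParallelMode for the lifetime of a scope and clears it in the destructor, even on early return. A hypothetical call site (the marking loop itself is not part of this patch, so the surroundings are assumed):

    {
        ParallelModeEnabler enabler(visitor);              // asserts we were not already parallel
        visitor.donateAndDrain();                          // share work, then drain our own stack
        visitor.drainFromShared(SlotVisitor::MasterDrain); // wait for slave markers to finish
    }                                                      // destructor clears m_isInParallelMode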
diff --git a/Source/JavaScriptCore/heap/SlotVisitorInlineMethods.h b/Source/JavaScriptCore/heap/SlotVisitorInlineMethods.h
index f02564e10..540da3bc4 100644
--- a/Source/JavaScriptCore/heap/SlotVisitorInlineMethods.h
+++ b/Source/JavaScriptCore/heap/SlotVisitorInlineMethods.h
@@ -27,10 +27,115 @@
#define SlotVisitorInlineMethods_h
#include "CopiedSpaceInlineMethods.h"
+#include "Options.h"
#include "SlotVisitor.h"
namespace JSC {
+ALWAYS_INLINE void SlotVisitor::append(JSValue* slot, size_t count)
+{
+ for (size_t i = 0; i < count; ++i) {
+ JSValue& value = slot[i];
+ internalAppend(value);
+ }
+}
+
+template<typename T>
+inline void SlotVisitor::appendUnbarrieredPointer(T** slot)
+{
+ ASSERT(slot);
+ JSCell* cell = *slot;
+ internalAppend(cell);
+}
+
+ALWAYS_INLINE void SlotVisitor::append(JSValue* slot)
+{
+ ASSERT(slot);
+ internalAppend(*slot);
+}
+
+ALWAYS_INLINE void SlotVisitor::appendUnbarrieredValue(JSValue* slot)
+{
+ ASSERT(slot);
+ internalAppend(*slot);
+}
+
+ALWAYS_INLINE void SlotVisitor::append(JSCell** slot)
+{
+ ASSERT(slot);
+ internalAppend(*slot);
+}
+
+ALWAYS_INLINE void SlotVisitor::internalAppend(JSValue value)
+{
+ if (!value || !value.isCell())
+ return;
+ internalAppend(value.asCell());
+}
+
+inline void SlotVisitor::addWeakReferenceHarvester(WeakReferenceHarvester* weakReferenceHarvester)
+{
+ m_shared.m_weakReferenceHarvesters.addThreadSafe(weakReferenceHarvester);
+}
+
+inline void SlotVisitor::addUnconditionalFinalizer(UnconditionalFinalizer* unconditionalFinalizer)
+{
+ m_shared.m_unconditionalFinalizers.addThreadSafe(unconditionalFinalizer);
+}
+
+inline void SlotVisitor::addOpaqueRoot(void* root)
+{
+#if ENABLE(PARALLEL_GC)
+ if (Options::numberOfGCMarkers() == 1) {
+ // Put directly into the shared HashSet.
+ m_shared.m_opaqueRoots.add(root);
+ return;
+ }
+ // Put into the local set, but merge with the shared one every once in
+ // a while to make sure that the local sets don't grow too large.
+ mergeOpaqueRootsIfProfitable();
+ m_opaqueRoots.add(root);
+#else
+ m_opaqueRoots.add(root);
+#endif
+}
+
+inline bool SlotVisitor::containsOpaqueRoot(void* root)
+{
+ ASSERT(!m_isInParallelMode);
+#if ENABLE(PARALLEL_GC)
+ ASSERT(m_opaqueRoots.isEmpty());
+ return m_shared.m_opaqueRoots.contains(root);
+#else
+ return m_opaqueRoots.contains(root);
+#endif
+}
+
+inline int SlotVisitor::opaqueRootCount()
+{
+ ASSERT(!m_isInParallelMode);
+#if ENABLE(PARALLEL_GC)
+ ASSERT(m_opaqueRoots.isEmpty());
+ return m_shared.m_opaqueRoots.size();
+#else
+ return m_opaqueRoots.size();
+#endif
+}
+
+inline void SlotVisitor::mergeOpaqueRootsIfNecessary()
+{
+ if (m_opaqueRoots.isEmpty())
+ return;
+ mergeOpaqueRoots();
+}
+
+inline void SlotVisitor::mergeOpaqueRootsIfProfitable()
+{
+ if (static_cast<unsigned>(m_opaqueRoots.size()) < Options::opaqueRootMergeThreshold())
+ return;
+ mergeOpaqueRoots();
+}
+
ALWAYS_INLINE bool SlotVisitor::checkIfShouldCopyAndPinOtherwise(void* oldPtr, size_t bytes)
{
if (CopiedSpace::isOversize(bytes)) {
@@ -55,6 +160,21 @@ ALWAYS_INLINE void* SlotVisitor::allocateNewSpace(size_t bytes)
return result;
}
+inline void SlotVisitor::donate()
+{
+ ASSERT(m_isInParallelMode);
+ if (Options::numberOfGCMarkers() == 1)
+ return;
+
+ donateKnownParallel();
+}
+
+inline void SlotVisitor::donateAndDrain()
+{
+ donate();
+ drain();
+}
+
} // namespace JSC
#endif // SlotVisitorInlineMethods_h
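
addOpaqueRoot() above trades lock traffic against local memory: a lone marker writes straight into the shared set, while parallel markers batch roots thread-locally and merge only once the batch crosses Options::opaqueRootMergeThreshold(). A sketch of that policy with an assumed threshold value:

    #include <cstddef>

    constexpr size_t kMergeThreshold = 1000; // stand-in for Options::opaqueRootMergeThreshold()

    // Sketch of the batching policy behind addOpaqueRoot() and
    // mergeOpaqueRootsIfProfitable().
    bool shouldMergeOpaqueRoots(unsigned gcMarkerCount, size_t localOpaqueRootCount)
    {
        if (gcMarkerCount == 1)
            return true; // no contention: write through to the shared set
        return localOpaqueRootCount >= kMergeThreshold; // batch until merging pays for the lock
    }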