summaryrefslogtreecommitdiff
path: root/Source/JavaScriptCore/heap/SlotVisitor.h
diff options
context:
space:
mode:
authorLorry Tar Creator <lorry-tar-importer@lorry>2017-06-27 06:07:23 +0000
committerLorry Tar Creator <lorry-tar-importer@lorry>2017-06-27 06:07:23 +0000
commit1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/heap/SlotVisitor.h
parent32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
downloadWebKitGtk-tarball-master.tar.gz
Diffstat (limited to 'Source/JavaScriptCore/heap/SlotVisitor.h')
-rw-r--r--Source/JavaScriptCore/heap/SlotVisitor.h215
1 file changed, 151 insertions, 64 deletions
diff --git a/Source/JavaScriptCore/heap/SlotVisitor.h b/Source/JavaScriptCore/heap/SlotVisitor.h
index 4a8dc3e97..83479af7f 100644
--- a/Source/JavaScriptCore/heap/SlotVisitor.h
+++ b/Source/JavaScriptCore/heap/SlotVisitor.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,126 +23,215 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef SlotVisitor_h
-#define SlotVisitor_h
+#pragma once
-#include "CopyToken.h"
+#include "CellState.h"
#include "HandleTypes.h"
-#include "MarkStackInlines.h"
-
-#include <wtf/text/StringHash.h>
+#include "IterationStatus.h"
+#include "MarkStack.h"
+#include "OpaqueRootSet.h"
+#include "VisitRaceKey.h"
+#include <wtf/MonotonicTime.h>
namespace JSC {
class ConservativeRoots;
class GCThreadSharedData;
class Heap;
+class HeapCell;
+class HeapSnapshotBuilder;
+class MarkedBlock;
+class UnconditionalFinalizer;
template<typename T> class Weak;
+class WeakReferenceHarvester;
template<typename T> class WriteBarrierBase;
-template<typename T> class JITWriteBarrier;
+
+typedef uint32_t HeapVersion;
class SlotVisitor {
WTF_MAKE_NONCOPYABLE(SlotVisitor);
- friend class HeapRootVisitor; // Allowed to mark a JSValue* or JSCell** directly.
+ WTF_MAKE_FAST_ALLOCATED;
+
+ friend class SetCurrentCellScope;
+ friend class Heap;
public:
- SlotVisitor(GCThreadSharedData&);
+ SlotVisitor(Heap&, CString codeName);
~SlotVisitor();
- MarkStackArray& markStack() { return m_stack; }
-
+ MarkStackArray& collectorMarkStack() { return m_collectorStack; }
+ MarkStackArray& mutatorMarkStack() { return m_mutatorStack; }
+ const MarkStackArray& collectorMarkStack() const { return m_collectorStack; }
+ const MarkStackArray& mutatorMarkStack() const { return m_mutatorStack; }
+
+ VM& vm();
+ const VM& vm() const;
Heap* heap() const;
void append(ConservativeRoots&);
- template<typename T> void append(JITWriteBarrier<T>*);
- template<typename T> void append(WriteBarrierBase<T>*);
+ template<typename T> void append(const WriteBarrierBase<T>&);
+ template<typename T> void appendHidden(const WriteBarrierBase<T>&);
template<typename Iterator> void append(Iterator begin , Iterator end);
- void appendValues(WriteBarrierBase<Unknown>*, size_t count);
+ void appendValues(const WriteBarrierBase<Unknown>*, size_t count);
+ void appendValuesHidden(const WriteBarrierBase<Unknown>*, size_t count);
+
+ // These don't require you to prove that you have a WriteBarrier<>. That makes sense
+ // for:
+ //
+ // - roots.
+ // - sophisticated data structures that barrier through other means (like DFG::Plan and
+ // friends).
+ //
+ // If you are not a root and you don't know what kind of barrier you have, then you
+ // shouldn't call these methods.
+ JS_EXPORT_PRIVATE void appendUnbarriered(JSValue);
+ void appendUnbarriered(JSValue*, size_t);
+ void appendUnbarriered(JSCell*);
template<typename T>
- void appendUnbarrieredPointer(T**);
- void appendUnbarrieredValue(JSValue*);
- template<typename T>
- void appendUnbarrieredWeak(Weak<T>*);
- void unconditionallyAppend(JSCell*);
+ void append(const Weak<T>& weak);
+
+ JS_EXPORT_PRIVATE void addOpaqueRoot(void*);
- void addOpaqueRoot(void*);
- bool containsOpaqueRoot(void*);
- TriState containsOpaqueRootTriState(void*);
- int opaqueRootCount();
+ JS_EXPORT_PRIVATE bool containsOpaqueRoot(void*) const;
+ TriState containsOpaqueRootTriState(void*) const;
- GCThreadSharedData& sharedData() const { return m_shared; }
- bool isEmpty() { return m_stack.isEmpty(); }
+ bool isEmpty() { return m_collectorStack.isEmpty() && m_mutatorStack.isEmpty(); }
- void setup();
+ void didStartMarking();
void reset();
- void clearMarkStack();
+ void clearMarkStacks();
size_t bytesVisited() const { return m_bytesVisited; }
- size_t bytesCopied() const { return m_bytesCopied; }
size_t visitCount() const { return m_visitCount; }
+
+ void addToVisitCount(size_t value) { m_visitCount += value; }
void donate();
- void drain();
- void donateAndDrain();
+ void drain(MonotonicTime timeout = MonotonicTime::infinity());
+ void donateAndDrain(MonotonicTime timeout = MonotonicTime::infinity());
enum SharedDrainMode { SlaveDrain, MasterDrain };
- void drainFromShared(SharedDrainMode);
+ enum class SharedDrainResult { Done, TimedOut };
+ SharedDrainResult drainFromShared(SharedDrainMode, MonotonicTime timeout = MonotonicTime::infinity());
- void harvestWeakReferences();
- void finalizeUnconditionalFinalizers();
+ SharedDrainResult drainInParallel(MonotonicTime timeout = MonotonicTime::infinity());
+ SharedDrainResult drainInParallelPassively(MonotonicTime timeout = MonotonicTime::infinity());
- void copyLater(JSCell*, CopyToken, void*, size_t);
+ // Attempts to perform an increment of draining that involves only walking `bytes` worth of data. This
+ // is likely to accidentally walk more or less than that. It will usually mark more than bytes. It may
+ // mark less than bytes if we're reaching termination or if the global worklist is empty (which may in
+ // rare cases happen temporarily even if we're not reaching termination).
+ size_t performIncrementOfDraining(size_t bytes);
- void reportExtraMemoryUsage(JSCell* owner, size_t);
+ JS_EXPORT_PRIVATE void mergeIfNecessary();
+
+ // This informs the GC about auxiliary memory of some size that we are keeping alive. If you don't do
+ // this then the space will be freed at the end of GC.
+ void markAuxiliary(const void* base);
+
+ void reportExtraMemoryVisited(size_t);
+#if ENABLE(RESOURCE_USAGE)
+ void reportExternalMemoryVisited(size_t);
+#endif
void addWeakReferenceHarvester(WeakReferenceHarvester*);
void addUnconditionalFinalizer(UnconditionalFinalizer*);
-#if ENABLE(OBJECT_MARK_LOGGING)
- inline void resetChildCount() { m_logChildCount = 0; }
- inline unsigned childCount() { return m_logChildCount; }
- inline void incrementChildCount() { m_logChildCount++; }
-#endif
+ void dump(PrintStream&) const;
+
+ bool isBuildingHeapSnapshot() const { return !!m_heapSnapshotBuilder; }
+
+ HeapVersion markingVersion() const { return m_markingVersion; }
+
+ bool mutatorIsStopped() const { return m_mutatorIsStopped; }
+
+ Lock& rightToRun() { return m_rightToRun; }
+
+ void updateMutatorIsStopped(const AbstractLocker&);
+ void updateMutatorIsStopped();
+
+ bool hasAcknowledgedThatTheMutatorIsResumed() const;
+ bool mutatorIsStoppedIsUpToDate() const;
+
+ void optimizeForStoppedMutator();
+
+ void didRace(const VisitRaceKey&);
+ void didRace(JSCell* cell, const char* reason) { didRace(VisitRaceKey(cell, reason)); }
+
+ void visitAsConstraint(const JSCell*);
+
+ bool didReachTermination();
+
+ void setIgnoreNewOpaqueRoots(bool value) { m_ignoreNewOpaqueRoots = value; }
+
+ void donateAll();
+
+ const char* codeName() const { return m_codeName.data(); }
private:
friend class ParallelModeEnabler;
- JS_EXPORT_PRIVATE static void validate(JSCell*);
+ void appendJSCellOrAuxiliary(HeapCell*);
+ void appendHidden(JSValue);
- void append(JSValue*);
- void append(JSValue*, size_t count);
- void append(JSCell**);
+ JS_EXPORT_PRIVATE void setMarkedAndAppendToMarkStack(JSCell*);
+
+ template<typename ContainerType>
+ void setMarkedAndAppendToMarkStack(ContainerType&, JSCell*);
+
+ void appendToMarkStack(JSCell*);
- void internalAppend(void* from, JSCell*);
- void internalAppend(void* from, JSValue);
- void internalAppend(void* from, JSValue*);
+ template<typename ContainerType>
+ void appendToMarkStack(ContainerType&, JSCell*);
- JS_EXPORT_PRIVATE void mergeOpaqueRoots();
- void mergeOpaqueRootsIfNecessary();
+ void appendToMutatorMarkStack(const JSCell*);
+
+ void noteLiveAuxiliaryCell(HeapCell*);
+
+ void mergeOpaqueRoots();
+
void mergeOpaqueRootsIfProfitable();
+
+ void visitChildren(const JSCell*);
void donateKnownParallel();
+ void donateKnownParallel(MarkStackArray& from, MarkStackArray& to);
+
+ void donateAll(const AbstractLocker&);
- MarkStackArray m_stack;
- HashSet<void*> m_opaqueRoots; // Handle-owning data structures not visible to the garbage collector.
+ bool hasWork(const AbstractLocker&);
+ bool didReachTermination(const AbstractLocker&);
+
+ template<typename Func>
+ IterationStatus forEachMarkStack(const Func&);
+
+ MarkStackArray& correspondingGlobalStack(MarkStackArray&);
+
+ MarkStackArray m_collectorStack;
+ MarkStackArray m_mutatorStack;
+ OpaqueRootSet m_opaqueRoots; // Handle-owning data structures not visible to the garbage collector.
+ bool m_ignoreNewOpaqueRoots { false }; // Useful as a debugging mode.
size_t m_bytesVisited;
- size_t m_bytesCopied;
size_t m_visitCount;
+ size_t m_nonCellVisitCount { 0 }; // Used for incremental draining, ignored otherwise.
bool m_isInParallelMode;
-
- GCThreadSharedData& m_shared;
-
- bool m_shouldHashCons; // Local per-thread copy of shared flag for performance reasons
- typedef HashMap<StringImpl*, JSValue> UniqueStringMap;
- UniqueStringMap m_uniqueStrings;
-#if ENABLE(OBJECT_MARK_LOGGING)
- unsigned m_logChildCount;
-#endif
+ HeapVersion m_markingVersion;
+
+ Heap& m_heap;
+ HeapSnapshotBuilder* m_heapSnapshotBuilder { nullptr };
+ JSCell* m_currentCell { nullptr };
+ bool m_isFirstVisit { false };
+ bool m_mutatorIsStopped { false };
+ bool m_canOptimizeForStoppedMutator { false };
+ Lock m_rightToRun;
+
+ CString m_codeName;
+
public:
#if !ASSERT_DISABLED
bool m_isCheckingForDefaultMarkViolation;
@@ -170,5 +259,3 @@ private:
};
} // namespace JSC
-
-#endif // SlotVisitor_h