author     Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
committer  Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
commit     1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree       46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/bytecode/CodeBlock.h
parent     32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
Diffstat (limited to 'Source/JavaScriptCore/bytecode/CodeBlock.h')
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeBlock.h | 974
1 file changed, 397 insertions(+), 577 deletions(-)
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h
index 0d9868079..2a2966460 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.h
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2008-2016 Apple Inc. All rights reserved.
* Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
*
* Redistribution and use in source and binary forms, with or without
@@ -11,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -27,54 +27,49 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CodeBlock_h
-#define CodeBlock_h
+#pragma once
#include "ArrayProfile.h"
#include "ByValInfo.h"
#include "BytecodeConventions.h"
-#include "BytecodeLivenessAnalysis.h"
#include "CallLinkInfo.h"
#include "CallReturnOffsetToBytecodeOffset.h"
#include "CodeBlockHash.h"
-#include "CodeBlockSet.h"
-#include "ConcurrentJITLock.h"
#include "CodeOrigin.h"
#include "CodeType.h"
#include "CompactJITCodeMap.h"
+#include "ConcurrentJSLock.h"
#include "DFGCommon.h"
-#include "DFGCommonData.h"
#include "DFGExitProfile.h"
-#include "DFGMinifiedGraph.h"
-#include "DFGOSREntry.h"
-#include "DFGOSRExit.h"
-#include "DFGVariableEventStream.h"
#include "DeferredCompilationCallback.h"
-#include "EvalCodeCache.h"
+#include "DirectEvalCodeCache.h"
+#include "EvalExecutable.h"
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
+#include "FunctionExecutable.h"
#include "HandlerInfo.h"
-#include "ObjectAllocationProfile.h"
-#include "Options.h"
-#include "Operations.h"
-#include "PutPropertySlot.h"
#include "Instruction.h"
#include "JITCode.h"
-#include "JITWriteBarrier.h"
+#include "JITMathICForwards.h"
+#include "JSCell.h"
#include "JSGlobalObject.h"
#include "JumpTable.h"
#include "LLIntCallLinkInfo.h"
+#include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
#include "LazyOperandValueProfile.h"
-#include "ProfilerCompilation.h"
-#include "RegExpObject.h"
-#include "StructureStubInfo.h"
+#include "ModuleProgramExecutable.h"
+#include "ObjectAllocationProfile.h"
+#include "Options.h"
+#include "ProfilerJettisonReason.h"
+#include "ProgramExecutable.h"
+#include "PutPropertySlot.h"
#include "UnconditionalFinalizer.h"
#include "ValueProfile.h"
#include "VirtualRegister.h"
#include "Watchpoint.h"
#include <wtf/Bag.h>
+#include <wtf/FastBitVector.h>
#include <wtf/FastMalloc.h>
-#include <wtf/PassOwnPtr.h>
#include <wtf/RefCountedArray.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
@@ -83,33 +78,55 @@
namespace JSC {
+class BytecodeLivenessAnalysis;
+class CodeBlockSet;
class ExecState;
+class JSModuleEnvironment;
class LLIntOffsetsExtractor;
-class RepatchBuffer;
+class PCToCodeOriginMap;
+class RegisterAtOffsetList;
+class StructureStubInfo;
+
+enum class AccessType : int8_t;
-inline VirtualRegister unmodifiedArgumentsRegister(VirtualRegister argumentsRegister) { return VirtualRegister(argumentsRegister.offset() + 1); }
+struct ArithProfile;
-static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
+typedef HashMap<CodeOrigin, StructureStubInfo*, CodeOriginApproximateHash> StubInfoMap;
enum ReoptimizationMode { DontCountReoptimization, CountReoptimization };
-class CodeBlock : public ThreadSafeRefCounted<CodeBlock>, public UnconditionalFinalizer, public WeakReferenceHarvester {
- WTF_MAKE_FAST_ALLOCATED;
+class CodeBlock : public JSCell {
+ typedef JSCell Base;
friend class BytecodeLivenessAnalysis;
friend class JIT;
friend class LLIntOffsetsExtractor;
+
+ class UnconditionalFinalizer : public JSC::UnconditionalFinalizer {
+ void finalizeUnconditionally() override;
+ };
+
+ class WeakReferenceHarvester : public JSC::WeakReferenceHarvester {
+ void visitWeakReferences(SlotVisitor&) override;
+ };
+
public:
enum CopyParsedBlockTag { CopyParsedBlock };
+
+ static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+ DECLARE_INFO;
+
protected:
- CodeBlock(CopyParsedBlockTag, CodeBlock& other);
-
- CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset);
+ CodeBlock(VM*, Structure*, CopyParsedBlockTag, CodeBlock& other);
+ CodeBlock(VM*, Structure*, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, RefPtr<SourceProvider>&&, unsigned sourceOffset, unsigned firstLineColumnOffset);
+
+ void finishCreation(VM&, CopyParsedBlockTag, CodeBlock& other);
+ void finishCreation(VM&, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*);
WriteBarrier<JSGlobalObject> m_globalObject;
- Heap* m_heap;
public:
- JS_EXPORT_PRIVATE virtual ~CodeBlock();
+ JS_EXPORT_PRIVATE ~CodeBlock();
UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }
@@ -117,39 +134,74 @@ public:
CodeBlockHash hash() const;
bool hasHash() const;
bool isSafeToComputeHash() const;
+ CString hashAsStringIfPossible() const;
CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
- void dump(PrintStream&) const;
+ JS_EXPORT_PRIVATE void dump(PrintStream&) const;
int numParameters() const { return m_numParameters; }
void setNumParameters(int newValue);
+ int numberOfArgumentsToSkip() const { return m_numberOfArgumentsToSkip; }
+
+ int numCalleeLocals() const { return m_numCalleeLocals; }
+
int* addressOfNumParameters() { return &m_numParameters; }
static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
- CodeBlock* alternative() { return m_alternative.get(); }
- PassRefPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
- void setAlternative(PassRefPtr<CodeBlock> alternative) { m_alternative = alternative; }
+ CodeBlock* alternative() const { return static_cast<CodeBlock*>(m_alternative.get()); }
+ void setAlternative(VM&, CodeBlock*);
+
+ template <typename Functor> void forEachRelatedCodeBlock(Functor&& functor)
+ {
+ Functor f(std::forward<Functor>(functor));
+ Vector<CodeBlock*, 4> codeBlocks;
+ codeBlocks.append(this);
+
+ while (!codeBlocks.isEmpty()) {
+ CodeBlock* currentCodeBlock = codeBlocks.takeLast();
+ f(currentCodeBlock);
+
+ if (CodeBlock* alternative = currentCodeBlock->alternative())
+ codeBlocks.append(alternative);
+ if (CodeBlock* osrEntryBlock = currentCodeBlock->specialOSREntryBlockOrNull())
+ codeBlocks.append(osrEntryBlock);
+ }
+ }
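The new forEachRelatedCodeBlock() above is a small worklist traversal: starting from this block, it visits every CodeBlock reachable through alternative() and specialOSREntryBlockOrNull() links. A minimal usage sketch, assuming a CodeBlock* named codeBlock is in scope (the lambda plays the role of the Functor):

    // Count every CodeBlock reachable through alternative()/OSR-entry links.
    // codeBlock is a hypothetical CodeBlock*, not a name from this patch.
    unsigned relatedBlocks = 0;
    codeBlock->forEachRelatedCodeBlock([&] (CodeBlock* block) {
        relatedBlocks++; // each block is visited exactly once per worklist pop
    });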
CodeSpecializationKind specializationKind() const
{
return specializationFromIsConstruct(m_isConstructor);
}
-
- CodeBlock* baselineAlternative();
+
+ CodeBlock* alternativeForJettison();
+ JS_EXPORT_PRIVATE CodeBlock* baselineAlternative();
// FIXME: Get rid of this.
// https://bugs.webkit.org/show_bug.cgi?id=123677
CodeBlock* baselineVersion();
- void visitAggregate(SlotVisitor&);
-
- void dumpBytecode(PrintStream& = WTF::dataFile());
- void dumpBytecode(PrintStream&, unsigned bytecodeOffset);
+ static size_t estimatedSize(JSCell*);
+ static void visitChildren(JSCell*, SlotVisitor&);
+ void visitChildren(SlotVisitor&);
+ void visitWeakly(SlotVisitor&);
+ void clearVisitWeaklyHasBeenCalled();
+
+ void dumpSource();
+ void dumpSource(PrintStream&);
+
+ void dumpBytecode();
+ void dumpBytecode(PrintStream&);
+ void dumpBytecode(
+ PrintStream&, unsigned bytecodeOffset,
+ const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
+ void dumpExceptionHandlers(PrintStream&);
void printStructures(PrintStream&, const Instruction*);
void printStructure(PrintStream&, const char* name, const Instruction*, int operand);
+ void dumpMathICStats();
+
bool isStrictMode() const { return m_isStrictMode; }
ECMAMode ecmaMode() const { return isStrictMode() ? StrictMode : NotStrictMode; }
@@ -169,71 +221,85 @@ public:
return index >= m_numVars;
}
- HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
+ HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler = RequiredHandler::AnyHandler);
+ HandlerInfo* handlerForIndex(unsigned, RequiredHandler = RequiredHandler::AnyHandler);
+ void removeExceptionHandlerForCallSite(CallSiteIndex);
unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
- int& startOffset, int& endOffset, unsigned& line, unsigned& column);
+ int& startOffset, int& endOffset, unsigned& line, unsigned& column) const;
-#if ENABLE(JIT)
- StructureStubInfo* addStubInfo();
- Bag<StructureStubInfo>::iterator begin() { return m_stubInfos.begin(); }
- Bag<StructureStubInfo>::iterator end() { return m_stubInfos.end(); }
+ std::optional<unsigned> bytecodeOffsetFromCallSiteIndex(CallSiteIndex);
- void resetStub(StructureStubInfo&);
+ void getStubInfoMap(const ConcurrentJSLocker&, StubInfoMap& result);
+ void getStubInfoMap(StubInfoMap& result);
- void getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result);
+ void getCallLinkInfoMap(const ConcurrentJSLocker&, CallLinkInfoMap& result);
+ void getCallLinkInfoMap(CallLinkInfoMap& result);
- ByValInfo& getByValInfo(unsigned bytecodeIndex)
- {
- return *(binarySearch<ByValInfo, unsigned>(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex));
- }
+ void getByValInfoMap(const ConcurrentJSLocker&, ByValInfoMap& result);
+ void getByValInfoMap(ByValInfoMap& result);
+
+#if ENABLE(JIT)
+ StructureStubInfo* addStubInfo(AccessType);
+ JITAddIC* addJITAddIC(ArithProfile*);
+ JITMulIC* addJITMulIC(ArithProfile*);
+ JITNegIC* addJITNegIC(ArithProfile*);
+ JITSubIC* addJITSubIC(ArithProfile*);
+ Bag<StructureStubInfo>::iterator stubInfoBegin() { return m_stubInfos.begin(); }
+ Bag<StructureStubInfo>::iterator stubInfoEnd() { return m_stubInfos.end(); }
+
+ // O(n) operation. Use getStubInfoMap() unless you really only intend to get one
+ // stub info.
+ StructureStubInfo* findStubInfo(CodeOrigin);
- CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
- {
- return *(binarySearch<CallLinkInfo, void*>(m_callLinkInfos, m_callLinkInfos.size(), returnAddress.value(), getCallLinkInfoReturnLocation));
- }
+ ByValInfo* addByValInfo();
- CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
- {
- ASSERT(!JITCode::isOptimizingJIT(jitType()));
- return *(binarySearch<CallLinkInfo, unsigned>(m_callLinkInfos, m_callLinkInfos.size(), bytecodeIndex, getCallLinkInfoBytecodeIndex));
- }
+ CallLinkInfo* addCallLinkInfo();
+ Bag<CallLinkInfo>::iterator callLinkInfosBegin() { return m_callLinkInfos.begin(); }
+ Bag<CallLinkInfo>::iterator callLinkInfosEnd() { return m_callLinkInfos.end(); }
+
+ // This is a slow function call used primarily for compiling OSR exits in the case
+ // that there had been inlining. Chances are if you want to use this, you're really
+ // looking for a CallLinkInfoMap to amortize the cost of calling this.
+ CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex);
+
+ // We call this when we want to reattempt compiling something with the baseline JIT. Ideally
+ // the baseline JIT would not add data to CodeBlock, but instead it would put its data into
+ // a newly created JITCode, which could be thrown away if we bail on JIT compilation. Then we
+ // would be able to get rid of this silly function.
+ // FIXME: https://bugs.webkit.org/show_bug.cgi?id=159061
+ void resetJITData();
#endif // ENABLE(JIT)
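The comment above getCallLinkInfoForBytecodeIndex() points at the map-based alternative when more than one lookup is needed. A sketch of that amortized pattern, assuming a CodeBlock* named codeBlock and a container of bytecode indices named callSites (both hypothetical):

    // Build the CallLinkInfoMap once (it snapshots the Bag under the
    // CodeBlock's lock), then do cheap hash lookups instead of repeated
    // O(n) calls to getCallLinkInfoForBytecodeIndex().
    CallLinkInfoMap callLinkInfos;
    codeBlock->getCallLinkInfoMap(callLinkInfos);
    for (unsigned bytecodeIndex : callSites) {
        if (CallLinkInfo* info = callLinkInfos.get(CodeOrigin(bytecodeIndex)))
            dataLog("call site at bc#", bytecodeIndex, " is linked\n");
    }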
void unlinkIncomingCalls();
#if ENABLE(JIT)
- void unlinkCalls();
-
void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*);
-
- bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming)
- {
- return m_incomingCalls.isOnList(incoming);
- }
+ void linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode*);
#endif // ENABLE(JIT)
-#if ENABLE(LLINT)
void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*);
-#endif // ENABLE(LLINT)
- void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
+ void setJITCodeMap(std::unique_ptr<CompactJITCodeMap> jitCodeMap)
{
- m_jitCodeMap = jitCodeMap;
+ m_jitCodeMap = WTFMove(jitCodeMap);
}
CompactJITCodeMap* jitCodeMap()
{
return m_jitCodeMap.get();
}
+ static void clearLLIntGetByIdCache(Instruction*);
+
unsigned bytecodeOffset(Instruction* returnAddress)
{
RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
return static_cast<Instruction*>(returnAddress) - instructions().begin();
}
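bytecodeOffset() above recovers an instruction's index by pointer subtraction against the start of the contiguous m_instructions array; the RELEASE_ASSERT guards against pointers into some other block. A tiny illustrative sketch (hypothetical, standalone):

    // Pointer difference over a contiguous Instruction array yields the
    // bytecode offset. codeBlock is a hypothetical CodeBlock*.
    Instruction* base = codeBlock->instructions().begin();
    Instruction* pc = base + 42;                      // some PC inside this block
    unsigned offset = codeBlock->bytecodeOffset(pc);  // == 42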
- bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); }
+ typedef JSC::Instruction Instruction;
+ typedef RefCountedArray<Instruction>& UnpackedInstructions;
unsigned numberOfInstructions() const { return m_instructions.size(); }
RefCountedArray<Instruction>& instructions() { return m_instructions; }
@@ -245,28 +311,19 @@ public:
unsigned instructionCount() const { return m_instructions.size(); }
- int argumentIndexAfterCapture(size_t argument);
-
- bool hasSlowArguments();
- const SlowArgument* machineSlowArguments();
-
- // Exactly equivalent to codeBlock->ownerExecutable()->installCode(codeBlock);
- void install();
-
// Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind())
- PassRefPtr<CodeBlock> newReplacement();
+ CodeBlock* newReplacement();
- void setJITCode(PassRefPtr<JITCode> code, MacroAssemblerCodePtr codeWithArityCheck)
+ void setJITCode(Ref<JITCode>&& code)
{
- ASSERT(m_heap->isDeferred());
- m_heap->reportExtraMemoryCost(code->size());
- ConcurrentJITLocker locker(m_lock);
+ ASSERT(heap()->isDeferred());
+ heap()->reportExtraMemoryAllocated(code->size());
+ ConcurrentJSLocker locker(m_lock);
WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid.
- m_jitCode = code;
- m_jitCodeWithArityCheck = codeWithArityCheck;
+ m_jitCode = WTFMove(code);
}
- PassRefPtr<JITCode> jitCode() { return m_jitCode; }
- MacroAssemblerCodePtr jitCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
+ RefPtr<JITCode> jitCode() { return m_jitCode; }
+ static ptrdiff_t jitCodeOffset() { return OBJECT_OFFSETOF(CodeBlock, m_jitCode); }
JITCode::JITType jitType() const
{
JITCode* jitCode = m_jitCode.get();
@@ -282,103 +339,44 @@ public:
}
#if ENABLE(JIT)
- virtual CodeBlock* replacement() = 0;
+ CodeBlock* replacement();
- virtual DFG::CapabilityLevel capabilityLevelInternal() = 0;
- DFG::CapabilityLevel capabilityLevel()
- {
- DFG::CapabilityLevel result = capabilityLevelInternal();
- m_capabilityLevelState = result;
- return result;
- }
- DFG::CapabilityLevel capabilityLevelState() { return m_capabilityLevelState; }
+ DFG::CapabilityLevel computeCapabilityLevel();
+ DFG::CapabilityLevel capabilityLevel();
+ DFG::CapabilityLevel capabilityLevelState() { return static_cast<DFG::CapabilityLevel>(m_capabilityLevelState); }
bool hasOptimizedReplacement(JITCode::JITType typeToReplace);
bool hasOptimizedReplacement(); // the typeToReplace is my JITType
#endif
- void jettison(ReoptimizationMode = DontCountReoptimization);
+ void jettison(Profiler::JettisonReason, ReoptimizationMode = DontCountReoptimization, const FireDetail* = nullptr);
- ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }
+ ExecutableBase* ownerExecutable() const { return m_ownerExecutable.get(); }
+ ScriptExecutable* ownerScriptExecutable() const { return jsCast<ScriptExecutable*>(m_ownerExecutable.get()); }
- void setVM(VM* vm) { m_vm = vm; }
- VM* vm() { return m_vm; }
+ VM* vm() const { return m_vm; }
void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
VirtualRegister thisRegister() const { return m_thisRegister; }
- bool needsFullScopeChain() const { return m_unlinkedCode->needsFullScopeChain(); }
bool usesEval() const { return m_unlinkedCode->usesEval(); }
- void setArgumentsRegister(VirtualRegister argumentsRegister)
- {
- ASSERT(argumentsRegister.isValid());
- m_argumentsRegister = argumentsRegister;
- ASSERT(usesArguments());
- }
- VirtualRegister argumentsRegister() const
- {
- ASSERT(usesArguments());
- return m_argumentsRegister;
- }
- VirtualRegister uncheckedArgumentsRegister()
- {
- if (!usesArguments())
- return VirtualRegister();
- return argumentsRegister();
- }
- void setActivationRegister(VirtualRegister activationRegister)
- {
- m_activationRegister = activationRegister;
- }
-
- VirtualRegister activationRegister() const
- {
- ASSERT(needsFullScopeChain());
- return m_activationRegister;
- }
-
- VirtualRegister uncheckedActivationRegister()
+ void setScopeRegister(VirtualRegister scopeRegister)
{
- if (!needsFullScopeChain())
- return VirtualRegister();
- return activationRegister();
+ ASSERT(scopeRegister.isLocal() || !scopeRegister.isValid());
+ m_scopeRegister = scopeRegister;
}
- bool usesArguments() const { return m_argumentsRegister.isValid(); }
-
- bool needsActivation() const
+ VirtualRegister scopeRegister() const
{
- return m_needsActivation;
+ return m_scopeRegister;
}
- unsigned captureCount() const
+ CodeType codeType() const
{
- if (!symbolTable())
- return 0;
- return symbolTable()->captureCount();
- }
-
- int captureStart() const
- {
- if (!symbolTable())
- return 0;
- return symbolTable()->captureStart();
- }
-
- int captureEnd() const
- {
- if (!symbolTable())
- return 0;
- return symbolTable()->captureEnd();
+ return static_cast<CodeType>(m_codeType);
}
- bool isCaptured(VirtualRegister operand, InlineCallFrame* = 0) const;
-
- int framePointerOffsetToGetActivationRegisters(int machineCaptureStart);
- int framePointerOffsetToGetActivationRegisters();
-
- CodeType codeType() const { return m_unlinkedCode->codeType(); }
PutPropertySlot::Context putByIdContext() const
{
if (codeType() == EvalCode)
@@ -393,20 +391,8 @@ public:
size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }
- void clearEvalCache();
-
String nameForRegister(VirtualRegister);
-#if ENABLE(JIT)
- void setNumberOfByValInfos(size_t size) { m_byValInfos.resizeToFit(size); }
- size_t numberOfByValInfos() const { return m_byValInfos.size(); }
- ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }
-
- void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.resizeToFit(size); }
- size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
- CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
-#endif
-
unsigned numberOfArgumentValueProfiles()
{
ASSERT(m_numParameters >= 0);
@@ -422,20 +408,12 @@ public:
unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; }
- ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
- {
- ValueProfile* result = binarySearch<ValueProfile, int>(
- m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
- getValueProfileBytecodeOffset<ValueProfile>);
- ASSERT(result->m_bytecodeOffset != -1);
- ASSERT(instructions()[bytecodeOffset + opcodeLength(
- m_vm->interpreter->getOpcodeID(
- instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result);
- return result;
- }
- SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset)
+ ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset);
+ SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
{
- return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(locker);
+ if (ValueProfile* valueProfile = valueProfileForBytecodeOffset(bytecodeOffset))
+ return valueProfile->computeUpdatedPrediction(locker);
+ return SpecNone;
}
unsigned totalNumberOfValueProfiles()
@@ -449,25 +427,16 @@ public:
return valueProfile(index - numberOfArgumentValueProfiles());
}
- RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
- {
- m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
- return &m_rareCaseProfiles.last();
- }
+ RareCaseProfile* addRareCaseProfile(int bytecodeOffset);
unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
- RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
- RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
- {
- return tryBinarySearch<RareCaseProfile, int>(
- m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
- getRareCaseProfileBytecodeOffset);
- }
+ RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset);
+ unsigned rareCaseProfileCountForBytecodeOffset(int bytecodeOffset);
bool likelyToTakeSlowCase(int bytecodeOffset)
{
if (!hasBaselineJITProfiling())
return false;
- unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ unsigned value = rareCaseProfileCountForBytecodeOffset(bytecodeOffset);
return value >= Options::likelyToTakeSlowCaseMinimumCount();
}
@@ -475,68 +444,22 @@ public:
{
if (!hasBaselineJITProfiling())
return false;
- unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ unsigned value = rareCaseProfileCountForBytecodeOffset(bytecodeOffset);
return value >= Options::couldTakeSlowCaseMinimumCount();
}
- RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
- {
- m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
- return &m_specialFastCaseProfiles.last();
- }
- unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
- RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
- RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
- {
- return tryBinarySearch<RareCaseProfile, int>(
- m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset,
- getRareCaseProfileBytecodeOffset);
- }
+ ArithProfile* arithProfileForBytecodeOffset(int bytecodeOffset);
+ ArithProfile* arithProfileForPC(Instruction*);
- bool likelyToTakeSpecialFastCase(int bytecodeOffset)
- {
- if (!hasBaselineJITProfiling())
- return false;
- unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount();
- }
-
- bool couldTakeSpecialFastCase(int bytecodeOffset)
- {
- if (!hasBaselineJITProfiling())
- return false;
- unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount();
- }
-
- bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
- {
- if (!hasBaselineJITProfiling())
- return false;
- unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- unsigned value = slowCaseCount - specialFastCaseCount;
- return value >= Options::likelyToTakeSlowCaseMinimumCount();
- }
-
- bool likelyToTakeAnySlowCase(int bytecodeOffset)
- {
- if (!hasBaselineJITProfiling())
- return false;
- unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- unsigned value = slowCaseCount + specialFastCaseCount;
- return value >= Options::likelyToTakeSlowCaseMinimumCount();
- }
+ bool couldTakeSpecialFastCase(int bytecodeOffset);
unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
- ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
- {
- m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
- return &m_arrayProfiles.last();
- }
+ ArrayProfile* addArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset);
+ ArrayProfile* addArrayProfile(unsigned bytecodeOffset);
+ ArrayProfile* getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset);
ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
+ ArrayProfile* getOrAddArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset);
ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);
// Exception handling support
@@ -547,10 +470,7 @@ public:
bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }
#if ENABLE(DFG_JIT)
- Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins()
- {
- return m_jitCode->dfgCommon()->codeOrigins;
- }
+ Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins();
// Having code origins implies that there has been some inlining.
bool hasCodeOrigins()
@@ -558,30 +478,34 @@ public:
return JITCode::isOptimizingJIT(jitType());
}
- bool canGetCodeOrigin(unsigned index)
+ bool canGetCodeOrigin(CallSiteIndex index)
{
if (!hasCodeOrigins())
return false;
- return index < codeOrigins().size();
+ return index.bits() < codeOrigins().size();
}
- CodeOrigin codeOrigin(unsigned index)
+ CodeOrigin codeOrigin(CallSiteIndex index)
{
- return codeOrigins()[index];
+ return codeOrigins()[index.bits()];
}
bool addFrequentExitSite(const DFG::FrequentExitSite& site)
{
ASSERT(JITCode::isBaselineCode(jitType()));
- ConcurrentJITLocker locker(m_lock);
- return m_exitProfile.add(locker, site);
+ ConcurrentJSLocker locker(m_lock);
+ return m_exitProfile.add(locker, this, site);
}
-
- bool hasExitSite(const DFG::FrequentExitSite& site) const
+
+ bool hasExitSite(const ConcurrentJSLocker& locker, const DFG::FrequentExitSite& site) const
{
- ConcurrentJITLocker locker(m_lock);
return m_exitProfile.hasExitSite(locker, site);
}
+ bool hasExitSite(const DFG::FrequentExitSite& site) const
+ {
+ ConcurrentJSLocker locker(m_lock);
+ return hasExitSite(locker, site);
+ }
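The two hasExitSite() overloads follow a convention used throughout this class: the inner overload takes a ConcurrentJSLocker& purely to prove the caller already holds m_lock, while the outer overload acquires the lock itself and delegates. A minimal sketch of the same idiom, with hypothetical names not taken from this patch:

    // Minimal sketch of the locker-passing idiom. The ConcurrentJSLocker&
    // parameter is never read; it exists so the compiler enforces that
    // m_lock is held on the inner path.
    class Counters {
    public:
        unsigned count(const ConcurrentJSLocker&) const { return m_count; } // caller holds the lock
        unsigned count() const
        {
            ConcurrentJSLocker locker(m_lock); // take the lock, then reuse the inner overload
            return count(locker);
        }
    private:
        mutable ConcurrentJSLock m_lock;
        unsigned m_count { 0 };
    };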
DFG::ExitProfile& exitProfile() { return m_exitProfile; }
@@ -589,44 +513,26 @@ public:
{
return m_lazyOperandValueProfiles;
}
-#else // ENABLE(DFG_JIT)
- bool addFrequentExitSite(const DFG::FrequentExitSite&)
- {
- return false;
- }
#endif // ENABLE(DFG_JIT)
// Constant Pool
#if ENABLE(DFG_JIT)
size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); }
- size_t numberOfDFGIdentifiers() const
- {
- if (!JITCode::isOptimizingJIT(jitType()))
- return 0;
-
- return m_jitCode->dfgCommon()->dfgIdentifiers.size();
- }
-
- const Identifier& identifier(int index) const
- {
- size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
- if (static_cast<unsigned>(index) < unlinkedIdentifiers)
- return m_unlinkedCode->identifier(index);
- ASSERT(JITCode::isOptimizingJIT(jitType()));
- return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
- }
+ size_t numberOfDFGIdentifiers() const;
+ const Identifier& identifier(int index) const;
#else
size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); }
const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); }
#endif
Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; }
- size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
+ Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; }
unsigned addConstant(JSValue v)
{
unsigned result = m_constantRegisters.size();
m_constantRegisters.append(WriteBarrier<Unknown>());
- m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v);
+ m_constantRegisters.last().set(m_globalObject->vm(), this, v);
+ m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
return result;
}
@@ -634,19 +540,19 @@ public:
{
unsigned result = m_constantRegisters.size();
m_constantRegisters.append(WriteBarrier<Unknown>());
+ m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
return result;
}
- bool findConstant(JSValue, unsigned& result);
- unsigned addOrFindConstant(JSValue);
WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
- ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
+ static ALWAYS_INLINE bool isConstantRegisterIndex(int index) { return index >= FirstConstantRegisterIndex; }
ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
+ ALWAYS_INLINE SourceCodeRepresentation constantSourceCodeRepresentation(int index) const { return m_constantsSourceCodeRepresentation[index - FirstConstantRegisterIndex]; }
FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
int numberOfFunctionDecls() { return m_functionDecls.size(); }
FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
-
+
RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); }
unsigned numberOfConstantBuffers() const
@@ -673,15 +579,19 @@ public:
return constantBufferAsVector(index).data();
}
+ Heap* heap() const { return &m_vm->heap; }
JSGlobalObject* globalObject() { return m_globalObject.get(); }
JSGlobalObject* globalObjectFor(CodeOrigin);
BytecodeLivenessAnalysis& livenessAnalysis()
{
- if (!m_livenessAnalysis)
- m_livenessAnalysis = std::make_unique<BytecodeLivenessAnalysis>(this);
- return *m_livenessAnalysis;
+ {
+ ConcurrentJSLocker locker(m_lock);
+ if (!!m_livenessAnalysis)
+ return *m_livenessAnalysis;
+ }
+ return livenessAnalysisSlow();
}
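livenessAnalysis() is now double-checked: the fast path holds m_lock only long enough to test m_livenessAnalysis, then falls back to livenessAnalysisSlow(), which is declared in the private section below but defined in CodeBlock.cpp, not in this diff. A plausible shape for that slow path, assuming it recomputes outside the lock and re-checks before publishing:

    // Hypothetical sketch of livenessAnalysisSlow(); the real body is not
    // part of this patch. Compute without the lock held, then publish under
    // m_lock, re-checking in case another thread won the race.
    BytecodeLivenessAnalysis& CodeBlock::livenessAnalysisSlow()
    {
        std::unique_ptr<BytecodeLivenessAnalysis> analysis =
            std::make_unique<BytecodeLivenessAnalysis>(this);
        {
            ConcurrentJSLocker locker(m_lock);
            if (!m_livenessAnalysis)
                m_livenessAnalysis = WTFMove(analysis); // publish our copy if still unset
        }
        return *m_livenessAnalysis; // safe: once set, never cleared (assumed)
    }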
void validate();
@@ -702,10 +612,7 @@ public:
StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
-
- SymbolTable* symbolTable() const { return m_symbolTable.get(); }
-
- EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
+ DirectEvalCodeCache& directEvalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_directEvalCodeCache; }
enum ShrinkMode {
// Shrink prior to generating machine code that may point directly into vectors.
@@ -731,21 +638,18 @@ public:
m_llintExecuteCounter.deferIndefinitely();
}
- void jitAfterWarmUp()
- {
- m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
- }
-
- void jitSoon()
- {
- m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
- }
+ int32_t thresholdForJIT(int32_t threshold);
+ void jitAfterWarmUp();
+ void jitSoon();
- const ExecutionCounter& llintExecuteCounter() const
+ const BaselineExecutionCounter& llintExecuteCounter() const
{
return m_llintExecuteCounter;
}
+ typedef HashMap<Structure*, Bag<LLIntPrototypeLoadAdaptiveStructureWatchpoint>> StructureWatchpointMap;
+ StructureWatchpointMap& llintGetByIdWatchpointMap() { return m_llintGetByIdWatchpointMap; }
+
// Functions for controlling when tiered compilation kicks in. This
// controls both when the optimizing compiler is invoked and when OSR
// entry happens. Two triggers exist: the loop trigger and the return
@@ -767,9 +671,13 @@ public:
// When we observe a lot of speculation failures, we trigger a
// reoptimization. But each time, we increase the optimization trigger
// to avoid thrashing.
- unsigned reoptimizationRetryCounter() const;
+ JS_EXPORT_PRIVATE unsigned reoptimizationRetryCounter() const;
void countReoptimization();
#if ENABLE(JIT)
+ static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return RegisterSet::llintBaselineCalleeSaveRegisters().numberOfSetRegisters(); }
+ static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters();
+ size_t calleeSaveSpaceAsVirtualRegisters();
+
unsigned numberOfDFGCompiles();
int32_t codeTypeThresholdMultiplier() const;
@@ -781,11 +689,11 @@ public:
return &m_jitExecuteCounter.m_counter;
}
- static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
- static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
- static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }
+ static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_counter); }
+ static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_activeThreshold); }
+ static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_totalCount); }
- const ExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }
+ const BaselineExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }
unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
@@ -855,7 +763,14 @@ public:
uint32_t exitCountThresholdForReoptimizationFromLoop();
bool shouldReoptimizeNow();
bool shouldReoptimizeFromLoopNow();
+
+ void setCalleeSaveRegisters(RegisterSet);
+ void setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList>);
+
+ RegisterAtOffsetList* calleeSaveRegisters() const { return m_calleeSaveRegisters.get(); }
#else // No JIT
+ static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return 0; }
+ static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters() { return 0; };
void optimizeAfterWarmUp() { }
unsigned numberOfDFGCompiles() { return 0; }
#endif
@@ -866,10 +781,11 @@ public:
void updateAllPredictions();
unsigned frameRegisterCount();
+ int stackPointerOffset();
bool hasOpDebugForLineAndColumn(unsigned line, unsigned column);
- int hasDebuggerRequests() const { return !!m_debuggerRequests; }
+ bool hasDebuggerRequests() const { return m_debuggerRequests; }
void* debuggerRequestsAddress() { return &m_debuggerRequests; }
void addBreakpoint(unsigned numBreakpoints);
@@ -885,13 +801,18 @@ public:
};
void setSteppingMode(SteppingMode);
- void clearDebuggerRequests() { m_debuggerRequests = 0; }
+ void clearDebuggerRequests()
+ {
+ m_steppingMode = SteppingModeDisabled;
+ m_numBreakpoints = 0;
+ }
+ bool wasCompiledWithDebuggingOpcodes() const { return m_unlinkedCode->wasCompiledWithDebuggingOpcodes(); }
+
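clearDebuggerRequests() above now resets individual bitfields rather than zeroing m_debuggerRequests as one word; the union those fields alias appears further down among the data members. The idiom, as a standalone hypothetical sketch:

    // Sketch of the union-aliasing idiom behind m_debuggerRequests
    // (hypothetical type, not from this patch): the JIT tests the whole
    // word with one load, while the debugger flips the bitfields that
    // share its storage.
    union DebuggerRequests {
        unsigned word; // nonzero means "some debugger request is pending"
        struct {
            unsigned hasDebuggerStatement : 1;
            unsigned steppingMode : 1;
            unsigned numBreakpoints : 30;
        };
    };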
// FIXME: Make these remaining members private.
- int m_numCalleeRegisters;
+ int m_numCalleeLocals;
int m_numVars;
- bool m_isConstructor;
// This is intentionally public; it's the responsibility of anyone doing any
// of the following to hold the lock:
@@ -909,21 +830,67 @@ public:
// Another exception to the rules is that the GC can do whatever it wants
// without holding any locks, because the GC is guaranteed to wait until any
// concurrent compilation threads finish what they're doing.
- mutable ConcurrentJITLock m_lock;
-
- bool m_shouldAlwaysBeInlined;
- bool m_allTransitionsHaveBeenMarked; // Initialized and used on every GC.
-
- bool m_didFailFTLCompilation;
+ mutable ConcurrentJSLock m_lock;
+
+ bool m_visitWeaklyHasBeenCalled;
+
+ bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it.
+
+#if ENABLE(JIT)
+ unsigned m_capabilityLevelState : 2; // DFG::CapabilityLevel
+#endif
+
+ bool m_allTransitionsHaveBeenMarked : 1; // Initialized and used on every GC.
+
+ bool m_didFailJITCompilation : 1;
+ bool m_didFailFTLCompilation : 1;
+ bool m_hasBeenCompiledWithFTL : 1;
+ bool m_isConstructor : 1;
+ bool m_isStrictMode : 1;
+ unsigned m_codeType : 2; // CodeType
// Internal methods for use by validation code. It would be private if it wasn't
// for the fact that we use it from anonymous namespaces.
void beginValidationDidFail();
NO_RETURN_DUE_TO_CRASH void endValidationDidFail();
+ struct RareData {
+ WTF_MAKE_FAST_ALLOCATED;
+ public:
+ Vector<HandlerInfo> m_exceptionHandlers;
+
+ // Buffers used for large array literals
+ Vector<Vector<JSValue>> m_constantBuffers;
+
+ // Jump Tables
+ Vector<SimpleJumpTable> m_switchJumpTables;
+ Vector<StringJumpTable> m_stringSwitchJumpTables;
+
+ DirectEvalCodeCache m_directEvalCodeCache;
+ };
+
+ void clearExceptionHandlers()
+ {
+ if (m_rareData)
+ m_rareData->m_exceptionHandlers.clear();
+ }
+
+ void appendExceptionHandler(const HandlerInfo& handler)
+ {
+ createRareDataIfNecessary(); // We may be handling the exception of an inlined call frame.
+ m_rareData->m_exceptionHandlers.append(handler);
+ }
+
+ CallSiteIndex newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite);
+
+#if ENABLE(JIT)
+ void setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&&);
+ std::optional<CodeOrigin> findPC(void* pc);
+#endif
+
protected:
- virtual void visitWeakReferences(SlotVisitor&) override;
- virtual void finalizeUnconditionally() override;
+ void finalizeLLIntInlineCaches();
+ void finalizeBaselineJITInlineCaches();
#if ENABLE(DFG_JIT)
void tallyFrequentExitSites();
@@ -933,6 +900,8 @@ protected:
private:
friend class CodeBlockSet;
+
+ BytecodeLivenessAnalysis& livenessAnalysisSlow();
CodeBlock* specialOSREntryBlockOrNull();
@@ -940,299 +909,147 @@ private:
double optimizationThresholdScalingFactor();
-#if ENABLE(JIT)
- ClosureCallStubRoutine* findClosureCallForReturnPC(ReturnAddressPtr);
-#endif
-
void updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);
- void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants)
+ void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation);
+
+ void replaceConstant(int index, JSValue value)
{
- size_t count = constants.size();
- m_constantRegisters.resize(count);
- for (size_t i = 0; i < count; i++)
- m_constantRegisters[i].set(*m_vm, ownerExecutable(), constants[i].get());
+ ASSERT(isConstantRegisterIndex(index) && static_cast<size_t>(index - FirstConstantRegisterIndex) < m_constantRegisters.size());
+ m_constantRegisters[index - FirstConstantRegisterIndex].set(m_globalObject->vm(), this, value);
}
- void dumpBytecode(PrintStream&, ExecState*, const Instruction* begin, const Instruction*&, const StubInfoMap& = StubInfoMap());
+ void dumpBytecode(
+ PrintStream&, ExecState*, const Instruction* begin, const Instruction*&,
+ const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
CString registerName(int r) const;
+ CString constantName(int index) const;
void printUnaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op);
void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&);
void printGetByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&);
enum CacheDumpMode { DumpCaches, DontDumpCaches };
- void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling);
+ void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap&);
void printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
- void printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
- {
- out.printf("[%4d] %-17s ", location, op);
- }
-
- void printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
- {
- printLocationAndOp(out, exec, location, it, op);
- out.printf("%s", registerName(operand).data());
- }
+ void printPutByIdCacheStatus(PrintStream&, int location, const StubInfoMap&);
+ void printLocationAndOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
+ void printLocationOpAndRegisterOperand(PrintStream&, ExecState*, int location, const Instruction*& it, const char* op, int operand);
void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling);
void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling);
-
-#if ENABLE(DFG_JIT)
- bool shouldImmediatelyAssumeLivenessDuringScan()
- {
- // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
- // their weak references go stale. So if a baseline JIT CodeBlock gets
- // scanned, we can assume that this means that it's live.
- if (!JITCode::isOptimizingJIT(jitType()))
- return true;
-
- // For simplicity, we don't attempt to jettison code blocks during GC if
- // they are executing. Instead we strongly mark their weak references to
- // allow them to continue to execute soundly.
- if (m_mayBeExecuting)
- return true;
-
- if (Options::forceDFGCodeBlockLiveness())
- return true;
+ void dumpArithProfile(PrintStream&, ArithProfile*, bool& hasPrintedProfiling);
- return false;
- }
-#else
- bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
-#endif
+ bool shouldVisitStrongly(const ConcurrentJSLocker&);
+ bool shouldJettisonDueToWeakReference();
+ bool shouldJettisonDueToOldAge(const ConcurrentJSLocker&);
- void propagateTransitions(SlotVisitor&);
- void determineLiveness(SlotVisitor&);
+ void propagateTransitions(const ConcurrentJSLocker&, SlotVisitor&);
+ void determineLiveness(const ConcurrentJSLocker&, SlotVisitor&);
- void stronglyVisitStrongReferences(SlotVisitor&);
- void stronglyVisitWeakReferences(SlotVisitor&);
+ void stronglyVisitStrongReferences(const ConcurrentJSLocker&, SlotVisitor&);
+ void stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor&);
+ void visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor&);
+
+ std::chrono::milliseconds timeSinceCreation()
+ {
+ return std::chrono::duration_cast<std::chrono::milliseconds>(
+ std::chrono::steady_clock::now() - m_creationTime);
+ }
void createRareDataIfNecessary()
{
if (!m_rareData)
- m_rareData = adoptPtr(new RareData);
+ m_rareData = std::make_unique<RareData>();
}
-
-#if ENABLE(JIT)
- void resetStubInternal(RepatchBuffer&, StructureStubInfo&);
- void resetStubDuringGCInternal(RepatchBuffer&, StructureStubInfo&);
-#endif
+
+ void insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>&);
+
WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode;
int m_numParameters;
+ int m_numberOfArgumentsToSkip { 0 };
union {
unsigned m_debuggerRequests;
struct {
+ unsigned m_hasDebuggerStatement : 1;
unsigned m_steppingMode : 1;
- unsigned m_numBreakpoints : 31;
+ unsigned m_numBreakpoints : 30;
};
};
- WriteBarrier<ScriptExecutable> m_ownerExecutable;
+ WriteBarrier<ExecutableBase> m_ownerExecutable;
VM* m_vm;
RefCountedArray<Instruction> m_instructions;
- WriteBarrier<SymbolTable> m_symbolTable;
VirtualRegister m_thisRegister;
- VirtualRegister m_argumentsRegister;
- VirtualRegister m_activationRegister;
-
- bool m_isStrictMode;
- bool m_needsActivation;
- bool m_mayBeExecuting;
- uint8_t m_visitAggregateHasBeenCalled;
+ VirtualRegister m_scopeRegister;
+ mutable CodeBlockHash m_hash;
RefPtr<SourceProvider> m_source;
unsigned m_sourceOffset;
unsigned m_firstLineColumnOffset;
- unsigned m_codeType;
-#if ENABLE(LLINT)
- Vector<LLIntCallLinkInfo> m_llintCallLinkInfos;
+ RefCountedArray<LLIntCallLinkInfo> m_llintCallLinkInfos;
SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo>> m_incomingLLIntCalls;
-#endif
+ StructureWatchpointMap m_llintGetByIdWatchpointMap;
RefPtr<JITCode> m_jitCode;
- MacroAssemblerCodePtr m_jitCodeWithArityCheck;
#if ENABLE(JIT)
+ std::unique_ptr<RegisterAtOffsetList> m_calleeSaveRegisters;
Bag<StructureStubInfo> m_stubInfos;
- Vector<ByValInfo> m_byValInfos;
- Vector<CallLinkInfo> m_callLinkInfos;
+ Bag<JITAddIC> m_addICs;
+ Bag<JITMulIC> m_mulICs;
+ Bag<JITNegIC> m_negICs;
+ Bag<JITSubIC> m_subICs;
+ Bag<ByValInfo> m_byValInfos;
+ Bag<CallLinkInfo> m_callLinkInfos;
SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo>> m_incomingCalls;
+ SentinelLinkedList<PolymorphicCallNode, BasicRawSentinelNode<PolymorphicCallNode>> m_incomingPolymorphicCalls;
+ std::unique_ptr<PCToCodeOriginMap> m_pcToCodeOriginMap;
#endif
- OwnPtr<CompactJITCodeMap> m_jitCodeMap;
+ std::unique_ptr<CompactJITCodeMap> m_jitCodeMap;
#if ENABLE(DFG_JIT)
// This is relevant to non-DFG code blocks that serve as the profiled code block
// for DFG code blocks.
DFG::ExitProfile m_exitProfile;
CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
#endif
- Vector<ValueProfile> m_argumentValueProfiles;
- Vector<ValueProfile> m_valueProfiles;
+ RefCountedArray<ValueProfile> m_argumentValueProfiles;
+ RefCountedArray<ValueProfile> m_valueProfiles;
SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
- SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
- Vector<ArrayAllocationProfile> m_arrayAllocationProfiles;
+ RefCountedArray<ArrayAllocationProfile> m_arrayAllocationProfiles;
ArrayProfileVector m_arrayProfiles;
- Vector<ObjectAllocationProfile> m_objectAllocationProfiles;
+ RefCountedArray<ObjectAllocationProfile> m_objectAllocationProfiles;
// Constant Pool
COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
// TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates
// it, so we're stuck with it for now.
Vector<WriteBarrier<Unknown>> m_constantRegisters;
- Vector<WriteBarrier<FunctionExecutable>> m_functionDecls;
- Vector<WriteBarrier<FunctionExecutable>> m_functionExprs;
+ Vector<SourceCodeRepresentation> m_constantsSourceCodeRepresentation;
+ RefCountedArray<WriteBarrier<FunctionExecutable>> m_functionDecls;
+ RefCountedArray<WriteBarrier<FunctionExecutable>> m_functionExprs;
- RefPtr<CodeBlock> m_alternative;
+ WriteBarrier<CodeBlock> m_alternative;
- ExecutionCounter m_llintExecuteCounter;
+ BaselineExecutionCounter m_llintExecuteCounter;
- ExecutionCounter m_jitExecuteCounter;
- int32_t m_totalJITExecutions;
+ BaselineExecutionCounter m_jitExecuteCounter;
uint32_t m_osrExitCounter;
uint16_t m_optimizationDelayCounter;
uint16_t m_reoptimizationRetryCounter;
-
- mutable CodeBlockHash m_hash;
-
- std::unique_ptr<BytecodeLivenessAnalysis> m_livenessAnalysis;
-
- struct RareData {
- WTF_MAKE_FAST_ALLOCATED;
- public:
- Vector<HandlerInfo> m_exceptionHandlers;
-
- // Buffers used for large array literals
- Vector<Vector<JSValue>> m_constantBuffers;
-
- // Jump Tables
- Vector<SimpleJumpTable> m_switchJumpTables;
- Vector<StringJumpTable> m_stringSwitchJumpTables;
-
- EvalCodeCache m_evalCodeCache;
- };
-#if COMPILER(MSVC)
- friend void WTF::deleteOwnedPtr<RareData>(RareData*);
-#endif
- OwnPtr<RareData> m_rareData;
-#if ENABLE(JIT)
- DFG::CapabilityLevel m_capabilityLevelState;
-#endif
-};
-
-// Program code is not marked by any function, so we make the global object
-// responsible for marking it.
-
-class GlobalCodeBlock : public CodeBlock {
-protected:
- GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
- : CodeBlock(CopyParsedBlock, other)
- {
- }
-
- GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
- : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
- {
- }
-};
-
-class ProgramCodeBlock : public GlobalCodeBlock {
-public:
- ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
- : GlobalCodeBlock(CopyParsedBlock, other)
- {
- }
- ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset)
- : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, firstLineColumnOffset)
- {
- }
+ std::chrono::steady_clock::time_point m_creationTime;
-#if ENABLE(JIT)
-protected:
- virtual CodeBlock* replacement() override;
- virtual DFG::CapabilityLevel capabilityLevelInternal() override;
-#endif
-};
-
-class EvalCodeBlock : public GlobalCodeBlock {
-public:
- EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
- : GlobalCodeBlock(CopyParsedBlock, other)
- {
- }
-
- EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider)
- : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, 1)
- {
- }
-
- const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); }
- unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); }
-
-#if ENABLE(JIT)
-protected:
- virtual CodeBlock* replacement() override;
- virtual DFG::CapabilityLevel capabilityLevelInternal() override;
-#endif
-
-private:
- UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); }
-};
+ std::unique_ptr<BytecodeLivenessAnalysis> m_livenessAnalysis;
-class FunctionCodeBlock : public CodeBlock {
-public:
- FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
- : CodeBlock(CopyParsedBlock, other)
- {
- }
+ std::unique_ptr<RareData> m_rareData;
- FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
- : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
- {
- }
-
-#if ENABLE(JIT)
-protected:
- virtual CodeBlock* replacement() override;
- virtual DFG::CapabilityLevel capabilityLevelInternal() override;
-#endif
+ UnconditionalFinalizer m_unconditionalFinalizer;
+ WeakReferenceHarvester m_weakReferenceHarvester;
};
-inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
-{
- RELEASE_ASSERT(inlineCallFrame);
- ExecutableBase* executable = inlineCallFrame->executable.get();
- RELEASE_ASSERT(executable->structure()->classInfo() == FunctionExecutable::info());
- return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
-}
-
-inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
-{
- if (codeOrigin.inlineCallFrame)
- return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
- return baselineCodeBlock;
-}
-
-inline int CodeBlock::argumentIndexAfterCapture(size_t argument)
-{
- if (argument >= static_cast<size_t>(symbolTable()->parameterCount()))
- return CallFrame::argumentOffset(argument);
-
- const SlowArgument* slowArguments = symbolTable()->slowArguments();
- if (!slowArguments || slowArguments[argument].status == SlowArgument::Normal)
- return CallFrame::argumentOffset(argument);
-
- ASSERT(slowArguments[argument].status == SlowArgument::Captured);
- return slowArguments[argument].index;
-}
-
-inline bool CodeBlock::hasSlowArguments()
-{
- return !!symbolTable()->slowArguments();
-}
-
inline Register& ExecState::r(int index)
{
CodeBlock* codeBlock = this->codeBlock();
@@ -1241,44 +1058,47 @@ inline Register& ExecState::r(int index)
return this[index];
}
+inline Register& ExecState::r(VirtualRegister reg)
+{
+ return r(reg.offset());
+}
+
inline Register& ExecState::uncheckedR(int index)
{
RELEASE_ASSERT(index < FirstConstantRegisterIndex);
return this[index];
}
-inline JSValue ExecState::argumentAfterCapture(size_t argument)
+inline Register& ExecState::uncheckedR(VirtualRegister reg)
{
- if (argument >= argumentCount())
- return jsUndefined();
-
- if (!codeBlock())
- return this[argumentOffset(argument)].jsValue();
-
- return this[codeBlock()->argumentIndexAfterCapture(argument)].jsValue();
+ return uncheckedR(reg.offset());
}
-inline void CodeBlockSet::mark(void* candidateCodeBlock)
+inline void CodeBlock::clearVisitWeaklyHasBeenCalled()
{
- // We have to check for 0 and -1 because those are used by the HashMap as markers.
- uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
-
- // This checks for both of those nasty cases in one go.
- // 0 + 1 = 1
- // -1 + 1 = 0
- if (value + 1 <= 1)
- return;
-
- HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock));
- if (iter == m_set.end())
- return;
-
- (*iter)->m_mayBeExecuting = true;
-#if ENABLE(GGC)
- m_currentlyExecuting.append(static_cast<CodeBlock*>(candidateCodeBlock));
-#endif
+ m_visitWeaklyHasBeenCalled = false;
}
-} // namespace JSC
+template <typename ExecutableType>
+JSObject* ScriptExecutable::prepareForExecution(VM& vm, JSFunction* function, JSScope* scope, CodeSpecializationKind kind, CodeBlock*& resultCodeBlock)
+{
+ if (hasJITCodeFor(kind)) {
+ if (std::is_same<ExecutableType, EvalExecutable>::value)
+ resultCodeBlock = jsCast<CodeBlock*>(jsCast<EvalExecutable*>(this)->codeBlock());
+ else if (std::is_same<ExecutableType, ProgramExecutable>::value)
+ resultCodeBlock = jsCast<CodeBlock*>(jsCast<ProgramExecutable*>(this)->codeBlock());
+ else if (std::is_same<ExecutableType, ModuleProgramExecutable>::value)
+ resultCodeBlock = jsCast<CodeBlock*>(jsCast<ModuleProgramExecutable*>(this)->codeBlock());
+ else if (std::is_same<ExecutableType, FunctionExecutable>::value)
+ resultCodeBlock = jsCast<CodeBlock*>(jsCast<FunctionExecutable*>(this)->codeBlockFor(kind));
+ else
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr;
+ }
+ return prepareForExecutionImpl(vm, function, scope, kind, resultCodeBlock);
+}
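Because the ExecutableType checks above are std::is_same tests on a template parameter, each instantiation constant-folds down to a single branch. A hypothetical call site (executable, vm, function, and scope are assumed names, not from this patch):

    // A non-null return is the failure case: an error object came back
    // from compilation, per the pattern above.
    CodeBlock* codeBlock = nullptr;
    JSObject* error = executable->prepareForExecution<FunctionExecutable>(
        vm, function, scope, CodeForCall, codeBlock);
    if (error)
        return error;
    // On success, codeBlock points at the linked block for the call kind.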
-#endif // CodeBlock_h
+#define CODEBLOCK_LOG_EVENT(codeBlock, summary, details) \
+ (codeBlock->vm()->logEvent(codeBlock, summary, [&] () { return toCString details; }))
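CODEBLOCK_LOG_EVENT defers the cost of formatting: details must be a parenthesized argument list, which the macro splices into a toCString call inside a lambda, so the string is built only if VM::logEvent decides to log. A hypothetical invocation (codeBlock and bytecodeIndex assumed in scope):

    // The inner parentheses become toCString("at bc#", bytecodeIndex).
    CODEBLOCK_LOG_EVENT(codeBlock, "osrEntry", ("at bc#", bytecodeIndex));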
+
+} // namespace JSC