author    Lorry Tar Creator <lorry-tar-importer@lorry>    2017-06-27 06:07:23 +0000
committer Lorry Tar Creator <lorry-tar-importer@lorry>    2017-06-27 06:07:23 +0000
commit    1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c
tree      46dcd36c86e7fbc6e5df36deb463b33e9967a6f7    /Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
parent    32761a6cee1d0dee366b885b7b9c777e67885688
Diffstat (limited to 'Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp')
-rw-r--r--    Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp    5420
1 file changed, 3858 insertions(+), 1562 deletions(-)
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
index c572e7a3e..a8c260892 100644
--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
@@ -1,5 +1,5 @@
- /*
- * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
+/*
+ * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,19 +28,35 @@
#if ENABLE(DFG_JIT)
+#include "ArithProfile.h"
#include "ArrayConstructor.h"
+#include "BasicBlockLocation.h"
#include "CallLinkStatus.h"
#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
+#include "DFGAbstractHeap.h"
#include "DFGArrayMode.h"
#include "DFGCapabilities.h"
+#include "DFGClobberize.h"
+#include "DFGClobbersExitState.h"
+#include "DFGGraph.h"
#include "DFGJITCode.h"
+#include "FunctionCodeBlock.h"
#include "GetByIdStatus.h"
-#include "JSActivation.h"
-#include "Operations.h"
+#include "Heap.h"
+#include "JSCInlines.h"
+#include "JSModuleEnvironment.h"
+#include "JSModuleNamespaceObject.h"
+#include "NumberConstructor.h"
+#include "ObjectConstructor.h"
#include "PreciseJumpTargets.h"
+#include "PutByIdFlags.h"
#include "PutByIdStatus.h"
+#include "RegExpPrototype.h"
+#include "StackAlignment.h"
#include "StringConstructor.h"
+#include "StructureStubInfo.h"
+#include "Watchdog.h"
#include <wtf/CommaPrinter.h>
#include <wtf/HashMap.h>
#include <wtf/MathExtras.h>
@@ -48,6 +64,8 @@
namespace JSC { namespace DFG {
+static const bool verbose = false;
+
class ConstantBufferKey {
public:
ConstantBufferKey()
@@ -130,19 +148,17 @@ public:
, m_graph(graph)
, m_currentBlock(0)
, m_currentIndex(0)
- , m_constantUndefined(UINT_MAX)
- , m_constantNull(UINT_MAX)
- , m_constantNaN(UINT_MAX)
- , m_constant1(UINT_MAX)
- , m_constants(m_codeBlock->numberOfConstantRegisters())
+ , m_constantUndefined(graph.freeze(jsUndefined()))
+ , m_constantNull(graph.freeze(jsNull()))
+ , m_constantNaN(graph.freeze(jsNumber(PNaN)))
+ , m_constantOne(graph.freeze(jsNumber(1)))
, m_numArguments(m_codeBlock->numParameters())
- , m_numLocals(m_codeBlock->m_numCalleeRegisters)
+ , m_numLocals(m_codeBlock->m_numCalleeLocals)
, m_parameterSlots(0)
, m_numPassedVarArgs(0)
, m_inlineStackTop(0)
- , m_haveBuiltOperandMaps(false)
- , m_emptyJSValueIndex(UINT_MAX)
, m_currentInstruction(0)
+ , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
{
ASSERT(m_profiledBlock);
}
@@ -155,56 +171,133 @@ private:
// Just parse from m_currentIndex to the end of the current CodeBlock.
void parseCodeBlock();
+
+ void ensureLocals(unsigned newNumLocals)
+ {
+ if (newNumLocals <= m_numLocals)
+ return;
+ m_numLocals = newNumLocals;
+ for (size_t i = 0; i < m_graph.numBlocks(); ++i)
+ m_graph.block(i)->ensureLocals(newNumLocals);
+ }
// Helper for min and max.
- bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
-
+ template<typename ChecksFunctor>
+ bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
+
+ void refineStatically(CallLinkStatus&, Node* callTarget);
// Handle calls. This resolves issues surrounding inlining and intrinsics.
- void handleCall(Instruction* currentInstruction, NodeType op, CodeSpecializationKind);
- void emitFunctionChecks(const CallLinkStatus&, Node* callTarget, int registerOffset, CodeSpecializationKind);
- void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind);
+ enum Terminality { Terminal, NonTerminal };
+ Terminality handleCall(
+ int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
+ Node* callTarget, int argCount, int registerOffset, CallLinkStatus,
+ SpeculatedType prediction);
+ Terminality handleCall(
+ int result, NodeType op, CallMode, unsigned instructionSize,
+ Node* callTarget, int argCount, int registerOffset, CallLinkStatus);
+ Terminality handleCall(int result, NodeType op, CallMode, unsigned instructionSize, int callee, int argCount, int registerOffset);
+ Terminality handleCall(Instruction* pc, NodeType op, CallMode);
+ Terminality handleVarargsCall(Instruction* pc, NodeType op, CallMode);
+ void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgumnt);
+ void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
+ Node* getArgumentCount();
+ unsigned inliningCost(CallVariant, int argumentCountIncludingThis, CallMode); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
// Handle inlining. Return true if it succeeded, false if we need to plant a call.
- bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
+ bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
+ enum CallerLinkability { CallerDoesNormalLinking, CallerLinksManually };
+ template<typename ChecksFunctor>
+ bool attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks);
+ template<typename ChecksFunctor>
+ void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, const ChecksFunctor& insertChecks);
+ void cancelLinkingForBlock(InlineStackEntry*, BasicBlock*); // Only works when the given block is the last one to have been added for that inline stack entry.
// Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
- bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
- bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType);
- bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind);
- Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
- Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset);
- void handleGetByOffset(
- int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
- PropertyOffset);
- void handleGetById(
- int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
- const GetByIdStatus&);
+ template<typename ChecksFunctor>
+ bool handleIntrinsicCall(Node* callee, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
+ template<typename ChecksFunctor>
+ bool handleDOMJITCall(Node* callee, int resultOperand, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
+ template<typename ChecksFunctor>
+ bool handleIntrinsicGetter(int resultOperand, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks);
+ template<typename ChecksFunctor>
+ bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
+ template<typename ChecksFunctor>
+ bool handleConstantInternalFunction(Node* callTargetNode, int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks);
+ Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, const InferredType::Descriptor&, Node* value);
+ Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, const InferredType::Descriptor&, NodeType = GetByOffset);
+ bool handleDOMJITGetter(int resultOperand, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction);
+ bool handleModuleNamespaceLoad(int resultOperand, SpeculatedType, Node* base, GetByIdStatus);
+
+ // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not
+ // check the validity of the condition, but it may return a null one if it encounters a contradiction.
+ ObjectPropertyCondition presenceLike(
+ JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
+
+ // Attempt to watch the presence of a property. It will watch that the property is present in the same
+ // way as in all of the structures in the set. It may emit code instead of just setting a watchpoint.
+ // Returns true if this all works out.
+ bool checkPresenceLike(JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
+ void checkPresenceLike(Node* base, UniquedStringImpl*, PropertyOffset, const StructureSet&);
+
+ // Works with both GetByIdVariant and the setter form of PutByIdVariant.
+ template<typename VariantType>
+ Node* load(SpeculatedType, Node* base, unsigned identifierNumber, const VariantType&);
+
+ Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value);
- Node* getScope(bool skipTop, unsigned skipCount);
+ void handleGetById(
+ int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize);
+ void emitPutById(
+ Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect);
+ void handlePutById(
+ Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
+ bool isDirect);
+
+ // Either register a watchpoint or emit a check for this condition. Returns false if the
+ // condition no longer holds, and therefore no reasonable check can be emitted.
+ bool check(const ObjectPropertyCondition&);
+
+ GetByOffsetMethod promoteToConstant(GetByOffsetMethod);
+
+ // Either register a watchpoint or emit a check for this condition. It must be a Presence
+ // condition. It will attempt to promote a Presence condition to an Equivalence condition.
+ // Emits code for the loaded value that the condition guards, and returns a node containing
+ // the loaded value. Returns null if the condition no longer holds.
+ GetByOffsetMethod planLoad(const ObjectPropertyCondition&);
+ Node* load(SpeculatedType, unsigned identifierNumber, const GetByOffsetMethod&, NodeType = GetByOffset);
+ Node* load(SpeculatedType, const ObjectPropertyCondition&, NodeType = GetByOffset);
- // Prepare to parse a block.
+ // Calls check() for each condition in the set: that is, it either emits checks or registers
+ // watchpoints (or a combination of the two) to make the conditions hold. If any of those
+ // conditions are no longer checkable, returns false.
+ bool check(const ObjectPropertyConditionSet&);
+
+ // Calls check() for those conditions that aren't the slot base, and calls load() for the slot
+ // base. Does a combination of watchpoint registration and check emission to guard the
+ // conditions, and emits code to load the value from the slot base. Returns a node containing
+ // the loaded value. Returns null if any of the conditions were no longer checkable.
+ GetByOffsetMethod planLoad(const ObjectPropertyConditionSet&);
+ Node* load(SpeculatedType, const ObjectPropertyConditionSet&, NodeType = GetByOffset);
+
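As a rough illustration of how the condition helpers above compose, a hypothetical caller might look like this (ByteCodeParser member context is assumed; `emitGuardedGet`, `prediction`, and `conditions` are illustrative names, not code from this patch):

    Node* emitGuardedGet(SpeculatedType prediction, const ObjectPropertyConditionSet& conditions)
    {
        // load() registers watchpoints and/or emits checks for every condition other
        // than the slot base, then emits the read from the slot base itself.
        if (Node* value = load(prediction, conditions, GetByOffset))
            return value;
        // Some condition was no longer checkable; a real caller would plant a
        // generic GetById here instead (omitted from this sketch).
        return nullptr;
    }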
void prepareToParseBlock();
+ void clearCaches();
+
// Parse a single basic block of bytecode instructions.
bool parseBlock(unsigned limit);
// Link block successors.
void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
- VariableAccessData* newVariableAccessData(VirtualRegister operand, bool isCaptured)
+ VariableAccessData* newVariableAccessData(VirtualRegister operand)
{
ASSERT(!operand.isConstant());
- m_graph.m_variableAccessData.append(VariableAccessData(operand, isCaptured));
+ m_graph.m_variableAccessData.append(VariableAccessData(operand));
return &m_graph.m_variableAccessData.last();
}
// Get/Set the operands/result of a bytecode instruction.
Node* getDirect(VirtualRegister operand)
{
- // Is this a constant?
- if (operand.isConstant()) {
- unsigned constant = operand.toConstantIndex();
- ASSERT(constant < m_constants.size());
- return getJSConstant(constant);
- }
+ ASSERT(!operand.isConstant());
// Is this an argument?
if (operand.isArgument())
@@ -216,28 +309,81 @@ private:
Node* get(VirtualRegister operand)
{
+ if (operand.isConstant()) {
+ unsigned constantIndex = operand.toConstantIndex();
+ unsigned oldSize = m_constants.size();
+ if (constantIndex >= oldSize || !m_constants[constantIndex]) {
+ const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
+ JSValue value = codeBlock.getConstant(operand.offset());
+ SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
+ if (constantIndex >= oldSize) {
+ m_constants.grow(constantIndex + 1);
+ for (unsigned i = oldSize; i < m_constants.size(); ++i)
+ m_constants[i] = nullptr;
+ }
+
+ Node* constantNode = nullptr;
+ if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
+ constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
+ else
+ constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
+ m_constants[constantIndex] = constantNode;
+ }
+ ASSERT(m_constants[constantIndex]);
+ return m_constants[constantIndex];
+ }
+
if (inlineCallFrame()) {
if (!inlineCallFrame()->isClosureCall) {
JSFunction* callee = inlineCallFrame()->calleeConstant();
- if (operand.offset() == JSStack::Callee)
- return cellConstant(callee);
- if (operand.offset() == JSStack::ScopeChain)
- return cellConstant(callee->scope());
+ if (operand.offset() == CallFrameSlot::callee)
+ return weakJSConstant(callee);
+ }
+ } else if (operand.offset() == CallFrameSlot::callee) {
+ // We have to do some constant-folding here because this enables CreateThis folding. Note
+ // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
+ // case if the function is a singleton then we already know it.
+ if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(*m_vm, m_codeBlock->ownerExecutable())) {
+ InferredValue* singleton = executable->singletonFunction();
+ if (JSValue value = singleton->inferredValue()) {
+ m_graph.watchpoints().addLazily(singleton);
+ JSFunction* function = jsCast<JSFunction*>(value);
+ return weakJSConstant(function);
+ }
}
- } else if (operand.offset() == JSStack::Callee)
return addToGraph(GetCallee);
- else if (operand.offset() == JSStack::ScopeChain)
- return addToGraph(GetMyScope);
+ }
return getDirect(m_inlineStackTop->remapOperand(operand));
}
- enum SetMode { NormalSet, ImmediateSet };
+ enum SetMode {
+ // A normal set which follows a two-phase commit that spans code origins. During
+ // the current code origin it issues a MovHint, and at the start of the next
+ // code origin there will be a SetLocal. If the local needs flushing, the second
+ // SetLocal will be preceded with a Flush.
+ NormalSet,
+
+ // A set where the SetLocal happens immediately and there is still a Flush. This
+ // is relevant when assigning to a local in tricky situations for the delayed
+ // SetLocal logic but where we know that we have not performed any side effects
+ // within this code origin. This is a safe replacement for NormalSet anytime we
+ // know that we have not yet performed side effects in this code origin.
+ ImmediateSetWithFlush,
+
+ // A set where the SetLocal happens immediately and we do not Flush it even if
+ // this is a local that is marked as needing it. This is relevant when
+ // initializing locals at the top of a function.
+ ImmediateNakedSet
+ };
Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
{
addToGraph(MovHint, OpInfo(operand.offset()), value);
-
- DelayedSetLocal delayed = DelayedSetLocal(operand, value);
+
+ // We can't exit anymore because our OSR exit state has changed.
+ m_exitOK = false;
+
+ DelayedSetLocal delayed(currentCodeOrigin(), operand, value);
if (setMode == NormalSet) {
m_setLocalQueue.append(delayed);
@@ -246,6 +392,13 @@ private:
return delayed.execute(this, setMode);
}
+
+ void processSetLocalQueue()
+ {
+ for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
+ m_setLocalQueue[i].execute(this);
+ m_setLocalQueue.resize(0);
+ }
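A minimal sketch of the NormalSet two-phase commit described above, assuming ByteCodeParser member context (the point at which m_exitOK is restored and the surrounding opcode loop are simplifications, not code from this patch):

    void exampleEmitAdd(VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
    {
        Node* sum = addToGraph(ValueAdd, get(lhs), get(rhs));
        set(dst, sum);          // NormalSet: emits the MovHint now, queues the SetLocal.
        // ... parsing of the current bytecode op ends here ...
        m_exitOK = true;        // assumption: no side effects are pending at the op boundary
        processSetLocalQueue(); // the queued SetLocal (plus any needed Flush) lands here,
                                // attributed to the next code origin
    }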
Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
{
@@ -255,8 +408,8 @@ private:
Node* injectLazyOperandSpeculation(Node* node)
{
ASSERT(node->op() == GetLocal);
- ASSERT(node->codeOrigin.bytecodeIndex == m_currentIndex);
- ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+ ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
+ ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
LazyOperandValueProfileKey key(m_currentIndex, node->local());
SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
node->variableAccessData()->predict(prediction);
@@ -268,25 +421,7 @@ private:
{
unsigned local = operand.toLocal();
- if (local < m_localWatchpoints.size()) {
- if (VariableWatchpointSet* set = m_localWatchpoints[local]) {
- if (JSValue value = set->inferredValue()) {
- addToGraph(FunctionReentryWatchpoint, OpInfo(m_codeBlock->symbolTable()));
- addToGraph(VariableWatchpoint, OpInfo(set));
- // Note: this is very special from an OSR exit standpoint. We wouldn't be
- // able to do this for most locals, but it works here because we're dealing
- // with a flushed local. For most locals we would need to issue a GetLocal
- // here and ensure that we have uses in DFG IR wherever there would have
- // been uses in bytecode. Clearly this optimization does not do this. But
- // that's fine, because we don't need to track liveness for captured
- // locals, and this optimization only kicks in for captured locals.
- return inferredConstant(value);
- }
- }
- }
-
Node* node = m_currentBlock->variablesAtTail.local(local);
- bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
// This has two goals: 1) link together variable access datas, and 2)
// try to avoid creating redundant GetLocals. (1) is required for
@@ -298,45 +433,46 @@ private:
if (node) {
variable = node->variableAccessData();
- variable->mergeIsCaptured(isCaptured);
- if (!isCaptured) {
- switch (node->op()) {
- case GetLocal:
- return node;
- case SetLocal:
- return node->child1().node();
- default:
- break;
- }
+ switch (node->op()) {
+ case GetLocal:
+ return node;
+ case SetLocal:
+ return node->child1().node();
+ default:
+ break;
}
} else
- variable = newVariableAccessData(operand, isCaptured);
+ variable = newVariableAccessData(operand);
node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
m_currentBlock->variablesAtTail.local(local) = node;
return node;
}
-
- Node* setLocal(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
+ Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
{
+ CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin;
+ m_currentSemanticOrigin = semanticOrigin;
+
unsigned local = operand.toLocal();
- bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
- if (setMode == NormalSet) {
+ if (setMode != ImmediateNakedSet) {
ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
- if (isCaptured || argumentPosition)
+ if (argumentPosition)
flushDirect(operand, argumentPosition);
+ else if (m_graph.needsScopeRegister() && operand == m_codeBlock->scopeRegister())
+ flush(operand);
}
- VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
+ VariableAccessData* variableAccessData = newVariableAccessData(operand);
variableAccessData->mergeStructureCheckHoistingFailed(
- m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint));
+ m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
variableAccessData->mergeCheckArrayHoistingFailed(
- m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
+ m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
m_currentBlock->variablesAtTail.local(local) = node;
+
+ m_currentSemanticOrigin = oldSemanticOrigin;
return node;
}
@@ -347,13 +483,11 @@ private:
ASSERT(argument < m_numArguments);
Node* node = m_currentBlock->variablesAtTail.argument(argument);
- bool isCaptured = m_codeBlock->isCaptured(operand);
VariableAccessData* variable;
if (node) {
variable = node->variableAccessData();
- variable->mergeIsCaptured(isCaptured);
switch (node->op()) {
case GetLocal:
@@ -364,36 +498,40 @@ private:
break;
}
} else
- variable = newVariableAccessData(operand, isCaptured);
+ variable = newVariableAccessData(operand);
node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
m_currentBlock->variablesAtTail.argument(argument) = node;
return node;
}
- Node* setArgument(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
+ Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
{
+ CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin;
+ m_currentSemanticOrigin = semanticOrigin;
+
unsigned argument = operand.toArgument();
ASSERT(argument < m_numArguments);
- bool isCaptured = m_codeBlock->isCaptured(operand);
-
- VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
+ VariableAccessData* variableAccessData = newVariableAccessData(operand);
// Always flush arguments, except for 'this'. If 'this' is created by us,
// then make sure that it's never unboxed.
- if (argument) {
- if (setMode == NormalSet)
+ if (argument || m_graph.needsFlushedThis()) {
+ if (setMode != ImmediateNakedSet)
flushDirect(operand);
- } else if (m_codeBlock->specializationKind() == CodeForConstruct)
+ }
+
+ if (!argument && m_codeBlock->specializationKind() == CodeForConstruct)
variableAccessData->mergeShouldNeverUnbox(true);
variableAccessData->mergeStructureCheckHoistingFailed(
- m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint));
+ m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
variableAccessData->mergeCheckArrayHoistingFailed(
- m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
+ m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
m_currentBlock->variablesAtTail.argument(argument) = node;
+
+ m_currentSemanticOrigin = oldSemanticOrigin;
return node;
}
@@ -411,7 +549,7 @@ private:
InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
if (!inlineCallFrame)
break;
- if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + JSStack::CallFrameHeaderSize))
+ if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + CallFrame::headerSizeInRegisters))
continue;
if (operand.offset() == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
continue;
@@ -430,18 +568,6 @@ private:
return findArgumentPositionForLocal(operand);
}
- void addConstant(JSValue value)
- {
- unsigned constantIndex = m_codeBlock->addConstantLazily();
- initializeLazyWriteBarrierForConstant(
- m_graph.m_plan.writeBarriers,
- m_codeBlock->constants()[constantIndex],
- m_codeBlock,
- constantIndex,
- m_codeBlock->ownerExecutable(),
- value);
- }
-
void flush(VirtualRegister operand)
{
flushDirect(m_inlineStackTop->remapOperand(operand));
@@ -451,87 +577,114 @@ private:
{
flushDirect(operand, findArgumentPosition(operand));
}
-
+
void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
{
- bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
-
+ addFlushOrPhantomLocal<Flush>(operand, argumentPosition);
+ }
+
+ template<NodeType nodeType>
+ void addFlushOrPhantomLocal(VirtualRegister operand, ArgumentPosition* argumentPosition)
+ {
ASSERT(!operand.isConstant());
Node* node = m_currentBlock->variablesAtTail.operand(operand);
VariableAccessData* variable;
- if (node) {
+ if (node)
variable = node->variableAccessData();
- variable->mergeIsCaptured(isCaptured);
- } else
- variable = newVariableAccessData(operand, isCaptured);
+ else
+ variable = newVariableAccessData(operand);
- node = addToGraph(Flush, OpInfo(variable));
+ node = addToGraph(nodeType, OpInfo(variable));
m_currentBlock->variablesAtTail.operand(operand) = node;
if (argumentPosition)
argumentPosition->addVariable(variable);
}
+ void phantomLocalDirect(VirtualRegister operand)
+ {
+ addFlushOrPhantomLocal<PhantomLocal>(operand, findArgumentPosition(operand));
+ }
+
void flush(InlineStackEntry* inlineStackEntry)
{
int numArguments;
if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) {
+ ASSERT(!m_hasDebuggerEnabled);
numArguments = inlineCallFrame->arguments.size();
- if (inlineCallFrame->isClosureCall) {
- flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::Callee)));
- flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::ScopeChain)));
- }
+ if (inlineCallFrame->isClosureCall)
+ flushDirect(inlineStackEntry->remapOperand(VirtualRegister(CallFrameSlot::callee)));
+ if (inlineCallFrame->isVarargs())
+ flushDirect(inlineStackEntry->remapOperand(VirtualRegister(CallFrameSlot::argumentCount)));
} else
numArguments = inlineStackEntry->m_codeBlock->numParameters();
for (unsigned argument = numArguments; argument-- > 1;)
flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument)));
- for (int local = 0; local < inlineStackEntry->m_codeBlock->m_numVars; ++local) {
- if (!inlineStackEntry->m_codeBlock->isCaptured(virtualRegisterForLocal(local)))
- continue;
- flushDirect(inlineStackEntry->remapOperand(virtualRegisterForLocal(local)));
- }
+ if (!inlineStackEntry->m_inlineCallFrame && m_graph.needsFlushedThis())
+ flushDirect(virtualRegisterForArgument(0));
+ if (m_graph.needsScopeRegister())
+ flush(m_codeBlock->scopeRegister());
}
- void flushAllArgumentsAndCapturedVariablesInInlineStack()
+ void flushForTerminal()
{
- for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller)
+ CodeOrigin origin = currentCodeOrigin();
+ unsigned bytecodeIndex = origin.bytecodeIndex;
+
+ for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller) {
flush(inlineStackEntry);
+
+ ASSERT(origin.inlineCallFrame == inlineStackEntry->m_inlineCallFrame);
+ InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame;
+ CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
+ FullBytecodeLiveness& fullLiveness = m_graph.livenessFor(codeBlock);
+ const FastBitVector& livenessAtBytecode = fullLiveness.getLiveness(bytecodeIndex);
+
+ for (unsigned local = codeBlock->m_numCalleeLocals; local--;) {
+ if (livenessAtBytecode[local]) {
+ VirtualRegister reg = virtualRegisterForLocal(local);
+ if (inlineCallFrame)
+ reg = inlineStackEntry->remapOperand(reg);
+ phantomLocalDirect(reg);
+ }
+ }
+
+ if (inlineCallFrame) {
+ bytecodeIndex = inlineCallFrame->directCaller.bytecodeIndex;
+ origin = inlineCallFrame->directCaller;
+ }
+ }
}
- void flushArgumentsAndCapturedVariables()
+ void flushForReturn()
{
flush(m_inlineStackTop);
}
-
- // NOTE: Only use this to construct constants that arise from non-speculative
- // constant folding. I.e. creating constants using this if we had constant
- // field inference would be a bad idea, since the bytecode parser's folding
- // doesn't handle liveness preservation.
- Node* getJSConstantForValue(JSValue constantValue, NodeFlags flags = NodeIsStaticConstant)
+
+ void flushIfTerminal(SwitchData& data)
{
- unsigned constantIndex;
- if (!m_codeBlock->findConstant(constantValue, constantIndex)) {
- addConstant(constantValue);
- m_constants.append(ConstantRecord());
- }
+ if (data.fallThrough.bytecodeIndex() > m_currentIndex)
+ return;
- ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
+ for (unsigned i = data.cases.size(); i--;) {
+ if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
+ return;
+ }
- return getJSConstant(constantIndex, flags);
+ flushForTerminal();
}
- Node* getJSConstant(unsigned constant, NodeFlags flags = NodeIsStaticConstant)
+ // Assumes that the constant should be strongly marked.
+ Node* jsConstant(JSValue constantValue)
{
- Node* node = m_constants[constant].asJSValue;
- if (node)
- return node;
+ return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
+ }
- Node* result = addToGraph(JSConstant, OpInfo(constant));
- result->mergeFlags(flags);
- m_constants[constant].asJSValue = result;
- return result;
+ Node* weakJSConstant(JSValue constantValue)
+ {
+ return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
}
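A minimal sketch of the difference between the two constant helpers, assuming ByteCodeParser member context and a hypothetical JSFunction* input:

    void exampleConstants(JSFunction* knownCallee)
    {
        Node* one = jsConstant(jsNumber(1));        // freezeStrong(): the value is kept alive for this compilation
        Node* callee = weakJSConstant(knownCallee); // freeze(): a weak reference; cell validity is tracked separately
        addToGraph(Phantom, one);                   // arbitrary uses, only so the sketch is self-contained
        addToGraph(Phantom, callee);
    }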
// Helper functions to get/set the this value.
@@ -545,275 +698,217 @@ private:
set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
}
- // Convenience methods for checking nodes for constants.
- bool isJSConstant(Node* node)
- {
- return node->op() == JSConstant;
- }
- bool isInt32Constant(Node* node)
- {
- return isJSConstant(node) && valueOfJSConstant(node).isInt32();
- }
- // Convenience methods for getting constant values.
- JSValue valueOfJSConstant(Node* node)
- {
- ASSERT(isJSConstant(node));
- return m_codeBlock->getConstant(FirstConstantRegisterIndex + node->constantNumber());
- }
- int32_t valueOfInt32Constant(Node* node)
+ InlineCallFrame* inlineCallFrame()
{
- ASSERT(isInt32Constant(node));
- return valueOfJSConstant(node).asInt32();
+ return m_inlineStackTop->m_inlineCallFrame;
}
-
- // This method returns a JSConstant with the value 'undefined'.
- Node* constantUndefined()
- {
- // Has m_constantUndefined been set up yet?
- if (m_constantUndefined == UINT_MAX) {
- // Search the constant pool for undefined, if we find it, we can just reuse this!
- unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
- for (m_constantUndefined = 0; m_constantUndefined < numberOfConstants; ++m_constantUndefined) {
- JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined);
- if (testMe.isUndefined())
- return getJSConstant(m_constantUndefined);
- }
-
- // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants.
- ASSERT(m_constants.size() == numberOfConstants);
- addConstant(jsUndefined());
- m_constants.append(ConstantRecord());
- ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
- }
- // m_constantUndefined must refer to an entry in the CodeBlock's constant pool that has the value 'undefined'.
- ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined).isUndefined());
- return getJSConstant(m_constantUndefined);
+ bool allInlineFramesAreTailCalls()
+ {
+ return !inlineCallFrame() || !inlineCallFrame()->getCallerSkippingTailCalls();
}
- // This method returns a JSConstant with the value 'null'.
- Node* constantNull()
+ CodeOrigin currentCodeOrigin()
{
- // Has m_constantNull been set up yet?
- if (m_constantNull == UINT_MAX) {
- // Search the constant pool for null, if we find it, we can just reuse this!
- unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
- for (m_constantNull = 0; m_constantNull < numberOfConstants; ++m_constantNull) {
- JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull);
- if (testMe.isNull())
- return getJSConstant(m_constantNull);
- }
-
- // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants.
- ASSERT(m_constants.size() == numberOfConstants);
- addConstant(jsNull());
- m_constants.append(ConstantRecord());
- ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
- }
-
- // m_constantNull must refer to an entry in the CodeBlock's constant pool that has the value 'null'.
- ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull).isNull());
- return getJSConstant(m_constantNull);
+ return CodeOrigin(m_currentIndex, inlineCallFrame());
}
- // This method returns a DoubleConstant with the value 1.
- Node* one()
+ NodeOrigin currentNodeOrigin()
{
- // Has m_constant1 been set up yet?
- if (m_constant1 == UINT_MAX) {
- // Search the constant pool for the value 1, if we find it, we can just reuse this!
- unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
- for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) {
- JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1);
- if (testMe.isInt32() && testMe.asInt32() == 1)
- return getJSConstant(m_constant1);
- }
-
- // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants.
- ASSERT(m_constants.size() == numberOfConstants);
- addConstant(jsNumber(1));
- m_constants.append(ConstantRecord());
- ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
- }
+ CodeOrigin semantic;
+ CodeOrigin forExit;
- // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1.
- ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32());
- ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1);
- return getJSConstant(m_constant1);
- }
-
- // This method returns a DoubleConstant with the value NaN.
- Node* constantNaN()
- {
- JSValue nan = jsNaN();
-
- // Has m_constantNaN been set up yet?
- if (m_constantNaN == UINT_MAX) {
- // Search the constant pool for the value NaN, if we find it, we can just reuse this!
- unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
- for (m_constantNaN = 0; m_constantNaN < numberOfConstants; ++m_constantNaN) {
- JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN);
- if (JSValue::encode(testMe) == JSValue::encode(nan))
- return getJSConstant(m_constantNaN);
- }
+ if (m_currentSemanticOrigin.isSet())
+ semantic = m_currentSemanticOrigin;
+ else
+ semantic = currentCodeOrigin();
- // Add the value nan to the CodeBlock's constants, and add a corresponding slot in m_constants.
- ASSERT(m_constants.size() == numberOfConstants);
- addConstant(nan);
- m_constants.append(ConstantRecord());
- ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
- }
+ forExit = currentCodeOrigin();
- // m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value nan.
- ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble());
- ASSERT(std::isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble()));
- return getJSConstant(m_constantNaN);
+ return NodeOrigin(semantic, forExit, m_exitOK);
}
- Node* cellConstant(JSCell* cell)
+ BranchData* branchData(unsigned taken, unsigned notTaken)
{
- HashMap<JSCell*, Node*>::AddResult result = m_cellConstantNodes.add(cell, nullptr);
- if (result.isNewEntry)
- result.iterator->value = addToGraph(WeakJSConstant, OpInfo(cell));
-
- return result.iterator->value;
+ // We assume that branches originating from bytecode always have a fall-through. We
+ // use this assumption to avoid checking for the creation of terminal blocks.
+ ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
+ BranchData* data = m_graph.m_branchData.add();
+ *data = BranchData::withBytecodeIndices(taken, notTaken);
+ return data;
}
- Node* inferredConstant(JSValue value)
+ Node* addToGraph(Node* node)
{
- if (value.isCell())
- return cellConstant(value.asCell());
- return getJSConstantForValue(value, 0);
- }
-
- InlineCallFrame* inlineCallFrame()
- {
- return m_inlineStackTop->m_inlineCallFrame;
- }
-
- CodeOrigin currentCodeOrigin()
- {
- return CodeOrigin(m_currentIndex, inlineCallFrame());
- }
-
- bool canFold(Node* node)
- {
- if (Options::validateFTLOSRExitLiveness()) {
- // The static folding that the bytecode parser does results in the DFG
- // being able to do some DCE that the bytecode liveness analysis would
- // miss. Hence, we disable the static folding if we're validating FTL OSR
- // exit liveness. This may be brutish, but this validator is powerful
- // enough that it's worth it.
- return false;
- }
-
- return node->isStronglyProvedConstantIn(inlineCallFrame());
- }
-
- // Our codegen for constant strict equality performs a bitwise comparison,
- // so we can only select values that have a consistent bitwise identity.
- bool isConstantForCompareStrictEq(Node* node)
- {
- if (!node->isConstant())
- return false;
- JSValue value = valueOfJSConstant(node);
- return value.isBoolean() || value.isUndefinedOrNull();
+ if (Options::verboseDFGByteCodeParsing())
+ dataLog(" appended ", node, " ", Graph::opName(node->op()), "\n");
+ m_currentBlock->append(node);
+ if (clobbersExitState(m_graph, node))
+ m_exitOK = false;
+ return node;
}
Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
{
Node* result = m_graph.addNode(
- SpecNone, op, currentCodeOrigin(), Edge(child1), Edge(child2), Edge(child3));
- ASSERT(op != Phi);
- m_currentBlock->append(result);
- return result;
+ op, currentNodeOrigin(), Edge(child1), Edge(child2),
+ Edge(child3));
+ return addToGraph(result);
}
Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
{
Node* result = m_graph.addNode(
- SpecNone, op, currentCodeOrigin(), child1, child2, child3);
- ASSERT(op != Phi);
- m_currentBlock->append(result);
- return result;
+ op, currentNodeOrigin(), child1, child2, child3);
+ return addToGraph(result);
}
Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
{
Node* result = m_graph.addNode(
- SpecNone, op, currentCodeOrigin(), info, Edge(child1), Edge(child2), Edge(child3));
- ASSERT(op != Phi);
- m_currentBlock->append(result);
- return result;
+ op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
+ Edge(child3));
+ return addToGraph(result);
+ }
+ Node* addToGraph(NodeType op, OpInfo info, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
+ {
+ Node* result = m_graph.addNode(op, currentNodeOrigin(), info, child1, child2, child3);
+ return addToGraph(result);
}
Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
{
Node* result = m_graph.addNode(
- SpecNone, op, currentCodeOrigin(), info1, info2,
+ op, currentNodeOrigin(), info1, info2,
Edge(child1), Edge(child2), Edge(child3));
- ASSERT(op != Phi);
- m_currentBlock->append(result);
- return result;
+ return addToGraph(result);
+ }
+ Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
+ {
+ Node* result = m_graph.addNode(
+ op, currentNodeOrigin(), info1, info2, child1, child2, child3);
+ return addToGraph(result);
}
- Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
+ Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2 = OpInfo())
{
Node* result = m_graph.addNode(
- SpecNone, Node::VarArg, op, currentCodeOrigin(), info1, info2,
+ Node::VarArg, op, currentNodeOrigin(), info1, info2,
m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
- ASSERT(op != Phi);
- m_currentBlock->append(result);
+ addToGraph(result);
m_numPassedVarArgs = 0;
return result;
}
-
+
void addVarArgChild(Node* child)
{
m_graph.m_varArgChildren.append(Edge(child));
m_numPassedVarArgs++;
}
- Node* addCall(Instruction* currentInstruction, NodeType op)
+ Node* addCallWithoutSettingResult(
+ NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
+ OpInfo prediction)
{
- SpeculatedType prediction = getPrediction();
-
- addVarArgChild(get(VirtualRegister(currentInstruction[2].u.operand)));
- int argCount = currentInstruction[3].u.operand;
- if (JSStack::ThisArgument + (unsigned)argCount > m_parameterSlots)
- m_parameterSlots = JSStack::ThisArgument + argCount;
+ addVarArgChild(callee);
+ size_t parameterSlots = Graph::parameterSlotsForArgCount(argCount);
+
+ if (parameterSlots > m_parameterSlots)
+ m_parameterSlots = parameterSlots;
- int registerOffset = -currentInstruction[4].u.operand;
- int dummyThisArgument = op == Call ? 0 : 1;
- for (int i = 0 + dummyThisArgument; i < argCount; ++i)
+ for (int i = 0; i < argCount; ++i)
addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
- Node* call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
- set(VirtualRegister(currentInstruction[1].u.operand), call);
+ return addToGraph(Node::VarArg, op, opInfo, prediction);
+ }
+
+ Node* addCall(
+ int result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset,
+ SpeculatedType prediction)
+ {
+ if (op == TailCall) {
+ if (allInlineFramesAreTailCalls())
+ return addCallWithoutSettingResult(op, OpInfo(signature), callee, argCount, registerOffset, OpInfo());
+ op = TailCallInlinedCaller;
+ }
+
+
+ Node* call = addCallWithoutSettingResult(
+ op, OpInfo(signature), callee, argCount, registerOffset, OpInfo(prediction));
+ VirtualRegister resultReg(result);
+ if (resultReg.isValid())
+ set(resultReg, call);
return call;
}
Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
{
- Node* objectNode = cellConstant(object);
+ // FIXME: This should route to emitPropertyCheck, not the other way around. But currently,
+ // this gets no profit from using emitPropertyCheck() since we'll non-adaptively watch the
+ // object's structure as soon as we make it a weakJSConstant.
+ Node* objectNode = weakJSConstant(object);
addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
return objectNode;
}
- Node* cellConstantWithStructureCheck(JSCell* object)
- {
- return cellConstantWithStructureCheck(object, object->structure());
- }
-
SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
{
- ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
- return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
+ SpeculatedType prediction;
+ {
+ ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+ prediction = m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
+ }
+
+ if (prediction != SpecNone)
+ return prediction;
+
+ // If we have no information about the values this
+ // node generates, we check if by any chance it is
+ // a tail call opcode. In that case, we walk up the
+ // inline frames to find a call higher in the call
+ // chain and use its prediction. If we only have
+ // inlined tail call frames, we use SpecFullTop
+ // to avoid a spurious OSR exit.
+ Instruction* instruction = m_inlineStackTop->m_profiledBlock->instructions().begin() + bytecodeIndex;
+ OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(instruction->u.opcode);
+
+ switch (opcodeID) {
+ case op_tail_call:
+ case op_tail_call_varargs:
+ case op_tail_call_forward_arguments: {
+ // Things should be more permissive to us returning BOTTOM instead of TOP here.
+ // Currently, this will cause us to Force OSR exit. This is bad because returning
+ // TOP will cause anything that transitively touches this speculated type to
+ // also become TOP during prediction propagation.
+ // https://bugs.webkit.org/show_bug.cgi?id=164337
+ if (!inlineCallFrame())
+ return SpecFullTop;
+
+ CodeOrigin* codeOrigin = inlineCallFrame()->getCallerSkippingTailCalls();
+ if (!codeOrigin)
+ return SpecFullTop;
+
+ InlineStackEntry* stack = m_inlineStackTop;
+ while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame)
+ stack = stack->m_caller;
+
+ bytecodeIndex = codeOrigin->bytecodeIndex;
+ CodeBlock* profiledBlock = stack->m_profiledBlock;
+ ConcurrentJSLocker locker(profiledBlock->m_lock);
+ return profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
+ }
+
+ default:
+ return SpecNone;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return SpecNone;
}
SpeculatedType getPrediction(unsigned bytecodeIndex)
{
SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
-
+
if (prediction == SpecNone) {
// We have no information about what values this node generates. Give up
// on executing this code, since we're likely to do more damage than good.
@@ -835,9 +930,10 @@ private:
ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
{
- ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+ ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
- return ArrayMode::fromObserved(locker, profile, action, false);
+ bool makeSafe = profile->outOfBounds(locker);
+ return ArrayMode::fromObserved(locker, profile, action, makeSafe);
}
ArrayMode getArrayMode(ArrayProfile* profile)
@@ -845,63 +941,73 @@ private:
return getArrayMode(profile, Array::Read);
}
- ArrayMode getArrayModeConsideringSlowPath(ArrayProfile* profile, Array::Action action)
- {
- ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
-
- profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
-
- bool makeSafe =
- m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
- || profile->outOfBounds(locker);
-
- ArrayMode result = ArrayMode::fromObserved(locker, profile, action, makeSafe);
-
- return result;
- }
-
Node* makeSafe(Node* node)
{
- bool likelyToTakeSlowCase;
- if (!isX86() && node->op() == ArithMod)
- likelyToTakeSlowCase = false;
- else
- likelyToTakeSlowCase = m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex);
+ if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
+ node->mergeFlags(NodeMayOverflowInt32InDFG);
+ if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
+ node->mergeFlags(NodeMayNegZeroInDFG);
- if (!likelyToTakeSlowCase
- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
+ if (!isX86() && node->op() == ArithMod)
return node;
-
- switch (node->op()) {
- case UInt32ToNumber:
- case ArithAdd:
- case ArithSub:
- case ValueAdd:
- case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
- node->mergeFlags(NodeMayOverflow);
- break;
-
- case ArithNegate:
- // Currently we can't tell the difference between a negation overflowing
- // (i.e. -(1 << 31)) or generating negative zero (i.e. -0). If it took slow
- // path then we assume that it did both of those things.
- node->mergeFlags(NodeMayOverflow);
- node->mergeFlags(NodeMayNegZero);
- break;
- case ArithMul:
- if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
- node->mergeFlags(NodeMayOverflow | NodeMayNegZero);
- else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
- node->mergeFlags(NodeMayNegZero);
- break;
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
+ {
+ ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
+ if (arithProfile) {
+ switch (node->op()) {
+ case ArithAdd:
+ case ArithSub:
+ case ValueAdd:
+ if (arithProfile->didObserveDouble())
+ node->mergeFlags(NodeMayHaveDoubleResult);
+ if (arithProfile->didObserveNonNumber())
+ node->mergeFlags(NodeMayHaveNonNumberResult);
+ break;
+
+ case ArithMul: {
+ if (arithProfile->didObserveInt52Overflow())
+ node->mergeFlags(NodeMayOverflowInt52);
+ if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
+ node->mergeFlags(NodeMayOverflowInt32InBaseline);
+ if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
+ node->mergeFlags(NodeMayNegZeroInBaseline);
+ if (arithProfile->didObserveDouble())
+ node->mergeFlags(NodeMayHaveDoubleResult);
+ if (arithProfile->didObserveNonNumber())
+ node->mergeFlags(NodeMayHaveNonNumberResult);
+ break;
+ }
+ case ArithNegate: {
+ ASSERT_WITH_MESSAGE(!arithProfile->didObserveNonNumber(), "op_negate starts with a toNumber() on the argument, it should only produce numbers.");
+
+ if (arithProfile->lhsObservedType().sawNumber() || arithProfile->didObserveDouble())
+ node->mergeFlags(NodeMayHaveDoubleResult);
+ if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
+ node->mergeFlags(NodeMayNegZeroInBaseline);
+ if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
+ node->mergeFlags(NodeMayOverflowInt32InBaseline);
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+ }
+
+ if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) {
+ switch (node->op()) {
+ case UInt32ToNumber:
+ case ArithAdd:
+ case ArithSub:
+ case ValueAdd:
+ case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
+ node->mergeFlags(NodeMayOverflowInt32InBaseline);
+ break;
+
+ default:
+ break;
+ }
}
return node;
@@ -911,41 +1017,37 @@ private:
{
ASSERT(node->op() == ArithDiv);
+ if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
+ node->mergeFlags(NodeMayOverflowInt32InDFG);
+ if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
+ node->mergeFlags(NodeMayNegZeroInDFG);
+
// The main slow case counter for op_div in the old JIT counts only when
// the operands are not numbers. We don't care about that since we already
// have speculations in place that take care of that separately. We only
// care about when the outcome of the division is not an integer, which
// is what the special fast case counter tells us.
- if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex)
- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
+ if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
return node;
- // FIXME: It might be possible to make this more granular. The DFG certainly can
- // distinguish between negative zero and overflow in its exit profiles.
- node->mergeFlags(NodeMayOverflow | NodeMayNegZero);
+ // FIXME: It might be possible to make this more granular.
+ node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);
return node;
}
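A hypothetical caller sketch for the two helpers above (ByteCodeParser member context assumed; the operand registers are placeholders): makeSafe() is meant to wrap a freshly created arithmetic node so that baseline ArithProfile data and exit-site information are merged into the node's flags before the result is stored.

    void exampleEmitMul(VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
    {
        Node* op1 = get(lhs);
        Node* op2 = get(rhs);
        set(dst, makeSafe(addToGraph(ArithMul, op1, op2)));
    }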
- bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain)
+ void noticeArgumentsUse()
{
- if (direct)
- return true;
+ // All of the arguments in this function need to be formatted as JSValues because we will
+ // load from them in a random-access fashion and we don't want to have to switch on
+ // format.
- if (!previousStructure->storedPrototype().isNull() && previousStructure->storedPrototype().asCell()->structure() != chain->head()->get())
- return false;
-
- for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
- if (!(*it)->storedPrototype().isNull() && (*it)->storedPrototype().asCell()->structure() != it[1].get())
- return false;
- }
-
- return true;
+ for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
+ argument->mergeShouldNeverUnbox(true);
}
-
- void buildOperandMapsIfNecessary();
+
+ bool needsDynamicLookup(ResolveType, OpcodeID);
VM* m_vm;
CodeBlock* m_codeBlock;
@@ -956,54 +1058,32 @@ private:
BasicBlock* m_currentBlock;
// The bytecode index of the current instruction being generated.
unsigned m_currentIndex;
+ // The semantic origin of the current node if different from the current Index.
+ CodeOrigin m_currentSemanticOrigin;
+ // True if it's OK to OSR exit right now.
+ bool m_exitOK { false };
- // We use these values during code generation, and to avoid the need for
- // special handling we make sure they are available as constants in the
- // CodeBlock's constant pool. These variables are initialized to
- // UINT_MAX, and lazily updated to hold an index into the CodeBlock's
- // constant pool, as necessary.
- unsigned m_constantUndefined;
- unsigned m_constantNull;
- unsigned m_constantNaN;
- unsigned m_constant1;
- HashMap<JSCell*, unsigned> m_cellConstants;
- HashMap<JSCell*, Node*> m_cellConstantNodes;
-
- // A constant in the constant pool may be represented by more than one
- // node in the graph, depending on the context in which it is being used.
- struct ConstantRecord {
- ConstantRecord()
- : asInt32(0)
- , asNumeric(0)
- , asJSValue(0)
- {
- }
-
- Node* asInt32;
- Node* asNumeric;
- Node* asJSValue;
- };
-
- // Track the index of the node whose result is the current value for every
- // register value in the bytecode - argument, local, and temporary.
- Vector<ConstantRecord, 16> m_constants;
+ FrozenValue* m_constantUndefined;
+ FrozenValue* m_constantNull;
+ FrozenValue* m_constantNaN;
+ FrozenValue* m_constantOne;
+ Vector<Node*, 16> m_constants;
// The number of arguments passed to the function.
unsigned m_numArguments;
// The number of locals (vars + temporaries) used in the function.
unsigned m_numLocals;
// The number of slots (in units of sizeof(Register)) that we need to
- // preallocate for calls emanating from this frame. This includes the
- // size of the CallFrame, only if this is not a leaf function. (I.e.
- // this is 0 if and only if this function is a leaf.)
+ // preallocate for arguments to outgoing calls from this frame. This
+ // number includes the CallFrame slots that we initialize for the callee
+ // (but not the callee-initialized CallerFrame and ReturnPC slots).
+ // This number is 0 if and only if this function is a leaf.
unsigned m_parameterSlots;
// The number of var args passed to the next var arg node.
unsigned m_numPassedVarArgs;
HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
- Vector<VariableWatchpointSet*, 16> m_localWatchpoints;
-
struct InlineStackEntry {
ByteCodeParser* m_byteCodeParser;
@@ -1011,7 +1091,7 @@ private:
CodeBlock* m_profiledBlock;
InlineCallFrame* m_inlineCallFrame;
- ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
+ ScriptExecutable* executable() { return m_codeBlock->ownerScriptExecutable(); }
QueryableExitProfile m_exitProfile;
@@ -1020,7 +1100,6 @@ private:
// (the machine code block, which is the transitive, though not necessarily
// direct, caller).
Vector<unsigned> m_identifierRemap;
- Vector<unsigned> m_constantRemap;
Vector<unsigned> m_constantBufferRemap;
Vector<unsigned> m_switchRemap;
@@ -1032,8 +1111,7 @@ private:
Vector<UnlinkedBlock> m_unlinkedBlocks;
// Potential block linking targets. Must be sorted by bytecodeBegin, and
- // cannot have two blocks that have the same bytecodeBegin. For this very
- // reason, this is not equivalent to
+ // cannot have two blocks that have the same bytecodeBegin.
Vector<BasicBlock*> m_blockLinkingTargets;
// If the callsite's basic block was split into two, then this will be
@@ -1055,7 +1133,9 @@ private:
// code block had gathered.
LazyOperandValueProfileParser m_lazyOperands;
+ CallLinkInfoMap m_callLinkInfos;
StubInfoMap m_stubInfos;
+ ByValInfoMap m_byValInfos;
// Did we see any returns? We need to handle the (uncommon but necessary)
// case where a procedure that does not return was inlined.
@@ -1078,7 +1158,7 @@ private:
VirtualRegister returnValueVR,
VirtualRegister inlineCallFrameStart,
int argumentCountIncludingThis,
- CodeSpecializationKind);
+ InlineCallFrame::Kind);
~InlineStackEntry()
{
@@ -1090,11 +1170,7 @@ private:
if (!m_inlineCallFrame)
return operand;
- if (operand.isConstant()) {
- VirtualRegister result = VirtualRegister(m_constantRemap[operand.toConstantIndex()]);
- ASSERT(result.isConstant());
- return result;
- }
+ ASSERT(!operand.isConstant());
return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
}
@@ -1103,12 +1179,14 @@ private:
InlineStackEntry* m_inlineStackTop;
struct DelayedSetLocal {
+ CodeOrigin m_origin;
VirtualRegister m_operand;
Node* m_value;
DelayedSetLocal() { }
- DelayedSetLocal(VirtualRegister operand, Node* value)
- : m_operand(operand)
+ DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value)
+ : m_origin(origin)
+ , m_operand(operand)
, m_value(value)
{
}
@@ -1116,206 +1194,371 @@ private:
Node* execute(ByteCodeParser* parser, SetMode setMode = NormalSet)
{
if (m_operand.isArgument())
- return parser->setArgument(m_operand, m_value, setMode);
- return parser->setLocal(m_operand, m_value, setMode);
+ return parser->setArgument(m_origin, m_operand, m_value, setMode);
+ return parser->setLocal(m_origin, m_operand, m_value, setMode);
}
};
Vector<DelayedSetLocal, 2> m_setLocalQueue;
- // Have we built operand maps? We initialize them lazily, and only when doing
- // inlining.
- bool m_haveBuiltOperandMaps;
- // Mapping between identifier names and numbers.
- BorrowedIdentifierMap m_identifierMap;
- // Mapping between values and constant numbers.
- JSValueMap m_jsValueMap;
- // Index of the empty value, or UINT_MAX if there is no mapping. This is a horrible
- // work-around for the fact that JSValueMap can't handle "empty" values.
- unsigned m_emptyJSValueIndex;
+ CodeBlock* m_dfgCodeBlock;
+ CallLinkStatus::ContextMap m_callContextMap;
+ StubInfoMap m_dfgStubInfos;
Instruction* m_currentInstruction;
+ bool m_hasDebuggerEnabled;
};
+// The idiom:
+// if (true) { ...; goto label; } else label: continue
+// Allows using NEXT_OPCODE as a statement, even in unbraced if+else, while containing a `continue`.
+// The more common idiom:
+// do { ...; } while (false)
+// Doesn't allow using `continue`.
#define NEXT_OPCODE(name) \
- m_currentIndex += OPCODE_LENGTH(name); \
- continue
-
+ if (true) { \
+ m_currentIndex += OPCODE_LENGTH(name); \
+ goto WTF_CONCAT(NEXT_OPCODE_, __LINE__); /* Need a unique label: usable more than once per function. */ \
+ } else \
+ WTF_CONCAT(NEXT_OPCODE_, __LINE__): \
+ continue
+
+// Chain expressions with comma-operator so LAST_OPCODE can be used as a statement.
#define LAST_OPCODE(name) \
- m_currentIndex += OPCODE_LENGTH(name); \
- return shouldContinueParsing
-
+ return \
+ m_currentIndex += OPCODE_LENGTH(name), \
+ m_exitOK = false, \
+ shouldContinueParsing
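
To make the macro comment above concrete, here is a minimal standalone C++ sketch (not part of the patch; macro and variable names are illustrative) of why a `continue` inside a do { } while (false) wrapper binds to the wrapper, while the if (true) { goto } else label: idiom forwards it to the enclosing loop, which is what NEXT_OPCODE relies on.

#include <cassert>

// Sketch only. STEP_DO_WHILE swallows the `continue`: it merely exits the
// do-while wrapper, so the rest of the loop body still runs. STEP_GOTO forwards
// the `continue` to the enclosing for loop, like NEXT_OPCODE does.
#define STEP_DO_WHILE(counter) do { ++(counter); continue; } while (false)
#define STEP_GOTO(counter, label) if (true) { ++(counter); goto label; } else label: continue

int main()
{
    int steps = 0, after = 0;
    for (int i = 0; i < 3; ++i) {
        STEP_DO_WHILE(steps);
        ++after; // Still runs: the `continue` never reached the for loop.
    }
    assert(steps == 3 && after == 3);

    after = 0;
    for (int i = 0; i < 3; ++i) {
        STEP_GOTO(steps, resume);
        ++after; // Never runs: the `continue` skipped the rest of this iteration.
    }
    assert(steps == 6 && after == 0);
    return 0;
}
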
-void ByteCodeParser::handleCall(Instruction* currentInstruction, NodeType op, CodeSpecializationKind kind)
+ByteCodeParser::Terminality ByteCodeParser::handleCall(Instruction* pc, NodeType op, CallMode callMode)
{
ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call));
+ return handleCall(
+ pc[1].u.operand, op, callMode, OPCODE_LENGTH(op_call),
+ pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand);
+}
+
+ByteCodeParser::Terminality ByteCodeParser::handleCall(
+ int result, NodeType op, CallMode callMode, unsigned instructionSize,
+ int callee, int argumentCountIncludingThis, int registerOffset)
+{
+ Node* callTarget = get(VirtualRegister(callee));
+
+ CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
+ m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
+ m_inlineStackTop->m_callLinkInfos, m_callContextMap);
- Node* callTarget = get(VirtualRegister(currentInstruction[2].u.operand));
+ return handleCall(
+ result, op, callMode, instructionSize, callTarget,
+ argumentCountIncludingThis, registerOffset, callLinkStatus);
+}
- CallLinkStatus callLinkStatus;
+ByteCodeParser::Terminality ByteCodeParser::handleCall(
+ int result, NodeType op, CallMode callMode, unsigned instructionSize,
+ Node* callTarget, int argumentCountIncludingThis, int registerOffset,
+ CallLinkStatus callLinkStatus)
+{
+ return handleCall(
+ result, op, InlineCallFrame::kindFor(callMode), instructionSize, callTarget, argumentCountIncludingThis,
+ registerOffset, callLinkStatus, getPrediction());
+}
- if (m_graph.isConstant(callTarget))
- callLinkStatus = CallLinkStatus(m_graph.valueOfJSConstant(callTarget)).setIsProved(true);
- else {
- callLinkStatus = CallLinkStatus::computeFor(m_inlineStackTop->m_profiledBlock, m_currentIndex);
- callLinkStatus.setHasBadFunctionExitSite(m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadFunction));
- callLinkStatus.setHasBadCacheExitSite(
- m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint));
- callLinkStatus.setHasBadExecutableExitSite(m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadExecutable));
+void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* callTarget)
+{
+ if (callTarget->isCellConstant()) {
+ callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
+ return;
}
+}
+
+ByteCodeParser::Terminality ByteCodeParser::handleCall(
+ int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
+ Node* callTarget, int argumentCountIncludingThis, int registerOffset,
+ CallLinkStatus callLinkStatus, SpeculatedType prediction)
+{
+ ASSERT(registerOffset <= 0);
+
+ refineStatically(callLinkStatus, callTarget);
+
+ if (Options::verboseDFGByteCodeParsing())
+ dataLog(" Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
if (!callLinkStatus.canOptimize()) {
// Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
// that we cannot optimize them.
-
- addCall(currentInstruction, op);
- return;
+
+ Node* callNode = addCall(result, op, nullptr, callTarget, argumentCountIncludingThis, registerOffset, prediction);
+ if (callNode->op() == TailCall)
+ return Terminal;
+ ASSERT(callNode->op() != TailCallVarargs && callNode->op() != TailCallForwardVarargs);
+ return NonTerminal;
}
- int argumentCountIncludingThis = currentInstruction[3].u.operand;
- int registerOffset = -currentInstruction[4].u.operand;
+ unsigned nextOffset = m_currentIndex + instructionSize;
+
+ if (handleInlining(callTarget, result, callLinkStatus, registerOffset, virtualRegisterForArgument(0, registerOffset), VirtualRegister(), 0, argumentCountIncludingThis, nextOffset, op, kind, prediction)) {
+ if (m_graph.compilation())
+ m_graph.compilation()->noticeInlinedCall();
+ return NonTerminal;
+ }
+
+ Node* callNode = addCall(result, op, nullptr, callTarget, argumentCountIncludingThis, registerOffset, prediction);
+ if (callNode->op() == TailCall)
+ return Terminal;
+ ASSERT(callNode->op() != TailCallVarargs && callNode->op() != TailCallForwardVarargs);
+ return NonTerminal;
+}
- int resultOperand = currentInstruction[1].u.operand;
- unsigned nextOffset = m_currentIndex + OPCODE_LENGTH(op_call);
+ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(Instruction* pc, NodeType op, CallMode callMode)
+{
+ ASSERT(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_construct_varargs));
+ ASSERT(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_tail_call_varargs));
+
+ int result = pc[1].u.operand;
+ int callee = pc[2].u.operand;
+ int thisReg = pc[3].u.operand;
+ int arguments = pc[4].u.operand;
+ int firstFreeReg = pc[5].u.operand;
+ int firstVarArgOffset = pc[6].u.operand;
+
SpeculatedType prediction = getPrediction();
-
- if (InternalFunction* function = callLinkStatus.internalFunction()) {
- if (handleConstantInternalFunction(resultOperand, function, registerOffset, argumentCountIncludingThis, prediction, kind)) {
- // This phantoming has to be *after* the code for the intrinsic, to signify that
- // the inputs must be kept alive whatever exits the intrinsic may do.
- addToGraph(Phantom, callTarget);
- emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, kind);
- return;
- }
-
- // Can only handle this using the generic call handler.
- addCall(currentInstruction, op);
- return;
- }
-
- Intrinsic intrinsic = callLinkStatus.intrinsicFor(kind);
- if (intrinsic != NoIntrinsic) {
- emitFunctionChecks(callLinkStatus, callTarget, registerOffset, kind);
-
- if (handleIntrinsic(resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
- // This phantoming has to be *after* the code for the intrinsic, to signify that
- // the inputs must be kept alive whatever exits the intrinsic may do.
- addToGraph(Phantom, callTarget);
- emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, kind);
- if (m_graph.compilation())
- m_graph.compilation()->noticeInlinedCall();
- return;
- }
- } else if (handleInlining(callTarget, resultOperand, callLinkStatus, registerOffset, argumentCountIncludingThis, nextOffset, kind)) {
+
+ Node* callTarget = get(VirtualRegister(callee));
+
+ CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
+ m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
+ m_inlineStackTop->m_callLinkInfos, m_callContextMap);
+ refineStatically(callLinkStatus, callTarget);
+
+ if (Options::verboseDFGByteCodeParsing())
+ dataLog(" Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
+
+ if (callLinkStatus.canOptimize()
+ && handleInlining(callTarget, result, callLinkStatus, firstFreeReg, VirtualRegister(thisReg), VirtualRegister(arguments), firstVarArgOffset, 0, m_currentIndex + OPCODE_LENGTH(op_call_varargs), op, InlineCallFrame::varargsKindFor(callMode), prediction)) {
if (m_graph.compilation())
m_graph.compilation()->noticeInlinedCall();
- return;
+ return NonTerminal;
}
- addCall(currentInstruction, op);
+ CallVarargsData* data = m_graph.m_callVarargsData.add();
+ data->firstVarArgOffset = firstVarArgOffset;
+
+ Node* thisChild = get(VirtualRegister(thisReg));
+ Node* argumentsChild = nullptr;
+ if (op != TailCallForwardVarargs)
+ argumentsChild = get(VirtualRegister(arguments));
+
+ if (op == TailCallVarargs || op == TailCallForwardVarargs) {
+ if (allInlineFramesAreTailCalls()) {
+ addToGraph(op, OpInfo(data), OpInfo(), callTarget, thisChild, argumentsChild);
+ return Terminal;
+ }
+ op = op == TailCallVarargs ? TailCallVarargsInlinedCaller : TailCallForwardVarargsInlinedCaller;
+ }
+
+ Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, thisChild, argumentsChild);
+ VirtualRegister resultReg(result);
+ if (resultReg.isValid())
+ set(resultReg, call);
+ return NonTerminal;
}
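
A minimal sketch (illustrative enum names, not the DFG API) of the terminality decision made just above: a varargs tail call is only a true block terminal when every inline frame between here and the machine frame is itself a tail call; otherwise the node degrades to the InlinedCaller variant and parsing continues after the call.

#include <cassert>

// Sketch only: mirrors the allInlineFramesAreTailCalls() branch above.
enum class Op { TailCallVarargs, TailCallVarargsInlinedCaller };
enum class Terminality { Terminal, NonTerminal };

static Terminality lower(Op& op, bool allInlineFramesAreTailCalls)
{
    if (allInlineFramesAreTailCalls)
        return Terminality::Terminal;      // Emit the real tail call; the block ends here.
    op = Op::TailCallVarargsInlinedCaller; // Keep parsing in the caller instead.
    return Terminality::NonTerminal;
}

int main()
{
    Op op = Op::TailCallVarargs;
    assert(lower(op, true) == Terminality::Terminal);
    op = Op::TailCallVarargs;
    assert(lower(op, false) == Terminality::NonTerminal && op == Op::TailCallVarargsInlinedCaller);
    return 0;
}
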
-void ByteCodeParser::emitFunctionChecks(const CallLinkStatus& callLinkStatus, Node* callTarget, int registerOffset, CodeSpecializationKind kind)
+void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
{
Node* thisArgument;
- if (kind == CodeForCall)
- thisArgument = get(virtualRegisterForArgument(0, registerOffset));
+ if (thisArgumentReg.isValid())
+ thisArgument = get(thisArgumentReg);
else
thisArgument = 0;
- if (callLinkStatus.isProved()) {
- addToGraph(Phantom, callTarget, thisArgument);
- return;
+ JSCell* calleeCell;
+ Node* callTargetForCheck;
+ if (callee.isClosureCall()) {
+ calleeCell = callee.executable();
+ callTargetForCheck = addToGraph(GetExecutable, callTarget);
+ } else {
+ calleeCell = callee.nonExecutableCallee();
+ callTargetForCheck = callTarget;
}
- ASSERT(callLinkStatus.canOptimize());
-
- if (JSFunction* function = callLinkStatus.function())
- addToGraph(CheckFunction, OpInfo(function), callTarget, thisArgument);
- else {
- ASSERT(callLinkStatus.structure());
- ASSERT(callLinkStatus.executable());
-
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(callLinkStatus.structure())), callTarget);
- addToGraph(CheckExecutable, OpInfo(callLinkStatus.executable()), callTarget, thisArgument);
- }
+ ASSERT(calleeCell);
+ addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck, thisArgument);
}
-void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind kind)
+Node* ByteCodeParser::getArgumentCount()
{
- for (int i = kind == CodeForCall ? 0 : 1; i < argumentCountIncludingThis; ++i)
+ Node* argumentCount;
+ if (m_inlineStackTop->m_inlineCallFrame) {
+ if (m_inlineStackTop->m_inlineCallFrame->isVarargs())
+ argumentCount = get(VirtualRegister(CallFrameSlot::argumentCount));
+ else
+ argumentCount = jsConstant(m_graph.freeze(jsNumber(m_inlineStackTop->m_inlineCallFrame->arguments.size()))->value());
+ } else
+ argumentCount = addToGraph(GetArgumentCountIncludingThis, OpInfo(0), OpInfo(SpecInt32Only));
+ return argumentCount;
+}
+
+void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
+{
+ for (int i = 0; i < argumentCountIncludingThis; ++i)
addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
}
-bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind)
+unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, CallMode callMode)
{
- // First, the really simple checks: do we have an actual JS function?
- if (!callLinkStatus.executable())
- return false;
- if (callLinkStatus.executable()->isHostFunction())
- return false;
+ CodeSpecializationKind kind = specializationKindFor(callMode);
+ if (verbose)
+ dataLog("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
- FunctionExecutable* executable = jsCast<FunctionExecutable*>(callLinkStatus.executable());
+ if (m_hasDebuggerEnabled) {
+ if (verbose)
+ dataLog(" Failing because the debugger is in use.\n");
+ return UINT_MAX;
+ }
+
+ FunctionExecutable* executable = callee.functionExecutable();
+ if (!executable) {
+ if (verbose)
+ dataLog(" Failing because there is no function executable.\n");
+ return UINT_MAX;
+ }
+ // Do we have a code block, and does the code block's size match the heuristics/requirements for
+ // being an inline candidate? We might not have a code block (1) if code was thrown away,
+ // (2) if we simply hadn't actually made this call yet, or (3) if the code is a builtin function and
+ // the specialization kind is construct. In the first two cases, we could still theoretically attempt
+ // to inline it if we had a static proof of what was being called; this might happen for example
+ // if you call a global function, where watchpointing gives us static information. Overall,
+ // it's a rare case because we expect that any hot callees would have already been compiled.
+ CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
+ if (!codeBlock) {
+ if (verbose)
+ dataLog(" Failing because no code block available.\n");
+ return UINT_MAX;
+ }
+
// Does the number of arguments we're passing match the arity of the target? We currently
// inline only if the number of arguments passed is greater than or equal to the number of
// arguments expected.
- if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis)
- return false;
+ if (codeBlock->numParameters() > argumentCountIncludingThis) {
+ if (verbose)
+ dataLog(" Failing because of arity mismatch.\n");
+ return UINT_MAX;
+ }
+
+ CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
+ codeBlock, kind, callee.isClosureCall());
+ if (verbose) {
+ dataLog(" Call mode: ", callMode, "\n");
+ dataLog(" Is closure call: ", callee.isClosureCall(), "\n");
+ dataLog(" Capability level: ", capabilityLevel, "\n");
+ dataLog(" Might inline function: ", mightInlineFunctionFor(codeBlock, kind), "\n");
+ dataLog(" Might compile function: ", mightCompileFunctionFor(codeBlock, kind), "\n");
+ dataLog(" Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
+ dataLog(" Is inlining candidate: ", codeBlock->ownerScriptExecutable()->isInliningCandidate(), "\n");
+ }
+ if (!canInline(capabilityLevel)) {
+ if (verbose)
+ dataLog(" Failing because the function is not inlineable.\n");
+ return UINT_MAX;
+ }
+
+ // Check if the caller is already too large. We do this check here because that's just
+ // where we happen to also have the callee's code block, and we want that for the
+ // purpose of unsetting SABI (shouldAlwaysBeInlined).
+ if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
+ codeBlock->m_shouldAlwaysBeInlined = false;
+ if (verbose)
+ dataLog(" Failing because the caller is too large.\n");
+ return UINT_MAX;
+ }
+
+ // FIXME: this should be better at predicting how much bloat we will introduce by inlining
+ // this function.
+ // https://bugs.webkit.org/show_bug.cgi?id=127627
+
+ // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These
+ // functions have very low fidelity profiling, and presumably they weren't very hot if they
+ // haven't gotten to Baseline yet. Consider not inlining these functions.
+ // https://bugs.webkit.org/show_bug.cgi?id=145503
+
+ // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
+ // too many levels? If either of these is detected, then don't inline. We adjust our
+ // heuristics if we are dealing with a function that cannot otherwise be compiled.
- // Have we exceeded inline stack depth, or are we trying to inline a recursive call?
- // If either of these are detected, then don't inline.
unsigned depth = 0;
+ unsigned recursion = 0;
+
for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
++depth;
- if (depth >= Options::maximumInliningDepth())
- return false; // Depth exceeded.
+ if (depth >= Options::maximumInliningDepth()) {
+ if (verbose)
+ dataLog(" Failing because depth exceeded.\n");
+ return UINT_MAX;
+ }
- if (entry->executable() == executable)
- return false; // Recursion detected.
+ if (entry->executable() == executable) {
+ ++recursion;
+ if (recursion >= Options::maximumInliningRecursion()) {
+ if (verbose)
+ dataLog(" Failing because recursion detected.\n");
+ return UINT_MAX;
+ }
+ }
}
- // Do we have a code block, and does the code block's size match the heuristics/requirements for
- // being an inline candidate? We might not have a code block if code was thrown away or if we
- // simply hadn't actually made this call yet. We could still theoretically attempt to inline it
- // if we had a static proof of what was being called; this might happen for example if you call a
- // global function, where watchpointing gives us static information. Overall, it's a rare case
- // because we expect that any hot callees would have already been compiled.
- CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
- if (!codeBlock)
- return false;
- if (!canInlineFunctionFor(codeBlock, kind, callLinkStatus.isClosureCall()))
- return false;
+ if (verbose)
+ dataLog(" Inlining should be possible.\n");
- // Now we know without a doubt that we are committed to inlining. So begin the process
- // by checking the callee (if necessary) and making sure that arguments and the callee
- // are flushed.
- emitFunctionChecks(callLinkStatus, callTargetNode, registerOffset, kind);
+ // It might be possible to inline.
+ return codeBlock->instructionCount();
+}
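
The instruction count returned by inliningCost() is later charged against a per-callsite budget (see inliningBalance in attemptToInlineCall and handleInlining below). A minimal sketch of that budgeting pattern, assuming illustrative callee sizes and a made-up starting balance rather than the real Options:: values:

#include <climits>
#include <cstdio>
#include <vector>

// Sketch only: UINT_MAX plays the same "cannot inline at all" role as in
// inliningCost() above; successful inlining consumes budget.
struct Candidate { const char* name; unsigned inliningCost; };

int main()
{
    unsigned inliningBalance = 120; // Illustrative cap.
    std::vector<Candidate> callees = { { "small", 40 }, { "medium", 70 }, { "huge", UINT_MAX } };
    for (const Candidate& callee : callees) {
        if (callee.inliningCost > inliningBalance) {
            std::printf("%s: not inlined, emit a regular call\n", callee.name);
            continue;
        }
        inliningBalance -= callee.inliningCost;
        std::printf("%s: inlined, %u budget left\n", callee.name, inliningBalance);
    }
    return 0;
}
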
+
+template<typename ChecksFunctor>
+void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, const ChecksFunctor& insertChecks)
+{
+ CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
+
+ ASSERT(inliningCost(callee, argumentCountIncludingThis, InlineCallFrame::callModeFor(kind)) != UINT_MAX);
+ CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
+ insertChecks(codeBlock);
+
// FIXME: Don't flush constants!
- int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + JSStack::CallFrameHeaderSize;
+ int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + CallFrame::headerSizeInRegisters;
- // Make sure that we have enough locals.
- unsigned newNumLocals = VirtualRegister(inlineCallFrameStart).toLocal() + 1 + JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters;
- if (newNumLocals > m_numLocals) {
- m_numLocals = newNumLocals;
- for (size_t i = 0; i < m_graph.numBlocks(); ++i)
- m_graph.block(i)->ensureLocals(newNumLocals);
- }
+ ensureLocals(
+ VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
+ CallFrame::headerSizeInRegisters + codeBlock->m_numCalleeLocals);
size_t argumentPositionStart = m_graph.m_argumentPositions.size();
+ VirtualRegister resultReg(resultOperand);
+ if (resultReg.isValid())
+ resultReg = m_inlineStackTop->remapOperand(resultReg);
+
+ VariableAccessData* calleeVariable = nullptr;
+ if (callee.isClosureCall()) {
+ Node* calleeSet = set(
+ VirtualRegister(registerOffset + CallFrameSlot::callee), callTargetNode, ImmediateNakedSet);
+
+ calleeVariable = calleeSet->variableAccessData();
+ calleeVariable->mergeShouldNeverUnbox(true);
+ }
+
InlineStackEntry inlineStackEntry(
- this, codeBlock, codeBlock, m_graph.lastBlock(), callLinkStatus.function(),
- m_inlineStackTop->remapOperand(VirtualRegister(resultOperand)),
+ this, codeBlock, codeBlock, m_graph.lastBlock(), callee.function(), resultReg,
(VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
// This is where the actual inlining really happens.
unsigned oldIndex = m_currentIndex;
m_currentIndex = 0;
+ // At this point, it's again OK to OSR exit.
+ m_exitOK = true;
+
InlineVariableData inlineVariableData;
inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
inlineVariableData.argumentPositionStart = argumentPositionStart;
@@ -1323,24 +1566,19 @@ bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, con
RELEASE_ASSERT(
m_inlineStackTop->m_inlineCallFrame->isClosureCall
- == callLinkStatus.isClosureCall());
- if (callLinkStatus.isClosureCall()) {
- VariableAccessData* calleeVariable =
- set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateSet)->variableAccessData();
- VariableAccessData* scopeVariable =
- set(VirtualRegister(JSStack::ScopeChain), addToGraph(GetScope, callTargetNode), ImmediateSet)->variableAccessData();
-
- calleeVariable->mergeShouldNeverUnbox(true);
- scopeVariable->mergeShouldNeverUnbox(true);
-
+ == callee.isClosureCall());
+ if (callee.isClosureCall()) {
+ RELEASE_ASSERT(calleeVariable);
inlineVariableData.calleeVariable = calleeVariable;
}
m_graph.m_inlineVariableData.append(inlineVariableData);
-
+
parseCodeBlock();
+ clearCaches(); // Reset our state now that we're back to the outer code.
m_currentIndex = oldIndex;
+ m_exitOK = false;
// If the inlined code created some new basic blocks, then we have linking to do.
if (inlineStackEntry.m_callsiteBlockHead != m_graph.lastBlock()) {
@@ -1351,20 +1589,8 @@ bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, con
else
ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked);
- // It's possible that the callsite block head is not owned by the caller.
- if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
- // It's definitely owned by the caller, because the caller created new blocks.
- // Assert that this all adds up.
- ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_block == inlineStackEntry.m_callsiteBlockHead);
- ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
- inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
- } else {
- // It's definitely not owned by the caller. Tell the caller that he does not
- // need to link his callsite block head, because we did it for him.
- ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking);
- ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead);
- inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false;
- }
+ if (callerLinkability == CallerDoesNormalLinking)
+ cancelLinkingForBlock(inlineStackEntry.m_caller, inlineStackEntry.m_callsiteBlockHead);
linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
} else
@@ -1374,7 +1600,10 @@ bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, con
// If there was a return, but no early returns, then we're done. We allow parsing of
// the caller to continue in whatever basic block we're in right now.
if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
- ASSERT(lastBlock->isEmpty() || !lastBlock->last()->isTerminal());
+ if (Options::verboseDFGByteCodeParsing())
+ dataLog(" Allowing parsing to continue in last inlined block.\n");
+
+ ASSERT(lastBlock->isEmpty() || !lastBlock->terminal());
// If we created new blocks then the last block needs linking, but in the
// caller. It doesn't need to be linked to, but it needs outgoing links.
@@ -1382,20 +1611,28 @@ bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, con
// For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
// for release builds because this block will never serve as a potential target
// in the linker's binary search.
+ if (Options::verboseDFGByteCodeParsing())
+ dataLog(" Repurposing last block from ", lastBlock->bytecodeBegin, " to ", m_currentIndex, "\n");
lastBlock->bytecodeBegin = m_currentIndex;
- m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
+ if (callerLinkability == CallerDoesNormalLinking) {
+ if (verbose)
+ dataLog("Adding unlinked block ", RawPointer(m_graph.lastBlock()), " (one return)\n");
+ m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
+ }
}
m_currentBlock = m_graph.lastBlock();
- return true;
+ return;
}
-
+
+ if (Options::verboseDFGByteCodeParsing())
+ dataLog(" Creating new block after inlining.\n");
+
// If we get to this point then all blocks must end in some sort of terminals.
- ASSERT(lastBlock->last()->isTerminal());
-
+ ASSERT(lastBlock->terminal());
// Need to create a new basic block for the continuation at the caller.
- RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals));
+ Ref<BasicBlock> block = adoptRef(*new BasicBlock(nextOffset, m_numArguments, m_numLocals, 1));
// Link the early returns to the basic block we're about to create.
for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
@@ -1403,43 +1640,499 @@ bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, con
continue;
BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block;
ASSERT(!blockToLink->isLinked);
- Node* node = blockToLink->last();
+ Node* node = blockToLink->terminal();
ASSERT(node->op() == Jump);
- ASSERT(node->takenBlock() == 0);
- node->setTakenBlock(block.get());
+ ASSERT(!node->targetBlock());
+ node->targetBlock() = block.ptr();
inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
-#if !ASSERT_DISABLED
- blockToLink->isLinked = true;
-#endif
+ if (verbose)
+ dataLog("Marking ", RawPointer(blockToLink), " as linked (jumps to return)\n");
+ blockToLink->didLink();
}
- m_currentBlock = block.get();
+ m_currentBlock = block.ptr();
ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset);
- m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
- m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get());
- m_graph.appendBlock(block);
+ if (verbose)
+ dataLog("Adding unlinked block ", RawPointer(block.ptr()), " (many returns)\n");
+ if (callerLinkability == CallerDoesNormalLinking) {
+ m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.ptr()));
+ m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.ptr());
+ }
+ m_graph.appendBlock(WTFMove(block));
prepareToParseBlock();
+}
+
+void ByteCodeParser::cancelLinkingForBlock(InlineStackEntry* inlineStackEntry, BasicBlock* block)
+{
+ // It's possible that the callsite block head is not owned by the caller.
+ if (!inlineStackEntry->m_unlinkedBlocks.isEmpty()) {
+ // It's definitely owned by the caller, because the caller created new blocks.
+ // Assert that this all adds up.
+ ASSERT_UNUSED(block, inlineStackEntry->m_unlinkedBlocks.last().m_block == block);
+ ASSERT(inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking);
+ inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking = false;
+ } else {
+ // It's definitely not owned by the caller. Tell the caller that he does not
+ // need to link his callsite block head, because we did it for him.
+ ASSERT(inlineStackEntry->m_callsiteBlockHeadNeedsLinking);
+ ASSERT_UNUSED(block, inlineStackEntry->m_callsiteBlockHead == block);
+ inlineStackEntry->m_callsiteBlockHeadNeedsLinking = false;
+ }
+}
+
+template<typename ChecksFunctor>
+bool ByteCodeParser::attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks)
+{
+ CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
+
+ if (!inliningBalance)
+ return false;
+
+ if (verbose)
+ dataLog(" Considering callee ", callee, "\n");
+
+ // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
+ // we currently don't have any way of getting profiling information for arguments to non-JS varargs
+ // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
+ // and there are no callsite value profiles and native function won't have callee value profiles for
+ // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
+ // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
+ // calling LoadVarargs twice.
+ if (!InlineCallFrame::isVarargs(kind)) {
+
+ bool didInsertChecks = false;
+ auto insertChecksWithAccounting = [&] () {
+ insertChecks(nullptr);
+ didInsertChecks = true;
+ };
+
+ if (InternalFunction* function = callee.internalFunction()) {
+ if (handleConstantInternalFunction(callTargetNode, resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) {
+ RELEASE_ASSERT(didInsertChecks);
+ addToGraph(Phantom, callTargetNode);
+ emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
+ inliningBalance--;
+ return true;
+ }
+ RELEASE_ASSERT(!didInsertChecks);
+ return false;
+ }
+
+ Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
+ if (intrinsic != NoIntrinsic) {
+ if (handleIntrinsicCall(callTargetNode, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
+ RELEASE_ASSERT(didInsertChecks);
+ addToGraph(Phantom, callTargetNode);
+ emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
+ inliningBalance--;
+ return true;
+ }
+
+ RELEASE_ASSERT(!didInsertChecks);
+ // We might still try to inline the Intrinsic because it might be a builtin JS function.
+ }
+
+ if (Options::useDOMJIT()) {
+ if (const DOMJIT::Signature* signature = callee.signatureFor(specializationKind)) {
+ if (handleDOMJITCall(callTargetNode, resultOperand, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
+ RELEASE_ASSERT(didInsertChecks);
+ addToGraph(Phantom, callTargetNode);
+ emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
+ inliningBalance--;
+ return true;
+ }
+ RELEASE_ASSERT(!didInsertChecks);
+ }
+ }
+ }
- // At this point we return and continue to generate code for the caller, but
- // in the new basic block.
+ unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, InlineCallFrame::callModeFor(kind));
+ if (myInliningCost > inliningBalance)
+ return false;
+
+ Instruction* savedCurrentInstruction = m_currentInstruction;
+ inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, nextOffset, kind, callerLinkability, insertChecks);
+ inliningBalance -= myInliningCost;
+ m_currentInstruction = savedCurrentInstruction;
return true;
}
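
A minimal sketch of the insertChecks functor protocol used throughout attemptToInlineCall: the fast-path handler must invoke the functor before emitting anything observable, and the caller verifies that with a flag, mirroring the didInsertChecks / RELEASE_ASSERT accounting above. Function names here are illustrative, not JSC API.

#include <cassert>

// Sketch only: a fast-path handler receives an insertChecks functor and must call
// it before emitting anything observable; the wrapper checks that it really did.
template<typename ChecksFunctor>
static bool tryHandleFastPath(bool canHandle, const ChecksFunctor& insertChecks)
{
    if (!canHandle)
        return false; // Nothing was emitted, so no checks may have been inserted.
    insertChecks();   // Guard the fast path first...
    // ...then emit the fast-path nodes (elided in this sketch).
    return true;
}

int main()
{
    bool didInsertChecks = false;
    auto insertChecksWithAccounting = [&] {
        // The real code emits CheckCell / structure checks here.
        didInsertChecks = true;
    };
    bool handled = tryHandleFastPath(true, insertChecksWithAccounting);
    assert(handled && didInsertChecks); // Checks are inserted iff the fast path was taken.
    return 0;
}
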
-bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
+bool ByteCodeParser::handleInlining(
+ Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus,
+ int registerOffsetOrFirstFreeReg, VirtualRegister thisArgument,
+ VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis,
+ unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
{
- if (argumentCountIncludingThis == 1) { // Math.min()
- set(VirtualRegister(resultOperand), constantNaN());
+ if (verbose) {
+ dataLog("Handling inlining...\n");
+ dataLog("Stack: ", currentCodeOrigin(), "\n");
+ }
+ CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
+
+ if (!callLinkStatus.size()) {
+ if (verbose)
+ dataLog("Bailing inlining.\n");
+ return false;
+ }
+
+ if (InlineCallFrame::isVarargs(kind)
+ && callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) {
+ if (verbose)
+ dataLog("Bailing inlining because of varargs.\n");
+ return false;
+ }
+
+ unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount();
+ if (specializationKind == CodeForConstruct)
+ inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount());
+ if (callLinkStatus.isClosureCall())
+ inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount());
+
+ // First check if we can avoid creating control flow. Our inliner does some CFG
+ // simplification on the fly and this helps reduce compile times, but we can only leverage
+ // this in cases where we don't need control flow diamonds to check the callee.
+ if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
+ int registerOffset;
+
+ // Only used for varargs calls.
+ unsigned mandatoryMinimum = 0;
+ unsigned maxNumArguments = 0;
+
+ if (InlineCallFrame::isVarargs(kind)) {
+ if (FunctionExecutable* functionExecutable = callLinkStatus[0].functionExecutable())
+ mandatoryMinimum = functionExecutable->parameterCount();
+ else
+ mandatoryMinimum = 0;
+
+ // includes "this"
+ maxNumArguments = std::max(
+ callLinkStatus.maxNumArguments(),
+ mandatoryMinimum + 1);
+
+ // We sort of pretend that this *is* the number of arguments that were passed.
+ argumentCountIncludingThis = maxNumArguments;
+
+ registerOffset = registerOffsetOrFirstFreeReg + 1;
+ registerOffset -= maxNumArguments; // includes "this"
+ registerOffset -= CallFrame::headerSizeInRegisters;
+ registerOffset = -WTF::roundUpToMultipleOf(
+ stackAlignmentRegisters(),
+ -registerOffset);
+ } else
+ registerOffset = registerOffsetOrFirstFreeReg;
+
+ bool result = attemptToInlineCall(
+ callTargetNode, resultOperand, callLinkStatus[0], registerOffset,
+ argumentCountIncludingThis, nextOffset, kind, CallerDoesNormalLinking, prediction,
+ inliningBalance, [&] (CodeBlock* codeBlock) {
+ emitFunctionChecks(callLinkStatus[0], callTargetNode, thisArgument);
+
+ // If we have a varargs call, we want to extract the arguments right now.
+ if (InlineCallFrame::isVarargs(kind)) {
+ int remappedRegisterOffset =
+ m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
+
+ ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
+
+ int argumentStart = registerOffset + CallFrame::headerSizeInRegisters;
+ int remappedArgumentStart =
+ m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
+
+ LoadVarargsData* data = m_graph.m_loadVarargsData.add();
+ data->start = VirtualRegister(remappedArgumentStart + 1);
+ data->count = VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount);
+ data->offset = argumentsOffset;
+ data->limit = maxNumArguments;
+ data->mandatoryMinimum = mandatoryMinimum;
+
+ if (callOp == TailCallForwardVarargs)
+ addToGraph(ForwardVarargs, OpInfo(data));
+ else
+ addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
+
+ // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument
+ // and argumentsArgument for the baseline JIT. However, we only need a Phantom for
+ // callTargetNode because the other 2 are still in use and alive at this point.
+ addToGraph(Phantom, callTargetNode);
+
+ // In DFG IR before SSA, we cannot insert control flow between the
+ // LoadVarargs and the last SetArgument. This isn't a problem once we get to DFG
+ // SSA. Fortunately, we also have other reasons for not inserting control flow
+ // before SSA.
+
+ VariableAccessData* countVariable = newVariableAccessData(
+ VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount));
+ // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
+ // matter very much, since our use of a SetArgument and Flushes for this local slot is
+ // mostly just a formality.
+ countVariable->predict(SpecInt32Only);
+ countVariable->mergeIsProfitableToUnbox(true);
+ Node* setArgumentCount = addToGraph(SetArgument, OpInfo(countVariable));
+ m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
+
+ set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
+ for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
+ VariableAccessData* variable = newVariableAccessData(
+ VirtualRegister(remappedArgumentStart + argument));
+ variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
+
+ // For a while it had been my intention to do things like this inside the
+ // prediction injection phase. But in this case it's really best to do it here,
+ // because it's here that we have access to the variable access datas for the
+ // inlining we're about to do.
+ //
+ // Something else that's interesting here is that we'd really love to get
+ // predictions from the arguments loaded at the callsite, rather than the
+ // arguments received inside the callee. But that probably won't matter for most
+ // calls.
+ if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
+ ConcurrentJSLocker locker(codeBlock->m_lock);
+ if (ValueProfile* profile = codeBlock->valueProfileForArgument(argument))
+ variable->predict(profile->computeUpdatedPrediction(locker));
+ }
+
+ Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
+ m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
+ }
+ }
+ });
+ if (verbose) {
+ dataLog("Done inlining (simple).\n");
+ dataLog("Stack: ", currentCodeOrigin(), "\n");
+ dataLog("Result: ", result, "\n");
+ }
+ return result;
+ }
+
+ // We need to create some kind of switch over callee. For now we only do this if we believe that
+ // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
+ // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
+ // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
+ // we could improve that aspect of this by doing polymorphic inlining while still keeping
+ // the profiling.
+ if (!isFTL(m_graph.m_plan.mode) || !Options::usePolymorphicCallInlining()
+ || InlineCallFrame::isVarargs(kind)) {
+ if (verbose) {
+ dataLog("Bailing inlining (hard).\n");
+ dataLog("Stack: ", currentCodeOrigin(), "\n");
+ }
+ return false;
+ }
+
+ // If the claim is that this did not originate from a stub, then we don't want to emit a switch
+ // statement. Whenever the non-stub profiling says that it could take slow path, it really means that
+ // it has no idea.
+ if (!Options::usePolymorphicCallInliningForNonStubStatus()
+ && !callLinkStatus.isBasedOnStub()) {
+ if (verbose) {
+ dataLog("Bailing inlining (non-stub polymorphism).\n");
+ dataLog("Stack: ", currentCodeOrigin(), "\n");
+ }
+ return false;
+ }
+
+ unsigned oldOffset = m_currentIndex;
+
+ bool allAreClosureCalls = true;
+ bool allAreDirectCalls = true;
+ for (unsigned i = callLinkStatus.size(); i--;) {
+ if (callLinkStatus[i].isClosureCall())
+ allAreDirectCalls = false;
+ else
+ allAreClosureCalls = false;
+ }
+
+ Node* thingToSwitchOn;
+ if (allAreDirectCalls)
+ thingToSwitchOn = callTargetNode;
+ else if (allAreClosureCalls)
+ thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
+ else {
+ // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
+ // where it would be beneficial. It might be best to handle these cases as if all calls were
+ // closure calls.
+ // https://bugs.webkit.org/show_bug.cgi?id=136020
+ if (verbose) {
+ dataLog("Bailing inlining (mix).\n");
+ dataLog("Stack: ", currentCodeOrigin(), "\n");
+ }
+ return false;
+ }
+
+ if (verbose) {
+ dataLog("Doing hard inlining...\n");
+ dataLog("Stack: ", currentCodeOrigin(), "\n");
+ }
+
+ int registerOffset = registerOffsetOrFirstFreeReg;
+
+ // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
+ // store the callee so that it will be accessible to all of the blocks we're about to create. We
+ // get away with doing an immediate-set here because we wouldn't have performed any side effects
+ // yet.
+ if (verbose)
+ dataLog("Register offset: ", registerOffset, "\n");
+ VirtualRegister calleeReg(registerOffset + CallFrameSlot::callee);
+ calleeReg = m_inlineStackTop->remapOperand(calleeReg);
+ if (verbose)
+ dataLog("Callee is going to be ", calleeReg, "\n");
+ setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
+
+ // It's OK to exit right now, even though we set some locals. That's because those locals are not
+ // user-visible.
+ m_exitOK = true;
+ addToGraph(ExitOK);
+
+ SwitchData& data = *m_graph.m_switchData.add();
+ data.kind = SwitchCell;
+ addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
+
+ BasicBlock* originBlock = m_currentBlock;
+ if (verbose)
+ dataLog("Marking ", RawPointer(originBlock), " as linked (origin of poly inline)\n");
+ originBlock->didLink();
+ cancelLinkingForBlock(m_inlineStackTop, originBlock);
+
+ // Each inlined callee will have a landing block that it returns at. They should all have jumps
+ // to the continuation block, which we create last.
+ Vector<BasicBlock*> landingBlocks;
+
+ // We may force this true if we give up on inlining any of the edges.
+ bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
+
+ if (verbose)
+ dataLog("About to loop over functions at ", currentCodeOrigin(), ".\n");
+
+ for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
+ m_currentIndex = oldOffset;
+ Ref<BasicBlock> block = adoptRef(*new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
+ m_currentBlock = block.ptr();
+ m_graph.appendBlock(block.copyRef());
+ prepareToParseBlock();
+
+ Node* myCallTargetNode = getDirect(calleeReg);
+
+ bool inliningResult = attemptToInlineCall(
+ myCallTargetNode, resultOperand, callLinkStatus[i], registerOffset,
+ argumentCountIncludingThis, nextOffset, kind, CallerLinksManually, prediction,
+ inliningBalance, [&] (CodeBlock*) { });
+
+ if (!inliningResult) {
+ // That failed so we let the block die. Nothing interesting should have been added to
+ // the block. We also give up on inlining any of the (less frequent) callees.
+ ASSERT(m_currentBlock == block.ptr());
+ ASSERT(m_graph.m_blocks.last() == block.ptr());
+ m_graph.killBlockAndItsContents(block.ptr());
+ m_graph.m_blocks.removeLast();
+
+ // The fact that inlining failed means we need a slow path.
+ couldTakeSlowPath = true;
+ break;
+ }
+
+ JSCell* thingToCaseOn;
+ if (allAreDirectCalls)
+ thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
+ else {
+ ASSERT(allAreClosureCalls);
+ thingToCaseOn = callLinkStatus[i].executable();
+ }
+ data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), block.ptr()));
+ m_currentIndex = nextOffset;
+ m_exitOK = true;
+ processSetLocalQueue(); // This only comes into play for intrinsics, since normal inlined code will leave an empty queue.
+ if (Node* terminal = m_currentBlock->terminal())
+ ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs || terminal->op() == TailCallForwardVarargs);
+ else {
+ addToGraph(Jump);
+ landingBlocks.append(m_currentBlock);
+ }
+ if (verbose)
+ dataLog("Marking ", RawPointer(m_currentBlock), " as linked (tail of poly inlinee)\n");
+ m_currentBlock->didLink();
+
+ if (verbose)
+ dataLog("Finished inlining ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
+ }
+
+ Ref<BasicBlock> slowPathBlock = adoptRef(
+ *new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
+ m_currentIndex = oldOffset;
+ m_exitOK = true;
+ data.fallThrough = BranchTarget(slowPathBlock.ptr());
+ m_graph.appendBlock(slowPathBlock.copyRef());
+ if (verbose)
+ dataLog("Marking ", RawPointer(slowPathBlock.ptr()), " as linked (slow path block)\n");
+ slowPathBlock->didLink();
+ prepareToParseBlock();
+ m_currentBlock = slowPathBlock.ptr();
+ Node* myCallTargetNode = getDirect(calleeReg);
+ if (couldTakeSlowPath) {
+ addCall(
+ resultOperand, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis,
+ registerOffset, prediction);
+ } else {
+ addToGraph(CheckBadCell);
+ addToGraph(Phantom, myCallTargetNode);
+ emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
+
+ set(VirtualRegister(resultOperand), addToGraph(BottomValue));
+ }
+
+ m_currentIndex = nextOffset;
+ m_exitOK = true; // Origin changed, so it's fine to exit again.
+ processSetLocalQueue();
+ if (Node* terminal = m_currentBlock->terminal())
+ ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs || terminal->op() == TailCallForwardVarargs);
+ else {
+ addToGraph(Jump);
+ landingBlocks.append(m_currentBlock);
+ }
+
+ Ref<BasicBlock> continuationBlock = adoptRef(
+ *new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
+ m_graph.appendBlock(continuationBlock.copyRef());
+ if (verbose)
+ dataLog("Adding unlinked block ", RawPointer(continuationBlock.ptr()), " (continuation)\n");
+ m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(continuationBlock.ptr()));
+ prepareToParseBlock();
+ m_currentBlock = continuationBlock.ptr();
+
+ for (unsigned i = landingBlocks.size(); i--;)
+ landingBlocks[i]->terminal()->targetBlock() = continuationBlock.ptr();
+
+ m_currentIndex = oldOffset;
+ m_exitOK = true;
+
+ if (verbose) {
+ dataLog("Done inlining (hard).\n");
+ dataLog("Stack: ", currentCodeOrigin(), "\n");
+ }
+ return true;
+}
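
For orientation, a standalone sketch (plain C++, not DFG IR; names are illustrative) of the control-flow shape the polymorphic path above builds: a switch over the callee cell or executable, one case block per inlined callee, a slow-path default that falls back to a generic call, and a single continuation block that every path jumps to.

#include <cstdio>

enum class Callee { FooExecutable, BarExecutable, Unknown };

static int dispatch(Callee thingToSwitchOn, int argument)
{
    int result;
    switch (thingToSwitchOn) {   // Switch (SwitchCell) on the callee / executable.
    case Callee::FooExecutable:
        result = argument + 1;   // Inlined body of foo.
        break;                   // Jump to the continuation block.
    case Callee::BarExecutable:
        result = argument * 2;   // Inlined body of bar.
        break;
    default:
        result = -1;             // Slow path: a generic Call node.
        break;
    }
    // Continuation block: parsing of the caller resumes here.
    return result;
}

int main()
{
    std::printf("%d %d %d\n", dispatch(Callee::FooExecutable, 1),
        dispatch(Callee::BarExecutable, 3), dispatch(Callee::Unknown, 0));
    return 0;
}
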
+
+template<typename ChecksFunctor>
+bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
+{
+ ASSERT(op == ArithMin || op == ArithMax);
+
+ if (argumentCountIncludingThis == 1) {
+ insertChecks();
+ double result = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity();
+ set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(result)))));
return true;
}
- if (argumentCountIncludingThis == 2) { // Math.min(x)
+ if (argumentCountIncludingThis == 2) {
+ insertChecks();
Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
addToGraph(Phantom, Edge(result, NumberUse));
set(VirtualRegister(resultOperand), result);
return true;
}
- if (argumentCountIncludingThis == 3) { // Math.min(x, y)
+ if (argumentCountIncludingThis == 3) {
+ insertChecks();
set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
return true;
}
@@ -1448,74 +2141,104 @@ bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOf
return false;
}
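
A quick standalone check (an assumed illustrative helper, not JSC code) of the identity values the zero-argument case above folds to, matching ECMAScript: Math.max() is -Infinity and Math.min() is +Infinity. NaN propagation and signed-zero handling, which the real ArithMin/ArithMax nodes also cover, are ignored here.

#include <algorithm>
#include <cassert>
#include <initializer_list>
#include <limits>

// Sketch only: folding Math.max over its identity element.
static double jsMaxLike(std::initializer_list<double> args)
{
    double result = -std::numeric_limits<double>::infinity();
    for (double a : args)
        result = std::max(result, a);
    return result;
}

int main()
{
    assert(jsMaxLike({}) == -std::numeric_limits<double>::infinity()); // Math.max()
    assert(jsMaxLike({ 1, 3, 2 }) == 3);                               // Math.max(1, 3, 2)
    return 0;
}
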
-bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction)
+template<typename ChecksFunctor>
+bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
{
switch (intrinsic) {
+
+ // Intrinsic Functions:
+
case AbsIntrinsic: {
if (argumentCountIncludingThis == 1) { // Math.abs()
- set(VirtualRegister(resultOperand), constantNaN());
+ insertChecks();
+ set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
return true;
}
if (!MacroAssembler::supportsFloatingPointAbs())
return false;
+ insertChecks();
Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
- node->mergeFlags(NodeMayOverflow);
+ node->mergeFlags(NodeMayOverflowInt32InDFG);
set(VirtualRegister(resultOperand), node);
return true;
}
case MinIntrinsic:
- return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
+ return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks);
case MaxIntrinsic:
- return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
-
- case SqrtIntrinsic:
+ return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis, insertChecks);
+
case CosIntrinsic:
- case SinIntrinsic: {
+ case FRoundIntrinsic:
+ case LogIntrinsic:
+ case SinIntrinsic:
+ case SqrtIntrinsic:
+ case TanIntrinsic: {
if (argumentCountIncludingThis == 1) {
- set(VirtualRegister(resultOperand), constantNaN());
+ insertChecks();
+ set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
return true;
}
-
+
+ NodeType nodeType = Unreachable;
switch (intrinsic) {
- case SqrtIntrinsic:
- if (!MacroAssembler::supportsFloatingPointSqrt())
- return false;
-
- set(VirtualRegister(resultOperand), addToGraph(ArithSqrt, get(virtualRegisterForArgument(1, registerOffset))));
- return true;
-
case CosIntrinsic:
- set(VirtualRegister(resultOperand), addToGraph(ArithCos, get(virtualRegisterForArgument(1, registerOffset))));
- return true;
-
+ nodeType = ArithCos;
+ break;
+ case FRoundIntrinsic:
+ nodeType = ArithFRound;
+ break;
+ case LogIntrinsic:
+ nodeType = ArithLog;
+ break;
case SinIntrinsic:
- set(VirtualRegister(resultOperand), addToGraph(ArithSin, get(virtualRegisterForArgument(1, registerOffset))));
- return true;
-
+ nodeType = ArithSin;
+ break;
+ case SqrtIntrinsic:
+ nodeType = ArithSqrt;
+ break;
+ case TanIntrinsic:
+ nodeType = ArithTan;
+ break;
default:
RELEASE_ASSERT_NOT_REACHED();
- return false;
}
+ insertChecks();
+ set(VirtualRegister(resultOperand), addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset))));
+ return true;
+ }
+
+ case PowIntrinsic: {
+ if (argumentCountIncludingThis < 3) {
+ // Math.pow() and Math.pow(x) return NaN.
+ insertChecks();
+ set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
+ return true;
+ }
+ insertChecks();
+ VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
+ VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
+ set(VirtualRegister(resultOperand), addToGraph(ArithPow, get(xOperand), get(yOperand)));
+ return true;
}
case ArrayPushIntrinsic: {
if (argumentCountIncludingThis != 2)
return false;
- ArrayMode arrayMode = getArrayMode(m_currentInstruction[6].u.arrayProfile);
+ ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
if (!arrayMode.isJSArray())
return false;
switch (arrayMode.type()) {
- case Array::Undecided:
case Array::Int32:
case Array::Double:
case Array::Contiguous:
case Array::ArrayStorage: {
+ insertChecks();
Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
set(VirtualRegister(resultOperand), arrayPush);
@@ -1526,12 +2249,100 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int
return false;
}
}
+
+ case ArraySliceIntrinsic: {
+#if USE(JSVALUE32_64)
+ if (isX86()) {
+ // There aren't enough registers for this to be done easily.
+ return false;
+ }
+#endif
+ if (argumentCountIncludingThis < 2)
+ return false;
+
+ if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
+ || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
+ return false;
+
+ ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
+ if (!arrayMode.isJSArray())
+ return false;
+
+ if (arrayMode.arrayClass() != Array::OriginalArray)
+ return false;
+
+ switch (arrayMode.type()) {
+ case Array::Double:
+ case Array::Int32:
+ case Array::Contiguous: {
+ JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
+
+ InlineWatchpointSet& objectPrototypeTransition = globalObject->objectPrototype()->structure()->transitionWatchpointSet();
+ InlineWatchpointSet& arrayPrototypeTransition = globalObject->arrayPrototype()->structure()->transitionWatchpointSet();
+
+ // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
+ if (globalObject->arraySpeciesWatchpoint().state() == IsWatched
+ && globalObject->havingABadTimeWatchpoint()->isStillValid()
+ && arrayPrototypeTransition.isStillValid()
+ && objectPrototypeTransition.isStillValid()
+ && globalObject->arrayPrototypeChainIsSane()) {
+
+ m_graph.watchpoints().addLazily(globalObject->arraySpeciesWatchpoint());
+ m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
+ m_graph.watchpoints().addLazily(arrayPrototypeTransition);
+ m_graph.watchpoints().addLazily(objectPrototypeTransition);
+
+ insertChecks();
+
+ Node* array = get(virtualRegisterForArgument(0, registerOffset));
+ // We do a few things here to prove that we aren't skipping doing side-effects in an observable way:
+ // 1. We ensure that the "constructor" property hasn't been changed (because the observable
+ // effects of slice require that we perform a Get(array, "constructor") and we can skip
+ // that if we're an original array structure. (We can relax this in the future by using
+ // TryGetById and CheckCell).
+ //
+ // 2. We check that the array we're calling slice on has the same global object as the lexical
+ // global object that this code is running in. This requirement is necessary because we set up the
+ // watchpoints above on the lexical global object. This means that code that calls slice on
+ // arrays produced by other global objects won't get this optimization. We could relax this
+ // requirement in the future by checking that the watchpoint hasn't fired at runtime in the code
+ // we generate instead of registering it as a watchpoint that would invalidate the compilation.
+ //
+ // 3. By proving we're an original array structure, we guarantee that the incoming array
+ // isn't a subclass of Array.
+
+ StructureSet structureSet;
+ structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithInt32));
+ structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous));
+ structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithDouble));
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), array);
+
+ addVarArgChild(array);
+ addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Start index.
+ if (argumentCountIncludingThis >= 3)
+ addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // End index.
+ addVarArgChild(addToGraph(GetButterfly, array));
+
+ Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo());
+ set(VirtualRegister(resultOperand), arraySlice);
+ return true;
+ }
+
+ return false;
+ }
+ default:
+ return false;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+ }
case ArrayPopIntrinsic: {
if (argumentCountIncludingThis != 1)
return false;
- ArrayMode arrayMode = getArrayMode(m_currentInstruction[6].u.arrayProfile);
+ ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
if (!arrayMode.isJSArray())
return false;
switch (arrayMode.type()) {
@@ -1539,6 +2350,7 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int
case Array::Double:
case Array::Contiguous:
case Array::ArrayStorage: {
+ insertChecks();
Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
set(VirtualRegister(resultOperand), arrayPop);
return true;
@@ -1549,10 +2361,32 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int
}
}
+ case ParseIntIntrinsic: {
+ if (argumentCountIncludingThis < 2)
+ return false;
+
+ if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+ return false;
+
+ insertChecks();
+ VirtualRegister valueOperand = virtualRegisterForArgument(1, registerOffset);
+ Node* parseInt;
+ if (argumentCountIncludingThis == 2)
+ parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand));
+ else {
+ ASSERT(argumentCountIncludingThis > 2);
+ VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset);
+ parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand));
+ }
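+ // Either way (illustrative): parseInt(str) yields a one-child ParseInt node, while
+ // parseInt(str, 16) yields a two-child ParseInt node carrying the radix; both keep the
+ // profiled result prediction.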
+ set(VirtualRegister(resultOperand), parseInt);
+ return true;
+ }
+
case CharCodeAtIntrinsic: {
if (argumentCountIncludingThis != 2)
return false;
+ insertChecks();
VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
@@ -1565,6 +2399,7 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int
if (argumentCountIncludingThis != 2)
return false;
+ insertChecks();
VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
@@ -1572,10 +2407,21 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int
set(VirtualRegister(resultOperand), charCode);
return true;
}
+ case Clz32Intrinsic: {
+ insertChecks();
+ if (argumentCountIncludingThis == 1)
+ set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
+ else {
+ Node* operand = get(virtualRegisterForArgument(1, registerOffset));
+ set(VirtualRegister(resultOperand), addToGraph(ArithClz32, operand));
+ }
+ return true;
+ }
case FromCharCodeIntrinsic: {
if (argumentCountIncludingThis != 2)
return false;
+ insertChecks();
VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
@@ -1588,25 +2434,164 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int
if (argumentCountIncludingThis != 2)
return false;
- Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
+ insertChecks();
+ Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
set(VirtualRegister(resultOperand), regExpExec);
return true;
}
- case RegExpTestIntrinsic: {
+ case RegExpTestIntrinsic:
+ case RegExpTestFastIntrinsic: {
if (argumentCountIncludingThis != 2)
return false;
-
- Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
+
+ if (intrinsic == RegExpTestIntrinsic) {
+ // Don't inline the intrinsic if we exited due to one of the primordial RegExp checks failing.
+ if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
+ return false;
+
+ JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
+ Structure* regExpStructure = globalObject->regExpStructure();
+ m_graph.registerStructure(regExpStructure);
+ ASSERT(regExpStructure->storedPrototype().isObject());
+ ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
+
+ FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
+ Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
+
+ auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
+ JSValue currentProperty;
+ if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
+ return false;
+
+ return currentProperty == primordialProperty;
+ };
+
+ // Check that RegExp.exec is still the primordial RegExp.prototype.exec
+ if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
+ return false;
+
+ // Check that regExpObject is actually a RegExp object.
+ Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
+ addToGraph(Check, Edge(regExpObject, RegExpObjectUse));
+
+ // Check that regExpObject's exec is actually the primordial RegExp.prototype.exec.
+ UniquedStringImpl* execPropertyID = m_vm->propertyNames->exec.impl();
+ unsigned execIndex = m_graph.identifiers().ensure(execPropertyID);
+ Node* actualProperty = addToGraph(TryGetById, OpInfo(execIndex), OpInfo(SpecFunction), Edge(regExpObject, CellUse));
+ FrozenValue* regExpPrototypeExec = m_graph.freeze(globalObject->regExpProtoExecFunction());
+ addToGraph(CheckCell, OpInfo(regExpPrototypeExec), Edge(actualProperty, CellUse));
+ }
+
+ insertChecks();
+ Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
+ Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset)));
set(VirtualRegister(resultOperand), regExpExec);
return true;
}
+ case IsTypedArrayViewIntrinsic: {
+ ASSERT(argumentCountIncludingThis == 2);
+
+ insertChecks();
+ set(VirtualRegister(resultOperand), addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
+ return true;
+ }
+
+ case StringPrototypeReplaceIntrinsic: {
+ if (argumentCountIncludingThis != 3)
+ return false;
+
+ // Don't inline the intrinsic if we exited due to "search" not being a RegExp or String object.
+ if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+ return false;
+
+ // Don't inline the intrinsic if we exited due to one of the primordial RegExp checks failing.
+ if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
+ return false;
+
+ JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
+ Structure* regExpStructure = globalObject->regExpStructure();
+ m_graph.registerStructure(regExpStructure);
+ ASSERT(regExpStructure->storedPrototype().isObject());
+ ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
+
+ FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
+ Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
+
+ auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
+ JSValue currentProperty;
+ if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
+ return false;
+
+ return currentProperty == primordialProperty;
+ };
+
+ // Check that searchRegExp.exec is still the primordial RegExp.prototype.exec
+ if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
+ return false;
+
+ // Check that searchRegExp.global is still the primordial RegExp.prototype.global
+ if (!isRegExpPropertySame(globalObject->regExpProtoGlobalGetter(), m_vm->propertyNames->global.impl()))
+ return false;
+
+ // Check that searchRegExp.unicode is still the primordial RegExp.prototype.unicode
+ if (!isRegExpPropertySame(globalObject->regExpProtoUnicodeGetter(), m_vm->propertyNames->unicode.impl()))
+ return false;
+
+ // Check that searchRegExp[Symbol.replace] is still the primordial RegExp.prototype[Symbol.replace]
+ if (!isRegExpPropertySame(globalObject->regExpProtoSymbolReplaceFunction(), m_vm->propertyNames->replaceSymbol.impl()))
+ return false;
+
+ insertChecks();
+
+ Node* result = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
+ set(VirtualRegister(resultOperand), result);
+ return true;
+ }
+
+ case StringPrototypeReplaceRegExpIntrinsic: {
+ if (argumentCountIncludingThis != 3)
+ return false;
+
+ insertChecks();
+ Node* result = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
+ set(VirtualRegister(resultOperand), result);
+ return true;
+ }
+
+ case RoundIntrinsic:
+ case FloorIntrinsic:
+ case CeilIntrinsic:
+ case TruncIntrinsic: {
+ if (argumentCountIncludingThis == 1) {
+ insertChecks();
+ set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
+ return true;
+ }
+ insertChecks();
+ Node* operand = get(virtualRegisterForArgument(1, registerOffset));
+ NodeType op;
+ if (intrinsic == RoundIntrinsic)
+ op = ArithRound;
+ else if (intrinsic == FloorIntrinsic)
+ op = ArithFloor;
+ else if (intrinsic == CeilIntrinsic)
+ op = ArithCeil;
+ else {
+ ASSERT(intrinsic == TruncIntrinsic);
+ op = ArithTrunc;
+ }
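+ // For example (illustrative): Math.floor() with no argument takes the NaN-constant path above,
+ // while Math.floor(x) emits the ArithFloor node built below (and similarly ArithRound,
+ // ArithCeil, and ArithTrunc for the other three intrinsics).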
+ Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand);
+ set(VirtualRegister(resultOperand), roundNode);
+ return true;
+ }
case IMulIntrinsic: {
if (argumentCountIncludingThis != 3)
return false;
+ insertChecks();
VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
Node* left = get(leftOperand);
@@ -1614,15 +2599,327 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int
set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right));
return true;
}
+
+ case RandomIntrinsic: {
+ if (argumentCountIncludingThis != 1)
+ return false;
+ insertChecks();
+ set(VirtualRegister(resultOperand), addToGraph(ArithRandom));
+ return true;
+ }
+ case DFGTrueIntrinsic: {
+ insertChecks();
+ set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
+ return true;
+ }
+
+ case OSRExitIntrinsic: {
+ insertChecks();
+ addToGraph(ForceOSRExit);
+ set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
+ return true;
+ }
+
+ case IsFinalTierIntrinsic: {
+ insertChecks();
+ set(VirtualRegister(resultOperand),
+ jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true)));
+ return true;
+ }
+
+ case SetInt32HeapPredictionIntrinsic: {
+ insertChecks();
+ for (int i = 1; i < argumentCountIncludingThis; ++i) {
+ Node* node = get(virtualRegisterForArgument(i, registerOffset));
+ if (node->hasHeapPrediction())
+ node->setHeapPrediction(SpecInt32Only);
+ }
+ set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
+ return true;
+ }
+
+ case CheckInt32Intrinsic: {
+ insertChecks();
+ for (int i = 1; i < argumentCountIncludingThis; ++i) {
+ Node* node = get(virtualRegisterForArgument(i, registerOffset));
+ addToGraph(Phantom, Edge(node, Int32Use));
+ }
+ set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
+ return true;
+ }
+
+ case FiatInt52Intrinsic: {
+ if (argumentCountIncludingThis != 2)
+ return false;
+ insertChecks();
+ VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
+ if (enableInt52())
+ set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand)));
+ else
+ set(VirtualRegister(resultOperand), get(operand));
+ return true;
+ }
+
+ case JSMapGetIntrinsic: {
+ if (argumentCountIncludingThis != 2)
+ return false;
+
+ insertChecks();
+ Node* map = get(virtualRegisterForArgument(0, registerOffset));
+ Node* key = get(virtualRegisterForArgument(1, registerOffset));
+ Node* hash = addToGraph(MapHash, key);
+ Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(key), Edge(hash));
+ Node* result = addToGraph(LoadFromJSMapBucket, OpInfo(), OpInfo(prediction), bucket);
+ set(VirtualRegister(resultOperand), result);
+ return true;
+ }
+
+ case JSSetHasIntrinsic:
+ case JSMapHasIntrinsic: {
+ if (argumentCountIncludingThis != 2)
+ return false;
+
+ insertChecks();
+ Node* mapOrSet = get(virtualRegisterForArgument(0, registerOffset));
+ Node* key = get(virtualRegisterForArgument(1, registerOffset));
+ Node* hash = addToGraph(MapHash, key);
+ UseKind useKind = intrinsic == JSSetHasIntrinsic ? SetObjectUse : MapObjectUse;
+ Node* bucket = addToGraph(GetMapBucket, OpInfo(0), Edge(mapOrSet, useKind), Edge(key), Edge(hash));
+ Node* result = addToGraph(IsNonEmptyMapBucket, bucket);
+ set(VirtualRegister(resultOperand), result);
+ return true;
+ }
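+ // Illustrative lowering for the two cases above: map.get(key) becomes
+ // MapHash(key) -> GetMapBucket -> LoadFromJSMapBucket, while map.has(key) / set.has(key)
+ // become MapHash(key) -> GetMapBucket -> IsNonEmptyMapBucket.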
+
+ case HasOwnPropertyIntrinsic: {
+ if (argumentCountIncludingThis != 2)
+ return false;
+
+ // This can be racy; that's fine. We know that once we observe that this is created,
+ // it will never be destroyed until the VM is destroyed. It's unlikely that
+ // we'd ever get to the point where we inline this as an intrinsic without the
+ // cache being created; however, it's possible if we always throw exceptions inside
+ // hasOwnProperty.
+ if (!m_vm->hasOwnPropertyCache())
+ return false;
+
+ insertChecks();
+ Node* object = get(virtualRegisterForArgument(0, registerOffset));
+ Node* key = get(virtualRegisterForArgument(1, registerOffset));
+ Node* result = addToGraph(HasOwnProperty, object, key);
+ set(VirtualRegister(resultOperand), result);
+ return true;
+ }
+
+ case StringPrototypeToLowerCaseIntrinsic: {
+ if (argumentCountIncludingThis != 1)
+ return false;
+
+ if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+ return false;
+
+ insertChecks();
+ Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
+ Node* result = addToGraph(ToLowerCase, thisString);
+ set(VirtualRegister(resultOperand), result);
+ return true;
+ }
+
+ case NumberPrototypeToStringIntrinsic: {
+ if (argumentCountIncludingThis != 1 && argumentCountIncludingThis != 2)
+ return false;
+
+ if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+ return false;
+
+ insertChecks();
+ Node* thisNumber = get(virtualRegisterForArgument(0, registerOffset));
+ if (argumentCountIncludingThis == 1) {
+ Node* result = addToGraph(ToString, thisNumber);
+ set(VirtualRegister(resultOperand), result);
+ } else {
+ Node* radix = get(virtualRegisterForArgument(1, registerOffset));
+ Node* result = addToGraph(NumberToStringWithRadix, thisNumber, radix);
+ set(VirtualRegister(resultOperand), result);
+ }
+ return true;
+ }
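+ // Illustrative, for the case above: (255).toString() becomes a plain ToString node, while
+ // (255).toString(16) becomes NumberToStringWithRadix with the radix as its second child.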
+
default:
return false;
}
}
+template<typename ChecksFunctor>
+bool ByteCodeParser::handleDOMJITCall(Node* callTarget, int resultOperand, const DOMJIT::Signature* signature, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
+{
+ if (argumentCountIncludingThis != static_cast<int>(1 + signature->argumentCount))
+ return false;
+ if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
+ return false;
+
+ // FIXME: Currently, we only support functions that take up to 2 arguments.
+ // Eventually, we should extend this, but 2 or 3 arguments probably cover the typical use cases.
+ // https://bugs.webkit.org/show_bug.cgi?id=164346
+ ASSERT_WITH_MESSAGE(argumentCountIncludingThis <= JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS, "Currently CallDOM does not support an arbitrary number of arguments.");
+
+ insertChecks();
+ addCall(resultOperand, Call, signature, callTarget, argumentCountIncludingThis, registerOffset, prediction);
+ return true;
+}
+
+
+template<typename ChecksFunctor>
+bool ByteCodeParser::handleIntrinsicGetter(int resultOperand, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks)
+{
+ switch (variant.intrinsic()) {
+ case TypedArrayByteLengthIntrinsic: {
+ insertChecks();
+
+ TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
+ Array::Type arrayType = toArrayType(type);
+ size_t logSize = logElementSize(type);
+
+ variant.structureSet().forEach([&] (Structure* structure) {
+ TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
+ ASSERT(logSize == logElementSize(curType));
+ arrayType = refineTypedArrayType(arrayType, curType);
+ ASSERT(arrayType != Array::Generic);
+ });
+
+ Node* lengthNode = addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType).asWord()), thisNode);
+
+ if (!logSize) {
+ set(VirtualRegister(resultOperand), lengthNode);
+ return true;
+ }
+
+ // We can use a BitLShift here because typed arrays will never have a byteLength
+ // that overflows int32.
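+ // Worked example (illustrative): for a Float64Array, logSize is 3, so byteLength is emitted
+ // as BitLShift(length, 3); for a Uint8Array, logSize is 0 and the length node above is
+ // returned directly.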
+ Node* shiftNode = jsConstant(jsNumber(logSize));
+ set(VirtualRegister(resultOperand), addToGraph(BitLShift, lengthNode, shiftNode));
+
+ return true;
+ }
+
+ case TypedArrayLengthIntrinsic: {
+ insertChecks();
+
+ TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
+ Array::Type arrayType = toArrayType(type);
+
+ variant.structureSet().forEach([&] (Structure* structure) {
+ TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
+ arrayType = refineTypedArrayType(arrayType, curType);
+ ASSERT(arrayType != Array::Generic);
+ });
+
+ set(VirtualRegister(resultOperand), addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType).asWord()), thisNode));
+
+ return true;
+
+ }
+
+ case TypedArrayByteOffsetIntrinsic: {
+ insertChecks();
+
+ TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
+ Array::Type arrayType = toArrayType(type);
+
+ variant.structureSet().forEach([&] (Structure* structure) {
+ TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
+ arrayType = refineTypedArrayType(arrayType, curType);
+ ASSERT(arrayType != Array::Generic);
+ });
+
+ set(VirtualRegister(resultOperand), addToGraph(GetTypedArrayByteOffset, OpInfo(ArrayMode(arrayType).asWord()), thisNode));
+
+ return true;
+ }
+
+ default:
+ return false;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+static void blessCallDOMGetter(Node* node)
+{
+ DOMJIT::CallDOMGetterPatchpoint* patchpoint = node->callDOMGetterData()->patchpoint;
+ if (!patchpoint->effect.mustGenerate())
+ node->clearFlags(NodeMustGenerate);
+}
+
+bool ByteCodeParser::handleDOMJITGetter(int resultOperand, const GetByIdVariant& variant, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction)
+{
+ if (!variant.domJIT())
+ return false;
+
+ DOMJIT::GetterSetter* domJIT = variant.domJIT();
+
+ // We do not need to actually look up the CustomGetterSetter here. Checking Structures or registering watchpoints is enough,
+ // since replacing a CustomGetterSetter always incurs a Structure transition.
+ if (!check(variant.conditionSet()))
+ return false;
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), thisNode);
+
+ Ref<DOMJIT::Patchpoint> checkDOMPatchpoint = domJIT->checkDOM();
+ m_graph.m_domJITPatchpoints.append(checkDOMPatchpoint.ptr());
+ // We do not need to emit a CheckCell here. When the custom accessor is replaced with a different one, a Structure transition occurs.
+ addToGraph(CheckDOM, OpInfo(checkDOMPatchpoint.ptr()), OpInfo(domJIT->thisClassInfo()), thisNode);
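+ // Taken together (illustrative summary): the inlined DOMJIT getter is the CheckStructure and
+ // CheckDOM emitted above plus the CallDOMGetter node built below, with the receiver's
+ // GlobalObject passed as an extra child when the patchpoint requires it.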
+
+ CallDOMGetterData* callDOMGetterData = m_graph.m_callDOMGetterData.add();
+ Ref<DOMJIT::CallDOMGetterPatchpoint> callDOMGetterPatchpoint = domJIT->callDOMGetter();
+ m_graph.m_domJITPatchpoints.append(callDOMGetterPatchpoint.ptr());
+
+ callDOMGetterData->domJIT = domJIT;
+ callDOMGetterData->patchpoint = callDOMGetterPatchpoint.ptr();
+ callDOMGetterData->identifierNumber = identifierNumber;
+
+ Node* callDOMGetterNode = nullptr;
+ // The GlobalObject of thisNode is always used to create a DOMWrapper.
+ if (callDOMGetterPatchpoint->requireGlobalObject) {
+ Node* globalObject = addToGraph(GetGlobalObject, thisNode);
+ callDOMGetterNode = addToGraph(CallDOMGetter, OpInfo(callDOMGetterData), OpInfo(prediction), thisNode, globalObject);
+ } else
+ callDOMGetterNode = addToGraph(CallDOMGetter, OpInfo(callDOMGetterData), OpInfo(prediction), thisNode);
+ blessCallDOMGetter(callDOMGetterNode);
+ set(VirtualRegister(resultOperand), callDOMGetterNode);
+ return true;
+}
+
+bool ByteCodeParser::handleModuleNamespaceLoad(int resultOperand, SpeculatedType prediction, Node* base, GetByIdStatus getById)
+{
+ if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
+ return false;
+ addToGraph(CheckCell, OpInfo(m_graph.freeze(getById.moduleNamespaceObject())), Edge(base, CellUse));
+
+ // Ideally we wouldn't have to do this Phantom. But:
+ //
+ // For the constant case: we must do it because otherwise we would have no way of knowing
+ // that the scope is live at OSR here.
+ //
+ // For the non-constant case: GetClosureVar could be DCE'd, but baseline's implementation
+ // won't be able to handle an Undefined scope.
+ addToGraph(Phantom, base);
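+ // Illustrative summary: a module namespace access such as ns.x is thus reduced to the
+ // CheckCell above plus either a folded constant (when the closure var's value is known)
+ // or a GetClosureVar at the namespace export's fixed ScopeOffset, emitted below.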
+
+ // Constant folding in the bytecode parser is important for performance. This may not
+ // have executed yet. If it hasn't, then we won't have a prediction. Lacking a
+ // prediction, we'd otherwise think that it has to exit. Then when it did execute, we
+ // would recompile. But if we can fold it here, we avoid the exit.
+ m_graph.freeze(getById.moduleEnvironment());
+ if (JSValue value = m_graph.tryGetConstantClosureVar(getById.moduleEnvironment(), getById.scopeOffset())) {
+ set(VirtualRegister(resultOperand), weakJSConstant(value));
+ return true;
+ }
+ set(VirtualRegister(resultOperand), addToGraph(GetClosureVar, OpInfo(getById.scopeOffset().offset()), OpInfo(prediction), weakJSConstant(getById.moduleEnvironment())));
+ return true;
+}
+
+template<typename ChecksFunctor>
bool ByteCodeParser::handleTypedArrayConstructor(
int resultOperand, InternalFunction* function, int registerOffset,
- int argumentCountIncludingThis, TypedArrayType type)
+ int argumentCountIncludingThis, TypedArrayType type, const ChecksFunctor& insertChecks)
{
if (!isTypedView(type))
return false;
@@ -1667,28 +2964,37 @@ bool ByteCodeParser::handleTypedArrayConstructor(
if (argumentCountIncludingThis != 2)
return false;
+ if (!function->globalObject()->typedArrayStructureConcurrently(type))
+ return false;
+
+ insertChecks();
set(VirtualRegister(resultOperand),
addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
return true;
}
+template<typename ChecksFunctor>
bool ByteCodeParser::handleConstantInternalFunction(
- int resultOperand, InternalFunction* function, int registerOffset,
- int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind kind)
+ Node* callTargetNode, int resultOperand, InternalFunction* function, int registerOffset,
+ int argumentCountIncludingThis, CodeSpecializationKind kind, SpeculatedType prediction, const ChecksFunctor& insertChecks)
{
- // If we ever find that we have a lot of internal functions that we specialize for,
- // then we should probably have some sort of hashtable dispatch, or maybe even
- // dispatch straight through the MethodTable of the InternalFunction. But for now,
- // it seems that this case is hit infrequently enough, and the number of functions
- // we know about is small enough, that having just a linear cascade of if statements
- // is good enough.
-
- UNUSED_PARAM(prediction); // Remove this once we do more things.
-
+ if (verbose)
+ dataLog(" Handling constant internal function ", JSValue(function), "\n");
+
+ if (kind == CodeForConstruct) {
+ Node* newTargetNode = get(virtualRegisterForArgument(0, registerOffset));
+ // We cannot handle the case where new.target != callee (i.e. a construct from a super call) because we
+ // don't know what the prototype of the constructed object will be.
+ // FIXME: If we have inlined super calls up to the call site, however, we should be able to figure out the structure. https://bugs.webkit.org/show_bug.cgi?id=152700
+ if (newTargetNode != callTargetNode)
+ return false;
+ }
+
if (function->classInfo() == ArrayConstructor::info()) {
if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
return false;
+ insertChecks();
if (argumentCountIncludingThis == 2) {
set(VirtualRegister(resultOperand),
addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
@@ -1701,26 +3007,54 @@ bool ByteCodeParser::handleConstantInternalFunction(
addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
return true;
}
+
+ if (function->classInfo() == NumberConstructor::info()) {
+ if (kind == CodeForConstruct)
+ return false;
+
+ insertChecks();
+ if (argumentCountIncludingThis <= 1)
+ set(VirtualRegister(resultOperand), jsConstant(jsNumber(0)));
+ else
+ set(VirtualRegister(resultOperand), addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
+
+ return true;
+ }
if (function->classInfo() == StringConstructor::info()) {
+ insertChecks();
+
Node* result;
if (argumentCountIncludingThis <= 1)
- result = cellConstant(m_vm->smallStrings.emptyString());
+ result = jsConstant(m_vm->smallStrings.emptyString());
else
- result = addToGraph(ToString, get(virtualRegisterForArgument(1, registerOffset)));
+ result = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset)));
if (kind == CodeForConstruct)
- result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result);
+ result = addToGraph(NewStringObject, OpInfo(m_graph.registerStructure(function->globalObject()->stringObjectStructure())), result);
set(VirtualRegister(resultOperand), result);
return true;
}
-
+
+ // FIXME: This should handle construction as well. https://bugs.webkit.org/show_bug.cgi?id=155591
+ if (function->classInfo() == ObjectConstructor::info() && kind == CodeForCall) {
+ insertChecks();
+
+ Node* result;
+ if (argumentCountIncludingThis <= 1)
+ result = addToGraph(NewObject, OpInfo(m_graph.registerStructure(function->globalObject()->objectStructureForObjectConstructor())));
+ else
+ result = addToGraph(CallObjectConstructor, get(virtualRegisterForArgument(1, registerOffset)));
+ set(VirtualRegister(resultOperand), result);
+ return true;
+ }
+
for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) {
bool result = handleTypedArrayConstructor(
resultOperand, function, registerOffset, argumentCountIncludingThis,
- indexToTypedArrayType(typeIndex));
+ indexToTypedArrayType(typeIndex), insertChecks);
if (result)
return true;
}
@@ -1728,120 +3062,800 @@ bool ByteCodeParser::handleConstantInternalFunction(
return false;
}
-Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset)
+Node* ByteCodeParser::handleGetByOffset(
+ SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset,
+ const InferredType::Descriptor& inferredType, NodeType op)
{
Node* propertyStorage;
if (isInlineOffset(offset))
propertyStorage = base;
else
propertyStorage = addToGraph(GetButterfly, base);
- Node* getByOffset = addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage, base);
-
- StorageAccessData storageAccessData;
- storageAccessData.offset = offset;
- storageAccessData.identifierNumber = identifierNumber;
- m_graph.m_storageAccessData.append(storageAccessData);
+
+ StorageAccessData* data = m_graph.m_storageAccessData.add();
+ data->offset = offset;
+ data->identifierNumber = identifierNumber;
+ data->inferredType = inferredType;
+ m_graph.registerInferredType(inferredType);
+
+ Node* getByOffset = addToGraph(op, OpInfo(data), OpInfo(prediction), propertyStorage, base);
return getByOffset;
}
-void ByteCodeParser::handleGetByOffset(
- int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
- PropertyOffset offset)
-{
- set(VirtualRegister(destinationOperand), handleGetByOffset(prediction, base, identifierNumber, offset));
-}
-
-Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, PropertyOffset offset, Node* value)
+Node* ByteCodeParser::handlePutByOffset(
+ Node* base, unsigned identifier, PropertyOffset offset, const InferredType::Descriptor& inferredType,
+ Node* value)
{
Node* propertyStorage;
if (isInlineOffset(offset))
propertyStorage = base;
else
propertyStorage = addToGraph(GetButterfly, base);
- Node* result = addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, base, value);
- StorageAccessData storageAccessData;
- storageAccessData.offset = offset;
- storageAccessData.identifierNumber = identifier;
- m_graph.m_storageAccessData.append(storageAccessData);
+ StorageAccessData* data = m_graph.m_storageAccessData.add();
+ data->offset = offset;
+ data->identifierNumber = identifier;
+ data->inferredType = inferredType;
+ m_graph.registerInferredType(inferredType);
+
+ Node* result = addToGraph(PutByOffset, OpInfo(data), propertyStorage, base, value);
+
+ return result;
+}
+
+bool ByteCodeParser::check(const ObjectPropertyCondition& condition)
+{
+ if (!condition)
+ return false;
+
+ if (m_graph.watchCondition(condition))
+ return true;
+
+ Structure* structure = condition.object()->structure();
+ if (!condition.structureEnsuresValidity(structure))
+ return false;
+
+ addToGraph(
+ CheckStructure,
+ OpInfo(m_graph.addStructureSet(structure)),
+ weakJSConstant(condition.object()));
+ return true;
+}
+
+GetByOffsetMethod ByteCodeParser::promoteToConstant(GetByOffsetMethod method)
+{
+ if (method.kind() == GetByOffsetMethod::LoadFromPrototype
+ && method.prototype()->structure()->dfgShouldWatch()) {
+ if (JSValue constant = m_graph.tryGetConstantProperty(method.prototype()->value(), method.prototype()->structure(), method.offset()))
+ return GetByOffsetMethod::constant(m_graph.freeze(constant));
+ }
+
+ return method;
+}
+bool ByteCodeParser::needsDynamicLookup(ResolveType type, OpcodeID opcode)
+{
+ ASSERT(opcode == op_resolve_scope || opcode == op_get_from_scope || opcode == op_put_to_scope);
+
+ JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
+ if (needsVarInjectionChecks(type) && globalObject->varInjectionWatchpoint()->hasBeenInvalidated())
+ return true;
+
+ switch (type) {
+ case GlobalProperty:
+ case GlobalVar:
+ case GlobalLexicalVar:
+ case ClosureVar:
+ case LocalClosureVar:
+ case ModuleVar:
+ return false;
+
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks: {
+ // The heuristic for UnresolvedProperty scope accesses is that we will ForceOSRExit if we
+ // haven't exited from this access before, to let the baseline JIT try to better
+ // cache the access. If we've already exited from this operation, it's unlikely that
+ // the baseline will come up with a better ResolveType, and instead we will compile
+ // this as a dynamic scope access.
+
+ // We only track our heuristic through resolve_scope since resolve_scope will
+ // dominate unresolved gets/puts on that scope.
+ if (opcode != op_resolve_scope)
+ return true;
+
+ if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, InadequateCoverage)) {
+ // We've already exited so give up on getting better ResolveType information.
+ return true;
+ }
+
+ // We have not exited yet, so let's have the baseline get better ResolveType information for us.
+ // This type of code is often seen when we tier up in a loop but haven't executed the part
+ // of a function that comes after the loop.
+ return false;
+ }
+
+ case Dynamic:
+ return true;
+
+ case GlobalPropertyWithVarInjectionChecks:
+ case GlobalVarWithVarInjectionChecks:
+ case GlobalLexicalVarWithVarInjectionChecks:
+ case ClosureVarWithVarInjectionChecks:
+ return false;
+ }
+
+ ASSERT_NOT_REACHED();
+ return false;
+}
+
+GetByOffsetMethod ByteCodeParser::planLoad(const ObjectPropertyCondition& condition)
+{
+ if (verbose)
+ dataLog("Planning a load: ", condition, "\n");
+
+ // We might promote this to Equivalence, and a later DFG pass might also do such promotion
+ // even if we fail, but for simplicity this cannot be asked to load an equivalence condition.
+ // None of the clients of this method will request a load of an Equivalence condition anyway,
+ // and supporting it would complicate the heuristics below.
+ RELEASE_ASSERT(condition.kind() == PropertyCondition::Presence);
+
+ // Here's the ranking of how to handle this, from most preferred to least preferred:
+ //
+ // 1) Watchpoint on an equivalence condition and return a constant node for the loaded value.
+ // No other code is emitted, and the structure of the base object is never registered.
+ // Hence this results in zero code and we won't jettison this compilation if the object
+ // transitions, even if the structure is watchable right now.
+ //
+ // 2) Need to emit a load, and the current structure of the base is going to be watched by the
+ // DFG anyway (i.e. dfgShouldWatch). Watch the structure and emit the load. Don't watch the
+ // condition, since the act of turning the base into a constant in IR will cause the DFG to
+ // watch the structure anyway and doing so would subsume watching the condition.
+ //
+ // 3) Need to emit a load, and the current structure of the base is watchable but not by the
+ // DFG (i.e. transitionWatchpointSetIsStillValid() and !dfgShouldWatchIfPossible()). Watch
+ // the condition, and emit a load.
+ //
+ // 4) Need to emit a load, and the current structure of the base is not watchable. Emit a
+ // structure check, and emit a load.
+ //
+ // 5) The condition does not hold. Give up and return null.
+
+ // First, try to promote Presence to Equivalence. We do this before doing anything else
+ // because it's the most profitable. Also, there are cases where the presence is watchable but
+ // we don't want to watch it unless it became an equivalence (see the relationship between
+ // (1), (2), and (3) above).
+ ObjectPropertyCondition equivalenceCondition = condition.attemptToMakeEquivalenceWithoutBarrier(*m_vm);
+ if (m_graph.watchCondition(equivalenceCondition))
+ return GetByOffsetMethod::constant(m_graph.freeze(equivalenceCondition.requiredValue()));
+
+ // At this point, we'll have to materialize the condition's base as a constant in DFG IR. Once
+ // we do this, the frozen value will have its own idea of what the structure is. Use that from
+ // now on just because it's less confusing.
+ FrozenValue* base = m_graph.freeze(condition.object());
+ Structure* structure = base->structure();
+
+ // Check if the structure that we've registered makes the condition hold. If not, just give
+ // up. This is case (5) above.
+ if (!condition.structureEnsuresValidity(structure))
+ return GetByOffsetMethod();
+
+ // If the structure is watched by the DFG already, then just use this fact to emit the load.
+ // This is case (2) above.
+ if (structure->dfgShouldWatch())
+ return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset()));
+
+ // If we can watch the condition right now, then we can emit the load after watching it. This
+ // is case (3) above.
+ if (m_graph.watchCondition(condition))
+ return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset()));
+
+ // We can't watch anything but we know that the current structure satisfies the condition. So,
+ // check for that structure and then emit the load.
+ addToGraph(
+ CheckStructure,
+ OpInfo(m_graph.addStructureSet(structure)),
+ addToGraph(JSConstant, OpInfo(base)));
+ return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset()));
+}
+
+Node* ByteCodeParser::load(
+ SpeculatedType prediction, unsigned identifierNumber, const GetByOffsetMethod& method,
+ NodeType op)
+{
+ switch (method.kind()) {
+ case GetByOffsetMethod::Invalid:
+ return nullptr;
+ case GetByOffsetMethod::Constant:
+ return addToGraph(JSConstant, OpInfo(method.constant()));
+ case GetByOffsetMethod::LoadFromPrototype: {
+ Node* baseNode = addToGraph(JSConstant, OpInfo(method.prototype()));
+ return handleGetByOffset(
+ prediction, baseNode, identifierNumber, method.offset(), InferredType::Top, op);
+ }
+ case GetByOffsetMethod::Load:
+ // Will never see this from planLoad().
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr;
+}
+
+Node* ByteCodeParser::load(
+ SpeculatedType prediction, const ObjectPropertyCondition& condition, NodeType op)
+{
+ GetByOffsetMethod method = planLoad(condition);
+ return load(prediction, m_graph.identifiers().ensure(condition.uid()), method, op);
+}
+
+bool ByteCodeParser::check(const ObjectPropertyConditionSet& conditionSet)
+{
+ for (const ObjectPropertyCondition condition : conditionSet) {
+ if (!check(condition))
+ return false;
+ }
+ return true;
+}
+
+GetByOffsetMethod ByteCodeParser::planLoad(const ObjectPropertyConditionSet& conditionSet)
+{
+ if (verbose)
+ dataLog("conditionSet = ", conditionSet, "\n");
+
+ GetByOffsetMethod result;
+ for (const ObjectPropertyCondition condition : conditionSet) {
+ switch (condition.kind()) {
+ case PropertyCondition::Presence:
+ RELEASE_ASSERT(!result); // Should only see exactly one of these.
+ result = planLoad(condition);
+ if (!result)
+ return GetByOffsetMethod();
+ break;
+ default:
+ if (!check(condition))
+ return GetByOffsetMethod();
+ break;
+ }
+ }
+ if (!result) {
+ // We have an unset property.
+ ASSERT(!conditionSet.numberOfConditionsWithKind(PropertyCondition::Presence));
+ return GetByOffsetMethod::constant(m_constantUndefined);
+ }
return result;
}
+Node* ByteCodeParser::load(
+ SpeculatedType prediction, const ObjectPropertyConditionSet& conditionSet, NodeType op)
+{
+ GetByOffsetMethod method = planLoad(conditionSet);
+ return load(
+ prediction,
+ m_graph.identifiers().ensure(conditionSet.slotBaseCondition().uid()),
+ method, op);
+}
+
+ObjectPropertyCondition ByteCodeParser::presenceLike(
+ JSObject* knownBase, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set)
+{
+ if (set.isEmpty())
+ return ObjectPropertyCondition();
+ unsigned attributes;
+ PropertyOffset firstOffset = set[0]->getConcurrently(uid, attributes);
+ if (firstOffset != offset)
+ return ObjectPropertyCondition();
+ for (unsigned i = 1; i < set.size(); ++i) {
+ unsigned otherAttributes;
+ PropertyOffset otherOffset = set[i]->getConcurrently(uid, otherAttributes);
+ if (otherOffset != offset || otherAttributes != attributes)
+ return ObjectPropertyCondition();
+ }
+ return ObjectPropertyCondition::presenceWithoutBarrier(knownBase, uid, offset, attributes);
+}
+
+bool ByteCodeParser::checkPresenceLike(
+ JSObject* knownBase, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set)
+{
+ return check(presenceLike(knownBase, uid, offset, set));
+}
+
+void ByteCodeParser::checkPresenceLike(
+ Node* base, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set)
+{
+ if (JSObject* knownBase = base->dynamicCastConstant<JSObject*>(*m_vm)) {
+ if (checkPresenceLike(knownBase, uid, offset, set))
+ return;
+ }
+
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(set)), base);
+}
+
+template<typename VariantType>
+Node* ByteCodeParser::load(
+ SpeculatedType prediction, Node* base, unsigned identifierNumber, const VariantType& variant)
+{
+ // Make sure backwards propagation knows that we've used base.
+ addToGraph(Phantom, base);
+
+ bool needStructureCheck = true;
+
+ UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
+
+ if (JSObject* knownBase = base->dynamicCastConstant<JSObject*>(*m_vm)) {
+ // Try to optimize away the structure check. Note that it's not worth doing anything about this
+ // if the base's structure is watched.
+ Structure* structure = base->constant()->structure();
+ if (!structure->dfgShouldWatch()) {
+ if (!variant.conditionSet().isEmpty()) {
+ // This means that we're loading from a prototype or we have a property miss. We expect
+ // the base not to have the property. We can only use ObjectPropertyCondition if all of
+ // the structures in the variant.structureSet() agree on the prototype (it would be
+ // hilariously rare if they didn't). Note that we are relying on structureSet() having
+ // at least one element. That will always be true here because of how GetByIdStatus/PutByIdStatus work.
+ JSObject* prototype = variant.structureSet()[0]->storedPrototypeObject();
+ bool allAgree = true;
+ for (unsigned i = 1; i < variant.structureSet().size(); ++i) {
+ if (variant.structureSet()[i]->storedPrototypeObject() != prototype) {
+ allAgree = false;
+ break;
+ }
+ }
+ if (allAgree) {
+ ObjectPropertyCondition condition = ObjectPropertyCondition::absenceWithoutBarrier(
+ knownBase, uid, prototype);
+ if (check(condition))
+ needStructureCheck = false;
+ }
+ } else {
+ // This means we're loading directly from base. We can avoid all of the code that follows
+ // if we can prove that the property is a constant. Otherwise, we try to prove that the
+ // property is watchably present, in which case we get rid of the structure check.
+
+ ObjectPropertyCondition presenceCondition =
+ presenceLike(knownBase, uid, variant.offset(), variant.structureSet());
+ if (presenceCondition) {
+ ObjectPropertyCondition equivalenceCondition =
+ presenceCondition.attemptToMakeEquivalenceWithoutBarrier(*m_vm);
+ if (m_graph.watchCondition(equivalenceCondition))
+ return weakJSConstant(equivalenceCondition.requiredValue());
+
+ if (check(presenceCondition))
+ needStructureCheck = false;
+ }
+ }
+ }
+ }
+
+ if (needStructureCheck)
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base);
+
+ if (variant.isPropertyUnset()) {
+ if (m_graph.watchConditions(variant.conditionSet()))
+ return jsConstant(jsUndefined());
+ return nullptr;
+ }
+
+ SpeculatedType loadPrediction;
+ NodeType loadOp;
+ if (variant.callLinkStatus() || variant.intrinsic() != NoIntrinsic) {
+ loadPrediction = SpecCellOther;
+ loadOp = GetGetterSetterByOffset;
+ } else {
+ loadPrediction = prediction;
+ loadOp = GetByOffset;
+ }
+
+ Node* loadedValue;
+ if (!variant.conditionSet().isEmpty())
+ loadedValue = load(loadPrediction, variant.conditionSet(), loadOp);
+ else {
+ if (needStructureCheck && base->hasConstant()) {
+ // We did emit a structure check. That means that we have an opportunity to do constant folding
+ // here, since we didn't do it above.
+ JSValue constant = m_graph.tryGetConstantProperty(
+ base->asJSValue(), *m_graph.addStructureSet(variant.structureSet()), variant.offset());
+ if (constant)
+ return weakJSConstant(constant);
+ }
+
+ InferredType::Descriptor inferredType;
+ if (needStructureCheck) {
+ for (Structure* structure : variant.structureSet()) {
+ InferredType::Descriptor thisType = m_graph.inferredTypeForProperty(structure, uid);
+ inferredType.merge(thisType);
+ }
+ } else
+ inferredType = InferredType::Top;
+
+ loadedValue = handleGetByOffset(
+ loadPrediction, base, identifierNumber, variant.offset(), inferredType, loadOp);
+ }
+
+ return loadedValue;
+}
+
+Node* ByteCodeParser::store(Node* base, unsigned identifier, const PutByIdVariant& variant, Node* value)
+{
+ RELEASE_ASSERT(variant.kind() == PutByIdVariant::Replace);
+
+ checkPresenceLike(base, m_graph.identifiers()[identifier], variant.offset(), variant.structure());
+ return handlePutByOffset(base, identifier, variant.offset(), variant.requiredType(), value);
+}
+
void ByteCodeParser::handleGetById(
int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
- const GetByIdStatus& getByIdStatus)
+ GetByIdStatus getByIdStatus, AccessType type, unsigned instructionSize)
{
- if (!getByIdStatus.isSimple()
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint)
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadWeakConstantCache)
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadWeakConstantCacheWatchpoint)) {
+ // Attempt to reduce the set of things in the GetByIdStatus.
+ if (base->op() == NewObject) {
+ bool ok = true;
+ for (unsigned i = m_currentBlock->size(); i--;) {
+ Node* node = m_currentBlock->at(i);
+ if (node == base)
+ break;
+ if (writesOverlap(m_graph, node, JSCell_structureID)) {
+ ok = false;
+ break;
+ }
+ }
+ if (ok)
+ getByIdStatus.filter(base->structure().get());
+ }
+
+ NodeType getById;
+ if (type == AccessType::Get)
+ getById = getByIdStatus.makesCalls() ? GetByIdFlush : GetById;
+ else
+ getById = TryGetById;
+
+ if (getById != TryGetById && getByIdStatus.isModuleNamespace()) {
+ if (handleModuleNamespaceLoad(destinationOperand, prediction, base, getByIdStatus)) {
+ if (m_graph.compilation())
+ m_graph.compilation()->noticeInlinedGetById();
+ return;
+ }
+ }
+
+ // Special path for custom accessors, since a custom accessor's offset does not have any meaning.
+ // So this is completely different from the Simple case. But we have a chance to optimize it when we use DOMJIT.
+ if (Options::useDOMJIT() && getByIdStatus.isCustom()) {
+ ASSERT(getByIdStatus.numVariants() == 1);
+ ASSERT(!getByIdStatus.makesCalls());
+ GetByIdVariant variant = getByIdStatus[0];
+ ASSERT(variant.domJIT());
+ if (handleDOMJITGetter(destinationOperand, variant, base, identifierNumber, prediction)) {
+ if (m_graph.compilation())
+ m_graph.compilation()->noticeInlinedGetById();
+ return;
+ }
+ }
+
+ ASSERT(type == AccessType::Get || !getByIdStatus.makesCalls());
+ if (!getByIdStatus.isSimple() || !getByIdStatus.numVariants() || !Options::useAccessInlining()) {
set(VirtualRegister(destinationOperand),
- addToGraph(
- getByIdStatus.makesCalls() ? GetByIdFlush : GetById,
- OpInfo(identifierNumber), OpInfo(prediction), base));
+ addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
+ return;
+ }
+
+ if (getByIdStatus.numVariants() > 1) {
+ if (getByIdStatus.makesCalls() || !isFTL(m_graph.m_plan.mode)
+ || !Options::usePolymorphicAccessInlining()) {
+ set(VirtualRegister(destinationOperand),
+ addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
+ return;
+ }
+
+ Vector<MultiGetByOffsetCase, 2> cases;
+
+ // 1) Emit prototype structure checks for all chains. This may not be
+ // optimal if there is some rarely executed case in the chain that requires a lot
+ // of checks and those checks are not watchpointable.
+ for (const GetByIdVariant& variant : getByIdStatus.variants()) {
+ if (variant.intrinsic() != NoIntrinsic) {
+ set(VirtualRegister(destinationOperand),
+ addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
+ return;
+ }
+
+ if (variant.conditionSet().isEmpty()) {
+ cases.append(
+ MultiGetByOffsetCase(
+ *m_graph.addStructureSet(variant.structureSet()),
+ GetByOffsetMethod::load(variant.offset())));
+ continue;
+ }
+
+ GetByOffsetMethod method = planLoad(variant.conditionSet());
+ if (!method) {
+ set(VirtualRegister(destinationOperand),
+ addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
+ return;
+ }
+
+ cases.append(MultiGetByOffsetCase(*m_graph.addStructureSet(variant.structureSet()), method));
+ }
+
+ if (m_graph.compilation())
+ m_graph.compilation()->noticeInlinedGetById();
+
+ // 2) Emit a MultiGetByOffset
+ MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
+ data->cases = cases;
+ data->identifierNumber = identifierNumber;
+ set(VirtualRegister(destinationOperand),
+ addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
return;
}
- ASSERT(getByIdStatus.structureSet().size());
+ ASSERT(getByIdStatus.numVariants() == 1);
+ GetByIdVariant variant = getByIdStatus[0];
+ Node* loadedValue = load(prediction, base, identifierNumber, variant);
+ if (!loadedValue) {
+ set(VirtualRegister(destinationOperand),
+ addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
+ return;
+ }
+
if (m_graph.compilation())
m_graph.compilation()->noticeInlinedGetById();
+
+ ASSERT(type == AccessType::Get || !variant.callLinkStatus());
+ if (!variant.callLinkStatus() && variant.intrinsic() == NoIntrinsic) {
+ set(VirtualRegister(destinationOperand), loadedValue);
+ return;
+ }
- Node* originalBaseForBaselineJIT = base;
-
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(getByIdStatus.structureSet())), base);
+ Node* getter = addToGraph(GetGetter, loadedValue);
+
+ if (handleIntrinsicGetter(destinationOperand, variant, base,
+ [&] () {
+ addToGraph(CheckCell, OpInfo(m_graph.freeze(variant.intrinsicFunction())), getter, base);
+ })) {
+ addToGraph(Phantom, getter);
+ return;
+ }
+
+ ASSERT(variant.intrinsic() == NoIntrinsic);
+
+ // Make a call. We don't try to get fancy with using the smallest operand number because
+ // the stack layout phase should compress the stack anyway.
+
+ unsigned numberOfParameters = 0;
+ numberOfParameters++; // The 'this' argument.
+ numberOfParameters++; // True return PC.
+
+ // Start with a register offset that corresponds to the last in-use register.
+ int registerOffset = virtualRegisterForLocal(
+ m_inlineStackTop->m_profiledBlock->m_numCalleeLocals - 1).offset();
+ registerOffset -= numberOfParameters;
+ registerOffset -= CallFrame::headerSizeInRegisters;
+
+ // Get the alignment right.
+ registerOffset = -WTF::roundUpToMultipleOf(
+ stackAlignmentRegisters(),
+ -registerOffset);
+
+ ensureLocals(
+ m_inlineStackTop->remapOperand(
+ VirtualRegister(registerOffset)).toLocal());
- if (getByIdStatus.chain()) {
- m_graph.chains().addLazily(getByIdStatus.chain());
- Structure* currentStructure = getByIdStatus.structureSet().singletonStructure();
- JSObject* currentObject = 0;
- for (unsigned i = 0; i < getByIdStatus.chain()->size(); ++i) {
- currentObject = asObject(currentStructure->prototypeForLookup(m_inlineStackTop->m_codeBlock));
- currentStructure = getByIdStatus.chain()->at(i);
- base = cellConstantWithStructureCheck(currentObject, currentStructure);
+ // Issue SetLocals. This has two effects:
+ // 1) That's how handleCall() sees the arguments.
+ // 2) If we inline then this ensures that the arguments are flushed so that if you use
+ // the dreaded arguments object on the getter, the right things happen. Well, sort of -
+ // since we only really care about 'this' in this case. But we're not going to take that
+ // shortcut.
+ int nextRegister = registerOffset + CallFrame::headerSizeInRegisters;
+ set(VirtualRegister(nextRegister++), base, ImmediateNakedSet);
+
+ // We've set some locals, but they are not user-visible. It's still OK to exit from here.
+ m_exitOK = true;
+ addToGraph(ExitOK);
+
+ handleCall(
+ destinationOperand, Call, InlineCallFrame::GetterCall, instructionSize,
+ getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction);
+}
+
+void ByteCodeParser::emitPutById(
+ Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect)
+{
+ if (isDirect)
+ addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
+ else
+ addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value);
+}
+
+void ByteCodeParser::handlePutById(
+ Node* base, unsigned identifierNumber, Node* value,
+ const PutByIdStatus& putByIdStatus, bool isDirect)
+{
+ if (!putByIdStatus.isSimple() || !putByIdStatus.numVariants() || !Options::useAccessInlining()) {
+ if (!putByIdStatus.isSet())
+ addToGraph(ForceOSRExit);
+ emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
+ return;
+ }
+
+ if (putByIdStatus.numVariants() > 1) {
+ if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls()
+ || !Options::usePolymorphicAccessInlining()) {
+ emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
+ return;
}
+
+ if (!isDirect) {
+ for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) {
+ if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition)
+ continue;
+ if (!check(putByIdStatus[variantIndex].conditionSet())) {
+ emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
+ return;
+ }
+ }
+ }
+
+ if (m_graph.compilation())
+ m_graph.compilation()->noticeInlinedPutById();
+
+ for (const PutByIdVariant& variant : putByIdStatus.variants()) {
+ m_graph.registerInferredType(variant.requiredType());
+ for (Structure* structure : variant.oldStructure())
+ m_graph.registerStructure(structure);
+ if (variant.kind() == PutByIdVariant::Transition)
+ m_graph.registerStructure(variant.newStructure());
+ }
+
+ MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
+ data->variants = putByIdStatus.variants();
+ data->identifierNumber = identifierNumber;
+ addToGraph(MultiPutByOffset, OpInfo(data), base, value);
+ return;
+ }
+
+ ASSERT(putByIdStatus.numVariants() == 1);
+ const PutByIdVariant& variant = putByIdStatus[0];
+
+ switch (variant.kind()) {
+ case PutByIdVariant::Replace: {
+ store(base, identifierNumber, variant, value);
+ if (m_graph.compilation())
+ m_graph.compilation()->noticeInlinedPutById();
+ return;
}
- // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
- // ensure that the base of the original get_by_id is kept alive until we're done with
- // all of the speculations. We only insert the Phantom if there had been a CheckStructure
- // on something other than the base following the CheckStructure on base, or if the
- // access was compiled to a WeakJSConstant specific value, in which case we might not
- // have any explicit use of the base at all.
- if (getByIdStatus.specificValue() || originalBaseForBaselineJIT != base)
- addToGraph(Phantom, originalBaseForBaselineJIT);
+ case PutByIdVariant::Transition: {
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
+ if (!check(variant.conditionSet())) {
+ emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
+ return;
+ }
+
+ ASSERT(variant.oldStructureForTransition()->transitionWatchpointSetHasBeenInvalidated());
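+ // What follows, roughly: allocate or reallocate out-of-line storage if the transition grows
+ // storage, store the value with PutByOffset, nuke-and-set the butterfly when storage was
+ // reallocated, and finally emit PutStructure for the new structure.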
- if (getByIdStatus.specificValue()) {
- ASSERT(getByIdStatus.specificValue().isCell());
+ Node* propertyStorage;
+ Transition* transition = m_graph.m_transitions.add(
+ m_graph.registerStructure(variant.oldStructureForTransition()), m_graph.registerStructure(variant.newStructure()));
+
+ if (variant.reallocatesStorage()) {
+
+ // If we're growing the property storage then it must be because we're
+ // storing into the out-of-line storage.
+ ASSERT(!isInlineOffset(variant.offset()));
+
+ if (!variant.oldStructureForTransition()->outOfLineCapacity()) {
+ propertyStorage = addToGraph(
+ AllocatePropertyStorage, OpInfo(transition), base);
+ } else {
+ propertyStorage = addToGraph(
+ ReallocatePropertyStorage, OpInfo(transition),
+ base, addToGraph(GetButterfly, base));
+ }
+ } else {
+ if (isInlineOffset(variant.offset()))
+ propertyStorage = base;
+ else
+ propertyStorage = addToGraph(GetButterfly, base);
+ }
+
+ StorageAccessData* data = m_graph.m_storageAccessData.add();
+ data->offset = variant.offset();
+ data->identifierNumber = identifierNumber;
+ data->inferredType = variant.requiredType();
+ m_graph.registerInferredType(data->inferredType);
+
+ // NOTE: We could GC at this point because someone could insert an operation that GCs.
+ // That's fine because:
+ // - Things already in the structure will get scanned because we haven't messed with
+ // the object yet.
+ // - The value we are about to put is going to be kept live by OSR exit handling. So
+ // if the GC does a conservative scan here it will see the new value.
- set(VirtualRegister(destinationOperand), cellConstant(getByIdStatus.specificValue().asCell()));
+ addToGraph(
+ PutByOffset,
+ OpInfo(data),
+ propertyStorage,
+ base,
+ value);
+
+ if (variant.reallocatesStorage())
+ addToGraph(NukeStructureAndSetButterfly, base, propertyStorage);
+
+ // FIXME: PutStructure goes last until we fix either
+ // https://bugs.webkit.org/show_bug.cgi?id=142921 or
+ // https://bugs.webkit.org/show_bug.cgi?id=142924.
+ addToGraph(PutStructure, OpInfo(transition), base);
+
+ if (m_graph.compilation())
+ m_graph.compilation()->noticeInlinedPutById();
return;
}
+
+ case PutByIdVariant::Setter: {
+ Node* loadedValue = load(SpecCellOther, base, identifierNumber, variant);
+ if (!loadedValue) {
+ emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
+ return;
+ }
+
+ Node* setter = addToGraph(GetSetter, loadedValue);
+
+ // Make a call. We don't try to get fancy with using the smallest operand number because
+ // the stack layout phase should compress the stack anyway.
+
+ unsigned numberOfParameters = 0;
+ numberOfParameters++; // The 'this' argument.
+ numberOfParameters++; // The new value.
+ numberOfParameters++; // True return PC.
+
+ // Start with a register offset that corresponds to the last in-use register.
+ int registerOffset = virtualRegisterForLocal(
+ m_inlineStackTop->m_profiledBlock->m_numCalleeLocals - 1).offset();
+ registerOffset -= numberOfParameters;
+ registerOffset -= CallFrame::headerSizeInRegisters;
- handleGetByOffset(
- destinationOperand, prediction, base, identifierNumber, getByIdStatus.offset());
+ // Get the alignment right.
+ registerOffset = -WTF::roundUpToMultipleOf(
+ stackAlignmentRegisters(),
+ -registerOffset);
+
+ ensureLocals(
+ m_inlineStackTop->remapOperand(
+ VirtualRegister(registerOffset)).toLocal());
+
+ int nextRegister = registerOffset + CallFrame::headerSizeInRegisters;
+ set(VirtualRegister(nextRegister++), base, ImmediateNakedSet);
+ set(VirtualRegister(nextRegister++), value, ImmediateNakedSet);
+
+ // We've set some locals, but they are not user-visible. It's still OK to exit from here.
+ m_exitOK = true;
+ addToGraph(ExitOK);
+
+ handleCall(
+ VirtualRegister().offset(), Call, InlineCallFrame::SetterCall,
+ OPCODE_LENGTH(op_put_by_id), setter, numberOfParameters - 1, registerOffset,
+ *variant.callLinkStatus(), SpecOther);
+ return;
+ }
+
+ default: {
+ emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
+ return;
+ } }
}
void ByteCodeParser::prepareToParseBlock()
{
- for (unsigned i = 0; i < m_constants.size(); ++i)
- m_constants[i] = ConstantRecord();
- m_cellConstantNodes.clear();
+ clearCaches();
+ ASSERT(m_setLocalQueue.isEmpty());
}
-Node* ByteCodeParser::getScope(bool skipTop, unsigned skipCount)
+void ByteCodeParser::clearCaches()
{
- Node* localBase = get(VirtualRegister(JSStack::ScopeChain));
- if (skipTop) {
- ASSERT(!inlineCallFrame());
- localBase = addToGraph(SkipTopScope, localBase);
- }
- for (unsigned n = skipCount; n--;)
- localBase = addToGraph(SkipScope, localBase);
- return localBase;
+ m_constants.resize(0);
}
bool ByteCodeParser::parseBlock(unsigned limit)
@@ -1851,18 +3865,20 @@ bool ByteCodeParser::parseBlock(unsigned limit)
Interpreter* interpreter = m_vm->interpreter;
Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
unsigned blockBegin = m_currentIndex;
-
+
// If we are the first basic block, introduce markers for arguments. This allows
// us to track if a use of an argument may use the actual argument passed, as
// opposed to using a value we set explicitly.
if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) {
m_graph.m_arguments.resize(m_numArguments);
+ // We will emit SetArgument nodes. They don't exit, but we're at the top of an op_enter so
+ // exitOK = true.
+ m_exitOK = true;
for (unsigned argument = 0; argument < m_numArguments; ++argument) {
VariableAccessData* variable = newVariableAccessData(
- virtualRegisterForArgument(argument), m_codeBlock->isCaptured(virtualRegisterForArgument(argument)));
+ virtualRegisterForArgument(argument));
variable->mergeStructureCheckHoistingFailed(
- m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint));
+ m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
variable->mergeCheckArrayHoistingFailed(
m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
@@ -1873,9 +3889,11 @@ bool ByteCodeParser::parseBlock(unsigned limit)
}
while (true) {
- for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
- m_setLocalQueue[i].execute(this);
- m_setLocalQueue.resize(0);
+ // We're starting a new bytecode instruction. Hence, we once again have a place that we can exit
+ // to.
+ m_exitOK = true;
+
+ processSetLocalQueue();
// Don't extend over jump destinations.
if (m_currentIndex == limit) {
@@ -1886,6 +3904,9 @@ bool ByteCodeParser::parseBlock(unsigned limit)
// logic relies on every bytecode resulting in one or more nodes, which would
// be true anyway except for op_loop_hint, which emits a Phantom to force this
// to be true.
+ // We also don't insert a jump if the block already has a terminal,
+ // which could happen after a tail call.
+ ASSERT(m_currentBlock->isEmpty() || !m_currentBlock->terminal());
if (!m_currentBlock->isEmpty())
addToGraph(Jump, OpInfo(m_currentIndex));
return shouldContinueParsing;
@@ -1896,6 +3917,9 @@ bool ByteCodeParser::parseBlock(unsigned limit)
m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
+ if (Options::verboseDFGByteCodeParsing())
+ dataLog(" parsing ", currentCodeOrigin(), ": ", opcodeID, "\n");
+
if (m_graph.compilation()) {
addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor(
Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
@@ -1905,26 +3929,24 @@ bool ByteCodeParser::parseBlock(unsigned limit)
// === Function entry opcodes ===
- case op_enter:
+ case op_enter: {
+ Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
// Initialize all locals to undefined.
for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
- set(virtualRegisterForLocal(i), constantUndefined(), ImmediateSet);
+ set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet);
NEXT_OPCODE(op_enter);
-
- case op_touch_entry:
- if (m_inlineStackTop->m_codeBlock->symbolTable()->m_functionEnteredOnce.isStillValid())
- addToGraph(ForceOSRExit);
- NEXT_OPCODE(op_touch_entry);
+ }
case op_to_this: {
Node* op1 = getThis();
if (op1->op() != ToThis) {
Structure* cachedStructure = currentInstruction[2].u.structure.get();
- if (!cachedStructure
+ if (currentInstruction[2].u.toThisStatus != ToThisOK
+ || !cachedStructure
|| cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
|| m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
|| m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint)) {
+ || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
setThis(addToGraph(ToThis, op1));
} else {
addToGraph(
@@ -1939,18 +3961,33 @@ bool ByteCodeParser::parseBlock(unsigned limit)
case op_create_this: {
int calleeOperand = currentInstruction[2].u.operand;
Node* callee = get(VirtualRegister(calleeOperand));
+
+ JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm);
+ if (!function) {
+ JSCell* cachedFunction = currentInstruction[4].u.jsCell.unvalidatedGet();
+ if (cachedFunction
+ && cachedFunction != JSCell::seenMultipleCalleeObjects()
+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
+ ASSERT(cachedFunction->inherits(*m_vm, JSFunction::info()));
+
+ FrozenValue* frozen = m_graph.freeze(cachedFunction);
+ addToGraph(CheckCell, OpInfo(frozen), callee);
+
+ function = static_cast<JSFunction*>(cachedFunction);
+ }
+ }
+
bool alreadyEmitted = false;
- if (callee->op() == WeakJSConstant) {
- JSCell* cell = callee->weakConstant();
- ASSERT(cell->inherits(JSFunction::info()));
-
- JSFunction* function = jsCast<JSFunction*>(cell);
- if (Structure* structure = function->allocationStructure()) {
- addToGraph(AllocationProfileWatchpoint, OpInfo(function));
- // The callee is still live up to this point.
- addToGraph(Phantom, callee);
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(structure)));
- alreadyEmitted = true;
+ if (function) {
+ if (FunctionRareData* rareData = function->rareData()) {
+ if (Structure* structure = rareData->objectAllocationStructure()) {
+ m_graph.freeze(rareData);
+ m_graph.watchpoints().addLazily(rareData->allocationProfileWatchpointSet());
+ // The callee is still live up to this point.
+ addToGraph(Phantom, callee);
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(m_graph.registerStructure(structure))));
+ alreadyEmitted = true;
+ }
}
}
if (!alreadyEmitted) {
@@ -1963,7 +4000,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
case op_new_object: {
set(VirtualRegister(currentInstruction[1].u.operand),
addToGraph(NewObject,
- OpInfo(currentInstruction[3].u.objectAllocationProfile->structure())));
+ OpInfo(m_graph.registerStructure(currentInstruction[3].u.objectAllocationProfile->structure()))));
NEXT_OPCODE(op_new_object);
}
@@ -1976,6 +4013,27 @@ bool ByteCodeParser::parseBlock(unsigned limit)
set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
NEXT_OPCODE(op_new_array);
}
+
+ case op_new_array_with_spread: {
+ int startOperand = currentInstruction[2].u.operand;
+ int numOperands = currentInstruction[3].u.operand;
+ const BitVector& bitVector = m_inlineStackTop->m_profiledBlock->unlinkedCodeBlock()->bitVector(currentInstruction[4].u.unsignedValue);
+ for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
+ addVarArgChild(get(VirtualRegister(operandIdx)));
+
+ BitVector* copy = m_graph.m_bitVectors.add(bitVector);
+ ASSERT(*copy == bitVector);
+
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ addToGraph(Node::VarArg, NewArrayWithSpread, OpInfo(copy)));
+ NEXT_OPCODE(op_new_array_with_spread);
+ }
+
+ case op_spread: {
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ addToGraph(Spread, get(VirtualRegister(currentInstruction[2].u.operand))));
+ NEXT_OPCODE(op_spread);
+ }
case op_new_array_with_size: {
int lengthOperand = currentInstruction[2].u.operand;
@@ -2007,25 +4065,39 @@ bool ByteCodeParser::parseBlock(unsigned limit)
}
case op_new_regexp: {
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
+ RegExp* regexp = m_inlineStackTop->m_codeBlock->regexp(currentInstruction[2].u.operand);
+ FrozenValue* frozen = m_graph.freezeStrong(regexp);
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(frozen)));
NEXT_OPCODE(op_new_regexp);
}
-
- case op_get_callee: {
- JSCell* cachedFunction = currentInstruction[2].u.jsCell.get();
- if (!cachedFunction
- || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadFunction)) {
- set(VirtualRegister(currentInstruction[1].u.operand), get(VirtualRegister(JSStack::Callee)));
- } else {
- ASSERT(cachedFunction->inherits(JSFunction::info()));
- Node* actualCallee = get(VirtualRegister(JSStack::Callee));
- addToGraph(CheckFunction, OpInfo(cachedFunction), actualCallee);
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(WeakJSConstant, OpInfo(cachedFunction)));
- }
- NEXT_OPCODE(op_get_callee);
+
+ case op_get_rest_length: {
+ InlineCallFrame* inlineCallFrame = this->inlineCallFrame();
+ Node* length;
+ if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
+ unsigned argumentsLength = inlineCallFrame->arguments.size() - 1;
+ unsigned numParamsToSkip = currentInstruction[2].u.unsignedValue;
+ JSValue restLength;
+ if (argumentsLength <= numParamsToSkip)
+ restLength = jsNumber(0);
+ else
+ restLength = jsNumber(argumentsLength - numParamsToSkip);
+
+ length = jsConstant(restLength);
+ } else
+ length = addToGraph(GetRestLength, OpInfo(currentInstruction[2].u.unsignedValue));
+ set(VirtualRegister(currentInstruction[1].u.operand), length);
+ NEXT_OPCODE(op_get_rest_length);
}
+ case op_create_rest: {
+ noticeArgumentsUse();
+ Node* arrayLength = get(VirtualRegister(currentInstruction[2].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ addToGraph(CreateRest, OpInfo(currentInstruction[3].u.unsignedValue), arrayLength));
+ NEXT_OPCODE(op_create_rest);
+ }
+
// === Bitwise operations ===
case op_bitand: {
@@ -2085,7 +4157,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
int srcDst = currentInstruction[1].u.operand;
VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
Node* op = get(srcDstVirtualRegister);
- set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, one())));
+ set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
NEXT_OPCODE(op_inc);
}
@@ -2093,7 +4165,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
int srcDst = currentInstruction[1].u.operand;
VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
Node* op = get(srcDstVirtualRegister);
- set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, one())));
+ set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
NEXT_OPCODE(op_dec);
}
@@ -2137,6 +4209,15 @@ bool ByteCodeParser::parseBlock(unsigned limit)
NEXT_OPCODE(op_mod);
}
+ case op_pow: {
+ // FIXME: ArithPow(Untyped, Untyped) should be supported in the same way as ArithMul, ArithSub, etc.
+ // https://bugs.webkit.org/show_bug.cgi?id=160012
+ Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ArithPow, op1, op2));
+ NEXT_OPCODE(op_pow);
+ }
+
case op_div: {
Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
@@ -2146,18 +4227,11 @@ bool ByteCodeParser::parseBlock(unsigned limit)
// === Misc operations ===
- case op_debug:
- addToGraph(Breakpoint);
+ case op_debug: {
+ // This is a nop in the DFG/FTL because when we set a breakpoint in the debugger,
+ // we will jettison all optimized CodeBlocks that contain the breakpoint.
+ addToGraph(Check); // We add a nop here so that basic block linking doesn't break.
NEXT_OPCODE(op_debug);
-
- case op_profile_will_call: {
- addToGraph(ProfileWillCall);
- NEXT_OPCODE(op_profile_will_call);
- }
-
- case op_profile_did_call: {
- addToGraph(ProfileDidCall);
- NEXT_OPCODE(op_profile_did_call);
}
case op_mov: {
@@ -2165,20 +4239,21 @@ bool ByteCodeParser::parseBlock(unsigned limit)
set(VirtualRegister(currentInstruction[1].u.operand), op);
NEXT_OPCODE(op_mov);
}
-
- case op_captured_mov: {
- Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
- if (VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet) {
- if (set->state() != IsInvalidated)
- addToGraph(NotifyWrite, OpInfo(set), op);
- }
- set(VirtualRegister(currentInstruction[1].u.operand), op);
- NEXT_OPCODE(op_captured_mov);
+
+ case op_check_tdz: {
+ addToGraph(CheckNotEmpty, get(VirtualRegister(currentInstruction[1].u.operand)));
+ NEXT_OPCODE(op_check_tdz);
}
- case op_check_has_instance:
- addToGraph(CheckHasInstance, get(VirtualRegister(currentInstruction[3].u.operand)));
- NEXT_OPCODE(op_check_has_instance);
+ case op_overrides_has_instance: {
+ JSFunction* defaultHasInstanceSymbolFunction = m_inlineStackTop->m_codeBlock->globalObjectFor(currentCodeOrigin())->functionProtoHasInstanceSymbolFunction();
+
+ Node* constructor = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* hasInstanceValue = get(VirtualRegister(currentInstruction[3].u.operand));
+
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(OverridesHasInstance, OpInfo(m_graph.freeze(defaultHasInstanceSymbolFunction)), constructor, hasInstanceValue));
+ NEXT_OPCODE(op_overrides_has_instance);
+ }
case op_instanceof: {
Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
@@ -2186,7 +4261,19 @@ bool ByteCodeParser::parseBlock(unsigned limit)
set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InstanceOf, value, prototype));
NEXT_OPCODE(op_instanceof);
}
-
+
+ case op_instanceof_custom: {
+ Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* constructor = get(VirtualRegister(currentInstruction[3].u.operand));
+ Node* hasInstanceValue = get(VirtualRegister(currentInstruction[4].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InstanceOfCustom, value, constructor, hasInstanceValue));
+ NEXT_OPCODE(op_instanceof_custom);
+ }
+ case op_is_empty: {
+ Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsEmpty, value));
+ NEXT_OPCODE(op_is_empty);
+ }
case op_is_undefined: {
Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value));
@@ -2205,10 +4292,11 @@ bool ByteCodeParser::parseBlock(unsigned limit)
NEXT_OPCODE(op_is_number);
}
- case op_is_string: {
+ case op_is_cell_with_type: {
+ JSType type = static_cast<JSType>(currentInstruction[3].u.operand);
Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsString, value));
- NEXT_OPCODE(op_is_string);
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsCellWithType, OpInfo(type), value));
+ NEXT_OPCODE(op_is_cell_with_type);
}
case op_is_object: {
@@ -2217,6 +4305,12 @@ bool ByteCodeParser::parseBlock(unsigned limit)
NEXT_OPCODE(op_is_object);
}
+ case op_is_object_or_null: {
+ Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObjectOrNull, value));
+ NEXT_OPCODE(op_is_object_or_null);
+ }
+
case op_is_function: {
Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value));
@@ -2239,52 +4333,37 @@ bool ByteCodeParser::parseBlock(unsigned limit)
int startOperand = currentInstruction[2].u.operand;
int numOperands = currentInstruction[3].u.operand;
#if CPU(X86)
- // X86 doesn't have enough registers to compile MakeRope with three arguments.
- // Rather than try to be clever, we just make MakeRope dumber on this processor.
- const unsigned maxRopeArguments = 2;
+ // X86 doesn't have enough registers to compile MakeRope with three arguments. The
+ // StrCat we emit here may be turned into a MakeRope. Rather than try to be clever,
+ // we just make StrCat dumber on this processor.
+ const unsigned maxArguments = 2;
#else
- const unsigned maxRopeArguments = 3;
+ const unsigned maxArguments = 3;
#endif
- auto toStringNodes = std::make_unique<Node*[]>(numOperands);
- for (int i = 0; i < numOperands; i++)
- toStringNodes[i] = addToGraph(ToString, get(VirtualRegister(startOperand - i)));
-
- for (int i = 0; i < numOperands; i++)
- addToGraph(Phantom, toStringNodes[i]);
-
Node* operands[AdjacencyList::Size];
unsigned indexInOperands = 0;
for (unsigned i = 0; i < AdjacencyList::Size; ++i)
operands[i] = 0;
for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) {
- if (indexInOperands == maxRopeArguments) {
- operands[0] = addToGraph(MakeRope, operands[0], operands[1], operands[2]);
+ if (indexInOperands == maxArguments) {
+ operands[0] = addToGraph(StrCat, operands[0], operands[1], operands[2]);
for (unsigned i = 1; i < AdjacencyList::Size; ++i)
operands[i] = 0;
indexInOperands = 1;
}
ASSERT(indexInOperands < AdjacencyList::Size);
- ASSERT(indexInOperands < maxRopeArguments);
- operands[indexInOperands++] = toStringNodes[operandIdx];
+ ASSERT(indexInOperands < maxArguments);
+ operands[indexInOperands++] = get(VirtualRegister(startOperand - operandIdx));
}
set(VirtualRegister(currentInstruction[1].u.operand),
- addToGraph(MakeRope, operands[0], operands[1], operands[2]));
+ addToGraph(StrCat, operands[0], operands[1], operands[2]));
NEXT_OPCODE(op_strcat);
}
case op_less: {
Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
- if (canFold(op1) && canFold(op2)) {
- JSValue a = valueOfJSConstant(op1);
- JSValue b = valueOfJSConstant(op2);
- if (a.isNumber() && b.isNumber()) {
- set(VirtualRegister(currentInstruction[1].u.operand),
- getJSConstantForValue(jsBoolean(a.asNumber() < b.asNumber())));
- NEXT_OPCODE(op_less);
- }
- }
set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2));
NEXT_OPCODE(op_less);
}
@@ -2292,15 +4371,6 @@ bool ByteCodeParser::parseBlock(unsigned limit)
case op_lesseq: {
Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
- if (canFold(op1) && canFold(op2)) {
- JSValue a = valueOfJSConstant(op1);
- JSValue b = valueOfJSConstant(op2);
- if (a.isNumber() && b.isNumber()) {
- set(VirtualRegister(currentInstruction[1].u.operand),
- getJSConstantForValue(jsBoolean(a.asNumber() <= b.asNumber())));
- NEXT_OPCODE(op_lesseq);
- }
- }
set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2));
NEXT_OPCODE(op_lesseq);
}
@@ -2308,15 +4378,6 @@ bool ByteCodeParser::parseBlock(unsigned limit)
case op_greater: {
Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
- if (canFold(op1) && canFold(op2)) {
- JSValue a = valueOfJSConstant(op1);
- JSValue b = valueOfJSConstant(op2);
- if (a.isNumber() && b.isNumber()) {
- set(VirtualRegister(currentInstruction[1].u.operand),
- getJSConstantForValue(jsBoolean(a.asNumber() > b.asNumber())));
- NEXT_OPCODE(op_greater);
- }
- }
set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2));
NEXT_OPCODE(op_greater);
}
@@ -2324,15 +4385,6 @@ bool ByteCodeParser::parseBlock(unsigned limit)
case op_greatereq: {
Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
- if (canFold(op1) && canFold(op2)) {
- JSValue a = valueOfJSConstant(op1);
- JSValue b = valueOfJSConstant(op2);
- if (a.isNumber() && b.isNumber()) {
- set(VirtualRegister(currentInstruction[1].u.operand),
- getJSConstantForValue(jsBoolean(a.asNumber() >= b.asNumber())));
- NEXT_OPCODE(op_greatereq);
- }
- }
set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2));
NEXT_OPCODE(op_greatereq);
}
@@ -2340,79 +4392,43 @@ bool ByteCodeParser::parseBlock(unsigned limit)
case op_eq: {
Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
- if (canFold(op1) && canFold(op2)) {
- JSValue a = valueOfJSConstant(op1);
- JSValue b = valueOfJSConstant(op2);
- set(VirtualRegister(currentInstruction[1].u.operand),
- getJSConstantForValue(jsBoolean(JSValue::equal(m_codeBlock->globalObject()->globalExec(), a, b))));
- NEXT_OPCODE(op_eq);
- }
set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2));
NEXT_OPCODE(op_eq);
}
case op_eq_null: {
Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEqConstant, value, constantNull()));
+ Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, value, nullConstant));
NEXT_OPCODE(op_eq_null);
}
case op_stricteq: {
Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
- if (canFold(op1) && canFold(op2)) {
- JSValue a = valueOfJSConstant(op1);
- JSValue b = valueOfJSConstant(op2);
- set(VirtualRegister(currentInstruction[1].u.operand),
- getJSConstantForValue(jsBoolean(JSValue::strictEqual(m_codeBlock->globalObject()->globalExec(), a, b))));
- NEXT_OPCODE(op_stricteq);
- }
- if (isConstantForCompareStrictEq(op1))
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEqConstant, op2, op1));
- else if (isConstantForCompareStrictEq(op2))
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEqConstant, op1, op2));
- else
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2));
NEXT_OPCODE(op_stricteq);
}
case op_neq: {
Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
- if (canFold(op1) && canFold(op2)) {
- JSValue a = valueOfJSConstant(op1);
- JSValue b = valueOfJSConstant(op2);
- set(VirtualRegister(currentInstruction[1].u.operand),
- getJSConstantForValue(jsBoolean(!JSValue::equal(m_codeBlock->globalObject()->globalExec(), a, b))));
- NEXT_OPCODE(op_neq);
- }
set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
NEXT_OPCODE(op_neq);
}
case op_neq_null: {
Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, constantNull())));
+ Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, value, nullConstant)));
NEXT_OPCODE(op_neq_null);
}
case op_nstricteq: {
Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
- if (canFold(op1) && canFold(op2)) {
- JSValue a = valueOfJSConstant(op1);
- JSValue b = valueOfJSConstant(op2);
- set(VirtualRegister(currentInstruction[1].u.operand),
- getJSConstantForValue(jsBoolean(!JSValue::strictEqual(m_codeBlock->globalObject()->globalExec(), a, b))));
- NEXT_OPCODE(op_nstricteq);
- }
Node* invertedResult;
- if (isConstantForCompareStrictEq(op1))
- invertedResult = addToGraph(CompareStrictEqConstant, op2, op1);
- else if (isConstantForCompareStrictEq(op2))
- invertedResult = addToGraph(CompareStrictEqConstant, op1, op2);
- else
- invertedResult = addToGraph(CompareStrictEq, op1, op2);
+ invertedResult = addToGraph(CompareStrictEq, op1, op2);
set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult));
NEXT_OPCODE(op_nstricteq);
}
@@ -2420,234 +4436,330 @@ bool ByteCodeParser::parseBlock(unsigned limit)
// === Property access operations ===
case op_get_by_val: {
- SpeculatedType prediction = getPrediction();
-
+ SpeculatedType prediction = getPredictionWithoutOSRExit();
+
Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
- ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Read);
Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
- Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
- set(VirtualRegister(currentInstruction[1].u.operand), getByVal);
+ bool compiledAsGetById = false;
+ GetByIdStatus getByIdStatus;
+ unsigned identifierNumber = 0;
+ {
+ ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+ ByValInfo* byValInfo = m_inlineStackTop->m_byValInfos.get(CodeOrigin(currentCodeOrigin().bytecodeIndex));
+ // FIXME: When the bytecode is not compiled in the baseline JIT, byValInfo becomes null.
+ // In that case, no profiling information is available.
+ if (byValInfo && byValInfo->stubInfo && !byValInfo->tookSlowPath && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent) && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
+ compiledAsGetById = true;
+ identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl());
+ UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
+
+ if (Symbol* symbol = byValInfo->cachedSymbol.get()) {
+ FrozenValue* frozen = m_graph.freezeStrong(symbol);
+ addToGraph(CheckCell, OpInfo(frozen), property);
+ } else {
+ ASSERT(!uid->isSymbol());
+ addToGraph(CheckStringIdent, OpInfo(uid), property);
+ }
+
+ getByIdStatus = GetByIdStatus::computeForStubInfo(
+ locker, m_inlineStackTop->m_profiledBlock,
+ byValInfo->stubInfo, currentCodeOrigin(), uid);
+ }
+ }
+
+ if (compiledAsGetById)
+ handleGetById(currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus, AccessType::Get, OPCODE_LENGTH(op_get_by_val));
+ else {
+ ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Read);
+ Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
+ m_exitOK = false; // GetByVal must be treated as if it clobbers exit state, since FixupPhase may make it generic.
+ set(VirtualRegister(currentInstruction[1].u.operand), getByVal);
+ }
NEXT_OPCODE(op_get_by_val);
}
+ case op_get_by_val_with_this: {
+ SpeculatedType prediction = getPrediction();
+
+ Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* thisValue = get(VirtualRegister(currentInstruction[3].u.operand));
+ Node* property = get(VirtualRegister(currentInstruction[4].u.operand));
+ Node* getByValWithThis = addToGraph(GetByValWithThis, OpInfo(), OpInfo(prediction), base, thisValue, property);
+ set(VirtualRegister(currentInstruction[1].u.operand), getByValWithThis);
+
+ NEXT_OPCODE(op_get_by_val_with_this);
+ }
+
case op_put_by_val_direct:
case op_put_by_val: {
Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
+ Node* property = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
+ bool isDirect = opcodeID == op_put_by_val_direct;
+ bool compiledAsPutById = false;
+ {
+ unsigned identifierNumber = std::numeric_limits<unsigned>::max();
+ PutByIdStatus putByIdStatus;
+ {
+ ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+ ByValInfo* byValInfo = m_inlineStackTop->m_byValInfos.get(CodeOrigin(currentCodeOrigin().bytecodeIndex));
+ // FIXME: When the bytecode is not compiled in the baseline JIT, byValInfo becomes null.
+ // In that case, no profiling information is available.
+ if (byValInfo
+ && byValInfo->stubInfo
+ && !byValInfo->tookSlowPath
+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent)
+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)
+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
+ compiledAsPutById = true;
+ identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl());
+ UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
+
+ if (Symbol* symbol = byValInfo->cachedSymbol.get()) {
+ FrozenValue* frozen = m_graph.freezeStrong(symbol);
+ addToGraph(CheckCell, OpInfo(frozen), property);
+ } else {
+ ASSERT(!uid->isSymbol());
+ addToGraph(CheckStringIdent, OpInfo(uid), property);
+ }
- ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Write);
-
+ putByIdStatus = PutByIdStatus::computeForStubInfo(
+ locker, m_inlineStackTop->m_profiledBlock,
+ byValInfo->stubInfo, currentCodeOrigin(), uid);
+
+ }
+ }
+
+ if (compiledAsPutById)
+ handlePutById(base, identifierNumber, value, putByIdStatus, isDirect);
+ }
+
+ if (!compiledAsPutById) {
+ ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Write);
+
+ addVarArgChild(base);
+ addVarArgChild(property);
+ addVarArgChild(value);
+ addVarArgChild(0); // Leave room for property storage.
+ addVarArgChild(0); // Leave room for length.
+ addToGraph(Node::VarArg, isDirect ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
+ }
+
+ NEXT_OPCODE(op_put_by_val);
+ }
+
+ case op_put_by_val_with_this: {
+ Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
+ Node* thisValue = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
+ Node* value = get(VirtualRegister(currentInstruction[4].u.operand));
+
+ addVarArgChild(base);
+ addVarArgChild(thisValue);
+ addVarArgChild(property);
+ addVarArgChild(value);
+ addToGraph(Node::VarArg, PutByValWithThis, OpInfo(0), OpInfo(0));
+
+ NEXT_OPCODE(op_put_by_val_with_this);
+ }
+
+ case op_define_data_property: {
+ Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
Node* property = get(VirtualRegister(currentInstruction[2].u.operand));
Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
-
+ Node* attributes = get(VirtualRegister(currentInstruction[4].u.operand));
+
addVarArgChild(base);
addVarArgChild(property);
addVarArgChild(value);
- addVarArgChild(0); // Leave room for property storage.
- addToGraph(Node::VarArg, opcodeID == op_put_by_val_direct ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
+ addVarArgChild(attributes);
+ addToGraph(Node::VarArg, DefineDataProperty, OpInfo(0), OpInfo(0));
- NEXT_OPCODE(op_put_by_val);
+ NEXT_OPCODE(op_define_data_property);
}
-
+
+ case op_define_accessor_property: {
+ Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
+ Node* property = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* getter = get(VirtualRegister(currentInstruction[3].u.operand));
+ Node* setter = get(VirtualRegister(currentInstruction[4].u.operand));
+ Node* attributes = get(VirtualRegister(currentInstruction[5].u.operand));
+
+ addVarArgChild(base);
+ addVarArgChild(property);
+ addVarArgChild(getter);
+ addVarArgChild(setter);
+ addVarArgChild(attributes);
+ addToGraph(Node::VarArg, DefineAccessorProperty, OpInfo(0), OpInfo(0));
+
+ NEXT_OPCODE(op_define_accessor_property);
+ }
+
+ case op_try_get_by_id:
case op_get_by_id:
- case op_get_by_id_out_of_line:
+ case op_get_by_id_proto_load:
+ case op_get_by_id_unset:
case op_get_array_length: {
SpeculatedType prediction = getPrediction();
Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
- StringImpl* uid = m_graph.identifiers()[identifierNumber];
+ UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
- m_inlineStackTop->m_profiledBlock, m_inlineStackTop->m_stubInfos,
- m_currentIndex, uid);
-
+ m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
+ m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
+ currentCodeOrigin(), uid);
+ AccessType type = op_try_get_by_id == opcodeID ? AccessType::TryGet : AccessType::Get;
+
+ unsigned opcodeLength = opcodeID == op_try_get_by_id ? OPCODE_LENGTH(op_try_get_by_id) : OPCODE_LENGTH(op_get_by_id);
+
handleGetById(
- currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
+ currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus, type, opcodeLength);
- NEXT_OPCODE(op_get_by_id);
+ if (op_try_get_by_id == opcodeID)
+ NEXT_OPCODE(op_try_get_by_id); // This opcode's length differs from the others handled here.
+ else
+ NEXT_OPCODE(op_get_by_id);
}
- case op_put_by_id:
- case op_put_by_id_out_of_line:
- case op_put_by_id_transition_direct:
- case op_put_by_id_transition_normal:
- case op_put_by_id_transition_direct_out_of_line:
- case op_put_by_id_transition_normal_out_of_line: {
+ case op_get_by_id_with_this: {
+ SpeculatedType prediction = getPrediction();
+
+ Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* thisValue = get(VirtualRegister(currentInstruction[3].u.operand));
+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[4].u.operand];
+
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ addToGraph(GetByIdWithThis, OpInfo(identifierNumber), OpInfo(prediction), base, thisValue));
+
+ NEXT_OPCODE(op_get_by_id_with_this);
+ }
+ case op_put_by_id: {
Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
- bool direct = currentInstruction[8].u.operand;
+ bool direct = currentInstruction[8].u.putByIdFlags & PutByIdIsDirect;
PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
- m_inlineStackTop->m_profiledBlock, m_inlineStackTop->m_stubInfos,
- m_currentIndex, m_graph.identifiers()[identifierNumber]);
- bool canCountAsInlined = true;
- if (!putByIdStatus.isSet()) {
- addToGraph(ForceOSRExit);
- canCountAsInlined = false;
- }
-
- bool hasExitSite =
- m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint)
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadWeakConstantCache)
- || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadWeakConstantCacheWatchpoint);
+ m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
+ m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
+ currentCodeOrigin(), m_graph.identifiers()[identifierNumber]);
- if (!hasExitSite && putByIdStatus.isSimpleReplace()) {
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
- handlePutByOffset(base, identifierNumber, putByIdStatus.offset(), value);
- } else if (
- !hasExitSite
- && putByIdStatus.isSimpleTransition()
- && (!putByIdStatus.structureChain()
- || putByIdStatus.structureChain()->isStillValid())) {
-
- m_graph.chains().addLazily(putByIdStatus.structureChain());
-
- addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
- if (!direct) {
- if (!putByIdStatus.oldStructure()->storedPrototype().isNull()) {
- cellConstantWithStructureCheck(
- putByIdStatus.oldStructure()->storedPrototype().asCell());
- }
-
- for (unsigned i = 0; i < putByIdStatus.structureChain()->size(); ++i) {
- JSValue prototype = putByIdStatus.structureChain()->at(i)->storedPrototype();
- if (prototype.isNull())
- continue;
- cellConstantWithStructureCheck(prototype.asCell());
- }
- }
- ASSERT(putByIdStatus.oldStructure()->transitionWatchpointSetHasBeenInvalidated());
-
- Node* propertyStorage;
- StructureTransitionData* transitionData =
- m_graph.addStructureTransitionData(
- StructureTransitionData(
- putByIdStatus.oldStructure(),
- putByIdStatus.newStructure()));
-
- if (putByIdStatus.oldStructure()->outOfLineCapacity()
- != putByIdStatus.newStructure()->outOfLineCapacity()) {
-
- // If we're growing the property storage then it must be because we're
- // storing into the out-of-line storage.
- ASSERT(!isInlineOffset(putByIdStatus.offset()));
-
- if (!putByIdStatus.oldStructure()->outOfLineCapacity()) {
- propertyStorage = addToGraph(
- AllocatePropertyStorage, OpInfo(transitionData), base);
- } else {
- propertyStorage = addToGraph(
- ReallocatePropertyStorage, OpInfo(transitionData),
- base, addToGraph(GetButterfly, base));
- }
- } else {
- if (isInlineOffset(putByIdStatus.offset()))
- propertyStorage = base;
- else
- propertyStorage = addToGraph(GetButterfly, base);
- }
-
- addToGraph(PutStructure, OpInfo(transitionData), base);
-
- addToGraph(
- PutByOffset,
- OpInfo(m_graph.m_storageAccessData.size()),
- propertyStorage,
- base,
- value);
-
- StorageAccessData storageAccessData;
- storageAccessData.offset = putByIdStatus.offset();
- storageAccessData.identifierNumber = identifierNumber;
- m_graph.m_storageAccessData.append(storageAccessData);
- } else {
- if (direct)
- addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
- else
- addToGraph(PutById, OpInfo(identifierNumber), base, value);
- canCountAsInlined = false;
- }
-
- if (canCountAsInlined && m_graph.compilation())
- m_graph.compilation()->noticeInlinedPutById();
-
+ handlePutById(base, identifierNumber, value, putByIdStatus, direct);
NEXT_OPCODE(op_put_by_id);
}
- case op_init_global_const_nop: {
- NEXT_OPCODE(op_init_global_const_nop);
+ case op_put_by_id_with_this: {
+ Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
+ Node* thisValue = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* value = get(VirtualRegister(currentInstruction[4].u.operand));
+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
+
+ addToGraph(PutByIdWithThis, OpInfo(identifierNumber), base, thisValue, value);
+ NEXT_OPCODE(op_put_by_id_with_this);
}
- case op_init_global_const: {
- Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
- addToGraph(
- PutGlobalVar,
- OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
- value);
- NEXT_OPCODE(op_init_global_const);
+ case op_put_getter_by_id:
+ case op_put_setter_by_id: {
+ Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
+ unsigned attributes = currentInstruction[3].u.operand;
+ Node* accessor = get(VirtualRegister(currentInstruction[4].u.operand));
+ NodeType op = (opcodeID == op_put_getter_by_id) ? PutGetterById : PutSetterById;
+ addToGraph(op, OpInfo(identifierNumber), OpInfo(attributes), base, accessor);
+ NEXT_OPCODE(op_put_getter_by_id);
+ }
+
+ case op_put_getter_setter_by_id: {
+ Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
+ unsigned attributes = currentInstruction[3].u.operand;
+ Node* getter = get(VirtualRegister(currentInstruction[4].u.operand));
+ Node* setter = get(VirtualRegister(currentInstruction[5].u.operand));
+ addToGraph(PutGetterSetterById, OpInfo(identifierNumber), OpInfo(attributes), base, getter, setter);
+ NEXT_OPCODE(op_put_getter_setter_by_id);
+ }
+
+ case op_put_getter_by_val:
+ case op_put_setter_by_val: {
+ Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
+ Node* subscript = get(VirtualRegister(currentInstruction[2].u.operand));
+ unsigned attributes = currentInstruction[3].u.operand;
+ Node* accessor = get(VirtualRegister(currentInstruction[4].u.operand));
+ NodeType op = (opcodeID == op_put_getter_by_val) ? PutGetterByVal : PutSetterByVal;
+ addToGraph(op, OpInfo(attributes), base, subscript, accessor);
+ NEXT_OPCODE(op_put_getter_by_val);
+ }
+
+ case op_del_by_id: {
+ Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ addToGraph(DeleteById, OpInfo(identifierNumber), base));
+ NEXT_OPCODE(op_del_by_id);
+ }
+
+ case op_del_by_val: {
+ int dst = currentInstruction[1].u.operand;
+ Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* key = get(VirtualRegister(currentInstruction[3].u.operand));
+ set(VirtualRegister(dst), addToGraph(DeleteByVal, base, key));
+ NEXT_OPCODE(op_del_by_val);
+ }
+
+ case op_profile_type: {
+ Node* valueToProfile = get(VirtualRegister(currentInstruction[1].u.operand));
+ addToGraph(ProfileType, OpInfo(currentInstruction[2].u.location), valueToProfile);
+ NEXT_OPCODE(op_profile_type);
+ }
+
+ case op_profile_control_flow: {
+ BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation;
+ addToGraph(ProfileControlFlow, OpInfo(basicBlockLocation));
+ NEXT_OPCODE(op_profile_control_flow);
}
// === Block terminators. ===
case op_jmp: {
- unsigned relativeOffset = currentInstruction[1].u.operand;
+ ASSERT(!m_currentBlock->terminal());
+ int relativeOffset = currentInstruction[1].u.operand;
addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
+ if (relativeOffset <= 0)
+ flushForTerminal();
LAST_OPCODE(op_jmp);
}
case op_jtrue: {
unsigned relativeOffset = currentInstruction[2].u.operand;
Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
- if (canFold(condition)) {
- TriState state = valueOfJSConstant(condition).pureToBoolean();
- if (state == TrueTriState) {
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jtrue);
- } else if (state == FalseTriState) {
- // Emit a placeholder for this bytecode operation but otherwise
- // just fall through.
- addToGraph(Phantom);
- NEXT_OPCODE(op_jtrue);
- }
- }
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition);
LAST_OPCODE(op_jtrue);
}
case op_jfalse: {
unsigned relativeOffset = currentInstruction[2].u.operand;
Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
- if (canFold(condition)) {
- TriState state = valueOfJSConstant(condition).pureToBoolean();
- if (state == FalseTriState) {
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jfalse);
- } else if (state == TrueTriState) {
- // Emit a placeholder for this bytecode operation but otherwise
- // just fall through.
- addToGraph(Phantom);
- NEXT_OPCODE(op_jfalse);
- }
- }
- addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition);
LAST_OPCODE(op_jfalse);
}
case op_jeq_null: {
unsigned relativeOffset = currentInstruction[2].u.operand;
Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
- Node* condition = addToGraph(CompareEqConstant, value, constantNull());
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition);
+ Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
+ Node* condition = addToGraph(CompareEq, value, nullConstant);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition);
LAST_OPCODE(op_jeq_null);
}
case op_jneq_null: {
unsigned relativeOffset = currentInstruction[2].u.operand;
Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
- Node* condition = addToGraph(CompareEqConstant, value, constantNull());
- addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition);
+ Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
+ Node* condition = addToGraph(CompareEq, value, nullConstant);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition);
LAST_OPCODE(op_jneq_null);
}
@@ -2655,25 +4767,8 @@ bool ByteCodeParser::parseBlock(unsigned limit)
unsigned relativeOffset = currentInstruction[3].u.operand;
Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
- if (canFold(op1) && canFold(op2)) {
- JSValue aValue = valueOfJSConstant(op1);
- JSValue bValue = valueOfJSConstant(op2);
- if (aValue.isNumber() && bValue.isNumber()) {
- double a = aValue.asNumber();
- double b = bValue.asNumber();
- if (a < b) {
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jless);
- } else {
- // Emit a placeholder for this bytecode operation but otherwise
- // just fall through.
- addToGraph(Phantom);
- NEXT_OPCODE(op_jless);
- }
- }
- }
Node* condition = addToGraph(CompareLess, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition);
LAST_OPCODE(op_jless);
}
@@ -2681,25 +4776,8 @@ bool ByteCodeParser::parseBlock(unsigned limit)
unsigned relativeOffset = currentInstruction[3].u.operand;
Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
- if (canFold(op1) && canFold(op2)) {
- JSValue aValue = valueOfJSConstant(op1);
- JSValue bValue = valueOfJSConstant(op2);
- if (aValue.isNumber() && bValue.isNumber()) {
- double a = aValue.asNumber();
- double b = bValue.asNumber();
- if (a <= b) {
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jlesseq);
- } else {
- // Emit a placeholder for this bytecode operation but otherwise
- // just fall through.
- addToGraph(Phantom);
- NEXT_OPCODE(op_jlesseq);
- }
- }
- }
Node* condition = addToGraph(CompareLessEq, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jlesseq))), condition);
LAST_OPCODE(op_jlesseq);
}
@@ -2707,25 +4785,8 @@ bool ByteCodeParser::parseBlock(unsigned limit)
unsigned relativeOffset = currentInstruction[3].u.operand;
Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
- if (canFold(op1) && canFold(op2)) {
- JSValue aValue = valueOfJSConstant(op1);
- JSValue bValue = valueOfJSConstant(op2);
- if (aValue.isNumber() && bValue.isNumber()) {
- double a = aValue.asNumber();
- double b = bValue.asNumber();
- if (a > b) {
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jgreater);
- } else {
- // Emit a placeholder for this bytecode operation but otherwise
- // just fall through.
- addToGraph(Phantom);
- NEXT_OPCODE(op_jgreater);
- }
- }
- }
Node* condition = addToGraph(CompareGreater, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreater)), condition);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition);
LAST_OPCODE(op_jgreater);
}
@@ -2733,25 +4794,8 @@ bool ByteCodeParser::parseBlock(unsigned limit)
unsigned relativeOffset = currentInstruction[3].u.operand;
Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
- if (canFold(op1) && canFold(op2)) {
- JSValue aValue = valueOfJSConstant(op1);
- JSValue bValue = valueOfJSConstant(op2);
- if (aValue.isNumber() && bValue.isNumber()) {
- double a = aValue.asNumber();
- double b = bValue.asNumber();
- if (a >= b) {
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jgreatereq);
- } else {
- // Emit a placeholder for this bytecode operation but otherwise
- // just fall through.
- addToGraph(Phantom);
- NEXT_OPCODE(op_jgreatereq);
- }
- }
- }
Node* condition = addToGraph(CompareGreaterEq, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreatereq)), condition);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition);
LAST_OPCODE(op_jgreatereq);
}
@@ -2759,25 +4803,8 @@ bool ByteCodeParser::parseBlock(unsigned limit)
unsigned relativeOffset = currentInstruction[3].u.operand;
Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
- if (canFold(op1) && canFold(op2)) {
- JSValue aValue = valueOfJSConstant(op1);
- JSValue bValue = valueOfJSConstant(op2);
- if (aValue.isNumber() && bValue.isNumber()) {
- double a = aValue.asNumber();
- double b = bValue.asNumber();
- if (a < b) {
- // Emit a placeholder for this bytecode operation but otherwise
- // just fall through.
- addToGraph(Phantom);
- NEXT_OPCODE(op_jnless);
- } else {
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jnless);
- }
- }
- }
Node* condition = addToGraph(CompareLess, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition);
LAST_OPCODE(op_jnless);
}
@@ -2785,25 +4812,8 @@ bool ByteCodeParser::parseBlock(unsigned limit)
unsigned relativeOffset = currentInstruction[3].u.operand;
Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
- if (canFold(op1) && canFold(op2)) {
- JSValue aValue = valueOfJSConstant(op1);
- JSValue bValue = valueOfJSConstant(op2);
- if (aValue.isNumber() && bValue.isNumber()) {
- double a = aValue.asNumber();
- double b = bValue.asNumber();
- if (a <= b) {
- // Emit a placeholder for this bytecode operation but otherwise
- // just fall through.
- addToGraph(Phantom);
- NEXT_OPCODE(op_jnlesseq);
- } else {
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jnlesseq);
- }
- }
- }
Node* condition = addToGraph(CompareLessEq, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition);
LAST_OPCODE(op_jnlesseq);
}
@@ -2811,25 +4821,8 @@ bool ByteCodeParser::parseBlock(unsigned limit)
unsigned relativeOffset = currentInstruction[3].u.operand;
Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
- if (canFold(op1) && canFold(op2)) {
- JSValue aValue = valueOfJSConstant(op1);
- JSValue bValue = valueOfJSConstant(op2);
- if (aValue.isNumber() && bValue.isNumber()) {
- double a = aValue.asNumber();
- double b = bValue.asNumber();
- if (a > b) {
- // Emit a placeholder for this bytecode operation but otherwise
- // just fall through.
- addToGraph(Phantom);
- NEXT_OPCODE(op_jngreater);
- } else {
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jngreater);
- }
- }
- }
Node* condition = addToGraph(CompareGreater, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreater)), OpInfo(m_currentIndex + relativeOffset), condition);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition);
LAST_OPCODE(op_jngreater);
}
@@ -2837,92 +4830,76 @@ bool ByteCodeParser::parseBlock(unsigned limit)
unsigned relativeOffset = currentInstruction[3].u.operand;
Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
- if (canFold(op1) && canFold(op2)) {
- JSValue aValue = valueOfJSConstant(op1);
- JSValue bValue = valueOfJSConstant(op2);
- if (aValue.isNumber() && bValue.isNumber()) {
- double a = aValue.asNumber();
- double b = bValue.asNumber();
- if (a >= b) {
- // Emit a placeholder for this bytecode operation but otherwise
- // just fall through.
- addToGraph(Phantom);
- NEXT_OPCODE(op_jngreatereq);
- } else {
- addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
- LAST_OPCODE(op_jngreatereq);
- }
- }
- }
Node* condition = addToGraph(CompareGreaterEq, op1, op2);
- addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreatereq)), OpInfo(m_currentIndex + relativeOffset), condition);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition);
LAST_OPCODE(op_jngreatereq);
}
case op_switch_imm: {
- SwitchData data;
+ SwitchData& data = *m_graph.m_switchData.add();
data.kind = SwitchImm;
data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
- data.setFallThroughBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
+ data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
if (!table.branchOffsets[i])
continue;
unsigned target = m_currentIndex + table.branchOffsets[i];
- if (target == data.fallThroughBytecodeIndex())
+ if (target == data.fallThrough.bytecodeIndex())
continue;
- data.cases.append(SwitchCase::withBytecodeIndex(jsNumber(static_cast<int32_t>(table.min + i)), target));
+ data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target));
}
- m_graph.m_switchData.append(data);
- addToGraph(Switch, OpInfo(&m_graph.m_switchData.last()), get(VirtualRegister(currentInstruction[3].u.operand)));
+ addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
+ flushIfTerminal(data);
LAST_OPCODE(op_switch_imm);
}
case op_switch_char: {
- SwitchData data;
+ SwitchData& data = *m_graph.m_switchData.add();
data.kind = SwitchChar;
data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
- data.setFallThroughBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
+ data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
if (!table.branchOffsets[i])
continue;
unsigned target = m_currentIndex + table.branchOffsets[i];
- if (target == data.fallThroughBytecodeIndex())
+ if (target == data.fallThrough.bytecodeIndex())
continue;
data.cases.append(
SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target));
}
- m_graph.m_switchData.append(data);
- addToGraph(Switch, OpInfo(&m_graph.m_switchData.last()), get(VirtualRegister(currentInstruction[3].u.operand)));
+ addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
+ flushIfTerminal(data);
LAST_OPCODE(op_switch_char);
}
case op_switch_string: {
- SwitchData data;
+ SwitchData& data = *m_graph.m_switchData.add();
data.kind = SwitchString;
data.switchTableIndex = currentInstruction[1].u.operand;
- data.setFallThroughBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
+ data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
StringJumpTable::StringOffsetTable::iterator iter;
StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
for (iter = table.offsetTable.begin(); iter != end; ++iter) {
unsigned target = m_currentIndex + iter->value.branchOffset;
- if (target == data.fallThroughBytecodeIndex())
+ if (target == data.fallThrough.bytecodeIndex())
continue;
data.cases.append(
SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target));
}
- m_graph.m_switchData.append(data);
- addToGraph(Switch, OpInfo(&m_graph.m_switchData.last()), get(VirtualRegister(currentInstruction[3].u.operand)));
+ addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
+ flushIfTerminal(data);
LAST_OPCODE(op_switch_string);
}
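
All three switch forms fill a heap-allocated SwitchData the same way: walk the jump table, skip empty slots, and drop cases that land on the fall-through. A rough sketch of that lowering for the immediate case, with simplified stand-ins for SwitchData/SwitchCase:

#include <cstdint>
#include <vector>

struct LoweredCase { int32_t value; unsigned targetBytecodeIndex; };

struct LoweredSwitch {
    std::vector<LoweredCase> cases;
    unsigned fallThroughBytecodeIndex;
};

LoweredSwitch lowerImmediateSwitch(
    const std::vector<int32_t>& branchOffsets, int32_t min,
    unsigned currentIndex, int32_t defaultOffset)
{
    LoweredSwitch result;
    result.fallThroughBytecodeIndex = currentIndex + defaultOffset;
    for (size_t i = 0; i < branchOffsets.size(); ++i) {
        // A zero offset means "no entry"; such slots take the fall-through.
        if (!branchOffsets[i])
            continue;
        unsigned target = currentIndex + branchOffsets[i];
        // Cases whose target is the fall-through are redundant and dropped,
        // mirroring the fallThrough.bytecodeIndex() test above.
        if (target == result.fallThroughBytecodeIndex)
            continue;
        result.cases.push_back({ static_cast<int32_t>(min + i), target });
    }
    return result;
}
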
case op_ret:
- flushArgumentsAndCapturedVariables();
+ ASSERT(!m_currentBlock->terminal());
if (inlineCallFrame()) {
- ASSERT(m_inlineStackTop->m_returnValue.isValid());
- setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSet);
+ flushForReturn();
+ if (m_inlineStackTop->m_returnValue.isValid())
+ setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush);
m_inlineStackTop->m_didReturn = true;
if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
// If we're returning from the first block, then we're done parsing.
@@ -2944,101 +4921,193 @@ bool ByteCodeParser::parseBlock(unsigned limit)
LAST_OPCODE(op_ret);
}
addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
+ flushForReturn();
LAST_OPCODE(op_ret);
case op_end:
- flushArgumentsAndCapturedVariables();
ASSERT(!inlineCallFrame());
addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
+ flushForReturn();
LAST_OPCODE(op_end);
case op_throw:
addToGraph(Throw, get(VirtualRegister(currentInstruction[1].u.operand)));
- flushAllArgumentsAndCapturedVariablesInInlineStack();
+ flushForTerminal();
addToGraph(Unreachable);
LAST_OPCODE(op_throw);
case op_throw_static_error:
- addToGraph(ThrowReferenceError);
- flushAllArgumentsAndCapturedVariablesInInlineStack();
+ addToGraph(Phantom, get(VirtualRegister(currentInstruction[1].u.operand))); // Keep argument live.
+ addToGraph(ThrowStaticError);
+ flushForTerminal();
addToGraph(Unreachable);
LAST_OPCODE(op_throw_static_error);
+
+ case op_catch:
+ m_graph.m_hasExceptionHandlers = true;
+ NEXT_OPCODE(op_catch);
case op_call:
- handleCall(currentInstruction, Call, CodeForCall);
+ handleCall(currentInstruction, Call, CallMode::Regular);
+ ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction");
NEXT_OPCODE(op_call);
-
+
+ case op_tail_call: {
+ flushForReturn();
+ Terminality terminality = handleCall(currentInstruction, TailCall, CallMode::Tail);
+ ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction");
+ // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function.
+ // If the call is not terminal, however, then we want the subsequent op_ret/op_jump to update metadata and clean
+ // things up.
+ if (terminality == NonTerminal)
+ NEXT_OPCODE(op_tail_call);
+ else
+ LAST_OPCODE(op_tail_call);
+ }
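
The Terminality value returned by handleCall decides whether the parser keeps walking bytecodes after a tail call. A small sketch of that decision, assuming a simplified Terminality enum in place of the real one (NEXT_OPCODE/LAST_OPCODE are macros and cannot be reproduced here):

enum Terminality { Terminal, NonTerminal };

// An inlined tail call keeps control inside the caller, so the block is not
// terminated and the following op_ret / op_jmp must still be parsed to update
// metadata; a genuine machine-level tail call ends the block immediately.
inline bool keepParsingAfterTailCall(Terminality terminality)
{
    return terminality == NonTerminal;
}
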
+
case op_construct:
- handleCall(currentInstruction, Construct, CodeForConstruct);
+ handleCall(currentInstruction, Construct, CallMode::Construct);
+ ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction");
NEXT_OPCODE(op_construct);
case op_call_varargs: {
- ASSERT(inlineCallFrame());
- ASSERT(currentInstruction[4].u.operand == m_inlineStackTop->m_codeBlock->argumentsRegister().offset());
- ASSERT(!m_inlineStackTop->m_codeBlock->symbolTable()->slowArguments());
- // It would be cool to funnel this into handleCall() so that it can handle
- // inlining. But currently that won't be profitable anyway, since none of the
- // uses of call_varargs will be inlineable. So we set this up manually and
- // without inline/intrinsic detection.
-
- SpeculatedType prediction = getPrediction();
-
- addToGraph(CheckArgumentsNotCreated);
-
- unsigned argCount = inlineCallFrame()->arguments.size();
- if (JSStack::ThisArgument + argCount > m_parameterSlots)
- m_parameterSlots = JSStack::ThisArgument + argCount;
-
- addVarArgChild(get(VirtualRegister(currentInstruction[2].u.operand))); // callee
- addVarArgChild(get(VirtualRegister(currentInstruction[3].u.operand))); // this
- for (unsigned argument = 1; argument < argCount; ++argument)
- addVarArgChild(get(virtualRegisterForArgument(argument)));
+ handleVarargsCall(currentInstruction, CallVarargs, CallMode::Regular);
+ ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
+ NEXT_OPCODE(op_call_varargs);
+ }
+
+ case op_tail_call_varargs: {
+ flushForReturn();
+ Terminality terminality = handleVarargsCall(currentInstruction, TailCallVarargs, CallMode::Tail);
+ ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
+ // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function.
+ // If the call is not terminal, however, then we want the subsequent op_ret/op_jump to update metadata and clean
+ // things up.
+ if (terminality == NonTerminal)
+ NEXT_OPCODE(op_tail_call_varargs);
+ else
+ LAST_OPCODE(op_tail_call_varargs);
+ }
+
+ case op_tail_call_forward_arguments: {
+ // We need to make sure that we don't unbox our arguments here since that won't be
+ // done by the arguments object creation node as that node may not exist.
+ noticeArgumentsUse();
+ flushForReturn();
+ Terminality terminality = handleVarargsCall(currentInstruction, TailCallForwardVarargs, CallMode::Tail);
+ ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
+ // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function.
+ // If the call is not terminal, however, then we want the subsequent op_ret/op_jump to update metadata and clean
+ // things up.
+ if (terminality == NonTerminal)
+ NEXT_OPCODE(op_tail_call);
+ else
+ LAST_OPCODE(op_tail_call);
+ }
- set(VirtualRegister(currentInstruction[1].u.operand),
- addToGraph(Node::VarArg, Call, OpInfo(0), OpInfo(prediction)));
+ case op_construct_varargs: {
+ handleVarargsCall(currentInstruction, ConstructVarargs, CallMode::Construct);
+ ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
+ NEXT_OPCODE(op_construct_varargs);
+ }
- NEXT_OPCODE(op_call_varargs);
+ case op_call_eval: {
+ int result = currentInstruction[1].u.operand;
+ int callee = currentInstruction[2].u.operand;
+ int argumentCountIncludingThis = currentInstruction[3].u.operand;
+ int registerOffset = -currentInstruction[4].u.operand;
+ addCall(result, CallEval, nullptr, get(VirtualRegister(callee)), argumentCountIncludingThis, registerOffset, getPrediction());
+ NEXT_OPCODE(op_call_eval);
}
- case op_jneq_ptr:
- // Statically speculate for now. It makes sense to let speculate-only jneq_ptr
- // support simmer for a while before making it more general, since it's
- // already gnarly enough as it is.
- ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer));
- addToGraph(
- CheckFunction,
- OpInfo(actualPointerFor(m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)),
- get(VirtualRegister(currentInstruction[1].u.operand)));
- addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
- LAST_OPCODE(op_jneq_ptr);
+ case op_jneq_ptr: {
+ Special::Pointer specialPointer = currentInstruction[2].u.specialPointer;
+ ASSERT(pointerIsCell(specialPointer));
+ JSCell* actualPointer = static_cast<JSCell*>(
+ actualPointerFor(m_inlineStackTop->m_codeBlock, specialPointer));
+ FrozenValue* frozenPointer = m_graph.freeze(actualPointer);
+ int operand = currentInstruction[1].u.operand;
+ unsigned relativeOffset = currentInstruction[3].u.operand;
+ Node* child = get(VirtualRegister(operand));
+ if (currentInstruction[4].u.operand) {
+ Node* condition = addToGraph(CompareEqPtr, OpInfo(frozenPointer), child);
+ addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr), m_currentIndex + relativeOffset)), condition);
+ LAST_OPCODE(op_jneq_ptr);
+ }
+ addToGraph(CheckCell, OpInfo(frozenPointer), child);
+ NEXT_OPCODE(op_jneq_ptr);
+ }
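
op_jneq_ptr now has two lowerings, selected by the profiling slot in operand 4, which on this reading records whether the branch has ever been taken. A hedged sketch of that choice with hypothetical names:

enum class JneqPtrLowering {
    DynamicBranch,    // the jump has been seen: CompareEqPtr + Branch keeps both paths
    SpeculativeCheck  // never seen: CheckCell speculates and OSR-exits on mismatch
};

inline JneqPtrLowering chooseJneqPtrLowering(bool profileSawJump)
{
    return profileSawJump ? JneqPtrLowering::DynamicBranch
                          : JneqPtrLowering::SpeculativeCheck;
}
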
case op_resolve_scope: {
int dst = currentInstruction[1].u.operand;
- ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
- unsigned depth = currentInstruction[4].u.operand;
+ ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
+ unsigned depth = currentInstruction[5].u.operand;
+ int scope = currentInstruction[2].u.operand;
+
+ if (needsDynamicLookup(resolveType, op_resolve_scope)) {
+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
+ set(VirtualRegister(dst), addToGraph(ResolveScope, OpInfo(identifierNumber), get(VirtualRegister(scope))));
+ NEXT_OPCODE(op_resolve_scope);
+ }
// get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints.
if (needsVarInjectionChecks(resolveType))
- addToGraph(VarInjectionWatchpoint);
+ m_graph.watchpoints().addLazily(m_inlineStackTop->m_codeBlock->globalObject()->varInjectionWatchpoint());
switch (resolveType) {
case GlobalProperty:
case GlobalVar:
case GlobalPropertyWithVarInjectionChecks:
case GlobalVarWithVarInjectionChecks:
- set(VirtualRegister(dst), cellConstant(m_inlineStackTop->m_codeBlock->globalObject()));
+ case GlobalLexicalVar:
+ case GlobalLexicalVarWithVarInjectionChecks: {
+ JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock);
+ RELEASE_ASSERT(constantScope);
+ RELEASE_ASSERT(static_cast<JSScope*>(currentInstruction[6].u.pointer) == constantScope);
+ set(VirtualRegister(dst), weakJSConstant(constantScope));
+ addToGraph(Phantom, get(VirtualRegister(scope)));
break;
+ }
+ case ModuleVar: {
+ // Since the value of the "scope" virtual register is not used in LLInt / baseline op_resolve_scope with ModuleVar,
+ // we do not need to keep it alive with a Phantom node.
+ JSModuleEnvironment* moduleEnvironment = jsCast<JSModuleEnvironment*>(currentInstruction[6].u.jsCell.get());
+ // Module environment is already strongly referenced by the CodeBlock.
+ set(VirtualRegister(dst), weakJSConstant(moduleEnvironment));
+ break;
+ }
+ case LocalClosureVar:
case ClosureVar:
case ClosureVarWithVarInjectionChecks: {
- JSActivation* activation = currentInstruction[5].u.activation.get();
- if (activation
- && activation->symbolTable()->m_functionEnteredOnce.isStillValid()) {
- addToGraph(FunctionReentryWatchpoint, OpInfo(activation->symbolTable()));
- set(VirtualRegister(dst), cellConstant(activation));
+ Node* localBase = get(VirtualRegister(scope));
+ addToGraph(Phantom, localBase); // OSR exit cannot handle resolve_scope on a DCE'd scope.
+
+ // We have various forms of constant folding here. This is necessary to avoid
+ // spurious recompiles in dead-but-foldable code.
+ if (SymbolTable* symbolTable = currentInstruction[6].u.symbolTable.get()) {
+ InferredValue* singleton = symbolTable->singletonScope();
+ if (JSValue value = singleton->inferredValue()) {
+ m_graph.watchpoints().addLazily(singleton);
+ set(VirtualRegister(dst), weakJSConstant(value));
+ break;
+ }
+ }
+ if (JSScope* scope = localBase->dynamicCastConstant<JSScope*>(*m_vm)) {
+ for (unsigned n = depth; n--;)
+ scope = scope->next();
+ set(VirtualRegister(dst), weakJSConstant(scope));
break;
}
- set(VirtualRegister(dst),
- getScope(m_inlineStackTop->m_codeBlock->needsActivation(), depth));
+ for (unsigned n = depth; n--;)
+ localBase = addToGraph(SkipScope, localBase);
+ set(VirtualRegister(dst), localBase);
+ break;
+ }
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks: {
+ addToGraph(Phantom, get(VirtualRegister(scope)));
+ addToGraph(ForceOSRExit);
+ set(VirtualRegister(dst), addToGraph(JSConstant, OpInfo(m_constantNull)));
break;
}
case Dynamic:
@@ -3052,81 +5121,157 @@ bool ByteCodeParser::parseBlock(unsigned limit)
int dst = currentInstruction[1].u.operand;
int scope = currentInstruction[2].u.operand;
unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
- StringImpl* uid = m_graph.identifiers()[identifierNumber];
- ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
+ UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
+ ResolveType resolveType = GetPutInfo(currentInstruction[4].u.operand).resolveType();
Structure* structure = 0;
WatchpointSet* watchpoints = 0;
uintptr_t operand;
{
- ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
- if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
+ ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+ if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)
watchpoints = currentInstruction[5].u.watchpointSet;
- else
+ else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks)
structure = currentInstruction[5].u.structure.get();
operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
}
+ if (needsDynamicLookup(resolveType, op_get_from_scope)) {
+ set(VirtualRegister(dst),
+ addToGraph(GetDynamicVar, OpInfo(identifierNumber), OpInfo(currentInstruction[4].u.operand), get(VirtualRegister(scope))));
+ NEXT_OPCODE(op_get_from_scope);
+ }
+
UNUSED_PARAM(watchpoints); // We will use this in the future. For now we set it as a way of documenting the fact that that's what index 5 is in GlobalVar mode.
- SpeculatedType prediction = getPrediction();
JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
switch (resolveType) {
case GlobalProperty:
case GlobalPropertyWithVarInjectionChecks: {
- GetByIdStatus status = GetByIdStatus::computeFor(*m_vm, structure, uid);
- if (status.takesSlowPath()) {
+ SpeculatedType prediction = getPrediction();
+
+ GetByIdStatus status = GetByIdStatus::computeFor(structure, uid);
+ if (status.state() != GetByIdStatus::Simple
+ || status.numVariants() != 1
+ || status[0].structureSet().size() != 1) {
set(VirtualRegister(dst), addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(VirtualRegister(scope))));
break;
}
- Node* base = cellConstantWithStructureCheck(globalObject, status.structureSet().singletonStructure());
+
+ Node* base = weakJSConstant(globalObject);
+ Node* result = load(prediction, base, identifierNumber, status[0]);
addToGraph(Phantom, get(VirtualRegister(scope)));
- if (JSValue specificValue = status.specificValue())
- set(VirtualRegister(dst), cellConstant(specificValue.asCell()));
- else
- set(VirtualRegister(dst), handleGetByOffset(prediction, base, identifierNumber, operand));
+ set(VirtualRegister(dst), result);
break;
}
case GlobalVar:
- case GlobalVarWithVarInjectionChecks: {
+ case GlobalVarWithVarInjectionChecks:
+ case GlobalLexicalVar:
+ case GlobalLexicalVarWithVarInjectionChecks: {
addToGraph(Phantom, get(VirtualRegister(scope)));
- SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
- VariableWatchpointSet* watchpointSet = entry.watchpointSet();
- JSValue specificValue =
- watchpointSet ? watchpointSet->inferredValue() : JSValue();
- if (!specificValue) {
- set(VirtualRegister(dst), addToGraph(GetGlobalVar, OpInfo(operand), OpInfo(prediction)));
- break;
+ WatchpointSet* watchpointSet;
+ ScopeOffset offset;
+ JSSegmentedVariableObject* scopeObject = jsCast<JSSegmentedVariableObject*>(JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock));
+ {
+ ConcurrentJSLocker locker(scopeObject->symbolTable()->m_lock);
+ SymbolTableEntry entry = scopeObject->symbolTable()->get(locker, uid);
+ watchpointSet = entry.watchpointSet();
+ offset = entry.scopeOffset();
+ }
+ if (watchpointSet && watchpointSet->state() == IsWatched) {
+ // This has a fun concurrency story. There is the possibility of a race in two
+ // directions:
+ //
+ // We see that the set IsWatched, but in the meantime it gets invalidated: this is
+ // fine because if we saw that it IsWatched then we add a watchpoint. If it gets
+ // invalidated, then this compilation is invalidated. Note that in the meantime we
+ // may load an absurd value from the global object. It's fine to load an absurd
+ // value if the compilation is invalidated anyway.
+ //
+ // We see that the set IsWatched, but the value isn't yet initialized: this isn't
+ // possible because of the ordering of operations.
+ //
+ // Here's how we order operations:
+ //
+ // Main thread stores to the global object: always store a value first, and only
+ // after that do we touch the watchpoint set. There is a fence in the touch, that
+ // ensures that the store to the global object always happens before the touch on the
+ // set.
+ //
+ // Compilation thread: always first load the state of the watchpoint set, and then
+ // load the value. The WatchpointSet::state() method does fences for us to ensure
+ // that the load of the state happens before our load of the value.
+ //
+ // Finalizing compilation: this happens on the main thread and synchronously checks
+ // validity of all watchpoint sets.
+ //
+ // We will only perform optimizations if the load of the state yields IsWatched. That
+ // means that at least one store would have happened to initialize the original value
+ // of the variable (that is, the value we'd like to constant fold to). There may be
+ // other stores that happen after that, but those stores will invalidate the
+ // watchpoint set and also the compilation.
+
+ // Note that we need to use the operand, which is a direct pointer at the global,
+ // rather than looking up the global by doing variableAt(offset). That's because the
+ // internal data structures of JSSegmentedVariableObject are not thread-safe even
+ // though accessing the global itself is. The segmentation involves a vector spine
+ // that resizes with malloc/free, so if new globals unrelated to the one we are
+ // reading are added, we might access freed memory if we do variableAt().
+ WriteBarrier<Unknown>* pointer = bitwise_cast<WriteBarrier<Unknown>*>(operand);
+
+ ASSERT(scopeObject->findVariableIndex(pointer) == offset);
+
+ JSValue value = pointer->get();
+ if (value) {
+ m_graph.watchpoints().addLazily(watchpointSet);
+ set(VirtualRegister(dst), weakJSConstant(value));
+ break;
+ }
}
- addToGraph(VariableWatchpoint, OpInfo(watchpointSet));
- set(VirtualRegister(dst), inferredConstant(specificValue));
+ SpeculatedType prediction = getPrediction();
+ NodeType nodeType;
+ if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
+ nodeType = GetGlobalVar;
+ else
+ nodeType = GetGlobalLexicalVariable;
+ Node* value = addToGraph(nodeType, OpInfo(operand), OpInfo(prediction));
+ if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)
+ addToGraph(CheckNotEmpty, value);
+ set(VirtualRegister(dst), value);
break;
}
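
The long comment above describes a read-side ordering: load the watchpoint state first, then the value. A compact model of that protocol using standard atomics; WatchState, GlobalSlot, and the function names are stand-ins, not JSC types:

#include <atomic>
#include <optional>

enum class WatchState { Clear, Watched, Invalidated };

struct GlobalSlot {
    std::atomic<WatchState> state { WatchState::Clear };
    std::atomic<int> value { 0 };
};

// Main thread: publish the value first, only then touch the watchpoint state.
void initializeAndWatch(GlobalSlot& slot, int initialValue)
{
    slot.value.store(initialValue, std::memory_order_relaxed);
    slot.state.store(WatchState::Watched, std::memory_order_release);
}

// Compiler thread: read the state first, then the value. Folding is attempted
// only when the state reads Watched; if an invalidation races in afterwards,
// the compilation is invalidated too, so a stale value is harmless.
std::optional<int> tryConstantFold(const GlobalSlot& slot)
{
    if (slot.state.load(std::memory_order_acquire) != WatchState::Watched)
        return std::nullopt;
    return slot.value.load(std::memory_order_relaxed);
}
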
+ case LocalClosureVar:
case ClosureVar:
case ClosureVarWithVarInjectionChecks: {
Node* scopeNode = get(VirtualRegister(scope));
- if (JSActivation* activation = m_graph.tryGetActivation(scopeNode)) {
- SymbolTable* symbolTable = activation->symbolTable();
- ConcurrentJITLocker locker(symbolTable->m_lock);
- SymbolTable::Map::iterator iter = symbolTable->find(locker, uid);
- ASSERT(iter != symbolTable->end(locker));
- VariableWatchpointSet* watchpointSet = iter->value.watchpointSet();
- if (watchpointSet) {
- if (JSValue value = watchpointSet->inferredValue()) {
- addToGraph(Phantom, scopeNode);
- addToGraph(VariableWatchpoint, OpInfo(watchpointSet));
- set(VirtualRegister(dst), inferredConstant(value));
- break;
- }
- }
+
+ // Ideally we wouldn't have to do this Phantom. But:
+ //
+ // For the constant case: we must do it because otherwise we would have no way of knowing
+ // that the scope is live at OSR here.
+ //
+ // For the non-constant case: GetClosureVar could be DCE'd, but baseline's implementation
+ // won't be able to handle an Undefined scope.
+ addToGraph(Phantom, scopeNode);
+
+ // Constant folding in the bytecode parser is important for performance. This may not
+ // have executed yet. If it hasn't, then we won't have a prediction. Lacking a
+ // prediction, we'd otherwise think that it has to exit. Then when it did execute, we
+ // would recompile. But if we can fold it here, we avoid the exit.
+ if (JSValue value = m_graph.tryGetConstantClosureVar(scopeNode, ScopeOffset(operand))) {
+ set(VirtualRegister(dst), weakJSConstant(value));
+ break;
}
+ SpeculatedType prediction = getPrediction();
set(VirtualRegister(dst),
- addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction),
- addToGraph(GetClosureRegisters, scopeNode)));
+ addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), scopeNode));
break;
}
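
The ClosureVar case above prefers a parse-time fold over a profiled load because an unexecuted GetClosureVar has no prediction and would otherwise be forced to exit. A minimal sketch of that fold-or-load shape; tryGetConstantClosureVar is the real helper, everything else here is an illustrative placeholder:

#include <optional>

struct FoldResult { int payload; }; // stand-in for a constant JSValue

std::optional<FoldResult> foldOrLoadClosureVar(bool scopeIsConstant, bool slotHasKnownValue, FoldResult known)
{
    if (scopeIsConstant && slotHasKnownValue)
        return known;    // emit weakJSConstant(value), no load at all
    return std::nullopt; // emit GetClosureVar(operand, prediction, scope)
}
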
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks:
+ case ModuleVar:
case Dynamic:
RELEASE_ASSERT_NOT_REACHED();
break;
@@ -3136,60 +5281,107 @@ bool ByteCodeParser::parseBlock(unsigned limit)
case op_put_to_scope: {
unsigned scope = currentInstruction[1].u.operand;
- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
+ unsigned identifierNumber = currentInstruction[2].u.operand;
+ if (identifierNumber != UINT_MAX)
+ identifierNumber = m_inlineStackTop->m_identifierRemap[identifierNumber];
unsigned value = currentInstruction[3].u.operand;
- ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
- StringImpl* uid = m_graph.identifiers()[identifierNumber];
-
- Structure* structure = 0;
- VariableWatchpointSet* watchpoints = 0;
+ GetPutInfo getPutInfo = GetPutInfo(currentInstruction[4].u.operand);
+ ResolveType resolveType = getPutInfo.resolveType();
+ UniquedStringImpl* uid;
+ if (identifierNumber != UINT_MAX)
+ uid = m_graph.identifiers()[identifierNumber];
+ else
+ uid = nullptr;
+
+ Structure* structure = nullptr;
+ WatchpointSet* watchpoints = nullptr;
uintptr_t operand;
{
- ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
- if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
+ ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+ if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)
watchpoints = currentInstruction[5].u.watchpointSet;
- else
+ else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks)
structure = currentInstruction[5].u.structure.get();
operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
}
JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
+ if (needsDynamicLookup(resolveType, op_put_to_scope)) {
+ ASSERT(identifierNumber != UINT_MAX);
+ addToGraph(PutDynamicVar, OpInfo(identifierNumber), OpInfo(currentInstruction[4].u.operand), get(VirtualRegister(scope)), get(VirtualRegister(value)));
+ NEXT_OPCODE(op_put_to_scope);
+ }
+
switch (resolveType) {
case GlobalProperty:
case GlobalPropertyWithVarInjectionChecks: {
- PutByIdStatus status = PutByIdStatus::computeFor(*m_vm, globalObject, structure, uid, false);
- if (!status.isSimpleReplace()) {
+ PutByIdStatus status;
+ if (uid)
+ status = PutByIdStatus::computeFor(globalObject, structure, uid, false);
+ else
+ status = PutByIdStatus(PutByIdStatus::TakesSlowPath);
+ if (status.numVariants() != 1
+ || status[0].kind() != PutByIdVariant::Replace
+ || status[0].structure().size() != 1) {
addToGraph(PutById, OpInfo(identifierNumber), get(VirtualRegister(scope)), get(VirtualRegister(value)));
break;
}
- Node* base = cellConstantWithStructureCheck(globalObject, status.oldStructure());
- addToGraph(Phantom, get(VirtualRegister(scope)));
- handlePutByOffset(base, identifierNumber, static_cast<PropertyOffset>(operand), get(VirtualRegister(value)));
+ Node* base = weakJSConstant(globalObject);
+ store(base, identifierNumber, status[0], get(VirtualRegister(value)));
// Keep scope alive until after put.
addToGraph(Phantom, get(VirtualRegister(scope)));
break;
}
+ case GlobalLexicalVar:
+ case GlobalLexicalVarWithVarInjectionChecks:
case GlobalVar:
case GlobalVarWithVarInjectionChecks: {
- SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
- ASSERT(watchpoints == entry.watchpointSet());
+ if (!isInitialization(getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) {
+ SpeculatedType prediction = SpecEmpty;
+ Node* value = addToGraph(GetGlobalLexicalVariable, OpInfo(operand), OpInfo(prediction));
+ addToGraph(CheckNotEmpty, value);
+ }
+
+ JSSegmentedVariableObject* scopeObject = jsCast<JSSegmentedVariableObject*>(JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock));
+ if (watchpoints) {
+ SymbolTableEntry entry = scopeObject->symbolTable()->get(uid);
+ ASSERT_UNUSED(entry, watchpoints == entry.watchpointSet());
+ }
Node* valueNode = get(VirtualRegister(value));
- addToGraph(PutGlobalVar, OpInfo(operand), valueNode);
- if (watchpoints->state() != IsInvalidated)
- addToGraph(NotifyWrite, OpInfo(watchpoints), valueNode);
+ addToGraph(PutGlobalVariable, OpInfo(operand), weakJSConstant(scopeObject), valueNode);
+ if (watchpoints && watchpoints->state() != IsInvalidated) {
+ // Must happen after the store. See comment for GetGlobalVar.
+ addToGraph(NotifyWrite, OpInfo(watchpoints));
+ }
// Keep scope alive until after put.
addToGraph(Phantom, get(VirtualRegister(scope)));
break;
}
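
This is the write-side counterpart of the ordering described for the global reads: publish the value with PutGlobalVariable first, and only then fire NotifyWrite, so a compiler thread that still sees the set as watched can never fold in a missing value. A small model with stand-in names:

#include <atomic>

struct WatchedGlobal {
    std::atomic<int> value { 0 };
    std::atomic<bool> invalidated { false };
};

void putAndNotify(WatchedGlobal& global, int newValue)
{
    global.value.store(newValue, std::memory_order_relaxed);   // PutGlobalVariable
    global.invalidated.store(true, std::memory_order_release); // NotifyWrite, after the store
}
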
+ case LocalClosureVar:
case ClosureVar:
case ClosureVarWithVarInjectionChecks: {
Node* scopeNode = get(VirtualRegister(scope));
- Node* scopeRegisters = addToGraph(GetClosureRegisters, scopeNode);
- addToGraph(PutClosureVar, OpInfo(operand), scopeNode, scopeRegisters, get(VirtualRegister(value)));
+ Node* valueNode = get(VirtualRegister(value));
+
+ addToGraph(PutClosureVar, OpInfo(operand), scopeNode, valueNode);
+
+ if (watchpoints && watchpoints->state() != IsInvalidated) {
+ // Must happen after the store. See comment for GetGlobalVar.
+ addToGraph(NotifyWrite, OpInfo(watchpoints));
+ }
break;
}
+
+ case ModuleVar:
+ // We do not need to keep the "scope" and "value" register values alive here with Phantom nodes
+ // because they are not used in LLInt / baseline op_put_to_scope with ModuleVar.
+ addToGraph(ForceOSRExit);
+ break;
+
case Dynamic:
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks:
RELEASE_ASSERT_NOT_REACHED();
break;
}
@@ -3209,88 +5401,146 @@ bool ByteCodeParser::parseBlock(unsigned limit)
m_currentBlock->isOSRTarget = true;
addToGraph(LoopHint);
-
- if (m_vm->watchdog.isEnabled())
- addToGraph(CheckWatchdogTimer);
-
NEXT_OPCODE(op_loop_hint);
}
-
- case op_init_lazy_reg: {
- set(VirtualRegister(currentInstruction[1].u.operand), getJSConstantForValue(JSValue()));
- ASSERT(operandIsLocal(currentInstruction[1].u.operand));
- m_graph.m_lazyVars.set(VirtualRegister(currentInstruction[1].u.operand).toLocal());
- NEXT_OPCODE(op_init_lazy_reg);
+
+ case op_watchdog: {
+ addToGraph(CheckWatchdogTimer);
+ NEXT_OPCODE(op_watchdog);
}
- case op_create_activation: {
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CreateActivation, get(VirtualRegister(currentInstruction[1].u.operand))));
- NEXT_OPCODE(op_create_activation);
+ case op_create_lexical_environment: {
+ VirtualRegister symbolTableRegister(currentInstruction[3].u.operand);
+ VirtualRegister initialValueRegister(currentInstruction[4].u.operand);
+ ASSERT(symbolTableRegister.isConstant() && initialValueRegister.isConstant());
+ FrozenValue* symbolTable = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(symbolTableRegister.offset()));
+ FrozenValue* initialValue = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(initialValueRegister.offset()));
+ Node* scope = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* lexicalEnvironment = addToGraph(CreateActivation, OpInfo(symbolTable), OpInfo(initialValue), scope);
+ set(VirtualRegister(currentInstruction[1].u.operand), lexicalEnvironment);
+ NEXT_OPCODE(op_create_lexical_environment);
+ }
+
+ case op_get_parent_scope: {
+ Node* currentScope = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* newScope = addToGraph(SkipScope, currentScope);
+ set(VirtualRegister(currentInstruction[1].u.operand), newScope);
+ addToGraph(Phantom, currentScope);
+ NEXT_OPCODE(op_get_parent_scope);
+ }
+
+ case op_get_scope: {
+ // Help the later stages a bit by doing some small constant folding here. Note that this
+ // only helps for the first basic block. It's extremely important not to constant fold
+ // loads from the scope register later, as that would prevent the DFG from tracking the
+ // bytecode-level liveness of the scope register.
+ Node* callee = get(VirtualRegister(CallFrameSlot::callee));
+ Node* result;
+ if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm))
+ result = weakJSConstant(function->scope());
+ else
+ result = addToGraph(GetScope, callee);
+ set(VirtualRegister(currentInstruction[1].u.operand), result);
+ NEXT_OPCODE(op_get_scope);
}
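
op_get_scope folds the scope to a constant whenever the callee itself is a known JSFunction constant, and only falls back to a GetScope node otherwise. A sketch of that shortcut with simplified stand-in types:

struct ScopeChain;
struct KnownFunction { ScopeChain* scope; };

// Returns the folded scope, or nullptr when a runtime GetScope is needed.
ScopeChain* resolveScopeAtParseTime(const KnownFunction* constantCallee)
{
    if (constantCallee)
        return constantCallee->scope; // becomes a weak JS constant
    return nullptr;                   // caller emits GetScope(callee)
}
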
-
- case op_create_arguments: {
- m_graph.m_hasArguments = true;
- Node* createArguments = addToGraph(CreateArguments, get(VirtualRegister(currentInstruction[1].u.operand)));
+
+ case op_argument_count: {
+ Node* sub = addToGraph(ArithSub, OpInfo(Arith::Unchecked), OpInfo(SpecInt32Only), getArgumentCount(), addToGraph(JSConstant, OpInfo(m_constantOne)));
+
+ set(VirtualRegister(currentInstruction[1].u.operand), sub);
+ NEXT_OPCODE(op_argument_count);
+ }
+
+ case op_create_direct_arguments: {
+ noticeArgumentsUse();
+ Node* createArguments = addToGraph(CreateDirectArguments);
set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
- set(unmodifiedArgumentsRegister(VirtualRegister(currentInstruction[1].u.operand)), createArguments);
- NEXT_OPCODE(op_create_arguments);
+ NEXT_OPCODE(op_create_direct_arguments);
}
- case op_tear_off_activation: {
- addToGraph(TearOffActivation, get(VirtualRegister(currentInstruction[1].u.operand)));
- NEXT_OPCODE(op_tear_off_activation);
+ case op_create_scoped_arguments: {
+ noticeArgumentsUse();
+ Node* createArguments = addToGraph(CreateScopedArguments, get(VirtualRegister(currentInstruction[2].u.operand)));
+ set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
+ NEXT_OPCODE(op_create_scoped_arguments);
}
- case op_tear_off_arguments: {
- m_graph.m_hasArguments = true;
- addToGraph(TearOffArguments, get(unmodifiedArgumentsRegister(VirtualRegister(currentInstruction[1].u.operand))), get(VirtualRegister(currentInstruction[2].u.operand)));
- NEXT_OPCODE(op_tear_off_arguments);
- }
-
- case op_get_arguments_length: {
- m_graph.m_hasArguments = true;
- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetMyArgumentsLengthSafe));
- NEXT_OPCODE(op_get_arguments_length);
+ case op_create_cloned_arguments: {
+ noticeArgumentsUse();
+ Node* createArguments = addToGraph(CreateClonedArguments);
+ set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
+ NEXT_OPCODE(op_create_cloned_arguments);
}
- case op_get_argument_by_val: {
- m_graph.m_hasArguments = true;
+ case op_get_from_arguments: {
set(VirtualRegister(currentInstruction[1].u.operand),
addToGraph(
- GetMyArgumentByValSafe, OpInfo(0), OpInfo(getPrediction()),
- get(VirtualRegister(currentInstruction[3].u.operand))));
- NEXT_OPCODE(op_get_argument_by_val);
+ GetFromArguments,
+ OpInfo(currentInstruction[3].u.operand),
+ OpInfo(getPrediction()),
+ get(VirtualRegister(currentInstruction[2].u.operand))));
+ NEXT_OPCODE(op_get_from_arguments);
}
- case op_new_func: {
- if (!currentInstruction[3].u.operand) {
- set(VirtualRegister(currentInstruction[1].u.operand),
- addToGraph(NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand)));
- } else {
- set(VirtualRegister(currentInstruction[1].u.operand),
- addToGraph(
- NewFunction,
- OpInfo(currentInstruction[2].u.operand),
- get(VirtualRegister(currentInstruction[1].u.operand))));
- }
- NEXT_OPCODE(op_new_func);
+ case op_put_to_arguments: {
+ addToGraph(
+ PutToArguments,
+ OpInfo(currentInstruction[2].u.operand),
+ get(VirtualRegister(currentInstruction[1].u.operand)),
+ get(VirtualRegister(currentInstruction[3].u.operand)));
+ NEXT_OPCODE(op_put_to_arguments);
+ }
+
+ case op_get_argument: {
+ InlineCallFrame* inlineCallFrame = this->inlineCallFrame();
+ Node* argument;
+ int32_t argumentIndexIncludingThis = currentInstruction[2].u.operand;
+ if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
+ int32_t argumentCountIncludingThis = inlineCallFrame->arguments.size();
+ if (argumentIndexIncludingThis < argumentCountIncludingThis)
+ argument = get(virtualRegisterForArgument(argumentIndexIncludingThis));
+ else
+ argument = addToGraph(JSConstant, OpInfo(m_constantUndefined));
+ } else
+ argument = addToGraph(GetArgument, OpInfo(argumentIndexIncludingThis), OpInfo(getPrediction()));
+ set(VirtualRegister(currentInstruction[1].u.operand), argument);
+ NEXT_OPCODE(op_get_argument);
}
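
op_get_argument can resolve entirely at parse time for a non-varargs inline frame, because the argument count is then statically known. A sketch of the three possible outcomes, with hypothetical names:

#include <cstdint>

enum class ArgumentSource { DirectRead, Undefined, RuntimeLoad };

ArgumentSource classifyArgumentRead(
    bool inlinedWithFixedArity, int32_t argumentCountIncludingThis,
    int32_t argumentIndexIncludingThis)
{
    if (inlinedWithFixedArity) {
        // The count is known, so an in-range index reads the slot directly
        // and an out-of-range index folds to undefined.
        return argumentIndexIncludingThis < argumentCountIncludingThis
            ? ArgumentSource::DirectRead
            : ArgumentSource::Undefined;
    }
    // Otherwise the count is only known at runtime: emit GetArgument.
    return ArgumentSource::RuntimeLoad;
}
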
- case op_new_captured_func: {
- Node* function = addToGraph(
- NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand));
- if (VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet)
- addToGraph(NotifyWrite, OpInfo(set), function);
- set(VirtualRegister(currentInstruction[1].u.operand), function);
- NEXT_OPCODE(op_new_captured_func);
+ case op_new_func:
+ case op_new_generator_func:
+ case op_new_async_func: {
+ FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(currentInstruction[3].u.operand);
+ FrozenValue* frozen = m_graph.freezeStrong(decl);
+ NodeType op = (opcodeID == op_new_generator_func) ? NewGeneratorFunction :
+ (opcodeID == op_new_async_func) ? NewAsyncFunction : NewFunction;
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(op, OpInfo(frozen), get(VirtualRegister(currentInstruction[2].u.operand))));
+ static_assert(OPCODE_LENGTH(op_new_func) == OPCODE_LENGTH(op_new_generator_func), "The length of op_new_func should be equal to that of op_new_generator_func");
+ static_assert(OPCODE_LENGTH(op_new_func) == OPCODE_LENGTH(op_new_async_func), "The length of op_new_func should be equal to that of op_new_async_func");
+ NEXT_OPCODE(op_new_func);
}
-
- case op_new_func_exp: {
- set(VirtualRegister(currentInstruction[1].u.operand),
- addToGraph(NewFunctionExpression, OpInfo(currentInstruction[2].u.operand)));
+
+ case op_new_func_exp:
+ case op_new_generator_func_exp:
+ case op_new_async_func_exp: {
+ FunctionExecutable* expr = m_inlineStackTop->m_profiledBlock->functionExpr(currentInstruction[3].u.operand);
+ FrozenValue* frozen = m_graph.freezeStrong(expr);
+ NodeType op = (opcodeID == op_new_generator_func_exp) ? NewGeneratorFunction :
+ (opcodeID == op_new_async_func_exp) ? NewAsyncFunction : NewFunction;
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(op, OpInfo(frozen), get(VirtualRegister(currentInstruction[2].u.operand))));
+
+ static_assert(OPCODE_LENGTH(op_new_func_exp) == OPCODE_LENGTH(op_new_generator_func_exp), "The length of op_new_func_exp should be equal to that of op_new_generator_func_exp");
+ static_assert(OPCODE_LENGTH(op_new_func_exp) == OPCODE_LENGTH(op_new_async_func_exp), "The length of op_new_func_exp should be equal to that of op_new_async_func_exp");
NEXT_OPCODE(op_new_func_exp);
}
+ case op_set_function_name: {
+ Node* func = get(VirtualRegister(currentInstruction[1].u.operand));
+ Node* name = get(VirtualRegister(currentInstruction[2].u.operand));
+ addToGraph(SetFunctionName, func, name);
+ NEXT_OPCODE(op_set_function_name);
+ }
+
case op_typeof: {
set(VirtualRegister(currentInstruction[1].u.operand),
addToGraph(TypeOf, get(VirtualRegister(currentInstruction[2].u.operand))));
@@ -3298,17 +5548,115 @@ bool ByteCodeParser::parseBlock(unsigned limit)
}
case op_to_number: {
- set(VirtualRegister(currentInstruction[1].u.operand),
- addToGraph(Identity, Edge(get(VirtualRegister(currentInstruction[2].u.operand)), NumberUse)));
+ SpeculatedType prediction = getPrediction();
+ Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), value));
NEXT_OPCODE(op_to_number);
}
-
+
+ case op_to_string: {
+ Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToString, value));
+ NEXT_OPCODE(op_to_string);
+ }
+
case op_in: {
+ ArrayMode arrayMode = getArrayMode(currentInstruction[OPCODE_LENGTH(op_in) - 1].u.arrayProfile);
set(VirtualRegister(currentInstruction[1].u.operand),
- addToGraph(In, get(VirtualRegister(currentInstruction[2].u.operand)), get(VirtualRegister(currentInstruction[3].u.operand))));
+ addToGraph(In, OpInfo(arrayMode.asWord()), get(VirtualRegister(currentInstruction[2].u.operand)), get(VirtualRegister(currentInstruction[3].u.operand))));
NEXT_OPCODE(op_in);
}
+ case op_get_enumerable_length: {
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumerableLength,
+ get(VirtualRegister(currentInstruction[2].u.operand))));
+ NEXT_OPCODE(op_get_enumerable_length);
+ }
+
+ case op_has_generic_property: {
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasGenericProperty,
+ get(VirtualRegister(currentInstruction[2].u.operand)),
+ get(VirtualRegister(currentInstruction[3].u.operand))));
+ NEXT_OPCODE(op_has_generic_property);
+ }
+
+ case op_has_structure_property: {
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasStructureProperty,
+ get(VirtualRegister(currentInstruction[2].u.operand)),
+ get(VirtualRegister(currentInstruction[3].u.operand)),
+ get(VirtualRegister(currentInstruction[4].u.operand))));
+ NEXT_OPCODE(op_has_structure_property);
+ }
+
+ case op_has_indexed_property: {
+ Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
+ ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Read);
+ Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
+ Node* hasIterableProperty = addToGraph(HasIndexedProperty, OpInfo(arrayMode.asWord()), OpInfo(static_cast<uint32_t>(PropertySlot::InternalMethodType::GetOwnProperty)), base, property);
+ set(VirtualRegister(currentInstruction[1].u.operand), hasIterableProperty);
+ NEXT_OPCODE(op_has_indexed_property);
+ }
+
+ case op_get_direct_pname: {
+ SpeculatedType prediction = getPredictionWithoutOSRExit();
+
+ Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
+ Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
+ Node* index = get(VirtualRegister(currentInstruction[4].u.operand));
+ Node* enumerator = get(VirtualRegister(currentInstruction[5].u.operand));
+
+ addVarArgChild(base);
+ addVarArgChild(property);
+ addVarArgChild(index);
+ addVarArgChild(enumerator);
+ set(VirtualRegister(currentInstruction[1].u.operand),
+ addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction)));
+
+ NEXT_OPCODE(op_get_direct_pname);
+ }
+
+ case op_get_property_enumerator: {
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetPropertyEnumerator,
+ get(VirtualRegister(currentInstruction[2].u.operand))));
+ NEXT_OPCODE(op_get_property_enumerator);
+ }
+
+ case op_enumerator_structure_pname: {
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorStructurePname,
+ get(VirtualRegister(currentInstruction[2].u.operand)),
+ get(VirtualRegister(currentInstruction[3].u.operand))));
+ NEXT_OPCODE(op_enumerator_structure_pname);
+ }
+
+ case op_enumerator_generic_pname: {
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorGenericPname,
+ get(VirtualRegister(currentInstruction[2].u.operand)),
+ get(VirtualRegister(currentInstruction[3].u.operand))));
+ NEXT_OPCODE(op_enumerator_generic_pname);
+ }
+
+ case op_to_index_string: {
+ set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToIndexString,
+ get(VirtualRegister(currentInstruction[2].u.operand))));
+ NEXT_OPCODE(op_to_index_string);
+ }
+
+ case op_log_shadow_chicken_prologue: {
+ if (!m_inlineStackTop->m_inlineCallFrame)
+ addToGraph(LogShadowChickenPrologue, get(VirtualRegister(currentInstruction[1].u.operand)));
+ NEXT_OPCODE(op_log_shadow_chicken_prologue);
+ }
+
+ case op_log_shadow_chicken_tail: {
+ if (!m_inlineStackTop->m_inlineCallFrame) {
+ // FIXME: The right solution for inlining is to elide these whenever the tail call
+ // ends up being inlined.
+ // https://bugs.webkit.org/show_bug.cgi?id=155686
+ addToGraph(LogShadowChickenTail, get(VirtualRegister(currentInstruction[1].u.operand)), get(VirtualRegister(currentInstruction[2].u.operand)));
+ }
+ NEXT_OPCODE(op_log_shadow_chicken_tail);
+ }
+
default:
// Parse failed! This should not happen because the capabilities checker
// should have caught it.
@@ -3322,62 +5670,52 @@ void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BasicBlock*>& possibleT
{
ASSERT(!block->isLinked);
ASSERT(!block->isEmpty());
- Node* node = block->last();
+ Node* node = block->terminal();
ASSERT(node->isTerminal());
switch (node->op()) {
case Jump:
- node->setTakenBlock(blockForBytecodeOffset(possibleTargets, node->takenBytecodeOffsetDuringParsing()));
+ node->targetBlock() = blockForBytecodeOffset(possibleTargets, node->targetBytecodeOffsetDuringParsing());
break;
- case Branch:
- node->setTakenBlock(blockForBytecodeOffset(possibleTargets, node->takenBytecodeOffsetDuringParsing()));
- node->setNotTakenBlock(blockForBytecodeOffset(possibleTargets, node->notTakenBytecodeOffsetDuringParsing()));
+ case Branch: {
+ BranchData* data = node->branchData();
+ data->taken.block = blockForBytecodeOffset(possibleTargets, data->takenBytecodeIndex());
+ data->notTaken.block = blockForBytecodeOffset(possibleTargets, data->notTakenBytecodeIndex());
break;
+ }
- case Switch:
+ case Switch: {
+ SwitchData* data = node->switchData();
for (unsigned i = node->switchData()->cases.size(); i--;)
- node->switchData()->cases[i].target = blockForBytecodeOffset(possibleTargets, node->switchData()->cases[i].targetBytecodeIndex());
- node->switchData()->fallThrough = blockForBytecodeOffset(possibleTargets, node->switchData()->fallThroughBytecodeIndex());
+ data->cases[i].target.block = blockForBytecodeOffset(possibleTargets, data->cases[i].target.bytecodeIndex());
+ data->fallThrough.block = blockForBytecodeOffset(possibleTargets, data->fallThrough.bytecodeIndex());
break;
+ }
default:
break;
}
-#if !ASSERT_DISABLED
- block->isLinked = true;
-#endif
+ if (verbose)
+ dataLog("Marking ", RawPointer(block), " as linked (actually did linking)\n");
+ block->didLink();
}
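
Linking rewrites every unlinked edge from a bytecode index to the basic block that starts at that index. A sketch of the lookup this relies on; the real blockForBytecodeOffset works over the sorted possibleTargets list, while this stand-in simply scans it:

#include <vector>

struct SketchBlock { unsigned bytecodeBegin; };

SketchBlock* blockStartingAt(std::vector<SketchBlock*>& possibleTargets, unsigned bytecodeIndex)
{
    for (SketchBlock* block : possibleTargets) {
        if (block->bytecodeBegin == bytecodeIndex)
            return block;
    }
    return nullptr; // the parser asserts that a target block always exists
}
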
void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets)
{
for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
+ if (verbose)
+ dataLog("Attempting to link ", RawPointer(unlinkedBlocks[i].m_block), "\n");
if (unlinkedBlocks[i].m_needsNormalLinking) {
+ if (verbose)
+ dataLog(" Does need normal linking.\n");
linkBlock(unlinkedBlocks[i].m_block, possibleTargets);
unlinkedBlocks[i].m_needsNormalLinking = false;
}
}
}
-void ByteCodeParser::buildOperandMapsIfNecessary()
-{
- if (m_haveBuiltOperandMaps)
- return;
-
- for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); ++i)
- m_identifierMap.add(m_codeBlock->identifier(i).impl(), i);
- for (size_t i = 0; i < m_codeBlock->numberOfConstantRegisters(); ++i) {
- JSValue value = m_codeBlock->getConstant(i + FirstConstantRegisterIndex);
- if (!value)
- m_emptyJSValueIndex = i + FirstConstantRegisterIndex;
- else
- m_jsValueMap.add(JSValue::encode(value), i + FirstConstantRegisterIndex);
- }
-
- m_haveBuiltOperandMaps = true;
-}
-
ByteCodeParser::InlineStackEntry::InlineStackEntry(
ByteCodeParser* byteCodeParser,
CodeBlock* codeBlock,
@@ -3387,7 +5725,7 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
VirtualRegister returnValueVR,
VirtualRegister inlineCallFrameStart,
int argumentCountIncludingThis,
- CodeSpecializationKind kind)
+ InlineCallFrame::Kind kind)
: m_byteCodeParser(byteCodeParser)
, m_codeBlock(codeBlock)
, m_profiledBlock(profiledBlock)
@@ -3398,15 +5736,18 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
, m_caller(byteCodeParser->m_inlineStackTop)
{
{
- ConcurrentJITLocker locker(m_profiledBlock->m_lock);
+ ConcurrentJSLocker locker(m_profiledBlock->m_lock);
m_lazyOperands.initialize(locker, m_profiledBlock->lazyOperandValueProfiles());
m_exitProfile.initialize(locker, profiledBlock->exitProfile());
// We do this while holding the lock because we want to encourage StructureStubInfo's
// to be potentially added to operations and because the profiled block could be in the
// middle of LLInt->JIT tier-up in which case we would be adding the info's right now.
- if (m_profiledBlock->hasBaselineJITProfiling())
+ if (m_profiledBlock->hasBaselineJITProfiling()) {
m_profiledBlock->getStubInfoMap(locker, m_stubInfos);
+ m_profiledBlock->getCallLinkInfoMap(locker, m_callLinkInfos);
+ m_profiledBlock->getByValInfoMap(locker, m_byValInfos);
+ }
}
m_argumentPositions.resize(argumentCountIncludingThis);
@@ -3416,87 +5757,35 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
m_argumentPositions[i] = argumentPosition;
}
- // Track the code-block-global exit sites.
- if (m_exitProfile.hasExitSite(ArgumentsEscaped)) {
- byteCodeParser->m_graph.m_executablesWhoseArgumentsEscaped.add(
- codeBlock->ownerExecutable());
- }
-
if (m_caller) {
// Inline case.
ASSERT(codeBlock != byteCodeParser->m_codeBlock);
ASSERT(inlineCallFrameStart.isValid());
ASSERT(callsiteBlockHead);
- m_inlineCallFrame = byteCodeParser->m_graph.m_inlineCallFrames->add();
- initializeLazyWriteBarrierForInlineCallFrameExecutable(
- byteCodeParser->m_graph.m_plan.writeBarriers,
- m_inlineCallFrame->executable,
- byteCodeParser->m_codeBlock,
- m_inlineCallFrame,
- byteCodeParser->m_codeBlock->ownerExecutable(),
- codeBlock->ownerExecutable());
- m_inlineCallFrame->stackOffset = inlineCallFrameStart.offset() - JSStack::CallFrameHeaderSize;
+ m_inlineCallFrame = byteCodeParser->m_graph.m_plan.inlineCallFrames->add();
+
+ // The owner is the machine code block, and we already have a barrier on that when the
+ // plan finishes.
+ m_inlineCallFrame->baselineCodeBlock.setWithoutWriteBarrier(codeBlock->baselineVersion());
+ m_inlineCallFrame->setStackOffset(inlineCallFrameStart.offset() - CallFrame::headerSizeInRegisters);
if (callee) {
m_inlineCallFrame->calleeRecovery = ValueRecovery::constant(callee);
m_inlineCallFrame->isClosureCall = false;
} else
m_inlineCallFrame->isClosureCall = true;
- m_inlineCallFrame->caller = byteCodeParser->currentCodeOrigin();
- m_inlineCallFrame->arguments.resize(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries, yet.
- m_inlineCallFrame->isCall = isCall(kind);
-
- if (m_inlineCallFrame->caller.inlineCallFrame)
- m_inlineCallFrame->capturedVars = m_inlineCallFrame->caller.inlineCallFrame->capturedVars;
- else {
- for (int i = byteCodeParser->m_codeBlock->m_numVars; i--;) {
- if (byteCodeParser->m_codeBlock->isCaptured(virtualRegisterForLocal(i)))
- m_inlineCallFrame->capturedVars.set(i);
- }
- }
-
- for (int i = argumentCountIncludingThis; i--;) {
- VirtualRegister argument = virtualRegisterForArgument(i);
- if (codeBlock->isCaptured(argument))
- m_inlineCallFrame->capturedVars.set(VirtualRegister(argument.offset() + m_inlineCallFrame->stackOffset).toLocal());
- }
- for (size_t i = codeBlock->m_numVars; i--;) {
- VirtualRegister local = virtualRegisterForLocal(i);
- if (codeBlock->isCaptured(local))
- m_inlineCallFrame->capturedVars.set(VirtualRegister(local.offset() + m_inlineCallFrame->stackOffset).toLocal());
- }
-
- byteCodeParser->buildOperandMapsIfNecessary();
+ m_inlineCallFrame->directCaller = byteCodeParser->currentCodeOrigin();
+ m_inlineCallFrame->arguments.resizeToFit(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries, yet.
+ m_inlineCallFrame->kind = kind;
m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
- m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());
for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
- StringImpl* rep = codeBlock->identifier(i).impl();
- BorrowedIdentifierMap::AddResult result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_graph.identifiers().numberOfIdentifiers());
- if (result.isNewEntry)
- byteCodeParser->m_graph.identifiers().addLazily(rep);
- m_identifierRemap[i] = result.iterator->value;
- }
- for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) {
- JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex);
- if (!value) {
- if (byteCodeParser->m_emptyJSValueIndex == UINT_MAX) {
- byteCodeParser->m_emptyJSValueIndex = byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex;
- byteCodeParser->addConstant(JSValue());
- byteCodeParser->m_constants.append(ConstantRecord());
- }
- m_constantRemap[i] = byteCodeParser->m_emptyJSValueIndex;
- continue;
- }
- JSValueMap::AddResult result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex);
- if (result.isNewEntry) {
- byteCodeParser->addConstant(value);
- byteCodeParser->m_constants.append(ConstantRecord());
- }
- m_constantRemap[i] = result.iterator->value;
+ UniquedStringImpl* rep = codeBlock->identifier(i).impl();
+ unsigned index = byteCodeParser->m_graph.identifiers().ensure(rep);
+ m_identifierRemap[i] = index;
}
for (unsigned i = 0; i < codeBlock->numberOfConstantBuffers(); ++i) {
// If we inline the same code block multiple times, we don't want to needlessly
@@ -3528,13 +5817,10 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
m_inlineCallFrame = 0;
m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
- m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());
for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
m_identifierRemap[i] = i;
- for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i)
- m_constantRemap[i] = i + FirstConstantRegisterIndex;
for (size_t i = 0; i < codeBlock->numberOfConstantBuffers(); ++i)
m_constantBufferRemap[i] = i;
for (size_t i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i)
@@ -3542,14 +5828,13 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
m_callsiteBlockHeadNeedsLinking = false;
}
- for (size_t i = 0; i < m_constantRemap.size(); ++i)
- ASSERT(m_constantRemap[i] >= static_cast<unsigned>(FirstConstantRegisterIndex));
-
byteCodeParser->m_inlineStackTop = this;
}
void ByteCodeParser::parseCodeBlock()
{
+ clearCaches();
+
CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
if (m_graph.compilation()) {
@@ -3557,19 +5842,24 @@ void ByteCodeParser::parseCodeBlock()
*m_vm->m_perBytecodeProfiler, m_inlineStackTop->m_profiledBlock);
}
- bool shouldDumpBytecode = Options::dumpBytecodeAtDFGTime();
- if (shouldDumpBytecode) {
+ if (UNLIKELY(Options::dumpSourceAtDFGTime())) {
+ Vector<DeferredSourceDump>& deferredSourceDump = m_graph.m_plan.callback->ensureDeferredSourceDump();
+ if (inlineCallFrame()) {
+ DeferredSourceDump dump(codeBlock->baselineVersion(), m_codeBlock, JITCode::DFGJIT, inlineCallFrame()->directCaller);
+ deferredSourceDump.append(dump);
+ } else
+ deferredSourceDump.append(DeferredSourceDump(codeBlock->baselineVersion()));
+ }
+
+ if (Options::dumpBytecodeAtDFGTime()) {
dataLog("Parsing ", *codeBlock);
if (inlineCallFrame()) {
dataLog(
" for inlining at ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT),
- " ", inlineCallFrame()->caller);
+ " ", inlineCallFrame()->directCaller);
}
dataLog(
- ": captureCount = ", codeBlock->symbolTable() ? codeBlock->symbolTable()->captureCount() : 0,
- ", needsFullScopeChain = ", codeBlock->needsFullScopeChain(),
- ", needsActivation = ", codeBlock->ownerExecutable()->needsActivation(),
- ", isStrictMode = ", codeBlock->ownerExecutable()->isStrictMode(), "\n");
+ ", isStrictMode = ", codeBlock->ownerScriptExecutable()->isStrictMode(), "\n");
codeBlock->baselineVersion()->dumpBytecode();
}
@@ -3607,21 +5897,26 @@ void ByteCodeParser::parseCodeBlock()
m_currentBlock = m_graph.lastBlock();
m_currentBlock->bytecodeBegin = m_currentIndex;
} else {
- RefPtr<BasicBlock> block = adoptRef(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals));
- m_currentBlock = block.get();
+ Ref<BasicBlock> block = adoptRef(*new BasicBlock(m_currentIndex, m_numArguments, m_numLocals, 1));
+ m_currentBlock = block.ptr();
// This assertion checks two things:
// 1) If the bytecodeBegin is greater than currentIndex, then something has gone
// horribly wrong. So, we're probably generating incorrect code.
// 2) If the bytecodeBegin is equal to the currentIndex, then we failed to do
// a peephole coalescing of this block in the if statement above. So, we're
// generating suboptimal code and leaving more work for the CFG simplifier.
- ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_inlineStackTop->m_unlinkedBlocks.last().m_block->bytecodeBegin < m_currentIndex);
- m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
- m_inlineStackTop->m_blockLinkingTargets.append(block.get());
+ if (!m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
+ unsigned lastBegin =
+ m_inlineStackTop->m_unlinkedBlocks.last().m_block->bytecodeBegin;
+ ASSERT_UNUSED(
+ lastBegin, lastBegin == UINT_MAX || lastBegin < m_currentIndex);
+ }
+ m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(block.ptr()));
+ m_inlineStackTop->m_blockLinkingTargets.append(block.ptr());
// The first block is definitely an OSR target.
if (!m_graph.numBlocks())
block->isOSRTarget = true;
- m_graph.appendBlock(block);
+ m_graph.appendBlock(WTFMove(block));
prepareToParseBlock();
}
}
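
(The ASSERT_UNUSED added above encodes an ordering invariant on the unlinked blocks. Restated as a standalone predicate it is roughly the following; blockOrderIsSane is a hypothetical name that exists only to spell the condition out.)

// Hypothetical restatement of the assertion above: unlinked blocks appear in
// bytecode order, except that a block may still carry the placeholder
// bytecodeBegin of UINT_MAX before it has been linked.
static bool blockOrderIsSane(const Vector<UnlinkedBlock>& unlinkedBlocks, unsigned currentIndex)
{
    if (unlinkedBlocks.isEmpty())
        return true;
    unsigned lastBegin = unlinkedBlocks.last().m_block->bytecodeBegin;
    return lastBegin == UINT_MAX || lastBegin < currentIndex;
}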
@@ -3636,17 +5931,23 @@ void ByteCodeParser::parseCodeBlock()
// are at the end of an inline function, or we realized that we
// should stop parsing because there was a return in the first
// basic block.
- ASSERT(m_currentBlock->isEmpty() || m_currentBlock->last()->isTerminal() || (m_currentIndex == codeBlock->instructions().size() && inlineCallFrame()) || !shouldContinueParsing);
+ ASSERT(m_currentBlock->isEmpty() || m_currentBlock->terminal() || (m_currentIndex == codeBlock->instructions().size() && inlineCallFrame()) || !shouldContinueParsing);
- if (!shouldContinueParsing)
+ if (!shouldContinueParsing) {
+ if (Options::verboseDFGByteCodeParsing())
+ dataLog("Done parsing ", *codeBlock, "\n");
return;
+ }
- m_currentBlock = 0;
+ m_currentBlock = nullptr;
} while (m_currentIndex < limit);
}
// Should have reached the end of the instructions.
ASSERT(m_currentIndex == codeBlock->instructions().size());
+
+ if (Options::verboseDFGByteCodeParsing())
+ dataLog("Done parsing ", *codeBlock, " (fell off end)\n");
}
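
(The comment and assertion in the loop above list the only legal ways the parsing loop can stop. Written out as a hypothetical predicate, purely to make the invariant explicit:)

// Hypothetical predicate mirroring the ASSERT in the parsing loop: when we stop,
// the current block is empty, already ends in a terminal, we fell off the end of
// an inlined code block, or parsing was cut short deliberately.
static bool atValidParseBoundary(BasicBlock* block, unsigned currentIndex,
    CodeBlock* codeBlock, bool isInlined, bool shouldContinueParsing)
{
    return block->isEmpty()
        || block->terminal()
        || (currentIndex == codeBlock->instructions().size() && isInlined)
        || !shouldContinueParsing;
}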
bool ByteCodeParser::parse()
@@ -3654,25 +5955,21 @@ bool ByteCodeParser::parse()
// Set during construction.
ASSERT(!m_currentIndex);
- if (m_codeBlock->captureCount()) {
- SymbolTable* symbolTable = m_codeBlock->symbolTable();
- ConcurrentJITLocker locker(symbolTable->m_lock);
- SymbolTable::Map::iterator iter = symbolTable->begin(locker);
- SymbolTable::Map::iterator end = symbolTable->end(locker);
- for (; iter != end; ++iter) {
- VariableWatchpointSet* set = iter->value.watchpointSet();
- if (!set)
- continue;
- size_t index = static_cast<size_t>(VirtualRegister(iter->value.getIndex()).toLocal());
- while (m_localWatchpoints.size() <= index)
- m_localWatchpoints.append(nullptr);
- m_localWatchpoints[index] = set;
- }
+ if (Options::verboseDFGByteCodeParsing())
+ dataLog("Parsing ", *m_codeBlock, "\n");
+
+ m_dfgCodeBlock = m_graph.m_plan.profiledDFGCodeBlock;
+ if (isFTL(m_graph.m_plan.mode) && m_dfgCodeBlock
+ && Options::usePolyvariantDevirtualization()) {
+ if (Options::usePolyvariantCallInlining())
+ CallLinkStatus::computeDFGStatuses(m_dfgCodeBlock, m_callContextMap);
+ if (Options::usePolyvariantByIdInlining())
+ m_dfgCodeBlock->getStubInfoMap(m_dfgStubInfos);
}
InlineStackEntry inlineStackEntry(
this, m_codeBlock, m_profiledBlock, 0, 0, VirtualRegister(), VirtualRegister(),
- m_codeBlock->numParameters(), CodeForCall);
+ m_codeBlock->numParameters(), InlineCallFrame::Call);
parseCodeBlock();
@@ -3698,7 +5995,6 @@ bool ByteCodeParser::parse()
bool parse(Graph& graph)
{
- SamplingRegion samplingRegion("DFG Parsing");
return ByteCodeParser(graph).parse();
}
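
(The setup added to ByteCodeParser::parse() above gates the harvesting of polyvariant profiles on the FTL tier, on the presence of a previously profiled DFG code block, and on the relevant options. As a standalone sketch, with maybeEnablePolyvariantProfiles being a hypothetical name and the types taken from the surrounding code:)

// Hypothetical sketch of the gating logic added to ByteCodeParser::parse():
// only FTL compiles that have a profiled DFG code block harvest DFG-level call
// statuses and by-id stub info, and only when the matching options are enabled.
static void maybeEnablePolyvariantProfiles(Graph& graph, CodeBlock* dfgCodeBlock,
    CallLinkStatus::ContextMap& callContextMap, StubInfoMap& dfgStubInfos)
{
    if (!isFTL(graph.m_plan.mode) || !dfgCodeBlock || !Options::usePolyvariantDevirtualization())
        return;
    if (Options::usePolyvariantCallInlining())
        CallLinkStatus::computeDFGStatuses(dfgCodeBlock, callContextMap);
    if (Options::usePolyvariantByIdInlining())
        dfgCodeBlock->getStubInfoMap(dfgStubInfos);
}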