author     Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
committer  Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
commit     1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree       46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/dfg/DFGNode.h
parent     32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
Diffstat (limited to 'Source/JavaScriptCore/dfg/DFGNode.h')
-rw-r--r--  Source/JavaScriptCore/dfg/DFGNode.h  1928
1 file changed, 1504 insertions(+), 424 deletions(-)
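A recurring pattern in this patch is the replacement of the raw `uintptr_t m_opInfo` / `m_opInfo2` fields with a small wrapper that is assigned from pointers or integers and read back through an explicit `m_opInfo.as<T>()` cast, instead of scattered `bitwise_cast`/`reinterpret_cast` calls. The following is a minimal standalone sketch of that idiom for orientation only; the names and layout are illustrative assumptions, not the actual `OpInfoWrapper` defined in the patched header.

```cpp
// Illustrative sketch (not part of the patch) of a tagged-storage wrapper
// read back via an explicit as<T>() accessor. C++17.
#include <cstdint>
#include <type_traits>

struct OpInfoSketch {
    uint64_t bits { 0 };

    OpInfoSketch() = default;
    OpInfoSketch(uint32_t value) : bits(value) { }                              // store an integer payload
    OpInfoSketch(void* pointer) : bits(reinterpret_cast<uintptr_t>(pointer)) { } // store a pointer payload

    // Reinterpret the stored bits as the requested pointer or integral type.
    template<typename T>
    T as() const
    {
        if constexpr (std::is_pointer_v<T>)
            return reinterpret_cast<T>(static_cast<uintptr_t>(bits));
        else
            return static_cast<T>(bits);
    }
};

// Usage, mirroring the accessors seen in the diff:
//   OpInfoSketch info(someStackAccessDataPointer);
//   auto* data = info.as<StackAccessData*>();
//   unsigned id = OpInfoSketch(42u).as<unsigned>();
```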
diff --git a/Source/JavaScriptCore/dfg/DFGNode.h b/Source/JavaScriptCore/dfg/DFGNode.h
index 55a9ede8d..81d540a40 100644
--- a/Source/JavaScriptCore/dfg/DFGNode.h
+++ b/Source/JavaScriptCore/dfg/DFGNode.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,47 +23,76 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DFGNode_h
-#define DFGNode_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(DFG_JIT)
+#include "BasicBlockLocation.h"
#include "CodeBlock.h"
-#include "CodeOrigin.h"
#include "DFGAbstractValue.h"
#include "DFGAdjacencyList.h"
#include "DFGArithMode.h"
#include "DFGArrayMode.h"
#include "DFGCommon.h"
+#include "DFGEpoch.h"
#include "DFGLazyJSValue.h"
+#include "DFGMultiGetByOffsetData.h"
#include "DFGNodeFlags.h"
+#include "DFGNodeOrigin.h"
#include "DFGNodeType.h"
+#include "DFGObjectMaterializationData.h"
+#include "DFGOpInfo.h"
+#include "DFGRegisteredStructure.h"
+#include "DFGRegisteredStructureSet.h"
+#include "DFGTransition.h"
+#include "DFGUseKind.h"
#include "DFGVariableAccessData.h"
+#include "GetByIdVariant.h"
#include "JSCJSValue.h"
#include "Operands.h"
+#include "PutByIdVariant.h"
#include "SpeculatedType.h"
#include "StructureSet.h"
+#include "TypeLocation.h"
#include "ValueProfile.h"
+#include <type_traits>
#include <wtf/ListDump.h>
-namespace JSC { namespace DFG {
+namespace JSC {
+
+namespace DOMJIT {
+class GetterSetter;
+class Patchpoint;
+class CallDOMGetterPatchpoint;
+class Signature;
+}
+
+namespace Profiler {
+class ExecutionCounter;
+}
+
+namespace DFG {
class Graph;
+class PromotedLocationDescriptor;
struct BasicBlock;
-struct StructureTransitionData {
- Structure* previousStructure;
- Structure* newStructure;
-
- StructureTransitionData() { }
+struct StorageAccessData {
+ PropertyOffset offset;
+ unsigned identifierNumber;
+
+ // This needs to know the inferred type. For puts, this is necessary because we need to remember
+ // what check is needed. For gets, this is necessary because otherwise AI might forget what type is
+ // guaranteed.
+ InferredType::Descriptor inferredType;
+};
+
+struct MultiPutByOffsetData {
+ unsigned identifierNumber;
+ Vector<PutByIdVariant, 2> variants;
- StructureTransitionData(Structure* previousStructure, Structure* newStructure)
- : previousStructure(previousStructure)
- , newStructure(newStructure)
- {
- }
+ bool writesStructures() const;
+ bool reallocatesStorage() const;
};
struct NewArrayBufferData {
@@ -72,6 +101,55 @@ struct NewArrayBufferData {
IndexingType indexingType;
};
+struct BranchTarget {
+ BranchTarget()
+ : block(0)
+ , count(PNaN)
+ {
+ }
+
+ explicit BranchTarget(BasicBlock* block)
+ : block(block)
+ , count(PNaN)
+ {
+ }
+
+ void setBytecodeIndex(unsigned bytecodeIndex)
+ {
+ block = bitwise_cast<BasicBlock*>(static_cast<uintptr_t>(bytecodeIndex));
+ }
+ unsigned bytecodeIndex() const { return bitwise_cast<uintptr_t>(block); }
+
+ void dump(PrintStream&) const;
+
+ BasicBlock* block;
+ float count;
+};
+
+struct BranchData {
+ static BranchData withBytecodeIndices(
+ unsigned takenBytecodeIndex, unsigned notTakenBytecodeIndex)
+ {
+ BranchData result;
+ result.taken.block = bitwise_cast<BasicBlock*>(static_cast<uintptr_t>(takenBytecodeIndex));
+ result.notTaken.block = bitwise_cast<BasicBlock*>(static_cast<uintptr_t>(notTakenBytecodeIndex));
+ return result;
+ }
+
+ unsigned takenBytecodeIndex() const { return taken.bytecodeIndex(); }
+ unsigned notTakenBytecodeIndex() const { return notTaken.bytecodeIndex(); }
+
+ BasicBlock*& forCondition(bool condition)
+ {
+ if (condition)
+ return taken.block;
+ return notTaken.block;
+ }
+
+ BranchTarget taken;
+ BranchTarget notTaken;
+};
+
// The SwitchData and associated data structures duplicate the information in
// JumpTable. The DFG may ultimately end up using the JumpTable, though it may
// instead decide to do something different - this is entirely up to the DFG.
@@ -85,7 +163,6 @@ struct NewArrayBufferData {
// values.
struct SwitchCase {
SwitchCase()
- : target(0)
{
}
@@ -99,20 +176,12 @@ struct SwitchCase {
{
SwitchCase result;
result.value = value;
- result.target = bitwise_cast<BasicBlock*>(static_cast<uintptr_t>(bytecodeIndex));
+ result.target.setBytecodeIndex(bytecodeIndex);
return result;
}
- unsigned targetBytecodeIndex() const { return bitwise_cast<uintptr_t>(target); }
-
LazyJSValue value;
- BasicBlock* target;
-};
-
-enum SwitchKind {
- SwitchImm,
- SwitchChar,
- SwitchString
+ BranchTarget target;
};
struct SwitchData {
@@ -120,128 +189,175 @@ struct SwitchData {
// constructing this should make sure to initialize everything they
// care about manually.
SwitchData()
- : fallThrough(0)
- , kind(static_cast<SwitchKind>(-1))
+ : kind(static_cast<SwitchKind>(-1))
, switchTableIndex(UINT_MAX)
, didUseJumpTable(false)
{
}
- void setFallThroughBytecodeIndex(unsigned bytecodeIndex)
- {
- fallThrough = bitwise_cast<BasicBlock*>(static_cast<uintptr_t>(bytecodeIndex));
- }
- unsigned fallThroughBytecodeIndex() const { return bitwise_cast<uintptr_t>(fallThrough); }
-
Vector<SwitchCase> cases;
- BasicBlock* fallThrough;
+ BranchTarget fallThrough;
SwitchKind kind;
unsigned switchTableIndex;
bool didUseJumpTable;
};
-// This type used in passing an immediate argument to Node constructor;
-// distinguishes an immediate value (typically an index into a CodeBlock data structure -
-// a constant index, argument, or identifier) from a Node*.
-struct OpInfo {
- explicit OpInfo(int32_t value) : m_value(static_cast<uintptr_t>(value)) { }
- explicit OpInfo(uint32_t value) : m_value(static_cast<uintptr_t>(value)) { }
-#if OS(DARWIN) || USE(JSVALUE64)
- explicit OpInfo(size_t value) : m_value(static_cast<uintptr_t>(value)) { }
-#endif
- explicit OpInfo(void* value) : m_value(reinterpret_cast<uintptr_t>(value)) { }
- uintptr_t m_value;
+struct CallVarargsData {
+ int firstVarArgOffset;
+};
+
+struct LoadVarargsData {
+ VirtualRegister start; // Local for the first element. This is the first actual argument, not this.
+ VirtualRegister count; // Local for the count.
+ VirtualRegister machineStart;
+ VirtualRegister machineCount;
+ unsigned offset; // Which array element to start with. Usually this is 0.
+ unsigned mandatoryMinimum; // The number of elements on the stack that must be initialized; if the array is too short then the missing elements must get undefined. Does not include "this".
+ unsigned limit; // Maximum number of elements to load. Includes "this".
+};
+
+struct StackAccessData {
+ StackAccessData()
+ : format(DeadFlush)
+ {
+ }
+
+ StackAccessData(VirtualRegister local, FlushFormat format)
+ : local(local)
+ , format(format)
+ {
+ }
+
+ VirtualRegister local;
+ VirtualRegister machineLocal;
+ FlushFormat format;
+
+ FlushedAt flushedAt() { return FlushedAt(format, machineLocal); }
+};
+
+struct CallDOMGetterData {
+ DOMJIT::GetterSetter* domJIT { nullptr };
+ DOMJIT::CallDOMGetterPatchpoint* patchpoint { nullptr };
+ unsigned identifierNumber { 0 };
};
// === Node ===
//
// Node represents a single operation in the data flow graph.
struct Node {
+public:
enum VarArgTag { VarArg };
Node() { }
- Node(NodeType op, CodeOrigin codeOrigin, const AdjacencyList& children)
- : codeOrigin(codeOrigin)
- , codeOriginForExitTarget(codeOrigin)
+ Node(NodeType op, NodeOrigin nodeOrigin, const AdjacencyList& children)
+ : origin(nodeOrigin)
, children(children)
, m_virtualRegister(VirtualRegister())
, m_refCount(1)
, m_prediction(SpecNone)
+ , owner(nullptr)
{
- misc.replacement = 0;
+ m_misc.replacement = nullptr;
setOpAndDefaultFlags(op);
}
// Construct a node with up to 3 children, no immediate value.
- Node(NodeType op, CodeOrigin codeOrigin, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
- : codeOrigin(codeOrigin)
- , codeOriginForExitTarget(codeOrigin)
+ Node(NodeType op, NodeOrigin nodeOrigin, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
+ : origin(nodeOrigin)
, children(AdjacencyList::Fixed, child1, child2, child3)
, m_virtualRegister(VirtualRegister())
, m_refCount(1)
, m_prediction(SpecNone)
- , m_opInfo(0)
- , m_opInfo2(0)
+ , owner(nullptr)
{
- misc.replacement = 0;
+ m_misc.replacement = nullptr;
+ setOpAndDefaultFlags(op);
+ ASSERT(!(m_flags & NodeHasVarArgs));
+ }
+
+ // Construct a node with up to 3 children, no immediate value.
+ Node(NodeFlags result, NodeType op, NodeOrigin nodeOrigin, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
+ : origin(nodeOrigin)
+ , children(AdjacencyList::Fixed, child1, child2, child3)
+ , m_virtualRegister(VirtualRegister())
+ , m_refCount(1)
+ , m_prediction(SpecNone)
+ , owner(nullptr)
+ {
+ m_misc.replacement = nullptr;
+ setOpAndDefaultFlags(op);
+ setResult(result);
+ ASSERT(!(m_flags & NodeHasVarArgs));
+ }
+
+ // Construct a node with up to 3 children and an immediate value.
+ Node(NodeType op, NodeOrigin nodeOrigin, OpInfo imm, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
+ : origin(nodeOrigin)
+ , children(AdjacencyList::Fixed, child1, child2, child3)
+ , m_virtualRegister(VirtualRegister())
+ , m_refCount(1)
+ , m_prediction(SpecNone)
+ , m_opInfo(imm.m_value)
+ , owner(nullptr)
+ {
+ m_misc.replacement = nullptr;
setOpAndDefaultFlags(op);
ASSERT(!(m_flags & NodeHasVarArgs));
}
// Construct a node with up to 3 children and an immediate value.
- Node(NodeType op, CodeOrigin codeOrigin, OpInfo imm, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
- : codeOrigin(codeOrigin)
- , codeOriginForExitTarget(codeOrigin)
+ Node(NodeFlags result, NodeType op, NodeOrigin nodeOrigin, OpInfo imm, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
+ : origin(nodeOrigin)
, children(AdjacencyList::Fixed, child1, child2, child3)
, m_virtualRegister(VirtualRegister())
, m_refCount(1)
, m_prediction(SpecNone)
, m_opInfo(imm.m_value)
- , m_opInfo2(0)
+ , owner(nullptr)
{
- misc.replacement = 0;
+ m_misc.replacement = nullptr;
setOpAndDefaultFlags(op);
+ setResult(result);
ASSERT(!(m_flags & NodeHasVarArgs));
}
// Construct a node with up to 3 children and two immediate values.
- Node(NodeType op, CodeOrigin codeOrigin, OpInfo imm1, OpInfo imm2, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
- : codeOrigin(codeOrigin)
- , codeOriginForExitTarget(codeOrigin)
+ Node(NodeType op, NodeOrigin nodeOrigin, OpInfo imm1, OpInfo imm2, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
+ : origin(nodeOrigin)
, children(AdjacencyList::Fixed, child1, child2, child3)
, m_virtualRegister(VirtualRegister())
, m_refCount(1)
, m_prediction(SpecNone)
, m_opInfo(imm1.m_value)
, m_opInfo2(imm2.m_value)
+ , owner(nullptr)
{
- misc.replacement = 0;
+ m_misc.replacement = nullptr;
setOpAndDefaultFlags(op);
ASSERT(!(m_flags & NodeHasVarArgs));
}
// Construct a node with a variable number of children and two immediate values.
- Node(VarArgTag, NodeType op, CodeOrigin codeOrigin, OpInfo imm1, OpInfo imm2, unsigned firstChild, unsigned numChildren)
- : codeOrigin(codeOrigin)
- , codeOriginForExitTarget(codeOrigin)
+ Node(VarArgTag, NodeType op, NodeOrigin nodeOrigin, OpInfo imm1, OpInfo imm2, unsigned firstChild, unsigned numChildren)
+ : origin(nodeOrigin)
, children(AdjacencyList::Variable, firstChild, numChildren)
, m_virtualRegister(VirtualRegister())
, m_refCount(1)
, m_prediction(SpecNone)
, m_opInfo(imm1.m_value)
, m_opInfo2(imm2.m_value)
+ , owner(nullptr)
{
- misc.replacement = 0;
+ m_misc.replacement = nullptr;
setOpAndDefaultFlags(op);
ASSERT(m_flags & NodeHasVarArgs);
}
NodeType op() const { return static_cast<NodeType>(m_op); }
NodeFlags flags() const { return m_flags; }
-
- // This is not a fast method.
- unsigned index() const;
+
+ unsigned index() const { return m_index; }
void setOp(NodeType op)
{
@@ -255,7 +371,6 @@ struct Node {
bool mergeFlags(NodeFlags flags)
{
- ASSERT(!(flags & NodeDoesNotExit));
NodeFlags newFlags = m_flags | flags;
if (newFlags == m_flags)
return false;
@@ -265,7 +380,6 @@ struct Node {
bool filterFlags(NodeFlags flags)
{
- ASSERT(flags & NodeDoesNotExit);
NodeFlags newFlags = m_flags & flags;
if (newFlags == m_flags)
return false;
@@ -278,160 +392,242 @@ struct Node {
return filterFlags(~flags);
}
+ void setResult(NodeFlags result)
+ {
+ ASSERT(!(result & ~NodeResultMask));
+ clearFlags(NodeResultMask);
+ mergeFlags(result);
+ }
+
+ NodeFlags result() const
+ {
+ return flags() & NodeResultMask;
+ }
+
void setOpAndDefaultFlags(NodeType op)
{
m_op = op;
m_flags = defaultFlags(op);
}
- void convertToPhantom()
+ void remove();
+
+ void convertToCheckStructure(RegisteredStructureSet* set)
{
- setOpAndDefaultFlags(Phantom);
+ setOpAndDefaultFlags(CheckStructure);
+ m_opInfo = set;
}
- void convertToPhantomUnchecked()
+ void convertToCheckStructureImmediate(Node* structure)
{
- setOpAndDefaultFlags(Phantom);
+ ASSERT(op() == CheckStructure);
+ m_op = CheckStructureImmediate;
+ children.setChild1(Edge(structure, CellUse));
}
-
- void convertToIdentity()
+
+ void replaceWith(Node* other)
{
- RELEASE_ASSERT(child1());
- RELEASE_ASSERT(!child2());
- setOpAndDefaultFlags(Identity);
+ remove();
+ setReplacement(other);
}
+ void convertToIdentity();
+ void convertToIdentityOn(Node*);
+
bool mustGenerate()
{
return m_flags & NodeMustGenerate;
}
- void setCanExit(bool exits)
- {
- if (exits)
- m_flags &= ~NodeDoesNotExit;
- else
- m_flags |= NodeDoesNotExit;
- }
-
- bool canExit()
- {
- return !(m_flags & NodeDoesNotExit);
- }
-
bool isConstant()
{
- return op() == JSConstant;
- }
-
- bool isWeakConstant()
- {
- return op() == WeakJSConstant;
- }
-
- bool isStronglyProvedConstantIn(InlineCallFrame* inlineCallFrame)
- {
- return !!(flags() & NodeIsStaticConstant)
- && codeOrigin.inlineCallFrame == inlineCallFrame;
- }
-
- bool isStronglyProvedConstantIn(const CodeOrigin& codeOrigin)
- {
- return isStronglyProvedConstantIn(codeOrigin.inlineCallFrame);
- }
-
- bool isPhantomArguments()
- {
- return op() == PhantomArguments;
+ switch (op()) {
+ case JSConstant:
+ case DoubleConstant:
+ case Int52Constant:
+ return true;
+ default:
+ return false;
+ }
}
bool hasConstant()
{
switch (op()) {
case JSConstant:
- case WeakJSConstant:
- case PhantomArguments:
+ case DoubleConstant:
+ case Int52Constant:
+ return true;
+
+ case PhantomDirectArguments:
+ case PhantomClonedArguments:
+ // These pretend to be the empty value constant for the benefit of the DFG backend, which
+ // otherwise wouldn't take kindly to a node that doesn't compute a value.
return true;
+
default:
return false;
}
}
- unsigned constantNumber()
+ FrozenValue* constant()
{
- ASSERT(isConstant());
- return m_opInfo;
- }
-
- void convertToConstant(unsigned constantNumber)
- {
- m_op = JSConstant;
- m_flags &= ~(NodeMustGenerate | NodeMightClobber | NodeClobbersWorld);
- m_opInfo = constantNumber;
- children.reset();
+ ASSERT(hasConstant());
+
+ if (op() == PhantomDirectArguments || op() == PhantomClonedArguments) {
+ // These pretend to be the empty value constant for the benefit of the DFG backend, which
+ // otherwise wouldn't take kindly to a node that doesn't compute a value.
+ return FrozenValue::emptySingleton();
+ }
+
+ return m_opInfo.as<FrozenValue*>();
}
- void convertToWeakConstant(JSCell* cell)
+ // Don't call this directly - use Graph::convertToConstant() instead!
+ void convertToConstant(FrozenValue* value)
{
- m_op = WeakJSConstant;
- m_flags &= ~(NodeMustGenerate | NodeMightClobber | NodeClobbersWorld);
- m_opInfo = bitwise_cast<uintptr_t>(cell);
+ if (hasDoubleResult())
+ m_op = DoubleConstant;
+ else if (hasInt52Result())
+ m_op = Int52Constant;
+ else
+ m_op = JSConstant;
+ m_flags &= ~NodeMustGenerate;
+ m_opInfo = value;
children.reset();
}
+
+ void convertToLazyJSConstant(Graph&, LazyJSValue);
void convertToConstantStoragePointer(void* pointer)
{
ASSERT(op() == GetIndexedPropertyStorage);
m_op = ConstantStoragePointer;
- m_opInfo = bitwise_cast<uintptr_t>(pointer);
+ m_opInfo = pointer;
+ children.reset();
}
void convertToGetLocalUnlinked(VirtualRegister local)
{
m_op = GetLocalUnlinked;
- m_flags &= ~(NodeMustGenerate | NodeMightClobber | NodeClobbersWorld);
+ m_flags &= ~NodeMustGenerate;
m_opInfo = local.offset();
m_opInfo2 = VirtualRegister().offset();
children.reset();
}
- void convertToStructureTransitionWatchpoint(Structure* structure)
+ void convertToPutStack(StackAccessData* data)
{
- ASSERT(m_op == CheckStructure || m_op == ArrayifyToStructure);
- ASSERT(!child2());
- ASSERT(!child3());
- m_opInfo = bitwise_cast<uintptr_t>(structure);
- m_op = StructureTransitionWatchpoint;
+ m_op = PutStack;
+ m_flags |= NodeMustGenerate;
+ m_opInfo = data;
+ m_opInfo2 = OpInfoWrapper();
}
- void convertToStructureTransitionWatchpoint()
+ void convertToGetStack(StackAccessData* data)
{
- convertToStructureTransitionWatchpoint(structureSet().singletonStructure());
+ m_op = GetStack;
+ m_flags &= ~NodeMustGenerate;
+ m_opInfo = data;
+ m_opInfo2 = OpInfoWrapper();
+ children.reset();
}
- void convertToGetByOffset(unsigned storageAccessDataIndex, Edge storage)
+ void convertToGetByOffset(StorageAccessData& data, Edge storage, Edge base)
{
- ASSERT(m_op == GetById || m_op == GetByIdFlush);
- m_opInfo = storageAccessDataIndex;
- children.setChild2(children.child1());
- children.child2().setUseKind(KnownCellUse);
+ ASSERT(m_op == GetById || m_op == GetByIdFlush || m_op == MultiGetByOffset);
+ m_opInfo = &data;
children.setChild1(storage);
+ children.setChild2(base);
m_op = GetByOffset;
- m_flags &= ~NodeClobbersWorld;
+ m_flags &= ~NodeMustGenerate;
+ }
+
+ void convertToMultiGetByOffset(MultiGetByOffsetData* data)
+ {
+ ASSERT(m_op == GetById || m_op == GetByIdFlush);
+ m_opInfo = data;
+ child1().setUseKind(CellUse);
+ m_op = MultiGetByOffset;
+ ASSERT(m_flags & NodeMustGenerate);
}
- void convertToPutByOffset(unsigned storageAccessDataIndex, Edge storage)
+ void convertToPutByOffset(StorageAccessData& data, Edge storage, Edge base)
{
- ASSERT(m_op == PutById || m_op == PutByIdDirect);
- m_opInfo = storageAccessDataIndex;
+ ASSERT(m_op == PutById || m_op == PutByIdDirect || m_op == PutByIdFlush || m_op == MultiPutByOffset);
+ m_opInfo = &data;
children.setChild3(children.child2());
- children.setChild2(children.child1());
+ children.setChild2(base);
children.setChild1(storage);
m_op = PutByOffset;
- m_flags &= ~NodeClobbersWorld;
}
- void convertToPhantomLocal()
+ void convertToMultiPutByOffset(MultiPutByOffsetData* data)
+ {
+ ASSERT(m_op == PutById || m_op == PutByIdDirect || m_op == PutByIdFlush);
+ m_opInfo = data;
+ m_op = MultiPutByOffset;
+ }
+
+ void convertToPutHint(const PromotedLocationDescriptor&, Node* base, Node* value);
+
+ void convertToPutByOffsetHint();
+ void convertToPutStructureHint(Node* structure);
+ void convertToPutClosureVarHint();
+
+ void convertToPhantomNewObject()
+ {
+ ASSERT(m_op == NewObject || m_op == MaterializeNewObject);
+ m_op = PhantomNewObject;
+ m_flags &= ~NodeHasVarArgs;
+ m_flags |= NodeMustGenerate;
+ m_opInfo = OpInfoWrapper();
+ m_opInfo2 = OpInfoWrapper();
+ children = AdjacencyList();
+ }
+
+ void convertToPhantomNewFunction()
+ {
+ ASSERT(m_op == NewFunction || m_op == NewGeneratorFunction || m_op == NewAsyncFunction);
+ m_op = PhantomNewFunction;
+ m_flags |= NodeMustGenerate;
+ m_opInfo = OpInfoWrapper();
+ m_opInfo2 = OpInfoWrapper();
+ children = AdjacencyList();
+ }
+
+ void convertToPhantomNewGeneratorFunction()
+ {
+ ASSERT(m_op == NewGeneratorFunction);
+ m_op = PhantomNewGeneratorFunction;
+ m_flags |= NodeMustGenerate;
+ m_opInfo = OpInfoWrapper();
+ m_opInfo2 = OpInfoWrapper();
+ children = AdjacencyList();
+ }
+
+ void convertToPhantomNewAsyncFunction()
+ {
+ ASSERT(m_op == NewAsyncFunction);
+ m_op = PhantomNewAsyncFunction;
+ m_flags |= NodeMustGenerate;
+ m_opInfo = OpInfoWrapper();
+ m_opInfo2 = OpInfoWrapper();
+ children = AdjacencyList();
+ }
+
+ void convertToPhantomCreateActivation()
+ {
+ ASSERT(m_op == CreateActivation || m_op == MaterializeCreateActivation);
+ m_op = PhantomCreateActivation;
+ m_flags &= ~NodeHasVarArgs;
+ m_flags |= NodeMustGenerate;
+ m_opInfo = OpInfoWrapper();
+ m_opInfo2 = OpInfoWrapper();
+ children = AdjacencyList();
+ }
+
+ void convertPhantomToPhantomLocal()
{
ASSERT(m_op == Phantom && (child1()->op() == Phi || child1()->op() == SetLocal || child1()->op() == SetArgument));
m_op = PhantomLocal;
@@ -439,12 +635,19 @@ struct Node {
children.setChild1(Edge());
}
+ void convertFlushToPhantomLocal()
+ {
+ ASSERT(m_op == Flush);
+ m_op = PhantomLocal;
+ children = AdjacencyList();
+ }
+
void convertToGetLocal(VariableAccessData* variable, Node* phi)
{
ASSERT(m_op == GetLocalUnlinked);
m_op = GetLocal;
- m_opInfo = bitwise_cast<uintptr_t>(variable);
- m_opInfo2 = 0;
+ m_opInfo = variable;
+ m_opInfo2 = OpInfoWrapper();
children.setChild1(Edge(phi));
}
@@ -453,53 +656,166 @@ struct Node {
ASSERT(m_op == ToPrimitive);
m_op = ToString;
}
+
+ void convertToArithNegate()
+ {
+ ASSERT(m_op == ArithAbs && child1().useKind() == Int32Use);
+ m_op = ArithNegate;
+ }
- JSCell* weakConstant()
+ void convertToDirectCall(FrozenValue*);
+
+ void convertToCallDOM(Graph&);
+
+ JSValue asJSValue()
+ {
+ return constant()->value();
+ }
+
+ bool isInt32Constant()
+ {
+ return isConstant() && constant()->value().isInt32();
+ }
+
+ int32_t asInt32()
+ {
+ return asJSValue().asInt32();
+ }
+
+ uint32_t asUInt32()
{
- ASSERT(op() == WeakJSConstant);
- return bitwise_cast<JSCell*>(m_opInfo);
+ return asInt32();
+ }
+
+ bool isDoubleConstant()
+ {
+ return isConstant() && constant()->value().isDouble();
+ }
+
+ bool isNumberConstant()
+ {
+ return isConstant() && constant()->value().isNumber();
}
- JSValue valueOfJSConstant(CodeBlock* codeBlock)
+ double asNumber()
{
- switch (op()) {
- case WeakJSConstant:
- return JSValue(weakConstant());
- case JSConstant:
- return codeBlock->constantRegister(FirstConstantRegisterIndex + constantNumber()).get();
- case PhantomArguments:
- return JSValue();
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return JSValue(); // Have to return something in release mode.
- }
+ return asJSValue().asNumber();
+ }
+
+ bool isAnyIntConstant()
+ {
+ return isConstant() && constant()->value().isAnyInt();
+ }
+
+ int64_t asAnyInt()
+ {
+ return asJSValue().asAnyInt();
+ }
+
+ bool isBooleanConstant()
+ {
+ return isConstant() && constant()->value().isBoolean();
+ }
+
+ bool asBoolean()
+ {
+ return constant()->value().asBoolean();
}
- bool isInt32Constant(CodeBlock* codeBlock)
+ bool isUndefinedOrNullConstant()
{
- return isConstant() && valueOfJSConstant(codeBlock).isInt32();
+ return isConstant() && constant()->value().isUndefinedOrNull();
}
-
- bool isDoubleConstant(CodeBlock* codeBlock)
+
+ bool isCellConstant()
{
- bool result = isConstant() && valueOfJSConstant(codeBlock).isDouble();
- if (result)
- ASSERT(!isInt32Constant(codeBlock));
- return result;
+ return isConstant() && constant()->value() && constant()->value().isCell();
+ }
+
+ JSCell* asCell()
+ {
+ return constant()->value().asCell();
+ }
+
+ template<typename T>
+ T dynamicCastConstant(VM& vm)
+ {
+ if (!isCellConstant())
+ return nullptr;
+ return jsDynamicCast<T>(vm, asCell());
}
- bool isNumberConstant(CodeBlock* codeBlock)
+ template<typename T>
+ T castConstant(VM& vm)
{
- bool result = isConstant() && valueOfJSConstant(codeBlock).isNumber();
- ASSERT(result == (isInt32Constant(codeBlock) || isDoubleConstant(codeBlock)));
+ T result = dynamicCastConstant<T>(vm);
+ RELEASE_ASSERT(result);
return result;
}
-
- bool isBooleanConstant(CodeBlock* codeBlock)
+
+ bool hasLazyJSValue()
{
- return isConstant() && valueOfJSConstant(codeBlock).isBoolean();
+ return op() == LazyJSConstant;
}
-
+
+ LazyJSValue lazyJSValue()
+ {
+ ASSERT(hasLazyJSValue());
+ return *m_opInfo.as<LazyJSValue*>();
+ }
+
+ String tryGetString(Graph&);
+
+ JSValue initializationValueForActivation() const
+ {
+ ASSERT(op() == CreateActivation);
+ return m_opInfo2.as<FrozenValue*>()->value();
+ }
+
+ bool hasArgumentsChild()
+ {
+ switch (op()) {
+ case GetMyArgumentByVal:
+ case GetMyArgumentByValOutOfBounds:
+ case LoadVarargs:
+ case ForwardVarargs:
+ case CallVarargs:
+ case CallForwardVarargs:
+ case ConstructVarargs:
+ case ConstructForwardVarargs:
+ case TailCallVarargs:
+ case TailCallForwardVarargs:
+ case TailCallVarargsInlinedCaller:
+ case TailCallForwardVarargsInlinedCaller:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ Edge& argumentsChild()
+ {
+ switch (op()) {
+ case GetMyArgumentByVal:
+ case GetMyArgumentByValOutOfBounds:
+ case LoadVarargs:
+ case ForwardVarargs:
+ return child1();
+ case CallVarargs:
+ case CallForwardVarargs:
+ case ConstructVarargs:
+ case ConstructForwardVarargs:
+ case TailCallVarargs:
+ case TailCallForwardVarargs:
+ case TailCallVarargsInlinedCaller:
+ case TailCallForwardVarargsInlinedCaller:
+ return child3();
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return child1();
+ }
+ }
+
bool containsMovHint()
{
switch (op()) {
@@ -512,14 +828,24 @@ struct Node {
}
bool hasVariableAccessData(Graph&);
- bool hasLocal(Graph& graph)
+ bool accessesStack(Graph& graph)
{
return hasVariableAccessData(graph);
}
+ // This is useful for debugging code, where a node that should have a variable
+ // access data doesn't have one because it hasn't been initialized yet.
+ VariableAccessData* tryGetVariableAccessData()
+ {
+ VariableAccessData* result = m_opInfo.as<VariableAccessData*>();
+ if (!result)
+ return 0;
+ return result->find();
+ }
+
VariableAccessData* variableAccessData()
{
- return reinterpret_cast<VariableAccessData*>(m_opInfo)->find();
+ return m_opInfo.as<VariableAccessData*>()->find();
}
VirtualRegister local()
@@ -539,6 +865,7 @@ struct Node {
case ExtractOSREntryLocal:
case MovHint:
case ZombieHint:
+ case KillStack:
return true;
default:
return false;
@@ -548,7 +875,7 @@ struct Node {
VirtualRegister unlinkedLocal()
{
ASSERT(hasUnlinkedLocal());
- return static_cast<VirtualRegister>(m_opInfo);
+ return VirtualRegister(m_opInfo.as<int32_t>());
}
bool hasUnlinkedMachineLocal()
@@ -565,7 +892,24 @@ struct Node {
VirtualRegister unlinkedMachineLocal()
{
ASSERT(hasUnlinkedMachineLocal());
- return VirtualRegister(m_opInfo2);
+ return VirtualRegister(m_opInfo2.as<int32_t>());
+ }
+
+ bool hasStackAccessData()
+ {
+ switch (op()) {
+ case PutStack:
+ case GetStack:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ StackAccessData* stackAccessData()
+ {
+ ASSERT(hasStackAccessData());
+ return m_opInfo.as<StackAccessData*>();
}
bool hasPhi()
@@ -576,28 +920,32 @@ struct Node {
Node* phi()
{
ASSERT(hasPhi());
- return bitwise_cast<Node*>(m_opInfo);
+ return m_opInfo.as<Node*>();
}
bool isStoreBarrier()
{
- switch (op()) {
- case StoreBarrier:
- case ConditionalStoreBarrier:
- case StoreBarrierWithNullCheck:
- return true;
- default:
- return false;
- }
+ return op() == StoreBarrier || op() == FencedStoreBarrier;
}
bool hasIdentifier()
{
switch (op()) {
+ case TryGetById:
case GetById:
case GetByIdFlush:
+ case GetByIdWithThis:
case PutById:
+ case PutByIdFlush:
case PutByIdDirect:
+ case PutByIdWithThis:
+ case PutGetterById:
+ case PutSetterById:
+ case PutGetterSetterById:
+ case DeleteById:
+ case GetDynamicVar:
+ case PutDynamicVar:
+ case ResolveScope:
return true;
default:
return false;
@@ -607,28 +955,63 @@ struct Node {
unsigned identifierNumber()
{
ASSERT(hasIdentifier());
- return m_opInfo;
+ return m_opInfo.as<unsigned>();
}
-
- bool hasArithNodeFlags()
+
+ bool hasGetPutInfo()
{
switch (op()) {
- case UInt32ToNumber:
- case ArithAdd:
- case ArithSub:
- case ArithNegate:
- case ArithMul:
- case ArithAbs:
- case ArithMin:
- case ArithMax:
- case ArithMod:
- case ArithDiv:
- case ValueAdd:
+ case GetDynamicVar:
+ case PutDynamicVar:
return true;
default:
return false;
}
}
+
+ unsigned getPutInfo()
+ {
+ ASSERT(hasGetPutInfo());
+ return m_opInfo2.as<unsigned>();
+ }
+
+ bool hasAccessorAttributes()
+ {
+ switch (op()) {
+ case PutGetterById:
+ case PutSetterById:
+ case PutGetterSetterById:
+ case PutGetterByVal:
+ case PutSetterByVal:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ int32_t accessorAttributes()
+ {
+ ASSERT(hasAccessorAttributes());
+ switch (op()) {
+ case PutGetterById:
+ case PutSetterById:
+ case PutGetterSetterById:
+ return m_opInfo2.as<int32_t>();
+ case PutGetterByVal:
+ case PutSetterByVal:
+ return m_opInfo.as<int32_t>();
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return 0;
+ }
+ }
+
+ bool hasPromotedLocationDescriptor()
+ {
+ return op() == PutHint;
+ }
+
+ PromotedLocationDescriptor promotedLocationDescriptor();
// This corrects the arithmetic node flags, so that irrelevant bits are
// ignored. In particular, anything other than ArithMul does not need
@@ -636,11 +1019,26 @@ struct Node {
NodeFlags arithNodeFlags()
{
NodeFlags result = m_flags & NodeArithFlagsMask;
- if (op() == ArithMul || op() == ArithDiv || op() == ArithMod || op() == ArithNegate || op() == DoubleAsInt32)
+ if (op() == ArithMul || op() == ArithDiv || op() == ArithMod || op() == ArithNegate || op() == ArithPow || op() == ArithRound || op() == ArithFloor || op() == ArithCeil || op() == ArithTrunc || op() == DoubleAsInt32)
return result;
return result & ~NodeBytecodeNeedsNegZero;
}
+
+ bool mayHaveNonIntResult()
+ {
+ return m_flags & NodeMayHaveNonIntResult;
+ }
+ bool mayHaveDoubleResult()
+ {
+ return m_flags & NodeMayHaveDoubleResult;
+ }
+
+ bool mayHaveNonNumberResult()
+ {
+ return m_flags & NodeMayHaveNonNumberResult;
+ }
+
bool hasConstantBuffer()
{
return op() == NewArrayBuffer;
@@ -649,7 +1047,7 @@ struct Node {
NewArrayBufferData* newArrayBufferData()
{
ASSERT(hasConstantBuffer());
- return reinterpret_cast<NewArrayBufferData*>(m_opInfo);
+ return m_opInfo.as<NewArrayBufferData*>();
}
unsigned startConstant()
@@ -673,13 +1071,27 @@ struct Node {
return false;
}
}
-
+
+ BitVector* bitVector()
+ {
+ ASSERT(op() == NewArrayWithSpread || op() == PhantomNewArrayWithSpread);
+ return m_opInfo.as<BitVector*>();
+ }
+
+ // Return the indexing type that an array allocation *wants* to use. It may end up using a different
+ // type if we're having a bad time. You can determine the actual indexing type by asking the global
+ // object:
+ //
+ // m_graph.globalObjectFor(node->origin.semantic)->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())
+ //
+ // This will give you a Structure*, and that will have some indexing type that may be different from
+ // the this one.
IndexingType indexingType()
{
ASSERT(hasIndexingType());
if (op() == NewArrayBuffer)
return newArrayBufferData()->indexingType;
- return m_opInfo;
+ return static_cast<IndexingType>(m_opInfo.as<uint32_t>());
}
bool hasTypedArrayType()
@@ -695,7 +1107,7 @@ struct Node {
TypedArrayType typedArrayType()
{
ASSERT(hasTypedArrayType());
- TypedArrayType result = static_cast<TypedArrayType>(m_opInfo);
+ TypedArrayType result = static_cast<TypedArrayType>(m_opInfo.as<uint32_t>());
ASSERT(isTypedView(result));
return result;
}
@@ -708,7 +1120,7 @@ struct Node {
unsigned inlineCapacity()
{
ASSERT(hasInlineCapacity());
- return m_opInfo;
+ return m_opInfo.as<unsigned>();
}
void setIndexingType(IndexingType indexingType)
@@ -717,66 +1129,136 @@ struct Node {
m_opInfo = indexingType;
}
- bool hasRegexpIndex()
+ bool hasScopeOffset()
{
- return op() == NewRegexp;
+ return op() == GetClosureVar || op() == PutClosureVar;
}
-
- unsigned regexpIndex()
+
+ ScopeOffset scopeOffset()
{
- ASSERT(hasRegexpIndex());
- return m_opInfo;
+ ASSERT(hasScopeOffset());
+ return ScopeOffset(m_opInfo.as<uint32_t>());
}
- bool hasVarNumber()
+ bool hasDirectArgumentsOffset()
{
- return op() == GetClosureVar || op() == PutClosureVar;
+ return op() == GetFromArguments || op() == PutToArguments;
}
-
- int varNumber()
+
+ DirectArgumentsOffset capturedArgumentsOffset()
{
- ASSERT(hasVarNumber());
- return m_opInfo;
+ ASSERT(hasDirectArgumentsOffset());
+ return DirectArgumentsOffset(m_opInfo.as<uint32_t>());
}
bool hasRegisterPointer()
{
- return op() == GetGlobalVar || op() == PutGlobalVar;
+ return op() == GetGlobalVar || op() == GetGlobalLexicalVariable || op() == PutGlobalVariable;
}
- WriteBarrier<Unknown>* registerPointer()
+ WriteBarrier<Unknown>* variablePointer()
{
- return bitwise_cast<WriteBarrier<Unknown>*>(m_opInfo);
+ return m_opInfo.as<WriteBarrier<Unknown>*>();
}
- bool hasResult()
+ bool hasCallVarargsData()
{
- return m_flags & NodeResultMask;
+ switch (op()) {
+ case CallVarargs:
+ case CallForwardVarargs:
+ case TailCallVarargs:
+ case TailCallForwardVarargs:
+ case TailCallVarargsInlinedCaller:
+ case TailCallForwardVarargsInlinedCaller:
+ case ConstructVarargs:
+ case ConstructForwardVarargs:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ CallVarargsData* callVarargsData()
+ {
+ ASSERT(hasCallVarargsData());
+ return m_opInfo.as<CallVarargsData*>();
+ }
+
+ bool hasLoadVarargsData()
+ {
+ return op() == LoadVarargs || op() == ForwardVarargs;
+ }
+
+ LoadVarargsData* loadVarargsData()
+ {
+ ASSERT(hasLoadVarargsData());
+ return m_opInfo.as<LoadVarargsData*>();
}
- bool hasInt32Result()
+ bool hasQueriedType()
{
- return (m_flags & NodeResultMask) == NodeResultInt32;
+ return op() == IsCellWithType;
+ }
+
+ JSType queriedType()
+ {
+ static_assert(std::is_same<uint8_t, std::underlying_type<JSType>::type>::value, "Ensure that uint8_t is the underlying type for JSType.");
+ return static_cast<JSType>(m_opInfo.as<uint32_t>());
+ }
+
+ bool hasSpeculatedTypeForQuery()
+ {
+ return op() == IsCellWithType;
+ }
+
+ SpeculatedType speculatedTypeForQuery()
+ {
+ return speculationFromJSType(queriedType());
+ }
+
+ bool hasResult()
+ {
+ return !!result();
+ }
+
+ bool hasInt52Result()
+ {
+ return result() == NodeResultInt52;
}
bool hasNumberResult()
{
- return (m_flags & NodeResultMask) == NodeResultNumber;
+ return result() == NodeResultNumber;
+ }
+
+ bool hasDoubleResult()
+ {
+ return result() == NodeResultDouble;
}
bool hasJSResult()
{
- return (m_flags & NodeResultMask) == NodeResultJS;
+ return result() == NodeResultJS;
}
bool hasBooleanResult()
{
- return (m_flags & NodeResultMask) == NodeResultBoolean;
+ return result() == NodeResultBoolean;
}
bool hasStorageResult()
{
- return (m_flags & NodeResultMask) == NodeResultStorage;
+ return result() == NodeResultStorage;
+ }
+
+ UseKind defaultUseKind()
+ {
+ return useKindForResult(result());
+ }
+
+ Edge defaultEdge()
+ {
+ return Edge(this, defaultUseKind());
}
bool isJump()
@@ -801,6 +1283,10 @@ struct Node {
case Branch:
case Switch:
case Return:
+ case TailCall:
+ case DirectTailCall:
+ case TailCallVarargs:
+ case TailCallForwardVarargs:
case Unreachable:
return true;
default:
@@ -808,46 +1294,36 @@ struct Node {
}
}
- unsigned takenBytecodeOffsetDuringParsing()
+ bool isFunctionTerminal()
{
- ASSERT(isBranch() || isJump());
- return m_opInfo;
- }
+ if (isTerminal() && !numSuccessors())
+ return true;
- unsigned notTakenBytecodeOffsetDuringParsing()
- {
- ASSERT(isBranch());
- return m_opInfo2;
+ return false;
}
-
- void setTakenBlock(BasicBlock* block)
- {
- ASSERT(isBranch() || isJump());
- m_opInfo = bitwise_cast<uintptr_t>(block);
- }
-
- void setNotTakenBlock(BasicBlock* block)
+
+ unsigned targetBytecodeOffsetDuringParsing()
{
- ASSERT(isBranch());
- m_opInfo2 = bitwise_cast<uintptr_t>(block);
+ ASSERT(isJump());
+ return m_opInfo.as<unsigned>();
}
-
- BasicBlock*& takenBlock()
+
+ BasicBlock*& targetBlock()
{
- ASSERT(isBranch() || isJump());
- return *bitwise_cast<BasicBlock**>(&m_opInfo);
+ ASSERT(isJump());
+ return *bitwise_cast<BasicBlock**>(&m_opInfo.u.pointer);
}
- BasicBlock*& notTakenBlock()
+ BranchData* branchData()
{
ASSERT(isBranch());
- return *bitwise_cast<BasicBlock**>(&m_opInfo2);
+ return m_opInfo.as<BranchData*>();
}
SwitchData* switchData()
{
ASSERT(isSwitch());
- return bitwise_cast<SwitchData*>(m_opInfo);
+ return m_opInfo.as<SwitchData*>();
}
unsigned numSuccessors()
@@ -868,44 +1344,147 @@ struct Node {
{
if (isSwitch()) {
if (index < switchData()->cases.size())
- return switchData()->cases[index].target;
+ return switchData()->cases[index].target.block;
RELEASE_ASSERT(index == switchData()->cases.size());
- return switchData()->fallThrough;
+ return switchData()->fallThrough.block;
}
switch (index) {
case 0:
- return takenBlock();
+ if (isJump())
+ return targetBlock();
+ return branchData()->taken.block;
case 1:
- return notTakenBlock();
+ return branchData()->notTaken.block;
default:
RELEASE_ASSERT_NOT_REACHED();
- return takenBlock();
+ return targetBlock();
+ }
+ }
+
+ class SuccessorsIterable {
+ public:
+ SuccessorsIterable()
+ : m_terminal(nullptr)
+ {
+ }
+
+ SuccessorsIterable(Node* terminal)
+ : m_terminal(terminal)
+ {
+ }
+
+ class iterator {
+ public:
+ iterator()
+ : m_terminal(nullptr)
+ , m_index(UINT_MAX)
+ {
+ }
+
+ iterator(Node* terminal, unsigned index)
+ : m_terminal(terminal)
+ , m_index(index)
+ {
+ }
+
+ BasicBlock* operator*()
+ {
+ return m_terminal->successor(m_index);
+ }
+
+ iterator& operator++()
+ {
+ m_index++;
+ return *this;
+ }
+
+ bool operator==(const iterator& other) const
+ {
+ return m_index == other.m_index;
+ }
+
+ bool operator!=(const iterator& other) const
+ {
+ return !(*this == other);
+ }
+ private:
+ Node* m_terminal;
+ unsigned m_index;
+ };
+
+ iterator begin()
+ {
+ return iterator(m_terminal, 0);
+ }
+
+ iterator end()
+ {
+ return iterator(m_terminal, m_terminal->numSuccessors());
}
+
+ size_t size() const { return m_terminal->numSuccessors(); }
+ BasicBlock* at(size_t index) const { return m_terminal->successor(index); }
+ BasicBlock* operator[](size_t index) const { return at(index); }
+
+ private:
+ Node* m_terminal;
+ };
+
+ SuccessorsIterable successors()
+ {
+ return SuccessorsIterable(this);
}
BasicBlock*& successorForCondition(bool condition)
{
- ASSERT(isBranch());
- return condition ? takenBlock() : notTakenBlock();
+ return branchData()->forCondition(condition);
}
bool hasHeapPrediction()
{
switch (op()) {
+ case ArithAbs:
+ case ArithRound:
+ case ArithFloor:
+ case ArithCeil:
+ case ArithTrunc:
+ case GetDirectPname:
case GetById:
case GetByIdFlush:
+ case GetByIdWithThis:
+ case TryGetById:
case GetByVal:
- case GetMyArgumentByVal:
- case GetMyArgumentByValSafe:
+ case GetByValWithThis:
case Call:
+ case DirectCall:
+ case TailCallInlinedCaller:
+ case DirectTailCallInlinedCaller:
case Construct:
+ case DirectConstruct:
+ case CallVarargs:
+ case CallEval:
+ case TailCallVarargsInlinedCaller:
+ case ConstructVarargs:
+ case CallForwardVarargs:
+ case TailCallForwardVarargsInlinedCaller:
case GetByOffset:
+ case MultiGetByOffset:
case GetClosureVar:
+ case GetFromArguments:
+ case GetArgument:
case ArrayPop:
case ArrayPush:
case RegExpExec:
case RegExpTest:
case GetGlobalVar:
+ case GetGlobalLexicalVariable:
+ case StringReplace:
+ case StringReplaceRegExp:
+ case ToNumber:
+ case LoadFromJSMapBucket:
+ case CallDOMGetter:
+ case CallDOM:
+ case ParseInt:
return true;
default:
return false;
@@ -915,80 +1494,103 @@ struct Node {
SpeculatedType getHeapPrediction()
{
ASSERT(hasHeapPrediction());
- return static_cast<SpeculatedType>(m_opInfo2);
+ return m_opInfo2.as<SpeculatedType>();
}
-
- bool predictHeap(SpeculatedType prediction)
+
+ void setHeapPrediction(SpeculatedType prediction)
{
ASSERT(hasHeapPrediction());
-
- return mergeSpeculation(m_opInfo2, prediction);
+ m_opInfo2 = prediction;
}
- bool hasFunction()
+ bool hasCellOperand()
{
switch (op()) {
- case CheckFunction:
- case AllocationProfileWatchpoint:
+ case CheckCell:
+ case OverridesHasInstance:
+ case NewFunction:
+ case NewGeneratorFunction:
+ case NewAsyncFunction:
+ case CreateActivation:
+ case MaterializeCreateActivation:
+ case NewRegexp:
+ case CompareEqPtr:
+ case DirectCall:
+ case DirectTailCall:
+ case DirectConstruct:
+ case DirectTailCallInlinedCaller:
return true;
default:
return false;
}
}
- JSCell* function()
+ FrozenValue* cellOperand()
{
- ASSERT(hasFunction());
- JSCell* result = reinterpret_cast<JSFunction*>(m_opInfo);
- ASSERT(JSValue(result).isFunction());
- return result;
+ ASSERT(hasCellOperand());
+ return m_opInfo.as<FrozenValue*>();
}
- bool hasExecutable()
+ template<typename T>
+ T castOperand()
{
- return op() == CheckExecutable;
+ return cellOperand()->cast<T>();
}
- ExecutableBase* executable()
+ void setCellOperand(FrozenValue* value)
{
- return jsCast<ExecutableBase*>(reinterpret_cast<JSCell*>(m_opInfo));
+ ASSERT(hasCellOperand());
+ m_opInfo = value;
}
- bool hasVariableWatchpointSet()
+ bool hasWatchpointSet()
{
- return op() == NotifyWrite || op() == VariableWatchpoint;
+ return op() == NotifyWrite;
}
- VariableWatchpointSet* variableWatchpointSet()
+ WatchpointSet* watchpointSet()
{
- return reinterpret_cast<VariableWatchpointSet*>(m_opInfo);
+ ASSERT(hasWatchpointSet());
+ return m_opInfo.as<WatchpointSet*>();
}
- bool hasTypedArray()
+ bool hasStoragePointer()
{
- return op() == TypedArrayWatchpoint;
+ return op() == ConstantStoragePointer;
}
- JSArrayBufferView* typedArray()
+ void* storagePointer()
{
- return reinterpret_cast<JSArrayBufferView*>(m_opInfo);
+ ASSERT(hasStoragePointer());
+ return m_opInfo.as<void*>();
}
-
- bool hasStoragePointer()
+
+ bool hasUidOperand()
{
- return op() == ConstantStoragePointer;
+ return op() == CheckStringIdent;
}
-
- void* storagePointer()
+
+ UniquedStringImpl* uidOperand()
{
- return reinterpret_cast<void*>(m_opInfo);
+ ASSERT(hasUidOperand());
+ return m_opInfo.as<UniquedStringImpl*>();
}
- bool hasStructureTransitionData()
+ bool hasTypeInfoOperand()
+ {
+ return op() == CheckTypeInfoFlags;
+ }
+
+ unsigned typeInfoOperand()
+ {
+ ASSERT(hasTypeInfoOperand() && m_opInfo.as<uint32_t>() <= static_cast<uint32_t>(UCHAR_MAX));
+ return m_opInfo.as<uint32_t>();
+ }
+
+ bool hasTransition()
{
switch (op()) {
case PutStructure:
- case PhantomPutStructure:
case AllocatePropertyStorage:
case ReallocatePropertyStorage:
return true;
@@ -997,32 +1599,33 @@ struct Node {
}
}
- StructureTransitionData& structureTransitionData()
+ Transition* transition()
{
- ASSERT(hasStructureTransitionData());
- return *reinterpret_cast<StructureTransitionData*>(m_opInfo);
+ ASSERT(hasTransition());
+ return m_opInfo.as<Transition*>();
}
bool hasStructureSet()
{
switch (op()) {
case CheckStructure:
+ case CheckStructureImmediate:
+ case MaterializeNewObject:
return true;
default:
return false;
}
}
- StructureSet& structureSet()
+ const RegisteredStructureSet& structureSet()
{
ASSERT(hasStructureSet());
- return *reinterpret_cast<StructureSet*>(m_opInfo);
+ return *m_opInfo.as<RegisteredStructureSet*>();
}
bool hasStructure()
{
switch (op()) {
- case StructureTransitionWatchpoint:
case ArrayifyToStructure:
case NewObject:
case NewStringObject:
@@ -1032,55 +1635,153 @@ struct Node {
}
}
- Structure* structure()
+ RegisteredStructure structure()
{
ASSERT(hasStructure());
- return reinterpret_cast<Structure*>(m_opInfo);
+ return m_opInfo.asRegisteredStructure();
}
bool hasStorageAccessData()
{
- return op() == GetByOffset || op() == PutByOffset;
+ switch (op()) {
+ case GetByOffset:
+ case PutByOffset:
+ case GetGetterSetterByOffset:
+ return true;
+ default:
+ return false;
+ }
}
- unsigned storageAccessDataIndex()
+ StorageAccessData& storageAccessData()
{
ASSERT(hasStorageAccessData());
- return m_opInfo;
+ return *m_opInfo.as<StorageAccessData*>();
}
- bool hasFunctionDeclIndex()
+ bool hasMultiGetByOffsetData()
{
- return op() == NewFunction
- || op() == NewFunctionNoCheck;
+ return op() == MultiGetByOffset;
}
- unsigned functionDeclIndex()
+ MultiGetByOffsetData& multiGetByOffsetData()
{
- ASSERT(hasFunctionDeclIndex());
- return m_opInfo;
+ ASSERT(hasMultiGetByOffsetData());
+ return *m_opInfo.as<MultiGetByOffsetData*>();
}
- bool hasFunctionExprIndex()
+ bool hasMultiPutByOffsetData()
{
- return op() == NewFunctionExpression;
+ return op() == MultiPutByOffset;
}
- unsigned functionExprIndex()
+ MultiPutByOffsetData& multiPutByOffsetData()
{
- ASSERT(hasFunctionExprIndex());
- return m_opInfo;
+ ASSERT(hasMultiPutByOffsetData());
+ return *m_opInfo.as<MultiPutByOffsetData*>();
}
- bool hasSymbolTable()
+ bool hasObjectMaterializationData()
{
- return op() == FunctionReentryWatchpoint;
+ switch (op()) {
+ case MaterializeNewObject:
+ case MaterializeCreateActivation:
+ return true;
+
+ default:
+ return false;
+ }
+ }
+
+ ObjectMaterializationData& objectMaterializationData()
+ {
+ ASSERT(hasObjectMaterializationData());
+ return *m_opInfo2.as<ObjectMaterializationData*>();
+ }
+
+ bool isObjectAllocation()
+ {
+ switch (op()) {
+ case NewObject:
+ case MaterializeNewObject:
+ return true;
+ default:
+ return false;
+ }
}
- SymbolTable* symbolTable()
+ bool isPhantomObjectAllocation()
{
- ASSERT(hasSymbolTable());
- return reinterpret_cast<SymbolTable*>(m_opInfo);
+ switch (op()) {
+ case PhantomNewObject:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isActivationAllocation()
+ {
+ switch (op()) {
+ case CreateActivation:
+ case MaterializeCreateActivation:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isPhantomActivationAllocation()
+ {
+ switch (op()) {
+ case PhantomCreateActivation:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isFunctionAllocation()
+ {
+ switch (op()) {
+ case NewFunction:
+ case NewGeneratorFunction:
+ case NewAsyncFunction:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isPhantomFunctionAllocation()
+ {
+ switch (op()) {
+ case PhantomNewFunction:
+ case PhantomNewGeneratorFunction:
+ case PhantomNewAsyncFunction:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isPhantomAllocation()
+ {
+ switch (op()) {
+ case PhantomNewObject:
+ case PhantomDirectArguments:
+ case PhantomCreateRest:
+ case PhantomSpread:
+ case PhantomNewArrayWithSpread:
+ case PhantomClonedArguments:
+ case PhantomNewFunction:
+ case PhantomNewGeneratorFunction:
+ case PhantomNewAsyncFunction:
+ case PhantomCreateActivation:
+ return true;
+ default:
+ return false;
+ }
}
bool hasArrayMode()
@@ -1088,6 +1789,7 @@ struct Node {
switch (op()) {
case GetIndexedPropertyStorage:
case GetArrayLength:
+ case In:
case PutByValDirect:
case PutByVal:
case PutByValAlias:
@@ -1099,6 +1801,7 @@ struct Node {
case ArrayifyToStructure:
case ArrayPush:
case ArrayPop:
+ case HasIndexedProperty:
return true;
default:
return false;
@@ -1109,8 +1812,8 @@ struct Node {
{
ASSERT(hasArrayMode());
if (op() == ArrayifyToStructure)
- return ArrayMode::fromWord(m_opInfo2);
- return ArrayMode::fromWord(m_opInfo);
+ return ArrayMode::fromWord(m_opInfo2.as<uint32_t>());
+ return ArrayMode::fromWord(m_opInfo.as<uint32_t>());
}
bool setArrayMode(ArrayMode arrayMode)
@@ -1125,6 +1828,7 @@ struct Node {
bool hasArithMode()
{
switch (op()) {
+ case ArithAbs:
case ArithAdd:
case ArithSub:
case ArithNegate:
@@ -1142,13 +1846,30 @@ struct Node {
Arith::Mode arithMode()
{
ASSERT(hasArithMode());
- return static_cast<Arith::Mode>(m_opInfo);
+ return static_cast<Arith::Mode>(m_opInfo.as<uint32_t>());
}
void setArithMode(Arith::Mode mode)
{
m_opInfo = mode;
}
+
+ bool hasArithRoundingMode()
+ {
+ return op() == ArithRound || op() == ArithFloor || op() == ArithCeil || op() == ArithTrunc;
+ }
+
+ Arith::RoundingMode arithRoundingMode()
+ {
+ ASSERT(hasArithRoundingMode());
+ return static_cast<Arith::RoundingMode>(m_opInfo.as<uint32_t>());
+ }
+
+ void setArithRoundingMode(Arith::RoundingMode mode)
+ {
+ ASSERT(hasArithRoundingMode());
+ m_opInfo = static_cast<uint32_t>(mode);
+ }
bool hasVirtualRegister()
{
@@ -1176,7 +1897,7 @@ struct Node {
Profiler::ExecutionCounter* executionCounter()
{
- return bitwise_cast<Profiler::ExecutionCounter*>(m_opInfo);
+ return m_opInfo.as<Profiler::ExecutionCounter*>();
}
bool shouldGenerate()
@@ -1184,19 +1905,9 @@ struct Node {
return m_refCount;
}
- bool willHaveCodeGenOrOSR()
+ bool isSemanticallySkippable()
{
- switch (op()) {
- case SetLocal:
- case MovHint:
- case ZombieHint:
- case PhantomArguments:
- return true;
- case Phantom:
- return child1().useKindUnchecked() != UntypedUse || child2().useKindUnchecked() != UntypedUse || child3().useKindUnchecked() != UntypedUse;
- default:
- return shouldGenerate();
- }
+ return op() == CountExecution;
}
unsigned refCount()
@@ -1263,9 +1974,25 @@ struct Node {
return child1().useKind();
}
+ bool isBinaryUseKind(UseKind left, UseKind right)
+ {
+ return child1().useKind() == left && child2().useKind() == right;
+ }
+
bool isBinaryUseKind(UseKind useKind)
{
- return child1().useKind() == useKind && child2().useKind() == useKind;
+ return isBinaryUseKind(useKind, useKind);
+ }
+
+ Edge childFor(UseKind useKind)
+ {
+ if (child1().useKind() == useKind)
+ return child1();
+ if (child2().useKind() == useKind)
+ return child2();
+ if (child3().useKind() == useKind)
+ return child3();
+ return Edge();
}
SpeculatedType prediction()
@@ -1282,30 +2009,40 @@ struct Node {
{
return isInt32Speculation(prediction());
}
+
+ bool shouldSpeculateNotInt32()
+ {
+ return isNotInt32Speculation(prediction());
+ }
- bool shouldSpeculateInt32ForArithmetic()
+ bool sawBooleans()
{
- return isInt32SpeculationForArithmetic(prediction());
+ return !!(prediction() & SpecBoolean);
+ }
+
+ bool shouldSpeculateInt32OrBoolean()
+ {
+ return isInt32OrBooleanSpeculation(prediction());
}
- bool shouldSpeculateInt32ExpectingDefined()
+ bool shouldSpeculateInt32ForArithmetic()
{
- return isInt32SpeculationExpectingDefined(prediction());
+ return isInt32SpeculationForArithmetic(prediction());
}
- bool shouldSpeculateMachineInt()
+ bool shouldSpeculateInt32OrBooleanForArithmetic()
{
- return isMachineIntSpeculation(prediction());
+ return isInt32OrBooleanSpeculationForArithmetic(prediction());
}
- bool shouldSpeculateMachineIntForArithmetic()
+ bool shouldSpeculateInt32OrBooleanExpectingDefined()
{
- return isMachineIntSpeculationForArithmetic(prediction());
+ return isInt32OrBooleanSpeculationExpectingDefined(prediction());
}
- bool shouldSpeculateMachineIntExpectingDefined()
+ bool shouldSpeculateAnyInt()
{
- return isMachineIntSpeculationExpectingDefined(prediction());
+ return isAnyIntSpeculation(prediction());
}
bool shouldSpeculateDouble()
@@ -1313,9 +2050,9 @@ struct Node {
return isDoubleSpeculation(prediction());
}
- bool shouldSpeculateDoubleForArithmetic()
+ bool shouldSpeculateDoubleReal()
{
- return isDoubleSpeculationForArithmetic(prediction());
+ return isDoubleRealSpeculation(prediction());
}
bool shouldSpeculateNumber()
@@ -1323,25 +2060,60 @@ struct Node {
return isFullNumberSpeculation(prediction());
}
- bool shouldSpeculateNumberExpectingDefined()
+ bool shouldSpeculateNumberOrBoolean()
+ {
+ return isFullNumberOrBooleanSpeculation(prediction());
+ }
+
+ bool shouldSpeculateNumberOrBooleanExpectingDefined()
{
- return isFullNumberSpeculationExpectingDefined(prediction());
+ return isFullNumberOrBooleanSpeculationExpectingDefined(prediction());
}
bool shouldSpeculateBoolean()
{
return isBooleanSpeculation(prediction());
}
+
+ bool shouldSpeculateNotBoolean()
+ {
+ return isNotBooleanSpeculation(prediction());
+ }
+
+ bool shouldSpeculateOther()
+ {
+ return isOtherSpeculation(prediction());
+ }
+
+ bool shouldSpeculateMisc()
+ {
+ return isMiscSpeculation(prediction());
+ }
bool shouldSpeculateStringIdent()
{
return isStringIdentSpeculation(prediction());
}
+
+ bool shouldSpeculateNotStringVar()
+ {
+ return isNotStringVarSpeculation(prediction());
+ }
bool shouldSpeculateString()
{
return isStringSpeculation(prediction());
}
+
+ bool shouldSpeculateNotString()
+ {
+ return isNotStringSpeculation(prediction());
+ }
+
+ bool shouldSpeculateStringOrOther()
+ {
+ return isStringOrOtherSpeculation(prediction());
+ }
bool shouldSpeculateStringObject()
{
@@ -1352,6 +2124,16 @@ struct Node {
{
return isStringOrStringObjectSpeculation(prediction());
}
+
+ bool shouldSpeculateRegExpObject()
+ {
+ return isRegExpObjectSpeculation(prediction());
+ }
+
+ bool shouldSpeculateSymbol()
+ {
+ return isSymbolSpeculation(prediction());
+ }
bool shouldSpeculateFinalObject()
{
@@ -1367,10 +2149,25 @@ struct Node {
{
return isArraySpeculation(prediction());
}
+
+ bool shouldSpeculateProxyObject()
+ {
+ return isProxyObjectSpeculation(prediction());
+ }
+
+ bool shouldSpeculateDerivedArray()
+ {
+ return isDerivedArraySpeculation(prediction());
+ }
- bool shouldSpeculateArguments()
+ bool shouldSpeculateDirectArguments()
{
- return isArgumentsSpeculation(prediction());
+ return isDirectArgumentsSpeculation(prediction());
+ }
+
+ bool shouldSpeculateScopedArguments()
+ {
+ return isScopedArgumentsSpeculation(prediction());
}
bool shouldSpeculateInt8Array()
@@ -1438,6 +2235,36 @@ struct Node {
return isCellSpeculation(prediction());
}
+ bool shouldSpeculateCellOrOther()
+ {
+ return isCellOrOtherSpeculation(prediction());
+ }
+
+ bool shouldSpeculateNotCell()
+ {
+ return isNotCellSpeculation(prediction());
+ }
+
+ bool shouldSpeculateUntypedForArithmetic()
+ {
+ return isUntypedSpeculationForArithmetic(prediction());
+ }
+
+ static bool shouldSpeculateUntypedForArithmetic(Node* op1, Node* op2)
+ {
+ return op1->shouldSpeculateUntypedForArithmetic() || op2->shouldSpeculateUntypedForArithmetic();
+ }
+
+ bool shouldSpeculateUntypedForBitOps()
+ {
+ return isUntypedSpeculationForBitOps(prediction());
+ }
+
+ static bool shouldSpeculateUntypedForBitOps(Node* op1, Node* op2)
+ {
+ return op1->shouldSpeculateUntypedForBitOps() || op2->shouldSpeculateUntypedForBitOps();
+ }
+
static bool shouldSpeculateBoolean(Node* op1, Node* op2)
{
return op1->shouldSpeculateBoolean() && op2->shouldSpeculateBoolean();
@@ -1448,44 +2275,49 @@ struct Node {
return op1->shouldSpeculateInt32() && op2->shouldSpeculateInt32();
}
- static bool shouldSpeculateInt32ForArithmetic(Node* op1, Node* op2)
+ static bool shouldSpeculateInt32OrBoolean(Node* op1, Node* op2)
{
- return op1->shouldSpeculateInt32ForArithmetic() && op2->shouldSpeculateInt32ForArithmetic();
+ return op1->shouldSpeculateInt32OrBoolean()
+ && op2->shouldSpeculateInt32OrBoolean();
}
- static bool shouldSpeculateInt32ExpectingDefined(Node* op1, Node* op2)
+ static bool shouldSpeculateInt32OrBooleanForArithmetic(Node* op1, Node* op2)
{
- return op1->shouldSpeculateInt32ExpectingDefined() && op2->shouldSpeculateInt32ExpectingDefined();
+ return op1->shouldSpeculateInt32OrBooleanForArithmetic()
+ && op2->shouldSpeculateInt32OrBooleanForArithmetic();
}
- static bool shouldSpeculateMachineInt(Node* op1, Node* op2)
+ static bool shouldSpeculateInt32OrBooleanExpectingDefined(Node* op1, Node* op2)
{
- return op1->shouldSpeculateMachineInt() && op2->shouldSpeculateMachineInt();
+ return op1->shouldSpeculateInt32OrBooleanExpectingDefined()
+ && op2->shouldSpeculateInt32OrBooleanExpectingDefined();
}
- static bool shouldSpeculateMachineIntForArithmetic(Node* op1, Node* op2)
+ static bool shouldSpeculateAnyInt(Node* op1, Node* op2)
{
- return op1->shouldSpeculateMachineIntForArithmetic() && op2->shouldSpeculateMachineIntForArithmetic();
+ return op1->shouldSpeculateAnyInt() && op2->shouldSpeculateAnyInt();
}
- static bool shouldSpeculateMachineIntExpectingDefined(Node* op1, Node* op2)
+ static bool shouldSpeculateNumber(Node* op1, Node* op2)
{
- return op1->shouldSpeculateMachineIntExpectingDefined() && op2->shouldSpeculateMachineIntExpectingDefined();
+ return op1->shouldSpeculateNumber() && op2->shouldSpeculateNumber();
}
- static bool shouldSpeculateDoubleForArithmetic(Node* op1, Node* op2)
+ static bool shouldSpeculateNumberOrBoolean(Node* op1, Node* op2)
{
- return op1->shouldSpeculateDoubleForArithmetic() && op2->shouldSpeculateDoubleForArithmetic();
+ return op1->shouldSpeculateNumberOrBoolean()
+ && op2->shouldSpeculateNumberOrBoolean();
}
- static bool shouldSpeculateNumber(Node* op1, Node* op2)
+ static bool shouldSpeculateNumberOrBooleanExpectingDefined(Node* op1, Node* op2)
{
- return op1->shouldSpeculateNumber() && op2->shouldSpeculateNumber();
+ return op1->shouldSpeculateNumberOrBooleanExpectingDefined()
+ && op2->shouldSpeculateNumberOrBooleanExpectingDefined();
}
-
- static bool shouldSpeculateNumberExpectingDefined(Node* op1, Node* op2)
+
+ static bool shouldSpeculateSymbol(Node* op1, Node* op2)
{
- return op1->shouldSpeculateNumberExpectingDefined() && op2->shouldSpeculateNumberExpectingDefined();
+ return op1->shouldSpeculateSymbol() && op2->shouldSpeculateSymbol();
}
static bool shouldSpeculateFinalObject(Node* op1, Node* op2)
@@ -1498,16 +2330,153 @@ struct Node {
return op1->shouldSpeculateArray() && op2->shouldSpeculateArray();
}
- bool canSpeculateInt32()
+ bool canSpeculateInt32(RareCaseProfilingSource source)
+ {
+ return nodeCanSpeculateInt32(arithNodeFlags(), source);
+ }
+
+ bool canSpeculateInt52(RareCaseProfilingSource source)
+ {
+ return nodeCanSpeculateInt52(arithNodeFlags(), source);
+ }
+
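+    // Picks which rare-case profiling counters the prediction passes should trust: the DFG's own
+    // rare cases on the primary pass, or whenever either child has been observed to produce
+    // booleans; all rare-case sources otherwise.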
+ RareCaseProfilingSource sourceFor(PredictionPass pass)
+ {
+ if (pass == PrimaryPass || child1()->sawBooleans() || (child2() && child2()->sawBooleans()))
+ return DFGRareCase;
+ return AllRareCases;
+ }
+
+ bool canSpeculateInt32(PredictionPass pass)
+ {
+ return canSpeculateInt32(sourceFor(pass));
+ }
+
+ bool canSpeculateInt52(PredictionPass pass)
+ {
+ return canSpeculateInt52(sourceFor(pass));
+ }
+
+ bool hasTypeLocation()
+ {
+ return op() == ProfileType;
+ }
+
+ TypeLocation* typeLocation()
+ {
+ ASSERT(hasTypeLocation());
+ return m_opInfo.as<TypeLocation*>();
+ }
+
+ bool hasBasicBlockLocation()
+ {
+ return op() == ProfileControlFlow;
+ }
+
+ BasicBlockLocation* basicBlockLocation()
+ {
+ ASSERT(hasBasicBlockLocation());
+ return m_opInfo.as<BasicBlockLocation*>();
+ }
+
+ bool hasCheckDOMPatchpoint() const
+ {
+ return op() == CheckDOM;
+ }
+
+ DOMJIT::Patchpoint* checkDOMPatchpoint()
+ {
+ ASSERT(hasCheckDOMPatchpoint());
+ return m_opInfo.as<DOMJIT::Patchpoint*>();
+ }
+
+ bool hasCallDOMGetterData() const
+ {
+ return op() == CallDOMGetter;
+ }
+
+ CallDOMGetterData* callDOMGetterData()
+ {
+ ASSERT(hasCallDOMGetterData());
+ return m_opInfo.as<CallDOMGetterData*>();
+ }
+
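+    // A CheckDOM node carries two immediates: its DOMJIT::Patchpoint in m_opInfo (see
+    // checkDOMPatchpoint() above) and the expected ClassInfo in m_opInfo2 (below).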
+ bool hasClassInfo() const
+ {
+ return op() == CheckDOM;
+ }
+
+ const ClassInfo* classInfo()
+ {
+ return m_opInfo2.as<const ClassInfo*>();
+ }
+
+ bool hasSignature() const
+ {
+ // Note that this does not include TailCall node types intentionally.
+ // CallDOM node types are always converted from Call.
+ return op() == Call || op() == CallDOM;
+ }
+
+ const DOMJIT::Signature* signature()
+ {
+ return m_opInfo.as<const DOMJIT::Signature*>();
+ }
+
+ bool hasInternalMethodType() const
+ {
+ return op() == HasIndexedProperty;
+ }
+
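+    // The InternalMethodType enum is stored widened to uint32_t in m_opInfo2 and narrowed back
+    // with a static_cast on read; HasIndexedProperty is the only node type that carries it.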
+ PropertySlot::InternalMethodType internalMethodType() const
+ {
+ ASSERT(hasInternalMethodType());
+ return static_cast<PropertySlot::InternalMethodType>(m_opInfo2.as<uint32_t>());
+ }
+
+ void setInternalMethodType(PropertySlot::InternalMethodType type)
+ {
+ ASSERT(hasInternalMethodType());
+ m_opInfo2 = static_cast<uint32_t>(type);
+ }
+
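+    // replacement and epoch alias each other in the m_misc union below, so a node holds at most
+    // one of them at a time. Phases must initialize whichever one they use (e.g. via
+    // Graph::clearReplacements()); see the comment near m_misc.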
+ Node* replacement() const
+ {
+ return m_misc.replacement;
+ }
+
+ void setReplacement(Node* replacement)
{
- return nodeCanSpeculateInt32(arithNodeFlags());
+ m_misc.replacement = replacement;
}
- bool canSpeculateInt52()
+ Epoch epoch() const
{
- return nodeCanSpeculateInt52(arithNodeFlags());
+ return Epoch::fromUnsigned(m_misc.epoch);
}
+ void setEpoch(Epoch epoch)
+ {
+ m_misc.epoch = epoch.toUnsigned();
+ }
+
+ unsigned numberOfArgumentsToSkip()
+ {
+ ASSERT(op() == CreateRest || op() == PhantomCreateRest || op() == GetRestLength || op() == GetMyArgumentByVal || op() == GetMyArgumentByValOutOfBounds);
+ return m_opInfo.as<unsigned>();
+ }
+
+ bool hasArgumentIndex()
+ {
+ return op() == GetArgument;
+ }
+
+ unsigned argumentIndex()
+ {
+ ASSERT(hasArgumentIndex());
+ return m_opInfo.as<unsigned>();
+ }
+
void dumpChildren(PrintStream& out)
{
if (!child1())
@@ -1522,59 +2491,155 @@ struct Node {
}
// NB. This class must have a trivial destructor.
-
- // Used for determining what bytecode this came from. This is important for
- // debugging, exceptions, and even basic execution semantics.
- CodeOrigin codeOrigin;
- // Code origin for where the node exits to.
- CodeOrigin codeOriginForExitTarget;
+
+ NodeOrigin origin;
+
// References to up to 3 children, or links to a variable length set of children.
AdjacencyList children;
private:
+ friend class Graph;
+
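+    // Index of this node within the graph, presumably assigned by Graph when the node is
+    // registered (hence the friend declaration above); unsigned max marks a node that has not
+    // been given an index yet.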
+ unsigned m_index { std::numeric_limits<unsigned>::max() };
unsigned m_op : 10; // real type is NodeType
- unsigned m_flags : 22;
+ unsigned m_flags : 20;
// The virtual register number (spill location) associated with this node.
VirtualRegister m_virtualRegister;
// The number of uses of the result of this operation (+1 for 'must generate' nodes, which have side-effects).
unsigned m_refCount;
// The prediction ascribed to this node after propagation.
- SpeculatedType m_prediction;
- // Immediate values, accesses type-checked via accessors above. The first one is
- // big enough to store a pointer.
- uintptr_t m_opInfo;
- uintptr_t m_opInfo2;
+ SpeculatedType m_prediction { SpecNone };
+ // Immediate values, accesses type-checked via accessors above.
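+    // The wrapper below keeps each immediate in a union and zeroes the full 64 bits before any
+    // narrower write, so no stale high bits survive when a value is later read through a wider
+    // accessor. The as<T>() overloads are chosen via std::enable_if according to whether T is a
+    // non-const pointer, a const pointer, or a 4- or 8-byte integral type; for example,
+    // typeLocation() above reads m_opInfo.as<TypeLocation*>() and numberOfArgumentsToSkip()
+    // reads m_opInfo.as<unsigned>().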
+ struct OpInfoWrapper {
+ OpInfoWrapper()
+ {
+ u.int64 = 0;
+ }
+ OpInfoWrapper(uint32_t intValue)
+ {
+ u.int64 = 0;
+ u.int32 = intValue;
+ }
+ OpInfoWrapper(uint64_t intValue)
+ {
+ u.int64 = intValue;
+ }
+ OpInfoWrapper(void* pointer)
+ {
+ u.int64 = 0;
+ u.pointer = pointer;
+ }
+ OpInfoWrapper(const void* constPointer)
+ {
+ u.int64 = 0;
+ u.constPointer = constPointer;
+ }
+ OpInfoWrapper(RegisteredStructure structure)
+ {
+ u.int64 = 0;
+ u.pointer = bitwise_cast<void*>(structure);
+ }
+ OpInfoWrapper& operator=(uint32_t int32)
+ {
+ u.int64 = 0;
+ u.int32 = int32;
+ return *this;
+ }
+ OpInfoWrapper& operator=(int32_t int32)
+ {
+ u.int64 = 0;
+ u.int32 = int32;
+ return *this;
+ }
+ OpInfoWrapper& operator=(uint64_t int64)
+ {
+ u.int64 = int64;
+ return *this;
+ }
+ OpInfoWrapper& operator=(void* pointer)
+ {
+ u.int64 = 0;
+ u.pointer = pointer;
+ return *this;
+ }
+ OpInfoWrapper& operator=(const void* constPointer)
+ {
+ u.int64 = 0;
+ u.constPointer = constPointer;
+ return *this;
+ }
+ OpInfoWrapper& operator=(RegisteredStructure structure)
+ {
+ u.int64 = 0;
+ u.pointer = bitwise_cast<void*>(structure);
+ return *this;
+ }
+ template <typename T>
+ ALWAYS_INLINE auto as() const -> typename std::enable_if<std::is_pointer<T>::value && !std::is_const<typename std::remove_pointer<T>::type>::value, T>::type
+ {
+ return static_cast<T>(u.pointer);
+ }
+ template <typename T>
+ ALWAYS_INLINE auto as() const -> typename std::enable_if<std::is_pointer<T>::value && std::is_const<typename std::remove_pointer<T>::type>::value, T>::type
+ {
+ return static_cast<T>(u.constPointer);
+ }
+ template <typename T>
+ ALWAYS_INLINE auto as() const -> typename std::enable_if<std::is_integral<T>::value && sizeof(T) == 4, T>::type
+ {
+ return u.int32;
+ }
+ template <typename T>
+ ALWAYS_INLINE auto as() const -> typename std::enable_if<std::is_integral<T>::value && sizeof(T) == 8, T>::type
+ {
+ return u.int64;
+ }
+ ALWAYS_INLINE RegisteredStructure asRegisteredStructure() const
+ {
+ return bitwise_cast<RegisteredStructure>(u.pointer);
+ }
+
+ union {
+ uint32_t int32;
+ uint64_t int64;
+ void* pointer;
+ const void* constPointer;
+ } u;
+ };
+ OpInfoWrapper m_opInfo;
+ OpInfoWrapper m_opInfo2;
-public:
- // Fields used by various analyses.
- AbstractValue value;
-
// Miscellaneous data that is usually meaningless, but can hold some analysis results
- // if you ask right. For example, if you do Graph::initializeNodeOwners(), misc.owner
+ // if you ask right. For example, if you do Graph::initializeNodeOwners(), Node::owner
// will tell you which basic block a node belongs to. You cannot rely on this persisting
// across transformations unless you do the maintenance work yourself. Other phases use
- // misc.replacement, but they do so manually: first you do Graph::clearReplacements()
- // and then you set, and use, replacement's yourself.
+ // Node::replacement, but they do so manually: first you do Graph::clearReplacements()
+    // and then you set and use replacements yourself. Same thing for epoch.
//
// Bottom line: don't use these fields unless you initialize them yourself, or by
// calling some appropriate methods that initialize them the way you want. Otherwise,
// these fields are meaningless.
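+    //
+    // A minimal sketch of the usual replacement idiom (findCanonical() is a hypothetical helper,
+    // not something defined in this file):
+    //
+    //     m_graph.clearReplacements();
+    //     for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
+    //         BasicBlock* block = m_graph.block(blockIndex);
+    //         if (!block)
+    //             continue;
+    //         for (unsigned i = 0; i < block->size(); ++i) {
+    //             Node* node = block->at(i);
+    //             if (Node* canonical = findCanonical(node))
+    //                 node->setReplacement(canonical);
+    //         }
+    //     }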
+private:
union {
Node* replacement;
- BasicBlock* owner;
- bool needsBarrier;
- } misc;
+ unsigned epoch;
+ } m_misc;
+public:
+ BasicBlock* owner;
};
-inline bool nodeComparator(Node* a, Node* b)
-{
- return a->index() < b->index();
-}
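+// Functor replacement for the old nodeComparator() free function: it orders nodes by index(),
+// and being templated it works for both Node* and const Node* operands.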
+struct NodeComparator {
+ template<typename NodePtrType>
+ bool operator()(NodePtrType a, NodePtrType b) const
+ {
+ return a->index() < b->index();
+ }
+};
template<typename T>
CString nodeListDump(const T& nodeList)
{
- return sortedListDump(nodeList, nodeComparator);
+ return sortedListDump(nodeList, NodeComparator());
}
template<typename T>
@@ -1585,7 +2650,7 @@ CString nodeMapDump(const T& nodeMap, DumpContext* context = 0)
typename T::const_iterator iter = nodeMap.begin();
iter != nodeMap.end(); ++iter)
keys.append(iter->key);
- std::sort(keys.begin(), keys.end(), nodeComparator);
+ std::sort(keys.begin(), keys.end(), NodeComparator());
StringPrintStream out;
CommaPrinter comma;
for(unsigned i = 0; i < keys.size(); ++i)
@@ -1593,6 +2658,22 @@ CString nodeMapDump(const T& nodeMap, DumpContext* context = 0)
return out.toCString();
}
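+// Dumps a list of (node, value) pairs, sorted by node index, as comma-separated "node=>value"
+// entries; the list's ValueType is expected to expose .node and .value members.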
+template<typename T>
+CString nodeValuePairListDump(const T& nodeValuePairList, DumpContext* context = 0)
+{
+ using V = typename T::ValueType;
+ T sortedList = nodeValuePairList;
+ std::sort(sortedList.begin(), sortedList.end(), [](const V& a, const V& b) {
+ return NodeComparator()(a.node, b.node);
+ });
+
+ StringPrintStream out;
+ CommaPrinter comma;
+ for (const auto& pair : sortedList)
+ out.print(comma, pair.node, "=>", inContext(pair.value, context));
+ return out.toCString();
+}
+
} } // namespace JSC::DFG
namespace WTF {
@@ -1607,4 +2688,3 @@ inline JSC::DFG::Node* inContext(JSC::DFG::Node* node, JSC::DumpContext*) { retu
using WTF::inContext;
#endif
-#endif