path: root/Source/JavaScriptCore/dfg/DFGGraph.h
author    Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
committer Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
commit    1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree      46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/dfg/DFGGraph.h
parent    32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
Diffstat (limited to 'Source/JavaScriptCore/dfg/DFGGraph.h')
-rw-r--r--  Source/JavaScriptCore/dfg/DFGGraph.h  1090
1 file changed, 612 insertions(+), 478 deletions(-)
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.h b/Source/JavaScriptCore/dfg/DFGGraph.h
index 7a5170048..8431d4813 100644
--- a/Source/JavaScriptCore/dfg/DFGGraph.h
+++ b/Source/JavaScriptCore/dfg/DFGGraph.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,27 +23,25 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DFGGraph_h
-#define DFGGraph_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(DFG_JIT)
#include "AssemblyHelpers.h"
+#include "BytecodeLivenessAnalysisInlines.h"
#include "CodeBlock.h"
#include "DFGArgumentPosition.h"
#include "DFGBasicBlock.h"
-#include "DFGDominators.h"
+#include "DFGFrozenValue.h"
#include "DFGLongLivedState.h"
-#include "DFGNaturalLoops.h"
#include "DFGNode.h"
#include "DFGNodeAllocator.h"
#include "DFGPlan.h"
-#include "DFGVariadicFunction.h"
-#include "InlineCallFrameSet.h"
-#include "JSStack.h"
+#include "DFGPropertyTypeKey.h"
+#include "DFGScannable.h"
+#include "FullBytecodeLiveness.h"
#include "MethodOfGettingAValueProfile.h"
+#include <unordered_map>
#include <wtf/BitVector.h>
#include <wtf/HashMap.h>
#include <wtf/Vector.h>
@@ -56,10 +54,58 @@ class ExecState;
namespace DFG {
-struct StorageAccessData {
- PropertyOffset offset;
- unsigned identifierNumber;
-};
+class BackwardsCFG;
+class BackwardsDominators;
+class CFG;
+class ControlEquivalenceAnalysis;
+class Dominators;
+class FlowIndexing;
+class NaturalLoops;
+class PrePostNumbering;
+
+template<typename> class FlowMap;
+
+#define DFG_NODE_DO_TO_CHILDREN(graph, node, thingToDo) do { \
+ Node* _node = (node); \
+ if (_node->flags() & NodeHasVarArgs) { \
+ for (unsigned _childIdx = _node->firstChild(); \
+ _childIdx < _node->firstChild() + _node->numChildren(); \
+ _childIdx++) { \
+ if (!!(graph).m_varArgChildren[_childIdx]) \
+ thingToDo(_node, (graph).m_varArgChildren[_childIdx]); \
+ } \
+ } else { \
+ if (!_node->child1()) { \
+ ASSERT( \
+ !_node->child2() \
+ && !_node->child3()); \
+ break; \
+ } \
+ thingToDo(_node, _node->child1()); \
+ \
+ if (!_node->child2()) { \
+ ASSERT(!_node->child3()); \
+ break; \
+ } \
+ thingToDo(_node, _node->child2()); \
+ \
+ if (!_node->child3()) \
+ break; \
+ thingToDo(_node, _node->child3()); \
+ } \
+ } while (false)
+
+#define DFG_ASSERT(graph, node, assertion) do { \
+ if (!!(assertion)) \
+ break; \
+ (graph).handleAssertionFailure( \
+ (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
+ } while (false)
+
+#define DFG_CRASH(graph, node, reason) do { \
+ (graph).handleAssertionFailure( \
+ (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, (reason)); \
+ } while (false)
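As a rough sketch of the intended call pattern for these macros (the helper function below is hypothetical, not part of this diff; the node checks reuse ops that appear elsewhere in this header):

    // Illustrative sketch: DFG_ASSERT reports file/line/function and the failing
    // expression via Graph::handleAssertionFailure(); DFG_CRASH fails unconditionally.
    static void checkAddNode(Graph& graph, Node* node) // hypothetical helper
    {
        DFG_ASSERT(graph, node, node->op() == ArithAdd || node->op() == ValueAdd);
        if (!node->child1() || !node->child2())
            DFG_CRASH(graph, node, "Add node is missing a child");
    }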
struct InlineVariableData {
InlineCallFrame* inlineCallFrame;
@@ -78,7 +124,7 @@ enum AddSpeculationMode {
//
// The order may be significant for nodes with side-effects (property accesses, value conversions).
// Nodes that are 'dead' remain in the vector with refCount 0.
-class Graph {
+class Graph : public virtual Scannable {
public:
Graph(VM&, Plan&, LongLivedState&);
~Graph();
@@ -126,7 +172,7 @@ public:
return;
// Check if there is any replacement.
- Node* replacement = child->misc.replacement;
+ Node* replacement = child->replacement();
if (!replacement)
return;
@@ -134,55 +180,54 @@ public:
// There is definitely a replacement. Assert that the replacement does not
// have a replacement.
- ASSERT(!child->misc.replacement);
+ ASSERT(!child->replacement());
}
-#define DFG_DEFINE_ADD_NODE(templatePre, templatePost, typeParams, valueParamsComma, valueParams, valueArgs) \
- templatePre typeParams templatePost Node* addNode(SpeculatedType type valueParamsComma valueParams) \
- { \
- Node* node = new (m_allocator) Node(valueArgs); \
- node->predict(type); \
- return node; \
+ template<typename... Params>
+ Node* addNode(Params... params)
+ {
+ Node* node = new (m_allocator) Node(params...);
+ addNodeToMapByIndex(node);
+ return node;
}
- DFG_VARIADIC_TEMPLATE_FUNCTION(DFG_DEFINE_ADD_NODE)
-#undef DFG_DEFINE_ADD_NODE
+ template<typename... Params>
+ Node* addNode(SpeculatedType type, Params... params)
+ {
+ Node* node = new (m_allocator) Node(params...);
+ node->predict(type);
+ addNodeToMapByIndex(node);
+ return node;
+ }
+
+ void deleteNode(Node*);
+ unsigned maxNodeCount() const { return m_nodesByIndex.size(); }
+ Node* nodeAt(unsigned index) const { return m_nodesByIndex[index]; }
+ void packNodeIndices();
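A minimal sketch of the node-lifetime helpers introduced here; addNode() forwards its arguments to Node's constructor (optionally calling predict() first), while the pair below manages the index map (the surrounding function is hypothetical):

    // Illustrative sketch: deleteNode() frees a node and recycles its index;
    // packNodeIndices() renumbers survivors so the index space stays dense.
    static void removeDeadNode(Graph& graph, Node* node) // hypothetical helper
    {
        graph.deleteNode(node);
        graph.packNodeIndices();
    }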
void dethread();
- void convertToConstant(Node* node, unsigned constantNumber)
- {
- if (node->op() == GetLocal)
- dethread();
- else
- ASSERT(!node->hasVariableAccessData(*this));
- node->convertToConstant(constantNumber);
- }
-
- unsigned constantRegisterForConstant(JSValue value)
- {
- unsigned constantRegister;
- if (!m_codeBlock->findConstant(value, constantRegister)) {
- constantRegister = m_codeBlock->addConstantLazily();
- initializeLazyWriteBarrierForConstant(
- m_plan.writeBarriers,
- m_codeBlock->constants()[constantRegister],
- m_codeBlock,
- constantRegister,
- m_codeBlock->ownerExecutable(),
- value);
- }
- return constantRegister;
- }
+ FrozenValue* freeze(JSValue); // We use weak freezing by default.
+ FrozenValue* freezeStrong(JSValue); // Shorthand for freeze(value)->strengthenTo(StrongValue).
+
+ void convertToConstant(Node* node, FrozenValue* value);
+ void convertToConstant(Node* node, JSValue value);
+ void convertToStrongConstant(Node* node, JSValue value);
- void convertToConstant(Node* node, JSValue value)
+ RegisteredStructure registerStructure(Structure* structure)
{
- convertToConstant(node, constantRegisterForConstant(value));
+ StructureRegistrationResult ignored;
+ return registerStructure(structure, ignored);
}
-
+ RegisteredStructure registerStructure(Structure*, StructureRegistrationResult&);
+ void assertIsRegistered(Structure* structure);
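A hedged sketch of the registration invariant these declarations imply (someStructure is a placeholder):

    // Illustrative sketch: a Structure should be registered before the DFG bakes
    // it into nodes; assertIsRegistered() documents and checks that invariant.
    RegisteredStructure registered = graph.registerStructure(someStructure);
    graph.assertIsRegistered(someStructure);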
+
// CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names).
void dump(PrintStream& = WTF::dataFile(), DumpContext* = 0);
+
+ bool terminalsAreValid();
+
enum PhiNodeDumpMode { DumpLivePhisOnly, DumpAllPhis };
- void dumpBlockHeader(PrintStream&, const char* prefix, BasicBlock*, PhiNodeDumpMode, DumpContext* context);
+ void dumpBlockHeader(PrintStream&, const char* prefix, BasicBlock*, PhiNodeDumpMode, DumpContext*);
void dump(PrintStream&, Edge);
void dump(PrintStream&, const char* prefix, Node*, DumpContext* = 0);
static int amountOfNodeWhiteSpace(Node*);
@@ -190,52 +235,57 @@ public:
// Dump the code origin of the given node as a diff from the code origin of the
// preceding node. Returns true if anything was printed.
- bool dumpCodeOrigin(PrintStream&, const char* prefix, Node* previousNode, Node* currentNode, DumpContext* context);
+ bool dumpCodeOrigin(PrintStream&, const char* prefix, Node*& previousNode, Node* currentNode, DumpContext*);
- SpeculatedType getJSConstantSpeculation(Node* node)
- {
- return speculationFromValue(node->valueOfJSConstant(m_codeBlock));
- }
-
- AddSpeculationMode addSpeculationMode(Node* add, bool leftShouldSpeculateInt32, bool rightShouldSpeculateInt32)
+ AddSpeculationMode addSpeculationMode(Node* add, bool leftShouldSpeculateInt32, bool rightShouldSpeculateInt32, PredictionPass pass)
{
ASSERT(add->op() == ValueAdd || add->op() == ArithAdd || add->op() == ArithSub);
+ RareCaseProfilingSource source = add->sourceFor(pass);
+
Node* left = add->child1().node();
Node* right = add->child2().node();
if (left->hasConstant())
- return addImmediateShouldSpeculateInt32(add, rightShouldSpeculateInt32, left);
+ return addImmediateShouldSpeculateInt32(add, rightShouldSpeculateInt32, right, left, source);
if (right->hasConstant())
- return addImmediateShouldSpeculateInt32(add, leftShouldSpeculateInt32, right);
+ return addImmediateShouldSpeculateInt32(add, leftShouldSpeculateInt32, left, right, source);
- return (leftShouldSpeculateInt32 && rightShouldSpeculateInt32 && add->canSpeculateInt32()) ? SpeculateInt32 : DontSpeculateInt32;
+ return (leftShouldSpeculateInt32 && rightShouldSpeculateInt32 && add->canSpeculateInt32(source)) ? SpeculateInt32 : DontSpeculateInt32;
}
- AddSpeculationMode valueAddSpeculationMode(Node* add)
+ AddSpeculationMode valueAddSpeculationMode(Node* add, PredictionPass pass)
{
- return addSpeculationMode(add, add->child1()->shouldSpeculateInt32ExpectingDefined(), add->child2()->shouldSpeculateInt32ExpectingDefined());
+ return addSpeculationMode(
+ add,
+ add->child1()->shouldSpeculateInt32OrBooleanExpectingDefined(),
+ add->child2()->shouldSpeculateInt32OrBooleanExpectingDefined(),
+ pass);
}
- AddSpeculationMode arithAddSpeculationMode(Node* add)
+ AddSpeculationMode arithAddSpeculationMode(Node* add, PredictionPass pass)
{
- return addSpeculationMode(add, add->child1()->shouldSpeculateInt32ForArithmetic(), add->child2()->shouldSpeculateInt32ForArithmetic());
+ return addSpeculationMode(
+ add,
+ add->child1()->shouldSpeculateInt32OrBooleanForArithmetic(),
+ add->child2()->shouldSpeculateInt32OrBooleanForArithmetic(),
+ pass);
}
- AddSpeculationMode addSpeculationMode(Node* add)
+ AddSpeculationMode addSpeculationMode(Node* add, PredictionPass pass)
{
if (add->op() == ValueAdd)
- return valueAddSpeculationMode(add);
+ return valueAddSpeculationMode(add, pass);
- return arithAddSpeculationMode(add);
+ return arithAddSpeculationMode(add, pass);
}
- bool addShouldSpeculateInt32(Node* add)
+ bool addShouldSpeculateInt32(Node* add, PredictionPass pass)
{
- return addSpeculationMode(add) != DontSpeculateInt32;
+ return addSpeculationMode(add, pass) != DontSpeculateInt32;
}
- bool addShouldSpeculateMachineInt(Node* add)
+ bool addShouldSpeculateAnyInt(Node* add)
{
if (!enableInt52())
return false;
@@ -243,153 +293,115 @@ public:
Node* left = add->child1().node();
Node* right = add->child2().node();
- bool speculation;
- if (add->op() == ValueAdd)
- speculation = Node::shouldSpeculateMachineInt(left, right);
- else
- speculation = Node::shouldSpeculateMachineInt(left, right);
+ if (hasExitSite(add, Int52Overflow))
+ return false;
+
+ if (Node::shouldSpeculateAnyInt(left, right))
+ return true;
+
+ auto shouldSpeculateAnyIntForAdd = [](Node* node) {
+ auto isAnyIntSpeculationForAdd = [](SpeculatedType value) {
+ return !!value && (value & (SpecAnyInt | SpecAnyIntAsDouble)) == value;
+ };
- return speculation && !hasExitSite(add, Int52Overflow);
+ // When a DoubleConstant node appears, it means the user explicitly wrote the constant in double form rather than integer form (1.0 instead of 1).
+ // In that case, we should honor that decision: using it as an integer is not appropriate.
+ if (node->op() == DoubleConstant)
+ return false;
+ return isAnyIntSpeculationForAdd(node->prediction());
+ };
+
+ // Allow an AnyInt ArithAdd only when one side of the binary operation should be speculated AnyInt. This is a somewhat
+ // conservative decision, because Double to Int52 conversion is not cheap: frequent back-and-forth conversions between
+ // Double and Int52 actually hurt performance. If one side of the operation is already Int52, constructing the ArithAdd
+ // becomes cheap, since at most one Double to Int52 conversion is required.
+ // This recovers some regressions in assorted tests while keeping the kraken crypto improvements.
+ if (!left->shouldSpeculateAnyInt() && !right->shouldSpeculateAnyInt())
+ return false;
+
+ auto usesAsNumbers = [](Node* node) {
+ NodeFlags flags = node->flags() & NodeBytecodeBackPropMask;
+ if (!flags)
+ return false;
+ return (flags & (NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex)) == flags;
+ };
+
+ // Wrapping an Int52 into a Value is also not cheap. Thus, we allow Int52 addition only when the node is used as a number.
+ if (!usesAsNumbers(add))
+ return false;
+
+ return shouldSpeculateAnyIntForAdd(left) && shouldSpeculateAnyIntForAdd(right);
}
- bool mulShouldSpeculateInt32(Node* mul)
+ bool binaryArithShouldSpeculateInt32(Node* node, PredictionPass pass)
{
- ASSERT(mul->op() == ArithMul);
+ Node* left = node->child1().node();
+ Node* right = node->child2().node();
- Node* left = mul->child1().node();
- Node* right = mul->child2().node();
-
- return Node::shouldSpeculateInt32ForArithmetic(left, right)
- && mul->canSpeculateInt32();
+ return Node::shouldSpeculateInt32OrBooleanForArithmetic(left, right)
+ && node->canSpeculateInt32(node->sourceFor(pass));
}
- bool mulShouldSpeculateMachineInt(Node* mul)
+ bool binaryArithShouldSpeculateAnyInt(Node* node, PredictionPass pass)
{
- ASSERT(mul->op() == ArithMul);
-
if (!enableInt52())
return false;
- Node* left = mul->child1().node();
- Node* right = mul->child2().node();
+ Node* left = node->child1().node();
+ Node* right = node->child2().node();
- return Node::shouldSpeculateMachineInt(left, right)
- && mul->canSpeculateInt52()
- && !hasExitSite(mul, Int52Overflow);
+ return Node::shouldSpeculateAnyInt(left, right)
+ && node->canSpeculateInt52(pass)
+ && !hasExitSite(node, Int52Overflow);
}
- bool negateShouldSpeculateInt32(Node* negate)
+ bool unaryArithShouldSpeculateInt32(Node* node, PredictionPass pass)
{
- ASSERT(negate->op() == ArithNegate);
- return negate->child1()->shouldSpeculateInt32ForArithmetic() && negate->canSpeculateInt32();
+ return node->child1()->shouldSpeculateInt32OrBooleanForArithmetic()
+ && node->canSpeculateInt32(pass);
}
- bool negateShouldSpeculateMachineInt(Node* negate)
+ bool unaryArithShouldSpeculateAnyInt(Node* node, PredictionPass pass)
{
- ASSERT(negate->op() == ArithNegate);
if (!enableInt52())
return false;
- return negate->child1()->shouldSpeculateMachineInt()
- && !hasExitSite(negate, Int52Overflow)
- && negate->canSpeculateInt52();
+ return node->child1()->shouldSpeculateAnyInt()
+ && node->canSpeculateInt52(pass)
+ && !hasExitSite(node, Int52Overflow);
}
-
- VirtualRegister bytecodeRegisterForArgument(CodeOrigin codeOrigin, int argument)
+
+ bool canOptimizeStringObjectAccess(const CodeOrigin&);
+
+ bool getRegExpPrototypeProperty(JSObject* regExpPrototype, Structure* regExpPrototypeStructure, UniquedStringImpl* uid, JSValue& returnJSValue);
+
+ bool roundShouldSpeculateInt32(Node* arithRound, PredictionPass pass)
{
- return VirtualRegister(
- codeOrigin.inlineCallFrame->stackOffset +
- baselineCodeBlockFor(codeOrigin)->argumentIndexAfterCapture(argument));
+ ASSERT(arithRound->op() == ArithRound || arithRound->op() == ArithFloor || arithRound->op() == ArithCeil || arithRound->op() == ArithTrunc);
+ return arithRound->canSpeculateInt32(pass) && !hasExitSite(arithRound->origin.semantic, Overflow) && !hasExitSite(arithRound->origin.semantic, NegativeZero);
}
- // Helper methods to check nodes for constants.
- bool isConstant(Node* node)
- {
- return node->hasConstant();
- }
- bool isJSConstant(Node* node)
- {
- return node->hasConstant();
- }
- bool isInt32Constant(Node* node)
- {
- return node->isInt32Constant(m_codeBlock);
- }
- bool isDoubleConstant(Node* node)
- {
- return node->isDoubleConstant(m_codeBlock);
- }
- bool isNumberConstant(Node* node)
- {
- return node->isNumberConstant(m_codeBlock);
- }
- bool isBooleanConstant(Node* node)
- {
- return node->isBooleanConstant(m_codeBlock);
- }
- bool isCellConstant(Node* node)
- {
- if (!isJSConstant(node))
- return false;
- JSValue value = valueOfJSConstant(node);
- return value.isCell() && !!value;
- }
- bool isFunctionConstant(Node* node)
- {
- if (!isJSConstant(node))
- return false;
- if (!getJSFunction(valueOfJSConstant(node)))
- return false;
- return true;
- }
- bool isInternalFunctionConstant(Node* node)
- {
- if (!isJSConstant(node))
- return false;
- JSValue value = valueOfJSConstant(node);
- if (!value.isCell() || !value)
- return false;
- JSCell* cell = value.asCell();
- if (!cell->inherits(InternalFunction::info()))
- return false;
- return true;
- }
- // Helper methods get constant values from nodes.
- JSValue valueOfJSConstant(Node* node)
- {
- return node->valueOfJSConstant(m_codeBlock);
- }
- int32_t valueOfInt32Constant(Node* node)
- {
- return valueOfJSConstant(node).asInt32();
- }
- double valueOfNumberConstant(Node* node)
- {
- return valueOfJSConstant(node).asNumber();
- }
- bool valueOfBooleanConstant(Node* node)
- {
- return valueOfJSConstant(node).asBoolean();
- }
- JSFunction* valueOfFunctionConstant(Node* node)
- {
- JSCell* function = getJSFunction(valueOfJSConstant(node));
- ASSERT(function);
- return jsCast<JSFunction*>(function);
- }
-
static const char *opName(NodeType);
- StructureSet* addStructureSet(const StructureSet& structureSet)
+ RegisteredStructureSet* addStructureSet(const StructureSet& structureSet)
{
- ASSERT(structureSet.size());
- m_structureSet.append(structureSet);
- return &m_structureSet.last();
+ m_structureSets.append();
+ RegisteredStructureSet* result = &m_structureSets.last();
+
+ for (Structure* structure : structureSet)
+ result->add(registerStructure(structure));
+
+ return result;
}
-
- StructureTransitionData* addStructureTransitionData(const StructureTransitionData& structureTransitionData)
+
+ RegisteredStructureSet* addStructureSet(const RegisteredStructureSet& structureSet)
{
- m_structureTransitionData.append(structureTransitionData);
- return &m_structureTransitionData.last();
+ m_structureSets.append();
+ RegisteredStructureSet* result = &m_structureSets.last();
+
+ for (RegisteredStructure structure : structureSet)
+ result->add(structure);
+
+ return result;
}
JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
@@ -406,9 +418,9 @@ public:
ScriptExecutable* executableFor(InlineCallFrame* inlineCallFrame)
{
if (!inlineCallFrame)
- return m_codeBlock->ownerExecutable();
+ return m_codeBlock->ownerScriptExecutable();
- return inlineCallFrame->executable.get();
+ return inlineCallFrame->baselineCodeBlock->ownerScriptExecutable();
}
ScriptExecutable* executableFor(const CodeOrigin& codeOrigin)
@@ -432,7 +444,7 @@ public:
{
if (!codeOrigin.inlineCallFrame)
return m_codeBlock->isStrictMode();
- return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->isStrictMode();
+ return codeOrigin.inlineCallFrame->isStrictMode();
}
ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
@@ -442,8 +454,7 @@ public:
bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin& codeOrigin)
{
- return m_plan.watchpoints.isStillValid(
- globalObjectFor(codeOrigin)->masqueradesAsUndefinedWatchpoint());
+ return globalObjectFor(codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid();
}
bool hasGlobalExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
@@ -458,142 +469,24 @@ public:
bool hasExitSite(Node* node, ExitKind exitKind)
{
- return hasExitSite(node->codeOrigin, exitKind);
- }
-
- VirtualRegister argumentsRegisterFor(InlineCallFrame* inlineCallFrame)
- {
- if (!inlineCallFrame)
- return m_profiledBlock->argumentsRegister();
-
- return VirtualRegister(baselineCodeBlockForInlineCallFrame(
- inlineCallFrame)->argumentsRegister().offset() +
- inlineCallFrame->stackOffset);
- }
-
- VirtualRegister argumentsRegisterFor(const CodeOrigin& codeOrigin)
- {
- return argumentsRegisterFor(codeOrigin.inlineCallFrame);
- }
-
- VirtualRegister machineArgumentsRegisterFor(InlineCallFrame* inlineCallFrame)
- {
- if (!inlineCallFrame)
- return m_codeBlock->argumentsRegister();
-
- return inlineCallFrame->argumentsRegister;
- }
-
- VirtualRegister machineArgumentsRegisterFor(const CodeOrigin& codeOrigin)
- {
- return machineArgumentsRegisterFor(codeOrigin.inlineCallFrame);
- }
-
- VirtualRegister uncheckedArgumentsRegisterFor(InlineCallFrame* inlineCallFrame)
- {
- if (!inlineCallFrame)
- return m_profiledBlock->uncheckedArgumentsRegister();
-
- CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
- if (!codeBlock->usesArguments())
- return VirtualRegister();
-
- return VirtualRegister(codeBlock->argumentsRegister().offset() +
- inlineCallFrame->stackOffset);
- }
-
- VirtualRegister uncheckedArgumentsRegisterFor(const CodeOrigin& codeOrigin)
- {
- return uncheckedArgumentsRegisterFor(codeOrigin.inlineCallFrame);
- }
-
- VirtualRegister activationRegister()
- {
- return m_profiledBlock->activationRegister();
- }
-
- VirtualRegister uncheckedActivationRegister()
- {
- return m_profiledBlock->uncheckedActivationRegister();
- }
-
- VirtualRegister machineActivationRegister()
- {
- return m_profiledBlock->activationRegister();
- }
-
- VirtualRegister uncheckedMachineActivationRegister()
- {
- return m_profiledBlock->uncheckedActivationRegister();
- }
-
- ValueProfile* valueProfileFor(Node* node)
- {
- if (!node)
- return 0;
-
- CodeBlock* profiledBlock = baselineCodeBlockFor(node->codeOrigin);
-
- if (node->op() == GetArgument)
- return profiledBlock->valueProfileForArgument(node->local().toArgument());
-
- if (node->hasLocal(*this)) {
- if (m_form == SSA)
- return 0;
- if (!node->local().isArgument())
- return 0;
- int argument = node->local().toArgument();
- if (node->variableAccessData() != m_arguments[argument]->variableAccessData())
- return 0;
- return profiledBlock->valueProfileForArgument(argument);
- }
-
- if (node->hasHeapPrediction())
- return profiledBlock->valueProfileForBytecodeOffset(node->codeOrigin.bytecodeIndex);
-
- return 0;
- }
-
- MethodOfGettingAValueProfile methodOfGettingAValueProfileFor(Node* node)
- {
- if (!node)
- return MethodOfGettingAValueProfile();
-
- CodeBlock* profiledBlock = baselineCodeBlockFor(node->codeOrigin);
-
- if (node->op() == GetLocal) {
- return MethodOfGettingAValueProfile::fromLazyOperand(
- profiledBlock,
- LazyOperandValueProfileKey(
- node->codeOrigin.bytecodeIndex, node->local()));
- }
-
- return MethodOfGettingAValueProfile(valueProfileFor(node));
- }
-
- bool needsActivation() const
- {
- return m_codeBlock->needsFullScopeChain() && m_codeBlock->codeType() != GlobalCode;
+ return hasExitSite(node->origin.semantic, exitKind);
}
- bool usesArguments() const
- {
- return m_codeBlock->usesArguments();
- }
+ MethodOfGettingAValueProfile methodOfGettingAValueProfileFor(Node* currentNode, Node* operandNode);
BlockIndex numBlocks() const { return m_blocks.size(); }
BasicBlock* block(BlockIndex blockIndex) const { return m_blocks[blockIndex].get(); }
BasicBlock* lastBlock() const { return block(numBlocks() - 1); }
- void appendBlock(PassRefPtr<BasicBlock> basicBlock)
+ void appendBlock(Ref<BasicBlock>&& basicBlock)
{
basicBlock->index = m_blocks.size();
- m_blocks.append(basicBlock);
+ m_blocks.append(WTFMove(basicBlock));
}
void killBlock(BlockIndex blockIndex)
{
- m_blocks[blockIndex].clear();
+ m_blocks[blockIndex] = nullptr;
}
void killBlock(BasicBlock* basicBlock)
@@ -605,76 +498,10 @@ public:
void killUnreachableBlocks();
- bool isPredictedNumerical(Node* node)
- {
- return isNumerical(node->child1().useKind()) && isNumerical(node->child2().useKind());
- }
-
- // Note that a 'true' return does not actually mean that the ByVal access clobbers nothing.
- // It really means that it will not clobber the entire world. It's still up to you to
- // carefully consider things like:
- // - PutByVal definitely changes the array it stores to, and may even change its length.
- // - PutByOffset definitely changes the object it stores to.
- // - and so on.
- bool byValIsPure(Node* node)
- {
- switch (node->arrayMode().type()) {
- case Array::Generic:
- return false;
- case Array::Int32:
- case Array::Double:
- case Array::Contiguous:
- case Array::ArrayStorage:
- return !node->arrayMode().isOutOfBounds();
- case Array::SlowPutArrayStorage:
- return !node->arrayMode().mayStoreToHole();
- case Array::String:
- return node->op() == GetByVal && node->arrayMode().isInBounds();
-#if USE(JSVALUE32_64)
- case Array::Arguments:
- if (node->op() == GetByVal)
- return true;
- return false;
-#endif // USE(JSVALUE32_64)
- default:
- return true;
- }
- }
-
- bool clobbersWorld(Node* node)
- {
- if (node->flags() & NodeClobbersWorld)
- return true;
- if (!(node->flags() & NodeMightClobber))
- return false;
- switch (node->op()) {
- case GetByVal:
- case PutByValDirect:
- case PutByVal:
- case PutByValAlias:
- return !byValIsPure(node);
- case ToString:
- switch (node->child1().useKind()) {
- case StringObjectUse:
- case StringOrStringObjectUse:
- return false;
- case CellUse:
- case UntypedUse:
- return true;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return true;
- }
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return true; // If by some oddity we hit this case in release build it's safer to have CSE assume the worst.
- }
- }
-
void determineReachability();
void resetReachability();
- void resetExitStates();
+ void computeRefCounts();
unsigned varArgNumChildren(Node* node)
{
@@ -702,7 +529,7 @@ public:
return node->children.child(index);
}
- void voteNode(Node* node, unsigned ballot)
+ void voteNode(Node* node, unsigned ballot, float weight = 1)
{
switch (node->op()) {
case ValueToInt32:
@@ -714,35 +541,35 @@ public:
}
if (node->op() == GetLocal)
- node->variableAccessData()->vote(ballot);
+ node->variableAccessData()->vote(ballot, weight);
}
- void voteNode(Edge edge, unsigned ballot)
+ void voteNode(Edge edge, unsigned ballot, float weight = 1)
{
- voteNode(edge.node(), ballot);
+ voteNode(edge.node(), ballot, weight);
}
- void voteChildren(Node* node, unsigned ballot)
+ void voteChildren(Node* node, unsigned ballot, float weight = 1)
{
if (node->flags() & NodeHasVarArgs) {
for (unsigned childIdx = node->firstChild();
childIdx < node->firstChild() + node->numChildren();
childIdx++) {
if (!!m_varArgChildren[childIdx])
- voteNode(m_varArgChildren[childIdx], ballot);
+ voteNode(m_varArgChildren[childIdx], ballot, weight);
}
return;
}
if (!node->child1())
return;
- voteNode(node->child1(), ballot);
+ voteNode(node->child1(), ballot, weight);
if (!node->child2())
return;
- voteNode(node->child2(), ballot);
+ voteNode(node->child2(), ballot, weight);
if (!node->child3())
return;
- voteNode(node->child3(), ballot);
+ voteNode(node->child3(), ballot, weight);
}
template<typename T> // T = Node* or Edge
@@ -776,33 +603,319 @@ public:
void substituteGetLocal(BasicBlock& block, unsigned startIndexInBlock, VariableAccessData* variableAccessData, Node* newGetLocal);
void invalidateCFG();
+ void invalidateNodeLiveness();
+
+ void clearFlagsOnAllNodes(NodeFlags);
void clearReplacements();
+ void clearEpochs();
void initializeNodeOwners();
- void getBlocksInDepthFirstOrder(Vector<BasicBlock*>& result);
+ BlockList blocksInPreOrder();
+ BlockList blocksInPostOrder();
+
+ class NaturalBlockIterable {
+ public:
+ NaturalBlockIterable()
+ : m_graph(nullptr)
+ {
+ }
+
+ NaturalBlockIterable(Graph& graph)
+ : m_graph(&graph)
+ {
+ }
+
+ class iterator {
+ public:
+ iterator()
+ : m_graph(nullptr)
+ , m_index(0)
+ {
+ }
+
+ iterator(Graph& graph, BlockIndex index)
+ : m_graph(&graph)
+ , m_index(findNext(index))
+ {
+ }
+
+ BasicBlock* operator*()
+ {
+ return m_graph->block(m_index);
+ }
+
+ iterator& operator++()
+ {
+ m_index = findNext(m_index + 1);
+ return *this;
+ }
+
+ bool operator==(const iterator& other) const
+ {
+ return m_index == other.m_index;
+ }
+
+ bool operator!=(const iterator& other) const
+ {
+ return !(*this == other);
+ }
+
+ private:
+ BlockIndex findNext(BlockIndex index)
+ {
+ while (index < m_graph->numBlocks() && !m_graph->block(index))
+ index++;
+ return index;
+ }
+
+ Graph* m_graph;
+ BlockIndex m_index;
+ };
+
+ iterator begin()
+ {
+ return iterator(*m_graph, 0);
+ }
+
+ iterator end()
+ {
+ return iterator(*m_graph, m_graph->numBlocks());
+ }
+
+ private:
+ Graph* m_graph;
+ };
+
+ NaturalBlockIterable blocksInNaturalOrder()
+ {
+ return NaturalBlockIterable(*this);
+ }
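A usage sketch for the iterable (assuming WTF's dataLog for output):

    // Illustrative sketch: killed blocks are null in m_blocks; the iterator's
    // findNext() skips them, so 'block' is never null inside the loop.
    for (BasicBlock* block : graph.blocksInNaturalOrder())
        dataLog("Block #", block->index, "\n");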
+
+ template<typename ChildFunctor>
+ void doToChildrenWithNode(Node* node, const ChildFunctor& functor)
+ {
+ DFG_NODE_DO_TO_CHILDREN(*this, node, functor);
+ }
+
+ template<typename ChildFunctor>
+ void doToChildren(Node* node, const ChildFunctor& functor)
+ {
+ doToChildrenWithNode(
+ node,
+ [&functor] (Node*, Edge& edge) {
+ functor(edge);
+ });
+ }
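A sketch of the edge-visiting helper built on DFG_NODE_DO_TO_CHILDREN above (countLiveEdges is a hypothetical helper):

    // Illustrative sketch: doToChildren() invokes the functor once per non-empty
    // edge, whether the node uses fixed children or m_varArgChildren.
    static unsigned countLiveEdges(Graph& graph, Node* node)
    {
        unsigned count = 0;
        graph.doToChildren(node, [&] (Edge) { count++; });
        return count;
    }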
+
+ bool uses(Node* node, Node* child)
+ {
+ bool result = false;
+ doToChildren(node, [&] (Edge edge) { result |= edge == child; });
+ return result;
+ }
+
+ bool isWatchingHavingABadTimeWatchpoint(Node* node)
+ {
+ JSGlobalObject* globalObject = globalObjectFor(node->origin.semantic);
+ return watchpoints().isWatched(globalObject->havingABadTimeWatchpoint());
+ }
+
+ bool isWatchingArrayIteratorProtocolWatchpoint(Node* node)
+ {
+ JSGlobalObject* globalObject = globalObjectFor(node->origin.semantic);
+ InlineWatchpointSet& set = globalObject->arrayIteratorProtocolWatchpoint();
+ if (watchpoints().isWatched(set))
+ return true;
+
+ if (set.isStillValid()) {
+ // Since the global object owns this watchpoint, we keep a weak reference to it by freezing it.
+ // If the global object were deallocated, it would never fire the watchpoint. It's unlikely the
+ // global object would be deallocated without this code being thrown away too; still, it is
+ // logically sounder to depend on the global object's lifetime weakly.
+ freeze(globalObject);
+ watchpoints().addLazily(set);
+ return true;
+ }
+
+ return false;
+ }
Profiler::Compilation* compilation() { return m_plan.compilation.get(); }
DesiredIdentifiers& identifiers() { return m_plan.identifiers; }
DesiredWatchpoints& watchpoints() { return m_plan.watchpoints; }
- DesiredStructureChains& chains() { return m_plan.chains; }
+
+ // Returns false if the key is already invalid or unwatchable. If this is a Presence condition,
+ // this also makes it cheap to query if the condition holds. Also makes sure that the GC knows
+ // what's going on.
+ bool watchCondition(const ObjectPropertyCondition&);
+ bool watchConditions(const ObjectPropertyConditionSet&);
+
+ // Checks if it's known that loading from the given object at the given offset is fine. This is
+ // computed by tracking which conditions we track with watchCondition().
+ bool isSafeToLoad(JSObject* base, PropertyOffset);
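A hedged sketch tying these two together (condition, baseObject, and offset are placeholders):

    // Illustrative sketch: fold a property load only if the condition can be
    // watched; for Presence conditions, isSafeToLoad() then becomes a cheap query.
    if (graph.watchCondition(condition) && graph.isSafeToLoad(baseObject, offset)) {
        // ... safe to constant-fold the load of (baseObject, offset) ...
    }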
+
+ void registerInferredType(const InferredType::Descriptor& type)
+ {
+ if (type.structure())
+ registerStructure(type.structure());
+ }
+
+ // Tells us what inferred type we are able to prove the property to have now and in the future.
+ InferredType::Descriptor inferredTypeFor(const PropertyTypeKey&);
+ InferredType::Descriptor inferredTypeForProperty(Structure* structure, UniquedStringImpl* uid)
+ {
+ return inferredTypeFor(PropertyTypeKey(structure, uid));
+ }
+
+ AbstractValue inferredValueForProperty(
+ const RegisteredStructureSet& base, UniquedStringImpl* uid, StructureClobberState = StructuresAreWatched);
+
+ // This uses either constant property inference or property type inference to derive a good abstract
+ // value for some property accessed with the given abstract value base.
+ AbstractValue inferredValueForProperty(
+ const AbstractValue& base, UniquedStringImpl* uid, PropertyOffset, StructureClobberState);
FullBytecodeLiveness& livenessFor(CodeBlock*);
FullBytecodeLiveness& livenessFor(InlineCallFrame*);
+
+ // Quickly query if a single local is live at the given point. This is faster than calling
+ // forAllLiveInBytecode() if you will only query one local. But, if you want to know all of the
+ // locals live, then calling this for each local is much slower than forAllLiveInBytecode().
bool isLiveInBytecode(VirtualRegister, CodeOrigin);
+ // Quickly get all of the non-argument locals live at the given point. This doesn't give you
+ // any arguments because those are all presumed live. You can call forAllLiveInBytecode() to
+ // also get the arguments. This is much faster than calling isLiveInBytecode() for each local.
+ template<typename Functor>
+ void forAllLocalsLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor)
+ {
+ // Support for not redundantly reporting arguments. This is necessary because in the case of a
+ // varargs call, only the callee knows that the arguments are live, while in the case of a
+ // non-varargs call, both callee and caller see the variables as live.
+ VirtualRegister exclusionStart;
+ VirtualRegister exclusionEnd;
+
+ CodeOrigin* codeOriginPtr = &codeOrigin;
+
+ for (;;) {
+ InlineCallFrame* inlineCallFrame = codeOriginPtr->inlineCallFrame;
+ VirtualRegister stackOffset(inlineCallFrame ? inlineCallFrame->stackOffset : 0);
+
+ if (inlineCallFrame) {
+ if (inlineCallFrame->isClosureCall)
+ functor(stackOffset + CallFrameSlot::callee);
+ if (inlineCallFrame->isVarargs())
+ functor(stackOffset + CallFrameSlot::argumentCount);
+ }
+
+ CodeBlock* codeBlock = baselineCodeBlockFor(inlineCallFrame);
+ FullBytecodeLiveness& fullLiveness = livenessFor(codeBlock);
+ const FastBitVector& liveness = fullLiveness.getLiveness(codeOriginPtr->bytecodeIndex);
+ for (unsigned relativeLocal = codeBlock->m_numCalleeLocals; relativeLocal--;) {
+ VirtualRegister reg = stackOffset + virtualRegisterForLocal(relativeLocal);
+
+ // Don't report if our callee already reported.
+ if (reg >= exclusionStart && reg < exclusionEnd)
+ continue;
+
+ if (liveness[relativeLocal])
+ functor(reg);
+ }
+
+ if (!inlineCallFrame)
+ break;
+
+ // Arguments are always live. This would be redundant if it wasn't for our
+ // op_call_varargs inlining. See the comment above.
+ exclusionStart = stackOffset + CallFrame::argumentOffsetIncludingThis(0);
+ exclusionEnd = stackOffset + CallFrame::argumentOffsetIncludingThis(inlineCallFrame->arguments.size());
+
+ // We will always have a "this" argument and exclusionStart should be a smaller stack
+ // offset than exclusionEnd.
+ ASSERT(exclusionStart < exclusionEnd);
+
+ for (VirtualRegister reg = exclusionStart; reg < exclusionEnd; reg += 1)
+ functor(reg);
+
+ codeOriginPtr = inlineCallFrame->getCallerSkippingTailCalls();
+
+ // The first inline call frame could be an inline tail call
+ if (!codeOriginPtr)
+ break;
+ }
+ }
+
+ // Get a BitVector of all of the non-argument locals live right now. This is mostly useful if
+ // you want to compare two sets of live locals from two different CodeOrigins.
+ BitVector localsLiveInBytecode(CodeOrigin);
+
+ // Tells you all of the arguments and locals live at the given CodeOrigin. This is a small
+ // extension to forAllLocalsLiveInBytecode(), since all arguments are always presumed live.
+ template<typename Functor>
+ void forAllLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor)
+ {
+ forAllLocalsLiveInBytecode(codeOrigin, functor);
+
+ // Report all arguments as being live.
+ for (unsigned argument = block(0)->variablesAtHead.numberOfArguments(); argument--;)
+ functor(virtualRegisterForArgument(argument));
+ }
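A usage sketch (codeOrigin is a placeholder; VirtualRegister and Vector come from the surrounding headers):

    // Illustrative sketch: gather every live operand, arguments included.
    Vector<VirtualRegister> liveOperands;
    graph.forAllLiveInBytecode(codeOrigin, [&] (VirtualRegister reg) {
        liveOperands.append(reg);
    });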
+
+ BytecodeKills& killsFor(CodeBlock*);
+ BytecodeKills& killsFor(InlineCallFrame*);
+
+ static unsigned parameterSlotsForArgCount(unsigned);
+
unsigned frameRegisterCount();
+ unsigned stackPointerOffset();
unsigned requiredRegisterCountForExit();
unsigned requiredRegisterCountForExecutionAndExit();
- JSActivation* tryGetActivation(Node*);
- WriteBarrierBase<Unknown>* tryGetRegisters(Node*);
+ JSValue tryGetConstantProperty(JSValue base, const RegisteredStructureSet&, PropertyOffset);
+ JSValue tryGetConstantProperty(JSValue base, Structure*, PropertyOffset);
+ JSValue tryGetConstantProperty(JSValue base, const StructureAbstractValue&, PropertyOffset);
+ JSValue tryGetConstantProperty(const AbstractValue&, PropertyOffset);
+
+ JSValue tryGetConstantClosureVar(JSValue base, ScopeOffset);
+ JSValue tryGetConstantClosureVar(const AbstractValue&, ScopeOffset);
+ JSValue tryGetConstantClosureVar(Node*, ScopeOffset);
+
+ JSArrayBufferView* tryGetFoldableView(JSValue);
+ JSArrayBufferView* tryGetFoldableView(JSValue, ArrayMode arrayMode);
- JSArrayBufferView* tryGetFoldableView(Node*);
- JSArrayBufferView* tryGetFoldableView(Node*, ArrayMode);
- JSArrayBufferView* tryGetFoldableViewForChild1(Node*);
+ void registerFrozenValues();
+ void visitChildren(SlotVisitor&) override;
+
+ NO_RETURN_DUE_TO_CRASH void handleAssertionFailure(
+ std::nullptr_t, const char* file, int line, const char* function,
+ const char* assertion);
+ NO_RETURN_DUE_TO_CRASH void handleAssertionFailure(
+ Node*, const char* file, int line, const char* function,
+ const char* assertion);
+ NO_RETURN_DUE_TO_CRASH void handleAssertionFailure(
+ BasicBlock*, const char* file, int line, const char* function,
+ const char* assertion);
+
+ bool hasDebuggerEnabled() const { return m_hasDebuggerEnabled; }
+
+ Dominators& ensureDominators();
+ PrePostNumbering& ensurePrePostNumbering();
+ NaturalLoops& ensureNaturalLoops();
+ BackwardsCFG& ensureBackwardsCFG();
+ BackwardsDominators& ensureBackwardsDominators();
+ ControlEquivalenceAnalysis& ensureControlEquivalenceAnalysis();
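These accessors follow a lazy-build-and-cache pattern; a sketch of the intended use:

    // Illustrative sketch: the first ensure* call builds the analysis and stores
    // it in the corresponding std::unique_ptr member; later calls return the cache.
    Dominators& dominators = graph.ensureDominators();
    NaturalLoops& loops = graph.ensureNaturalLoops();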
+
+ // This function only makes sense to call after bytecode parsing
+ // because it queries the m_hasExceptionHandlers boolean whose value
+ // is only fully determined after bytecode parsing.
+ bool willCatchExceptionInMachineFrame(CodeOrigin, CodeOrigin& opCatchOriginOut, HandlerInfo*& catchHandlerOut);
+
+ bool needsScopeRegister() const { return m_hasDebuggerEnabled || m_codeBlock->usesEval(); }
+ bool needsFlushedThis() const { return m_codeBlock->usesEval(); }
+
VM& m_vm;
Plan& m_plan;
CodeBlock* m_codeBlock;
@@ -810,54 +923,126 @@ public:
NodeAllocator& m_allocator;
- Operands<AbstractValue> m_mustHandleAbstractValues;
-
Vector< RefPtr<BasicBlock> , 8> m_blocks;
Vector<Edge, 16> m_varArgChildren;
- Vector<StorageAccessData> m_storageAccessData;
+
+ HashMap<EncodedJSValue, FrozenValue*, EncodedJSValueHash, EncodedJSValueHashTraits> m_frozenValueMap;
+ Bag<FrozenValue> m_frozenValues;
+
+ Vector<uint32_t> m_uint32ValuesInUse;
+
+ Bag<StorageAccessData> m_storageAccessData;
+
+ // In CPS, this is all of the SetArgument nodes for the arguments in the machine code block
+ // that survived DCE. All of them except maybe "this" will survive DCE, because of the Flush
+ // nodes.
+ //
+ // In SSA, this is all of the GetStack nodes for the arguments in the machine code block that
+ // may have some speculation in the prologue and survived DCE. Note that to get the speculation
+ // for an argument in SSA, you must use m_argumentFormats, since we still have to speculate
+ // even if the argument got killed. For example:
+ //
+ // function foo(x) {
+ // var tmp = x + 1;
+ // }
+ //
+ // Assume that x is always int during profiling. The ArithAdd for "x + 1" will be dead and will
+ // have a proven check for the edge to "x". So, we will not insert a Check node and we will
+ // kill the GetStack for "x". But we must do the int check in the prologue, because that's the
+ // thing we used to allow DCE of ArithAdd. Otherwise the add could be impure:
+ //
+ // var o = {
+ // valueOf: function() { do side effects; }
+ // };
+ // foo(o);
+ //
+ // If we DCE the ArithAdd and we remove the int check on x, then this won't do the side
+ // effects.
Vector<Node*, 8> m_arguments;
+
+ // In CPS, this is meaningless. In SSA, this is the argument speculation that we've locked in.
+ Vector<FlushFormat> m_argumentFormats;
+
SegmentedVector<VariableAccessData, 16> m_variableAccessData;
SegmentedVector<ArgumentPosition, 8> m_argumentPositions;
- SegmentedVector<StructureSet, 16> m_structureSet;
- SegmentedVector<StructureTransitionData, 8> m_structureTransitionData;
+ Bag<Transition> m_transitions;
SegmentedVector<NewArrayBufferData, 4> m_newArrayBufferData;
- SegmentedVector<SwitchData, 4> m_switchData;
+ Bag<BranchData> m_branchData;
+ Bag<SwitchData> m_switchData;
+ Bag<MultiGetByOffsetData> m_multiGetByOffsetData;
+ Bag<MultiPutByOffsetData> m_multiPutByOffsetData;
+ Bag<ObjectMaterializationData> m_objectMaterializationData;
+ Bag<CallVarargsData> m_callVarargsData;
+ Bag<LoadVarargsData> m_loadVarargsData;
+ Bag<StackAccessData> m_stackAccessData;
+ Bag<LazyJSValue> m_lazyJSValues;
+ Bag<CallDOMGetterData> m_callDOMGetterData;
+ Bag<BitVector> m_bitVectors;
Vector<InlineVariableData, 4> m_inlineVariableData;
- OwnPtr<InlineCallFrameSet> m_inlineCallFrames;
HashMap<CodeBlock*, std::unique_ptr<FullBytecodeLiveness>> m_bytecodeLiveness;
- bool m_hasArguments;
- HashSet<ExecutableBase*> m_executablesWhoseArgumentsEscaped;
- BitVector m_lazyVars;
- Dominators m_dominators;
- NaturalLoops m_naturalLoops;
+ HashMap<CodeBlock*, std::unique_ptr<BytecodeKills>> m_bytecodeKills;
+ HashSet<std::pair<JSObject*, PropertyOffset>> m_safeToLoad;
+ HashMap<PropertyTypeKey, InferredType::Descriptor> m_inferredTypes;
+ Vector<RefPtr<DOMJIT::Patchpoint>> m_domJITPatchpoints;
+ std::unique_ptr<Dominators> m_dominators;
+ std::unique_ptr<PrePostNumbering> m_prePostNumbering;
+ std::unique_ptr<NaturalLoops> m_naturalLoops;
+ std::unique_ptr<CFG> m_cfg;
+ std::unique_ptr<BackwardsCFG> m_backwardsCFG;
+ std::unique_ptr<BackwardsDominators> m_backwardsDominators;
+ std::unique_ptr<ControlEquivalenceAnalysis> m_controlEquivalenceAnalysis;
unsigned m_localVars;
unsigned m_nextMachineLocal;
unsigned m_parameterSlots;
- int m_machineCaptureStart;
- std::unique_ptr<SlowArgument[]> m_slowArguments;
+
+ HashSet<String> m_localStrings;
+ HashMap<const StringImpl*, String> m_copiedStrings;
+
+#if USE(JSVALUE32_64)
+ std::unordered_map<int64_t, double*> m_doubleConstantsMap;
+ std::unique_ptr<Bag<double>> m_doubleConstants;
+#endif
OptimizationFixpointState m_fixpointState;
+ StructureRegistrationState m_structureRegistrationState;
GraphForm m_form;
UnificationState m_unificationState;
+ PlanStage m_planStage { PlanStage::Initial };
RefCountState m_refCountState;
+ bool m_hasDebuggerEnabled;
+ bool m_hasExceptionHandlers { false };
+ std::unique_ptr<FlowIndexing> m_indexingCache;
+ std::unique_ptr<FlowMap<AbstractValue>> m_abstractValuesCache;
+
+ RegisteredStructure stringStructure;
+ RegisteredStructure symbolStructure;
+
private:
-
+ void addNodeToMapByIndex(Node*);
+
+ bool isStringPrototypeMethodSane(JSGlobalObject*, UniquedStringImpl*);
+
void handleSuccessor(Vector<BasicBlock*, 16>& worklist, BasicBlock*, BasicBlock* successor);
- void addForDepthFirstSort(Vector<BasicBlock*>& result, Vector<BasicBlock*, 16>& worklist, HashSet<BasicBlock*>& seen, BasicBlock*);
- AddSpeculationMode addImmediateShouldSpeculateInt32(Node* add, bool variableShouldSpeculateInt32, Node* immediate)
+ AddSpeculationMode addImmediateShouldSpeculateInt32(Node* add, bool variableShouldSpeculateInt32, Node* operand, Node* immediate, RareCaseProfilingSource source)
{
ASSERT(immediate->hasConstant());
- JSValue immediateValue = immediate->valueOfJSConstant(m_codeBlock);
- if (!immediateValue.isNumber())
+ JSValue immediateValue = immediate->asJSValue();
+ if (!immediateValue.isNumber() && !immediateValue.isBoolean())
return DontSpeculateInt32;
if (!variableShouldSpeculateInt32)
return DontSpeculateInt32;
+
+ // Integer constants can be typed Double if they are written like a double in the source code (e.g. 42.0).
+ // In that case, we stay conservative unless the other operand was explicitly typed as integer.
+ NodeFlags operandResultType = operand->result();
+ if (operandResultType != NodeResultInt32 && immediateValue.isDouble())
+ return DontSpeculateInt32;
- if (immediateValue.isInt32())
- return add->canSpeculateInt32() ? SpeculateInt32 : DontSpeculateInt32;
+ if (immediateValue.isBoolean() || jsNumber(immediateValue.asNumber()).isInt32())
+ return add->canSpeculateInt32(source) ? SpeculateInt32 : DontSpeculateInt32;
double doubleImmediate = immediateValue.asDouble();
const double twoToThe48 = 281474976710656.0;
@@ -866,63 +1051,12 @@ private:
return bytecodeCanTruncateInteger(add->arithNodeFlags()) ? SpeculateInt32AndTruncateConstants : DontSpeculateInt32;
}
-
- bool mulImmediateShouldSpeculateInt32(Node* mul, Node* variable, Node* immediate)
- {
- ASSERT(immediate->hasConstant());
-
- JSValue immediateValue = immediate->valueOfJSConstant(m_codeBlock);
- if (!immediateValue.isInt32())
- return false;
-
- if (!variable->shouldSpeculateInt32ForArithmetic())
- return false;
-
- int32_t intImmediate = immediateValue.asInt32();
- // Doubles have a 53 bit mantissa so we expect a multiplication of 2^31 (the highest
- // magnitude possible int32 value) and any value less than 2^22 to not result in any
- // rounding in a double multiplication - hence it will be equivalent to an integer
- // multiplication, if we are doing int32 truncation afterwards (which is what
- // canSpeculateInt32() implies).
- const int32_t twoToThe22 = 1 << 22;
- if (intImmediate <= -twoToThe22 || intImmediate >= twoToThe22)
- return mul->canSpeculateInt32() && !nodeMayOverflow(mul->arithNodeFlags());
-
- return mul->canSpeculateInt32();
- }
-};
-#define DFG_NODE_DO_TO_CHILDREN(graph, node, thingToDo) do { \
- Node* _node = (node); \
- if (_node->flags() & NodeHasVarArgs) { \
- for (unsigned _childIdx = _node->firstChild(); \
- _childIdx < _node->firstChild() + _node->numChildren(); \
- _childIdx++) { \
- if (!!(graph).m_varArgChildren[_childIdx]) \
- thingToDo(_node, (graph).m_varArgChildren[_childIdx]); \
- } \
- } else { \
- if (!_node->child1()) { \
- ASSERT( \
- !_node->child2() \
- && !_node->child3()); \
- break; \
- } \
- thingToDo(_node, _node->child1()); \
- \
- if (!_node->child2()) { \
- ASSERT(!_node->child3()); \
- break; \
- } \
- thingToDo(_node, _node->child2()); \
- \
- if (!_node->child3()) \
- break; \
- thingToDo(_node, _node->child3()); \
- } \
- } while (false)
+ Vector<Node*, 0, UnsafeVectorOverflow> m_nodesByIndex;
+ Vector<unsigned, 0, UnsafeVectorOverflow> m_nodeIndexFreeList;
+ SegmentedVector<RegisteredStructureSet, 16> m_structureSets;
+};
} } // namespace JSC::DFG
#endif
-#endif