| author | Lorry Tar Creator <lorry-tar-importer@lorry> | 2016-05-24 08:28:08 +0000 |
|---|---|---|
| committer | Lorry Tar Creator <lorry-tar-importer@lorry> | 2016-05-24 08:28:08 +0000 |
| commit | a4e969f4965059196ca948db781e52f7cfebf19e (patch) | |
| tree | 6ca352808c8fdc52006a0f33f6ae3c593b23867d /Source/JavaScriptCore/dfg | |
| parent | 41386e9cb918eed93b3f13648cbef387e371e451 (diff) | |
| download | WebKitGtk-tarball-a4e969f4965059196ca948db781e52f7cfebf19e.tar.gz | |
webkitgtk-2.12.3
Diffstat (limited to 'Source/JavaScriptCore/dfg')
314 files changed, 41626 insertions, 16845 deletions
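The first header touched below, DFGAbstractHeap.h, replaces the old three-level heap hierarchy and its pairwise isDisjoint() check with a four-level hierarchy (World at the top; Stack, Heap, and SideState directly beneath it; payload-carrying kinds beneath their TOP kind) navigated through supertype(), isStrictSubtypeOf(), isSubtypeOf(), and overlaps(). The following is a minimal, self-contained sketch of that subtype walk; the Kind enum, the optional-based payload, and the sample kinds are simplified stand-ins for illustration, not the actual JSC types.

```cpp
// Sketch of the subtype logic added in DFGAbstractHeap.h in this diff.
// Kind, the std::optional payload, and the sample kinds are simplified stand-ins.
#include <cassert>
#include <cstdint>
#include <optional>

enum class Kind { World, Stack, Heap, SideState, NamedProperties };

struct AbstractHeap {
    Kind kind;
    std::optional<int64_t> payload; // nullopt plays the role of a TOP payload

    bool operator==(const AbstractHeap& other) const
    {
        return kind == other.kind && payload == other.payload;
    }

    // Mirrors the four-level hierarchy: Heap and SideState sit directly under World,
    // Stack(TOP) sits under World, any other kind with a TOP payload sits under Heap,
    // and a non-TOP payload sits under the same kind with a TOP payload.
    std::optional<AbstractHeap> supertype() const
    {
        if (kind == Kind::World)
            return std::nullopt;
        if (kind == Kind::Heap || kind == Kind::SideState)
            return AbstractHeap{Kind::World, std::nullopt};
        if (!payload)
            return AbstractHeap{kind == Kind::Stack ? Kind::World : Kind::Heap, std::nullopt};
        return AbstractHeap{kind, std::nullopt};
    }

    // Walk the supertype chain, as the isStrictSubtypeOf() in the diff does.
    bool isStrictSubtypeOf(const AbstractHeap& other) const
    {
        for (auto current = supertype(); current; current = current->supertype()) {
            if (*current == other)
                return true;
        }
        return false;
    }

    bool overlaps(const AbstractHeap& other) const
    {
        return *this == other || isStrictSubtypeOf(other) || other.isStrictSubtypeOf(*this);
    }

    bool isDisjoint(const AbstractHeap& other) const { return !overlaps(other); }
};

int main()
{
    AbstractHeap world{Kind::World, std::nullopt};
    AbstractHeap heap{Kind::Heap, std::nullopt};
    AbstractHeap stackSlot{Kind::Stack, 16};
    AbstractHeap namedTop{Kind::NamedProperties, std::nullopt};

    assert(namedTop.isStrictSubtypeOf(heap));   // NamedProperties(TOP) < Heap < World
    assert(stackSlot.isStrictSubtypeOf(world)); // Stack(16) < Stack(TOP) < World
    assert(stackSlot.isDisjoint(heap));         // stack state never aliases heap state
    assert(heap.overlaps(world));
    return 0;
}
```

With this structure, two abstract heaps interfere only when they are equal or one is a strict ancestor of the other, which is what the new overlaps()/isDisjoint() pair in the diff computes.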
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractHeap.cpp b/Source/JavaScriptCore/dfg/DFGAbstractHeap.cpp
index ad597aaea..1e1101902 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractHeap.cpp
+++ b/Source/JavaScriptCore/dfg/DFGAbstractHeap.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -28,6 +28,8 @@
 
 #if ENABLE(DFG_JIT)
 
+#include "JSCInlines.h"
+
 namespace JSC { namespace DFG {
 
 void AbstractHeap::Payload::dump(PrintStream& out) const
@@ -41,7 +43,7 @@ void AbstractHeap::Payload::dump(PrintStream& out) const
 void AbstractHeap::dump(PrintStream& out) const
 {
     out.print(kind());
-    if (kind() == InvalidAbstractHeap || kind() == World || payload().isTop())
+    if (kind() == InvalidAbstractHeap || kind() == World || kind() == Heap || payload().isTop())
         return;
     out.print("(", payload(), ")");
 }
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractHeap.h b/Source/JavaScriptCore/dfg/DFGAbstractHeap.h
index b42b0bbf1..4dec8fa03 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractHeap.h
+++ b/Source/JavaScriptCore/dfg/DFGAbstractHeap.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -26,8 +26,6 @@
 #ifndef DFGAbstractHeap_h
 #define DFGAbstractHeap_h
 
-#include <wtf/Platform.h>
-
 #if ENABLE(DFG_JIT)
 
 #include "VirtualRegister.h"
@@ -36,45 +34,47 @@ namespace JSC { namespace DFG {
 
-// Implements a three-level type hierarchy:
+// Implements a four-level type hierarchy:
 // - World is the supertype of all of the things.
-// - Kind with TOP payload is the direct subtype of World.
-// - Kind with non-TOP payload is the direct subtype of its corresponding TOP Kind.
+// - Stack with a TOP payload is a direct subtype of World
+// - Stack with a non-TOP payload is a direct subtype of Stack with a TOP payload.
+// - Heap is a direct subtype of World.
+// - SideState is a direct subtype of World.
+// - Any other kind with TOP payload is the direct subtype of Heap.
+// - Any other kind with non-TOP payload is the direct subtype of the same kind with a TOP payload.
#define FOR_EACH_ABSTRACT_HEAP_KIND(macro) \ macro(InvalidAbstractHeap) \ macro(World) \ - macro(Arguments_numArguments) \ - macro(Arguments_overrideLength) \ - macro(Arguments_registers) \ - macro(Arguments_slowArguments) \ - macro(ArrayBuffer_data) \ - macro(Butterfly_arrayBuffer) \ + macro(Stack) \ + macro(Heap) \ macro(Butterfly_publicLength) \ macro(Butterfly_vectorLength) \ - macro(JSArrayBufferView_length) \ - macro(JSArrayBufferView_mode) \ - macro(JSArrayBufferView_vector) \ - macro(JSCell_structure) \ - macro(JSFunction_executable) \ - macro(JSFunction_scopeChain) \ + macro(GetterSetter_getter) \ + macro(GetterSetter_setter) \ + macro(JSCell_structureID) \ + macro(JSCell_indexingType) \ + macro(JSCell_typeInfoFlags) \ + macro(JSCell_typeInfoType) \ macro(JSObject_butterfly) \ - macro(JSVariableObject_registers) \ + macro(JSPropertyNameEnumerator_cachedPropertyNames) \ macro(NamedProperties) \ macro(IndexedInt32Properties) \ macro(IndexedDoubleProperties) \ macro(IndexedContiguousProperties) \ + macro(IndexedArrayStorageProperties) \ macro(ArrayStorageProperties) \ - macro(Variables) \ + macro(DirectArgumentsProperties) \ + macro(ScopeProperties) \ macro(TypedArrayProperties) \ - macro(GCState) \ - macro(BarrierState) \ + macro(HeapObjectCount) /* Used to reflect the fact that some allocations reveal object identity */\ macro(RegExpState) \ + macro(MathDotRandomState) \ macro(InternalState) \ macro(Absolute) \ /* Use this for writes only, to indicate that this may fire watchpoints. Usually this is never directly written but instead we test to see if a node clobbers this; it just so happens that you have to write world to clobber it. */\ macro(Watchpoint_fire) \ - /* Use this for reads only, just to indicate that if the world got clobbered, then this operation will not work. */\ + /* Use these for reads only, just to indicate that if the world got clobbered, then this operation will not work. */\ macro(MiscFields) \ /* Use this for writes only, just to indicate that hoisting the node is invalid. This works because we don't hoist anything that has any side effects at all. 
*/\ macro(SideState) @@ -133,6 +133,11 @@ public: return m_value; } + int32_t value32() const + { + return static_cast<int32_t>(value()); + } + bool operator==(const Payload& other) const { return m_isTop == other.m_isTop @@ -187,7 +192,7 @@ public: AbstractHeap(AbstractHeapKind kind, Payload payload) { - ASSERT(kind != InvalidAbstractHeap && kind != World); + ASSERT(kind != InvalidAbstractHeap && kind != World && kind != Heap && kind != SideState); m_value = encode(kind, payload); } @@ -205,32 +210,49 @@ public: return payloadImpl(); } - bool isDisjoint(const AbstractHeap& other) + AbstractHeap supertype() const { ASSERT(kind() != InvalidAbstractHeap); - ASSERT(other.kind() != InvalidAbstractHeap); - if (kind() == World) - return false; - if (other.kind() == World) - return false; - if (kind() != other.kind()) - return true; - return payload().isDisjoint(other.payload()); + switch (kind()) { + case World: + return AbstractHeap(); + case Heap: + case SideState: + return World; + default: + if (payload().isTop()) { + if (kind() == Stack) + return World; + return Heap; + } + return AbstractHeap(kind()); + } + } + + bool isStrictSubtypeOf(const AbstractHeap& other) const + { + AbstractHeap current = *this; + while (current.kind() != World) { + current = current.supertype(); + if (current == other) + return true; + } + return false; } - bool overlaps(const AbstractHeap& other) + bool isSubtypeOf(const AbstractHeap& other) const { - return !isDisjoint(other); + return *this == other || isStrictSubtypeOf(other); } - AbstractHeap supertype() const + bool overlaps(const AbstractHeap& other) const { - ASSERT(kind() != InvalidAbstractHeap); - if (kind() == World) - return AbstractHeap(); - if (payload().isTop()) - return World; - return AbstractHeap(kind()); + return *this == other || isStrictSubtypeOf(other) || other.isStrictSubtypeOf(*this); + } + + bool isDisjoint(const AbstractHeap& other) const + { + return !overlaps(other); } unsigned hash() const diff --git a/Source/JavaScriptCore/dfg/DFGAbstractInterpreter.h b/Source/JavaScriptCore/dfg/DFGAbstractInterpreter.h index eb4e5219f..e1969179e 100644 --- a/Source/JavaScriptCore/dfg/DFGAbstractInterpreter.h +++ b/Source/JavaScriptCore/dfg/DFGAbstractInterpreter.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,21 +26,20 @@ #ifndef DFGAbstractInterpreter_h #define DFGAbstractInterpreter_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGAbstractValue.h" #include "DFGBranchDirection.h" #include "DFGGraph.h" #include "DFGNode.h" +#include "DFGPhiChildren.h" namespace JSC { namespace DFG { template<typename AbstractStateType> class AbstractInterpreter { public: - AbstractInterpreter(Graph&, AbstractStateType& state); + AbstractInterpreter(Graph&, AbstractStateType&); ~AbstractInterpreter(); AbstractValue& forNode(Node* node) @@ -82,22 +81,15 @@ public: // // This is guaranteed to be equivalent to doing: // - // if (state.startExecuting(index)) { - // state.executeEdges(index); - // result = state.executeEffects(index); - // } else - // result = true; + // state.startExecuting() + // state.executeEdges(index); + // result = state.executeEffects(index); bool execute(unsigned indexInBlock); bool execute(Node*); - // Indicate the start of execution of the node. 
It resets any state in the node, - // that is progressively built up by executeEdges() and executeEffects(). In - // particular, this resets canExit(), so if you want to "know" between calls of - // startExecuting() and executeEdges()/Effects() whether the last run of the - // analysis concluded that the node can exit, you should probably set that - // information aside prior to calling startExecuting(). - bool startExecuting(Node*); - bool startExecuting(unsigned indexInBlock); + // Indicate the start of execution of a node. It resets any state in the node + // that is progressively built up by executeEdges() and executeEffects(). + void startExecuting(); // Abstractly execute the edges of the given node. This runs filterEdgeByUse() // on all edges of the node. You can skip this step, if you have already used @@ -105,10 +97,14 @@ public: void executeEdges(Node*); void executeEdges(unsigned indexInBlock); - ALWAYS_INLINE void filterEdgeByUse(Node* node, Edge& edge) + ALWAYS_INLINE void filterEdgeByUse(Edge& edge) { ASSERT(mayHaveTypeCheck(edge.useKind()) || !needsTypeCheck(edge)); - filterByType(node, edge, typeFilterFor(edge.useKind())); + filterByType(edge, typeFilterFor(edge.useKind())); + } + ALWAYS_INLINE void filterEdgeByUse(Node*, Edge& edge) + { + filterEdgeByUse(edge); } // Abstractly execute the effects of the given node. This changes the abstract @@ -116,12 +112,13 @@ public: bool executeEffects(unsigned indexInBlock); bool executeEffects(unsigned clobberLimit, Node*); + void dump(PrintStream& out) const; void dump(PrintStream& out); template<typename T> - FiltrationResult filter(T node, const StructureSet& set) + FiltrationResult filter(T node, const StructureSet& set, SpeculatedType admittedTypes = SpecNone) { - return filter(forNode(node), set); + return filter(forNode(node), set, admittedTypes); } template<typename T> @@ -137,20 +134,28 @@ public: } template<typename T> - FiltrationResult filterByValue(T node, JSValue value) + FiltrationResult filterByValue(T node, FrozenValue value) { return filterByValue(forNode(node), value); } - FiltrationResult filter(AbstractValue&, const StructureSet&); + FiltrationResult filter(AbstractValue&, const StructureSet&, SpeculatedType admittedTypes = SpecNone); FiltrationResult filterArrayModes(AbstractValue&, ArrayModes); FiltrationResult filter(AbstractValue&, SpeculatedType); - FiltrationResult filterByValue(AbstractValue&, JSValue); + FiltrationResult filterByValue(AbstractValue&, FrozenValue); + + PhiChildren* phiChildren() { return m_phiChildren.get(); } private: void clobberWorld(const CodeOrigin&, unsigned indexInBlock); - void clobberCapturedVars(const CodeOrigin&); + + template<typename Functor> + void forAllValues(unsigned indexInBlock, Functor&); + void clobberStructures(unsigned indexInBlock); + void observeTransition(unsigned indexInBlock, Structure* from, Structure* to); + void observeTransitions(unsigned indexInBlock, const TransitionVector&); + void setDidClobber(); enum BooleanResult { UnknownBooleanResult, @@ -159,19 +164,25 @@ private: }; BooleanResult booleanResult(Node*, AbstractValue&); - void setConstant(Node* node, JSValue value) + void setBuiltInConstant(Node* node, FrozenValue value) + { + AbstractValue& abstractValue = forNode(node); + abstractValue.set(m_graph, value, m_state.structureClobberState()); + abstractValue.fixTypeForRepresentation(m_graph, node); + } + + void setConstant(Node* node, FrozenValue value) { - forNode(node).set(m_graph, value); + setBuiltInConstant(node, value); 
m_state.setFoundConstants(true); } - ALWAYS_INLINE void filterByType(Node* node, Edge& edge, SpeculatedType type) + ALWAYS_INLINE void filterByType(Edge& edge, SpeculatedType type) { AbstractValue& value = forNode(edge); - if (!value.isType(type)) { - node->setCanExit(true); + if (!value.isType(type)) edge.setProofStatus(NeedsCheck); - } else + else edge.setProofStatus(IsProved); filter(value, type); @@ -183,6 +194,7 @@ private: CodeBlock* m_codeBlock; Graph& m_graph; AbstractStateType& m_state; + std::unique_ptr<PhiChildren> m_phiChildren; }; } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h b/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h index 3f68aced1..338a90381 100644 --- a/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h +++ b/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. + * Copyright (C) 2013-2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,12 +26,13 @@ #ifndef DFGAbstractInterpreterInlines_h #define DFGAbstractInterpreterInlines_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGAbstractInterpreter.h" #include "GetByIdStatus.h" +#include "GetterSetter.h" +#include "JITOperations.h" +#include "MathCommon.h" #include "Operations.h" #include "PutByIdStatus.h" #include "StringObject.h" @@ -44,6 +45,8 @@ AbstractInterpreter<AbstractStateType>::AbstractInterpreter(Graph& graph, Abstra , m_graph(graph) , m_state(state) { + if (m_graph.m_form == SSA) + m_phiChildren = std::make_unique<PhiChildren>(m_graph); } template<typename AbstractStateType> @@ -58,17 +61,23 @@ AbstractInterpreter<AbstractStateType>::booleanResult( { JSValue childConst = value.value(); if (childConst) { - if (childConst.toBoolean(m_codeBlock->globalObjectFor(node->codeOrigin)->globalExec())) + if (childConst.toBoolean(m_codeBlock->globalObjectFor(node->origin.semantic)->globalExec())) return DefinitelyTrue; return DefinitelyFalse; } // Next check if we can fold because we know that the source is an object or string and does not equal undefined. 
- if (isCellSpeculation(value.m_type) - && value.m_currentKnownStructure.hasSingleton()) { - Structure* structure = value.m_currentKnownStructure.singleton(); - if (!structure->masqueradesAsUndefined(m_codeBlock->globalObjectFor(node->codeOrigin)) - && structure->typeInfo().type() != StringType) + if (isCellSpeculation(value.m_type) && !value.m_structure.isTop()) { + bool allTrue = true; + for (unsigned i = value.m_structure.size(); i--;) { + Structure* structure = value.m_structure[i]; + if (structure->masqueradesAsUndefined(m_codeBlock->globalObjectFor(node->origin.semantic)) + || structure->typeInfo().type() == StringType) { + allTrue = false; + break; + } + } + if (allTrue) return DefinitelyTrue; } @@ -76,22 +85,12 @@ AbstractInterpreter<AbstractStateType>::booleanResult( } template<typename AbstractStateType> -bool AbstractInterpreter<AbstractStateType>::startExecuting(Node* node) +void AbstractInterpreter<AbstractStateType>::startExecuting() { ASSERT(m_state.block()); ASSERT(m_state.isValid()); m_state.setDidClobber(false); - - node->setCanExit(false); - - return node->shouldGenerate(); -} - -template<typename AbstractStateType> -bool AbstractInterpreter<AbstractStateType>::startExecuting(unsigned indexInBlock) -{ - return startExecuting(m_state.block()->at(indexInBlock)); } template<typename AbstractStateType> @@ -107,9 +106,18 @@ void AbstractInterpreter<AbstractStateType>::executeEdges(unsigned indexInBlock) } template<typename AbstractStateType> -void AbstractInterpreter<AbstractStateType>::verifyEdge(Node*, Edge edge) +void AbstractInterpreter<AbstractStateType>::verifyEdge(Node* node, Edge edge) { - RELEASE_ASSERT(!(forNode(edge).m_type & ~typeFilterFor(edge.useKind()))); + // Some use kinds are required to not have checks, because we know somehow that the incoming + // value will already have the type we want. In those cases, AI may not be smart enough to + // prove that this is indeed the case. 
+ if (shouldNotHaveTypeCheck(edge.useKind())) + return; + + if (!(forNode(edge).m_type & ~typeFilterFor(edge.useKind()))) + return; + + DFG_CRASH(m_graph, node, toCString("Edge verification error: ", node, "->", edge, " was expected to have type ", SpeculationDump(typeFilterFor(edge.useKind())), " but has type ", SpeculationDump(forNode(edge).m_type), " (", forNode(edge).m_type, ")").data()); } template<typename AbstractStateType> @@ -128,54 +136,40 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi switch (node->op()) { case JSConstant: - case WeakJSConstant: - case PhantomArguments: { - forNode(node).set(m_graph, m_graph.valueOfJSConstant(node)); + case DoubleConstant: + case Int52Constant: { + setBuiltInConstant(node, *node->constant()); break; } case Identity: { forNode(node) = forNode(node->child1()); - break; - } - - case GetArgument: { - ASSERT(m_graph.m_form == SSA); - VariableAccessData* variable = node->variableAccessData(); - AbstractValue& value = m_state.variables().operand(variable->local().offset()); - ASSERT(value.isHeapTop()); - FiltrationResult result = - value.filter(typeFilterFor(useKindFor(variable->flushFormat()))); - ASSERT_UNUSED(result, result == FiltrationOK); - forNode(node) = value; + if (forNode(node).value()) + m_state.setFoundConstants(true); break; } case ExtractOSREntryLocal: { - if (!(node->unlinkedLocal().isArgument()) - && m_graph.m_lazyVars.get(node->unlinkedLocal().toLocal())) { - // This is kind of pessimistic - we could know in some cases that the - // DFG code at the point of the OSR had already initialized the lazy - // variable. But maybe this is fine, since we're inserting OSR - // entrypoints very early in the pipeline - so any lazy initializations - // ought to be hoisted out anyway. - forNode(node).makeBytecodeTop(); - } else - forNode(node).makeHeapTop(); + forNode(node).makeBytecodeTop(); break; } case GetLocal: { VariableAccessData* variableAccessData = node->variableAccessData(); - if (variableAccessData->prediction() == SpecNone) { - m_state.setIsValid(false); - break; - } AbstractValue value = m_state.variables().operand(variableAccessData->local().offset()); - if (!variableAccessData->isCaptured()) { - if (value.isClear()) - node->setCanExit(true); - } + // The value in the local should already be checked. + DFG_ASSERT(m_graph, node, value.isType(typeFilterFor(variableAccessData->flushFormat()))); + if (value.value()) + m_state.setFoundConstants(true); + forNode(node) = value; + break; + } + + case GetStack: { + StackAccessData* data = node->stackAccessData(); + AbstractValue value = m_state.variables().operand(data->local); + // The value in the local should already be checked. + DFG_ASSERT(m_graph, node, value.isType(typeFilterFor(data->format))); if (value.value()) m_state.setFoundConstants(true); forNode(node) = value; @@ -191,7 +185,12 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi } case SetLocal: { - m_state.variables().operand(node->local().offset()) = forNode(node->child1()); + m_state.variables().operand(node->local()) = forNode(node->child1()); + break; + } + + case PutStack: { + m_state.variables().operand(node->stackAccessData()->local) = forNode(node->child1()); break; } @@ -202,10 +201,31 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi break; } + case KillStack: { + // This is just a hint telling us that the OSR state of the local is no longer inside the + // flushed data. 
+ break; + } + case SetArgument: - // Assert that the state of arguments has been set. - ASSERT(!m_state.block()->valuesAtHead.operand(node->local()).isClear()); + // Assert that the state of arguments has been set. SetArgument means that someone set + // the argument values out-of-band, and currently this always means setting to a + // non-clear value. + ASSERT(!m_state.variables().operand(node->local()).isClear()); + break; + + case LoadVarargs: + case ForwardVarargs: { + // FIXME: ForwardVarargs should check if the count becomes known, and if it does, it should turn + // itself into a straight-line sequence of GetStack/PutStack. + // https://bugs.webkit.org/show_bug.cgi?id=143071 + clobberWorld(node->origin.semantic, clobberLimit); + LoadVarargsData* data = node->loadVarargsData(); + m_state.variables().operand(data->count).setType(SpecInt32); + for (unsigned i = data->limit - 1; i--;) + m_state.variables().operand(data->start.offset() + i).makeHeapTop(); break; + } case BitAnd: case BitOr: @@ -213,6 +233,12 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi case BitRShift: case BitLShift: case BitURShift: { + if (node->child1().useKind() == UntypedUse || node->child2().useKind() == UntypedUse) { + clobberWorld(node->origin.semantic, clobberLimit); + forNode(node).setType(m_graph, SpecInt32); + break; + } + JSValue left = forNode(node->child1()).value(); JSValue right = forNode(node->child2()).value(); if (left && right && left.isInt32() && right.isInt32()) { @@ -243,6 +269,14 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi } break; } + + if (node->op() == BitAnd + && (isBoolInt32Speculation(forNode(node->child1()).m_type) || + isBoolInt32Speculation(forNode(node->child2()).m_type))) { + forNode(node).setType(SpecBoolInt32); + break; + } + forNode(node).setType(SpecInt32); break; } @@ -255,7 +289,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi setConstant(node, jsNumber(value)); break; } - forNode(node).setType(SpecDouble); + forNode(node).setType(SpecInt52AsDouble); break; } if (child && child.isInt32()) { @@ -266,7 +300,26 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi } } forNode(node).setType(SpecInt32); - node->setCanExit(true); + break; + } + + case BooleanToNumber: { + JSValue concreteValue = forNode(node->child1()).value(); + if (concreteValue) { + if (concreteValue.isBoolean()) + setConstant(node, jsNumber(concreteValue.asBoolean())); + else + setConstant(node, *m_graph.freeze(concreteValue)); + break; + } + AbstractValue& value = forNode(node); + value = forNode(node->child1()); + if (node->child1().useKind() == UntypedUse && !(value.m_type & ~SpecBoolean)) + m_state.setFoundConstants(true); + if (value.m_type & SpecBoolean) { + value.merge(SpecBoolInt32); + value.filter(~SpecBoolean); + } break; } @@ -280,7 +333,6 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi break; } } - node->setCanExit(true); forNode(node).setType(SpecInt32); break; } @@ -296,55 +348,91 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi break; } if (child.isBoolean()) { - setConstant(node, JSValue(child.asBoolean())); + setConstant(node, jsNumber(child.asBoolean())); break; } + if (child.isUndefinedOrNull()) { + setConstant(node, jsNumber(0)); + break; + } + } + + if (isBooleanSpeculation(forNode(node->child1()).m_type)) { + forNode(node).setType(SpecBoolInt32); + break; } 
forNode(node).setType(SpecInt32); break; } - case Int32ToDouble: { + case DoubleRep: { JSValue child = forNode(node->child1()).value(); if (child && child.isNumber()) { - setConstant(node, JSValue(JSValue::EncodeAsDouble, child.asNumber())); + setConstant(node, jsDoubleNumber(child.asNumber())); break; } - if (isInt32Speculation(forNode(node->child1()).m_type)) - forNode(node).setType(SpecDoubleReal); - else - forNode(node).setType(SpecDouble); + + SpeculatedType type = forNode(node->child1()).m_type; + switch (node->child1().useKind()) { + case NotCellUse: { + if (type & SpecOther) { + type &= ~SpecOther; + type |= SpecDoublePureNaN | SpecBoolInt32; // Null becomes zero, undefined becomes NaN. + } + if (type & SpecBoolean) { + type &= ~SpecBoolean; + type |= SpecBoolInt32; // True becomes 1, false becomes 0. + } + type &= SpecBytecodeNumber; + break; + } + + case Int52RepUse: + case NumberUse: + case RealNumberUse: + break; + + default: + RELEASE_ASSERT_NOT_REACHED(); + } + forNode(node).setType(type); + forNode(node).fixTypeForRepresentation(m_graph, node); break; } - case Int52ToDouble: { + case Int52Rep: { JSValue child = forNode(node->child1()).value(); - if (child && child.isNumber()) { + if (child && child.isMachineInt()) { setConstant(node, child); break; } - forNode(node).setType(SpecDouble); + + forNode(node).setType(SpecInt32); break; } - case Int52ToValue: { - JSValue child = forNode(node->child1()).value(); - if (child && child.isNumber()) { - setConstant(node, child); + case ValueRep: { + JSValue value = forNode(node->child1()).value(); + if (value) { + setConstant(node, value); break; } - SpeculatedType type = forNode(node->child1()).m_type; - if (type & SpecInt52) - type = (type | SpecInt32 | SpecInt52AsDouble) & ~SpecInt52; - forNode(node).setType(type); + + forNode(node).setType(m_graph, forNode(node->child1()).m_type & ~SpecDoubleImpureNaN); + forNode(node).fixTypeForRepresentation(m_graph, node); break; } case ValueAdd: { ASSERT(node->binaryUseKind() == UntypedUse); - clobberWorld(node->codeOrigin, clobberLimit); - forNode(node).setType(SpecString | SpecBytecodeNumber); + clobberWorld(node->origin.semantic, clobberLimit); + forNode(node).setType(m_graph, SpecString | SpecBytecodeNumber); + break; + } + + case StrCat: { + forNode(node).setType(m_graph, SpecString); break; } @@ -365,10 +453,8 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi } } forNode(node).setType(SpecInt32); - if (shouldCheckOverflow(node->arithMode())) - node->setCanExit(true); break; - case MachineIntUse: + case Int52RepUse: if (left && right && left.isMachineInt() && right.isMachineInt()) { JSValue result = jsNumber(left.asMachineInt() + right.asMachineInt()); if (result.isMachineInt()) { @@ -376,21 +462,16 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi break; } } - forNode(node).setType(SpecInt52); - if (!forNode(node->child1()).isType(SpecInt32) - || !forNode(node->child2()).isType(SpecInt32)) - node->setCanExit(true); + forNode(node).setType(SpecMachineInt); break; - case NumberUse: + case DoubleRepUse: if (left && right && left.isNumber() && right.isNumber()) { - setConstant(node, jsNumber(left.asNumber() + right.asNumber())); + setConstant(node, jsDoubleNumber(left.asNumber() + right.asNumber())); break; } - if (isFullRealNumberSpeculation(forNode(node->child1()).m_type) - && isFullRealNumberSpeculation(forNode(node->child2()).m_type)) - forNode(node).setType(SpecDoubleReal); - else - forNode(node).setType(SpecDouble); + 
forNode(node).setType( + typeOfDoubleSum( + forNode(node->child1()).m_type, forNode(node->child2()).m_type)); break; default: RELEASE_ASSERT_NOT_REACHED(); @@ -398,9 +479,19 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi } break; } - + + case ArithClz32: { + JSValue operand = forNode(node->child1()).value(); + if (operand && operand.isNumber()) { + uint32_t value = toUInt32(operand.asNumber()); + setConstant(node, jsNumber(clz32(value))); + break; + } + forNode(node).setType(SpecInt32); + break; + } + case MakeRope: { - node->setCanExit(true); forNode(node).set(m_graph, m_graph.m_vm.stringStructure.get()); break; } @@ -422,10 +513,8 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi } } forNode(node).setType(SpecInt32); - if (shouldCheckOverflow(node->arithMode())) - node->setCanExit(true); break; - case MachineIntUse: + case Int52RepUse: if (left && right && left.isMachineInt() && right.isMachineInt()) { JSValue result = jsNumber(left.asMachineInt() - right.asMachineInt()); if (result.isMachineInt() || !shouldCheckOverflow(node->arithMode())) { @@ -433,17 +522,20 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi break; } } - forNode(node).setType(SpecInt52); - if (!forNode(node->child1()).isType(SpecInt32) - || !forNode(node->child2()).isType(SpecInt32)) - node->setCanExit(true); + forNode(node).setType(SpecMachineInt); break; - case NumberUse: + case DoubleRepUse: if (left && right && left.isNumber() && right.isNumber()) { - setConstant(node, jsNumber(left.asNumber() - right.asNumber())); + setConstant(node, jsDoubleNumber(left.asNumber() - right.asNumber())); break; } - forNode(node).setType(SpecDouble); + forNode(node).setType( + typeOfDoubleDifference( + forNode(node->child1()).m_type, forNode(node->child2()).m_type)); + break; + case UntypedUse: + clobberWorld(node->origin.semantic, clobberLimit); + forNode(node).setType(m_graph, SpecBytecodeNumber); break; default: RELEASE_ASSERT_NOT_REACHED(); @@ -473,10 +565,8 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi } } forNode(node).setType(SpecInt32); - if (shouldCheckOverflow(node->arithMode())) - node->setCanExit(true); break; - case MachineIntUse: + case Int52RepUse: if (child && child.isMachineInt()) { double doubleResult; if (shouldCheckNegativeZero(node->arithMode())) @@ -489,18 +579,16 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi break; } } - forNode(node).setType(SpecInt52); - if (m_state.forNode(node->child1()).couldBeType(SpecInt52)) - node->setCanExit(true); - if (shouldCheckNegativeZero(node->arithMode())) - node->setCanExit(true); + forNode(node).setType(SpecMachineInt); break; - case NumberUse: + case DoubleRepUse: if (child && child.isNumber()) { - setConstant(node, jsNumber(-child.asNumber())); + setConstant(node, jsDoubleNumber(-child.asNumber())); break; } - forNode(node).setType(SpecDouble); + forNode(node).setType( + typeOfDoubleNegation( + forNode(node->child1()).m_type)); break; default: RELEASE_ASSERT_NOT_REACHED(); @@ -529,10 +617,8 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi } } forNode(node).setType(SpecInt32); - if (shouldCheckOverflow(node->arithMode())) - node->setCanExit(true); break; - case MachineIntUse: + case Int52RepUse: if (left && right && left.isMachineInt() && right.isMachineInt()) { double doubleResult = left.asNumber() * right.asNumber(); if 
(!shouldCheckNegativeZero(node->arithMode())) @@ -543,19 +629,20 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi break; } } - forNode(node).setType(SpecInt52); - node->setCanExit(true); + forNode(node).setType(SpecMachineInt); break; - case NumberUse: + case DoubleRepUse: if (left && right && left.isNumber() && right.isNumber()) { - setConstant(node, jsNumber(left.asNumber() * right.asNumber())); + setConstant(node, jsDoubleNumber(left.asNumber() * right.asNumber())); break; } - if (isFullRealNumberSpeculation(forNode(node->child1()).m_type) - || isFullRealNumberSpeculation(forNode(node->child2()).m_type)) - forNode(node).setType(SpecDoubleReal); - else - forNode(node).setType(SpecDouble); + forNode(node).setType( + typeOfDoubleProduct( + forNode(node->child1()).m_type, forNode(node->child2()).m_type)); + break; + case UntypedUse: + clobberWorld(node->origin.semantic, clobberLimit); + forNode(node).setType(m_graph, SpecBytecodeNumber); break; default: RELEASE_ASSERT_NOT_REACHED(); @@ -582,14 +669,19 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi } } forNode(node).setType(SpecInt32); - node->setCanExit(true); break; - case NumberUse: + case DoubleRepUse: if (left && right && left.isNumber() && right.isNumber()) { - setConstant(node, jsNumber(left.asNumber() / right.asNumber())); + setConstant(node, jsDoubleNumber(left.asNumber() / right.asNumber())); break; } - forNode(node).setType(SpecDouble); + forNode(node).setType( + typeOfDoubleQuotient( + forNode(node->child1()).m_type, forNode(node->child2()).m_type)); + break; + case UntypedUse: + clobberWorld(node->origin.semantic, clobberLimit); + forNode(node).setType(m_graph, SpecBytecodeNumber); break; default: RELEASE_ASSERT_NOT_REACHED(); @@ -616,14 +708,15 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi } } forNode(node).setType(SpecInt32); - node->setCanExit(true); break; - case NumberUse: + case DoubleRepUse: if (left && right && left.isNumber() && right.isNumber()) { - setConstant(node, jsNumber(fmod(left.asNumber(), right.asNumber()))); + setConstant(node, jsDoubleNumber(fmod(left.asNumber(), right.asNumber()))); break; } - forNode(node).setType(SpecDouble); + forNode(node).setType( + typeOfDoubleBinaryOp( + forNode(node->child1()).m_type, forNode(node->child2()).m_type)); break; default: RELEASE_ASSERT_NOT_REACHED(); @@ -642,16 +735,17 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi break; } forNode(node).setType(SpecInt32); - node->setCanExit(true); break; - case NumberUse: + case DoubleRepUse: if (left && right && left.isNumber() && right.isNumber()) { double a = left.asNumber(); double b = right.asNumber(); - setConstant(node, jsNumber(a < b ? a : (b <= a ? b : a + b))); + setConstant(node, jsDoubleNumber(a < b ? a : (b <= a ? b : a + b))); break; } - forNode(node).setType(SpecDouble); + forNode(node).setType( + typeOfDoubleMinMax( + forNode(node->child1()).m_type, forNode(node->child2()).m_type)); break; default: RELEASE_ASSERT_NOT_REACHED(); @@ -670,16 +764,17 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi break; } forNode(node).setType(SpecInt32); - node->setCanExit(true); break; - case NumberUse: + case DoubleRepUse: if (left && right && left.isNumber() && right.isNumber()) { double a = left.asNumber(); double b = right.asNumber(); - setConstant(node, jsNumber(a > b ? a : (b >= a ? b : a + b))); + setConstant(node, jsDoubleNumber(a > b ? 
a : (b >= a ? b : a + b))); break; } - forNode(node).setType(SpecDouble); + forNode(node).setType( + typeOfDoubleMinMax( + forNode(node->child1()).m_type, forNode(node->child2()).m_type)); break; default: RELEASE_ASSERT_NOT_REACHED(); @@ -700,14 +795,13 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi } } forNode(node).setType(SpecInt32); - node->setCanExit(true); break; - case NumberUse: + case DoubleRepUse: if (child && child.isNumber()) { - setConstant(node, jsNumber(child.asNumber())); + setConstant(node, jsDoubleNumber(fabs(child.asNumber()))); break; } - forNode(node).setType(SpecDouble); + forNode(node).setType(typeOfDoubleAbs(forNode(node->child1()).m_type)); break; default: RELEASE_ASSERT_NOT_REACHED(); @@ -715,34 +809,117 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi } break; } + + case ArithPow: { + JSValue childY = forNode(node->child2()).value(); + if (childY && childY.isNumber()) { + if (!childY.asNumber()) { + setConstant(node, jsDoubleNumber(1)); + break; + } + + JSValue childX = forNode(node->child1()).value(); + if (childX && childX.isNumber()) { + setConstant(node, jsDoubleNumber(operationMathPow(childX.asNumber(), childY.asNumber()))); + break; + } + } + forNode(node).setType(typeOfDoublePow(forNode(node->child1()).m_type, forNode(node->child2()).m_type)); + break; + } + + case ArithRandom: { + forNode(node).setType(m_graph, SpecDoubleReal); + break; + } + + case ArithRound: + case ArithFloor: + case ArithCeil: { + JSValue operand = forNode(node->child1()).value(); + if (operand && operand.isNumber()) { + double roundedValue = 0; + if (node->op() == ArithRound) + roundedValue = jsRound(operand.asNumber()); + else if (node->op() == ArithFloor) + roundedValue = floor(operand.asNumber()); + else { + ASSERT(node->op() == ArithCeil); + roundedValue = ceil(operand.asNumber()); + } + + if (producesInteger(node->arithRoundingMode())) { + int32_t roundedValueAsInt32 = static_cast<int32_t>(roundedValue); + if (roundedValueAsInt32 == roundedValue) { + if (shouldCheckNegativeZero(node->arithRoundingMode())) { + if (roundedValueAsInt32 || !std::signbit(roundedValue)) { + setConstant(node, jsNumber(roundedValueAsInt32)); + break; + } + } else { + setConstant(node, jsNumber(roundedValueAsInt32)); + break; + } + } + } else { + setConstant(node, jsDoubleNumber(roundedValue)); + break; + } + } + if (producesInteger(node->arithRoundingMode())) + forNode(node).setType(SpecInt32); + else + forNode(node).setType(typeOfDoubleRounding(forNode(node->child1()).m_type)); + break; + } case ArithSqrt: { JSValue child = forNode(node->child1()).value(); if (child && child.isNumber()) { - setConstant(node, jsNumber(sqrt(child.asNumber()))); + setConstant(node, jsDoubleNumber(sqrt(child.asNumber()))); break; } - forNode(node).setType(SpecDouble); + forNode(node).setType(typeOfDoubleUnaryOp(forNode(node->child1()).m_type)); + break; + } + + case ArithFRound: { + JSValue child = forNode(node->child1()).value(); + if (child && child.isNumber()) { + setConstant(node, jsDoubleNumber(static_cast<float>(child.asNumber()))); + break; + } + forNode(node).setType(typeOfDoubleRounding(forNode(node->child1()).m_type)); break; } case ArithSin: { JSValue child = forNode(node->child1()).value(); if (child && child.isNumber()) { - setConstant(node, jsNumber(sin(child.asNumber()))); + setConstant(node, jsDoubleNumber(sin(child.asNumber()))); break; } - forNode(node).setType(SpecDouble); + 
forNode(node).setType(typeOfDoubleUnaryOp(forNode(node->child1()).m_type)); break; } case ArithCos: { JSValue child = forNode(node->child1()).value(); if (child && child.isNumber()) { - setConstant(node, jsNumber(cos(child.asNumber()))); + setConstant(node, jsDoubleNumber(cos(child.asNumber()))); + break; + } + forNode(node).setType(typeOfDoubleUnaryOp(forNode(node->child1()).m_type)); + break; + } + + case ArithLog: { + JSValue child = forNode(node->child1()).value(); + if (child && child.isNumber()) { + setConstant(node, jsDoubleNumber(log(child.asNumber()))); break; } - forNode(node).setType(SpecDouble); + forNode(node).setType(typeOfDoubleUnaryOp(forNode(node->child1()).m_type)); break; } @@ -755,20 +932,6 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi setConstant(node, jsBoolean(true)); break; default: - switch (node->child1().useKind()) { - case BooleanUse: - case Int32Use: - case NumberUse: - case UntypedUse: - case StringUse: - break; - case ObjectOrOtherUse: - node->setCanExit(true); - break; - default: - RELEASE_ASSERT_NOT_REACHED(); - break; - } forNode(node).setType(SpecBoolean); break; } @@ -780,35 +943,59 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi case IsNumber: case IsString: case IsObject: + case IsObjectOrNull: case IsFunction: { - node->setCanExit( - node->op() == IsUndefined - && m_graph.masqueradesAsUndefinedWatchpointIsStillValid(node->codeOrigin)); - JSValue child = forNode(node->child1()).value(); - if (child) { + AbstractValue child = forNode(node->child1()); + if (child.value()) { bool constantWasSet = true; switch (node->op()) { case IsUndefined: setConstant(node, jsBoolean( - child.isCell() - ? child.asCell()->structure()->masqueradesAsUndefined(m_codeBlock->globalObjectFor(node->codeOrigin)) - : child.isUndefined())); + child.value().isCell() + ? child.value().asCell()->structure()->masqueradesAsUndefined(m_codeBlock->globalObjectFor(node->origin.semantic)) + : child.value().isUndefined())); break; case IsBoolean: - setConstant(node, jsBoolean(child.isBoolean())); + setConstant(node, jsBoolean(child.value().isBoolean())); break; case IsNumber: - setConstant(node, jsBoolean(child.isNumber())); + setConstant(node, jsBoolean(child.value().isNumber())); break; case IsString: - setConstant(node, jsBoolean(isJSString(child))); + setConstant(node, jsBoolean(isJSString(child.value()))); break; case IsObject: - if (child.isNull() || !child.isObject()) { - setConstant(node, jsBoolean(child.isNull())); - break; - } - constantWasSet = false; + setConstant(node, jsBoolean(child.value().isObject())); + break; + case IsObjectOrNull: + if (child.value().isObject()) { + JSObject* object = asObject(child.value()); + if (object->type() == JSFunctionType) + setConstant(node, jsBoolean(false)); + else if (!(object->inlineTypeFlags() & TypeOfShouldCallGetCallData)) + setConstant(node, jsBoolean(!child.value().asCell()->structure()->masqueradesAsUndefined(m_codeBlock->globalObjectFor(node->origin.semantic)))); + else { + // FIXME: This could just call getCallData. 
+ // https://bugs.webkit.org/show_bug.cgi?id=144457 + constantWasSet = false; + } + } else + setConstant(node, jsBoolean(child.value().isNull())); + break; + case IsFunction: + if (child.value().isObject()) { + JSObject* object = asObject(child.value()); + if (object->type() == JSFunctionType) + setConstant(node, jsBoolean(true)); + else if (!(object->inlineTypeFlags() & TypeOfShouldCallGetCallData)) + setConstant(node, jsBoolean(false)); + else { + // FIXME: This could just call getCallData. + // https://bugs.webkit.org/show_bug.cgi?id=144457 + constantWasSet = false; + } + } else + setConstant(node, jsBoolean(false)); break; default: constantWasSet = false; @@ -817,7 +1004,131 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi if (constantWasSet) break; } - + + // FIXME: This code should really use AbstractValue::isType() and + // AbstractValue::couldBeType(). + // https://bugs.webkit.org/show_bug.cgi?id=146870 + + bool constantWasSet = false; + switch (node->op()) { + case IsUndefined: + // FIXME: Use the masquerades-as-undefined watchpoint thingy. + // https://bugs.webkit.org/show_bug.cgi?id=144456 + + if (!(child.m_type & (SpecOther | SpecObjectOther))) { + setConstant(node, jsBoolean(false)); + constantWasSet = true; + break; + } + + break; + case IsBoolean: + if (!(child.m_type & ~SpecBoolean)) { + setConstant(node, jsBoolean(true)); + constantWasSet = true; + break; + } + + if (!(child.m_type & SpecBoolean)) { + setConstant(node, jsBoolean(false)); + constantWasSet = true; + break; + } + + break; + case IsNumber: + if (!(child.m_type & ~SpecFullNumber)) { + setConstant(node, jsBoolean(true)); + constantWasSet = true; + break; + } + + if (!(child.m_type & SpecFullNumber)) { + setConstant(node, jsBoolean(false)); + constantWasSet = true; + break; + } + + break; + case IsString: + if (!(child.m_type & ~SpecString)) { + setConstant(node, jsBoolean(true)); + constantWasSet = true; + break; + } + + if (!(child.m_type & SpecString)) { + setConstant(node, jsBoolean(false)); + constantWasSet = true; + break; + } + + break; + case IsObject: + if (!(child.m_type & ~SpecObject)) { + setConstant(node, jsBoolean(true)); + constantWasSet = true; + break; + } + + if (!(child.m_type & SpecObject)) { + setConstant(node, jsBoolean(false)); + constantWasSet = true; + break; + } + + break; + case IsObjectOrNull: + // FIXME: Use the masquerades-as-undefined watchpoint thingy. + // https://bugs.webkit.org/show_bug.cgi?id=144456 + + // These expressions are complicated to parse. A helpful way to parse this is that + // "!(T & ~S)" means "T is a subset of S". Conversely, "!(T & S)" means "T is a + // disjoint set from S". Things like "T - S" means that, provided that S is a + // subset of T, it's the "set of all things in T but not in S". Things like "T | S" + // mean the "union of T and S". + + // Is the child's type an object that isn't an other-object (i.e. object that could + // have masquaredes-as-undefined traps) and isn't a function? Then: we should fold + // this to true. + if (!(child.m_type & ~(SpecObject - SpecObjectOther - SpecFunction))) { + setConstant(node, jsBoolean(true)); + constantWasSet = true; + break; + } + + // Is the child's type definitely not either of: an object that isn't a function, + // or either undefined or null? Then: we should fold this to false. This means + // for example that if it's any non-function object, including those that have + // masquerades-as-undefined traps, then we don't fold. 
It also means we won't fold + // if it's undefined-or-null, since the type bits don't distinguish between + // undefined (which should fold to false) and null (which should fold to true). + if (!(child.m_type & ((SpecObject - SpecFunction) | SpecOther))) { + setConstant(node, jsBoolean(false)); + constantWasSet = true; + break; + } + + break; + case IsFunction: + if (!(child.m_type & ~SpecFunction)) { + setConstant(node, jsBoolean(true)); + constantWasSet = true; + break; + } + + if (!(child.m_type & (SpecFunction | SpecObjectOther))) { + setConstant(node, jsBoolean(false)); + constantWasSet = true; + break; + } + break; + default: + break; + } + if (constantWasSet) + break; + forNode(node).setType(SpecBoolean); break; } @@ -827,48 +1138,44 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi JSValue child = forNode(node->child1()).value(); AbstractValue& abstractChild = forNode(node->child1()); if (child) { - JSValue typeString = jsTypeStringForValue(*vm, m_codeBlock->globalObjectFor(node->codeOrigin), child); - setConstant(node, typeString); + JSValue typeString = jsTypeStringForValue(*vm, m_codeBlock->globalObjectFor(node->origin.semantic), child); + setConstant(node, *m_graph.freeze(typeString)); break; } if (isFullNumberSpeculation(abstractChild.m_type)) { - setConstant(node, vm->smallStrings.numberString()); + setConstant(node, *m_graph.freeze(vm->smallStrings.numberString())); break; } if (isStringSpeculation(abstractChild.m_type)) { - setConstant(node, vm->smallStrings.stringString()); + setConstant(node, *m_graph.freeze(vm->smallStrings.stringString())); break; } - - if (isFinalObjectSpeculation(abstractChild.m_type) || isArraySpeculation(abstractChild.m_type) || isArgumentsSpeculation(abstractChild.m_type)) { - setConstant(node, vm->smallStrings.objectString()); + + // FIXME: We could use the masquerades-as-undefined watchpoint here. 
+ // https://bugs.webkit.org/show_bug.cgi?id=144456 + if (!(abstractChild.m_type & ~(SpecObject - SpecObjectOther))) { + setConstant(node, *m_graph.freeze(vm->smallStrings.objectString())); break; } if (isFunctionSpeculation(abstractChild.m_type)) { - setConstant(node, vm->smallStrings.functionString()); + setConstant(node, *m_graph.freeze(vm->smallStrings.functionString())); break; } if (isBooleanSpeculation(abstractChild.m_type)) { - setConstant(node, vm->smallStrings.booleanString()); + setConstant(node, *m_graph.freeze(vm->smallStrings.booleanString())); break; } - switch (node->child1().useKind()) { - case StringUse: - case CellUse: - node->setCanExit(true); - break; - case UntypedUse: - break; - default: - RELEASE_ASSERT_NOT_REACHED(); + if (isSymbolSpeculation(abstractChild.m_type)) { + setConstant(node, *m_graph.freeze(vm->smallStrings.symbolString())); break; } - forNode(node).set(m_graph, m_graph.m_vm.stringStructure.get()); + + forNode(node).setType(m_graph, SpecStringIdent); break; } @@ -876,8 +1183,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi case CompareLessEq: case CompareGreater: case CompareGreaterEq: - case CompareEq: - case CompareEqConstant: { + case CompareEq: { JSValue leftConst = forNode(node->child1()).value(); JSValue rightConst = forNode(node->child2()).value(); if (leftConst && rightConst) { @@ -915,82 +1221,165 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi break; } } + + if (node->op() == CompareEq && leftConst.isSymbol() && rightConst.isSymbol()) { + setConstant(node, jsBoolean(asSymbol(leftConst)->privateName() == asSymbol(rightConst)->privateName())); + break; + } } - if (node->op() == CompareEqConstant || node->op() == CompareEq) { + if (node->op() == CompareEq) { SpeculatedType leftType = forNode(node->child1()).m_type; SpeculatedType rightType = forNode(node->child2()).m_type; - if ((isInt32Speculation(leftType) && isOtherSpeculation(rightType)) - || (isOtherSpeculation(leftType) && isInt32Speculation(rightType))) { + if (!valuesCouldBeEqual(leftType, rightType)) { setConstant(node, jsBoolean(false)); break; } + + if (leftType == SpecOther) + std::swap(leftType, rightType); + if (rightType == SpecOther) { + // Undefined and Null are always equal when compared to eachother. + if (!(leftType & ~SpecOther)) { + setConstant(node, jsBoolean(true)); + break; + } + + // Any other type compared to Null or Undefined is always false + // as long as the MasqueradesAsUndefined watchpoint is valid. + // + // MasqueradesAsUndefined only matters for SpecObjectOther, other + // cases are always "false". 
+ if (!(leftType & (SpecObjectOther | SpecOther))) { + setConstant(node, jsBoolean(false)); + break; + } + + if (!(leftType & SpecOther) && m_graph.masqueradesAsUndefinedWatchpointIsStillValid(node->origin.semantic)) { + JSGlobalObject* globalObject = m_graph.globalObjectFor(node->origin.semantic); + m_graph.watchpoints().addLazily(globalObject->masqueradesAsUndefinedWatchpoint()); + setConstant(node, jsBoolean(false)); + break; + } + } } - forNode(node).setType(SpecBoolean); + if (node->child1() == node->child2()) { + if (node->isBinaryUseKind(Int32Use) || + node->isBinaryUseKind(Int52RepUse) || + node->isBinaryUseKind(StringUse) || + node->isBinaryUseKind(BooleanUse) || + node->isBinaryUseKind(SymbolUse) || + node->isBinaryUseKind(StringIdentUse) || + node->isBinaryUseKind(ObjectUse) || + node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse) || + node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) { + switch (node->op()) { + case CompareLess: + case CompareGreater: + setConstant(node, jsBoolean(false)); + break; + case CompareLessEq: + case CompareGreaterEq: + case CompareEq: + setConstant(node, jsBoolean(true)); + break; + default: + DFG_CRASH(m_graph, node, "Unexpected node type"); + break; + } + break; + } + } - // This is overly conservative. But the only thing this prevents is store elimination, - // and how likely is it, really, that you'll have redundant stores across a comparison - // operation? Comparison operations are typically at the end of basic blocks, so - // unless we have global store elimination (super unlikely given how unprofitable that - // optimization is to begin with), you aren't going to be wanting to store eliminate - // across an equality op. - node->setCanExit(true); + forNode(node).setType(SpecBoolean); break; } - case CompareStrictEq: - case CompareStrictEqConstant: { + case CompareStrictEq: { Node* leftNode = node->child1().node(); Node* rightNode = node->child2().node(); JSValue left = forNode(leftNode).value(); JSValue right = forNode(rightNode).value(); if (left && right) { - if (left.isNumber() && right.isNumber()) { - setConstant(node, jsBoolean(left.asNumber() == right.asNumber())); - break; - } if (left.isString() && right.isString()) { + // We need this case because JSValue::strictEqual is otherwise too racy for + // string comparisons. 
const StringImpl* a = asString(left)->tryGetValueImpl(); const StringImpl* b = asString(right)->tryGetValueImpl(); if (a && b) { setConstant(node, jsBoolean(WTF::equal(a, b))); break; } + } else { + setConstant(node, jsBoolean(JSValue::strictEqual(0, left, right))); + break; + } + } + + SpeculatedType leftLUB = leastUpperBoundOfStrictlyEquivalentSpeculations(forNode(leftNode).m_type); + SpeculatedType rightLUB = leastUpperBoundOfStrictlyEquivalentSpeculations(forNode(rightNode).m_type); + if (!(leftLUB & rightLUB)) { + setConstant(node, jsBoolean(false)); + break; + } + + if (node->child1() == node->child2()) { + if (node->isBinaryUseKind(BooleanUse) || + node->isBinaryUseKind(Int32Use) || + node->isBinaryUseKind(Int52RepUse) || + node->isBinaryUseKind(StringUse) || + node->isBinaryUseKind(StringIdentUse) || + node->isBinaryUseKind(SymbolUse) || + node->isBinaryUseKind(ObjectUse) || + node->isBinaryUseKind(MiscUse, UntypedUse) || + node->isBinaryUseKind(UntypedUse, MiscUse) || + node->isBinaryUseKind(StringIdentUse, NotStringVarUse) || + node->isBinaryUseKind(NotStringVarUse, StringIdentUse) || + node->isBinaryUseKind(StringUse, UntypedUse) || + node->isBinaryUseKind(UntypedUse, StringUse)) { + setConstant(node, jsBoolean(true)); + break; } } + forNode(node).setType(SpecBoolean); - node->setCanExit(true); // This is overly conservative. break; } case StringCharCodeAt: - node->setCanExit(true); forNode(node).setType(SpecInt32); break; case StringFromCharCode: - forNode(node).setType(SpecString); + forNode(node).setType(m_graph, SpecString); break; case StringCharAt: - node->setCanExit(true); forNode(node).set(m_graph, m_graph.m_vm.stringStructure.get()); break; case GetByVal: { - node->setCanExit(true); switch (node->arrayMode().type()) { case Array::SelectUsingPredictions: case Array::Unprofiled: - case Array::Undecided: + case Array::SelectUsingArguments: RELEASE_ASSERT_NOT_REACHED(); break; case Array::ForceExit: m_state.setIsValid(false); break; + case Array::Undecided: { + JSValue index = forNode(node->child2()).value(); + if (index && index.isInt32() && index.asInt32() >= 0) { + setConstant(node, jsUndefined()); + break; + } + forNode(node).setType(SpecOther); + break; + } case Array::Generic: - clobberWorld(node->codeOrigin, clobberLimit); + clobberWorld(node->origin.semantic, clobberLimit); forNode(node).makeHeapTop(); break; case Array::String: @@ -1005,27 +1394,28 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi // implies an in-bounds access). None of this feels like it's worth it, // so we're going with TOP for now. The same thing applies to // clobbering the world. 
- clobberWorld(node->codeOrigin, clobberLimit); + clobberWorld(node->origin.semantic, clobberLimit); forNode(node).makeHeapTop(); } else forNode(node).set(m_graph, m_graph.m_vm.stringStructure.get()); break; - case Array::Arguments: + case Array::DirectArguments: + case Array::ScopedArguments: forNode(node).makeHeapTop(); break; case Array::Int32: if (node->arrayMode().isOutOfBounds()) { - clobberWorld(node->codeOrigin, clobberLimit); + clobberWorld(node->origin.semantic, clobberLimit); forNode(node).makeHeapTop(); } else forNode(node).setType(SpecInt32); break; case Array::Double: if (node->arrayMode().isOutOfBounds()) { - clobberWorld(node->codeOrigin, clobberLimit); + clobberWorld(node->origin.semantic, clobberLimit); forNode(node).makeHeapTop(); } else if (node->arrayMode().isSaneChain()) - forNode(node).setType(SpecDouble); + forNode(node).setType(SpecBytecodeDouble); else forNode(node).setType(SpecDoubleReal); break; @@ -1033,7 +1423,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi case Array::ArrayStorage: case Array::SlowPutArrayStorage: if (node->arrayMode().isOutOfBounds()) - clobberWorld(node->codeOrigin, clobberLimit); + clobberWorld(node->origin.semantic, clobberLimit); forNode(node).makeHeapTop(); break; case Array::Int8Array: @@ -1060,13 +1450,13 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi else if (enableInt52() && node->shouldSpeculateMachineInt()) forNode(node).setType(SpecInt52); else - forNode(node).setType(SpecDouble); + forNode(node).setType(SpecInt52AsDouble); break; case Array::Float32Array: - forNode(node).setType(SpecDouble); + forNode(node).setType(SpecFullDouble); break; case Array::Float64Array: - forNode(node).setType(SpecDouble); + forNode(node).setType(SpecFullDouble); break; default: RELEASE_ASSERT_NOT_REACHED(); @@ -1078,30 +1468,29 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi case PutByValDirect: case PutByVal: case PutByValAlias: { - node->setCanExit(true); switch (node->arrayMode().modeForPut().type()) { case Array::ForceExit: m_state.setIsValid(false); break; case Array::Generic: - clobberWorld(node->codeOrigin, clobberLimit); + clobberWorld(node->origin.semantic, clobberLimit); break; case Array::Int32: if (node->arrayMode().isOutOfBounds()) - clobberWorld(node->codeOrigin, clobberLimit); + clobberWorld(node->origin.semantic, clobberLimit); break; case Array::Double: if (node->arrayMode().isOutOfBounds()) - clobberWorld(node->codeOrigin, clobberLimit); + clobberWorld(node->origin.semantic, clobberLimit); break; case Array::Contiguous: case Array::ArrayStorage: if (node->arrayMode().isOutOfBounds()) - clobberWorld(node->codeOrigin, clobberLimit); + clobberWorld(node->origin.semantic, clobberLimit); break; case Array::SlowPutArrayStorage: if (node->arrayMode().mayStoreToHole()) - clobberWorld(node->codeOrigin, clobberLimit); + clobberWorld(node->origin.semantic, clobberLimit); break; default: break; @@ -1110,16 +1499,59 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi } case ArrayPush: - node->setCanExit(true); - clobberWorld(node->codeOrigin, clobberLimit); + clobberWorld(node->origin.semantic, clobberLimit); forNode(node).setType(SpecBytecodeNumber); break; case ArrayPop: - node->setCanExit(true); - clobberWorld(node->codeOrigin, clobberLimit); + clobberWorld(node->origin.semantic, clobberLimit); forNode(node).makeHeapTop(); break; + + case GetMyArgumentByVal: { + JSValue index = 
forNode(node->child2()).m_value; + InlineCallFrame* inlineCallFrame = node->child1()->origin.semantic.inlineCallFrame; + + if (index && index.isInt32()) { + // This pretends to return TOP for accesses that are actually proven out-of-bounds because + // that's the conservative thing to do. Otherwise we'd need to write more code to mark such + // paths as unreachable, and it's almost certainly not worth the effort. + + if (inlineCallFrame) { + if (index.asUInt32() < inlineCallFrame->arguments.size() - 1) { + forNode(node) = m_state.variables().operand( + virtualRegisterForArgument(index.asInt32() + 1) + inlineCallFrame->stackOffset); + m_state.setFoundConstants(true); + break; + } + } else { + if (index.asUInt32() < m_state.variables().numberOfArguments() - 1) { + forNode(node) = m_state.variables().argument(index.asInt32() + 1); + m_state.setFoundConstants(true); + break; + } + } + } + + if (inlineCallFrame) { + // We have a bound on the types even though it's random access. Take advantage of this. + + AbstractValue result; + for (unsigned i = inlineCallFrame->arguments.size(); i-- > 1;) { + result.merge( + m_state.variables().operand( + virtualRegisterForArgument(i) + inlineCallFrame->stackOffset)); + } + + if (result.value()) + m_state.setFoundConstants(true); + forNode(node) = result; + break; + } + + forNode(node).makeHeapTop(); + break; + } case RegExpExec: forNode(node).makeHeapTop(); @@ -1129,6 +1561,16 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi forNode(node).setType(SpecBoolean); break; + case StringReplace: + if (node->child1().useKind() == StringUse + && node->child2().useKind() == RegExpObjectUse + && node->child3().useKind() == StringUse) { + // This doesn't clobber the world. It just reads and writes regexp state. + } else + clobberWorld(node->origin.semantic, clobberLimit); + forNode(node).set(m_graph, m_graph.m_vm.stringStructure.get()); + break; + case Jump: break; @@ -1147,7 +1589,6 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi // constant propagation, but we can do better: // We can specialize the source variable's value on each direction of // the branch. - node->setCanExit(true); // This is overly conservative. m_state.setBranchDirection(TakeBoth); break; } @@ -1161,11 +1602,17 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi case Return: m_state.setIsValid(false); break; + + case TailCall: + case TailCallVarargs: + case TailCallForwardVarargs: + clobberWorld(node->origin.semantic, clobberLimit); + m_state.setIsValid(false); + break; case Throw: case ThrowReferenceError: m_state.setIsValid(false); - node->setCanExit(true); break; case ToPrimitive: { @@ -1177,58 +1624,38 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi ASSERT(node->child1().useKind() == UntypedUse); - AbstractValue& source = forNode(node->child1()); - AbstractValue& destination = forNode(node); - - // NB. The more canonical way of writing this would have been: - // - // destination = source; - // if (destination.m_type & !(SpecFullNumber | SpecString | SpecBoolean)) { - // destination.filter(SpecFullNumber | SpecString | SpecBoolean); - // AbstractValue string; - // string.set(vm->stringStructure); - // destination.merge(string); - // } - // - // The reason why this would, in most other cases, have been better is that - // then destination would preserve any non-SpeculatedType knowledge of source. 
- // As it stands, the code below forgets any non-SpeculatedType knowledge that - // source would have had. Fortunately, though, for things like strings and - // numbers and booleans, we don't care about the non-SpeculatedType knowedge: - // the structure won't tell us anything we don't already know, and neither - // will ArrayModes. And if the source was a meaningful constant then we - // would have handled that above. Unfortunately, this does mean that - // ToPrimitive will currently forget string constants. But that's not a big - // deal since we don't do any optimization on those currently. - - clobberWorld(node->codeOrigin, clobberLimit); - - SpeculatedType type = source.m_type; - if (type & ~(SpecFullNumber | SpecString | SpecBoolean)) - type = (SpecHeapTop & ~SpecCell) | SpecString; - - destination.setType(type); - if (destination.isClear()) + if (!forNode(node->child1()).m_type) { m_state.setIsValid(false); + break; + } + + if (!(forNode(node->child1()).m_type & ~(SpecFullNumber | SpecBoolean | SpecString | SpecSymbol))) { + m_state.setFoundConstants(true); + forNode(node) = forNode(node->child1()); + break; + } + + clobberWorld(node->origin.semantic, clobberLimit); + + forNode(node).setType(m_graph, SpecHeapTop & ~SpecObject); break; } - case ToString: { + case ToString: + case CallStringConstructor: { switch (node->child1().useKind()) { case StringObjectUse: // This also filters that the StringObject has the primordial StringObject // structure. filter( node->child1(), - m_graph.globalObjectFor(node->codeOrigin)->stringObjectStructure()); - node->setCanExit(true); // We could be more precise but it's likely not worth it. + m_graph.globalObjectFor(node->origin.semantic)->stringObjectStructure()); break; case StringOrStringObjectUse: - node->setCanExit(true); // We could be more precise but it's likely not worth it. 
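[Editor's sketch, not part of the patch] The rewritten ToPrimitive case above is a type-driven identity fold: if the operand's speculated type proves it is already a primitive (number, boolean, string, or symbol), ToPrimitive returns its input unchanged, so the operand's abstract value flows through untouched and the node is a candidate for constant folding. A rough standalone sketch of just the decision, using toy type bits rather than JSC's:

#include <cassert>
#include <cstdint>

using SpeculatedType = uint32_t;
constexpr SpeculatedType SpecNumber  = 1u << 0;
constexpr SpeculatedType SpecBoolean = 1u << 1;
constexpr SpeculatedType SpecString  = 1u << 2;
constexpr SpeculatedType SpecSymbol  = 1u << 3;
constexpr SpeculatedType SpecObject  = 1u << 4; // may run user code via valueOf/toString

constexpr SpeculatedType SpecPrimitive = SpecNumber | SpecBoolean | SpecString | SpecSymbol;

// True when ToPrimitive(x) is provably x itself, so the node behaves as the
// identity during abstract interpretation.
bool toPrimitiveIsIdentity(SpeculatedType input)
{
    return !(input & ~SpecPrimitive);
}

int main()
{
    assert(toPrimitiveIsIdentity(SpecString | SpecNumber));
    assert(!toPrimitiveIsIdentity(SpecObject | SpecNumber)); // object may call user code
}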
break; case CellUse: case UntypedUse: - clobberWorld(node->codeOrigin, clobberLimit); + clobberWorld(node->origin.semantic, clobberLimit); break; default: RELEASE_ASSERT_NOT_REACHED(); @@ -1245,25 +1672,19 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi } case NewArray: - node->setCanExit(true); forNode(node).set( m_graph, - m_graph.globalObjectFor(node->codeOrigin)->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())); - m_state.setHaveStructures(true); + m_graph.globalObjectFor(node->origin.semantic)->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())); break; case NewArrayBuffer: - node->setCanExit(true); forNode(node).set( m_graph, - m_graph.globalObjectFor(node->codeOrigin)->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())); - m_state.setHaveStructures(true); + m_graph.globalObjectFor(node->origin.semantic)->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())); break; case NewArrayWithSize: - node->setCanExit(true); - forNode(node).setType(SpecArray); - m_state.setHaveStructures(true); + forNode(node).setType(m_graph, SpecArray); break; case NewTypedArray: @@ -1271,7 +1692,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi case Int32Use: break; case UntypedUse: - clobberWorld(node->codeOrigin, clobberLimit); + clobberWorld(node->origin.semantic, clobberLimit); break; default: RELEASE_ASSERT_NOT_REACHED(); @@ -1279,23 +1700,32 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi } forNode(node).set( m_graph, - m_graph.globalObjectFor(node->codeOrigin)->typedArrayStructure( + m_graph.globalObjectFor(node->origin.semantic)->typedArrayStructure( node->typedArrayType())); - m_state.setHaveStructures(true); break; - + case NewRegexp: - forNode(node).set(m_graph, m_graph.globalObjectFor(node->codeOrigin)->regExpStructure()); - m_state.setHaveStructures(true); + forNode(node).set(m_graph, m_graph.globalObjectFor(node->origin.semantic)->regExpStructure()); break; case ToThis: { AbstractValue& source = forNode(node->child1()); AbstractValue& destination = forNode(node); - - if (m_graph.executableFor(node->codeOrigin)->isStrictMode()) + + if (source.m_type == SpecStringObject) { + m_state.setFoundConstants(true); + destination = source; + break; + } + + if (m_graph.executableFor(node->origin.semantic)->isStrictMode()) { + if (!(source.m_type & ~(SpecFullNumber | SpecBoolean | SpecString | SpecSymbol))) { + m_state.setFoundConstants(true); + destination = source; + break; + } destination.makeHeapTop(); - else { + } else { destination = source; destination.merge(SpecObject); } @@ -1303,238 +1733,297 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi } case CreateThis: { - forNode(node).setType(SpecFinalObject); + // FIXME: We can fold this to NewObject if the incoming callee is a constant. + forNode(node).setType(m_graph, SpecFinalObject); break; } - case AllocationProfileWatchpoint: - node->setCanExit(true); - break; - case NewObject: ASSERT(node->structure()); forNode(node).set(m_graph, node->structure()); - m_state.setHaveStructures(true); break; + case PhantomNewObject: + case PhantomNewFunction: + case PhantomNewGeneratorFunction: + case PhantomCreateActivation: + case PhantomDirectArguments: + case PhantomClonedArguments: + case BottomValue: + m_state.setDidClobber(true); // Prevent constant folding. + // This claims to return bottom. 
+ break; + + case PutHint: + break; + + case MaterializeNewObject: { + StructureSet set; + + m_phiChildren->forAllTransitiveIncomingValues( + m_graph.varArgChild(node, 0).node(), + [&] (Node* incoming) { + set.add(incoming->castConstant<Structure*>()); + }); + + forNode(node).set(m_graph, set); + break; + } + case CreateActivation: + case MaterializeCreateActivation: forNode(node).set( - m_graph, m_codeBlock->globalObjectFor(node->codeOrigin)->activationStructure()); - m_state.setHaveStructures(true); + m_graph, m_codeBlock->globalObjectFor(node->origin.semantic)->activationStructure()); break; - case FunctionReentryWatchpoint: - case TypedArrayWatchpoint: + case CreateDirectArguments: + forNode(node).set(m_graph, m_codeBlock->globalObjectFor(node->origin.semantic)->directArgumentsStructure()); break; - - case CreateArguments: - forNode(node) = forNode(node->child1()); - forNode(node).filter(~SpecEmpty); - forNode(node).merge(SpecArguments); + + case CreateScopedArguments: + forNode(node).set(m_graph, m_codeBlock->globalObjectFor(node->origin.semantic)->scopedArgumentsStructure()); break; - case TearOffActivation: - case TearOffArguments: - // Does nothing that is user-visible. + case CreateClonedArguments: + forNode(node).setType(m_graph, SpecObjectOther); + break; + + case NewArrowFunction: + forNode(node).set( + m_graph, m_codeBlock->globalObjectFor(node->origin.semantic)->functionStructure()); break; - case CheckArgumentsNotCreated: - if (isEmptySpeculation( - m_state.variables().operand( - m_graph.argumentsRegisterFor(node->codeOrigin).offset()).m_type)) - m_state.setFoundConstants(true); - else - node->setCanExit(true); + case NewGeneratorFunction: + forNode(node).set( + m_graph, m_codeBlock->globalObjectFor(node->origin.semantic)->generatorFunctionStructure()); break; - - case GetMyArgumentsLength: - // We know that this executable does not escape its arguments, so we can optimize - // the arguments a bit. Note that this is not sufficient to force constant folding - // of GetMyArgumentsLength, because GetMyArgumentsLength is a clobbering operation. - // We perform further optimizations on this later on. - if (node->codeOrigin.inlineCallFrame) { - forNode(node).set( - m_graph, jsNumber(node->codeOrigin.inlineCallFrame->arguments.size() - 1)); - } else - forNode(node).setType(SpecInt32); - node->setCanExit( - !isEmptySpeculation( - m_state.variables().operand( - m_graph.argumentsRegisterFor(node->codeOrigin)).m_type)); - break; - - case GetMyArgumentsLengthSafe: - // This potentially clobbers all structures if the arguments object had a getter - // installed on the length property. - clobberWorld(node->codeOrigin, clobberLimit); - // We currently make no guarantee about what this returns because it does not - // speculate that the length property is actually a length. - forNode(node).makeHeapTop(); + + case NewFunction: + forNode(node).set( + m_graph, m_codeBlock->globalObjectFor(node->origin.semantic)->functionStructure()); break; - case GetMyArgumentByVal: - node->setCanExit(true); - // We know that this executable does not escape its arguments, so we can optimize - // the arguments a bit. Note that this ends up being further optimized by the - // ArgumentsSimplificationPhase. 
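[Editor's sketch, not part of the patch] The MaterializeNewObject case above rebuilds the abstract structure set of a sunken allocation by walking every value that can flow into its phi and collecting the structure each one was allocated with. A standalone toy version of that gather step follows; plain integers stand in for Structure*, and the callback-driven traversal mirrors, but is not, m_phiChildren->forAllTransitiveIncomingValues.

#include <cassert>
#include <functional>
#include <set>
#include <vector>

using StructureID = int;

// Toy stand-in for a phi whose incoming values are object allocations,
// each tagged with the structure it was created with.
struct ToyPhi {
    std::vector<StructureID> incomingAllocations;

    void forAllIncoming(const std::function<void(StructureID)>& f) const
    {
        for (StructureID s : incomingAllocations)
            f(s);
    }
};

// The materialized object's abstract structure set is the union of the
// structures of everything that can reach it.
std::set<StructureID> structuresForMaterialization(const ToyPhi& phi)
{
    std::set<StructureID> set;
    phi.forAllIncoming([&](StructureID s) { set.insert(s); });
    return set;
}

int main()
{
    ToyPhi phi { { 7, 7, 9 } };
    assert((structuresForMaterialization(phi) == std::set<StructureID>{ 7, 9 }));
}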
- forNode(node).makeHeapTop(); + case GetCallee: + if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())) { + InferredValue* singleton = executable->singletonFunction(); + if (JSValue value = singleton->inferredValue()) { + m_graph.watchpoints().addLazily(singleton); + JSFunction* function = jsCast<JSFunction*>(value); + setConstant(node, *m_graph.freeze(function)); + break; + } + } + forNode(node).setType(m_graph, SpecFunction); break; - case GetMyArgumentByValSafe: - node->setCanExit(true); - // This potentially clobbers all structures if the property we're accessing has - // a getter. We don't speculate against this. - clobberWorld(node->codeOrigin, clobberLimit); - // And the result is unknown. - forNode(node).makeHeapTop(); + case GetArgumentCount: + forNode(node).setType(SpecInt32); break; - case NewFunction: { - AbstractValue& value = forNode(node); - value = forNode(node->child1()); + case GetRestLength: + forNode(node).setType(SpecInt32); + break; - if (!(value.m_type & SpecEmpty)) { - m_state.setFoundConstants(true); - break; + case GetGetter: { + JSValue base = forNode(node->child1()).m_value; + if (base) { + GetterSetter* getterSetter = jsCast<GetterSetter*>(base); + if (!getterSetter->isGetterNull()) { + setConstant(node, *m_graph.freeze(getterSetter->getterConcurrently())); + break; + } } - - value.setType((value.m_type & ~SpecEmpty) | SpecFunction); + + forNode(node).setType(m_graph, SpecObject); break; } - - case NewFunctionExpression: - case NewFunctionNoCheck: - forNode(node).set( - m_graph, m_codeBlock->globalObjectFor(node->codeOrigin)->functionStructure()); - break; - case GetCallee: - forNode(node).setType(SpecFunction); + case GetSetter: { + JSValue base = forNode(node->child1()).m_value; + if (base) { + GetterSetter* getterSetter = jsCast<GetterSetter*>(base); + if (!getterSetter->isSetterNull()) { + setConstant(node, *m_graph.freeze(getterSetter->setterConcurrently())); + break; + } + } + + forNode(node).setType(m_graph, SpecObject); break; + } - case GetScope: // FIXME: We could get rid of these if we know that the JSFunction is a constant. https://bugs.webkit.org/show_bug.cgi?id=106202 - case GetMyScope: - case SkipTopScope: - forNode(node).setType(SpecObjectOther); + case GetScope: + if (JSValue base = forNode(node->child1()).m_value) { + if (JSFunction* function = jsDynamicCast<JSFunction*>(base)) { + setConstant(node, *m_graph.freeze(function->scope())); + break; + } + } + forNode(node).setType(m_graph, SpecObjectOther); break; case SkipScope: { JSValue child = forNode(node->child1()).value(); if (child) { - setConstant(node, JSValue(jsCast<JSScope*>(child.asCell())->next())); + setConstant(node, *m_graph.freeze(JSValue(jsCast<JSScope*>(child.asCell())->next()))); break; } - forNode(node).setType(SpecObjectOther); + forNode(node).setType(m_graph, SpecObjectOther); break; } - case GetClosureRegisters: - forNode(node).clear(); // The result is not a JS value. 
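[Editor's sketch, not part of the patch] Several of the new cases above (GetGetter, GetSetter, GetScope, SkipScope) follow one pattern: if the abstract value of the base already pins down a single constant object, the load happens at compile time and the node becomes a constant. A minimal sketch of that pattern with toy types; ToyFunction, AbstractPtr, and tryFoldGetScope are illustrative names, not JSC's:

#include <cassert>
#include <optional>

struct ToyScope {};
struct ToyFunction { ToyScope* scope; };

// Toy abstract value for a pointer: either "some function" (unknown) or a
// specific compile-time constant.
struct AbstractPtr {
    std::optional<ToyFunction*> constant;
};

// GetScope folds to a constant whenever its operand is a constant function.
std::optional<ToyScope*> tryFoldGetScope(const AbstractPtr& base)
{
    if (base.constant)
        return (*base.constant)->scope;
    return std::nullopt;
}

int main()
{
    ToyScope scope;
    ToyFunction f { &scope };
    assert(tryFoldGetScope(AbstractPtr{ &f }) == &scope);
    assert(!tryFoldGetScope(AbstractPtr{}).has_value());
}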
- break; - case GetClosureVar: - forNode(node).makeHeapTop(); + if (JSValue value = m_graph.tryGetConstantClosureVar(forNode(node->child1()), node->scopeOffset())) { + setConstant(node, *m_graph.freeze(value)); + break; + } + forNode(node).makeBytecodeTop(); break; case PutClosureVar: - clobberCapturedVars(node->codeOrigin); + break; + + case GetFromArguments: + forNode(node).makeHeapTop(); + break; + + case PutToArguments: break; case GetById: - case GetByIdFlush: - node->setCanExit(true); + case GetByIdFlush: { if (!node->prediction()) { m_state.setIsValid(false); break; } - if (isCellSpeculation(node->child1()->prediction())) { - if (Structure* structure = forNode(node->child1()).bestProvenStructure()) { - GetByIdStatus status = GetByIdStatus::computeFor( - m_graph.m_vm, structure, - m_graph.identifiers()[node->identifierNumber()]); - if (status.isSimple()) { - // Assert things that we can't handle and that the computeFor() method - // above won't be able to return. - ASSERT(status.structureSet().size() == 1); - ASSERT(!status.chain()); - - if (status.specificValue()) - setConstant(node, status.specificValue()); - else - forNode(node).makeHeapTop(); - filter(node->child1(), status.structureSet()); - - m_state.setFoundConstants(true); - m_state.setHaveStructures(true); - break; + + AbstractValue& value = forNode(node->child1()); + if (value.m_structure.isFinite() + && (node->child1().useKind() == CellUse || !(value.m_type & ~SpecCell))) { + UniquedStringImpl* uid = m_graph.identifiers()[node->identifierNumber()]; + GetByIdStatus status = GetByIdStatus::computeFor(value.m_structure.set(), uid); + if (status.isSimple()) { + // Figure out what the result is going to be - is it TOP, a constant, or maybe + // something more subtle? + AbstractValue result; + for (unsigned i = status.numVariants(); i--;) { + // This thing won't give us a variant that involves prototypes. If it did, we'd + // have more work to do here. + DFG_ASSERT(m_graph, node, status[i].conditionSet().isEmpty()); + + result.merge( + m_graph.inferredValueForProperty( + value, uid, status[i].offset(), m_state.structureClobberState())); } + m_state.setFoundConstants(true); + forNode(node) = result; + break; } } - clobberWorld(node->codeOrigin, clobberLimit); + + clobberWorld(node->origin.semantic, clobberLimit); forNode(node).makeHeapTop(); break; + } - case GetArrayLength: - node->setCanExit(true); // Lies, but it's true for the common case of JSArray, so it's good enough. + case GetArrayLength: { + JSArrayBufferView* view = m_graph.tryGetFoldableView( + forNode(node->child1()).m_value, node->arrayMode()); + if (view) { + setConstant(node, jsNumber(view->length())); + break; + } forNode(node).setType(SpecInt32); break; - - case CheckExecutable: { - // FIXME: We could track executables in AbstractValue, which would allow us to get rid of these checks - // more thoroughly. https://bugs.webkit.org/show_bug.cgi?id=106200 - // FIXME: We could eliminate these entirely if we know the exact value that flows into this. - // https://bugs.webkit.org/show_bug.cgi?id=106201 - node->setCanExit(true); - break; } - + case CheckStructure: { - // FIXME: We should be able to propagate the structure sets of constants (i.e. prototypes). AbstractValue& value = forNode(node->child1()); - ASSERT(!(value.m_type & ~SpecCell)); // Edge filtering should have already ensured this. 
StructureSet& set = node->structureSet(); - - if (value.m_currentKnownStructure.isSubsetOf(set)) { + + // It's interesting that we could have proven that the object has a larger structure set + // that includes the set we're testing. In that case we could make the structure check + // more efficient. We currently don't. + + if (value.m_structure.isSubsetOf(set)) m_state.setFoundConstants(true); - break; - } - node->setCanExit(true); - m_state.setHaveStructures(true); - - // If this structure check is attempting to prove knowledge already held in - // the futurePossibleStructure set then the constant folding phase should - // turn this into a watchpoint instead. - if (value.m_futurePossibleStructure.isSubsetOf(set) - && value.m_futurePossibleStructure.hasSingleton()) { - m_state.setFoundConstants(true); - filter(value, value.m_futurePossibleStructure.singleton()); + SpeculatedType admittedTypes = SpecNone; + switch (node->child1().useKind()) { + case CellUse: + case KnownCellUse: + admittedTypes = SpecNone; + break; + case CellOrOtherUse: + admittedTypes = SpecOther; + break; + default: + DFG_CRASH(m_graph, node, "Bad use kind"); break; } - - filter(value, set); + + filter(value, set, admittedTypes); break; } - case StructureTransitionWatchpoint: { + case CheckStructureImmediate: { + // FIXME: This currently can only reason about one structure at a time. + // https://bugs.webkit.org/show_bug.cgi?id=136988 + AbstractValue& value = forNode(node->child1()); - - filter(value, node->structure()); - m_state.setHaveStructures(true); - node->setCanExit(true); + StructureSet& set = node->structureSet(); + + if (value.value()) { + if (Structure* structure = jsDynamicCast<Structure*>(value.value())) { + if (set.contains(structure)) { + m_state.setFoundConstants(true); + break; + } + } + m_state.setIsValid(false); + break; + } + + if (m_phiChildren) { + bool allGood = true; + m_phiChildren->forAllTransitiveIncomingValues( + node, + [&] (Node* incoming) { + if (Structure* structure = incoming->dynamicCastConstant<Structure*>()) { + if (set.contains(structure)) + return; + } + allGood = false; + }); + if (allGood) { + m_state.setFoundConstants(true); + break; + } + } + + if (Structure* structure = set.onlyStructure()) { + filterByValue(node->child1(), *m_graph.freeze(structure)); + break; + } + + // Aw shucks, we can't do anything! break; } - + case PutStructure: - case PhantomPutStructure: - if (!forNode(node->child1()).m_currentKnownStructure.isClear()) { - clobberStructures(clobberLimit); - forNode(node->child1()).set(m_graph, node->structureTransitionData().newStructure); - m_state.setHaveStructures(true); + if (!forNode(node->child1()).m_structure.isClear()) { + if (forNode(node->child1()).m_structure.onlyStructure() == node->transition()->next) + m_state.setFoundConstants(true); + else { + observeTransition( + clobberLimit, node->transition()->previous, node->transition()->next); + forNode(node->child1()).changeStructure(m_graph, node->transition()->next); + } } break; case GetButterfly: + case GetButterflyReadOnly: case AllocatePropertyStorage: case ReallocatePropertyStorage: forNode(node).clear(); // The result is not a JS value. @@ -1544,7 +2033,6 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi m_state.setFoundConstants(true); break; } - node->setCanExit(true); // Lies, but this is followed by operations (like GetByVal) that always exit, so there is no point in us trying to be clever here. 
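[Editor's sketch, not part of the patch] The CheckStructure logic above reduces to set algebra on abstract structure sets: if the set already proven for the value is a subset of the set being checked, the check is redundant; otherwise the value is filtered to the intersection, and an empty intersection means the checked path is unreachable. A standalone illustration using integer structure IDs instead of Structure*:

#include <algorithm>
#include <cassert>
#include <iterator>
#include <set>

using StructureSet = std::set<int>;

enum class FiltrationResult { FiltrationOK, Contradiction };

bool isSubsetOf(const StructureSet& a, const StructureSet& b)
{
    return std::includes(b.begin(), b.end(), a.begin(), a.end());
}

// Narrow 'proven' to what the check admits. Returns Contradiction when the
// intersection is empty, i.e. the checked path can never be taken.
FiltrationResult filter(StructureSet& proven, const StructureSet& checked)
{
    StructureSet out;
    std::set_intersection(proven.begin(), proven.end(),
                          checked.begin(), checked.end(),
                          std::inserter(out, out.begin()));
    proven = std::move(out);
    return proven.empty() ? FiltrationResult::Contradiction : FiltrationResult::FiltrationOK;
}

int main()
{
    StructureSet proven { 1, 2 };
    assert(isSubsetOf(proven, StructureSet{ 1, 2, 3 })); // check would be redundant
    assert(filter(proven, StructureSet{ 2, 3 }) == FiltrationResult::FiltrationOK);
    assert(proven == StructureSet{ 2 });
    assert(filter(proven, StructureSet{ 9 }) == FiltrationResult::Contradiction);
}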
switch (node->arrayMode().type()) { case Array::String: filter(node->child1(), SpecString); @@ -1552,11 +2040,15 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi case Array::Int32: case Array::Double: case Array::Contiguous: + case Array::Undecided: case Array::ArrayStorage: case Array::SlowPutArrayStorage: break; - case Array::Arguments: - filter(node->child1(), SpecArguments); + case Array::DirectArguments: + filter(node->child1(), SpecDirectArguments); + break; + case Array::ScopedArguments: + filter(node->child1(), SpecScopedArguments); break; case Array::Int8Array: filter(node->child1(), SpecInt8Array); @@ -1585,12 +2077,14 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi case Array::Float64Array: filter(node->child1(), SpecFloat64Array); break; + case Array::AnyTypedArray: + filter(node->child1(), SpecTypedArrayView); + break; default: RELEASE_ASSERT_NOT_REACHED(); break; } filterArrayModes(node->child1(), node->arrayMode().arrayModesThatPassFiltering()); - m_state.setHaveStructures(true); break; } case Arrayify: { @@ -1598,59 +2092,276 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi m_state.setFoundConstants(true); break; } - ASSERT(node->arrayMode().conversion() == Array::Convert - || node->arrayMode().conversion() == Array::RageConvert); - node->setCanExit(true); + ASSERT(node->arrayMode().conversion() == Array::Convert); clobberStructures(clobberLimit); filterArrayModes(node->child1(), node->arrayMode().arrayModesThatPassFiltering()); - m_state.setHaveStructures(true); break; } case ArrayifyToStructure: { AbstractValue& value = forNode(node->child1()); - StructureSet set = node->structure(); - if (value.m_futurePossibleStructure.isSubsetOf(set) - || value.m_currentKnownStructure.isSubsetOf(set)) + if (value.m_structure.isSubsetOf(StructureSet(node->structure()))) m_state.setFoundConstants(true); - node->setCanExit(true); clobberStructures(clobberLimit); - filter(value, set); - m_state.setHaveStructures(true); + + // We have a bunch of options of how to express the abstract set at this point. Let set S + // be the set of structures that the value had before clobbering and assume that all of + // them are watchable. The new value should be the least expressible upper bound of the + // intersection of "values that currently have structure = node->structure()" and "values + // that have structure in S plus any structure transition-reachable from S". Assume that + // node->structure() is not in S but it is transition-reachable from S. Then we would + // like to say that the result is "values that have structure = node->structure() until + // we invalidate", but there is no way to express this using the AbstractValue syntax. So + // we must choose between: + // + // 1) "values that currently have structure = node->structure()". This is a valid + // superset of the value that we really want, and it's specific enough to satisfy the + // preconditions of the array access that this is guarding. It's also specific enough + // to allow relevant optimizations in the case that we didn't have a contradiction + // like in this example. Notice that in the abscence of any contradiction, this result + // is precise rather than being a conservative LUB. + // + // 2) "values that currently hava structure in S plus any structure transition-reachable + // from S". 
This is also a valid superset of the value that we really want, but it's + // not specific enough to satisfy the preconditions of the array access that this is + // guarding - so playing such shenanigans would preclude us from having assertions on + // the typing preconditions of any array accesses. This would also not be a desirable + // answer in the absence of a contradiction. + // + // Note that it's tempting to simply say that the resulting value is BOTTOM because of + // the contradiction. That would be wrong, since we haven't hit an invalidation point, + // yet. + value.set(m_graph, node->structure()); + break; + } + case GetIndexedPropertyStorage: { + JSArrayBufferView* view = m_graph.tryGetFoldableView( + forNode(node->child1()).m_value, node->arrayMode()); + if (view) + m_state.setFoundConstants(true); + forNode(node).clear(); break; } - case GetIndexedPropertyStorage: case ConstantStoragePointer: { forNode(node).clear(); break; } case GetTypedArrayByteOffset: { + JSArrayBufferView* view = m_graph.tryGetFoldableView(forNode(node->child1()).m_value); + if (view) { + setConstant(node, jsNumber(view->byteOffset())); + break; + } forNode(node).setType(SpecInt32); break; } case GetByOffset: { - forNode(node).makeHeapTop(); + StorageAccessData& data = node->storageAccessData(); + UniquedStringImpl* uid = m_graph.identifiers()[data.identifierNumber]; + + // FIXME: The part of this that handles inferred property types relies on AI knowing the structure + // right now. That's probably not optimal. In some cases, we may perform an optimization (usually + // by something other than AI, maybe by CSE for example) that obscures AI's view of the structure + // at the point where GetByOffset runs. Currently, when that happens, we'll have to rely entirely + // on the type that ByteCodeParser was able to prove. + AbstractValue value = m_graph.inferredValueForProperty( + forNode(node->child2()), uid, data.offset, m_state.structureClobberState()); + + // It's possible that the type that ByteCodeParser came up with is better. + AbstractValue typeFromParsing; + typeFromParsing.set(m_graph, data.inferredType, m_state.structureClobberState()); + value.filter(typeFromParsing); + + // If we decide that there does not exist any value that this can return, then it's probably + // because the compilation was already invalidated. + if (value.isClear()) + m_state.setIsValid(false); + + forNode(node) = value; + if (value.m_value) + m_state.setFoundConstants(true); + break; + } + + case GetGetterSetterByOffset: { + StorageAccessData& data = node->storageAccessData(); + JSValue result = m_graph.tryGetConstantProperty(forNode(node->child2()), data.offset); + if (result && jsDynamicCast<GetterSetter*>(result)) { + setConstant(node, *m_graph.freeze(result)); + break; + } + + forNode(node).set(m_graph, m_graph.m_vm.getterSetterStructure.get()); + break; + } + + case MultiGetByOffset: { + // This code will filter the base value in a manner that is possibly different (either more + // or less precise) than the way it would be filtered if this was strength-reduced to a + // CheckStructure. This is fine. It's legal for different passes over the code to prove + // different things about the code, so long as all of them are sound. That even includes + // one guy proving that code should never execute (due to a contradiction) and another guy + // not finding that contradiction. 
If someone ever proved that there would be a + // contradiction then there must always be a contradiction even if subsequent passes don't + // realize it. This is the case here. + + // Ordinarily you have to be careful with calling setFoundConstants() + // because of the effect on compile times, but this node is FTL-only. + m_state.setFoundConstants(true); + + UniquedStringImpl* uid = m_graph.identifiers()[node->multiGetByOffsetData().identifierNumber]; + + AbstractValue base = forNode(node->child1()); + StructureSet baseSet; + AbstractValue result; + for (const MultiGetByOffsetCase& getCase : node->multiGetByOffsetData().cases) { + StructureSet set = getCase.set(); + set.filter(base); + if (set.isEmpty()) + continue; + baseSet.merge(set); + + switch (getCase.method().kind()) { + case GetByOffsetMethod::Constant: { + AbstractValue thisResult; + thisResult.set( + m_graph, + *getCase.method().constant(), + m_state.structureClobberState()); + result.merge(thisResult); + break; + } + + case GetByOffsetMethod::Load: { + result.merge( + m_graph.inferredValueForProperty( + set, uid, m_state.structureClobberState())); + break; + } + + default: { + result.makeHeapTop(); + break; + } } + } + + if (forNode(node->child1()).changeStructure(m_graph, baseSet) == Contradiction) + m_state.setIsValid(false); + + forNode(node) = result; break; } case PutByOffset: { break; } + + case MultiPutByOffset: { + StructureSet newSet; + TransitionVector transitions; + + // Ordinarily you have to be careful with calling setFoundConstants() + // because of the effect on compile times, but this node is FTL-only. + m_state.setFoundConstants(true); + + AbstractValue base = forNode(node->child1()); + AbstractValue originalValue = forNode(node->child2()); + AbstractValue resultingValue; + + for (unsigned i = node->multiPutByOffsetData().variants.size(); i--;) { + const PutByIdVariant& variant = node->multiPutByOffsetData().variants[i]; + StructureSet thisSet = variant.oldStructure(); + thisSet.filter(base); + if (thisSet.isEmpty()) + continue; + + AbstractValue thisValue = originalValue; + thisValue.filter(m_graph, variant.requiredType()); + resultingValue.merge(thisValue); - case CheckFunction: { + if (variant.kind() == PutByIdVariant::Transition) { + if (thisSet.onlyStructure() != variant.newStructure()) { + transitions.append( + Transition(variant.oldStructureForTransition(), variant.newStructure())); + } // else this is really a replace. + newSet.add(variant.newStructure()); + } else { + ASSERT(variant.kind() == PutByIdVariant::Replace); + newSet.merge(thisSet); + } + } + + observeTransitions(clobberLimit, transitions); + if (forNode(node->child1()).changeStructure(m_graph, newSet) == Contradiction) + m_state.setIsValid(false); + forNode(node->child2()) = resultingValue; + if (!!originalValue && !resultingValue) + m_state.setIsValid(false); + break; + } + + case GetExecutable: { + JSValue value = forNode(node->child1()).value(); + if (value) { + JSFunction* function = jsDynamicCast<JSFunction*>(value); + if (function) { + setConstant(node, *m_graph.freeze(function->executable())); + break; + } + } + forNode(node).setType(m_graph, SpecCellOther); + break; + } + + case CheckCell: { JSValue value = forNode(node->child1()).value(); - if (value == node->function()) { + if (value == node->cellOperand()->value()) { m_state.setFoundConstants(true); ASSERT(value); break; } - - node->setCanExit(true); // Lies! We can do better. 
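[Editor's sketch, not part of the patch] The CheckCell case above is the degenerate form of filtering: the node demands one exact cell, so either the abstract value already proves that constant (the check is free), or the value is narrowed to that single constant, or the narrowing is impossible and the path is dead. A small sketch with toy pointers; ToyCell and checkCell are illustrative, not JSC API:

#include <cassert>
#include <optional>

struct ToyCell {};

enum class CheckOutcome { AlreadyProven, NarrowedToConstant, Unreachable };

struct AbstractCell {
    bool mayBeCell = true;                   // crude stand-in for the type filter
    std::optional<const ToyCell*> constant;  // known exact value, if any
};

CheckOutcome checkCell(AbstractCell& value, const ToyCell* expected)
{
    if (value.constant)
        return *value.constant == expected ? CheckOutcome::AlreadyProven
                                           : CheckOutcome::Unreachable;
    if (!value.mayBeCell)
        return CheckOutcome::Unreachable;
    value.constant = expected;               // from here on, the value is known exactly
    return CheckOutcome::NarrowedToConstant;
}

int main()
{
    ToyCell a, b;
    AbstractCell v;
    assert(checkCell(v, &a) == CheckOutcome::NarrowedToConstant);
    assert(checkCell(v, &a) == CheckOutcome::AlreadyProven);
    assert(checkCell(v, &b) == CheckOutcome::Unreachable);
}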
- filterByValue(node->child1(), node->function()); + filterByValue(node->child1(), *node->cellOperand()); break; } + + case CheckNotEmpty: { + AbstractValue& value = forNode(node->child1()); + if (!(value.m_type & SpecEmpty)) { + m_state.setFoundConstants(true); + break; + } + filter(value, ~SpecEmpty); + break; + } + + case CheckIdent: { + AbstractValue& value = forNode(node->child1()); + UniquedStringImpl* uid = node->uidOperand(); + ASSERT(uid->isSymbol() ? !(value.m_type & ~SpecSymbol) : !(value.m_type & ~SpecStringIdent)); // Edge filtering should have already ensured this. + + JSValue childConstant = value.value(); + if (childConstant) { + if (uid->isSymbol()) { + ASSERT(childConstant.isSymbol()); + if (asSymbol(childConstant)->privateName().uid() == uid) { + m_state.setFoundConstants(true); + break; + } + } else { + ASSERT(childConstant.isString()); + if (asString(childConstant)->tryGetValueImpl() == uid) { + m_state.setFoundConstants(true); + break; + } + } + } + + filter(value, uid->isSymbol() ? SpecSymbol : SpecStringIdent); + break; + } + case CheckInBounds: { JSValue left = forNode(node->child1()).value(); JSValue right = forNode(node->child2()).value(); @@ -1659,79 +2370,157 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi m_state.setFoundConstants(true); break; } - - node->setCanExit(true); break; } case PutById: - case PutByIdDirect: - node->setCanExit(true); - if (Structure* structure = forNode(node->child1()).bestProvenStructure()) { + case PutByIdFlush: + case PutByIdDirect: { + AbstractValue& value = forNode(node->child1()); + if (value.m_structure.isFinite()) { PutByIdStatus status = PutByIdStatus::computeFor( - m_graph.m_vm, - m_graph.globalObjectFor(node->codeOrigin), - structure, + m_graph.globalObjectFor(node->origin.semantic), + value.m_structure.set(), m_graph.identifiers()[node->identifierNumber()], node->op() == PutByIdDirect); - if (status.isSimpleReplace()) { - filter(node->child1(), structure); - m_state.setFoundConstants(true); - m_state.setHaveStructures(true); - break; - } - if (status.isSimpleTransition()) { - clobberStructures(clobberLimit); - forNode(node->child1()).set(m_graph, status.newStructure()); - m_state.setHaveStructures(true); - m_state.setFoundConstants(true); + + if (status.isSimple()) { + StructureSet newSet; + TransitionVector transitions; + + for (unsigned i = status.numVariants(); i--;) { + const PutByIdVariant& variant = status[i]; + if (variant.kind() == PutByIdVariant::Transition) { + transitions.append( + Transition( + variant.oldStructureForTransition(), variant.newStructure())); + m_graph.registerStructure(variant.newStructure()); + newSet.add(variant.newStructure()); + } else { + ASSERT(variant.kind() == PutByIdVariant::Replace); + newSet.merge(variant.oldStructure()); + } + } + + if (status.numVariants() == 1 || isFTL(m_graph.m_plan.mode)) + m_state.setFoundConstants(true); + + observeTransitions(clobberLimit, transitions); + if (forNode(node->child1()).changeStructure(m_graph, newSet) == Contradiction) + m_state.setIsValid(false); break; } } - clobberWorld(node->codeOrigin, clobberLimit); + + clobberWorld(node->origin.semantic, clobberLimit); break; + } + + case PutGetterById: + case PutSetterById: + case PutGetterSetterById: + case PutGetterByVal: + case PutSetterByVal: { + clobberWorld(node->origin.semantic, clobberLimit); + break; + } - case In: + case In: { // FIXME: We can determine when the property definitely exists based on abstract // value information. 
- clobberWorld(node->codeOrigin, clobberLimit); + clobberWorld(node->origin.semantic, clobberLimit); forNode(node).setType(SpecBoolean); break; + } + case GetEnumerableLength: { + forNode(node).setType(SpecInt32); + break; + } + case HasGenericProperty: { + forNode(node).setType(SpecBoolean); + break; + } + case HasStructureProperty: { + forNode(node).setType(SpecBoolean); + break; + } + case HasIndexedProperty: { + ArrayMode mode = node->arrayMode(); + switch (mode.type()) { + case Array::Int32: + case Array::Double: + case Array::Contiguous: + case Array::ArrayStorage: { + break; + } + default: { + clobberWorld(node->origin.semantic, clobberLimit); + break; + } + } + forNode(node).setType(SpecBoolean); + break; + } + case GetDirectPname: { + clobberWorld(node->origin.semantic, clobberLimit); + forNode(node).makeHeapTop(); + break; + } + case GetPropertyEnumerator: { + forNode(node).setType(m_graph, SpecCell); + break; + } + case GetEnumeratorStructurePname: { + forNode(node).setType(m_graph, SpecString | SpecOther); + break; + } + case GetEnumeratorGenericPname: { + forNode(node).setType(m_graph, SpecString | SpecOther); + break; + } + case ToIndexString: { + forNode(node).setType(m_graph, SpecString); + break; + } + case GetGlobalVar: forNode(node).makeHeapTop(); break; + case GetGlobalLexicalVariable: + forNode(node).makeBytecodeTop(); + break; - case VariableWatchpoint: case VarInjectionWatchpoint: - node->setCanExit(true); - break; - - case PutGlobalVar: + case PutGlobalVariable: case NotifyWrite: break; - case CheckHasInstance: - node->setCanExit(true); - // Sadly, we don't propagate the fact that we've done CheckHasInstance + case OverridesHasInstance: + forNode(node).setType(SpecBoolean); break; case InstanceOf: - node->setCanExit(true); - // Again, sadly, we don't propagate the fact that we've done InstanceOf + // Sadly, we don't propagate the fact that we've done InstanceOf + forNode(node).setType(SpecBoolean); + break; + + case InstanceOfCustom: + clobberWorld(node->origin.semantic, clobberLimit); forNode(node).setType(SpecBoolean); break; case Phi: RELEASE_ASSERT(m_graph.m_form == SSA); - // The state of this node would have already been decided. + // The state of this node would have already been decided, but it may have become a + // constant, in which case we'd like to know. 
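[Editor's sketch, not part of the patch] The Phi comment immediately above concerns the SSA form of the analysis: a Phi's abstract value is the join of its Upsilon inputs, and when that join collapses to one concrete value the analysis records that constants were found so a later folding pass can rewrite the uses. A rough sketch of such a join over "known constant" facts, using toy types rather than JSC's AbstractValue:

#include <cassert>
#include <optional>
#include <vector>

// Join of "known constant" facts: equal constants survive the merge,
// anything else widens to "unknown".
std::optional<int> joinConstants(const std::vector<std::optional<int>>& inputs)
{
    std::optional<int> result;
    bool first = true;
    for (const auto& in : inputs) {
        if (first) { result = in; first = false; continue; }
        if (!in || !result || *in != *result)
            result.reset();
    }
    return result;
}

int main()
{
    assert(joinConstants({ 5, 5, 5 }) == 5);       // the Phi folded to the constant 5
    assert(!joinConstants({ 5, 6 }).has_value());  // stays a genuine Phi
}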
+ if (forNode(node).m_value) + m_state.setFoundConstants(true); break; case Upsilon: { m_state.createValueForNode(node->phi()); - AbstractValue& value = forNode(node->child1()); - forNode(node) = value; - forNode(node->phi()) = value; + forNode(node->phi()) = forNode(node->child1()); break; } @@ -1740,39 +2529,57 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi break; case Call: + case TailCallInlinedCaller: case Construct: - node->setCanExit(true); - clobberWorld(node->codeOrigin, clobberLimit); + case CallVarargs: + case CallForwardVarargs: + case TailCallVarargsInlinedCaller: + case ConstructVarargs: + case ConstructForwardVarargs: + case TailCallForwardVarargsInlinedCaller: + clobberWorld(node->origin.semantic, clobberLimit); forNode(node).makeHeapTop(); break; case ForceOSRExit: - node->setCanExit(true); + case CheckBadCell: m_state.setIsValid(false); break; case InvalidationPoint: - node->setCanExit(true); + forAllValues(clobberLimit, AbstractValue::observeInvalidationPointFor); + m_state.setStructureClobberState(StructuresAreWatched); break; case CheckWatchdogTimer: - node->setCanExit(true); break; case Breakpoint: case ProfileWillCall: case ProfileDidCall: + case ProfileType: + case ProfileControlFlow: case Phantom: - case Check: case CountExecution: case CheckTierUpInLoop: case CheckTierUpAtReturn: + case CheckTypeInfoFlags: break; - case ConditionalStoreBarrier: { - if (!needsTypeCheck(node->child2().node(), ~SpecCell)) - m_state.setFoundConstants(true); - filter(node->child1(), SpecCell); + case CopyRest: + break; + + case Check: { + // Simplify out checks that don't actually do checking. + for (unsigned i = 0; i < AdjacencyList::Size; ++i) { + Edge edge = node->children.child(i); + if (!edge) + break; + if (edge.isProved() || edge.willNotHaveCheck()) { + m_state.setFoundConstants(true); + break; + } + } break; } @@ -1781,21 +2588,18 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi break; } - case StoreBarrierWithNullCheck: { - break; - } - case CheckTierUpAndOSREnter: + case CheckTierUpWithNestedTriggerAndOSREnter: case LoopHint: - // We pretend that it can exit because it may want to get all state. 
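[Editor's sketch, not part of the patch] The new Check case above drops checks whose edges are already proven: an edge needs a dynamic type check only if the type proven so far can still produce something outside what the edge requires. A standalone sketch of that test with toy type bitmasks:

#include <cassert>
#include <cstdint>

using SpeculatedType = uint32_t;
constexpr SpeculatedType SpecInt32  = 1u << 0;
constexpr SpeculatedType SpecDouble = 1u << 1;
constexpr SpeculatedType SpecString = 1u << 2;

// An edge's check is needed only if the proven type can produce something
// outside what the edge requires.
bool needsCheck(SpeculatedType proven, SpeculatedType required)
{
    return (proven & ~required) != 0;
}

int main()
{
    assert(!needsCheck(SpecInt32, SpecInt32 | SpecDouble)); // already proven: drop the check
    assert(needsCheck(SpecInt32 | SpecString, SpecInt32));  // string must still be excluded
}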
- node->setCanExit(true); + case ZombieHint: + case ExitOK: break; - case ZombieHint: case Unreachable: case LastNodeType: case ArithIMul: - RELEASE_ASSERT_NOT_REACHED(); + case FiatInt52: + DFG_CRASH(m_graph, node, "Unexpected node type"); break; } @@ -1812,9 +2616,8 @@ template<typename AbstractStateType> bool AbstractInterpreter<AbstractStateType>::execute(unsigned indexInBlock) { Node* node = m_state.block()->at(indexInBlock); - if (!startExecuting(node)) - return true; + startExecuting(); executeEdges(node); return executeEffects(indexInBlock, node); } @@ -1822,79 +2625,99 @@ bool AbstractInterpreter<AbstractStateType>::execute(unsigned indexInBlock) template<typename AbstractStateType> bool AbstractInterpreter<AbstractStateType>::execute(Node* node) { - if (!startExecuting(node)) - return true; - + startExecuting(); executeEdges(node); return executeEffects(UINT_MAX, node); } template<typename AbstractStateType> void AbstractInterpreter<AbstractStateType>::clobberWorld( - const CodeOrigin& codeOrigin, unsigned clobberLimit) + const CodeOrigin&, unsigned clobberLimit) { - clobberCapturedVars(codeOrigin); clobberStructures(clobberLimit); } template<typename AbstractStateType> -void AbstractInterpreter<AbstractStateType>::clobberCapturedVars(const CodeOrigin& codeOrigin) +template<typename Functor> +void AbstractInterpreter<AbstractStateType>::forAllValues( + unsigned clobberLimit, Functor& functor) { - if (codeOrigin.inlineCallFrame) { - const BitVector& capturedVars = codeOrigin.inlineCallFrame->capturedVars; - for (size_t i = capturedVars.size(); i--;) { - if (!capturedVars.quickGet(i)) - continue; - m_state.variables().local(i).makeHeapTop(); - } - } else { - for (size_t i = m_codeBlock->m_numVars; i--;) { - if (m_codeBlock->isCaptured(virtualRegisterForLocal(i))) - m_state.variables().local(i).makeHeapTop(); - } - } - - for (size_t i = m_state.variables().numberOfArguments(); i--;) { - if (m_codeBlock->isCaptured(virtualRegisterForArgument(i))) - m_state.variables().argument(i).makeHeapTop(); - } -} - -template<typename AbstractStateType> -void AbstractInterpreter<AbstractStateType>::clobberStructures(unsigned clobberLimit) -{ - if (!m_state.haveStructures()) - return; + SamplingRegion samplingRegion("DFG AI For All Values"); if (clobberLimit >= m_state.block()->size()) clobberLimit = m_state.block()->size(); else clobberLimit++; ASSERT(clobberLimit <= m_state.block()->size()); for (size_t i = clobberLimit; i--;) - forNode(m_state.block()->at(i)).clobberStructures(); + functor(forNode(m_state.block()->at(i))); if (m_graph.m_form == SSA) { HashSet<Node*>::iterator iter = m_state.block()->ssa->liveAtHead.begin(); HashSet<Node*>::iterator end = m_state.block()->ssa->liveAtHead.end(); for (; iter != end; ++iter) - forNode(*iter).clobberStructures(); + functor(forNode(*iter)); } for (size_t i = m_state.variables().numberOfArguments(); i--;) - m_state.variables().argument(i).clobberStructures(); + functor(m_state.variables().argument(i)); for (size_t i = m_state.variables().numberOfLocals(); i--;) - m_state.variables().local(i).clobberStructures(); - m_state.setHaveStructures(true); + functor(m_state.variables().local(i)); +} + +template<typename AbstractStateType> +void AbstractInterpreter<AbstractStateType>::clobberStructures(unsigned clobberLimit) +{ + SamplingRegion samplingRegion("DFG AI Clobber Structures"); + forAllValues(clobberLimit, AbstractValue::clobberStructuresFor); + setDidClobber(); +} + +template<typename AbstractStateType> +void 
AbstractInterpreter<AbstractStateType>::observeTransition( + unsigned clobberLimit, Structure* from, Structure* to) +{ + AbstractValue::TransitionObserver transitionObserver(from, to); + forAllValues(clobberLimit, transitionObserver); + + ASSERT(!from->dfgShouldWatch()); // We don't need to claim to be in a clobbered state because 'from' was never watchable (during the time we were compiling), hence no constants ever introduced into the DFG IR that ever had a watchable structure would ever have the same structure as from. +} + +template<typename AbstractStateType> +void AbstractInterpreter<AbstractStateType>::observeTransitions( + unsigned clobberLimit, const TransitionVector& vector) +{ + AbstractValue::TransitionsObserver transitionsObserver(vector); + forAllValues(clobberLimit, transitionsObserver); + + if (!ASSERT_DISABLED) { + // We don't need to claim to be in a clobbered state because none of the Transition::previous structures are watchable. + for (unsigned i = vector.size(); i--;) + ASSERT(!vector[i].previous->dfgShouldWatch()); + } +} + +template<typename AbstractStateType> +void AbstractInterpreter<AbstractStateType>::setDidClobber() +{ m_state.setDidClobber(true); + m_state.setStructureClobberState(StructuresAreClobbered); +} + +template<typename AbstractStateType> +void AbstractInterpreter<AbstractStateType>::dump(PrintStream& out) const +{ + const_cast<AbstractInterpreter<AbstractStateType>*>(this)->dump(out); } template<typename AbstractStateType> void AbstractInterpreter<AbstractStateType>::dump(PrintStream& out) { CommaPrinter comma(" "); + HashSet<Node*> seen; if (m_graph.m_form == SSA) { HashSet<Node*>::iterator iter = m_state.block()->ssa->liveAtHead.begin(); HashSet<Node*>::iterator end = m_state.block()->ssa->liveAtHead.end(); for (; iter != end; ++iter) { Node* node = *iter; + seen.add(node); AbstractValue& value = forNode(node); if (value.isClear()) continue; @@ -1903,18 +2726,32 @@ void AbstractInterpreter<AbstractStateType>::dump(PrintStream& out) } for (size_t i = 0; i < m_state.block()->size(); ++i) { Node* node = m_state.block()->at(i); + seen.add(node); AbstractValue& value = forNode(node); if (value.isClear()) continue; out.print(comma, node, ":", value); } + if (m_graph.m_form == SSA) { + HashSet<Node*>::iterator iter = m_state.block()->ssa->liveAtTail.begin(); + HashSet<Node*>::iterator end = m_state.block()->ssa->liveAtTail.end(); + for (; iter != end; ++iter) { + Node* node = *iter; + if (seen.contains(node)) + continue; + AbstractValue& value = forNode(node); + if (value.isClear()) + continue; + out.print(comma, node, ":", value); + } + } } template<typename AbstractStateType> FiltrationResult AbstractInterpreter<AbstractStateType>::filter( - AbstractValue& value, const StructureSet& set) + AbstractValue& value, const StructureSet& set, SpeculatedType admittedTypes) { - if (value.filter(m_graph, set) == FiltrationOK) + if (value.filter(m_graph, set, admittedTypes) == FiltrationOK) return FiltrationOK; m_state.setIsValid(false); return Contradiction; @@ -1942,7 +2779,7 @@ FiltrationResult AbstractInterpreter<AbstractStateType>::filter( template<typename AbstractStateType> FiltrationResult AbstractInterpreter<AbstractStateType>::filterByValue( - AbstractValue& abstractValue, JSValue concreteValue) + AbstractValue& abstractValue, FrozenValue concreteValue) { if (abstractValue.filterByValue(concreteValue) == FiltrationOK) return FiltrationOK; diff --git a/Source/JavaScriptCore/dfg/DFGAbstractValue.cpp b/Source/JavaScriptCore/dfg/DFGAbstractValue.cpp index 
bd1ba4844..b6fcf8993 100644 --- a/Source/JavaScriptCore/dfg/DFGAbstractValue.cpp +++ b/Source/JavaScriptCore/dfg/DFGAbstractValue.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,64 +29,235 @@ #if ENABLE(DFG_JIT) #include "DFGGraph.h" -#include "Operations.h" +#include "JSCInlines.h" +#include "TrackedReferences.h" namespace JSC { namespace DFG { -void AbstractValue::setMostSpecific(Graph& graph, JSValue value) +void AbstractValue::observeTransitions(const TransitionVector& vector) { - if (!!value && value.isCell()) { - Structure* structure = value.asCell()->structure(); - m_currentKnownStructure = structure; - setFuturePossibleStructure(graph, structure); - m_arrayModes = asArrayModes(structure->indexingType()); - } else { - m_currentKnownStructure.clear(); - m_futurePossibleStructure.clear(); - m_arrayModes = 0; + if (m_type & SpecCell) { + m_structure.observeTransitions(vector); + ArrayModes newModes = 0; + for (unsigned i = vector.size(); i--;) { + if (m_arrayModes & asArrayModes(vector[i].previous->indexingType())) + newModes |= asArrayModes(vector[i].next->indexingType()); + } + m_arrayModes |= newModes; } - - m_type = speculationFromValue(value); - m_value = value; - checkConsistency(); } -void AbstractValue::set(Graph& graph, JSValue value) +void AbstractValue::set(Graph& graph, const FrozenValue& value, StructureClobberState clobberState) { - if (!!value && value.isCell()) { - m_currentKnownStructure.makeTop(); - Structure* structure = value.asCell()->structure(); - setFuturePossibleStructure(graph, structure); - m_arrayModes = asArrayModes(structure->indexingType()); - clobberArrayModes(); + if (!!value && value.value().isCell()) { + Structure* structure = value.structure(); + if (graph.registerStructure(structure) == StructureRegisteredAndWatched) { + m_structure = structure; + if (clobberState == StructuresAreClobbered) { + m_arrayModes = ALL_ARRAY_MODES; + m_structure.clobber(); + } else + m_arrayModes = asArrayModes(structure->indexingType()); + } else { + m_structure.makeTop(); + m_arrayModes = ALL_ARRAY_MODES; + } } else { - m_currentKnownStructure.clear(); - m_futurePossibleStructure.clear(); + m_structure.clear(); m_arrayModes = 0; } - - m_type = speculationFromValue(value); - if (m_type == SpecInt52AsDouble) - m_type = SpecInt52; - m_value = value; - + + m_type = speculationFromValue(value.value()); + m_value = value.value(); + checkConsistency(); + assertIsRegistered(graph); } void AbstractValue::set(Graph& graph, Structure* structure) { - m_currentKnownStructure = structure; - setFuturePossibleStructure(graph, structure); + m_structure = structure; m_arrayModes = asArrayModes(structure->indexingType()); m_type = speculationFromStructure(structure); m_value = JSValue(); checkConsistency(); + assertIsRegistered(graph); +} + +void AbstractValue::set(Graph& graph, const StructureSet& set) +{ + m_structure = set; + m_arrayModes = set.arrayModesFromStructures(); + m_type = set.speculationFromStructures(); + m_value = JSValue(); + + checkConsistency(); + assertIsRegistered(graph); +} + +void AbstractValue::setType(Graph& graph, SpeculatedType type) +{ + SpeculatedType cellType = type & SpecCell; + if (cellType) { + if (!(cellType & ~SpecString)) + m_structure = graph.m_vm.stringStructure.get(); + else if (isSymbolSpeculation(cellType)) + 
m_structure = graph.m_vm.symbolStructure.get(); + else + m_structure.makeTop(); + m_arrayModes = ALL_ARRAY_MODES; + } else { + m_structure.clear(); + m_arrayModes = 0; + } + m_type = type; + m_value = JSValue(); + checkConsistency(); +} + +void AbstractValue::set(Graph& graph, const InferredType::Descriptor& descriptor) +{ + switch (descriptor.kind()) { + case InferredType::Bottom: + clear(); + return; + case InferredType::Boolean: + setType(SpecBoolean); + return; + case InferredType::Other: + setType(SpecOther); + return; + case InferredType::Int32: + setType(SpecInt32); + return; + case InferredType::Number: + setType(SpecBytecodeNumber); + return; + case InferredType::String: + set(graph, graph.m_vm.stringStructure.get()); + return; + case InferredType::Symbol: + set(graph, graph.m_vm.symbolStructure.get()); + return; + case InferredType::ObjectWithStructure: + set(graph, descriptor.structure()); + return; + case InferredType::ObjectWithStructureOrOther: + set(graph, descriptor.structure()); + merge(SpecOther); + return; + case InferredType::Object: + setType(graph, SpecObject); + return; + case InferredType::ObjectOrOther: + setType(graph, SpecObject | SpecOther); + return; + case InferredType::Top: + makeHeapTop(); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); +} + +void AbstractValue::set( + Graph& graph, const InferredType::Descriptor& descriptor, StructureClobberState clobberState) +{ + set(graph, descriptor); + if (clobberState == StructuresAreClobbered) + clobberStructures(); +} + +void AbstractValue::fixTypeForRepresentation(Graph& graph, NodeFlags representation, Node* node) +{ + if (representation == NodeResultDouble) { + if (m_value) { + ASSERT(m_value.isNumber()); + if (m_value.isInt32()) + m_value = jsDoubleNumber(m_value.asNumber()); + } + if (m_type & SpecMachineInt) { + m_type &= ~SpecMachineInt; + m_type |= SpecInt52AsDouble; + } + if (m_type & ~SpecFullDouble) + DFG_CRASH(graph, node, toCString("Abstract value ", *this, " for double node has type outside SpecFullDouble.\n").data()); + } else if (representation == NodeResultInt52) { + if (m_type & SpecInt52AsDouble) { + m_type &= ~SpecInt52AsDouble; + m_type |= SpecInt52; + } + if (m_type & ~SpecMachineInt) + DFG_CRASH(graph, node, toCString("Abstract value ", *this, " for int52 node has type outside SpecMachineInt.\n").data()); + } else { + if (m_type & SpecInt52) { + m_type &= ~SpecInt52; + m_type |= SpecInt52AsDouble; + } + if (m_type & ~SpecBytecodeTop) + DFG_CRASH(graph, node, toCString("Abstract value ", *this, " for value node has type outside SpecBytecodeTop.\n").data()); + } + + checkConsistency(); +} + +void AbstractValue::fixTypeForRepresentation(Graph& graph, Node* node) +{ + fixTypeForRepresentation(graph, node->result(), node); +} + +bool AbstractValue::mergeOSREntryValue(Graph& graph, JSValue value) +{ + AbstractValue oldMe = *this; + + if (isClear()) { + FrozenValue* frozenValue = graph.freeze(value); + if (frozenValue->pointsToHeap()) { + m_structure = frozenValue->structure(); + m_arrayModes = asArrayModes(frozenValue->structure()->indexingType()); + } else { + m_structure.clear(); + m_arrayModes = 0; + } + + m_type = speculationFromValue(value); + m_value = value; + } else { + mergeSpeculation(m_type, speculationFromValue(value)); + if (!!value && value.isCell()) { + Structure* structure = value.asCell()->structure(); + graph.registerStructure(structure); + mergeArrayModes(m_arrayModes, asArrayModes(structure->indexingType())); + m_structure.merge(StructureSet(structure)); + } + if (m_value != 
value) + m_value = JSValue(); + } + + checkConsistency(); + assertIsRegistered(graph); + + return oldMe != *this; } -FiltrationResult AbstractValue::filter(Graph& graph, const StructureSet& other) +bool AbstractValue::isType(Graph& graph, const InferredType::Descriptor& inferredType) const { + AbstractValue typeValue; + typeValue.set(graph, inferredType); + + AbstractValue mergedValue = *this; + mergedValue.merge(typeValue); + + return mergedValue == typeValue; +} + +FiltrationResult AbstractValue::filter( + Graph& graph, const StructureSet& other, SpeculatedType admittedTypes) +{ + ASSERT(!(admittedTypes & SpecCell)); + if (isClear()) return FiltrationOK; @@ -94,23 +265,31 @@ FiltrationResult AbstractValue::filter(Graph& graph, const StructureSet& other) // having structures, array modes, or a specific value. // https://bugs.webkit.org/show_bug.cgi?id=109663 - m_type &= other.speculationFromStructures(); + m_type &= other.speculationFromStructures() | admittedTypes; m_arrayModes &= other.arrayModesFromStructures(); - m_currentKnownStructure.filter(other); + m_structure.filter(other); // It's possible that prior to the above two statements we had (Foo, TOP), where // Foo is a SpeculatedType that is disjoint with the passed StructureSet. In that // case, we will now have (None, [someStructure]). In general, we need to make // sure that new information gleaned from the SpeculatedType needs to be fed back // into the information gleaned from the StructureSet. - m_currentKnownStructure.filter(m_type); + m_structure.filter(m_type); - if (m_currentKnownStructure.hasSingleton()) - setFuturePossibleStructure(graph, m_currentKnownStructure.singleton()); - filterArrayModesByType(); filterValueByType(); - return normalizeClarity(); + return normalizeClarity(graph); +} + +FiltrationResult AbstractValue::changeStructure(Graph& graph, const StructureSet& other) +{ + m_type &= other.speculationFromStructures(); + m_arrayModes = other.arrayModesFromStructures(); + m_structure = other; + + filterValueByType(); + + return normalizeClarity(graph); } FiltrationResult AbstractValue::filterArrayModes(ArrayModes arrayModes) @@ -130,34 +309,84 @@ FiltrationResult AbstractValue::filter(SpeculatedType type) if ((m_type & type) == m_type) return FiltrationOK; + // Fast path for the case that we don't even have a cell. + if (!(m_type & SpecCell)) { + m_type &= type; + FiltrationResult result; + if (m_type == SpecNone) { + clear(); + result = Contradiction; + } else + result = FiltrationOK; + checkConsistency(); + return result; + } + m_type &= type; // It's possible that prior to this filter() call we had, say, (Final, TOP), and // the passed type is Array. At this point we'll have (None, TOP). The best way // to ensure that the structure filtering does the right thing is to filter on // the new type (None) rather than the one passed (Array). 
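[Editor's sketch, not part of the patch] The comment directly above describes the normalization step this sketch isolates: after intersecting the type mask, the structure set has to be re-filtered against the narrowed type, otherwise the value could claim "no possible types, but one possible structure", which should instead normalize to bottom (a contradiction). Toy types again, not JSC's:

#include <cassert>
#include <cstdint>
#include <set>

using SpeculatedType = uint32_t;
constexpr SpeculatedType SpecString = 1u << 0;
constexpr SpeculatedType SpecArray  = 1u << 1;

struct ToyAbstractValue {
    SpeculatedType type = 0;
    std::set<int> structures; // only meaningful for cell types
};

// Toy mapping from a structure ID to the type of object it describes.
static SpeculatedType typeFromStructure(int structureID)
{
    return structureID == 1 ? SpecString : SpecArray;
}

// Intersect with 'mask', then drop structures the narrowed type can no longer
// describe. Returns false on contradiction (bottom).
bool filterType(ToyAbstractValue& value, SpeculatedType mask)
{
    value.type &= mask;
    for (auto it = value.structures.begin(); it != value.structures.end();) {
        if (!(typeFromStructure(*it) & value.type))
            it = value.structures.erase(it);
        else
            ++it;
    }
    return value.type != 0;
}

int main()
{
    ToyAbstractValue v { SpecString, { 1 } }; // (String, [string structure])
    assert(!filterType(v, SpecArray));        // filtering to Array: contradiction
    assert(v.structures.empty());             // the stale structure was dropped too
}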
- m_currentKnownStructure.filter(m_type); - m_futurePossibleStructure.filter(m_type); + m_structure.filter(type); filterArrayModesByType(); filterValueByType(); return normalizeClarity(); } -FiltrationResult AbstractValue::filterByValue(JSValue value) +FiltrationResult AbstractValue::filterByValue(const FrozenValue& value) { - FiltrationResult result = filter(speculationFromValue(value)); + FiltrationResult result = filter(speculationFromValue(value.value())); if (m_type) - m_value = value; + m_value = value.value(); return result; } -void AbstractValue::setFuturePossibleStructure(Graph& graph, Structure* structure) +bool AbstractValue::contains(Structure* structure) const { - ASSERT(structure); - if (graph.watchpoints().isStillValid(structure->transitionWatchpointSet())) - m_futurePossibleStructure = structure; - else - m_futurePossibleStructure.makeTop(); + return couldBeType(speculationFromStructure(structure)) + && (m_arrayModes & arrayModeFromStructure(structure)) + && m_structure.contains(structure); +} + +FiltrationResult AbstractValue::filter(const AbstractValue& other) +{ + m_type &= other.m_type; + m_structure.filter(other.m_structure); + m_arrayModes &= other.m_arrayModes; + + m_structure.filter(m_type); + filterArrayModesByType(); + filterValueByType(); + + if (normalizeClarity() == Contradiction) + return Contradiction; + + if (m_value == other.m_value) + return FiltrationOK; + + // Neither of us are BOTTOM, so an empty value means TOP. + if (!m_value) { + // We previously didn't prove a value but now we have done so. + m_value = other.m_value; + return FiltrationOK; + } + + if (!other.m_value) { + // We had proved a value but the other guy hadn't, so keep our proof. + return FiltrationOK; + } + + // We both proved there to be a specific value but they are different. + clear(); + return Contradiction; +} + +FiltrationResult AbstractValue::filter(Graph& graph, const InferredType::Descriptor& descriptor) +{ + AbstractValue filterValue; + filterValue.set(graph, descriptor); + return filter(filterValue); } void AbstractValue::filterValueByType() @@ -205,8 +434,7 @@ bool AbstractValue::shouldBeClear() const return true; if (!(m_type & ~SpecCell) - && (!m_arrayModes - || m_currentKnownStructure.isClear())) + && (!m_arrayModes || m_structure.isClear())) return true; return false; @@ -230,12 +458,18 @@ FiltrationResult AbstractValue::normalizeClarity() return result; } +FiltrationResult AbstractValue::normalizeClarity(Graph& graph) +{ + FiltrationResult result = normalizeClarity(); + assertIsRegistered(graph); + return result; +} + #if !ASSERT_DISABLED void AbstractValue::checkConsistency() const { if (!(m_type & SpecCell)) { - ASSERT(m_currentKnownStructure.isClear()); - ASSERT(m_futurePossibleStructure.isClear()); + ASSERT(m_structure.isClear()); ASSERT(!m_arrayModes); } @@ -244,6 +478,8 @@ void AbstractValue::checkConsistency() const if (!!m_value) { SpeculatedType type = m_type; + // This relaxes the assertion below a bit, since we don't know the representation of the + // node. if (type & SpecInt52) type |= SpecInt52AsDouble; ASSERT(mergeSpeculations(type, speculationFromValue(m_value)) == type); @@ -254,8 +490,29 @@ void AbstractValue::checkConsistency() const // we don't want to get pedantic about this as it would only increase the computational // complexity of the code. 
} + +void AbstractValue::assertIsRegistered(Graph& graph) const +{ + m_structure.assertIsRegistered(graph); +} #endif +ResultType AbstractValue::resultType() const +{ + ASSERT(isType(SpecBytecodeTop)); + if (isType(SpecBoolean)) + return ResultType::booleanType(); + if (isType(SpecInt32)) + return ResultType::numberTypeIsInt32(); + if (isType(SpecBytecodeNumber)) + return ResultType::numberType(); + if (isType(SpecString)) + return ResultType::stringType(); + if (isType(SpecString | SpecBytecodeNumber)) + return ResultType::stringOrNumberType(); + return ResultType::unknownType(); +} + void AbstractValue::dump(PrintStream& out) const { dumpInContext(out, 0); @@ -267,14 +524,19 @@ void AbstractValue::dumpInContext(PrintStream& out, DumpContext* context) const if (m_type & SpecCell) { out.print( ", ", ArrayModesDump(m_arrayModes), ", ", - inContext(m_currentKnownStructure, context), ", ", - inContext(m_futurePossibleStructure, context)); + inContext(m_structure, context)); } if (!!m_value) out.print(", ", inContext(m_value, context)); out.print(")"); } +void AbstractValue::validateReferences(const TrackedReferences& trackedReferences) +{ + trackedReferences.check(m_value); + m_structure.validateReferences(trackedReferences); +} + } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGAbstractValue.h b/Source/JavaScriptCore/dfg/DFGAbstractValue.h index db313d242..480842860 100644 --- a/Source/JavaScriptCore/dfg/DFGAbstractValue.h +++ b/Source/JavaScriptCore/dfg/DFGAbstractValue.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,21 +26,29 @@ #ifndef DFGAbstractValue_h #define DFGAbstractValue_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "ArrayProfile.h" #include "DFGFiltrationResult.h" +#include "DFGFrozenValue.h" +#include "DFGNodeFlags.h" #include "DFGStructureAbstractValue.h" +#include "DFGStructureClobberState.h" +#include "InferredType.h" #include "JSCell.h" +#include "ResultType.h" #include "SpeculatedType.h" #include "DumpContext.h" #include "StructureSet.h" -namespace JSC { namespace DFG { +namespace JSC { + +class TrackedReferences; + +namespace DFG { class Graph; +struct Node; struct AbstractValue { AbstractValue() @@ -53,8 +61,7 @@ struct AbstractValue { { m_type = SpecNone; m_arrayModes = 0; - m_currentKnownStructure.clear(); - m_futurePossibleStructure.clear(); + m_structure.clear(); m_value = JSValue(); checkConsistency(); } @@ -72,18 +79,82 @@ struct AbstractValue { makeTop(SpecBytecodeTop); } + void makeFullTop() + { + makeTop(SpecFullTop); + } + void clobberStructures() { if (m_type & SpecCell) { - m_currentKnownStructure.makeTop(); + m_structure.clobber(); clobberArrayModes(); } else { - ASSERT(m_currentKnownStructure.isClear()); + ASSERT(m_structure.isClear()); ASSERT(!m_arrayModes); } checkConsistency(); } + + static void clobberStructuresFor(AbstractValue& value) + { + value.clobberStructures(); + } + + void observeInvalidationPoint() + { + m_structure.observeInvalidationPoint(); + checkConsistency(); + } + + static void observeInvalidationPointFor(AbstractValue& value) + { + value.observeInvalidationPoint(); + } + + void observeTransition(Structure* from, Structure* to) + { + if (m_type & SpecCell) { + m_structure.observeTransition(from, to); + 
observeIndexingTypeTransition(from->indexingType(), to->indexingType()); + } + checkConsistency(); + } + + void observeTransitions(const TransitionVector& vector); + + class TransitionObserver { + public: + TransitionObserver(Structure* from, Structure* to) + : m_from(from) + , m_to(to) + { + } + void operator()(AbstractValue& value) + { + value.observeTransition(m_from, m_to); + } + private: + Structure* m_from; + Structure* m_to; + }; + + class TransitionsObserver { + public: + TransitionsObserver(const TransitionVector& vector) + : m_vector(vector) + { + } + + void operator()(AbstractValue& value) + { + value.observeTransitions(m_vector); + } + private: + const TransitionVector& m_vector; + }; + void clobberValue() { m_value = JSValue(); @@ -91,7 +162,10 @@ struct AbstractValue { bool isHeapTop() const { - return (m_type | SpecHeapTop) == m_type && m_currentKnownStructure.isTop() && m_futurePossibleStructure.isTop(); + return (m_type | SpecHeapTop) == m_type + && m_structure.isTop() + && m_arrayModes == ALL_ARRAY_MODES + && !m_value; } bool valueIsTop() const @@ -111,32 +185,49 @@ struct AbstractValue { return result; } - void setMostSpecific(Graph&, JSValue); - void set(Graph&, JSValue); + static AbstractValue bytecodeTop() + { + AbstractValue result; + result.makeBytecodeTop(); + return result; + } + + static AbstractValue fullTop() + { + AbstractValue result; + result.makeFullTop(); + return result; + } + + void set(Graph&, const FrozenValue&, StructureClobberState); void set(Graph&, Structure*); + void set(Graph&, const StructureSet&); + + // Set this value to represent the given set of types as precisely as possible. + void setType(Graph&, SpeculatedType); + // As above, but only valid for non-cell types. void setType(SpeculatedType type) { - if (type & SpecCell) { - m_currentKnownStructure.makeTop(); - m_futurePossibleStructure.makeTop(); - m_arrayModes = ALL_ARRAY_MODES; - } else { - m_currentKnownStructure.clear(); - m_futurePossibleStructure.clear(); - m_arrayModes = 0; - } + RELEASE_ASSERT(!(type & SpecCell)); + m_structure.clear(); + m_arrayModes = 0; m_type = type; m_value = JSValue(); checkConsistency(); } + + void set(Graph&, const InferredType::Descriptor&); + void set(Graph&, const InferredType::Descriptor&, StructureClobberState); + + void fixTypeForRepresentation(Graph&, NodeFlags representation, Node* = nullptr); + void fixTypeForRepresentation(Graph&, Node*); bool operator==(const AbstractValue& other) const { return m_type == other.m_type && m_arrayModes == other.m_arrayModes - && m_currentKnownStructure == other.m_currentKnownStructure - && m_futurePossibleStructure == other.m_futurePossibleStructure + && m_structure == other.m_structure && m_value == other.m_value; } bool operator!=(const AbstractValue& other) const @@ -159,8 +250,7 @@ struct AbstractValue { } else { result |= mergeSpeculation(m_type, other.m_type); result |= mergeArrayModes(m_arrayModes, other.m_arrayModes); - result |= m_currentKnownStructure.addAll(other.m_currentKnownStructure); - result |= m_futurePossibleStructure.addAll(other.m_futurePossibleStructure); + result |= m_structure.merge(other.m_structure); if (m_value != other.m_value) { result |= !!m_value; m_value = JSValue(); @@ -171,13 +261,14 @@ struct AbstractValue { return result; } + bool mergeOSREntryValue(Graph&, JSValue); + void merge(SpeculatedType type) { mergeSpeculation(m_type, type); if (type & SpecCell) { - m_currentKnownStructure.makeTop(); - m_futurePossibleStructure.makeTop(); + m_structure.makeTop(); m_arrayModes = 
ALL_ARRAY_MODES; } m_value = JSValue(); @@ -185,24 +276,36 @@ struct AbstractValue { checkConsistency(); } - bool couldBeType(SpeculatedType desiredType) + bool couldBeType(SpeculatedType desiredType) const { return !!(m_type & desiredType); } - bool isType(SpeculatedType desiredType) + bool isType(SpeculatedType desiredType) const { return !(m_type & ~desiredType); } + + bool isType(Graph&, const InferredType::Descriptor&) const; + + // Filters the value using the given structure set. If the admittedTypes argument is not passed, this + // implicitly filters by the types implied by the structure set, which are usually a subset of + // SpecCell. Hence, after this call, the value will no longer have any non-cell members. But, you can + // use admittedTypes to preserve some non-cell types. Note that it's wrong for admittedTypes to overlap + // with SpecCell. + FiltrationResult filter(Graph&, const StructureSet&, SpeculatedType admittedTypes = SpecNone); + + FiltrationResult filterArrayModes(ArrayModes); + FiltrationResult filter(SpeculatedType); + FiltrationResult filterByValue(const FrozenValue& value); + FiltrationResult filter(const AbstractValue&); + + FiltrationResult filter(Graph&, const InferredType::Descriptor&); - FiltrationResult filter(Graph&, const StructureSet&); - - FiltrationResult filterArrayModes(ArrayModes arrayModes); - - FiltrationResult filter(SpeculatedType type); - - FiltrationResult filterByValue(JSValue value); + FiltrationResult changeStructure(Graph&, const StructureSet&); + bool contains(Structure*) const; + bool validate(JSValue value) const { if (isHeapTop()) @@ -222,75 +325,34 @@ struct AbstractValue { if (!!value && value.isCell()) { ASSERT(m_type & SpecCell); Structure* structure = value.asCell()->structure(); - return m_currentKnownStructure.contains(structure) - && m_futurePossibleStructure.contains(structure) + return m_structure.contains(structure) && (m_arrayModes & asArrayModes(structure->indexingType())); } return true; } - Structure* bestProvenStructure() const - { - if (m_currentKnownStructure.hasSingleton()) - return m_currentKnownStructure.singleton(); - if (m_futurePossibleStructure.hasSingleton()) - return m_futurePossibleStructure.singleton(); - return 0; - } - bool hasClobberableState() const { - return m_currentKnownStructure.isNeitherClearNorTop() + return m_structure.isNeitherClearNorTop() || !arrayModesAreClearOrTop(m_arrayModes); } #if ASSERT_DISABLED void checkConsistency() const { } + void assertIsRegistered(Graph&) const { } #else void checkConsistency() const; + void assertIsRegistered(Graph&) const; #endif - + + ResultType resultType() const; + void dumpInContext(PrintStream&, DumpContext*) const; void dump(PrintStream&) const; - // A great way to think about the difference between m_currentKnownStructure and - // m_futurePossibleStructure is to consider these four examples: - // - // 1) x = foo(); - // - // In this case x's m_currentKnownStructure and m_futurePossibleStructure will - // both be TOP, since we don't know anything about x for sure, yet. - // - // 2) x = foo(); - // y = x.f; - // - // Where x will later have a new property added to it, 'g'. Because of the - // known but not-yet-executed property addition, x's current structure will - // not be watchpointable; hence we have no way of statically bounding the set - // of possible structures that x may have if a clobbering event happens. 
So, - // x's m_currentKnownStructure will be whatever structure we check to get - // property 'f', and m_futurePossibleStructure will be TOP. - // - // 3) x = foo(); - // y = x.f; - // - // Where x has a terminal structure that is still watchpointable. In this case, - // x's m_currentKnownStructure and m_futurePossibleStructure will both be - // whatever structure we checked for when getting 'f'. - // - // 4) x = foo(); - // y = x.f; - // bar(); - // - // Where x has a terminal structure that is still watchpointable. In this - // case, m_currentKnownStructure will be TOP because bar() may potentially - // change x's structure and we have no way of proving otherwise, but - // x's m_futurePossibleStructure will be whatever structure we had checked - // when getting property 'f'. - - // NB. All fields in this struct must have trivial destructors. - + void validateReferences(const TrackedReferences&); + // This is a proven constraint on the structures that this value can have right // now. The structure of the current value must belong to this set. The set may // be TOP, indicating that it is the set of all possible structures, in which @@ -298,44 +360,25 @@ struct AbstractValue { // in which case this value cannot be a cell. This is all subject to change // anytime a new value is assigned to this one, anytime there is a control flow // merge, or most crucially, anytime a side-effect or structure check happens. - // In case of a side-effect, we typically must assume that any value may have - // had its structure changed, hence contravening our proof. We make the proof - // valid again by switching this to TOP (i.e. claiming that we have proved that - // this value may have any structure). Of note is that the proof represented by - // this field is not subject to structure transition watchpoints - even if one - // fires, we can be sure that this proof is still valid. - StructureAbstractValue m_currentKnownStructure; - - // This is a proven constraint on the structures that this value can have now - // or any time in the future subject to the structure transition watchpoints of - // all members of this set not having fired. This set is impervious to side- - // effects; even if one happens the side-effect can only cause the value to - // change to at worst another structure that is also a member of this set. But, - // the theorem being proved by this field is predicated upon there not being - // any new structure transitions introduced into any members of this set. In - // cases where there is no way for us to guard this happening, the set must be - // TOP. But in cases where we can guard new structure transitions (all members - // of the set have still-valid structure transition watchpoints) then this set - // will be finite. Anytime that we make use of the finite nature of this set, - // we must first issue a structure transition watchpoint, which will effectively - // result in m_currentKnownStructure being filtered according to - // m_futurePossibleStructure. - StructureAbstractValue m_futurePossibleStructure; + // In case of a side-effect, we must assume that any value with a structure that + // isn't being watched may have had its structure changed, hence contravening + // our proof. In such a case we make the proof valid again by switching this to + // TOP (i.e. claiming that we have proved that this value may have any + // structure). 
+ StructureAbstractValue m_structure; // This is a proven constraint on the possible types that this value can have // now or any time in the future, unless it is reassigned. This field is - // impervious to side-effects unless the side-effect can reassign the value - // (for example if we're talking about a captured variable). The relationship - // between this field, and the structure fields above, is as follows. The - // fields above constraint the structures that a cell may have, but they say - // nothing about whether or not the value is known to be a cell. More formally, - // the m_currentKnownStructure is itself an abstract value that consists of the - // union of the set of all non-cell values and the set of cell values that have - // the given structure. This abstract value is then the intersection of the - // m_currentKnownStructure and the set of values whose type is m_type. So, for - // example if m_type is SpecFinal|SpecInt32 and m_currentKnownStructure is - // [0x12345] then this abstract value corresponds to the set of all integers - // unified with the set of all objects with structure 0x12345. + // impervious to side-effects. The relationship between this field, and the + // structure fields above, is as follows. The fields above constraint the + // structures that a cell may have, but they say nothing about whether or not + // the value is known to be a cell. More formally, the m_structure is itself an + // abstract value that consists of the union of the set of all non-cell values + // and the set of cell values that have the given structure. This abstract + // value is then the intersection of the m_structure and the set of values + // whose type is m_type. So, for example if m_type is SpecFinal|SpecInt32 and + // m_structure is [0x12345] then this abstract value corresponds to the set of + // all integers unified with the set of all objects with structure 0x12345. SpeculatedType m_type; // This is a proven constraint on the possible indexing types that this value @@ -350,7 +393,11 @@ struct AbstractValue { // implies nothing about the structure. Oddly, JSValue() (i.e. the empty value) // means either BOTTOM or TOP depending on the state of m_type: if m_type is // BOTTOM then JSValue() means BOTTOM; if m_type is not BOTTOM then JSValue() - // means TOP. + // means TOP. Also note that this value isn't necessarily known to the GC + // (strongly or even weakly - it may be an "fragile" value, see + // DFGValueStrength.h). If you perform any optimization based on a cell m_value + // that requires that the value be kept alive, you must call freeze() on that + // value, which will turn it into a weak value. 
JSValue m_value; private: @@ -361,6 +408,12 @@ private: m_arrayModes = ALL_ARRAY_MODES; } + void observeIndexingTypeTransition(IndexingType from, IndexingType to) + { + if (m_arrayModes & asArrayModes(from)) + m_arrayModes |= asArrayModes(to); + } + bool validateType(JSValue value) const { if (isHeapTop()) @@ -388,19 +441,17 @@ private: { m_type |= top; m_arrayModes = ALL_ARRAY_MODES; - m_currentKnownStructure.makeTop(); - m_futurePossibleStructure.makeTop(); + m_structure.makeTop(); m_value = JSValue(); checkConsistency(); } - void setFuturePossibleStructure(Graph&, Structure* structure); - void filterValueByType(); void filterArrayModesByType(); bool shouldBeClear() const; FiltrationResult normalizeClarity(); + FiltrationResult normalizeClarity(Graph&); }; } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGAdaptiveInferredPropertyValueWatchpoint.cpp b/Source/JavaScriptCore/dfg/DFGAdaptiveInferredPropertyValueWatchpoint.cpp new file mode 100644 index 000000000..207b2fcec --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGAdaptiveInferredPropertyValueWatchpoint.cpp @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "DFGAdaptiveInferredPropertyValueWatchpoint.h" + +#if ENABLE(DFG_JIT) + +#include "CodeBlock.h" +#include "DFGCommon.h" +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +AdaptiveInferredPropertyValueWatchpoint::AdaptiveInferredPropertyValueWatchpoint(const ObjectPropertyCondition& key, CodeBlock* codeBlock) + : Base(key) + , m_codeBlock(codeBlock) +{ +} + +void AdaptiveInferredPropertyValueWatchpoint::handleFire(const FireDetail& detail) +{ + if (DFG::shouldDumpDisassembly()) + dataLog("Firing watchpoint ", RawPointer(this), " (", key(), ") on ", *m_codeBlock, "\n"); + + + StringPrintStream out; + out.print("Adaptation of ", key(), " failed: ", detail); + + StringFireDetail stringDetail(out.toCString().data()); + + m_codeBlock->jettison(Profiler::JettisonDueToUnprofiledWatchpoint, CountReoptimization, &stringDetail); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGAdaptiveInferredPropertyValueWatchpoint.h b/Source/JavaScriptCore/dfg/DFGAdaptiveInferredPropertyValueWatchpoint.h new file mode 100644 index 000000000..2ed4c3b18 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGAdaptiveInferredPropertyValueWatchpoint.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef DFGAdaptiveInferredPropertyValueWatchpoint_h +#define DFGAdaptiveInferredPropertyValueWatchpoint_h + +#if ENABLE(DFG_JIT) + +#include "AdaptiveInferredPropertyValueWatchpointBase.h" + +namespace JSC { namespace DFG { + +class AdaptiveInferredPropertyValueWatchpoint : public AdaptiveInferredPropertyValueWatchpointBase { +public: + typedef AdaptiveInferredPropertyValueWatchpointBase Base; + AdaptiveInferredPropertyValueWatchpoint(const ObjectPropertyCondition&, CodeBlock*); + +private: + virtual void handleFire(const FireDetail&) override; + + CodeBlock* m_codeBlock; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGAdaptiveInferredPropertyValueWatchpoint_h + diff --git a/Source/JavaScriptCore/dfg/DFGAdaptiveStructureWatchpoint.cpp b/Source/JavaScriptCore/dfg/DFGAdaptiveStructureWatchpoint.cpp new file mode 100644 index 000000000..0854c56b0 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGAdaptiveStructureWatchpoint.cpp @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "DFGAdaptiveStructureWatchpoint.h" + +#if ENABLE(DFG_JIT) + +#include "CodeBlock.h" +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +AdaptiveStructureWatchpoint::AdaptiveStructureWatchpoint( + const ObjectPropertyCondition& key, + CodeBlock* codeBlock) + : m_key(key) + , m_codeBlock(codeBlock) +{ + RELEASE_ASSERT(key.watchingRequiresStructureTransitionWatchpoint()); + RELEASE_ASSERT(!key.watchingRequiresReplacementWatchpoint()); +} + +void AdaptiveStructureWatchpoint::install() +{ + RELEASE_ASSERT(m_key.isWatchable()); + + m_key.object()->structure()->addTransitionWatchpoint(this); +} + +void AdaptiveStructureWatchpoint::fireInternal(const FireDetail& detail) +{ + if (m_key.isWatchable(PropertyCondition::EnsureWatchability)) { + install(); + return; + } + + if (DFG::shouldDumpDisassembly()) { + dataLog( + "Firing watchpoint ", RawPointer(this), " (", m_key, ") on ", *m_codeBlock, "\n"); + } + + StringPrintStream out; + out.print("Adaptation of ", m_key, " failed: ", detail); + + StringFireDetail stringDetail(out.toCString().data()); + + m_codeBlock->jettison( + Profiler::JettisonDueToUnprofiledWatchpoint, CountReoptimization, &stringDetail); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGAdaptiveStructureWatchpoint.h b/Source/JavaScriptCore/dfg/DFGAdaptiveStructureWatchpoint.h new file mode 100644 index 000000000..f153e23c5 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGAdaptiveStructureWatchpoint.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef DFGAdaptiveStructureWatchpoint_h +#define DFGAdaptiveStructureWatchpoint_h + +#if ENABLE(DFG_JIT) + +#include "ObjectPropertyCondition.h" +#include "Watchpoint.h" + +namespace JSC { namespace DFG { + +class AdaptiveStructureWatchpoint : public Watchpoint { +public: + AdaptiveStructureWatchpoint(const ObjectPropertyCondition&, CodeBlock*); + + const ObjectPropertyCondition& key() const { return m_key; } + + void install(); + +protected: + virtual void fireInternal(const FireDetail&) override; + +private: + ObjectPropertyCondition m_key; + CodeBlock* m_codeBlock; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGAdaptiveStructureWatchpoint_h + diff --git a/Source/JavaScriptCore/dfg/DFGAdjacencyList.h b/Source/JavaScriptCore/dfg/DFGAdjacencyList.h index dc3cccb3f..63ebef5fa 100644 --- a/Source/JavaScriptCore/dfg/DFGAdjacencyList.h +++ b/Source/JavaScriptCore/dfg/DFGAdjacencyList.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGAdjacencyList_h #define DFGAdjacencyList_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGCommon.h" @@ -54,7 +52,7 @@ public: } } - AdjacencyList(Kind kind, Edge child1, Edge child2, Edge child3) + AdjacencyList(Kind kind, Edge child1, Edge child2 = Edge(), Edge child3 = Edge()) { ASSERT_UNUSED(kind, kind == Fixed); initialize(child1, child2, child3); @@ -67,6 +65,8 @@ public: setNumChildren(numChildren); } + bool isEmpty() const { return !child1(); } + const Edge& child(unsigned i) const { ASSERT(i < Size); @@ -132,7 +132,7 @@ public: setChild(i, child(i + 1)); setChild(Size - 1, Edge()); } - + unsigned firstChild() const { return m_words[0].m_encodedWord; @@ -151,6 +151,56 @@ public: m_words[1].m_encodedWord = numChildren; } + AdjacencyList sanitized() const + { + return AdjacencyList(Fixed, child1().sanitized(), child2().sanitized(), child3().sanitized()); + } + + AdjacencyList justChecks() const + { + AdjacencyList result(Fixed); + unsigned sourceIndex = 0; + unsigned targetIndex = 0; + while (sourceIndex < AdjacencyList::Size) { + Edge edge = child(sourceIndex++); + if (!edge) + break; + if (edge.willHaveCheck()) + result.child(targetIndex++) = edge; + } + return result; + } + + unsigned hash() const + { + unsigned result = 0; + if (!child1()) + return result; + + result += child1().hash(); + + if (!child2()) + return result; + + result *= 3; + result += child2().hash(); + + if (!child3()) + return result; + + result *= 3; + result += child3().hash(); + + return result; + } + + bool operator==(const AdjacencyList& other) const + { + return child1() == other.child1() + && child2() == other.child2() + && child3() == other.child3(); + } + private: Edge m_words[Size]; }; diff --git a/Source/JavaScriptCore/dfg/DFGAllocator.h b/Source/JavaScriptCore/dfg/DFGAllocator.h index 80e1034cf..f380df001 100644 --- a/Source/JavaScriptCore/dfg/DFGAllocator.h +++ b/Source/JavaScriptCore/dfg/DFGAllocator.h @@ -26,12 +26,9 @@ #ifndef DFGAllocator_h #define DFGAllocator_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGCommon.h" -#include <wtf/PageAllocationAligned.h> #include <wtf/StdLibExtras.h> namespace JSC { namespace DFG { @@ -52,7 +49,7 @@ public: void* allocate(); // Use placement new to allocate, and avoid using this method. 
void free(T*); // Call this method to delete; never use 'delete' directly. - void freeAll(); // Only call this if T has a trivial destructor. + void freeAll(); // Only call this if you've either freed everything or if T has a trivial destructor. void reset(); // Like freeAll(), but also returns all memory to the OS. unsigned indexOf(const T*); @@ -72,7 +69,7 @@ private: bool isInThisRegion(const T* pointer) { return static_cast<unsigned>(pointer - data()) < numberOfThingsPerRegion(); } static Region* regionFor(const T* pointer) { return bitwise_cast<Region*>(bitwise_cast<uintptr_t>(pointer) & ~(size() - 1)); } - PageAllocationAligned m_allocation; + void* m_allocation; Allocator* m_allocator; Region* m_next; }; @@ -155,11 +152,14 @@ void Allocator<T>::reset() template<typename T> unsigned Allocator<T>::indexOf(const T* object) { - unsigned baseIndex = 0; + unsigned numRegions = 0; + for (Region* region = m_regionHead; region; region = region->m_next) + numRegions++; + unsigned regionIndex = 0; for (Region* region = m_regionHead; region; region = region->m_next) { if (region->isInThisRegion(object)) - return baseIndex + (object - region->data()); - baseIndex += Region::numberOfThingsPerRegion(); + return (numRegions - 1 - regionIndex) * Region::numberOfThingsPerRegion() + (object - region->data()); + regionIndex++; } CRASH(); return 0; @@ -200,11 +200,9 @@ void* Allocator<T>::allocateSlow() if (logCompilationChanges()) dataLog("Allocating another allocator region.\n"); - - PageAllocationAligned allocation = PageAllocationAligned::allocate(Region::size(), Region::size(), OSAllocator::JSGCHeapPages); - if (!static_cast<bool>(allocation)) - CRASH(); - Region* region = static_cast<Region*>(allocation.base()); + + void* allocation = fastAlignedMalloc(Region::size(), Region::size()); + Region* region = static_cast<Region*>(allocation); region->m_allocation = allocation; region->m_allocator = this; startBumpingIn(region); @@ -221,7 +219,7 @@ void Allocator<T>::freeRegionsStartingAt(typename Allocator<T>::Region* region) { while (region) { Region* nextRegion = region->m_next; - region->m_allocation.deallocate(); + fastAlignedFree(region->m_allocation); region = nextRegion; } } diff --git a/Source/JavaScriptCore/dfg/DFGArgumentPosition.h b/Source/JavaScriptCore/dfg/DFGArgumentPosition.h index b4e4ade15..a6983a735 100644 --- a/Source/JavaScriptCore/dfg/DFGArgumentPosition.h +++ b/Source/JavaScriptCore/dfg/DFGArgumentPosition.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2013, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -46,6 +46,9 @@ public: void addVariable(VariableAccessData* variable) { m_variables.append(variable); + + // We may set this early. Merging it here saves us time in prediction propagation. 
+ variable->mergeShouldNeverUnbox(m_shouldNeverUnbox); } VariableAccessData* someVariable() const @@ -64,7 +67,7 @@ public: bool mergeShouldNeverUnbox(bool shouldNeverUnbox) { - return checkAndSet(m_shouldNeverUnbox, m_shouldNeverUnbox | shouldNeverUnbox); + return checkAndSet(m_shouldNeverUnbox, m_shouldNeverUnbox || shouldNeverUnbox); } bool mergeArgumentPredictionAwareness() @@ -93,7 +96,7 @@ public: bool changed = false; for (unsigned i = 0; i < m_variables.size(); ++i) { VariableAccessData* variable = m_variables[i]->find(); - changed |= checkAndSet(m_isProfitableToUnbox, m_isProfitableToUnbox | variable->isProfitableToUnbox()); + changed |= checkAndSet(m_isProfitableToUnbox, m_isProfitableToUnbox || variable->isProfitableToUnbox()); } if (!changed) return false; @@ -123,10 +126,7 @@ public: if (i) out.print(" "); - if (operand.isArgument()) - out.print("arg", operand.toArgument(), "(", VariableAccessDataDump(*graph, variable), ")"); - else - out.print("r", operand.toLocal(), "(", VariableAccessDataDump(*graph, variable), ")"); + out.print(operand, "(", VariableAccessDataDump(*graph, variable), ")"); } out.print("\n"); } diff --git a/Source/JavaScriptCore/dfg/DFGArgumentsEliminationPhase.cpp b/Source/JavaScriptCore/dfg/DFGArgumentsEliminationPhase.cpp new file mode 100644 index 000000000..698cc75db --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGArgumentsEliminationPhase.cpp @@ -0,0 +1,665 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "DFGArgumentsEliminationPhase.h" + +#if ENABLE(DFG_JIT) + +#include "BytecodeLivenessAnalysisInlines.h" +#include "DFGArgumentsUtilities.h" +#include "DFGBasicBlockInlines.h" +#include "DFGBlockMapInlines.h" +#include "DFGClobberize.h" +#include "DFGCombinedLiveness.h" +#include "DFGForAllKills.h" +#include "DFGGraph.h" +#include "DFGInsertionSet.h" +#include "DFGLivenessAnalysisPhase.h" +#include "DFGOSRAvailabilityAnalysisPhase.h" +#include "DFGPhase.h" +#include "JSCInlines.h" +#include <wtf/HashMap.h> +#include <wtf/HashSet.h> +#include <wtf/ListDump.h> + +namespace JSC { namespace DFG { + +namespace { + +bool verbose = false; + +class ArgumentsEliminationPhase : public Phase { +public: + ArgumentsEliminationPhase(Graph& graph) + : Phase(graph, "arguments elimination") + { + } + + bool run() + { + // For now this phase only works on SSA. This could be changed; we could have a block-local + // version over LoadStore. + DFG_ASSERT(m_graph, nullptr, m_graph.m_form == SSA); + + if (verbose) { + dataLog("Graph before arguments elimination:\n"); + m_graph.dump(); + } + + identifyCandidates(); + if (m_candidates.isEmpty()) + return false; + + eliminateCandidatesThatEscape(); + if (m_candidates.isEmpty()) + return false; + + eliminateCandidatesThatInterfere(); + if (m_candidates.isEmpty()) + return false; + + transform(); + + return true; + } + +private: + // Just finds nodes that we know how to work with. + void identifyCandidates() + { + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + for (Node* node : *block) { + switch (node->op()) { + case CreateDirectArguments: + case CreateClonedArguments: + m_candidates.add(node); + break; + + case CreateScopedArguments: + // FIXME: We could handle this if it wasn't for the fact that scoped arguments are + // always stored into the activation. + // https://bugs.webkit.org/show_bug.cgi?id=143072 and + // https://bugs.webkit.org/show_bug.cgi?id=143073 + break; + + default: + break; + } + } + } + + if (verbose) + dataLog("Candidates: ", listDump(m_candidates), "\n"); + } + + // Look for escaping sites, and remove from the candidates set if we see an escape. 
+ void eliminateCandidatesThatEscape() + { + auto escape = [&] (Edge edge) { + if (!edge) + return; + m_candidates.remove(edge.node()); + }; + + auto escapeBasedOnArrayMode = [&] (ArrayMode mode, Edge edge) { + switch (mode.type()) { + case Array::DirectArguments: + if (edge->op() != CreateDirectArguments) + escape(edge); + break; + + case Array::Int32: + case Array::Double: + case Array::Contiguous: + if (edge->op() != CreateClonedArguments) + escape(edge); + break; + + default: + escape(edge); + break; + } + }; + + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + for (Node* node : *block) { + switch (node->op()) { + case GetFromArguments: + DFG_ASSERT(m_graph, node, node->child1()->op() == CreateDirectArguments); + break; + + case GetByVal: + escapeBasedOnArrayMode(node->arrayMode(), node->child1()); + escape(node->child2()); + escape(node->child3()); + break; + + case GetArrayLength: + escapeBasedOnArrayMode(node->arrayMode(), node->child1()); + escape(node->child2()); + break; + + case LoadVarargs: + break; + + case CallVarargs: + case ConstructVarargs: + case TailCallVarargs: + case TailCallVarargsInlinedCaller: + escape(node->child1()); + escape(node->child3()); + break; + + case Check: + m_graph.doToChildren( + node, + [&] (Edge edge) { + if (edge.willNotHaveCheck()) + return; + + if (alreadyChecked(edge.useKind(), SpecObject)) + return; + + escape(edge); + }); + break; + + case MovHint: + case PutHint: + break; + + case GetButterfly: + case GetButterflyReadOnly: + // This barely works. The danger is that the GetButterfly is used by something that + // does something escaping to a candidate. Fortunately, the only butterfly-using ops + // that we exempt here also use the candidate directly. If there ever was a + // butterfly-using op that we wanted to exempt, then we'd have to look at the + // butterfly's child and check if it's a candidate. + break; + + case CheckArray: + escapeBasedOnArrayMode(node->arrayMode(), node->child1()); + break; + + // FIXME: For cloned arguments, we'd like to allow GetByOffset on length to not be + // an escape. + // https://bugs.webkit.org/show_bug.cgi?id=143074 + + // FIXME: We should be able to handle GetById/GetByOffset on callee. + // https://bugs.webkit.org/show_bug.cgi?id=143075 + + default: + m_graph.doToChildren(node, escape); + break; + } + } + } + + if (verbose) + dataLog("After escape analysis: ", listDump(m_candidates), "\n"); + } + + // Anywhere that a candidate is live (in bytecode or in DFG), check if there is a chance of + // interference between the stack area that the arguments object copies from and the arguments + // object's payload. Conservatively this means that the stack region doesn't get stored to. 
+ void eliminateCandidatesThatInterfere() + { + performLivenessAnalysis(m_graph); + performOSRAvailabilityAnalysis(m_graph); + m_graph.initializeNodeOwners(); + CombinedLiveness combinedLiveness(m_graph); + + BlockMap<Operands<bool>> clobberedByBlock(m_graph); + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + Operands<bool>& clobberedByThisBlock = clobberedByBlock[block]; + clobberedByThisBlock = Operands<bool>(OperandsLike, m_graph.block(0)->variablesAtHead); + for (Node* node : *block) { + clobberize( + m_graph, node, NoOpClobberize(), + [&] (AbstractHeap heap) { + if (heap.kind() != Stack) { + ASSERT(!heap.overlaps(Stack)); + return; + } + ASSERT(!heap.payload().isTop()); + VirtualRegister reg(heap.payload().value32()); + clobberedByThisBlock.operand(reg) = true; + }, + NoOpClobberize()); + } + } + + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + // Stop if we've already removed all candidates. + if (m_candidates.isEmpty()) + return; + + // Ignore blocks that don't write to the stack. + bool writesToStack = false; + for (unsigned i = clobberedByBlock[block].size(); i--;) { + if (clobberedByBlock[block][i]) { + writesToStack = true; + break; + } + } + if (!writesToStack) + continue; + + forAllKillsInBlock( + m_graph, combinedLiveness, block, + [&] (unsigned nodeIndex, Node* candidate) { + if (!m_candidates.contains(candidate)) + return; + + // Check if this block has any clobbers that affect this candidate. This is a fairly + // fast check. + bool isClobberedByBlock = false; + Operands<bool>& clobberedByThisBlock = clobberedByBlock[block]; + + if (InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame) { + if (inlineCallFrame->isVarargs()) { + isClobberedByBlock |= clobberedByThisBlock.operand( + inlineCallFrame->stackOffset + JSStack::ArgumentCount); + } + + if (!isClobberedByBlock || inlineCallFrame->isClosureCall) { + isClobberedByBlock |= clobberedByThisBlock.operand( + inlineCallFrame->stackOffset + JSStack::Callee); + } + + if (!isClobberedByBlock) { + for (unsigned i = 0; i < inlineCallFrame->arguments.size() - 1; ++i) { + VirtualRegister reg = + VirtualRegister(inlineCallFrame->stackOffset) + + CallFrame::argumentOffset(i); + if (clobberedByThisBlock.operand(reg)) { + isClobberedByBlock = true; + break; + } + } + } + } else { + // We don't include the ArgumentCount or Callee in this case because we can be + // damn sure that this won't be clobbered. + for (unsigned i = 1; i < static_cast<unsigned>(codeBlock()->numParameters()); ++i) { + if (clobberedByThisBlock.argument(i)) { + isClobberedByBlock = true; + break; + } + } + } + + if (!isClobberedByBlock) + return; + + // Check if we can immediately eliminate this candidate. If the block has a clobber + // for this arguments allocation, and we'd have to examine every node in the block, + // then we can just eliminate the candidate. + if (nodeIndex == block->size() && candidate->owner != block) { + m_candidates.remove(candidate); + return; + } + + // This loop considers all nodes up to the nodeIndex, excluding the nodeIndex. 
+ while (nodeIndex--) { + Node* node = block->at(nodeIndex); + if (node == candidate) + break; + + bool found = false; + clobberize( + m_graph, node, NoOpClobberize(), + [&] (AbstractHeap heap) { + if (heap.kind() == Stack && !heap.payload().isTop()) { + if (argumentsInvolveStackSlot(candidate, VirtualRegister(heap.payload().value32()))) + found = true; + return; + } + if (heap.overlaps(Stack)) + found = true; + }, + NoOpClobberize()); + + if (found) { + m_candidates.remove(candidate); + return; + } + } + }); + } + + // Q: How do we handle OSR exit with a live PhantomArguments at a point where the inline call + // frame is dead? A: Naively we could say that PhantomArguments must escape the stack slots. But + // that would break PutStack sinking, which in turn would break object allocation sinking, in + // cases where we have a varargs call to an otherwise pure method. So, we need something smarter. + // For the outermost arguments, we just have a PhantomArguments that magically knows that it + // should load the arguments from the call frame. For the inline arguments, we have the heap map + // in the availabiltiy map track each possible inline argument as a promoted heap location. If the + // PutStacks for those arguments aren't sunk, those heap locations will map to very trivial + // availabilities (they will be flush availabilities). But if sinking happens then those + // availabilities may become whatever. OSR exit should be able to handle this quite naturally, + // since those availabilities speak of the stack before the optimizing compiler stack frame is + // torn down. + + if (verbose) + dataLog("After interference analysis: ", listDump(m_candidates), "\n"); + } + + void transform() + { + InsertionSet insertionSet(m_graph); + + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + Node* node = block->at(nodeIndex); + + auto getArrayLength = [&] (Node* candidate) -> Node* { + return emitCodeToGetArgumentsArrayLength( + insertionSet, candidate, nodeIndex, node->origin); + }; + + switch (node->op()) { + case CreateDirectArguments: + if (!m_candidates.contains(node)) + break; + + node->setOpAndDefaultFlags(PhantomDirectArguments); + break; + + case CreateClonedArguments: + if (!m_candidates.contains(node)) + break; + + node->setOpAndDefaultFlags(PhantomClonedArguments); + break; + + case GetFromArguments: { + Node* candidate = node->child1().node(); + if (!m_candidates.contains(candidate)) + break; + + DFG_ASSERT( + m_graph, node, + node->child1()->op() == CreateDirectArguments + || node->child1()->op() == PhantomDirectArguments); + VirtualRegister reg = + virtualRegisterForArgument(node->capturedArgumentsOffset().offset() + 1) + + node->origin.semantic.stackOffset(); + StackAccessData* data = m_graph.m_stackAccessData.add(reg, FlushedJSValue); + node->convertToGetStack(data); + break; + } + + case GetArrayLength: { + Node* candidate = node->child1().node(); + if (!m_candidates.contains(candidate)) + break; + + // Meh, this is kind of hackish - we use an Identity so that we can reuse the + // getArrayLength() helper. + node->convertToIdentityOn(getArrayLength(candidate)); + break; + } + + case GetByVal: { + // FIXME: For ClonedArguments, we would have already done a separate bounds check. + // This code will cause us to have two bounds checks - the original one that we + // already factored out in SSALoweringPhase, and the new one we insert here, which is + // often implicitly part of GetMyArgumentByVal. 
B3 will probably eliminate the + // second bounds check, but still - that's just silly. + // https://bugs.webkit.org/show_bug.cgi?id=143076 + + Node* candidate = node->child1().node(); + if (!m_candidates.contains(candidate)) + break; + + Node* result = nullptr; + if (node->child2()->isInt32Constant()) { + unsigned index = node->child2()->asUInt32(); + InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame; + + bool safeToGetStack; + if (inlineCallFrame) + safeToGetStack = index < inlineCallFrame->arguments.size() - 1; + else { + safeToGetStack = + index < static_cast<unsigned>(codeBlock()->numParameters()) - 1; + } + if (safeToGetStack) { + StackAccessData* data; + VirtualRegister arg = virtualRegisterForArgument(index + 1); + if (inlineCallFrame) + arg += inlineCallFrame->stackOffset; + data = m_graph.m_stackAccessData.add(arg, FlushedJSValue); + + if (!inlineCallFrame || inlineCallFrame->isVarargs()) { + insertionSet.insertNode( + nodeIndex, SpecNone, CheckInBounds, node->origin, + node->child2(), Edge(getArrayLength(candidate), Int32Use)); + } + + result = insertionSet.insertNode( + nodeIndex, node->prediction(), GetStack, node->origin, OpInfo(data)); + } + } + + if (!result) { + result = insertionSet.insertNode( + nodeIndex, node->prediction(), GetMyArgumentByVal, node->origin, + node->child1(), node->child2()); + } + + // Need to do this because we may have a data format conversion here. + node->convertToIdentityOn(result); + break; + } + + case LoadVarargs: { + Node* candidate = node->child1().node(); + if (!m_candidates.contains(candidate)) + break; + + LoadVarargsData* varargsData = node->loadVarargsData(); + InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame; + if (inlineCallFrame + && !inlineCallFrame->isVarargs() + && inlineCallFrame->arguments.size() - varargsData->offset <= varargsData->limit) { + + // LoadVarargs can exit, so it better be exitOK. + DFG_ASSERT(m_graph, node, node->origin.exitOK); + bool canExit = true; + + Node* argumentCount = insertionSet.insertConstant( + nodeIndex, node->origin.withExitOK(canExit), + jsNumber(inlineCallFrame->arguments.size() - varargsData->offset)); + insertionSet.insertNode( + nodeIndex, SpecNone, MovHint, node->origin.takeValidExit(canExit), + OpInfo(varargsData->count.offset()), Edge(argumentCount)); + insertionSet.insertNode( + nodeIndex, SpecNone, PutStack, node->origin.withExitOK(canExit), + OpInfo(m_graph.m_stackAccessData.add(varargsData->count, FlushedInt32)), + Edge(argumentCount, KnownInt32Use)); + + DFG_ASSERT(m_graph, node, varargsData->limit - 1 >= varargsData->mandatoryMinimum); + // Define our limit to not include "this", since that's a bit easier to reason about. + unsigned limit = varargsData->limit - 1; + Node* undefined = nullptr; + for (unsigned storeIndex = 0; storeIndex < limit; ++storeIndex) { + // First determine if we have an element we can load, and load it if + // possible. 
+ + unsigned loadIndex = storeIndex + varargsData->offset; + + Node* value; + if (loadIndex + 1 < inlineCallFrame->arguments.size()) { + VirtualRegister reg = + virtualRegisterForArgument(loadIndex + 1) + + inlineCallFrame->stackOffset; + StackAccessData* data = m_graph.m_stackAccessData.add( + reg, FlushedJSValue); + + value = insertionSet.insertNode( + nodeIndex, SpecNone, GetStack, node->origin.withExitOK(canExit), + OpInfo(data)); + } else { + // FIXME: We shouldn't have to store anything if + // storeIndex >= varargsData->mandatoryMinimum, but we will still + // have GetStacks in that range. So if we don't do the stores, we'll + // have degenerate IR: we'll have GetStacks of something that didn't + // have PutStacks. + // https://bugs.webkit.org/show_bug.cgi?id=147434 + + if (!undefined) { + undefined = insertionSet.insertConstant( + nodeIndex, node->origin.withExitOK(canExit), jsUndefined()); + } + value = undefined; + } + + // Now that we have a value, store it. + + VirtualRegister reg = varargsData->start + storeIndex; + StackAccessData* data = + m_graph.m_stackAccessData.add(reg, FlushedJSValue); + + insertionSet.insertNode( + nodeIndex, SpecNone, MovHint, node->origin.takeValidExit(canExit), + OpInfo(reg.offset()), Edge(value)); + insertionSet.insertNode( + nodeIndex, SpecNone, PutStack, node->origin.withExitOK(canExit), + OpInfo(data), Edge(value)); + } + + node->remove(); + node->origin.exitOK = canExit; + break; + } + + node->setOpAndDefaultFlags(ForwardVarargs); + break; + } + + case CallVarargs: + case ConstructVarargs: + case TailCallVarargs: + case TailCallVarargsInlinedCaller: { + Node* candidate = node->child2().node(); + if (!m_candidates.contains(candidate)) + break; + + CallVarargsData* varargsData = node->callVarargsData(); + InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame; + if (inlineCallFrame && !inlineCallFrame->isVarargs()) { + Vector<Node*> arguments; + for (unsigned i = 1 + varargsData->firstVarArgOffset; i < inlineCallFrame->arguments.size(); ++i) { + StackAccessData* data = m_graph.m_stackAccessData.add( + virtualRegisterForArgument(i) + inlineCallFrame->stackOffset, + FlushedJSValue); + + Node* value = insertionSet.insertNode( + nodeIndex, SpecNone, GetStack, node->origin, OpInfo(data)); + + arguments.append(value); + } + + unsigned firstChild = m_graph.m_varArgChildren.size(); + m_graph.m_varArgChildren.append(node->child1()); + m_graph.m_varArgChildren.append(node->child3()); + for (Node* argument : arguments) + m_graph.m_varArgChildren.append(Edge(argument)); + switch (node->op()) { + case CallVarargs: + node->setOpAndDefaultFlags(Call); + break; + case ConstructVarargs: + node->setOpAndDefaultFlags(Construct); + break; + case TailCallVarargs: + node->setOpAndDefaultFlags(TailCall); + break; + case TailCallVarargsInlinedCaller: + node->setOpAndDefaultFlags(TailCallInlinedCaller); + break; + default: + RELEASE_ASSERT_NOT_REACHED(); + } + node->children = AdjacencyList( + AdjacencyList::Variable, + firstChild, m_graph.m_varArgChildren.size() - firstChild); + break; + } + + switch (node->op()) { + case CallVarargs: + node->setOpAndDefaultFlags(CallForwardVarargs); + break; + case ConstructVarargs: + node->setOpAndDefaultFlags(ConstructForwardVarargs); + break; + case TailCallVarargs: + node->setOpAndDefaultFlags(TailCallForwardVarargs); + break; + case TailCallVarargsInlinedCaller: + node->setOpAndDefaultFlags(TailCallForwardVarargsInlinedCaller); + break; + default: + RELEASE_ASSERT_NOT_REACHED(); + } + break; + } + + case 
CheckArray: + case GetButterfly: { + if (!m_candidates.contains(node->child1().node())) + break; + node->remove(); + break; + } + + default: + break; + } + } + + insertionSet.execute(block); + } + } + + HashSet<Node*> m_candidates; +}; + +} // anonymous namespace + +bool performArgumentsElimination(Graph& graph) +{ + SamplingRegion samplingRegion("DFG Arguments Elimination Phase"); + return runPhase<ArgumentsEliminationPhase>(graph); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGMergeMode.h b/Source/JavaScriptCore/dfg/DFGArgumentsEliminationPhase.h index 6619feaaf..520b228ad 100644 --- a/Source/JavaScriptCore/dfg/DFGMergeMode.h +++ b/Source/JavaScriptCore/dfg/DFGArgumentsEliminationPhase.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,25 +23,23 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef DFGMergeMode_h -#define DFGMergeMode_h +#ifndef DFGArgumentsEliminationPhase_h +#define DFGArgumentsEliminationPhase_h + +#if ENABLE(DFG_JIT) namespace JSC { namespace DFG { -enum MergeMode { - // Don't merge the state in AbstractState with basic blocks. - DontMerge, - - // Merge the state in AbstractState with the tail of the basic - // block being analyzed. - MergeToTail, - - // Merge the state in AbstractState with the tail of the basic - // block, and with the heads of successor blocks. - MergeToSuccessors -}; +class Graph; + +// Eliminates allocations of the Arguments-class objects when it can prove that the object doesn't escape +// and none of the arguments are mutated (either via the object or via the stack). + +bool performArgumentsElimination(Graph&); } } // namespace JSC::DFG -#endif // DFGMergeMode_h +#endif // ENABLE(DFG_JIT) + +#endif // DFGArgumentsEliminationPhase_h diff --git a/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp b/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp deleted file mode 100644 index 936603150..000000000 --- a/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp +++ /dev/null @@ -1,798 +0,0 @@ -/* - * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "config.h" -#include "DFGArgumentsSimplificationPhase.h" - -#if ENABLE(DFG_JIT) - -#include "DFGBasicBlock.h" -#include "DFGGraph.h" -#include "DFGInsertionSet.h" -#include "DFGPhase.h" -#include "DFGValidate.h" -#include "DFGVariableAccessDataDump.h" -#include "Operations.h" -#include <wtf/HashSet.h> -#include <wtf/HashMap.h> - -namespace JSC { namespace DFG { - -namespace { - -struct ArgumentsAliasingData { - InlineCallFrame* callContext; - bool callContextSet; - bool multipleCallContexts; - - bool assignedFromArguments; - bool assignedFromManyThings; - - bool escapes; - - ArgumentsAliasingData() - : callContext(0) - , callContextSet(false) - , multipleCallContexts(false) - , assignedFromArguments(false) - , assignedFromManyThings(false) - , escapes(false) - { - } - - void mergeCallContext(InlineCallFrame* newCallContext) - { - if (multipleCallContexts) - return; - - if (!callContextSet) { - callContext = newCallContext; - callContextSet = true; - return; - } - - if (callContext == newCallContext) - return; - - multipleCallContexts = true; - } - - bool callContextIsValid() - { - return callContextSet && !multipleCallContexts; - } - - void mergeArgumentsAssignment() - { - assignedFromArguments = true; - } - - void mergeNonArgumentsAssignment() - { - assignedFromManyThings = true; - } - - bool argumentsAssignmentIsValid() - { - return assignedFromArguments && !assignedFromManyThings; - } - - bool isValid() - { - return callContextIsValid() && argumentsAssignmentIsValid() && !escapes; - } -}; - -} // end anonymous namespace - -class ArgumentsSimplificationPhase : public Phase { -public: - ArgumentsSimplificationPhase(Graph& graph) - : Phase(graph, "arguments simplification") - { - } - - bool run() - { - if (!m_graph.m_hasArguments) - return false; - - bool changed = false; - - // Record which arguments are known to escape no matter what. - for (InlineCallFrameSet::iterator iter = m_graph.m_inlineCallFrames->begin(); !!iter; ++iter) - pruneObviousArgumentCreations(*iter); - pruneObviousArgumentCreations(0); // the machine call frame. - - // Create data for variable access datas that we will want to analyze. - for (unsigned i = m_graph.m_variableAccessData.size(); i--;) { - VariableAccessData* variableAccessData = &m_graph.m_variableAccessData[i]; - if (!variableAccessData->isRoot()) - continue; - if (variableAccessData->isCaptured()) - continue; - m_argumentsAliasing.add(variableAccessData, ArgumentsAliasingData()); - } - - // Figure out which variables are live, using a conservative approximation of - // liveness. 
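The merge logic of ArgumentsAliasingData above reduces to a small three-state lattice: no context seen yet, exactly one context seen, or multiple contexts seen. A minimal sketch, with a void pointer standing in for InlineCallFrame* (hypothetical names, not JSC code):

#include <cassert>

struct CallContextMerge {
    const void* context { nullptr };   // stands in for InlineCallFrame*
    bool contextSet { false };
    bool multipleContexts { false };

    void merge(const void* newContext)
    {
        if (multipleContexts)
            return;
        if (!contextSet) {
            context = newContext;
            contextSet = true;
            return;
        }
        if (context != newContext)
            multipleContexts = true;
    }

    bool isValid() const { return contextSet && !multipleContexts; }
};

int main()
{
    int frameA, frameB;
    CallContextMerge m;
    m.merge(&frameA);
    assert(m.isValid());   // single context: still an aliasing candidate
    m.merge(&frameA);
    assert(m.isValid());   // same context again: still fine
    m.merge(&frameB);
    assert(!m.isValid());  // two distinct contexts: give up on aliasing
    return 0;
}

Once two distinct contexts have been merged, the variable can no longer be treated as an alias of a single frame's arguments, which is exactly why isValid() also requires argumentsAssignmentIsValid() and !escapes in the phase above.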
- for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) - continue; - for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) { - Node* node = block->at(indexInBlock); - switch (node->op()) { - case GetLocal: - case Flush: - case PhantomLocal: - m_isLive.add(node->variableAccessData()); - break; - default: - break; - } - } - } - - // Figure out which variables alias the arguments and nothing else, and are - // used only for GetByVal and GetArrayLength accesses. At the same time, - // identify uses of CreateArguments that are not consistent with the arguments - // being aliased only to variables that satisfy these constraints. - for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) - continue; - for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) { - Node* node = block->at(indexInBlock); - switch (node->op()) { - case CreateArguments: { - // Ignore this op. If we see a lone CreateArguments then we want to - // completely ignore it because: - // 1) The default would be to see that the child is a GetLocal on the - // arguments register and conclude that we have an arguments escape. - // 2) The fact that a CreateArguments exists does not mean that it - // will continue to exist after we're done with this phase. As far - // as this phase is concerned, a CreateArguments only "exists" if it - // is used in a manner that necessitates its existance. - break; - } - - case TearOffArguments: { - // Ignore arguments tear off, because it's only relevant if we actually - // need to create the arguments. - break; - } - - case SetLocal: { - Node* source = node->child1().node(); - VariableAccessData* variableAccessData = node->variableAccessData(); - VirtualRegister argumentsRegister = - m_graph.uncheckedArgumentsRegisterFor(node->codeOrigin); - if (source->op() != CreateArguments && source->op() != PhantomArguments) { - // Make sure that the source of the SetLocal knows that if it's - // a variable that we think is aliased to the arguments, then it - // may escape at this point. In future, we could track transitive - // aliasing. But not yet. - observeBadArgumentsUse(source); - - // If this is an assignment to the arguments register, then - // pretend as if the arguments were created. We don't want to - // optimize code that explicitly assigns to the arguments, - // because that seems too ugly. - - // But, before getting rid of CreateArguments, we will have - // an assignment to the arguments registers with JSValue(). - // That's because CSE will refuse to get rid of the - // init_lazy_reg since it treats CreateArguments as reading - // local variables. That could be fixed, but it's easier to - // work around this here. - if (source->op() == JSConstant - && !source->valueOfJSConstant(codeBlock())) - break; - - // If the variable is totally dead, then ignore it. - if (!m_isLive.contains(variableAccessData)) - break; - - if (argumentsRegister.isValid() - && (variableAccessData->local() == argumentsRegister - || variableAccessData->local() == unmodifiedArgumentsRegister(argumentsRegister))) { - m_createsArguments.add(node->codeOrigin.inlineCallFrame); - break; - } - - if (variableAccessData->isCaptured()) - break; - - // Make sure that if it's a variable that we think is aliased to - // the arguments, that we know that it might actually not be. 
- ArgumentsAliasingData& data = - m_argumentsAliasing.find(variableAccessData)->value; - data.mergeNonArgumentsAssignment(); - data.mergeCallContext(node->codeOrigin.inlineCallFrame); - break; - } - if (argumentsRegister.isValid() - && (variableAccessData->local() == argumentsRegister - || variableAccessData->local() == unmodifiedArgumentsRegister(argumentsRegister))) { - if (node->codeOrigin.inlineCallFrame == source->codeOrigin.inlineCallFrame) - break; - m_createsArguments.add(source->codeOrigin.inlineCallFrame); - break; - } - if (variableAccessData->isCaptured()) { - m_createsArguments.add(source->codeOrigin.inlineCallFrame); - break; - } - ArgumentsAliasingData& data = - m_argumentsAliasing.find(variableAccessData)->value; - data.mergeArgumentsAssignment(); - // This ensures that the variable's uses are in the same context as - // the arguments it is aliasing. - data.mergeCallContext(node->codeOrigin.inlineCallFrame); - data.mergeCallContext(source->codeOrigin.inlineCallFrame); - break; - } - - case GetLocal: - case Phi: /* FIXME: https://bugs.webkit.org/show_bug.cgi?id=108555 */ { - VariableAccessData* variableAccessData = node->variableAccessData(); - if (variableAccessData->isCaptured()) - break; - ArgumentsAliasingData& data = - m_argumentsAliasing.find(variableAccessData)->value; - data.mergeCallContext(node->codeOrigin.inlineCallFrame); - break; - } - - case Flush: { - VariableAccessData* variableAccessData = node->variableAccessData(); - if (variableAccessData->isCaptured()) - break; - ArgumentsAliasingData& data = - m_argumentsAliasing.find(variableAccessData)->value; - data.mergeCallContext(node->codeOrigin.inlineCallFrame); - - // If a variable is used in a flush then by definition it escapes. - data.escapes = true; - break; - } - - case SetArgument: { - VariableAccessData* variableAccessData = node->variableAccessData(); - if (variableAccessData->isCaptured()) - break; - ArgumentsAliasingData& data = - m_argumentsAliasing.find(variableAccessData)->value; - data.mergeNonArgumentsAssignment(); - data.mergeCallContext(node->codeOrigin.inlineCallFrame); - break; - } - - case GetByVal: { - if (node->arrayMode().type() != Array::Arguments) { - observeBadArgumentsUses(node); - break; - } - - // That's so awful and pretty much impossible since it would - // imply that the arguments were predicted integer, but it's - // good to be defensive and thorough. - observeBadArgumentsUse(node->child2().node()); - observeProperArgumentsUse(node, node->child1()); - break; - } - - case GetArrayLength: { - if (node->arrayMode().type() != Array::Arguments) { - observeBadArgumentsUses(node); - break; - } - - observeProperArgumentsUse(node, node->child1()); - break; - } - - case Phantom: - // We don't care about phantom uses, since phantom uses are all about - // just keeping things alive for OSR exit. If something - like the - // CreateArguments - is just being kept alive, then this transformation - // will not break this, since the Phantom will now just keep alive a - // PhantomArguments and OSR exit will still do the right things. - break; - - case CheckStructure: - case StructureTransitionWatchpoint: - case CheckArray: - // We don't care about these because if we get uses of the relevant - // variable then we can safely get rid of these, too. This of course - // relies on there not being any information transferred by the CFA - // from a CheckStructure on one variable to the information about the - // structures of another variable. 
- break; - - case MovHint: - // We don't care about MovHints at all, since they represent what happens - // in bytecode. We rematerialize arguments objects on OSR exit anyway. - break; - - default: - observeBadArgumentsUses(node); - break; - } - } - } - - // Now we know which variables are aliased to arguments. But if any of them are - // found to have escaped, or were otherwise invalidated, then we need to mark - // the arguments as requiring creation. This is a property of SetLocals to - // variables that are neither the correct arguments register nor are marked as - // being arguments-aliased. - for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) - continue; - for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) { - Node* node = block->at(indexInBlock); - if (node->op() != SetLocal) - continue; - Node* source = node->child1().node(); - if (source->op() != CreateArguments) - continue; - VariableAccessData* variableAccessData = node->variableAccessData(); - if (variableAccessData->isCaptured()) { - // The captured case would have already been taken care of in the - // previous pass. - continue; - } - - ArgumentsAliasingData& data = - m_argumentsAliasing.find(variableAccessData)->value; - if (data.isValid()) - continue; - - m_createsArguments.add(source->codeOrigin.inlineCallFrame); - } - } - - InsertionSet insertionSet(m_graph); - - for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) - continue; - for (unsigned indexInBlock = 0; indexInBlock < block->size(); indexInBlock++) { - Node* node = block->at(indexInBlock); - switch (node->op()) { - case SetLocal: { - Node* source = node->child1().node(); - if (source->op() != CreateArguments) - break; - - if (m_createsArguments.contains(source->codeOrigin.inlineCallFrame)) - break; - - VariableAccessData* variableAccessData = node->variableAccessData(); - - if (m_graph.argumentsRegisterFor(node->codeOrigin) == variableAccessData->local() - || unmodifiedArgumentsRegister(m_graph.argumentsRegisterFor(node->codeOrigin)) == variableAccessData->local()) - break; - - if (variableAccessData->mergeIsArgumentsAlias(true)) { - changed = true; - - // Make sure that the variable knows, that it may now hold non-cell values. - variableAccessData->predict(SpecEmpty); - } - - // Make sure that the SetLocal doesn't check that the input is a Cell. - if (node->child1().useKind() != UntypedUse) { - node->child1().setUseKind(UntypedUse); - changed = true; - } - break; - } - - case Flush: { - VariableAccessData* variableAccessData = node->variableAccessData(); - - if (variableAccessData->isCaptured() - || !m_argumentsAliasing.find(variableAccessData)->value.isValid() - || m_createsArguments.contains(node->codeOrigin.inlineCallFrame)) - break; - - RELEASE_ASSERT_NOT_REACHED(); - break; - } - - case Phantom: { - // It's highly likely that we will have a Phantom referencing either - // CreateArguments, or a local op for the arguments register, or a - // local op for an arguments-aliased variable. In any of those cases, - // we should remove the phantom reference, since: - // 1) Phantoms only exist to aid OSR exit. But arguments simplification - // has its own OSR exit story, which is to inform OSR exit to reify - // the arguments as necessary. - // 2) The Phantom may keep the CreateArguments node alive, which is - // precisely what we don't want. 
- for (unsigned i = 0; i < AdjacencyList::Size; ++i) - detypeArgumentsReferencingPhantomChild(node, i); - break; - } - - case CheckStructure: - case StructureTransitionWatchpoint: - case CheckArray: { - // We can just get rid of this node, if it references a phantom argument. - if (!isOKToOptimize(node->child1().node())) - break; - node->convertToPhantom(); - break; - } - - case GetByVal: { - if (node->arrayMode().type() != Array::Arguments) - break; - - // This can be simplified to GetMyArgumentByVal if we know that - // it satisfies either condition (1) or (2): - // 1) Its first child is a valid ArgumentsAliasingData and the - // InlineCallFrame* is not marked as creating arguments. - // 2) Its first child is CreateArguments and its InlineCallFrame* - // is not marked as creating arguments. - - if (!isOKToOptimize(node->child1().node())) - break; - - insertionSet.insertNode( - indexInBlock, SpecNone, Phantom, node->codeOrigin, node->child1()); - - node->child1() = node->child2(); - node->child2() = Edge(); - node->setOpAndDefaultFlags(GetMyArgumentByVal); - changed = true; - --indexInBlock; // Force reconsideration of this op now that it's a GetMyArgumentByVal. - break; - } - - case GetArrayLength: { - if (node->arrayMode().type() != Array::Arguments) - break; - - if (!isOKToOptimize(node->child1().node())) - break; - - insertionSet.insertNode( - indexInBlock, SpecNone, Phantom, node->codeOrigin, node->child1()); - - node->child1() = Edge(); - node->setOpAndDefaultFlags(GetMyArgumentsLength); - changed = true; - --indexInBlock; // Force reconsideration of this op noew that it's a GetMyArgumentsLength. - break; - } - - case GetMyArgumentsLength: - case GetMyArgumentsLengthSafe: { - if (m_createsArguments.contains(node->codeOrigin.inlineCallFrame)) { - ASSERT(node->op() == GetMyArgumentsLengthSafe); - break; - } - if (node->op() == GetMyArgumentsLengthSafe) { - node->setOp(GetMyArgumentsLength); - changed = true; - } - - CodeOrigin codeOrigin = node->codeOrigin; - if (!codeOrigin.inlineCallFrame) - break; - - // We know exactly what this will return. But only after we have checked - // that nobody has escaped our arguments. - insertionSet.insertNode( - indexInBlock, SpecNone, CheckArgumentsNotCreated, codeOrigin); - - m_graph.convertToConstant( - node, jsNumber(codeOrigin.inlineCallFrame->arguments.size() - 1)); - changed = true; - break; - } - - case GetMyArgumentByVal: - case GetMyArgumentByValSafe: { - if (m_createsArguments.contains(node->codeOrigin.inlineCallFrame)) { - ASSERT(node->op() == GetMyArgumentByValSafe); - break; - } - if (node->op() == GetMyArgumentByValSafe) { - node->setOp(GetMyArgumentByVal); - changed = true; - } - if (!node->codeOrigin.inlineCallFrame) - break; - if (!node->child1()->hasConstant()) - break; - JSValue value = node->child1()->valueOfJSConstant(codeBlock()); - if (!value.isInt32()) - break; - int32_t index = value.asInt32(); - if (index < 0 - || static_cast<size_t>(index + 1) >= - node->codeOrigin.inlineCallFrame->arguments.size()) - break; - - // We know which argument this is accessing. But only after we have checked - // that nobody has escaped our arguments. We also need to ensure that the - // index is kept alive. That's somewhat pointless since it's a constant, but - // it's important because this is one of those invariants that we like to - // have in the DFG. 
Note finally that we use the GetLocalUnlinked opcode - // here, since this is being done _after_ the prediction propagation phase - // has run - therefore it makes little sense to link the GetLocal operation - // into the VariableAccessData and Phi graphs. - - CodeOrigin codeOrigin = node->codeOrigin; - AdjacencyList children = node->children; - - node->convertToGetLocalUnlinked( - VirtualRegister( - node->codeOrigin.inlineCallFrame->stackOffset + - m_graph.baselineCodeBlockFor(node->codeOrigin)->argumentIndexAfterCapture(index))); - - insertionSet.insertNode( - indexInBlock, SpecNone, CheckArgumentsNotCreated, - codeOrigin); - insertionSet.insertNode( - indexInBlock, SpecNone, Phantom, codeOrigin, children); - - changed = true; - break; - } - - case TearOffArguments: { - if (m_createsArguments.contains(node->codeOrigin.inlineCallFrame)) - continue; - - node->convertToPhantom(); - break; - } - - default: - break; - } - } - insertionSet.execute(block); - } - - for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) - continue; - for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) { - Node* node = block->at(indexInBlock); - if (node->op() != CreateArguments) - continue; - // If this is a CreateArguments for an InlineCallFrame* that does - // not create arguments, then replace it with a PhantomArguments. - // PhantomArguments is a non-executing node that just indicates - // that the node should be reified as an arguments object on OSR - // exit. - if (m_createsArguments.contains(node->codeOrigin.inlineCallFrame)) - continue; - insertionSet.insertNode( - indexInBlock, SpecNone, Phantom, node->codeOrigin, node->children); - node->setOpAndDefaultFlags(PhantomArguments); - node->children.reset(); - changed = true; - } - insertionSet.execute(block); - } - - for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) - continue; - for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) { - Node* node = block->at(indexInBlock); - if (node->op() != Phantom) - continue; - for (unsigned i = 0; i < AdjacencyList::Size; ++i) - detypeArgumentsReferencingPhantomChild(node, i); - } - } - - if (changed) { - m_graph.dethread(); - m_graph.m_form = LoadStore; - } - - return changed; - } - -private: - HashSet<InlineCallFrame*, - DefaultHash<InlineCallFrame*>::Hash, - NullableHashTraits<InlineCallFrame*>> m_createsArguments; - HashMap<VariableAccessData*, ArgumentsAliasingData, - DefaultHash<VariableAccessData*>::Hash, - NullableHashTraits<VariableAccessData*>> m_argumentsAliasing; - HashSet<VariableAccessData*> m_isLive; - - void pruneObviousArgumentCreations(InlineCallFrame* inlineCallFrame) - { - ScriptExecutable* executable = m_graph.executableFor(inlineCallFrame); - if (m_graph.m_executablesWhoseArgumentsEscaped.contains(executable) - || executable->isStrictMode()) - m_createsArguments.add(inlineCallFrame); - } - - void observeBadArgumentsUse(Node* node) - { - if (!node) - return; - - switch (node->op()) { - case CreateArguments: { - m_createsArguments.add(node->codeOrigin.inlineCallFrame); - break; - } - - case GetLocal: { - VirtualRegister argumentsRegister = m_graph.uncheckedArgumentsRegisterFor(node->codeOrigin); - if (argumentsRegister.isValid() - && (node->local() == argumentsRegister - || node->local() == unmodifiedArgumentsRegister(argumentsRegister))) { - 
m_createsArguments.add(node->codeOrigin.inlineCallFrame); - break; - } - - VariableAccessData* variableAccessData = node->variableAccessData(); - if (variableAccessData->isCaptured()) - break; - - ArgumentsAliasingData& data = m_argumentsAliasing.find(variableAccessData)->value; - data.escapes = true; - break; - } - - default: - break; - } - } - - void observeBadArgumentsUses(Node* node) - { - for (unsigned i = m_graph.numChildren(node); i--;) - observeBadArgumentsUse(m_graph.child(node, i).node()); - } - - void observeProperArgumentsUse(Node* node, Edge edge) - { - if (edge->op() != GetLocal) { - // When can this happen? At least two cases that I can think - // of: - // - // 1) Aliased use of arguments in the same basic block, - // like: - // - // var a = arguments; - // var x = arguments[i]; - // - // 2) If we're accessing arguments we got from the heap! - - if (edge->op() == CreateArguments - && node->codeOrigin.inlineCallFrame - != edge->codeOrigin.inlineCallFrame) - m_createsArguments.add(edge->codeOrigin.inlineCallFrame); - - return; - } - - VariableAccessData* variableAccessData = edge->variableAccessData(); - if (edge->local() == m_graph.uncheckedArgumentsRegisterFor(edge->codeOrigin) - && node->codeOrigin.inlineCallFrame != edge->codeOrigin.inlineCallFrame) { - m_createsArguments.add(edge->codeOrigin.inlineCallFrame); - return; - } - - if (variableAccessData->isCaptured()) - return; - - ArgumentsAliasingData& data = m_argumentsAliasing.find(variableAccessData)->value; - data.mergeCallContext(node->codeOrigin.inlineCallFrame); - } - - bool isOKToOptimize(Node* source) - { - if (m_createsArguments.contains(source->codeOrigin.inlineCallFrame)) - return false; - - switch (source->op()) { - case GetLocal: { - VariableAccessData* variableAccessData = source->variableAccessData(); - VirtualRegister argumentsRegister = m_graph.uncheckedArgumentsRegisterFor(source->codeOrigin); - if (!argumentsRegister.isValid()) - break; - if (argumentsRegister == variableAccessData->local()) - return true; - if (unmodifiedArgumentsRegister(argumentsRegister) == variableAccessData->local()) - return true; - if (variableAccessData->isCaptured()) - break; - ArgumentsAliasingData& data = - m_argumentsAliasing.find(variableAccessData)->value; - if (!data.isValid()) - break; - - return true; - } - - case CreateArguments: { - return true; - } - - default: - break; - } - - return false; - } - - void detypeArgumentsReferencingPhantomChild(Node* node, unsigned edgeIndex) - { - Edge edge = node->children.child(edgeIndex); - if (!edge) - return; - - switch (edge->op()) { - case GetLocal: { - VariableAccessData* variableAccessData = edge->variableAccessData(); - if (!variableAccessData->isArgumentsAlias()) - break; - node->children.child(edgeIndex).setUseKind(UntypedUse); - break; - } - - case PhantomArguments: { - node->children.child(edgeIndex).setUseKind(UntypedUse); - break; - } - - default: - break; - } - } -}; - -bool performArgumentsSimplification(Graph& graph) -{ - SamplingRegion samplingRegion("DFG Arguments Simplification Phase"); - return runPhase<ArgumentsSimplificationPhase>(graph); -} - -} } // namespace JSC::DFG - -#endif // ENABLE(DFG_JIT) - - diff --git a/Source/JavaScriptCore/dfg/DFGArgumentsUtilities.cpp b/Source/JavaScriptCore/dfg/DFGArgumentsUtilities.cpp new file mode 100644 index 000000000..5d512b1b6 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGArgumentsUtilities.cpp @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGArgumentsUtilities.h" + +#if ENABLE(DFG_JIT) + +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +bool argumentsInvolveStackSlot(InlineCallFrame* inlineCallFrame, VirtualRegister reg) +{ + if (!inlineCallFrame) + return (reg.isArgument() && reg.toArgument()) || reg.isHeader(); + + if (inlineCallFrame->isClosureCall + && reg == VirtualRegister(inlineCallFrame->stackOffset + JSStack::Callee)) + return true; + + if (inlineCallFrame->isVarargs() + && reg == VirtualRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount)) + return true; + + unsigned numArguments = inlineCallFrame->arguments.size() - 1; + VirtualRegister argumentStart = + VirtualRegister(inlineCallFrame->stackOffset) + CallFrame::argumentOffset(0); + return reg >= argumentStart && reg < argumentStart + numArguments; +} + +bool argumentsInvolveStackSlot(Node* candidate, VirtualRegister reg) +{ + return argumentsInvolveStackSlot(candidate->origin.semantic.inlineCallFrame, reg); +} + +Node* emitCodeToGetArgumentsArrayLength( + InsertionSet& insertionSet, Node* arguments, unsigned nodeIndex, NodeOrigin origin) +{ + Graph& graph = insertionSet.graph(); + + DFG_ASSERT( + graph, arguments, + arguments->op() == CreateDirectArguments || arguments->op() == CreateScopedArguments + || arguments->op() == CreateClonedArguments || arguments->op() == PhantomDirectArguments + || arguments->op() == PhantomClonedArguments); + + InlineCallFrame* inlineCallFrame = arguments->origin.semantic.inlineCallFrame; + + if (inlineCallFrame && !inlineCallFrame->isVarargs()) { + return insertionSet.insertConstant( + nodeIndex, origin, jsNumber(inlineCallFrame->arguments.size() - 1)); + } + + Node* argumentCount; + if (!inlineCallFrame) + argumentCount = insertionSet.insertNode(nodeIndex, SpecInt32, GetArgumentCount, origin); + else { + VirtualRegister argumentCountRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount); + + argumentCount = insertionSet.insertNode( + nodeIndex, SpecInt32, GetStack, origin, + OpInfo(graph.m_stackAccessData.add(argumentCountRegister, FlushedInt32))); + } + + return insertionSet.insertNode( + nodeIndex, SpecInt32, ArithSub, origin, OpInfo(Arith::Unchecked), + Edge(argumentCount, Int32Use), + 
insertionSet.insertConstantForUse( + nodeIndex, origin, jsNumber(1), Int32Use)); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGArgumentsUtilities.h b/Source/JavaScriptCore/dfg/DFGArgumentsUtilities.h new file mode 100644 index 000000000..82bfec30a --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGArgumentsUtilities.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGArgumentsUtilities_h +#define DFGArgumentsUtilities_h + +#if ENABLE(DFG_JIT) + +#include "DFGGraph.h" +#include "DFGInsertionSet.h" + +namespace JSC { namespace DFG { + +bool argumentsInvolveStackSlot(InlineCallFrame*, VirtualRegister); +bool argumentsInvolveStackSlot(Node* candidate, VirtualRegister); + +Node* emitCodeToGetArgumentsArrayLength( + InsertionSet&, Node* arguments, unsigned nodeIndex, NodeOrigin); + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGArgumentsUtilities_h + diff --git a/Source/JavaScriptCore/dfg/DFGArithMode.cpp b/Source/JavaScriptCore/dfg/DFGArithMode.cpp index cc9699a02..84ddae192 100644 --- a/Source/JavaScriptCore/dfg/DFGArithMode.cpp +++ b/Source/JavaScriptCore/dfg/DFGArithMode.cpp @@ -28,6 +28,7 @@ #if ENABLE(DFG_JIT) +#include "JSCInlines.h" #include <wtf/PrintStream.h> namespace WTF { @@ -54,6 +55,22 @@ void printInternal(PrintStream& out, JSC::DFG::Arith::Mode mode) RELEASE_ASSERT_NOT_REACHED(); } +void printInternal(PrintStream& out, JSC::DFG::Arith::RoundingMode mode) +{ + switch (mode) { + case JSC::DFG::Arith::RoundingMode::Int32: + out.print("Int32"); + return; + case JSC::DFG::Arith::RoundingMode::Int32WithNegativeZeroCheck: + out.print("Int32WithNegativeZeroCheck"); + return; + case JSC::DFG::Arith::RoundingMode::Double: + out.print("Double"); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + } // namespace WTF #endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGArithMode.h b/Source/JavaScriptCore/dfg/DFGArithMode.h index 073ed6aba..aecfe3e18 100644 --- a/Source/JavaScriptCore/dfg/DFGArithMode.h +++ b/Source/JavaScriptCore/dfg/DFGArithMode.h @@ -40,6 +40,14 @@ enum Mode { CheckOverflowAndNegativeZero, // Check for both overflow and negative zero. 
DoOverflow // Up-convert to the smallest type that soundly represents all possible results after input type speculation. }; + +// Define the type of operation the rounding operation will perform. +enum class RoundingMode { + Int32, // The round operation produces a integer and -0 is considered as 0. + Int32WithNegativeZeroCheck, // The round operation produces a integer and checks for -0. + Double // The round operation produce a double. The result can be -0, NaN or (+/-)Infinity. +}; + } // namespace Arith inline bool doesOverflow(Arith::Mode mode) @@ -97,12 +105,48 @@ inline bool shouldCheckNegativeZero(Arith::Mode mode) return true; } +inline bool subsumes(Arith::Mode earlier, Arith::Mode later) +{ + switch (earlier) { + case Arith::CheckOverflow: + switch (later) { + case Arith::Unchecked: + case Arith::CheckOverflow: + return true; + default: + return false; + } + case Arith::CheckOverflowAndNegativeZero: + switch (later) { + case Arith::Unchecked: + case Arith::CheckOverflow: + case Arith::CheckOverflowAndNegativeZero: + return true; + default: + return false; + } + default: + return earlier == later; + } +} + +inline bool producesInteger(Arith::RoundingMode mode) +{ + return mode == Arith::RoundingMode::Int32WithNegativeZeroCheck || mode == Arith::RoundingMode::Int32; +} + +inline bool shouldCheckNegativeZero(Arith::RoundingMode mode) +{ + return mode == Arith::RoundingMode::Int32WithNegativeZeroCheck; +} + } } // namespace JSC::DFG namespace WTF { class PrintStream; void printInternal(PrintStream&, JSC::DFG::Arith::Mode); +void printInternal(PrintStream&, JSC::DFG::Arith::RoundingMode); } // namespace WTF diff --git a/Source/JavaScriptCore/dfg/DFGArrayMode.cpp b/Source/JavaScriptCore/dfg/DFGArrayMode.cpp index ef9b1c494..87ee3adff 100644 --- a/Source/JavaScriptCore/dfg/DFGArrayMode.cpp +++ b/Source/JavaScriptCore/dfg/DFGArrayMode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,9 +28,10 @@ #if ENABLE(DFG_JIT) +#include "ArrayPrototype.h" #include "DFGAbstractValue.h" #include "DFGGraph.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -48,18 +49,18 @@ ArrayMode ArrayMode::fromObserved(const ConcurrentJITLocker& locker, ArrayProfil return ArrayMode(Array::Unprofiled); case asArrayModes(NonArray): if (action == Array::Write && !profile->mayInterceptIndexedAccesses(locker)) - return ArrayMode(Array::Undecided, nonArray, Array::OutOfBounds, Array::Convert); - return ArrayMode(Array::SelectUsingPredictions, nonArray); + return ArrayMode(Array::SelectUsingArguments, nonArray, Array::OutOfBounds, Array::Convert); + return ArrayMode(Array::SelectUsingPredictions, nonArray).withSpeculationFromProfile(locker, profile, makeSafe); case asArrayModes(ArrayWithUndecided): if (action == Array::Write) - return ArrayMode(Array::Undecided, Array::Array, Array::OutOfBounds, Array::Convert); - return ArrayMode(Array::Generic); + return ArrayMode(Array::SelectUsingArguments, Array::Array, Array::OutOfBounds, Array::Convert); + return ArrayMode(Array::Undecided, Array::Array, Array::OutOfBounds, Array::AsIs).withProfile(locker, profile, makeSafe); case asArrayModes(NonArray) | asArrayModes(ArrayWithUndecided): if (action == Array::Write && !profile->mayInterceptIndexedAccesses(locker)) - return ArrayMode(Array::Undecided, Array::PossiblyArray, Array::OutOfBounds, Array::Convert); - return ArrayMode(Array::SelectUsingPredictions); + return ArrayMode(Array::SelectUsingArguments, Array::PossiblyArray, Array::OutOfBounds, Array::Convert); + return ArrayMode(Array::SelectUsingPredictions).withSpeculationFromProfile(locker, profile, makeSafe); case asArrayModes(NonArrayWithInt32): return ArrayMode(Array::Int32, nonArray, Array::AsIs).withProfile(locker, profile, makeSafe); @@ -97,10 +98,28 @@ ArrayMode ArrayMode::fromObserved(const ConcurrentJITLocker& locker, ArrayProfil case asArrayModes(NonArrayWithSlowPutArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage): case asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage): return ArrayMode(Array::SlowPutArrayStorage, Array::PossiblyArray, Array::AsIs).withProfile(locker, profile, makeSafe); + case Int8ArrayMode: + return ArrayMode(Array::Int8Array, nonArray, Array::AsIs).withProfile(locker, profile, makeSafe); + case Int16ArrayMode: + return ArrayMode(Array::Int16Array, nonArray, Array::AsIs).withProfile(locker, profile, makeSafe); + case Int32ArrayMode: + return ArrayMode(Array::Int32Array, nonArray, Array::AsIs).withProfile(locker, profile, makeSafe); + case Uint8ArrayMode: + return ArrayMode(Array::Uint8Array, nonArray, Array::AsIs).withProfile(locker, profile, makeSafe); + case Uint8ClampedArrayMode: + return ArrayMode(Array::Uint8ClampedArray, nonArray, Array::AsIs).withProfile(locker, profile, makeSafe); + case Uint16ArrayMode: + return ArrayMode(Array::Uint16Array, nonArray, Array::AsIs).withProfile(locker, profile, makeSafe); + case Uint32ArrayMode: + return ArrayMode(Array::Uint32Array, nonArray, Array::AsIs).withProfile(locker, profile, makeSafe); + case Float32ArrayMode: + return ArrayMode(Array::Float32Array, nonArray, Array::AsIs).withProfile(locker, profile, makeSafe); + case Float64ArrayMode: + return 
ArrayMode(Array::Float64Array, nonArray, Array::AsIs).withProfile(locker, profile, makeSafe); default: if ((observed & asArrayModes(NonArray)) && profile->mayInterceptIndexedAccesses(locker)) - return ArrayMode(Array::SelectUsingPredictions); + return ArrayMode(Array::SelectUsingPredictions).withSpeculationFromProfile(locker, profile, makeSafe); Array::Type type; Array::Class arrayClass; @@ -116,7 +135,7 @@ ArrayMode ArrayMode::fromObserved(const ConcurrentJITLocker& locker, ArrayProfil else if (shouldUseInt32(observed)) type = Array::Int32; else - type = Array::Undecided; + type = Array::SelectUsingArguments; if (hasSeenArray(observed) && hasSeenNonArray(observed)) arrayClass = Array::PossiblyArray; @@ -131,7 +150,9 @@ ArrayMode ArrayMode::fromObserved(const ConcurrentJITLocker& locker, ArrayProfil } } -ArrayMode ArrayMode::refine(SpeculatedType base, SpeculatedType index, SpeculatedType value, NodeFlags flags) const +ArrayMode ArrayMode::refine( + Graph& graph, Node* node, + SpeculatedType base, SpeculatedType index, SpeculatedType value) const { if (!base || !index) { // It can be that we had a legitimate arrayMode but no incoming predictions. That'll @@ -144,6 +165,10 @@ ArrayMode ArrayMode::refine(SpeculatedType base, SpeculatedType index, Speculate if (!isInt32Speculation(index)) return ArrayMode(Array::Generic); + // If we had exited because of an exotic object behavior, then don't try to specialize. + if (graph.hasExitSite(node->origin.semantic, ExoticObjectMode)) + return ArrayMode(Array::Generic); + // Note: our profiling currently doesn't give us good information in case we have // an unlikely control flow path that sets the base to a non-cell value. Value // profiling and prediction propagation will probably tell us that the value is @@ -155,10 +180,7 @@ ArrayMode ArrayMode::refine(SpeculatedType base, SpeculatedType index, Speculate // should just trust the array profile. switch (type()) { - case Array::Unprofiled: - return ArrayMode(Array::ForceExit); - - case Array::Undecided: + case Array::SelectUsingArguments: if (!value) return withType(Array::ForceExit); if (isInt32Speculation(value)) @@ -166,7 +188,20 @@ ArrayMode ArrayMode::refine(SpeculatedType base, SpeculatedType index, Speculate if (isFullNumberSpeculation(value)) return withTypeAndConversion(Array::Double, Array::Convert); return withTypeAndConversion(Array::Contiguous, Array::Convert); - + case Array::Undecided: { + // If we have an OriginalArray and the JSArray prototype chain is sane, + // any indexed access always return undefined. We have a fast path for that. 
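A small conceptual sketch of the SaneChain condition described in the comment above, using hypothetical simplified types rather than the JSC API (the actual watchpoint registration follows in the diff):

#include <cstdio>

struct PrototypeChainState {
    bool arrayPrototypeUnchanged;   // guarded by a structure transition watchpoint
    bool objectPrototypeUnchanged;  // likewise
};

enum class HoleLoadStrategy { SaneChain, Generic };

// While both prototypes keep their original structures, a hole load on an
// original JSArray cannot hit an indexed accessor anywhere on the chain, so
// the compiled load may simply produce undefined.
static HoleLoadStrategy chooseHoleLoadStrategy(const PrototypeChainState& state, bool isOriginalArray)
{
    if (isOriginalArray && state.arrayPrototypeUnchanged && state.objectPrototypeUnchanged)
        return HoleLoadStrategy::SaneChain;
    return HoleLoadStrategy::Generic;
}

int main()
{
    PrototypeChainState pristine { true, true };
    PrototypeChainState patched { false, true }; // an indexed property was added to Array.prototype
    std::printf("%d\n", chooseHoleLoadStrategy(pristine, true) == HoleLoadStrategy::SaneChain); // 1
    std::printf("%d\n", chooseHoleLoadStrategy(patched, true) == HoleLoadStrategy::SaneChain);  // 0
    return 0;
}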
+ JSGlobalObject* globalObject = graph.globalObjectFor(node->origin.semantic); + if (node->op() == GetByVal + && arrayClass() == Array::OriginalArray + && globalObject->arrayPrototypeChainIsSane() + && !graph.hasExitSite(node->origin.semantic, OutOfBounds)) { + graph.watchpoints().addLazily(globalObject->arrayPrototype()->structure()->transitionWatchpointSet()); + graph.watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet()); + return withSpeculation(Array::SaneChain); + } + return ArrayMode(Array::Generic); + } case Array::Int32: if (!value || isInt32Speculation(value)) return *this; @@ -175,54 +210,93 @@ ArrayMode ArrayMode::refine(SpeculatedType base, SpeculatedType index, Speculate return withTypeAndConversion(Array::Contiguous, Array::Convert); case Array::Double: - if (flags & NodeBytecodeUsesAsInt) - return withTypeAndConversion(Array::Contiguous, Array::RageConvert); if (!value || isFullNumberSpeculation(value)) return *this; return withTypeAndConversion(Array::Contiguous, Array::Convert); case Array::Contiguous: - if (doesConversion() && (flags & NodeBytecodeUsesAsInt)) - return withConversion(Array::RageConvert); return *this; - - case Array::SelectUsingPredictions: + + case Array::Int8Array: + case Array::Int16Array: + case Array::Int32Array: + case Array::Uint8Array: + case Array::Uint8ClampedArray: + case Array::Uint16Array: + case Array::Uint32Array: + case Array::Float32Array: + case Array::Float64Array: + switch (node->op()) { + case PutByVal: + if (graph.hasExitSite(node->origin.semantic, OutOfBounds) || !isInBounds()) + return withSpeculation(Array::OutOfBounds); + return withSpeculation(Array::InBounds); + default: + return withSpeculation(Array::InBounds); + } + return *this; + case Array::Unprofiled: + case Array::SelectUsingPredictions: { base &= ~SpecOther; if (isStringSpeculation(base)) return withType(Array::String); - if (isArgumentsSpeculation(base)) - return withType(Array::Arguments); + if (isDirectArgumentsSpeculation(base) || isScopedArgumentsSpeculation(base)) { + // Handle out-of-bounds accesses as generic accesses. 
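Both the typed-array cases above and the arguments/array cases that follow pick their bounds speculation the same way. Sketched abstractly (hypothetical helper, not JSC code):

enum class BoundsSpeculation { InBounds, OutOfBounds };

// A prior OSR exit tagged OutOfBounds at this code origin, or an ArrayMode
// that is already not in-bounds, forces the conservative handling; otherwise
// the fast in-bounds path is kept.
static BoundsSpeculation chooseBoundsSpeculation(bool sawOutOfBoundsExit, bool currentlyInBounds)
{
    return (sawOutOfBoundsExit || !currentlyInBounds)
        ? BoundsSpeculation::OutOfBounds
        : BoundsSpeculation::InBounds;
}

int main()
{
    // Mirrors the PutByVal case: once an out-of-bounds exit was seen, stay conservative.
    return chooseBoundsSpeculation(true, true) == BoundsSpeculation::OutOfBounds ? 0 : 1;
}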
+ if (graph.hasExitSite(node->origin.semantic, OutOfBounds) || !isInBounds()) + return ArrayMode(Array::Generic); + + if (isDirectArgumentsSpeculation(base)) + return withType(Array::DirectArguments); + return withType(Array::ScopedArguments); + } + + ArrayMode result; + switch (node->op()) { + case PutByVal: + if (graph.hasExitSite(node->origin.semantic, OutOfBounds) || !isInBounds()) + result = withSpeculation(Array::OutOfBounds); + else + result = withSpeculation(Array::InBounds); + break; + + default: + result = withSpeculation(Array::InBounds); + break; + } if (isInt8ArraySpeculation(base)) - return withType(Array::Int8Array); + return result.withType(Array::Int8Array); if (isInt16ArraySpeculation(base)) - return withType(Array::Int16Array); + return result.withType(Array::Int16Array); if (isInt32ArraySpeculation(base)) - return withType(Array::Int32Array); + return result.withType(Array::Int32Array); if (isUint8ArraySpeculation(base)) - return withType(Array::Uint8Array); + return result.withType(Array::Uint8Array); if (isUint8ClampedArraySpeculation(base)) - return withType(Array::Uint8ClampedArray); + return result.withType(Array::Uint8ClampedArray); if (isUint16ArraySpeculation(base)) - return withType(Array::Uint16Array); + return result.withType(Array::Uint16Array); if (isUint32ArraySpeculation(base)) - return withType(Array::Uint32Array); + return result.withType(Array::Uint32Array); if (isFloat32ArraySpeculation(base)) - return withType(Array::Float32Array); + return result.withType(Array::Float32Array); if (isFloat64ArraySpeculation(base)) - return withType(Array::Float64Array); + return result.withType(Array::Float64Array); + if (type() == Array::Unprofiled) + return ArrayMode(Array::ForceExit); return ArrayMode(Array::Generic); + } default: return *this; @@ -242,6 +316,8 @@ Structure* ArrayMode::originalArrayStructure(Graph& graph, const CodeOrigin& cod return globalObject->originalArrayStructureForIndexingType(ArrayWithDouble); case Array::Contiguous: return globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous); + case Array::Undecided: + return globalObject->originalArrayStructureForIndexingType(ArrayWithUndecided); case Array::ArrayStorage: return globalObject->originalArrayStructureForIndexingType(ArrayWithArrayStorage); default: @@ -265,34 +341,57 @@ Structure* ArrayMode::originalArrayStructure(Graph& graph, const CodeOrigin& cod Structure* ArrayMode::originalArrayStructure(Graph& graph, Node* node) const { - return originalArrayStructure(graph, node->codeOrigin); + return originalArrayStructure(graph, node->origin.semantic); } -bool ArrayMode::alreadyChecked(Graph& graph, Node* node, AbstractValue& value, IndexingType shape) const +bool ArrayMode::alreadyChecked(Graph& graph, Node* node, const AbstractValue& value, IndexingType shape) const { switch (arrayClass()) { - case Array::OriginalArray: - return value.m_currentKnownStructure.hasSingleton() - && (value.m_currentKnownStructure.singleton()->indexingType() & IndexingShapeMask) == shape - && (value.m_currentKnownStructure.singleton()->indexingType() & IsArray) - && graph.globalObjectFor(node->codeOrigin)->isOriginalArrayStructure(value.m_currentKnownStructure.singleton()); + case Array::OriginalArray: { + if (value.m_structure.isTop()) + return false; + for (unsigned i = value.m_structure.size(); i--;) { + Structure* structure = value.m_structure[i]; + if ((structure->indexingType() & IndexingShapeMask) != shape) + return false; + if (!(structure->indexingType() & IsArray)) + return false; + if 
(!graph.globalObjectFor(node->origin.semantic)->isOriginalArrayStructure(structure)) + return false; + } + return true; + } - case Array::Array: + case Array::Array: { if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(shape | IsArray))) return true; - return value.m_currentKnownStructure.hasSingleton() - && (value.m_currentKnownStructure.singleton()->indexingType() & IndexingShapeMask) == shape - && (value.m_currentKnownStructure.singleton()->indexingType() & IsArray); + if (value.m_structure.isTop()) + return false; + for (unsigned i = value.m_structure.size(); i--;) { + Structure* structure = value.m_structure[i]; + if ((structure->indexingType() & IndexingShapeMask) != shape) + return false; + if (!(structure->indexingType() & IsArray)) + return false; + } + return true; + } - default: + default: { if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(shape) | asArrayModes(shape | IsArray))) return true; - return value.m_currentKnownStructure.hasSingleton() - && (value.m_currentKnownStructure.singleton()->indexingType() & IndexingShapeMask) == shape; - } + if (value.m_structure.isTop()) + return false; + for (unsigned i = value.m_structure.size(); i--;) { + Structure* structure = value.m_structure[i]; + if ((structure->indexingType() & IndexingShapeMask) != shape) + return false; + } + return true; + } } } -bool ArrayMode::alreadyChecked(Graph& graph, Node* node, AbstractValue& value) const +bool ArrayMode::alreadyChecked(Graph& graph, Node* node, const AbstractValue& value) const { switch (type()) { case Array::Generic: @@ -315,29 +414,50 @@ bool ArrayMode::alreadyChecked(Graph& graph, Node* node, AbstractValue& value) c case Array::ArrayStorage: return alreadyChecked(graph, node, value, ArrayStorageShape); + + case Array::Undecided: + return alreadyChecked(graph, node, value, UndecidedShape); case Array::SlowPutArrayStorage: switch (arrayClass()) { - case Array::OriginalArray: + case Array::OriginalArray: { CRASH(); return false; + } - case Array::Array: + case Array::Array: { if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(ArrayWithArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage))) return true; - return value.m_currentKnownStructure.hasSingleton() - && hasArrayStorage(value.m_currentKnownStructure.singleton()->indexingType()) - && (value.m_currentKnownStructure.singleton()->indexingType() & IsArray); + if (value.m_structure.isTop()) + return false; + for (unsigned i = value.m_structure.size(); i--;) { + Structure* structure = value.m_structure[i]; + if (!hasAnyArrayStorage(structure->indexingType())) + return false; + if (!(structure->indexingType() & IsArray)) + return false; + } + return true; + } - default: + default: { if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage))) return true; - return value.m_currentKnownStructure.hasSingleton() - && hasArrayStorage(value.m_currentKnownStructure.singleton()->indexingType()); - } + if (value.m_structure.isTop()) + return false; + for (unsigned i = value.m_structure.size(); i--;) { + Structure* structure = value.m_structure[i]; + if (!hasAnyArrayStorage(structure->indexingType())) + return false; + } + return true; + } } + + case Array::DirectArguments: + return speculationChecked(value.m_type, SpecDirectArguments); - case Array::Arguments: - return speculationChecked(value.m_type, SpecArguments); + case Array::ScopedArguments: + 
return speculationChecked(value.m_type, SpecScopedArguments); case Array::Int8Array: return speculationChecked(value.m_type, SpecInt8Array); @@ -365,10 +485,13 @@ bool ArrayMode::alreadyChecked(Graph& graph, Node* node, AbstractValue& value) c case Array::Float64Array: return speculationChecked(value.m_type, SpecFloat64Array); - + + case Array::AnyTypedArray: + return speculationChecked(value.m_type, SpecTypedArrayView); + case Array::SelectUsingPredictions: case Array::Unprofiled: - case Array::Undecided: + case Array::SelectUsingArguments: break; } @@ -381,6 +504,8 @@ const char* arrayTypeToString(Array::Type type) switch (type) { case Array::SelectUsingPredictions: return "SelectUsingPredictions"; + case Array::SelectUsingArguments: + return "SelectUsingArguments"; case Array::Unprofiled: return "Unprofiled"; case Array::Generic: @@ -401,8 +526,10 @@ const char* arrayTypeToString(Array::Type type) return "ArrayStorage"; case Array::SlowPutArrayStorage: return "SlowPutArrayStorage"; - case Array::Arguments: - return "Arguments"; + case Array::DirectArguments: + return "DirectArguments"; + case Array::ScopedArguments: + return "ScopedArguments"; case Array::Int8Array: return "Int8Array"; case Array::Int16Array: @@ -421,6 +548,8 @@ const char* arrayTypeToString(Array::Type type) return "Float32Array"; case Array::Float64Array: return "Float64Array"; + case Array::AnyTypedArray: + return "AnyTypedArray"; default: // Better to return something then it is to crash. Remember, this method // is being called from our main diagnostic tool, the IR dumper. It's like @@ -471,8 +600,6 @@ const char* arrayConversionToString(Array::Conversion conversion) return "AsIs"; case Array::Convert: return "Convert"; - case Array::RageConvert: - return "RageConvert"; default: return "Unknown!"; } @@ -517,6 +644,9 @@ TypedArrayType toTypedArrayType(Array::Type type) return TypeFloat32; case Array::Float64Array: return TypeFloat64; + case Array::AnyTypedArray: + RELEASE_ASSERT_NOT_REACHED(); + return NotTypedArray; default: return NotTypedArray; } @@ -548,6 +678,19 @@ Array::Type toArrayType(TypedArrayType type) } } +Array::Type refineTypedArrayType(Array::Type oldType, TypedArrayType newType) +{ + if (oldType == Array::Generic) + return oldType; + Array::Type newArrayType = toArrayType(newType); + if (newArrayType == Array::Generic) + return newArrayType; + + if (oldType != newArrayType) + return Array::AnyTypedArray; + return oldType; +} + bool permitsBoundsCheckLowering(Array::Type type) { switch (type) { @@ -563,6 +706,7 @@ bool permitsBoundsCheckLowering(Array::Type type) case Array::Uint32Array: case Array::Float32Array: case Array::Float64Array: + case Array::AnyTypedArray: return true; default: // These don't allow for bounds check lowering either because the bounds diff --git a/Source/JavaScriptCore/dfg/DFGArrayMode.h b/Source/JavaScriptCore/dfg/DFGArrayMode.h index cbb87bd93..a170fe6db 100644 --- a/Source/JavaScriptCore/dfg/DFGArrayMode.h +++ b/Source/JavaScriptCore/dfg/DFGArrayMode.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGArrayMode_h #define DFGArrayMode_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "ArrayProfile.h" @@ -55,6 +53,7 @@ enum Action { enum Type { SelectUsingPredictions, // Implies that we need predictions to decide. We will never get to the backend in this mode. + SelectUsingArguments, // Implies that we use the Node's arguments to decide. We will never get to the backend in this mode. Unprofiled, // Implies that array profiling didn't see anything. But that could be because the operands didn't comply with basic type assumptions (base is cell, property is int). This either becomes Generic or ForceExit depending on value profiling. ForceExit, // Implies that we have no idea how to execute this operation, so we should just give up. Generic, @@ -67,7 +66,9 @@ enum Type { ArrayStorage, SlowPutArrayStorage, - Arguments, + DirectArguments, + ScopedArguments, + Int8Array, Int16Array, Int32Array, @@ -76,7 +77,8 @@ enum Type { Uint16Array, Uint32Array, Float32Array, - Float64Array + Float64Array, + AnyTypedArray }; enum Class { @@ -89,14 +91,14 @@ enum Class { enum Speculation { SaneChain, // In bounds and the array prototype chain is still intact, i.e. loading a hole doesn't require special treatment. + InBounds, // In bounds and not loading a hole. ToHole, // Potentially storing to a hole. OutOfBounds // Out-of-bounds access and anything can happen. }; enum Conversion { AsIs, - Convert, - RageConvert + Convert }; } // namespace Array @@ -109,6 +111,7 @@ IndexingType toIndexingShape(Array::Type); TypedArrayType toTypedArrayType(Array::Type); Array::Type toArrayType(TypedArrayType); +Array::Type refineTypedArrayType(Array::Type, TypedArrayType); bool permitsBoundsCheckLowering(Array::Type); @@ -173,11 +176,15 @@ public: return ArrayMode(type(), arrayClass(), speculation, conversion()); } - ArrayMode withProfile(const ConcurrentJITLocker& locker, ArrayProfile* profile, bool makeSafe) const + ArrayMode withArrayClass(Array::Class arrayClass) const + { + return ArrayMode(type(), arrayClass, speculation(), conversion()); + } + + ArrayMode withSpeculationFromProfile(const ConcurrentJITLocker& locker, ArrayProfile* profile, bool makeSafe) const { Array::Speculation mySpeculation; - Array::Class myArrayClass; - + if (makeSafe) mySpeculation = Array::OutOfBounds; else if (profile->mayStoreToHole(locker)) @@ -185,6 +192,13 @@ public: else mySpeculation = Array::InBounds; + return withSpeculation(mySpeculation); + } + + ArrayMode withProfile(const ConcurrentJITLocker& locker, ArrayProfile* profile, bool makeSafe) const + { + Array::Class myArrayClass; + if (isJSArray()) { if (profile->usesOriginalArrayStructures(locker) && benefitsFromOriginalArray()) myArrayClass = Array::OriginalArray; @@ -193,7 +207,7 @@ public: } else myArrayClass = arrayClass(); - return ArrayMode(type(), myArrayClass, mySpeculation, conversion()); + return withArrayClass(myArrayClass).withSpeculationFromProfile(locker, profile, makeSafe); } ArrayMode withType(Array::Type type) const @@ -211,9 +225,9 @@ public: return ArrayMode(type, arrayClass(), speculation(), conversion); } - ArrayMode refine(SpeculatedType base, SpeculatedType index, SpeculatedType value = SpecNone, NodeFlags = 0) const; + ArrayMode refine(Graph&, Node*, SpeculatedType base, SpeculatedType index, SpeculatedType value = SpecNone) const; - bool alreadyChecked(Graph&, Node*, AbstractValue&) 
const; + bool alreadyChecked(Graph&, Node*, const AbstractValue&) const; void dump(PrintStream&) const; @@ -282,10 +296,13 @@ public: { switch (type()) { case Array::SelectUsingPredictions: + case Array::SelectUsingArguments: case Array::Unprofiled: + case Array::Undecided: case Array::ForceExit: case Array::Generic: - case Array::Arguments: + case Array::DirectArguments: + case Array::ScopedArguments: return false; default: return true; @@ -295,7 +312,6 @@ public: bool lengthNeedsStorage() const { switch (type()) { - case Array::Undecided: case Array::Int32: case Array::Double: case Array::Contiguous: @@ -311,11 +327,9 @@ public: { switch (type()) { case Array::String: + case Array::DirectArguments: + case Array::ScopedArguments: return ArrayMode(Array::Generic); -#if USE(JSVALUE32_64) - case Array::Arguments: - return ArrayMode(Array::Generic); -#endif default: return *this; } @@ -325,10 +339,10 @@ public: { switch (type()) { case Array::SelectUsingPredictions: + case Array::SelectUsingArguments: case Array::Unprofiled: case Array::ForceExit: case Array::Generic: - case Array::Undecided: return false; default: return true; @@ -342,6 +356,16 @@ public: case Array::Unprofiled: case Array::ForceExit: case Array::Generic: + // TypedArrays do not have a self length property as of ES6. + case Array::Int8Array: + case Array::Int16Array: + case Array::Int32Array: + case Array::Uint8Array: + case Array::Uint8ClampedArray: + case Array::Uint16Array: + case Array::Uint32Array: + case Array::Float32Array: + case Array::Float64Array: return false; case Array::Int32: case Array::Double: @@ -362,6 +386,7 @@ public: case Array::Int32: case Array::Double: case Array::Contiguous: + case Array::Undecided: case Array::ArrayStorage: return true; default: @@ -397,7 +422,7 @@ public: case Array::ArrayStorage: return arrayModesWithIndexingShape(ArrayStorageShape); case Array::SlowPutArrayStorage: - return arrayModesWithIndexingShape(SlowPutArrayStorageShape); + return arrayModesWithIndexingShapes(SlowPutArrayStorageShape, ArrayStorageShape); default: return asArrayModes(NonArray); } @@ -417,6 +442,11 @@ public: { return toTypedArrayType(type()); } + + bool isSomeTypedArrayView() const + { + return type() == Array::AnyTypedArray || isTypedView(typedArrayType()); + } bool operator==(const ArrayMode& other) const { @@ -453,7 +483,14 @@ private: } } - bool alreadyChecked(Graph&, Node*, AbstractValue&, IndexingType shape) const; + ArrayModes arrayModesWithIndexingShapes(IndexingType shape1, IndexingType shape2) const + { + ArrayModes arrayMode1 = arrayModesWithIndexingShape(shape1); + ArrayModes arrayMode2 = arrayModesWithIndexingShape(shape2); + return arrayMode1 | arrayMode2; + } + + bool alreadyChecked(Graph&, Node*, const AbstractValue&, IndexingType shape) const; union { struct { diff --git a/Source/JavaScriptCore/dfg/DFGArrayifySlowPathGenerator.h b/Source/JavaScriptCore/dfg/DFGArrayifySlowPathGenerator.h index 9c7d47a42..0c5dc2a93 100644 --- a/Source/JavaScriptCore/dfg/DFGArrayifySlowPathGenerator.h +++ b/Source/JavaScriptCore/dfg/DFGArrayifySlowPathGenerator.h @@ -26,8 +26,6 @@ #ifndef DFGArrayifySlowPathGenerator_h #define DFGArrayifySlowPathGenerator_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGArrayMode.h" @@ -103,10 +101,7 @@ protected: jit->callOperation(operationEnsureDouble, m_tempGPR, m_baseGPR); break; case Array::Contiguous: - if (m_arrayMode.conversion() == Array::RageConvert) - jit->callOperation(operationRageEnsureContiguous, m_tempGPR, m_baseGPR); - else - 
jit->callOperation(operationEnsureContiguous, m_tempGPR, m_baseGPR); + jit->callOperation(operationEnsureContiguous, m_tempGPR, m_baseGPR); break; case Array::ArrayStorage: case Array::SlowPutArrayStorage: @@ -118,27 +113,19 @@ protected: } for (unsigned i = m_plans.size(); i--;) jit->silentFill(m_plans[i], GPRInfo::regT0); + jit->m_jit.exceptionCheck(); if (m_op == ArrayifyToStructure) { ASSERT(m_structure); m_badIndexingTypeJump.fill( - jit, jit->m_jit.branchWeakPtr( - MacroAssembler::NotEqual, - MacroAssembler::Address(m_baseGPR, JSCell::structureOffset()), - m_structure)); + jit, jit->m_jit.branchWeakStructure(MacroAssembler::NotEqual, MacroAssembler::Address(m_baseGPR, JSCell::structureIDOffset()), m_structure)); } else { - // Alas, we need to reload the structure because silent spilling does not save - // temporaries. Nor would it be useful for it to do so. Either way we're talking - // about a load. - jit->m_jit.loadPtr( - MacroAssembler::Address(m_baseGPR, JSCell::structureOffset()), m_structureGPR); - // Finally, check that we have the kind of array storage that we wanted to get. // Note that this is a backwards speculation check, which will result in the // bytecode operation corresponding to this arrayification being reexecuted. // That's fine, since arrayification is not user-visible. jit->m_jit.load8( - MacroAssembler::Address(m_structureGPR, Structure::indexingTypeOffset()), m_structureGPR); + MacroAssembler::Address(m_baseGPR, JSCell::indexingTypeOffset()), m_structureGPR); m_badIndexingTypeJump.fill( jit, jit->jumpSlowForUnwantedArrayMode(m_structureGPR, m_arrayMode)); } diff --git a/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.cpp b/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.cpp index ca770681a..9d8c710de 100644 --- a/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.cpp +++ b/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.cpp @@ -28,12 +28,13 @@ #if ENABLE(DFG_JIT) -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { -AtTailAbstractState::AtTailAbstractState() - : m_block(0) +AtTailAbstractState::AtTailAbstractState(Graph& graph) + : m_graph(graph) + , m_block(0) { } @@ -47,7 +48,7 @@ void AtTailAbstractState::createValueForNode(Node* node) AbstractValue& AtTailAbstractState::forNode(Node* node) { HashMap<Node*, AbstractValue>::iterator iter = m_block->ssa->valuesAtTail.find(node); - ASSERT(iter != m_block->ssa->valuesAtTail.end()); + DFG_ASSERT(m_graph, node, iter != m_block->ssa->valuesAtTail.end()); return iter->value; } diff --git a/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.h b/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.h index a994bf8d6..cd6a08001 100644 --- a/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.h +++ b/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.h @@ -26,8 +26,6 @@ #ifndef DFGAtTailAbstractState_h #define DFGAtTailAbstractState_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGAbstractValue.h" @@ -38,7 +36,7 @@ namespace JSC { namespace DFG { class AtTailAbstractState { public: - AtTailAbstractState(); + AtTailAbstractState(Graph&); ~AtTailAbstractState(); @@ -56,14 +54,16 @@ public: bool isValid() { return m_block->cfaDidFinish; } + StructureClobberState structureClobberState() const { return m_block->cfaStructureClobberStateAtTail; } + void setDidClobber(bool) { } + void setStructureClobberState(StructureClobberState state) { RELEASE_ASSERT(state == m_block->cfaStructureClobberStateAtTail); } void setIsValid(bool isValid) { m_block->cfaDidFinish = isValid; } void 
setBranchDirection(BranchDirection) { } void setFoundConstants(bool) { } - bool haveStructures() const { return true; } // It's always safe to return true. - void setHaveStructures(bool) { } private: + Graph& m_graph; BasicBlock* m_block; }; diff --git a/Source/JavaScriptCore/dfg/DFGAvailability.cpp b/Source/JavaScriptCore/dfg/DFGAvailability.cpp index 669c2b439..0d998abda 100644 --- a/Source/JavaScriptCore/dfg/DFGAvailability.cpp +++ b/Source/JavaScriptCore/dfg/DFGAvailability.cpp @@ -29,6 +29,7 @@ #if ENABLE(DFG_JIT) #include "DFGNode.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGAvailability.h b/Source/JavaScriptCore/dfg/DFGAvailability.h index fd9bf6529..507d816aa 100644 --- a/Source/JavaScriptCore/dfg/DFGAvailability.h +++ b/Source/JavaScriptCore/dfg/DFGAvailability.h @@ -81,10 +81,26 @@ public: return withNode(unavailableMarker()); } + void setFlush(FlushedAt flushedAt) + { + m_flushedAt = flushedAt; + } + + void setNode(Node* node) + { + m_node = node; + } + + void setNodeUnavailable() + { + m_node = unavailableMarker(); + } + bool nodeIsUndecided() const { return !m_node; } bool nodeIsUnavailable() const { return m_node == unavailableMarker(); } bool hasNode() const { return !nodeIsUndecided() && !nodeIsUnavailable(); } + bool shouldUseNode() const { return !isFlushUseful() && hasNode(); } Node* node() const { @@ -94,6 +110,12 @@ public: } FlushedAt flushedAt() const { return m_flushedAt; } + bool isFlushUseful() const + { + return flushedAt().format() != DeadFlush && flushedAt().format() != ConflictingFlush; + } + + bool isDead() const { return !isFlushUseful() && !hasNode(); } bool operator!() const { return nodeIsUnavailable() && flushedAt().format() == ConflictingFlush; } @@ -103,6 +125,11 @@ public: && m_flushedAt == other.m_flushedAt; } + bool operator!=(const Availability& other) const + { + return !(*this == other); + } + Availability merge(const Availability& other) const { return Availability( diff --git a/Source/JavaScriptCore/dfg/DFGAvailabilityMap.cpp b/Source/JavaScriptCore/dfg/DFGAvailabilityMap.cpp new file mode 100644 index 000000000..e319dc6f2 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGAvailabilityMap.cpp @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGAvailabilityMap.h" + +#if ENABLE(DFG_JIT) + +#include "DFGGraph.h" +#include "JSCInlines.h" +#include "OperandsInlines.h" +#include <wtf/ListDump.h> + +namespace JSC { namespace DFG { + +void AvailabilityMap::pruneHeap() +{ + if (m_heap.isEmpty()) + return; + + HashSet<Node*> possibleNodes; + + for (unsigned i = m_locals.size(); i--;) { + if (m_locals[i].hasNode()) + possibleNodes.add(m_locals[i].node()); + } + + closeOverNodes( + [&] (Node* node) -> bool { + return possibleNodes.contains(node); + }, + [&] (Node* node) -> bool { + return possibleNodes.add(node).isNewEntry; + }); + + HashMap<PromotedHeapLocation, Availability> newHeap; + for (auto pair : m_heap) { + if (possibleNodes.contains(pair.key.base())) + newHeap.add(pair.key, pair.value); + } + m_heap = newHeap; +} + +void AvailabilityMap::pruneByLiveness(Graph& graph, CodeOrigin where) +{ + Operands<Availability> localsCopy(OperandsLike, m_locals); + graph.forAllLiveInBytecode( + where, + [&] (VirtualRegister reg) { + localsCopy.operand(reg) = m_locals.operand(reg); + }); + m_locals = localsCopy; + pruneHeap(); +} + +void AvailabilityMap::clear() +{ + m_locals.fill(Availability()); + m_heap.clear(); +} + +void AvailabilityMap::dump(PrintStream& out) const +{ + out.print("{locals = ", m_locals, "; heap = ", mapDump(m_heap), "}"); +} + +bool AvailabilityMap::operator==(const AvailabilityMap& other) const +{ + return m_locals == other.m_locals + && m_heap == other.m_heap; +} + +void AvailabilityMap::merge(const AvailabilityMap& other) +{ + for (unsigned i = other.m_locals.size(); i--;) + m_locals[i] = other.m_locals[i].merge(m_locals[i]); + + for (auto pair : other.m_heap) { + auto result = m_heap.add(pair.key, Availability()); + result.iterator->value = pair.value.merge(result.iterator->value); + } +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGAvailabilityMap.h b/Source/JavaScriptCore/dfg/DFGAvailabilityMap.h new file mode 100644 index 000000000..1cdd25b3d --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGAvailabilityMap.h @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
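Stepping outside the patch for a moment: the AvailabilityMap added above prunes its promoted-heap entries by a transitive closure. Starting from nodes available in locals, closeOverNodes() keeps adding nodes whose heap availability hangs off an already-reachable base until a pass makes no further progress, and pruneHeap() then drops every heap entry whose base never became reachable. A minimal sketch of that closure pattern over plain standard containers follows; an int stands in for Node* and a pair stands in for PromotedHeapLocation, and these stand-ins are illustrative only.

```cpp
#include <cstdio>
#include <map>
#include <set>
#include <utility>

// Hypothetical stand-ins: an int plays the role of Node*, and a
// (base node, field index) pair plays the role of PromotedHeapLocation.
using NodeId = int;
using HeapLocation = std::pair<NodeId, int>;

// Keep iterating until no new node is discovered, mirroring the do/while
// fixpoint loop in AvailabilityMap::closeOverNodes().
static void closeOverNodes(
    const std::map<HeapLocation, NodeId>& heap, std::set<NodeId>& reachable)
{
    bool changed;
    do {
        changed = false;
        for (const auto& entry : heap) {
            NodeId base = entry.first.first;
            NodeId value = entry.second;
            if (reachable.count(base) && reachable.insert(value).second)
                changed = true;
        }
    } while (changed);
}

int main()
{
    std::set<NodeId> reachable { 1 };    // available in locals
    std::map<HeapLocation, NodeId> heap {
        { { 1, 0 }, 2 },                 // field 0 of node 1 holds node 2
        { { 2, 0 }, 3 },                 // field 0 of node 2 holds node 3
        { { 9, 0 }, 4 },                 // base 9 is never reachable
    };
    closeOverNodes(heap, reachable);
    std::printf("reachable nodes: %zu\n", reachable.size()); // prints 3
    return 0;
}
```

pruneByLiveness() does the corresponding filtering on the locals side (keeping only operands live in bytecode) before re-running this closure, and merge() combines two maps pointwise over locals and by key over the heap.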
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGAvailabilityMap_h +#define DFGAvailabilityMap_h + +#if ENABLE(DFG_JIT) + +#include "DFGAvailability.h" +#include "DFGPromotedHeapLocation.h" + +namespace JSC { namespace DFG { + +struct AvailabilityMap { + void pruneHeap(); + void pruneByLiveness(Graph&, CodeOrigin); + void clear(); + + void dump(PrintStream& out) const; + + bool operator==(const AvailabilityMap& other) const; + + void merge(const AvailabilityMap& other); + + template<typename Functor> + void forEachAvailability(const Functor& functor) + { + for (unsigned i = m_locals.size(); i--;) + functor(m_locals[i]); + for (auto pair : m_heap) + functor(pair.value); + } + + template<typename HasFunctor, typename AddFunctor> + void closeOverNodes(const HasFunctor& has, const AddFunctor& add) + { + bool changed; + do { + changed = false; + for (auto pair : m_heap) { + if (pair.value.hasNode() && has(pair.key.base())) + changed |= add(pair.value.node()); + } + } while (changed); + } + + template<typename HasFunctor, typename AddFunctor> + void closeStartingWithLocal(VirtualRegister reg, const HasFunctor& has, const AddFunctor& add) + { + Availability availability = m_locals.operand(reg); + if (!availability.hasNode()) + return; + + if (!add(availability.node())) + return; + + closeOverNodes(has, add); + } + + Operands<Availability> m_locals; + HashMap<PromotedHeapLocation, Availability> m_heap; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGAvailabilityMap_h + diff --git a/Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.cpp b/Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.cpp index a7ef96d70..bf651886e 100644 --- a/Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,7 +31,7 @@ #include "DFGBasicBlockInlines.h" #include "DFGGraph.h" #include "DFGPhase.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -44,17 +44,21 @@ public: bool run() { - for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) - continue; - - // Prevent a tower of overflowing additions from creating a value that is out of the - // safe 2^48 range. - m_allowNestedOverflowingAdditions = block->size() < (1 << 16); - - for (unsigned indexInBlock = block->size(); indexInBlock--;) - propagate(block->at(indexInBlock)); + m_changed = true; + while (m_changed) { + m_changed = false; + for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { + BasicBlock* block = m_graph.block(blockIndex); + if (!block) + continue; + + // Prevent a tower of overflowing additions from creating a value that is out of the + // safe 2^48 range. 
+ m_allowNestedOverflowingAdditions = block->size() < (1 << 16); + + for (unsigned indexInBlock = block->size(); indexInBlock--;) + propagate(block->at(indexInBlock)); + } } return true; @@ -63,17 +67,17 @@ public: private: bool isNotNegZero(Node* node) { - if (!m_graph.isNumberConstant(node)) + if (!node->isNumberConstant()) return false; - double value = m_graph.valueOfNumberConstant(node); + double value = node->asNumber(); return (value || 1.0 / value > 0.0); } bool isNotPosZero(Node* node) { - if (!m_graph.isNumberConstant(node)) + if (!node->isNumberConstant()) return false; - double value = m_graph.valueOfNumberConstant(node); + double value = node->asNumber(); return (value || 1.0 / value < 0.0); } @@ -81,7 +85,7 @@ private: template<int power> bool isWithinPowerOfTwoForConstant(Node* node) { - JSValue immediateValue = node->valueOfJSConstant(codeBlock()); + JSValue immediateValue = node->asJSValue(); if (!immediateValue.isNumber()) return false; double immediate = immediateValue.asNumber(); @@ -91,7 +95,7 @@ private: template<int power> bool isWithinPowerOfTwoNonRecursive(Node* node) { - if (node->op() != JSConstant) + if (!node->isNumberConstant()) return false; return isWithinPowerOfTwoForConstant<power>(node); } @@ -100,7 +104,9 @@ private: bool isWithinPowerOfTwo(Node* node) { switch (node->op()) { - case JSConstant: { + case DoubleConstant: + case JSConstant: + case Int52Constant: { return isWithinPowerOfTwoForConstant<power>(node); } @@ -124,9 +130,9 @@ private: return true; Node* shiftAmount = node->child2().node(); - if (shiftAmount->op() != JSConstant) + if (!node->isNumberConstant()) return false; - JSValue immediateValue = shiftAmount->valueOfJSConstant(codeBlock()); + JSValue immediateValue = shiftAmount->asJSValue(); if (!immediateValue.isInt32()) return false; return immediateValue.asInt32() > 32 - power; @@ -174,7 +180,8 @@ private: switch (node->op()) { case GetLocal: { VariableAccessData* variableAccessData = node->variableAccessData(); - variableAccessData->mergeFlags(flags); + flags &= ~NodeBytecodeUsesAsInt; // We don't care about cross-block uses-as-int. + m_changed |= variableAccessData->mergeFlags(flags); break; } @@ -182,7 +189,16 @@ private: VariableAccessData* variableAccessData = node->variableAccessData(); if (!variableAccessData->isLoadedFrom()) break; - node->child1()->mergeFlags(NodeBytecodeUsesAsValue); + flags = variableAccessData->flags(); + RELEASE_ASSERT(!(flags & ~NodeBytecodeBackPropMask)); + flags |= NodeBytecodeUsesAsNumber; // Account for the fact that control flow may cause overflows that our modeling can't handle. 
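The isNotNegZero() and isNotPosZero() helpers above rely on a standard IEEE-754 trick: -0.0 and +0.0 compare equal, but dividing 1.0 by the value yields an infinity whose sign still remembers which zero it was. The phase uses this to prove that a constant operand of ArithAdd or ArithSub cannot be the problematic zero, which lets it drop NodeBytecodeNeedsNegZero from the other operand's flags. A small self-contained check of the trick, assuming IEEE-754 double semantics:

```cpp
#include <cassert>
#include <cmath>

// The same trick isNotNegZero() uses: -0.0 and +0.0 compare equal, but
// 1.0 / -0.0 is -infinity while 1.0 / +0.0 is +infinity (IEEE-754 division
// by zero), so the sign of the reciprocal tells the two zeros apart.
static bool isNotNegativeZero(double value)
{
    return value || 1.0 / value > 0.0;
}

int main()
{
    assert(isNotNegativeZero(0.0));     // +0.0 passes
    assert(isNotNegativeZero(5.0));     // any nonzero value passes
    assert(!isNotNegativeZero(-0.0));   // -0.0 is the one value that fails
    assert(std::signbit(-0.0));         // equivalent check via signbit
    return 0;
}
```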
+ node->child1()->mergeFlags(flags); + break; + } + + case Flush: { + VariableAccessData* variableAccessData = node->variableAccessData(); + m_changed |= variableAccessData->mergeFlags(NodeBytecodeUsesAsValue); break; } @@ -199,6 +215,7 @@ private: case ArithIMul: { flags |= NodeBytecodeUsesAsInt; flags &= ~(NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeUsesAsOther); + flags &= ~NodeBytecodeUsesAsArrayIndex; node->child1()->mergeFlags(flags); node->child2()->mergeFlags(flags); break; @@ -206,11 +223,10 @@ private: case StringCharCodeAt: { node->child1()->mergeFlags(NodeBytecodeUsesAsValue); - node->child2()->mergeFlags(NodeBytecodeUsesAsValue | NodeBytecodeUsesAsInt); + node->child2()->mergeFlags(NodeBytecodeUsesAsValue | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex); break; } - case Identity: case UInt32ToNumber: { node->child1()->mergeFlags(flags); break; @@ -230,7 +246,7 @@ private: node->child2()->mergeFlags(flags); break; } - + case ArithAdd: { if (isNotNegZero(node->child1().node()) || isNotNegZero(node->child2().node())) flags &= ~NodeBytecodeNeedsNegZero; @@ -243,7 +259,14 @@ private: node->child2()->mergeFlags(flags); break; } - + + case ArithClz32: { + flags &= ~(NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeUsesAsOther | ~NodeBytecodeUsesAsArrayIndex); + flags |= NodeBytecodeUsesAsInt; + node->child1()->mergeFlags(flags); + break; + } + case ArithSub: { if (isNotNegZero(node->child1().node()) || isNotPosZero(node->child2().node())) flags &= ~NodeBytecodeNeedsNegZero; @@ -296,27 +319,22 @@ private: } case ArithMod: { - flags |= NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero; + flags |= NodeBytecodeUsesAsNumber; flags &= ~NodeBytecodeUsesAsOther; node->child1()->mergeFlags(flags); - node->child2()->mergeFlags(flags); + node->child2()->mergeFlags(flags & ~NodeBytecodeNeedsNegZero); break; } case GetByVal: { node->child1()->mergeFlags(NodeBytecodeUsesAsValue); - node->child2()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt); - break; - } - - case GetMyArgumentByValSafe: { - node->child1()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt); + node->child2()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex); break; } case NewArrayWithSize: { - node->child1()->mergeFlags(NodeBytecodeUsesAsValue | NodeBytecodeUsesAsInt); + node->child1()->mergeFlags(NodeBytecodeUsesAsValue | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex); break; } @@ -324,17 +342,18 @@ private: // Negative zero is not observable. NaN versus undefined are only observable // in that you would get a different exception message. So, like, whatever: we // claim here that NaN v. undefined is observable. 
- node->child1()->mergeFlags(NodeBytecodeUsesAsInt | NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther); + node->child1()->mergeFlags(NodeBytecodeUsesAsInt | NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsArrayIndex); break; } case StringCharAt: { node->child1()->mergeFlags(NodeBytecodeUsesAsValue); - node->child2()->mergeFlags(NodeBytecodeUsesAsValue | NodeBytecodeUsesAsInt); + node->child2()->mergeFlags(NodeBytecodeUsesAsValue | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex); break; } - case ToString: { + case ToString: + case CallStringConstructor: { node->child1()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther); break; } @@ -347,7 +366,7 @@ private: case PutByValDirect: case PutByVal: { m_graph.varArgChild(node, 0)->mergeFlags(NodeBytecodeUsesAsValue); - m_graph.varArgChild(node, 1)->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt); + m_graph.varArgChild(node, 1)->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex); m_graph.varArgChild(node, 2)->mergeFlags(NodeBytecodeUsesAsValue); break; } @@ -375,9 +394,19 @@ private: // then -0 and 0 are treated the same. node->child1()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther); break; + case SwitchCell: + // There is currently no point to being clever here since this is used for switching + // on objects. + mergeDefaultFlags(node); + break; } break; } + + case Identity: + // This would be trivial to handle but we just assert that we cannot see these yet. + RELEASE_ASSERT_NOT_REACHED(); + break; // Note: ArithSqrt, ArithSin, and ArithCos and other math intrinsics don't have special // rules in here because they are always followed by Phantoms to signify that if the @@ -392,6 +421,7 @@ private: } bool m_allowNestedOverflowingAdditions; + bool m_changed; }; bool performBackwardsPropagation(Graph& graph) diff --git a/Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.h b/Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.h index 438684657..47e71919b 100644 --- a/Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.h +++ b/Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.h @@ -26,8 +26,6 @@ #ifndef DFGBackwardsPropagationPhase_h #define DFGBackwardsPropagationPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGCommon.h" diff --git a/Source/JavaScriptCore/dfg/DFGBasicBlock.cpp b/Source/JavaScriptCore/dfg/DFGBasicBlock.cpp index 07a972633..383ee4b9a 100644 --- a/Source/JavaScriptCore/dfg/DFGBasicBlock.cpp +++ b/Source/JavaScriptCore/dfg/DFGBasicBlock.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,11 +28,12 @@ #if ENABLE(DFG_JIT) -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { -BasicBlock::BasicBlock(unsigned bytecodeBegin, unsigned numArguments, unsigned numLocals) +BasicBlock::BasicBlock( + unsigned bytecodeBegin, unsigned numArguments, unsigned numLocals, float executionCount) : bytecodeBegin(bytecodeBegin) , index(NoBlock) , isOSRTarget(false) @@ -40,6 +41,8 @@ BasicBlock::BasicBlock(unsigned bytecodeBegin, unsigned numArguments, unsigned n , cfaShouldRevisit(false) , cfaFoundConstants(false) , cfaDidFinish(true) + , cfaStructureClobberStateAtHead(StructuresAreWatched) + , cfaStructureClobberStateAtTail(StructuresAreWatched) , cfaBranchDirection(InvalidBranchDirection) #if !ASSERT_DISABLED , isLinked(false) @@ -49,10 +52,15 @@ BasicBlock::BasicBlock(unsigned bytecodeBegin, unsigned numArguments, unsigned n , variablesAtTail(numArguments, numLocals) , valuesAtHead(numArguments, numLocals) , valuesAtTail(numArguments, numLocals) + , intersectionOfPastValuesAtHead(numArguments, numLocals, AbstractValue::fullTop()) + , intersectionOfCFAHasVisited(true) + , executionCount(executionCount) { } -BasicBlock::~BasicBlock() { } +BasicBlock::~BasicBlock() +{ +} void BasicBlock::ensureLocals(unsigned newNumLocals) { @@ -60,6 +68,20 @@ void BasicBlock::ensureLocals(unsigned newNumLocals) variablesAtTail.ensureLocals(newNumLocals); valuesAtHead.ensureLocals(newNumLocals); valuesAtTail.ensureLocals(newNumLocals); + intersectionOfPastValuesAtHead.ensureLocals(newNumLocals, AbstractValue::fullTop()); +} + +void BasicBlock::replaceTerminal(Node* node) +{ + NodeAndIndex result = findTerminal(); + if (!result) + append(node); + else { + m_nodes.insert(result.index + 1, node); + result.node->remove(); + } + + ASSERT(terminal()); } bool BasicBlock::isInPhis(Node* node) const @@ -109,11 +131,9 @@ void BasicBlock::dump(PrintStream& out) const } BasicBlock::SSAData::SSAData(BasicBlock* block) - : flushAtHead(OperandsLike, block->variablesAtHead) - , flushAtTail(OperandsLike, block->variablesAtHead) - , availabilityAtHead(OperandsLike, block->variablesAtHead) - , availabilityAtTail(OperandsLike, block->variablesAtHead) { + availabilityAtHead.m_locals = Operands<Availability>(OperandsLike, block->variablesAtHead); + availabilityAtTail.m_locals = Operands<Availability>(OperandsLike, block->variablesAtHead); } BasicBlock::SSAData::~SSAData() { } diff --git a/Source/JavaScriptCore/dfg/DFGBasicBlock.h b/Source/JavaScriptCore/dfg/DFGBasicBlock.h index a3a801227..f6ce18d7c 100644 --- a/Source/JavaScriptCore/dfg/DFGBasicBlock.h +++ b/Source/JavaScriptCore/dfg/DFGBasicBlock.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,14 +30,15 @@ #include "DFGAbstractValue.h" #include "DFGAvailability.h" +#include "DFGAvailabilityMap.h" #include "DFGBranchDirection.h" #include "DFGFlushedAt.h" #include "DFGNode.h" -#include "DFGVariadicFunction.h" +#include "DFGNodeOrigin.h" +#include "DFGStructureClobberState.h" #include "Operands.h" #include <wtf/HashMap.h> #include <wtf/HashSet.h> -#include <wtf/OwnPtr.h> #include <wtf/Vector.h> namespace JSC { namespace DFG { @@ -46,9 +47,12 @@ class Graph; class InsertionSet; typedef Vector<BasicBlock*, 2> PredecessorList; +typedef Vector<Node*, 8> BlockNodeList; struct BasicBlock : RefCounted<BasicBlock> { - BasicBlock(unsigned bytecodeBegin, unsigned numArguments, unsigned numLocals); + BasicBlock( + unsigned bytecodeBegin, unsigned numArguments, unsigned numLocals, + float executionCount); ~BasicBlock(); void ensureLocals(unsigned newNumLocals); @@ -57,19 +61,75 @@ struct BasicBlock : RefCounted<BasicBlock> { bool isEmpty() const { return !size(); } Node*& at(size_t i) { return m_nodes[i]; } Node* at(size_t i) const { return m_nodes[i]; } + Node* tryAt(size_t i) const + { + if (i >= size()) + return nullptr; + return at(i); + } Node*& operator[](size_t i) { return at(i); } Node* operator[](size_t i) const { return at(i); } - Node* last() const { return at(size() - 1); } + + // Use this to find both the index of the terminal and the terminal itself in one go. May + // return a clear NodeAndIndex if the basic block currently lacks a terminal. That may happen + // in the middle of IR transformations within a phase but should never be the case in between + // phases. + // + // The reason why this is more than just "at(size() - 1)" is that we may place non-terminal + // liveness marking instructions after the terminal. This is supposed to happen infrequently + // but some basic blocks - most notably return blocks - will have liveness markers for all of + // the flushed variables right after the return. + // + // It turns out that doing this linear search is basically perf-neutral, so long as we force + // the method to be inlined. Hence the ALWAYS_INLINE. + ALWAYS_INLINE NodeAndIndex findTerminal() const + { + size_t i = size(); + while (i--) { + Node* node = at(i); + switch (node->op()) { + case Jump: + case Branch: + case Switch: + case Return: + case TailCall: + case TailCallVarargs: + case TailCallForwardVarargs: + case Unreachable: + return NodeAndIndex(node, i); + // The bitter end can contain Phantoms and the like. There will probably only be one or two nodes after the terminal. They are all no-ops and will not have any checked children. + case Check: // This is here because it's our universal no-op. 
+ case Phantom: + case PhantomLocal: + case Flush: + break; + default: + return NodeAndIndex(); + } + } + return NodeAndIndex(); + } + + ALWAYS_INLINE Node* terminal() const + { + return findTerminal().node; + } + void resize(size_t size) { m_nodes.resize(size); } void grow(size_t size) { m_nodes.grow(size); } void append(Node* node) { m_nodes.append(node); } - void insertBeforeLast(Node* node) + void insertBeforeTerminal(Node* node) { - append(last()); - at(size() - 2) = node; + NodeAndIndex result = findTerminal(); + if (!result) + append(node); + else + m_nodes.insert(result.index, node); } + void replaceTerminal(Node*); + size_t numNodes() const { return phis.size() + size(); } Node* node(size_t i) const { @@ -82,32 +142,46 @@ struct BasicBlock : RefCounted<BasicBlock> { bool isInPhis(Node* node) const; bool isInBlock(Node* myNode) const; - unsigned numSuccessors() { return last()->numSuccessors(); } + BlockNodeList::iterator begin() { return m_nodes.begin(); } + BlockNodeList::iterator end() { return m_nodes.end(); } + + unsigned numSuccessors() { return terminal()->numSuccessors(); } BasicBlock*& successor(unsigned index) { - return last()->successor(index); + return terminal()->successor(index); } BasicBlock*& successorForCondition(bool condition) { - return last()->successorForCondition(condition); + return terminal()->successorForCondition(condition); + } + + Node::SuccessorsIterable successors() + { + return terminal()->successors(); } void removePredecessor(BasicBlock* block); void replacePredecessor(BasicBlock* from, BasicBlock* to); -#define DFG_DEFINE_APPEND_NODE(templatePre, templatePost, typeParams, valueParamsComma, valueParams, valueArgs) \ - templatePre typeParams templatePost Node* appendNode(Graph&, SpeculatedType valueParamsComma valueParams); - DFG_VARIADIC_TEMPLATE_FUNCTION(DFG_DEFINE_APPEND_NODE) -#undef DFG_DEFINE_APPEND_NODE + template<typename... Params> + Node* appendNode(Graph&, SpeculatedType, Params...); -#define DFG_DEFINE_APPEND_NODE(templatePre, templatePost, typeParams, valueParamsComma, valueParams, valueArgs) \ - templatePre typeParams templatePost Node* appendNonTerminal(Graph&, SpeculatedType valueParamsComma valueParams); - DFG_VARIADIC_TEMPLATE_FUNCTION(DFG_DEFINE_APPEND_NODE) -#undef DFG_DEFINE_APPEND_NODE + template<typename... Params> + Node* appendNonTerminal(Graph&, SpeculatedType, Params...); + + template<typename... Params> + Node* replaceTerminal(Graph&, SpeculatedType, Params...); void dump(PrintStream& out) const; + void didLink() + { +#if !ASSERT_DISABLED + isLinked = true; +#endif + } + // This value is used internally for block linking and OSR entry. It is mostly meaningless // for other purposes due to inlining. unsigned bytecodeBegin; @@ -119,6 +193,8 @@ struct BasicBlock : RefCounted<BasicBlock> { bool cfaShouldRevisit; bool cfaFoundConstants; bool cfaDidFinish; + StructureClobberState cfaStructureClobberStateAtHead; + StructureClobberState cfaStructureClobberStateAtTail; BranchDirection cfaBranchDirection; #if !ASSERT_DISABLED bool isLinked; @@ -128,36 +204,62 @@ struct BasicBlock : RefCounted<BasicBlock> { Vector<Node*> phis; PredecessorList predecessors; - Operands<Node*, NodePointerTraits> variablesAtHead; - Operands<Node*, NodePointerTraits> variablesAtTail; + Operands<Node*> variablesAtHead; + Operands<Node*> variablesAtTail; Operands<AbstractValue> valuesAtHead; Operands<AbstractValue> valuesAtTail; + // The intersection of assumptions we have made previously at the head of this block. 
Note + // that under normal circumstances, each time we run the CFA, we will get strictly more precise + // results. But we don't actually require this to be the case. It's fine for the CFA to loosen + // up for any odd reason. It's fine when this happens, because anything that the CFA proves + // must be true from that point forward, except if some registered watchpoint fires, in which + // case the code won't ever run. So, the CFA proving something less precise later on is just an + // outcome of the CFA being imperfect; the more precise thing that it had proved earlier is no + // less true. + // + // But for the purpose of OSR entry, we need to make sure that we remember what assumptions we + // had used for optimizing any given basic block. That's what this is for. + // + // It's interesting that we could use this to make the CFA more precise: all future CFAs could + // filter their results with this thing to sort of maintain maximal precision. Because we + // expect CFA to usually be monotonically more precise each time we run it to fixpoint, this + // would not be a productive optimization: it would make setting up a basic block more + // expensive and would only benefit bizarre pathological cases. + Operands<AbstractValue> intersectionOfPastValuesAtHead; + bool intersectionOfCFAHasVisited; + + float executionCount; + // These fields are reserved for NaturalLoops. static const unsigned numberOfInnerMostLoopIndices = 2; unsigned innerMostLoopIndices[numberOfInnerMostLoopIndices]; struct SSAData { - Operands<FlushedAt> flushAtHead; - Operands<FlushedAt> flushAtTail; - Operands<Availability> availabilityAtHead; - Operands<Availability> availabilityAtTail; - HashSet<Node*> liveAtHead; + WTF_MAKE_FAST_ALLOCATED; + public: + AvailabilityMap availabilityAtHead; + AvailabilityMap availabilityAtTail; + + bool liveAtTailIsDirty { false }; HashSet<Node*> liveAtTail; + HashSet<Node*> liveAtHead; HashMap<Node*, AbstractValue> valuesAtHead; HashMap<Node*, AbstractValue> valuesAtTail; SSAData(BasicBlock*); ~SSAData(); }; - OwnPtr<SSAData> ssa; - + std::unique_ptr<SSAData> ssa; + private: friend class InsertionSet; - Vector<Node*, 8> m_nodes; + BlockNodeList m_nodes; }; +typedef Vector<BasicBlock*, 5> BlockList; + struct UnlinkedBlock { BasicBlock* m_block; bool m_needsNormalLinking; diff --git a/Source/JavaScriptCore/dfg/DFGBasicBlockInlines.h b/Source/JavaScriptCore/dfg/DFGBasicBlockInlines.h index 7f9e38af4..3423a0db3 100644 --- a/Source/JavaScriptCore/dfg/DFGBasicBlockInlines.h +++ b/Source/JavaScriptCore/dfg/DFGBasicBlockInlines.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -33,25 +33,29 @@ namespace JSC { namespace DFG { -#define DFG_DEFINE_APPEND_NODE(templatePre, templatePost, typeParams, valueParamsComma, valueParams, valueArgs) \ - templatePre typeParams templatePost inline Node* BasicBlock::appendNode(Graph& graph, SpeculatedType type valueParamsComma valueParams) \ - { \ - Node* result = graph.addNode(type valueParamsComma valueArgs); \ - append(result); \ - return result; \ - } - DFG_VARIADIC_TEMPLATE_FUNCTION(DFG_DEFINE_APPEND_NODE) -#undef DFG_DEFINE_APPEND_NODE - -#define DFG_DEFINE_APPEND_NODE(templatePre, templatePost, typeParams, valueParamsComma, valueParams, valueArgs) \ - templatePre typeParams templatePost inline Node* BasicBlock::appendNonTerminal(Graph& graph, SpeculatedType type valueParamsComma valueParams) \ - { \ - Node* result = graph.addNode(type valueParamsComma valueArgs); \ - insertBeforeLast(result); \ - return result; \ - } - DFG_VARIADIC_TEMPLATE_FUNCTION(DFG_DEFINE_APPEND_NODE) -#undef DFG_DEFINE_APPEND_NODE +template<typename... Params> +Node* BasicBlock::appendNode(Graph& graph, SpeculatedType type, Params... params) +{ + Node* result = graph.addNode(type, params...); + append(result); + return result; +} + +template<typename... Params> +Node* BasicBlock::appendNonTerminal(Graph& graph, SpeculatedType type, Params... params) +{ + Node* result = graph.addNode(type, params...); + insertBeforeTerminal(result); + return result; +} + +template<typename... Params> +Node* BasicBlock::replaceTerminal(Graph& graph, SpeculatedType type, Params... params) +{ + Node* result = graph.addNode(type, params...); + replaceTerminal(result); + return result; +} } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGBinarySwitch.cpp b/Source/JavaScriptCore/dfg/DFGBinarySwitch.cpp deleted file mode 100644 index 7c35cc155..000000000 --- a/Source/JavaScriptCore/dfg/DFGBinarySwitch.cpp +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Copyright (C) 2013 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "config.h" -#include "DFGBinarySwitch.h" - -#if ENABLE(DFG_JIT) - -namespace JSC { namespace DFG { - -BinarySwitch::BinarySwitch(GPRReg value, const Vector<int64_t>& cases, Type type) - : m_value(value) - , m_index(0) - , m_caseIndex(UINT_MAX) - , m_medianBias(0) - , m_type(type) -{ - if (cases.isEmpty()) - return; - - for (unsigned i = 0; i < cases.size(); ++i) - m_cases.append(Case(cases[i], i)); - std::sort(m_cases.begin(), m_cases.end()); - build(0, m_cases.size()); -} - -bool BinarySwitch::advance(MacroAssembler& jit) -{ - if (m_cases.isEmpty()) { - m_fallThrough.append(jit.jump()); - return false; - } - - if (m_index == m_branches.size()) { - RELEASE_ASSERT(m_jumpStack.isEmpty()); - return false; - } - - for (;;) { - const BranchCode& code = m_branches[m_index++]; - switch (code.kind) { - case NotEqualToFallThrough: - switch (m_type) { - case Int32: - m_fallThrough.append(jit.branch32( - MacroAssembler::NotEqual, m_value, - MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value)))); - break; - case IntPtr: - m_fallThrough.append(jit.branchPtr( - MacroAssembler::NotEqual, m_value, - MacroAssembler::ImmPtr(bitwise_cast<const void*>(static_cast<intptr_t>(m_cases[code.index].value))))); - break; - } - break; - case NotEqualToPush: - switch (m_type) { - case Int32: - m_jumpStack.append(jit.branch32( - MacroAssembler::NotEqual, m_value, - MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value)))); - break; - case IntPtr: - m_jumpStack.append(jit.branchPtr( - MacroAssembler::NotEqual, m_value, - MacroAssembler::ImmPtr(bitwise_cast<const void*>(static_cast<intptr_t>(m_cases[code.index].value))))); - break; - } - break; - case LessThanToPush: - switch (m_type) { - case Int32: - m_jumpStack.append(jit.branch32( - MacroAssembler::LessThan, m_value, - MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value)))); - break; - case IntPtr: - m_jumpStack.append(jit.branchPtr( - MacroAssembler::LessThan, m_value, - MacroAssembler::ImmPtr(bitwise_cast<const void*>(static_cast<intptr_t>(m_cases[code.index].value))))); - break; - } - break; - case Pop: - m_jumpStack.takeLast().link(&jit); - break; - case ExecuteCase: - m_caseIndex = code.index; - return true; - } - } -} - -void BinarySwitch::build(unsigned start, unsigned end) -{ - unsigned size = end - start; - - switch (size) { - case 0: { - RELEASE_ASSERT_NOT_REACHED(); - break; - } - - case 1: { - if (start - && m_cases[start - 1].value == m_cases[start].value - 1 - && start + 1 < m_cases.size() - && m_cases[start + 1].value == m_cases[start].value + 1) { - m_branches.append(BranchCode(ExecuteCase, start)); - break; - } - - m_branches.append(BranchCode(NotEqualToFallThrough, start)); - m_branches.append(BranchCode(ExecuteCase, start)); - break; - } - - case 2: { - if (m_cases[start].value + 1 == m_cases[start + 1].value - && start - && m_cases[start - 1].value == m_cases[start].value - 1 - && start + 2 < m_cases.size() - && m_cases[start + 2].value == m_cases[start + 1].value + 1) { - m_branches.append(BranchCode(NotEqualToPush, start)); - m_branches.append(BranchCode(ExecuteCase, start)); - m_branches.append(BranchCode(Pop)); - m_branches.append(BranchCode(ExecuteCase, start + 1)); - break; - } - - unsigned firstCase = start; - unsigned secondCase = start + 1; - if (m_medianBias) - std::swap(firstCase, secondCase); - m_medianBias ^= 1; - - m_branches.append(BranchCode(NotEqualToPush, firstCase)); - m_branches.append(BranchCode(ExecuteCase, firstCase)); - m_branches.append(BranchCode(Pop)); - 
m_branches.append(BranchCode(NotEqualToFallThrough, secondCase)); - m_branches.append(BranchCode(ExecuteCase, secondCase)); - break; - } - - default: { - unsigned medianIndex = (start + end) / 2; - if (!(size & 1)) { - // Because end is exclusive, in the even case, this rounds up by - // default. Hence median bias sometimes flips to subtracing one - // in order to get round-down behavior. - medianIndex -= m_medianBias; - m_medianBias ^= 1; - } - - RELEASE_ASSERT(medianIndex > start); - RELEASE_ASSERT(medianIndex + 1 < end); - - m_branches.append(BranchCode(LessThanToPush, medianIndex)); - m_branches.append(BranchCode(NotEqualToPush, medianIndex)); - m_branches.append(BranchCode(ExecuteCase, medianIndex)); - - m_branches.append(BranchCode(Pop)); - build(medianIndex + 1, end); - - m_branches.append(BranchCode(Pop)); - build(start, medianIndex); - break; - } } -} - -} } // namespace JSC::DFG - -#endif // ENABLE(DFG_JIT) - diff --git a/Source/JavaScriptCore/dfg/DFGBinarySwitch.h b/Source/JavaScriptCore/dfg/DFGBinarySwitch.h deleted file mode 100644 index be39cb19a..000000000 --- a/Source/JavaScriptCore/dfg/DFGBinarySwitch.h +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright (C) 2013 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef DFGBinarySwitch_h -#define DFGBinarySwitch_h - -#include <wtf/Platform.h> - -#if ENABLE(DFG_JIT) - -#include "GPRInfo.h" -#include "MacroAssembler.h" - -namespace JSC { namespace DFG { - -// The BinarySwitch class makes it easy to emit a switch statement over either -// 32-bit integers or pointers, where the switch uses a tree of branches -// rather than a jump table. This makes it particularly useful if the case -// values are too far apart to make a jump table practical, or if there are -// sufficiently few cases that the total cost of log(numCases) branches is -// less than the cost of an indirected jump. -// -// In an effort to simplify the logic of emitting code for each case, this -// uses an iterator style, rather than a functor callback style. 
This makes -// sense because even the iterator implementation found herein is relatively -// simple, whereas the code it's used from is usually quite complex - one -// example being the trie-of-trees string switch implementation, where the -// code emitted for each case involves recursing to emit code for a sub-trie. -// -// Use this like so: -// -// BinarySwitch switch(valueReg, casesVector, BinarySwitch::Int32); -// while (switch.advance(jit)) { -// int value = switch.caseValue(); -// unsigned index = switch.caseIndex(); // index into casesVector, above -// ... // generate code for this case -// } -// switch.fallThrough().link(&jit); - -class BinarySwitch { -public: - enum Type { - Int32, - IntPtr - }; - - BinarySwitch(GPRReg value, const Vector<int64_t>& cases, Type); - - unsigned caseIndex() const { return m_cases[m_caseIndex].index; } - int64_t caseValue() const { return m_cases[m_caseIndex].value; } - - bool advance(MacroAssembler&); - - MacroAssembler::JumpList& fallThrough() { return m_fallThrough; } - -private: - void build(unsigned start, unsigned end); - - GPRReg m_value; - - struct Case { - Case() { } - - Case(int64_t value, unsigned index) - : value(value) - , index(index) - { - } - - bool operator<(const Case& other) const - { - return value < other.value; - } - - int64_t value; - unsigned index; - }; - - Vector<Case> m_cases; - - enum BranchKind { - NotEqualToFallThrough, - NotEqualToPush, - LessThanToPush, - Pop, - ExecuteCase - }; - - struct BranchCode { - BranchCode() { } - - BranchCode(BranchKind kind, unsigned index = UINT_MAX) - : kind(kind) - , index(index) - { - } - - BranchKind kind; - unsigned index; - }; - - Vector<BranchCode> m_branches; - - unsigned m_index; - unsigned m_caseIndex; - Vector<MacroAssembler::Jump> m_jumpStack; - - MacroAssembler::JumpList m_fallThrough; - - unsigned m_medianBias; - - Type m_type; -}; - -} } // namespace JSC::DFG - -#endif // ENABLE(DFG_JIT) - -#endif // DFGBinarySwitch_h - diff --git a/Source/JavaScriptCore/dfg/DFGBlockInsertionSet.cpp b/Source/JavaScriptCore/dfg/DFGBlockInsertionSet.cpp index 252d638ee..d57c01c70 100644 --- a/Source/JavaScriptCore/dfg/DFGBlockInsertionSet.cpp +++ b/Source/JavaScriptCore/dfg/DFGBlockInsertionSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. 
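The DFGBinarySwitch class deleted above is described by its own header comment: it emits a switch over integers or pointers as a tree of branches performing a binary search over the sorted case values, which beats a jump table when the cases are sparse or few. As a behavioural sketch only, here is the comparison tree expressed in plain C++; this is just the search that the emitted branches perform, not JIT code and not a drop-in replacement.

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Binary-search dispatch over sorted case values; returns the index of the
// matching case, or -1 for the fall-through path, mirroring the shape of the
// branch tree that BinarySwitch::build() lays out.
static int dispatch(int64_t value, const std::vector<int64_t>& sortedCases)
{
    int low = 0;
    int high = static_cast<int>(sortedCases.size()) - 1;
    while (low <= high) {
        int mid = (low + high) / 2;   // median split, as in build()
        if (value == sortedCases[mid])
            return mid;               // "ExecuteCase"
        if (value < sortedCases[mid])
            high = mid - 1;           // "LessThanToPush" side
        else
            low = mid + 1;
    }
    return -1;                        // fall through
}

int main()
{
    // Case values far apart: a poor fit for a jump table.
    std::vector<int64_t> cases { 3, 100, 4096, 1000000 };
    assert(std::is_sorted(cases.begin(), cases.end()));
    assert(dispatch(4096, cases) == 2);
    assert(dispatch(5, cases) == -1);
    return 0;
}
```

The real class additionally skips equality checks when neighbouring case values are contiguous and alternates its median rounding via m_medianBias; this sketch omits both.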
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,6 +28,8 @@ #if ENABLE(DFG_JIT) +#include "JSCInlines.h" + namespace JSC { namespace DFG { BlockInsertionSet::BlockInsertionSet(Graph& graph) @@ -47,20 +49,21 @@ void BlockInsertionSet::insert(size_t index, PassRefPtr<BasicBlock> block) insert(BlockInsertion(index, block)); } -BasicBlock* BlockInsertionSet::insert(size_t index) +BasicBlock* BlockInsertionSet::insert(size_t index, float executionCount) { RefPtr<BasicBlock> block = adoptRef(new BasicBlock( UINT_MAX, m_graph.block(0)->variablesAtHead.numberOfArguments(), - m_graph.block(0)->variablesAtHead.numberOfLocals())); + m_graph.block(0)->variablesAtHead.numberOfLocals(), + executionCount)); block->isReachable = true; insert(index, block); return block.get(); } -BasicBlock* BlockInsertionSet::insertBefore(BasicBlock* before) +BasicBlock* BlockInsertionSet::insertBefore(BasicBlock* before, float executionCount) { - return insert(before->index); + return insert(before->index, executionCount); } bool BlockInsertionSet::execute() @@ -68,8 +71,9 @@ bool BlockInsertionSet::execute() if (m_insertions.isEmpty()) return false; - // We allow insertions to be given to us in any order. So, we need to - // sort them before running WTF::executeInsertions. + // We allow insertions to be given to us in any order. So, we need to sort them before + // running WTF::executeInsertions. Also, we don't really care if the sort is stable since + // basic block order doesn't have semantics - it's just to make code easier to read. std::sort(m_insertions.begin(), m_insertions.end()); executeInsertions(m_graph.m_blocks, m_insertions); diff --git a/Source/JavaScriptCore/dfg/DFGBlockInsertionSet.h b/Source/JavaScriptCore/dfg/DFGBlockInsertionSet.h index aa2cdc57b..03cab123f 100644 --- a/Source/JavaScriptCore/dfg/DFGBlockInsertionSet.h +++ b/Source/JavaScriptCore/dfg/DFGBlockInsertionSet.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGBlockInsertionSet_h #define DFGBlockInsertionSet_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGGraph.h" @@ -40,14 +38,14 @@ typedef WTF::Insertion<RefPtr<BasicBlock>> BlockInsertion; class BlockInsertionSet { public: - BlockInsertionSet(Graph& graph); + BlockInsertionSet(Graph&); ~BlockInsertionSet(); - void insert(const BlockInsertion& insertion); - void insert(size_t index, PassRefPtr<BasicBlock> block); - BasicBlock* insert(size_t index); - BasicBlock* insertBefore(BasicBlock* before); - + void insert(const BlockInsertion&); + void insert(size_t index, PassRefPtr<BasicBlock>); + BasicBlock* insert(size_t index, float executionCount); + BasicBlock* insertBefore(BasicBlock* before, float executionCount); + bool execute(); private: diff --git a/Source/JavaScriptCore/dfg/DFGBlockMap.h b/Source/JavaScriptCore/dfg/DFGBlockMap.h new file mode 100644 index 000000000..a52546a0d --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGBlockMap.h @@ -0,0 +1,111 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGBlockMap_h +#define DFGBlockMap_h + +#if ENABLE(DFG_JIT) + +#include "DFGBasicBlock.h" + +namespace JSC { namespace DFG { + +class Graph; + +template<typename T> +class BlockMap { + WTF_MAKE_FAST_ALLOCATED; +public: + BlockMap() + { + } + + BlockMap(Graph&); + + BlockIndex size() const + { + return m_vector.size(); + } + + T& atIndex(BlockIndex blockIndex) + { + return m_vector[blockIndex]; + } + + const T& atIndex(BlockIndex blockIndex) const + { + return m_vector[blockIndex]; + } + + T& at(BlockIndex blockIndex) + { + return m_vector[blockIndex]; + } + + const T& at(BlockIndex blockIndex) const + { + return m_vector[blockIndex]; + } + + T& at(BasicBlock* block) + { + return m_vector[block->index]; + } + + const T& at(BasicBlock* block) const + { + return m_vector[block->index]; + } + + T& operator[](BlockIndex blockIndex) + { + return m_vector[blockIndex]; + } + + const T& operator[](BlockIndex blockIndex) const + { + return m_vector[blockIndex]; + } + + T& operator[](BasicBlock* block) + { + return m_vector[block->index]; + } + + const T& operator[](BasicBlock* block) const + { + return m_vector[block->index]; + } + +private: + Vector<T> m_vector; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGBlockMap_h + diff --git a/Source/JavaScriptCore/dfg/DFGBlockMapInlines.h b/Source/JavaScriptCore/dfg/DFGBlockMapInlines.h new file mode 100644 index 000000000..e61626d80 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGBlockMapInlines.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
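The BlockMap<T> template added above is, in essence, a dense map keyed by block index: storage is a flat vector sized to the number of blocks, so lookup is a plain array index rather than a hash. A tiny stand-alone analogue, using the standard library instead of WTF types (illustrative only):

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// A dense, index-keyed map in the spirit of BlockMap<T>: one slot per block
// index, value-initialized, with O(1) indexed access.
template<typename T>
class IndexMap {
public:
    explicit IndexMap(std::size_t size) : m_vector(size) { }
    T& operator[](std::size_t index) { return m_vector[index]; }
    const T& operator[](std::size_t index) const { return m_vector[index]; }
    std::size_t size() const { return m_vector.size(); }
private:
    std::vector<T> m_vector;
};

int main()
{
    IndexMap<int> visitCount(4); // one slot per basic block index
    visitCount[2] = 7;
    assert(visitCount[2] == 7);
    assert(visitCount[0] == 0);  // value-initialized by the vector
    return 0;
}
```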
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGBlockMapInlines_h +#define DFGBlockMapInlines_h + +#if ENABLE(DFG_JIT) + +#include "DFGBlockMap.h" +#include "DFGGraph.h" + +namespace JSC { namespace DFG { + +template<typename T> +BlockMap<T>::BlockMap(Graph& graph) +{ + m_vector.resize(graph.numBlocks()); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGBlockMapInlines_h diff --git a/Source/JavaScriptCore/dfg/DFGDesiredStructureChains.cpp b/Source/JavaScriptCore/dfg/DFGBlockSet.cpp index 04bd5f01f..790e380db 100644 --- a/Source/JavaScriptCore/dfg/DFGDesiredStructureChains.cpp +++ b/Source/JavaScriptCore/dfg/DFGBlockSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -10,10 +10,10 @@ * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR @@ -24,22 +24,17 @@ */ #include "config.h" -#include "DFGDesiredStructureChains.h" +#include "DFGBlockSet.h" #if ENABLE(DFG_JIT) namespace JSC { namespace DFG { -DesiredStructureChains::DesiredStructureChains() { } -DesiredStructureChains::~DesiredStructureChains() { } - -bool DesiredStructureChains::areStillValid() const +void BlockSet::dump(PrintStream& out) const { - for (unsigned i = 0; i < m_vector.size(); ++i) { - if (!m_vector[i]->isStillValid()) - return false; - } - return true; + CommaPrinter comma(" "); + for (BlockIndex blockIndex = m_set.findBit(0, true); blockIndex < m_set.size(); blockIndex = m_set.findBit(blockIndex + 1, true)) + out.print(comma, "#", blockIndex); } } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGBlockSet.h b/Source/JavaScriptCore/dfg/DFGBlockSet.h new file mode 100644 index 000000000..b09afecd9 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGBlockSet.h @@ -0,0 +1,151 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGBlockSet_h +#define DFGBlockSet_h + +#if ENABLE(DFG_JIT) + +#include "DFGBasicBlock.h" + +namespace JSC { namespace DFG { + +class Graph; + +class BlockSet { +public: + BlockSet() { } + + // Return true if the block was added, false if it was already present. + bool add(BasicBlock* block) + { + return !m_set.set(block->index); + } + + bool contains(BasicBlock* block) const + { + if (!block) + return false; + return m_set.get(block->index); + } + + class iterator { + public: + iterator() + : m_graph(nullptr) + , m_set(nullptr) + , m_index(0) + { + } + + iterator& operator++() + { + m_index = m_set->m_set.findBit(m_index + 1, true); + return *this; + } + + BasicBlock* operator*() const; + + bool operator==(const iterator& other) const + { + return m_index == other.m_index; + } + + bool operator!=(const iterator& other) const + { + return !(*this == other); + } + + private: + friend class BlockSet; + + Graph* m_graph; + const BlockSet* m_set; + size_t m_index; + }; + + class Iterable { + public: + Iterable(Graph& graph, const BlockSet& set) + : m_graph(graph) + , m_set(set) + { + } + + iterator begin() const + { + iterator result; + result.m_graph = &m_graph; + result.m_set = &m_set; + result.m_index = m_set.m_set.findBit(0, true); + return result; + } + + iterator end() const + { + iterator result; + result.m_graph = &m_graph; + result.m_set = &m_set; + result.m_index = m_set.m_set.size(); + return result; + } + + private: + Graph& m_graph; + const BlockSet& m_set; + }; + + Iterable iterable(Graph& graph) const + { + return Iterable(graph, *this); + } + + void dump(PrintStream&) const; + +private: + BitVector m_set; +}; + +class BlockAdder { +public: + BlockAdder(BlockSet& set) + : m_set(set) + { + } + + bool operator()(BasicBlock* block) const + { + return m_set.add(block); + } +private: + BlockSet& m_set; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGBlockSet_h + diff --git a/Source/JavaScriptCore/dfg/DFGBlockSetInlines.h b/Source/JavaScriptCore/dfg/DFGBlockSetInlines.h new file mode 100644 index 000000000..df9628597 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGBlockSetInlines.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGBlockSetInlines_h +#define DFGBlockSetInlines_h + +#if ENABLE(DFG_JIT) + +#include "DFGBlockSet.h" +#include "DFGGraph.h" + +namespace JSC { namespace DFG { + +inline BasicBlock* BlockSet::iterator::operator*() const +{ + return m_graph->block(m_index); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGBlockSetInlines_h + diff --git a/Source/JavaScriptCore/dfg/DFGBlockWorklist.h b/Source/JavaScriptCore/dfg/DFGBlockWorklist.h new file mode 100644 index 000000000..8fb3d7f7f --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGBlockWorklist.h @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGBlockWorklist_h +#define DFGBlockWorklist_h + +#if ENABLE(DFG_JIT) + +#include "DFGBasicBlock.h" +#include "DFGBlockSet.h" +#include <wtf/GraphNodeWorklist.h> +#include <wtf/Vector.h> + +namespace JSC { namespace DFG { + +struct BasicBlock; + +typedef GraphNodeWorklist<BasicBlock*, BlockSet> BlockWorklist; + +// When you say BlockWith<int> you should read it as "block with an int". +template<typename T> using BlockWith = GraphNodeWith<BasicBlock*, T>; + +// Extended block worklist is useful for enqueueing some meta-data along with the block. 
It also +// permits forcibly enqueueing things even if the block has already been seen. It's useful for +// things like building a spanning tree, in which case T (the auxiliary payload) would be the +// successor index. +template<typename T> using ExtendedBlockWorklist = ExtendedGraphNodeWorklist<BasicBlock*, T, BlockSet>; + +typedef GraphVisitOrder VisitOrder; + +typedef GraphNodeWithOrder<BasicBlock*> BlockWithOrder; + +typedef PostOrderGraphNodeWorklist<BasicBlock*, BlockSet> PostOrderBlockWorklist; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGBlockWorklist_h + diff --git a/Source/JavaScriptCore/dfg/DFGBranchDirection.h b/Source/JavaScriptCore/dfg/DFGBranchDirection.h index 8bbe3c635..dcdde27f8 100644 --- a/Source/JavaScriptCore/dfg/DFGBranchDirection.h +++ b/Source/JavaScriptCore/dfg/DFGBranchDirection.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGBranchDirection_h #define DFGBranchDirection_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) namespace JSC { namespace DFG { @@ -52,7 +50,7 @@ static inline const char* branchDirectionToString(BranchDirection branchDirectio { switch (branchDirection) { case InvalidBranchDirection: - return "Invalid"; + return "InvalidBranchDirection"; case TakeTrue: return "TakeTrue"; case TakeFalse: @@ -60,6 +58,9 @@ static inline const char* branchDirectionToString(BranchDirection branchDirectio case TakeBoth: return "TakeBoth"; } + + RELEASE_ASSERT_NOT_REACHED(); + return "InvalidBranchDirection"; } static inline bool isKnownDirection(BranchDirection branchDirection) @@ -83,6 +84,15 @@ static inline bool branchCondition(BranchDirection branchDirection) } } // namespace JSC::DFG +namespace WTF { + +inline void printInternal(PrintStream& out, JSC::DFG::BranchDirection direction) +{ + out.print(JSC::DFG::branchDirectionToString(direction)); +} + +} // namespace WTF + #endif // ENABLE(DFG_JIT) #endif // DFGBranchDirection_h diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp index c572e7a3e..2f3f12d6c 100644 --- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp +++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp @@ -1,5 +1,5 @@ - /* - * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved. +/* + * Copyright (C) 2011-2016 Apple Inc. All rights reserved. 
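Taken together, the new BlockMap, BlockSet, and BlockWorklist helpers give DFG phases a uniform way to keep per-block state and to drive graph traversals. The following is a minimal sketch of how a phase might combine them; it assumes the usual GraphNodeWorklist behavior (push() de-duplicates through the worklist's internal BlockSet, pop() returns null when drained), and the function and variable names are illustrative, not part of this patch.

#include "DFGBlockMapInlines.h"
#include "DFGBlockSetInlines.h"
#include "DFGBlockWorklist.h"
#include "DFGGraph.h"
#include <wtf/DataLog.h>

namespace JSC { namespace DFG {

// Hypothetical analysis: record which blocks are reachable from the root and
// in what order the traversal first saw them.
static void analyzeReachability(Graph& graph)
{
    BlockMap<unsigned> visitOrder(graph); // one slot per BlockIndex
    BlockSet reachable;
    unsigned nextOrder = 0;

    BlockWorklist worklist; // each block is enqueued at most once
    worklist.push(graph.block(0));
    while (BasicBlock* block = worklist.pop()) {
        reachable.add(block);
        visitOrder[block] = nextOrder++;
        for (unsigned i = block->numSuccessors(); i--;)
            worklist.push(block->successor(i));
    }

    // Later queries can index either by block pointer or by BlockIndex.
    for (BasicBlock* block : reachable.iterable(graph))
        dataLog("#", block->index, " visited at ", visitOrder[block], "\n");
}

} } // namespace JSC::DFG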
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,18 +29,26 @@ #if ENABLE(DFG_JIT) #include "ArrayConstructor.h" +#include "BasicBlockLocation.h" #include "CallLinkStatus.h" #include "CodeBlock.h" #include "CodeBlockWithJITType.h" #include "DFGArrayMode.h" #include "DFGCapabilities.h" +#include "DFGClobbersExitState.h" +#include "DFGGraph.h" #include "DFGJITCode.h" #include "GetByIdStatus.h" -#include "JSActivation.h" -#include "Operations.h" +#include "Heap.h" +#include "JSLexicalEnvironment.h" +#include "JSCInlines.h" +#include "JSModuleEnvironment.h" #include "PreciseJumpTargets.h" +#include "PutByIdFlags.h" #include "PutByIdStatus.h" +#include "StackAlignment.h" #include "StringConstructor.h" +#include "Watchdog.h" #include <wtf/CommaPrinter.h> #include <wtf/HashMap.h> #include <wtf/MathExtras.h> @@ -48,6 +56,8 @@ namespace JSC { namespace DFG { +static const bool verbose = false; + class ConstantBufferKey { public: ConstantBufferKey() @@ -130,19 +140,17 @@ public: , m_graph(graph) , m_currentBlock(0) , m_currentIndex(0) - , m_constantUndefined(UINT_MAX) - , m_constantNull(UINT_MAX) - , m_constantNaN(UINT_MAX) - , m_constant1(UINT_MAX) - , m_constants(m_codeBlock->numberOfConstantRegisters()) + , m_constantUndefined(graph.freeze(jsUndefined())) + , m_constantNull(graph.freeze(jsNull())) + , m_constantNaN(graph.freeze(jsNumber(PNaN))) + , m_constantOne(graph.freeze(jsNumber(1))) , m_numArguments(m_codeBlock->numParameters()) - , m_numLocals(m_codeBlock->m_numCalleeRegisters) + , m_numLocals(m_codeBlock->m_numCalleeLocals) , m_parameterSlots(0) , m_numPassedVarArgs(0) , m_inlineStackTop(0) - , m_haveBuiltOperandMaps(false) - , m_emptyJSValueIndex(UINT_MAX) , m_currentInstruction(0) + , m_hasDebuggerEnabled(graph.hasDebuggerEnabled()) { ASSERT(m_profiledBlock); } @@ -155,56 +163,128 @@ private: // Just parse from m_currentIndex to the end of the current CodeBlock. void parseCodeBlock(); + + void ensureLocals(unsigned newNumLocals) + { + if (newNumLocals <= m_numLocals) + return; + m_numLocals = newNumLocals; + for (size_t i = 0; i < m_graph.numBlocks(); ++i) + m_graph.block(i)->ensureLocals(newNumLocals); + } // Helper for min and max. - bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis); - + template<typename ChecksFunctor> + bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks); + // Handle calls. This resolves issues surrounding inlining and intrinsics. 
- void handleCall(Instruction* currentInstruction, NodeType op, CodeSpecializationKind); - void emitFunctionChecks(const CallLinkStatus&, Node* callTarget, int registerOffset, CodeSpecializationKind); - void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind); + enum Terminality { Terminal, NonTerminal }; + Terminality handleCall( + int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize, + Node* callTarget, int argCount, int registerOffset, CallLinkStatus, + SpeculatedType prediction); + Terminality handleCall( + int result, NodeType op, CallMode, unsigned instructionSize, + Node* callTarget, int argCount, int registerOffset, CallLinkStatus); + Terminality handleCall(int result, NodeType op, CallMode, unsigned instructionSize, int callee, int argCount, int registerOffset); + Terminality handleCall(Instruction* pc, NodeType op, CallMode); + Terminality handleVarargsCall(Instruction* pc, NodeType op, CallMode); + void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgumnt); + void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis); + unsigned inliningCost(CallVariant, int argumentCountIncludingThis, CallMode); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1. // Handle inlining. Return true if it succeeded, false if we need to plant a call. - bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind); + bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction); + enum CallerLinkability { CallerDoesNormalLinking, CallerLinksManually }; + template<typename ChecksFunctor> + bool attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks); + template<typename ChecksFunctor> + void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, const ChecksFunctor& insertChecks); + void cancelLinkingForBlock(InlineStackEntry*, BasicBlock*); // Only works when the given block is the last one to have been added for that inline stack entry. // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call. 
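The ChecksFunctor template parameter threaded through these helpers lets the caller delay emitting speculation checks until inlining of an intrinsic is known to succeed. A hypothetical call site, using only signatures introduced in this patch; callVariant, callTargetNode, thisArgument, and the surrounding locals are illustrative:

// Plant the callee/this checks lazily: the lambda runs only if the intrinsic
// (here Math.min, modeled as ArithMin) is actually inlined.
auto insertChecks = [&] {
    emitFunctionChecks(callVariant, callTargetNode, thisArgument);
};
if (handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks))
    return true; // folded to ArithMin; no call node was planted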
- bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction); - bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType); - bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind); - Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value); - Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset); - void handleGetByOffset( - int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber, - PropertyOffset); + template<typename ChecksFunctor> + bool handleIntrinsicCall(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks); + template<typename ChecksFunctor> + bool handleIntrinsicGetter(int resultOperand, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks); + template<typename ChecksFunctor> + bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks); + template<typename ChecksFunctor> + bool handleConstantInternalFunction(Node* callTargetNode, int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, const ChecksFunctor& insertChecks); + Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, const InferredType::Descriptor&, Node* value); + Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, const InferredType::Descriptor&, NodeType = GetByOffset); + + // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not + // check the validity of the condition, but it may return a null one if it encounters a contradiction. + ObjectPropertyCondition presenceLike( + JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&); + + // Attempt to watch the presence of a property. It will watch that the property is present in the same + // way as in all of the structures in the set. It may emit code instead of just setting a watchpoint. + // Returns true if this all works out. + bool checkPresenceLike(JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&); + void checkPresenceLike(Node* base, UniquedStringImpl*, PropertyOffset, const StructureSet&); + + // Works with both GetByIdVariant and the setter form of PutByIdVariant. + template<typename VariantType> + Node* load(SpeculatedType, Node* base, unsigned identifierNumber, const VariantType&); + + Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value); + void handleGetById( int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber, const GetByIdStatus&); - - Node* getScope(bool skipTop, unsigned skipCount); + void emitPutById( + Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect); + void handlePutById( + Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, + bool isDirect); + + // Either register a watchpoint or emit a check for this condition. Returns false if the + // condition no longer holds, and therefore no reasonable check can be emitted. 
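These condition-based helpers are what replace the old structure-chain walks: a fast-path property access is described as an ObjectPropertyCondition or ObjectPropertyConditionSet, guarded with check()/planLoad(), and materialized with load(). A rough sketch of the intended usage, assuming conditionSet and prediction are locals the caller already has (illustrative, not code from the patch):

// Guard every condition (watchpoints where possible, emitted checks otherwise)
// and load the value from the slot base. A null node means some condition is
// no longer checkable, so the caller must plant a generic GetById instead.
if (Node* value = load(prediction, conditionSet))
    return value;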
+ bool check(const ObjectPropertyCondition&); - // Prepare to parse a block. + GetByOffsetMethod promoteToConstant(GetByOffsetMethod); + + // Either register a watchpoint or emit a check for this condition. It must be a Presence + // condition. It will attempt to promote a Presence condition to an Equivalence condition. + // Emits code for the loaded value that the condition guards, and returns a node containing + // the loaded value. Returns null if the condition no longer holds. + GetByOffsetMethod planLoad(const ObjectPropertyCondition&); + Node* load(SpeculatedType, unsigned identifierNumber, const GetByOffsetMethod&, NodeType = GetByOffset); + Node* load(SpeculatedType, const ObjectPropertyCondition&, NodeType = GetByOffset); + + // Calls check() for each condition in the set: that is, it either emits checks or registers + // watchpoints (or a combination of the two) to make the conditions hold. If any of those + // conditions are no longer checkable, returns false. + bool check(const ObjectPropertyConditionSet&); + + // Calls check() for those conditions that aren't the slot base, and calls load() for the slot + // base. Does a combination of watchpoint registration and check emission to guard the + // conditions, and emits code to load the value from the slot base. Returns a node containing + // the loaded value. Returns null if any of the conditions were no longer checkable. + GetByOffsetMethod planLoad(const ObjectPropertyConditionSet&); + Node* load(SpeculatedType, const ObjectPropertyConditionSet&, NodeType = GetByOffset); + void prepareToParseBlock(); + void clearCaches(); + // Parse a single basic block of bytecode instructions. bool parseBlock(unsigned limit); // Link block successors. void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets); void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets); - VariableAccessData* newVariableAccessData(VirtualRegister operand, bool isCaptured) + VariableAccessData* newVariableAccessData(VirtualRegister operand) { ASSERT(!operand.isConstant()); - m_graph.m_variableAccessData.append(VariableAccessData(operand, isCaptured)); + m_graph.m_variableAccessData.append(VariableAccessData(operand)); return &m_graph.m_variableAccessData.last(); } // Get/Set the operands/result of a bytecode instruction. Node* getDirect(VirtualRegister operand) { - // Is this a constant? - if (operand.isConstant()) { - unsigned constant = operand.toConstantIndex(); - ASSERT(constant < m_constants.size()); - return getJSConstant(constant); - } + ASSERT(!operand.isConstant()); // Is this an argument? 
if (operand.isArgument()) @@ -216,28 +296,81 @@ private: Node* get(VirtualRegister operand) { + if (operand.isConstant()) { + unsigned constantIndex = operand.toConstantIndex(); + unsigned oldSize = m_constants.size(); + if (constantIndex >= oldSize || !m_constants[constantIndex]) { + const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock; + JSValue value = codeBlock.getConstant(operand.offset()); + SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset()); + if (constantIndex >= oldSize) { + m_constants.grow(constantIndex + 1); + for (unsigned i = oldSize; i < m_constants.size(); ++i) + m_constants[i] = nullptr; + } + + Node* constantNode = nullptr; + if (sourceCodeRepresentation == SourceCodeRepresentation::Double) + constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber())))); + else + constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value))); + m_constants[constantIndex] = constantNode; + } + ASSERT(m_constants[constantIndex]); + return m_constants[constantIndex]; + } + if (inlineCallFrame()) { if (!inlineCallFrame()->isClosureCall) { JSFunction* callee = inlineCallFrame()->calleeConstant(); if (operand.offset() == JSStack::Callee) - return cellConstant(callee); - if (operand.offset() == JSStack::ScopeChain) - return cellConstant(callee->scope()); + return weakJSConstant(callee); + } + } else if (operand.offset() == JSStack::Callee) { + // We have to do some constant-folding here because this enables CreateThis folding. Note + // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that + // case if the function is a singleton then we already know it. + if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())) { + InferredValue* singleton = executable->singletonFunction(); + if (JSValue value = singleton->inferredValue()) { + m_graph.watchpoints().addLazily(singleton); + JSFunction* function = jsCast<JSFunction*>(value); + return weakJSConstant(function); + } } - } else if (operand.offset() == JSStack::Callee) return addToGraph(GetCallee); - else if (operand.offset() == JSStack::ScopeChain) - return addToGraph(GetMyScope); + } return getDirect(m_inlineStackTop->remapOperand(operand)); } - enum SetMode { NormalSet, ImmediateSet }; + enum SetMode { + // A normal set which follows a two-phase commit that spans code origins. During + // the current code origin it issues a MovHint, and at the start of the next + // code origin there will be a SetLocal. If the local needs flushing, the second + // SetLocal will be preceded with a Flush. + NormalSet, + + // A set where the SetLocal happens immediately and there is still a Flush. This + // is relevant when assigning to a local in tricky situations for the delayed + // SetLocal logic but where we know that we have not performed any side effects + // within this code origin. This is a safe replacement for NormalSet anytime we + // know that we have not yet performed side effects in this code origin. + ImmediateSetWithFlush, + + // A set where the SetLocal happens immediately and we do not Flush it even if + // this is a local that is marked as needing it. This is relevant when + // initializing locals at the top of a function. 
+ ImmediateNakedSet + }; Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet) { addToGraph(MovHint, OpInfo(operand.offset()), value); - - DelayedSetLocal delayed = DelayedSetLocal(operand, value); + + // We can't exit anymore because our OSR exit state has changed. + m_exitOK = false; + + DelayedSetLocal delayed(currentCodeOrigin(), operand, value); if (setMode == NormalSet) { m_setLocalQueue.append(delayed); @@ -246,6 +379,13 @@ private: return delayed.execute(this, setMode); } + + void processSetLocalQueue() + { + for (unsigned i = 0; i < m_setLocalQueue.size(); ++i) + m_setLocalQueue[i].execute(this); + m_setLocalQueue.resize(0); + } Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet) { @@ -255,7 +395,7 @@ private: Node* injectLazyOperandSpeculation(Node* node) { ASSERT(node->op() == GetLocal); - ASSERT(node->codeOrigin.bytecodeIndex == m_currentIndex); + ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex); ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); LazyOperandValueProfileKey key(m_currentIndex, node->local()); SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key); @@ -268,25 +408,7 @@ private: { unsigned local = operand.toLocal(); - if (local < m_localWatchpoints.size()) { - if (VariableWatchpointSet* set = m_localWatchpoints[local]) { - if (JSValue value = set->inferredValue()) { - addToGraph(FunctionReentryWatchpoint, OpInfo(m_codeBlock->symbolTable())); - addToGraph(VariableWatchpoint, OpInfo(set)); - // Note: this is very special from an OSR exit standpoint. We wouldn't be - // able to do this for most locals, but it works here because we're dealing - // with a flushed local. For most locals we would need to issue a GetLocal - // here and ensure that we have uses in DFG IR wherever there would have - // been uses in bytecode. Clearly this optimization does not do this. But - // that's fine, because we don't need to track liveness for captured - // locals, and this optimization only kicks in for captured locals. - return inferredConstant(value); - } - } - } - Node* node = m_currentBlock->variablesAtTail.local(local); - bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame()); // This has two goals: 1) link together variable access datas, and 2) // try to avoid creating redundant GetLocals. 
(1) is required for @@ -298,45 +420,46 @@ private: if (node) { variable = node->variableAccessData(); - variable->mergeIsCaptured(isCaptured); - if (!isCaptured) { - switch (node->op()) { - case GetLocal: - return node; - case SetLocal: - return node->child1().node(); - default: - break; - } + switch (node->op()) { + case GetLocal: + return node; + case SetLocal: + return node->child1().node(); + default: + break; } } else - variable = newVariableAccessData(operand, isCaptured); + variable = newVariableAccessData(operand); node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable))); m_currentBlock->variablesAtTail.local(local) = node; return node; } - - Node* setLocal(VirtualRegister operand, Node* value, SetMode setMode = NormalSet) + Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet) { + CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin; + m_currentSemanticOrigin = semanticOrigin; + unsigned local = operand.toLocal(); - bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame()); - if (setMode == NormalSet) { + if (setMode != ImmediateNakedSet) { ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand); - if (isCaptured || argumentPosition) + if (argumentPosition) flushDirect(operand, argumentPosition); + else if (m_hasDebuggerEnabled && operand == m_codeBlock->scopeRegister()) + flush(operand); } - VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured); + VariableAccessData* variableAccessData = newVariableAccessData(operand); variableAccessData->mergeStructureCheckHoistingFailed( - m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache) - || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint)); + m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache)); variableAccessData->mergeCheckArrayHoistingFailed( - m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)); + m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType)); Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value); m_currentBlock->variablesAtTail.local(local) = node; + + m_currentSemanticOrigin = oldSemanticOrigin; return node; } @@ -347,13 +470,11 @@ private: ASSERT(argument < m_numArguments); Node* node = m_currentBlock->variablesAtTail.argument(argument); - bool isCaptured = m_codeBlock->isCaptured(operand); VariableAccessData* variable; if (node) { variable = node->variableAccessData(); - variable->mergeIsCaptured(isCaptured); switch (node->op()) { case GetLocal: @@ -364,36 +485,38 @@ private: break; } } else - variable = newVariableAccessData(operand, isCaptured); + variable = newVariableAccessData(operand); node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable))); m_currentBlock->variablesAtTail.argument(argument) = node; return node; } - Node* setArgument(VirtualRegister operand, Node* value, SetMode setMode = NormalSet) + Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet) { + CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin; + m_currentSemanticOrigin = semanticOrigin; + unsigned argument = operand.toArgument(); ASSERT(argument < m_numArguments); - bool isCaptured = m_codeBlock->isCaptured(operand); - - VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured); + VariableAccessData* variableAccessData = 
newVariableAccessData(operand); // Always flush arguments, except for 'this'. If 'this' is created by us, // then make sure that it's never unboxed. if (argument) { - if (setMode == NormalSet) + if (setMode != ImmediateNakedSet) flushDirect(operand); } else if (m_codeBlock->specializationKind() == CodeForConstruct) variableAccessData->mergeShouldNeverUnbox(true); variableAccessData->mergeStructureCheckHoistingFailed( - m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache) - || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint)); + m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache)); variableAccessData->mergeCheckArrayHoistingFailed( - m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)); + m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType)); Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value); m_currentBlock->variablesAtTail.argument(argument) = node; + + m_currentSemanticOrigin = oldSemanticOrigin; return node; } @@ -430,18 +553,6 @@ private: return findArgumentPositionForLocal(operand); } - void addConstant(JSValue value) - { - unsigned constantIndex = m_codeBlock->addConstantLazily(); - initializeLazyWriteBarrierForConstant( - m_graph.m_plan.writeBarriers, - m_codeBlock->constants()[constantIndex], - m_codeBlock, - constantIndex, - m_codeBlock->ownerExecutable(), - value); - } - void flush(VirtualRegister operand) { flushDirect(m_inlineStackTop->remapOperand(operand)); @@ -454,84 +565,74 @@ private: void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition) { - bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame()); - ASSERT(!operand.isConstant()); Node* node = m_currentBlock->variablesAtTail.operand(operand); VariableAccessData* variable; - if (node) { + if (node) variable = node->variableAccessData(); - variable->mergeIsCaptured(isCaptured); - } else - variable = newVariableAccessData(operand, isCaptured); + else + variable = newVariableAccessData(operand); node = addToGraph(Flush, OpInfo(variable)); m_currentBlock->variablesAtTail.operand(operand) = node; if (argumentPosition) argumentPosition->addVariable(variable); } - + void flush(InlineStackEntry* inlineStackEntry) { int numArguments; if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) { + ASSERT(!m_hasDebuggerEnabled); numArguments = inlineCallFrame->arguments.size(); - if (inlineCallFrame->isClosureCall) { + if (inlineCallFrame->isClosureCall) flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::Callee))); - flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::ScopeChain))); - } + if (inlineCallFrame->isVarargs()) + flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::ArgumentCount))); } else numArguments = inlineStackEntry->m_codeBlock->numParameters(); for (unsigned argument = numArguments; argument-- > 1;) flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument))); - for (int local = 0; local < inlineStackEntry->m_codeBlock->m_numVars; ++local) { - if (!inlineStackEntry->m_codeBlock->isCaptured(virtualRegisterForLocal(local))) - continue; - flushDirect(inlineStackEntry->remapOperand(virtualRegisterForLocal(local))); - } + if (m_hasDebuggerEnabled) + flush(m_codeBlock->scopeRegister()); } - void flushAllArgumentsAndCapturedVariablesInInlineStack() + void flushForTerminal() { for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; 
inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller) flush(inlineStackEntry); } - void flushArgumentsAndCapturedVariables() + void flushForReturn() { flush(m_inlineStackTop); } - - // NOTE: Only use this to construct constants that arise from non-speculative - // constant folding. I.e. creating constants using this if we had constant - // field inference would be a bad idea, since the bytecode parser's folding - // doesn't handle liveness preservation. - Node* getJSConstantForValue(JSValue constantValue, NodeFlags flags = NodeIsStaticConstant) + + void flushIfTerminal(SwitchData& data) { - unsigned constantIndex; - if (!m_codeBlock->findConstant(constantValue, constantIndex)) { - addConstant(constantValue); - m_constants.append(ConstantRecord()); - } + if (data.fallThrough.bytecodeIndex() > m_currentIndex) + return; - ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters()); + for (unsigned i = data.cases.size(); i--;) { + if (data.cases[i].target.bytecodeIndex() > m_currentIndex) + return; + } - return getJSConstant(constantIndex, flags); + flushForTerminal(); } - Node* getJSConstant(unsigned constant, NodeFlags flags = NodeIsStaticConstant) + // Assumes that the constant should be strongly marked. + Node* jsConstant(JSValue constantValue) { - Node* node = m_constants[constant].asJSValue; - if (node) - return node; + return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue))); + } - Node* result = addToGraph(JSConstant, OpInfo(constant)); - result->mergeFlags(flags); - m_constants[constant].asJSValue = result; - return result; + Node* weakJSConstant(JSValue constantValue) + { + return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue))); } // Helper functions to get/set the this value. @@ -545,275 +646,207 @@ private: set(m_inlineStackTop->m_codeBlock->thisRegister(), value); } - // Convenience methods for checking nodes for constants. - bool isJSConstant(Node* node) - { - return node->op() == JSConstant; - } - bool isInt32Constant(Node* node) - { - return isJSConstant(node) && valueOfJSConstant(node).isInt32(); - } - // Convenience methods for getting constant values. - JSValue valueOfJSConstant(Node* node) - { - ASSERT(isJSConstant(node)); - return m_codeBlock->getConstant(FirstConstantRegisterIndex + node->constantNumber()); - } - int32_t valueOfInt32Constant(Node* node) + InlineCallFrame* inlineCallFrame() { - ASSERT(isInt32Constant(node)); - return valueOfJSConstant(node).asInt32(); + return m_inlineStackTop->m_inlineCallFrame; } - - // This method returns a JSConstant with the value 'undefined'. - Node* constantUndefined() - { - // Has m_constantUndefined been set up yet? - if (m_constantUndefined == UINT_MAX) { - // Search the constant pool for undefined, if we find it, we can just reuse this! - unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters(); - for (m_constantUndefined = 0; m_constantUndefined < numberOfConstants; ++m_constantUndefined) { - JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined); - if (testMe.isUndefined()) - return getJSConstant(m_constantUndefined); - } - // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants. - ASSERT(m_constants.size() == numberOfConstants); - addConstant(jsUndefined()); - m_constants.append(ConstantRecord()); - ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters()); - } - - // m_constantUndefined must refer to an entry in the CodeBlock's constant pool that has the value 'undefined'. 
- ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined).isUndefined()); - return getJSConstant(m_constantUndefined); + bool allInlineFramesAreTailCalls() + { + return !inlineCallFrame() || !inlineCallFrame()->getCallerSkippingTailCalls(); } - // This method returns a JSConstant with the value 'null'. - Node* constantNull() + CodeOrigin currentCodeOrigin() { - // Has m_constantNull been set up yet? - if (m_constantNull == UINT_MAX) { - // Search the constant pool for null, if we find it, we can just reuse this! - unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters(); - for (m_constantNull = 0; m_constantNull < numberOfConstants; ++m_constantNull) { - JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull); - if (testMe.isNull()) - return getJSConstant(m_constantNull); - } - - // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants. - ASSERT(m_constants.size() == numberOfConstants); - addConstant(jsNull()); - m_constants.append(ConstantRecord()); - ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters()); - } - - // m_constantNull must refer to an entry in the CodeBlock's constant pool that has the value 'null'. - ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull).isNull()); - return getJSConstant(m_constantNull); + return CodeOrigin(m_currentIndex, inlineCallFrame()); } - // This method returns a DoubleConstant with the value 1. - Node* one() + NodeOrigin currentNodeOrigin() { - // Has m_constant1 been set up yet? - if (m_constant1 == UINT_MAX) { - // Search the constant pool for the value 1, if we find it, we can just reuse this! - unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters(); - for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) { - JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1); - if (testMe.isInt32() && testMe.asInt32() == 1) - return getJSConstant(m_constant1); - } - - // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants. - ASSERT(m_constants.size() == numberOfConstants); - addConstant(jsNumber(1)); - m_constants.append(ConstantRecord()); - ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters()); - } + CodeOrigin semantic; + CodeOrigin forExit; - // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1. - ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32()); - ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1); - return getJSConstant(m_constant1); - } - - // This method returns a DoubleConstant with the value NaN. - Node* constantNaN() - { - JSValue nan = jsNaN(); - - // Has m_constantNaN been set up yet? - if (m_constantNaN == UINT_MAX) { - // Search the constant pool for the value NaN, if we find it, we can just reuse this! - unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters(); - for (m_constantNaN = 0; m_constantNaN < numberOfConstants; ++m_constantNaN) { - JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN); - if (JSValue::encode(testMe) == JSValue::encode(nan)) - return getJSConstant(m_constantNaN); - } + if (m_currentSemanticOrigin.isSet()) + semantic = m_currentSemanticOrigin; + else + semantic = currentCodeOrigin(); - // Add the value nan to the CodeBlock's constants, and add a corresponding slot in m_constants. 
- ASSERT(m_constants.size() == numberOfConstants); - addConstant(nan); - m_constants.append(ConstantRecord()); - ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters()); - } + forExit = currentCodeOrigin(); - // m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value nan. - ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble()); - ASSERT(std::isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble())); - return getJSConstant(m_constantNaN); + return NodeOrigin(semantic, forExit, m_exitOK); } - Node* cellConstant(JSCell* cell) + BranchData* branchData(unsigned taken, unsigned notTaken) { - HashMap<JSCell*, Node*>::AddResult result = m_cellConstantNodes.add(cell, nullptr); - if (result.isNewEntry) - result.iterator->value = addToGraph(WeakJSConstant, OpInfo(cell)); - - return result.iterator->value; - } - - Node* inferredConstant(JSValue value) - { - if (value.isCell()) - return cellConstant(value.asCell()); - return getJSConstantForValue(value, 0); - } - - InlineCallFrame* inlineCallFrame() - { - return m_inlineStackTop->m_inlineCallFrame; - } - - CodeOrigin currentCodeOrigin() - { - return CodeOrigin(m_currentIndex, inlineCallFrame()); + // We assume that branches originating from bytecode always have a fall-through. We + // use this assumption to avoid checking for the creation of terminal blocks. + ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex)); + BranchData* data = m_graph.m_branchData.add(); + *data = BranchData::withBytecodeIndices(taken, notTaken); + return data; } - bool canFold(Node* node) + Node* addToGraph(Node* node) { - if (Options::validateFTLOSRExitLiveness()) { - // The static folding that the bytecode parser does results in the DFG - // being able to do some DCE that the bytecode liveness analysis would - // miss. Hence, we disable the static folding if we're validating FTL OSR - // exit liveness. This may be brutish, but this validator is powerful - // enough that it's worth it. - return false; - } - - return node->isStronglyProvedConstantIn(inlineCallFrame()); - } - - // Our codegen for constant strict equality performs a bitwise comparison, - // so we can only select values that have a consistent bitwise identity. 
- bool isConstantForCompareStrictEq(Node* node) - { - if (!node->isConstant()) - return false; - JSValue value = valueOfJSConstant(node); - return value.isBoolean() || value.isUndefinedOrNull(); + if (Options::verboseDFGByteCodeParsing()) + dataLog(" appended ", node, " ", Graph::opName(node->op()), "\n"); + m_currentBlock->append(node); + if (clobbersExitState(m_graph, node)) + m_exitOK = false; + return node; } Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0) { Node* result = m_graph.addNode( - SpecNone, op, currentCodeOrigin(), Edge(child1), Edge(child2), Edge(child3)); - ASSERT(op != Phi); - m_currentBlock->append(result); - return result; + SpecNone, op, currentNodeOrigin(), Edge(child1), Edge(child2), + Edge(child3)); + return addToGraph(result); } Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge()) { Node* result = m_graph.addNode( - SpecNone, op, currentCodeOrigin(), child1, child2, child3); - ASSERT(op != Phi); - m_currentBlock->append(result); - return result; + SpecNone, op, currentNodeOrigin(), child1, child2, child3); + return addToGraph(result); } Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0) { Node* result = m_graph.addNode( - SpecNone, op, currentCodeOrigin(), info, Edge(child1), Edge(child2), Edge(child3)); - ASSERT(op != Phi); - m_currentBlock->append(result); - return result; + SpecNone, op, currentNodeOrigin(), info, Edge(child1), Edge(child2), + Edge(child3)); + return addToGraph(result); } Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0) { Node* result = m_graph.addNode( - SpecNone, op, currentCodeOrigin(), info1, info2, + SpecNone, op, currentNodeOrigin(), info1, info2, Edge(child1), Edge(child2), Edge(child3)); - ASSERT(op != Phi); - m_currentBlock->append(result); - return result; + return addToGraph(result); } Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2) { Node* result = m_graph.addNode( - SpecNone, Node::VarArg, op, currentCodeOrigin(), info1, info2, + SpecNone, Node::VarArg, op, currentNodeOrigin(), info1, info2, m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs); - ASSERT(op != Phi); - m_currentBlock->append(result); + addToGraph(result); m_numPassedVarArgs = 0; return result; } - + void addVarArgChild(Node* child) { m_graph.m_varArgChildren.append(Edge(child)); m_numPassedVarArgs++; } - Node* addCall(Instruction* currentInstruction, NodeType op) + Node* addCallWithoutSettingResult( + NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset, + OpInfo prediction) { - SpeculatedType prediction = getPrediction(); - - addVarArgChild(get(VirtualRegister(currentInstruction[2].u.operand))); - int argCount = currentInstruction[3].u.operand; - if (JSStack::ThisArgument + (unsigned)argCount > m_parameterSlots) - m_parameterSlots = JSStack::ThisArgument + argCount; + addVarArgChild(callee); + size_t frameSize = JSStack::CallFrameHeaderSize + argCount; + size_t alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), frameSize); + size_t parameterSlots = alignedFrameSize - JSStack::CallerFrameAndPCSize; - int registerOffset = -currentInstruction[4].u.operand; - int dummyThisArgument = op == Call ? 
0 : 1; - for (int i = 0 + dummyThisArgument; i < argCount; ++i) + if (parameterSlots > m_parameterSlots) + m_parameterSlots = parameterSlots; + + for (int i = 0; i < argCount; ++i) addVarArgChild(get(virtualRegisterForArgument(i, registerOffset))); - Node* call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction)); - set(VirtualRegister(currentInstruction[1].u.operand), call); + return addToGraph(Node::VarArg, op, opInfo, prediction); + } + + Node* addCall( + int result, NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset, + SpeculatedType prediction) + { + if (op == TailCall) { + if (allInlineFramesAreTailCalls()) + return addCallWithoutSettingResult(op, OpInfo(), callee, argCount, registerOffset, OpInfo()); + op = TailCallInlinedCaller; + } + + + Node* call = addCallWithoutSettingResult( + op, opInfo, callee, argCount, registerOffset, OpInfo(prediction)); + VirtualRegister resultReg(result); + if (resultReg.isValid()) + set(resultReg, call); return call; } Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure) { - Node* objectNode = cellConstant(object); + // FIXME: This should route to emitPropertyCheck, not the other way around. But currently, + // this gets no profit from using emitPropertyCheck() since we'll non-adaptively watch the + // object's structure as soon as we make it a weakJSCosntant. + Node* objectNode = weakJSConstant(object); addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode); return objectNode; } - Node* cellConstantWithStructureCheck(JSCell* object) - { - return cellConstantWithStructureCheck(object, object->structure()); - } - SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex) { - ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); - return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex); + SpeculatedType prediction; + CodeBlock* profiledBlock = nullptr; + + { + ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); + prediction = m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex); + + if (prediction == SpecNone) { + // If we have no information about the values this + // node generates, we check if by any chance it is + // a tail call opcode. In that case, we walk up the + // inline frames to find a call higher in the call + // chain and use its prediction. If we only have + // inlined tail call frames, we use SpecFullTop + // to avoid a spurious OSR exit. 
+ Instruction* instruction = m_inlineStackTop->m_profiledBlock->instructions().begin() + bytecodeIndex; + OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(instruction->u.opcode); + + switch (opcodeID) { + case op_tail_call: + case op_tail_call_varargs: { + if (!inlineCallFrame()) { + prediction = SpecFullTop; + break; + } + CodeOrigin* codeOrigin = inlineCallFrame()->getCallerSkippingTailCalls(); + if (!codeOrigin) { + prediction = SpecFullTop; + break; + } + InlineStackEntry* stack = m_inlineStackTop; + while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame) + stack = stack->m_caller; + bytecodeIndex = codeOrigin->bytecodeIndex; + profiledBlock = stack->m_profiledBlock; + break; + } + + default: + break; + } + } + } + + if (profiledBlock) { + ConcurrentJITLocker locker(profiledBlock->m_lock); + prediction = profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex); + } + + return prediction; } SpeculatedType getPrediction(unsigned bytecodeIndex) { SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex); - + if (prediction == SpecNone) { // We have no information about what values this node generates. Give up // on executing this code, since we're likely to do more damage than good. @@ -837,7 +870,8 @@ private: { ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock); - return ArrayMode::fromObserved(locker, profile, action, false); + bool makeSafe = profile->outOfBounds(locker); + return ArrayMode::fromObserved(locker, profile, action, makeSafe); } ArrayMode getArrayMode(ArrayProfile* profile) @@ -845,32 +879,17 @@ private: return getArrayMode(profile, Array::Read); } - ArrayMode getArrayModeConsideringSlowPath(ArrayProfile* profile, Array::Action action) - { - ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); - - profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock); - - bool makeSafe = - m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex) - || profile->outOfBounds(locker); - - ArrayMode result = ArrayMode::fromObserved(locker, profile, action, makeSafe); - - return result; - } - Node* makeSafe(Node* node) { - bool likelyToTakeSlowCase; - if (!isX86() && node->op() == ArithMod) - likelyToTakeSlowCase = false; - else - likelyToTakeSlowCase = m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex); + if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) + node->mergeFlags(NodeMayOverflowInt32InDFG); + if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) + node->mergeFlags(NodeMayNegZeroInDFG); - if (!likelyToTakeSlowCase - && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow) - && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) + if (!isX86() && node->op() == ArithMod) + return node; + + if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) return node; switch (node->op()) { @@ -879,26 +898,30 @@ private: case ArithSub: case ValueAdd: case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double. - node->mergeFlags(NodeMayOverflow); + node->mergeFlags(NodeMayOverflowInt32InBaseline); break; case ArithNegate: // Currently we can't tell the difference between a negation overflowing // (i.e. -(1 << 31)) or generating negative zero (i.e. -0). If it took slow // path then we assume that it did both of those things. 
- node->mergeFlags(NodeMayOverflow); - node->mergeFlags(NodeMayNegZero); + node->mergeFlags(NodeMayOverflowInt32InBaseline); + node->mergeFlags(NodeMayNegZeroInBaseline); break; - case ArithMul: - if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex) - || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) - node->mergeFlags(NodeMayOverflow | NodeMayNegZero); - else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex) - || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) - node->mergeFlags(NodeMayNegZero); + case ArithMul: { + ResultProfile& resultProfile = *m_inlineStackTop->m_profiledBlock->resultProfileForBytecodeOffset(m_currentIndex); + if (resultProfile.didObserveInt52Overflow()) + node->mergeFlags(NodeMayOverflowInt52); + if (resultProfile.didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) + node->mergeFlags(NodeMayOverflowInt32InBaseline); + if (resultProfile.didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) + node->mergeFlags(NodeMayNegZeroInBaseline); + if (resultProfile.didObserveNonInt32()) + node->mergeFlags(NodeMayHaveNonIntResult); break; - + } + default: RELEASE_ASSERT_NOT_REACHED(); break; @@ -911,42 +934,36 @@ private: { ASSERT(node->op() == ArithDiv); + if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) + node->mergeFlags(NodeMayOverflowInt32InDFG); + if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) + node->mergeFlags(NodeMayNegZeroInDFG); + // The main slow case counter for op_div in the old JIT counts only when // the operands are not numbers. We don't care about that since we already // have speculations in place that take care of that separately. We only // care about when the outcome of the division is not an integer, which // is what the special fast case counter tells us. - if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex) - && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow) - && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) + if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex)) return node; - // FIXME: It might be possible to make this more granular. The DFG certainly can - // distinguish between negative zero and overflow in its exit profiles. - node->mergeFlags(NodeMayOverflow | NodeMayNegZero); + // FIXME: It might be possible to make this more granular. + node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline); return node; } - bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain) + void noticeArgumentsUse() { - if (direct) - return true; - - if (!previousStructure->storedPrototype().isNull() && previousStructure->storedPrototype().asCell()->structure() != chain->head()->get()) - return false; + // All of the arguments in this function need to be formatted as JSValues because we will + // load from them in a random-access fashion and we don't want to have to switch on + // format. 
- for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) { - if (!(*it)->storedPrototype().isNull() && (*it)->storedPrototype().asCell()->structure() != it[1].get()) - return false; - } - - return true; + for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions) + argument->mergeShouldNeverUnbox(true); } - void buildOperandMapsIfNecessary(); - VM* m_vm; CodeBlock* m_codeBlock; CodeBlock* m_profiledBlock; @@ -956,54 +973,32 @@ private: BasicBlock* m_currentBlock; // The bytecode index of the current instruction being generated. unsigned m_currentIndex; + // The semantic origin of the current node if different from the current Index. + CodeOrigin m_currentSemanticOrigin; + // True if it's OK to OSR exit right now. + bool m_exitOK { false }; - // We use these values during code generation, and to avoid the need for - // special handling we make sure they are available as constants in the - // CodeBlock's constant pool. These variables are initialized to - // UINT_MAX, and lazily updated to hold an index into the CodeBlock's - // constant pool, as necessary. - unsigned m_constantUndefined; - unsigned m_constantNull; - unsigned m_constantNaN; - unsigned m_constant1; - HashMap<JSCell*, unsigned> m_cellConstants; - HashMap<JSCell*, Node*> m_cellConstantNodes; - - // A constant in the constant pool may be represented by more than one - // node in the graph, depending on the context in which it is being used. - struct ConstantRecord { - ConstantRecord() - : asInt32(0) - , asNumeric(0) - , asJSValue(0) - { - } - - Node* asInt32; - Node* asNumeric; - Node* asJSValue; - }; - - // Track the index of the node whose result is the current value for every - // register value in the bytecode - argument, local, and temporary. - Vector<ConstantRecord, 16> m_constants; + FrozenValue* m_constantUndefined; + FrozenValue* m_constantNull; + FrozenValue* m_constantNaN; + FrozenValue* m_constantOne; + Vector<Node*, 16> m_constants; // The number of arguments passed to the function. unsigned m_numArguments; // The number of locals (vars + temporaries) used in the function. unsigned m_numLocals; // The number of slots (in units of sizeof(Register)) that we need to - // preallocate for calls emanating from this frame. This includes the - // size of the CallFrame, only if this is not a leaf function. (I.e. - // this is 0 if and only if this function is a leaf.) + // preallocate for arguments to outgoing calls from this frame. This + // number includes the CallFrame slots that we initialize for the callee + // (but not the callee-initialized CallerFrame and ReturnPC slots). + // This number is 0 if and only if this function is a leaf. unsigned m_parameterSlots; // The number of var args passed to the next var arg node. unsigned m_numPassedVarArgs; HashMap<ConstantBufferKey, unsigned> m_constantBufferCache; - Vector<VariableWatchpointSet*, 16> m_localWatchpoints; - struct InlineStackEntry { ByteCodeParser* m_byteCodeParser; @@ -1011,7 +1006,7 @@ private: CodeBlock* m_profiledBlock; InlineCallFrame* m_inlineCallFrame; - ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); } + ScriptExecutable* executable() { return m_codeBlock->ownerScriptExecutable(); } QueryableExitProfile m_exitProfile; @@ -1020,7 +1015,6 @@ private: // (the machine code block, which is the transitive, though not necessarily // direct, caller). 
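// Hedged, standalone sketch (not JSC code): the role of a remap table such as
// m_identifierRemap. An inlined callee's bytecode indexes its own identifier
// table; while parsing, those indices are translated into indices of the
// machine code block's table, appending entries the root block has not seen
// yet, so the rest of the DFG only ever consults one table. Names and
// containers here are illustrative.
#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

int main()
{
    std::vector<std::string> rootIdentifiers { "x", "length" };
    std::unordered_map<std::string, unsigned> rootIndex { { "x", 0 }, { "length", 1 } };

    // The callee's own identifier table, in its own order.
    std::vector<std::string> calleeIdentifiers { "length", "y" };

    // Build the remap: callee index -> root index, appending new entries.
    std::vector<unsigned> identifierRemap;
    for (const std::string& name : calleeIdentifiers) {
        auto it = rootIndex.find(name);
        if (it == rootIndex.end()) {
            it = rootIndex.emplace(name, static_cast<unsigned>(rootIdentifiers.size())).first;
            rootIdentifiers.push_back(name);
        }
        identifierRemap.push_back(it->second);
    }

    // A callee operand that said "identifier #0" now resolves through the root table.
    std::printf("%s\n", rootIdentifiers[identifierRemap[0]].c_str()); // length
    std::printf("%s\n", rootIdentifiers[identifierRemap[1]].c_str()); // y
    return 0;
}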
Vector<unsigned> m_identifierRemap; - Vector<unsigned> m_constantRemap; Vector<unsigned> m_constantBufferRemap; Vector<unsigned> m_switchRemap; @@ -1032,8 +1026,7 @@ private: Vector<UnlinkedBlock> m_unlinkedBlocks; // Potential block linking targets. Must be sorted by bytecodeBegin, and - // cannot have two blocks that have the same bytecodeBegin. For this very - // reason, this is not equivalent to + // cannot have two blocks that have the same bytecodeBegin. Vector<BasicBlock*> m_blockLinkingTargets; // If the callsite's basic block was split into two, then this will be @@ -1055,7 +1048,9 @@ private: // code block had gathered. LazyOperandValueProfileParser m_lazyOperands; + CallLinkInfoMap m_callLinkInfos; StubInfoMap m_stubInfos; + ByValInfoMap m_byValInfos; // Did we see any returns? We need to handle the (uncommon but necessary) // case where a procedure that does not return was inlined. @@ -1078,7 +1073,7 @@ private: VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, int argumentCountIncludingThis, - CodeSpecializationKind); + InlineCallFrame::Kind); ~InlineStackEntry() { @@ -1090,11 +1085,7 @@ private: if (!m_inlineCallFrame) return operand; - if (operand.isConstant()) { - VirtualRegister result = VirtualRegister(m_constantRemap[operand.toConstantIndex()]); - ASSERT(result.isConstant()); - return result; - } + ASSERT(!operand.isConstant()); return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset); } @@ -1103,12 +1094,14 @@ private: InlineStackEntry* m_inlineStackTop; struct DelayedSetLocal { + CodeOrigin m_origin; VirtualRegister m_operand; Node* m_value; DelayedSetLocal() { } - DelayedSetLocal(VirtualRegister operand, Node* value) - : m_operand(operand) + DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value) + : m_origin(origin) + , m_operand(operand) , m_value(value) { } @@ -1116,25 +1109,19 @@ private: Node* execute(ByteCodeParser* parser, SetMode setMode = NormalSet) { if (m_operand.isArgument()) - return parser->setArgument(m_operand, m_value, setMode); - return parser->setLocal(m_operand, m_value, setMode); + return parser->setArgument(m_origin, m_operand, m_value, setMode); + return parser->setLocal(m_origin, m_operand, m_value, setMode); } }; Vector<DelayedSetLocal, 2> m_setLocalQueue; - // Have we built operand maps? We initialize them lazily, and only when doing - // inlining. - bool m_haveBuiltOperandMaps; - // Mapping between identifier names and numbers. - BorrowedIdentifierMap m_identifierMap; - // Mapping between values and constant numbers. - JSValueMap m_jsValueMap; - // Index of the empty value, or UINT_MAX if there is no mapping. This is a horrible - // work-around for the fact that JSValueMap can't handle "empty" values. 
- unsigned m_emptyJSValueIndex; + CodeBlock* m_dfgCodeBlock; + CallLinkStatus::ContextMap m_callContextMap; + StubInfoMap m_dfgStubInfos; Instruction* m_currentInstruction; + bool m_hasDebuggerEnabled; }; #define NEXT_OPCODE(name) \ @@ -1143,179 +1130,317 @@ private: #define LAST_OPCODE(name) \ m_currentIndex += OPCODE_LENGTH(name); \ + m_exitOK = false; \ return shouldContinueParsing - -void ByteCodeParser::handleCall(Instruction* currentInstruction, NodeType op, CodeSpecializationKind kind) +ByteCodeParser::Terminality ByteCodeParser::handleCall(Instruction* pc, NodeType op, CallMode callMode) { ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct)); + ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call)); + return handleCall( + pc[1].u.operand, op, callMode, OPCODE_LENGTH(op_call), + pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand); +} + +ByteCodeParser::Terminality ByteCodeParser::handleCall( + int result, NodeType op, CallMode callMode, unsigned instructionSize, + int callee, int argumentCountIncludingThis, int registerOffset) +{ + Node* callTarget = get(VirtualRegister(callee)); - Node* callTarget = get(VirtualRegister(currentInstruction[2].u.operand)); + CallLinkStatus callLinkStatus = CallLinkStatus::computeFor( + m_inlineStackTop->m_profiledBlock, currentCodeOrigin(), + m_inlineStackTop->m_callLinkInfos, m_callContextMap); + + return handleCall( + result, op, callMode, instructionSize, callTarget, + argumentCountIncludingThis, registerOffset, callLinkStatus); +} - CallLinkStatus callLinkStatus; +ByteCodeParser::Terminality ByteCodeParser::handleCall( + int result, NodeType op, CallMode callMode, unsigned instructionSize, + Node* callTarget, int argumentCountIncludingThis, int registerOffset, + CallLinkStatus callLinkStatus) +{ + return handleCall( + result, op, InlineCallFrame::kindFor(callMode), instructionSize, callTarget, argumentCountIncludingThis, + registerOffset, callLinkStatus, getPrediction()); +} - if (m_graph.isConstant(callTarget)) - callLinkStatus = CallLinkStatus(m_graph.valueOfJSConstant(callTarget)).setIsProved(true); - else { - callLinkStatus = CallLinkStatus::computeFor(m_inlineStackTop->m_profiledBlock, m_currentIndex); - callLinkStatus.setHasBadFunctionExitSite(m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadFunction)); - callLinkStatus.setHasBadCacheExitSite( - m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache) - || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint)); - callLinkStatus.setHasBadExecutableExitSite(m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadExecutable)); - } +ByteCodeParser::Terminality ByteCodeParser::handleCall( + int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize, + Node* callTarget, int argumentCountIncludingThis, int registerOffset, + CallLinkStatus callLinkStatus, SpeculatedType prediction) +{ + ASSERT(registerOffset <= 0); + + if (callTarget->isCellConstant()) + callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell())); + + if (Options::verboseDFGByteCodeParsing()) + dataLog(" Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n"); if (!callLinkStatus.canOptimize()) { // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically // that we cannot optimize them. 
- - addCall(currentInstruction, op); - return; + + Node* callNode = addCall(result, op, OpInfo(), callTarget, argumentCountIncludingThis, registerOffset, prediction); + if (callNode->op() == TailCall) + return Terminal; + ASSERT(callNode->op() != TailCallVarargs); + return NonTerminal; + } + + unsigned nextOffset = m_currentIndex + instructionSize; + + OpInfo callOpInfo; + + if (handleInlining(callTarget, result, callLinkStatus, registerOffset, virtualRegisterForArgument(0, registerOffset), VirtualRegister(), 0, argumentCountIncludingThis, nextOffset, op, kind, prediction)) { + if (m_graph.compilation()) + m_graph.compilation()->noticeInlinedCall(); + return NonTerminal; } - int argumentCountIncludingThis = currentInstruction[3].u.operand; - int registerOffset = -currentInstruction[4].u.operand; + Node* callNode = addCall(result, op, callOpInfo, callTarget, argumentCountIncludingThis, registerOffset, prediction); + if (callNode->op() == TailCall) + return Terminal; + ASSERT(callNode->op() != TailCallVarargs); + return NonTerminal; +} - int resultOperand = currentInstruction[1].u.operand; - unsigned nextOffset = m_currentIndex + OPCODE_LENGTH(op_call); +ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(Instruction* pc, NodeType op, CallMode callMode) +{ + ASSERT(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_construct_varargs)); + ASSERT(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_tail_call_varargs)); + + int result = pc[1].u.operand; + int callee = pc[2].u.operand; + int thisReg = pc[3].u.operand; + int arguments = pc[4].u.operand; + int firstFreeReg = pc[5].u.operand; + int firstVarArgOffset = pc[6].u.operand; + SpeculatedType prediction = getPrediction(); - - if (InternalFunction* function = callLinkStatus.internalFunction()) { - if (handleConstantInternalFunction(resultOperand, function, registerOffset, argumentCountIncludingThis, prediction, kind)) { - // This phantoming has to be *after* the code for the intrinsic, to signify that - // the inputs must be kept alive whatever exits the intrinsic may do. - addToGraph(Phantom, callTarget); - emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, kind); - return; - } - - // Can only handle this using the generic call handler. - addCall(currentInstruction, op); - return; - } - - Intrinsic intrinsic = callLinkStatus.intrinsicFor(kind); - if (intrinsic != NoIntrinsic) { - emitFunctionChecks(callLinkStatus, callTarget, registerOffset, kind); - - if (handleIntrinsic(resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) { - // This phantoming has to be *after* the code for the intrinsic, to signify that - // the inputs must be kept alive whatever exits the intrinsic may do. 
- addToGraph(Phantom, callTarget); - emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, kind); - if (m_graph.compilation()) - m_graph.compilation()->noticeInlinedCall(); - return; - } - } else if (handleInlining(callTarget, resultOperand, callLinkStatus, registerOffset, argumentCountIncludingThis, nextOffset, kind)) { + + Node* callTarget = get(VirtualRegister(callee)); + + CallLinkStatus callLinkStatus = CallLinkStatus::computeFor( + m_inlineStackTop->m_profiledBlock, currentCodeOrigin(), + m_inlineStackTop->m_callLinkInfos, m_callContextMap); + if (callTarget->isCellConstant()) + callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell())); + + if (Options::verboseDFGByteCodeParsing()) + dataLog(" Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n"); + + if (callLinkStatus.canOptimize() + && handleInlining(callTarget, result, callLinkStatus, firstFreeReg, VirtualRegister(thisReg), VirtualRegister(arguments), firstVarArgOffset, 0, m_currentIndex + OPCODE_LENGTH(op_call_varargs), op, InlineCallFrame::varargsKindFor(callMode), prediction)) { if (m_graph.compilation()) m_graph.compilation()->noticeInlinedCall(); - return; + return NonTerminal; } - addCall(currentInstruction, op); + CallVarargsData* data = m_graph.m_callVarargsData.add(); + data->firstVarArgOffset = firstVarArgOffset; + + Node* thisChild = get(VirtualRegister(thisReg)); + + if (op == TailCallVarargs) { + if (allInlineFramesAreTailCalls()) { + addToGraph(op, OpInfo(data), OpInfo(), callTarget, get(VirtualRegister(arguments)), thisChild); + return Terminal; + } + op = TailCallVarargsInlinedCaller; + } + + Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, get(VirtualRegister(arguments)), thisChild); + VirtualRegister resultReg(result); + if (resultReg.isValid()) + set(resultReg, call); + return NonTerminal; } -void ByteCodeParser::emitFunctionChecks(const CallLinkStatus& callLinkStatus, Node* callTarget, int registerOffset, CodeSpecializationKind kind) +void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg) { Node* thisArgument; - if (kind == CodeForCall) - thisArgument = get(virtualRegisterForArgument(0, registerOffset)); + if (thisArgumentReg.isValid()) + thisArgument = get(thisArgumentReg); else thisArgument = 0; - if (callLinkStatus.isProved()) { - addToGraph(Phantom, callTarget, thisArgument); - return; + JSCell* calleeCell; + Node* callTargetForCheck; + if (callee.isClosureCall()) { + calleeCell = callee.executable(); + callTargetForCheck = addToGraph(GetExecutable, callTarget); + } else { + calleeCell = callee.nonExecutableCallee(); + callTargetForCheck = callTarget; } - ASSERT(callLinkStatus.canOptimize()); - - if (JSFunction* function = callLinkStatus.function()) - addToGraph(CheckFunction, OpInfo(function), callTarget, thisArgument); - else { - ASSERT(callLinkStatus.structure()); - ASSERT(callLinkStatus.executable()); - - addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(callLinkStatus.structure())), callTarget); - addToGraph(CheckExecutable, OpInfo(callLinkStatus.executable()), callTarget, thisArgument); - } + ASSERT(calleeCell); + addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck, thisArgument); } -void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind kind) +void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis) { - for (int i = kind == CodeForCall ? 
0 : 1; i < argumentCountIncludingThis; ++i) + for (int i = 0; i < argumentCountIncludingThis; ++i) addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset))); } -bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind) +unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, CallMode callMode) { - // First, the really simple checks: do we have an actual JS function? - if (!callLinkStatus.executable()) - return false; - if (callLinkStatus.executable()->isHostFunction()) - return false; + CodeSpecializationKind kind = specializationKindFor(callMode); + if (verbose) + dataLog("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n"); - FunctionExecutable* executable = jsCast<FunctionExecutable*>(callLinkStatus.executable()); + if (m_hasDebuggerEnabled) { + if (verbose) + dataLog(" Failing because the debugger is in use.\n"); + return UINT_MAX; + } + + FunctionExecutable* executable = callee.functionExecutable(); + if (!executable) { + if (verbose) + dataLog(" Failing because there is no function executable.\n"); + return UINT_MAX; + } // Does the number of arguments we're passing match the arity of the target? We currently // inline only if the number of arguments passed is greater than or equal to the number // arguments expected. - if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis) - return false; + if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis) { + if (verbose) + dataLog(" Failing because of arity mismatch.\n"); + return UINT_MAX; + } + + // Do we have a code block, and does the code block's size match the heuristics/requirements for + // being an inline candidate? We might not have a code block (1) if code was thrown away, + // (2) if we simply hadn't actually made this call yet or (3) code is a builtin function and + // specialization kind is construct. In the former 2 cases, we could still theoretically attempt + // to inline it if we had a static proof of what was being called; this might happen for example + // if you call a global function, where watchpointing gives us static information. Overall, + // it's a rare case because we expect that any hot callees would have already been compiled. + CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind); + if (!codeBlock) { + if (verbose) + dataLog(" Failing because no code block available.\n"); + return UINT_MAX; + } + CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel( + codeBlock, kind, callee.isClosureCall()); + if (verbose) { + dataLog(" Call mode: ", callMode, "\n"); + dataLog(" Is closure call: ", callee.isClosureCall(), "\n"); + dataLog(" Capability level: ", capabilityLevel, "\n"); + dataLog(" Might inline function: ", mightInlineFunctionFor(codeBlock, kind), "\n"); + dataLog(" Might compile function: ", mightCompileFunctionFor(codeBlock, kind), "\n"); + dataLog(" Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n"); + dataLog(" Is inlining candidate: ", codeBlock->ownerScriptExecutable()->isInliningCandidate(), "\n"); + } + if (!canInline(capabilityLevel)) { + if (verbose) + dataLog(" Failing because the function is not inlineable.\n"); + return UINT_MAX; + } + + // Check if the caller is already too large. 
We do this check here because that's just + // where we happen to also have the callee's code block, and we want that for the + // purpose of unsetting SABI. + if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) { + codeBlock->m_shouldAlwaysBeInlined = false; + if (verbose) + dataLog(" Failing because the caller is too large.\n"); + return UINT_MAX; + } + + // FIXME: this should be better at predicting how much bloat we will introduce by inlining + // this function. + // https://bugs.webkit.org/show_bug.cgi?id=127627 + + // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These + // functions have very low fidelity profiling, and presumably they weren't very hot if they + // haven't gotten to Baseline yet. Consider not inlining these functions. + // https://bugs.webkit.org/show_bug.cgi?id=145503 + + // Have we exceeded inline stack depth, or are we trying to inline a recursive call to + // too many levels? If either of these are detected, then don't inline. We adjust our + // heuristics if we are dealing with a function that cannot otherwise be compiled. - // Have we exceeded inline stack depth, or are we trying to inline a recursive call? - // If either of these are detected, then don't inline. unsigned depth = 0; + unsigned recursion = 0; + for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) { ++depth; - if (depth >= Options::maximumInliningDepth()) - return false; // Depth exceeded. + if (depth >= Options::maximumInliningDepth()) { + if (verbose) + dataLog(" Failing because depth exceeded.\n"); + return UINT_MAX; + } - if (entry->executable() == executable) - return false; // Recursion detected. + if (entry->executable() == executable) { + ++recursion; + if (recursion >= Options::maximumInliningRecursion()) { + if (verbose) + dataLog(" Failing because recursion detected.\n"); + return UINT_MAX; + } + } } - // Do we have a code block, and does the code block's size match the heuristics/requirements for - // being an inline candidate? We might not have a code block if code was thrown away or if we - // simply hadn't actually made this call yet. We could still theoretically attempt to inline it - // if we had a static proof of what was being called; this might happen for example if you call a - // global function, where watchpointing gives us static information. Overall, it's a rare case - // because we expect that any hot callees would have already been compiled. - CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind); - if (!codeBlock) - return false; - if (!canInlineFunctionFor(codeBlock, kind, callLinkStatus.isClosureCall())) - return false; + if (verbose) + dataLog(" Inlining should be possible.\n"); + + // It might be possible to inline. + return codeBlock->instructionCount(); +} + +template<typename ChecksFunctor> +void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, const ChecksFunctor& insertChecks) +{ + CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind); - // Now we know without a doubt that we are committed to inlining. So begin the process - // by checking the callee (if necessary) and making sure that arguments and the callee - // are flushed. 
- emitFunctionChecks(callLinkStatus, callTargetNode, registerOffset, kind); + ASSERT(inliningCost(callee, argumentCountIncludingThis, InlineCallFrame::callModeFor(kind)) != UINT_MAX); + CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind); + insertChecks(codeBlock); + // FIXME: Don't flush constants! int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + JSStack::CallFrameHeaderSize; - // Make sure that we have enough locals. - unsigned newNumLocals = VirtualRegister(inlineCallFrameStart).toLocal() + 1 + JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters; - if (newNumLocals > m_numLocals) { - m_numLocals = newNumLocals; - for (size_t i = 0; i < m_graph.numBlocks(); ++i) - m_graph.block(i)->ensureLocals(newNumLocals); - } + ensureLocals( + VirtualRegister(inlineCallFrameStart).toLocal() + 1 + + JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeLocals); size_t argumentPositionStart = m_graph.m_argumentPositions.size(); + VirtualRegister resultReg(resultOperand); + if (resultReg.isValid()) + resultReg = m_inlineStackTop->remapOperand(resultReg); + + VariableAccessData* calleeVariable = nullptr; + if (callee.isClosureCall()) { + Node* calleeSet = set( + VirtualRegister(registerOffset + JSStack::Callee), callTargetNode, ImmediateNakedSet); + + calleeVariable = calleeSet->variableAccessData(); + calleeVariable->mergeShouldNeverUnbox(true); + } + InlineStackEntry inlineStackEntry( - this, codeBlock, codeBlock, m_graph.lastBlock(), callLinkStatus.function(), - m_inlineStackTop->remapOperand(VirtualRegister(resultOperand)), + this, codeBlock, codeBlock, m_graph.lastBlock(), callee.function(), resultReg, (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind); // This is where the actual inlining really happens. unsigned oldIndex = m_currentIndex; m_currentIndex = 0; + // At this point, it's again OK to OSR exit. + m_exitOK = true; + InlineVariableData inlineVariableData; inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame; inlineVariableData.argumentPositionStart = argumentPositionStart; @@ -1323,24 +1448,19 @@ bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, con RELEASE_ASSERT( m_inlineStackTop->m_inlineCallFrame->isClosureCall - == callLinkStatus.isClosureCall()); - if (callLinkStatus.isClosureCall()) { - VariableAccessData* calleeVariable = - set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateSet)->variableAccessData(); - VariableAccessData* scopeVariable = - set(VirtualRegister(JSStack::ScopeChain), addToGraph(GetScope, callTargetNode), ImmediateSet)->variableAccessData(); - - calleeVariable->mergeShouldNeverUnbox(true); - scopeVariable->mergeShouldNeverUnbox(true); - + == callee.isClosureCall()); + if (callee.isClosureCall()) { + RELEASE_ASSERT(calleeVariable); inlineVariableData.calleeVariable = calleeVariable; } m_graph.m_inlineVariableData.append(inlineVariableData); - + parseCodeBlock(); + clearCaches(); // Reset our state now that we're back to the outer code. m_currentIndex = oldIndex; + m_exitOK = false; // If the inlined code created some new basic blocks, then we have linking to do. if (inlineStackEntry.m_callsiteBlockHead != m_graph.lastBlock()) { @@ -1351,20 +1471,8 @@ bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, con else ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked); - // It's possible that the callsite block head is not owned by the caller. 
- if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) { - // It's definitely owned by the caller, because the caller created new blocks. - // Assert that this all adds up. - ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_block == inlineStackEntry.m_callsiteBlockHead); - ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking); - inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false; - } else { - // It's definitely not owned by the caller. Tell the caller that he does not - // need to link his callsite block head, because we did it for him. - ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking); - ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead); - inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false; - } + if (callerLinkability == CallerDoesNormalLinking) + cancelLinkingForBlock(inlineStackEntry.m_caller, inlineStackEntry.m_callsiteBlockHead); linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets); } else @@ -1374,7 +1482,10 @@ bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, con // If there was a return, but no early returns, then we're done. We allow parsing of // the caller to continue in whatever basic block we're in right now. if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) { - ASSERT(lastBlock->isEmpty() || !lastBlock->last()->isTerminal()); + if (Options::verboseDFGByteCodeParsing()) + dataLog(" Allowing parsing to continue in last inlined block.\n"); + + ASSERT(lastBlock->isEmpty() || !lastBlock->terminal()); // If we created new blocks then the last block needs linking, but in the // caller. It doesn't need to be linked to, but it needs outgoing links. @@ -1382,20 +1493,28 @@ bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, con // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter // for release builds because this block will never serve as a potential target // in the linker's binary search. + if (Options::verboseDFGByteCodeParsing()) + dataLog(" Repurposing last block from ", lastBlock->bytecodeBegin, " to ", m_currentIndex, "\n"); lastBlock->bytecodeBegin = m_currentIndex; - m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock())); + if (callerLinkability == CallerDoesNormalLinking) { + if (verbose) + dataLog("Adding unlinked block ", RawPointer(m_graph.lastBlock()), " (one return)\n"); + m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock())); + } } m_currentBlock = m_graph.lastBlock(); - return true; + return; } - + + if (Options::verboseDFGByteCodeParsing()) + dataLog(" Creating new block after inlining.\n"); + // If we get to this point then all blocks must end in some sort of terminals. - ASSERT(lastBlock->last()->isTerminal()); - + ASSERT(lastBlock->terminal()); // Need to create a new basic block for the continuation at the caller. - RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals)); + RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals, 1)); // Link the early returns to the basic block we're about to create. 
for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) { @@ -1403,36 +1522,458 @@ bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, con continue; BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block; ASSERT(!blockToLink->isLinked); - Node* node = blockToLink->last(); + Node* node = blockToLink->terminal(); ASSERT(node->op() == Jump); - ASSERT(node->takenBlock() == 0); - node->setTakenBlock(block.get()); + ASSERT(!node->targetBlock()); + node->targetBlock() = block.get(); inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false; -#if !ASSERT_DISABLED - blockToLink->isLinked = true; -#endif + if (verbose) + dataLog("Marking ", RawPointer(blockToLink), " as linked (jumps to return)\n"); + blockToLink->didLink(); } m_currentBlock = block.get(); ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset); - m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get())); - m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get()); + if (verbose) + dataLog("Adding unlinked block ", RawPointer(block.get()), " (many returns)\n"); + if (callerLinkability == CallerDoesNormalLinking) { + m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get())); + m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get()); + } m_graph.appendBlock(block); prepareToParseBlock(); +} + +void ByteCodeParser::cancelLinkingForBlock(InlineStackEntry* inlineStackEntry, BasicBlock* block) +{ + // It's possible that the callsite block head is not owned by the caller. + if (!inlineStackEntry->m_unlinkedBlocks.isEmpty()) { + // It's definitely owned by the caller, because the caller created new blocks. + // Assert that this all adds up. + ASSERT_UNUSED(block, inlineStackEntry->m_unlinkedBlocks.last().m_block == block); + ASSERT(inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking); + inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking = false; + } else { + // It's definitely not owned by the caller. Tell the caller that he does not + // need to link his callsite block head, because we did it for him. + ASSERT(inlineStackEntry->m_callsiteBlockHeadNeedsLinking); + ASSERT_UNUSED(block, inlineStackEntry->m_callsiteBlockHead == block); + inlineStackEntry->m_callsiteBlockHeadNeedsLinking = false; + } +} + +template<typename ChecksFunctor> +bool ByteCodeParser::attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks) +{ + CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind); + + if (!inliningBalance) + return false; + + bool didInsertChecks = false; + auto insertChecksWithAccounting = [&] () { + insertChecks(nullptr); + didInsertChecks = true; + }; + + if (verbose) + dataLog(" Considering callee ", callee, "\n"); + + // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because + // we currently don't have any way of getting profiling information for arguments to non-JS varargs + // calls. 
The prediction propagator won't be of any help because LoadVarargs obscures the data flow, + // and there are no callsite value profiles and native function won't have callee value profiles for + // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to + // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without + // calling LoadVarargs twice. + if (!InlineCallFrame::isVarargs(kind)) { + if (InternalFunction* function = callee.internalFunction()) { + if (handleConstantInternalFunction(callTargetNode, resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind, insertChecksWithAccounting)) { + RELEASE_ASSERT(didInsertChecks); + addToGraph(Phantom, callTargetNode); + emitArgumentPhantoms(registerOffset, argumentCountIncludingThis); + inliningBalance--; + return true; + } + RELEASE_ASSERT(!didInsertChecks); + return false; + } + + Intrinsic intrinsic = callee.intrinsicFor(specializationKind); + if (intrinsic != NoIntrinsic) { + if (handleIntrinsicCall(resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) { + RELEASE_ASSERT(didInsertChecks); + addToGraph(Phantom, callTargetNode); + emitArgumentPhantoms(registerOffset, argumentCountIncludingThis); + inliningBalance--; + return true; + } + RELEASE_ASSERT(!didInsertChecks); + return false; + } + } + + unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, InlineCallFrame::callModeFor(kind)); + if (myInliningCost > inliningBalance) + return false; + + Instruction* savedCurrentInstruction = m_currentInstruction; + inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, nextOffset, kind, callerLinkability, insertChecks); + inliningBalance -= myInliningCost; + m_currentInstruction = savedCurrentInstruction; + return true; +} + +bool ByteCodeParser::handleInlining( + Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, + int registerOffsetOrFirstFreeReg, VirtualRegister thisArgument, + VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis, + unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction) +{ + if (verbose) { + dataLog("Handling inlining...\n"); + dataLog("Stack: ", currentCodeOrigin(), "\n"); + } + CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind); + + if (!callLinkStatus.size()) { + if (verbose) + dataLog("Bailing inlining.\n"); + return false; + } + + if (InlineCallFrame::isVarargs(kind) + && callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) { + if (verbose) + dataLog("Bailing inlining because of varargs.\n"); + return false; + } + + unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount(); + if (specializationKind == CodeForConstruct) + inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount()); + if (callLinkStatus.isClosureCall()) + inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount()); + + // First check if we can avoid creating control flow. Our inliner does some CFG + // simplification on the fly and this helps reduce compile times, but we can only leverage + // this in cases where we don't need control flow diamonds to check the callee. 
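// Hedged, standalone sketch (not part of the patch) of the decision made just
// below: with exactly one known callee and no observed slow path, a single
// cell check guards the inlined body and no new control flow is needed;
// otherwise the parser must either build a Switch over the callee (the "hard"
// path further down) or fall back to a generic call. The struct and names
// here are illustrative stand-ins, not JSC API.
#include <cstdio>

struct CallSiteProfile {
    unsigned distinctCallees; // how many different callees the call IC observed
    bool tookSlowPath;        // did the IC ever miss completely?
};

enum class InlineStrategy { GuardAndInline, SwitchOnCallee, EmitGenericCall };

static InlineStrategy chooseStrategy(const CallSiteProfile& profile)
{
    if (!profile.distinctCallees)
        return InlineStrategy::EmitGenericCall; // nothing to specialize on
    if (!profile.tookSlowPath && profile.distinctCallees == 1)
        return InlineStrategy::GuardAndInline;  // cheap: no control-flow diamond
    return InlineStrategy::SwitchOnCallee;      // needs one block per callee
}

int main()
{
    std::printf("%d\n", static_cast<int>(chooseStrategy({ 1, false }))); // 0: GuardAndInline
    std::printf("%d\n", static_cast<int>(chooseStrategy({ 3, true })));  // 1: SwitchOnCallee
    return 0;
}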
+ if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) { + int registerOffset; + + // Only used for varargs calls. + unsigned mandatoryMinimum = 0; + unsigned maxNumArguments = 0; + + if (InlineCallFrame::isVarargs(kind)) { + if (FunctionExecutable* functionExecutable = callLinkStatus[0].functionExecutable()) + mandatoryMinimum = functionExecutable->parameterCount(); + else + mandatoryMinimum = 0; + + // includes "this" + maxNumArguments = std::max( + callLinkStatus.maxNumArguments(), + mandatoryMinimum + 1); + + // We sort of pretend that this *is* the number of arguments that were passed. + argumentCountIncludingThis = maxNumArguments; + + registerOffset = registerOffsetOrFirstFreeReg + 1; + registerOffset -= maxNumArguments; // includes "this" + registerOffset -= JSStack::CallFrameHeaderSize; + registerOffset = -WTF::roundUpToMultipleOf( + stackAlignmentRegisters(), + -registerOffset); + } else + registerOffset = registerOffsetOrFirstFreeReg; + + bool result = attemptToInlineCall( + callTargetNode, resultOperand, callLinkStatus[0], registerOffset, + argumentCountIncludingThis, nextOffset, kind, CallerDoesNormalLinking, prediction, + inliningBalance, [&] (CodeBlock* codeBlock) { + emitFunctionChecks(callLinkStatus[0], callTargetNode, thisArgument); + + // If we have a varargs call, we want to extract the arguments right now. + if (InlineCallFrame::isVarargs(kind)) { + int remappedRegisterOffset = + m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset(); + + ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal()); + + int argumentStart = registerOffset + JSStack::CallFrameHeaderSize; + int remappedArgumentStart = + m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset(); + + LoadVarargsData* data = m_graph.m_loadVarargsData.add(); + data->start = VirtualRegister(remappedArgumentStart + 1); + data->count = VirtualRegister(remappedRegisterOffset + JSStack::ArgumentCount); + data->offset = argumentsOffset; + data->limit = maxNumArguments; + data->mandatoryMinimum = mandatoryMinimum; + + addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument)); + + // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument + // and argumentsArgument for the baseline JIT. However, we only need a Phantom for + // callTargetNode because the other 2 are still in use and alive at this point. + addToGraph(Phantom, callTargetNode); + + // In DFG IR before SSA, we cannot insert control flow between after the + // LoadVarargs and the last SetArgument. This isn't a problem once we get to DFG + // SSA. Fortunately, we also have other reasons for not inserting control flow + // before SSA. + + VariableAccessData* countVariable = newVariableAccessData( + VirtualRegister(remappedRegisterOffset + JSStack::ArgumentCount)); + // This is pretty lame, but it will force the count to be flushed as an int. This doesn't + // matter very much, since our use of a SetArgument and Flushes for this local slot is + // mostly just a formality. 
+ countVariable->predict(SpecInt32); + countVariable->mergeIsProfitableToUnbox(true); + Node* setArgumentCount = addToGraph(SetArgument, OpInfo(countVariable)); + m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount); + + set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet); + for (unsigned argument = 1; argument < maxNumArguments; ++argument) { + VariableAccessData* variable = newVariableAccessData( + VirtualRegister(remappedArgumentStart + argument)); + variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit. + + // For a while it had been my intention to do things like this inside the + // prediction injection phase. But in this case it's really best to do it here, + // because it's here that we have access to the variable access datas for the + // inlining we're about to do. + // + // Something else that's interesting here is that we'd really love to get + // predictions from the arguments loaded at the callsite, rather than the + // arguments received inside the callee. But that probably won't matter for most + // calls. + if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) { + ConcurrentJITLocker locker(codeBlock->m_lock); + if (ValueProfile* profile = codeBlock->valueProfileForArgument(argument)) + variable->predict(profile->computeUpdatedPrediction(locker)); + } + + Node* setArgument = addToGraph(SetArgument, OpInfo(variable)); + m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument); + } + } + }); + if (verbose) { + dataLog("Done inlining (simple).\n"); + dataLog("Stack: ", currentCodeOrigin(), "\n"); + dataLog("Result: ", result, "\n"); + } + return result; + } + + // We need to create some kind of switch over callee. For now we only do this if we believe that + // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to + // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in + // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that + // we could improve that aspect of this by doing polymorphic inlining but having the profiling + // also. + if (!isFTL(m_graph.m_plan.mode) || !Options::usePolymorphicCallInlining() + || InlineCallFrame::isVarargs(kind)) { + if (verbose) { + dataLog("Bailing inlining (hard).\n"); + dataLog("Stack: ", currentCodeOrigin(), "\n"); + } + return false; + } + + unsigned oldOffset = m_currentIndex; + + bool allAreClosureCalls = true; + bool allAreDirectCalls = true; + for (unsigned i = callLinkStatus.size(); i--;) { + if (callLinkStatus[i].isClosureCall()) + allAreDirectCalls = false; + else + allAreClosureCalls = false; + } + + Node* thingToSwitchOn; + if (allAreDirectCalls) + thingToSwitchOn = callTargetNode; + else if (allAreClosureCalls) + thingToSwitchOn = addToGraph(GetExecutable, callTargetNode); + else { + // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases + // where it would be beneficial. It might be best to handle these cases as if all calls were + // closure calls. 
+ // https://bugs.webkit.org/show_bug.cgi?id=136020 + if (verbose) { + dataLog("Bailing inlining (mix).\n"); + dataLog("Stack: ", currentCodeOrigin(), "\n"); + } + return false; + } + + if (verbose) { + dataLog("Doing hard inlining...\n"); + dataLog("Stack: ", currentCodeOrigin(), "\n"); + } + + int registerOffset = registerOffsetOrFirstFreeReg; + + // This makes me wish that we were in SSA all the time. We need to pick a variable into which to + // store the callee so that it will be accessible to all of the blocks we're about to create. We + // get away with doing an immediate-set here because we wouldn't have performed any side effects + // yet. + if (verbose) + dataLog("Register offset: ", registerOffset); + VirtualRegister calleeReg(registerOffset + JSStack::Callee); + calleeReg = m_inlineStackTop->remapOperand(calleeReg); + if (verbose) + dataLog("Callee is going to be ", calleeReg, "\n"); + setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush); + + // It's OK to exit right now, even though we set some locals. That's because those locals are not + // user-visible. + m_exitOK = true; + addToGraph(ExitOK); + + SwitchData& data = *m_graph.m_switchData.add(); + data.kind = SwitchCell; + addToGraph(Switch, OpInfo(&data), thingToSwitchOn); + + BasicBlock* originBlock = m_currentBlock; + if (verbose) + dataLog("Marking ", RawPointer(originBlock), " as linked (origin of poly inline)\n"); + originBlock->didLink(); + cancelLinkingForBlock(m_inlineStackTop, originBlock); + + // Each inlined callee will have a landing block that it returns at. They should all have jumps + // to the continuation block, which we create last. + Vector<BasicBlock*> landingBlocks; + + // We may force this true if we give up on inlining any of the edges. + bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath(); + + if (verbose) + dataLog("About to loop over functions at ", currentCodeOrigin(), ".\n"); + + for (unsigned i = 0; i < callLinkStatus.size(); ++i) { + m_currentIndex = oldOffset; + RefPtr<BasicBlock> block = adoptRef(new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1)); + m_currentBlock = block.get(); + m_graph.appendBlock(block); + prepareToParseBlock(); + + Node* myCallTargetNode = getDirect(calleeReg); + + bool inliningResult = attemptToInlineCall( + myCallTargetNode, resultOperand, callLinkStatus[i], registerOffset, + argumentCountIncludingThis, nextOffset, kind, CallerLinksManually, prediction, + inliningBalance, [&] (CodeBlock*) { }); + + if (!inliningResult) { + // That failed so we let the block die. Nothing interesting should have been added to + // the block. We also give up on inlining any of the (less frequent) callees. + ASSERT(m_currentBlock == block.get()); + ASSERT(m_graph.m_blocks.last() == block); + m_graph.killBlockAndItsContents(block.get()); + m_graph.m_blocks.removeLast(); + + // The fact that inlining failed means we need a slow path. + couldTakeSlowPath = true; + break; + } + + JSCell* thingToCaseOn; + if (allAreDirectCalls) + thingToCaseOn = callLinkStatus[i].nonExecutableCallee(); + else { + ASSERT(allAreClosureCalls); + thingToCaseOn = callLinkStatus[i].executable(); + } + data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), block.get())); + m_currentIndex = nextOffset; + m_exitOK = true; + processSetLocalQueue(); // This only comes into play for intrinsics, since normal inlined code will leave an empty queue. 
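// Hedged, standalone analogy (not JSC code) for the control flow this "hard"
// inlining path is assembling: one block per known callee (the cases of the
// Switch built above), each ending in a Jump to a shared continuation, plus a
// slow-path block that performs a real call when nothing matches. Plain
// function pointers stand in for callee cells.
#include <cstdio>

using Callee = int (*)(int);

static int twice(int x) { return 2 * x; }
static int square(int x) { return x * x; }
static int addSeven(int x) { return x + 7; }

static int callSite(Callee callee, int argument)
{
    int result;
    if (callee == twice)        // case block: inlined body of `twice`
        result = 2 * argument;
    else if (callee == square)  // case block: inlined body of `square`
        result = argument * argument;
    else                        // slow-path block: a genuine call
        result = callee(argument);
    return result;              // continuation block: every path lands here
}

int main()
{
    std::printf("%d %d %d\n", callSite(twice, 5), callSite(square, 5), callSite(addSeven, 5));
    return 0;
}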
+ if (Node* terminal = m_currentBlock->terminal()) + ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs); + else { + addToGraph(Jump); + landingBlocks.append(m_currentBlock); + } + if (verbose) + dataLog("Marking ", RawPointer(m_currentBlock), " as linked (tail of poly inlinee)\n"); + m_currentBlock->didLink(); + + if (verbose) + dataLog("Finished inlining ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n"); + } + + RefPtr<BasicBlock> slowPathBlock = adoptRef( + new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1)); + m_currentIndex = oldOffset; + m_exitOK = true; + data.fallThrough = BranchTarget(slowPathBlock.get()); + m_graph.appendBlock(slowPathBlock); + if (verbose) + dataLog("Marking ", RawPointer(slowPathBlock.get()), " as linked (slow path block)\n"); + slowPathBlock->didLink(); + prepareToParseBlock(); + m_currentBlock = slowPathBlock.get(); + Node* myCallTargetNode = getDirect(calleeReg); + if (couldTakeSlowPath) { + addCall( + resultOperand, callOp, OpInfo(), myCallTargetNode, argumentCountIncludingThis, + registerOffset, prediction); + } else { + addToGraph(CheckBadCell); + addToGraph(Phantom, myCallTargetNode); + emitArgumentPhantoms(registerOffset, argumentCountIncludingThis); + + set(VirtualRegister(resultOperand), addToGraph(BottomValue)); + } + + m_currentIndex = nextOffset; + m_exitOK = true; // Origin changed, so it's fine to exit again. + processSetLocalQueue(); + if (Node* terminal = m_currentBlock->terminal()) + ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs); + else { + addToGraph(Jump); + landingBlocks.append(m_currentBlock); + } + + RefPtr<BasicBlock> continuationBlock = adoptRef( + new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1)); + m_graph.appendBlock(continuationBlock); + if (verbose) + dataLog("Adding unlinked block ", RawPointer(continuationBlock.get()), " (continuation)\n"); + m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(continuationBlock.get())); + prepareToParseBlock(); + m_currentBlock = continuationBlock.get(); - // At this point we return and continue to generate code for the caller, but - // in the new basic block. 
+ for (unsigned i = landingBlocks.size(); i--;) + landingBlocks[i]->terminal()->targetBlock() = continuationBlock.get(); + + m_currentIndex = oldOffset; + m_exitOK = true; + + if (verbose) { + dataLog("Done inlining (hard).\n"); + dataLog("Stack: ", currentCodeOrigin(), "\n"); + } return true; } -bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis) +template<typename ChecksFunctor> +bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks) { if (argumentCountIncludingThis == 1) { // Math.min() - set(VirtualRegister(resultOperand), constantNaN()); + insertChecks(); + set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN))); return true; } if (argumentCountIncludingThis == 2) { // Math.min(x) + insertChecks(); Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset))); addToGraph(Phantom, Edge(result, NumberUse)); set(VirtualRegister(resultOperand), result); @@ -1440,6 +1981,7 @@ bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOf } if (argumentCountIncludingThis == 3) { // Math.min(x, y) + insertChecks(); set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)))); return true; } @@ -1448,74 +1990,101 @@ bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOf return false; } -bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction) +template<typename ChecksFunctor> +bool ByteCodeParser::handleIntrinsicCall(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks) { switch (intrinsic) { + + // Intrinsic Functions: + case AbsIntrinsic: { if (argumentCountIncludingThis == 1) { // Math.abs() - set(VirtualRegister(resultOperand), constantNaN()); + insertChecks(); + set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN))); return true; } if (!MacroAssembler::supportsFloatingPointAbs()) return false; + insertChecks(); Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset))); if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) - node->mergeFlags(NodeMayOverflow); + node->mergeFlags(NodeMayOverflowInt32InDFG); set(VirtualRegister(resultOperand), node); return true; } case MinIntrinsic: - return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis); + return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks); case MaxIntrinsic: - return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis); - + return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis, insertChecks); + case SqrtIntrinsic: case CosIntrinsic: - case SinIntrinsic: { + case SinIntrinsic: + case LogIntrinsic: { if (argumentCountIncludingThis == 1) { - set(VirtualRegister(resultOperand), constantNaN()); + insertChecks(); + set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN))); return true; } switch (intrinsic) { case SqrtIntrinsic: - if (!MacroAssembler::supportsFloatingPointSqrt()) - return false; - + insertChecks(); set(VirtualRegister(resultOperand), 
addToGraph(ArithSqrt, get(virtualRegisterForArgument(1, registerOffset)))); return true; case CosIntrinsic: + insertChecks(); set(VirtualRegister(resultOperand), addToGraph(ArithCos, get(virtualRegisterForArgument(1, registerOffset)))); return true; case SinIntrinsic: + insertChecks(); set(VirtualRegister(resultOperand), addToGraph(ArithSin, get(virtualRegisterForArgument(1, registerOffset)))); return true; + + case LogIntrinsic: + insertChecks(); + set(VirtualRegister(resultOperand), addToGraph(ArithLog, get(virtualRegisterForArgument(1, registerOffset)))); + return true; default: RELEASE_ASSERT_NOT_REACHED(); return false; } } + + case PowIntrinsic: { + if (argumentCountIncludingThis < 3) { + // Math.pow() and Math.pow(x) return NaN. + insertChecks(); + set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN))); + return true; + } + insertChecks(); + VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset); + VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset); + set(VirtualRegister(resultOperand), addToGraph(ArithPow, get(xOperand), get(yOperand))); + return true; + } case ArrayPushIntrinsic: { if (argumentCountIncludingThis != 2) return false; - ArrayMode arrayMode = getArrayMode(m_currentInstruction[6].u.arrayProfile); + ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile); if (!arrayMode.isJSArray()) return false; switch (arrayMode.type()) { - case Array::Undecided: case Array::Int32: case Array::Double: case Array::Contiguous: case Array::ArrayStorage: { + insertChecks(); Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset))); set(VirtualRegister(resultOperand), arrayPush); @@ -1531,7 +2100,7 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int if (argumentCountIncludingThis != 1) return false; - ArrayMode arrayMode = getArrayMode(m_currentInstruction[6].u.arrayProfile); + ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile); if (!arrayMode.isJSArray()) return false; switch (arrayMode.type()) { @@ -1539,6 +2108,7 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int case Array::Double: case Array::Contiguous: case Array::ArrayStorage: { + insertChecks(); Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset))); set(VirtualRegister(resultOperand), arrayPop); return true; @@ -1553,6 +2123,7 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int if (argumentCountIncludingThis != 2) return false; + insertChecks(); VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset); VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset); Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand)); @@ -1565,6 +2136,7 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int if (argumentCountIncludingThis != 2) return false; + insertChecks(); VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset); VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset); Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand)); 
@@ -1572,10 +2144,21 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int set(VirtualRegister(resultOperand), charCode); return true; } + case Clz32Intrinsic: { + insertChecks(); + if (argumentCountIncludingThis == 1) + set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32))))); + else { + Node* operand = get(virtualRegisterForArgument(1, registerOffset)); + set(VirtualRegister(resultOperand), addToGraph(ArithClz32, operand)); + } + return true; + } case FromCharCodeIntrinsic: { if (argumentCountIncludingThis != 2) return false; + insertChecks(); VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset); Node* charCode = addToGraph(StringFromCharCode, get(indexOperand)); @@ -1588,6 +2171,7 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int if (argumentCountIncludingThis != 2) return false; + insertChecks(); Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset))); set(VirtualRegister(resultOperand), regExpExec); @@ -1598,15 +2182,59 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int if (argumentCountIncludingThis != 2) return false; + insertChecks(); Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset))); set(VirtualRegister(resultOperand), regExpExec); return true; } + case StringPrototypeReplaceIntrinsic: { + if (!isFTL(m_graph.m_plan.mode)) { + // This is a marginally profitable intrinsic. We've only the work to make it an + // intrinsic on the fourth tier. 
+ return false; + } + + if (argumentCountIncludingThis != 3) + return false; + + insertChecks(); + Node* result = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))); + set(VirtualRegister(resultOperand), result); + return true; + } + + case RoundIntrinsic: + case FloorIntrinsic: + case CeilIntrinsic: { + if (argumentCountIncludingThis == 1) { + insertChecks(); + set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN))); + return true; + } + if (argumentCountIncludingThis == 2) { + insertChecks(); + Node* operand = get(virtualRegisterForArgument(1, registerOffset)); + NodeType op; + if (intrinsic == RoundIntrinsic) + op = ArithRound; + else if (intrinsic == FloorIntrinsic) + op = ArithFloor; + else { + ASSERT(intrinsic == CeilIntrinsic); + op = ArithCeil; + } + Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand); + set(VirtualRegister(resultOperand), roundNode); + return true; + } + return false; + } case IMulIntrinsic: { if (argumentCountIncludingThis != 3) return false; + insertChecks(); VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset); VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset); Node* left = get(leftOperand); @@ -1614,15 +2242,160 @@ bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right)); return true; } + + case RandomIntrinsic: { + if (argumentCountIncludingThis != 1) + return false; + insertChecks(); + set(VirtualRegister(resultOperand), addToGraph(ArithRandom)); + return true; + } + + case FRoundIntrinsic: { + if (argumentCountIncludingThis != 2) + return false; + insertChecks(); + VirtualRegister operand = virtualRegisterForArgument(1, registerOffset); + set(VirtualRegister(resultOperand), addToGraph(ArithFRound, get(operand))); + return true; + } + + case DFGTrueIntrinsic: { + insertChecks(); + set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true))); + return true; + } + + case OSRExitIntrinsic: { + insertChecks(); + addToGraph(ForceOSRExit); + set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined))); + return true; + } + + case IsFinalTierIntrinsic: { + insertChecks(); + set(VirtualRegister(resultOperand), + jsConstant(jsBoolean(Options::useFTLJIT() ? 
isFTL(m_graph.m_plan.mode) : true))); + return true; + } + + case SetInt32HeapPredictionIntrinsic: { + insertChecks(); + for (int i = 1; i < argumentCountIncludingThis; ++i) { + Node* node = get(virtualRegisterForArgument(i, registerOffset)); + if (node->hasHeapPrediction()) + node->setHeapPrediction(SpecInt32); + } + set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined))); + return true; + } + + case CheckInt32Intrinsic: { + insertChecks(); + for (int i = 1; i < argumentCountIncludingThis; ++i) { + Node* node = get(virtualRegisterForArgument(i, registerOffset)); + addToGraph(Phantom, Edge(node, Int32Use)); + } + set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true))); + return true; + } + + case FiatInt52Intrinsic: { + if (argumentCountIncludingThis != 2) + return false; + insertChecks(); + VirtualRegister operand = virtualRegisterForArgument(1, registerOffset); + if (enableInt52()) + set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand))); + else + set(VirtualRegister(resultOperand), get(operand)); + return true; + } default: return false; } } +template<typename ChecksFunctor> +bool ByteCodeParser::handleIntrinsicGetter(int resultOperand, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks) +{ + switch (variant.intrinsic()) { + case TypedArrayByteLengthIntrinsic: { + insertChecks(); + + TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType; + Array::Type arrayType = toArrayType(type); + size_t logSize = logElementSize(type); + + variant.structureSet().forEach([&] (Structure* structure) { + TypedArrayType curType = structure->classInfo()->typedArrayStorageType; + ASSERT(logSize == logElementSize(curType)); + arrayType = refineTypedArrayType(arrayType, curType); + ASSERT(arrayType != Array::Generic); + }); + + Node* lengthNode = addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType).asWord()), thisNode); + + if (!logSize) { + set(VirtualRegister(resultOperand), lengthNode); + return true; + } + + // We can use a BitLShift here because typed arrays will never have a byteLength + // that overflows int32. 
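// Minimal sketch (helper name hypothetical, not JSC API) of the BitLShift emitted just below:
// byteLength is length << logElementSize, and, as the comment above notes, typed arrays never
// have a byteLength that overflows int32, so the shift cannot overflow either.
static inline int typedArrayByteLength(int length, unsigned logElementSize)
{
    return length << logElementSize; // Uint8Array: << 0, Int32Array: << 2, Float64Array: << 3
}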
+ Node* shiftNode = jsConstant(jsNumber(logSize)); + set(VirtualRegister(resultOperand), addToGraph(BitLShift, lengthNode, shiftNode)); + + return true; + } + + case TypedArrayLengthIntrinsic: { + insertChecks(); + + TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType; + Array::Type arrayType = toArrayType(type); + + variant.structureSet().forEach([&] (Structure* structure) { + TypedArrayType curType = structure->classInfo()->typedArrayStorageType; + arrayType = refineTypedArrayType(arrayType, curType); + ASSERT(arrayType != Array::Generic); + }); + + set(VirtualRegister(resultOperand), addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType).asWord()), thisNode)); + + return true; + + } + + case TypedArrayByteOffsetIntrinsic: { + insertChecks(); + + TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType; + Array::Type arrayType = toArrayType(type); + + variant.structureSet().forEach([&] (Structure* structure) { + TypedArrayType curType = structure->classInfo()->typedArrayStorageType; + arrayType = refineTypedArrayType(arrayType, curType); + ASSERT(arrayType != Array::Generic); + }); + + set(VirtualRegister(resultOperand), addToGraph(GetTypedArrayByteOffset, OpInfo(ArrayMode(arrayType).asWord()), thisNode)); + + return true; + } + + default: + return false; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +template<typename ChecksFunctor> bool ByteCodeParser::handleTypedArrayConstructor( int resultOperand, InternalFunction* function, int registerOffset, - int argumentCountIncludingThis, TypedArrayType type) + int argumentCountIncludingThis, TypedArrayType type, const ChecksFunctor& insertChecks) { if (!isTypedView(type)) return false; @@ -1666,29 +2439,35 @@ bool ByteCodeParser::handleTypedArrayConstructor( if (argumentCountIncludingThis != 2) return false; - + + insertChecks(); set(VirtualRegister(resultOperand), addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset)))); return true; } +template<typename ChecksFunctor> bool ByteCodeParser::handleConstantInternalFunction( - int resultOperand, InternalFunction* function, int registerOffset, - int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind kind) + Node* callTargetNode, int resultOperand, InternalFunction* function, int registerOffset, + int argumentCountIncludingThis, CodeSpecializationKind kind, const ChecksFunctor& insertChecks) { - // If we ever find that we have a lot of internal functions that we specialize for, - // then we should probably have some sort of hashtable dispatch, or maybe even - // dispatch straight through the MethodTable of the InternalFunction. But for now, - // it seems that this case is hit infrequently enough, and the number of functions - // we know about is small enough, that having just a linear cascade of if statements - // is good enough. - - UNUSED_PARAM(prediction); // Remove this once we do more things. - + if (verbose) + dataLog(" Handling constant internal function ", JSValue(function), "\n"); + + if (kind == CodeForConstruct) { + Node* newTargetNode = get(virtualRegisterForArgument(0, registerOffset)); + // We cannot handle the case where new.target != callee (i.e. a construct from a super call) because we + // don't know what the prototype of the constructed object will be. + // FIXME: If we have inlined super calls up to the call site, however, we should be able to figure out the structure. 
https://bugs.webkit.org/show_bug.cgi?id=152700 + if (newTargetNode != callTargetNode) + return false; + } + if (function->classInfo() == ArrayConstructor::info()) { if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject()) return false; + insertChecks(); if (argumentCountIncludingThis == 2) { set(VirtualRegister(resultOperand), addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset)))); @@ -1703,12 +2482,14 @@ bool ByteCodeParser::handleConstantInternalFunction( } if (function->classInfo() == StringConstructor::info()) { + insertChecks(); + Node* result; if (argumentCountIncludingThis <= 1) - result = cellConstant(m_vm->smallStrings.emptyString()); + result = jsConstant(m_vm->smallStrings.emptyString()); else - result = addToGraph(ToString, get(virtualRegisterForArgument(1, registerOffset))); + result = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset))); if (kind == CodeForConstruct) result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result); @@ -1720,7 +2501,7 @@ bool ByteCodeParser::handleConstantInternalFunction( for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) { bool result = handleTypedArrayConstructor( resultOperand, function, registerOffset, argumentCountIncludingThis, - indexToTypedArrayType(typeIndex)); + indexToTypedArrayType(typeIndex), insertChecks); if (result) return true; } @@ -1728,120 +2509,677 @@ bool ByteCodeParser::handleConstantInternalFunction( return false; } -Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset) +Node* ByteCodeParser::handleGetByOffset( + SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset, + const InferredType::Descriptor& inferredType, NodeType op) { Node* propertyStorage; if (isInlineOffset(offset)) propertyStorage = base; else propertyStorage = addToGraph(GetButterfly, base); - Node* getByOffset = addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage, base); - - StorageAccessData storageAccessData; - storageAccessData.offset = offset; - storageAccessData.identifierNumber = identifierNumber; - m_graph.m_storageAccessData.append(storageAccessData); + + StorageAccessData* data = m_graph.m_storageAccessData.add(); + data->offset = offset; + data->identifierNumber = identifierNumber; + data->inferredType = inferredType; + m_graph.registerInferredType(inferredType); + + Node* getByOffset = addToGraph(op, OpInfo(data), OpInfo(prediction), propertyStorage, base); return getByOffset; } -void ByteCodeParser::handleGetByOffset( - int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber, - PropertyOffset offset) -{ - set(VirtualRegister(destinationOperand), handleGetByOffset(prediction, base, identifierNumber, offset)); -} - -Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, PropertyOffset offset, Node* value) +Node* ByteCodeParser::handlePutByOffset( + Node* base, unsigned identifier, PropertyOffset offset, const InferredType::Descriptor& inferredType, + Node* value) { Node* propertyStorage; if (isInlineOffset(offset)) propertyStorage = base; else propertyStorage = addToGraph(GetButterfly, base); - Node* result = addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, base, value); - StorageAccessData storageAccessData; - 
storageAccessData.offset = offset; - storageAccessData.identifierNumber = identifier; - m_graph.m_storageAccessData.append(storageAccessData); + StorageAccessData* data = m_graph.m_storageAccessData.add(); + data->offset = offset; + data->identifierNumber = identifier; + data->inferredType = inferredType; + m_graph.registerInferredType(inferredType); + + Node* result = addToGraph(PutByOffset, OpInfo(data), propertyStorage, base, value); + + return result; +} + +bool ByteCodeParser::check(const ObjectPropertyCondition& condition) +{ + if (!condition) + return false; + + if (m_graph.watchCondition(condition)) + return true; + + Structure* structure = condition.object()->structure(); + if (!condition.structureEnsuresValidity(structure)) + return false; + + addToGraph( + CheckStructure, + OpInfo(m_graph.addStructureSet(structure)), + weakJSConstant(condition.object())); + return true; +} + +GetByOffsetMethod ByteCodeParser::promoteToConstant(GetByOffsetMethod method) +{ + if (method.kind() == GetByOffsetMethod::LoadFromPrototype + && method.prototype()->structure()->dfgShouldWatch()) { + if (JSValue constant = m_graph.tryGetConstantProperty(method.prototype()->value(), method.prototype()->structure(), method.offset())) + return GetByOffsetMethod::constant(m_graph.freeze(constant)); + } + + return method; +} + +GetByOffsetMethod ByteCodeParser::planLoad(const ObjectPropertyCondition& condition) +{ + if (verbose) + dataLog("Planning a load: ", condition, "\n"); + + // We might promote this to Equivalence, and a later DFG pass might also do such promotion + // even if we fail, but for simplicity this cannot be asked to load an equivalence condition. + // None of the clients of this method will request a load of an Equivalence condition anyway, + // and supporting it would complicate the heuristics below. + RELEASE_ASSERT(condition.kind() == PropertyCondition::Presence); + + // Here's the ranking of how to handle this, from most preferred to least preferred: + // + // 1) Watchpoint on an equivalence condition and return a constant node for the loaded value. + // No other code is emitted, and the structure of the base object is never registered. + // Hence this results in zero code and we won't jettison this compilation if the object + // transitions, even if the structure is watchable right now. + // + // 2) Need to emit a load, and the current structure of the base is going to be watched by the + // DFG anyway (i.e. dfgShouldWatch). Watch the structure and emit the load. Don't watch the + // condition, since the act of turning the base into a constant in IR will cause the DFG to + // watch the structure anyway and doing so would subsume watching the condition. + // + // 3) Need to emit a load, and the current structure of the base is watchable but not by the + // DFG (i.e. transitionWatchpointSetIsStillValid() and !dfgShouldWatchIfPossible()). Watch + // the condition, and emit a load. + // + // 4) Need to emit a load, and the current structure of the base is not watchable. Emit a + // structure check, and emit a load. + // + // 5) The condition does not hold. Give up and return null. + + // First, try to promote Presence to Equivalence. We do this before doing anything else + // because it's the most profitable. Also, there are cases where the presence is watchable but + // we don't want to watch it unless it became an equivalence (see the relationship between + // (1), (2), and (3) above). 
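// The preference order described above, condensed into a self-contained sketch (the enum and
// function are illustrative only; the real decisions are made by planLoad() just below):
enum class LoadPlan { Constant, WatchedLoad, ConditionLoad, CheckedLoad, Fail };

static LoadPlan chooseLoadPlan(
    bool canWatchEquivalence, bool structureStillValid, bool structureWatchedByDFG,
    bool canWatchCondition)
{
    if (canWatchEquivalence)
        return LoadPlan::Constant;      // (1) no code emitted at all
    if (!structureStillValid)
        return LoadPlan::Fail;          // (5) the condition does not hold
    if (structureWatchedByDFG)
        return LoadPlan::WatchedLoad;   // (2) the DFG watches the structure anyway
    if (canWatchCondition)
        return LoadPlan::ConditionLoad; // (3) watch the condition, then emit the load
    return LoadPlan::CheckedLoad;       // (4) CheckStructure + load
}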
+ ObjectPropertyCondition equivalenceCondition = condition.attemptToMakeEquivalenceWithoutBarrier(); + if (m_graph.watchCondition(equivalenceCondition)) + return GetByOffsetMethod::constant(m_graph.freeze(equivalenceCondition.requiredValue())); + + // At this point, we'll have to materialize the condition's base as a constant in DFG IR. Once + // we do this, the frozen value will have its own idea of what the structure is. Use that from + // now on just because it's less confusing. + FrozenValue* base = m_graph.freeze(condition.object()); + Structure* structure = base->structure(); + + // Check if the structure that we've registered makes the condition hold. If not, just give + // up. This is case (5) above. + if (!condition.structureEnsuresValidity(structure)) + return GetByOffsetMethod(); + + // If the structure is watched by the DFG already, then just use this fact to emit the load. + // This is case (2) above. + if (structure->dfgShouldWatch()) + return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset())); + + // If we can watch the condition right now, then we can emit the load after watching it. This + // is case (3) above. + if (m_graph.watchCondition(condition)) + return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset())); + + // We can't watch anything but we know that the current structure satisfies the condition. So, + // check for that structure and then emit the load. + addToGraph( + CheckStructure, + OpInfo(m_graph.addStructureSet(structure)), + addToGraph(JSConstant, OpInfo(base))); + return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset())); +} + +Node* ByteCodeParser::load( + SpeculatedType prediction, unsigned identifierNumber, const GetByOffsetMethod& method, + NodeType op) +{ + switch (method.kind()) { + case GetByOffsetMethod::Invalid: + return nullptr; + case GetByOffsetMethod::Constant: + return addToGraph(JSConstant, OpInfo(method.constant())); + case GetByOffsetMethod::LoadFromPrototype: { + Node* baseNode = addToGraph(JSConstant, OpInfo(method.prototype())); + return handleGetByOffset( + prediction, baseNode, identifierNumber, method.offset(), InferredType::Top, op); + } + case GetByOffsetMethod::Load: + // Will never see this from planLoad(). + RELEASE_ASSERT_NOT_REACHED(); + return nullptr; + } + + RELEASE_ASSERT_NOT_REACHED(); + return nullptr; +} + +Node* ByteCodeParser::load( + SpeculatedType prediction, const ObjectPropertyCondition& condition, NodeType op) +{ + GetByOffsetMethod method = planLoad(condition); + return load(prediction, m_graph.identifiers().ensure(condition.uid()), method, op); +} + +bool ByteCodeParser::check(const ObjectPropertyConditionSet& conditionSet) +{ + for (const ObjectPropertyCondition condition : conditionSet) { + if (!check(condition)) + return false; + } + return true; +} +GetByOffsetMethod ByteCodeParser::planLoad(const ObjectPropertyConditionSet& conditionSet) +{ + if (verbose) + dataLog("conditionSet = ", conditionSet, "\n"); + + GetByOffsetMethod result; + for (const ObjectPropertyCondition condition : conditionSet) { + switch (condition.kind()) { + case PropertyCondition::Presence: + RELEASE_ASSERT(!result); // Should only see exactly one of these. 
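// Hypothetical example (toy types and values, not JSC's) of the condition set walked here for a
// prototype-chain load of o.foo whose slot lives on o's prototype P: exactly one Presence
// condition supplies the loaded value (the assert just above enforces that), and every other
// condition is merely check()'d.
struct ToyCondition { const char* kind; const char* object; const char* uid; int offset; };
static const ToyCondition toyConditionSet[] = {
    { "Absence",  "o", "foo", -1 }, // checked: o itself must not shadow the property
    { "Presence", "P", "foo",  2 }, // planLoad() turns exactly this one into the load
};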
+ result = planLoad(condition); + if (!result) + return GetByOffsetMethod(); + break; + default: + if (!check(condition)) + return GetByOffsetMethod(); + break; + } + } + RELEASE_ASSERT(!!result); return result; } +Node* ByteCodeParser::load( + SpeculatedType prediction, const ObjectPropertyConditionSet& conditionSet, NodeType op) +{ + GetByOffsetMethod method = planLoad(conditionSet); + return load( + prediction, + m_graph.identifiers().ensure(conditionSet.slotBaseCondition().uid()), + method, op); +} + +ObjectPropertyCondition ByteCodeParser::presenceLike( + JSObject* knownBase, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set) +{ + if (set.isEmpty()) + return ObjectPropertyCondition(); + unsigned attributes; + PropertyOffset firstOffset = set[0]->getConcurrently(uid, attributes); + if (firstOffset != offset) + return ObjectPropertyCondition(); + for (unsigned i = 1; i < set.size(); ++i) { + unsigned otherAttributes; + PropertyOffset otherOffset = set[i]->getConcurrently(uid, otherAttributes); + if (otherOffset != offset || otherAttributes != attributes) + return ObjectPropertyCondition(); + } + return ObjectPropertyCondition::presenceWithoutBarrier(knownBase, uid, offset, attributes); +} + +bool ByteCodeParser::checkPresenceLike( + JSObject* knownBase, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set) +{ + return check(presenceLike(knownBase, uid, offset, set)); +} + +void ByteCodeParser::checkPresenceLike( + Node* base, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set) +{ + if (JSObject* knownBase = base->dynamicCastConstant<JSObject*>()) { + if (checkPresenceLike(knownBase, uid, offset, set)) + return; + } + + addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(set)), base); +} + +template<typename VariantType> +Node* ByteCodeParser::load( + SpeculatedType prediction, Node* base, unsigned identifierNumber, const VariantType& variant) +{ + // Make sure backwards propagation knows that we've used base. + addToGraph(Phantom, base); + + bool needStructureCheck = true; + + UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; + + if (JSObject* knownBase = base->dynamicCastConstant<JSObject*>()) { + // Try to optimize away the structure check. Note that it's not worth doing anything about this + // if the base's structure is watched. + Structure* structure = base->constant()->structure(); + if (!structure->dfgShouldWatch()) { + if (!variant.conditionSet().isEmpty()) { + // This means that we're loading from a prototype. We expect the base not to have the + // property. We can only use ObjectPropertyCondition if all of the structures in the + // variant.structureSet() agree on the prototype (it would be hilariously rare if they + // didn't). Note that we are relying on structureSet() having at least one element. That + // will always be true here because of how GetByIdStatus/PutByIdStatus work. + JSObject* prototype = variant.structureSet()[0]->storedPrototypeObject(); + bool allAgree = true; + for (unsigned i = 1; i < variant.structureSet().size(); ++i) { + if (variant.structureSet()[i]->storedPrototypeObject() != prototype) { + allAgree = false; + break; + } + } + if (allAgree) { + ObjectPropertyCondition condition = ObjectPropertyCondition::absenceWithoutBarrier( + knownBase, uid, prototype); + if (check(condition)) + needStructureCheck = false; + } + } else { + // This means we're loading directly from base. We can avoid all of the code that follows + // if we can prove that the property is a constant. 
Otherwise, we try to prove that the + // property is watchably present, in which case we get rid of the structure check. + + ObjectPropertyCondition presenceCondition = + presenceLike(knownBase, uid, variant.offset(), variant.structureSet()); + if (presenceCondition) { + ObjectPropertyCondition equivalenceCondition = + presenceCondition.attemptToMakeEquivalenceWithoutBarrier(); + if (m_graph.watchCondition(equivalenceCondition)) + return weakJSConstant(equivalenceCondition.requiredValue()); + + if (check(presenceCondition)) + needStructureCheck = false; + } + } + } + } + + if (needStructureCheck) + addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base); + + SpeculatedType loadPrediction; + NodeType loadOp; + if (variant.callLinkStatus() || variant.intrinsic() != NoIntrinsic) { + loadPrediction = SpecCellOther; + loadOp = GetGetterSetterByOffset; + } else { + loadPrediction = prediction; + loadOp = GetByOffset; + } + + Node* loadedValue; + if (!variant.conditionSet().isEmpty()) + loadedValue = load(loadPrediction, variant.conditionSet(), loadOp); + else { + if (needStructureCheck && base->hasConstant()) { + // We did emit a structure check. That means that we have an opportunity to do constant folding + // here, since we didn't do it above. + JSValue constant = m_graph.tryGetConstantProperty( + base->asJSValue(), variant.structureSet(), variant.offset()); + if (constant) + return weakJSConstant(constant); + } + + InferredType::Descriptor inferredType; + if (needStructureCheck) { + for (Structure* structure : variant.structureSet()) { + InferredType::Descriptor thisType = m_graph.inferredTypeForProperty(structure, uid); + inferredType.merge(thisType); + } + } else + inferredType = InferredType::Top; + + loadedValue = handleGetByOffset( + loadPrediction, base, identifierNumber, variant.offset(), inferredType, loadOp); + } + + return loadedValue; +} + +Node* ByteCodeParser::store(Node* base, unsigned identifier, const PutByIdVariant& variant, Node* value) +{ + RELEASE_ASSERT(variant.kind() == PutByIdVariant::Replace); + + checkPresenceLike(base, m_graph.identifiers()[identifier], variant.offset(), variant.structure()); + return handlePutByOffset(base, identifier, variant.offset(), variant.requiredType(), value); +} + void ByteCodeParser::handleGetById( int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber, const GetByIdStatus& getByIdStatus) { - if (!getByIdStatus.isSimple() - || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache) - || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint) - || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadWeakConstantCache) - || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadWeakConstantCacheWatchpoint)) { + NodeType getById = getByIdStatus.makesCalls() ? GetByIdFlush : GetById; + + if (!getByIdStatus.isSimple() || !getByIdStatus.numVariants() || !Options::useAccessInlining()) { set(VirtualRegister(destinationOperand), - addToGraph( - getByIdStatus.makesCalls() ? 
GetByIdFlush : GetById, - OpInfo(identifierNumber), OpInfo(prediction), base)); + addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); return; } - ASSERT(getByIdStatus.structureSet().size()); + if (getByIdStatus.numVariants() > 1) { + if (getByIdStatus.makesCalls() || !isFTL(m_graph.m_plan.mode) + || !Options::usePolymorphicAccessInlining()) { + set(VirtualRegister(destinationOperand), + addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); + return; + } + + Vector<MultiGetByOffsetCase, 2> cases; + + // 1) Emit prototype structure checks for all chains. This could sort of maybe not be + // optimal, if there is some rarely executed case in the chain that requires a lot + // of checks and those checks are not watchpointable. + for (const GetByIdVariant& variant : getByIdStatus.variants()) { + if (variant.intrinsic() != NoIntrinsic) { + set(VirtualRegister(destinationOperand), + addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); + return; + } + + if (variant.conditionSet().isEmpty()) { + cases.append( + MultiGetByOffsetCase( + variant.structureSet(), + GetByOffsetMethod::load(variant.offset()))); + continue; + } + + GetByOffsetMethod method = planLoad(variant.conditionSet()); + if (!method) { + set(VirtualRegister(destinationOperand), + addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); + return; + } + + cases.append(MultiGetByOffsetCase(variant.structureSet(), method)); + } + + if (m_graph.compilation()) + m_graph.compilation()->noticeInlinedGetById(); + + // 2) Emit a MultiGetByOffset + MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add(); + data->cases = cases; + data->identifierNumber = identifierNumber; + set(VirtualRegister(destinationOperand), + addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base)); + return; + } + + ASSERT(getByIdStatus.numVariants() == 1); + GetByIdVariant variant = getByIdStatus[0]; + Node* loadedValue = load(prediction, base, identifierNumber, variant); + if (!loadedValue) { + set(VirtualRegister(destinationOperand), + addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); + return; + } + if (m_graph.compilation()) m_graph.compilation()->noticeInlinedGetById(); + + if (!variant.callLinkStatus() && variant.intrinsic() == NoIntrinsic) { + set(VirtualRegister(destinationOperand), loadedValue); + return; + } - Node* originalBaseForBaselineJIT = base; - - addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(getByIdStatus.structureSet())), base); + Node* getter = addToGraph(GetGetter, loadedValue); + + if (handleIntrinsicGetter(destinationOperand, variant, base, + [&] () { + addToGraph(CheckCell, OpInfo(m_graph.freeze(variant.intrinsicFunction())), getter, base); + })) { + addToGraph(Phantom, getter); + return; + } + + if (variant.intrinsic() != NoIntrinsic) + ASSERT(variant.intrinsic() == NoIntrinsic); + + // Make a call. We don't try to get fancy with using the smallest operand number because + // the stack layout phase should compress the stack anyway. + + unsigned numberOfParameters = 0; + numberOfParameters++; // The 'this' argument. + numberOfParameters++; // True return PC. + + // Start with a register offset that corresponds to the last in-use register. + int registerOffset = virtualRegisterForLocal( + m_inlineStackTop->m_profiledBlock->m_numCalleeLocals - 1).offset(); + registerOffset -= numberOfParameters; + registerOffset -= JSStack::CallFrameHeaderSize; + + // Get the alignment right. 
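// Sketch of the rounding performed just below (WTF::roundUpToMultipleOf is assumed to round a
// non-negative value up to the next multiple): registerOffset is negative because the stack
// grows downward, so the code negates it, rounds the magnitude up, and negates again.
static inline int alignRegisterOffset(int registerOffset, int alignmentRegisters)
{
    int magnitude = -registerOffset;                                               // e.g. 13
    int rounded = (magnitude + alignmentRegisters - 1) / alignmentRegisters * alignmentRegisters;
    return -rounded;                                                               // e.g. -14 when alignment is 2
}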
+ registerOffset = -WTF::roundUpToMultipleOf( + stackAlignmentRegisters(), + -registerOffset); + + ensureLocals( + m_inlineStackTop->remapOperand( + VirtualRegister(registerOffset)).toLocal()); + + // Issue SetLocals. This has two effects: + // 1) That's how handleCall() sees the arguments. + // 2) If we inline then this ensures that the arguments are flushed so that if you use + // the dreaded arguments object on the getter, the right things happen. Well, sort of - + // since we only really care about 'this' in this case. But we're not going to take that + // shortcut. + int nextRegister = registerOffset + JSStack::CallFrameHeaderSize; + set(VirtualRegister(nextRegister++), base, ImmediateNakedSet); + + // We've set some locals, but they are not user-visible. It's still OK to exit from here. + m_exitOK = true; + addToGraph(ExitOK); + + handleCall( + destinationOperand, Call, InlineCallFrame::GetterCall, OPCODE_LENGTH(op_get_by_id), + getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction); +} + +void ByteCodeParser::emitPutById( + Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect) +{ + if (isDirect) + addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value); + else + addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value); +} + +void ByteCodeParser::handlePutById( + Node* base, unsigned identifierNumber, Node* value, + const PutByIdStatus& putByIdStatus, bool isDirect) +{ + if (!putByIdStatus.isSimple() || !putByIdStatus.numVariants() || !Options::useAccessInlining()) { + if (!putByIdStatus.isSet()) + addToGraph(ForceOSRExit); + emitPutById(base, identifierNumber, value, putByIdStatus, isDirect); + return; + } - if (getByIdStatus.chain()) { - m_graph.chains().addLazily(getByIdStatus.chain()); - Structure* currentStructure = getByIdStatus.structureSet().singletonStructure(); - JSObject* currentObject = 0; - for (unsigned i = 0; i < getByIdStatus.chain()->size(); ++i) { - currentObject = asObject(currentStructure->prototypeForLookup(m_inlineStackTop->m_codeBlock)); - currentStructure = getByIdStatus.chain()->at(i); - base = cellConstantWithStructureCheck(currentObject, currentStructure); + if (putByIdStatus.numVariants() > 1) { + if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls() + || !Options::usePolymorphicAccessInlining()) { + emitPutById(base, identifierNumber, value, putByIdStatus, isDirect); + return; + } + + if (!isDirect) { + for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) { + if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition) + continue; + if (!check(putByIdStatus[variantIndex].conditionSet())) { + emitPutById(base, identifierNumber, value, putByIdStatus, isDirect); + return; + } + } } + + if (m_graph.compilation()) + m_graph.compilation()->noticeInlinedPutById(); + + for (const PutByIdVariant& variant : putByIdStatus.variants()) + m_graph.registerInferredType(variant.requiredType()); + + MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add(); + data->variants = putByIdStatus.variants(); + data->identifierNumber = identifierNumber; + addToGraph(MultiPutByOffset, OpInfo(data), base, value); + return; } - // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to - // ensure that the base of the original get_by_id is kept alive until we're done with - // all of the speculations. 
We only insert the Phantom if there had been a CheckStructure - // on something other than the base following the CheckStructure on base, or if the - // access was compiled to a WeakJSConstant specific value, in which case we might not - // have any explicit use of the base at all. - if (getByIdStatus.specificValue() || originalBaseForBaselineJIT != base) - addToGraph(Phantom, originalBaseForBaselineJIT); + ASSERT(putByIdStatus.numVariants() == 1); + const PutByIdVariant& variant = putByIdStatus[0]; - if (getByIdStatus.specificValue()) { - ASSERT(getByIdStatus.specificValue().isCell()); + switch (variant.kind()) { + case PutByIdVariant::Replace: { + store(base, identifierNumber, variant, value); + if (m_graph.compilation()) + m_graph.compilation()->noticeInlinedPutById(); + return; + } + + case PutByIdVariant::Transition: { + addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base); + if (!check(variant.conditionSet())) { + emitPutById(base, identifierNumber, value, putByIdStatus, isDirect); + return; + } + + ASSERT(variant.oldStructureForTransition()->transitionWatchpointSetHasBeenInvalidated()); + + Node* propertyStorage; + Transition* transition = m_graph.m_transitions.add( + variant.oldStructureForTransition(), variant.newStructure()); + + if (variant.reallocatesStorage()) { + + // If we're growing the property storage then it must be because we're + // storing into the out-of-line storage. + ASSERT(!isInlineOffset(variant.offset())); + + if (!variant.oldStructureForTransition()->outOfLineCapacity()) { + propertyStorage = addToGraph( + AllocatePropertyStorage, OpInfo(transition), base); + } else { + propertyStorage = addToGraph( + ReallocatePropertyStorage, OpInfo(transition), + base, addToGraph(GetButterfly, base)); + } + } else { + if (isInlineOffset(variant.offset())) + propertyStorage = base; + else + propertyStorage = addToGraph(GetButterfly, base); + } + + StorageAccessData* data = m_graph.m_storageAccessData.add(); + data->offset = variant.offset(); + data->identifierNumber = identifierNumber; + data->inferredType = variant.requiredType(); + m_graph.registerInferredType(data->inferredType); - set(VirtualRegister(destinationOperand), cellConstant(getByIdStatus.specificValue().asCell())); + addToGraph( + PutByOffset, + OpInfo(data), + propertyStorage, + base, + value); + + // FIXME: PutStructure goes last until we fix either + // https://bugs.webkit.org/show_bug.cgi?id=142921 or + // https://bugs.webkit.org/show_bug.cgi?id=142924. + addToGraph(PutStructure, OpInfo(transition), base); + + if (m_graph.compilation()) + m_graph.compilation()->noticeInlinedPutById(); return; } + + case PutByIdVariant::Setter: { + Node* loadedValue = load(SpecCellOther, base, identifierNumber, variant); + if (!loadedValue) { + emitPutById(base, identifierNumber, value, putByIdStatus, isDirect); + return; + } + + Node* setter = addToGraph(GetSetter, loadedValue); + + // Make a call. We don't try to get fancy with using the smallest operand number because + // the stack layout phase should compress the stack anyway. + + unsigned numberOfParameters = 0; + numberOfParameters++; // The 'this' argument. + numberOfParameters++; // The new value. + numberOfParameters++; // True return PC. - handleGetByOffset( - destinationOperand, prediction, base, identifierNumber, getByIdStatus.offset()); + // Start with a register offset that corresponds to the last in-use register. 
+ int registerOffset = virtualRegisterForLocal( + m_inlineStackTop->m_profiledBlock->m_numCalleeLocals - 1).offset(); + registerOffset -= numberOfParameters; + registerOffset -= JSStack::CallFrameHeaderSize; + + // Get the alignment right. + registerOffset = -WTF::roundUpToMultipleOf( + stackAlignmentRegisters(), + -registerOffset); + + ensureLocals( + m_inlineStackTop->remapOperand( + VirtualRegister(registerOffset)).toLocal()); + + int nextRegister = registerOffset + JSStack::CallFrameHeaderSize; + set(VirtualRegister(nextRegister++), base, ImmediateNakedSet); + set(VirtualRegister(nextRegister++), value, ImmediateNakedSet); + + // We've set some locals, but they are not user-visible. It's still OK to exit from here. + m_exitOK = true; + addToGraph(ExitOK); + + handleCall( + VirtualRegister().offset(), Call, InlineCallFrame::SetterCall, + OPCODE_LENGTH(op_put_by_id), setter, numberOfParameters - 1, registerOffset, + *variant.callLinkStatus(), SpecOther); + return; + } + + default: { + emitPutById(base, identifierNumber, value, putByIdStatus, isDirect); + return; + } } } void ByteCodeParser::prepareToParseBlock() { - for (unsigned i = 0; i < m_constants.size(); ++i) - m_constants[i] = ConstantRecord(); - m_cellConstantNodes.clear(); + clearCaches(); + ASSERT(m_setLocalQueue.isEmpty()); } -Node* ByteCodeParser::getScope(bool skipTop, unsigned skipCount) +void ByteCodeParser::clearCaches() { - Node* localBase = get(VirtualRegister(JSStack::ScopeChain)); - if (skipTop) { - ASSERT(!inlineCallFrame()); - localBase = addToGraph(SkipTopScope, localBase); - } - for (unsigned n = skipCount; n--;) - localBase = addToGraph(SkipScope, localBase); - return localBase; + m_constants.resize(0); } bool ByteCodeParser::parseBlock(unsigned limit) @@ -1851,18 +3189,20 @@ bool ByteCodeParser::parseBlock(unsigned limit) Interpreter* interpreter = m_vm->interpreter; Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin(); unsigned blockBegin = m_currentIndex; - + // If we are the first basic block, introduce markers for arguments. This allows // us to track if a use of an argument may use the actual argument passed, as // opposed to using a value we set explicitly. if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) { m_graph.m_arguments.resize(m_numArguments); + // We will emit SetArgument nodes. They don't exit, but we're at the top of an op_enter so + // exitOK = true. + m_exitOK = true; for (unsigned argument = 0; argument < m_numArguments; ++argument) { VariableAccessData* variable = newVariableAccessData( - virtualRegisterForArgument(argument), m_codeBlock->isCaptured(virtualRegisterForArgument(argument))); + virtualRegisterForArgument(argument)); variable->mergeStructureCheckHoistingFailed( - m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache) - || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint)); + m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)); variable->mergeCheckArrayHoistingFailed( m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)); @@ -1873,9 +3213,11 @@ bool ByteCodeParser::parseBlock(unsigned limit) } while (true) { - for (unsigned i = 0; i < m_setLocalQueue.size(); ++i) - m_setLocalQueue[i].execute(this); - m_setLocalQueue.resize(0); + // We're staring a new bytecode instruction. Hence, we once again have a place that we can exit + // to. + m_exitOK = true; + + processSetLocalQueue(); // Don't extend over jump destinations. 
if (m_currentIndex == limit) { @@ -1886,6 +3228,9 @@ bool ByteCodeParser::parseBlock(unsigned limit) // logic relies on every bytecode resulting in one or more nodes, which would // be true anyway except for op_loop_hint, which emits a Phantom to force this // to be true. + // We also don't insert a jump if the block already has a terminal, + // which could happen after a tail call. + ASSERT(m_currentBlock->isEmpty() || !m_currentBlock->terminal()); if (!m_currentBlock->isEmpty()) addToGraph(Jump, OpInfo(m_currentIndex)); return shouldContinueParsing; @@ -1896,6 +3241,9 @@ bool ByteCodeParser::parseBlock(unsigned limit) m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls. OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode); + if (Options::verboseDFGByteCodeParsing()) + dataLog(" parsing ", currentCodeOrigin(), ": ", opcodeID, "\n"); + if (m_graph.compilation()) { addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor( Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin())))); @@ -1905,26 +3253,24 @@ bool ByteCodeParser::parseBlock(unsigned limit) // === Function entry opcodes === - case op_enter: + case op_enter: { + Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined)); // Initialize all locals to undefined. for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i) - set(virtualRegisterForLocal(i), constantUndefined(), ImmediateSet); + set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet); NEXT_OPCODE(op_enter); - - case op_touch_entry: - if (m_inlineStackTop->m_codeBlock->symbolTable()->m_functionEnteredOnce.isStillValid()) - addToGraph(ForceOSRExit); - NEXT_OPCODE(op_touch_entry); + } case op_to_this: { Node* op1 = getThis(); if (op1->op() != ToThis) { Structure* cachedStructure = currentInstruction[2].u.structure.get(); - if (!cachedStructure + if (currentInstruction[2].u.toThisStatus != ToThisOK + || !cachedStructure || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache) - || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint)) { + || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) { setThis(addToGraph(ToThis, op1)); } else { addToGraph( @@ -1939,18 +3285,33 @@ bool ByteCodeParser::parseBlock(unsigned limit) case op_create_this: { int calleeOperand = currentInstruction[2].u.operand; Node* callee = get(VirtualRegister(calleeOperand)); + + JSFunction* function = callee->dynamicCastConstant<JSFunction*>(); + if (!function) { + JSCell* cachedFunction = currentInstruction[4].u.jsCell.unvalidatedGet(); + if (cachedFunction + && cachedFunction != JSCell::seenMultipleCalleeObjects() + && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) { + ASSERT(cachedFunction->inherits(JSFunction::info())); + + FrozenValue* frozen = m_graph.freeze(cachedFunction); + addToGraph(CheckCell, OpInfo(frozen), callee); + + function = static_cast<JSFunction*>(cachedFunction); + } + } + bool alreadyEmitted = false; - if (callee->op() == WeakJSConstant) { - JSCell* cell = callee->weakConstant(); - ASSERT(cell->inherits(JSFunction::info())); - - JSFunction* function = jsCast<JSFunction*>(cell); - if (Structure* structure = function->allocationStructure()) { - 
addToGraph(AllocationProfileWatchpoint, OpInfo(function)); - // The callee is still live up to this point. - addToGraph(Phantom, callee); - set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(structure))); - alreadyEmitted = true; + if (function) { + if (FunctionRareData* rareData = function->rareData()) { + if (Structure* structure = rareData->objectAllocationStructure()) { + m_graph.freeze(rareData); + m_graph.watchpoints().addLazily(rareData->allocationProfileWatchpointSet()); + // The callee is still live up to this point. + addToGraph(Phantom, callee); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(structure))); + alreadyEmitted = true; + } } } if (!alreadyEmitted) { @@ -2007,25 +3368,41 @@ bool ByteCodeParser::parseBlock(unsigned limit) } case op_new_regexp: { + // FIXME: We really should be able to inline code that uses NewRegexp. That means + // using something other than the index into the CodeBlock here. + // https://bugs.webkit.org/show_bug.cgi?id=154808 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand))); NEXT_OPCODE(op_new_regexp); } - - case op_get_callee: { - JSCell* cachedFunction = currentInstruction[2].u.jsCell.get(); - if (!cachedFunction - || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex) - || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadFunction)) { - set(VirtualRegister(currentInstruction[1].u.operand), get(VirtualRegister(JSStack::Callee))); - } else { - ASSERT(cachedFunction->inherits(JSFunction::info())); - Node* actualCallee = get(VirtualRegister(JSStack::Callee)); - addToGraph(CheckFunction, OpInfo(cachedFunction), actualCallee); - set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(WeakJSConstant, OpInfo(cachedFunction))); - } - NEXT_OPCODE(op_get_callee); + + case op_get_rest_length: { + InlineCallFrame* inlineCallFrame = this->inlineCallFrame(); + Node* length; + if (inlineCallFrame && !inlineCallFrame->isVarargs()) { + unsigned argumentsLength = inlineCallFrame->arguments.size() - 1; + unsigned numParamsToSkip = currentInstruction[2].u.unsignedValue; + JSValue restLength; + if (argumentsLength <= numParamsToSkip) + restLength = jsNumber(0); + else + restLength = jsNumber(argumentsLength - numParamsToSkip); + + length = jsConstant(restLength); + } else + length = addToGraph(GetRestLength, OpInfo(currentInstruction[2].u.unsignedValue)); + set(VirtualRegister(currentInstruction[1].u.operand), length); + NEXT_OPCODE(op_get_rest_length); } + case op_copy_rest: { + noticeArgumentsUse(); + Node* array = get(VirtualRegister(currentInstruction[1].u.operand)); + Node* arrayLength = get(VirtualRegister(currentInstruction[2].u.operand)); + addToGraph(CopyRest, OpInfo(currentInstruction[3].u.unsignedValue), + array, arrayLength); + NEXT_OPCODE(op_copy_rest); + } + // === Bitwise operations === case op_bitand: { @@ -2085,7 +3462,7 @@ bool ByteCodeParser::parseBlock(unsigned limit) int srcDst = currentInstruction[1].u.operand; VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst); Node* op = get(srcDstVirtualRegister); - set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, one()))); + set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne))))); NEXT_OPCODE(op_inc); } @@ -2093,7 +3470,7 @@ bool ByteCodeParser::parseBlock(unsigned limit) int srcDst = currentInstruction[1].u.operand; VirtualRegister srcDstVirtualRegister = 
VirtualRegister(srcDst); Node* op = get(srcDstVirtualRegister); - set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, one()))); + set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne))))); NEXT_OPCODE(op_dec); } @@ -2165,20 +3542,21 @@ bool ByteCodeParser::parseBlock(unsigned limit) set(VirtualRegister(currentInstruction[1].u.operand), op); NEXT_OPCODE(op_mov); } - - case op_captured_mov: { - Node* op = get(VirtualRegister(currentInstruction[2].u.operand)); - if (VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet) { - if (set->state() != IsInvalidated) - addToGraph(NotifyWrite, OpInfo(set), op); - } - set(VirtualRegister(currentInstruction[1].u.operand), op); - NEXT_OPCODE(op_captured_mov); + + case op_check_tdz: { + addToGraph(CheckNotEmpty, get(VirtualRegister(currentInstruction[1].u.operand))); + NEXT_OPCODE(op_check_tdz); } - case op_check_has_instance: - addToGraph(CheckHasInstance, get(VirtualRegister(currentInstruction[3].u.operand))); - NEXT_OPCODE(op_check_has_instance); + case op_overrides_has_instance: { + JSFunction* defaultHasInstanceSymbolFunction = m_inlineStackTop->m_codeBlock->globalObjectFor(currentCodeOrigin())->functionProtoHasInstanceSymbolFunction(); + + Node* constructor = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* hasInstanceValue = get(VirtualRegister(currentInstruction[3].u.operand)); + + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(OverridesHasInstance, OpInfo(m_graph.freeze(defaultHasInstanceSymbolFunction)), constructor, hasInstanceValue)); + NEXT_OPCODE(op_overrides_has_instance); + } case op_instanceof: { Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); @@ -2186,7 +3564,15 @@ bool ByteCodeParser::parseBlock(unsigned limit) set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InstanceOf, value, prototype)); NEXT_OPCODE(op_instanceof); } - + + case op_instanceof_custom: { + Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* constructor = get(VirtualRegister(currentInstruction[3].u.operand)); + Node* hasInstanceValue = get(VirtualRegister(currentInstruction[4].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InstanceOfCustom, value, constructor, hasInstanceValue)); + NEXT_OPCODE(op_instanceof_custom); + } + case op_is_undefined: { Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value)); @@ -2217,6 +3603,12 @@ bool ByteCodeParser::parseBlock(unsigned limit) NEXT_OPCODE(op_is_object); } + case op_is_object_or_null: { + Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObjectOrNull, value)); + NEXT_OPCODE(op_is_object_or_null); + } + case op_is_function: { Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value)); @@ -2239,52 +3631,37 @@ bool ByteCodeParser::parseBlock(unsigned limit) int startOperand = currentInstruction[2].u.operand; int numOperands = currentInstruction[3].u.operand; #if CPU(X86) - // X86 doesn't have enough registers to compile MakeRope with three arguments. - // Rather than try to be clever, we just make MakeRope dumber on this processor. - const unsigned maxRopeArguments = 2; + // X86 doesn't have enough registers to compile MakeRope with three arguments. 
The + // StrCat we emit here may be turned into a MakeRope. Rather than try to be clever, + // we just make StrCat dumber on this processor. + const unsigned maxArguments = 2; #else - const unsigned maxRopeArguments = 3; + const unsigned maxArguments = 3; #endif - auto toStringNodes = std::make_unique<Node*[]>(numOperands); - for (int i = 0; i < numOperands; i++) - toStringNodes[i] = addToGraph(ToString, get(VirtualRegister(startOperand - i))); - - for (int i = 0; i < numOperands; i++) - addToGraph(Phantom, toStringNodes[i]); - Node* operands[AdjacencyList::Size]; unsigned indexInOperands = 0; for (unsigned i = 0; i < AdjacencyList::Size; ++i) operands[i] = 0; for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) { - if (indexInOperands == maxRopeArguments) { - operands[0] = addToGraph(MakeRope, operands[0], operands[1], operands[2]); + if (indexInOperands == maxArguments) { + operands[0] = addToGraph(StrCat, operands[0], operands[1], operands[2]); for (unsigned i = 1; i < AdjacencyList::Size; ++i) operands[i] = 0; indexInOperands = 1; } ASSERT(indexInOperands < AdjacencyList::Size); - ASSERT(indexInOperands < maxRopeArguments); - operands[indexInOperands++] = toStringNodes[operandIdx]; + ASSERT(indexInOperands < maxArguments); + operands[indexInOperands++] = get(VirtualRegister(startOperand - operandIdx)); } set(VirtualRegister(currentInstruction[1].u.operand), - addToGraph(MakeRope, operands[0], operands[1], operands[2])); + addToGraph(StrCat, operands[0], operands[1], operands[2])); NEXT_OPCODE(op_strcat); } case op_less: { Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); - if (canFold(op1) && canFold(op2)) { - JSValue a = valueOfJSConstant(op1); - JSValue b = valueOfJSConstant(op2); - if (a.isNumber() && b.isNumber()) { - set(VirtualRegister(currentInstruction[1].u.operand), - getJSConstantForValue(jsBoolean(a.asNumber() < b.asNumber()))); - NEXT_OPCODE(op_less); - } - } set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2)); NEXT_OPCODE(op_less); } @@ -2292,15 +3669,6 @@ bool ByteCodeParser::parseBlock(unsigned limit) case op_lesseq: { Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); - if (canFold(op1) && canFold(op2)) { - JSValue a = valueOfJSConstant(op1); - JSValue b = valueOfJSConstant(op2); - if (a.isNumber() && b.isNumber()) { - set(VirtualRegister(currentInstruction[1].u.operand), - getJSConstantForValue(jsBoolean(a.asNumber() <= b.asNumber()))); - NEXT_OPCODE(op_lesseq); - } - } set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2)); NEXT_OPCODE(op_lesseq); } @@ -2308,15 +3676,6 @@ bool ByteCodeParser::parseBlock(unsigned limit) case op_greater: { Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); - if (canFold(op1) && canFold(op2)) { - JSValue a = valueOfJSConstant(op1); - JSValue b = valueOfJSConstant(op2); - if (a.isNumber() && b.isNumber()) { - set(VirtualRegister(currentInstruction[1].u.operand), - getJSConstantForValue(jsBoolean(a.asNumber() > b.asNumber()))); - NEXT_OPCODE(op_greater); - } - } set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2)); NEXT_OPCODE(op_greater); } @@ -2324,15 +3683,6 @@ bool ByteCodeParser::parseBlock(unsigned limit) case op_greatereq: { Node* op1 = 
get(VirtualRegister(currentInstruction[2].u.operand)); Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); - if (canFold(op1) && canFold(op2)) { - JSValue a = valueOfJSConstant(op1); - JSValue b = valueOfJSConstant(op2); - if (a.isNumber() && b.isNumber()) { - set(VirtualRegister(currentInstruction[1].u.operand), - getJSConstantForValue(jsBoolean(a.asNumber() >= b.asNumber()))); - NEXT_OPCODE(op_greatereq); - } - } set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2)); NEXT_OPCODE(op_greatereq); } @@ -2340,79 +3690,43 @@ bool ByteCodeParser::parseBlock(unsigned limit) case op_eq: { Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); - if (canFold(op1) && canFold(op2)) { - JSValue a = valueOfJSConstant(op1); - JSValue b = valueOfJSConstant(op2); - set(VirtualRegister(currentInstruction[1].u.operand), - getJSConstantForValue(jsBoolean(JSValue::equal(m_codeBlock->globalObject()->globalExec(), a, b)))); - NEXT_OPCODE(op_eq); - } set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2)); NEXT_OPCODE(op_eq); } case op_eq_null: { Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); - set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEqConstant, value, constantNull())); + Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, value, nullConstant)); NEXT_OPCODE(op_eq_null); } case op_stricteq: { Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); - if (canFold(op1) && canFold(op2)) { - JSValue a = valueOfJSConstant(op1); - JSValue b = valueOfJSConstant(op2); - set(VirtualRegister(currentInstruction[1].u.operand), - getJSConstantForValue(jsBoolean(JSValue::strictEqual(m_codeBlock->globalObject()->globalExec(), a, b)))); - NEXT_OPCODE(op_stricteq); - } - if (isConstantForCompareStrictEq(op1)) - set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEqConstant, op2, op1)); - else if (isConstantForCompareStrictEq(op2)) - set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEqConstant, op1, op2)); - else - set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2)); NEXT_OPCODE(op_stricteq); } case op_neq: { Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); - if (canFold(op1) && canFold(op2)) { - JSValue a = valueOfJSConstant(op1); - JSValue b = valueOfJSConstant(op2); - set(VirtualRegister(currentInstruction[1].u.operand), - getJSConstantForValue(jsBoolean(!JSValue::equal(m_codeBlock->globalObject()->globalExec(), a, b)))); - NEXT_OPCODE(op_neq); - } set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2))); NEXT_OPCODE(op_neq); } case op_neq_null: { Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); - set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, constantNull()))); + Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, 
value, nullConstant))); NEXT_OPCODE(op_neq_null); } case op_nstricteq: { Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); - if (canFold(op1) && canFold(op2)) { - JSValue a = valueOfJSConstant(op1); - JSValue b = valueOfJSConstant(op2); - set(VirtualRegister(currentInstruction[1].u.operand), - getJSConstantForValue(jsBoolean(!JSValue::strictEqual(m_codeBlock->globalObject()->globalExec(), a, b)))); - NEXT_OPCODE(op_nstricteq); - } Node* invertedResult; - if (isConstantForCompareStrictEq(op1)) - invertedResult = addToGraph(CompareStrictEqConstant, op2, op1); - else if (isConstantForCompareStrictEq(op2)) - invertedResult = addToGraph(CompareStrictEqConstant, op1, op2); - else - invertedResult = addToGraph(CompareStrictEq, op1, op2); + invertedResult = addToGraph(CompareStrictEq, op1, op2); set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult)); NEXT_OPCODE(op_nstricteq); } @@ -2420,13 +3734,37 @@ bool ByteCodeParser::parseBlock(unsigned limit) // === Property access operations === case op_get_by_val: { - SpeculatedType prediction = getPrediction(); - + SpeculatedType prediction = getPredictionWithoutOSRExit(); + Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); - ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Read); Node* property = get(VirtualRegister(currentInstruction[3].u.operand)); - Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property); - set(VirtualRegister(currentInstruction[1].u.operand), getByVal); + bool compiledAsGetById = false; + { + ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); + ByValInfo* byValInfo = m_inlineStackTop->m_byValInfos.get(CodeOrigin(currentCodeOrigin().bytecodeIndex)); + // FIXME: When the bytecode is not compiled in the baseline JIT, byValInfo becomes null. + // At that time, there is no information. + if (byValInfo && byValInfo->stubInfo && !byValInfo->tookSlowPath && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent)) { + compiledAsGetById = true; + unsigned identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl()); + UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; + + addToGraph(CheckIdent, OpInfo(uid), property); + + GetByIdStatus getByIdStatus = GetByIdStatus::computeForStubInfo( + locker, m_inlineStackTop->m_profiledBlock, + byValInfo->stubInfo, currentCodeOrigin(), uid); + + handleGetById(currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus); + } + } + + if (!compiledAsGetById) { + ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Read); + Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property); + m_exitOK = false; // GetByVal must be treated as if it clobbers exit state, since FixupPhase may make it generic. 
+ set(VirtualRegister(currentInstruction[1].u.operand), getByVal); + } NEXT_OPCODE(op_get_by_val); } @@ -2434,220 +3772,161 @@ bool ByteCodeParser::parseBlock(unsigned limit) case op_put_by_val_direct: case op_put_by_val: { Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); - - ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Write); - Node* property = get(VirtualRegister(currentInstruction[2].u.operand)); Node* value = get(VirtualRegister(currentInstruction[3].u.operand)); - - addVarArgChild(base); - addVarArgChild(property); - addVarArgChild(value); - addVarArgChild(0); // Leave room for property storage. - addToGraph(Node::VarArg, opcodeID == op_put_by_val_direct ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0)); + bool isDirect = opcodeID == op_put_by_val_direct; + bool compiledAsPutById = false; + { + ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); + ByValInfo* byValInfo = m_inlineStackTop->m_byValInfos.get(CodeOrigin(currentCodeOrigin().bytecodeIndex)); + // FIXME: When the bytecode is not compiled in the baseline JIT, byValInfo becomes null. + // At that time, there is no information. + if (byValInfo && byValInfo->stubInfo && !byValInfo->tookSlowPath && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent)) { + compiledAsPutById = true; + unsigned identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl()); + UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; + + addToGraph(CheckIdent, OpInfo(uid), property); + + PutByIdStatus putByIdStatus = PutByIdStatus::computeForStubInfo( + locker, m_inlineStackTop->m_profiledBlock, + byValInfo->stubInfo, currentCodeOrigin(), uid); + + handlePutById(base, identifierNumber, value, putByIdStatus, isDirect); + } + } + + if (!compiledAsPutById) { + ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Write); + + addVarArgChild(base); + addVarArgChild(property); + addVarArgChild(value); + addVarArgChild(0); // Leave room for property storage. + addVarArgChild(0); // Leave room for length. + addToGraph(Node::VarArg, isDirect ? 
PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0)); + } NEXT_OPCODE(op_put_by_val); } case op_get_by_id: - case op_get_by_id_out_of_line: case op_get_array_length: { SpeculatedType prediction = getPrediction(); Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; - StringImpl* uid = m_graph.identifiers()[identifierNumber]; + UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; GetByIdStatus getByIdStatus = GetByIdStatus::computeFor( - m_inlineStackTop->m_profiledBlock, m_inlineStackTop->m_stubInfos, - m_currentIndex, uid); + m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock, + m_inlineStackTop->m_stubInfos, m_dfgStubInfos, + currentCodeOrigin(), uid); handleGetById( currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus); NEXT_OPCODE(op_get_by_id); } - case op_put_by_id: - case op_put_by_id_out_of_line: - case op_put_by_id_transition_direct: - case op_put_by_id_transition_normal: - case op_put_by_id_transition_direct_out_of_line: - case op_put_by_id_transition_normal_out_of_line: { + case op_put_by_id: { Node* value = get(VirtualRegister(currentInstruction[3].u.operand)); Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand]; - bool direct = currentInstruction[8].u.operand; + bool direct = currentInstruction[8].u.putByIdFlags & PutByIdIsDirect; PutByIdStatus putByIdStatus = PutByIdStatus::computeFor( - m_inlineStackTop->m_profiledBlock, m_inlineStackTop->m_stubInfos, - m_currentIndex, m_graph.identifiers()[identifierNumber]); - bool canCountAsInlined = true; - if (!putByIdStatus.isSet()) { - addToGraph(ForceOSRExit); - canCountAsInlined = false; - } - - bool hasExitSite = - m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache) - || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint) - || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadWeakConstantCache) - || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadWeakConstantCacheWatchpoint); - - if (!hasExitSite && putByIdStatus.isSimpleReplace()) { - addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base); - handlePutByOffset(base, identifierNumber, putByIdStatus.offset(), value); - } else if ( - !hasExitSite - && putByIdStatus.isSimpleTransition() - && (!putByIdStatus.structureChain() - || putByIdStatus.structureChain()->isStillValid())) { - - m_graph.chains().addLazily(putByIdStatus.structureChain()); - - addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base); - if (!direct) { - if (!putByIdStatus.oldStructure()->storedPrototype().isNull()) { - cellConstantWithStructureCheck( - putByIdStatus.oldStructure()->storedPrototype().asCell()); - } - - for (unsigned i = 0; i < putByIdStatus.structureChain()->size(); ++i) { - JSValue prototype = putByIdStatus.structureChain()->at(i)->storedPrototype(); - if (prototype.isNull()) - continue; - cellConstantWithStructureCheck(prototype.asCell()); - } - } - ASSERT(putByIdStatus.oldStructure()->transitionWatchpointSetHasBeenInvalidated()); - - Node* propertyStorage; - StructureTransitionData* transitionData = - m_graph.addStructureTransitionData( - StructureTransitionData( - putByIdStatus.oldStructure(), - putByIdStatus.newStructure())); - - if 
(putByIdStatus.oldStructure()->outOfLineCapacity() - != putByIdStatus.newStructure()->outOfLineCapacity()) { - - // If we're growing the property storage then it must be because we're - // storing into the out-of-line storage. - ASSERT(!isInlineOffset(putByIdStatus.offset())); - - if (!putByIdStatus.oldStructure()->outOfLineCapacity()) { - propertyStorage = addToGraph( - AllocatePropertyStorage, OpInfo(transitionData), base); - } else { - propertyStorage = addToGraph( - ReallocatePropertyStorage, OpInfo(transitionData), - base, addToGraph(GetButterfly, base)); - } - } else { - if (isInlineOffset(putByIdStatus.offset())) - propertyStorage = base; - else - propertyStorage = addToGraph(GetButterfly, base); - } - - addToGraph(PutStructure, OpInfo(transitionData), base); - - addToGraph( - PutByOffset, - OpInfo(m_graph.m_storageAccessData.size()), - propertyStorage, - base, - value); - - StorageAccessData storageAccessData; - storageAccessData.offset = putByIdStatus.offset(); - storageAccessData.identifierNumber = identifierNumber; - m_graph.m_storageAccessData.append(storageAccessData); - } else { - if (direct) - addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value); - else - addToGraph(PutById, OpInfo(identifierNumber), base, value); - canCountAsInlined = false; - } + m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock, + m_inlineStackTop->m_stubInfos, m_dfgStubInfos, + currentCodeOrigin(), m_graph.identifiers()[identifierNumber]); - if (canCountAsInlined && m_graph.compilation()) - m_graph.compilation()->noticeInlinedPutById(); - + handlePutById(base, identifierNumber, value, putByIdStatus, direct); NEXT_OPCODE(op_put_by_id); } - case op_init_global_const_nop: { - NEXT_OPCODE(op_init_global_const_nop); + case op_put_getter_by_id: + case op_put_setter_by_id: { + Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); + unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand]; + unsigned attributes = currentInstruction[3].u.operand; + Node* accessor = get(VirtualRegister(currentInstruction[4].u.operand)); + NodeType op = (opcodeID == op_put_getter_by_id) ? PutGetterById : PutSetterById; + addToGraph(op, OpInfo(identifierNumber), OpInfo(attributes), base, accessor); + NEXT_OPCODE(op_put_getter_by_id); } - case op_init_global_const: { - Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); - addToGraph( - PutGlobalVar, - OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)), - value); - NEXT_OPCODE(op_init_global_const); + case op_put_getter_setter_by_id: { + Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); + unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand]; + unsigned attributes = currentInstruction[3].u.operand; + Node* getter = get(VirtualRegister(currentInstruction[4].u.operand)); + Node* setter = get(VirtualRegister(currentInstruction[5].u.operand)); + addToGraph(PutGetterSetterById, OpInfo(identifierNumber), OpInfo(attributes), base, getter, setter); + NEXT_OPCODE(op_put_getter_setter_by_id); + } + + case op_put_getter_by_val: + case op_put_setter_by_val: { + Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); + Node* subscript = get(VirtualRegister(currentInstruction[2].u.operand)); + unsigned attributes = currentInstruction[3].u.operand; + Node* accessor = get(VirtualRegister(currentInstruction[4].u.operand)); + NodeType op = (opcodeID == op_put_getter_by_val) ? 
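
The large block removed above (structure checks, prototype chain checks, `AllocatePropertyStorage`/`ReallocatePropertyStorage`, `PutStructure`, `PutByOffset`) now lives behind `handlePutById()`. As a rough mental model, the decision it encodes looks like the following; the enum values and helper below are illustrative stand-ins, not the real `PutByIdStatus` API:

```cpp
#include <cstddef>
#include <iostream>

// Illustrative stand-ins: the real PutByIdStatus carries structures, offsets and variants.
enum class PutKind { SimpleReplace, Transition, Generic };

struct PutStatus {
    PutKind kind;
    size_t oldOutOfLineCapacity = 0;
    size_t newOutOfLineCapacity = 0;
    bool offsetIsInline = true;
};

void lowerPutById(const PutStatus& status) {
    switch (status.kind) {
    case PutKind::SimpleReplace:
        std::cout << "CheckStructure; PutByOffset into existing storage\n";
        break;
    case PutKind::Transition:
        // Growing the out-of-line capacity implies the new slot itself is out-of-line.
        if (status.oldOutOfLineCapacity != status.newOutOfLineCapacity) {
            if (!status.oldOutOfLineCapacity)
                std::cout << "AllocatePropertyStorage; ";
            else
                std::cout << "ReallocatePropertyStorage; ";
        }
        std::cout << "PutStructure; PutByOffset\n";
        break;
    case PutKind::Generic:
        std::cout << "PutById (generic IC path)\n";
        break;
    }
}

int main() {
    lowerPutById({PutKind::Transition, 0, 4, false});
}
```
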
PutGetterByVal : PutSetterByVal; + addToGraph(op, OpInfo(attributes), base, subscript, accessor); + NEXT_OPCODE(op_put_getter_by_val); + } + + case op_profile_type: { + Node* valueToProfile = get(VirtualRegister(currentInstruction[1].u.operand)); + addToGraph(ProfileType, OpInfo(currentInstruction[2].u.location), valueToProfile); + NEXT_OPCODE(op_profile_type); + } + + case op_profile_control_flow: { + BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation; + addToGraph(ProfileControlFlow, OpInfo(basicBlockLocation)); + NEXT_OPCODE(op_profile_control_flow); } // === Block terminators. === case op_jmp: { - unsigned relativeOffset = currentInstruction[1].u.operand; + ASSERT(!m_currentBlock->terminal()); + int relativeOffset = currentInstruction[1].u.operand; addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset)); + if (relativeOffset <= 0) + flushForTerminal(); LAST_OPCODE(op_jmp); } case op_jtrue: { unsigned relativeOffset = currentInstruction[2].u.operand; Node* condition = get(VirtualRegister(currentInstruction[1].u.operand)); - if (canFold(condition)) { - TriState state = valueOfJSConstant(condition).pureToBoolean(); - if (state == TrueTriState) { - addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset)); - LAST_OPCODE(op_jtrue); - } else if (state == FalseTriState) { - // Emit a placeholder for this bytecode operation but otherwise - // just fall through. - addToGraph(Phantom); - NEXT_OPCODE(op_jtrue); - } - } - addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition); LAST_OPCODE(op_jtrue); } case op_jfalse: { unsigned relativeOffset = currentInstruction[2].u.operand; Node* condition = get(VirtualRegister(currentInstruction[1].u.operand)); - if (canFold(condition)) { - TriState state = valueOfJSConstant(condition).pureToBoolean(); - if (state == FalseTriState) { - addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset)); - LAST_OPCODE(op_jfalse); - } else if (state == TrueTriState) { - // Emit a placeholder for this bytecode operation but otherwise - // just fall through. 
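
Branch terminals now carry a single `BranchData` (built by `branchData(taken, notTaken)`) instead of two raw bytecode offsets in separate `OpInfo`s; the targets start life as bytecode indices and are patched to basic blocks later, when `linkBlock()` runs. A tiny sketch of that two-phase target resolution, with simplified types:

```cpp
#include <cassert>
#include <iostream>
#include <map>

struct BasicBlock { unsigned bytecodeBegin; };

// Simplified BranchTarget: starts as a bytecode index, later resolved to a block.
struct BranchTarget {
    unsigned bytecodeIndex = 0;
    BasicBlock* block = nullptr;
};

struct BranchData {
    BranchTarget taken;
    BranchTarget notTaken;
};

int main() {
    std::map<unsigned, BasicBlock> blocks{{0, {0}}, {10, {10}}, {20, {20}}};

    // Parse time: only bytecode offsets are known.
    BranchData data;
    data.taken.bytecodeIndex = 20;
    data.notTaken.bytecodeIndex = 10;

    // Link time: map each bytecode index to its block (the real code searches the
    // vector of blocks built during parsing).
    data.taken.block = &blocks.at(data.taken.bytecodeIndex);
    data.notTaken.block = &blocks.at(data.notTaken.bytecodeIndex);

    assert(data.taken.block->bytecodeBegin == 20);
    std::cout << "branch linked\n";
}
```
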
- addToGraph(Phantom); - NEXT_OPCODE(op_jfalse); - } - } - addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition); LAST_OPCODE(op_jfalse); } case op_jeq_null: { unsigned relativeOffset = currentInstruction[2].u.operand; Node* value = get(VirtualRegister(currentInstruction[1].u.operand)); - Node* condition = addToGraph(CompareEqConstant, value, constantNull()); - addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition); + Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull)); + Node* condition = addToGraph(CompareEq, value, nullConstant); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition); LAST_OPCODE(op_jeq_null); } case op_jneq_null: { unsigned relativeOffset = currentInstruction[2].u.operand; Node* value = get(VirtualRegister(currentInstruction[1].u.operand)); - Node* condition = addToGraph(CompareEqConstant, value, constantNull()); - addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition); + Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull)); + Node* condition = addToGraph(CompareEq, value, nullConstant); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition); LAST_OPCODE(op_jneq_null); } @@ -2655,25 +3934,8 @@ bool ByteCodeParser::parseBlock(unsigned limit) unsigned relativeOffset = currentInstruction[3].u.operand; Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); - if (canFold(op1) && canFold(op2)) { - JSValue aValue = valueOfJSConstant(op1); - JSValue bValue = valueOfJSConstant(op2); - if (aValue.isNumber() && bValue.isNumber()) { - double a = aValue.asNumber(); - double b = bValue.asNumber(); - if (a < b) { - addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset)); - LAST_OPCODE(op_jless); - } else { - // Emit a placeholder for this bytecode operation but otherwise - // just fall through. - addToGraph(Phantom); - NEXT_OPCODE(op_jless); - } - } - } Node* condition = addToGraph(CompareLess, op1, op2); - addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition); LAST_OPCODE(op_jless); } @@ -2681,25 +3943,8 @@ bool ByteCodeParser::parseBlock(unsigned limit) unsigned relativeOffset = currentInstruction[3].u.operand; Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); - if (canFold(op1) && canFold(op2)) { - JSValue aValue = valueOfJSConstant(op1); - JSValue bValue = valueOfJSConstant(op2); - if (aValue.isNumber() && bValue.isNumber()) { - double a = aValue.asNumber(); - double b = bValue.asNumber(); - if (a <= b) { - addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset)); - LAST_OPCODE(op_jlesseq); - } else { - // Emit a placeholder for this bytecode operation but otherwise - // just fall through. 
- addToGraph(Phantom); - NEXT_OPCODE(op_jlesseq); - } - } - } Node* condition = addToGraph(CompareLessEq, op1, op2); - addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jlesseq))), condition); LAST_OPCODE(op_jlesseq); } @@ -2707,25 +3952,8 @@ bool ByteCodeParser::parseBlock(unsigned limit) unsigned relativeOffset = currentInstruction[3].u.operand; Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); - if (canFold(op1) && canFold(op2)) { - JSValue aValue = valueOfJSConstant(op1); - JSValue bValue = valueOfJSConstant(op2); - if (aValue.isNumber() && bValue.isNumber()) { - double a = aValue.asNumber(); - double b = bValue.asNumber(); - if (a > b) { - addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset)); - LAST_OPCODE(op_jgreater); - } else { - // Emit a placeholder for this bytecode operation but otherwise - // just fall through. - addToGraph(Phantom); - NEXT_OPCODE(op_jgreater); - } - } - } Node* condition = addToGraph(CompareGreater, op1, op2); - addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreater)), condition); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition); LAST_OPCODE(op_jgreater); } @@ -2733,25 +3961,8 @@ bool ByteCodeParser::parseBlock(unsigned limit) unsigned relativeOffset = currentInstruction[3].u.operand; Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); - if (canFold(op1) && canFold(op2)) { - JSValue aValue = valueOfJSConstant(op1); - JSValue bValue = valueOfJSConstant(op2); - if (aValue.isNumber() && bValue.isNumber()) { - double a = aValue.asNumber(); - double b = bValue.asNumber(); - if (a >= b) { - addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset)); - LAST_OPCODE(op_jgreatereq); - } else { - // Emit a placeholder for this bytecode operation but otherwise - // just fall through. - addToGraph(Phantom); - NEXT_OPCODE(op_jgreatereq); - } - } - } Node* condition = addToGraph(CompareGreaterEq, op1, op2); - addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreatereq)), condition); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition); LAST_OPCODE(op_jgreatereq); } @@ -2759,25 +3970,8 @@ bool ByteCodeParser::parseBlock(unsigned limit) unsigned relativeOffset = currentInstruction[3].u.operand; Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); - if (canFold(op1) && canFold(op2)) { - JSValue aValue = valueOfJSConstant(op1); - JSValue bValue = valueOfJSConstant(op2); - if (aValue.isNumber() && bValue.isNumber()) { - double a = aValue.asNumber(); - double b = bValue.asNumber(); - if (a < b) { - // Emit a placeholder for this bytecode operation but otherwise - // just fall through. 
- addToGraph(Phantom); - NEXT_OPCODE(op_jnless); - } else { - addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset)); - LAST_OPCODE(op_jnless); - } - } - } Node* condition = addToGraph(CompareLess, op1, op2); - addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition); LAST_OPCODE(op_jnless); } @@ -2785,25 +3979,8 @@ bool ByteCodeParser::parseBlock(unsigned limit) unsigned relativeOffset = currentInstruction[3].u.operand; Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); - if (canFold(op1) && canFold(op2)) { - JSValue aValue = valueOfJSConstant(op1); - JSValue bValue = valueOfJSConstant(op2); - if (aValue.isNumber() && bValue.isNumber()) { - double a = aValue.asNumber(); - double b = bValue.asNumber(); - if (a <= b) { - // Emit a placeholder for this bytecode operation but otherwise - // just fall through. - addToGraph(Phantom); - NEXT_OPCODE(op_jnlesseq); - } else { - addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset)); - LAST_OPCODE(op_jnlesseq); - } - } - } Node* condition = addToGraph(CompareLessEq, op1, op2); - addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition); LAST_OPCODE(op_jnlesseq); } @@ -2811,25 +3988,8 @@ bool ByteCodeParser::parseBlock(unsigned limit) unsigned relativeOffset = currentInstruction[3].u.operand; Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); - if (canFold(op1) && canFold(op2)) { - JSValue aValue = valueOfJSConstant(op1); - JSValue bValue = valueOfJSConstant(op2); - if (aValue.isNumber() && bValue.isNumber()) { - double a = aValue.asNumber(); - double b = bValue.asNumber(); - if (a > b) { - // Emit a placeholder for this bytecode operation but otherwise - // just fall through. - addToGraph(Phantom); - NEXT_OPCODE(op_jngreater); - } else { - addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset)); - LAST_OPCODE(op_jngreater); - } - } - } Node* condition = addToGraph(CompareGreater, op1, op2); - addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreater)), OpInfo(m_currentIndex + relativeOffset), condition); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition); LAST_OPCODE(op_jngreater); } @@ -2837,92 +3997,76 @@ bool ByteCodeParser::parseBlock(unsigned limit) unsigned relativeOffset = currentInstruction[3].u.operand; Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); - if (canFold(op1) && canFold(op2)) { - JSValue aValue = valueOfJSConstant(op1); - JSValue bValue = valueOfJSConstant(op2); - if (aValue.isNumber() && bValue.isNumber()) { - double a = aValue.asNumber(); - double b = bValue.asNumber(); - if (a >= b) { - // Emit a placeholder for this bytecode operation but otherwise - // just fall through. 
- addToGraph(Phantom); - NEXT_OPCODE(op_jngreatereq); - } else { - addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset)); - LAST_OPCODE(op_jngreatereq); - } - } - } Node* condition = addToGraph(CompareGreaterEq, op1, op2); - addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreatereq)), OpInfo(m_currentIndex + relativeOffset), condition); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition); LAST_OPCODE(op_jngreatereq); } case op_switch_imm: { - SwitchData data; + SwitchData& data = *m_graph.m_switchData.add(); data.kind = SwitchImm; data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand]; - data.setFallThroughBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand); + data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand); SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex); for (unsigned i = 0; i < table.branchOffsets.size(); ++i) { if (!table.branchOffsets[i]) continue; unsigned target = m_currentIndex + table.branchOffsets[i]; - if (target == data.fallThroughBytecodeIndex()) + if (target == data.fallThrough.bytecodeIndex()) continue; - data.cases.append(SwitchCase::withBytecodeIndex(jsNumber(static_cast<int32_t>(table.min + i)), target)); + data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target)); } - m_graph.m_switchData.append(data); - addToGraph(Switch, OpInfo(&m_graph.m_switchData.last()), get(VirtualRegister(currentInstruction[3].u.operand))); + addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand))); + flushIfTerminal(data); LAST_OPCODE(op_switch_imm); } case op_switch_char: { - SwitchData data; + SwitchData& data = *m_graph.m_switchData.add(); data.kind = SwitchChar; data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand]; - data.setFallThroughBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand); + data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand); SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex); for (unsigned i = 0; i < table.branchOffsets.size(); ++i) { if (!table.branchOffsets[i]) continue; unsigned target = m_currentIndex + table.branchOffsets[i]; - if (target == data.fallThroughBytecodeIndex()) + if (target == data.fallThrough.bytecodeIndex()) continue; data.cases.append( SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target)); } - m_graph.m_switchData.append(data); - addToGraph(Switch, OpInfo(&m_graph.m_switchData.last()), get(VirtualRegister(currentInstruction[3].u.operand))); + addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand))); + flushIfTerminal(data); LAST_OPCODE(op_switch_char); } case op_switch_string: { - SwitchData data; + SwitchData& data = *m_graph.m_switchData.add(); data.kind = SwitchString; data.switchTableIndex = currentInstruction[1].u.operand; - data.setFallThroughBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand); + data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand); StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex); StringJumpTable::StringOffsetTable::iterator iter; StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end(); for (iter = table.offsetTable.begin(); iter != end; ++iter) { unsigned target = 
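
The switch lowering above walks the baseline jump table, skips empty slots and any case whose target is just the fall-through, and records the surviving cases (with frozen constants) in a graph-owned `SwitchData`. A small standalone version of that filtering, assuming a simple vector-based jump table:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

struct SwitchCase { int32_t value; unsigned targetBytecodeIndex; };

// Mirror of the loop above: a branch offset of 0 means "no case here", and cases
// that land on the fall-through target are dropped.
std::vector<SwitchCase> buildCases(
    const std::vector<int>& branchOffsets, int32_t tableMin,
    unsigned currentIndex, unsigned fallThroughIndex)
{
    std::vector<SwitchCase> cases;
    for (unsigned i = 0; i < branchOffsets.size(); ++i) {
        if (!branchOffsets[i])
            continue;
        unsigned target = currentIndex + branchOffsets[i];
        if (target == fallThroughIndex)
            continue;
        cases.push_back({static_cast<int32_t>(tableMin + i), target});
    }
    return cases;
}

int main() {
    // Hypothetical table covering values 5..8; the offset 3 happens to equal the fall-through.
    auto cases = buildCases({7, 0, 3, 12}, 5, /*currentIndex*/ 100, /*fallThroughIndex*/ 103);
    for (auto& c : cases)
        std::cout << c.value << " -> " << c.targetBytecodeIndex << "\n";
}
```
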
m_currentIndex + iter->value.branchOffset; - if (target == data.fallThroughBytecodeIndex()) + if (target == data.fallThrough.bytecodeIndex()) continue; data.cases.append( SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target)); } - m_graph.m_switchData.append(data); - addToGraph(Switch, OpInfo(&m_graph.m_switchData.last()), get(VirtualRegister(currentInstruction[3].u.operand))); + addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand))); + flushIfTerminal(data); LAST_OPCODE(op_switch_string); } case op_ret: - flushArgumentsAndCapturedVariables(); + ASSERT(!m_currentBlock->terminal()); if (inlineCallFrame()) { - ASSERT(m_inlineStackTop->m_returnValue.isValid()); - setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSet); + flushForReturn(); + if (m_inlineStackTop->m_returnValue.isValid()) + setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush); m_inlineStackTop->m_didReturn = true; if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) { // If we're returning from the first block, then we're done parsing. @@ -2944,61 +4088,78 @@ bool ByteCodeParser::parseBlock(unsigned limit) LAST_OPCODE(op_ret); } addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand))); + flushForReturn(); LAST_OPCODE(op_ret); case op_end: - flushArgumentsAndCapturedVariables(); ASSERT(!inlineCallFrame()); addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand))); + flushForReturn(); LAST_OPCODE(op_end); case op_throw: addToGraph(Throw, get(VirtualRegister(currentInstruction[1].u.operand))); - flushAllArgumentsAndCapturedVariablesInInlineStack(); + flushForTerminal(); addToGraph(Unreachable); LAST_OPCODE(op_throw); case op_throw_static_error: addToGraph(ThrowReferenceError); - flushAllArgumentsAndCapturedVariablesInInlineStack(); + flushForTerminal(); addToGraph(Unreachable); LAST_OPCODE(op_throw_static_error); + + case op_catch: + m_graph.m_hasExceptionHandlers = true; + NEXT_OPCODE(op_catch); case op_call: - handleCall(currentInstruction, Call, CodeForCall); + handleCall(currentInstruction, Call, CallMode::Regular); + // Verify that handleCall(), which could have inlined the callee, didn't trash m_currentInstruction. + ASSERT(m_currentInstruction == currentInstruction); NEXT_OPCODE(op_call); - + + case op_tail_call: { + flushForReturn(); + Terminality terminality = handleCall(currentInstruction, TailCall, CallMode::Tail); + // Verify that handleCall(), which could have inlined the callee, didn't trash m_currentInstruction. + ASSERT(m_currentInstruction == currentInstruction); + // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function. + // If the call is not terminal, however, then we want the subsequent op_ret/op_jumpt to update metadata and clean + // things up. 
+ if (terminality == NonTerminal) { + NEXT_OPCODE(op_tail_call); + } else { + LAST_OPCODE(op_tail_call); + } + } + case op_construct: - handleCall(currentInstruction, Construct, CodeForConstruct); + handleCall(currentInstruction, Construct, CallMode::Construct); NEXT_OPCODE(op_construct); case op_call_varargs: { - ASSERT(inlineCallFrame()); - ASSERT(currentInstruction[4].u.operand == m_inlineStackTop->m_codeBlock->argumentsRegister().offset()); - ASSERT(!m_inlineStackTop->m_codeBlock->symbolTable()->slowArguments()); - // It would be cool to funnel this into handleCall() so that it can handle - // inlining. But currently that won't be profitable anyway, since none of the - // uses of call_varargs will be inlineable. So we set this up manually and - // without inline/intrinsic detection. - - SpeculatedType prediction = getPrediction(); - - addToGraph(CheckArgumentsNotCreated); - - unsigned argCount = inlineCallFrame()->arguments.size(); - if (JSStack::ThisArgument + argCount > m_parameterSlots) - m_parameterSlots = JSStack::ThisArgument + argCount; - - addVarArgChild(get(VirtualRegister(currentInstruction[2].u.operand))); // callee - addVarArgChild(get(VirtualRegister(currentInstruction[3].u.operand))); // this - for (unsigned argument = 1; argument < argCount; ++argument) - addVarArgChild(get(virtualRegisterForArgument(argument))); - - set(VirtualRegister(currentInstruction[1].u.operand), - addToGraph(Node::VarArg, Call, OpInfo(0), OpInfo(prediction))); - + handleVarargsCall(currentInstruction, CallVarargs, CallMode::Regular); NEXT_OPCODE(op_call_varargs); } + + case op_tail_call_varargs: { + flushForReturn(); + Terminality terminality = handleVarargsCall(currentInstruction, TailCallVarargs, CallMode::Tail); + // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function. + // If the call is not terminal, however, then we want the subsequent op_ret/op_jumpt to update metadata and clean + // things up. + if (terminality == NonTerminal) { + NEXT_OPCODE(op_tail_call_varargs); + } else { + LAST_OPCODE(op_tail_call_varargs); + } + } + + case op_construct_varargs: { + handleVarargsCall(currentInstruction, ConstructVarargs, CallMode::Construct); + NEXT_OPCODE(op_construct_varargs); + } case op_jneq_ptr: // Statically speculate for now. It makes sense to let speculate-only jneq_ptr @@ -3006,16 +4167,18 @@ bool ByteCodeParser::parseBlock(unsigned limit) // already gnarly enough as it is. ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer)); addToGraph( - CheckFunction, - OpInfo(actualPointerFor(m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)), + CheckCell, + OpInfo(m_graph.freeze(static_cast<JSCell*>(actualPointerFor( + m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)))), get(VirtualRegister(currentInstruction[1].u.operand))); addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr))); LAST_OPCODE(op_jneq_ptr); case op_resolve_scope: { int dst = currentInstruction[1].u.operand; - ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand); - unsigned depth = currentInstruction[4].u.operand; + ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand); + unsigned depth = currentInstruction[5].u.operand; + int scope = currentInstruction[2].u.operand; // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints. 
if (needsVarInjectionChecks(resolveType)) @@ -3026,19 +4189,55 @@ bool ByteCodeParser::parseBlock(unsigned limit) case GlobalVar: case GlobalPropertyWithVarInjectionChecks: case GlobalVarWithVarInjectionChecks: - set(VirtualRegister(dst), cellConstant(m_inlineStackTop->m_codeBlock->globalObject())); + case GlobalLexicalVar: + case GlobalLexicalVarWithVarInjectionChecks: { + JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock); + RELEASE_ASSERT(constantScope); + RELEASE_ASSERT(static_cast<JSScope*>(currentInstruction[6].u.pointer) == constantScope); + set(VirtualRegister(dst), weakJSConstant(constantScope)); + addToGraph(Phantom, get(VirtualRegister(scope))); break; + } + case ModuleVar: { + // Since the value of the "scope" virtual register is not used in LLInt / baseline op_resolve_scope with ModuleVar, + // we need not to keep it alive by the Phantom node. + JSModuleEnvironment* moduleEnvironment = jsCast<JSModuleEnvironment*>(currentInstruction[6].u.jsCell.get()); + // Module environment is already strongly referenced by the CodeBlock. + set(VirtualRegister(dst), weakJSConstant(moduleEnvironment)); + break; + } + case LocalClosureVar: case ClosureVar: case ClosureVarWithVarInjectionChecks: { - JSActivation* activation = currentInstruction[5].u.activation.get(); - if (activation - && activation->symbolTable()->m_functionEnteredOnce.isStillValid()) { - addToGraph(FunctionReentryWatchpoint, OpInfo(activation->symbolTable())); - set(VirtualRegister(dst), cellConstant(activation)); + Node* localBase = get(VirtualRegister(scope)); + addToGraph(Phantom, localBase); // OSR exit cannot handle resolve_scope on a DCE'd scope. + + // We have various forms of constant folding here. This is necessary to avoid + // spurious recompiles in dead-but-foldable code. 
+ if (SymbolTable* symbolTable = currentInstruction[6].u.symbolTable.get()) { + InferredValue* singleton = symbolTable->singletonScope(); + if (JSValue value = singleton->inferredValue()) { + m_graph.watchpoints().addLazily(singleton); + set(VirtualRegister(dst), weakJSConstant(value)); + break; + } + } + if (JSScope* scope = localBase->dynamicCastConstant<JSScope*>()) { + for (unsigned n = depth; n--;) + scope = scope->next(); + set(VirtualRegister(dst), weakJSConstant(scope)); break; } - set(VirtualRegister(dst), - getScope(m_inlineStackTop->m_codeBlock->needsActivation(), depth)); + for (unsigned n = depth; n--;) + localBase = addToGraph(SkipScope, localBase); + set(VirtualRegister(dst), localBase); + break; + } + case UnresolvedProperty: + case UnresolvedPropertyWithVarInjectionChecks: { + addToGraph(Phantom, get(VirtualRegister(scope))); + addToGraph(ForceOSRExit); + set(VirtualRegister(dst), addToGraph(JSConstant, OpInfo(m_constantNull))); break; } case Dynamic: @@ -3052,81 +4251,157 @@ bool ByteCodeParser::parseBlock(unsigned limit) int dst = currentInstruction[1].u.operand; int scope = currentInstruction[2].u.operand; unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; - StringImpl* uid = m_graph.identifiers()[identifierNumber]; - ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type(); + UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; + ResolveType resolveType = GetPutInfo(currentInstruction[4].u.operand).resolveType(); Structure* structure = 0; WatchpointSet* watchpoints = 0; uintptr_t operand; { ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); - if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks) + if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) watchpoints = currentInstruction[5].u.watchpointSet; - else + else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks) structure = currentInstruction[5].u.structure.get(); operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer); } UNUSED_PARAM(watchpoints); // We will use this in the future. For now we set it as a way of documenting the fact that that's what index 5 is in GlobalVar mode. 
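
The `ClosureVar` case of `op_resolve_scope` above folds in two ways: if the symbol table's singleton scope is known, the result is that scope (guarded by a lazily added watchpoint); otherwise, if the base scope is a compile-time constant, the parser simply walks `->next()` `depth` times and embeds the resulting scope as a weak constant, falling back to a chain of `SkipScope` nodes when neither applies. A simplified scope-chain walk illustrating the constant case (a toy `Scope` type, not the real `JSScope`):

```cpp
#include <cassert>
#include <iostream>

struct Scope {
    Scope* next = nullptr;   // enclosing scope
    const char* name = "";
};

// If the starting scope is known at compile time, resolving "depth levels up"
// is just pointer chasing, and the answer can be baked in as a constant.
const Scope* resolveConstantScope(const Scope* base, unsigned depth) {
    for (unsigned n = depth; n--;)
        base = base->next;
    return base;
}

int main() {
    Scope global{nullptr, "global"};
    Scope outer{&global, "outer"};
    Scope inner{&outer, "inner"};

    const Scope* resolved = resolveConstantScope(&inner, 2);
    assert(resolved == &global);
    std::cout << "resolved to " << resolved->name << "\n";
}
```
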
- SpeculatedType prediction = getPrediction(); JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject(); switch (resolveType) { case GlobalProperty: case GlobalPropertyWithVarInjectionChecks: { - GetByIdStatus status = GetByIdStatus::computeFor(*m_vm, structure, uid); - if (status.takesSlowPath()) { + SpeculatedType prediction = getPrediction(); + + GetByIdStatus status = GetByIdStatus::computeFor(structure, uid); + if (status.state() != GetByIdStatus::Simple + || status.numVariants() != 1 + || status[0].structureSet().size() != 1) { set(VirtualRegister(dst), addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(VirtualRegister(scope)))); break; } - Node* base = cellConstantWithStructureCheck(globalObject, status.structureSet().singletonStructure()); + + Node* base = weakJSConstant(globalObject); + Node* result = load(prediction, base, identifierNumber, status[0]); addToGraph(Phantom, get(VirtualRegister(scope))); - if (JSValue specificValue = status.specificValue()) - set(VirtualRegister(dst), cellConstant(specificValue.asCell())); - else - set(VirtualRegister(dst), handleGetByOffset(prediction, base, identifierNumber, operand)); + set(VirtualRegister(dst), result); break; } case GlobalVar: - case GlobalVarWithVarInjectionChecks: { + case GlobalVarWithVarInjectionChecks: + case GlobalLexicalVar: + case GlobalLexicalVarWithVarInjectionChecks: { addToGraph(Phantom, get(VirtualRegister(scope))); - SymbolTableEntry entry = globalObject->symbolTable()->get(uid); - VariableWatchpointSet* watchpointSet = entry.watchpointSet(); - JSValue specificValue = - watchpointSet ? watchpointSet->inferredValue() : JSValue(); - if (!specificValue) { - set(VirtualRegister(dst), addToGraph(GetGlobalVar, OpInfo(operand), OpInfo(prediction))); - break; + WatchpointSet* watchpointSet; + ScopeOffset offset; + JSSegmentedVariableObject* scopeObject = jsCast<JSSegmentedVariableObject*>(JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock)); + { + ConcurrentJITLocker locker(scopeObject->symbolTable()->m_lock); + SymbolTableEntry entry = scopeObject->symbolTable()->get(locker, uid); + watchpointSet = entry.watchpointSet(); + offset = entry.scopeOffset(); + } + if (watchpointSet && watchpointSet->state() == IsWatched) { + // This has a fun concurrency story. There is the possibility of a race in two + // directions: + // + // We see that the set IsWatched, but in the meantime it gets invalidated: this is + // fine because if we saw that it IsWatched then we add a watchpoint. If it gets + // invalidated, then this compilation is invalidated. Note that in the meantime we + // may load an absurd value from the global object. It's fine to load an absurd + // value if the compilation is invalidated anyway. + // + // We see that the set IsWatched, but the value isn't yet initialized: this isn't + // possible because of the ordering of operations. + // + // Here's how we order operations: + // + // Main thread stores to the global object: always store a value first, and only + // after that do we touch the watchpoint set. There is a fence in the touch, that + // ensures that the store to the global object always happens before the touch on the + // set. + // + // Compilation thread: always first load the state of the watchpoint set, and then + // load the value. The WatchpointSet::state() method does fences for us to ensure + // that the load of the state happens before our load of the value. 
+ // + // Finalizing compilation: this happens on the main thread and synchronously checks + // validity of all watchpoint sets. + // + // We will only perform optimizations if the load of the state yields IsWatched. That + // means that at least one store would have happened to initialize the original value + // of the variable (that is, the value we'd like to constant fold to). There may be + // other stores that happen after that, but those stores will invalidate the + // watchpoint set and also the compilation. + + // Note that we need to use the operand, which is a direct pointer at the global, + // rather than looking up the global by doing variableAt(offset). That's because the + // internal data structures of JSSegmentedVariableObject are not thread-safe even + // though accessing the global itself is. The segmentation involves a vector spine + // that resizes with malloc/free, so if new globals unrelated to the one we are + // reading are added, we might access freed memory if we do variableAt(). + WriteBarrier<Unknown>* pointer = bitwise_cast<WriteBarrier<Unknown>*>(operand); + + ASSERT(scopeObject->findVariableIndex(pointer) == offset); + + JSValue value = pointer->get(); + if (value) { + m_graph.watchpoints().addLazily(watchpointSet); + set(VirtualRegister(dst), weakJSConstant(value)); + break; + } } - addToGraph(VariableWatchpoint, OpInfo(watchpointSet)); - set(VirtualRegister(dst), inferredConstant(specificValue)); + SpeculatedType prediction = getPrediction(); + NodeType nodeType; + if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks) + nodeType = GetGlobalVar; + else + nodeType = GetGlobalLexicalVariable; + Node* value = addToGraph(nodeType, OpInfo(operand), OpInfo(prediction)); + if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) + addToGraph(CheckNotEmpty, value); + set(VirtualRegister(dst), value); break; } + case LocalClosureVar: case ClosureVar: case ClosureVarWithVarInjectionChecks: { Node* scopeNode = get(VirtualRegister(scope)); - if (JSActivation* activation = m_graph.tryGetActivation(scopeNode)) { - SymbolTable* symbolTable = activation->symbolTable(); - ConcurrentJITLocker locker(symbolTable->m_lock); - SymbolTable::Map::iterator iter = symbolTable->find(locker, uid); - ASSERT(iter != symbolTable->end(locker)); - VariableWatchpointSet* watchpointSet = iter->value.watchpointSet(); - if (watchpointSet) { - if (JSValue value = watchpointSet->inferredValue()) { - addToGraph(Phantom, scopeNode); - addToGraph(VariableWatchpoint, OpInfo(watchpointSet)); - set(VirtualRegister(dst), inferredConstant(value)); - break; - } - } + + // Ideally we wouldn't have to do this Phantom. But: + // + // For the constant case: we must do it because otherwise we would have no way of knowing + // that the scope is live at OSR here. + // + // For the non-constant case: GetClosureVar could be DCE'd, but baseline's implementation + // won't be able to handle an Undefined scope. + addToGraph(Phantom, scopeNode); + + // Constant folding in the bytecode parser is important for performance. This may not + // have executed yet. If it hasn't, then we won't have a prediction. Lacking a + // prediction, we'd otherwise think that it has to exit. Then when it did execute, we + // would recompile. But if we can fold it here, we avoid the exit. 
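
The ordering contract spelled out in the comment above (main thread: store the value, then touch the watchpoint set; compiler thread: load the set's state, then load the value) is essentially a release/acquire pairing. A stripped-down analogue using C++ atomics rather than the real `WatchpointSet` fences:

```cpp
#include <atomic>
#include <cassert>
#include <thread>

enum State { ClearWatchpoint, IsWatched, IsInvalidated };

std::atomic<int> state{ClearWatchpoint};
int globalValue = 0;   // stands in for the global variable's slot

// Main thread: always store the value first, then touch the watchpoint set.
void mainThreadStore(int value) {
    globalValue = value;
    state.store(IsWatched, std::memory_order_release);
}

// Compiler thread: load the state first; only if it is IsWatched is the value
// guaranteed to be the initialized one we may constant-fold to.
bool compilerThreadTryFold(int& out) {
    if (state.load(std::memory_order_acquire) != IsWatched)
        return false;
    out = globalValue;   // could only be stale if the set later gets invalidated,
                         // in which case the compilation is thrown away anyway
    return true;
}

int main() {
    std::thread writer(mainThreadStore, 42);
    int folded = 0;
    while (!compilerThreadTryFold(folded)) { /* spin until the store is visible */ }
    writer.join();
    assert(folded == 42);
}
```
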
+ if (JSValue value = m_graph.tryGetConstantClosureVar(scopeNode, ScopeOffset(operand))) { + set(VirtualRegister(dst), weakJSConstant(value)); + break; } + SpeculatedType prediction = getPrediction(); set(VirtualRegister(dst), - addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), - addToGraph(GetClosureRegisters, scopeNode))); + addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), scopeNode)); + break; + } + case UnresolvedProperty: + case UnresolvedPropertyWithVarInjectionChecks: { + addToGraph(ForceOSRExit); + Node* scopeNode = get(VirtualRegister(scope)); + addToGraph(Phantom, scopeNode); + set(VirtualRegister(dst), addToGraph(JSConstant, OpInfo(m_constantUndefined))); break; } + case ModuleVar: case Dynamic: RELEASE_ASSERT_NOT_REACHED(); break; @@ -3136,19 +4411,26 @@ bool ByteCodeParser::parseBlock(unsigned limit) case op_put_to_scope: { unsigned scope = currentInstruction[1].u.operand; - unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand]; + unsigned identifierNumber = currentInstruction[2].u.operand; + if (identifierNumber != UINT_MAX) + identifierNumber = m_inlineStackTop->m_identifierRemap[identifierNumber]; unsigned value = currentInstruction[3].u.operand; - ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type(); - StringImpl* uid = m_graph.identifiers()[identifierNumber]; - - Structure* structure = 0; - VariableWatchpointSet* watchpoints = 0; + GetPutInfo getPutInfo = GetPutInfo(currentInstruction[4].u.operand); + ResolveType resolveType = getPutInfo.resolveType(); + UniquedStringImpl* uid; + if (identifierNumber != UINT_MAX) + uid = m_graph.identifiers()[identifierNumber]; + else + uid = nullptr; + + Structure* structure = nullptr; + WatchpointSet* watchpoints = nullptr; uintptr_t operand; { ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); - if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks) + if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) watchpoints = currentInstruction[5].u.watchpointSet; - else + else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks) structure = currentInstruction[5].u.structure.get(); operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer); } @@ -3158,37 +4440,77 @@ bool ByteCodeParser::parseBlock(unsigned limit) switch (resolveType) { case GlobalProperty: case GlobalPropertyWithVarInjectionChecks: { - PutByIdStatus status = PutByIdStatus::computeFor(*m_vm, globalObject, structure, uid, false); - if (!status.isSimpleReplace()) { + PutByIdStatus status; + if (uid) + status = PutByIdStatus::computeFor(globalObject, structure, uid, false); + else + status = PutByIdStatus(PutByIdStatus::TakesSlowPath); + if (status.numVariants() != 1 + || status[0].kind() != PutByIdVariant::Replace + || status[0].structure().size() != 1) { addToGraph(PutById, OpInfo(identifierNumber), get(VirtualRegister(scope)), get(VirtualRegister(value))); break; } - Node* base = cellConstantWithStructureCheck(globalObject, status.oldStructure()); - addToGraph(Phantom, get(VirtualRegister(scope))); - handlePutByOffset(base, identifierNumber, static_cast<PropertyOffset>(operand), get(VirtualRegister(value))); + Node* base = weakJSConstant(globalObject); + store(base, identifierNumber, status[0], get(VirtualRegister(value))); // 
Keep scope alive until after put. addToGraph(Phantom, get(VirtualRegister(scope))); break; } + case GlobalLexicalVar: + case GlobalLexicalVarWithVarInjectionChecks: case GlobalVar: case GlobalVarWithVarInjectionChecks: { - SymbolTableEntry entry = globalObject->symbolTable()->get(uid); - ASSERT(watchpoints == entry.watchpointSet()); + if (getPutInfo.initializationMode() != Initialization && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) { + SpeculatedType prediction = SpecEmpty; + Node* value = addToGraph(GetGlobalLexicalVariable, OpInfo(operand), OpInfo(prediction)); + addToGraph(CheckNotEmpty, value); + } + + JSSegmentedVariableObject* scopeObject = jsCast<JSSegmentedVariableObject*>(JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock)); + if (watchpoints) { + SymbolTableEntry entry = scopeObject->symbolTable()->get(uid); + ASSERT_UNUSED(entry, watchpoints == entry.watchpointSet()); + } Node* valueNode = get(VirtualRegister(value)); - addToGraph(PutGlobalVar, OpInfo(operand), valueNode); - if (watchpoints->state() != IsInvalidated) - addToGraph(NotifyWrite, OpInfo(watchpoints), valueNode); + addToGraph(PutGlobalVariable, OpInfo(operand), weakJSConstant(scopeObject), valueNode); + if (watchpoints && watchpoints->state() != IsInvalidated) { + // Must happen after the store. See comment for GetGlobalVar. + addToGraph(NotifyWrite, OpInfo(watchpoints)); + } // Keep scope alive until after put. addToGraph(Phantom, get(VirtualRegister(scope))); break; } + case LocalClosureVar: case ClosureVar: case ClosureVarWithVarInjectionChecks: { Node* scopeNode = get(VirtualRegister(scope)); - Node* scopeRegisters = addToGraph(GetClosureRegisters, scopeNode); - addToGraph(PutClosureVar, OpInfo(operand), scopeNode, scopeRegisters, get(VirtualRegister(value))); + Node* valueNode = get(VirtualRegister(value)); + + addToGraph(PutClosureVar, OpInfo(operand), scopeNode, valueNode); + + if (watchpoints && watchpoints->state() != IsInvalidated) { + // Must happen after the store. See comment for GetGlobalVar. + addToGraph(NotifyWrite, OpInfo(watchpoints)); + } + break; + } + + case UnresolvedProperty: + case UnresolvedPropertyWithVarInjectionChecks: { + addToGraph(ForceOSRExit); + Node* scopeNode = get(VirtualRegister(scope)); + addToGraph(Phantom, scopeNode); break; } + + case ModuleVar: + // Need not to keep "scope" and "value" register values here by Phantom because + // they are not used in LLInt / baseline op_put_to_scope with ModuleVar. 
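
For `GlobalLexicalVar` stores that are not the initializing store, the code above first loads the current value and emits `CheckNotEmpty`, i.e. a temporal-dead-zone check: assigning to a `let`/`const` binding before its declaration has executed must throw. A minimal model of that check, using an empty optional in place of JSC's empty `JSValue`:

```cpp
#include <iostream>
#include <optional>
#include <stdexcept>

// The empty optional stands in for the "empty" value marking an uninitialized
// lexical binding (its temporal dead zone).
struct LexicalBinding {
    std::optional<double> slot;   // nullopt == not yet initialized
};

void initialize(LexicalBinding& b, double v) { b.slot = v; }

void assignTo(LexicalBinding& b, double v) {
    if (!b.slot)   // CheckNotEmpty would bail out to a throw here
        throw std::runtime_error("ReferenceError: binding used before initialization");
    b.slot = v;
}

int main() {
    LexicalBinding x;
    try {
        assignTo(x, 1);   // still in the TDZ: throws
    } catch (const std::exception& e) {
        std::cout << e.what() << "\n";
    }
    initialize(x, 1);
    assignTo(x, 2);       // fine after initialization
    std::cout << *x.slot << "\n";
}
```
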
+ addToGraph(ForceOSRExit); + break; + case Dynamic: RELEASE_ASSERT_NOT_REACHED(); break; @@ -3209,86 +4531,115 @@ bool ByteCodeParser::parseBlock(unsigned limit) m_currentBlock->isOSRTarget = true; addToGraph(LoopHint); - - if (m_vm->watchdog.isEnabled()) - addToGraph(CheckWatchdogTimer); - NEXT_OPCODE(op_loop_hint); } - - case op_init_lazy_reg: { - set(VirtualRegister(currentInstruction[1].u.operand), getJSConstantForValue(JSValue())); - ASSERT(operandIsLocal(currentInstruction[1].u.operand)); - m_graph.m_lazyVars.set(VirtualRegister(currentInstruction[1].u.operand).toLocal()); - NEXT_OPCODE(op_init_lazy_reg); + + case op_watchdog: { + addToGraph(CheckWatchdogTimer); + NEXT_OPCODE(op_watchdog); } - case op_create_activation: { - set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CreateActivation, get(VirtualRegister(currentInstruction[1].u.operand)))); - NEXT_OPCODE(op_create_activation); + case op_create_lexical_environment: { + VirtualRegister symbolTableRegister(currentInstruction[3].u.operand); + VirtualRegister initialValueRegister(currentInstruction[4].u.operand); + ASSERT(symbolTableRegister.isConstant() && initialValueRegister.isConstant()); + FrozenValue* symbolTable = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(symbolTableRegister.offset())); + FrozenValue* initialValue = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(initialValueRegister.offset())); + Node* scope = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* lexicalEnvironment = addToGraph(CreateActivation, OpInfo(symbolTable), OpInfo(initialValue), scope); + set(VirtualRegister(currentInstruction[1].u.operand), lexicalEnvironment); + NEXT_OPCODE(op_create_lexical_environment); + } + + case op_get_parent_scope: { + Node* currentScope = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* newScope = addToGraph(SkipScope, currentScope); + set(VirtualRegister(currentInstruction[1].u.operand), newScope); + addToGraph(Phantom, currentScope); + NEXT_OPCODE(op_get_parent_scope); + } + + case op_get_scope: { + // Help the later stages a bit by doing some small constant folding here. Note that this + // only helps for the first basic block. It's extremely important not to constant fold + // loads from the scope register later, as that would prevent the DFG from tracking the + // bytecode-level liveness of the scope register. 
+ Node* callee = get(VirtualRegister(JSStack::Callee)); + Node* result; + if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>()) + result = weakJSConstant(function->scope()); + else + result = addToGraph(GetScope, callee); + set(VirtualRegister(currentInstruction[1].u.operand), result); + NEXT_OPCODE(op_get_scope); } - case op_create_arguments: { - m_graph.m_hasArguments = true; - Node* createArguments = addToGraph(CreateArguments, get(VirtualRegister(currentInstruction[1].u.operand))); + case op_create_direct_arguments: { + noticeArgumentsUse(); + Node* createArguments = addToGraph(CreateDirectArguments); set(VirtualRegister(currentInstruction[1].u.operand), createArguments); - set(unmodifiedArgumentsRegister(VirtualRegister(currentInstruction[1].u.operand)), createArguments); - NEXT_OPCODE(op_create_arguments); + NEXT_OPCODE(op_create_direct_arguments); } - case op_tear_off_activation: { - addToGraph(TearOffActivation, get(VirtualRegister(currentInstruction[1].u.operand))); - NEXT_OPCODE(op_tear_off_activation); + case op_create_scoped_arguments: { + noticeArgumentsUse(); + Node* createArguments = addToGraph(CreateScopedArguments, get(VirtualRegister(currentInstruction[2].u.operand))); + set(VirtualRegister(currentInstruction[1].u.operand), createArguments); + NEXT_OPCODE(op_create_scoped_arguments); } - case op_tear_off_arguments: { - m_graph.m_hasArguments = true; - addToGraph(TearOffArguments, get(unmodifiedArgumentsRegister(VirtualRegister(currentInstruction[1].u.operand))), get(VirtualRegister(currentInstruction[2].u.operand))); - NEXT_OPCODE(op_tear_off_arguments); - } - - case op_get_arguments_length: { - m_graph.m_hasArguments = true; - set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetMyArgumentsLengthSafe)); - NEXT_OPCODE(op_get_arguments_length); + case op_create_out_of_band_arguments: { + noticeArgumentsUse(); + Node* createArguments = addToGraph(CreateClonedArguments); + set(VirtualRegister(currentInstruction[1].u.operand), createArguments); + NEXT_OPCODE(op_create_out_of_band_arguments); } - case op_get_argument_by_val: { - m_graph.m_hasArguments = true; + case op_get_from_arguments: { set(VirtualRegister(currentInstruction[1].u.operand), addToGraph( - GetMyArgumentByValSafe, OpInfo(0), OpInfo(getPrediction()), - get(VirtualRegister(currentInstruction[3].u.operand)))); - NEXT_OPCODE(op_get_argument_by_val); + GetFromArguments, + OpInfo(currentInstruction[3].u.operand), + OpInfo(getPrediction()), + get(VirtualRegister(currentInstruction[2].u.operand)))); + NEXT_OPCODE(op_get_from_arguments); } - case op_new_func: { - if (!currentInstruction[3].u.operand) { - set(VirtualRegister(currentInstruction[1].u.operand), - addToGraph(NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand))); - } else { - set(VirtualRegister(currentInstruction[1].u.operand), - addToGraph( - NewFunction, - OpInfo(currentInstruction[2].u.operand), - get(VirtualRegister(currentInstruction[1].u.operand)))); - } - NEXT_OPCODE(op_new_func); + case op_put_to_arguments: { + addToGraph( + PutToArguments, + OpInfo(currentInstruction[2].u.operand), + get(VirtualRegister(currentInstruction[1].u.operand)), + get(VirtualRegister(currentInstruction[3].u.operand))); + NEXT_OPCODE(op_put_to_arguments); } - case op_new_captured_func: { - Node* function = addToGraph( - NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand)); - if (VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet) - addToGraph(NotifyWrite, OpInfo(set), function); - 
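
The `op_get_scope` lowering above is a small but typical example of parse-time folding: when the callee is a compile-time constant `JSFunction`, its scope is also a constant, so the parser embeds it directly instead of emitting a `GetScope` load (and, per the preceding comment, it deliberately does this only for the callee, never for later reads of the scope register). A toy version of the same decision, with made-up types:

```cpp
#include <iostream>
#include <string>

struct Scope { std::string name; };
struct Function { Scope* scope; };

// Stand-in for a DFG node that is either a known constant or a runtime value.
struct CalleeNode {
    const Function* constantFunction = nullptr;   // non-null if the callee is a known constant
};

std::string lowerGetScope(const CalleeNode& callee) {
    if (callee.constantFunction)
        return "JSConstant(scope=" + callee.constantFunction->scope->name + ")";
    return "GetScope(callee)";
}

int main() {
    Scope global{"global"};
    Function f{&global};
    std::cout << lowerGetScope({&f}) << "\n";   // folded
    std::cout << lowerGetScope({}) << "\n";     // generic load
}
```
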
set(VirtualRegister(currentInstruction[1].u.operand), function); - NEXT_OPCODE(op_new_captured_func); + case op_new_func: + case op_new_generator_func: { + FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(currentInstruction[3].u.operand); + FrozenValue* frozen = m_graph.freezeStrong(decl); + NodeType op = (opcodeID == op_new_generator_func) ? NewGeneratorFunction : NewFunction; + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(op, OpInfo(frozen), get(VirtualRegister(currentInstruction[2].u.operand)))); + static_assert(OPCODE_LENGTH(op_new_func) == OPCODE_LENGTH(op_new_generator_func), "The length of op_new_func should eqaual to one of op_new_generator_func"); + NEXT_OPCODE(op_new_func); } + + case op_new_func_exp: + case op_new_generator_func_exp: + case op_new_arrow_func_exp: { + FunctionExecutable* expr = m_inlineStackTop->m_profiledBlock->functionExpr(currentInstruction[3].u.operand); + FrozenValue* frozen = m_graph.freezeStrong(expr); + NodeType op = (opcodeID == op_new_generator_func_exp) ? NewGeneratorFunction : NewFunction; + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(op, OpInfo(frozen), get(VirtualRegister(currentInstruction[2].u.operand)))); - case op_new_func_exp: { - set(VirtualRegister(currentInstruction[1].u.operand), - addToGraph(NewFunctionExpression, OpInfo(currentInstruction[2].u.operand))); - NEXT_OPCODE(op_new_func_exp); + if (opcodeID == op_new_func_exp || opcodeID == op_new_generator_func_exp) { + // Curly braces are necessary + static_assert(OPCODE_LENGTH(op_new_func_exp) == OPCODE_LENGTH(op_new_generator_func_exp), "The length of op_new_func_exp should eqaual to one of op_new_generator_func_exp"); + NEXT_OPCODE(op_new_func_exp); + } else { + // Curly braces are necessary + NEXT_OPCODE(op_new_arrow_func_exp); + } } case op_typeof: { @@ -3298,17 +4649,98 @@ bool ByteCodeParser::parseBlock(unsigned limit) } case op_to_number: { - set(VirtualRegister(currentInstruction[1].u.operand), - addToGraph(Identity, Edge(get(VirtualRegister(currentInstruction[2].u.operand)), NumberUse))); + Node* node = get(VirtualRegister(currentInstruction[2].u.operand)); + addToGraph(Phantom, Edge(node, NumberUse)); + set(VirtualRegister(currentInstruction[1].u.operand), node); NEXT_OPCODE(op_to_number); } - + + case op_to_string: { + Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToString, value)); + NEXT_OPCODE(op_to_string); + } + case op_in: { set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(In, get(VirtualRegister(currentInstruction[2].u.operand)), get(VirtualRegister(currentInstruction[3].u.operand)))); NEXT_OPCODE(op_in); } + case op_get_enumerable_length: { + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumerableLength, + get(VirtualRegister(currentInstruction[2].u.operand)))); + NEXT_OPCODE(op_get_enumerable_length); + } + + case op_has_generic_property: { + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasGenericProperty, + get(VirtualRegister(currentInstruction[2].u.operand)), + get(VirtualRegister(currentInstruction[3].u.operand)))); + NEXT_OPCODE(op_has_generic_property); + } + + case op_has_structure_property: { + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasStructureProperty, + get(VirtualRegister(currentInstruction[2].u.operand)), + get(VirtualRegister(currentInstruction[3].u.operand)), + get(VirtualRegister(currentInstruction[4].u.operand)))); + 
NEXT_OPCODE(op_has_structure_property); + } + + case op_has_indexed_property: { + Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); + ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Read); + Node* property = get(VirtualRegister(currentInstruction[3].u.operand)); + Node* hasIterableProperty = addToGraph(HasIndexedProperty, OpInfo(arrayMode.asWord()), base, property); + set(VirtualRegister(currentInstruction[1].u.operand), hasIterableProperty); + NEXT_OPCODE(op_has_indexed_property); + } + + case op_get_direct_pname: { + SpeculatedType prediction = getPredictionWithoutOSRExit(); + + Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* property = get(VirtualRegister(currentInstruction[3].u.operand)); + Node* index = get(VirtualRegister(currentInstruction[4].u.operand)); + Node* enumerator = get(VirtualRegister(currentInstruction[5].u.operand)); + + addVarArgChild(base); + addVarArgChild(property); + addVarArgChild(index); + addVarArgChild(enumerator); + set(VirtualRegister(currentInstruction[1].u.operand), + addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction))); + + NEXT_OPCODE(op_get_direct_pname); + } + + case op_get_property_enumerator: { + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetPropertyEnumerator, + get(VirtualRegister(currentInstruction[2].u.operand)))); + NEXT_OPCODE(op_get_property_enumerator); + } + + case op_enumerator_structure_pname: { + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorStructurePname, + get(VirtualRegister(currentInstruction[2].u.operand)), + get(VirtualRegister(currentInstruction[3].u.operand)))); + NEXT_OPCODE(op_enumerator_structure_pname); + } + + case op_enumerator_generic_pname: { + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorGenericPname, + get(VirtualRegister(currentInstruction[2].u.operand)), + get(VirtualRegister(currentInstruction[3].u.operand)))); + NEXT_OPCODE(op_enumerator_generic_pname); + } + + case op_to_index_string: { + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToIndexString, + get(VirtualRegister(currentInstruction[2].u.operand)))); + NEXT_OPCODE(op_to_index_string); + } + default: // Parse failed! This should not happen because the capabilities checker // should have caught it. 
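Each of the enumeration opcodes above is lowered with the same shape: read the operand virtual registers out of the current instruction, build a single DFG node, and store the result node back into the destination register. Below is a minimal sketch of that recurring pattern, assuming the ByteCodeParser environment shown in this hunk; the helper name lowerUnaryOpcode is invented here for illustration and is not part of the patch.

    // Hypothetical helper showing the lowering pattern shared by
    // op_get_property_enumerator, op_to_index_string and friends.
    // 'op' would be the DFG NodeType the bytecode maps to, e.g. GetPropertyEnumerator.
    void ByteCodeParser::lowerUnaryOpcode(Instruction* currentInstruction, NodeType op)
    {
        Node* operand = get(VirtualRegister(currentInstruction[2].u.operand));
        set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(op, operand));
        // The real cases then advance the instruction stream with NEXT_OPCODE(...).
    }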
@@ -3322,62 +4754,52 @@ void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BasicBlock*>& possibleT { ASSERT(!block->isLinked); ASSERT(!block->isEmpty()); - Node* node = block->last(); + Node* node = block->terminal(); ASSERT(node->isTerminal()); switch (node->op()) { case Jump: - node->setTakenBlock(blockForBytecodeOffset(possibleTargets, node->takenBytecodeOffsetDuringParsing())); + node->targetBlock() = blockForBytecodeOffset(possibleTargets, node->targetBytecodeOffsetDuringParsing()); break; - case Branch: - node->setTakenBlock(blockForBytecodeOffset(possibleTargets, node->takenBytecodeOffsetDuringParsing())); - node->setNotTakenBlock(blockForBytecodeOffset(possibleTargets, node->notTakenBytecodeOffsetDuringParsing())); + case Branch: { + BranchData* data = node->branchData(); + data->taken.block = blockForBytecodeOffset(possibleTargets, data->takenBytecodeIndex()); + data->notTaken.block = blockForBytecodeOffset(possibleTargets, data->notTakenBytecodeIndex()); break; + } - case Switch: + case Switch: { + SwitchData* data = node->switchData(); for (unsigned i = node->switchData()->cases.size(); i--;) - node->switchData()->cases[i].target = blockForBytecodeOffset(possibleTargets, node->switchData()->cases[i].targetBytecodeIndex()); - node->switchData()->fallThrough = blockForBytecodeOffset(possibleTargets, node->switchData()->fallThroughBytecodeIndex()); + data->cases[i].target.block = blockForBytecodeOffset(possibleTargets, data->cases[i].target.bytecodeIndex()); + data->fallThrough.block = blockForBytecodeOffset(possibleTargets, data->fallThrough.bytecodeIndex()); break; + } default: break; } -#if !ASSERT_DISABLED - block->isLinked = true; -#endif + if (verbose) + dataLog("Marking ", RawPointer(block), " as linked (actually did linking)\n"); + block->didLink(); } void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets) { for (size_t i = 0; i < unlinkedBlocks.size(); ++i) { + if (verbose) + dataLog("Attempting to link ", RawPointer(unlinkedBlocks[i].m_block), "\n"); if (unlinkedBlocks[i].m_needsNormalLinking) { + if (verbose) + dataLog(" Does need normal linking.\n"); linkBlock(unlinkedBlocks[i].m_block, possibleTargets); unlinkedBlocks[i].m_needsNormalLinking = false; } } } -void ByteCodeParser::buildOperandMapsIfNecessary() -{ - if (m_haveBuiltOperandMaps) - return; - - for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); ++i) - m_identifierMap.add(m_codeBlock->identifier(i).impl(), i); - for (size_t i = 0; i < m_codeBlock->numberOfConstantRegisters(); ++i) { - JSValue value = m_codeBlock->getConstant(i + FirstConstantRegisterIndex); - if (!value) - m_emptyJSValueIndex = i + FirstConstantRegisterIndex; - else - m_jsValueMap.add(JSValue::encode(value), i + FirstConstantRegisterIndex); - } - - m_haveBuiltOperandMaps = true; -} - ByteCodeParser::InlineStackEntry::InlineStackEntry( ByteCodeParser* byteCodeParser, CodeBlock* codeBlock, @@ -3387,7 +4809,7 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry( VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, int argumentCountIncludingThis, - CodeSpecializationKind kind) + InlineCallFrame::Kind kind) : m_byteCodeParser(byteCodeParser) , m_codeBlock(codeBlock) , m_profiledBlock(profiledBlock) @@ -3405,8 +4827,11 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry( // We do this while holding the lock because we want to encourage StructureStubInfo's // to be potentially added to operations and because the profiled block could be in the // middle of LLInt->JIT 
tier-up in which case we would be adding the info's right now. - if (m_profiledBlock->hasBaselineJITProfiling()) + if (m_profiledBlock->hasBaselineJITProfiling()) { m_profiledBlock->getStubInfoMap(locker, m_stubInfos); + m_profiledBlock->getCallLinkInfoMap(locker, m_callLinkInfos); + m_profiledBlock->getByValInfoMap(locker, m_byValInfos); + } } m_argumentPositions.resize(argumentCountIncludingThis); @@ -3416,87 +4841,35 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry( m_argumentPositions[i] = argumentPosition; } - // Track the code-block-global exit sites. - if (m_exitProfile.hasExitSite(ArgumentsEscaped)) { - byteCodeParser->m_graph.m_executablesWhoseArgumentsEscaped.add( - codeBlock->ownerExecutable()); - } - if (m_caller) { // Inline case. ASSERT(codeBlock != byteCodeParser->m_codeBlock); ASSERT(inlineCallFrameStart.isValid()); ASSERT(callsiteBlockHead); - m_inlineCallFrame = byteCodeParser->m_graph.m_inlineCallFrames->add(); - initializeLazyWriteBarrierForInlineCallFrameExecutable( - byteCodeParser->m_graph.m_plan.writeBarriers, - m_inlineCallFrame->executable, - byteCodeParser->m_codeBlock, - m_inlineCallFrame, - byteCodeParser->m_codeBlock->ownerExecutable(), - codeBlock->ownerExecutable()); - m_inlineCallFrame->stackOffset = inlineCallFrameStart.offset() - JSStack::CallFrameHeaderSize; + m_inlineCallFrame = byteCodeParser->m_graph.m_plan.inlineCallFrames->add(); + byteCodeParser->m_graph.freeze(codeBlock->baselineVersion()); + // The owner is the machine code block, and we already have a barrier on that when the + // plan finishes. + m_inlineCallFrame->baselineCodeBlock.setWithoutWriteBarrier(codeBlock->baselineVersion()); + m_inlineCallFrame->setStackOffset(inlineCallFrameStart.offset() - JSStack::CallFrameHeaderSize); if (callee) { m_inlineCallFrame->calleeRecovery = ValueRecovery::constant(callee); m_inlineCallFrame->isClosureCall = false; } else m_inlineCallFrame->isClosureCall = true; - m_inlineCallFrame->caller = byteCodeParser->currentCodeOrigin(); - m_inlineCallFrame->arguments.resize(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries, yet. - m_inlineCallFrame->isCall = isCall(kind); - - if (m_inlineCallFrame->caller.inlineCallFrame) - m_inlineCallFrame->capturedVars = m_inlineCallFrame->caller.inlineCallFrame->capturedVars; - else { - for (int i = byteCodeParser->m_codeBlock->m_numVars; i--;) { - if (byteCodeParser->m_codeBlock->isCaptured(virtualRegisterForLocal(i))) - m_inlineCallFrame->capturedVars.set(i); - } - } - - for (int i = argumentCountIncludingThis; i--;) { - VirtualRegister argument = virtualRegisterForArgument(i); - if (codeBlock->isCaptured(argument)) - m_inlineCallFrame->capturedVars.set(VirtualRegister(argument.offset() + m_inlineCallFrame->stackOffset).toLocal()); - } - for (size_t i = codeBlock->m_numVars; i--;) { - VirtualRegister local = virtualRegisterForLocal(i); - if (codeBlock->isCaptured(local)) - m_inlineCallFrame->capturedVars.set(VirtualRegister(local.offset() + m_inlineCallFrame->stackOffset).toLocal()); - } - - byteCodeParser->buildOperandMapsIfNecessary(); + m_inlineCallFrame->directCaller = byteCodeParser->currentCodeOrigin(); + m_inlineCallFrame->arguments.resizeToFit(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries, yet. 
+ m_inlineCallFrame->kind = kind; m_identifierRemap.resize(codeBlock->numberOfIdentifiers()); - m_constantRemap.resize(codeBlock->numberOfConstantRegisters()); m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers()); m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables()); for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) { - StringImpl* rep = codeBlock->identifier(i).impl(); - BorrowedIdentifierMap::AddResult result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_graph.identifiers().numberOfIdentifiers()); - if (result.isNewEntry) - byteCodeParser->m_graph.identifiers().addLazily(rep); - m_identifierRemap[i] = result.iterator->value; - } - for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) { - JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex); - if (!value) { - if (byteCodeParser->m_emptyJSValueIndex == UINT_MAX) { - byteCodeParser->m_emptyJSValueIndex = byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex; - byteCodeParser->addConstant(JSValue()); - byteCodeParser->m_constants.append(ConstantRecord()); - } - m_constantRemap[i] = byteCodeParser->m_emptyJSValueIndex; - continue; - } - JSValueMap::AddResult result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex); - if (result.isNewEntry) { - byteCodeParser->addConstant(value); - byteCodeParser->m_constants.append(ConstantRecord()); - } - m_constantRemap[i] = result.iterator->value; + UniquedStringImpl* rep = codeBlock->identifier(i).impl(); + unsigned index = byteCodeParser->m_graph.identifiers().ensure(rep); + m_identifierRemap[i] = index; } for (unsigned i = 0; i < codeBlock->numberOfConstantBuffers(); ++i) { // If we inline the same code block multiple times, we don't want to needlessly @@ -3528,13 +4901,10 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry( m_inlineCallFrame = 0; m_identifierRemap.resize(codeBlock->numberOfIdentifiers()); - m_constantRemap.resize(codeBlock->numberOfConstantRegisters()); m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers()); m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables()); for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) m_identifierRemap[i] = i; - for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) - m_constantRemap[i] = i + FirstConstantRegisterIndex; for (size_t i = 0; i < codeBlock->numberOfConstantBuffers(); ++i) m_constantBufferRemap[i] = i; for (size_t i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i) @@ -3542,14 +4912,13 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry( m_callsiteBlockHeadNeedsLinking = false; } - for (size_t i = 0; i < m_constantRemap.size(); ++i) - ASSERT(m_constantRemap[i] >= static_cast<unsigned>(FirstConstantRegisterIndex)); - byteCodeParser->m_inlineStackTop = this; } void ByteCodeParser::parseCodeBlock() { + clearCaches(); + CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock; if (m_graph.compilation()) { @@ -3557,19 +4926,24 @@ void ByteCodeParser::parseCodeBlock() *m_vm->m_perBytecodeProfiler, m_inlineStackTop->m_profiledBlock); } - bool shouldDumpBytecode = Options::dumpBytecodeAtDFGTime(); - if (shouldDumpBytecode) { + if (UNLIKELY(Options::dumpSourceAtDFGTime())) { + Vector<DeferredSourceDump>& deferredSourceDump = m_graph.m_plan.callback->ensureDeferredSourceDump(); + if (inlineCallFrame()) { + DeferredSourceDump dump(codeBlock->baselineVersion(), m_codeBlock, JITCode::DFGJIT, 
inlineCallFrame()->directCaller); + deferredSourceDump.append(dump); + } else + deferredSourceDump.append(DeferredSourceDump(codeBlock->baselineVersion())); + } + + if (Options::dumpBytecodeAtDFGTime()) { dataLog("Parsing ", *codeBlock); if (inlineCallFrame()) { dataLog( " for inlining at ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT), - " ", inlineCallFrame()->caller); + " ", inlineCallFrame()->directCaller); } dataLog( - ": captureCount = ", codeBlock->symbolTable() ? codeBlock->symbolTable()->captureCount() : 0, - ", needsFullScopeChain = ", codeBlock->needsFullScopeChain(), - ", needsActivation = ", codeBlock->ownerExecutable()->needsActivation(), - ", isStrictMode = ", codeBlock->ownerExecutable()->isStrictMode(), "\n"); + ", isStrictMode = ", codeBlock->ownerScriptExecutable()->isStrictMode(), "\n"); codeBlock->baselineVersion()->dumpBytecode(); } @@ -3607,7 +4981,7 @@ void ByteCodeParser::parseCodeBlock() m_currentBlock = m_graph.lastBlock(); m_currentBlock->bytecodeBegin = m_currentIndex; } else { - RefPtr<BasicBlock> block = adoptRef(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals)); + RefPtr<BasicBlock> block = adoptRef(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals, 1)); m_currentBlock = block.get(); // This assertion checks two things: // 1) If the bytecodeBegin is greater than currentIndex, then something has gone @@ -3615,7 +4989,12 @@ void ByteCodeParser::parseCodeBlock() // 2) If the bytecodeBegin is equal to the currentIndex, then we failed to do // a peephole coalescing of this block in the if statement above. So, we're // generating suboptimal code and leaving more work for the CFG simplifier. - ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_inlineStackTop->m_unlinkedBlocks.last().m_block->bytecodeBegin < m_currentIndex); + if (!m_inlineStackTop->m_unlinkedBlocks.isEmpty()) { + unsigned lastBegin = + m_inlineStackTop->m_unlinkedBlocks.last().m_block->bytecodeBegin; + ASSERT_UNUSED( + lastBegin, lastBegin == UINT_MAX || lastBegin < m_currentIndex); + } m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(block.get())); m_inlineStackTop->m_blockLinkingTargets.append(block.get()); // The first block is definitely an OSR target. @@ -3636,17 +5015,23 @@ void ByteCodeParser::parseCodeBlock() // are at the end of an inline function, or we realized that we // should stop parsing because there was a return in the first // basic block. - ASSERT(m_currentBlock->isEmpty() || m_currentBlock->last()->isTerminal() || (m_currentIndex == codeBlock->instructions().size() && inlineCallFrame()) || !shouldContinueParsing); + ASSERT(m_currentBlock->isEmpty() || m_currentBlock->terminal() || (m_currentIndex == codeBlock->instructions().size() && inlineCallFrame()) || !shouldContinueParsing); - if (!shouldContinueParsing) + if (!shouldContinueParsing) { + if (Options::verboseDFGByteCodeParsing()) + dataLog("Done parsing ", *codeBlock, "\n"); return; + } - m_currentBlock = 0; + m_currentBlock = nullptr; } while (m_currentIndex < limit); } // Should have reached the end of the instructions. ASSERT(m_currentIndex == codeBlock->instructions().size()); + + if (Options::verboseDFGByteCodeParsing()) + dataLog("Done parsing ", *codeBlock, " (fell off end)\n"); } bool ByteCodeParser::parse() @@ -3654,25 +5039,21 @@ bool ByteCodeParser::parse() // Set during construction. 
ASSERT(!m_currentIndex); - if (m_codeBlock->captureCount()) { - SymbolTable* symbolTable = m_codeBlock->symbolTable(); - ConcurrentJITLocker locker(symbolTable->m_lock); - SymbolTable::Map::iterator iter = symbolTable->begin(locker); - SymbolTable::Map::iterator end = symbolTable->end(locker); - for (; iter != end; ++iter) { - VariableWatchpointSet* set = iter->value.watchpointSet(); - if (!set) - continue; - size_t index = static_cast<size_t>(VirtualRegister(iter->value.getIndex()).toLocal()); - while (m_localWatchpoints.size() <= index) - m_localWatchpoints.append(nullptr); - m_localWatchpoints[index] = set; - } + if (Options::verboseDFGByteCodeParsing()) + dataLog("Parsing ", *m_codeBlock, "\n"); + + m_dfgCodeBlock = m_graph.m_plan.profiledDFGCodeBlock; + if (isFTL(m_graph.m_plan.mode) && m_dfgCodeBlock + && Options::usePolyvariantDevirtualization()) { + if (Options::usePolyvariantCallInlining()) + CallLinkStatus::computeDFGStatuses(m_dfgCodeBlock, m_callContextMap); + if (Options::usePolyvariantByIdInlining()) + m_dfgCodeBlock->getStubInfoMap(m_dfgStubInfos); } InlineStackEntry inlineStackEntry( this, m_codeBlock, m_profiledBlock, 0, 0, VirtualRegister(), VirtualRegister(), - m_codeBlock->numParameters(), CodeForCall); + m_codeBlock->numParameters(), InlineCallFrame::Call); parseCodeBlock(); diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.h b/Source/JavaScriptCore/dfg/DFGByteCodeParser.h index cb8626998..bd6888d70 100644 --- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.h +++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.h @@ -28,17 +28,10 @@ #if ENABLE(DFG_JIT) -#include "DFGGraph.h" +namespace JSC { namespace DFG { -namespace JSC { +class Graph; -class CodeBlock; -class VM; - -namespace DFG { - -// Populate the Graph with a basic block of code from the CodeBlock, -// starting at the provided bytecode index. bool parse(Graph&); } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp b/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp index d149fc692..016e7d7f7 100644 --- a/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -34,7 +34,7 @@ #include "DFGPhase.h" #include "DFGSafeToExecute.h" #include "OperandsInlines.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -56,7 +56,7 @@ public: m_count = 0; - if (m_verbose && !shouldDumpGraphAtEachPhase()) { + if (m_verbose && !shouldDumpGraphAtEachPhase(m_graph.m_plan.mode)) { dataLog("Graph before CFA:\n"); m_graph.dump(); } @@ -79,6 +79,59 @@ public: performForwardCFA(); } while (m_changed); + if (m_graph.m_form != SSA) { + ASSERT(!m_changed); + + // Widen the abstract values at the block that serves as the must-handle OSR entry. 
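// In short: OSR entry may hand the compiled code values (m_plan.mustHandleValues) that
// profiling never observed, so the abstract values proven at the must-handle entry block
// are widened to cover them. mergeOSREntryValue() reports whether anything actually grew;
// if it did, cfaShouldRevisit is set and the forward CFA below is re-run to propagate it.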
+ for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { + BasicBlock* block = m_graph.block(blockIndex); + if (!block) + continue; + + if (!block->isOSRTarget) + continue; + if (block->bytecodeBegin != m_graph.m_plan.osrEntryBytecodeIndex) + continue; + + bool changed = false; + for (size_t i = m_graph.m_plan.mustHandleValues.size(); i--;) { + int operand = m_graph.m_plan.mustHandleValues.operandForIndex(i); + JSValue value = m_graph.m_plan.mustHandleValues[i]; + Node* node = block->variablesAtHead.operand(operand); + if (!node) + continue; + + AbstractValue& target = block->valuesAtHead.operand(operand); + changed |= target.mergeOSREntryValue(m_graph, value); + target.fixTypeForRepresentation( + m_graph, resultFor(node->variableAccessData()->flushFormat())); + } + + if (changed || !block->cfaHasVisited) { + m_changed = true; + block->cfaShouldRevisit = true; + } + } + + // Propagate any of the changes we just introduced. + while (m_changed) { + m_changed = false; + performForwardCFA(); + } + + // Make sure we record the intersection of all proofs that we ever allowed the + // compiler to rely upon. + for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { + BasicBlock* block = m_graph.block(blockIndex); + if (!block) + continue; + + block->intersectionOfCFAHasVisited &= block->cfaHasVisited; + for (unsigned i = block->intersectionOfPastValuesAtHead.size(); i--;) + block->intersectionOfPastValuesAtHead[i].filter(block->valuesAtHead[i]); + } + } + return true; } @@ -92,8 +145,11 @@ private: if (m_verbose) dataLog(" Block ", *block, ":\n"); m_state.beginBasicBlock(block); - if (m_verbose) + if (m_verbose) { dataLog(" head vars: ", block->valuesAtHead, "\n"); + if (m_graph.m_form == SSA) + dataLog(" head regs: ", mapDump(block->ssa->valuesAtHead), "\n"); + } for (unsigned i = 0; i < block->size(); ++i) { if (m_verbose) { Node* node = block->at(i); @@ -102,10 +158,8 @@ private: if (!safeToExecute(m_state, m_graph, node)) dataLog("(UNSAFE) "); - m_interpreter.dump(WTF::dataFile()); + dataLog(m_state.variables(), " ", m_interpreter); - if (m_state.haveStructures()) - dataLog(" (Have Structures)"); dataLogF("\n"); } if (!m_interpreter.execute(i)) { @@ -119,10 +173,13 @@ private: m_interpreter.dump(WTF::dataFile()); dataLogF("\n"); } - m_changed |= m_state.endBasicBlock(MergeToSuccessors); + m_changed |= m_state.endBasicBlock(); - if (m_verbose) + if (m_verbose) { dataLog(" tail vars: ", block->valuesAtTail, "\n"); + if (m_graph.m_form == SSA) + dataLog(" head regs: ", mapDump(block->ssa->valuesAtTail), "\n"); + } } void performForwardCFA() diff --git a/Source/JavaScriptCore/dfg/DFGCFAPhase.h b/Source/JavaScriptCore/dfg/DFGCFAPhase.h index cc9e6c4b4..30a69c4c6 100644 --- a/Source/JavaScriptCore/dfg/DFGCFAPhase.h +++ b/Source/JavaScriptCore/dfg/DFGCFAPhase.h @@ -26,8 +26,6 @@ #ifndef DFGCFAPhase_h #define DFGCFAPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) namespace JSC { namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGCFG.h b/Source/JavaScriptCore/dfg/DFGCFG.h new file mode 100644 index 000000000..1805b29df --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGCFG.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGCFG_h +#define DFGCFG_h + +#if ENABLE(DFG_JIT) + +#include "DFGBasicBlock.h" +#include "DFGBlockMapInlines.h" +#include "DFGBlockSet.h" +#include "DFGGraph.h" + +namespace JSC { namespace DFG { + +class CFG { + WTF_MAKE_NONCOPYABLE(CFG); + WTF_MAKE_FAST_ALLOCATED; +public: + typedef BasicBlock* Node; + typedef BlockSet Set; + template<typename T> using Map = BlockMap<T>; + typedef BlockList List; + + CFG(Graph& graph) + : m_graph(graph) + { + } + + Node root() { return m_graph.block(0); } + + template<typename T> + Map<T> newMap() { return BlockMap<T>(m_graph); } + + DFG::Node::SuccessorsIterable successors(Node node) { return node->successors(); } + PredecessorList& predecessors(Node node) { return node->predecessors; } + + unsigned index(Node node) const { return node->index; } + Node node(unsigned index) const { return m_graph.block(index); } + unsigned numNodes() const { return m_graph.numBlocks(); } + + PointerDump<BasicBlock> dump(Node node) const { return pointerDump(node); } + + void dump(PrintStream& out) const + { + m_graph.dump(out); + } + +private: + Graph& m_graph; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGCFG_h + diff --git a/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.cpp b/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.cpp index 5de36a0da..5bdd9f746 100644 --- a/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -33,7 +33,7 @@ #include "DFGInsertionSet.h" #include "DFGPhase.h" #include "DFGValidate.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -46,6 +46,9 @@ public: bool run() { + // FIXME: We should make this work in SSA. https://bugs.webkit.org/show_bug.cgi?id=148260 + DFG_ASSERT(m_graph, nullptr, m_graph.m_form != SSA); + const bool extremeLogging = false; bool outerChanged = false; @@ -59,7 +62,7 @@ public: continue; ASSERT(block->isReachable); - switch (block->last()->op()) { + switch (block->terminal()->op()) { case Jump: { // Successor with one predecessor -> merge. 
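// That is: when the Jump's target has this block as its only predecessor, the two blocks
// can simply be fused (the target's phis and nodes are appended after this block's body).
// With multiple predecessors we give up for now; see the end-of-block "ghosties" note and
// the FIXME below.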
if (block->successor(0)->predecessors.size() == 1) { @@ -79,6 +82,8 @@ public: // suboptimal, because if my successor has multiple predecessors then we'll // be keeping alive things on other predecessor edges unnecessarily. // What we really need is the notion of end-of-block ghosties! + // FIXME: Allow putting phantoms after terminals. + // https://bugs.webkit.org/show_bug.cgi?id=126778 break; } @@ -97,17 +102,19 @@ public: if (extremeLogging) m_graph.dump(); m_graph.dethread(); - - ASSERT(block->last()->isTerminal()); - CodeOrigin boundaryCodeOrigin = block->last()->codeOrigin; - block->last()->convertToPhantom(); - ASSERT(block->last()->refCount() == 1); - - jettisonBlock(block, jettisonedBlock, boundaryCodeOrigin); - - block->appendNode( - m_graph, SpecNone, Jump, boundaryCodeOrigin, + + Node* terminal = block->terminal(); + ASSERT(terminal->isTerminal()); + NodeOrigin boundaryNodeOrigin = terminal->origin; + + jettisonBlock(block, jettisonedBlock, boundaryNodeOrigin); + + block->replaceTerminal( + m_graph, SpecNone, Jump, boundaryNodeOrigin, OpInfo(targetBlock)); + + ASSERT(block->terminal()); + } innerChanged = outerChanged = true; break; @@ -127,44 +134,47 @@ public: } case Switch: { - SwitchData* data = block->last()->switchData(); + SwitchData* data = block->terminal()->switchData(); // Prune out cases that end up jumping to default. for (unsigned i = 0; i < data->cases.size(); ++i) { - if (data->cases[i].target == data->fallThrough) - data->cases[i--] = data->cases.takeLast(); + if (data->cases[i].target.block == data->fallThrough.block) { + data->fallThrough.count += data->cases[i].target.count; + data->cases[i--] = data->cases.last(); + data->cases.removeLast(); + } } // If there are no cases other than default then this turns // into a jump. if (data->cases.isEmpty()) { - convertToJump(block, data->fallThrough); + convertToJump(block, data->fallThrough.block); innerChanged = outerChanged = true; break; } // Switch on constant -> jettison all other targets and merge. 
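// When the switch operand is a known constant, each case value is compared with it using
// strictEqual(), which yields a TriState: MixedTriState means the answer is not statically
// known, so the simplification is abandoned; TrueTriState selects that case's block; and if
// no case matches, the fallThrough block becomes the sole target. Every other successor is
// then jettisoned, with its live operands kept alive via PhantomLocal/Flush nodes for OSR.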
- if (block->last()->child1()->hasConstant()) { - JSValue value = m_graph.valueOfJSConstant(block->last()->child1().node()); + Node* terminal = block->terminal(); + if (terminal->child1()->hasConstant()) { + FrozenValue* value = terminal->child1()->constant(); TriState found = FalseTriState; BasicBlock* targetBlock = 0; for (unsigned i = data->cases.size(); found == FalseTriState && i--;) { found = data->cases[i].value.strictEqual(value); if (found == TrueTriState) - targetBlock = data->cases[i].target; + targetBlock = data->cases[i].target.block; } if (found == MixedTriState) break; if (found == FalseTriState) - targetBlock = data->fallThrough; + targetBlock = data->fallThrough.block; ASSERT(targetBlock); Vector<BasicBlock*, 1> jettisonedBlocks; - for (unsigned i = block->numSuccessors(); i--;) { - BasicBlock* jettisonedBlock = block->successor(i); - if (jettisonedBlock != targetBlock) - jettisonedBlocks.append(jettisonedBlock); + for (BasicBlock* successor : terminal->successors()) { + if (successor != targetBlock) + jettisonedBlocks.append(successor); } if (targetBlock->predecessors.size() == 1) { @@ -178,12 +188,13 @@ public: m_graph.dump(); m_graph.dethread(); - CodeOrigin boundaryCodeOrigin = block->last()->codeOrigin; - block->last()->convertToPhantom(); + NodeOrigin boundaryNodeOrigin = terminal->origin; + for (unsigned i = jettisonedBlocks.size(); i--;) - jettisonBlock(block, jettisonedBlocks[i], boundaryCodeOrigin); - block->appendNode( - m_graph, SpecNone, Jump, boundaryCodeOrigin, OpInfo(targetBlock)); + jettisonBlock(block, jettisonedBlocks[i], boundaryNodeOrigin); + + block->replaceTerminal( + m_graph, SpecNone, Jump, boundaryNodeOrigin, OpInfo(targetBlock)); } innerChanged = outerChanged = true; break; @@ -233,7 +244,7 @@ public: } if (Options::validateGraphAtEachPhase()) - validate(m_graph); + validate(); } while (innerChanged); return outerChanged; @@ -248,36 +259,40 @@ private: m_graph.dethread(); mergeBlocks(block, targetBlock, noBlocks()); } else { - Node* branch = block->last(); - ASSERT(branch->isTerminal()); + Node* branch = block->terminal(); ASSERT(branch->op() == Branch || branch->op() == Switch); - branch->convertToPhantom(); - ASSERT(branch->refCount() == 1); - - block->appendNode( - m_graph, SpecNone, Jump, branch->codeOrigin, - OpInfo(targetBlock)); + + block->replaceTerminal( + m_graph, SpecNone, Jump, branch->origin, OpInfo(targetBlock)); } } - - void keepOperandAlive(BasicBlock* block, BasicBlock* jettisonedBlock, CodeOrigin codeOrigin, VirtualRegister operand) + + void keepOperandAlive(BasicBlock* block, BasicBlock* jettisonedBlock, NodeOrigin nodeOrigin, VirtualRegister operand) { Node* livenessNode = jettisonedBlock->variablesAtHead.operand(operand); if (!livenessNode) return; - if (livenessNode->variableAccessData()->isCaptured()) - return; + NodeType nodeType; + if (livenessNode->flags() & NodeIsFlushed) + nodeType = Flush; + else { + // This seems like it shouldn't be necessary because we could just rematerialize + // PhantomLocals or something similar using bytecode liveness. However, in ThreadedCPS, it's + // worth the sanity to maintain this eagerly. 
See + // https://bugs.webkit.org/show_bug.cgi?id=144086 + nodeType = PhantomLocal; + } block->appendNode( - m_graph, SpecNone, PhantomLocal, codeOrigin, + m_graph, SpecNone, nodeType, nodeOrigin, OpInfo(livenessNode->variableAccessData())); } - void jettisonBlock(BasicBlock* block, BasicBlock* jettisonedBlock, CodeOrigin boundaryCodeOrigin) + void jettisonBlock(BasicBlock* block, BasicBlock* jettisonedBlock, NodeOrigin boundaryNodeOrigin) { for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfArguments(); ++i) - keepOperandAlive(block, jettisonedBlock, boundaryCodeOrigin, virtualRegisterForArgument(i)); + keepOperandAlive(block, jettisonedBlock, boundaryNodeOrigin, virtualRegisterForArgument(i)); for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfLocals(); ++i) - keepOperandAlive(block, jettisonedBlock, boundaryCodeOrigin, virtualRegisterForLocal(i)); + keepOperandAlive(block, jettisonedBlock, boundaryNodeOrigin, virtualRegisterForLocal(i)); fixJettisonedPredecessors(block, jettisonedBlock); } @@ -310,11 +325,12 @@ private: // kept alive. // Remove the terminal of firstBlock since we don't need it anymore. Well, we don't - // really remove it; we actually turn it into a Phantom. - ASSERT(firstBlock->last()->isTerminal()); - CodeOrigin boundaryCodeOrigin = firstBlock->last()->codeOrigin; - firstBlock->last()->convertToPhantom(); - ASSERT(firstBlock->last()->refCount() == 1); + // really remove it; we actually turn it into a check. + Node* terminal = firstBlock->terminal(); + ASSERT(terminal->isTerminal()); + NodeOrigin boundaryNodeOrigin = terminal->origin; + terminal->remove(); + ASSERT(terminal->refCount() == 1); for (unsigned i = jettisonedBlocks.size(); i--;) { BasicBlock* jettisonedBlock = jettisonedBlocks[i]; @@ -324,9 +340,9 @@ private: // different path than secondBlock. for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfArguments(); ++i) - keepOperandAlive(firstBlock, jettisonedBlock, boundaryCodeOrigin, virtualRegisterForArgument(i)); + keepOperandAlive(firstBlock, jettisonedBlock, boundaryNodeOrigin, virtualRegisterForArgument(i)); for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfLocals(); ++i) - keepOperandAlive(firstBlock, jettisonedBlock, boundaryCodeOrigin, virtualRegisterForLocal(i)); + keepOperandAlive(firstBlock, jettisonedBlock, boundaryNodeOrigin, virtualRegisterForLocal(i)); } for (size_t i = 0; i < secondBlock->phis.size(); ++i) @@ -335,7 +351,7 @@ private: for (size_t i = 0; i < secondBlock->size(); ++i) firstBlock->append(secondBlock->at(i)); - ASSERT(firstBlock->last()->isTerminal()); + ASSERT(firstBlock->terminal()->isTerminal()); // Fix the predecessors of my new successors. This is tricky, since we are going to reset // all predecessors anyway due to reachability analysis. 
But we need to fix the diff --git a/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.h b/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.h index a0f4856a4..0007fc9d2 100644 --- a/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.h +++ b/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.h @@ -26,8 +26,6 @@ #ifndef DFGCFGSimplificationPhase_h #define DFGCFGSimplificationPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) namespace JSC { namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.cpp b/Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.cpp index 5f646f3a0..09dbf328b 100644 --- a/Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,7 +31,7 @@ #include "DFGBasicBlockInlines.h" #include "DFGGraph.h" #include "DFGPhase.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -44,6 +44,8 @@ public: bool run() { + RELEASE_ASSERT(m_graph.m_refCountState == EverythingIsLive); + if (m_graph.m_form == ThreadedCPS) return false; @@ -51,8 +53,10 @@ public: freeUnnecessaryNodes(); m_graph.clearReplacements(); canonicalizeLocalsInBlocks(); + specialCaseArguments(); propagatePhis<LocalOperand>(); propagatePhis<ArgumentOperand>(); + computeIsFlushed(); m_graph.m_form = ThreadedCPS; return true; @@ -87,13 +91,15 @@ private: node->children.setChild1(Edge()); break; case Phantom: - if (!node->child1()) + if (!node->child1()) { + m_graph.m_allocator.free(node); continue; + } switch (node->child1()->op()) { case Phi: case SetArgument: case SetLocal: - node->convertToPhantomLocal(); + node->convertPhantomToPhantomLocal(); break; default: ASSERT(node->child1()->hasResult()); @@ -114,37 +120,37 @@ private: } template<OperandKind operandKind> - void clearVariablesAtHeadAndTail() + void clearVariables() { ASSERT( m_block->variablesAtHead.sizeFor<operandKind>() == m_block->variablesAtTail.sizeFor<operandKind>()); for (unsigned i = m_block->variablesAtHead.sizeFor<operandKind>(); i--;) { - m_block->variablesAtHead.atFor<operandKind>(i) = 0; - m_block->variablesAtTail.atFor<operandKind>(i) = 0; + m_block->variablesAtHead.atFor<operandKind>(i) = nullptr; + m_block->variablesAtTail.atFor<operandKind>(i) = nullptr; } } - ALWAYS_INLINE Node* addPhiSilently(BasicBlock* block, const CodeOrigin& codeOrigin, VariableAccessData* variable) + ALWAYS_INLINE Node* addPhiSilently(BasicBlock* block, const NodeOrigin& origin, VariableAccessData* variable) { - Node* result = m_graph.addNode(SpecNone, Phi, codeOrigin, OpInfo(variable)); + Node* result = m_graph.addNode(SpecNone, Phi, origin, OpInfo(variable)); block->phis.append(result); return result; } template<OperandKind operandKind> - ALWAYS_INLINE Node* addPhi(BasicBlock* block, const CodeOrigin& codeOrigin, VariableAccessData* variable, size_t index) + ALWAYS_INLINE Node* addPhi(BasicBlock* block, const NodeOrigin& origin, VariableAccessData* variable, size_t index) { - Node* result = addPhiSilently(block, codeOrigin, variable); + Node* result = addPhiSilently(block, origin, variable); phiStackFor<operandKind>().append(PhiStackEntry(block, index, result)); return result; } template<OperandKind operandKind> - ALWAYS_INLINE Node* addPhi(const CodeOrigin& codeOrigin, 
VariableAccessData* variable, size_t index) + ALWAYS_INLINE Node* addPhi(const NodeOrigin& origin, VariableAccessData* variable, size_t index) { - return addPhi<operandKind>(m_block, codeOrigin, variable, index); + return addPhi<operandKind>(m_block, origin, variable, index); } template<OperandKind operandKind> @@ -181,34 +187,19 @@ private: return; } - if (variable->isCaptured()) { - variable->setIsLoadedFrom(true); - if (otherNode->op() == GetLocal) - otherNode = otherNode->child1().node(); - else - ASSERT(otherNode->op() == SetLocal || otherNode->op() == SetArgument); - - ASSERT(otherNode->op() == Phi || otherNode->op() == SetLocal || otherNode->op() == SetArgument); - - // Keep this GetLocal but link it to the prior ones. - node->children.setChild1(Edge(otherNode)); - m_block->variablesAtTail.atFor<operandKind>(idx) = node; - return; - } - if (otherNode->op() == GetLocal) { // Replace all references to this GetLocal with otherNode. - node->misc.replacement = otherNode; + node->replaceWith(otherNode); return; } ASSERT(otherNode->op() == SetLocal); - node->misc.replacement = otherNode->child1().node(); + node->replaceWith(otherNode->child1().node()); return; } variable->setIsLoadedFrom(true); - Node* phi = addPhi<operandKind>(node->codeOrigin, variable, idx); + Node* phi = addPhi<operandKind>(node->origin, variable, idx); node->children.setChild1(Edge(phi)); m_block->variablesAtHead.atFor<operandKind>(idx) = phi; m_block->variablesAtTail.atFor<operandKind>(idx) = node; @@ -223,11 +214,6 @@ private: canonicalizeGetLocalFor<LocalOperand>(node, variable, variable->local().toLocal()); } - void canonicalizeSetLocal(Node* node) - { - m_block->variablesAtTail.setOperand(node->local(), node); - } - template<NodeType nodeType, OperandKind operandKind> void canonicalizeFlushOrPhantomLocalFor(Node* node, VariableAccessData* variable, size_t idx) { @@ -254,13 +240,9 @@ private: // for the purpose of OSR. PhantomLocal(SetLocal) means: at this point I // know that I would have read the value written by that SetLocal. This is // redundant and inefficient, since really it just means that we want to - // be keeping the operand to the SetLocal alive. The SetLocal may die, and - // we'll be fine because OSR tracks dead SetLocals. - - // So we turn this into a Phantom on the child of the SetLocal. + // keep the last MovHinted value of that local alive. 
- node->convertToPhantom(); - node->children.setChild1(otherNode->child1()); + node->remove(); return; } @@ -276,7 +258,7 @@ private: } variable->setIsLoadedFrom(true); - node->children.setChild1(Edge(addPhi<operandKind>(node->codeOrigin, variable, idx))); + node->children.setChild1(Edge(addPhi<operandKind>(node->origin, variable, idx))); m_block->variablesAtHead.atFor<operandKind>(idx) = node; m_block->variablesAtTail.atFor<operandKind>(idx) = node; } @@ -291,13 +273,9 @@ private: canonicalizeFlushOrPhantomLocalFor<nodeType, LocalOperand>(node, variable, variable->local().toLocal()); } - void canonicalizeSetArgument(Node* node) + void canonicalizeSet(Node* node) { - VirtualRegister local = node->local(); - ASSERT(local.isArgument()); - int argument = local.toArgument(); - m_block->variablesAtHead.setArgumentFirstTime(argument, node); - m_block->variablesAtTail.setArgumentFirstTime(argument, node); + m_block->variablesAtTail.setOperand(node->local(), node); } void canonicalizeLocalsInBlock() @@ -306,8 +284,8 @@ private: return; ASSERT(m_block->isReachable); - clearVariablesAtHeadAndTail<ArgumentOperand>(); - clearVariablesAtHeadAndTail<LocalOperand>(); + clearVariables<ArgumentOperand>(); + clearVariables<LocalOperand>(); // Assumes that all phi references have been removed. Assumes that things that // should be live have a non-zero ref count, but doesn't assume that the ref @@ -336,10 +314,8 @@ private: // there ever was a SetLocal and it was followed by Flushes, then the tail // variable will be a SetLocal and not those subsequent Flushes. // - // Child of GetLocal: the operation that the GetLocal keeps alive. For - // uncaptured locals, it may be a Phi from the current block. For arguments, - // it may be a SetArgument. For captured locals and arguments it may also be - // a SetLocal. + // Child of GetLocal: the operation that the GetLocal keeps alive. It may be + // a Phi from the current block. For arguments, it may be a SetArgument. // // Child of SetLocal: must be a value producing node. // @@ -362,7 +338,7 @@ private: break; case SetLocal: - canonicalizeSetLocal(node); + canonicalizeSet(node); break; case Flush: @@ -374,7 +350,7 @@ private: break; case SetArgument: - canonicalizeSetArgument(node); + canonicalizeSet(node); break; default: @@ -393,6 +369,16 @@ private: } } + void specialCaseArguments() + { + // Normally, a SetArgument denotes the start of a live range for a local's value on the stack. + // But those SetArguments used for the actual arguments to the machine CodeBlock get + // special-cased. We could have instead used two different node types - one for the arguments + // at the prologue case, and another for the other uses. But this seemed like IR overkill. 
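// Concretely: the machine code block's arguments are the SetArgument nodes collected in
// m_graph.m_arguments, and installing them as block 0's variablesAtHead lets rethreading
// treat argument values as flowing in from the prologue without introducing a new node type.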
+ for (unsigned i = m_graph.m_arguments.size(); i--;) + m_graph.block(0)->variablesAtHead.setArgumentFirstTime(i, m_graph.m_arguments[i]); + } + template<OperandKind operandKind> void propagatePhis() { @@ -418,7 +404,7 @@ private: Node* variableInPrevious = predecessorBlock->variablesAtTail.atFor<operandKind>(index); if (!variableInPrevious) { - variableInPrevious = addPhi<operandKind>(predecessorBlock, currentPhi->codeOrigin, variable, index); + variableInPrevious = addPhi<operandKind>(predecessorBlock, currentPhi->origin, variable, index); predecessorBlock->variablesAtTail.atFor<operandKind>(index) = variableInPrevious; predecessorBlock->variablesAtHead.atFor<operandKind>(index) = variableInPrevious; } else { @@ -452,7 +438,7 @@ private: continue; } - Node* newPhi = addPhiSilently(block, currentPhi->codeOrigin, variable); + Node* newPhi = addPhiSilently(block, currentPhi->origin, variable); newPhi->children = currentPhi->children; currentPhi->children.initialize(newPhi, variableInPrevious, 0); } @@ -480,9 +466,56 @@ private: return m_localPhiStack; } + void computeIsFlushed() + { + m_graph.clearFlagsOnAllNodes(NodeIsFlushed); + + for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { + BasicBlock* block = m_graph.block(blockIndex); + if (!block) + continue; + for (unsigned nodeIndex = block->size(); nodeIndex--;) { + Node* node = block->at(nodeIndex); + if (node->op() != Flush) + continue; + addFlushedLocalOp(node); + } + } + while (!m_flushedLocalOpWorklist.isEmpty()) { + Node* node = m_flushedLocalOpWorklist.takeLast(); + switch (node->op()) { + case SetLocal: + case SetArgument: + break; + + case Flush: + case Phi: + ASSERT(node->flags() & NodeIsFlushed); + DFG_NODE_DO_TO_CHILDREN(m_graph, node, addFlushedLocalEdge); + break; + + default: + DFG_CRASH(m_graph, node, "Invalid node in flush graph"); + break; + } + } + } + + void addFlushedLocalOp(Node* node) + { + if (node->mergeFlags(NodeIsFlushed)) + m_flushedLocalOpWorklist.append(node); + } + + void addFlushedLocalEdge(Node*, Edge edge) + { + addFlushedLocalOp(edge.node()); + } + BasicBlock* m_block; Vector<PhiStackEntry, 128> m_argumentPhiStack; Vector<PhiStackEntry, 128> m_localPhiStack; + Vector<Node*, 128> m_flushedLocalOpWorklist; }; bool performCPSRethreading(Graph& graph) diff --git a/Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.h b/Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.h index 128847f2e..755bc799d 100644 --- a/Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.h +++ b/Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.h @@ -26,8 +26,6 @@ #ifndef DFGCPSRethreadingPhase_h #define DFGCPSRethreadingPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) namespace JSC { namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp b/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp index a4e159e73..1cdee4df8 100644 --- a/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,30 +28,66 @@ #if ENABLE(DFG_JIT) +#include "DFGAbstractHeap.h" +#include "DFGBlockMapInlines.h" +#include "DFGClobberSet.h" +#include "DFGClobberize.h" +#include "DFGDominators.h" #include "DFGEdgeUsesStructure.h" #include "DFGGraph.h" #include "DFGPhase.h" -#include "JSCellInlines.h" +#include "JSCInlines.h" #include <array> #include <wtf/FastBitVector.h> namespace JSC { namespace DFG { -enum CSEMode { NormalCSE, StoreElimination }; +// This file contains two CSE implementations: local and global. LocalCSE typically runs when we're +// in DFG mode, i.e. we want to compile quickly. LocalCSE contains a lot of optimizations for +// compile time. GlobalCSE, on the other hand, is fairly straight-forward. It will find more +// optimization opportunities by virtue of being global. -template<CSEMode cseMode> -class CSEPhase : public Phase { +namespace { + +const bool verbose = false; + +class ClobberFilter { public: - CSEPhase(Graph& graph) - : Phase(graph, cseMode == NormalCSE ? "common subexpression elimination" : "store elimination") + ClobberFilter(AbstractHeap heap) + : m_heap(heap) + { + } + + bool operator()(const ImpureMap::KeyValuePairType& pair) const + { + return m_heap.overlaps(pair.key.heap()); + } + +private: + AbstractHeap m_heap; +}; + +inline void clobber(ImpureMap& map, AbstractHeap heap) +{ + ClobberFilter filter(heap); + map.removeIf(filter); +} + +class LocalCSEPhase : public Phase { +public: + LocalCSEPhase(Graph& graph) + : Phase(graph, "local common subexpression elimination") + , m_smallBlock(graph) + , m_largeBlock(graph) { } bool run() { - ASSERT(m_graph.m_fixpointState != BeforeFixpoint); + ASSERT(m_graph.m_fixpointState == FixpointNotConverged); + ASSERT(m_graph.m_form == ThreadedCPS || m_graph.m_form == LoadStore); - m_changed = false; + bool changed = false; m_graph.clearReplacements(); @@ -60,1389 +96,621 @@ public: if (!block) continue; - // All Phis need to already be marked as relevant to OSR. - if (!ASSERT_DISABLED) { - for (unsigned i = 0; i < block->phis.size(); ++i) - ASSERT(block->phis[i]->flags() & NodeRelevantToOSR); - } - - for (unsigned i = block->size(); i--;) { - Node* node = block->at(i); - - switch (node->op()) { - case SetLocal: - case GetLocal: // FIXME: The GetLocal case is only necessary until we do https://bugs.webkit.org/show_bug.cgi?id=106707. 
- node->mergeFlags(NodeRelevantToOSR); - break; - default: - node->clearFlags(NodeRelevantToOSR); - break; - } - } + if (block->size() <= SmallMaps::capacity) + changed |= m_smallBlock.run(block); + else + changed |= m_largeBlock.run(block); } - for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) - continue; - - for (unsigned i = block->size(); i--;) { - Node* node = block->at(i); - if (!node->containsMovHint()) - continue; - - ASSERT(node->op() != ZombieHint); - node->child1()->mergeFlags(NodeRelevantToOSR); - } - } - - if (m_graph.m_form == SSA) { - Vector<BasicBlock*> depthFirst; - m_graph.getBlocksInDepthFirstOrder(depthFirst); - for (unsigned i = 0; i < depthFirst.size(); ++i) - performBlockCSE(depthFirst[i]); - } else { - for (unsigned blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) - performBlockCSE(m_graph.block(blockIndex)); - } - - return m_changed; + return changed; } private: - - unsigned endIndexForPureCSE() - { - unsigned result = m_lastSeen[m_currentNode->op()]; - if (result == UINT_MAX) - result = 0; - else - result++; - ASSERT(result <= m_indexInBlock); - return result; - } - - Node* pureCSE(Node* node) - { - Edge child1 = node->child1(); - Edge child2 = node->child2(); - Edge child3 = node->child3(); - - for (unsigned i = endIndexForPureCSE(); i--;) { - Node* otherNode = m_currentBlock->at(i); - if (otherNode == child1 || otherNode == child2 || otherNode == child3) - break; - - if (node->op() != otherNode->op()) - continue; - - if (node->hasArithMode()) { - if (node->arithMode() != otherNode->arithMode()) - continue; - } - - Edge otherChild = otherNode->child1(); - if (!otherChild) - return otherNode; - if (otherChild != child1) - continue; - - otherChild = otherNode->child2(); - if (!otherChild) - return otherNode; - if (otherChild != child2) - continue; - - otherChild = otherNode->child3(); - if (!otherChild) - return otherNode; - if (otherChild != child3) - continue; - - return otherNode; - } - return 0; - } - - Node* int32ToDoubleCSE(Node* node) - { - for (unsigned i = m_indexInBlock; i--;) { - Node* otherNode = m_currentBlock->at(i); - if (otherNode == node->child1()) - return 0; - switch (otherNode->op()) { - case Int32ToDouble: - if (otherNode->child1() == node->child1()) - return otherNode; - break; - default: - break; - } - } - return 0; - } - - Node* constantCSE(Node* node) - { - for (unsigned i = endIndexForPureCSE(); i--;) { - Node* otherNode = m_currentBlock->at(i); - if (otherNode->op() != JSConstant) - continue; - - if (otherNode->constantNumber() != node->constantNumber()) - continue; - - return otherNode; - } - return 0; - } - - Node* weakConstantCSE(Node* node) - { - for (unsigned i = endIndexForPureCSE(); i--;) { - Node* otherNode = m_currentBlock->at(i); - if (otherNode->op() != WeakJSConstant) - continue; - - if (otherNode->weakConstant() != node->weakConstant()) - continue; - - return otherNode; + class SmallMaps { + public: + // This permits SmallMaps to be used for blocks that have up to 100 nodes. In practice, + // fewer than half of the nodes in a block have pure defs, and even fewer have impure defs. + // Thus, a capacity limit of 100 probably means that somewhere around ~40 things may end up + // in one of these "small" list-based maps. That number still seems largeish, except that + // the overhead of HashMaps can be quite high currently: clearing them, or even removing + // enough things from them, deletes (or resizes) their backing store eagerly. 
Hence + // HashMaps induce a lot of malloc traffic. + static const unsigned capacity = 100; + + SmallMaps() + : m_pureLength(0) + , m_impureLength(0) + { } - return 0; - } - Node* constantStoragePointerCSE(Node* node) - { - for (unsigned i = endIndexForPureCSE(); i--;) { - Node* otherNode = m_currentBlock->at(i); - if (otherNode->op() != ConstantStoragePointer) - continue; - - if (otherNode->storagePointer() != node->storagePointer()) - continue; - - return otherNode; + void clear() + { + m_pureLength = 0; + m_impureLength = 0; } - return 0; - } - Node* getCalleeLoadElimination() - { - for (unsigned i = m_indexInBlock; i--;) { - Node* node = m_currentBlock->at(i); - switch (node->op()) { - case GetCallee: - return node; - default: - break; + void write(AbstractHeap heap) + { + for (unsigned i = 0; i < m_impureLength; ++i) { + if (heap.overlaps(m_impureMap[i].key.heap())) + m_impureMap[i--] = m_impureMap[--m_impureLength]; } } - return 0; - } - Node* getArrayLengthElimination(Node* array) - { - for (unsigned i = m_indexInBlock; i--;) { - Node* node = m_currentBlock->at(i); - switch (node->op()) { - case GetArrayLength: - if (node->child1() == array) - return node; - break; - - case PutByValDirect: - case PutByVal: - if (!m_graph.byValIsPure(node)) - return 0; - if (node->arrayMode().mayStoreToHole()) - return 0; - break; - - default: - if (m_graph.clobbersWorld(node)) - return 0; + Node* addPure(PureValue value, Node* node) + { + for (unsigned i = m_pureLength; i--;) { + if (m_pureMap[i].key == value) + return m_pureMap[i].value; } + + ASSERT(m_pureLength < capacity); + m_pureMap[m_pureLength++] = WTF::KeyValuePair<PureValue, Node*>(value, node); + return nullptr; } - return 0; - } - - Node* globalVarLoadElimination(WriteBarrier<Unknown>* registerPointer) - { - for (unsigned i = m_indexInBlock; i--;) { - Node* node = m_currentBlock->at(i); - switch (node->op()) { - case GetGlobalVar: - if (node->registerPointer() == registerPointer) - return node; - break; - case PutGlobalVar: - if (node->registerPointer() == registerPointer) - return node->child1().node(); - break; - default: - break; + + LazyNode findReplacement(HeapLocation location) + { + for (unsigned i = m_impureLength; i--;) { + if (m_impureMap[i].key == location) + return m_impureMap[i].value; } - if (m_graph.clobbersWorld(node)) - break; + return nullptr; } - return 0; - } - Node* scopedVarLoadElimination(Node* registers, int varNumber) - { - for (unsigned i = m_indexInBlock; i--;) { - Node* node = m_currentBlock->at(i); - switch (node->op()) { - case GetClosureVar: { - if (node->child1() == registers && node->varNumber() == varNumber) - return node; - break; - } - case PutClosureVar: { - if (node->varNumber() != varNumber) - break; - if (node->child2() == registers) - return node->child3().node(); - return 0; - } - case SetLocal: { - VariableAccessData* variableAccessData = node->variableAccessData(); - if (variableAccessData->isCaptured() - && variableAccessData->local() == static_cast<VirtualRegister>(varNumber)) - return 0; - break; - } - default: - break; - } - if (m_graph.clobbersWorld(node)) - break; + LazyNode addImpure(HeapLocation location, LazyNode node) + { + // FIXME: If we are using small maps, we must not def() derived values. + // For now the only derived values we def() are constant-based. 
+ if (location.index() && !location.index().isNode()) + return nullptr; + if (LazyNode result = findReplacement(location)) + return result; + ASSERT(m_impureLength < capacity); + m_impureMap[m_impureLength++] = WTF::KeyValuePair<HeapLocation, LazyNode>(location, node); + return nullptr; } - return 0; - } - bool varInjectionWatchpointElimination() - { - for (unsigned i = m_indexInBlock; i--;) { - Node* node = m_currentBlock->at(i); - if (node->op() == VarInjectionWatchpoint) - return true; - if (m_graph.clobbersWorld(node)) - break; + private: + WTF::KeyValuePair<PureValue, Node*> m_pureMap[capacity]; + WTF::KeyValuePair<HeapLocation, LazyNode> m_impureMap[capacity]; + unsigned m_pureLength; + unsigned m_impureLength; + }; + + class LargeMaps { + public: + LargeMaps() + { } - return false; - } - Node* globalVarStoreElimination(WriteBarrier<Unknown>* registerPointer) - { - for (unsigned i = m_indexInBlock; i--;) { - Node* node = m_currentBlock->at(i); - switch (node->op()) { - case PutGlobalVar: - if (node->registerPointer() == registerPointer) - return node; - break; - - case GetGlobalVar: - if (node->registerPointer() == registerPointer) - return 0; - break; - - default: - break; - } - if (m_graph.clobbersWorld(node) || node->canExit()) - return 0; + void clear() + { + m_pureMap.clear(); + m_impureMap.clear(); } - return 0; - } - Node* scopedVarStoreElimination(Node* scope, Node* registers, int varNumber) - { - for (unsigned i = m_indexInBlock; i--;) { - Node* node = m_currentBlock->at(i); - switch (node->op()) { - case PutClosureVar: { - if (node->varNumber() != varNumber) - break; - if (node->child1() == scope && node->child2() == registers) - return node; - return 0; - } - - case GetClosureVar: { - // Let's be conservative. - if (node->varNumber() == varNumber) - return 0; - break; - } - - case GetLocal: - case SetLocal: { - VariableAccessData* variableAccessData = node->variableAccessData(); - if (variableAccessData->isCaptured() - && variableAccessData->local() == static_cast<VirtualRegister>(varNumber)) - return 0; - break; - } - - default: - break; - } - if (m_graph.clobbersWorld(node) || node->canExit()) - return 0; + void write(AbstractHeap heap) + { + clobber(m_impureMap, heap); } - return 0; - } - Node* getByValLoadElimination(Node* child1, Node* child2, ArrayMode arrayMode) - { - for (unsigned i = m_indexInBlock; i--;) { - Node* node = m_currentBlock->at(i); - if (node == child1 || node == child2) - break; - - switch (node->op()) { - case GetByVal: - if (!m_graph.byValIsPure(node)) - return 0; - if (node->child1() == child1 - && node->child2() == child2 - && node->arrayMode().type() == arrayMode.type()) - return node; - break; - - case PutByValDirect: - case PutByVal: - case PutByValAlias: { - if (!m_graph.byValIsPure(node)) - return 0; - // Typed arrays - if (arrayMode.typedArrayType() != NotTypedArray) - return 0; - if (m_graph.varArgChild(node, 0) == child1 - && m_graph.varArgChild(node, 1) == child2 - && node->arrayMode().type() == arrayMode.type()) - return m_graph.varArgChild(node, 2).node(); - // We must assume that the PutByVal will clobber the location we're getting from. - // FIXME: We can do better; if we know that the PutByVal is accessing an array of a - // different type than the GetByVal, then we know that they won't clobber each other. - // ... except of course for typed arrays, where all typed arrays clobber all other - // typed arrays! An Int32Array can alias a Float64Array for example, and so on. 
- return 0; - } - default: - if (m_graph.clobbersWorld(node)) - return 0; - break; - } + Node* addPure(PureValue value, Node* node) + { + auto result = m_pureMap.add(value, node); + if (result.isNewEntry) + return nullptr; + return result.iterator->value; } - return 0; - } - - bool checkFunctionElimination(JSCell* function, Node* child1) - { - for (unsigned i = endIndexForPureCSE(); i--;) { - Node* node = m_currentBlock->at(i); - if (node == child1) - break; - - if (node->op() == CheckFunction && node->child1() == child1 && node->function() == function) - return true; + + LazyNode findReplacement(HeapLocation location) + { + return m_impureMap.get(location); } - return false; - } - bool checkExecutableElimination(ExecutableBase* executable, Node* child1) - { - for (unsigned i = endIndexForPureCSE(); i--;) { - Node* node = m_currentBlock->at(i); - if (node == child1) - break; - - if (node->op() == CheckExecutable && node->child1() == child1 && node->executable() == executable) - return true; + LazyNode addImpure(HeapLocation location, LazyNode node) + { + auto result = m_impureMap.add(location, node); + if (result.isNewEntry) + return nullptr; + return result.iterator->value; } - return false; - } - bool checkStructureElimination(const StructureSet& structureSet, Node* child1) - { - for (unsigned i = m_indexInBlock; i--;) { - Node* node = m_currentBlock->at(i); - if (node == child1) - break; + private: + HashMap<PureValue, Node*> m_pureMap; + HashMap<HeapLocation, LazyNode> m_impureMap; + }; - switch (node->op()) { - case CheckStructure: - if (node->child1() == child1 - && structureSet.isSupersetOf(node->structureSet())) - return true; - break; - - case StructureTransitionWatchpoint: - if (node->child1() == child1 - && structureSet.contains(node->structure())) - return true; - break; - - case PutStructure: - if (node->child1() == child1 - && structureSet.contains(node->structureTransitionData().newStructure)) - return true; - if (structureSet.contains(node->structureTransitionData().previousStructure)) - return false; - break; - - case PutByOffset: - // Setting a property cannot change the structure. - break; - - case PutByValDirect: - case PutByVal: - case PutByValAlias: - if (m_graph.byValIsPure(node)) { - // If PutByVal speculates that it's accessing an array with an - // integer index, then it's impossible for it to cause a structure - // change. - break; - } - return false; - - case Arrayify: - case ArrayifyToStructure: - // We could check if the arrayification could affect our structures. - // But that seems like it would take Effort. - return false; - - default: - if (m_graph.clobbersWorld(node)) - return false; - break; - } + template<typename Maps> + class BlockCSE { + public: + BlockCSE(Graph& graph) + : m_graph(graph) + , m_insertionSet(graph) + { } - return false; - } - bool structureTransitionWatchpointElimination(Structure* structure, Node* child1) - { - for (unsigned i = m_indexInBlock; i--;) { - Node* node = m_currentBlock->at(i); - if (node == child1) - break; - - switch (node->op()) { - case CheckStructure: - if (node->child1() == child1 - && node->structureSet().containsOnly(structure)) - return true; - break; - - case PutStructure: - ASSERT(node->structureTransitionData().previousStructure != structure); - break; - - case PutByOffset: - // Setting a property cannot change the structure. 
- break; - - case PutByValDirect: - case PutByVal: - case PutByValAlias: - if (m_graph.byValIsPure(node)) { - // If PutByVal speculates that it's accessing an array with an - // integer index, then it's impossible for it to cause a structure - // change. - break; + bool run(BasicBlock* block) + { + m_maps.clear(); + m_changed = false; + m_block = block; + + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + m_node = block->at(nodeIndex); + m_graph.performSubstitution(m_node); + + if (m_node->op() == Identity) { + m_node->replaceWith(m_node->child1().node()); + m_changed = true; + } else { + // This rule only makes sense for local CSE, since in SSA form we have already + // factored the bounds check out of the PutByVal. It's kind of gross, but we + // still have reason to believe that PutByValAlias is a good optimization and + // that it's better to do it with a single node rather than separating out the + // CheckInBounds. + if (m_node->op() == PutByVal || m_node->op() == PutByValDirect) { + HeapLocation heap; + + Node* base = m_graph.varArgChild(m_node, 0).node(); + Node* index = m_graph.varArgChild(m_node, 1).node(); + + ArrayMode mode = m_node->arrayMode(); + switch (mode.type()) { + case Array::Int32: + if (!mode.isInBounds()) + break; + heap = HeapLocation( + IndexedPropertyLoc, IndexedInt32Properties, base, index); + break; + + case Array::Double: + if (!mode.isInBounds()) + break; + heap = HeapLocation( + IndexedPropertyLoc, IndexedDoubleProperties, base, index); + break; + + case Array::Contiguous: + if (!mode.isInBounds()) + break; + heap = HeapLocation( + IndexedPropertyLoc, IndexedContiguousProperties, base, index); + break; + + case Array::Int8Array: + case Array::Int16Array: + case Array::Int32Array: + case Array::Uint8Array: + case Array::Uint8ClampedArray: + case Array::Uint16Array: + case Array::Uint32Array: + case Array::Float32Array: + case Array::Float64Array: + if (!mode.isInBounds()) + break; + heap = HeapLocation( + IndexedPropertyLoc, TypedArrayProperties, base, index); + break; + + default: + break; + } + + if (!!heap && m_maps.findReplacement(heap)) + m_node->setOp(PutByValAlias); + } + + clobberize(m_graph, m_node, *this); } - return false; - - case StructureTransitionWatchpoint: - if (node->structure() == structure && node->child1() == child1) - return true; - break; - - case Arrayify: - case ArrayifyToStructure: - // We could check if the arrayification could affect our structures. - // But that seems like it would take Effort. - return false; - - default: - if (m_graph.clobbersWorld(node)) - return false; - break; } + + m_insertionSet.execute(block); + + return m_changed; } - return false; - } - Node* putStructureStoreElimination(Node* child1) - { - for (unsigned i = m_indexInBlock; i--;) { - Node* node = m_currentBlock->at(i); - if (node == child1) - break; - switch (node->op()) { - case CheckStructure: - return 0; - - case PhantomPutStructure: - if (node->child1() == child1) // No need to retrace our steps. - return 0; - break; - - case PutStructure: - if (node->child1() == child1) - return node; - break; - - // PutStructure needs to execute if we GC. Hence this needs to - // be careful with respect to nodes that GC. 
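The PutByValAlias conversion in the run() loop above hinges on using a (location kind, abstract heap, base, index) tuple as a lookup key: if that exact location already has a known value, the store can skip its slow-path checks. The sketch below only illustrates the tuple-as-key part with simplified stand-in types invented for this note; it is not the real HeapLocation API:

```cpp
#include <algorithm>
#include <utility>
#include <vector>

struct ToyNode; // stand-in for a DFG node; only its address matters here

// Simplified stand-ins; the real HeapLocation also carries a LazyNode index
// and many more location kinds and abstract heaps.
enum class ToyLocationKind { IndexedProperty };
enum class ToyHeap { IndexedInt32Properties, IndexedDoubleProperties, IndexedContiguousProperties };

struct ToyHeapLocation {
    ToyLocationKind kind;
    ToyHeap heap;
    const ToyNode* base;
    const ToyNode* index;

    bool operator==(const ToyHeapLocation& other) const
    {
        return kind == other.kind && heap == other.heap
            && base == other.base && index == other.index;
    }
};

// Block-local "available here" table, playing the role findReplacement()
// plays for the run() loop above.
struct ToyAvailability {
    std::vector<std::pair<ToyHeapLocation, const ToyNode*>> entries;

    const ToyNode* find(const ToyHeapLocation& location) const
    {
        auto it = std::find_if(entries.begin(), entries.end(),
            [&](const auto& entry) { return entry.first == location; });
        return it == entries.end() ? nullptr : it->second;
    }
};
```

The real loop asks m_maps.findReplacement(heap) with exactly such a key and only flips the opcode to PutByValAlias when a match exists.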
- case CreateArguments: - case TearOffArguments: - case NewFunctionNoCheck: - case NewFunction: - case NewFunctionExpression: - case CreateActivation: - case TearOffActivation: - case ToPrimitive: - case NewRegexp: - case NewArrayBuffer: - case NewArray: - case NewObject: - case CreateThis: - case AllocatePropertyStorage: - case ReallocatePropertyStorage: - case TypeOf: - case ToString: - case NewStringObject: - case MakeRope: - case NewTypedArray: - return 0; - - // This either exits, causes a GC (lazy string allocation), or clobbers - // the world. The chances of it not doing any of those things are so - // slim that we might as well not even try to reason about it. - case GetByVal: - return 0; - - case GetIndexedPropertyStorage: - if (node->arrayMode().getIndexedPropertyStorageMayTriggerGC()) - return 0; - break; - - default: - break; - } - if (m_graph.clobbersWorld(node) || node->canExit()) - return 0; - if (edgesUseStructure(m_graph, node)) - return 0; - } - return 0; - } + void read(AbstractHeap) { } - Node* getByOffsetLoadElimination(unsigned identifierNumber, Node* child1) - { - for (unsigned i = m_indexInBlock; i--;) { - Node* node = m_currentBlock->at(i); - if (node == child1) - break; - - switch (node->op()) { - case GetByOffset: - if (node->child1() == child1 - && m_graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber == identifierNumber) - return node; - break; - - case PutByOffset: - if (m_graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber == identifierNumber) { - if (node->child1() == child1) // Must be same property storage. - return node->child3().node(); - return 0; - } - break; - - case PutByValDirect: - case PutByVal: - case PutByValAlias: - if (m_graph.byValIsPure(node)) { - // If PutByVal speculates that it's accessing an array with an - // integer index, then it's impossible for it to cause a structure - // change. - break; - } - return 0; - - default: - if (m_graph.clobbersWorld(node)) - return 0; - break; - } + void write(AbstractHeap heap) + { + m_maps.write(heap); } - return 0; - } - - Node* putByOffsetStoreElimination(unsigned identifierNumber, Node* child1) - { - for (unsigned i = m_indexInBlock; i--;) { - Node* node = m_currentBlock->at(i); - if (node == child1) - break; + + void def(PureValue value) + { + Node* match = m_maps.addPure(value, m_node); + if (!match) + return; - switch (node->op()) { - case GetByOffset: - if (m_graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber == identifierNumber) - return 0; - break; - - case PutByOffset: - if (m_graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber == identifierNumber) { - if (node->child1() == child1) // Must be same property storage. - return node; - return 0; - } - break; - - case PutByValDirect: - case PutByVal: - case PutByValAlias: - case GetByVal: - if (m_graph.byValIsPure(node)) { - // If PutByVal speculates that it's accessing an array with an - // integer index, then it's impossible for it to cause a structure - // change. 
- break; - } - return 0; - - default: - if (m_graph.clobbersWorld(node)) - return 0; - break; - } - if (node->canExit()) - return 0; + m_node->replaceWith(match); + m_changed = true; } - return 0; - } - Node* getPropertyStorageLoadElimination(Node* child1) - { - for (unsigned i = m_indexInBlock; i--;) { - Node* node = m_currentBlock->at(i); - if (node == child1) - break; - - switch (node->op()) { - case GetButterfly: - if (node->child1() == child1) - return node; - break; - - case AllocatePropertyStorage: - case ReallocatePropertyStorage: - // If we can cheaply prove this is a change to our object's storage, we - // can optimize and use its result. - if (node->child1() == child1) - return node; - // Otherwise, we currently can't prove that this doesn't change our object's - // storage, so we conservatively assume that it may change the storage - // pointer of any object, including ours. - return 0; - - case PutByValDirect: - case PutByVal: - case PutByValAlias: - if (m_graph.byValIsPure(node)) { - // If PutByVal speculates that it's accessing an array with an - // integer index, then it's impossible for it to cause a structure - // change. - break; - } - return 0; - - case Arrayify: - case ArrayifyToStructure: - // We could check if the arrayification could affect our butterfly. - // But that seems like it would take Effort. - return 0; - - default: - if (m_graph.clobbersWorld(node)) - return 0; - break; + void def(HeapLocation location, LazyNode value) + { + LazyNode match = m_maps.addImpure(location, value); + if (!match) + return; + + if (m_node->op() == GetLocal) { + // Usually the CPS rethreading phase does this. But it's OK for us to mess with + // locals so long as: + // + // - We dethread the graph. Any changes we make may invalidate the assumptions of + // our CPS form, particularly if this GetLocal is linked to the variablesAtTail. + // + // - We don't introduce a Phantom for the child of the GetLocal. This wouldn't be + // totally wrong but it would pessimize the code. Just because there is a + // GetLocal doesn't mean that the child was live. Simply rerouting the all uses + // of this GetLocal will preserve the live-at-exit information just fine. + // + // We accomplish the latter by just clearing the child; then the Phantom that we + // introduce won't have children and so it will eventually just be deleted. + + m_node->child1() = Edge(); + m_graph.dethread(); } - } - return 0; - } - - bool checkArrayElimination(Node* child1, ArrayMode arrayMode) - { - for (unsigned i = m_indexInBlock; i--;) { - Node* node = m_currentBlock->at(i); - if (node == child1) - break; - - switch (node->op()) { - case CheckArray: - if (node->child1() == child1 && node->arrayMode() == arrayMode) - return true; - break; - - case Arrayify: - case ArrayifyToStructure: - // We could check if the arrayification could affect our array. - // But that seems like it would take Effort. 
- return false; - - default: - if (m_graph.clobbersWorld(node)) - return false; - break; + + if (value.isNode() && value.asNode() == m_node) { + match.ensureIsNode(m_insertionSet, m_block, 0)->owner = m_block; + ASSERT(match.isNode()); + m_node->replaceWith(match.asNode()); + m_changed = true; } } - return false; - } + + private: + Graph& m_graph; + + bool m_changed; + Node* m_node; + BasicBlock* m_block; + + Maps m_maps; - Node* getIndexedPropertyStorageLoadElimination(Node* child1, ArrayMode arrayMode) - { - for (unsigned i = m_indexInBlock; i--;) { - Node* node = m_currentBlock->at(i); - if (node == child1) - break; + InsertionSet m_insertionSet; + }; - switch (node->op()) { - case GetIndexedPropertyStorage: { - if (node->child1() == child1 && node->arrayMode() == arrayMode) - return node; - break; - } + BlockCSE<SmallMaps> m_smallBlock; + BlockCSE<LargeMaps> m_largeBlock; +}; - default: - if (m_graph.clobbersWorld(node)) - return 0; - break; - } - } - return 0; - } - - Node* getTypedArrayByteOffsetLoadElimination(Node* child1) +class GlobalCSEPhase : public Phase { +public: + GlobalCSEPhase(Graph& graph) + : Phase(graph, "global common subexpression elimination") + , m_impureDataMap(graph) + , m_insertionSet(graph) { - for (unsigned i = m_indexInBlock; i--;) { - Node* node = m_currentBlock->at(i); - if (node == child1) - break; - - switch (node->op()) { - case GetTypedArrayByteOffset: { - if (node->child1() == child1) - return node; - break; - } - - default: - if (m_graph.clobbersWorld(node)) - return 0; - break; - } - } - return 0; } - Node* getMyScopeLoadElimination() + bool run() { - for (unsigned i = m_indexInBlock; i--;) { - Node* node = m_currentBlock->at(i); - switch (node->op()) { - case CreateActivation: - // This may cause us to return a different scope. - return 0; - case GetMyScope: - return node; - default: - break; - } + ASSERT(m_graph.m_fixpointState == FixpointNotConverged); + ASSERT(m_graph.m_form == SSA); + + m_graph.initializeNodeOwners(); + m_graph.ensureDominators(); + + m_preOrder = m_graph.blocksInPreOrder(); + + // First figure out what gets clobbered by blocks. Node that this uses the preOrder list + // for convenience only. + for (unsigned i = m_preOrder.size(); i--;) { + m_block = m_preOrder[i]; + m_impureData = &m_impureDataMap[m_block]; + for (unsigned nodeIndex = m_block->size(); nodeIndex--;) + addWrites(m_graph, m_block->at(nodeIndex), m_impureData->writes); } - return 0; + + // Based on my experience doing this before, what follows might have to be made iterative. + // Right now it doesn't have to be iterative because everything is dominator-bsed. But when + // validation is enabled, we check if iterating would find new CSE opportunities. + + bool changed = iterate(); + + // FIXME: It should be possible to assert that CSE will not find any new opportunities if you + // run it a second time. Unfortunately, we cannot assert this right now. Note that if we did + // this, we'd have to first reset all of our state. 
+ // https://bugs.webkit.org/show_bug.cgi?id=145853 + + return changed; } - Node* getLocalLoadElimination(VirtualRegister local, Node*& relevantLocalOp, bool careAboutClobbering) + bool iterate() { - relevantLocalOp = 0; + if (verbose) + dataLog("Performing iteration.\n"); - for (unsigned i = m_indexInBlock; i--;) { - Node* node = m_currentBlock->at(i); - switch (node->op()) { - case GetLocal: - if (node->local() == local) { - relevantLocalOp = node; - return node; - } - break; - - case GetLocalUnlinked: - if (node->unlinkedLocal() == local) { - relevantLocalOp = node; - return node; - } - break; - - case SetLocal: - if (node->local() == local) { - relevantLocalOp = node; - return node->child1().node(); - } - break; - - case GetClosureVar: - case PutClosureVar: - if (static_cast<VirtualRegister>(node->varNumber()) == local) - return 0; - break; - - default: - if (careAboutClobbering && m_graph.clobbersWorld(node)) - return 0; - break; - } - } - return 0; - } - - struct SetLocalStoreEliminationResult { - SetLocalStoreEliminationResult() - : mayBeAccessed(false) - , mayExit(false) - , mayClobberWorld(false) - { - } + m_changed = false; + m_graph.clearReplacements(); - bool mayBeAccessed; - bool mayExit; - bool mayClobberWorld; - }; - SetLocalStoreEliminationResult setLocalStoreElimination( - VirtualRegister local, Node* expectedNode) - { - SetLocalStoreEliminationResult result; - for (unsigned i = m_indexInBlock; i--;) { - Node* node = m_currentBlock->at(i); - switch (node->op()) { - case GetLocal: - case Flush: - if (node->local() == local) - result.mayBeAccessed = true; - break; - - case GetLocalUnlinked: - if (node->unlinkedLocal() == local) - result.mayBeAccessed = true; - break; - - case SetLocal: { - if (node->local() != local) - break; - if (node != expectedNode) - result.mayBeAccessed = true; - return result; - } - - case GetClosureVar: - case PutClosureVar: - if (static_cast<VirtualRegister>(node->varNumber()) == local) - result.mayBeAccessed = true; - break; - - case GetMyScope: - case SkipTopScope: - if (node->codeOrigin.inlineCallFrame) - break; - if (m_graph.uncheckedActivationRegister() == local) - result.mayBeAccessed = true; - break; - - case CheckArgumentsNotCreated: - case GetMyArgumentsLength: - case GetMyArgumentsLengthSafe: - if (m_graph.uncheckedArgumentsRegisterFor(node->codeOrigin) == local) - result.mayBeAccessed = true; - break; - - case GetMyArgumentByVal: - case GetMyArgumentByValSafe: - result.mayBeAccessed = true; - break; - - case GetByVal: - // If this is accessing arguments then it's potentially accessing locals. - if (node->arrayMode().type() == Array::Arguments) - result.mayBeAccessed = true; - break; + for (unsigned i = 0; i < m_preOrder.size(); ++i) { + m_block = m_preOrder[i]; + m_impureData = &m_impureDataMap[m_block]; + m_writesSoFar.clear(); + + if (verbose) + dataLog("Processing block ", *m_block, ":\n"); + + for (unsigned nodeIndex = 0; nodeIndex < m_block->size(); ++nodeIndex) { + m_nodeIndex = nodeIndex; + m_node = m_block->at(nodeIndex); + if (verbose) + dataLog(" Looking at node ", m_node, ":\n"); - case CreateArguments: - case TearOffActivation: - case TearOffArguments: - // If an activation is being torn off then it means that captured variables - // are live. We could be clever here and check if the local qualifies as an - // argument register. But that seems like it would buy us very little since - // any kind of tear offs are rare to begin with. 
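The first loop in run() above folds everything a block writes into that block's ImpureBlockData::writes, and iterate() then walks blocks in pre-order consulting those summaries. Below is a hedged, standard-library-only model of that summarization step; the Tag enum and the overlap rule are deliberately crude placeholders for the much more precise AbstractHeap and ClobberSet machinery:

```cpp
#include <set>
#include <vector>

// Toy model: abstract heaps are plain tags, and one distinguished Top tag
// stands in for "writes the whole world".
enum class Tag { Top, NamedProperties, IndexedProperties, Stack };

struct ToyClobberSet {
    std::set<Tag> tags;

    void add(Tag tag) { tags.insert(tag); }

    bool overlaps(Tag tag) const
    {
        if (tags.empty())
            return false;
        return tag == Tag::Top || tags.count(Tag::Top) || tags.count(tag);
    }
};

struct ToyInstruction {
    std::vector<Tag> writes; // everything this instruction clobbers
};

// Mirrors the shape of the first loop in run(): fold each block's writes into
// one summary so the later search can prune whole blocks at a time.
ToyClobberSet summarizeBlockWrites(const std::vector<ToyInstruction>& block)
{
    ToyClobberSet summary;
    for (const ToyInstruction& instruction : block) {
        for (Tag tag : instruction.writes)
            summary.add(tag);
    }
    return summary;
}
```

During the global search further down, a whole block can be rejected as soon as its summary overlaps the queried location, which is what the "Dealing with write set" check does.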
- result.mayBeAccessed = true; - break; + m_graph.performSubstitution(m_node); - default: - break; + if (m_node->op() == Identity) { + m_node->replaceWith(m_node->child1().node()); + m_changed = true; + } else + clobberize(m_graph, m_node, *this); } - result.mayExit |= node->canExit(); - result.mayClobberWorld |= m_graph.clobbersWorld(node); + + m_insertionSet.execute(m_block); + + m_impureData->didVisit = true; } - RELEASE_ASSERT_NOT_REACHED(); - // Be safe in release mode. - result.mayBeAccessed = true; - return result; + + return m_changed; } + + void read(AbstractHeap) { } - void eliminateIrrelevantPhantomChildren(Node* node) + void write(AbstractHeap heap) { - ASSERT(node->op() == Phantom); - for (unsigned i = 0; i < AdjacencyList::Size; ++i) { - Edge edge = node->children.child(i); - if (!edge) - continue; - if (edge.useKind() != UntypedUse) - continue; // Keep the type check. - if (edge->flags() & NodeRelevantToOSR) - continue; - - node->children.removeEdge(i--); - m_changed = true; - } + clobber(m_impureData->availableAtTail, heap); + m_writesSoFar.add(heap); + if (verbose) + dataLog(" Clobbered, new tail map: ", mapDump(m_impureData->availableAtTail), "\n"); } - bool setReplacement(Node* replacement) + void def(PureValue value) { - if (!replacement) - return false; - - m_currentNode->convertToPhantom(); - eliminateIrrelevantPhantomChildren(m_currentNode); + // With pure values we do not have to worry about the possibility of some control flow path + // clobbering the value. So, we just search for all of the like values that have been + // computed. We pick one that is in a block that dominates ours. Note that this means that + // a PureValue will map to a list of nodes, since there may be many places in the control + // flow graph that compute a value but only one of them that dominates us. We may build up + // a large list of nodes that compute some value in the case of gnarly control flow. This + // is probably OK. - // At this point we will eliminate all references to this node. - m_currentNode->misc.replacement = replacement; + auto result = m_pureValues.add(value, Vector<Node*>()); + if (result.isNewEntry) { + result.iterator->value.append(m_node); + return; + } - m_changed = true; + for (unsigned i = result.iterator->value.size(); i--;) { + Node* candidate = result.iterator->value[i]; + if (m_graph.m_dominators->dominates(candidate->owner, m_block)) { + m_node->replaceWith(candidate); + m_changed = true; + return; + } + } - return true; + result.iterator->value.append(m_node); } - void eliminate() + LazyNode findReplacement(HeapLocation location) { - ASSERT(m_currentNode->mustGenerate()); - m_currentNode->convertToPhantom(); - eliminateIrrelevantPhantomChildren(m_currentNode); + // At this instant, our "availableAtTail" reflects the set of things that are available in + // this block so far. We check this map to find block-local CSE opportunities before doing + // a global search. 
+ LazyNode match = m_impureData->availableAtTail.get(location); + if (!!match) { + if (verbose) + dataLog(" Found local match: ", match, "\n"); + return match; + } - m_changed = true; - } - - void eliminate(Node* node, NodeType phantomType = Phantom) - { - if (!node) - return; - ASSERT(node->mustGenerate()); - node->setOpAndDefaultFlags(phantomType); - if (phantomType == Phantom) - eliminateIrrelevantPhantomChildren(node); + // If it's not available at this point in the block, and at some prior point in the block + // we have clobbered this heap location, then there is no point in doing a global search. + if (m_writesSoFar.overlaps(location.heap())) { + if (verbose) + dataLog(" Not looking globally because of local clobber: ", m_writesSoFar, "\n"); + return nullptr; + } - m_changed = true; - } - - void performNodeCSE(Node* node) - { - if (cseMode == NormalCSE) - m_graph.performSubstitution(node); + // This perfoms a backward search over the control flow graph to find some possible + // non-local def() that matches our heap location. Such a match is only valid if there does + // not exist any path from that def() to our block that contains a write() that overlaps + // our heap. This algorithm looks for both of these things (the matching def and the + // overlapping writes) in one backwards DFS pass. + // + // This starts by looking at the starting block's predecessors, and then it continues along + // their predecessors. As soon as this finds a possible def() - one that defines the heap + // location we want while dominating our starting block - it assumes that this one must be + // the match. It then lets the DFS over predecessors complete, but it doesn't add the + // def()'s predecessors; this ensures that any blocks we visit thereafter are on some path + // from the def() to us. As soon as the DFG finds a write() that overlaps the location's + // heap, it stops, assuming that there is no possible match. Note that the write() case may + // trigger before we find a def(), or after. Either way, the write() case causes this + // function to immediately return nullptr. + // + // If the write() is found before we find the def(), then we know that any def() we would + // find would have a path to us that trips over the write() and hence becomes invalid. This + // is just a direct outcome of us looking for a def() that dominates us. Given a block A + // that dominates block B - so that A is the one that would have the def() and B is our + // starting block - we know that any other block must either be on the path from A to B, or + // it must be on a path from the root to A, but not both. So, if we haven't found A yet but + // we already have found a block C that has a write(), then C must be on some path from A + // to B, which means that A's def() is invalid for our purposes. Hence, before we find the + // def(), stopping on write() is the right thing to do. + // + // Stopping on write() is also the right thing to do after we find the def(). After we find + // the def(), we don't add that block's predecessors to the search worklist. That means + // that henceforth the only blocks we will see in the search are blocks on the path from + // the def() to us. If any such block has a write() that clobbers our heap then we should + // give up. + // + // Hence this graph search algorithm ends up being deceptively simple: any overlapping + // write() causes us to immediately return nullptr, and a matching def() means that we just + // record it and neglect to visit its precessors. 
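Since the comment above carries the whole argument for the search, here is the same algorithm restated over a deliberately tiny model, with the two stopping rules spelled out in code. The block indices, the dominates() oracle and ToyBlockInfo are simplifications for illustration, not DFG types; the real search additionally asserts that the dominating block was already visited and memoizes the result in availableAtTail.

```cpp
#include <optional>
#include <vector>

// Per-block summary for one fixed heap location; a stand-in for ImpureBlockData.
struct ToyBlockInfo {
    std::vector<int> predecessors;
    bool clobbersLocation = false;   // does some write() in the block overlap the location?
    std::optional<int> valueAtTail;  // value available at the block's tail, if any
};

// dominates(a, b) is assumed to answer "does block a strictly dominate block b?".
using DominatesFn = bool (*)(int, int);

// Backwards search from `start`: stop dead on any overlapping write, and accept
// a value only from a strictly dominating block, without walking past it.
std::optional<int> findDominatingValue(
    const std::vector<ToyBlockInfo>& blocks, int start, DominatesFn dominates)
{
    std::optional<int> match;
    std::vector<int> worklist;
    std::vector<bool> seen(blocks.size(), false);

    for (int predecessor : blocks[start].predecessors) {
        if (!seen[predecessor]) {
            seen[predecessor] = true;
            worklist.push_back(predecessor);
        }
    }

    while (!worklist.empty()) {
        int blockIndex = worklist.back();
        worklist.pop_back();
        const ToyBlockInfo& block = blocks[blockIndex];

        if (dominates(blockIndex, start) && block.valueAtTail) {
            // A candidate def(): remember it, but do not expand its predecessors,
            // so every block visited afterwards lies between the def() and `start`.
            match = block.valueAtTail;
            continue;
        }

        if (block.clobbersLocation) {
            // Some path from a potential def() to `start` overwrites the location,
            // so any match would be invalid.
            return std::nullopt;
        }

        for (int predecessor : block.predecessors) {
            if (!seen[predecessor]) {
                seen[predecessor] = true;
                worklist.push_back(predecessor);
            }
        }
    }
    return match;
}
```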
- switch (node->op()) { + Vector<BasicBlock*, 8> worklist; + Vector<BasicBlock*, 8> seenList; + BitVector seen; - case Identity: - if (cseMode == StoreElimination) - break; - setReplacement(node->child1().node()); - break; - - // Handle the pure nodes. These nodes never have any side-effects. - case BitAnd: - case BitOr: - case BitXor: - case BitRShift: - case BitLShift: - case BitURShift: - case ArithAdd: - case ArithSub: - case ArithNegate: - case ArithMul: - case ArithMod: - case ArithDiv: - case ArithAbs: - case ArithMin: - case ArithMax: - case ArithSqrt: - case ArithSin: - case ArithCos: - case StringCharAt: - case StringCharCodeAt: - case IsUndefined: - case IsBoolean: - case IsNumber: - case IsString: - case IsObject: - case IsFunction: - case DoubleAsInt32: - case LogicalNot: - case SkipTopScope: - case SkipScope: - case GetClosureRegisters: - case GetScope: - case TypeOf: - case CompareEqConstant: - case ValueToInt32: - case MakeRope: - case Int52ToDouble: - case Int52ToValue: - if (cseMode == StoreElimination) - break; - setReplacement(pureCSE(node)); - break; - - case Int32ToDouble: - if (cseMode == StoreElimination) - break; - setReplacement(int32ToDoubleCSE(node)); - break; - - case GetCallee: - if (cseMode == StoreElimination) - break; - setReplacement(getCalleeLoadElimination()); - break; - - case GetLocal: { - if (cseMode == StoreElimination) - break; - VariableAccessData* variableAccessData = node->variableAccessData(); - if (!variableAccessData->isCaptured()) - break; - Node* relevantLocalOp; - Node* possibleReplacement = getLocalLoadElimination(variableAccessData->local(), relevantLocalOp, variableAccessData->isCaptured()); - if (!relevantLocalOp) - break; - if (relevantLocalOp->op() != GetLocalUnlinked - && relevantLocalOp->variableAccessData() != variableAccessData) - break; - Node* phi = node->child1().node(); - if (!setReplacement(possibleReplacement)) - break; - - m_graph.dethread(); - - // If we replace a GetLocal with a GetLocalUnlinked, then turn the GetLocalUnlinked - // into a GetLocal. - if (relevantLocalOp->op() == GetLocalUnlinked) - relevantLocalOp->convertToGetLocal(variableAccessData, phi); - - m_changed = true; - break; - } - - case GetLocalUnlinked: { - if (cseMode == StoreElimination) - break; - Node* relevantLocalOpIgnored; - setReplacement(getLocalLoadElimination(node->unlinkedLocal(), relevantLocalOpIgnored, true)); - break; - } - - case Flush: { - if (m_graph.m_form == SSA) { - // FIXME: Enable Flush store elimination in SSA form. - // https://bugs.webkit.org/show_bug.cgi?id=125429 - break; - } - VariableAccessData* variableAccessData = node->variableAccessData(); - VirtualRegister local = variableAccessData->local(); - Node* replacement = node->child1().node(); - if (replacement->op() != SetLocal) - break; - ASSERT(replacement->variableAccessData() == variableAccessData); - // FIXME: We should be able to remove SetLocals that can exit; we just need - // to replace them with appropriate type checks. - if (cseMode == NormalCSE) { - // Need to be conservative at this time; if the SetLocal has any chance of performing - // any speculations then we cannot do anything. 
- FlushFormat format = variableAccessData->flushFormat(); - ASSERT(format != DeadFlush); - if (format != FlushedJSValue) - break; - } else { - if (replacement->canExit()) - break; + for (unsigned i = m_block->predecessors.size(); i--;) { + BasicBlock* predecessor = m_block->predecessors[i]; + if (!seen.get(predecessor->index)) { + worklist.append(predecessor); + seen.set(predecessor->index); } - SetLocalStoreEliminationResult result = - setLocalStoreElimination(local, replacement); - if (result.mayBeAccessed || result.mayClobberWorld) - break; - ASSERT(replacement->op() == SetLocal); - // FIXME: Investigate using mayExit as a further optimization. - node->convertToPhantom(); - Node* dataNode = replacement->child1().node(); - ASSERT(dataNode->hasResult()); - node->child1() = Edge(dataNode); - m_graph.dethread(); - m_changed = true; - break; } - - case JSConstant: - if (cseMode == StoreElimination) - break; - // This is strange, but necessary. Some phases will convert nodes to constants, - // which may result in duplicated constants. We use CSE to clean this up. - setReplacement(constantCSE(node)); - break; - - case WeakJSConstant: - if (cseMode == StoreElimination) - break; - // FIXME: have CSE for weak constants against strong constants and vice-versa. - setReplacement(weakConstantCSE(node)); - break; - - case ConstantStoragePointer: - if (cseMode == StoreElimination) - break; - setReplacement(constantStoragePointerCSE(node)); - break; - - case GetArrayLength: - if (cseMode == StoreElimination) - break; - setReplacement(getArrayLengthElimination(node->child1().node())); - break; - - case GetMyScope: - if (cseMode == StoreElimination) - break; - setReplacement(getMyScopeLoadElimination()); - break; - - // Handle nodes that are conditionally pure: these are pure, and can - // be CSE'd, so long as the prediction is the one we want. - case CompareLess: - case CompareLessEq: - case CompareGreater: - case CompareGreaterEq: - case CompareEq: { - if (cseMode == StoreElimination) - break; - if (m_graph.isPredictedNumerical(node)) { - Node* replacement = pureCSE(node); - if (replacement && m_graph.isPredictedNumerical(replacement)) - setReplacement(replacement); + + while (!worklist.isEmpty()) { + BasicBlock* block = worklist.takeLast(); + seenList.append(block); + + if (verbose) + dataLog(" Searching in block ", *block, "\n"); + ImpureBlockData& data = m_impureDataMap[block]; + + // We require strict domination because this would only see things in our own block if + // they came *after* our position in the block. Clearly, while our block dominates + // itself, the things in the block after us don't dominate us. + if (m_graph.m_dominators->strictlyDominates(block, m_block)) { + if (verbose) + dataLog(" It strictly dominates.\n"); + DFG_ASSERT(m_graph, m_node, data.didVisit); + DFG_ASSERT(m_graph, m_node, !match); + if (verbose) + dataLog(" Availability map: ", mapDump(data.availableAtTail), "\n"); + match = data.availableAtTail.get(location); + if (verbose) + dataLog(" Availability: ", match, "\n"); + if (!!match) { + // Don't examine the predecessors of a match. At this point we just want to + // establish that other blocks on the path from here to there don't clobber + // the location we're interested in. + continue; + } } - break; - } - - // Finally handle heap accesses. These are not quite pure, but we can still - // optimize them provided that some subtle conditions are met. 
- case GetGlobalVar: - if (cseMode == StoreElimination) - break; - setReplacement(globalVarLoadElimination(node->registerPointer())); - break; - - case GetClosureVar: { - if (cseMode == StoreElimination) - break; - setReplacement(scopedVarLoadElimination(node->child1().node(), node->varNumber())); - break; - } - - case VarInjectionWatchpoint: - if (cseMode == StoreElimination) - break; - if (varInjectionWatchpointElimination()) - eliminate(); - break; - - case PutGlobalVar: - if (cseMode == NormalCSE) - break; - eliminate(globalVarStoreElimination(node->registerPointer())); - break; - case PutClosureVar: { - if (cseMode == NormalCSE) - break; - eliminate(scopedVarStoreElimination(node->child1().node(), node->child2().node(), node->varNumber())); - break; - } - - case GetByVal: - if (cseMode == StoreElimination) - break; - if (m_graph.byValIsPure(node)) - setReplacement(getByValLoadElimination(node->child1().node(), node->child2().node(), node->arrayMode())); - break; - - case PutByValDirect: - case PutByVal: { - if (cseMode == StoreElimination) - break; - Edge child1 = m_graph.varArgChild(node, 0); - Edge child2 = m_graph.varArgChild(node, 1); - if (node->arrayMode().canCSEStorage()) { - Node* replacement = getByValLoadElimination(child1.node(), child2.node(), node->arrayMode()); - if (!replacement) - break; - node->setOp(PutByValAlias); + if (verbose) + dataLog(" Dealing with write set ", data.writes, "\n"); + if (data.writes.overlaps(location.heap())) { + if (verbose) + dataLog(" Clobbered.\n"); + return nullptr; } - break; - } - - case CheckStructure: - if (cseMode == StoreElimination) - break; - if (checkStructureElimination(node->structureSet(), node->child1().node())) - eliminate(); - break; - - case StructureTransitionWatchpoint: - if (cseMode == StoreElimination) - break; - if (structureTransitionWatchpointElimination(node->structure(), node->child1().node())) - eliminate(); - break; - - case PutStructure: - if (cseMode == NormalCSE) - break; - eliminate(putStructureStoreElimination(node->child1().node()), PhantomPutStructure); - break; - - case CheckFunction: - if (cseMode == StoreElimination) - break; - if (checkFunctionElimination(node->function(), node->child1().node())) - eliminate(); - break; - - case CheckExecutable: - if (cseMode == StoreElimination) - break; - if (checkExecutableElimination(node->executable(), node->child1().node())) - eliminate(); - break; - - case CheckArray: - if (cseMode == StoreElimination) - break; - if (checkArrayElimination(node->child1().node(), node->arrayMode())) - eliminate(); - break; - case GetIndexedPropertyStorage: { - if (cseMode == StoreElimination) - break; - setReplacement(getIndexedPropertyStorageLoadElimination(node->child1().node(), node->arrayMode())); - break; - } - - case GetTypedArrayByteOffset: { - if (cseMode == StoreElimination) - break; - setReplacement(getTypedArrayByteOffsetLoadElimination(node->child1().node())); - break; - } - - case GetButterfly: - if (cseMode == StoreElimination) - break; - setReplacement(getPropertyStorageLoadElimination(node->child1().node())); - break; - - case GetByOffset: - if (cseMode == StoreElimination) - break; - setReplacement(getByOffsetLoadElimination(m_graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber, node->child1().node())); - break; - - case PutByOffset: - if (cseMode == NormalCSE) - break; - eliminate(putByOffsetStoreElimination(m_graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber, node->child1().node())); - break; - - case Phantom: - // 
FIXME: we ought to remove Phantom's that have no children. - - eliminateIrrelevantPhantomChildren(node); - break; - - default: - // do nothing. - break; + for (unsigned i = block->predecessors.size(); i--;) { + BasicBlock* predecessor = block->predecessors[i]; + if (!seen.get(predecessor->index)) { + worklist.append(predecessor); + seen.set(predecessor->index); + } + } } - m_lastSeen[node->op()] = m_indexInBlock; + if (!match) + return nullptr; + + // Cache the results for next time. We cache them both for this block and for all of our + // predecessors, since even though we've already visited our predecessors, our predecessors + // probably have successors other than us. + // FIXME: Consider caching failed searches as well, when match is null. It's not clear that + // the reduction in compile time would warrant the increase in complexity, though. + // https://bugs.webkit.org/show_bug.cgi?id=134876 + for (BasicBlock* block : seenList) + m_impureDataMap[block].availableAtTail.add(location, match); + m_impureData->availableAtTail.add(location, match); + + return match; } - void performBlockCSE(BasicBlock* block) + void def(HeapLocation location, LazyNode value) { - if (!block) - return; - if (!block->isReachable) - return; + if (verbose) + dataLog(" Got heap location def: ", location, " -> ", value, "\n"); - m_currentBlock = block; - for (unsigned i = 0; i < LastNodeType; ++i) - m_lastSeen[i] = UINT_MAX; + LazyNode match = findReplacement(location); - for (m_indexInBlock = 0; m_indexInBlock < block->size(); ++m_indexInBlock) { - m_currentNode = block->at(m_indexInBlock); - performNodeCSE(m_currentNode); - } + if (verbose) + dataLog(" Got match: ", match, "\n"); - if (!ASSERT_DISABLED && cseMode == StoreElimination) { - // Nobody should have replacements set. - for (unsigned i = 0; i < block->size(); ++i) - ASSERT(!block->at(i)->misc.replacement); + if (!match) { + if (verbose) + dataLog(" Adding at-tail mapping: ", location, " -> ", value, "\n"); + auto result = m_impureData->availableAtTail.add(location, value); + ASSERT_UNUSED(result, result.isNewEntry); + return; + } + + if (value.isNode() && value.asNode() == m_node) { + if (!match.isNode()) { + // We need to properly record the constant in order to use an existing one if applicable. + // This ensures that re-running GCSE will not find new optimizations. + match.ensureIsNode(m_insertionSet, m_block, m_nodeIndex)->owner = m_block; + auto result = m_pureValues.add(PureValue(match.asNode(), match->constant()), Vector<Node*>()); + bool replaced = false; + if (!result.isNewEntry) { + for (unsigned i = result.iterator->value.size(); i--;) { + Node* candidate = result.iterator->value[i]; + if (m_graph.m_dominators->dominates(candidate->owner, m_block)) { + ASSERT(candidate); + match->replaceWith(candidate); + match.setNode(candidate); + replaced = true; + break; + } + } + } + if (!replaced) + result.iterator->value.append(match.asNode()); + } + ASSERT(match.asNode()); + m_node->replaceWith(match.asNode()); + m_changed = true; } } - BasicBlock* m_currentBlock; - Node* m_currentNode; - unsigned m_indexInBlock; - std::array<unsigned, LastNodeType> m_lastSeen; - bool m_changed; // Only tracks changes that have a substantive effect on other optimizations. 
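Both def() overloads above retire a redundant node with replaceWith() instead of editing every user on the spot, because DFG IR keeps no use-lists; the redirection only takes effect when a later performSubstitution() call rewrites each edge (the DFGCSEPhase.h comment below makes the same point about Identity nodes). A small sketch of that deferred-substitution shape, using invented types:

```cpp
#include <vector>

// Invented toy IR: each node may carry a forwarding pointer set by replaceWith().
struct DemoNode {
    std::vector<DemoNode*> children;
    DemoNode* replacement = nullptr;
};

// Retiring a node is O(1): no use-list is walked, only a forwarding pointer is left.
void replaceWith(DemoNode& node, DemoNode& newNode)
{
    node.replacement = &newNode;
}

// Later, before a node is processed, its edges are redirected through any
// forwarding pointers, analogous to the performSubstitution() calls in the
// phase above.
void performSubstitution(DemoNode& node)
{
    for (DemoNode*& child : node.children) {
        while (child && child->replacement)
            child = child->replacement;
    }
}
```

Because a replacement is just a pointer stored on the retired node, GlobalCSE can reset all of them with one clearReplacements() call between iterations and rely on performSubstitution() at the top of each node visit.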
+ struct ImpureBlockData { + ImpureBlockData() + : didVisit(false) + { + } + + ClobberSet writes; + ImpureMap availableAtTail; + bool didVisit; + }; + + Vector<BasicBlock*> m_preOrder; + + PureMultiMap m_pureValues; + BlockMap<ImpureBlockData> m_impureDataMap; + + BasicBlock* m_block; + Node* m_node; + unsigned m_nodeIndex; + ImpureBlockData* m_impureData; + ClobberSet m_writesSoFar; + InsertionSet m_insertionSet; + + bool m_changed; }; -bool performCSE(Graph& graph) +} // anonymous namespace + +bool performLocalCSE(Graph& graph) { - SamplingRegion samplingRegion("DFG CSE Phase"); - return runPhase<CSEPhase<NormalCSE>>(graph); + SamplingRegion samplingRegion("DFG LocalCSE Phase"); + return runPhase<LocalCSEPhase>(graph); } -bool performStoreElimination(Graph& graph) +bool performGlobalCSE(Graph& graph) { - SamplingRegion samplingRegion("DFG Store Elimination Phase"); - return runPhase<CSEPhase<StoreElimination>>(graph); + SamplingRegion samplingRegion("DFG GlobalCSE Phase"); + return runPhase<GlobalCSEPhase>(graph); } } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) - - diff --git a/Source/JavaScriptCore/dfg/DFGCSEPhase.h b/Source/JavaScriptCore/dfg/DFGCSEPhase.h index 1dfd2b7dd..562fd9bca 100644 --- a/Source/JavaScriptCore/dfg/DFGCSEPhase.h +++ b/Source/JavaScriptCore/dfg/DFGCSEPhase.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGCSEPhase_h #define DFGCSEPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGCommon.h" @@ -36,14 +34,20 @@ namespace JSC { namespace DFG { class Graph; -// Block-local common subexpression elimination. This is an optional phase, but -// it is rather profitable. It has fairly accurate heap modeling and will match -// a wide range of subexpression similarities. It's known to produce big wins -// on a few benchmarks, and is relatively cheap to run. -bool performCSE(Graph&); - -// Perform just block-local store elimination. -bool performStoreElimination(Graph&); +// Block-local common subexpression elimination. It uses clobberize() for heap +// modeling, which is quite precise. This phase is known to produce big wins on +// a few benchmarks, and is relatively cheap to run. +// +// Note that this phase also gets rid of Identity nodes, which means that it's +// currently not an optional phase. Basically, DFG IR doesn't have use-lists, +// so there is no instantaneous replaceAllUsesWith operation. Instead, you turn +// a node into an Identity and wait for CSE to clean it up. +bool performLocalCSE(Graph&); + +// Same, but global. Only works for SSA. This will find common subexpressions +// both in the same block and in any block that dominates the current block. It +// has no limits on how far it will look for load-elimination opportunities. 
+bool performGlobalCSE(Graph&); } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGCallArrayAllocatorSlowPathGenerator.h b/Source/JavaScriptCore/dfg/DFGCallArrayAllocatorSlowPathGenerator.h index ac7dc45fe..12739abd6 100644 --- a/Source/JavaScriptCore/dfg/DFGCallArrayAllocatorSlowPathGenerator.h +++ b/Source/JavaScriptCore/dfg/DFGCallArrayAllocatorSlowPathGenerator.h @@ -26,8 +26,6 @@ #ifndef DFGCallArrayAllocatorSlowPathGenerator_h #define DFGCallArrayAllocatorSlowPathGenerator_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGCommon.h" @@ -63,6 +61,7 @@ protected: GPRReg canTrample = SpeculativeJIT::pickCanTrample(m_resultGPR); for (unsigned i = m_plans.size(); i--;) jit->silentFill(m_plans[i], canTrample); + jit->m_jit.exceptionCheck(); jit->m_jit.loadPtr(MacroAssembler::Address(m_resultGPR, JSObject::butterflyOffset()), m_storageGPR); jumpTo(jit); } @@ -98,7 +97,7 @@ protected: for (unsigned i = 0; i < m_plans.size(); ++i) jit->silentSpill(m_plans[i]); GPRReg scratchGPR = AssemblyHelpers::selectScratchGPR(m_sizeGPR); - MacroAssembler::Jump bigLength = jit->m_jit.branch32(MacroAssembler::AboveOrEqual, m_sizeGPR, MacroAssembler::TrustedImm32(MIN_SPARSE_ARRAY_INDEX)); + MacroAssembler::Jump bigLength = jit->m_jit.branch32(MacroAssembler::AboveOrEqual, m_sizeGPR, MacroAssembler::TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)); jit->m_jit.move(MacroAssembler::TrustedImmPtr(m_contiguousStructure), scratchGPR); MacroAssembler::Jump done = jit->m_jit.jump(); bigLength.link(&jit->m_jit); @@ -108,6 +107,7 @@ protected: GPRReg canTrample = SpeculativeJIT::pickCanTrample(m_resultGPR); for (unsigned i = m_plans.size(); i--;) jit->silentFill(m_plans[i], canTrample); + jit->m_jit.exceptionCheck(); jumpTo(jit); } diff --git a/Source/JavaScriptCore/dfg/DFGCallCreateDirectArgumentsSlowPathGenerator.h b/Source/JavaScriptCore/dfg/DFGCallCreateDirectArgumentsSlowPathGenerator.h new file mode 100644 index 000000000..dc06ae194 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGCallCreateDirectArgumentsSlowPathGenerator.h @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef DFGCallCreateDirectArgumentsSlowPathGenerator_h +#define DFGCallCreateDirectArgumentsSlowPathGenerator_h + +#if ENABLE(DFG_JIT) + +#include "DFGCommon.h" +#include "DFGOperations.h" +#include "DFGSlowPathGenerator.h" +#include "DFGSpeculativeJIT.h" +#include "DirectArguments.h" + +namespace JSC { namespace DFG { + +// This calls operationCreateDirectArguments but then restores the value of lengthGPR. +class CallCreateDirectArgumentsSlowPathGenerator : public JumpingSlowPathGenerator<MacroAssembler::JumpList> { +public: + CallCreateDirectArgumentsSlowPathGenerator( + MacroAssembler::JumpList from, SpeculativeJIT* jit, GPRReg resultGPR, Structure* structure, + GPRReg lengthGPR, unsigned minCapacity) + : JumpingSlowPathGenerator<MacroAssembler::JumpList>(from, jit) + , m_resultGPR(resultGPR) + , m_structure(structure) + , m_lengthGPR(lengthGPR) + , m_minCapacity(minCapacity) + { + jit->silentSpillAllRegistersImpl(false, m_plans, resultGPR); + } + +protected: + void generateInternal(SpeculativeJIT* jit) override + { + linkFrom(jit); + for (unsigned i = 0; i < m_plans.size(); ++i) + jit->silentSpill(m_plans[i]); + jit->callOperation( + operationCreateDirectArguments, m_resultGPR, m_structure, m_lengthGPR, m_minCapacity); + GPRReg canTrample = SpeculativeJIT::pickCanTrample(m_resultGPR); + for (unsigned i = m_plans.size(); i--;) + jit->silentFill(m_plans[i], canTrample); + jit->m_jit.exceptionCheck(); + jit->m_jit.loadPtr( + MacroAssembler::Address(m_resultGPR, DirectArguments::offsetOfLength()), m_lengthGPR); + jumpTo(jit); + } + +private: + GPRReg m_resultGPR; + Structure* m_structure; + GPRReg m_lengthGPR; + unsigned m_minCapacity; + Vector<SilentRegisterSavePlan, 2> m_plans; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGCallCreateDirectArgumentsSlowPathGenerator_h + diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.cpp b/Source/JavaScriptCore/dfg/DFGCapabilities.cpp index e6fcd8c11..d100d97d5 100644 --- a/Source/JavaScriptCore/dfg/DFGCapabilities.cpp +++ b/Source/JavaScriptCore/dfg/DFGCapabilities.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013, 2014 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,44 +31,65 @@ #include "CodeBlock.h" #include "DFGCommon.h" #include "Interpreter.h" +#include "JSCInlines.h" +#include "Options.h" namespace JSC { namespace DFG { -#if ENABLE(DFG_JIT) +bool isSupported() +{ + return Options::useDFGJIT() + && MacroAssembler::supportsFloatingPoint(); +} + +bool isSupportedForInlining(CodeBlock* codeBlock) +{ +#if ENABLE(WEBASSEMBLY) + if (codeBlock->ownerExecutable()->isWebAssemblyExecutable()) + return false; +#endif + return codeBlock->ownerScriptExecutable()->isInliningCandidate(); +} + bool mightCompileEval(CodeBlock* codeBlock) { - return codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount(); + return isSupported() + && codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount() + && codeBlock->ownerScriptExecutable()->isOkToOptimize(); } bool mightCompileProgram(CodeBlock* codeBlock) { - return codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount(); + return isSupported() + && codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount() + && codeBlock->ownerScriptExecutable()->isOkToOptimize(); } bool mightCompileFunctionForCall(CodeBlock* codeBlock) { - return codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount(); + return isSupported() + && codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount() + && codeBlock->ownerScriptExecutable()->isOkToOptimize(); } bool mightCompileFunctionForConstruct(CodeBlock* codeBlock) { - return codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount(); + return isSupported() + && codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount() + && codeBlock->ownerScriptExecutable()->isOkToOptimize(); } bool mightInlineFunctionForCall(CodeBlock* codeBlock) { return codeBlock->instructionCount() <= Options::maximumFunctionForCallInlineCandidateInstructionCount() - && !codeBlock->ownerExecutable()->needsActivation() - && codeBlock->ownerExecutable()->isInliningCandidate(); + && isSupportedForInlining(codeBlock); } bool mightInlineFunctionForClosureCall(CodeBlock* codeBlock) { return codeBlock->instructionCount() <= Options::maximumFunctionForClosureCallInlineCandidateInstructionCount() - && !codeBlock->ownerExecutable()->needsActivation() - && codeBlock->ownerExecutable()->isInliningCandidate(); + && isSupportedForInlining(codeBlock); } bool mightInlineFunctionForConstruct(CodeBlock* codeBlock) { return codeBlock->instructionCount() <= Options::maximumFunctionForConstructInlineCandidateInstructionCount() - && !codeBlock->ownerExecutable()->needsActivation() - && codeBlock->ownerExecutable()->isInliningCandidate(); + && isSupportedForInlining(codeBlock); } inline void debugFail(CodeBlock* codeBlock, OpcodeID opcodeID, CapabilityLevel result) @@ -79,12 +100,13 @@ inline void debugFail(CodeBlock* codeBlock, OpcodeID opcodeID, CapabilityLevel r CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, Instruction* pc) { + UNUSED_PARAM(codeBlock); // This function does some bytecode parsing. Ordinarily bytecode parsing requires the owning CodeBlock. It's sort of strange that we don't use it here right now. 
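After this change each mightCompile*/mightInline* predicate is a conjunction of independent gates: the DFG must be usable at all (isSupported()), the code must be under the relevant size limit, and the executable must be willing to be optimized, with inlining using its own smaller limit plus isSupportedForInlining(). The fragment below is only a schematic restatement of that shape with invented names and placeholder thresholds, not the real Options plumbing:

```cpp
// Schematic of the gating pattern; every identifier and threshold here is a
// placeholder, not a JSC API or a real limit.
struct CandidateCodeBlock {
    bool dfgSupportedOnThisMachine; // stands in for isSupported()
    bool okToOptimize;              // stands in for isOkToOptimize()
    bool inliningCandidate;         // stands in for isSupportedForInlining()
    unsigned instructionCount;
};

constexpr unsigned kMaxOptimizationCandidateSize = 100000; // placeholder
constexpr unsigned kMaxInlineCandidateSize = 180;          // placeholder

bool mightCompile(const CandidateCodeBlock& code)
{
    return code.dfgSupportedOnThisMachine
        && code.instructionCount <= kMaxOptimizationCandidateSize
        && code.okToOptimize;
}

bool mightInline(const CandidateCodeBlock& code)
{
    return code.instructionCount <= kMaxInlineCandidateSize
        && code.inliningCandidate;
}
```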
+ switch (opcodeID) { case op_enter: - case op_touch_entry: case op_to_this: + case op_check_tdz: case op_create_this: - case op_get_callee: case op_bitand: case op_bitor: case op_bitxor: @@ -103,15 +125,18 @@ CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, Instruc case op_debug: case op_profile_will_call: case op_profile_did_call: + case op_profile_type: + case op_profile_control_flow: case op_mov: - case op_captured_mov: - case op_check_has_instance: + case op_overrides_has_instance: case op_instanceof: + case op_instanceof_custom: case op_is_undefined: case op_is_boolean: case op_is_number: case op_is_string: case op_is_object: + case op_is_object_or_null: case op_is_function: case op_not: case op_less: @@ -128,16 +153,13 @@ CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, Instruc case op_put_by_val: case op_put_by_val_direct: case op_get_by_id: - case op_get_by_id_out_of_line: case op_get_array_length: case op_put_by_id: - case op_put_by_id_out_of_line: - case op_put_by_id_transition_direct: - case op_put_by_id_transition_direct_out_of_line: - case op_put_by_id_transition_normal: - case op_put_by_id_transition_normal_out_of_line: - case op_init_global_const_nop: - case op_init_global_const: + case op_put_getter_by_id: + case op_put_setter_by_id: + case op_put_getter_setter_by_id: + case op_put_getter_by_val: + case op_put_setter_by_val: case op_jmp: case op_jtrue: case op_jfalse: @@ -152,6 +174,7 @@ CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, Instruc case op_jngreater: case op_jngreatereq: case op_loop_hint: + case op_watchdog: case op_ret: case op_end: case op_new_object: @@ -163,23 +186,48 @@ CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, Instruc case op_throw: case op_throw_static_error: case op_call: + case op_tail_call: case op_construct: - case op_init_lazy_reg: - case op_create_arguments: - case op_tear_off_arguments: - case op_get_argument_by_val: - case op_get_arguments_length: + case op_call_varargs: + case op_tail_call_varargs: + case op_construct_varargs: + case op_create_direct_arguments: + case op_create_scoped_arguments: + case op_create_out_of_band_arguments: + case op_get_from_arguments: + case op_put_to_arguments: case op_jneq_ptr: case op_typeof: case op_to_number: + case op_to_string: case op_switch_imm: case op_switch_char: case op_in: + case op_get_scope: case op_get_from_scope: + case op_get_enumerable_length: + case op_has_generic_property: + case op_has_structure_property: + case op_has_indexed_property: + case op_get_direct_pname: + case op_get_property_enumerator: + case op_enumerator_structure_pname: + case op_enumerator_generic_pname: + case op_to_index_string: + case op_new_func: + case op_new_func_exp: + case op_new_generator_func: + case op_new_generator_func_exp: + case op_new_arrow_func_exp: + case op_create_lexical_environment: + case op_get_parent_scope: + case op_catch: + case op_copy_rest: + case op_get_rest_length: return CanCompileAndInline; case op_put_to_scope: { - ResolveType resolveType = ResolveModeAndType(pc[4].u.operand).type(); + ResolveType resolveType = GetPutInfo(pc[4].u.operand).resolveType(); // If we're writing to a readonly property we emit a Dynamic put that // the DFG can't currently handle. 
if (resolveType == Dynamic) @@ -189,23 +237,13 @@ CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, Instruc case op_resolve_scope: { // We don't compile 'catch' or 'with', so there's no point in compiling variable resolution within them. - ResolveType resolveType = ResolveModeAndType(pc[3].u.operand).type(); + ResolveType resolveType = static_cast<ResolveType>(pc[4].u.operand); if (resolveType == Dynamic) return CannotCompile; return CanCompileAndInline; } - case op_call_varargs: - if (codeBlock->usesArguments() && pc[4].u.operand == codeBlock->argumentsRegister().offset()) - return CanInline; - return CannotCompile; - - case op_new_regexp: - case op_create_activation: - case op_tear_off_activation: - case op_new_func: - case op_new_captured_func: - case op_new_func_exp: + case op_new_regexp: case op_switch_string: // Don't inline because we don't want to copy string tables in the concurrent JIT. return CanCompile; @@ -244,8 +282,6 @@ CapabilityLevel capabilityLevel(CodeBlock* codeBlock) return result; } -#endif - } } // namespace JSC::DFG #endif diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.h b/Source/JavaScriptCore/dfg/DFGCapabilities.h index 5bd80c517..4010bb291 100644 --- a/Source/JavaScriptCore/dfg/DFGCapabilities.h +++ b/Source/JavaScriptCore/dfg/DFGCapabilities.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,18 +28,18 @@ #include "CodeBlock.h" #include "DFGCommon.h" -#include "DFGNode.h" #include "Executable.h" #include "Interpreter.h" #include "Intrinsic.h" #include "Options.h" -#include <wtf/Platform.h> namespace JSC { namespace DFG { #if ENABLE(DFG_JIT) // Fast check functions; if they return true it is still necessary to // check opcodes. 
+bool isSupported(); +bool isSupportedForInlining(CodeBlock*); bool mightCompileEval(CodeBlock*); bool mightCompileProgram(CodeBlock*); bool mightCompileFunctionForCall(CodeBlock*); @@ -80,35 +80,56 @@ inline CapabilityLevel programCapabilityLevel(CodeBlock* codeBlock) return capabilityLevel(codeBlock); } -inline CapabilityLevel functionForCallCapabilityLevel(CodeBlock* codeBlock) +inline CapabilityLevel functionCapabilityLevel(bool mightCompile, bool mightInline, CapabilityLevel computedCapabilityLevel) { - if (!mightCompileFunctionForCall(codeBlock)) + if (mightCompile && mightInline) + return leastUpperBound(CanCompileAndInline, computedCapabilityLevel); + if (mightCompile && !mightInline) + return leastUpperBound(CanCompile, computedCapabilityLevel); + if (!mightCompile) return CannotCompile; - - return capabilityLevel(codeBlock); + RELEASE_ASSERT_NOT_REACHED(); + return CannotCompile; +} + +inline CapabilityLevel functionForCallCapabilityLevel(CodeBlock* codeBlock) +{ + return functionCapabilityLevel( + mightCompileFunctionForCall(codeBlock), + mightInlineFunctionForCall(codeBlock), + capabilityLevel(codeBlock)); } inline CapabilityLevel functionForConstructCapabilityLevel(CodeBlock* codeBlock) { - if (!mightCompileFunctionForConstruct(codeBlock)) - return CannotCompile; - - return capabilityLevel(codeBlock); + return functionCapabilityLevel( + mightCompileFunctionForConstruct(codeBlock), + mightInlineFunctionForConstruct(codeBlock), + capabilityLevel(codeBlock)); } -inline bool canInlineFunctionForCall(CodeBlock* codeBlock) +inline CapabilityLevel inlineFunctionForCallCapabilityLevel(CodeBlock* codeBlock) { - return mightInlineFunctionForCall(codeBlock) && canInline(capabilityLevel(codeBlock)); + if (!mightInlineFunctionForCall(codeBlock)) + return CannotCompile; + + return capabilityLevel(codeBlock); } -inline bool canInlineFunctionForClosureCall(CodeBlock* codeBlock) +inline CapabilityLevel inlineFunctionForClosureCallCapabilityLevel(CodeBlock* codeBlock) { - return mightInlineFunctionForClosureCall(codeBlock) && canInline(capabilityLevel(codeBlock)); + if (!mightInlineFunctionForClosureCall(codeBlock)) + return CannotCompile; + + return capabilityLevel(codeBlock); } -inline bool canInlineFunctionForConstruct(CodeBlock* codeBlock) +inline CapabilityLevel inlineFunctionForConstructCapabilityLevel(CodeBlock* codeBlock) { - return mightInlineFunctionForConstruct(codeBlock) && canInline(capabilityLevel(codeBlock)); + if (!mightInlineFunctionForConstruct(codeBlock)) + return CannotCompile; + + return capabilityLevel(codeBlock); } inline bool mightInlineFunctionFor(CodeBlock* codeBlock, CodeSpecializationKind kind) @@ -119,21 +140,35 @@ inline bool mightInlineFunctionFor(CodeBlock* codeBlock, CodeSpecializationKind return mightInlineFunctionForConstruct(codeBlock); } +inline bool mightCompileFunctionFor(CodeBlock* codeBlock, CodeSpecializationKind kind) +{ + if (kind == CodeForCall) + return mightCompileFunctionForCall(codeBlock); + ASSERT(kind == CodeForConstruct); + return mightCompileFunctionForConstruct(codeBlock); +} + inline bool mightInlineFunction(CodeBlock* codeBlock) { return mightInlineFunctionFor(codeBlock, codeBlock->specializationKind()); } -inline bool canInlineFunctionFor(CodeBlock* codeBlock, CodeSpecializationKind kind, bool isClosureCall) +inline CapabilityLevel inlineFunctionForCapabilityLevel(CodeBlock* codeBlock, CodeSpecializationKind kind, bool isClosureCall) { if (isClosureCall) { - ASSERT(kind == CodeForCall); - return canInlineFunctionForClosureCall(codeBlock); 
+ if (kind != CodeForCall) + return CannotCompile; + return inlineFunctionForClosureCallCapabilityLevel(codeBlock); } if (kind == CodeForCall) - return canInlineFunctionForCall(codeBlock); + return inlineFunctionForCallCapabilityLevel(codeBlock); ASSERT(kind == CodeForConstruct); - return canInlineFunctionForConstruct(codeBlock); + return inlineFunctionForConstructCapabilityLevel(codeBlock); +} + +inline bool isSmallEnoughToInlineCodeInto(CodeBlock* codeBlock) +{ + return codeBlock->instructionCount() <= Options::maximumInliningCallerSize(); } } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGCleanUpPhase.cpp b/Source/JavaScriptCore/dfg/DFGCleanUpPhase.cpp new file mode 100644 index 000000000..313094c39 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGCleanUpPhase.cpp @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "DFGCleanUpPhase.h" + +#if ENABLE(DFG_JIT) + +#include "DFGGraph.h" +#include "DFGInsertionSet.h" +#include "DFGPhase.h" +#include "DFGPredictionPropagationPhase.h" +#include "DFGVariableAccessDataDump.h" +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +class CleanUpPhase : public Phase { +public: + CleanUpPhase(Graph& graph) + : Phase(graph, "clean up") + { + } + + bool run() + { + bool changed = false; + + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + unsigned sourceIndex = 0; + unsigned targetIndex = 0; + while (sourceIndex < block->size()) { + Node* node = block->at(sourceIndex++); + bool kill = false; + + if (node->op() == Check) + node->children = node->children.justChecks(); + + switch (node->op()) { + case Phantom: + case Check: + if (node->children.isEmpty()) + kill = true; + break; + default: + break; + } + + if (kill) + m_graph.m_allocator.free(node); + else + block->at(targetIndex++) = node; + } + block->resize(targetIndex); + } + + return changed; + } +}; + +bool performCleanUp(Graph& graph) +{ + SamplingRegion samplingRegion("DFG Clean Up Phase"); + return runPhase<CleanUpPhase>(graph); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGCleanUpPhase.h b/Source/JavaScriptCore/dfg/DFGCleanUpPhase.h new file mode 100644 index 000000000..3a1bc6916 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGCleanUpPhase.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGCleanUpPhase_h +#define DFGCleanUpPhase_h + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +class Graph; + +// Cleans up unneeded nodes, like empty Checks and Phantoms. + +bool performCleanUp(Graph&); + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGCleanUpPhase_h diff --git a/Source/JavaScriptCore/dfg/DFGClobberSet.cpp b/Source/JavaScriptCore/dfg/DFGClobberSet.cpp index 791314172..d4630e370 100644 --- a/Source/JavaScriptCore/dfg/DFGClobberSet.cpp +++ b/Source/JavaScriptCore/dfg/DFGClobberSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,7 +29,7 @@ #if ENABLE(DFG_JIT) #include "DFGClobberize.h" -#include "Operations.h" +#include "JSCInlines.h" #include <wtf/ListDump.h> namespace JSC { namespace DFG { @@ -122,37 +122,38 @@ HashSet<AbstractHeap> ClobberSet::setOf(bool direct) const void addReads(Graph& graph, Node* node, ClobberSet& readSet) { ClobberSetAdd addRead(readSet); - NoOpClobberize addWrite; - clobberize(graph, node, addRead, addWrite); + NoOpClobberize noOp; + clobberize(graph, node, addRead, noOp, noOp); } void addWrites(Graph& graph, Node* node, ClobberSet& writeSet) { - NoOpClobberize addRead; + NoOpClobberize noOp; ClobberSetAdd addWrite(writeSet); - clobberize(graph, node, addRead, addWrite); + clobberize(graph, node, noOp, addWrite, noOp); } void addReadsAndWrites(Graph& graph, Node* node, ClobberSet& readSet, ClobberSet& writeSet) { ClobberSetAdd addRead(readSet); ClobberSetAdd addWrite(writeSet); - clobberize(graph, node, addRead, addWrite); + NoOpClobberize noOp; + clobberize(graph, node, addRead, addWrite, noOp); } bool readsOverlap(Graph& graph, Node* node, ClobberSet& readSet) { ClobberSetOverlaps addRead(readSet); - NoOpClobberize addWrite; - clobberize(graph, node, addRead, addWrite); + NoOpClobberize noOp; + clobberize(graph, node, addRead, noOp, noOp); return addRead.result(); } bool writesOverlap(Graph& graph, Node* node, ClobberSet& writeSet) { - NoOpClobberize addRead; + NoOpClobberize noOp; ClobberSetOverlaps addWrite(writeSet); - clobberize(graph, node, addRead, addWrite); + clobberize(graph, node, noOp, addWrite, noOp); return addWrite.result(); } diff --git a/Source/JavaScriptCore/dfg/DFGClobberSet.h b/Source/JavaScriptCore/dfg/DFGClobberSet.h index 18514f61b..d76d3559d 100644 --- a/Source/JavaScriptCore/dfg/DFGClobberSet.h +++ b/Source/JavaScriptCore/dfg/DFGClobberSet.h @@ -26,8 +26,6 @@ #ifndef DFGClobberSet_h #define DFGClobberSet_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGAbstractHeap.h" @@ -82,7 +80,7 @@ public: { } - void operator()(AbstractHeap heap) + void operator()(AbstractHeap heap) const { m_set.add(heap); } @@ -98,7 +96,7 @@ public: { } - void operator()(AbstractHeap heap) + void operator()(AbstractHeap heap) const { m_result |= m_set.overlaps(heap); } @@ -107,7 +105,7 @@ public: private: const ClobberSet& m_set; - bool m_result; + mutable bool m_result; }; void addReads(Graph&, Node*, ClobberSet&); diff --git a/Source/JavaScriptCore/dfg/DFGClobberize.cpp b/Source/JavaScriptCore/dfg/DFGClobberize.cpp index be6185629..a693ba41b 100644 --- a/Source/JavaScriptCore/dfg/DFGClobberize.cpp +++ b/Source/JavaScriptCore/dfg/DFGClobberize.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,26 +28,53 @@ #if ENABLE(DFG_JIT) -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { bool doesWrites(Graph& graph, Node* node) { - NoOpClobberize addRead; + NoOpClobberize noOp; CheckClobberize addWrite; - clobberize(graph, node, addRead, addWrite); + clobberize(graph, node, noOp, addWrite, noOp); return addWrite.result(); } +bool accessesOverlap(Graph& graph, Node* node, AbstractHeap heap) +{ + NoOpClobberize noOp; + AbstractHeapOverlaps addAccess(heap); + clobberize(graph, node, addAccess, addAccess, noOp); + return addAccess.result(); +} + bool writesOverlap(Graph& graph, Node* node, AbstractHeap heap) { - NoOpClobberize addRead; + NoOpClobberize noOp; AbstractHeapOverlaps addWrite(heap); - clobberize(graph, node, addRead, addWrite); + clobberize(graph, node, noOp, addWrite, noOp); return addWrite.result(); } +bool clobbersHeap(Graph& graph, Node* node) +{ + bool result = false; + clobberize( + graph, node, NoOpClobberize(), + [&] (AbstractHeap heap) { + switch (heap.kind()) { + case World: + case Heap: + result = true; + break; + default: + break; + } + }, + NoOpClobberize()); + return result; +} + } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGClobberize.h b/Source/JavaScriptCore/dfg/DFGClobberize.h index 6f2a03bf6..461217772 100644 --- a/Source/JavaScriptCore/dfg/DFGClobberize.h +++ b/Source/JavaScriptCore/dfg/DFGClobberize.h @@ -1,5 +1,5 @@ - /* - * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. +/* + * Copyright (C) 2013-2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,30 +26,28 @@ #ifndef DFGClobberize_h #define DFGClobberize_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGAbstractHeap.h" #include "DFGEdgeUsesStructure.h" #include "DFGGraph.h" +#include "DFGHeapLocation.h" +#include "DFGLazyNode.h" +#include "DFGPureValue.h" namespace JSC { namespace DFG { -template<typename ReadFunctor, typename WriteFunctor> -void clobberizeForAllocation(ReadFunctor& read, WriteFunctor& write) -{ - read(GCState); - read(BarrierState); - write(GCState); - write(BarrierState); -} - -template<typename ReadFunctor, typename WriteFunctor> -void clobberize(Graph& graph, Node* node, ReadFunctor& read, WriteFunctor& write) +template<typename ReadFunctor, typename WriteFunctor, typename DefFunctor> +void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFunctor& write, const DefFunctor& def) { // Some notes: // + // - The canonical way of clobbering the world is to read world and write + // heap. This is because World subsumes Heap and Stack, and Stack can be + // read by anyone but only written to by explicit stack writing operations. + // Of course, claiming to also write World is not wrong; it'll just + // pessimise some important optimizations. + // // - We cannot hoist, or sink, anything that has effects. This means that the // easiest way of indicating that something cannot be hoisted is to claim // that it side-effects some miscellaneous thing. @@ -62,9 +60,9 @@ void clobberize(Graph& graph, Node* node, ReadFunctor& read, WriteFunctor& write // versions of those nodes that backward-exit instead, but I'm not convinced // of the soundness. 
// - // - Some nodes lie, and claim that they do not read the JSCell_structure. - // These are nodes that use the structure in a way that does not depend on - // things that change under structure transitions. + // - Some nodes lie, and claim that they do not read the JSCell_structureID, + // JSCell_typeInfoFlags, etc. These are nodes that use the structure in a way + // that does not depend on things that change under structure transitions. // // - It's implicitly understood that OSR exits read the world. This is why we // generally don't move or eliminate stores. Every node can exit, so the @@ -79,153 +77,451 @@ void clobberize(Graph& graph, Node* node, ReadFunctor& read, WriteFunctor& write // can use it for IR dumps. No promises on whether the answers are sound // prior to type inference - though they probably could be if we did some // small hacking. + // + // - If you do read(Stack) or read(World), then make sure that readTop() in + // PreciseLocalClobberize is correct. + // While read() and write() are fairly self-explanatory - they track what sorts of things the + // node may read or write - the def() functor is more tricky. It tells you the heap locations + // (not just abstract heaps) that are defined by a node. A heap location comprises an abstract + // heap, some nodes, and a LocationKind. Briefly, a location defined by a node is a location + // whose value can be deduced from looking at the node itself. The locations returned must obey + // the following properties: + // + // - If someone wants to CSE a load from the heap, then a HeapLocation object should be + // sufficient to find a single matching node. + // + // - The abstract heap is the only abstract heap that could be clobbered to invalidate any such + // CSE attempt. I.e. if clobberize() reports that on every path between some node and a node + // that defines a HeapLocation that it wanted, there were no writes to any abstract heap that + // overlap the location's heap, then we have a sound match. Effectively, the semantics of + // write() and def() are intertwined such that for them to be sound they must agree on what + // is CSEable. + // + // read(), write(), and def() for heap locations is enough to do GCSE on effectful things. To + // keep things simple, this code will also def() pure things. def() must be overloaded to also + // accept PureValue. This way, a client of clobberize() can implement GCSE entirely using the + // information that clobberize() passes to write() and def(). Other clients of clobberize() can + // just ignore def() by using a NoOpClobberize functor. 
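As a concrete illustration of the notes above, here is a minimal sketch (not part of the patch) of a clobberize() client written in the same style as the new clobbersHeap() helper: it collects every abstract heap a node may write, passing NoOpClobberize for the read and def functors. The helper name collectWrittenHeaps is hypothetical and assumes the declarations from DFGClobberize.h.

// Hypothetical helper: gathers the abstract heaps written by a node.
// Read and def events are ignored via NoOpClobberize, mirroring clobbersHeap().
Vector<AbstractHeap> collectWrittenHeaps(Graph& graph, Node* node)
{
    Vector<AbstractHeap> result;
    clobberize(
        graph, node, NoOpClobberize(),
        [&] (AbstractHeap heap) {
            result.append(heap);
        },
        NoOpClobberize());
    return result;
}

A client that also wants to drive CSE would replace the final NoOpClobberize() with a functor overloaded for PureValue and for HeapLocation/LazyNode pairs, or use the ReadMethodClobberize, WriteMethodClobberize, and DefMethodClobberize adaptors defined later in this header.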
+ if (edgesUseStructure(graph, node)) - read(JSCell_structure); + read(JSCell_structureID); switch (node->op()) { case JSConstant: - case WeakJSConstant: + case DoubleConstant: + case Int52Constant: + def(PureValue(node, node->constant())); + return; + case Identity: case Phantom: - case Breakpoint: - case ProfileWillCall: - case ProfileDidCall: - case BitAnd: - case BitOr: - case BitXor: - case BitLShift: - case BitRShift: - case BitURShift: - case ValueToInt32: - case ArithAdd: - case ArithSub: - case ArithNegate: - case ArithMul: + case Check: + case ExtractOSREntryLocal: + case CheckStructureImmediate: + return; + case ArithIMul: - case ArithDiv: - case ArithMod: case ArithAbs: + case ArithClz32: case ArithMin: case ArithMax: + case ArithPow: case ArithSqrt: + case ArithFRound: case ArithSin: case ArithCos: + case ArithLog: case GetScope: case SkipScope: - case CheckFunction: case StringCharCodeAt: - case StringFromCharCode: - case CompareEqConstant: - case CompareStrictEqConstant: case CompareStrictEq: case IsUndefined: case IsBoolean: case IsNumber: case IsString: + case IsObject: case LogicalNot: - case Int32ToDouble: - case ExtractOSREntryLocal: - case Int52ToDouble: - case Int52ToValue: case CheckInBounds: - case ConstantStoragePointer: - case UInt32ToNumber: - case DoubleAsInt32: - case Check: + case DoubleRep: + case ValueRep: + case Int52Rep: + case BooleanToNumber: + case FiatInt52: + case MakeRope: + case StrCat: + case ValueToInt32: + case GetExecutable: + case BottomValue: + case TypeOf: + def(PureValue(node)); + return; + + case BitAnd: + case BitOr: + case BitXor: + case BitLShift: + case BitRShift: + case BitURShift: + if (node->child1().useKind() == UntypedUse || node->child2().useKind() == UntypedUse) { + read(World); + write(Heap); + return; + } + def(PureValue(node)); + return; + + case ArithRandom: + read(MathDotRandomState); + write(MathDotRandomState); return; + case HasGenericProperty: + case HasStructureProperty: + case GetEnumerableLength: + case GetPropertyEnumerator: { + read(Heap); + write(SideState); + return; + } + + case GetDirectPname: { + // This reads and writes heap because it can end up calling a generic getByVal + // if the Structure changed, which could in turn end up calling a getter. 
+ read(World); + write(Heap); + return; + } + + case ToIndexString: + case GetEnumeratorStructurePname: + case GetEnumeratorGenericPname: { + def(PureValue(node)); + return; + } + + case HasIndexedProperty: { + read(JSObject_butterfly); + ArrayMode mode = node->arrayMode(); + switch (mode.type()) { + case Array::Int32: { + if (mode.isInBounds()) { + read(Butterfly_publicLength); + read(IndexedInt32Properties); + def(HeapLocation(HasIndexedPropertyLoc, IndexedInt32Properties, node->child1(), node->child2()), LazyNode(node)); + return; + } + read(Heap); + return; + } + + case Array::Double: { + if (mode.isInBounds()) { + read(Butterfly_publicLength); + read(IndexedDoubleProperties); + def(HeapLocation(HasIndexedPropertyLoc, IndexedDoubleProperties, node->child1(), node->child2()), LazyNode(node)); + return; + } + read(Heap); + return; + } + + case Array::Contiguous: { + if (mode.isInBounds()) { + read(Butterfly_publicLength); + read(IndexedContiguousProperties); + def(HeapLocation(HasIndexedPropertyLoc, IndexedContiguousProperties, node->child1(), node->child2()), LazyNode(node)); + return; + } + read(Heap); + return; + } + + case Array::ArrayStorage: { + if (mode.isInBounds()) { + read(Butterfly_vectorLength); + read(IndexedArrayStorageProperties); + return; + } + read(Heap); + return; + } + + default: { + read(World); + write(Heap); + return; + } + } + RELEASE_ASSERT_NOT_REACHED(); + return; + } + + case StringFromCharCode: + switch (node->child1().useKind()) { + case Int32Use: + def(PureValue(node)); + return; + case UntypedUse: + read(World); + write(Heap); + return; + default: + DFG_CRASH(graph, node, "Bad use kind"); + } + return; + + case ArithAdd: + case ArithNegate: + case ArithMod: + case DoubleAsInt32: + case UInt32ToNumber: + def(PureValue(node, node->arithMode())); + return; + + case ArithDiv: + case ArithMul: + case ArithSub: + switch (node->binaryUseKind()) { + case Int32Use: + case Int52RepUse: + case DoubleRepUse: + def(PureValue(node, node->arithMode())); + return; + case UntypedUse: + read(World); + write(Heap); + return; + default: + DFG_CRASH(graph, node, "Bad use kind"); + } + + case ArithRound: + case ArithFloor: + case ArithCeil: + def(PureValue(node, static_cast<uintptr_t>(node->arithRoundingMode()))); + return; + + case CheckCell: + def(PureValue(CheckCell, AdjacencyList(AdjacencyList::Fixed, node->child1()), node->cellOperand())); + return; + + case CheckNotEmpty: + def(PureValue(CheckNotEmpty, AdjacencyList(AdjacencyList::Fixed, node->child1()))); + return; + + case CheckIdent: + def(PureValue(CheckIdent, AdjacencyList(AdjacencyList::Fixed, node->child1()), node->uidOperand())); + return; + + case ConstantStoragePointer: + def(PureValue(node, node->storagePointer())); + return; + case MovHint: case ZombieHint: + case ExitOK: + case KillStack: case Upsilon: case Phi: - case Flush: case PhantomLocal: case SetArgument: - case PhantomArguments: case Jump: case Branch: case Switch: case Throw: case ForceOSRExit: + case CheckBadCell: case Return: case Unreachable: case CheckTierUpInLoop: case CheckTierUpAtReturn: case CheckTierUpAndOSREnter: + case CheckTierUpWithNestedTriggerAndOSREnter: case LoopHint: - case InvalidationPoint: + case Breakpoint: + case ProfileWillCall: + case ProfileDidCall: + case ProfileType: + case ProfileControlFlow: + case StoreBarrier: + case PutHint: write(SideState); return; - case VariableWatchpoint: - case TypedArrayWatchpoint: - read(Watchpoint_fire); + case InvalidationPoint: write(SideState); + def(HeapLocation(InvalidationPointLoc, 
Watchpoint_fire), LazyNode(node)); return; - + + case Flush: + read(AbstractHeap(Stack, node->local())); + write(SideState); + return; + case NotifyWrite: write(Watchpoint_fire); write(SideState); return; - case CreateActivation: - case CreateArguments: - clobberizeForAllocation(read, write); - write(SideState); - write(Watchpoint_fire); + case CreateActivation: { + SymbolTable* table = node->castOperand<SymbolTable*>(); + if (table->singletonScope()->isStillValid()) + write(Watchpoint_fire); + read(HeapObjectCount); + write(HeapObjectCount); return; + } - case FunctionReentryWatchpoint: - read(Watchpoint_fire); + case CreateDirectArguments: + case CreateScopedArguments: + case CreateClonedArguments: + read(Stack); + read(HeapObjectCount); + write(HeapObjectCount); + return; + + case PhantomDirectArguments: + case PhantomClonedArguments: + // DFG backend requires that the locals that this reads are flushed. FTL backend can handle those + // locals being promoted. + if (!isFTL(graph.m_plan.mode)) + read(Stack); + + // Even though it's phantom, it still has the property that one can't be replaced with another. + read(HeapObjectCount); + write(HeapObjectCount); return; case ToThis: case CreateThis: read(MiscFields); - clobberizeForAllocation(read, write); + read(HeapObjectCount); + write(HeapObjectCount); return; case VarInjectionWatchpoint: - case AllocationProfileWatchpoint: - case IsObject: + read(MiscFields); + def(HeapLocation(VarInjectionWatchpointLoc, MiscFields), LazyNode(node)); + return; + + case IsObjectOrNull: + read(MiscFields); + def(HeapLocation(IsObjectOrNullLoc, MiscFields, node->child1()), LazyNode(node)); + return; + case IsFunction: - case TypeOf: read(MiscFields); + def(HeapLocation(IsFunctionLoc, MiscFields, node->child1()), LazyNode(node)); return; case GetById: case GetByIdFlush: case PutById: + case PutByIdFlush: case PutByIdDirect: + case PutGetterById: + case PutSetterById: + case PutGetterSetterById: + case PutGetterByVal: + case PutSetterByVal: case ArrayPush: case ArrayPop: case Call: + case TailCallInlinedCaller: case Construct: + case CallVarargs: + case CallForwardVarargs: + case TailCallVarargsInlinedCaller: + case TailCallForwardVarargsInlinedCaller: + case ConstructVarargs: + case ConstructForwardVarargs: case ToPrimitive: case In: - case GetMyArgumentsLengthSafe: - case GetMyArgumentByValSafe: case ValueAdd: read(World); - write(World); + write(Heap); + return; + + case TailCall: + case TailCallVarargs: + case TailCallForwardVarargs: + read(World); + write(SideState); + return; + + case GetGetter: + read(GetterSetter_getter); + def(HeapLocation(GetterLoc, GetterSetter_getter, node->child1()), LazyNode(node)); + return; + + case GetSetter: + read(GetterSetter_setter); + def(HeapLocation(SetterLoc, GetterSetter_setter, node->child1()), LazyNode(node)); return; case GetCallee: - read(AbstractHeap(Variables, JSStack::Callee)); + read(AbstractHeap(Stack, JSStack::Callee)); + def(HeapLocation(StackLoc, AbstractHeap(Stack, JSStack::Callee)), LazyNode(node)); + return; + + case GetArgumentCount: + read(AbstractHeap(Stack, JSStack::ArgumentCount)); + def(HeapLocation(StackPayloadLoc, AbstractHeap(Stack, JSStack::ArgumentCount)), LazyNode(node)); + return; + + case GetRestLength: + read(Stack); return; case GetLocal: - case GetArgument: - read(AbstractHeap(Variables, node->local())); + read(AbstractHeap(Stack, node->local())); + def(HeapLocation(StackLoc, AbstractHeap(Stack, node->local())), LazyNode(node)); return; case SetLocal: - write(AbstractHeap(Variables, 
node->local())); + write(AbstractHeap(Stack, node->local())); + def(HeapLocation(StackLoc, AbstractHeap(Stack, node->local())), LazyNode(node->child1().node())); return; + case GetStack: { + AbstractHeap heap(Stack, node->stackAccessData()->local); + read(heap); + def(HeapLocation(StackLoc, heap), LazyNode(node)); + return; + } + + case PutStack: { + AbstractHeap heap(Stack, node->stackAccessData()->local); + write(heap); + def(HeapLocation(StackLoc, heap), LazyNode(node->child1().node())); + return; + } + + case LoadVarargs: { + read(World); + write(Heap); + LoadVarargsData* data = node->loadVarargsData(); + write(AbstractHeap(Stack, data->count.offset())); + for (unsigned i = data->limit; i--;) + write(AbstractHeap(Stack, data->start.offset() + static_cast<int>(i))); + return; + } + + case ForwardVarargs: { + // We could be way more precise here. + read(Stack); + + LoadVarargsData* data = node->loadVarargsData(); + write(AbstractHeap(Stack, data->count.offset())); + for (unsigned i = data->limit; i--;) + write(AbstractHeap(Stack, data->start.offset() + static_cast<int>(i))); + return; + } + case GetLocalUnlinked: - read(AbstractHeap(Variables, node->unlinkedLocal())); + read(AbstractHeap(Stack, node->unlinkedLocal())); + def(HeapLocation(StackLoc, AbstractHeap(Stack, node->unlinkedLocal())), LazyNode(node)); return; case GetByVal: { @@ -233,10 +529,10 @@ void clobberize(Graph& graph, Node* node, ReadFunctor& read, WriteFunctor& write switch (mode.type()) { case Array::SelectUsingPredictions: case Array::Unprofiled: - case Array::Undecided: + case Array::SelectUsingArguments: // Assume the worst since we don't have profiling yet. read(World); - write(World); + write(Heap); return; case Array::ForceExit: @@ -245,61 +541,75 @@ void clobberize(Graph& graph, Node* node, ReadFunctor& read, WriteFunctor& write case Array::Generic: read(World); - write(World); + write(Heap); return; case Array::String: if (mode.isOutOfBounds()) { read(World); - write(World); + write(Heap); return; } // This appears to read nothing because it's only reading immutable data. 
+ def(PureValue(node, mode.asWord())); + return; + + case Array::DirectArguments: + read(DirectArgumentsProperties); + def(HeapLocation(IndexedPropertyLoc, DirectArgumentsProperties, node->child1(), node->child2()), LazyNode(node)); return; - case Array::Arguments: - read(Arguments_registers); - read(Variables); + case Array::ScopedArguments: + read(ScopeProperties); + def(HeapLocation(IndexedPropertyLoc, ScopeProperties, node->child1(), node->child2()), LazyNode(node)); return; case Array::Int32: if (mode.isInBounds()) { read(Butterfly_publicLength); - read(Butterfly_vectorLength); read(IndexedInt32Properties); + def(HeapLocation(IndexedPropertyLoc, IndexedInt32Properties, node->child1(), node->child2()), LazyNode(node)); return; } read(World); - write(World); + write(Heap); return; case Array::Double: if (mode.isInBounds()) { read(Butterfly_publicLength); - read(Butterfly_vectorLength); read(IndexedDoubleProperties); + def(HeapLocation(IndexedPropertyLoc, IndexedDoubleProperties, node->child1(), node->child2()), LazyNode(node)); return; } read(World); - write(World); + write(Heap); return; case Array::Contiguous: if (mode.isInBounds()) { read(Butterfly_publicLength); - read(Butterfly_vectorLength); read(IndexedContiguousProperties); + def(HeapLocation(IndexedPropertyLoc, IndexedContiguousProperties, node->child1(), node->child2()), LazyNode(node)); return; } read(World); - write(World); + write(Heap); + return; + + case Array::Undecided: + def(PureValue(node)); return; case Array::ArrayStorage: case Array::SlowPutArrayStorage: - // Give up on life for now. + if (mode.isInBounds()) { + read(Butterfly_vectorLength); + read(IndexedArrayStorageProperties); + return; + } read(World); - write(World); + write(Heap); return; case Array::Int8Array: @@ -312,26 +622,41 @@ void clobberize(Graph& graph, Node* node, ReadFunctor& read, WriteFunctor& write case Array::Float32Array: case Array::Float64Array: read(TypedArrayProperties); - read(JSArrayBufferView_vector); - read(JSArrayBufferView_length); + read(MiscFields); + def(HeapLocation(IndexedPropertyLoc, TypedArrayProperties, node->child1(), node->child2()), LazyNode(node)); + return; + // We should not get an AnyTypedArray in a GetByVal as AnyTypedArray is only created from intrinsics, which + // are only added from Inline Caching a GetById. + case Array::AnyTypedArray: + DFG_CRASH(graph, node, "impossible array mode for get"); return; } RELEASE_ASSERT_NOT_REACHED(); return; } + + case GetMyArgumentByVal: { + read(Stack); + // FIXME: It would be trivial to have a def here. + // https://bugs.webkit.org/show_bug.cgi?id=143077 + return; + } case PutByValDirect: case PutByVal: case PutByValAlias: { ArrayMode mode = node->arrayMode(); + Node* base = graph.varArgChild(node, 0).node(); + Node* index = graph.varArgChild(node, 1).node(); + Node* value = graph.varArgChild(node, 2).node(); switch (mode.modeForPut().type()) { case Array::SelectUsingPredictions: + case Array::SelectUsingArguments: case Array::Unprofiled: case Array::Undecided: - case Array::String: // Assume the worst since we don't have profiling yet. 
read(World); - write(World); + write(Heap); return; case Array::ForceExit: @@ -340,57 +665,59 @@ void clobberize(Graph& graph, Node* node, ReadFunctor& read, WriteFunctor& write case Array::Generic: read(World); - write(World); - return; - - case Array::Arguments: - read(Arguments_registers); - read(Arguments_numArguments); - read(Arguments_slowArguments); - write(Variables); + write(Heap); return; case Array::Int32: if (node->arrayMode().isOutOfBounds()) { read(World); - write(World); + write(Heap); return; } read(Butterfly_publicLength); read(Butterfly_vectorLength); read(IndexedInt32Properties); write(IndexedInt32Properties); + if (node->arrayMode().mayStoreToHole()) + write(Butterfly_publicLength); + def(HeapLocation(IndexedPropertyLoc, IndexedInt32Properties, base, index), LazyNode(value)); return; case Array::Double: if (node->arrayMode().isOutOfBounds()) { read(World); - write(World); + write(Heap); return; } read(Butterfly_publicLength); read(Butterfly_vectorLength); read(IndexedDoubleProperties); write(IndexedDoubleProperties); + if (node->arrayMode().mayStoreToHole()) + write(Butterfly_publicLength); + def(HeapLocation(IndexedPropertyLoc, IndexedDoubleProperties, base, index), LazyNode(value)); return; case Array::Contiguous: if (node->arrayMode().isOutOfBounds()) { read(World); - write(World); + write(Heap); return; } read(Butterfly_publicLength); read(Butterfly_vectorLength); read(IndexedContiguousProperties); write(IndexedContiguousProperties); + if (node->arrayMode().mayStoreToHole()) + write(Butterfly_publicLength); + def(HeapLocation(IndexedPropertyLoc, IndexedContiguousProperties, base, index), LazyNode(value)); return; case Array::ArrayStorage: case Array::SlowPutArrayStorage: // Give up on life for now. read(World); - write(World); + write(Heap); return; case Array::Int8Array: @@ -402,9 +729,16 @@ void clobberize(Graph& graph, Node* node, ReadFunctor& read, WriteFunctor& write case Array::Uint32Array: case Array::Float32Array: case Array::Float64Array: - read(JSArrayBufferView_vector); - read(JSArrayBufferView_length); + read(MiscFields); write(TypedArrayProperties); + // FIXME: We can't def() anything here because these operations truncate their inputs. 
+ // https://bugs.webkit.org/show_bug.cgi?id=134737 + return; + case Array::AnyTypedArray: + case Array::String: + case Array::DirectArguments: + case Array::ScopedArguments: + DFG_CRASH(graph, node, "impossible array mode for put"); return; } RELEASE_ASSERT_NOT_REACHED(); @@ -412,66 +746,128 @@ void clobberize(Graph& graph, Node* node, ReadFunctor& read, WriteFunctor& write } case CheckStructure: - case StructureTransitionWatchpoint: + read(JSCell_structureID); + return; + case CheckArray: - case CheckHasInstance: + read(JSCell_indexingType); + read(JSCell_typeInfoType); + read(JSCell_structureID); + return; + + case CheckTypeInfoFlags: + read(JSCell_typeInfoFlags); + def(HeapLocation(CheckTypeInfoFlagsLoc, JSCell_typeInfoFlags, node->child1()), LazyNode(node)); + return; + + case OverridesHasInstance: + read(JSCell_typeInfoFlags); + def(HeapLocation(OverridesHasInstanceLoc, JSCell_typeInfoFlags, node->child1()), LazyNode(node)); + return; + case InstanceOf: - read(JSCell_structure); + read(JSCell_structureID); + def(HeapLocation(InstanceOfLoc, JSCell_structureID, node->child1(), node->child2()), LazyNode(node)); return; - - case CheckExecutable: - read(JSFunction_executable); + + case InstanceOfCustom: + read(World); + write(Heap); return; - + case PutStructure: - case PhantomPutStructure: - write(JSCell_structure); + write(JSCell_structureID); + write(JSCell_typeInfoType); + write(JSCell_typeInfoFlags); + write(JSCell_indexingType); return; case AllocatePropertyStorage: write(JSObject_butterfly); - clobberizeForAllocation(read, write); + def(HeapLocation(ButterflyLoc, JSObject_butterfly, node->child1()), LazyNode(node)); return; case ReallocatePropertyStorage: read(JSObject_butterfly); write(JSObject_butterfly); - clobberizeForAllocation(read, write); + def(HeapLocation(ButterflyLoc, JSObject_butterfly, node->child1()), LazyNode(node)); return; case GetButterfly: read(JSObject_butterfly); + def(HeapLocation(ButterflyLoc, JSObject_butterfly, node->child1()), LazyNode(node)); + return; + + case GetButterflyReadOnly: + // This rule is separate to prevent CSE of GetButterfly with GetButterflyReadOnly. But in reality, + // this works because we don't introduce GetButterflyReadOnly until the bitter end of compilation. 
+ read(JSObject_butterfly); + def(HeapLocation(ButterflyReadOnlyLoc, JSObject_butterfly, node->child1()), LazyNode(node)); return; case Arrayify: case ArrayifyToStructure: - read(JSCell_structure); + read(JSCell_structureID); + read(JSCell_indexingType); read(JSObject_butterfly); - write(JSCell_structure); + write(JSCell_structureID); + write(JSCell_indexingType); write(JSObject_butterfly); - clobberizeForAllocation(read, write); + write(Watchpoint_fire); return; case GetIndexedPropertyStorage: - if (node->arrayMode().type() == Array::String) + if (node->arrayMode().type() == Array::String) { + def(PureValue(node, node->arrayMode().asWord())); return; - read(JSArrayBufferView_vector); + } + read(MiscFields); + def(HeapLocation(IndexedPropertyStorageLoc, MiscFields, node->child1()), LazyNode(node)); return; case GetTypedArrayByteOffset: - read(JSArrayBufferView_vector); - read(JSArrayBufferView_mode); - read(Butterfly_arrayBuffer); - read(ArrayBuffer_data); + read(MiscFields); + def(HeapLocation(TypedArrayByteOffsetLoc, MiscFields, node->child1()), LazyNode(node)); return; case GetByOffset: - read(AbstractHeap(NamedProperties, graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber)); + case GetGetterSetterByOffset: { + unsigned identifierNumber = node->storageAccessData().identifierNumber; + AbstractHeap heap(NamedProperties, identifierNumber); + read(heap); + def(HeapLocation(NamedPropertyLoc, heap, node->child2()), LazyNode(node)); + return; + } + + case MultiGetByOffset: { + read(JSCell_structureID); + read(JSObject_butterfly); + AbstractHeap heap(NamedProperties, node->multiGetByOffsetData().identifierNumber); + read(heap); + def(HeapLocation(NamedPropertyLoc, heap, node->child1()), LazyNode(node)); return; + } + + case MultiPutByOffset: { + read(JSCell_structureID); + read(JSObject_butterfly); + AbstractHeap heap(NamedProperties, node->multiPutByOffsetData().identifierNumber); + write(heap); + if (node->multiPutByOffsetData().writesStructures()) + write(JSCell_structureID); + if (node->multiPutByOffsetData().reallocatesStorage()) + write(JSObject_butterfly); + def(HeapLocation(NamedPropertyLoc, heap, node->child1()), LazyNode(node->child2().node())); + return; + } - case PutByOffset: - write(AbstractHeap(NamedProperties, graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber)); + case PutByOffset: { + unsigned identifierNumber = node->storageAccessData().identifierNumber; + AbstractHeap heap(NamedProperties, identifierNumber); + write(heap); + def(HeapLocation(NamedPropertyLoc, heap, node->child2()), LazyNode(node->child3().node())); return; + } case GetArrayLength: { ArrayMode mode = node->arrayMode(); @@ -482,89 +878,219 @@ void clobberize(Graph& graph, Node* node, ReadFunctor& read, WriteFunctor& write case Array::ArrayStorage: case Array::SlowPutArrayStorage: read(Butterfly_publicLength); + def(HeapLocation(ArrayLengthLoc, Butterfly_publicLength, node->child1()), LazyNode(node)); return; case Array::String: + def(PureValue(node, mode.asWord())); return; - - case Array::Arguments: - read(Arguments_overrideLength); - read(Arguments_numArguments); + + case Array::DirectArguments: + case Array::ScopedArguments: + read(MiscFields); + def(HeapLocation(ArrayLengthLoc, MiscFields, node->child1()), LazyNode(node)); return; - + default: - read(JSArrayBufferView_length); + ASSERT(mode.isSomeTypedArrayView()); + read(MiscFields); + def(HeapLocation(ArrayLengthLoc, MiscFields, node->child1()), LazyNode(node)); return; } } - case GetMyScope: - 
read(AbstractHeap(Variables, JSStack::ScopeChain)); - return; - - case SkipTopScope: - read(AbstractHeap(Variables, graph.activationRegister())); + case GetClosureVar: + read(AbstractHeap(ScopeProperties, node->scopeOffset().offset())); + def(HeapLocation(ClosureVariableLoc, AbstractHeap(ScopeProperties, node->scopeOffset().offset()), node->child1()), LazyNode(node)); return; - case GetClosureRegisters: - read(JSVariableObject_registers); + case PutClosureVar: + write(AbstractHeap(ScopeProperties, node->scopeOffset().offset())); + def(HeapLocation(ClosureVariableLoc, AbstractHeap(ScopeProperties, node->scopeOffset().offset()), node->child1()), LazyNode(node->child2().node())); return; - case GetClosureVar: - read(AbstractHeap(Variables, node->varNumber())); + case GetFromArguments: { + AbstractHeap heap(DirectArgumentsProperties, node->capturedArgumentsOffset().offset()); + read(heap); + def(HeapLocation(DirectArgumentsLoc, heap, node->child1()), LazyNode(node)); return; + } - case PutClosureVar: - write(AbstractHeap(Variables, node->varNumber())); + case PutToArguments: { + AbstractHeap heap(DirectArgumentsProperties, node->capturedArgumentsOffset().offset()); + write(heap); + def(HeapLocation(DirectArgumentsLoc, heap, node->child1()), LazyNode(node->child2().node())); return; + } case GetGlobalVar: - read(AbstractHeap(Absolute, node->registerPointer())); + case GetGlobalLexicalVariable: + read(AbstractHeap(Absolute, node->variablePointer())); + def(HeapLocation(GlobalVariableLoc, AbstractHeap(Absolute, node->variablePointer())), LazyNode(node)); return; - case PutGlobalVar: - write(AbstractHeap(Absolute, node->registerPointer())); + case PutGlobalVariable: + write(AbstractHeap(Absolute, node->variablePointer())); + def(HeapLocation(GlobalVariableLoc, AbstractHeap(Absolute, node->variablePointer())), LazyNode(node->child2().node())); return; - case NewObject: - case NewArray: case NewArrayWithSize: - case NewArrayBuffer: - case NewRegexp: - case NewStringObject: - case MakeRope: - case NewFunctionNoCheck: - case NewFunction: - case NewFunctionExpression: - clobberizeForAllocation(read, write); - return; - case NewTypedArray: - clobberizeForAllocation(read, write); - switch (node->child1().useKind()) { - case Int32Use: + read(HeapObjectCount); + write(HeapObjectCount); + return; + + case NewArray: { + read(HeapObjectCount); + write(HeapObjectCount); + + unsigned numElements = node->numChildren(); + + def(HeapLocation(ArrayLengthLoc, Butterfly_publicLength, node), + LazyNode(graph.freeze(jsNumber(numElements)))); + + if (!numElements) return; - case UntypedUse: - read(World); - write(World); + + AbstractHeap heap; + switch (node->indexingType()) { + case ALL_DOUBLE_INDEXING_TYPES: + heap = IndexedDoubleProperties; + break; + + case ALL_INT32_INDEXING_TYPES: + heap = IndexedInt32Properties; + break; + + case ALL_CONTIGUOUS_INDEXING_TYPES: + heap = IndexedContiguousProperties; + break; + + default: return; + } + + if (numElements < graph.m_uint32ValuesInUse.size()) { + for (unsigned operandIdx = 0; operandIdx < numElements; ++operandIdx) { + Edge use = graph.m_varArgChildren[node->firstChild() + operandIdx]; + def(HeapLocation(IndexedPropertyLoc, heap, node, LazyNode(graph.freeze(jsNumber(operandIdx)))), + LazyNode(use.node())); + } + } else { + for (uint32_t operandIdx : graph.m_uint32ValuesInUse) { + if (operandIdx >= numElements) + continue; + Edge use = graph.m_varArgChildren[node->firstChild() + operandIdx]; + // operandIdx comes from graph.m_uint32ValuesInUse and thus is guaranteed to 
be already frozen + def(HeapLocation(IndexedPropertyLoc, heap, node, LazyNode(graph.freeze(jsNumber(operandIdx)))), + LazyNode(use.node())); + } + } + return; + } + + case NewArrayBuffer: { + read(HeapObjectCount); + write(HeapObjectCount); + + unsigned numElements = node->numConstants(); + def(HeapLocation(ArrayLengthLoc, Butterfly_publicLength, node), + LazyNode(graph.freeze(jsNumber(numElements)))); + + AbstractHeap heap; + NodeType op = JSConstant; + switch (node->indexingType()) { + case ALL_DOUBLE_INDEXING_TYPES: + heap = IndexedDoubleProperties; + op = DoubleConstant; + break; + + case ALL_INT32_INDEXING_TYPES: + heap = IndexedInt32Properties; + break; + + case ALL_CONTIGUOUS_INDEXING_TYPES: + heap = IndexedContiguousProperties; + break; + default: - RELEASE_ASSERT_NOT_REACHED(); return; } - + + JSValue* data = graph.m_codeBlock->constantBuffer(node->startConstant()); + if (numElements < graph.m_uint32ValuesInUse.size()) { + for (unsigned index = 0; index < numElements; ++index) { + def(HeapLocation(IndexedPropertyLoc, heap, node, LazyNode(graph.freeze(jsNumber(index)))), + LazyNode(graph.freeze(data[index]), op)); + } + } else { + Vector<uint32_t> possibleIndices; + for (uint32_t index : graph.m_uint32ValuesInUse) { + if (index >= numElements) + continue; + possibleIndices.append(index); + } + for (uint32_t index : possibleIndices) { + def(HeapLocation(IndexedPropertyLoc, heap, node, LazyNode(graph.freeze(jsNumber(index)))), + LazyNode(graph.freeze(data[index]), op)); + } + } + return; + } + + case CopyRest: { + read(Stack); + write(Heap); + return; + } + + case NewObject: + case NewRegexp: + case NewStringObject: + case PhantomNewObject: + case MaterializeNewObject: + case PhantomNewFunction: + case PhantomNewGeneratorFunction: + case PhantomCreateActivation: + case MaterializeCreateActivation: + read(HeapObjectCount); + write(HeapObjectCount); + return; + + case NewArrowFunction: + case NewFunction: + case NewGeneratorFunction: + if (node->castOperand<FunctionExecutable*>()->singletonFunction()->isStillValid()) + write(Watchpoint_fire); + read(HeapObjectCount); + write(HeapObjectCount); + return; + case RegExpExec: case RegExpTest: read(RegExpState); write(RegExpState); return; + case StringReplace: + if (node->child1().useKind() == StringUse + && node->child2().useKind() == RegExpObjectUse + && node->child3().useKind() == StringUse) { + read(RegExpState); + write(RegExpState); + return; + } + read(World); + write(Heap); + return; + case StringCharAt: if (node->arrayMode().isOutOfBounds()) { read(World); - write(World); + write(Heap); return; } + def(PureValue(node)); return; case CompareEq: @@ -572,53 +1098,36 @@ void clobberize(Graph& graph, Node* node, ReadFunctor& read, WriteFunctor& write case CompareLessEq: case CompareGreater: case CompareGreaterEq: - if (!node->isBinaryUseKind(UntypedUse)) + if (!node->isBinaryUseKind(UntypedUse)) { + def(PureValue(node)); return; + } read(World); - write(World); + write(Heap); return; case ToString: + case CallStringConstructor: switch (node->child1().useKind()) { case StringObjectUse: case StringOrStringObjectUse: + // These don't def a pure value, unfortunately. I'll avoid load-eliminating these for + // now. 
return; case CellUse: case UntypedUse: read(World); - write(World); + write(Heap); return; default: RELEASE_ASSERT_NOT_REACHED(); return; } - - case TearOffActivation: - write(JSVariableObject_registers); - return; - case TearOffArguments: - write(Arguments_registers); - return; - - case GetMyArgumentsLength: - read(AbstractHeap(Variables, graph.argumentsRegisterFor(node->codeOrigin))); - read(AbstractHeap(Variables, JSStack::ArgumentCount)); - return; - - case GetMyArgumentByVal: - read(Variables); - return; - - case CheckArgumentsNotCreated: - read(AbstractHeap(Variables, graph.argumentsRegisterFor(node->codeOrigin))); - return; - case ThrowReferenceError: write(SideState); - clobberizeForAllocation(read, write); return; case CountExecution: @@ -626,26 +1135,20 @@ void clobberize(Graph& graph, Node* node, ReadFunctor& read, WriteFunctor& write read(InternalState); write(InternalState); return; - - case StoreBarrier: - case ConditionalStoreBarrier: - case StoreBarrierWithNullCheck: - read(BarrierState); - write(BarrierState); - return; case LastNodeType: RELEASE_ASSERT_NOT_REACHED(); return; } - RELEASE_ASSERT_NOT_REACHED(); + DFG_CRASH(graph, node, toCString("Unrecognized node type: ", Graph::opName(node->op())).data()); } class NoOpClobberize { public: NoOpClobberize() { } - void operator()(AbstractHeap) { } + template<typename... T> + void operator()(T...) const { } }; class CheckClobberize { @@ -655,12 +1158,13 @@ public: { } - void operator()(AbstractHeap) { m_result = true; } + template<typename... T> + void operator()(T...) const { m_result = true; } bool result() const { return m_result; } private: - bool m_result; + mutable bool m_result; }; bool doesWrites(Graph&, Node*); @@ -673,7 +1177,7 @@ public: { } - void operator()(AbstractHeap otherHeap) + void operator()(AbstractHeap otherHeap) const { if (m_result) return; @@ -684,11 +1188,80 @@ public: private: AbstractHeap m_heap; - bool m_result; + mutable bool m_result; }; +bool accessesOverlap(Graph&, Node*, AbstractHeap); bool writesOverlap(Graph&, Node*, AbstractHeap); +bool clobbersHeap(Graph&, Node*); + +// We would have used bind() for these, but because of the overloading that we are doing, +// it's quite a bit clearer to just write this out the traditional way.
+ +template<typename T> +class ReadMethodClobberize { +public: + ReadMethodClobberize(T& value) + : m_value(value) + { + } + + void operator()(AbstractHeap heap) const + { + m_value.read(heap); + } +private: + T& m_value; +}; + +template<typename T> +class WriteMethodClobberize { +public: + WriteMethodClobberize(T& value) + : m_value(value) + { + } + + void operator()(AbstractHeap heap) const + { + m_value.write(heap); + } +private: + T& m_value; +}; + +template<typename T> +class DefMethodClobberize { +public: + DefMethodClobberize(T& value) + : m_value(value) + { + } + + void operator()(PureValue value) const + { + m_value.def(value); + } + + void operator()(HeapLocation location, LazyNode node) const + { + m_value.def(location, node); + } + +private: + T& m_value; +}; + +template<typename Adaptor> +void clobberize(Graph& graph, Node* node, Adaptor& adaptor) +{ + ReadMethodClobberize<Adaptor> read(adaptor); + WriteMethodClobberize<Adaptor> write(adaptor); + DefMethodClobberize<Adaptor> def(adaptor); + clobberize(graph, node, read, write, def); +} + } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGClobbersExitState.cpp b/Source/JavaScriptCore/dfg/DFGClobbersExitState.cpp new file mode 100644 index 000000000..4e0c50778 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGClobbersExitState.cpp @@ -0,0 +1,105 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGClobbersExitState.h" + +#if ENABLE(DFG_JIT) + +#include "DFGClobberize.h" +#include "DFGGraph.h" +#include "DFGNode.h" +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +bool clobbersExitState(Graph& graph, Node* node) +{ + // There are certain nodes whose effect on the exit state has nothing to do with what they + // normally clobber. + switch (node->op()) { + case MovHint: + case ZombieHint: + case PutHint: + case KillStack: + return true; + + case SetLocal: + case PutStack: + // These nodes write to the stack, but they may only do so after we have already had a MovHint + // for the exact same value and the same stack location. Hence, they have no further effect on + // exit state. 
+ return false; + + case ArrayifyToStructure: + case Arrayify: + case NewObject: + case NewRegexp: + case NewStringObject: + case PhantomNewObject: + case MaterializeNewObject: + case PhantomNewFunction: + case PhantomNewGeneratorFunction: + case PhantomCreateActivation: + case MaterializeCreateActivation: + case CountExecution: + case AllocatePropertyStorage: + case ReallocatePropertyStorage: + // These do clobber memory, but nothing that is observable. It may be nice to separate the + // heaps into those that are observable and those that aren't, but we don't do that right now. + // FIXME: https://bugs.webkit.org/show_bug.cgi?id=148440 + return false; + + case CreateActivation: + // Like above, but with the activation allocation caveat. + return node->castOperand<SymbolTable*>()->singletonScope()->isStillValid(); + + case NewArrowFunction: + case NewFunction: + case NewGeneratorFunction: + // Like above, but with the JSFunction allocation caveat. + return node->castOperand<FunctionExecutable*>()->singletonFunction()->isStillValid(); + + default: + // For all other nodes, we just care about whether they write to something other than SideState. + bool result = false; + clobberize( + graph, node, NoOpClobberize(), + [&] (const AbstractHeap& heap) { + // There shouldn't be such a thing as a strict subtype of SideState. That's what allows + // us to use a fast != check, below. + ASSERT(!heap.isStrictSubtypeOf(SideState)); + + if (heap != SideState) + result = true; + }, + NoOpClobberize()); + return result; + } +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGClobbersExitState.h b/Source/JavaScriptCore/dfg/DFGClobbersExitState.h new file mode 100644 index 000000000..6e4586226 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGClobbersExitState.h @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGClobbersExitState_h +#define DFGClobbersExitState_h + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +class Graph; +struct Node; + +// A conservative approximation of whether the node will perform the kind of effect that would prevent +// subsequent nodes from exiting to this node's exit origin. 
Exiting after an effect to that effect's +// exit origin would cause the effect to execute a second time. Two kinds of such effects can exist: +// +// Observable heap or stack effect: If we perform such an effect and then exit to the same origin, that +// effect will be executed a second time, which is incorrect. +// +// OSR exit state update: This doesn't do any observable side-effect, but it tells OSR exit that it +// should recover some value as if an effect had happened. For example, a MovHint will tell OSR exit +// that some bytecode variable now has a new value. If we exit to the exit origin of a MovHint after we +// "execute" the MovHint, then the bytecode state will look as if we had already executed that bytecode +// instruction. This could cause issues for example for bytecode like: +// +// op_add r1, r1, r2 +// +// which will get lowered to something like: +// +// a: ArithAdd(...) +// b: MovHint(@a, r1) +// +// If we exit to the op_add after executing the MovHint, then r1 will already contain the result of the +// add. Then after exit we'll do the add again, and r1 will have the wrong value. Because of object +// allocation elimination and PutStack sinking, we can also have other OSR exit updates, like +// KillStack, PutHint, among others. They don't do anything so long as we stay in optimized code, but +// they tell OSR exit how to reconstitute state. + +bool clobbersExitState(Graph&, Node*); + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGClobbersExitState_h + diff --git a/Source/JavaScriptCore/dfg/DFGCombinedLiveness.cpp b/Source/JavaScriptCore/dfg/DFGCombinedLiveness.cpp new file mode 100644 index 000000000..a0d93274c --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGCombinedLiveness.cpp @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "DFGCombinedLiveness.h" + +#if ENABLE(DFG_JIT) + +#include "DFGAvailabilityMap.h" +#include "DFGBlockMapInlines.h" +#include "FullBytecodeLiveness.h" +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +HashSet<Node*> liveNodesAtHead(Graph& graph, BasicBlock* block) +{ + HashSet<Node*> seen; + for (Node* node : block->ssa->liveAtHead) + seen.add(node); + + AvailabilityMap& availabilityMap = block->ssa->availabilityAtHead; + graph.forAllLocalsLiveInBytecode( + block->at(0)->origin.forExit, + [&] (VirtualRegister reg) { + availabilityMap.closeStartingWithLocal( + reg, + [&] (Node* node) -> bool { + return seen.contains(node); + }, + [&] (Node* node) -> bool { + return seen.add(node).isNewEntry; + }); + }); + + return seen; +} + +CombinedLiveness::CombinedLiveness(Graph& graph) + : liveAtHead(graph) + , liveAtTail(graph) +{ + // First compute the liveAtHead for each block. + for (BasicBlock* block : graph.blocksInNaturalOrder()) + liveAtHead[block] = liveNodesAtHead(graph, block); + + // Now compute the liveAtTail by unifying the liveAtHead of the successors. + for (BasicBlock* block : graph.blocksInNaturalOrder()) { + for (BasicBlock* successor : block->successors()) { + for (Node* node : liveAtHead[successor]) + liveAtTail[block].add(node); + } + } +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGCombinedLiveness.h b/Source/JavaScriptCore/dfg/DFGCombinedLiveness.h new file mode 100644 index 000000000..ff761cf72 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGCombinedLiveness.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGCombinedLiveness_h +#define DFGCombinedLiveness_h + +#if ENABLE(DFG_JIT) + +#include "DFGBlockMap.h" +#include "DFGGraph.h" + +namespace JSC { namespace DFG { + +// Returns the set of nodes live at tail, both due to due DFG and due to bytecode (i.e. OSR exit). 
+HashSet<Node*> liveNodesAtHead(Graph&, BasicBlock*); + +struct CombinedLiveness { + CombinedLiveness() { } + + CombinedLiveness(Graph&); + + BlockMap<HashSet<Node*>> liveAtHead; + BlockMap<HashSet<Node*>> liveAtTail; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGCombinedLiveness_h + diff --git a/Source/JavaScriptCore/dfg/DFGCommon.cpp b/Source/JavaScriptCore/dfg/DFGCommon.cpp index adb08b595..cd2a12c73 100644 --- a/Source/JavaScriptCore/dfg/DFGCommon.cpp +++ b/Source/JavaScriptCore/dfg/DFGCommon.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,9 +26,38 @@ #include "config.h" #include "DFGCommon.h" +#include "DFGNode.h" +#include "JSCInlines.h" +#include <wtf/PrintStream.h> + #if ENABLE(DFG_JIT) -#include "DFGNode.h" +namespace JSC { namespace DFG { + +static StaticLock crashLock; + +void startCrashing() +{ + crashLock.lock(); +} + +bool isCrashing() +{ + return crashLock.isLocked(); +} + +bool stringLessThan(StringImpl& a, StringImpl& b) +{ + unsigned minLength = std::min(a.length(), b.length()); + for (unsigned i = 0; i < minLength; ++i) { + if (a[i] == b[i]) + continue; + return a[i] < b[i]; + } + return a.length() < b.length(); +} + +} } // namespace JSC::DFG namespace WTF { @@ -109,3 +138,28 @@ void printInternal(PrintStream& out, ProofStatus status) #endif // ENABLE(DFG_JIT) +namespace WTF { + +using namespace JSC::DFG; + +void printInternal(PrintStream& out, CapabilityLevel capabilityLevel) +{ + switch (capabilityLevel) { + case CannotCompile: + out.print("CannotCompile"); + return; + case CanCompile: + out.print("CanCompile"); + return; + case CanCompileAndInline: + out.print("CanCompileAndInline"); + return; + case CapabilityLevelNotSet: + out.print("CapabilityLevelNotSet"); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF + diff --git a/Source/JavaScriptCore/dfg/DFGCommon.h b/Source/JavaScriptCore/dfg/DFGCommon.h index 7b4b1db5f..01a0b35a6 100644 --- a/Source/JavaScriptCore/dfg/DFGCommon.h +++ b/Source/JavaScriptCore/dfg/DFGCommon.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011-2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,11 +26,10 @@ #ifndef DFGCommon_h #define DFGCommon_h -#include <wtf/Platform.h> +#include "DFGCompilationMode.h" #if ENABLE(DFG_JIT) -#include "CodeOrigin.h" #include "Options.h" #include "VirtualRegister.h" @@ -41,11 +40,6 @@ struct Node; typedef uint32_t BlockIndex; static const BlockIndex NoBlock = UINT_MAX; -struct NodePointerTraits { - static Node* defaultValue() { return 0; } - static bool isEmptyForDump(Node* value) { return !value; } -}; - // Use RefChildren if the child ref counts haven't already been adjusted using // other means and either of the following is true: // - The node you're creating is MustGenerate. 
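For context on how the new CombinedLiveness helper is meant to be used: later phases treat it as nothing more than two per-block node sets. The following is a minimal, hypothetical consumer sketch, not code from this patch; countLiveAtTails() is a made-up name, while Graph, BasicBlock, blocksInNaturalOrder(), and the liveAtHead/liveAtTail BlockMaps are the ones introduced above.

    // Hypothetical consumer (illustration only): build the combined
    // DFG + bytecode liveness once, then query it per block.
    static unsigned countLiveAtTails(Graph& graph)
    {
        // liveAtHead is computed per block; liveAtTail is the union of the
        // successors' liveAtHead sets, as done in the constructor above.
        CombinedLiveness liveness(graph);

        unsigned total = 0;
        for (BasicBlock* block : graph.blocksInNaturalOrder())
            total += liveness.liveAtTail[block].size();
        return total;
    }

The reason liveAtTail is formed by unioning the successors' liveAtHead sets is that a node kept alive in any one successor, even if only for OSR exit, must still be treated as live at the end of every predecessor block.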
@@ -63,19 +57,28 @@ enum RefNodeMode { DontRefNode }; -inline bool verboseCompilationEnabled() +enum SwitchKind { + SwitchImm, + SwitchChar, + SwitchString, + SwitchCell +}; + +inline bool verboseCompilationEnabled(CompilationMode mode = DFGMode) { - return Options::verboseCompilation() || Options::dumpGraphAtEachPhase(); + return Options::verboseCompilation() || Options::dumpGraphAtEachPhase() || (isFTL(mode) && Options::verboseFTLCompilation()); } -inline bool logCompilationChanges() +inline bool logCompilationChanges(CompilationMode mode = DFGMode) { - return verboseCompilationEnabled() || Options::logCompilationChanges(); + return verboseCompilationEnabled(mode) || Options::logCompilationChanges(); } -inline bool shouldDumpGraphAtEachPhase() +inline bool shouldDumpGraphAtEachPhase(CompilationMode mode) { - return Options::dumpGraphAtEachPhase(); + if (isFTL(mode)) + return Options::dumpGraphAtEachPhase() || Options::dumpDFGFTLGraphAtEachPhase(); + return Options::dumpGraphAtEachPhase() || Options::dumpDFGGraphAtEachPhase(); } inline bool validationEnabled() @@ -87,15 +90,6 @@ inline bool validationEnabled() #endif } -inline bool enableConcurrentJIT() -{ -#if ENABLE(CONCURRENT_JIT) - return Options::enableConcurrentJIT() && Options::numberOfCompilerThreads(); -#else - return false; -#endif -} - inline bool enableInt52() { #if USE(JSVALUE64) @@ -105,10 +99,60 @@ inline bool enableInt52() #endif } -enum SpillRegistersMode { NeedToSpill, DontSpill }; - enum NoResultTag { NoResult }; +// The prediction propagator effectively does four passes, with the last pass +// being done by the separate FixuPhase. +enum PredictionPass { + // We're converging in a straight-forward forward flow fixpoint. This is the + // most conventional part of the propagator - it makes only monotonic decisions + // based on value profiles and rare case profiles. It ignores baseline JIT rare + // case profiles. The goal here is to develop a good guess of which variables + // are likely to be purely numerical, which generally doesn't require knowing + // the rare case profiles. + PrimaryPass, + + // At this point we know what is numerical and what isn't. Non-numerical inputs + // to arithmetic operations will not have useful information in the Baseline JIT + // rare case profiles because Baseline may take slow path on non-numerical + // inputs even if the DFG could handle the input on the fast path. Boolean + // inputs are the most obvious example. This pass of prediction propagation will + // use Baseline rare case profiles for purely numerical operations and it will + // ignore them for everything else. The point of this pass is to develop a good + // guess of which variables are likely to be doubles. + // + // This pass is intentionally weird and goes against what is considered good + // form when writing a static analysis: a new data flow of booleans will cause + // us to ignore rare case profiles except that by then, we will have already + // propagated double types based on our prior assumption that we shouldn't + // ignore rare cases. This probably won't happen because the PrimaryPass is + // almost certainly going to establish what is and isn't numerical. But it's + // conceivable that during this pass we will discover a new boolean data flow. + // This ends up being sound because the prediction propagator could literally + // make any guesses it wants and still be sound (worst case, we OSR exit more + // often or use too general of types are run a bit slower). 
This will converge + // because we force monotonicity on the types of nodes and variables. So, the + // worst thing that can happen is that we violate basic laws of theoretical + // decency. + RareCasePass, + + // At this point we know what is numerical and what isn't, and we also know what + // is a double and what isn't. So, we start forcing variables to be double. + // Doing so may have a cascading effect so this is a fixpoint. It's monotonic + // in the sense that once a variable is forced double, it cannot be forced in + // the other direction. + DoubleVotingPass, + + // This pass occurs once we have converged. At this point we are just installing + // type checks based on the conclusions we have already reached. It's important + // for this pass to reach the same conclusions that DoubleVotingPass reached. + FixupPass +}; + +enum StructureRegistrationState { HaveNotStartedRegistering, AllStructuresAreRegistered }; + +enum StructureRegistrationResult { StructureRegisteredNormally, StructureRegisteredAndWatched }; + enum OptimizationFixpointState { BeforeFixpoint, FixpointNotConverged, FixpointConverged }; // Describes the form you can expect the entire graph to be in. @@ -145,12 +189,10 @@ enum GraphForm { // expect to be live at the head, and which locals they make available at the // tail. ThreadedCPS form also implies that: // - // - GetLocals and SetLocals to uncaptured variables are not redundant within - // a basic block. + // - GetLocals and SetLocals are not redundant within a basic block. // // - All GetLocals and Flushes are linked directly to the last access point - // of the variable, which must not be another GetLocal if the variable is - // uncaptured. + // of the variable, which must not be another GetLocal. // // - Phantom(Phi) is not legal, but PhantomLocal is. // @@ -208,6 +250,11 @@ inline KillStatus killStatusForDoesKill(bool doesKill) return doesKill ? DoesKill : DoesNotKill; } +enum class PlanStage { + Initial, + AfterFixup +}; + template<typename T, typename U> bool checkAndSet(T& left, U right) { @@ -217,6 +264,40 @@ bool checkAndSet(T& left, U right) return true; } +// If possible, this will acquire a lock to make sure that if multiple threads +// start crashing at the same time, you get coherent dump output. Use this only +// when you're forcing a crash with diagnostics. +void startCrashing(); + +JS_EXPORT_PRIVATE bool isCrashing(); + +struct NodeAndIndex { + NodeAndIndex() + : node(nullptr) + , index(UINT_MAX) + { + } + + NodeAndIndex(Node* node, unsigned index) + : node(node) + , index(index) + { + ASSERT(!node == (index == UINT_MAX)); + } + + bool operator!() const + { + return !node; + } + + Node* node; + unsigned index; +}; + +// A less-than operator for strings that is useful for generating string switches. Sorts by < +// relation on characters. Ensures that if a is a prefix of b, then a < b. +bool stringLessThan(StringImpl& a, StringImpl& b); + } } // namespace JSC::DFG namespace WTF { @@ -235,7 +316,12 @@ namespace JSC { namespace DFG { // Put things here that must be defined even if ENABLE(DFG_JIT) is false. 
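The checkAndSet() helper defined above is the basic primitive behind the monotone fixpoints that the PredictionPass comments describe: a pass keeps sweeping until no stored value moves, and termination follows from only ever moving values in one direction. Below is a hypothetical, self-contained sketch of that pattern; runToFixpoint(), the std::vector input, and the max-merge rule are invented for illustration, and the local checkAndSet mirrors the shape of the DFG helper rather than being the same code.

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Same shape as the DFG helper above: returns true only when the stored
    // value actually changed.
    template<typename T, typename U>
    static bool checkAndSet(T& left, U right)
    {
        if (left == right)
            return false;
        left = right;
        return true;
    }

    // Illustration only: run a monotone update until nothing changes. Each
    // slot can only grow (toward the max of itself and its left neighbor),
    // so the outer loop is guaranteed to terminate.
    static bool runToFixpoint(std::vector<int>& values)
    {
        bool everChanged = false;
        bool changed = true;
        while (changed) {
            changed = false;
            for (std::size_t i = 1; i < values.size(); ++i)
                changed |= checkAndSet(values[i], std::max(values[i], values[i - 1]));
            everChanged |= changed;
        }
        return everChanged;
    }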
-enum CapabilityLevel { CannotCompile, CanInline, CanCompile, CanCompileAndInline, CapabilityLevelNotSet }; +enum CapabilityLevel { + CannotCompile, + CanCompile, + CanCompileAndInline, + CapabilityLevelNotSet +}; inline bool canCompile(CapabilityLevel level) { @@ -251,7 +337,6 @@ inline bool canCompile(CapabilityLevel level) inline bool canInline(CapabilityLevel level) { switch (level) { - case CanInline: case CanCompileAndInline: return true; default: @@ -264,14 +349,6 @@ inline CapabilityLevel leastUpperBound(CapabilityLevel a, CapabilityLevel b) switch (a) { case CannotCompile: return CannotCompile; - case CanInline: - switch (b) { - case CanInline: - case CanCompileAndInline: - return CanInline; - default: - return CannotCompile; - } case CanCompile: switch (b) { case CanCompile: @@ -291,16 +368,23 @@ inline CapabilityLevel leastUpperBound(CapabilityLevel a, CapabilityLevel b) } // Unconditionally disable DFG disassembly support if the DFG is not compiled in. -inline bool shouldShowDisassembly() +inline bool shouldDumpDisassembly(CompilationMode mode = DFGMode) { #if ENABLE(DFG_JIT) - return Options::showDisassembly() || Options::showDFGDisassembly(); + return Options::dumpDisassembly() || Options::dumpDFGDisassembly() || (isFTL(mode) && Options::dumpFTLDisassembly()); #else + UNUSED_PARAM(mode); return false; #endif } } } // namespace JSC::DFG +namespace WTF { + +void printInternal(PrintStream&, JSC::DFG::CapabilityLevel); + +} // namespace WTF + #endif // DFGCommon_h diff --git a/Source/JavaScriptCore/dfg/DFGCommonData.cpp b/Source/JavaScriptCore/dfg/DFGCommonData.cpp index 7b7ed0e40..ee7ec3b5a 100644 --- a/Source/JavaScriptCore/dfg/DFGCommonData.cpp +++ b/Source/JavaScriptCore/dfg/DFGCommonData.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,7 +31,9 @@ #include "CodeBlock.h" #include "DFGNode.h" #include "DFGPlan.h" -#include "Operations.h" +#include "InlineCallFrame.h" +#include "JSCInlines.h" +#include "TrackedReferences.h" #include "VM.h" namespace JSC { namespace DFG { @@ -40,19 +42,42 @@ void CommonData::notifyCompilingStructureTransition(Plan& plan, CodeBlock* codeB { plan.transitions.addLazily( codeBlock, - node->codeOrigin.codeOriginOwner(), - node->structureTransitionData().previousStructure, - node->structureTransitionData().newStructure); + node->origin.semantic.codeOriginOwner(), + node->transition()->previous, + node->transition()->next); } -unsigned CommonData::addCodeOrigin(CodeOrigin codeOrigin) +CallSiteIndex CommonData::addCodeOrigin(CodeOrigin codeOrigin) { if (codeOrigins.isEmpty() || codeOrigins.last() != codeOrigin) codeOrigins.append(codeOrigin); unsigned index = codeOrigins.size() - 1; ASSERT(codeOrigins[index] == codeOrigin); - return index; + return CallSiteIndex(index); +} + +CallSiteIndex CommonData::addUniqueCallSiteIndex(CodeOrigin codeOrigin) +{ + if (callSiteIndexFreeList.size()) + return CallSiteIndex(callSiteIndexFreeList.takeAny()); + + codeOrigins.append(codeOrigin); + unsigned index = codeOrigins.size() - 1; + ASSERT(codeOrigins[index] == codeOrigin); + return CallSiteIndex(index); +} + +CallSiteIndex CommonData::lastCallSite() const +{ + RELEASE_ASSERT(codeOrigins.size()); + return CallSiteIndex(codeOrigins.size() - 1); +} + +void CommonData::removeCallSiteIndex(CallSiteIndex callSite) +{ + RELEASE_ASSERT(callSite.bits() < codeOrigins.size()); + callSiteIndexFreeList.add(callSite.bits()); } void CommonData::shrinkToFit() @@ -72,6 +97,27 @@ bool CommonData::invalidate() return true; } +void CommonData::validateReferences(const TrackedReferences& trackedReferences) +{ + if (InlineCallFrameSet* set = inlineCallFrames.get()) { + for (InlineCallFrame* inlineCallFrame : *set) { + for (ValueRecovery& recovery : inlineCallFrame->arguments) { + if (recovery.isConstant()) + trackedReferences.check(recovery.constant()); + } + + if (CodeBlock* baselineCodeBlock = inlineCallFrame->baselineCodeBlock.get()) + trackedReferences.check(baselineCodeBlock); + + if (inlineCallFrame->calleeRecovery.isConstant()) + trackedReferences.check(inlineCallFrame->calleeRecovery.constant()); + } + } + + for (AdaptiveStructureWatchpoint* watchpoint : adaptiveStructureWatchpoints) + watchpoint->key().validateReferences(trackedReferences); +} + } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGCommonData.h b/Source/JavaScriptCore/dfg/DFGCommonData.h index 17c5cce11..bf4f94fcc 100644 --- a/Source/JavaScriptCore/dfg/DFGCommonData.h +++ b/Source/JavaScriptCore/dfg/DFGCommonData.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,23 +26,24 @@ #ifndef DFGCommonData_h #define DFGCommonData_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "CodeBlockJettisoningWatchpoint.h" +#include "DFGAdaptiveInferredPropertyValueWatchpoint.h" +#include "DFGAdaptiveStructureWatchpoint.h" #include "DFGJumpReplacement.h" #include "InlineCallFrameSet.h" #include "JSCell.h" -#include "ProfiledCodeBlockJettisoningWatchpoint.h" #include "ProfilerCompilation.h" #include "SymbolTable.h" +#include <wtf/Bag.h> #include <wtf/Noncopyable.h> namespace JSC { class CodeBlock; class Identifier; +class TrackedReferences; namespace DFG { @@ -73,13 +74,15 @@ class CommonData { public: CommonData() : isStillValid(true) - , machineCaptureStart(std::numeric_limits<int>::max()) , frameRegisterCount(std::numeric_limits<unsigned>::max()) , requiredRegisterCountForExit(std::numeric_limits<unsigned>::max()) { } void notifyCompilingStructureTransition(Plan&, CodeBlock*, Node*); - unsigned addCodeOrigin(CodeOrigin codeOrigin); + CallSiteIndex addCodeOrigin(CodeOrigin); + CallSiteIndex addUniqueCallSiteIndex(CodeOrigin); + CallSiteIndex lastCallSite() const; + void removeCallSiteIndex(CallSiteIndex); void shrinkToFit(); @@ -89,15 +92,21 @@ public: { return std::max(frameRegisterCount, requiredRegisterCountForExit); } + + void validateReferences(const TrackedReferences&); + + static ptrdiff_t frameRegisterCountOffset() { return OBJECT_OFFSETOF(CommonData, frameRegisterCount); } - OwnPtr<InlineCallFrameSet> inlineCallFrames; + RefPtr<InlineCallFrameSet> inlineCallFrames; Vector<CodeOrigin, 0, UnsafeVectorOverflow> codeOrigins; Vector<Identifier> dfgIdentifiers; Vector<WeakReferenceTransition> transitions; Vector<WriteBarrier<JSCell>> weakReferences; - SegmentedVector<CodeBlockJettisoningWatchpoint, 1, 0> watchpoints; - SegmentedVector<ProfiledCodeBlockJettisoningWatchpoint, 1, 0> profiledWatchpoints; + Vector<WriteBarrier<Structure>> weakStructureReferences; + Bag<CodeBlockJettisoningWatchpoint> watchpoints; + Bag<AdaptiveStructureWatchpoint> adaptiveStructureWatchpoints; + Bag<AdaptiveInferredPropertyValueWatchpoint> adaptiveInferredPropertyValueWatchpoints; Vector<JumpReplacement> jumpReplacements; RefPtr<Profiler::Compilation> compilation; @@ -105,11 +114,16 @@ public: bool allTransitionsHaveBeenMarked; // Initialized and used on every GC. bool isStillValid; - int machineCaptureStart; - std::unique_ptr<SlowArgument[]> slowArguments; +#if USE(JSVALUE32_64) + std::unique_ptr<Bag<double>> doubleConstants; +#endif unsigned frameRegisterCount; unsigned requiredRegisterCountForExit; + +private: + HashSet<unsigned, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> callSiteIndexFreeList; + }; } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGCompilationKey.cpp b/Source/JavaScriptCore/dfg/DFGCompilationKey.cpp index d31ac9e2d..20ad082cc 100644 --- a/Source/JavaScriptCore/dfg/DFGCompilationKey.cpp +++ b/Source/JavaScriptCore/dfg/DFGCompilationKey.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,10 +26,12 @@ #include "config.h" #include "DFGCompilationKey.h" -#include "CodeBlock.h" - #if ENABLE(DFG_JIT) +#include "CodeBlock.h" +#include "CodeBlockSet.h" +#include "JSCInlines.h" + namespace JSC { namespace DFG { void CompilationKey::dump(PrintStream& out) const diff --git a/Source/JavaScriptCore/dfg/DFGCompilationKey.h b/Source/JavaScriptCore/dfg/DFGCompilationKey.h index a866acdf1..ff562a048 100644 --- a/Source/JavaScriptCore/dfg/DFGCompilationKey.h +++ b/Source/JavaScriptCore/dfg/DFGCompilationKey.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -32,6 +32,7 @@ namespace JSC { class CodeBlock; +class CodeBlockSet; namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGCompilationMode.cpp b/Source/JavaScriptCore/dfg/DFGCompilationMode.cpp index 99d95331c..20de99603 100644 --- a/Source/JavaScriptCore/dfg/DFGCompilationMode.cpp +++ b/Source/JavaScriptCore/dfg/DFGCompilationMode.cpp @@ -28,6 +28,8 @@ #if ENABLE(DFG_JIT) +#include "JSCInlines.h" + namespace WTF { using namespace JSC::DFG; diff --git a/Source/JavaScriptCore/dfg/DFGCompilationMode.h b/Source/JavaScriptCore/dfg/DFGCompilationMode.h index 1035f60bc..2d6e49ad5 100644 --- a/Source/JavaScriptCore/dfg/DFGCompilationMode.h +++ b/Source/JavaScriptCore/dfg/DFGCompilationMode.h @@ -37,6 +37,17 @@ enum CompilationMode { FTLForOSREntryMode }; +inline bool isFTL(CompilationMode mode) +{ + switch (mode) { + case FTLMode: + case FTLForOSREntryMode: + return true; + default: + return false; + } +} + } } // namespace JSC::DFG namespace WTF { diff --git a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp index f58761160..e5be8efb5 100644 --- a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,13 +29,15 @@ #if ENABLE(DFG_JIT) #include "DFGAbstractInterpreterInlines.h" -#include "DFGBasicBlock.h" +#include "DFGArgumentsUtilities.h" +#include "DFGBasicBlockInlines.h" #include "DFGGraph.h" #include "DFGInPlaceAbstractState.h" +#include "DFGInferredTypeCheck.h" #include "DFGInsertionSet.h" #include "DFGPhase.h" #include "GetByIdStatus.h" -#include "Operations.h" +#include "JSCInlines.h" #include "PutByIdStatus.h" namespace JSC { namespace DFG { @@ -53,15 +55,53 @@ public: bool run() { bool changed = false; - - for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) - continue; + + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { if (block->cfaFoundConstants) changed |= foldConstants(block); } + if (changed && m_graph.m_form == SSA) { + // It's now possible that we have Upsilons pointed at JSConstants. Fix that. 
+ for (BasicBlock* block : m_graph.blocksInNaturalOrder()) + fixUpsilons(block); + } + + if (m_graph.m_form == SSA) { + // It's now possible to simplify basic blocks by placing an Unreachable terminator right + // after anything that invalidates AI. + bool didClipBlock = false; + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + m_state.beginBasicBlock(block); + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + if (block->at(nodeIndex)->isTerminal()) { + // It's possible that we have something after the terminal. It could be a + // no-op Check node, for example. We don't want the logic below to turn that + // node into Unreachable, since then we'd have two terminators. + break; + } + if (!m_state.isValid()) { + NodeOrigin origin = block->at(nodeIndex)->origin; + for (unsigned killIndex = nodeIndex; killIndex < block->size(); ++killIndex) + m_graph.m_allocator.free(block->at(killIndex)); + block->resize(nodeIndex); + block->appendNode(m_graph, SpecNone, Unreachable, origin); + didClipBlock = true; + break; + } + m_interpreter.execute(nodeIndex); + } + m_state.reset(); + } + + if (didClipBlock) { + changed = true; + m_graph.invalidateCFG(); + m_graph.resetReachability(); + m_graph.killUnreachableBlocks(); + } + } + return changed; } @@ -76,19 +116,25 @@ private: Node* node = block->at(indexInBlock); + bool alreadyHandled = false; bool eliminated = false; switch (node->op()) { - case CheckArgumentsNotCreated: { - if (!isEmptySpeculation( - m_state.variables().operand( - m_graph.argumentsRegisterFor(node->codeOrigin)).m_type)) - break; - node->convertToPhantom(); - eliminated = true; + case BooleanToNumber: { + if (node->child1().useKind() == UntypedUse + && !m_interpreter.needsTypeCheck(node->child1(), SpecBoolean)) + node->child1().setUseKind(BooleanUse); break; } - + + case CompareEq: { + if (!m_interpreter.needsTypeCheck(node->child1(), SpecOther)) + node->child1().setUseKind(OtherUse); + if (!m_interpreter.needsTypeCheck(node->child2(), SpecOther)) + node->child2().setUseKind(OtherUse); + break; + } + case CheckStructure: case ArrayifyToStructure: { AbstractValue& value = m_state.forNode(node->child1()); @@ -97,27 +143,71 @@ private: set = node->structure(); else set = node->structureSet(); - if (value.m_currentKnownStructure.isSubsetOf(set)) { + if (value.m_structure.isSubsetOf(set)) { m_interpreter.execute(indexInBlock); // Catch the fact that we may filter on cell. - node->convertToPhantom(); + node->remove(); eliminated = true; break; } - StructureAbstractValue& structureValue = value.m_futurePossibleStructure; - if (structureValue.isSubsetOf(set) - && structureValue.hasSingleton()) { - Structure* structure = structureValue.singleton(); - m_interpreter.execute(indexInBlock); // Catch the fact that we may filter on cell. - AdjacencyList children = node->children; - children.removeEdge(0); - if (!!children.child1()) - m_insertionSet.insertNode(indexInBlock, SpecNone, Phantom, node->codeOrigin, children); - node->children.setChild2(Edge()); - node->children.setChild3(Edge()); - node->convertToStructureTransitionWatchpoint(structure); - eliminated = true; + break; + } + + case GetIndexedPropertyStorage: { + JSArrayBufferView* view = m_graph.tryGetFoldableView( + m_state.forNode(node->child1()).m_value, node->arrayMode()); + if (!view) + break; + + if (view->mode() == FastTypedArray) { + // FIXME: It would be awesome to be able to fold the property storage for + // these GC-allocated typed arrays. 
For now it doesn't matter because the + // most common use-cases for constant typed arrays involve large arrays with + // aliased buffer views. + // https://bugs.webkit.org/show_bug.cgi?id=125425 break; } + + m_interpreter.execute(indexInBlock); + eliminated = true; + + m_insertionSet.insertCheck(indexInBlock, node->origin, node->children); + node->convertToConstantStoragePointer(view->vector()); + break; + } + + case CheckStructureImmediate: { + AbstractValue& value = m_state.forNode(node->child1()); + StructureSet& set = node->structureSet(); + + if (value.value()) { + if (Structure* structure = jsDynamicCast<Structure*>(value.value())) { + if (set.contains(structure)) { + m_interpreter.execute(indexInBlock); + node->remove(); + eliminated = true; + break; + } + } + } + + if (PhiChildren* phiChildren = m_interpreter.phiChildren()) { + bool allGood = true; + phiChildren->forAllTransitiveIncomingValues( + node, + [&] (Node* incoming) { + if (Structure* structure = incoming->dynamicCastConstant<Structure*>()) { + if (set.contains(structure)) + return; + } + allGood = false; + }); + if (allGood) { + m_interpreter.execute(indexInBlock); + node->remove(); + eliminated = true; + break; + } + } break; } @@ -125,235 +215,371 @@ private: case Arrayify: { if (!node->arrayMode().alreadyChecked(m_graph, node, m_state.forNode(node->child1()))) break; - node->convertToPhantom(); + node->remove(); eliminated = true; break; } - case CheckFunction: { - if (m_state.forNode(node->child1()).value() != node->function()) + case PutStructure: { + if (m_state.forNode(node->child1()).m_structure.onlyStructure() != node->transition()->next) break; - node->convertToPhantom(); + + node->remove(); eliminated = true; break; } + case CheckCell: { + if (m_state.forNode(node->child1()).value() != node->cellOperand()->value()) + break; + node->remove(); + eliminated = true; + break; + } + + case CheckNotEmpty: { + if (m_state.forNode(node->child1()).m_type & SpecEmpty) + break; + node->remove(); + eliminated = true; + break; + } + + case CheckIdent: { + UniquedStringImpl* uid = node->uidOperand(); + const UniquedStringImpl* constantUid = nullptr; + + JSValue childConstant = m_state.forNode(node->child1()).value(); + if (childConstant) { + if (uid->isSymbol()) { + if (childConstant.isSymbol()) + constantUid = asSymbol(childConstant)->privateName().uid(); + } else { + if (childConstant.isString()) { + if (const auto* impl = asString(childConstant)->tryGetValueImpl()) { + // Edge filtering requires that a value here should be StringIdent. + // However, a constant value propagated in DFG is not filtered. + // So here, we check the propagated value is actually an atomic string. + // And if it's not, we just ignore. 
+ if (impl->isAtomic()) + constantUid = static_cast<const UniquedStringImpl*>(impl); + } + } + } + } + + if (constantUid == uid) { + node->remove(); + eliminated = true; + } + break; + } + case CheckInBounds: { JSValue left = m_state.forNode(node->child1()).value(); JSValue right = m_state.forNode(node->child2()).value(); if (left && right && left.isInt32() && right.isInt32() && static_cast<uint32_t>(left.asInt32()) < static_cast<uint32_t>(right.asInt32())) { - node->convertToPhantom(); + node->remove(); eliminated = true; break; } break; } - - case GetById: - case GetByIdFlush: { - CodeOrigin codeOrigin = node->codeOrigin; - Edge childEdge = node->child1(); - Node* child = childEdge.node(); - unsigned identifierNumber = node->identifierNumber(); - if (childEdge.useKind() != CellUse) + case GetMyArgumentByVal: { + JSValue index = m_state.forNode(node->child2()).value(); + if (!index || !index.isInt32()) break; - Structure* structure = m_state.forNode(child).bestProvenStructure(); - if (!structure) - break; + Node* arguments = node->child1().node(); + InlineCallFrame* inlineCallFrame = arguments->origin.semantic.inlineCallFrame; - bool needsWatchpoint = !m_state.forNode(child).m_currentKnownStructure.hasSingleton(); - bool needsCellCheck = m_state.forNode(child).m_type & ~SpecCell; + // Don't try to do anything if the index is known to be outside our static bounds. Note + // that our static bounds are usually strictly larger than the dynamic bounds. The + // exception is something like this, assuming foo() is not inlined: + // + // function foo() { return arguments[5]; } + // + // Here the static bound on number of arguments is 0, and we're accessing index 5. We + // will not strength-reduce this to GetStack because GetStack is otherwise assumed by the + // compiler to access those variables that are statically accounted for; for example if + // we emitted a GetStack on arg6 we would have out-of-bounds access crashes anywhere that + // uses an Operands<> map. There is not much cost to continuing to use a + // GetMyArgumentByVal in such statically-out-of-bounds accesses; we just lose CFA unless + // GCSE removes the access entirely. + if (inlineCallFrame) { + if (index.asUInt32() >= inlineCallFrame->arguments.size() - 1) + break; + } else { + if (index.asUInt32() >= m_state.variables().numberOfArguments() - 1) + break; + } - GetByIdStatus status = GetByIdStatus::computeFor( - vm(), structure, m_graph.identifiers()[identifierNumber]); + m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before. - if (!status.isSimple()) { - // FIXME: We could handle prototype cases. - // https://bugs.webkit.org/show_bug.cgi?id=110386 - break; + StackAccessData* data; + if (inlineCallFrame) { + data = m_graph.m_stackAccessData.add( + VirtualRegister( + inlineCallFrame->stackOffset + + CallFrame::argumentOffset(index.asInt32())), + FlushedJSValue); + } else { + data = m_graph.m_stackAccessData.add( + virtualRegisterForArgument(index.asInt32() + 1), FlushedJSValue); } - ASSERT(status.structureSet().size() == 1); - ASSERT(!status.chain()); - ASSERT(status.structureSet().singletonStructure() == structure); + if (inlineCallFrame && !inlineCallFrame->isVarargs() + && index.asUInt32() < inlineCallFrame->arguments.size() - 1) { + node->convertToGetStack(data); + eliminated = true; + break; + } - // Now before we do anything else, push the CFA forward over the GetById - // and make sure we signal to the loop that it should continue and not - // do any eliminations. 
- m_interpreter.execute(indexInBlock); + Node* length = emitCodeToGetArgumentsArrayLength( + m_insertionSet, arguments, indexInBlock, node->origin); + m_insertionSet.insertNode( + indexInBlock, SpecNone, CheckInBounds, node->origin, + node->child2(), Edge(length, Int32Use)); + node->convertToGetStack(data); eliminated = true; + break; + } - if (needsWatchpoint) { - m_insertionSet.insertNode( - indexInBlock, SpecNone, StructureTransitionWatchpoint, codeOrigin, - OpInfo(structure), childEdge); - } else if (needsCellCheck) { - m_insertionSet.insertNode( - indexInBlock, SpecNone, Phantom, codeOrigin, childEdge); + case MultiGetByOffset: { + Edge baseEdge = node->child1(); + Node* base = baseEdge.node(); + MultiGetByOffsetData& data = node->multiGetByOffsetData(); + + // First prune the variants, then check if the MultiGetByOffset can be + // strength-reduced to a GetByOffset. + + AbstractValue baseValue = m_state.forNode(base); + + m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before. + alreadyHandled = true; // Don't allow the default constant folder to do things to this. + + for (unsigned i = 0; i < data.cases.size(); ++i) { + MultiGetByOffsetCase& getCase = data.cases[i]; + getCase.set().filter(baseValue); + if (getCase.set().isEmpty()) { + data.cases[i--] = data.cases.last(); + data.cases.removeLast(); + changed = true; + } } - childEdge.setUseKind(KnownCellUse); + if (data.cases.size() != 1) + break; - Edge propertyStorage; + emitGetByOffset(indexInBlock, node, baseValue, data.cases[0], data.identifierNumber); + changed = true; + break; + } + + case MultiPutByOffset: { + Edge baseEdge = node->child1(); + Node* base = baseEdge.node(); + MultiPutByOffsetData& data = node->multiPutByOffsetData(); + + AbstractValue baseValue = m_state.forNode(base); + + m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before. + alreadyHandled = true; // Don't allow the default constant folder to do things to this. - if (isInlineOffset(status.offset())) - propertyStorage = childEdge; - else { - propertyStorage = Edge(m_insertionSet.insertNode( - indexInBlock, SpecNone, GetButterfly, codeOrigin, childEdge)); + + for (unsigned i = 0; i < data.variants.size(); ++i) { + PutByIdVariant& variant = data.variants[i]; + variant.oldStructure().filter(baseValue); + + if (variant.oldStructure().isEmpty()) { + data.variants[i--] = data.variants.last(); + data.variants.removeLast(); + changed = true; + continue; + } + + if (variant.kind() == PutByIdVariant::Transition + && variant.oldStructure().onlyStructure() == variant.newStructure()) { + variant = PutByIdVariant::replace( + variant.oldStructure(), + variant.offset(), + variant.requiredType()); + changed = true; + } } + + if (data.variants.size() != 1) + break; - node->convertToGetByOffset(m_graph.m_storageAccessData.size(), propertyStorage); + emitPutByOffset( + indexInBlock, node, baseValue, data.variants[0], data.identifierNumber); + changed = true; + break; + } + + case GetById: + case GetByIdFlush: { + Edge childEdge = node->child1(); + Node* child = childEdge.node(); + unsigned identifierNumber = node->identifierNumber(); + + AbstractValue baseValue = m_state.forNode(child); + + m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before. + alreadyHandled = true; // Don't allow the default constant folder to do things to this. 
+ + if (baseValue.m_structure.isTop() || baseValue.m_structure.isClobbered() + || (node->child1().useKind() == UntypedUse || (baseValue.m_type & ~SpecCell))) + break; - StorageAccessData storageAccessData; - storageAccessData.offset = status.offset(); - storageAccessData.identifierNumber = identifierNumber; - m_graph.m_storageAccessData.append(storageAccessData); + GetByIdStatus status = GetByIdStatus::computeFor( + baseValue.m_structure.set(), m_graph.identifiers()[identifierNumber]); + if (!status.isSimple()) + break; + + for (unsigned i = status.numVariants(); i--;) { + if (!status[i].conditionSet().isEmpty()) { + // FIXME: We could handle prototype cases. + // https://bugs.webkit.org/show_bug.cgi?id=110386 + break; + } + } + + if (status.numVariants() == 1) { + emitGetByOffset(indexInBlock, node, baseValue, status[0], identifierNumber); + changed = true; + break; + } + + if (!isFTL(m_graph.m_plan.mode)) + break; + + MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add(); + for (const GetByIdVariant& variant : status.variants()) { + data->cases.append( + MultiGetByOffsetCase( + variant.structureSet(), + GetByOffsetMethod::load(variant.offset()))); + } + data->identifierNumber = identifierNumber; + node->convertToMultiGetByOffset(data); + changed = true; break; } case PutById: - case PutByIdDirect: { - CodeOrigin codeOrigin = node->codeOrigin; + case PutByIdDirect: + case PutByIdFlush: { + NodeOrigin origin = node->origin; Edge childEdge = node->child1(); Node* child = childEdge.node(); unsigned identifierNumber = node->identifierNumber(); ASSERT(childEdge.useKind() == CellUse); - Structure* structure = m_state.forNode(child).bestProvenStructure(); - if (!structure) + AbstractValue baseValue = m_state.forNode(child); + AbstractValue valueValue = m_state.forNode(node->child2()); + + m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before. + alreadyHandled = true; // Don't allow the default constant folder to do things to this. + + if (baseValue.m_structure.isTop() || baseValue.m_structure.isClobbered()) break; - bool needsWatchpoint = !m_state.forNode(child).m_currentKnownStructure.hasSingleton(); - bool needsCellCheck = m_state.forNode(child).m_type & ~SpecCell; - PutByIdStatus status = PutByIdStatus::computeFor( - vm(), - m_graph.globalObjectFor(codeOrigin), - structure, + m_graph.globalObjectFor(origin.semantic), + baseValue.m_structure.set(), m_graph.identifiers()[identifierNumber], node->op() == PutByIdDirect); - if (!status.isSimpleReplace() && !status.isSimpleTransition()) + if (!status.isSimple()) break; + + ASSERT(status.numVariants()); - ASSERT(status.oldStructure() == structure); - - // Now before we do anything else, push the CFA forward over the PutById - // and make sure we signal to the loop that it should continue and not - // do any eliminations. 
- m_interpreter.execute(indexInBlock); - eliminated = true; - - if (needsWatchpoint) { - m_insertionSet.insertNode( - indexInBlock, SpecNone, StructureTransitionWatchpoint, codeOrigin, - OpInfo(structure), childEdge); - } else if (needsCellCheck) { - m_insertionSet.insertNode( - indexInBlock, SpecNone, Phantom, codeOrigin, childEdge); - } - - childEdge.setUseKind(KnownCellUse); + if (status.numVariants() > 1 && !isFTL(m_graph.m_plan.mode)) + break; - StructureTransitionData* transitionData = 0; - if (status.isSimpleTransition()) { - transitionData = m_graph.addStructureTransitionData( - StructureTransitionData(structure, status.newStructure())); - - if (node->op() == PutById) { - if (!structure->storedPrototype().isNull()) { - addStructureTransitionCheck( - codeOrigin, indexInBlock, - structure->storedPrototype().asCell()); - } - - m_graph.chains().addLazily(status.structureChain()); - - for (unsigned i = 0; i < status.structureChain()->size(); ++i) { - JSValue prototype = status.structureChain()->at(i)->storedPrototype(); - if (prototype.isNull()) - continue; - ASSERT(prototype.isCell()); - addStructureTransitionCheck( - codeOrigin, indexInBlock, prototype.asCell()); + changed = true; + + bool allGood = true; + for (const PutByIdVariant& variant : status.variants()) { + if (!allGood) + break; + for (const ObjectPropertyCondition& condition : variant.conditionSet()) { + if (m_graph.watchCondition(condition)) + continue; + + Structure* structure = condition.object()->structure(); + if (!condition.structureEnsuresValidity(structure)) { + allGood = false; + break; } + + m_insertionSet.insertNode( + indexInBlock, SpecNone, CheckStructure, node->origin, + OpInfo(m_graph.addStructureSet(structure)), + m_insertionSet.insertConstantForUse( + indexInBlock, node->origin, condition.object(), KnownCellUse)); } } + + if (!allGood) + break; - Edge propertyStorage; - - if (isInlineOffset(status.offset())) - propertyStorage = childEdge; - else if (status.isSimpleReplace() || structure->outOfLineCapacity() == status.newStructure()->outOfLineCapacity()) { - propertyStorage = Edge(m_insertionSet.insertNode( - indexInBlock, SpecNone, GetButterfly, codeOrigin, childEdge)); - } else if (!structure->outOfLineCapacity()) { - ASSERT(status.newStructure()->outOfLineCapacity()); - ASSERT(!isInlineOffset(status.offset())); - Node* allocatePropertyStorage = m_insertionSet.insertNode( - indexInBlock, SpecNone, AllocatePropertyStorage, - codeOrigin, OpInfo(transitionData), childEdge); - m_insertionSet.insertNode(indexInBlock, SpecNone, StoreBarrier, codeOrigin, Edge(node->child1().node(), KnownCellUse)); - propertyStorage = Edge(allocatePropertyStorage); - } else { - ASSERT(structure->outOfLineCapacity()); - ASSERT(status.newStructure()->outOfLineCapacity() > structure->outOfLineCapacity()); - ASSERT(!isInlineOffset(status.offset())); - - Node* reallocatePropertyStorage = m_insertionSet.insertNode( - indexInBlock, SpecNone, ReallocatePropertyStorage, codeOrigin, - OpInfo(transitionData), childEdge, - Edge(m_insertionSet.insertNode( - indexInBlock, SpecNone, GetButterfly, codeOrigin, childEdge))); - m_insertionSet.insertNode(indexInBlock, SpecNone, StoreBarrier, codeOrigin, Edge(node->child1().node(), KnownCellUse)); - propertyStorage = Edge(reallocatePropertyStorage); + if (status.numVariants() == 1) { + emitPutByOffset(indexInBlock, node, baseValue, status[0], identifierNumber); + break; } - if (status.isSimpleTransition()) { - Node* putStructure = m_graph.addNode(SpecNone, PutStructure, codeOrigin, 
OpInfo(transitionData), childEdge); - m_insertionSet.insertNode(indexInBlock, SpecNone, StoreBarrier, codeOrigin, Edge(node->child1().node(), KnownCellUse)); - m_insertionSet.insert(indexInBlock, putStructure); - } + ASSERT(isFTL(m_graph.m_plan.mode)); - node->convertToPutByOffset(m_graph.m_storageAccessData.size(), propertyStorage); - m_insertionSet.insertNode(indexInBlock, SpecNone, ConditionalStoreBarrier, codeOrigin, - Edge(node->child2().node(), KnownCellUse), Edge(node->child3().node(), UntypedUse)); - - StorageAccessData storageAccessData; - storageAccessData.offset = status.offset(); - storageAccessData.identifierNumber = identifierNumber; - m_graph.m_storageAccessData.append(storageAccessData); + MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add(); + data->variants = status.variants(); + data->identifierNumber = identifierNumber; + node->convertToMultiPutByOffset(data); break; } - case ConditionalStoreBarrier: { - if (!m_interpreter.needsTypeCheck(node->child2().node(), ~SpecCell)) { - node->convertToPhantom(); - eliminated = true; - } + case ToPrimitive: { + if (m_state.forNode(node->child1()).m_type & ~(SpecFullNumber | SpecBoolean | SpecString | SpecSymbol)) + break; + + node->convertToIdentity(); + changed = true; break; } - case StoreBarrier: - case StoreBarrierWithNullCheck: { + case Check: { + alreadyHandled = true; + m_interpreter.execute(indexInBlock); + for (unsigned i = 0; i < AdjacencyList::Size; ++i) { + Edge edge = node->children.child(i); + if (!edge) + break; + if (edge.isProved() || edge.willNotHaveCheck()) { + node->children.removeEdge(i--); + changed = true; + } + } break; } - + default: break; } - + if (eliminated) { changed = true; continue; } + if (alreadyHandled) + continue; + m_interpreter.execute(indexInBlock); if (!m_state.isValid()) { // If we invalidated then we shouldn't attempt to constant-fold. Here's an @@ -372,31 +598,23 @@ private: } if (!node->shouldGenerate() || m_state.didClobber() || node->hasConstant()) continue; - JSValue value = m_state.forNode(node).value(); - if (!value) - continue; - // Check if merging the abstract value of the constant into the abstract value - // we've proven for this node wouldn't widen the proof. If it widens the proof - // (i.e. says that the set contains more things in it than it previously did) - // then we refuse to fold. - AbstractValue oldValue = m_state.forNode(node); - AbstractValue constantValue; - constantValue.set(m_graph, value); - if (oldValue.merge(constantValue)) + // Interesting fact: this freezing that we do right here may turn an fragile value into + // a weak value. See DFGValueStrength.h. + FrozenValue* value = m_graph.freeze(m_state.forNode(node).value()); + if (!*value) continue; - - CodeOrigin codeOrigin = node->codeOrigin; - AdjacencyList children = node->children; - if (node->op() == GetLocal) + if (node->op() == GetLocal) { + // Need to preserve bytecode liveness in ThreadedCPS form. This wouldn't be necessary + // if it wasn't for https://bugs.webkit.org/show_bug.cgi?id=144086. 
+ m_insertionSet.insertNode( + indexInBlock, SpecNone, PhantomLocal, node->origin, + OpInfo(node->variableAccessData())); m_graph.dethread(); - else - ASSERT(!node->hasVariableAccessData(m_graph)); - + } else + m_insertionSet.insertCheck(indexInBlock, node->origin, node->children); m_graph.convertToConstant(node, value); - m_insertionSet.insertNode( - indexInBlock, SpecNone, Phantom, codeOrigin, children); changed = true; } @@ -405,22 +623,198 @@ private: return changed; } + + void emitGetByOffset(unsigned indexInBlock, Node* node, const AbstractValue& baseValue, const MultiGetByOffsetCase& getCase, unsigned identifierNumber) + { + // When we get to here we have already emitted all of the requisite checks for everything. + // So, we just need to emit what the method object tells us to emit. + + addBaseCheck(indexInBlock, node, baseValue, getCase.set()); - void addStructureTransitionCheck(CodeOrigin codeOrigin, unsigned indexInBlock, JSCell* cell) + GetByOffsetMethod method = getCase.method(); + + switch (method.kind()) { + case GetByOffsetMethod::Invalid: + RELEASE_ASSERT_NOT_REACHED(); + return; + + case GetByOffsetMethod::Constant: + m_graph.convertToConstant(node, method.constant()); + return; + + case GetByOffsetMethod::Load: + emitGetByOffset(indexInBlock, node, node->child1(), identifierNumber, method.offset()); + return; + + case GetByOffsetMethod::LoadFromPrototype: { + Node* child = m_insertionSet.insertConstant( + indexInBlock, node->origin, method.prototype()); + emitGetByOffset( + indexInBlock, node, Edge(child, KnownCellUse), identifierNumber, method.offset()); + return; + } } + + RELEASE_ASSERT_NOT_REACHED(); + } + + void emitGetByOffset(unsigned indexInBlock, Node* node, const AbstractValue& baseValue, const GetByIdVariant& variant, unsigned identifierNumber) { - Node* weakConstant = m_insertionSet.insertNode( - indexInBlock, speculationFromValue(cell), WeakJSConstant, codeOrigin, OpInfo(cell)); + Edge childEdge = node->child1(); + + addBaseCheck(indexInBlock, node, baseValue, variant.structureSet()); + + // We aren't set up to handle prototype stuff. 
+ DFG_ASSERT(m_graph, node, variant.conditionSet().isEmpty()); + + if (JSValue value = m_graph.tryGetConstantProperty(baseValue.m_value, variant.structureSet(), variant.offset())) { + m_graph.convertToConstant(node, m_graph.freeze(value)); + return; + } + + emitGetByOffset(indexInBlock, node, childEdge, identifierNumber, variant.offset()); + } + + void emitGetByOffset( + unsigned indexInBlock, Node* node, Edge childEdge, unsigned identifierNumber, + PropertyOffset offset, const InferredType::Descriptor& inferredType = InferredType::Top) + { + childEdge.setUseKind(KnownCellUse); + + Edge propertyStorage; + + if (isInlineOffset(offset)) + propertyStorage = childEdge; + else { + propertyStorage = Edge(m_insertionSet.insertNode( + indexInBlock, SpecNone, GetButterfly, node->origin, childEdge)); + } + + StorageAccessData& data = *m_graph.m_storageAccessData.add(); + data.offset = offset; + data.identifierNumber = identifierNumber; + data.inferredType = inferredType; + + node->convertToGetByOffset(data, propertyStorage); + } + + void emitPutByOffset(unsigned indexInBlock, Node* node, const AbstractValue& baseValue, const PutByIdVariant& variant, unsigned identifierNumber) + { + NodeOrigin origin = node->origin; + Edge childEdge = node->child1(); + + addBaseCheck(indexInBlock, node, baseValue, variant.oldStructure()); + insertInferredTypeCheck( + m_insertionSet, indexInBlock, origin, node->child2().node(), variant.requiredType()); + + node->child1().setUseKind(KnownCellUse); + childEdge.setUseKind(KnownCellUse); + + Transition* transition = 0; + if (variant.kind() == PutByIdVariant::Transition) { + transition = m_graph.m_transitions.add( + variant.oldStructureForTransition(), variant.newStructure()); + } + + Edge propertyStorage; + + DFG_ASSERT(m_graph, node, origin.exitOK); + bool canExit = true; + + if (isInlineOffset(variant.offset())) + propertyStorage = childEdge; + else if (!variant.reallocatesStorage()) { + propertyStorage = Edge(m_insertionSet.insertNode( + indexInBlock, SpecNone, GetButterfly, origin, childEdge)); + } else if (!variant.oldStructureForTransition()->outOfLineCapacity()) { + ASSERT(variant.newStructure()->outOfLineCapacity()); + ASSERT(!isInlineOffset(variant.offset())); + Node* allocatePropertyStorage = m_insertionSet.insertNode( + indexInBlock, SpecNone, AllocatePropertyStorage, + origin.takeValidExit(canExit), OpInfo(transition), childEdge); + propertyStorage = Edge(allocatePropertyStorage); + } else { + ASSERT(variant.oldStructureForTransition()->outOfLineCapacity()); + ASSERT(variant.newStructure()->outOfLineCapacity() > variant.oldStructureForTransition()->outOfLineCapacity()); + ASSERT(!isInlineOffset(variant.offset())); + + Node* reallocatePropertyStorage = m_insertionSet.insertNode( + indexInBlock, SpecNone, ReallocatePropertyStorage, origin.takeValidExit(canExit), + OpInfo(transition), childEdge, + Edge(m_insertionSet.insertNode( + indexInBlock, SpecNone, GetButterfly, origin, childEdge))); + propertyStorage = Edge(reallocatePropertyStorage); + } + + StorageAccessData& data = *m_graph.m_storageAccessData.add(); + data.offset = variant.offset(); + data.identifierNumber = identifierNumber; - if (m_graph.watchpoints().isStillValid(cell->structure()->transitionWatchpointSet())) { + node->convertToPutByOffset(data, propertyStorage); + node->origin.exitOK = canExit; + + if (variant.kind() == PutByIdVariant::Transition) { + // FIXME: PutStructure goes last until we fix either + // https://bugs.webkit.org/show_bug.cgi?id=142921 or + // 
https://bugs.webkit.org/show_bug.cgi?id=142924. m_insertionSet.insertNode( - indexInBlock, SpecNone, StructureTransitionWatchpoint, codeOrigin, - OpInfo(cell->structure()), Edge(weakConstant, CellUse)); + indexInBlock + 1, SpecNone, PutStructure, origin.withInvalidExit(), OpInfo(transition), + childEdge); + } + } + + void addBaseCheck( + unsigned indexInBlock, Node* node, const AbstractValue& baseValue, const StructureSet& set) + { + if (!baseValue.m_structure.isSubsetOf(set)) { + // Arises when we prune MultiGetByOffset. We could have a + // MultiGetByOffset with a single variant that checks for structure S, + // and the input has structures S and T, for example. + ASSERT(node->child1()); + m_insertionSet.insertNode( + indexInBlock, SpecNone, CheckStructure, node->origin, + OpInfo(m_graph.addStructureSet(set)), node->child1()); return; } + + if (baseValue.m_type & ~SpecCell) + m_insertionSet.insertCheck(indexInBlock, node->origin, node->child1()); + } + + void addStructureTransitionCheck(NodeOrigin origin, unsigned indexInBlock, JSCell* cell, Structure* structure) + { + if (m_graph.registerStructure(cell->structure()) == StructureRegisteredAndWatched) + return; + + m_graph.registerStructure(structure); + Node* weakConstant = m_insertionSet.insertNode( + indexInBlock, speculationFromValue(cell), JSConstant, origin, + OpInfo(m_graph.freeze(cell))); + m_insertionSet.insertNode( - indexInBlock, SpecNone, CheckStructure, codeOrigin, - OpInfo(m_graph.addStructureSet(cell->structure())), Edge(weakConstant, CellUse)); + indexInBlock, SpecNone, CheckStructure, origin, + OpInfo(m_graph.addStructureSet(structure)), Edge(weakConstant, CellUse)); + } + + void fixUpsilons(BasicBlock* block) + { + for (unsigned nodeIndex = block->size(); nodeIndex--;) { + Node* node = block->at(nodeIndex); + if (node->op() != Upsilon) + continue; + switch (node->phi()->op()) { + case Phi: + break; + case JSConstant: + case DoubleConstant: + case Int52Constant: + node->remove(); + break; + default: + DFG_CRASH(m_graph, node, "Bad Upsilon phi() pointer"); + break; + } + } } InPlaceAbstractState m_state; diff --git a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.h b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.h index cde16806c..d2f7e1351 100644 --- a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.h +++ b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.h @@ -26,8 +26,6 @@ #ifndef DFGConstantFoldingPhase_h #define DFGConstantFoldingPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) namespace JSC { namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGConstantHoistingPhase.cpp b/Source/JavaScriptCore/dfg/DFGConstantHoistingPhase.cpp new file mode 100644 index 000000000..4a72f9505 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGConstantHoistingPhase.cpp @@ -0,0 +1,149 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGConstantHoistingPhase.h" + +#if ENABLE(DFG_JIT) + +#include "DFGGraph.h" +#include "DFGInsertionSet.h" +#include "DFGPhase.h" +#include "DFGPredictionPropagationPhase.h" +#include "DFGVariableAccessDataDump.h" +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +namespace { + +class ConstantHoistingPhase : public Phase { +public: + ConstantHoistingPhase(Graph& graph) + : Phase(graph, "constant hoisting") + { + } + + bool run() + { + DFG_ASSERT(m_graph, nullptr, m_graph.m_form == SSA); + + m_graph.clearReplacements(); + + HashMap<FrozenValue*, Node*> jsValues; + HashMap<FrozenValue*, Node*> doubleValues; + HashMap<FrozenValue*, Node*> int52Values; + + auto valuesFor = [&] (NodeType op) -> HashMap<FrozenValue*, Node*>& { + // Use a roundabout approach because clang thinks that this closure returning a + // reference to a stack-allocated value in outer scope is a bug. It's not. + HashMap<FrozenValue*, Node*>* result; + + switch (op) { + case JSConstant: + result = &jsValues; + break; + case DoubleConstant: + result = &doubleValues; + break; + case Int52Constant: + result = &int52Values; + break; + default: + DFG_CRASH(m_graph, nullptr, "Invalid node type in valuesFor()"); + result = nullptr; + break; + } + + return *result; + }; + + Vector<Node*> toFree; + + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + unsigned sourceIndex = 0; + unsigned targetIndex = 0; + while (sourceIndex < block->size()) { + Node* node = block->at(sourceIndex++); + switch (node->op()) { + case JSConstant: + case DoubleConstant: + case Int52Constant: { + HashMap<FrozenValue*, Node*>& values = valuesFor(node->op()); + auto result = values.add(node->constant(), node); + if (result.isNewEntry) + node->origin = m_graph.block(0)->at(0)->origin; + else { + node->setReplacement(result.iterator->value); + toFree.append(node); + } + break; + } + default: + block->at(targetIndex++) = node; + break; + } + } + block->resize(targetIndex); + } + + // Insert the constants into the root block. + InsertionSet insertionSet(m_graph); + auto insertConstants = [&] (const HashMap<FrozenValue*, Node*>& values) { + for (auto& entry : values) + insertionSet.insert(0, entry.value); + }; + insertConstants(jsValues); + insertConstants(doubleValues); + insertConstants(int52Values); + insertionSet.execute(m_graph.block(0)); + + // Perform all of the substitutions. We want all instances of the removed constants to + // point at their replacements. + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + for (Node* node : *block) + m_graph.performSubstitution(node); + } + + // And finally free the constants that we removed. 
+ for (Node* node : toFree) + m_graph.m_allocator.free(node); + + return true; + } +}; + +} // anonymous namespace + +bool performConstantHoisting(Graph& graph) +{ + SamplingRegion samplingRegion("DFG Constant Hoisting Phase"); + return runPhase<ConstantHoistingPhase>(graph); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGConstantHoistingPhase.h b/Source/JavaScriptCore/dfg/DFGConstantHoistingPhase.h new file mode 100644 index 000000000..5124f168e --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGConstantHoistingPhase.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGConstantHoistingPhase_h +#define DFGConstantHoistingPhase_h + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +class Graph; + +// Hoists all constants to the top of the root block. + +bool performConstantHoisting(Graph&); + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGConstantHoistingPhase_h diff --git a/Source/JavaScriptCore/dfg/DFGCopyBarrierOptimizationPhase.cpp b/Source/JavaScriptCore/dfg/DFGCopyBarrierOptimizationPhase.cpp new file mode 100644 index 000000000..68e08c98d --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGCopyBarrierOptimizationPhase.cpp @@ -0,0 +1,133 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGCopyBarrierOptimizationPhase.h" + +#if ENABLE(DFG_JIT) + +#include "DFGClobberize.h" +#include "DFGDoesGC.h" +#include "DFGGraph.h" +#include "DFGPhase.h" +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +namespace { + +bool verbose = false; + +class CopyBarrierOptimizationPhase : public Phase { +public: + CopyBarrierOptimizationPhase(Graph& graph) + : Phase(graph, "copy barrier optimization") + { + } + + bool run() + { + if (verbose) { + dataLog("Starting copy barrier optimization:\n"); + m_graph.dump(); + } + + // First convert all GetButterfly nodes into GetButterflyReadOnly. + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + for (Node* node : *block) { + if (node->op() == GetButterfly) + node->setOp(GetButterflyReadOnly); + } + } + + // Anytime we use a GetButterflyReadOnly in a setting that may write to the heap, or if we're in a + // new epoch, convert it to a GetButterfly. The epoch gets incremented at basic block boundaries, + // anytime we GC, and anytime a barrier on the butterfly may be executed. We traverse the program + // in pre-order so that we always see uses after defs. Note that this is a fixpoint because if we + // turn a GetButterflyReadOnly into a GetButterfly, then we've introduced a butterfly reallocation. + bool changed = true; + Epoch currentEpoch = Epoch::first(); + m_graph.clearEpochs(); + while (changed) { + changed = false; + for (BasicBlock* block : m_graph.blocksInPreOrder()) { + currentEpoch.bump(); + for (Node* node : *block) { + bool writesToHeap = writesOverlap(m_graph, node, Heap); + + bool reallocatesButterfly = false; + if (doesGC(m_graph, node) || writesOverlap(m_graph, node, JSObject_butterfly)) + reallocatesButterfly = true; + else { + // This is not an exhaustive list of things that will execute copy barriers. Most + // things that execute copy barriers also do GC or have writes that overlap the + // butterfly heap, and we catch that above. + switch (node->op()) { + case GetButterfly: + case MultiPutByOffset: + reallocatesButterfly = true; + break; + default: + break; + } + } + + m_graph.doToChildren( + node, + [&] (Edge edge) { + if (edge->op() != GetButterflyReadOnly) + return; + + if (writesToHeap || currentEpoch != edge->epoch()) { + changed = true; + edge->setOp(GetButterfly); + } + }); + + if (reallocatesButterfly) + currentEpoch.bump(); + + node->setEpoch(currentEpoch); + } + } + } + + // This phase always thinks that it changes the graph. That's OK, because it's a late phase. 
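The comments above describe the validity rule this phase enforces: a GetButterflyReadOnly may keep its result only while no heap write intervenes and the epoch recorded at the load still matches the current epoch, which is bumped at block boundaries, at GC points, and at anything that may reallocate the butterfly. A minimal sketch of that rule alone, with invented stand-in types:

    #include <cstdint>

    struct Epoch {
        uint64_t value = 1;
        void bump() { ++value; }
        bool operator!=(Epoch other) const { return value != other.value; }
    };

    struct CachedLoad { Epoch epoch; };   // stands in for a read-only butterfly load

    // A heap write or an epoch change (GC, barrier, block boundary) forces a reload.
    bool mustReload(const CachedLoad& load, Epoch current, bool mayWriteHeap)
    {
        return mayWriteHeap || load.epoch != current;
    }

Demoting a load back to GetButterfly can itself introduce a reallocation point, which is why the phase iterates to a fixpoint.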
+ return true; + } +}; + +} // anonymous namespace + +bool performCopyBarrierOptimization(Graph& graph) +{ + SamplingRegion samplingRegion("DFG Copy Barrier Optimization Phase"); + return runPhase<CopyBarrierOptimizationPhase>(graph); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGStoreBarrierElisionPhase.h b/Source/JavaScriptCore/dfg/DFGCopyBarrierOptimizationPhase.h index 94276bea1..49069e108 100644 --- a/Source/JavaScriptCore/dfg/DFGStoreBarrierElisionPhase.h +++ b/Source/JavaScriptCore/dfg/DFGCopyBarrierOptimizationPhase.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,15 +23,22 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef DFGStoreBarrierElisionPhase_h -#define DFGStoreBarrierElisionPhase_h +#ifndef DFGCopyBarrierOptimizationPhase_h +#define DFGCopyBarrierOptimizationPhase_h + +#if ENABLE(DFG_JIT) namespace JSC { namespace DFG { class Graph; -bool performStoreBarrierElision(Graph&); +// Converts GetButterfly nodes into GetButterflyReadOnly nodes whenever the butterfly is only used for +// read-only operations. +bool performCopyBarrierOptimization(Graph&); } } // namespace JSC::DFG -#endif // DFGStoreBarrierElisionPhase_h +#endif // ENABLE(DFG_JIT) + +#endif // DFGCopyBarrierOptimizationPhase_h + diff --git a/Source/JavaScriptCore/dfg/DFGCriticalEdgeBreakingPhase.cpp b/Source/JavaScriptCore/dfg/DFGCriticalEdgeBreakingPhase.cpp index 617bffd90..12130317f 100644 --- a/Source/JavaScriptCore/dfg/DFGCriticalEdgeBreakingPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGCriticalEdgeBreakingPhase.cpp @@ -32,7 +32,7 @@ #include "DFGBlockInsertionSet.h" #include "DFGGraph.h" #include "DFGPhase.h" -#include "Operations.h" +#include "JSCInlines.h" #include <wtf/HashMap.h> namespace JSC { namespace DFG { @@ -73,9 +73,9 @@ public: private: void breakCriticalEdge(BasicBlock* predecessor, BasicBlock** successor) { - BasicBlock* pad = m_insertionSet.insertBefore(*successor); + BasicBlock* pad = m_insertionSet.insertBefore(*successor, (*successor)->executionCount); pad->appendNode( - m_graph, SpecNone, Jump, (*successor)->at(0)->codeOrigin, OpInfo(*successor)); + m_graph, SpecNone, Jump, (*successor)->at(0)->origin, OpInfo(*successor)); pad->predecessors.append(predecessor); (*successor)->replacePredecessor(predecessor, pad); diff --git a/Source/JavaScriptCore/dfg/DFGCriticalEdgeBreakingPhase.h b/Source/JavaScriptCore/dfg/DFGCriticalEdgeBreakingPhase.h index d801c1250..bc94f8256 100644 --- a/Source/JavaScriptCore/dfg/DFGCriticalEdgeBreakingPhase.h +++ b/Source/JavaScriptCore/dfg/DFGCriticalEdgeBreakingPhase.h @@ -26,8 +26,6 @@ #ifndef DFGCriticalEdgeBreakingPhase_h #define DFGCriticalEdgeBreakingPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) namespace JSC { namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGDCEPhase.cpp b/Source/JavaScriptCore/dfg/DFGDCEPhase.cpp index 36f7683a8..5290f2422 100644 --- a/Source/JavaScriptCore/dfg/DFGDCEPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGDCEPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -32,7 +32,7 @@ #include "DFGGraph.h" #include "DFGInsertionSet.h" #include "DFGPhase.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -48,77 +48,34 @@ public: { ASSERT(m_graph.m_form == ThreadedCPS || m_graph.m_form == SSA); - // First reset the counts to 0 for all nodes. - for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) - continue; - for (unsigned indexInBlock = block->size(); indexInBlock--;) - block->at(indexInBlock)->setRefCount(0); - for (unsigned phiIndex = block->phis.size(); phiIndex--;) - block->phis[phiIndex]->setRefCount(0); - } - - // Now find the roots: - // - Nodes that are must-generate. - // - Nodes that are reachable from type checks. - // Set their ref counts to 1 and put them on the worklist. - for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { + m_graph.computeRefCounts(); + + for (BasicBlock* block : m_graph.blocksInPreOrder()) + fixupBlock(block); + + cleanVariables(m_graph.m_arguments); + + // Just do a basic Phantom/Check clean-up. + for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { BasicBlock* block = m_graph.block(blockIndex); if (!block) continue; - for (unsigned indexInBlock = block->size(); indexInBlock--;) { - Node* node = block->at(indexInBlock); - DFG_NODE_DO_TO_CHILDREN(m_graph, node, findTypeCheckRoot); - if (!(node->flags() & NodeMustGenerate)) - continue; - if (!node->postfixRef()) - m_worklist.append(node); - } - } - - while (!m_worklist.isEmpty()) { - while (!m_worklist.isEmpty()) { - Node* node = m_worklist.last(); - m_worklist.removeLast(); - ASSERT(node->shouldGenerate()); // It should not be on the worklist unless it's ref'ed. - DFG_NODE_DO_TO_CHILDREN(m_graph, node, countEdge); - } - - if (m_graph.m_form == SSA) { - // Find Phi->Upsilon edges, which are represented as meta-data in the - // Upsilon. - for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) + unsigned sourceIndex = 0; + unsigned targetIndex = 0; + while (sourceIndex < block->size()) { + Node* node = block->at(sourceIndex++); + switch (node->op()) { + case Check: + case Phantom: + if (node->children.isEmpty()) continue; - for (unsigned nodeIndex = block->size(); nodeIndex--;) { - Node* node = block->at(nodeIndex); - if (node->op() != Upsilon) - continue; - if (node->shouldGenerate()) - continue; - if (node->phi()->shouldGenerate()) - countNode(node); - } + break; + default: + break; } + block->at(targetIndex++) = node; } - } - - if (m_graph.m_form == SSA) { - // Need to process the graph in reverse DFS order, so that we get to the uses - // of a node before we get to the node itself. - Vector<BasicBlock*> depthFirst; - m_graph.getBlocksInDepthFirstOrder(depthFirst); - for (unsigned i = depthFirst.size(); i--;) - fixupBlock(depthFirst[i]); - } else { - RELEASE_ASSERT(m_graph.m_form == ThreadedCPS); - - for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) - fixupBlock(m_graph.block(blockIndex)); - - cleanVariables(m_graph.m_arguments); + block->resize(targetIndex); } m_graph.m_refCountState = ExactRefCount; @@ -127,51 +84,16 @@ public: } private: - void findTypeCheckRoot(Node*, Edge edge) - { - // We may have an "unproved" untyped use for code that is unreachable. 
The CFA - // will just not have gotten around to it. - if (edge.willNotHaveCheck()) - return; - if (!edge->postfixRef()) - m_worklist.append(edge.node()); - } - - void countNode(Node* node) - { - if (node->postfixRef()) - return; - m_worklist.append(node); - } - - void countEdge(Node*, Edge edge) - { - // Don't count edges that are already counted for their type checks. - if (edge.willHaveCheck()) - return; - countNode(edge.node()); - } - void fixupBlock(BasicBlock* block) { if (!block) return; - - switch (m_graph.m_form) { - case SSA: - break; - - case ThreadedCPS: { - // Clean up variable links for the block. We need to do this before the actual DCE - // because we need to see GetLocals, so we can bypass them in situations where the - // vars-at-tail point to a GetLocal, the GetLocal is dead, but the Phi it points - // to is alive. - + + if (m_graph.m_form == ThreadedCPS) { for (unsigned phiIndex = 0; phiIndex < block->phis.size(); ++phiIndex) { - if (!block->phis[phiIndex]->shouldGenerate()) { - // FIXME: We could actually free nodes here. Except that it probably - // doesn't matter, since we don't add any nodes after this phase. - // https://bugs.webkit.org/show_bug.cgi?id=126239 + Node* phi = block->phis[phiIndex]; + if (!phi->shouldGenerate()) { + m_graph.m_allocator.free(phi); block->phis[phiIndex--] = block->phis.last(); block->phis.removeLast(); } @@ -179,75 +101,37 @@ private: cleanVariables(block->variablesAtHead); cleanVariables(block->variablesAtTail); - break; - } - - default: - RELEASE_ASSERT_NOT_REACHED(); - return; } - for (unsigned indexInBlock = block->size(); indexInBlock--;) { + // This has to be a forward loop because we are using the insertion set. + for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) { Node* node = block->at(indexInBlock); if (node->shouldGenerate()) continue; - switch (node->op()) { - case MovHint: { - ASSERT(node->child1().useKind() == UntypedUse); - if (!node->child1()->shouldGenerate()) { - node->setOpAndDefaultFlags(ZombieHint); - node->child1() = Edge(); - break; + if (node->flags() & NodeHasVarArgs) { + for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) { + Edge edge = m_graph.m_varArgChildren[childIdx]; + + if (!edge || edge.willNotHaveCheck()) + continue; + + m_insertionSet.insertNode(indexInBlock, SpecNone, Check, node->origin, edge); } - node->setOpAndDefaultFlags(MovHint); - break; - } - case ZombieHint: { - // Currently we assume that DCE runs only once. 
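The dead-node handling here never drops type checks: for a node that should not generate code, every edge that would still perform a check is re-expressed as an explicit Check node, and only then are the node's children cleared. A simplified stand-alone sketch of that rule (Edge and Node below are invented stand-ins, not JSC's types):

    #include <vector>

    struct Edge { bool needsCheck = false; };
    struct Node {
        bool live = false;
        std::vector<Edge> children;
    };

    // Returns the checks that must be materialized in place of a dead node.
    std::vector<Edge> lowerDeadNode(Node& node)
    {
        std::vector<Edge> checks;
        if (node.live)
            return checks;                  // live nodes are left alone
        for (const Edge& edge : node.children) {
            if (edge.needsCheck)
                checks.push_back(edge);     // keep the type check alive
        }
        node.children.clear();              // the value itself is never used
        return checks;
    }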
- RELEASE_ASSERT_NOT_REACHED(); - break; + node->setOpAndDefaultFlags(Check); + node->children.reset(); + node->setRefCount(1); + continue; } - default: { - if (node->flags() & NodeHasVarArgs) { - for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) { - Edge edge = m_graph.m_varArgChildren[childIdx]; - - if (!edge || edge.willNotHaveCheck()) - continue; - - m_insertionSet.insertNode(indexInBlock, SpecNone, Phantom, node->codeOrigin, edge); - } - - node->convertToPhantomUnchecked(); - node->children.reset(); - node->setRefCount(1); - break; - } - - node->convertToPhantom(); - eliminateIrrelevantPhantomChildren(node); - node->setRefCount(1); - break; - } } + node->remove(); + node->setRefCount(1); } m_insertionSet.execute(block); } - void eliminateIrrelevantPhantomChildren(Node* node) - { - for (unsigned i = 0; i < AdjacencyList::Size; ++i) { - Edge edge = node->children.child(i); - if (!edge) - continue; - if (edge.willNotHaveCheck()) - node->children.removeEdge(i--); - } - } - template<typename VariablesVectorType> void cleanVariables(VariablesVectorType& variables) { @@ -255,21 +139,12 @@ private: Node* node = variables[i]; if (!node) continue; - if (node->op() != Phantom && node->shouldGenerate()) + if (node->op() != Check && node->shouldGenerate()) continue; - if (node->op() == GetLocal) { - node = node->child1().node(); - ASSERT(node->op() == Phi || node->op() == SetArgument); - if (node->shouldGenerate()) { - variables[i] = node; - continue; - } - } - variables[i] = 0; + variables[i] = nullptr; } } - Vector<Node*, 128> m_worklist; InsertionSet m_insertionSet; }; diff --git a/Source/JavaScriptCore/dfg/DFGDCEPhase.h b/Source/JavaScriptCore/dfg/DFGDCEPhase.h index 2bb991306..b5e31ddd2 100644 --- a/Source/JavaScriptCore/dfg/DFGDCEPhase.h +++ b/Source/JavaScriptCore/dfg/DFGDCEPhase.h @@ -26,8 +26,6 @@ #ifndef DFGDCEPhase_h #define DFGDCEPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGCommon.h" diff --git a/Source/JavaScriptCore/dfg/DFGDesiredIdentifiers.cpp b/Source/JavaScriptCore/dfg/DFGDesiredIdentifiers.cpp index f6587f47f..48d77c3e1 100644 --- a/Source/JavaScriptCore/dfg/DFGDesiredIdentifiers.cpp +++ b/Source/JavaScriptCore/dfg/DFGDesiredIdentifiers.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,11 +29,20 @@ #if ENABLE(DFG_JIT) #include "CodeBlock.h" +#include "DFGCommonData.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { +DesiredIdentifiers::DesiredIdentifiers() + : m_codeBlock(nullptr) + , m_didProcessIdentifiers(false) +{ +} + DesiredIdentifiers::DesiredIdentifiers(CodeBlock* codeBlock) : m_codeBlock(codeBlock) + , m_didProcessIdentifiers(false) { } @@ -46,14 +55,28 @@ unsigned DesiredIdentifiers::numberOfIdentifiers() return m_codeBlock->numberOfIdentifiers() + m_addedIdentifiers.size(); } -void DesiredIdentifiers::addLazily(StringImpl* rep) +unsigned DesiredIdentifiers::ensure(UniquedStringImpl* rep) { - m_addedIdentifiers.append(rep); + if (!m_didProcessIdentifiers) { + // Do this now instead of the constructor so that we don't pay the price on the main + // thread. Also, not all compilations need to call ensure(). 
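ensure() above interns an identifier into a single numbering: indices below the code block's own identifier count refer to existing entries, anything new is appended to m_addedIdentifiers, and the name-to-number map is built lazily the first time ensure() is called. A generic sketch of the same interning pattern, using std::string in place of UniquedStringImpl*:

    #include <string>
    #include <unordered_map>
    #include <vector>

    class InternTable {
    public:
        explicit InternTable(std::vector<std::string> existing)
            : m_existing(std::move(existing)) { }

        unsigned ensure(const std::string& name)
        {
            if (!m_processed) {
                // Build the lookup map lazily, on the first call.
                for (unsigned i = 0; i < m_existing.size(); ++i)
                    m_map.emplace(m_existing[i], i);
                m_processed = true;
            }
            auto result = m_map.emplace(name, unsigned(m_existing.size() + m_added.size()));
            if (result.second)
                m_added.push_back(name);    // a genuinely new identifier
            return result.first->second;
        }

    private:
        std::vector<std::string> m_existing;
        std::vector<std::string> m_added;
        std::unordered_map<std::string, unsigned> m_map;
        bool m_processed = false;
    };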
+ for (unsigned index = m_codeBlock->numberOfIdentifiers(); index--;) + m_identifierNumberForName.add(m_codeBlock->identifier(index).impl(), index); + m_didProcessIdentifiers = true; + } + + auto addResult = m_identifierNumberForName.add(rep, numberOfIdentifiers()); + unsigned result = addResult.iterator->value; + if (addResult.isNewEntry) { + m_addedIdentifiers.append(rep); + ASSERT(at(result) == rep); + } + return result; } -StringImpl* DesiredIdentifiers::at(unsigned index) const +UniquedStringImpl* DesiredIdentifiers::at(unsigned index) const { - StringImpl* result; + UniquedStringImpl* result; if (index < m_codeBlock->numberOfIdentifiers()) result = m_codeBlock->identifier(index).impl(); else @@ -65,9 +88,9 @@ StringImpl* DesiredIdentifiers::at(unsigned index) const void DesiredIdentifiers::reallyAdd(VM& vm, CommonData* commonData) { for (unsigned i = 0; i < m_addedIdentifiers.size(); ++i) { - StringImpl* rep = m_addedIdentifiers[i]; + auto rep = m_addedIdentifiers[i]; ASSERT(rep->hasAtLeastOneRef()); - commonData->dfgIdentifiers.append(Identifier(&vm, rep)); + commonData->dfgIdentifiers.append(Identifier::fromUid(&vm, rep)); } } diff --git a/Source/JavaScriptCore/dfg/DFGDesiredIdentifiers.h b/Source/JavaScriptCore/dfg/DFGDesiredIdentifiers.h index a41f230b3..d8587dc6d 100644 --- a/Source/JavaScriptCore/dfg/DFGDesiredIdentifiers.h +++ b/Source/JavaScriptCore/dfg/DFGDesiredIdentifiers.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGDesiredIdentifiers_h #define DFGDesiredIdentifiers_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "Identifier.h" @@ -42,21 +40,24 @@ class CommonData; class DesiredIdentifiers { public: + DesiredIdentifiers(); DesiredIdentifiers(CodeBlock*); ~DesiredIdentifiers(); unsigned numberOfIdentifiers(); - void addLazily(StringImpl*); + unsigned ensure(UniquedStringImpl*); - StringImpl* at(unsigned index) const; + UniquedStringImpl* at(unsigned index) const; - StringImpl* operator[](unsigned index) const { return at(index); } + UniquedStringImpl* operator[](unsigned index) const { return at(index); } void reallyAdd(VM&, CommonData*); private: CodeBlock* m_codeBlock; - Vector<StringImpl*> m_addedIdentifiers; + Vector<UniquedStringImpl*> m_addedIdentifiers; + HashMap<UniquedStringImpl*, unsigned> m_identifierNumberForName; + bool m_didProcessIdentifiers; }; } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGDesiredInferredType.h b/Source/JavaScriptCore/dfg/DFGDesiredInferredType.h new file mode 100644 index 000000000..d0212886c --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGDesiredInferredType.h @@ -0,0 +1,132 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGDesiredInferredType_h +#define DFGDesiredInferredType_h + +#if ENABLE(DFG_JIT) + +#include "InferredType.h" +#include <wtf/HashMap.h> + +namespace JSC { namespace DFG { + +// This documents the DFG's expectation for an InferredType: specifically, that we want it to have a live +// InferredType object and that this object has the type that we originally saw. + +class DesiredInferredType { +public: + DesiredInferredType() + : m_object(nullptr) + { + } + + DesiredInferredType(InferredType* object, const InferredType::Descriptor& expected) + : m_object(object) + , m_expected(expected) + { + } + + DesiredInferredType(WTF::HashTableDeletedValueType) + : m_object(nullptr) + , m_expected(InferredType::Top) + { + } + + explicit operator bool() const { return m_object && m_expected; } + + InferredType* object() const { return m_object; } + InferredType::Descriptor expected() const { return m_expected; } + + bool isStillValid() const + { + return m_object->canWatch(m_expected); + } + + void add(Watchpoint* watchpoint) const + { + m_object->addWatchpoint(watchpoint); + } + + bool operator==(const DesiredInferredType& other) const + { + return m_object == other.m_object + && m_expected == other.m_expected; + } + + bool operator!=(const DesiredInferredType& other) const + { + return !(*this == other); + } + + bool isHashTableDeletedValue() const + { + return !m_object && m_expected == InferredType::Top; + } + + unsigned hash() const + { + return WTF::PtrHash<InferredType*>::hash(m_object) + m_expected.hash() * 7; + } + + void dumpInContext(PrintStream& out, DumpContext* context) const + { + out.print(inContext(m_expected, context), " for ", RawPointer(m_object)); + } + + void dump(PrintStream& out) const + { + dumpInContext(out, nullptr); + } + +private: + InferredType* m_object; + InferredType::Descriptor m_expected; +}; + +struct DesiredInferredTypeHash { + static unsigned hash(const DesiredInferredType& key) { return key.hash(); } + static bool equal(const DesiredInferredType& a, const DesiredInferredType& b) { return a == b; } + static const bool safeToCompareToEmptyOrDeleted = true; +}; + +} } // namespace JSC::DFG + +namespace WTF { + +template<typename T> struct DefaultHash; +template<> struct DefaultHash<JSC::DFG::DesiredInferredType> { + typedef JSC::DFG::DesiredInferredTypeHash Hash; +}; + +template<typename T> struct HashTraits; +template<> struct HashTraits<JSC::DFG::DesiredInferredType> : SimpleClassHashTraits<JSC::DFG::DesiredInferredType> { }; + +} // namespace WTF + +#endif // ENABLE(DFG_JIT) + +#endif // DFGDesiredInferredType_h + diff --git a/Source/JavaScriptCore/dfg/DFGDesiredStructureChains.h b/Source/JavaScriptCore/dfg/DFGDesiredStructureChains.h deleted file mode 100644 index 3c20194d7..000000000 --- a/Source/JavaScriptCore/dfg/DFGDesiredStructureChains.h 
+++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (C) 2013 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef DFGDesiredStructureChains_h -#define DFGDesiredStructureChains_h - -#include <wtf/Platform.h> - -#if ENABLE(DFG_JIT) - -#include "IntendedStructureChain.h" -#include <wtf/Vector.h> - -namespace JSC { namespace DFG { - -class DesiredStructureChains { -public: - DesiredStructureChains(); - ~DesiredStructureChains(); - - void addLazily(PassRefPtr<IntendedStructureChain> chain) - { - m_vector.append(chain); - } - - bool areStillValid() const; -private: - Vector<RefPtr<IntendedStructureChain>> m_vector; -}; - -} } // namespace JSC::DFG - -#endif // ENABLE(DFG_JIT) - -#endif // DFGDesiredStructureChains_h - diff --git a/Source/JavaScriptCore/dfg/DFGDesiredTransitions.cpp b/Source/JavaScriptCore/dfg/DFGDesiredTransitions.cpp index 0cfa00f6f..e273ae891 100644 --- a/Source/JavaScriptCore/dfg/DFGDesiredTransitions.cpp +++ b/Source/JavaScriptCore/dfg/DFGDesiredTransitions.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -24,17 +24,17 @@ */ #include "config.h" +#include "DFGDesiredTransitions.h" #if ENABLE(DFG_JIT) -#include "DFGDesiredTransitions.h" - #include "CodeBlock.h" #include "DFGCommonData.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { -DesiredTransition::DesiredTransition(CodeBlock* codeBlock, ScriptExecutable* codeOriginOwner, Structure* oldStructure, Structure* newStructure) +DesiredTransition::DesiredTransition(CodeBlock* codeBlock, CodeBlock* codeOriginOwner, Structure* oldStructure, Structure* newStructure) : m_codeBlock(codeBlock) , m_codeOriginOwner(codeOriginOwner) , m_oldStructure(oldStructure) @@ -46,11 +46,18 @@ void DesiredTransition::reallyAdd(VM& vm, CommonData* common) { common->transitions.append( WeakReferenceTransition( - vm, m_codeBlock->ownerExecutable(), + vm, m_codeBlock, m_codeOriginOwner, m_oldStructure, m_newStructure)); } +void DesiredTransition::visitChildren(SlotVisitor& visitor) +{ + visitor.appendUnbarrieredPointer(&m_codeOriginOwner); + visitor.appendUnbarrieredPointer(&m_oldStructure); + visitor.appendUnbarrieredPointer(&m_newStructure); +} + DesiredTransitions::DesiredTransitions() { } @@ -59,7 +66,7 @@ DesiredTransitions::~DesiredTransitions() { } -void DesiredTransitions::addLazily(CodeBlock* codeBlock, ScriptExecutable* codeOriginOwner, Structure* oldStructure, Structure* newStructure) +void DesiredTransitions::addLazily(CodeBlock* codeBlock, CodeBlock* codeOriginOwner, Structure* oldStructure, Structure* newStructure) { m_transitions.append(DesiredTransition(codeBlock, codeOriginOwner, oldStructure, newStructure)); } @@ -70,6 +77,12 @@ void DesiredTransitions::reallyAdd(VM& vm, CommonData* common) m_transitions[i].reallyAdd(vm, common); } +void DesiredTransitions::visitChildren(SlotVisitor& visitor) +{ + for (unsigned i = 0; i < m_transitions.size(); i++) + m_transitions[i].visitChildren(visitor); +} + } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGDesiredTransitions.h b/Source/JavaScriptCore/dfg/DFGDesiredTransitions.h index 246a81062..9ea6ada1d 100644 --- a/Source/JavaScriptCore/dfg/DFGDesiredTransitions.h +++ b/Source/JavaScriptCore/dfg/DFGDesiredTransitions.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -34,6 +34,7 @@ namespace JSC { class CodeBlock; class ScriptExecutable; +class SlotVisitor; class Structure; class VM; @@ -43,13 +44,15 @@ class CommonData; class DesiredTransition { public: - DesiredTransition(CodeBlock*, ScriptExecutable*, Structure*, Structure*); + DesiredTransition(CodeBlock*, CodeBlock* codeOriginOwner, Structure*, Structure*); void reallyAdd(VM&, CommonData*); + + void visitChildren(SlotVisitor&); private: CodeBlock* m_codeBlock; - ScriptExecutable* m_codeOriginOwner; + CodeBlock* m_codeOriginOwner; Structure* m_oldStructure; Structure* m_newStructure; }; @@ -59,8 +62,9 @@ public: DesiredTransitions(); ~DesiredTransitions(); - void addLazily(CodeBlock*, ScriptExecutable*, Structure*, Structure*); + void addLazily(CodeBlock*, CodeBlock* codeOriginOwner, Structure*, Structure*); void reallyAdd(VM&, CommonData*); + void visitChildren(SlotVisitor&); private: Vector<DesiredTransition> m_transitions; diff --git a/Source/JavaScriptCore/dfg/DFGDesiredWatchpoints.cpp b/Source/JavaScriptCore/dfg/DFGDesiredWatchpoints.cpp index 80400cb26..8761d9bd4 100644 --- a/Source/JavaScriptCore/dfg/DFGDesiredWatchpoints.cpp +++ b/Source/JavaScriptCore/dfg/DFGDesiredWatchpoints.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,13 +30,14 @@ #include "ArrayBufferNeuteringWatchpoint.h" #include "CodeBlock.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { void ArrayBufferViewWatchpointAdaptor::add( - CodeBlock* codeBlock, JSArrayBufferView* view, Watchpoint* watchpoint) + CodeBlock* codeBlock, JSArrayBufferView* view, CommonData& common) { + Watchpoint* watchpoint = common.watchpoints.add(codeBlock); ArrayBufferNeuteringWatchpoint* neuteringWatchpoint = ArrayBufferNeuteringWatchpoint::create(*codeBlock->vm()); neuteringWatchpoint->set()->add(watchpoint); @@ -44,6 +45,31 @@ void ArrayBufferViewWatchpointAdaptor::add( codeBlock->vm()->heap.addReference(neuteringWatchpoint, view->buffer()); } +void InferredValueAdaptor::add( + CodeBlock* codeBlock, InferredValue* inferredValue, CommonData& common) +{ + codeBlock->addConstant(inferredValue); // For common users, it doesn't really matter if it's weak or not. If references to it go away, we go away, too. 
+ inferredValue->add(common.watchpoints.add(codeBlock)); +} + +void AdaptiveStructureWatchpointAdaptor::add( + CodeBlock* codeBlock, const ObjectPropertyCondition& key, CommonData& common) +{ + switch (key.kind()) { + case PropertyCondition::Equivalence: + common.adaptiveInferredPropertyValueWatchpoints.add(key, codeBlock)->install(); + break; + default: + common.adaptiveStructureWatchpoints.add(key, codeBlock)->install(); + break; + } +} + +void InferredTypeAdaptor::add(CodeBlock* codeBlock, const DesiredInferredType& key, CommonData& common) +{ + key.add(common.watchpoints.add(codeBlock)); +} + DesiredWatchpoints::DesiredWatchpoints() { } DesiredWatchpoints::~DesiredWatchpoints() { } @@ -57,33 +83,63 @@ void DesiredWatchpoints::addLazily(InlineWatchpointSet& set) m_inlineSets.addLazily(&set); } +void DesiredWatchpoints::addLazily(InferredValue* inferredValue) +{ + m_inferredValues.addLazily(inferredValue); +} + void DesiredWatchpoints::addLazily(JSArrayBufferView* view) { m_bufferViews.addLazily(view); } -void DesiredWatchpoints::addLazily(CodeOrigin codeOrigin, ExitKind exitKind, WatchpointSet* set) +void DesiredWatchpoints::addLazily(const ObjectPropertyCondition& key) { - m_sets.addLazily(codeOrigin, exitKind, set); + m_adaptiveStructureSets.addLazily(key); } -void DesiredWatchpoints::addLazily(CodeOrigin codeOrigin, ExitKind exitKind, InlineWatchpointSet& set) +void DesiredWatchpoints::addLazily(const DesiredInferredType& key) { - m_inlineSets.addLazily(codeOrigin, exitKind, &set); + m_inferredTypes.addLazily(key); +} + +bool DesiredWatchpoints::consider(Structure* structure) +{ + if (!structure->dfgShouldWatch()) + return false; + addLazily(structure->transitionWatchpointSet()); + return true; } void DesiredWatchpoints::reallyAdd(CodeBlock* codeBlock, CommonData& commonData) { m_sets.reallyAdd(codeBlock, commonData); m_inlineSets.reallyAdd(codeBlock, commonData); + m_inferredValues.reallyAdd(codeBlock, commonData); m_bufferViews.reallyAdd(codeBlock, commonData); + m_adaptiveStructureSets.reallyAdd(codeBlock, commonData); + m_inferredTypes.reallyAdd(codeBlock, commonData); } bool DesiredWatchpoints::areStillValid() const { return m_sets.areStillValid() && m_inlineSets.areStillValid() - && m_bufferViews.areStillValid(); + && m_inferredValues.areStillValid() + && m_bufferViews.areStillValid() + && m_adaptiveStructureSets.areStillValid() + && m_inferredTypes.areStillValid(); +} + +void DesiredWatchpoints::dumpInContext(PrintStream& out, DumpContext* context) const +{ + out.print("Desired watchpoints:\n"); + out.print(" Watchpoint sets: ", inContext(m_sets, context), "\n"); + out.print(" Inline watchpoint sets: ", inContext(m_inlineSets, context), "\n"); + out.print(" Inferred values: ", inContext(m_inferredValues, context), "\n"); + out.print(" Buffer views: ", inContext(m_bufferViews, context), "\n"); + out.print(" Object property conditions: ", inContext(m_adaptiveStructureSets, context), "\n"); + out.print(" Inferred types: ", inContext(m_inferredTypes, context), "\n"); } } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGDesiredWatchpoints.h b/Source/JavaScriptCore/dfg/DFGDesiredWatchpoints.h index 64e88c764..09ec8aab7 100644 --- a/Source/JavaScriptCore/dfg/DFGDesiredWatchpoints.h +++ b/Source/JavaScriptCore/dfg/DFGDesiredWatchpoints.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,66 +26,88 @@ #ifndef DFGDesiredWatchpoints_h #define DFGDesiredWatchpoints_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "CodeOrigin.h" #include "DFGCommonData.h" +#include "DFGDesiredInferredType.h" +#include "InferredValue.h" #include "JSArrayBufferView.h" +#include "ObjectPropertyCondition.h" #include "Watchpoint.h" -#include <wtf/HashMap.h> +#include <wtf/CommaPrinter.h> #include <wtf/HashSet.h> -#include <wtf/Noncopyable.h> -#include <wtf/Vector.h> namespace JSC { namespace DFG { -template<typename WatchpointSetType> -struct WatchpointForGenericWatchpointSet { - WatchpointForGenericWatchpointSet() - : m_exitKind(ExitKindUnset) - , m_set(0) +class Graph; + +template<typename T> +struct SetPointerAdaptor { + static void add(CodeBlock* codeBlock, T set, CommonData& common) { + return set->add(common.watchpoints.add(codeBlock)); } - - WatchpointForGenericWatchpointSet( - CodeOrigin codeOrigin, ExitKind exitKind, WatchpointSetType* set) - : m_codeOrigin(codeOrigin) - , m_exitKind(exitKind) - , m_set(set) + static bool hasBeenInvalidated(T set) { return set->hasBeenInvalidated(); } + static void dumpInContext(PrintStream& out, T set, DumpContext*) { + out.print(RawPointer(set)); } - - CodeOrigin m_codeOrigin; - ExitKind m_exitKind; - WatchpointSetType* m_set; }; -template<typename T> -struct GenericSetAdaptor { - static void add(CodeBlock*, T* set, Watchpoint* watchpoint) +struct InferredValueAdaptor { + static void add(CodeBlock*, InferredValue*, CommonData&); + static bool hasBeenInvalidated(InferredValue* inferredValue) + { + return inferredValue->hasBeenInvalidated(); + } + static void dumpInContext(PrintStream& out, InferredValue* inferredValue, DumpContext*) { - return set->add(watchpoint); + out.print(RawPointer(inferredValue)); } - static bool hasBeenInvalidated(T* set) { return set->hasBeenInvalidated(); } }; struct ArrayBufferViewWatchpointAdaptor { - static void add(CodeBlock*, JSArrayBufferView*, Watchpoint*); + static void add(CodeBlock*, JSArrayBufferView*, CommonData&); static bool hasBeenInvalidated(JSArrayBufferView* view) { - bool result = !view->length(); - WTF::loadLoadFence(); - return result; + return !view->length(); + } + static void dumpInContext(PrintStream& out, JSArrayBufferView* view, DumpContext* context) + { + out.print(inContext(JSValue(view), context)); } }; -template<typename WatchpointSetType, typename Adaptor = GenericSetAdaptor<WatchpointSetType>> +struct AdaptiveStructureWatchpointAdaptor { + static void add(CodeBlock*, const ObjectPropertyCondition&, CommonData&); + static bool hasBeenInvalidated(const ObjectPropertyCondition& key) + { + return !key.isWatchable(); + } + static void dumpInContext( + PrintStream& out, const ObjectPropertyCondition& key, DumpContext* context) + { + out.print(inContext(key, context)); + } +}; + +struct InferredTypeAdaptor { + static void add(CodeBlock*, const DesiredInferredType&, CommonData&); + static bool hasBeenInvalidated(const DesiredInferredType& key) + { + return !key.isStillValid(); + } + static void dumpInContext(PrintStream& out, const DesiredInferredType& key, DumpContext* context) + { + out.print(inContext(key, context)); + } +}; + +template<typename WatchpointSetType, typename Adaptor = SetPointerAdaptor<WatchpointSetType>> class GenericDesiredWatchpoints { - WTF_MAKE_NONCOPYABLE(GenericDesiredWatchpoints); #if !ASSERT_DISABLED - typedef 
HashMap<WatchpointSetType*, bool> StateMap; + typedef HashMap<WatchpointSetType, bool> StateMap; #endif public: GenericDesiredWatchpoints() @@ -93,95 +115,47 @@ public: { } - void addLazily(WatchpointSetType* set) + void addLazily(const WatchpointSetType& set) { m_sets.add(set); } - void addLazily(CodeOrigin codeOrigin, ExitKind exitKind, WatchpointSetType* set) - { - m_profiledWatchpoints.append( - WatchpointForGenericWatchpointSet<WatchpointSetType>(codeOrigin, exitKind, set)); - } - void reallyAdd(CodeBlock* codeBlock, CommonData& common) { RELEASE_ASSERT(!m_reallyAdded); - typename HashSet<WatchpointSetType*>::iterator iter = m_sets.begin(); - typename HashSet<WatchpointSetType*>::iterator end = m_sets.end(); - for (; iter != end; ++iter) { - common.watchpoints.append(CodeBlockJettisoningWatchpoint(codeBlock)); - Adaptor::add(codeBlock, *iter, &common.watchpoints.last()); - } - - for (unsigned i = m_profiledWatchpoints.size(); i--;) { - WatchpointForGenericWatchpointSet<WatchpointSetType> watchpoint = - m_profiledWatchpoints[i]; - common.profiledWatchpoints.append( - ProfiledCodeBlockJettisoningWatchpoint(watchpoint.m_codeOrigin, watchpoint.m_exitKind, codeBlock)); - Adaptor::add(codeBlock, watchpoint.m_set, &common.profiledWatchpoints.last()); - } + for (auto& set : m_sets) + Adaptor::add(codeBlock, set, common); m_reallyAdded = true; } bool areStillValid() const { - typename HashSet<WatchpointSetType*>::iterator iter = m_sets.begin(); - typename HashSet<WatchpointSetType*>::iterator end = m_sets.end(); - for (; iter != end; ++iter) { - if (Adaptor::hasBeenInvalidated(*iter)) + for (auto& set : m_sets) { + if (Adaptor::hasBeenInvalidated(set)) return false; } - for (unsigned i = m_profiledWatchpoints.size(); i--;) { - if (Adaptor::hasBeenInvalidated(m_profiledWatchpoints[i].m_set)) - return false; - } - - return true; - } - -#if ASSERT_DISABLED - bool isStillValid(WatchpointSetType* set) - { - return !Adaptor::hasBeenInvalidated(set); - } - - bool shouldAssumeMixedState(WatchpointSetType*) - { return true; } -#else - bool isStillValid(WatchpointSetType* set) - { - bool result = !Adaptor::hasBeenInvalidated(set); - m_firstKnownState.add(set, result); - return result; - } - bool shouldAssumeMixedState(WatchpointSetType* set) + bool isWatched(const WatchpointSetType& set) const { - typename StateMap::iterator iter = m_firstKnownState.find(set); - if (iter == m_firstKnownState.end()) - return false; - - return iter->value != !Adaptor::hasBeenInvalidated(set); + return m_sets.contains(set); } -#endif - - bool isValidOrMixed(WatchpointSetType* set) + + void dumpInContext(PrintStream& out, DumpContext* context) const { - return isStillValid(set) || shouldAssumeMixedState(set); + CommaPrinter comma; + for (const WatchpointSetType& entry : m_sets) { + out.print(comma); + Adaptor::dumpInContext(out, entry, context); + } } private: - Vector<WatchpointForGenericWatchpointSet<WatchpointSetType>> m_profiledWatchpoints; - HashSet<WatchpointSetType*> m_sets; -#if !ASSERT_DISABLED - StateMap m_firstKnownState; -#endif + HashSet<WatchpointSetType> m_sets; bool m_reallyAdded; }; @@ -192,55 +166,58 @@ public: void addLazily(WatchpointSet*); void addLazily(InlineWatchpointSet&); + void addLazily(InferredValue*); void addLazily(JSArrayBufferView*); - void addLazily(CodeOrigin, ExitKind, WatchpointSet*); - void addLazily(CodeOrigin, ExitKind, InlineWatchpointSet&); + + // It's recommended that you don't call this directly. 
Use Graph::watchCondition(), which does + // the required GC magic as well as some other bookkeeping. + void addLazily(const ObjectPropertyCondition&); + + // It's recommended that you don't call this directly. Use Graph::inferredTypeFor(), which does + // the required GC magic. + void addLazily(const DesiredInferredType&); + + bool consider(Structure*); void reallyAdd(CodeBlock*, CommonData&); bool areStillValid() const; - bool isStillValid(WatchpointSet* set) + bool isWatched(WatchpointSet* set) { - return m_sets.isStillValid(set); + return m_sets.isWatched(set); } - bool isStillValid(InlineWatchpointSet& set) + bool isWatched(InlineWatchpointSet& set) { - return m_inlineSets.isStillValid(&set); + return m_inlineSets.isWatched(&set); } - bool isStillValid(JSArrayBufferView* view) + bool isWatched(InferredValue* inferredValue) { - return m_bufferViews.isStillValid(view); + return m_inferredValues.isWatched(inferredValue); } - bool shouldAssumeMixedState(WatchpointSet* set) + bool isWatched(JSArrayBufferView* view) { - return m_sets.shouldAssumeMixedState(set); + return m_bufferViews.isWatched(view); } - bool shouldAssumeMixedState(InlineWatchpointSet& set) + bool isWatched(const ObjectPropertyCondition& key) { - return m_inlineSets.shouldAssumeMixedState(&set); + return m_adaptiveStructureSets.isWatched(key); } - bool shouldAssumeMixedState(JSArrayBufferView* view) + bool isWatched(const DesiredInferredType& key) { - return m_bufferViews.shouldAssumeMixedState(view); - } - bool isValidOrMixed(WatchpointSet* set) - { - return m_sets.isValidOrMixed(set); - } - bool isValidOrMixed(InlineWatchpointSet& set) - { - return m_inlineSets.isValidOrMixed(&set); - } - bool isValidOrMixed(JSArrayBufferView* view) - { - return m_bufferViews.isValidOrMixed(view); + return m_inferredTypes.isWatched(key); } + + void dumpInContext(PrintStream&, DumpContext*) const; + void dump(PrintStream&) const; private: - GenericDesiredWatchpoints<WatchpointSet> m_sets; - GenericDesiredWatchpoints<InlineWatchpointSet> m_inlineSets; - GenericDesiredWatchpoints<JSArrayBufferView, ArrayBufferViewWatchpointAdaptor> m_bufferViews; + GenericDesiredWatchpoints<WatchpointSet*> m_sets; + GenericDesiredWatchpoints<InlineWatchpointSet*> m_inlineSets; + GenericDesiredWatchpoints<InferredValue*, InferredValueAdaptor> m_inferredValues; + GenericDesiredWatchpoints<JSArrayBufferView*, ArrayBufferViewWatchpointAdaptor> m_bufferViews; + GenericDesiredWatchpoints<ObjectPropertyCondition, AdaptiveStructureWatchpointAdaptor> m_adaptiveStructureSets; + GenericDesiredWatchpoints<DesiredInferredType, InferredTypeAdaptor> m_inferredTypes; }; } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGDesiredWeakReferences.cpp b/Source/JavaScriptCore/dfg/DFGDesiredWeakReferences.cpp index a8376ea8a..1af13ef4e 100644 --- a/Source/JavaScriptCore/dfg/DFGDesiredWeakReferences.cpp +++ b/Source/JavaScriptCore/dfg/DFGDesiredWeakReferences.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. 
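GenericDesiredWatchpoints above is a small policy-based container: the set itself is type-agnostic, and the Adaptor parameter supplies how to register a watchpoint for an entry and how to tell whether an entry has since been invalidated. A condensed sketch of that design, with invented stand-in types:

    #include <unordered_set>

    struct WatchpointSet {
        bool invalidated = false;
        void watch() { /* register a watchpoint */ }
    };

    // Policy: how to act on one entry (mirrors SetPointerAdaptor above).
    struct SetPointerPolicy {
        static void add(WatchpointSet* set) { set->watch(); }
        static bool hasBeenInvalidated(WatchpointSet* set) { return set->invalidated; }
    };

    template<typename T, typename Policy>
    class DesiredSet {
    public:
        void addLazily(T value) { m_values.insert(value); }

        void reallyAdd()
        {
            for (T value : m_values)
                Policy::add(value);
        }

        bool areStillValid() const
        {
            for (T value : m_values) {
                if (Policy::hasBeenInvalidated(value))
                    return false;
            }
            return true;
        }

    private:
        std::unordered_set<T> m_values;
    };

    using DesiredWatchpointSets = DesiredSet<WatchpointSet*, SetPointerPolicy>;

Adding a new kind of desired watchpoint then only needs a new adaptor, which is what this patch does for InferredValue, ObjectPropertyCondition, and DesiredInferredType, rather than a new container.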
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -24,16 +24,21 @@ */ #include "config.h" +#include "DFGDesiredWeakReferences.h" #if ENABLE(DFG_JIT) -#include "DFGDesiredWeakReferences.h" - #include "CodeBlock.h" #include "DFGCommonData.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { +DesiredWeakReferences::DesiredWeakReferences() + : m_codeBlock(nullptr) +{ +} + DesiredWeakReferences::DesiredWeakReferences(CodeBlock* codeBlock) : m_codeBlock(codeBlock) { @@ -45,17 +50,40 @@ DesiredWeakReferences::~DesiredWeakReferences() void DesiredWeakReferences::addLazily(JSCell* cell) { - m_references.append(cell); + if (cell) + m_references.add(cell); +} + +void DesiredWeakReferences::addLazily(JSValue value) +{ + if (value.isCell()) + addLazily(value.asCell()); +} + +bool DesiredWeakReferences::contains(JSCell* cell) +{ + return m_references.contains(cell); } void DesiredWeakReferences::reallyAdd(VM& vm, CommonData* common) { - for (unsigned i = 0; i < m_references.size(); i++) { - JSCell* target = m_references[i]; - common->weakReferences.append(WriteBarrier<JSCell>(vm, m_codeBlock->ownerExecutable(), target)); + for (JSCell* target : m_references) { + if (Structure* structure = jsDynamicCast<Structure*>(target)) { + common->weakStructureReferences.append( + WriteBarrier<Structure>(vm, m_codeBlock, structure)); + } else { + common->weakReferences.append( + WriteBarrier<JSCell>(vm, m_codeBlock, target)); + } } } +void DesiredWeakReferences::visitChildren(SlotVisitor& visitor) +{ + for (JSCell* target : m_references) + visitor.appendUnbarrieredPointer(&target); +} + } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGDesiredWeakReferences.h b/Source/JavaScriptCore/dfg/DFGDesiredWeakReferences.h index 981e752ea..303b8df2a 100644 --- a/Source/JavaScriptCore/dfg/DFGDesiredWeakReferences.h +++ b/Source/JavaScriptCore/dfg/DFGDesiredWeakReferences.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,7 +26,7 @@ #ifndef DFGDesiredWeakReferences_h #define DFGDesiredWeakReferences_h -#include <wtf/Vector.h> +#include <wtf/HashSet.h> #if ENABLE(DFG_JIT) @@ -34,6 +34,8 @@ namespace JSC { class CodeBlock; class JSCell; +class JSValue; +class SlotVisitor; class VM; namespace DFG { @@ -42,15 +44,21 @@ class CommonData; class DesiredWeakReferences { public: + DesiredWeakReferences(); DesiredWeakReferences(CodeBlock*); ~DesiredWeakReferences(); void addLazily(JSCell*); + void addLazily(JSValue); + bool contains(JSCell*); + void reallyAdd(VM&, CommonData*); + + void visitChildren(SlotVisitor&); private: CodeBlock* m_codeBlock; - Vector<JSCell*> m_references; + HashSet<JSCell*> m_references; }; } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGDesiredWriteBarriers.h b/Source/JavaScriptCore/dfg/DFGDesiredWriteBarriers.h deleted file mode 100644 index cbbb2cb5e..000000000 --- a/Source/JavaScriptCore/dfg/DFGDesiredWriteBarriers.h +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (C) 2013 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, - * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef DFGDesiredWriteBarriers_h -#define DFGDesiredWriteBarriers_h - -#include "WriteBarrier.h" -#include <wtf/Vector.h> - -#if ENABLE(DFG_JIT) - -namespace JSC { - -class JSFunction; -class ScriptExecutable; -class VM; -struct InlineCallFrame; - -namespace DFG { - -class DesiredWriteBarrier { -public: - enum Type { - ConstantType, - InlineCallFrameExecutableType, - }; - DesiredWriteBarrier(Type, CodeBlock*, unsigned index, JSCell* owner); - DesiredWriteBarrier(Type, CodeBlock*, InlineCallFrame*, JSCell* owner); - - void trigger(VM&); - -private: - JSCell* m_owner; - Type m_type; - CodeBlock* m_codeBlock; - union { - unsigned index; - InlineCallFrame* inlineCallFrame; - } m_which; -}; - -class DesiredWriteBarriers { -public: - DesiredWriteBarriers(); - ~DesiredWriteBarriers(); - - DesiredWriteBarrier& add(DesiredWriteBarrier::Type type, CodeBlock* codeBlock, unsigned index, JSCell* owner) - { - m_barriers.append(DesiredWriteBarrier(type, codeBlock, index, owner)); - return m_barriers.last(); - } - DesiredWriteBarrier& add(DesiredWriteBarrier::Type type, CodeBlock* codeBlock, InlineCallFrame* inlineCallFrame, JSCell* owner) - { - m_barriers.append(DesiredWriteBarrier(type, codeBlock, inlineCallFrame, owner)); - return m_barriers.last(); - } - - void trigger(VM&); - -private: - Vector<DesiredWriteBarrier> m_barriers; -}; - -inline void initializeLazyWriteBarrierForInlineCallFrameExecutable(DesiredWriteBarriers& barriers, WriteBarrier<ScriptExecutable>& barrier, CodeBlock* codeBlock, InlineCallFrame* inlineCallFrame, JSCell* owner, ScriptExecutable* value) -{ - DesiredWriteBarrier& desiredBarrier = barriers.add(DesiredWriteBarrier::InlineCallFrameExecutableType, codeBlock, inlineCallFrame, owner); - barrier = WriteBarrier<ScriptExecutable>(desiredBarrier, value); -} - -inline void initializeLazyWriteBarrierForConstant(DesiredWriteBarriers& barriers, WriteBarrier<Unknown>& barrier, CodeBlock* codeBlock, unsigned index, JSCell* owner, JSValue value) -{ - DesiredWriteBarrier& desiredBarrier = barriers.add(DesiredWriteBarrier::ConstantType, codeBlock, index, owner); - barrier = WriteBarrier<Unknown>(desiredBarrier, value); -} - -} } // namespace JSC::DFG - -#endif // ENABLE(DFG_JIT) - -#endif // DFGDesiredWriteBarriers_h diff --git a/Source/JavaScriptCore/dfg/DFGDisassembler.cpp b/Source/JavaScriptCore/dfg/DFGDisassembler.cpp index 0a06c02f5..15cbea0a5 100644 --- 
a/Source/JavaScriptCore/dfg/DFGDisassembler.cpp +++ b/Source/JavaScriptCore/dfg/DFGDisassembler.cpp @@ -26,11 +26,14 @@ #include "config.h" #include "DFGDisassembler.h" -#if ENABLE(DFG_JIT) && ENABLE(DISASSEMBLER) +#if ENABLE(DFG_JIT) #include "CodeBlockWithJITType.h" #include "DFGGraph.h" #include "DFGJITCode.h" +#include "JSCInlines.h" +#include "LinkBuffer.h" +#include "ProfilerDatabase.h" #include <wtf/StdLibExtras.h> namespace JSC { namespace DFG { @@ -91,8 +94,8 @@ Vector<Disassembler::DumpedOp> Disassembler::createDumpList(LinkBuffer& linkBuff dumpHeader(out, linkBuffer); append(result, out, previousOrigin); - m_graph.m_dominators.computeIfNecessary(m_graph); - m_graph.m_naturalLoops.computeIfNecessary(m_graph); + m_graph.ensureDominators(); + m_graph.ensureNaturalLoops(); const char* prefix = " "; const char* disassemblyPrefix = " "; @@ -109,8 +112,6 @@ Vector<Disassembler::DumpedOp> Disassembler::createDumpList(LinkBuffer& linkBuff append(result, out, previousOrigin); Node* lastNodeForDisassembly = block->at(0); for (size_t i = 0; i < block->size(); ++i) { - if (!block->at(i)->willHaveCodeGenOrOSR() && !Options::showAllDFGNodes()) - continue; MacroAssembler::Label currentLabel; HashMap<Node*, MacroAssembler::Label>::iterator iter = m_labelForNode.find(block->at(i)); if (iter != m_labelForNode.end()) @@ -127,10 +128,10 @@ Vector<Disassembler::DumpedOp> Disassembler::createDumpList(LinkBuffer& linkBuff } dumpDisassembly(out, disassemblyPrefix, linkBuffer, previousLabel, currentLabel, lastNodeForDisassembly); append(result, out, previousOrigin); - previousOrigin = block->at(i)->codeOrigin; + previousOrigin = block->at(i)->origin.semantic; if (m_graph.dumpCodeOrigin(out, prefix, lastNode, block->at(i), &m_dumpContext)) { append(result, out, previousOrigin); - previousOrigin = block->at(i)->codeOrigin; + previousOrigin = block->at(i)->origin.semantic; } m_graph.dump(out, prefix, block->at(i), &m_dumpContext); lastNode = block->at(i); @@ -172,4 +173,4 @@ void Disassembler::dumpDisassembly(PrintStream& out, const char* prefix, LinkBuf } } // namespace JSC::DFG -#endif // ENABLE(DFG_JIT) && ENABLE(DISASSEMBLER) +#endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGDisassembler.h b/Source/JavaScriptCore/dfg/DFGDisassembler.h index 58163cb59..7b31946f9 100644 --- a/Source/JavaScriptCore/dfg/DFGDisassembler.h +++ b/Source/JavaScriptCore/dfg/DFGDisassembler.h @@ -26,23 +26,24 @@ #ifndef DFGDisassembler_h #define DFGDisassembler_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) +#include "CodeOrigin.h" #include "DFGCommon.h" #include "DumpContext.h" -#include "LinkBuffer.h" #include "MacroAssembler.h" +#include "ProfilerCompilation.h" #include <wtf/HashMap.h> #include <wtf/StringPrintStream.h> #include <wtf/Vector.h> -namespace JSC { namespace DFG { +namespace JSC { -class Graph; +class LinkBuffer; -#if ENABLE(DISASSEMBLER) +namespace DFG { + +class Graph; class Disassembler { WTF_MAKE_FAST_ALLOCATED; @@ -99,25 +100,6 @@ private: MacroAssembler::Label m_endOfCode; }; -#else // ENABLE(DISASSEMBLER) - -class Disassembler { - WTF_MAKE_FAST_ALLOCATED; -public: - Disassembler(Graph&) { } - - void setStartOfCode(MacroAssembler::Label) { } - void setForBlockIndex(BlockIndex, MacroAssembler::Label) { } - void setForNode(Node*, MacroAssembler::Label) { } - void setEndOfMainPath(MacroAssembler::Label) { } - void setEndOfCode(MacroAssembler::Label) { } - - void dump(LinkBuffer&) { } - void reportToProfiler(Profiler::Compilation*, LinkBuffer&) { } -}; - -#endif // ENABLE(DISASSEMBLER) - } 
} // namespace JSC::DFG #endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGDoesGC.cpp b/Source/JavaScriptCore/dfg/DFGDoesGC.cpp new file mode 100644 index 000000000..eec32fef1 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGDoesGC.cpp @@ -0,0 +1,282 @@ +/* + * Copyright (C) 2014-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGDoesGC.h" + +#if ENABLE(DFG_JIT) + +#include "DFGClobberize.h" +#include "DFGGraph.h" +#include "DFGNode.h" +#include "Operations.h" + +namespace JSC { namespace DFG { + +bool doesGC(Graph& graph, Node* node) +{ + if (clobbersHeap(graph, node)) + return true; + + // Now consider nodes that don't clobber the world but that still may GC. This includes all + // nodes. By convention we put world-clobbering nodes in the block of "false" cases but we can + // put them anywhere. 
+ switch (node->op()) { + case JSConstant: + case DoubleConstant: + case Int52Constant: + case Identity: + case GetCallee: + case GetArgumentCount: + case GetRestLength: + case GetLocal: + case SetLocal: + case MovHint: + case ZombieHint: + case ExitOK: + case Phantom: + case Upsilon: + case Phi: + case Flush: + case PhantomLocal: + case GetLocalUnlinked: + case SetArgument: + case BitAnd: + case BitOr: + case BitXor: + case BitLShift: + case BitRShift: + case BitURShift: + case ValueToInt32: + case UInt32ToNumber: + case DoubleAsInt32: + case ArithAdd: + case ArithClz32: + case ArithSub: + case ArithNegate: + case ArithMul: + case ArithIMul: + case ArithDiv: + case ArithMod: + case ArithAbs: + case ArithMin: + case ArithMax: + case ArithPow: + case ArithSqrt: + case ArithRandom: + case ArithRound: + case ArithFloor: + case ArithCeil: + case ArithFRound: + case ArithSin: + case ArithCos: + case ArithLog: + case ValueAdd: + case GetById: + case GetByIdFlush: + case PutById: + case PutByIdFlush: + case PutByIdDirect: + case PutGetterById: + case PutSetterById: + case PutGetterSetterById: + case PutGetterByVal: + case PutSetterByVal: + case CheckStructure: + case GetExecutable: + case GetButterfly: + case GetButterflyReadOnly: + case CheckArray: + case GetScope: + case SkipScope: + case GetClosureVar: + case PutClosureVar: + case GetGlobalVar: + case GetGlobalLexicalVariable: + case PutGlobalVariable: + case VarInjectionWatchpoint: + case CheckCell: + case CheckNotEmpty: + case CheckIdent: + case RegExpExec: + case RegExpTest: + case CompareLess: + case CompareLessEq: + case CompareGreater: + case CompareGreaterEq: + case CompareEq: + case CompareStrictEq: + case Call: + case TailCallInlinedCaller: + case Construct: + case CallVarargs: + case TailCallVarargsInlinedCaller: + case ConstructVarargs: + case LoadVarargs: + case CallForwardVarargs: + case ConstructForwardVarargs: + case TailCallForwardVarargs: + case TailCallForwardVarargsInlinedCaller: + case Breakpoint: + case ProfileWillCall: + case ProfileDidCall: + case ProfileType: + case ProfileControlFlow: + case OverridesHasInstance: + case InstanceOf: + case InstanceOfCustom: + case IsUndefined: + case IsBoolean: + case IsNumber: + case IsString: + case IsObject: + case IsObjectOrNull: + case IsFunction: + case TypeOf: + case LogicalNot: + case ToPrimitive: + case ToString: + case CallStringConstructor: + case In: + case Jump: + case Branch: + case Switch: + case Return: + case TailCall: + case TailCallVarargs: + case Throw: + case CountExecution: + case ForceOSRExit: + case CheckWatchdogTimer: + case StringFromCharCode: + case Unreachable: + case ExtractOSREntryLocal: + case CheckTierUpInLoop: + case CheckTierUpAtReturn: + case CheckTierUpAndOSREnter: + case CheckTierUpWithNestedTriggerAndOSREnter: + case LoopHint: + case StoreBarrier: + case InvalidationPoint: + case NotifyWrite: + case CheckInBounds: + case ConstantStoragePointer: + case Check: + case CheckTypeInfoFlags: + case MultiGetByOffset: + case ValueRep: + case DoubleRep: + case Int52Rep: + case GetGetter: + case GetSetter: + case GetByVal: + case GetIndexedPropertyStorage: + case GetArrayLength: + case ArrayPush: + case ArrayPop: + case StringCharAt: + case StringCharCodeAt: + case GetTypedArrayByteOffset: + case PutByValDirect: + case PutByVal: + case PutByValAlias: + case PutStructure: + case GetByOffset: + case GetGetterSetterByOffset: + case PutByOffset: + case GetEnumerableLength: + case HasGenericProperty: + case HasStructureProperty: + case HasIndexedProperty: + case 
GetDirectPname: + case FiatInt52: + case BooleanToNumber: + case CheckBadCell: + case BottomValue: + case PhantomNewObject: + case PhantomNewFunction: + case PhantomNewGeneratorFunction: + case PhantomCreateActivation: + case PhantomDirectArguments: + case PhantomClonedArguments: + case GetMyArgumentByVal: + case ForwardVarargs: + case PutHint: + case CheckStructureImmediate: + case PutStack: + case KillStack: + case GetStack: + case GetFromArguments: + case PutToArguments: + case CopyRest: + return false; + + case CreateActivation: + case CreateDirectArguments: + case CreateScopedArguments: + case CreateClonedArguments: + case ToThis: + case CreateThis: + case AllocatePropertyStorage: + case ReallocatePropertyStorage: + case Arrayify: + case ArrayifyToStructure: + case NewObject: + case NewArray: + case NewArrayWithSize: + case NewArrayBuffer: + case NewRegexp: + case NewStringObject: + case MakeRope: + case NewArrowFunction: + case NewFunction: + case NewGeneratorFunction: + case NewTypedArray: + case ThrowReferenceError: + case GetPropertyEnumerator: + case GetEnumeratorStructurePname: + case GetEnumeratorGenericPname: + case ToIndexString: + case MaterializeNewObject: + case MaterializeCreateActivation: + case StrCat: + case StringReplace: + return true; + + case MultiPutByOffset: + return node->multiPutByOffsetData().reallocatesStorage(); + + case LastNodeType: + RELEASE_ASSERT_NOT_REACHED(); + return true; + } + + RELEASE_ASSERT_NOT_REACHED(); + return true; +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGDoesGC.h b/Source/JavaScriptCore/dfg/DFGDoesGC.h new file mode 100644 index 000000000..4503d21f8 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGDoesGC.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef DFGDoesGC_h +#define DFGDoesGC_h + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +class Graph; +struct Node; + +bool doesGC(Graph&, Node*); + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGDoesGC_h + diff --git a/Source/JavaScriptCore/dfg/DFGDominators.cpp b/Source/JavaScriptCore/dfg/DFGDominators.cpp deleted file mode 100644 index a5ae614b9..000000000 --- a/Source/JavaScriptCore/dfg/DFGDominators.cpp +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright (C) 2011 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "config.h" -#include "DFGDominators.h" - -#if ENABLE(DFG_JIT) - -#include "DFGGraph.h" - -namespace JSC { namespace DFG { - -Dominators::Dominators() -{ -} - -Dominators::~Dominators() -{ -} - -void Dominators::compute(Graph& graph) -{ - // This implements a naive dominator solver. - - ASSERT(graph.block(0)->predecessors.isEmpty()); - - unsigned numBlocks = graph.numBlocks(); - - // Allocate storage for the dense dominance matrix. - if (numBlocks > m_results.size()) { - m_results.grow(numBlocks); - for (unsigned i = numBlocks; i--;) - m_results[i].resize(numBlocks); - m_scratch.resize(numBlocks); - } - - // We know that the entry block is only dominated by itself. - m_results[0].clearAll(); - m_results[0].set(0); - - // Find all of the valid blocks. - m_scratch.clearAll(); - for (unsigned i = numBlocks; i--;) { - if (!graph.block(i)) - continue; - m_scratch.set(i); - } - - // Mark all nodes as dominated by everything. - for (unsigned i = numBlocks; i-- > 1;) { - if (!graph.block(i) || graph.block(i)->predecessors.isEmpty()) - m_results[i].clearAll(); - else - m_results[i].set(m_scratch); - } - - // Iteratively eliminate nodes that are not dominator. - bool changed; - do { - changed = false; - // Prune dominators in all non entry blocks: forward scan. - for (unsigned i = 1; i < numBlocks; ++i) - changed |= pruneDominators(graph, i); - - if (!changed) - break; - - // Prune dominators in all non entry blocks: backward scan. 
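The file being deleted here implemented dominators with a dense bitvector fixpoint: every non-entry block starts out "dominated by everything", and the solver repeatedly intersects the dominator sets of each block's predecessors (plus the block itself) until nothing changes; the replacement header further below switches to the generic WTF::Dominators<CFG>. A simplified standalone version of that fixpoint, using std::set in place of FastBitVector and assuming block 0 is the unique entry with every other block reachable, could read:

#include <set>
#include <vector>

// doms[b] = the set of blocks that dominate block b; preds[b] = b's predecessor indices.
std::vector<std::set<unsigned>> computeDominators(const std::vector<std::vector<unsigned>>& preds)
{
    if (preds.empty())
        return { };

    unsigned numBlocks = static_cast<unsigned>(preds.size());
    std::set<unsigned> all;
    for (unsigned i = 0; i < numBlocks; ++i)
        all.insert(i);

    // Start pessimistically: every block is "dominated by everything"...
    std::vector<std::set<unsigned>> doms(numBlocks, all);
    // ...except the entry block, which is dominated only by itself.
    doms[0] = { 0 };

    bool changed = true;
    while (changed) {
        changed = false;
        for (unsigned b = 1; b < numBlocks; ++b) {
            // Intersect the dominator sets of all predecessors of b...
            std::set<unsigned> next = all;
            for (unsigned p : preds[b]) {
                std::set<unsigned> intersection;
                for (unsigned d : next) {
                    if (doms[p].count(d))
                        intersection.insert(d);
                }
                next = std::move(intersection);
            }
            // ...and remember that a block always dominates itself.
            next.insert(b);
            if (next != doms[b]) {
                doms[b] = std::move(next);
                changed = true;
            }
        }
    }
    return doms;
}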
- changed = false; - for (unsigned i = numBlocks; i-- > 1;) - changed |= pruneDominators(graph, i); - } while (changed); -} - -bool Dominators::pruneDominators(Graph& graph, BlockIndex idx) -{ - BasicBlock* block = graph.block(idx); - - if (!block || block->predecessors.isEmpty()) - return false; - - // Find the intersection of dom(preds). - m_scratch.set(m_results[block->predecessors[0]->index]); - for (unsigned j = block->predecessors.size(); j-- > 1;) - m_scratch.filter(m_results[block->predecessors[j]->index]); - - // The block is also dominated by itself. - m_scratch.set(idx); - - return m_results[idx].setAndCheck(m_scratch); -} - -void Dominators::dump(Graph& graph, PrintStream& out) const -{ - for (BlockIndex blockIndex = 0; blockIndex < graph.numBlocks(); ++blockIndex) { - BasicBlock* block = graph.block(blockIndex); - if (!block) - continue; - out.print(" Block ", *block, ":"); - for (BlockIndex otherIndex = 0; otherIndex < graph.numBlocks(); ++otherIndex) { - if (!dominates(block->index, otherIndex)) - continue; - out.print(" #", otherIndex); - } - out.print("\n"); - } -} - -} } // namespace JSC::DFG - -#endif // ENABLE(DFG_JIT) - diff --git a/Source/JavaScriptCore/dfg/DFGDominators.h b/Source/JavaScriptCore/dfg/DFGDominators.h index c63a84baf..021a4c13e 100644 --- a/Source/JavaScriptCore/dfg/DFGDominators.h +++ b/Source/JavaScriptCore/dfg/DFGDominators.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2014, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,44 +26,28 @@ #ifndef DFGDominators_h #define DFGDominators_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) -#include "DFGAnalysis.h" #include "DFGBasicBlock.h" +#include "DFGBlockMap.h" +#include "DFGBlockSet.h" +#include "DFGCFG.h" #include "DFGCommon.h" -#include <wtf/FastBitVector.h> +#include "DFGGraph.h" +#include <wtf/Dominators.h> +#include <wtf/FastMalloc.h> +#include <wtf/Noncopyable.h> namespace JSC { namespace DFG { -class Graph; - -class Dominators : public Analysis<Dominators> { +class Dominators : public WTF::Dominators<CFG> { + WTF_MAKE_NONCOPYABLE(Dominators); + WTF_MAKE_FAST_ALLOCATED; public: - Dominators(); - ~Dominators(); - - void compute(Graph& graph); - - bool dominates(BlockIndex from, BlockIndex to) const - { - ASSERT(isValid()); - return m_results[to].get(from); - } - - bool dominates(BasicBlock* from, BasicBlock* to) const + Dominators(Graph& graph) + : WTF::Dominators<CFG>(*graph.m_cfg) { - return dominates(from->index, to->index); } - - void dump(Graph& graph, PrintStream&) const; - -private: - bool pruneDominators(Graph&, BlockIndex); - - Vector<FastBitVector> m_results; // For each block, the bitvector of blocks that dominate it. - FastBitVector m_scratch; // A temporary bitvector with bit for each block. We recycle this to save new/deletes. }; } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGDriver.cpp b/Source/JavaScriptCore/dfg/DFGDriver.cpp index 780ad6c22..0369848b1 100644 --- a/Source/JavaScriptCore/dfg/DFGDriver.cpp +++ b/Source/JavaScriptCore/dfg/DFGDriver.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,15 +30,16 @@ #include "JSString.h" #include "CodeBlock.h" +#include "DFGFunctionWhitelist.h" #include "DFGJITCode.h" #include "DFGPlan.h" #include "DFGThunks.h" #include "DFGWorklist.h" -#include "Debugger.h" #include "JITCode.h" -#include "Operations.h" +#include "JSCInlines.h" #include "Options.h" #include "SamplingTool.h" +#include "TypeProfilerLog.h" #include <wtf/Atomics.h> #if ENABLE(FTL_JIT) @@ -56,32 +57,24 @@ unsigned getNumCompilations() #if ENABLE(DFG_JIT) static CompilationResult compileImpl( - VM& vm, CodeBlock* codeBlock, CompilationMode mode, unsigned osrEntryBytecodeIndex, - const Operands<JSValue>& mustHandleValues, - PassRefPtr<DeferredCompilationCallback> callback, Worklist* worklist) + VM& vm, CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock, CompilationMode mode, + unsigned osrEntryBytecodeIndex, const Operands<JSValue>& mustHandleValues, + PassRefPtr<DeferredCompilationCallback> callback) { SamplingRegion samplingRegion("DFG Compilation (Driver)"); + if (!Options::bytecodeRangeToDFGCompile().isInRange(codeBlock->instructionCount()) + || !FunctionWhitelist::ensureGlobalWhitelist().contains(codeBlock)) + return CompilationFailed; + numCompilations++; ASSERT(codeBlock); ASSERT(codeBlock->alternative()); ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT); + ASSERT(!profiledDFGCodeBlock || profiledDFGCodeBlock->jitType() == JITCode::DFGJIT); - if (!Options::useDFGJIT() || !MacroAssembler::supportsFloatingPoint()) - return CompilationFailed; - - if (!Options::bytecodeRangeToDFGCompile().isInRange(codeBlock->instructionCount())) - return CompilationFailed; - - if (vm.enabledProfiler()) - return CompilationInvalidated; - - Debugger* debugger = codeBlock->globalObject()->debugger(); - if (debugger && (debugger->isStepping() || codeBlock->baselineAlternative()->hasDebuggerRequests())) - return CompilationInvalidated; - - if (logCompilationChanges()) + if (logCompilationChanges(mode)) dataLog("DFG(Driver) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n"); // Make sure that any stubs that the DFG is going to use are initialized. 
We want to @@ -89,44 +82,46 @@ static CompilationResult compileImpl( vm.getCTIStub(osrExitGenerationThunkGenerator); vm.getCTIStub(throwExceptionFromCallSlowPathGenerator); vm.getCTIStub(linkCallThunkGenerator); - vm.getCTIStub(linkConstructThunkGenerator); - vm.getCTIStub(linkClosureCallThunkGenerator); - vm.getCTIStub(virtualCallThunkGenerator); - vm.getCTIStub(virtualConstructThunkGenerator); + vm.getCTIStub(linkPolymorphicCallThunkGenerator); + + if (vm.typeProfiler()) + vm.typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for DFG compilation.")); RefPtr<Plan> plan = adoptRef( - new Plan(codeBlock, mode, osrEntryBytecodeIndex, mustHandleValues)); + new Plan(codeBlock, profiledDFGCodeBlock, mode, osrEntryBytecodeIndex, mustHandleValues)); - if (worklist) { - plan->callback = callback; - if (logCompilationChanges()) + plan->callback = callback; + if (Options::useConcurrentJIT()) { + Worklist* worklist = ensureGlobalWorklistFor(mode); + if (logCompilationChanges(mode)) dataLog("Deferring DFG compilation of ", *codeBlock, " with queue length ", worklist->queueLength(), ".\n"); worklist->enqueue(plan); return CompilationDeferred; } - plan->compileInThread(*vm.dfgState); + plan->compileInThread(*vm.dfgState, 0); return plan->finalizeWithoutNotifyingCallback(); } #else // ENABLE(DFG_JIT) static CompilationResult compileImpl( - VM&, CodeBlock*, CompilationMode, unsigned, const Operands<JSValue>&, - PassRefPtr<DeferredCompilationCallback>, Worklist*) + VM&, CodeBlock*, CodeBlock*, CompilationMode, unsigned, const Operands<JSValue>&, + PassRefPtr<DeferredCompilationCallback>) { return CompilationFailed; } #endif // ENABLE(DFG_JIT) CompilationResult compile( - VM& vm, CodeBlock* codeBlock, CompilationMode mode, unsigned osrEntryBytecodeIndex, - const Operands<JSValue>& mustHandleValues, - PassRefPtr<DeferredCompilationCallback> passedCallback, Worklist* worklist) + VM& vm, CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock, CompilationMode mode, + unsigned osrEntryBytecodeIndex, const Operands<JSValue>& mustHandleValues, + PassRefPtr<DeferredCompilationCallback> passedCallback) { RefPtr<DeferredCompilationCallback> callback = passedCallback; CompilationResult result = compileImpl( - vm, codeBlock, mode, osrEntryBytecodeIndex, mustHandleValues, callback, worklist); + vm, codeBlock, profiledDFGCodeBlock, mode, osrEntryBytecodeIndex, mustHandleValues, + callback); if (result != CompilationDeferred) - callback->compilationDidComplete(codeBlock, result); + callback->compilationDidComplete(codeBlock, profiledDFGCodeBlock, result); return result; } diff --git a/Source/JavaScriptCore/dfg/DFGDriver.h b/Source/JavaScriptCore/dfg/DFGDriver.h index 9d43638df..a456dc66e 100644 --- a/Source/JavaScriptCore/dfg/DFGDriver.h +++ b/Source/JavaScriptCore/dfg/DFGDriver.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,7 +29,6 @@ #include "CallFrame.h" #include "DFGCompilationMode.h" #include "DFGPlan.h" -#include <wtf/Platform.h> namespace JSC { @@ -40,16 +39,14 @@ class VM; namespace DFG { -class Worklist; - JS_EXPORT_PRIVATE unsigned getNumCompilations(); // If the worklist is non-null, we do a concurrent compile. Otherwise we do a synchronous // compile. Even if we do a synchronous compile, we call the callback with the result. 
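With this patch the choice between a concurrent and a synchronous compile moves inside compileImpl(): when Options::useConcurrentJIT() is set, the plan is enqueued on the global worklist for the requested mode and the result is CompilationDeferred; otherwise the plan is compiled in the calling thread and the callback is still invoked with the result. A rough sketch of that dispatch, with stand-in Plan, Worklist, and callback types rather than the real JSC classes:

#include <deque>
#include <functional>
#include <memory>

enum class CompilationResult { Failed, Deferred, Succeeded };

struct Plan {
    std::function<CompilationResult()> compile;       // Does the actual compilation work.
    std::function<void(CompilationResult)> callback;  // Notified when the plan finishes.
};

struct Worklist {
    std::deque<std::shared_ptr<Plan>> queue;
    void enqueue(std::shared_ptr<Plan> plan) { queue.push_back(std::move(plan)); }
};

CompilationResult compile(Worklist& worklist, std::shared_ptr<Plan> plan, bool useConcurrentJIT)
{
    if (useConcurrentJIT) {
        // Concurrent path: hand the plan to the worklist thread; the callback fires later.
        worklist.enqueue(plan);
        return CompilationResult::Deferred;
    }
    // Synchronous path: compile inline, but still report through the callback,
    // so callers observe the same interface in both modes.
    CompilationResult result = plan->compile();
    plan->callback(result);
    return result;
}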
CompilationResult compile( - VM&, CodeBlock*, CompilationMode, unsigned osrEntryBytecodeIndex, - const Operands<JSValue>& mustHandleValues, - PassRefPtr<DeferredCompilationCallback>, Worklist*); + VM&, CodeBlock*, CodeBlock* profiledDFGCodeBlock, CompilationMode, + unsigned osrEntryBytecodeIndex, const Operands<JSValue>& mustHandleValues, + PassRefPtr<DeferredCompilationCallback>); } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGEdge.cpp b/Source/JavaScriptCore/dfg/DFGEdge.cpp index eafe31faf..f4843a2e2 100644 --- a/Source/JavaScriptCore/dfg/DFGEdge.cpp +++ b/Source/JavaScriptCore/dfg/DFGEdge.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,17 +29,16 @@ #if ENABLE(DFG_JIT) #include "DFGNode.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { void Edge::dump(PrintStream& out) const { - if (useKind() != UntypedUse) { - if (needsCheck()) - out.print("Check:"); - out.print(useKind(), ":"); - } - if (doesKill()) + if (!isProved()) + out.print("Check:"); + out.print(useKind(), ":"); + if (DFG::doesKill(killStatusUnchecked())) out.print("Kill:"); out.print(node()); } diff --git a/Source/JavaScriptCore/dfg/DFGEdge.h b/Source/JavaScriptCore/dfg/DFGEdge.h index e641b65b7..3dec1893f 100644 --- a/Source/JavaScriptCore/dfg/DFGEdge.h +++ b/Source/JavaScriptCore/dfg/DFGEdge.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGEdge_h #define DFGEdge_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGCommon.h" @@ -117,14 +115,10 @@ public: { return proofStatus() == IsProved; } - bool needsCheck() const - { - return proofStatus() == NeedsCheck; - } bool willNotHaveCheck() const { - return isProved() || useKind() == UntypedUse; + return isProved() || shouldNotHaveTypeCheck(useKind()); } bool willHaveCheck() const { @@ -153,11 +147,20 @@ public: bool doesNotKill() const { return !doesKill(); } bool isSet() const { return !!node(); } - - typedef void* Edge::*UnspecifiedBoolType; - operator UnspecifiedBoolType*() const { return reinterpret_cast<UnspecifiedBoolType*>(isSet()); } - + + Edge sanitized() const + { + Edge result = *this; +#if USE(JSVALUE64) + result.m_encodedWord = makeWord(node(), useKindUnchecked(), NeedsCheck, DoesNotKill); +#else + result.m_encodedWord = makeWord(useKindUnchecked(), NeedsCheck, DoesNotKill); +#endif + return result; + } + bool operator!() const { return !isSet(); } + explicit operator bool() const { return isSet(); } bool operator==(Edge other) const { @@ -173,6 +176,15 @@ public: } void dump(PrintStream&) const; + + unsigned hash() const + { +#if USE(JSVALUE64) + return IntHash<uintptr_t>::hash(m_encodedWord); +#else + return PtrHash<Node*>::hash(m_node) + m_encodedWord; +#endif + } private: friend class AdjacencyList; @@ -187,13 +199,13 @@ private: ASSERT((shiftedValue >> shift()) == bitwise_cast<uintptr_t>(node)); ASSERT(useKind >= 0 && useKind < LastUseKind); ASSERT((static_cast<uintptr_t>(LastUseKind) << 2) <= (static_cast<uintptr_t>(2) << shift())); - return shiftedValue | (static_cast<uintptr_t>(useKind) << 2) | (DFG::doesKill(killStatus) 
<< 1) | DFG::isProved(proofStatus); + return shiftedValue | (static_cast<uintptr_t>(useKind) << 2) | (DFG::doesKill(killStatus) << 1) | static_cast<uintptr_t>(DFG::isProved(proofStatus)); } #else static uintptr_t makeWord(UseKind useKind, ProofStatus proofStatus, KillStatus killStatus) { - return (static_cast<uintptr_t>(useKind) << 2) | (DFG::doesKill(killStatus) << 1) | DFG::isProved(proofStatus); + return (static_cast<uintptr_t>(useKind) << 2) | (DFG::doesKill(killStatus) << 1) | static_cast<uintptr_t>(DFG::isProved(proofStatus)); } Node* m_node; diff --git a/Source/JavaScriptCore/dfg/DFGEdgeDominates.h b/Source/JavaScriptCore/dfg/DFGEdgeDominates.h index 0d514db55..3da18da9b 100644 --- a/Source/JavaScriptCore/dfg/DFGEdgeDominates.h +++ b/Source/JavaScriptCore/dfg/DFGEdgeDominates.h @@ -26,10 +26,9 @@ #ifndef DFGEdgeDominates_h #define DFGEdgeDominates_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) +#include "DFGDominators.h" #include "DFGGraph.h" namespace JSC { namespace DFG { @@ -47,10 +46,10 @@ public: void operator()(Node*, Edge edge) { - bool result = m_graph.m_dominators.dominates(edge.node()->misc.owner, m_block); + bool result = m_graph.m_dominators->dominates(edge.node()->owner, m_block); if (verbose) { dataLog( - "Checking if ", edge, " in ", *edge.node()->misc.owner, + "Checking if ", edge, " in ", *edge.node()->owner, " dominates ", *m_block, ": ", result, "\n"); } m_result &= result; diff --git a/Source/JavaScriptCore/dfg/DFGEdgeUsesStructure.h b/Source/JavaScriptCore/dfg/DFGEdgeUsesStructure.h index bd8063a0c..d5e748f1f 100644 --- a/Source/JavaScriptCore/dfg/DFGEdgeUsesStructure.h +++ b/Source/JavaScriptCore/dfg/DFGEdgeUsesStructure.h @@ -26,8 +26,6 @@ #ifndef DFGEdgeUsesStructure_h #define DFGEdgeUsesStructure_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGGraph.h" diff --git a/Source/JavaScriptCore/dfg/DFGEpoch.cpp b/Source/JavaScriptCore/dfg/DFGEpoch.cpp new file mode 100644 index 000000000..7da1deb8c --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGEpoch.cpp @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "DFGEpoch.h" + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +void Epoch::dump(PrintStream& out) const +{ + if (!*this) + out.print("none"); + else + out.print(m_epoch); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGEpoch.h b/Source/JavaScriptCore/dfg/DFGEpoch.h new file mode 100644 index 000000000..9865dd7e0 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGEpoch.h @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGEpoch_h +#define DFGEpoch_h + +#if ENABLE(DFG_JIT) + +#include <wtf/PrintStream.h> + +namespace JSC { namespace DFG { + +// Utility class for epoch-based analyses. 
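An epoch here is essentially a generation counter: an analysis bumps the current epoch at the start of a pass, and per-node state counts as valid only if its stored epoch matches, which avoids walking the whole graph to clear flags between passes. A small sketch of that idiom, with an invented NodeInfo struct that is not part of this patch:

#include <cstdint>

struct NodeInfo {
    uint32_t lastVisitedEpoch { 0 }; // 0 plays the role of the default Epoch() / "none".
};

class EpochVisitor {
public:
    // Starting a new pass is O(1): no need to clear a "visited" bit on every node.
    void startNewPass() { ++m_currentEpoch; }

    bool markVisited(NodeInfo& info)
    {
        if (info.lastVisitedEpoch == m_currentEpoch)
            return false;                 // Already seen during this pass.
        info.lastVisitedEpoch = m_currentEpoch;
        return true;                      // First visit in this pass.
    }

private:
    uint32_t m_currentEpoch { 1 };        // Matches Epoch::first() == 1, with 0 reserved for "none".
};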
+ +class Epoch { +public: + Epoch() + : m_epoch(s_none) + { + } + + static Epoch fromUnsigned(unsigned value) + { + Epoch result; + result.m_epoch = value; + return result; + } + + unsigned toUnsigned() const + { + return m_epoch; + } + + static Epoch first() + { + Epoch result; + result.m_epoch = s_first; + return result; + } + + bool operator!() const + { + return m_epoch == s_none; + } + + Epoch next() const + { + Epoch result; + result.m_epoch = m_epoch + 1; + return result; + } + + void bump() + { + *this = next(); + } + + bool operator==(const Epoch& other) const + { + return m_epoch == other.m_epoch; + } + + bool operator!=(const Epoch& other) const + { + return !(*this == other); + } + + bool operator<(const Epoch& other) const + { + return m_epoch < other.m_epoch; + } + + bool operator>(const Epoch& other) const + { + return other < *this; + } + + bool operator<=(const Epoch& other) const + { + return !(*this > other); + } + + bool operator>=(const Epoch& other) const + { + return !(*this < other); + } + + void dump(PrintStream&) const; + +private: + static const unsigned s_none = 0; + static const unsigned s_first = 1; + + unsigned m_epoch; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGEpoch_h + diff --git a/Source/JavaScriptCore/dfg/DFGFailedFinalizer.cpp b/Source/JavaScriptCore/dfg/DFGFailedFinalizer.cpp index fb00e2021..d2562a8e5 100644 --- a/Source/JavaScriptCore/dfg/DFGFailedFinalizer.cpp +++ b/Source/JavaScriptCore/dfg/DFGFailedFinalizer.cpp @@ -28,6 +28,8 @@ #if ENABLE(DFG_JIT) +#include "JSCInlines.h" + namespace JSC { namespace DFG { FailedFinalizer::FailedFinalizer(Plan& plan) @@ -39,6 +41,11 @@ FailedFinalizer::~FailedFinalizer() { } +size_t FailedFinalizer::codeSize() +{ + return 0; +} + bool FailedFinalizer::finalize() { return false; diff --git a/Source/JavaScriptCore/dfg/DFGFailedFinalizer.h b/Source/JavaScriptCore/dfg/DFGFailedFinalizer.h index 6df5a30ad..1afbe0dcb 100644 --- a/Source/JavaScriptCore/dfg/DFGFailedFinalizer.h +++ b/Source/JavaScriptCore/dfg/DFGFailedFinalizer.h @@ -26,8 +26,6 @@ #ifndef DFGFailedFinalizer_h #define DFGFailedFinalizer_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGFinalizer.h" @@ -39,6 +37,7 @@ public: FailedFinalizer(Plan&); virtual ~FailedFinalizer(); + virtual size_t codeSize() override; virtual bool finalize() override; virtual bool finalizeFunction() override; }; diff --git a/Source/JavaScriptCore/dfg/DFGFiltrationResult.h b/Source/JavaScriptCore/dfg/DFGFiltrationResult.h index 8c80cea90..ad0bf4d2e 100644 --- a/Source/JavaScriptCore/dfg/DFGFiltrationResult.h +++ b/Source/JavaScriptCore/dfg/DFGFiltrationResult.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,14 +26,24 @@ #ifndef DFGFiltrationResult_h #define DFGFiltrationResult_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) namespace JSC { namespace DFG { +// Tells you if an operation that filters type (i.e. does a type check/speculation) will always +// exit. Formally, this means that the proven type of a value prior to the filter was not +// bottom (i.e. not "clear" or "SpecEmpty") but becomes bottom as a result of executing the +// filter. +// +// Note that per this definition, a filter will not return Contradiction if the node's proven +// type was already bottom. 
This is necessary because we have this yucky convention of using + // a proven type of bottom for nodes that don't hold JS values, like Phi nodes in ThreadedCPS + // and storage nodes. enum FiltrationResult { + // Means that this operation may not always exit. FiltrationOK, + + // Means that this operation will always exit. Contradiction }; diff --git a/Source/JavaScriptCore/dfg/DFGFinalizer.cpp b/Source/JavaScriptCore/dfg/DFGFinalizer.cpp index b4025313c..4e74d7c29 100644 --- a/Source/JavaScriptCore/dfg/DFGFinalizer.cpp +++ b/Source/JavaScriptCore/dfg/DFGFinalizer.cpp @@ -29,6 +29,7 @@ #if ENABLE(DFG_JIT) #include "DFGPlan.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGFinalizer.h b/Source/JavaScriptCore/dfg/DFGFinalizer.h index 782c744b1..3eb71f199 100644 --- a/Source/JavaScriptCore/dfg/DFGFinalizer.h +++ b/Source/JavaScriptCore/dfg/DFGFinalizer.h @@ -26,8 +26,6 @@ #ifndef DFGFinalizer_h #define DFGFinalizer_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "JITCode.h" @@ -46,6 +44,7 @@ public: Finalizer(Plan&); virtual ~Finalizer(); + virtual size_t codeSize() = 0; virtual bool finalize() = 0; virtual bool finalizeFunction() = 0; diff --git a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp index 185f03591..64d7f63c9 100644 --- a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved. + * Copyright (C) 2012-2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,12 +28,15 @@ #if ENABLE(DFG_JIT) +#include "ArrayPrototype.h" #include "DFGGraph.h" +#include "DFGInferredTypeCheck.h" #include "DFGInsertionSet.h" #include "DFGPhase.h" #include "DFGPredictionPropagationPhase.h" #include "DFGVariableAccessDataDump.h" -#include "Operations.h" +#include "JSCInlines.h" +#include "TypeLocation.h" namespace JSC { namespace DFG { @@ -61,12 +64,14 @@ public: m_graph.m_argumentPositions[i].mergeArgumentUnboxingAwareness(); for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) - fixupSetLocalsInBlock(m_graph.block(blockIndex)); + fixupGetAndSetLocalsInBlock(m_graph.block(blockIndex)); } for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) - fixupUntypedSetLocalsInBlock(m_graph.block(blockIndex)); - + fixupChecksInBlock(m_graph.block(blockIndex)); + + m_graph.m_planStage = PlanStage::AfterFixup; + return true; } @@ -90,7 +95,7 @@ private: switch (op) { case SetLocal: { - // This gets handled by fixupSetLocalsInBlock(). + // This gets handled by fixupGetAndSetLocalsInBlock().
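Restating the DFGFiltrationResult.h semantics shown a little earlier in code: a filter reports Contradiction only when it takes a previously non-bottom proven type to bottom, and a type that was already bottom (the convention for nodes that do not hold JS values) never yields Contradiction. A minimal sketch, treating the proven type as a plain bitmask and using an illustrative filterType helper rather than the real AbstractValue machinery:

#include <cstdint>

typedef uint64_t SpeculatedType;            // Bit set of possible runtime types; 0 is "bottom".
enum FiltrationResult { FiltrationOK, Contradiction };

FiltrationResult filterType(SpeculatedType& proven, SpeculatedType allowed)
{
    SpeculatedType before = proven;
    proven &= allowed;                      // The speculation narrows the proven type.
    if (before && !proven)
        return Contradiction;               // Non-bottom became bottom: the check will always exit.
    return FiltrationOK;                    // Includes the "was already bottom" case.
}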
return; } @@ -100,67 +105,81 @@ private: case BitRShift: case BitLShift: case BitURShift: { - fixBinaryIntEdges(); + if (Node::shouldSpeculateUntypedForBitOps(node->child1().node(), node->child2().node()) + && m_graph.hasExitSite(node->origin.semantic, BadType)) { + fixEdge<UntypedUse>(node->child1()); + fixEdge<UntypedUse>(node->child2()); + break; + } + fixIntConvertingEdge(node->child1()); + fixIntConvertingEdge(node->child2()); break; } case ArithIMul: { - fixBinaryIntEdges(); + fixIntConvertingEdge(node->child1()); + fixIntConvertingEdge(node->child2()); node->setOp(ArithMul); node->setArithMode(Arith::Unchecked); node->child1().setUseKind(Int32Use); node->child2().setUseKind(Int32Use); break; } + + case ArithClz32: { + fixIntConvertingEdge(node->child1()); + node->setArithMode(Arith::Unchecked); + break; + } case UInt32ToNumber: { - fixIntEdge(node->child1()); + fixIntConvertingEdge(node->child1()); if (bytecodeCanTruncateInteger(node->arithNodeFlags())) node->convertToIdentity(); - else if (nodeCanSpeculateInt32(node->arithNodeFlags())) + else if (node->canSpeculateInt32(FixupPass)) node->setArithMode(Arith::CheckOverflow); - else + else { node->setArithMode(Arith::DoOverflow); + node->setResult(NodeResultDouble); + } break; } case ValueAdd: { if (attemptToMakeIntegerAdd(node)) { node->setOp(ArithAdd); - node->clearFlags(NodeMustGenerate | NodeClobbersWorld); break; } - if (Node::shouldSpeculateNumberExpectingDefined(node->child1().node(), node->child2().node())) { - fixEdge<NumberUse>(node->child1()); - fixEdge<NumberUse>(node->child2()); + if (Node::shouldSpeculateNumberOrBooleanExpectingDefined(node->child1().node(), node->child2().node())) { + fixDoubleOrBooleanEdge(node->child1()); + fixDoubleOrBooleanEdge(node->child2()); node->setOp(ArithAdd); - node->clearFlags(NodeMustGenerate | NodeClobbersWorld); + node->setResult(NodeResultDouble); break; } - // FIXME: Optimize for the case where one of the operands is the - // empty string. Also consider optimizing for the case where we don't - // believe either side is the emtpy string. Both of these things should - // be easy. - - if (node->child1()->shouldSpeculateString() - && attemptToMakeFastStringAdd<StringUse>(node, node->child1(), node->child2())) + if (attemptToMakeFastStringAdd(node)) break; - if (node->child2()->shouldSpeculateString() - && attemptToMakeFastStringAdd<StringUse>(node, node->child2(), node->child1())) - break; - if (node->child1()->shouldSpeculateStringObject() - && attemptToMakeFastStringAdd<StringObjectUse>(node, node->child1(), node->child2())) - break; - if (node->child2()->shouldSpeculateStringObject() - && attemptToMakeFastStringAdd<StringObjectUse>(node, node->child2(), node->child1())) - break; - if (node->child1()->shouldSpeculateStringOrStringObject() - && attemptToMakeFastStringAdd<StringOrStringObjectUse>(node, node->child1(), node->child2())) - break; - if (node->child2()->shouldSpeculateStringOrStringObject() - && attemptToMakeFastStringAdd<StringOrStringObjectUse>(node, node->child2(), node->child1())) + + fixEdge<UntypedUse>(node->child1()); + fixEdge<UntypedUse>(node->child2()); + node->setResult(NodeResultJS); + break; + } + + case StrCat: { + if (attemptToMakeFastStringAdd(node)) break; + + // FIXME: Remove empty string arguments and possibly turn this into a ToString operation. That + // would require a form of ToString that takes a KnownPrimitiveUse. This is necessary because + // the implementation of StrCat doesn't dynamically optimize for empty strings. 
+ // https://bugs.webkit.org/show_bug.cgi?id=148540 + m_graph.doToChildren( + node, + [&] (Edge& edge) { + fixEdge<KnownPrimitiveUse>(edge); + }); break; } @@ -171,16 +190,26 @@ private: case ArithAdd: case ArithSub: { + if (op == ArithSub + && Node::shouldSpeculateUntypedForArithmetic(node->child1().node(), node->child2().node()) + && m_graph.hasExitSite(node->origin.semantic, BadType)) { + + fixEdge<UntypedUse>(node->child1()); + fixEdge<UntypedUse>(node->child2()); + node->setResult(NodeResultJS); + break; + } if (attemptToMakeIntegerAdd(node)) break; - fixEdge<NumberUse>(node->child1()); - fixEdge<NumberUse>(node->child2()); + fixDoubleOrBooleanEdge(node->child1()); + fixDoubleOrBooleanEdge(node->child2()); + node->setResult(NodeResultDouble); break; } case ArithNegate: { - if (m_graph.negateShouldSpeculateInt32(node)) { - fixEdge<Int32Use>(node->child1()); + if (m_graph.unaryArithShouldSpeculateInt32(node, FixupPass)) { + fixIntOrBooleanEdge(node->child1()); if (bytecodeCanTruncateInteger(node->arithNodeFlags())) node->setArithMode(Arith::Unchecked); else if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) @@ -189,22 +218,33 @@ private: node->setArithMode(Arith::CheckOverflowAndNegativeZero); break; } - if (m_graph.negateShouldSpeculateMachineInt(node)) { - fixEdge<MachineIntUse>(node->child1()); + if (m_graph.unaryArithShouldSpeculateMachineInt(node, FixupPass)) { + fixEdge<Int52RepUse>(node->child1()); if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) node->setArithMode(Arith::CheckOverflow); else node->setArithMode(Arith::CheckOverflowAndNegativeZero); + node->setResult(NodeResultInt52); break; } - fixEdge<NumberUse>(node->child1()); + fixDoubleOrBooleanEdge(node->child1()); + node->setResult(NodeResultDouble); break; } case ArithMul: { - if (m_graph.mulShouldSpeculateInt32(node)) { - fixEdge<Int32Use>(node->child1()); - fixEdge<Int32Use>(node->child2()); + Edge& leftChild = node->child1(); + Edge& rightChild = node->child2(); + if (Node::shouldSpeculateUntypedForArithmetic(leftChild.node(), rightChild.node()) + && m_graph.hasExitSite(node->origin.semantic, BadType)) { + fixEdge<UntypedUse>(leftChild); + fixEdge<UntypedUse>(rightChild); + node->setResult(NodeResultJS); + break; + } + if (m_graph.binaryArithShouldSpeculateInt32(node, FixupPass)) { + fixIntOrBooleanEdge(leftChild); + fixIntOrBooleanEdge(rightChild); if (bytecodeCanTruncateInteger(node->arithNodeFlags())) node->setArithMode(Arith::Unchecked); else if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) @@ -213,27 +253,38 @@ private: node->setArithMode(Arith::CheckOverflowAndNegativeZero); break; } - if (m_graph.mulShouldSpeculateMachineInt(node)) { - fixEdge<MachineIntUse>(node->child1()); - fixEdge<MachineIntUse>(node->child2()); + if (m_graph.binaryArithShouldSpeculateMachineInt(node, FixupPass)) { + fixEdge<Int52RepUse>(leftChild); + fixEdge<Int52RepUse>(rightChild); if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) node->setArithMode(Arith::CheckOverflow); else node->setArithMode(Arith::CheckOverflowAndNegativeZero); + node->setResult(NodeResultInt52); break; } - fixEdge<NumberUse>(node->child1()); - fixEdge<NumberUse>(node->child2()); + fixDoubleOrBooleanEdge(leftChild); + fixDoubleOrBooleanEdge(rightChild); + node->setResult(NodeResultDouble); break; } case ArithDiv: case ArithMod: { - if (Node::shouldSpeculateInt32ForArithmetic(node->child1().node(), node->child2().node()) - && node->canSpeculateInt32()) { - if (optimizeForX86() || optimizeForARM64() || optimizeForARMv7s()) { - 
fixEdge<Int32Use>(node->child1()); - fixEdge<Int32Use>(node->child2()); + Edge& leftChild = node->child1(); + Edge& rightChild = node->child2(); + if (op == ArithDiv + && Node::shouldSpeculateUntypedForArithmetic(leftChild.node(), rightChild.node()) + && m_graph.hasExitSite(node->origin.semantic, BadType)) { + fixEdge<UntypedUse>(leftChild); + fixEdge<UntypedUse>(rightChild); + node->setResult(NodeResultJS); + break; + } + if (m_graph.binaryArithShouldSpeculateInt32(node, FixupPass)) { + if (optimizeForX86() || optimizeForARM64() || optimizeForARMv7IDIVSupported()) { + fixIntOrBooleanEdge(leftChild); + fixIntOrBooleanEdge(rightChild); if (bytecodeCanTruncateInteger(node->arithNodeFlags())) node->setArithMode(Arith::Unchecked); else if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) @@ -242,85 +293,133 @@ private: node->setArithMode(Arith::CheckOverflowAndNegativeZero); break; } - Edge child1 = node->child1(); - Edge child2 = node->child2(); - injectInt32ToDoubleNode(node->child1()); - injectInt32ToDoubleNode(node->child2()); - + // This will cause conversion nodes to be inserted later. + fixDoubleOrBooleanEdge(leftChild); + fixDoubleOrBooleanEdge(rightChild); + // We don't need to do ref'ing on the children because we're stealing them from // the original division. Node* newDivision = m_insertionSet.insertNode( - m_indexInBlock, SpecDouble, *node); + m_indexInBlock, SpecBytecodeDouble, *node); + newDivision->setResult(NodeResultDouble); node->setOp(DoubleAsInt32); - node->children.initialize(Edge(newDivision, KnownNumberUse), Edge(), Edge()); + node->children.initialize(Edge(newDivision, DoubleRepUse), Edge(), Edge()); if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) node->setArithMode(Arith::CheckOverflow); else node->setArithMode(Arith::CheckOverflowAndNegativeZero); - - m_insertionSet.insertNode(m_indexInBlock + 1, SpecNone, Phantom, node->codeOrigin, child1, child2); break; } - fixEdge<NumberUse>(node->child1()); - fixEdge<NumberUse>(node->child2()); + fixDoubleOrBooleanEdge(leftChild); + fixDoubleOrBooleanEdge(rightChild); + node->setResult(NodeResultDouble); break; } case ArithMin: case ArithMax: { - if (Node::shouldSpeculateInt32ForArithmetic(node->child1().node(), node->child2().node()) - && node->canSpeculateInt32()) { - fixEdge<Int32Use>(node->child1()); - fixEdge<Int32Use>(node->child2()); + if (m_graph.binaryArithShouldSpeculateInt32(node, FixupPass)) { + fixIntOrBooleanEdge(node->child1()); + fixIntOrBooleanEdge(node->child2()); break; } - fixEdge<NumberUse>(node->child1()); - fixEdge<NumberUse>(node->child2()); + fixDoubleOrBooleanEdge(node->child1()); + fixDoubleOrBooleanEdge(node->child2()); + node->setResult(NodeResultDouble); break; } case ArithAbs: { - if (node->child1()->shouldSpeculateInt32ForArithmetic() - && node->canSpeculateInt32()) { - fixEdge<Int32Use>(node->child1()); + if (m_graph.unaryArithShouldSpeculateInt32(node, FixupPass)) { + fixIntOrBooleanEdge(node->child1()); + if (bytecodeCanTruncateInteger(node->arithNodeFlags())) + node->setArithMode(Arith::Unchecked); + else + node->setArithMode(Arith::CheckOverflow); + break; + } + fixDoubleOrBooleanEdge(node->child1()); + node->setResult(NodeResultDouble); + break; + } + + case ArithPow: { + node->setResult(NodeResultDouble); + if (node->child2()->shouldSpeculateInt32OrBooleanForArithmetic()) { + fixDoubleOrBooleanEdge(node->child1()); + fixIntOrBooleanEdge(node->child2()); break; } - fixEdge<NumberUse>(node->child1()); + + fixDoubleOrBooleanEdge(node->child1()); + 
fixDoubleOrBooleanEdge(node->child2()); + break; + } + + case ArithRandom: { + node->setResult(NodeResultDouble); + break; + } + + case ArithRound: + case ArithFloor: + case ArithCeil: { + if (m_graph.unaryArithShouldSpeculateInt32(node, FixupPass)) { + fixIntOrBooleanEdge(node->child1()); + insertCheck<Int32Use>(m_indexInBlock, node->child1().node()); + node->convertToIdentity(); + break; + } + fixDoubleOrBooleanEdge(node->child1()); + + if (isInt32OrBooleanSpeculation(node->getHeapPrediction()) && m_graph.roundShouldSpeculateInt32(node, FixupPass)) { + node->setResult(NodeResultInt32); + if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) + node->setArithRoundingMode(Arith::RoundingMode::Int32); + else + node->setArithRoundingMode(Arith::RoundingMode::Int32WithNegativeZeroCheck); + } else { + node->setResult(NodeResultDouble); + node->setArithRoundingMode(Arith::RoundingMode::Double); + } break; } case ArithSqrt: + case ArithFRound: case ArithSin: - case ArithCos: { - fixEdge<NumberUse>(node->child1()); + case ArithCos: + case ArithLog: { + fixDoubleOrBooleanEdge(node->child1()); + node->setResult(NodeResultDouble); break; } case LogicalNot: { - if (node->child1()->shouldSpeculateBoolean()) - fixEdge<BooleanUse>(node->child1()); - else if (node->child1()->shouldSpeculateObjectOrOther()) + if (node->child1()->shouldSpeculateBoolean()) { + if (node->child1()->result() == NodeResultBoolean) { + // This is necessary in case we have a bytecode instruction implemented by: + // + // a: CompareEq(...) + // b: LogicalNot(@a) + // + // In that case, CompareEq might have a side-effect. Then, we need to make + // sure that we know that Branch does not exit. + fixEdge<KnownBooleanUse>(node->child1()); + } else + fixEdge<BooleanUse>(node->child1()); + } else if (node->child1()->shouldSpeculateObjectOrOther()) fixEdge<ObjectOrOtherUse>(node->child1()); - else if (node->child1()->shouldSpeculateInt32()) - fixEdge<Int32Use>(node->child1()); + else if (node->child1()->shouldSpeculateInt32OrBoolean()) + fixIntOrBooleanEdge(node->child1()); else if (node->child1()->shouldSpeculateNumber()) - fixEdge<NumberUse>(node->child1()); + fixEdge<DoubleRepUse>(node->child1()); else if (node->child1()->shouldSpeculateString()) fixEdge<StringUse>(node->child1()); - break; - } - - case TypeOf: { - if (node->child1()->shouldSpeculateString()) - fixEdge<StringUse>(node->child1()); - else if (node->child1()->shouldSpeculateCell()) - fixEdge<CellUse>(node->child1()); - break; - } - - case CompareEqConstant: { + else if (node->child1()->shouldSpeculateStringOrOther()) + fixEdge<StringOrOtherUse>(node->child1()); break; } @@ -329,67 +428,97 @@ private: case CompareLessEq: case CompareGreater: case CompareGreaterEq: { - if (Node::shouldSpeculateInt32(node->child1().node(), node->child2().node())) { - fixEdge<Int32Use>(node->child1()); - fixEdge<Int32Use>(node->child2()); - node->clearFlags(NodeMustGenerate | NodeClobbersWorld); + if (node->op() == CompareEq + && Node::shouldSpeculateBoolean(node->child1().node(), node->child2().node())) { + fixEdge<BooleanUse>(node->child1()); + fixEdge<BooleanUse>(node->child2()); + node->clearFlags(NodeMustGenerate); + break; + } + if (Node::shouldSpeculateInt32OrBoolean(node->child1().node(), node->child2().node())) { + fixIntOrBooleanEdge(node->child1()); + fixIntOrBooleanEdge(node->child2()); + node->clearFlags(NodeMustGenerate); break; } if (enableInt52() && Node::shouldSpeculateMachineInt(node->child1().node(), node->child2().node())) { - fixEdge<MachineIntUse>(node->child1()); - 
fixEdge<MachineIntUse>(node->child2()); - node->clearFlags(NodeMustGenerate | NodeClobbersWorld); + fixEdge<Int52RepUse>(node->child1()); + fixEdge<Int52RepUse>(node->child2()); + node->clearFlags(NodeMustGenerate); break; } - if (Node::shouldSpeculateNumber(node->child1().node(), node->child2().node())) { - fixEdge<NumberUse>(node->child1()); - fixEdge<NumberUse>(node->child2()); - node->clearFlags(NodeMustGenerate | NodeClobbersWorld); + if (Node::shouldSpeculateNumberOrBoolean(node->child1().node(), node->child2().node())) { + fixDoubleOrBooleanEdge(node->child1()); + fixDoubleOrBooleanEdge(node->child2()); + node->clearFlags(NodeMustGenerate); break; } if (node->op() != CompareEq) break; - if (Node::shouldSpeculateBoolean(node->child1().node(), node->child2().node())) { - fixEdge<BooleanUse>(node->child1()); - fixEdge<BooleanUse>(node->child2()); - node->clearFlags(NodeMustGenerate | NodeClobbersWorld); + if (Node::shouldSpeculateSymbol(node->child1().node(), node->child2().node())) { + fixEdge<SymbolUse>(node->child1()); + fixEdge<SymbolUse>(node->child2()); + node->clearFlags(NodeMustGenerate); break; } if (node->child1()->shouldSpeculateStringIdent() && node->child2()->shouldSpeculateStringIdent()) { fixEdge<StringIdentUse>(node->child1()); fixEdge<StringIdentUse>(node->child2()); - node->clearFlags(NodeMustGenerate | NodeClobbersWorld); + node->clearFlags(NodeMustGenerate); break; } if (node->child1()->shouldSpeculateString() && node->child2()->shouldSpeculateString() && GPRInfo::numberOfRegisters >= 7) { fixEdge<StringUse>(node->child1()); fixEdge<StringUse>(node->child2()); - node->clearFlags(NodeMustGenerate | NodeClobbersWorld); + node->clearFlags(NodeMustGenerate); break; } if (node->child1()->shouldSpeculateObject() && node->child2()->shouldSpeculateObject()) { fixEdge<ObjectUse>(node->child1()); fixEdge<ObjectUse>(node->child2()); - node->clearFlags(NodeMustGenerate | NodeClobbersWorld); + node->clearFlags(NodeMustGenerate); + break; + } + + // If either child can be proved to be Null or Undefined, comparing them is greatly simplified. 
+ bool oneArgumentIsUsedAsSpecOther = false; + if (node->child1()->isUndefinedOrNullConstant()) { + fixEdge<OtherUse>(node->child1()); + oneArgumentIsUsedAsSpecOther = true; + } else if (node->child1()->shouldSpeculateOther()) { + m_insertionSet.insertNode(m_indexInBlock, SpecNone, Check, node->origin, + Edge(node->child1().node(), OtherUse)); + fixEdge<OtherUse>(node->child1()); + oneArgumentIsUsedAsSpecOther = true; + } + if (node->child2()->isUndefinedOrNullConstant()) { + fixEdge<OtherUse>(node->child2()); + oneArgumentIsUsedAsSpecOther = true; + } else if (node->child2()->shouldSpeculateOther()) { + m_insertionSet.insertNode(m_indexInBlock, SpecNone, Check, node->origin, + Edge(node->child2().node(), OtherUse)); + fixEdge<OtherUse>(node->child2()); + oneArgumentIsUsedAsSpecOther = true; + } + if (oneArgumentIsUsedAsSpecOther) { + node->clearFlags(NodeMustGenerate); break; } + if (node->child1()->shouldSpeculateObject() && node->child2()->shouldSpeculateObjectOrOther()) { fixEdge<ObjectUse>(node->child1()); fixEdge<ObjectOrOtherUse>(node->child2()); - node->clearFlags(NodeMustGenerate | NodeClobbersWorld); + node->clearFlags(NodeMustGenerate); break; } if (node->child1()->shouldSpeculateObjectOrOther() && node->child2()->shouldSpeculateObject()) { fixEdge<ObjectOrOtherUse>(node->child1()); fixEdge<ObjectUse>(node->child2()); - node->clearFlags(NodeMustGenerate | NodeClobbersWorld); + node->clearFlags(NodeMustGenerate); break; } - break; - } - - case CompareStrictEqConstant: { + break; } @@ -406,13 +535,18 @@ private: } if (enableInt52() && Node::shouldSpeculateMachineInt(node->child1().node(), node->child2().node())) { - fixEdge<MachineIntUse>(node->child1()); - fixEdge<MachineIntUse>(node->child2()); + fixEdge<Int52RepUse>(node->child1()); + fixEdge<Int52RepUse>(node->child2()); break; } if (Node::shouldSpeculateNumber(node->child1().node(), node->child2().node())) { - fixEdge<NumberUse>(node->child1()); - fixEdge<NumberUse>(node->child2()); + fixEdge<DoubleRepUse>(node->child1()); + fixEdge<DoubleRepUse>(node->child2()); + break; + } + if (Node::shouldSpeculateSymbol(node->child1().node(), node->child2().node())) { + fixEdge<SymbolUse>(node->child1()); + fixEdge<SymbolUse>(node->child2()); break; } if (node->child1()->shouldSpeculateStringIdent() && node->child2()->shouldSpeculateStringIdent()) { @@ -420,21 +554,66 @@ private: fixEdge<StringIdentUse>(node->child2()); break; } - if (node->child1()->shouldSpeculateString() && node->child2()->shouldSpeculateString() && GPRInfo::numberOfRegisters >= 7) { + if (node->child1()->shouldSpeculateString() && node->child2()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters >= 7) || isFTL(m_graph.m_plan.mode))) { fixEdge<StringUse>(node->child1()); fixEdge<StringUse>(node->child2()); break; } - if (node->child1()->shouldSpeculateObject() && node->child2()->shouldSpeculateObject()) { + WatchpointSet* masqueradesAsUndefinedWatchpoint = m_graph.globalObjectFor(node->origin.semantic)->masqueradesAsUndefinedWatchpoint(); + if (masqueradesAsUndefinedWatchpoint->isStillValid()) { + + if (node->child1()->shouldSpeculateObject()) { + m_graph.watchpoints().addLazily(masqueradesAsUndefinedWatchpoint); + fixEdge<ObjectUse>(node->child1()); + break; + } + if (node->child2()->shouldSpeculateObject()) { + m_graph.watchpoints().addLazily(masqueradesAsUndefinedWatchpoint); + fixEdge<ObjectUse>(node->child2()); + break; + } + + } else if (node->child1()->shouldSpeculateObject() && node->child2()->shouldSpeculateObject()) { 
fixEdge<ObjectUse>(node->child1()); fixEdge<ObjectUse>(node->child2()); break; } + if (node->child1()->shouldSpeculateMisc()) { + fixEdge<MiscUse>(node->child1()); + break; + } + if (node->child2()->shouldSpeculateMisc()) { + fixEdge<MiscUse>(node->child2()); + break; + } + if (node->child1()->shouldSpeculateStringIdent() + && node->child2()->shouldSpeculateNotStringVar()) { + fixEdge<StringIdentUse>(node->child1()); + fixEdge<NotStringVarUse>(node->child2()); + break; + } + if (node->child2()->shouldSpeculateStringIdent() + && node->child1()->shouldSpeculateNotStringVar()) { + fixEdge<StringIdentUse>(node->child2()); + fixEdge<NotStringVarUse>(node->child1()); + break; + } + if (node->child1()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters >= 8) || isFTL(m_graph.m_plan.mode))) { + fixEdge<StringUse>(node->child1()); + break; + } + if (node->child2()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters >= 8) || isFTL(m_graph.m_plan.mode))) { + fixEdge<StringUse>(node->child2()); + break; + } break; } case StringFromCharCode: - fixEdge<Int32Use>(node->child1()); + if (node->child1()->shouldSpeculateInt32()) + fixEdge<Int32Use>(node->child1()); + else + fixEdge<UntypedUse>(node->child1()); break; case StringCharAt: @@ -448,27 +627,84 @@ private: } case GetByVal: { + if (!node->prediction()) { + m_insertionSet.insertNode( + m_indexInBlock, SpecNone, ForceOSRExit, node->origin); + } + node->setArrayMode( node->arrayMode().refine( + m_graph, node, node->child1()->prediction(), node->child2()->prediction(), - SpecNone, node->flags())); + SpecNone)); blessArrayOperation(node->child1(), node->child2(), node->child3()); ArrayMode arrayMode = node->arrayMode(); switch (arrayMode.type()) { + case Array::Contiguous: case Array::Double: if (arrayMode.arrayClass() == Array::OriginalArray - && arrayMode.speculation() == Array::InBounds - && m_graph.globalObjectFor(node->codeOrigin)->arrayPrototypeChainIsSane() - && !(node->flags() & NodeBytecodeUsesAsOther)) - node->setArrayMode(arrayMode.withSpeculation(Array::SaneChain)); + && arrayMode.speculation() == Array::InBounds) { + JSGlobalObject* globalObject = m_graph.globalObjectFor(node->origin.semantic); + if (globalObject->arrayPrototypeChainIsSane()) { + // Check if SaneChain will work on a per-type basis. Note that: + // + // 1) We don't want double arrays to sometimes return undefined, since + // that would require a change to the return type and it would pessimise + // things a lot. So, we'd only want to do that if we actually had + // evidence that we could read from a hole. That's pretty annoying. + // Likely the best way to handle that case is with an equivalent of + // SaneChain for OutOfBounds. For now we just detect when Undefined and + // NaN are indistinguishable according to backwards propagation, and just + // use SaneChain in that case. This happens to catch a lot of cases. + // + // 2) We don't want int32 array loads to have to do a hole check just to + // coerce to Undefined, since that would mean twice the checks. + // + // This has two implications. First, we have to do more checks than we'd + // like. It's unfortunate that we have to do the hole check. Second, + // some accesses that hit a hole will now need to take the full-blown + // out-of-bounds slow path. We can fix that with: + // https://bugs.webkit.org/show_bug.cgi?id=144668 + + bool canDoSaneChain = false; + switch (arrayMode.type()) { + case Array::Contiguous: + // This is happens to be entirely natural. 
We already would have + // returned any JSValue, and now we'll return Undefined. We still do + // the check but it doesn't require taking any kind of slow path. + canDoSaneChain = true; + break; + + case Array::Double: + if (!(node->flags() & NodeBytecodeUsesAsOther)) { + // Holes look like NaN already, so if the user doesn't care + // about the difference between Undefined and NaN then we can + // do this. + canDoSaneChain = true; + } + break; + + default: + break; + } + + if (canDoSaneChain) { + m_graph.watchpoints().addLazily( + globalObject->arrayPrototype()->structure()->transitionWatchpointSet()); + m_graph.watchpoints().addLazily( + globalObject->objectPrototype()->structure()->transitionWatchpointSet()); + node->setArrayMode(arrayMode.withSpeculation(Array::SaneChain)); + } + } + } break; case Array::String: if ((node->prediction() & ~SpecString) - || m_graph.hasExitSite(node->codeOrigin, OutOfBounds)) + || m_graph.hasExitSite(node->origin.semantic, OutOfBounds)) node->setArrayMode(arrayMode.withSpeculation(Array::OutOfBounds)); break; @@ -476,10 +712,10 @@ private: break; } - switch (node->arrayMode().type()) { + arrayMode = node->arrayMode(); + switch (arrayMode.type()) { case Array::SelectUsingPredictions: case Array::Unprofiled: - case Array::Undecided: RELEASE_ASSERT_NOT_REACHED(); break; case Array::Generic: @@ -495,6 +731,30 @@ private: break; } + switch (arrayMode.type()) { + case Array::Double: + if (!arrayMode.isOutOfBounds()) + node->setResult(NodeResultDouble); + break; + + case Array::Float32Array: + case Array::Float64Array: + node->setResult(NodeResultDouble); + break; + + case Array::Uint32Array: + if (node->shouldSpeculateInt32()) + break; + if (node->shouldSpeculateMachineInt() && enableInt52()) + node->setResult(NodeResultInt52); + else + node->setResult(NodeResultDouble); + break; + + default: + break; + } + break; } @@ -507,6 +767,7 @@ private: node->setArrayMode( node->arrayMode().refine( + m_graph, node, child1->prediction(), child2->prediction(), child3->prediction())); @@ -515,6 +776,7 @@ private: switch (node->arrayMode().modeForPut().type()) { case Array::SelectUsingPredictions: + case Array::SelectUsingArguments: case Array::Unprofiled: case Array::Undecided: RELEASE_ASSERT_NOT_REACHED(); @@ -532,15 +794,11 @@ private: fixEdge<KnownCellUse>(child1); fixEdge<Int32Use>(child2); fixEdge<Int32Use>(child3); - if (child3->prediction() & SpecInt52) - fixEdge<MachineIntUse>(child3); - else - fixEdge<Int32Use>(child3); break; case Array::Double: fixEdge<KnownCellUse>(child1); fixEdge<Int32Use>(child2); - fixEdge<RealNumberUse>(child3); + fixEdge<DoubleRepRealUse>(child3); break; case Array::Int8Array: case Array::Int16Array: @@ -552,25 +810,24 @@ private: fixEdge<KnownCellUse>(child1); fixEdge<Int32Use>(child2); if (child3->shouldSpeculateInt32()) - fixEdge<Int32Use>(child3); + fixIntOrBooleanEdge(child3); else if (child3->shouldSpeculateMachineInt()) - fixEdge<MachineIntUse>(child3); + fixEdge<Int52RepUse>(child3); else - fixEdge<NumberUse>(child3); + fixDoubleOrBooleanEdge(child3); break; case Array::Float32Array: case Array::Float64Array: fixEdge<KnownCellUse>(child1); fixEdge<Int32Use>(child2); - fixEdge<NumberUse>(child3); + fixDoubleOrBooleanEdge(child3); break; case Array::Contiguous: case Array::ArrayStorage: case Array::SlowPutArrayStorage: - case Array::Arguments: fixEdge<KnownCellUse>(child1); fixEdge<Int32Use>(child2); - insertStoreBarrier(m_indexInBlock, child1, child3); + speculateForBarrier(child3); break; default: fixEdge<KnownCellUse>(child1); @@ 
-592,6 +849,7 @@ private: // that would break things. node->setArrayMode( node->arrayMode().refine( + m_graph, node, node->child1()->prediction() & SpecCell, SpecInt32, node->child2()->prediction())); @@ -603,11 +861,11 @@ private: fixEdge<Int32Use>(node->child2()); break; case Array::Double: - fixEdge<RealNumberUse>(node->child2()); + fixEdge<DoubleRepRealUse>(node->child2()); break; case Array::Contiguous: case Array::ArrayStorage: - insertStoreBarrier(m_indexInBlock, node->child1(), node->child2()); + speculateForBarrier(node->child2()); break; default: break; @@ -623,54 +881,52 @@ private: case RegExpExec: case RegExpTest: { - fixEdge<CellUse>(node->child1()); - fixEdge<CellUse>(node->child2()); + // FIXME: These should probably speculate something stronger than cell. + // https://bugs.webkit.org/show_bug.cgi?id=154900 + if (node->child1()->shouldSpeculateCell() + && node->child2()->shouldSpeculateCell()) { + fixEdge<CellUse>(node->child1()); + fixEdge<CellUse>(node->child2()); + break; + } + break; + } + + case StringReplace: { + if (node->child1()->shouldSpeculateString() + && node->child2()->shouldSpeculateRegExpObject() + && node->child3()->shouldSpeculateString()) { + fixEdge<StringUse>(node->child1()); + fixEdge<RegExpObjectUse>(node->child2()); + fixEdge<StringUse>(node->child3()); + break; + } break; } case Branch: { - if (node->child1()->shouldSpeculateBoolean()) - fixEdge<BooleanUse>(node->child1()); - else if (node->child1()->shouldSpeculateObjectOrOther()) + if (node->child1()->shouldSpeculateBoolean()) { + if (node->child1()->result() == NodeResultBoolean) { + // This is necessary in case we have a bytecode instruction implemented by: + // + // a: CompareEq(...) + // b: Branch(@a) + // + // In that case, CompareEq might have a side-effect. Then, we need to make + // sure that we know that Branch does not exit. + fixEdge<KnownBooleanUse>(node->child1()); + } else + fixEdge<BooleanUse>(node->child1()); + } else if (node->child1()->shouldSpeculateObjectOrOther()) fixEdge<ObjectOrOtherUse>(node->child1()); - else if (node->child1()->shouldSpeculateInt32()) - fixEdge<Int32Use>(node->child1()); + else if (node->child1()->shouldSpeculateInt32OrBoolean()) + fixIntOrBooleanEdge(node->child1()); else if (node->child1()->shouldSpeculateNumber()) - fixEdge<NumberUse>(node->child1()); - - Node* logicalNot = node->child1().node(); - if (logicalNot->op() == LogicalNot) { - - // Make sure that OSR exit can't observe the LogicalNot. If it can, - // then we must compute it and cannot peephole around it. 
- bool found = false; - bool ok = true; - for (unsigned i = m_indexInBlock; i--;) { - Node* candidate = m_block->at(i); - if (candidate == logicalNot) { - found = true; - break; - } - if (candidate->canExit()) { - ok = false; - found = true; - break; - } - } - ASSERT_UNUSED(found, found); - - if (ok) { - Edge newChildEdge = logicalNot->child1(); - if (newChildEdge->hasBooleanResult()) { - node->children.setChild1(newChildEdge); - - BasicBlock* toBeTaken = node->notTakenBlock(); - BasicBlock* toBeNotTaken = node->takenBlock(); - node->setTakenBlock(toBeTaken); - node->setNotTakenBlock(toBeNotTaken); - } - } - } + fixEdge<DoubleRepUse>(node->child1()); + else if (node->child1()->shouldSpeculateString()) + fixEdge<StringUse>(node->child1()); + else if (node->child1()->shouldSpeculateStringOrOther()) + fixEdge<StringOrOtherUse>(node->child1()); break; } @@ -691,6 +947,12 @@ private: else if (node->child1()->shouldSpeculateString()) fixEdge<StringUse>(node->child1()); break; + case SwitchCell: + if (node->child1()->shouldSpeculateCell()) + fixEdge<CellUse>(node->child1()); + // else it's fine for this to have UntypedUse; we will handle this by just making + // non-cells take the default case. + break; } break; } @@ -700,8 +962,9 @@ private: break; } - case ToString: { - fixupToString(node); + case ToString: + case CallStringConstructor: { + fixupToStringOrCallStringConstructor(node); break; } @@ -711,6 +974,8 @@ private: } case NewArray: { + watchHavingABadTime(node); + for (unsigned i = m_graph.varArgNumChildren(node); i--;) { node->setIndexingType( leastUpperBoundOfIndexingTypeAndType( @@ -726,7 +991,7 @@ private: // would have already exited by now, but insert a forced exit just to // be safe. m_insertionSet.insertNode( - m_indexInBlock, SpecNone, ForceOSRExit, node->codeOrigin); + m_indexInBlock, SpecNone, ForceOSRExit, node->origin); } break; case ALL_INT32_INDEXING_TYPES: @@ -735,7 +1000,7 @@ private: break; case ALL_DOUBLE_INDEXING_TYPES: for (unsigned operandIndex = 0; operandIndex < node->numChildren(); ++operandIndex) - fixEdge<RealNumberUse>(m_graph.m_varArgChildren[node->firstChild() + operandIndex]); + fixEdge<DoubleRepRealUse>(m_graph.m_varArgChildren[node->firstChild() + operandIndex]); break; case ALL_CONTIGUOUS_INDEXING_TYPES: case ALL_ARRAY_STORAGE_INDEXING_TYPES: @@ -748,68 +1013,49 @@ private: } case NewTypedArray: { + watchHavingABadTime(node); + if (node->child1()->shouldSpeculateInt32()) { fixEdge<Int32Use>(node->child1()); - node->clearFlags(NodeMustGenerate | NodeClobbersWorld); + node->clearFlags(NodeMustGenerate); break; } break; } case NewArrayWithSize: { + watchHavingABadTime(node); fixEdge<Int32Use>(node->child1()); break; } case ToThis: { - ECMAMode ecmaMode = m_graph.executableFor(node->codeOrigin)->isStrictMode() ? 
StrictMode : NotStrictMode; - - if (isOtherSpeculation(node->child1()->prediction())) { - if (ecmaMode == StrictMode) { - fixEdge<OtherUse>(node->child1()); - node->convertToIdentity(); - break; - } - - m_insertionSet.insertNode( - m_indexInBlock, SpecNone, Phantom, node->codeOrigin, - Edge(node->child1().node(), OtherUse)); - observeUseKindOnNode<OtherUse>(node->child1().node()); - node->convertToWeakConstant(m_graph.globalThisObjectFor(node->codeOrigin)); - break; - } - - if (isFinalObjectSpeculation(node->child1()->prediction())) { - fixEdge<FinalObjectUse>(node->child1()); - node->convertToIdentity(); - break; - } - + fixupToThis(node); break; } - case GetMyArgumentByVal: - case GetMyArgumentByValSafe: { - fixEdge<Int32Use>(node->child1()); + case PutStructure: { + fixEdge<KnownCellUse>(node->child1()); break; } - case PutStructure: { + case GetClosureVar: + case GetFromArguments: { fixEdge<KnownCellUse>(node->child1()); - insertStoreBarrier(m_indexInBlock, node->child1()); break; } - case PutClosureVar: { + case PutClosureVar: + case PutToArguments: { fixEdge<KnownCellUse>(node->child1()); - insertStoreBarrier(m_indexInBlock, node->child1(), node->child3()); + speculateForBarrier(node->child2()); break; } - case GetClosureRegisters: - case SkipTopScope: case SkipScope: - case GetScope: { + case GetScope: + case GetGetter: + case GetSetter: { fixEdge<KnownCellUse>(node->child1()); break; } @@ -817,48 +1063,80 @@ private: case AllocatePropertyStorage: case ReallocatePropertyStorage: { fixEdge<KnownCellUse>(node->child1()); - insertStoreBarrier(m_indexInBlock + 1, node->child1()); break; } case GetById: case GetByIdFlush: { - if (!node->child1()->shouldSpeculateCell()) - break; - StringImpl* impl = m_graph.identifiers()[node->identifierNumber()]; - if (impl == vm().propertyNames->length.impl()) { - attemptToMakeGetArrayLength(node); - break; - } - if (impl == vm().propertyNames->byteLength.impl()) { - attemptToMakeGetTypedArrayByteLength(node); - break; - } - if (impl == vm().propertyNames->byteOffset.impl()) { - attemptToMakeGetTypedArrayByteOffset(node); - break; + // FIXME: This should be done in the ByteCodeParser based on reading the + // PolymorphicAccess, which will surely tell us that this is a AccessCase::ArrayLength. 
+ // https://bugs.webkit.org/show_bug.cgi?id=154990 + if (node->child1()->shouldSpeculateCellOrOther() + && !m_graph.hasExitSite(node->origin.semantic, BadType) + && !m_graph.hasExitSite(node->origin.semantic, BadCache) + && !m_graph.hasExitSite(node->origin.semantic, BadIndexingType) + && !m_graph.hasExitSite(node->origin.semantic, ExoticObjectMode)) { + auto uid = m_graph.identifiers()[node->identifierNumber()]; + if (uid == vm().propertyNames->length.impl()) { + attemptToMakeGetArrayLength(node); + break; + } } - fixEdge<CellUse>(node->child1()); + + if (node->child1()->shouldSpeculateCell()) + fixEdge<CellUse>(node->child1()); break; } case PutById: + case PutByIdFlush: case PutByIdDirect: { fixEdge<CellUse>(node->child1()); - insertStoreBarrier(m_indexInBlock, node->child1(), node->child2()); break; } - case CheckExecutable: + case PutGetterById: + case PutSetterById: { + fixEdge<KnownCellUse>(node->child1()); + fixEdge<KnownCellUse>(node->child2()); + break; + } + + case PutGetterSetterById: { + fixEdge<KnownCellUse>(node->child1()); + break; + } + + case PutGetterByVal: + case PutSetterByVal: { + fixEdge<KnownCellUse>(node->child1()); + fixEdge<KnownCellUse>(node->child3()); + break; + } + + case GetExecutable: { + fixEdge<FunctionUse>(node->child1()); + break; + } + + case OverridesHasInstance: case CheckStructure: - case StructureTransitionWatchpoint: - case CheckFunction: - case CheckHasInstance: + case CheckCell: case CreateThis: - case GetButterfly: { + case GetButterfly: + case GetButterflyReadOnly: { fixEdge<CellUse>(node->child1()); break; } + + case CheckIdent: { + UniquedStringImpl* uid = node->uidOperand(); + if (uid->isSymbol()) + fixEdge<SymbolUse>(node->child1()); + else + fixEdge<StringIdentUse>(node->child1()); + break; + } case Arrayify: case ArrayifyToStructure: { @@ -868,30 +1146,47 @@ private: break; } - case GetByOffset: { + case GetByOffset: + case GetGetterSetterByOffset: { if (!node->child1()->hasStorageResult()) fixEdge<KnownCellUse>(node->child1()); fixEdge<KnownCellUse>(node->child2()); break; } + case MultiGetByOffset: { + fixEdge<CellUse>(node->child1()); + break; + } + case PutByOffset: { if (!node->child1()->hasStorageResult()) fixEdge<KnownCellUse>(node->child1()); fixEdge<KnownCellUse>(node->child2()); - insertStoreBarrier(m_indexInBlock, node->child2(), node->child3()); + insertInferredTypeCheck( + m_insertionSet, m_indexInBlock, node->origin, node->child3().node(), + node->storageAccessData().inferredType); + speculateForBarrier(node->child3()); + break; + } + + case MultiPutByOffset: { + fixEdge<CellUse>(node->child1()); + speculateForBarrier(node->child2()); break; } case InstanceOf: { - // FIXME: This appears broken: CheckHasInstance already does an unconditional cell - // check. https://bugs.webkit.org/show_bug.cgi?id=107479 if (!(node->child1()->prediction() & ~SpecCell)) fixEdge<CellUse>(node->child1()); fixEdge<CellUse>(node->child2()); break; } - + + case InstanceOfCustom: + fixEdge<CellUse>(node->child2()); + break; + case In: { // FIXME: We should at some point have array profiling on op_in, in which // case we would be able to turn this into a kind of GetByVal. 
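Several hunks in this range replace explicit insertStoreBarrier(...) calls with speculateForBarrier(childN): fixup no longer emits a barrier node itself, it only records a cheap speculation that the stored value is not a cell, so a wrong guess is discovered before an expensive compile. The helper's definition appears further down in this diff; the sketch below is only a simplified standalone model of its check ladder (Int32, then Boolean, then Other, then Number, then NotCell). SpeculationBits, the bit names, and pickBarrierSpeculation are invented here for illustration and are not JSC API.

    // Illustrative sketch only: models the order of checks used by
    // speculateForBarrier; none of these names are real JSC types.
    #include <cstdint>
    #include <cstdio>

    enum SpeculationBits : uint32_t {
        SpecInt32   = 1 << 0,
        SpecBoolean = 1 << 1,
        SpecOther   = 1 << 2,   // undefined / null
        SpecDouble  = 1 << 3,
        SpecCell    = 1 << 4,
    };

    const char* pickBarrierSpeculation(uint32_t prediction) {
        // A value only needs a store barrier if it can be a cell, so we try
        // to prove "not a cell" with the cheapest sufficient check first.
        // (Simplified: real shouldSpeculateX() predicates are richer.)
        if (prediction && !(prediction & ~SpecInt32))                return "Int32Use check";
        if (prediction && !(prediction & ~SpecBoolean))              return "BooleanUse check";
        if (prediction && !(prediction & ~SpecOther))                return "OtherUse check";
        if (prediction && !(prediction & ~(SpecInt32 | SpecDouble))) return "NumberUse check";
        if (!(prediction & SpecCell))                                return "NotCellUse check";
        return "no check; value may be a cell";
    }

    int main() {
        std::printf("%s\n", pickBarrierSpeculation(SpecInt32));
        std::printf("%s\n", pickBarrierSpeculation(SpecInt32 | SpecDouble));
        std::printf("%s\n", pickBarrierSpeculation(SpecCell | SpecInt32));
        return 0;
    }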
@@ -900,93 +1195,257 @@ private: break; } - case Phantom: - case Identity: case Check: { - switch (node->child1().useKind()) { - case NumberUse: - if (node->child1()->shouldSpeculateInt32ForArithmetic()) - node->child1().setUseKind(Int32Use); - break; - default: - break; - } - observeUseKindOnEdge(node->child1()); + m_graph.doToChildren( + node, + [&] (Edge& edge) { + switch (edge.useKind()) { + case NumberUse: + if (edge->shouldSpeculateInt32ForArithmetic()) + edge.setUseKind(Int32Use); + break; + default: + break; + } + observeUseKindOnEdge(edge); + }); + break; + } + + case Phantom: + // Phantoms are meaningless past Fixup. We recreate them on-demand in the backend. + node->remove(); + break; + + case FiatInt52: { + RELEASE_ASSERT(enableInt52()); + node->convertToIdentity(); + fixEdge<Int52RepUse>(node->child1()); + node->setResult(NodeResultInt52); + break; + } + + case GetArrayLength: { + fixEdge<KnownCellUse>(node->child1()); + break; + } + + case GetTypedArrayByteOffset: { + fixEdge<KnownCellUse>(node->child1()); break; } - case GetArrayLength: case Phi: case Upsilon: - case GetArgument: - case PhantomPutStructure: case GetIndexedPropertyStorage: - case GetTypedArrayByteOffset: case LastNodeType: case CheckTierUpInLoop: case CheckTierUpAtReturn: case CheckTierUpAndOSREnter: - case Int52ToDouble: - case Int52ToValue: + case CheckTierUpWithNestedTriggerAndOSREnter: case InvalidationPoint: case CheckArray: case CheckInBounds: case ConstantStoragePointer: case DoubleAsInt32: - case Int32ToDouble: case ValueToInt32: + case DoubleRep: + case ValueRep: + case Int52Rep: + case Int52Constant: + case Identity: // This should have been cleaned up. + case BooleanToNumber: + case PhantomNewObject: + case PhantomNewFunction: + case PhantomNewGeneratorFunction: + case PhantomCreateActivation: + case PhantomDirectArguments: + case PhantomClonedArguments: + case ForwardVarargs: + case GetMyArgumentByVal: + case PutHint: + case CheckStructureImmediate: + case MaterializeNewObject: + case MaterializeCreateActivation: + case PutStack: + case KillStack: + case GetStack: + case StoreBarrier: // These are just nodes that we don't currently expect to see during fixup. // If we ever wanted to insert them prior to fixup, then we just have to create // fixup rules for them. 
- RELEASE_ASSERT_NOT_REACHED(); + DFG_CRASH(m_graph, node, "Unexpected node during fixup"); break; - case PutGlobalVar: { - Node* globalObjectNode = m_insertionSet.insertNode(m_indexInBlock, SpecNone, WeakJSConstant, node->codeOrigin, - OpInfo(m_graph.globalObjectFor(node->codeOrigin))); - Node* barrierNode = m_graph.addNode(SpecNone, ConditionalStoreBarrier, m_currentNode->codeOrigin, - Edge(globalObjectNode, KnownCellUse), Edge(node->child1().node(), UntypedUse)); - fixupNode(barrierNode); - m_insertionSet.insert(m_indexInBlock, barrierNode); - break; - } - - case TearOffActivation: { - Node* barrierNode = m_graph.addNode(SpecNone, StoreBarrierWithNullCheck, m_currentNode->codeOrigin, - Edge(node->child1().node(), UntypedUse)); - fixupNode(barrierNode); - m_insertionSet.insert(m_indexInBlock, barrierNode); + case PutGlobalVariable: { + fixEdge<CellUse>(node->child1()); + speculateForBarrier(node->child2()); break; } case IsString: if (node->child1()->shouldSpeculateString()) { - m_insertionSet.insertNode(m_indexInBlock, SpecNone, Phantom, node->codeOrigin, + m_insertionSet.insertNode( + m_indexInBlock, SpecNone, Check, node->origin, Edge(node->child1().node(), StringUse)); m_graph.convertToConstant(node, jsBoolean(true)); observeUseKindOnNode<StringUse>(node); } break; + + case IsObject: + if (node->child1()->shouldSpeculateObject()) { + m_insertionSet.insertNode( + m_indexInBlock, SpecNone, Check, node->origin, + Edge(node->child1().node(), ObjectUse)); + m_graph.convertToConstant(node, jsBoolean(true)); + observeUseKindOnNode<ObjectUse>(node); + } + break; + + case GetEnumerableLength: { + fixEdge<CellUse>(node->child1()); + break; + } + case HasGenericProperty: { + fixEdge<CellUse>(node->child2()); + break; + } + case HasStructureProperty: { + fixEdge<StringUse>(node->child2()); + fixEdge<KnownCellUse>(node->child3()); + break; + } + case HasIndexedProperty: { + node->setArrayMode( + node->arrayMode().refine( + m_graph, node, + node->child1()->prediction(), + node->child2()->prediction(), + SpecNone)); + blessArrayOperation(node->child1(), node->child2(), node->child3()); + fixEdge<CellUse>(node->child1()); + fixEdge<KnownInt32Use>(node->child2()); + break; + } + case GetDirectPname: { + Edge& base = m_graph.varArgChild(node, 0); + Edge& property = m_graph.varArgChild(node, 1); + Edge& index = m_graph.varArgChild(node, 2); + Edge& enumerator = m_graph.varArgChild(node, 3); + fixEdge<CellUse>(base); + fixEdge<KnownCellUse>(property); + fixEdge<KnownInt32Use>(index); + fixEdge<KnownCellUse>(enumerator); + break; + } + case GetPropertyEnumerator: { + fixEdge<CellUse>(node->child1()); + break; + } + case GetEnumeratorStructurePname: { + fixEdge<KnownCellUse>(node->child1()); + fixEdge<KnownInt32Use>(node->child2()); + break; + } + case GetEnumeratorGenericPname: { + fixEdge<KnownCellUse>(node->child1()); + fixEdge<KnownInt32Use>(node->child2()); + break; + } + case ToIndexString: { + fixEdge<KnownInt32Use>(node->child1()); + break; + } + case ProfileType: { + // We want to insert type checks based on the instructionTypeSet of the TypeLocation, not the globalTypeSet. + // Because the instructionTypeSet is contained in globalTypeSet, if we produce a type check for + // type T for the instructionTypeSet, the global type set must also have information for type T. + // So if it the type check succeeds for type T in the instructionTypeSet, a type check for type T + // in the globalTypeSet would've also succeeded. + // (The other direction does not hold in general). 
+ + RefPtr<TypeSet> typeSet = node->typeLocation()->m_instructionTypeSet; + RuntimeTypeMask seenTypes = typeSet->seenTypes(); + if (typeSet->doesTypeConformTo(TypeMachineInt)) { + if (node->child1()->shouldSpeculateInt32()) + fixEdge<Int32Use>(node->child1()); + else + fixEdge<MachineIntUse>(node->child1()); + node->remove(); + } else if (typeSet->doesTypeConformTo(TypeNumber | TypeMachineInt)) { + fixEdge<NumberUse>(node->child1()); + node->remove(); + } else if (typeSet->doesTypeConformTo(TypeString)) { + fixEdge<StringUse>(node->child1()); + node->remove(); + } else if (typeSet->doesTypeConformTo(TypeBoolean)) { + fixEdge<BooleanUse>(node->child1()); + node->remove(); + } else if (typeSet->doesTypeConformTo(TypeUndefined | TypeNull) && (seenTypes & TypeUndefined) && (seenTypes & TypeNull)) { + fixEdge<OtherUse>(node->child1()); + node->remove(); + } else if (typeSet->doesTypeConformTo(TypeObject)) { + StructureSet set = typeSet->structureSet(); + if (!set.isEmpty()) { + fixEdge<CellUse>(node->child1()); + node->convertToCheckStructure(m_graph.addStructureSet(set)); + } + } + + break; + } + + case CreateScopedArguments: + case CreateActivation: + case NewFunction: + case NewGeneratorFunction: { + fixEdge<CellUse>(node->child1()); + break; + } + + case NewArrowFunction: { + fixEdge<CellUse>(node->child1()); + fixEdge<CellUse>(node->child2()); + break; + } + + case CopyRest: { + fixEdge<KnownCellUse>(node->child1()); + fixEdge<KnownInt32Use>(node->child2()); + break; + } + #if !ASSERT_DISABLED // Have these no-op cases here to ensure that nobody forgets to add handlers for new opcodes. case SetArgument: case JSConstant: - case WeakJSConstant: + case DoubleConstant: case GetLocal: case GetCallee: + case GetArgumentCount: + case GetRestLength: case Flush: case PhantomLocal: case GetLocalUnlinked: - case GetMyScope: - case GetClosureVar: case GetGlobalVar: + case GetGlobalLexicalVariable: case NotifyWrite: - case VariableWatchpoint: case VarInjectionWatchpoint: - case AllocationProfileWatchpoint: case Call: + case CheckTypeInfoFlags: + case TailCallInlinedCaller: case Construct: + case CallVarargs: + case TailCallVarargsInlinedCaller: + case ConstructVarargs: + case CallForwardVarargs: + case ConstructForwardVarargs: + case TailCallForwardVarargs: + case TailCallForwardVarargsInlinedCaller: + case LoadVarargs: + case ProfileControlFlow: case NewObject: case NewArrayBuffer: case NewRegexp: @@ -996,58 +1455,56 @@ private: case IsUndefined: case IsBoolean: case IsNumber: - case IsObject: + case IsObjectOrNull: case IsFunction: - case CreateActivation: - case CreateArguments: - case PhantomArguments: - case TearOffArguments: - case GetMyArgumentsLength: - case GetMyArgumentsLengthSafe: - case CheckArgumentsNotCreated: - case NewFunction: - case NewFunctionNoCheck: - case NewFunctionExpression: + case CreateDirectArguments: + case CreateClonedArguments: case Jump: case Return: + case TailCall: + case TailCallVarargs: case Throw: case ThrowReferenceError: case CountExecution: case ForceOSRExit: + case CheckBadCell: + case CheckNotEmpty: case CheckWatchdogTimer: case Unreachable: case ExtractOSREntryLocal: case LoopHint: - case StoreBarrier: - case ConditionalStoreBarrier: - case StoreBarrierWithNullCheck: - case FunctionReentryWatchpoint: - case TypedArrayWatchpoint: case MovHint: case ZombieHint: + case ExitOK: + case BottomValue: + case TypeOf: break; #else default: break; #endif } - - if (!node->containsMovHint()) - DFG_NODE_DO_TO_CHILDREN(m_graph, node, observeUntypedEdge); } - - void 
observeUntypedEdge(Node*, Edge& edge) + + void watchHavingABadTime(Node* node) { - if (edge.useKind() != UntypedUse) - return; - fixEdge<UntypedUse>(edge); + JSGlobalObject* globalObject = m_graph.globalObjectFor(node->origin.semantic); + + // If this global object is not having a bad time, watch it. We go down this path anytime the code + // does an array allocation. The types of array allocations may change if we start to have a bad + // time. It's easier to reason about this if we know that whenever the types change after we start + // optimizing, the code just gets thrown out. Doing this at FixupPhase is just early enough, since + // prior to this point nobody should have been doing optimizations based on the indexing type of + // the allocation. + if (!globalObject->isHavingABadTime()) + m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint()); } template<UseKind useKind> void createToString(Node* node, Edge& edge) { edge.setNode(m_insertionSet.insertNode( - m_indexInBlock, SpecString, ToString, node->codeOrigin, + m_indexInBlock, SpecString, ToString, node->origin, Edge(edge.node(), useKind))); } @@ -1056,7 +1513,7 @@ private: { ASSERT(arrayMode == ArrayMode(Array::Generic)); - if (!canOptimizeStringObjectAccess(node->codeOrigin)) + if (!m_graph.canOptimizeStringObjectAccess(node->origin.semantic)) return; createToString<useKind>(node, node->child1()); @@ -1079,19 +1536,14 @@ private: void convertStringAddUse(Node* node, Edge& edge) { if (useKind == StringUse) { - // This preserves the binaryUseKind() invariant ot ValueAdd: ValueAdd's - // two edges will always have identical use kinds, which makes the - // decision process much easier. observeUseKindOnNode<StringUse>(edge.node()); m_insertionSet.insertNode( - m_indexInBlock, SpecNone, Phantom, node->codeOrigin, + m_indexInBlock, SpecNone, Check, node->origin, Edge(edge.node(), StringUse)); edge.setUseKind(KnownStringUse); return; } - // FIXME: We ought to be able to have a ToPrimitiveToString node. - observeUseKindOnNode<useKind>(edge.node()); createToString<useKind>(node, edge); } @@ -1109,9 +1561,9 @@ private: if (!edge) break; edge.setUseKind(KnownStringUse); - if (!m_graph.isConstant(edge.node())) + JSString* string = edge->dynamicCastConstant<JSString*>(); + if (!string) continue; - JSString* string = jsCast<JSString*>(m_graph.valueOfJSConstant(edge.node()).asCell()); if (string->length()) continue; @@ -1127,6 +1579,85 @@ private: node->convertToIdentity(); } } + + void fixupToThis(Node* node) + { + ECMAMode ecmaMode = m_graph.executableFor(node->origin.semantic)->isStrictMode() ? 
StrictMode : NotStrictMode; + + if (ecmaMode == StrictMode) { + if (node->child1()->shouldSpeculateBoolean()) { + fixEdge<BooleanUse>(node->child1()); + node->convertToIdentity(); + return; + } + + if (node->child1()->shouldSpeculateInt32()) { + fixEdge<Int32Use>(node->child1()); + node->convertToIdentity(); + return; + } + + if (enableInt52() && node->child1()->shouldSpeculateMachineInt()) { + fixEdge<Int52RepUse>(node->child1()); + node->convertToIdentity(); + node->setResult(NodeResultInt52); + return; + } + + if (node->child1()->shouldSpeculateNumber()) { + fixEdge<DoubleRepUse>(node->child1()); + node->convertToIdentity(); + node->setResult(NodeResultDouble); + return; + } + + if (node->child1()->shouldSpeculateSymbol()) { + fixEdge<SymbolUse>(node->child1()); + node->convertToIdentity(); + return; + } + + if (node->child1()->shouldSpeculateStringIdent()) { + fixEdge<StringIdentUse>(node->child1()); + node->convertToIdentity(); + return; + } + + if (node->child1()->shouldSpeculateString()) { + fixEdge<StringUse>(node->child1()); + node->convertToIdentity(); + return; + } + } + + if (node->child1()->shouldSpeculateOther()) { + if (ecmaMode == StrictMode) { + fixEdge<OtherUse>(node->child1()); + node->convertToIdentity(); + return; + } + + m_insertionSet.insertNode( + m_indexInBlock, SpecNone, Check, node->origin, + Edge(node->child1().node(), OtherUse)); + observeUseKindOnNode<OtherUse>(node->child1().node()); + m_graph.convertToConstant( + node, m_graph.globalThisObjectFor(node->origin.semantic)); + return; + } + + if (node->child1()->shouldSpeculateStringObject()) { + fixEdge<StringObjectUse>(node->child1()); + node->convertToIdentity(); + return; + } + + if (isFinalObjectSpeculation(node->child1()->prediction())) { + fixEdge<FinalObjectUse>(node->child1()); + node->convertToIdentity(); + return; + } + } void fixupToPrimitive(Node* node) { @@ -1143,21 +1674,21 @@ private: } if (node->child1()->shouldSpeculateStringObject() - && canOptimizeStringObjectAccess(node->codeOrigin)) { + && m_graph.canOptimizeStringObjectAccess(node->origin.semantic)) { fixEdge<StringObjectUse>(node->child1()); node->convertToToString(); return; } if (node->child1()->shouldSpeculateStringOrStringObject() - && canOptimizeStringObjectAccess(node->codeOrigin)) { + && m_graph.canOptimizeStringObjectAccess(node->origin.semantic)) { fixEdge<StringOrStringObjectUse>(node->child1()); node->convertToToString(); return; } } - void fixupToString(Node* node) + void fixupToStringOrCallStringConstructor(Node* node) { if (node->child1()->shouldSpeculateString()) { fixEdge<StringUse>(node->child1()); @@ -1166,13 +1697,13 @@ private: } if (node->child1()->shouldSpeculateStringObject() - && canOptimizeStringObjectAccess(node->codeOrigin)) { + && m_graph.canOptimizeStringObjectAccess(node->origin.semantic)) { fixEdge<StringObjectUse>(node->child1()); return; } if (node->child1()->shouldSpeculateStringOrStringObject() - && canOptimizeStringObjectAccess(node->codeOrigin)) { + && m_graph.canOptimizeStringObjectAccess(node->origin.semantic)) { fixEdge<StringOrStringObjectUse>(node->child1()); return; } @@ -1182,110 +1713,50 @@ private: return; } } - - template<UseKind leftUseKind> - bool attemptToMakeFastStringAdd(Node* node, Edge& left, Edge& right) + + bool attemptToMakeFastStringAdd(Node* node) { - Node* originalLeft = left.node(); - Node* originalRight = right.node(); - - ASSERT(leftUseKind == StringUse || leftUseKind == StringObjectUse || leftUseKind == StringOrStringObjectUse); - - if (isStringObjectUse<leftUseKind>() && 
!canOptimizeStringObjectAccess(node->codeOrigin)) + bool goodToGo = true; + m_graph.doToChildren( + node, + [&] (Edge& edge) { + if (edge->shouldSpeculateString()) + return; + if (m_graph.canOptimizeStringObjectAccess(node->origin.semantic)) { + if (edge->shouldSpeculateStringObject()) + return; + if (edge->shouldSpeculateStringOrStringObject()) + return; + } + goodToGo = false; + }); + if (!goodToGo) return false; - - convertStringAddUse<leftUseKind>(node, left); - - if (right->shouldSpeculateString()) - convertStringAddUse<StringUse>(node, right); - else if (right->shouldSpeculateStringObject() && canOptimizeStringObjectAccess(node->codeOrigin)) - convertStringAddUse<StringObjectUse>(node, right); - else if (right->shouldSpeculateStringOrStringObject() && canOptimizeStringObjectAccess(node->codeOrigin)) - convertStringAddUse<StringOrStringObjectUse>(node, right); - else { - // At this point we know that the other operand is something weird. The semantically correct - // way of dealing with this is: - // - // MakeRope(@left, ToString(ToPrimitive(@right))) - // - // So that's what we emit. NB, we need to do all relevant type checks on @left before we do - // anything to @right, since ToPrimitive may be effectful. - - Node* toPrimitive = m_insertionSet.insertNode( - m_indexInBlock, resultOfToPrimitive(right->prediction()), ToPrimitive, node->codeOrigin, - Edge(right.node())); - Node* toString = m_insertionSet.insertNode( - m_indexInBlock, SpecString, ToString, node->codeOrigin, Edge(toPrimitive)); - - fixupToPrimitive(toPrimitive); - fixupToString(toString); - - right.setNode(toString); - } - - // We're doing checks up there, so we need to make sure that the - // *original* inputs to the addition are live up to here. - m_insertionSet.insertNode( - m_indexInBlock, SpecNone, Phantom, node->codeOrigin, - Edge(originalLeft), Edge(originalRight)); + + m_graph.doToChildren( + node, + [&] (Edge& edge) { + if (edge->shouldSpeculateString()) { + convertStringAddUse<StringUse>(node, edge); + return; + } + ASSERT(m_graph.canOptimizeStringObjectAccess(node->origin.semantic)); + if (edge->shouldSpeculateStringObject()) { + convertStringAddUse<StringObjectUse>(node, edge); + return; + } + if (edge->shouldSpeculateStringOrStringObject()) { + convertStringAddUse<StringOrStringObjectUse>(node, edge); + return; + } + RELEASE_ASSERT_NOT_REACHED(); + }); convertToMakeRope(node); return true; } - - bool isStringPrototypeMethodSane(Structure* stringPrototypeStructure, StringImpl* uid) - { - unsigned attributesUnused; - JSCell* specificValue; - PropertyOffset offset = stringPrototypeStructure->getConcurrently( - vm(), uid, attributesUnused, specificValue); - if (!isValidOffset(offset)) - return false; - - if (!specificValue) - return false; - - if (!specificValue->inherits(JSFunction::info())) - return false; - - JSFunction* function = jsCast<JSFunction*>(specificValue); - if (function->executable()->intrinsicFor(CodeForCall) != StringPrototypeValueOfIntrinsic) - return false; - - return true; - } - - bool canOptimizeStringObjectAccess(const CodeOrigin& codeOrigin) - { - if (m_graph.hasExitSite(codeOrigin, NotStringObject)) - return false; - - Structure* stringObjectStructure = m_graph.globalObjectFor(codeOrigin)->stringObjectStructure(); - ASSERT(stringObjectStructure->storedPrototype().isObject()); - ASSERT(stringObjectStructure->storedPrototype().asCell()->classInfo() == StringPrototype::info()); - - JSObject* stringPrototypeObject = asObject(stringObjectStructure->storedPrototype()); - Structure* 
stringPrototypeStructure = stringPrototypeObject->structure(); - if (!m_graph.watchpoints().isStillValid(stringPrototypeStructure->transitionWatchpointSet())) - return false; - - if (stringPrototypeStructure->isDictionary()) - return false; - - // We're being conservative here. We want DFG's ToString on StringObject to be - // used in both numeric contexts (that would call valueOf()) and string contexts - // (that would call toString()). We don't want the DFG to have to distinguish - // between the two, just because that seems like it would get confusing. So we - // just require both methods to be sane. - if (!isStringPrototypeMethodSane(stringPrototypeStructure, vm().propertyNames->valueOf.impl())) - return false; - if (!isStringPrototypeMethodSane(stringPrototypeStructure, vm().propertyNames->toString.impl())) - return false; - - return true; - } - - void fixupSetLocalsInBlock(BasicBlock* block) + + void fixupGetAndSetLocalsInBlock(BasicBlock* block) { if (!block) return; @@ -1293,28 +1764,52 @@ private: m_block = block; for (m_indexInBlock = 0; m_indexInBlock < block->size(); ++m_indexInBlock) { Node* node = m_currentNode = block->at(m_indexInBlock); - if (node->op() != SetLocal) + if (node->op() != SetLocal && node->op() != GetLocal) continue; VariableAccessData* variable = node->variableAccessData(); - switch (variable->flushFormat()) { - case FlushedJSValue: - break; - case FlushedDouble: - fixEdge<NumberUse>(node->child1()); - break; - case FlushedInt32: - fixEdge<Int32Use>(node->child1()); - break; - case FlushedInt52: - fixEdge<MachineIntUse>(node->child1()); - break; - case FlushedCell: - fixEdge<CellUse>(node->child1()); + switch (node->op()) { + case GetLocal: + switch (variable->flushFormat()) { + case FlushedDouble: + node->setResult(NodeResultDouble); + break; + case FlushedInt52: + node->setResult(NodeResultInt52); + break; + default: + break; + } break; - case FlushedBoolean: - fixEdge<BooleanUse>(node->child1()); + + case SetLocal: + // NOTE: Any type checks we put here may get hoisted by fixupChecksInBlock(). So, if we + // add new type checking use kind for SetLocals, we need to modify that code as well. 
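The SetLocal handling that continues below, together with the DFGFlushFormat.h changes later in this diff, encodes one table: each flush format now maps to a typed-representation use kind (DoubleRepUse and Int52RepUse replace the old NumberUse and MachineIntUse). A standalone sketch of that mapping follows; the enums are simplified stand-ins, not the real definitions from DFGFlushFormat.h and DFGUseKind.h.

    // Stand-in enums; the real definitions live in DFGFlushFormat.h / DFGUseKind.h.
    #include <cstdio>

    enum FlushFormat { DeadFlush, FlushedInt32, FlushedInt52, FlushedDouble,
                       FlushedCell, FlushedBoolean, FlushedJSValue, ConflictingFlush };
    enum UseKind { UntypedUse, Int32Use, Int52RepUse, DoubleRepUse, CellUse, BooleanUse };

    // Mirrors the mapping applied to SetLocal children in the hunk below.
    UseKind useKindForFlushFormat(FlushFormat format) {
        switch (format) {
        case FlushedInt32:   return Int32Use;
        case FlushedInt52:   return Int52RepUse;   // was MachineIntUse before this change
        case FlushedDouble:  return DoubleRepUse;  // was NumberUse before this change
        case FlushedCell:    return CellUse;
        case FlushedBoolean: return BooleanUse;
        default:             return UntypedUse;    // DeadFlush, FlushedJSValue, ConflictingFlush
        }
    }

    int main() {
        std::printf("FlushedDouble -> use kind %d\n", useKindForFlushFormat(FlushedDouble));
        return 0;
    }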
+ + switch (variable->flushFormat()) { + case FlushedJSValue: + break; + case FlushedDouble: + fixEdge<DoubleRepUse>(node->child1()); + break; + case FlushedInt32: + fixEdge<Int32Use>(node->child1()); + break; + case FlushedInt52: + fixEdge<Int52RepUse>(node->child1()); + break; + case FlushedCell: + fixEdge<CellUse>(node->child1()); + break; + case FlushedBoolean: + fixEdge<BooleanUse>(node->child1()); + break; + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } break; + default: RELEASE_ASSERT_NOT_REACHED(); break; @@ -1323,54 +1818,38 @@ private: m_insertionSet.execute(block); } - void fixupUntypedSetLocalsInBlock(BasicBlock* block) - { - if (!block) - return; - ASSERT(block->isReachable); - m_block = block; - for (m_indexInBlock = 0; m_indexInBlock < block->size(); ++m_indexInBlock) { - Node* node = m_currentNode = block->at(m_indexInBlock); - if (node->op() != SetLocal) - continue; - - if (node->child1().useKind() == UntypedUse) - fixEdge<UntypedUse>(node->child1()); - } - m_insertionSet.execute(block); - } - - Node* checkArray(ArrayMode arrayMode, const CodeOrigin& codeOrigin, Node* array, Node* index, bool (*storageCheck)(const ArrayMode&) = canCSEStorage) + Node* checkArray(ArrayMode arrayMode, const NodeOrigin& origin, Node* array, Node* index, bool (*storageCheck)(const ArrayMode&) = canCSEStorage) { ASSERT(arrayMode.isSpecific()); if (arrayMode.type() == Array::String) { m_insertionSet.insertNode( - m_indexInBlock, SpecNone, Phantom, codeOrigin, - Edge(array, StringUse)); + m_indexInBlock, SpecNone, Check, origin, Edge(array, StringUse)); } else { - Structure* structure = arrayMode.originalArrayStructure(m_graph, codeOrigin); + // Note that we only need to be using a structure check if we opt for SaneChain, since + // that needs to protect against JSArray's __proto__ being changed. + Structure* structure = arrayMode.originalArrayStructure(m_graph, origin.semantic); Edge indexEdge = index ? 
Edge(index, Int32Use) : Edge(); - + if (arrayMode.doesConversion()) { if (structure) { m_insertionSet.insertNode( - m_indexInBlock, SpecNone, ArrayifyToStructure, codeOrigin, + m_indexInBlock, SpecNone, ArrayifyToStructure, origin, OpInfo(structure), OpInfo(arrayMode.asWord()), Edge(array, CellUse), indexEdge); } else { m_insertionSet.insertNode( - m_indexInBlock, SpecNone, Arrayify, codeOrigin, + m_indexInBlock, SpecNone, Arrayify, origin, OpInfo(arrayMode.asWord()), Edge(array, CellUse), indexEdge); } } else { if (structure) { m_insertionSet.insertNode( - m_indexInBlock, SpecNone, CheckStructure, codeOrigin, + m_indexInBlock, SpecNone, CheckStructure, origin, OpInfo(m_graph.addStructureSet(structure)), Edge(array, CellUse)); } else { m_insertionSet.insertNode( - m_indexInBlock, SpecNone, CheckArray, codeOrigin, + m_indexInBlock, SpecNone, CheckArray, origin, OpInfo(arrayMode.asWord()), Edge(array, CellUse)); } } @@ -1381,11 +1860,11 @@ private: if (arrayMode.usesButterfly()) { return m_insertionSet.insertNode( - m_indexInBlock, SpecNone, GetButterfly, codeOrigin, Edge(array, CellUse)); + m_indexInBlock, SpecNone, GetButterfly, origin, Edge(array, CellUse)); } return m_insertionSet.insertNode( - m_indexInBlock, SpecNone, GetIndexedPropertyStorage, codeOrigin, + m_indexInBlock, SpecNone, GetIndexedPropertyStorage, origin, OpInfo(arrayMode.asWord()), Edge(array, KnownCellUse)); } @@ -1396,7 +1875,7 @@ private: switch (node->arrayMode().type()) { case Array::ForceExit: { m_insertionSet.insertNode( - m_indexInBlock, SpecNone, ForceOSRExit, node->codeOrigin); + m_indexInBlock, SpecNone, ForceOSRExit, node->origin); return; } @@ -1409,7 +1888,7 @@ private: return; default: { - Node* storage = checkArray(node->arrayMode(), node->codeOrigin, base.node(), index.node()); + Node* storage = checkArray(node->arrayMode(), node->origin, base.node(), index.node()); if (!storage) return; @@ -1453,29 +1932,35 @@ private: VariableAccessData* variable = node->variableAccessData(); switch (useKind) { case Int32Use: + case KnownInt32Use: if (alwaysUnboxSimplePrimitives() || isInt32Speculation(variable->prediction())) m_profitabilityChanged |= variable->mergeIsProfitableToUnbox(true); break; case NumberUse: case RealNumberUse: + case DoubleRepUse: + case DoubleRepRealUse: if (variable->doubleFormatState() == UsingDoubleFormat) m_profitabilityChanged |= variable->mergeIsProfitableToUnbox(true); break; case BooleanUse: + case KnownBooleanUse: if (alwaysUnboxSimplePrimitives() || isBooleanSpeculation(variable->prediction())) m_profitabilityChanged |= variable->mergeIsProfitableToUnbox(true); break; - case MachineIntUse: + case Int52RepUse: if (isMachineIntSpeculation(variable->prediction())) m_profitabilityChanged |= variable->mergeIsProfitableToUnbox(true); break; case CellUse: case KnownCellUse: case ObjectUse: + case FunctionUse: case StringUse: case KnownStringUse: + case SymbolUse: case StringObjectUse: case StringOrStringObjectUse: if (alwaysUnboxSimplePrimitives() @@ -1487,179 +1972,131 @@ private: } } - // Set the use kind of the edge and perform any actions that need to be done for - // that use kind, like inserting intermediate conversion nodes. Never call this - // with useKind = UntypedUse explicitly; edges have UntypedUse implicitly and any - // edge that survives fixup and still has UntypedUse will have this method called - // from observeUntypedEdge(). Also, make sure that if you do change the type of an - // edge, you either call fixEdge() or perform the equivalent functionality - // yourself. 
Obviously, you should have a really good reason if you do the latter. template<UseKind useKind> void fixEdge(Edge& edge) { - if (isDouble(useKind)) { - if (edge->shouldSpeculateInt32ForArithmetic()) { - injectInt32ToDoubleNode(edge, useKind); - return; - } + observeUseKindOnNode<useKind>(edge.node()); + edge.setUseKind(useKind); + } + + void speculateForBarrier(Edge value) + { + // Currently, the DFG won't take advantage of this speculation. But, we want to do it in + // the DFG anyway because if such a speculation would be wrong, we want to know before + // we do an expensive compile. + + if (value->shouldSpeculateInt32()) { + insertCheck<Int32Use>(m_indexInBlock, value.node()); + return; + } - if (enableInt52() && edge->shouldSpeculateMachineInt()) { - // Make all double uses of int52 values have an intermediate Int52ToDouble. - // This is for the same reason as Int52ToValue (see below) except that - // Int8ToDouble will convert int52's that fit in an int32 into a double - // rather than trying to create a boxed int32 like Int52ToValue does. - - Node* result = m_insertionSet.insertNode( - m_indexInBlock, SpecInt52AsDouble, Int52ToDouble, - m_currentNode->codeOrigin, Edge(edge.node(), NumberUse)); - edge = Edge(result, useKind); - return; - } + if (value->shouldSpeculateBoolean()) { + insertCheck<BooleanUse>(m_indexInBlock, value.node()); + return; } - - if (enableInt52() && useKind != MachineIntUse - && edge->shouldSpeculateMachineInt() && !edge->shouldSpeculateInt32()) { - // We make all non-int52 uses of int52 values have an intermediate Int52ToValue - // node to ensure that we handle this properly: - // - // a: SomeInt52 - // b: ArithAdd(@a, ...) - // c: Call(..., @a) - // d: ArithAdd(@a, ...) - // - // Without an intermediate node and just labeling the uses, we will get: - // - // a: SomeInt52 - // b: ArithAdd(Int52:@a, ...) - // c: Call(..., Untyped:@a) - // d: ArithAdd(Int52:@a, ...) - // - // And now the c->Untyped:@a edge will box the value of @a into a double. This - // is bad, because now the d->Int52:@a edge will either have to do double-to-int - // conversions, or will have to OSR exit unconditionally. Alternatively we could - // have the c->Untyped:@a edge box the value by copying rather than in-place. - // But these boxings are also costly so this wouldn't be great. - // - // The solution we use is to always have non-Int52 uses of predicted Int52's use - // an intervening Int52ToValue node: - // - // a: SomeInt52 - // b: ArithAdd(Int52:@a, ...) - // x: Int52ToValue(Int52:@a) - // c: Call(..., Untyped:@x) - // d: ArithAdd(Int52:@a, ...) - // - // Note that even if we had multiple non-int52 uses of @a, the multiple - // Int52ToValue's would get CSE'd together. So the boxing would only happen once. - // At the same time, @a would continue to be represented as a native int52. - // - // An alternative would have been to insert ToNativeInt52 nodes on int52 uses of - // int52's. This would have handled the above example but would fall over for: - // - // a: SomeInt52 - // b: Call(..., @a) - // c: ArithAdd(@a, ...) - // - // But the solution we use handles the above gracefully. 
- - Node* result = m_insertionSet.insertNode( - m_indexInBlock, SpecInt52, Int52ToValue, - m_currentNode->codeOrigin, Edge(edge.node(), UntypedUse)); - edge = Edge(result, useKind); + + if (value->shouldSpeculateOther()) { + insertCheck<OtherUse>(m_indexInBlock, value.node()); + return; + } + + if (value->shouldSpeculateNumber()) { + insertCheck<NumberUse>(m_indexInBlock, value.node()); + return; + } + + if (value->shouldSpeculateNotCell()) { + insertCheck<NotCellUse>(m_indexInBlock, value.node()); return; } - - observeUseKindOnNode<useKind>(edge.node()); - - edge.setUseKind(useKind); } - void insertStoreBarrier(unsigned indexInBlock, Edge child1, Edge child2 = Edge()) + template<UseKind useKind> + void insertCheck(unsigned indexInBlock, Node* node) { - Node* barrierNode; - if (!child2) - barrierNode = m_graph.addNode(SpecNone, StoreBarrier, m_currentNode->codeOrigin, Edge(child1.node(), child1.useKind())); - else { - barrierNode = m_graph.addNode(SpecNone, ConditionalStoreBarrier, m_currentNode->codeOrigin, - Edge(child1.node(), child1.useKind()), Edge(child2.node(), child2.useKind())); - } - fixupNode(barrierNode); - m_insertionSet.insert(indexInBlock, barrierNode); + observeUseKindOnNode<useKind>(node); + m_insertionSet.insertNode( + indexInBlock, SpecNone, Check, m_currentNode->origin, Edge(node, useKind)); } - bool fixIntEdge(Edge& edge) + void fixIntConvertingEdge(Edge& edge) { Node* node = edge.node(); - if (node->shouldSpeculateInt32()) { - fixEdge<Int32Use>(edge); - return false; + if (node->shouldSpeculateInt32OrBoolean()) { + fixIntOrBooleanEdge(edge); + return; } UseKind useKind; if (node->shouldSpeculateMachineInt()) - useKind = MachineIntUse; + useKind = Int52RepUse; else if (node->shouldSpeculateNumber()) - useKind = NumberUse; - else if (node->shouldSpeculateBoolean()) - useKind = BooleanUse; + useKind = DoubleRepUse; else useKind = NotCellUse; Node* newNode = m_insertionSet.insertNode( - m_indexInBlock, SpecInt32, ValueToInt32, m_currentNode->codeOrigin, + m_indexInBlock, SpecInt32, ValueToInt32, m_currentNode->origin, Edge(node, useKind)); observeUseKindOnNode(node, useKind); edge = Edge(newNode, KnownInt32Use); - return true; } - void fixBinaryIntEdges() + void fixIntOrBooleanEdge(Edge& edge) { - AdjacencyList children = m_currentNode->children; + Node* node = edge.node(); + if (!node->sawBooleans()) { + fixEdge<Int32Use>(edge); + return; + } - // Call fixIntEdge() on both edges. 
- bool needPhantom = - fixIntEdge(m_currentNode->child1()) | fixIntEdge(m_currentNode->child2()); + UseKind useKind; + if (node->shouldSpeculateBoolean()) + useKind = BooleanUse; + else + useKind = UntypedUse; + Node* newNode = m_insertionSet.insertNode( + m_indexInBlock, SpecInt32, BooleanToNumber, m_currentNode->origin, + Edge(node, useKind)); + observeUseKindOnNode(node, useKind); - if (!needPhantom) - return; - m_insertionSet.insertNode(m_indexInBlock + 1, SpecNone, Phantom, m_currentNode->codeOrigin, children); + edge = Edge(newNode, Int32Use); } - void injectInt32ToDoubleNode(Edge& edge, UseKind useKind = NumberUse) + void fixDoubleOrBooleanEdge(Edge& edge) { - Node* result = m_insertionSet.insertNode( - m_indexInBlock, SpecInt52AsDouble, Int32ToDouble, - m_currentNode->codeOrigin, Edge(edge.node(), NumberUse)); + Node* node = edge.node(); + if (!node->sawBooleans()) { + fixEdge<DoubleRepUse>(edge); + return; + } + + UseKind useKind; + if (node->shouldSpeculateBoolean()) + useKind = BooleanUse; + else + useKind = UntypedUse; + Node* newNode = m_insertionSet.insertNode( + m_indexInBlock, SpecInt32, BooleanToNumber, m_currentNode->origin, + Edge(node, useKind)); + observeUseKindOnNode(node, useKind); - edge = Edge(result, useKind); + edge = Edge(newNode, DoubleRepUse); } void truncateConstantToInt32(Edge& edge) { Node* oldNode = edge.node(); - ASSERT(oldNode->hasConstant()); - JSValue value = m_graph.valueOfJSConstant(oldNode); + JSValue value = oldNode->asJSValue(); if (value.isInt32()) return; value = jsNumber(JSC::toInt32(value.asNumber())); ASSERT(value.isInt32()); - unsigned constantRegister; - if (!codeBlock()->findConstant(value, constantRegister)) { - constantRegister = codeBlock()->addConstantLazily(); - initializeLazyWriteBarrierForConstant( - m_graph.m_plan.writeBarriers, - codeBlock()->constants()[constantRegister], - codeBlock(), - constantRegister, - codeBlock()->ownerExecutable(), - value); - } edge.setNode(m_insertionSet.insertNode( - m_indexInBlock, SpecInt32, JSConstant, m_currentNode->codeOrigin, - OpInfo(constantRegister))); + m_indexInBlock, SpecInt32, JSConstant, m_currentNode->origin, + OpInfo(m_graph.freeze(value)))); } void truncateConstantsIfNecessary(Node* node, AddSpeculationMode mode) @@ -1676,11 +2113,11 @@ private: bool attemptToMakeIntegerAdd(Node* node) { - AddSpeculationMode mode = m_graph.addSpeculationMode(node); + AddSpeculationMode mode = m_graph.addSpeculationMode(node, FixupPass); if (mode != DontSpeculateInt32) { truncateConstantsIfNecessary(node, mode); - fixEdge<Int32Use>(node->child1()); - fixEdge<Int32Use>(node->child2()); + fixIntOrBooleanEdge(node->child1()); + fixIntOrBooleanEdge(node->child2()); if (bytecodeCanTruncateInteger(node->arithNodeFlags())) node->setArithMode(Arith::Unchecked); else @@ -1689,9 +2126,10 @@ private: } if (m_graph.addShouldSpeculateMachineInt(node)) { - fixEdge<MachineIntUse>(node->child1()); - fixEdge<MachineIntUse>(node->child2()); + fixEdge<Int52RepUse>(node->child1()); + fixEdge<Int52RepUse>(node->child2()); node->setArithMode(Arith::CheckOverflow); + node->setResult(NodeResultInt52); return true; } @@ -1702,9 +2140,9 @@ private: { if (!isInt32Speculation(node->prediction())) return false; - CodeBlock* profiledBlock = m_graph.baselineCodeBlockFor(node->codeOrigin); + CodeBlock* profiledBlock = m_graph.baselineCodeBlockFor(node->origin.semantic); ArrayProfile* arrayProfile = - profiledBlock->getArrayProfile(node->codeOrigin.bytecodeIndex); + profiledBlock->getArrayProfile(node->origin.semantic.bytecodeIndex); 
ArrayMode arrayMode = ArrayMode(Array::SelectUsingPredictions); if (arrayProfile) { ConcurrentJITLocker locker(profiledBlock->m_lock); @@ -1722,7 +2160,8 @@ private: } } - arrayMode = arrayMode.refine(node->child1()->prediction(), node->prediction()); + arrayMode = arrayMode.refine( + m_graph, node, node->child1()->prediction(), node->prediction()); if (arrayMode.type() == Array::Generic) { // Check if the input is something that we can't get array length for, but for which we @@ -1740,80 +2179,227 @@ private: convertToGetArrayLength(node, arrayMode); return true; } - - bool attemptToMakeGetTypedArrayByteLength(Node* node) - { - if (!isInt32Speculation(node->prediction())) - return false; - - TypedArrayType type = typedArrayTypeFromSpeculation(node->child1()->prediction()); - if (!isTypedView(type)) - return false; - - if (elementSize(type) == 1) { - convertToGetArrayLength(node, ArrayMode(toArrayType(type))); - return true; - } - - Node* length = prependGetArrayLength( - node->codeOrigin, node->child1().node(), ArrayMode(toArrayType(type))); - - Node* shiftAmount = m_insertionSet.insertNode( - m_indexInBlock, SpecInt32, JSConstant, node->codeOrigin, - OpInfo(m_graph.constantRegisterForConstant(jsNumber(logElementSize(type))))); - - // We can use a BitLShift here because typed arrays will never have a byteLength - // that overflows int32. - node->setOp(BitLShift); - node->clearFlags(NodeMustGenerate | NodeClobbersWorld); - observeUseKindOnNode(length, Int32Use); - observeUseKindOnNode(shiftAmount, Int32Use); - node->child1() = Edge(length, Int32Use); - node->child2() = Edge(shiftAmount, Int32Use); - return true; - } - + void convertToGetArrayLength(Node* node, ArrayMode arrayMode) { node->setOp(GetArrayLength); - node->clearFlags(NodeMustGenerate | NodeClobbersWorld); + node->clearFlags(NodeMustGenerate); fixEdge<KnownCellUse>(node->child1()); node->setArrayMode(arrayMode); - Node* storage = checkArray(arrayMode, node->codeOrigin, node->child1().node(), 0, lengthNeedsStorage); + Node* storage = checkArray(arrayMode, node->origin, node->child1().node(), 0, lengthNeedsStorage); if (!storage) return; node->child2() = Edge(storage); } - Node* prependGetArrayLength(CodeOrigin codeOrigin, Node* child, ArrayMode arrayMode) + Node* prependGetArrayLength(NodeOrigin origin, Node* child, ArrayMode arrayMode) { - Node* storage = checkArray(arrayMode, codeOrigin, child, 0, lengthNeedsStorage); + Node* storage = checkArray(arrayMode, origin, child, 0, lengthNeedsStorage); return m_insertionSet.insertNode( - m_indexInBlock, SpecInt32, GetArrayLength, codeOrigin, + m_indexInBlock, SpecInt32, GetArrayLength, origin, OpInfo(arrayMode.asWord()), Edge(child, KnownCellUse), Edge(storage)); } - bool attemptToMakeGetTypedArrayByteOffset(Node* node) + void fixupChecksInBlock(BasicBlock* block) { - if (!isInt32Speculation(node->prediction())) - return false; - - TypedArrayType type = typedArrayTypeFromSpeculation(node->child1()->prediction()); - if (!isTypedView(type)) - return false; - - checkArray( - ArrayMode(toArrayType(type)), node->codeOrigin, node->child1().node(), - 0, neverNeedsStorage); + if (!block) + return; + ASSERT(block->isReachable); + m_block = block; + unsigned indexForChecks = UINT_MAX; + NodeOrigin originForChecks; + for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) { + Node* node = block->at(indexInBlock); + + // If this is a node at which we could exit, then save its index. If nodes after this one + // cannot exit, then we will hoist checks to here. 
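The fixupChecksInBlock() code continuing below tracks the most recent node whose origin still permits OSR exit (indexForChecks) and inserts representation conversions and type checks at that index rather than at the current node. The following is a minimal standalone model of that bookkeeping only, using an invented MockNode struct in place of real DFG nodes.

    // Toy model of the indexForChecks bookkeeping in fixupChecksInBlock;
    // MockNode is invented for illustration and is not a JSC type.
    #include <climits>
    #include <cstdio>
    #include <vector>

    struct MockNode { const char* name; bool exitOK; };

    int main() {
        std::vector<MockNode> block = {
            { "SetLocal",    true  },  // exit is still OK here
            { "PutByOffset", false },  // cannot exit; its checks hoist upward
            { "MovHint",     false },
        };

        unsigned indexForChecks = UINT_MAX;
        for (unsigned i = 0; i < block.size(); ++i) {
            if (block[i].exitOK)
                indexForChecks = i;  // latest point where a new check may exit
            std::printf("checks needed by %-12s are inserted at index %u\n",
                        block[i].name, indexForChecks);
        }
        return 0;
    }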
+ if (node->origin.exitOK) { + indexForChecks = indexInBlock; + originForChecks = node->origin; + } + + originForChecks = originForChecks.withSemantic(node->origin.semantic); + + // First, try to relax the representational demands of each node, in order to have + // fewer conversions. + switch (node->op()) { + case MovHint: + case Check: + m_graph.doToChildren( + node, + [&] (Edge& edge) { + switch (edge.useKind()) { + case DoubleRepUse: + case DoubleRepRealUse: + if (edge->hasDoubleResult()) + break; + + if (edge->hasInt52Result()) + edge.setUseKind(Int52RepUse); + else if (edge.useKind() == DoubleRepUse) + edge.setUseKind(NumberUse); + break; + + case Int52RepUse: + // Nothing we can really do. + break; + + case UntypedUse: + case NumberUse: + if (edge->hasDoubleResult()) + edge.setUseKind(DoubleRepUse); + else if (edge->hasInt52Result()) + edge.setUseKind(Int52RepUse); + break; + + case RealNumberUse: + if (edge->hasDoubleResult()) + edge.setUseKind(DoubleRepRealUse); + else if (edge->hasInt52Result()) + edge.setUseKind(Int52RepUse); + break; + + default: + break; + } + }); + break; + + case ValueToInt32: + if (node->child1().useKind() == DoubleRepUse + && !node->child1()->hasDoubleResult()) { + node->child1().setUseKind(NumberUse); + break; + } + break; + + default: + break; + } + + // Now, insert type conversions if necessary. + m_graph.doToChildren( + node, + [&] (Edge& edge) { + Node* result = nullptr; + + switch (edge.useKind()) { + case DoubleRepUse: + case DoubleRepRealUse: + case DoubleRepMachineIntUse: { + if (edge->hasDoubleResult()) + break; + + if (edge->isNumberConstant()) { + result = m_insertionSet.insertNode( + indexForChecks, SpecBytecodeDouble, DoubleConstant, originForChecks, + OpInfo(m_graph.freeze(jsDoubleNumber(edge->asNumber())))); + } else if (edge->hasInt52Result()) { + result = m_insertionSet.insertNode( + indexForChecks, SpecInt52AsDouble, DoubleRep, originForChecks, + Edge(edge.node(), Int52RepUse)); + } else { + UseKind useKind; + if (edge->shouldSpeculateDoubleReal()) + useKind = RealNumberUse; + else if (edge->shouldSpeculateNumber()) + useKind = NumberUse; + else + useKind = NotCellUse; + + result = m_insertionSet.insertNode( + indexForChecks, SpecBytecodeDouble, DoubleRep, originForChecks, + Edge(edge.node(), useKind)); + } + + edge.setNode(result); + break; + } + + case Int52RepUse: { + if (edge->hasInt52Result()) + break; + + if (edge->isMachineIntConstant()) { + result = m_insertionSet.insertNode( + indexForChecks, SpecMachineInt, Int52Constant, originForChecks, + OpInfo(edge->constant())); + } else if (edge->hasDoubleResult()) { + result = m_insertionSet.insertNode( + indexForChecks, SpecMachineInt, Int52Rep, originForChecks, + Edge(edge.node(), DoubleRepMachineIntUse)); + } else if (edge->shouldSpeculateInt32ForArithmetic()) { + result = m_insertionSet.insertNode( + indexForChecks, SpecInt32, Int52Rep, originForChecks, + Edge(edge.node(), Int32Use)); + } else { + result = m_insertionSet.insertNode( + indexForChecks, SpecMachineInt, Int52Rep, originForChecks, + Edge(edge.node(), MachineIntUse)); + } + + edge.setNode(result); + break; + } + + default: { + if (!edge->hasDoubleResult() && !edge->hasInt52Result()) + break; + + if (edge->hasDoubleResult()) { + result = m_insertionSet.insertNode( + indexForChecks, SpecBytecodeDouble, ValueRep, originForChecks, + Edge(edge.node(), DoubleRepUse)); + } else { + result = m_insertionSet.insertNode( + indexForChecks, SpecInt32 | SpecInt52AsDouble, ValueRep, + originForChecks, Edge(edge.node(), Int52RepUse)); + } 
+ + edge.setNode(result); + break; + } } + + // It's remotely possible that this node cannot do type checks, but we now have a + // type check on this node. We don't have to handle the general form of this + // problem. It only arises when ByteCodeParser emits an immediate SetLocal, rather + // than a delayed one. So, we only worry about those checks that we may have put on + // a SetLocal. Note that "indexForChecks != indexInBlock" is just another way of + // saying "!node->origin.exitOK". + if (indexForChecks != indexInBlock && mayHaveTypeCheck(edge.useKind())) { + UseKind knownUseKind; + + switch (edge.useKind()) { + case Int32Use: + knownUseKind = KnownInt32Use; + break; + case CellUse: + knownUseKind = KnownCellUse; + break; + case BooleanUse: + knownUseKind = KnownBooleanUse; + break; + default: + // This can only arise if we have a Check node, and in that case, we can + // just remove the original check. + DFG_ASSERT(m_graph, node, node->op() == Check); + knownUseKind = UntypedUse; + break; + } + + m_insertionSet.insertNode( + indexForChecks, SpecNone, Check, originForChecks, edge); + + edge.setUseKind(knownUseKind); + } + }); + } - node->setOp(GetTypedArrayByteOffset); - node->clearFlags(NodeMustGenerate | NodeClobbersWorld); - fixEdge<KnownCellUse>(node->child1()); - return true; + m_insertionSet.execute(block); } - + BasicBlock* m_block; unsigned m_indexInBlock; Node* m_currentNode; diff --git a/Source/JavaScriptCore/dfg/DFGFixupPhase.h b/Source/JavaScriptCore/dfg/DFGFixupPhase.h index d496d59b2..55f84a9f4 100644 --- a/Source/JavaScriptCore/dfg/DFGFixupPhase.h +++ b/Source/JavaScriptCore/dfg/DFGFixupPhase.h @@ -26,8 +26,6 @@ #ifndef DFGFixupPhase_h #define DFGFixupPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) namespace JSC { namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGFlushFormat.cpp b/Source/JavaScriptCore/dfg/DFGFlushFormat.cpp index fd6c249e6..fa483ac6c 100644 --- a/Source/JavaScriptCore/dfg/DFGFlushFormat.cpp +++ b/Source/JavaScriptCore/dfg/DFGFlushFormat.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,6 +28,8 @@ #if ENABLE(DFG_JIT) +#include "JSCInlines.h" + namespace WTF { using namespace JSC::DFG; @@ -56,9 +58,6 @@ void printInternal(PrintStream& out, FlushFormat format) case FlushedJSValue: out.print("FlushedJSValue"); return; - case FlushedArguments: - out.print("FlushedArguments"); - return; case ConflictingFlush: out.print("ConflictingFlush"); return; diff --git a/Source/JavaScriptCore/dfg/DFGFlushFormat.h b/Source/JavaScriptCore/dfg/DFGFlushFormat.h index 9083a107e..7531173c7 100644 --- a/Source/JavaScriptCore/dfg/DFGFlushFormat.h +++ b/Source/JavaScriptCore/dfg/DFGFlushFormat.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGFlushFormat_h #define DFGFlushFormat_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGNodeFlags.h" @@ -46,7 +44,6 @@ enum FlushFormat { FlushedCell, FlushedBoolean, FlushedJSValue, - FlushedArguments, ConflictingFlush }; @@ -57,14 +54,13 @@ inline NodeFlags resultFor(FlushFormat format) case FlushedJSValue: case FlushedCell: case ConflictingFlush: - case FlushedArguments: return NodeResultJS; case FlushedInt32: return NodeResultInt32; case FlushedInt52: return NodeResultInt52; case FlushedDouble: - return NodeResultNumber; + return NodeResultDouble; case FlushedBoolean: return NodeResultBoolean; } @@ -78,16 +74,15 @@ inline UseKind useKindFor(FlushFormat format) case DeadFlush: case FlushedJSValue: case ConflictingFlush: - case FlushedArguments: return UntypedUse; case FlushedCell: return CellUse; case FlushedInt32: return Int32Use; case FlushedInt52: - return MachineIntUse; + return Int52RepUse; case FlushedDouble: - return NumberUse; + return DoubleRepUse; case FlushedBoolean: return BooleanUse; } @@ -95,6 +90,33 @@ inline UseKind useKindFor(FlushFormat format) return UntypedUse; } +inline UseKind uncheckedUseKindFor(FlushFormat format) +{ + switch (format) { + case DeadFlush: + case FlushedJSValue: + case ConflictingFlush: + return UntypedUse; + case FlushedCell: + return KnownCellUse; + case FlushedInt32: + return KnownInt32Use; + case FlushedInt52: + return Int52RepUse; + case FlushedDouble: + return DoubleRepUse; + case FlushedBoolean: + return KnownBooleanUse; + } + RELEASE_ASSERT_NOT_REACHED(); + return UntypedUse; +} + +inline SpeculatedType typeFilterFor(FlushFormat format) +{ + return typeFilterFor(useKindFor(format)); +} + inline DataFormat dataFormatFor(FlushFormat format) { switch (format) { @@ -113,13 +135,27 @@ inline DataFormat dataFormatFor(FlushFormat format) return DataFormatCell; case FlushedBoolean: return DataFormatBoolean; - case FlushedArguments: - return DataFormatArguments; } RELEASE_ASSERT_NOT_REACHED(); return DataFormatDead; } +inline FlushFormat merge(FlushFormat a, FlushFormat b) +{ + if (a == DeadFlush) + return b; + if (b == DeadFlush) + return a; + if (a == b) + return a; + return ConflictingFlush; +} + +inline bool isConcrete(FlushFormat format) +{ + return format != DeadFlush && format != ConflictingFlush; +} + } } // namespace JSC::DFG namespace WTF { diff --git a/Source/JavaScriptCore/dfg/DFGFlushLivenessAnalysisPhase.cpp b/Source/JavaScriptCore/dfg/DFGFlushLivenessAnalysisPhase.cpp deleted file mode 100644 index c4db38268..000000000 --- a/Source/JavaScriptCore/dfg/DFGFlushLivenessAnalysisPhase.cpp +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Copyright (C) 2013 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "config.h" -#include "DFGFlushLivenessAnalysisPhase.h" - -#if ENABLE(DFG_JIT) - -#include "DFGBasicBlockInlines.h" -#include "DFGGraph.h" -#include "DFGInsertionSet.h" -#include "DFGPhase.h" -#include "OperandsInlines.h" -#include "Operations.h" - -namespace JSC { namespace DFG { - -class FlushLivenessAnalysisPhase : public Phase { -public: - FlushLivenessAnalysisPhase(Graph& graph) - : Phase(graph, "flush-liveness analysis") - { - } - - bool run() - { - ASSERT(m_graph.m_form == SSA); - - // Liveness is a backwards analysis; the roots are the blocks that - // end in a terminal (Return/Unreachable). For now, we - // use a fixpoint formulation since liveness is a rapid analysis with - // convergence guaranteed after O(connectivity). - - // Start by assuming that everything is dead. - for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) - continue; - block->ssa->flushAtHead.fill(FlushedAt()); - block->ssa->flushAtTail.fill(FlushedAt()); - } - - do { - m_changed = false; - for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) - process(blockIndex); - } while (m_changed); - - Operands<FlushedAt>& root = m_graph.block(0)->ssa->flushAtHead; - for (unsigned i = root.size(); i--;) { - if (root.isArgument(i)) { - if (!root[i] - || root[i] == FlushedAt(FlushedJSValue, VirtualRegister(root.operandForIndex(i)))) - continue; - } else { - if (!root[i]) - continue; - } - dataLog( - "Bad flush liveness analysis result: bad flush liveness at root: ", - root, "\n"); - dataLog("IR at time of error:\n"); - m_graph.dump(); - CRASH(); - } - - return true; - } - -private: - void process(BlockIndex blockIndex) - { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) - return; - - m_live = block->ssa->flushAtTail; - - for (unsigned nodeIndex = block->size(); nodeIndex--;) { - Node* node = block->at(nodeIndex); - - switch (node->op()) { - case SetLocal: { - VariableAccessData* variable = node->variableAccessData(); - FlushedAt& current = m_live.operand(variable->local()); - if (!!current && current != variable->flushedAt()) - reportError(node); - current = FlushedAt(); - break; - } - - case GetArgument: { - VariableAccessData* variable = node->variableAccessData(); - ASSERT(variable->local() == variable->machineLocal()); - ASSERT(variable->local().isArgument()); - FlushedAt& current = m_live.operand(variable->local()); - if (!!current && current != variable->flushedAt()) - reportError(node); - current = FlushedAt(FlushedJSValue, node->local()); - break; - } - - case Flush: - case GetLocal: { - VariableAccessData* variable = node->variableAccessData(); - FlushedAt& current = m_live.operand(variable->local()); - if (!!current && current != variable->flushedAt()) - reportError(node); - current = 
variable->flushedAt(); - break; - } - - default: - break; - } - } - - if (m_live == block->ssa->flushAtHead) - return; - - m_changed = true; - block->ssa->flushAtHead = m_live; - for (unsigned i = block->predecessors.size(); i--;) { - BasicBlock* predecessor = block->predecessors[i]; - for (unsigned j = m_live.size(); j--;) { - FlushedAt& predecessorFlush = predecessor->ssa->flushAtTail[j]; - FlushedAt myFlush = m_live[j]; - - // Three possibilities: - // 1) Predecessor format is Dead, in which case it acquires our format. - // 2) Predecessor format is not Dead but our format is dead, in which - // case we acquire the predecessor format. - // 3) Predecessor format is identical to our format, in which case we - // do nothing. - // 4) Predecessor format is different from our format and it's not Dead, - // in which case we have an erroneous set of Flushes and SetLocals. - - if (!predecessorFlush) { - predecessorFlush = myFlush; - continue; - } - - if (!myFlush) { - m_live[j] = predecessorFlush; - continue; - } - - if (predecessorFlush == myFlush) - continue; - - dataLog( - "Bad Flush merge at edge ", *predecessor, " -> ", *block, - ", local variable r", m_live.operandForIndex(j), ": ", *predecessor, - " has ", predecessorFlush, " and ", *block, " has ", myFlush, ".\n"); - dataLog("IR at time of error:\n"); - m_graph.dump(); - CRASH(); - } - } - } - - NO_RETURN_DUE_TO_CRASH void reportError(Node* node) - { - dataLog( - "Bad flush merge at node ", node, ", r", node->local(), ": node claims ", - node->variableAccessData()->flushedAt(), " but backwards flow claims ", - m_live.operand(node->local()), ".\n"); - dataLog("IR at time of error:\n"); - m_graph.dump(); - CRASH(); - } - - bool m_changed; - Operands<FlushedAt> m_live; -}; - -bool performFlushLivenessAnalysis(Graph& graph) -{ - SamplingRegion samplingRegion("DFG Flush-Liveness Analysis Phase"); - return runPhase<FlushLivenessAnalysisPhase>(graph); -} - -} } // namespace JSC::DFG - -#endif // ENABLE(DFG_JIT) - diff --git a/Source/JavaScriptCore/dfg/DFGFlushedAt.cpp b/Source/JavaScriptCore/dfg/DFGFlushedAt.cpp index ce95f45d5..c15a2e6b0 100644 --- a/Source/JavaScriptCore/dfg/DFGFlushedAt.cpp +++ b/Source/JavaScriptCore/dfg/DFGFlushedAt.cpp @@ -28,14 +28,18 @@ #if ENABLE(DFG_JIT) +#include "JSCInlines.h" + namespace JSC { namespace DFG { void FlushedAt::dump(PrintStream& out) const { if (m_format == DeadFlush || m_format == ConflictingFlush) out.print(m_format); + else if (m_virtualRegister.isValid()) + out.print(m_virtualRegister, ":", m_format); else - out.print("r", m_virtualRegister, ":", m_format); + out.print(m_format); } void FlushedAt::dumpInContext(PrintStream& out, DumpContext*) const diff --git a/Source/JavaScriptCore/dfg/DFGFlushedAt.h b/Source/JavaScriptCore/dfg/DFGFlushedAt.h index 6dfe716cc..ea913dd51 100644 --- a/Source/JavaScriptCore/dfg/DFGFlushedAt.h +++ b/Source/JavaScriptCore/dfg/DFGFlushedAt.h @@ -26,8 +26,6 @@ #ifndef DFGFlushedAt_h #define DFGFlushedAt_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGFlushFormat.h" @@ -54,8 +52,6 @@ public: { if (format == DeadFlush) ASSERT(!virtualRegister.isValid()); - else - ASSERT(virtualRegister.isValid()); } bool operator!() const { return m_format == DeadFlush; } diff --git a/Source/JavaScriptCore/dfg/DFGForAllKills.h b/Source/JavaScriptCore/dfg/DFGForAllKills.h new file mode 100644 index 000000000..f5f4cb523 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGForAllKills.h @@ -0,0 +1,175 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGForAllKills_h +#define DFGForAllKills_h + +#include "DFGCombinedLiveness.h" +#include "DFGGraph.h" +#include "DFGOSRAvailabilityAnalysisPhase.h" +#include "FullBytecodeLiveness.h" + +namespace JSC { namespace DFG { + +// Utilities for finding the last points where a node is live in DFG SSA. This accounts for liveness due +// to OSR exit. This is usually used for enumerating over all of the program points where a node is live, +// by exploring all blocks where the node is live at tail and then exploring all program points where the +// node is killed. A prerequisite to using these utilities is having liveness and OSR availability +// computed. + +// This tells you those things that die on the boundary between nodeBefore and nodeAfter. It is +// conservative in the sense that it might resort to telling you some things that are still live at +// nodeAfter. +template<typename Functor> +void forAllKilledOperands(Graph& graph, Node* nodeBefore, Node* nodeAfter, const Functor& functor) +{ + CodeOrigin before = nodeBefore->origin.forExit; + + if (!nodeAfter) { + graph.forAllLiveInBytecode(before, functor); + return; + } + + CodeOrigin after = nodeAfter->origin.forExit; + + VirtualRegister alreadyNoted; + // If we MovHint something that is live at the time, then we kill the old value. + if (nodeAfter->containsMovHint()) { + VirtualRegister reg = nodeAfter->unlinkedLocal(); + if (graph.isLiveInBytecode(reg, after)) { + functor(reg); + alreadyNoted = reg; + } + } + + if (before == after) + return; + + // It's easier to do this if the inline call frames are the same. This is way faster than the + // other loop, below. + if (before.inlineCallFrame == after.inlineCallFrame) { + int stackOffset = before.inlineCallFrame ? 
before.inlineCallFrame->stackOffset : 0; + CodeBlock* codeBlock = graph.baselineCodeBlockFor(before.inlineCallFrame); + FullBytecodeLiveness& fullLiveness = graph.livenessFor(codeBlock); + const FastBitVector& liveBefore = fullLiveness.getLiveness(before.bytecodeIndex); + const FastBitVector& liveAfter = fullLiveness.getLiveness(after.bytecodeIndex); + + for (unsigned relativeLocal = codeBlock->m_numCalleeLocals; relativeLocal--;) { + if (liveBefore.get(relativeLocal) && !liveAfter.get(relativeLocal)) + functor(virtualRegisterForLocal(relativeLocal) + stackOffset); + } + + return; + } + + // Detect kills the super conservative way: it is killed if it was live before and dead after. + BitVector liveAfter = graph.localsLiveInBytecode(after); + graph.forAllLocalsLiveInBytecode( + before, + [&] (VirtualRegister reg) { + if (reg == alreadyNoted) + return; + if (liveAfter.get(reg.toLocal())) + return; + functor(reg); + }); +} + +// Tells you all of the nodes that would no longer be live across the node at this nodeIndex. +template<typename Functor> +void forAllKilledNodesAtNodeIndex( + Graph& graph, AvailabilityMap& availabilityMap, BasicBlock* block, unsigned nodeIndex, + const Functor& functor) +{ + static const unsigned seenInClosureFlag = 1; + static const unsigned calledFunctorFlag = 2; + HashMap<Node*, unsigned> flags; + + Node* node = block->at(nodeIndex); + + graph.doToChildren( + node, + [&] (Edge edge) { + if (edge.doesKill()) { + auto& result = flags.add(edge.node(), 0).iterator->value; + if (!(result & calledFunctorFlag)) { + functor(edge.node()); + result |= calledFunctorFlag; + } + } + }); + + Node* before = nullptr; + if (nodeIndex) + before = block->at(nodeIndex - 1); + + forAllKilledOperands( + graph, before, node, + [&] (VirtualRegister reg) { + availabilityMap.closeStartingWithLocal( + reg, + [&] (Node* node) -> bool { + return flags.get(node) & seenInClosureFlag; + }, + [&] (Node* node) -> bool { + auto& resultFlags = flags.add(node, 0).iterator->value; + bool result = resultFlags & seenInClosureFlag; + if (!(resultFlags & calledFunctorFlag)) + functor(node); + resultFlags |= seenInClosureFlag | calledFunctorFlag; + return result; + }); + }); +} + +// Tells you all of the places to start searching from in a basic block. Gives you the node index at which +// the value is either no longer live. This pretends that nodes are dead at the end of the block, so that +// you can use this to do per-basic-block analyses. +template<typename Functor> +void forAllKillsInBlock( + Graph& graph, const CombinedLiveness& combinedLiveness, BasicBlock* block, + const Functor& functor) +{ + for (Node* node : combinedLiveness.liveAtTail[block]) + functor(block->size(), node); + + LocalOSRAvailabilityCalculator localAvailability; + localAvailability.beginBlock(block); + // Start at the second node, because the functor is expected to only inspect nodes from the start of + // the block up to nodeIndex (exclusive), so if nodeIndex is zero then the functor has nothing to do. 
+ for (unsigned nodeIndex = 1; nodeIndex < block->size(); ++nodeIndex) { + forAllKilledNodesAtNodeIndex( + graph, localAvailability.m_availability, block, nodeIndex, + [&] (Node* node) { + functor(nodeIndex, node); + }); + localAvailability.executeNode(block->at(nodeIndex)); + } +} + +} } // namespace JSC::DFG + +#endif // DFGForAllKills_h + diff --git a/Source/JavaScriptCore/dfg/DFGFrozenValue.cpp b/Source/JavaScriptCore/dfg/DFGFrozenValue.cpp new file mode 100644 index 000000000..a62c38dde --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGFrozenValue.cpp @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGFrozenValue.h" + +#if ENABLE(DFG_JIT) + +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +FrozenValue* FrozenValue::emptySingleton() +{ + static FrozenValue empty; + return &empty; +} + +void FrozenValue::dumpInContext(PrintStream& out, DumpContext* context) const +{ + if (!!m_value && m_value.isCell()) + out.print(m_strength, ":"); + m_value.dumpInContextAssumingStructure(out, context, m_structure); +} + +void FrozenValue::dump(PrintStream& out) const +{ + dumpInContext(out, 0); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGFrozenValue.h b/Source/JavaScriptCore/dfg/DFGFrozenValue.h new file mode 100644 index 000000000..094356fcc --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGFrozenValue.h @@ -0,0 +1,129 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGFrozenValue_h +#define DFGFrozenValue_h + +#if ENABLE(DFG_JIT) + +#include "DFGValueStrength.h" +#include "JSCell.h" +#include "JSCJSValue.h" +#include "Structure.h" + +namespace JSC { namespace DFG { + +class Graph; + +class FrozenValue { +public: + FrozenValue() + : m_structure(nullptr) + , m_strength(WeakValue) + { + } + + FrozenValue(JSValue value) + : m_value(value) + , m_structure(nullptr) + , m_strength(WeakValue) + { + RELEASE_ASSERT(!value || !value.isCell()); + } + + FrozenValue(JSValue value, Structure* structure, ValueStrength strength) + : m_value(value) + , m_structure(structure) + , m_strength(strength) + { + ASSERT((!!value && value.isCell()) == !!structure); + ASSERT(!value || !value.isCell() || value.asCell()->classInfo() == structure->classInfo()); + ASSERT(!!structure || (strength == WeakValue)); + } + + static FrozenValue* emptySingleton(); + + bool operator!() const { return !m_value; } + + JSValue value() const { return m_value; } + JSCell* cell() const { return m_value.asCell(); } + + template<typename T> + T dynamicCast() + { + return jsDynamicCast<T>(value()); + } + template<typename T> + T cast() + { + return jsCast<T>(value()); + } + + Structure* structure() const { return m_structure; } + + void strengthenTo(ValueStrength strength) + { + if (!!m_value && m_value.isCell()) + m_strength = merge(m_strength, strength); + } + + bool pointsToHeap() const { return !!value() && value().isCell(); } + + // The strength of the value itself. The structure is almost always weak. + ValueStrength strength() const { return m_strength; } + + void dumpInContext(PrintStream& out, DumpContext* context) const; + void dump(PrintStream& out) const; + +private: + friend class Graph; + + // This is a utility method for DFG::Graph::freeze(). You should almost always call + // Graph::freeze() directly. Calling this instead of Graph::freeze() can result in + // the same constant being viewed as having different structures during the course + // of compilation, which can sometimes cause bad things to happen. For example, we + // may observe that one version of the constant has an unwatchable structure but + // then a later version may start to have a watchable structure due to a transition. + // The point of freezing is to ensure that we generally only see one version of + // constants, but that requires freezing through the Graph. + static FrozenValue freeze(JSValue value) + { + return FrozenValue( + value, + (!!value && value.isCell()) ? 
value.asCell()->structure() : nullptr, + WeakValue); + } + + JSValue m_value; + Structure* m_structure; + ValueStrength m_strength; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGFrozenValue_h + diff --git a/Source/JavaScriptCore/dfg/DFGFunctionWhitelist.cpp b/Source/JavaScriptCore/dfg/DFGFunctionWhitelist.cpp new file mode 100644 index 000000000..57dc109d4 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGFunctionWhitelist.cpp @@ -0,0 +1,115 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGFunctionWhitelist.h" + +#if ENABLE(DFG_JIT) + +#include "CodeBlock.h" +#include "Options.h" +#include <stdio.h> +#include <string.h> +#include <wtf/NeverDestroyed.h> +#include <wtf/text/StringBuilder.h> + +namespace JSC { namespace DFG { + +FunctionWhitelist& FunctionWhitelist::ensureGlobalWhitelist() +{ + static LazyNeverDestroyed<FunctionWhitelist> functionWhitelist; + static std::once_flag initializeWhitelistFlag; + std::call_once(initializeWhitelistFlag, [] { + const char* functionWhitelistFile = Options::dfgWhitelist(); + functionWhitelist.construct(functionWhitelistFile); + }); + return functionWhitelist; +} + +FunctionWhitelist::FunctionWhitelist(const char* filename) +{ + parseFunctionNamesInFile(filename); +} + +void FunctionWhitelist::parseFunctionNamesInFile(const char* filename) +{ + if (!filename) + return; + + FILE* f = fopen(filename, "r"); + if (!f) { + dataLogF("Failed to open file %s. Did you add the file-read-data entitlement to WebProcess.sb?\n", filename); + return; + } + + char* line; + char buffer[BUFSIZ]; + while ((line = fgets(buffer, sizeof(buffer), f))) { + if (strstr(line, "//") == line) + continue; + + // Get rid of newlines at the ends of the strings. + size_t length = strlen(line); + if (line[length - 1] == '\n') { + line[length - 1] = '\0'; + length--; + } + + // Skip empty lines. 
+ if (!length) + continue; + + m_entries.add(String(line, length)); + } + + int result = fclose(f); + if (result) + dataLogF("Failed to close file %s: %s\n", filename, strerror(errno)); +} + +bool FunctionWhitelist::contains(CodeBlock* codeBlock) const +{ + ASSERT(!isCompilationThread()); + if (!Options::dfgWhitelist()) + return true; + + if (m_entries.isEmpty()) + return false; + + String name = String::fromUTF8(codeBlock->inferredName()); + if (m_entries.contains(name)) + return true; + + String hash = String::fromUTF8(codeBlock->hashAsStringIfPossible()); + if (m_entries.contains(hash)) + return true; + + return m_entries.contains(name + '#' + hash); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGFunctionWhitelist.h b/Source/JavaScriptCore/dfg/DFGFunctionWhitelist.h new file mode 100644 index 000000000..92498a90b --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGFunctionWhitelist.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGFunctionWhitelist_h +#define DFGFunctionWhitelist_h + +#if ENABLE(DFG_JIT) + +#include <wtf/HashSet.h> +#include <wtf/text/WTFString.h> + +namespace JSC { + +class CodeBlock; + +namespace DFG { + +class FunctionWhitelist { +public: + static FunctionWhitelist& ensureGlobalWhitelist(); + explicit FunctionWhitelist(const char*); + + bool contains(CodeBlock*) const; + +private: + void parseFunctionNamesInFile(const char*); + + HashSet<String> m_entries; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGFunctionWhitelist_h diff --git a/Source/JavaScriptCore/dfg/DFGGenerationInfo.h b/Source/JavaScriptCore/dfg/DFGGenerationInfo.h index e3330fa3b..40c1cd9a4 100644 --- a/Source/JavaScriptCore/dfg/DFGGenerationInfo.h +++ b/Source/JavaScriptCore/dfg/DFGGenerationInfo.h @@ -38,9 +38,9 @@ namespace JSC { namespace DFG { // === GenerationInfo === // -// This class is used to track the current status of a live values during code generation. +// This class is used to track the current status of live values during code generation. 
// Can provide information as to whether a value is in machine registers, and if so which, -// whether a value has been spilled to the RegsiterFile, and if so may be able to provide +// whether a value has been spilled to the RegisterFile, and if so may be able to provide // details of the format in memory (all values are spilled in a boxed form, but we may be // able to track the type of box), and tracks how many outstanding uses of a value remain, // so that we know when the value is dead and the machine registers associated with it @@ -153,8 +153,6 @@ public: void noticeOSRBirth(VariableEventStream& stream, Node* node, VirtualRegister virtualRegister) { - if (m_isConstant) - return; if (m_node != node) return; if (!alive()) @@ -164,7 +162,9 @@ public: m_bornForOSR = true; - if (m_registerFormat != DataFormatNone) + if (m_isConstant) + appendBirth(stream); + else if (m_registerFormat != DataFormatNone) appendFill(BirthToFill, stream); else if (m_spillFormat != DataFormatNone) appendSpill(BirthToSpill, stream, virtualRegister); @@ -189,10 +189,10 @@ public: // Used to check the operands of operations to see if they are on // their last use; in some cases it may be safe to reuse the same // machine register for the result of the operation. - bool canReuse() + uint32_t useCount() { ASSERT(m_useCount); - return m_useCount == 1; + return m_useCount; } // Get the format of the value in machine registers (or 'none'). @@ -378,7 +378,35 @@ public: return m_useCount; } + ValueRecovery recovery(VirtualRegister spillSlot) const + { + if (m_isConstant) + return ValueRecovery::constant(m_node->constant()->value()); + + if (m_registerFormat == DataFormatDouble) + return ValueRecovery::inFPR(u.fpr, DataFormatDouble); + +#if USE(JSVALUE32_64) + if (m_registerFormat & DataFormatJS) { + if (m_registerFormat == DataFormatJS) + return ValueRecovery::inPair(u.v.tagGPR, u.v.payloadGPR); + return ValueRecovery::inGPR(u.v.payloadGPR, static_cast<DataFormat>(m_registerFormat & ~DataFormatJS)); + } +#endif + if (m_registerFormat) + return ValueRecovery::inGPR(u.gpr, m_registerFormat); + + ASSERT(m_spillFormat); + + return ValueRecovery::displacedInJSStack(spillSlot, m_spillFormat); + } + private: + void appendBirth(VariableEventStream& stream) + { + stream.appendAndLog(VariableEvent::birth(MinifiedID(m_node))); + } + void appendFill(VariableEventKind kind, VariableEventStream& stream) { ASSERT(m_bornForOSR); diff --git a/Source/JavaScriptCore/dfg/DFGGraph.cpp b/Source/JavaScriptCore/dfg/DFGGraph.cpp index 8256007e9..541c84ebb 100644 --- a/Source/JavaScriptCore/dfg/DFGGraph.cpp +++ b/Source/JavaScriptCore/dfg/DFGGraph.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,23 +26,33 @@ #include "config.h" #include "DFGGraph.h" +#if ENABLE(DFG_JIT) + +#include "BytecodeKills.h" #include "BytecodeLivenessAnalysisInlines.h" #include "CodeBlock.h" #include "CodeBlockWithJITType.h" +#include "DFGBlockWorklist.h" #include "DFGClobberSet.h" +#include "DFGClobbersExitState.h" +#include "DFGCFG.h" +#include "DFGDominators.h" #include "DFGJITCode.h" +#include "DFGMayExit.h" +#include "DFGNaturalLoops.h" +#include "DFGPrePostNumbering.h" #include "DFGVariableAccessDataDump.h" #include "FullBytecodeLiveness.h" #include "FunctionExecutableDump.h" #include "JIT.h" -#include "JSActivation.h" +#include "JSLexicalEnvironment.h" +#include "MaxFrameExtentForSlowPathCall.h" #include "OperandsInlines.h" -#include "Operations.h" +#include "JSCInlines.h" +#include "StackAlignment.h" #include <wtf/CommaPrinter.h> #include <wtf/ListDump.h> -#if ENABLE(DFG_JIT) - namespace JSC { namespace DFG { // Creates an array of stringized names. @@ -55,27 +65,35 @@ static const char* dfgOpNames[] = { Graph::Graph(VM& vm, Plan& plan, LongLivedState& longLivedState) : m_vm(vm) , m_plan(plan) - , m_codeBlock(m_plan.codeBlock.get()) + , m_codeBlock(m_plan.codeBlock) , m_profiledBlock(m_codeBlock->alternative()) , m_allocator(longLivedState.m_allocator) - , m_mustHandleAbstractValues(OperandsLike, plan.mustHandleValues) - , m_inlineCallFrames(adoptPtr(new InlineCallFrameSet())) - , m_hasArguments(false) + , m_cfg(std::make_unique<CFG>(*this)) , m_nextMachineLocal(0) - , m_machineCaptureStart(std::numeric_limits<int>::max()) , m_fixpointState(BeforeFixpoint) + , m_structureRegistrationState(HaveNotStartedRegistering) , m_form(LoadStore) , m_unificationState(LocallyUnified) , m_refCountState(EverythingIsLive) { ASSERT(m_profiledBlock); - for (unsigned i = m_mustHandleAbstractValues.size(); i--;) - m_mustHandleAbstractValues[i].setMostSpecific(*this, plan.mustHandleValues[i]); + m_hasDebuggerEnabled = m_profiledBlock->globalObject()->hasDebugger() + || Options::forceDebuggerBytecodeGeneration(); } Graph::~Graph() { + for (BlockIndex blockIndex = numBlocks(); blockIndex--;) { + BasicBlock* block = this->block(blockIndex); + if (!block) + continue; + + for (unsigned phiIndex = block->phis.size(); phiIndex--;) + m_allocator.free(block->phis[phiIndex]); + for (unsigned nodeIndex = block->size(); nodeIndex--;) + m_allocator.free(block->at(nodeIndex)); + } m_allocator.freeAll(); } @@ -90,16 +108,22 @@ static void printWhiteSpace(PrintStream& out, unsigned amount) out.print(" "); } -bool Graph::dumpCodeOrigin(PrintStream& out, const char* prefix, Node* previousNode, Node* currentNode, DumpContext* context) +bool Graph::dumpCodeOrigin(PrintStream& out, const char* prefix, Node*& previousNodeRef, Node* currentNode, DumpContext* context) { + if (!currentNode->origin.semantic) + return false; + + Node* previousNode = previousNodeRef; + previousNodeRef = currentNode; + if (!previousNode) return false; - if (previousNode->codeOrigin.inlineCallFrame == currentNode->codeOrigin.inlineCallFrame) + if (previousNode->origin.semantic.inlineCallFrame == currentNode->origin.semantic.inlineCallFrame) return false; - Vector<CodeOrigin> previousInlineStack = previousNode->codeOrigin.inlineStack(); - Vector<CodeOrigin> currentInlineStack = currentNode->codeOrigin.inlineStack(); + Vector<CodeOrigin> previousInlineStack = previousNode->origin.semantic.inlineStack(); + 
Vector<CodeOrigin> currentInlineStack = currentNode->origin.semantic.inlineStack(); unsigned commonSize = std::min(previousInlineStack.size(), currentInlineStack.size()); unsigned indexOfDivergence = commonSize; for (unsigned i = 0; i < commonSize; ++i) { @@ -132,7 +156,7 @@ bool Graph::dumpCodeOrigin(PrintStream& out, const char* prefix, Node* previousN int Graph::amountOfNodeWhiteSpace(Node* node) { - return (node->codeOrigin.inlineDepth() - 1) * 2; + return (node->origin.semantic.inlineDepth() - 1) * 2; } void Graph::printNodeWhiteSpace(PrintStream& out, Node* node) @@ -145,7 +169,6 @@ void Graph::dump(PrintStream& out, const char* prefix, Node* node, DumpContext* NodeType op = node->op(); unsigned refCount = node->refCount(); - bool skipped = !refCount; bool mustGenerate = node->mustGenerate(); if (mustGenerate) --refCount; @@ -167,11 +190,10 @@ void Graph::dump(PrintStream& out, const char* prefix, Node* node, DumpContext* // (5) The arguments to the operation. The may be of the form: // @# - a NodeIndex referencing a prior node in the graph. // arg# - an argument number. - // $# - the index in the CodeBlock of a constant { for numeric constants the value is displayed | for integers, in both decimal and hex }. // id# - the index in the CodeBlock of an identifier { if codeBlock is passed to dump(), the string representation is displayed }. - // var# - the index of a var on the global object, used by GetGlobalVar/PutGlobalVar operations. - out.printf("% 4d:%s<%c%u:", (int)node->index(), skipped ? " skipped " : " ", mustGenerate ? '!' : ' ', refCount); - if (node->hasResult() && !skipped && node->hasVirtualRegister()) + // var# - the index of a var on the global object, used by GetGlobalVar/GetGlobalLexicalVariable/PutGlobalVariable operations. + out.printf("% 4d:<%c%u:", (int)node->index(), mustGenerate ? '!' 
: ' ', refCount); + if (node->hasResult() && node->hasVirtualRegister() && node->virtualRegister().isValid()) out.print(node->virtualRegister()); else out.print("-"); @@ -200,88 +222,97 @@ void Graph::dump(PrintStream& out, const char* prefix, Node* node, DumpContext* out.print(comma, node->arrayMode()); if (node->hasArithMode()) out.print(comma, node->arithMode()); - if (node->hasVarNumber()) - out.print(comma, node->varNumber()); + if (node->hasArithRoundingMode()) + out.print(comma, "Rounding:", node->arithRoundingMode()); + if (node->hasScopeOffset()) + out.print(comma, node->scopeOffset()); + if (node->hasDirectArgumentsOffset()) + out.print(comma, node->capturedArgumentsOffset()); if (node->hasRegisterPointer()) - out.print(comma, "global", globalObjectFor(node->codeOrigin)->findRegisterIndex(node->registerPointer()), "(", RawPointer(node->registerPointer()), ")"); + out.print(comma, "global", "(", RawPointer(node->variablePointer()), ")"); if (node->hasIdentifier()) out.print(comma, "id", node->identifierNumber(), "{", identifiers()[node->identifierNumber()], "}"); + if (node->hasPromotedLocationDescriptor()) + out.print(comma, node->promotedLocationDescriptor()); if (node->hasStructureSet()) out.print(comma, inContext(node->structureSet(), context)); if (node->hasStructure()) out.print(comma, inContext(*node->structure(), context)); - if (node->hasStructureTransitionData()) - out.print(comma, inContext(*node->structureTransitionData().previousStructure, context), " -> ", inContext(*node->structureTransitionData().newStructure, context)); - if (node->hasFunction()) { - out.print(comma, "function(", RawPointer(node->function()), ", "); - if (node->function()->inherits(JSFunction::info())) { - JSFunction* function = jsCast<JSFunction*>(node->function()); - if (function->isHostFunction()) - out.print("<host function>"); - else - out.print(FunctionExecutableDump(function->jsExecutable())); - } else - out.print("<not JSFunction>"); - out.print(")"); - } - if (node->hasExecutable()) { - if (node->executable()->inherits(FunctionExecutable::info())) - out.print(comma, "executable(", FunctionExecutableDump(jsCast<FunctionExecutable*>(node->executable())), ")"); - else - out.print(comma, "executable(not function: ", RawPointer(node->executable()), ")"); - } - if (node->hasFunctionDeclIndex()) { - FunctionExecutable* executable = m_codeBlock->functionDecl(node->functionDeclIndex()); - out.print(comma, FunctionExecutableDump(executable)); + if (node->hasTransition()) { + out.print(comma, pointerDumpInContext(node->transition(), context)); +#if USE(JSVALUE64) + out.print(", ID:", node->transition()->next->id()); +#else + out.print(", ID:", RawPointer(node->transition()->next)); +#endif } - if (node->hasFunctionExprIndex()) { - FunctionExecutable* executable = m_codeBlock->functionExpr(node->functionExprIndex()); - out.print(comma, FunctionExecutableDump(executable)); + if (node->hasCellOperand()) { + if (!node->cellOperand()->value() || !node->cellOperand()->value().isCell()) + out.print(comma, "invalid cell operand: ", node->cellOperand()->value()); + else { + out.print(comma, pointerDump(node->cellOperand()->value().asCell())); + if (node->cellOperand()->value().isCell()) { + CallVariant variant(node->cellOperand()->value().asCell()); + if (ExecutableBase* executable = variant.executable()) { + if (executable->isHostFunction()) + out.print(comma, "<host function>"); + else if (FunctionExecutable* functionExecutable = jsDynamicCast<FunctionExecutable*>(executable)) + out.print(comma, 
FunctionExecutableDump(functionExecutable)); + else + out.print(comma, "<non-function executable>"); + } + } + } } if (node->hasStorageAccessData()) { - StorageAccessData& storageAccessData = m_storageAccessData[node->storageAccessDataIndex()]; + StorageAccessData& storageAccessData = node->storageAccessData(); out.print(comma, "id", storageAccessData.identifierNumber, "{", identifiers()[storageAccessData.identifierNumber], "}"); out.print(", ", static_cast<ptrdiff_t>(storageAccessData.offset)); + out.print(", inferredType = ", inContext(storageAccessData.inferredType, context)); + } + if (node->hasMultiGetByOffsetData()) { + MultiGetByOffsetData& data = node->multiGetByOffsetData(); + out.print(comma, "id", data.identifierNumber, "{", identifiers()[data.identifierNumber], "}"); + for (unsigned i = 0; i < data.cases.size(); ++i) + out.print(comma, inContext(data.cases[i], context)); + } + if (node->hasMultiPutByOffsetData()) { + MultiPutByOffsetData& data = node->multiPutByOffsetData(); + out.print(comma, "id", data.identifierNumber, "{", identifiers()[data.identifierNumber], "}"); + for (unsigned i = 0; i < data.variants.size(); ++i) + out.print(comma, inContext(data.variants[i], context)); } ASSERT(node->hasVariableAccessData(*this) == node->hasLocal(*this)); if (node->hasVariableAccessData(*this)) { - VariableAccessData* variableAccessData = node->variableAccessData(); - VirtualRegister operand = variableAccessData->local(); - if (operand.isArgument()) - out.print(comma, "arg", operand.toArgument(), "(", VariableAccessDataDump(*this, variableAccessData), ")"); - else - out.print(comma, "loc", operand.toLocal(), "(", VariableAccessDataDump(*this, variableAccessData), ")"); - - operand = variableAccessData->machineLocal(); - if (operand.isValid()) { - if (operand.isArgument()) - out.print(comma, "machine:arg", operand.toArgument()); - else - out.print(comma, "machine:loc", operand.toLocal()); + VariableAccessData* variableAccessData = node->tryGetVariableAccessData(); + if (variableAccessData) { + VirtualRegister operand = variableAccessData->local(); + out.print(comma, variableAccessData->local(), "(", VariableAccessDataDump(*this, variableAccessData), ")"); + operand = variableAccessData->machineLocal(); + if (operand.isValid()) + out.print(comma, "machine:", operand); } } - if (node->hasUnlinkedLocal()) { - VirtualRegister operand = node->unlinkedLocal(); - if (operand.isArgument()) - out.print(comma, "arg", operand.toArgument()); - else - out.print(comma, "loc", operand.toLocal()); + if (node->hasStackAccessData()) { + StackAccessData* data = node->stackAccessData(); + out.print(comma, data->local); + if (data->machineLocal.isValid()) + out.print(comma, "machine:", data->machineLocal); + out.print(comma, data->format); } + if (node->hasUnlinkedLocal()) + out.print(comma, node->unlinkedLocal()); if (node->hasUnlinkedMachineLocal()) { VirtualRegister operand = node->unlinkedMachineLocal(); - if (operand.isValid()) { - if (operand.isArgument()) - out.print(comma, "machine:arg", operand.toArgument()); - else - out.print(comma, "machine:loc", operand.toLocal()); - } + if (operand.isValid()) + out.print(comma, "machine:", operand); } if (node->hasConstantBuffer()) { out.print(comma); out.print(node->startConstant(), ":["); CommaPrinter anotherComma; for (unsigned i = 0; i < node->numConstants(); ++i) - out.print(anotherComma, inContext(m_codeBlock->constantBuffer(node->startConstant())[i], context)); + out.print(anotherComma, 
pointerDumpInContext(freeze(m_codeBlock->constantBuffer(node->startConstant())[i]), context)); out.print("]"); } if (node->hasIndexingType()) @@ -292,29 +323,36 @@ void Graph::dump(PrintStream& out, const char* prefix, Node* node, DumpContext* out.print(comma, "^", node->phi()->index()); if (node->hasExecutionCounter()) out.print(comma, RawPointer(node->executionCounter())); - if (node->hasVariableWatchpointSet()) - out.print(comma, RawPointer(node->variableWatchpointSet())); - if (node->hasTypedArray()) - out.print(comma, inContext(JSValue(node->typedArray()), context)); + if (node->hasWatchpointSet()) + out.print(comma, RawPointer(node->watchpointSet())); if (node->hasStoragePointer()) out.print(comma, RawPointer(node->storagePointer())); - if (op == JSConstant) { - out.print(comma, "$", node->constantNumber()); - JSValue value = valueOfJSConstant(node); - out.print(" = ", inContext(value, context)); - } - if (op == WeakJSConstant) - out.print(comma, RawPointer(node->weakConstant()), " (", inContext(*node->weakConstant()->structure(), context), ")"); - if (node->isBranch() || node->isJump()) - out.print(comma, "T:", *node->takenBlock()); + if (node->hasObjectMaterializationData()) + out.print(comma, node->objectMaterializationData()); + if (node->hasCallVarargsData()) + out.print(comma, "firstVarArgOffset = ", node->callVarargsData()->firstVarArgOffset); + if (node->hasLoadVarargsData()) { + LoadVarargsData* data = node->loadVarargsData(); + out.print(comma, "start = ", data->start, ", count = ", data->count); + if (data->machineStart.isValid()) + out.print(", machineStart = ", data->machineStart); + if (data->machineCount.isValid()) + out.print(", machineCount = ", data->machineCount); + out.print(", offset = ", data->offset, ", mandatoryMinimum = ", data->mandatoryMinimum); + out.print(", limit = ", data->limit); + } + if (node->isConstant()) + out.print(comma, pointerDumpInContext(node->constant(), context)); + if (node->isJump()) + out.print(comma, "T:", *node->targetBlock()); if (node->isBranch()) - out.print(comma, "F:", *node->notTakenBlock()); + out.print(comma, "T:", node->branchData()->taken, ", F:", node->branchData()->notTaken); if (node->isSwitch()) { SwitchData* data = node->switchData(); out.print(comma, data->kind); for (unsigned i = 0; i < data->cases.size(); ++i) - out.print(comma, inContext(data->cases[i].value, context), ":", *data->cases[i].target); - out.print(comma, "default:", *data->fallThrough); + out.print(comma, inContext(data->cases[i].value, context), ":", data->cases[i].target); + out.print(comma, "default:", data->fallThrough); } ClobberSet reads; ClobberSet writes; @@ -323,45 +361,66 @@ void Graph::dump(PrintStream& out, const char* prefix, Node* node, DumpContext* out.print(comma, "R:", sortedListDump(reads.direct(), ",")); if (!writes.isEmpty()) out.print(comma, "W:", sortedListDump(writes.direct(), ",")); - out.print(comma, "bc#", node->codeOrigin.bytecodeIndex); - + ExitMode exitMode = mayExit(*this, node); + if (exitMode != DoesNotExit) + out.print(comma, exitMode); + if (clobbersExitState(*this, node)) + out.print(comma, "ClobbersExit"); + if (node->origin.isSet()) { + out.print(comma, "bc#", node->origin.semantic.bytecodeIndex); + if (node->origin.semantic != node->origin.forExit && node->origin.forExit.isSet()) + out.print(comma, "exit: ", node->origin.forExit); + } + if (!node->origin.exitOK) + out.print(comma, "ExitInvalid"); out.print(")"); - if (!skipped) { - if (node->hasVariableAccessData(*this)) - out.print(" predicting ", 
SpeculationDump(node->variableAccessData()->prediction())); - else if (node->hasHeapPrediction()) - out.print(" predicting ", SpeculationDump(node->getHeapPrediction())); - } + if (node->hasVariableAccessData(*this) && node->tryGetVariableAccessData()) + out.print(" predicting ", SpeculationDump(node->tryGetVariableAccessData()->prediction())); + else if (node->hasHeapPrediction()) + out.print(" predicting ", SpeculationDump(node->getHeapPrediction())); out.print("\n"); } +bool Graph::terminalsAreValid() +{ + for (BasicBlock* block : blocksInNaturalOrder()) { + if (!block->terminal()) + return false; + } + return true; +} + void Graph::dumpBlockHeader(PrintStream& out, const char* prefix, BasicBlock* block, PhiNodeDumpMode phiNodeDumpMode, DumpContext* context) { - out.print(prefix, "Block ", *block, " (", inContext(block->at(0)->codeOrigin, context), "): ", block->isReachable ? "" : "(skipped)", block->isOSRTarget ? " (OSR target)" : "", "\n"); + out.print(prefix, "Block ", *block, " (", inContext(block->at(0)->origin.semantic, context), "):", block->isReachable ? "" : " (skipped)", block->isOSRTarget ? " (OSR target)" : "", "\n"); + if (block->executionCount == block->executionCount) + out.print(prefix, " Execution count: ", block->executionCount, "\n"); out.print(prefix, " Predecessors:"); for (size_t i = 0; i < block->predecessors.size(); ++i) out.print(" ", *block->predecessors[i]); out.print("\n"); - if (m_dominators.isValid()) { - out.print(prefix, " Dominated by:"); - for (size_t i = 0; i < m_blocks.size(); ++i) { - if (!m_dominators.dominates(i, block->index)) - continue; - out.print(" #", i); - } - out.print("\n"); - out.print(prefix, " Dominates:"); - for (size_t i = 0; i < m_blocks.size(); ++i) { - if (!m_dominators.dominates(block->index, i)) - continue; - out.print(" #", i); + out.print(prefix, " Successors:"); + if (block->terminal()) { + for (BasicBlock* successor : block->successors()) { + out.print(" ", *successor); + if (m_prePostNumbering) + out.print(" (", m_prePostNumbering->edgeKind(block, successor), ")"); } - out.print("\n"); + } else + out.print(" <invalid>"); + out.print("\n"); + if (m_dominators && terminalsAreValid()) { + out.print(prefix, " Dominated by: ", m_dominators->dominatorsOf(block), "\n"); + out.print(prefix, " Dominates: ", m_dominators->blocksDominatedBy(block), "\n"); + out.print(prefix, " Dominance Frontier: ", m_dominators->dominanceFrontierOf(block), "\n"); + out.print(prefix, " Iterated Dominance Frontier: ", m_dominators->iteratedDominanceFrontierOf(BlockList(1, block)), "\n"); } - if (m_naturalLoops.isValid()) { - if (const NaturalLoop* loop = m_naturalLoops.headerOf(block)) { + if (m_prePostNumbering) + out.print(prefix, " Pre/Post Numbering: ", m_prePostNumbering->preNumber(block), "/", m_prePostNumbering->postNumber(block), "\n"); + if (m_naturalLoops) { + if (const NaturalLoop* loop = m_naturalLoops->headerOf(block)) { out.print(prefix, " Loop header, contains:"); Vector<BlockIndex> sortedBlockList; for (unsigned i = 0; i < loop->size(); ++i) @@ -373,7 +432,7 @@ void Graph::dumpBlockHeader(PrintStream& out, const char* prefix, BasicBlock* bl } Vector<const NaturalLoop*> containingLoops = - m_naturalLoops.loopsOf(block); + m_naturalLoops->loopsOf(block); if (!containingLoops.isEmpty()) { out.print(prefix, " Containing loop headers:"); for (unsigned i = 0; i < containingLoops.size(); ++i) @@ -387,7 +446,7 @@ void Graph::dumpBlockHeader(PrintStream& out, const char* prefix, BasicBlock* bl Node* phiNode = block->phis[i]; if 
(!phiNode->shouldGenerate() && phiNodeDumpMode == DumpLivePhisOnly) continue; - out.print(" @", phiNode->index(), "<", phiNode->refCount(), ">->("); + out.print(" @", phiNode->index(), "<", phiNode->local(), ",", phiNode->refCount(), ">->("); if (phiNode->child1()) { out.print("@", phiNode->child1()->index()); if (phiNode->child2()) { @@ -409,33 +468,48 @@ void Graph::dump(PrintStream& out, DumpContext* context) if (!context) context = &myContext; - dataLog("\n"); - dataLog("DFG for ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT), ":\n"); - dataLog(" Fixpoint state: ", m_fixpointState, "; Form: ", m_form, "; Unification state: ", m_unificationState, "; Ref count state: ", m_refCountState, "\n"); - dataLog("\n"); + out.print("\n"); + out.print("DFG for ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT), ":\n"); + out.print(" Fixpoint state: ", m_fixpointState, "; Form: ", m_form, "; Unification state: ", m_unificationState, "; Ref count state: ", m_refCountState, "\n"); + if (m_form == SSA) + out.print(" Argument formats: ", listDump(m_argumentFormats), "\n"); + else + out.print(" Arguments: ", listDump(m_arguments), "\n"); + out.print("\n"); - Node* lastNode = 0; + Node* lastNode = nullptr; for (size_t b = 0; b < m_blocks.size(); ++b) { BasicBlock* block = m_blocks[b].get(); if (!block) continue; dumpBlockHeader(out, "", block, DumpAllPhis, context); + out.print(" States: ", block->cfaStructureClobberStateAtHead); + if (!block->cfaHasVisited) + out.print(", CurrentlyCFAUnreachable"); + if (!block->intersectionOfCFAHasVisited) + out.print(", CFAUnreachable"); + out.print("\n"); switch (m_form) { case LoadStore: case ThreadedCPS: { - out.print(" vars before: "); + out.print(" Vars Before: "); if (block->cfaHasVisited) out.print(inContext(block->valuesAtHead, context)); else out.print("<empty>"); out.print("\n"); - out.print(" var links: ", block->variablesAtHead, "\n"); + out.print(" Intersected Vars Before: "); + if (block->intersectionOfCFAHasVisited) + out.print(inContext(block->intersectionOfPastValuesAtHead, context)); + else + out.print("<empty>"); + out.print("\n"); + out.print(" Var Links: ", block->variablesAtHead, "\n"); break; } case SSA: { RELEASE_ASSERT(block->ssa); - out.print(" Flush format: ", block->ssa->flushAtHead, "\n"); out.print(" Availability: ", block->ssa->availabilityAtHead, "\n"); out.print(" Live: ", nodeListDump(block->ssa->liveAtHead), "\n"); out.print(" Values: ", nodeMapDump(block->ssa->valuesAtHead, context), "\n"); @@ -444,35 +518,45 @@ void Graph::dump(PrintStream& out, DumpContext* context) for (size_t i = 0; i < block->size(); ++i) { dumpCodeOrigin(out, "", lastNode, block->at(i), context); dump(out, "", block->at(i), context); - lastNode = block->at(i); } + out.print(" States: ", block->cfaBranchDirection, ", ", block->cfaStructureClobberStateAtTail); + if (!block->cfaDidFinish) + out.print(", CFAInvalidated"); + out.print("\n"); switch (m_form) { case LoadStore: case ThreadedCPS: { - out.print(" vars after: "); + out.print(" Vars After: "); if (block->cfaHasVisited) out.print(inContext(block->valuesAtTail, context)); else out.print("<empty>"); out.print("\n"); - out.print(" var links: ", block->variablesAtTail, "\n"); + out.print(" Var Links: ", block->variablesAtTail, "\n"); break; } case SSA: { RELEASE_ASSERT(block->ssa); - out.print(" Flush format: ", block->ssa->flushAtTail, "\n"); out.print(" Availability: ", block->ssa->availabilityAtTail, "\n"); out.print(" Live: ", nodeListDump(block->ssa->liveAtTail), "\n"); out.print(" Values: ", 
nodeMapDump(block->ssa->valuesAtTail, context), "\n"); break; } } - dataLog("\n"); + out.print("\n"); + } + + out.print("GC Values:\n"); + for (FrozenValue* value : m_frozenValues) { + if (value->pointsToHeap()) + out.print(" ", inContext(*value, &myContext), "\n"); } + + out.print(inContext(watchpoints(), &myContext)); if (!myContext.isEmpty()) { - myContext.dump(WTF::dataFile()); - dataLog("\n"); + myContext.dump(out); + out.print("\n"); } } @@ -534,6 +618,113 @@ void Graph::resetReachability() determineReachability(); } +namespace { + +class RefCountCalculator { +public: + RefCountCalculator(Graph& graph) + : m_graph(graph) + { + } + + void calculate() + { + // First reset the counts to 0 for all nodes. + for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { + BasicBlock* block = m_graph.block(blockIndex); + if (!block) + continue; + for (unsigned indexInBlock = block->size(); indexInBlock--;) + block->at(indexInBlock)->setRefCount(0); + for (unsigned phiIndex = block->phis.size(); phiIndex--;) + block->phis[phiIndex]->setRefCount(0); + } + + // Now find the roots: + // - Nodes that are must-generate. + // - Nodes that are reachable from type checks. + // Set their ref counts to 1 and put them on the worklist. + for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { + BasicBlock* block = m_graph.block(blockIndex); + if (!block) + continue; + for (unsigned indexInBlock = block->size(); indexInBlock--;) { + Node* node = block->at(indexInBlock); + DFG_NODE_DO_TO_CHILDREN(m_graph, node, findTypeCheckRoot); + if (!(node->flags() & NodeMustGenerate)) + continue; + if (!node->postfixRef()) + m_worklist.append(node); + } + } + + while (!m_worklist.isEmpty()) { + while (!m_worklist.isEmpty()) { + Node* node = m_worklist.last(); + m_worklist.removeLast(); + ASSERT(node->shouldGenerate()); // It should not be on the worklist unless it's ref'ed. + DFG_NODE_DO_TO_CHILDREN(m_graph, node, countEdge); + } + + if (m_graph.m_form == SSA) { + // Find Phi->Upsilon edges, which are represented as meta-data in the + // Upsilon. + for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { + BasicBlock* block = m_graph.block(blockIndex); + if (!block) + continue; + for (unsigned nodeIndex = block->size(); nodeIndex--;) { + Node* node = block->at(nodeIndex); + if (node->op() != Upsilon) + continue; + if (node->shouldGenerate()) + continue; + if (node->phi()->shouldGenerate()) + countNode(node); + } + } + } + } + } + +private: + void findTypeCheckRoot(Node*, Edge edge) + { + // We may have an "unproved" untyped use for code that is unreachable. The CFA + // will just not have gotten around to it. + if (edge.isProved() || edge.willNotHaveCheck()) + return; + if (!edge->postfixRef()) + m_worklist.append(edge.node()); + } + + void countNode(Node* node) + { + if (node->postfixRef()) + return; + m_worklist.append(node); + } + + void countEdge(Node*, Edge edge) + { + // Don't count edges that are already counted for their type checks. 
+ if (!(edge.isProved() || edge.willNotHaveCheck())) + return; + countNode(edge.node()); + } + + Graph& m_graph; + Vector<Node*, 128> m_worklist; +}; + +} // anonymous namespace + +void Graph::computeRefCounts() +{ + RefCountCalculator calculator(*this); + calculator.calculate(); +} + void Graph::killBlockAndItsContents(BasicBlock* block) { for (unsigned phiIndex = block->phis.size(); phiIndex--;) @@ -557,29 +748,15 @@ void Graph::killUnreachableBlocks() } } -void Graph::resetExitStates() -{ - for (BlockIndex blockIndex = 0; blockIndex < m_blocks.size(); ++blockIndex) { - BasicBlock* block = m_blocks[blockIndex].get(); - if (!block) - continue; - for (unsigned indexInBlock = block->size(); indexInBlock--;) - block->at(indexInBlock)->setCanExit(true); - } -} - void Graph::invalidateCFG() { - m_dominators.invalidate(); - m_naturalLoops.invalidate(); + m_dominators = nullptr; + m_naturalLoops = nullptr; + m_prePostNumbering = nullptr; } void Graph::substituteGetLocal(BasicBlock& block, unsigned startIndexInBlock, VariableAccessData* variableAccessData, Node* newGetLocal) { - if (variableAccessData->isCaptured()) { - // Let CSE worry about this one. - return; - } for (unsigned indexInBlock = startIndexInBlock; indexInBlock < block.size(); ++indexInBlock) { Node* node = block[indexInBlock]; bool shouldContinue = true; @@ -609,26 +786,37 @@ void Graph::substituteGetLocal(BasicBlock& block, unsigned startIndexInBlock, Va } } -void Graph::addForDepthFirstSort(Vector<BasicBlock*>& result, Vector<BasicBlock*, 16>& worklist, HashSet<BasicBlock*>& seen, BasicBlock* block) +BlockList Graph::blocksInPreOrder() { - if (seen.contains(block)) - return; - - result.append(block); - worklist.append(block); - seen.add(block); + BlockList result; + BlockWorklist worklist; + worklist.push(block(0)); + while (BasicBlock* block = worklist.pop()) { + result.append(block); + for (unsigned i = block->numSuccessors(); i--;) + worklist.push(block->successor(i)); + } + return result; } -void Graph::getBlocksInDepthFirstOrder(Vector<BasicBlock*>& result) +BlockList Graph::blocksInPostOrder() { - Vector<BasicBlock*, 16> worklist; - HashSet<BasicBlock*> seen; - addForDepthFirstSort(result, worklist, seen, block(0)); - while (!worklist.isEmpty()) { - BasicBlock* block = worklist.takeLast(); - for (unsigned i = block->numSuccessors(); i--;) - addForDepthFirstSort(result, worklist, seen, block->successor(i)); + BlockList result; + PostOrderBlockWorklist worklist; + worklist.push(block(0)); + while (BlockWithOrder item = worklist.pop()) { + switch (item.order) { + case VisitOrder::Pre: + worklist.pushPost(item.node); + for (unsigned i = item.node->numSuccessors(); i--;) + worklist.push(item.node->successor(i)); + break; + case VisitOrder::Post: + result.append(item.node); + break; + } } + return result; } void Graph::clearReplacements() @@ -638,9 +826,22 @@ void Graph::clearReplacements() if (!block) continue; for (unsigned phiIndex = block->phis.size(); phiIndex--;) - block->phis[phiIndex]->misc.replacement = 0; + block->phis[phiIndex]->setReplacement(nullptr); for (unsigned nodeIndex = block->size(); nodeIndex--;) - block->at(nodeIndex)->misc.replacement = 0; + block->at(nodeIndex)->setReplacement(nullptr); + } +} + +void Graph::clearEpochs() +{ + for (BlockIndex blockIndex = numBlocks(); blockIndex--;) { + BasicBlock* block = m_blocks[blockIndex].get(); + if (!block) + continue; + for (unsigned phiIndex = block->phis.size(); phiIndex--;) + block->phis[phiIndex]->setEpoch(Epoch()); + for (unsigned nodeIndex = block->size(); 
nodeIndex--;) + block->at(nodeIndex)->setEpoch(Epoch()); } } @@ -651,10 +852,79 @@ void Graph::initializeNodeOwners() if (!block) continue; for (unsigned phiIndex = block->phis.size(); phiIndex--;) - block->phis[phiIndex]->misc.owner = block; + block->phis[phiIndex]->owner = block; + for (unsigned nodeIndex = block->size(); nodeIndex--;) + block->at(nodeIndex)->owner = block; + } +} + +void Graph::clearFlagsOnAllNodes(NodeFlags flags) +{ + for (BlockIndex blockIndex = numBlocks(); blockIndex--;) { + BasicBlock* block = m_blocks[blockIndex].get(); + if (!block) + continue; + for (unsigned phiIndex = block->phis.size(); phiIndex--;) + block->phis[phiIndex]->clearFlags(flags); for (unsigned nodeIndex = block->size(); nodeIndex--;) - block->at(nodeIndex)->misc.owner = block; + block->at(nodeIndex)->clearFlags(flags); + } +} + +bool Graph::watchCondition(const ObjectPropertyCondition& key) +{ + if (!key.isWatchable()) + return false; + + m_plan.weakReferences.addLazily(key.object()); + if (key.hasPrototype()) + m_plan.weakReferences.addLazily(key.prototype()); + if (key.hasRequiredValue()) + m_plan.weakReferences.addLazily(key.requiredValue()); + + m_plan.watchpoints.addLazily(key); + + if (key.kind() == PropertyCondition::Presence) + m_safeToLoad.add(std::make_pair(key.object(), key.offset())); + + return true; +} + +bool Graph::isSafeToLoad(JSObject* base, PropertyOffset offset) +{ + return m_safeToLoad.contains(std::make_pair(base, offset)); +} + +InferredType::Descriptor Graph::inferredTypeFor(const PropertyTypeKey& key) +{ + assertIsRegistered(key.structure()); + + auto iter = m_inferredTypes.find(key); + if (iter != m_inferredTypes.end()) + return iter->value; + + InferredType* typeObject = key.structure()->inferredTypeFor(key.uid()); + if (!typeObject) { + m_inferredTypes.add(key, InferredType::Top); + return InferredType::Top; } + + InferredType::Descriptor typeDescriptor = typeObject->descriptor(); + if (typeDescriptor.kind() == InferredType::Top) { + m_inferredTypes.add(key, InferredType::Top); + return InferredType::Top; + } + + m_inferredTypes.add(key, typeDescriptor); + + m_plan.weakReferences.addLazily(typeObject); + registerInferredType(typeDescriptor); + + // Note that we may already be watching this desired inferred type, because multiple structures may + // point to the same InferredType instance. 
+ m_plan.watchpoints.addLazily(DesiredInferredType(typeObject, typeDescriptor)); + + return typeDescriptor; } FullBytecodeLiveness& Graph::livenessFor(CodeBlock* codeBlock) @@ -666,7 +936,7 @@ FullBytecodeLiveness& Graph::livenessFor(CodeBlock* codeBlock) std::unique_ptr<FullBytecodeLiveness> liveness = std::make_unique<FullBytecodeLiveness>(); codeBlock->livenessAnalysis().computeFullLiveness(*liveness); FullBytecodeLiveness& result = *liveness; - m_bytecodeLiveness.add(codeBlock, std::move(liveness)); + m_bytecodeLiveness.add(codeBlock, WTFMove(liveness)); return result; } @@ -675,49 +945,98 @@ FullBytecodeLiveness& Graph::livenessFor(InlineCallFrame* inlineCallFrame) return livenessFor(baselineCodeBlockFor(inlineCallFrame)); } +BytecodeKills& Graph::killsFor(CodeBlock* codeBlock) +{ + HashMap<CodeBlock*, std::unique_ptr<BytecodeKills>>::iterator iter = m_bytecodeKills.find(codeBlock); + if (iter != m_bytecodeKills.end()) + return *iter->value; + + std::unique_ptr<BytecodeKills> kills = std::make_unique<BytecodeKills>(); + codeBlock->livenessAnalysis().computeKills(*kills); + BytecodeKills& result = *kills; + m_bytecodeKills.add(codeBlock, WTFMove(kills)); + return result; +} + +BytecodeKills& Graph::killsFor(InlineCallFrame* inlineCallFrame) +{ + return killsFor(baselineCodeBlockFor(inlineCallFrame)); +} + bool Graph::isLiveInBytecode(VirtualRegister operand, CodeOrigin codeOrigin) { + CodeOrigin* codeOriginPtr = &codeOrigin; for (;;) { - if (operand.offset() < codeOrigin.stackOffset() + JSStack::CallFrameHeaderSize) { - VirtualRegister reg = VirtualRegister( - operand.offset() - codeOrigin.stackOffset()); - + VirtualRegister reg = VirtualRegister( + operand.offset() - codeOriginPtr->stackOffset()); + + if (operand.offset() < codeOriginPtr->stackOffset() + JSStack::CallFrameHeaderSize) { if (reg.isArgument()) { RELEASE_ASSERT(reg.offset() < JSStack::CallFrameHeaderSize); - if (!codeOrigin.inlineCallFrame->isClosureCall) - return false; - - if (reg.offset() == JSStack::Callee) + if (codeOriginPtr->inlineCallFrame->isClosureCall + && reg.offset() == JSStack::Callee) return true; - if (reg.offset() == JSStack::ScopeChain) + + if (codeOriginPtr->inlineCallFrame->isVarargs() + && reg.offset() == JSStack::ArgumentCount) return true; return false; } - return livenessFor(codeOrigin.inlineCallFrame).operandIsLive( - reg.offset(), codeOrigin.bytecodeIndex); + return livenessFor(codeOriginPtr->inlineCallFrame).operandIsLive( + reg.offset(), codeOriginPtr->bytecodeIndex); } - if (!codeOrigin.inlineCallFrame) + InlineCallFrame* inlineCallFrame = codeOriginPtr->inlineCallFrame; + if (!inlineCallFrame) break; + + // Arguments are always live. This would be redundant if it wasn't for our + // op_call_varargs inlining. 
+ if (reg.isArgument() + && static_cast<size_t>(reg.toArgument()) < inlineCallFrame->arguments.size()) + return true; - codeOrigin = codeOrigin.inlineCallFrame->caller; + codeOriginPtr = inlineCallFrame->getCallerSkippingTailCalls(); + + // The first inline call frame could be an inline tail call + if (!codeOriginPtr) + break; } return true; } +BitVector Graph::localsLiveInBytecode(CodeOrigin codeOrigin) +{ + BitVector result; + result.ensureSize(block(0)->variablesAtHead.numberOfLocals()); + forAllLocalsLiveInBytecode( + codeOrigin, + [&] (VirtualRegister reg) { + ASSERT(reg.isLocal()); + result.quickSet(reg.toLocal()); + }); + return result; +} + unsigned Graph::frameRegisterCount() { - return m_nextMachineLocal + m_parameterSlots; + unsigned result = m_nextMachineLocal + std::max(m_parameterSlots, static_cast<unsigned>(maxFrameExtentForSlowPathCallInRegisters)); + return roundLocalRegisterCountForFramePointerOffset(result); +} + +unsigned Graph::stackPointerOffset() +{ + return virtualRegisterForLocal(frameRegisterCount() - 1).offset(); } unsigned Graph::requiredRegisterCountForExit() { unsigned count = JIT::frameRegisterCountFor(m_profiledBlock); - for (InlineCallFrameSet::iterator iter = m_inlineCallFrames->begin(); !!iter; ++iter) { + for (InlineCallFrameSet::iterator iter = m_plan.inlineCallFrames->begin(); !!iter; ++iter) { InlineCallFrame* inlineCallFrame = *iter; CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame); unsigned requiredCount = VirtualRegister(inlineCallFrame->stackOffset).toLocal() + 1 + JIT::frameRegisterCountFor(codeBlock); @@ -731,47 +1050,512 @@ unsigned Graph::requiredRegisterCountForExecutionAndExit() return std::max(frameRegisterCount(), requiredRegisterCountForExit()); } -JSActivation* Graph::tryGetActivation(Node* node) +JSValue Graph::tryGetConstantProperty( + JSValue base, const StructureSet& structureSet, PropertyOffset offset) { - if (!node->hasConstant()) - return 0; - return jsDynamicCast<JSActivation*>(valueOfJSConstant(node)); + if (!base || !base.isObject()) + return JSValue(); + + JSObject* object = asObject(base); + + for (unsigned i = structureSet.size(); i--;) { + Structure* structure = structureSet[i]; + assertIsRegistered(structure); + + WatchpointSet* set = structure->propertyReplacementWatchpointSet(offset); + if (!set || !set->isStillValid()) + return JSValue(); + + ASSERT(structure->isValidOffset(offset)); + ASSERT(!structure->isUncacheableDictionary()); + + watchpoints().addLazily(set); + } + + // What follows may require some extra thought. We need this load to load a valid JSValue. If + // our profiling makes sense and we're still on track to generate code that won't be + // invalidated, then we have nothing to worry about. We do, however, have to worry about + // loading - and then using - an invalid JSValue in the case that unbeknownst to us our code + // is doomed. + // + // One argument in favor of this code is that it should definitely work because the butterfly + // is always set before the structure. However, we don't currently have a fence between those + // stores. It's not clear if this matters, however. We don't ever shrink the property storage. + // So, for this to fail, you'd need an access on a constant object pointer such that the inline + // caches told us that the object had a structure that it did not *yet* have, and then later, + // the object transitioned to that structure that the inline caches had already seen. And then + // the processor reordered the stores. 
Seems unlikely and difficult to test. I believe that + // this is worth revisiting but it isn't worth losing sleep over. Filed: + // https://bugs.webkit.org/show_bug.cgi?id=134641 + // + // For now, we just do the minimal thing: defend against the structure right now being + // incompatible with the getDirect we're trying to do. The easiest way to do that is to + // determine if the structure belongs to the proven set. + + if (!structureSet.contains(object->structure())) + return JSValue(); + + return object->getDirect(offset); +} + +JSValue Graph::tryGetConstantProperty(JSValue base, Structure* structure, PropertyOffset offset) +{ + return tryGetConstantProperty(base, StructureSet(structure), offset); } -WriteBarrierBase<Unknown>* Graph::tryGetRegisters(Node* node) +JSValue Graph::tryGetConstantProperty( + JSValue base, const StructureAbstractValue& structure, PropertyOffset offset) { - JSActivation* activation = tryGetActivation(node); + if (structure.isInfinite()) { + // FIXME: If we just converted the offset to a uid, we could do ObjectPropertyCondition + // watching to constant-fold the property. + // https://bugs.webkit.org/show_bug.cgi?id=147271 + return JSValue(); + } + + return tryGetConstantProperty(base, structure.set(), offset); +} + +JSValue Graph::tryGetConstantProperty(const AbstractValue& base, PropertyOffset offset) +{ + return tryGetConstantProperty(base.m_value, base.m_structure, offset); +} + +AbstractValue Graph::inferredValueForProperty( + const StructureSet& base, UniquedStringImpl* uid, StructureClobberState clobberState) +{ + AbstractValue result; + base.forEach( + [&] (Structure* structure) { + AbstractValue value; + value.set(*this, inferredTypeForProperty(structure, uid)); + result.merge(value); + }); + if (clobberState == StructuresAreClobbered) + result.clobberStructures(); + return result; +} + +AbstractValue Graph::inferredValueForProperty( + const AbstractValue& base, UniquedStringImpl* uid, PropertyOffset offset, + StructureClobberState clobberState) +{ + if (JSValue value = tryGetConstantProperty(base, offset)) { + AbstractValue result; + result.set(*this, *freeze(value), clobberState); + return result; + } + + if (base.m_structure.isFinite()) + return inferredValueForProperty(base.m_structure.set(), uid, clobberState); + + return AbstractValue::heapTop(); +} + +JSValue Graph::tryGetConstantClosureVar(JSValue base, ScopeOffset offset) +{ + // This has an awesome concurrency story. See comment for GetGlobalVar in ByteCodeParser. 
+ + if (!base) + return JSValue(); + + JSLexicalEnvironment* activation = jsDynamicCast<JSLexicalEnvironment*>(base); if (!activation) - return 0; - if (!activation->isTornOff()) - return 0; - return activation->registers(); + return JSValue(); + + SymbolTable* symbolTable = activation->symbolTable(); + JSValue value; + WatchpointSet* set; + { + ConcurrentJITLocker locker(symbolTable->m_lock); + + SymbolTableEntry* entry = symbolTable->entryFor(locker, offset); + if (!entry) + return JSValue(); + + set = entry->watchpointSet(); + if (!set) + return JSValue(); + + if (set->state() != IsWatched) + return JSValue(); + + ASSERT(entry->scopeOffset() == offset); + value = activation->variableAt(offset).get(); + if (!value) + return JSValue(); + } + + watchpoints().addLazily(set); + + return value; } -JSArrayBufferView* Graph::tryGetFoldableView(Node* node) +JSValue Graph::tryGetConstantClosureVar(const AbstractValue& value, ScopeOffset offset) +{ + return tryGetConstantClosureVar(value.m_value, offset); +} + +JSValue Graph::tryGetConstantClosureVar(Node* node, ScopeOffset offset) { if (!node->hasConstant()) - return 0; - JSArrayBufferView* view = jsDynamicCast<JSArrayBufferView*>(valueOfJSConstant(node)); - if (!view) - return 0; - if (!watchpoints().isStillValid(view)) - return 0; + return JSValue(); + return tryGetConstantClosureVar(node->asJSValue(), offset); +} + +JSArrayBufferView* Graph::tryGetFoldableView(JSValue value) +{ + if (!value) + return nullptr; + JSArrayBufferView* view = jsDynamicCast<JSArrayBufferView*>(value); + if (!value) + return nullptr; + if (!view->length()) + return nullptr; + WTF::loadLoadFence(); + watchpoints().addLazily(view); return view; } -JSArrayBufferView* Graph::tryGetFoldableView(Node* node, ArrayMode arrayMode) +JSArrayBufferView* Graph::tryGetFoldableView(JSValue value, ArrayMode arrayMode) +{ + if (arrayMode.type() != Array::AnyTypedArray && arrayMode.typedArrayType() == NotTypedArray) + return nullptr; + return tryGetFoldableView(value); +} + +void Graph::registerFrozenValues() +{ + m_codeBlock->constants().resize(0); + m_codeBlock->constantsSourceCodeRepresentation().resize(0); + for (FrozenValue* value : m_frozenValues) { + if (!value->pointsToHeap()) + continue; + + ASSERT(value->structure()); + ASSERT(m_plan.weakReferences.contains(value->structure())); + + switch (value->strength()) { + case WeakValue: { + m_plan.weakReferences.addLazily(value->value().asCell()); + break; + } + case StrongValue: { + unsigned constantIndex = m_codeBlock->addConstantLazily(); + // We already have a barrier on the code block. 
+ m_codeBlock->constants()[constantIndex].setWithoutWriteBarrier(value->value()); + break; + } } + } + m_codeBlock->constants().shrinkToFit(); + m_codeBlock->constantsSourceCodeRepresentation().shrinkToFit(); +} + +void Graph::visitChildren(SlotVisitor& visitor) +{ + for (FrozenValue* value : m_frozenValues) { + visitor.appendUnbarrieredReadOnlyValue(value->value()); + visitor.appendUnbarrieredReadOnlyPointer(value->structure()); + } + + for (BlockIndex blockIndex = numBlocks(); blockIndex--;) { + BasicBlock* block = this->block(blockIndex); + if (!block) + continue; + + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + Node* node = block->at(nodeIndex); + + switch (node->op()) { + case CheckStructure: + for (unsigned i = node->structureSet().size(); i--;) + visitor.appendUnbarrieredReadOnlyPointer(node->structureSet()[i]); + break; + + case NewObject: + case ArrayifyToStructure: + case NewStringObject: + visitor.appendUnbarrieredReadOnlyPointer(node->structure()); + break; + + case PutStructure: + case AllocatePropertyStorage: + case ReallocatePropertyStorage: + visitor.appendUnbarrieredReadOnlyPointer( + node->transition()->previous); + visitor.appendUnbarrieredReadOnlyPointer( + node->transition()->next); + break; + + case MultiGetByOffset: + for (const MultiGetByOffsetCase& getCase : node->multiGetByOffsetData().cases) { + for (Structure* structure : getCase.set()) + visitor.appendUnbarrieredReadOnlyPointer(structure); + } + break; + + case MultiPutByOffset: + for (unsigned i = node->multiPutByOffsetData().variants.size(); i--;) { + PutByIdVariant& variant = node->multiPutByOffsetData().variants[i]; + const StructureSet& set = variant.oldStructure(); + for (unsigned j = set.size(); j--;) + visitor.appendUnbarrieredReadOnlyPointer(set[j]); + if (variant.kind() == PutByIdVariant::Transition) + visitor.appendUnbarrieredReadOnlyPointer(variant.newStructure()); + } + break; + + default: + break; + } + } + } +} + +FrozenValue* Graph::freeze(JSValue value) +{ + if (UNLIKELY(!value)) + return FrozenValue::emptySingleton(); + + auto result = m_frozenValueMap.add(JSValue::encode(value), nullptr); + if (LIKELY(!result.isNewEntry)) + return result.iterator->value; + + if (value.isUInt32()) + m_uint32ValuesInUse.append(value.asUInt32()); + + FrozenValue frozenValue = FrozenValue::freeze(value); + if (Structure* structure = frozenValue.structure()) + registerStructure(structure); + + return result.iterator->value = m_frozenValues.add(frozenValue); +} + +FrozenValue* Graph::freezeStrong(JSValue value) +{ + FrozenValue* result = freeze(value); + result->strengthenTo(StrongValue); + return result; +} + +void Graph::convertToConstant(Node* node, FrozenValue* value) +{ + if (value->structure()) + assertIsRegistered(value->structure()); + node->convertToConstant(value); +} + +void Graph::convertToConstant(Node* node, JSValue value) +{ + convertToConstant(node, freeze(value)); +} + +void Graph::convertToStrongConstant(Node* node, JSValue value) +{ + convertToConstant(node, freezeStrong(value)); +} + +StructureRegistrationResult Graph::registerStructure(Structure* structure) +{ + m_plan.weakReferences.addLazily(structure); + if (m_plan.watchpoints.consider(structure)) + return StructureRegisteredAndWatched; + return StructureRegisteredNormally; +} + +void Graph::assertIsRegistered(Structure* structure) +{ + // It's convenient to be able to call this with a maybe-null structure. 
+ if (!structure) + return; + + DFG_ASSERT(*this, nullptr, m_plan.weakReferences.contains(structure)); + + if (!structure->dfgShouldWatch()) + return; + if (watchpoints().isWatched(structure->transitionWatchpointSet())) + return; + + DFG_CRASH(*this, nullptr, toCString("Structure ", pointerDump(structure), " is watchable but isn't being watched.").data()); +} + +NO_RETURN_DUE_TO_CRASH static void crash( + Graph& graph, const CString& whileText, const char* file, int line, const char* function, + const char* assertion) +{ + startCrashing(); + dataLog("DFG ASSERTION FAILED: ", assertion, "\n"); + dataLog(file, "(", line, ") : ", function, "\n"); + dataLog("\n"); + dataLog(whileText); + dataLog("Graph at time of failure:\n"); + graph.dump(); + dataLog("\n"); + dataLog("DFG ASSERTION FAILED: ", assertion, "\n"); + dataLog(file, "(", line, ") : ", function, "\n"); + CRASH_WITH_SECURITY_IMPLICATION(); +} + +void Graph::handleAssertionFailure( + std::nullptr_t, const char* file, int line, const char* function, const char* assertion) +{ + crash(*this, "", file, line, function, assertion); +} + +void Graph::handleAssertionFailure( + Node* node, const char* file, int line, const char* function, const char* assertion) +{ + crash(*this, toCString("While handling node ", node, "\n\n"), file, line, function, assertion); +} + +void Graph::handleAssertionFailure( + BasicBlock* block, const char* file, int line, const char* function, const char* assertion) +{ + crash(*this, toCString("While handling block ", pointerDump(block), "\n\n"), file, line, function, assertion); +} + +void Graph::ensureDominators() +{ + if (!m_dominators) + m_dominators = std::make_unique<Dominators>(*this); +} + +void Graph::ensurePrePostNumbering() +{ + if (!m_prePostNumbering) + m_prePostNumbering = std::make_unique<PrePostNumbering>(*this); +} + +void Graph::ensureNaturalLoops() +{ + ensureDominators(); + if (!m_naturalLoops) + m_naturalLoops = std::make_unique<NaturalLoops>(*this); +} + +ValueProfile* Graph::valueProfileFor(Node* node) +{ + if (!node) + return nullptr; + + CodeBlock* profiledBlock = baselineCodeBlockFor(node->origin.semantic); + + if (node->hasLocal(*this)) { + if (!node->local().isArgument()) + return nullptr; + int argument = node->local().toArgument(); + Node* argumentNode = m_arguments[argument]; + if (!argumentNode) + return nullptr; + if (node->variableAccessData() != argumentNode->variableAccessData()) + return nullptr; + return profiledBlock->valueProfileForArgument(argument); + } + + if (node->hasHeapPrediction()) + return profiledBlock->valueProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex); + + return nullptr; +} + +MethodOfGettingAValueProfile Graph::methodOfGettingAValueProfileFor(Node* node) { - if (arrayMode.typedArrayType() == NotTypedArray) - return 0; - return tryGetFoldableView(node); + if (!node) + return MethodOfGettingAValueProfile(); + + if (ValueProfile* valueProfile = valueProfileFor(node)) + return MethodOfGettingAValueProfile(valueProfile); + + if (node->op() == GetLocal) { + CodeBlock* profiledBlock = baselineCodeBlockFor(node->origin.semantic); + + return MethodOfGettingAValueProfile::fromLazyOperand( + profiledBlock, + LazyOperandValueProfileKey( + node->origin.semantic.bytecodeIndex, node->local())); + } + + return MethodOfGettingAValueProfile(); } -JSArrayBufferView* Graph::tryGetFoldableViewForChild1(Node* node) +bool Graph::isStringPrototypeMethodSane(JSObject* stringPrototype, Structure* stringPrototypeStructure, UniquedStringImpl* uid) { - return 
tryGetFoldableView(child(node, 0).node(), node->arrayMode()); + unsigned attributesUnused; + PropertyOffset offset = stringPrototypeStructure->getConcurrently(uid, attributesUnused); + if (!isValidOffset(offset)) + return false; + + JSValue value = tryGetConstantProperty(stringPrototype, stringPrototypeStructure, offset); + if (!value) + return false; + + JSFunction* function = jsDynamicCast<JSFunction*>(value); + if (!function) + return false; + + if (function->executable()->intrinsicFor(CodeForCall) != StringPrototypeValueOfIntrinsic) + return false; + + return true; +} + +bool Graph::canOptimizeStringObjectAccess(const CodeOrigin& codeOrigin) +{ + if (hasExitSite(codeOrigin, NotStringObject)) + return false; + + Structure* stringObjectStructure = globalObjectFor(codeOrigin)->stringObjectStructure(); + registerStructure(stringObjectStructure); + ASSERT(stringObjectStructure->storedPrototype().isObject()); + ASSERT(stringObjectStructure->storedPrototype().asCell()->classInfo() == StringPrototype::info()); + + FrozenValue* stringPrototypeObjectValue = freeze(stringObjectStructure->storedPrototype()); + StringPrototype* stringPrototypeObject = stringPrototypeObjectValue->dynamicCast<StringPrototype*>(); + Structure* stringPrototypeStructure = stringPrototypeObjectValue->structure(); + if (registerStructure(stringPrototypeStructure) != StructureRegisteredAndWatched) + return false; + + if (stringPrototypeStructure->isDictionary()) + return false; + + // We're being conservative here. We want DFG's ToString on StringObject to be + // used in both numeric contexts (that would call valueOf()) and string contexts + // (that would call toString()). We don't want the DFG to have to distinguish + // between the two, just because that seems like it would get confusing. So we + // just require both methods to be sane. + if (!isStringPrototypeMethodSane(stringPrototypeObject, stringPrototypeStructure, m_vm.propertyNames->valueOf.impl())) + return false; + if (!isStringPrototypeMethodSane(stringPrototypeObject, stringPrototypeStructure, m_vm.propertyNames->toString.impl())) + return false; + + return true; +} + +bool Graph::willCatchExceptionInMachineFrame(CodeOrigin codeOrigin, CodeOrigin& opCatchOriginOut, HandlerInfo*& catchHandlerOut) +{ + if (!m_hasExceptionHandlers) + return false; + + unsigned bytecodeIndexToCheck = codeOrigin.bytecodeIndex; + while (1) { + InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame; + CodeBlock* codeBlock = baselineCodeBlockFor(inlineCallFrame); + if (HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeIndexToCheck)) { + opCatchOriginOut = CodeOrigin(handler->target, inlineCallFrame); + catchHandlerOut = handler; + return true; + } + + if (!inlineCallFrame) + return false; + + bytecodeIndexToCheck = inlineCallFrame->directCaller.bytecodeIndex; + codeOrigin = codeOrigin.inlineCallFrame->directCaller; + } + + RELEASE_ASSERT_NOT_REACHED(); } } } // namespace JSC::DFG -#endif +#endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGGraph.h b/Source/JavaScriptCore/dfg/DFGGraph.h index 7a5170048..1a4e2f7dd 100644 --- a/Source/JavaScriptCore/dfg/DFGGraph.h +++ b/Source/JavaScriptCore/dfg/DFGGraph.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,24 +26,24 @@ #ifndef DFGGraph_h #define DFGGraph_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "AssemblyHelpers.h" +#include "BytecodeLivenessAnalysisInlines.h" #include "CodeBlock.h" #include "DFGArgumentPosition.h" #include "DFGBasicBlock.h" -#include "DFGDominators.h" +#include "DFGFrozenValue.h" #include "DFGLongLivedState.h" -#include "DFGNaturalLoops.h" #include "DFGNode.h" #include "DFGNodeAllocator.h" #include "DFGPlan.h" -#include "DFGVariadicFunction.h" -#include "InlineCallFrameSet.h" +#include "DFGPropertyTypeKey.h" +#include "DFGScannable.h" +#include "FullBytecodeLiveness.h" #include "JSStack.h" #include "MethodOfGettingAValueProfile.h" +#include <unordered_map> #include <wtf/BitVector.h> #include <wtf/HashMap.h> #include <wtf/Vector.h> @@ -56,10 +56,52 @@ class ExecState; namespace DFG { -struct StorageAccessData { - PropertyOffset offset; - unsigned identifierNumber; -}; +class CFG; +class Dominators; +class NaturalLoops; +class PrePostNumbering; + +#define DFG_NODE_DO_TO_CHILDREN(graph, node, thingToDo) do { \ + Node* _node = (node); \ + if (_node->flags() & NodeHasVarArgs) { \ + for (unsigned _childIdx = _node->firstChild(); \ + _childIdx < _node->firstChild() + _node->numChildren(); \ + _childIdx++) { \ + if (!!(graph).m_varArgChildren[_childIdx]) \ + thingToDo(_node, (graph).m_varArgChildren[_childIdx]); \ + } \ + } else { \ + if (!_node->child1()) { \ + ASSERT( \ + !_node->child2() \ + && !_node->child3()); \ + break; \ + } \ + thingToDo(_node, _node->child1()); \ + \ + if (!_node->child2()) { \ + ASSERT(!_node->child3()); \ + break; \ + } \ + thingToDo(_node, _node->child2()); \ + \ + if (!_node->child3()) \ + break; \ + thingToDo(_node, _node->child3()); \ + } \ + } while (false) + +#define DFG_ASSERT(graph, node, assertion) do { \ + if (!!(assertion)) \ + break; \ + (graph).handleAssertionFailure( \ + (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \ + } while (false) + +#define DFG_CRASH(graph, node, reason) do { \ + (graph).handleAssertionFailure( \ + (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, (reason)); \ + } while (false) struct InlineVariableData { InlineCallFrame* inlineCallFrame; @@ -78,7 +120,7 @@ enum AddSpeculationMode { // // The order may be significant for nodes with side-effects (property accesses, value conversions). // Nodes that are 'dead' remain in the vector with refCount 0. -class Graph { +class Graph : public virtual Scannable { public: Graph(VM&, Plan&, LongLivedState&); ~Graph(); @@ -126,7 +168,7 @@ public: return; // Check if there is any replacement. - Node* replacement = child->misc.replacement; + Node* replacement = child->replacement(); if (!replacement) return; @@ -134,55 +176,36 @@ public: // There is definitely a replacement. Assert that the replacement does not // have a replacement. - ASSERT(!child->misc.replacement); + ASSERT(!child->replacement()); } -#define DFG_DEFINE_ADD_NODE(templatePre, templatePost, typeParams, valueParamsComma, valueParams, valueArgs) \ - templatePre typeParams templatePost Node* addNode(SpeculatedType type valueParamsComma valueParams) \ - { \ - Node* node = new (m_allocator) Node(valueArgs); \ - node->predict(type); \ - return node; \ + template<typename... Params> + Node* addNode(SpeculatedType type, Params... 
params) + { + Node* node = new (m_allocator) Node(params...); + node->predict(type); + return node; } - DFG_VARIADIC_TEMPLATE_FUNCTION(DFG_DEFINE_ADD_NODE) -#undef DFG_DEFINE_ADD_NODE void dethread(); - void convertToConstant(Node* node, unsigned constantNumber) - { - if (node->op() == GetLocal) - dethread(); - else - ASSERT(!node->hasVariableAccessData(*this)); - node->convertToConstant(constantNumber); - } - - unsigned constantRegisterForConstant(JSValue value) - { - unsigned constantRegister; - if (!m_codeBlock->findConstant(value, constantRegister)) { - constantRegister = m_codeBlock->addConstantLazily(); - initializeLazyWriteBarrierForConstant( - m_plan.writeBarriers, - m_codeBlock->constants()[constantRegister], - m_codeBlock, - constantRegister, - m_codeBlock->ownerExecutable(), - value); - } - return constantRegister; - } + FrozenValue* freeze(JSValue); // We use weak freezing by default. + FrozenValue* freezeStrong(JSValue); // Shorthand for freeze(value)->strengthenTo(StrongValue). + + void convertToConstant(Node* node, FrozenValue* value); + void convertToConstant(Node* node, JSValue value); + void convertToStrongConstant(Node* node, JSValue value); + + StructureRegistrationResult registerStructure(Structure* structure); + void assertIsRegistered(Structure* structure); - void convertToConstant(Node* node, JSValue value) - { - convertToConstant(node, constantRegisterForConstant(value)); - } - // CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names). void dump(PrintStream& = WTF::dataFile(), DumpContext* = 0); + + bool terminalsAreValid(); + enum PhiNodeDumpMode { DumpLivePhisOnly, DumpAllPhis }; - void dumpBlockHeader(PrintStream&, const char* prefix, BasicBlock*, PhiNodeDumpMode, DumpContext* context); + void dumpBlockHeader(PrintStream&, const char* prefix, BasicBlock*, PhiNodeDumpMode, DumpContext*); void dump(PrintStream&, Edge); void dump(PrintStream&, const char* prefix, Node*, DumpContext* = 0); static int amountOfNodeWhiteSpace(Node*); @@ -190,49 +213,54 @@ public: // Dump the code origin of the given node as a diff from the code origin of the // preceding node. Returns true if anything was printed. - bool dumpCodeOrigin(PrintStream&, const char* prefix, Node* previousNode, Node* currentNode, DumpContext* context); + bool dumpCodeOrigin(PrintStream&, const char* prefix, Node*& previousNode, Node* currentNode, DumpContext*); - SpeculatedType getJSConstantSpeculation(Node* node) - { - return speculationFromValue(node->valueOfJSConstant(m_codeBlock)); - } - - AddSpeculationMode addSpeculationMode(Node* add, bool leftShouldSpeculateInt32, bool rightShouldSpeculateInt32) + AddSpeculationMode addSpeculationMode(Node* add, bool leftShouldSpeculateInt32, bool rightShouldSpeculateInt32, PredictionPass pass) { ASSERT(add->op() == ValueAdd || add->op() == ArithAdd || add->op() == ArithSub); + RareCaseProfilingSource source = add->sourceFor(pass); + Node* left = add->child1().node(); Node* right = add->child2().node(); if (left->hasConstant()) - return addImmediateShouldSpeculateInt32(add, rightShouldSpeculateInt32, left); + return addImmediateShouldSpeculateInt32(add, rightShouldSpeculateInt32, right, left, source); if (right->hasConstant()) - return addImmediateShouldSpeculateInt32(add, leftShouldSpeculateInt32, right); + return addImmediateShouldSpeculateInt32(add, leftShouldSpeculateInt32, left, right, source); - return (leftShouldSpeculateInt32 && rightShouldSpeculateInt32 && add->canSpeculateInt32()) ? 
SpeculateInt32 : DontSpeculateInt32; + return (leftShouldSpeculateInt32 && rightShouldSpeculateInt32 && add->canSpeculateInt32(source)) ? SpeculateInt32 : DontSpeculateInt32; } - AddSpeculationMode valueAddSpeculationMode(Node* add) + AddSpeculationMode valueAddSpeculationMode(Node* add, PredictionPass pass) { - return addSpeculationMode(add, add->child1()->shouldSpeculateInt32ExpectingDefined(), add->child2()->shouldSpeculateInt32ExpectingDefined()); + return addSpeculationMode( + add, + add->child1()->shouldSpeculateInt32OrBooleanExpectingDefined(), + add->child2()->shouldSpeculateInt32OrBooleanExpectingDefined(), + pass); } - AddSpeculationMode arithAddSpeculationMode(Node* add) + AddSpeculationMode arithAddSpeculationMode(Node* add, PredictionPass pass) { - return addSpeculationMode(add, add->child1()->shouldSpeculateInt32ForArithmetic(), add->child2()->shouldSpeculateInt32ForArithmetic()); + return addSpeculationMode( + add, + add->child1()->shouldSpeculateInt32OrBooleanForArithmetic(), + add->child2()->shouldSpeculateInt32OrBooleanForArithmetic(), + pass); } - AddSpeculationMode addSpeculationMode(Node* add) + AddSpeculationMode addSpeculationMode(Node* add, PredictionPass pass) { if (add->op() == ValueAdd) - return valueAddSpeculationMode(add); + return valueAddSpeculationMode(add, pass); - return arithAddSpeculationMode(add); + return arithAddSpeculationMode(add, pass); } - bool addShouldSpeculateInt32(Node* add) + bool addShouldSpeculateInt32(Node* add, PredictionPass pass) { - return addSpeculationMode(add) != DontSpeculateInt32; + return addSpeculationMode(add, pass) != DontSpeculateInt32; } bool addShouldSpeculateMachineInt(Node* add) @@ -243,155 +271,65 @@ public: Node* left = add->child1().node(); Node* right = add->child2().node(); - bool speculation; - if (add->op() == ValueAdd) - speculation = Node::shouldSpeculateMachineInt(left, right); - else - speculation = Node::shouldSpeculateMachineInt(left, right); - + bool speculation = Node::shouldSpeculateMachineInt(left, right); return speculation && !hasExitSite(add, Int52Overflow); } - bool mulShouldSpeculateInt32(Node* mul) + bool binaryArithShouldSpeculateInt32(Node* node, PredictionPass pass) { - ASSERT(mul->op() == ArithMul); + Node* left = node->child1().node(); + Node* right = node->child2().node(); - Node* left = mul->child1().node(); - Node* right = mul->child2().node(); - - return Node::shouldSpeculateInt32ForArithmetic(left, right) - && mul->canSpeculateInt32(); + return Node::shouldSpeculateInt32OrBooleanForArithmetic(left, right) + && node->canSpeculateInt32(node->sourceFor(pass)); } - bool mulShouldSpeculateMachineInt(Node* mul) + bool binaryArithShouldSpeculateMachineInt(Node* node, PredictionPass pass) { - ASSERT(mul->op() == ArithMul); - if (!enableInt52()) return false; - Node* left = mul->child1().node(); - Node* right = mul->child2().node(); + Node* left = node->child1().node(); + Node* right = node->child2().node(); return Node::shouldSpeculateMachineInt(left, right) - && mul->canSpeculateInt52() - && !hasExitSite(mul, Int52Overflow); + && node->canSpeculateInt52(pass) + && !hasExitSite(node, Int52Overflow); } - bool negateShouldSpeculateInt32(Node* negate) + bool unaryArithShouldSpeculateInt32(Node* node, PredictionPass pass) { - ASSERT(negate->op() == ArithNegate); - return negate->child1()->shouldSpeculateInt32ForArithmetic() && negate->canSpeculateInt32(); + return node->child1()->shouldSpeculateInt32OrBooleanForArithmetic() + && node->canSpeculateInt32(pass); } - bool 
negateShouldSpeculateMachineInt(Node* negate) + bool unaryArithShouldSpeculateMachineInt(Node* node, PredictionPass pass) { - ASSERT(negate->op() == ArithNegate); if (!enableInt52()) return false; - return negate->child1()->shouldSpeculateMachineInt() - && !hasExitSite(negate, Int52Overflow) - && negate->canSpeculateInt52(); + return node->child1()->shouldSpeculateMachineInt() + && node->canSpeculateInt52(pass) + && !hasExitSite(node, Int52Overflow); } - - VirtualRegister bytecodeRegisterForArgument(CodeOrigin codeOrigin, int argument) + + bool canOptimizeStringObjectAccess(const CodeOrigin&); + + bool roundShouldSpeculateInt32(Node* arithRound, PredictionPass pass) { - return VirtualRegister( - codeOrigin.inlineCallFrame->stackOffset + - baselineCodeBlockFor(codeOrigin)->argumentIndexAfterCapture(argument)); + ASSERT(arithRound->op() == ArithRound || arithRound->op() == ArithFloor || arithRound->op() == ArithCeil); + return arithRound->canSpeculateInt32(pass) && !hasExitSite(arithRound->origin.semantic, Overflow) && !hasExitSite(arithRound->origin.semantic, NegativeZero); } - // Helper methods to check nodes for constants. - bool isConstant(Node* node) - { - return node->hasConstant(); - } - bool isJSConstant(Node* node) - { - return node->hasConstant(); - } - bool isInt32Constant(Node* node) - { - return node->isInt32Constant(m_codeBlock); - } - bool isDoubleConstant(Node* node) - { - return node->isDoubleConstant(m_codeBlock); - } - bool isNumberConstant(Node* node) - { - return node->isNumberConstant(m_codeBlock); - } - bool isBooleanConstant(Node* node) - { - return node->isBooleanConstant(m_codeBlock); - } - bool isCellConstant(Node* node) - { - if (!isJSConstant(node)) - return false; - JSValue value = valueOfJSConstant(node); - return value.isCell() && !!value; - } - bool isFunctionConstant(Node* node) - { - if (!isJSConstant(node)) - return false; - if (!getJSFunction(valueOfJSConstant(node))) - return false; - return true; - } - bool isInternalFunctionConstant(Node* node) - { - if (!isJSConstant(node)) - return false; - JSValue value = valueOfJSConstant(node); - if (!value.isCell() || !value) - return false; - JSCell* cell = value.asCell(); - if (!cell->inherits(InternalFunction::info())) - return false; - return true; - } - // Helper methods get constant values from nodes. 
- JSValue valueOfJSConstant(Node* node) - { - return node->valueOfJSConstant(m_codeBlock); - } - int32_t valueOfInt32Constant(Node* node) - { - return valueOfJSConstant(node).asInt32(); - } - double valueOfNumberConstant(Node* node) - { - return valueOfJSConstant(node).asNumber(); - } - bool valueOfBooleanConstant(Node* node) - { - return valueOfJSConstant(node).asBoolean(); - } - JSFunction* valueOfFunctionConstant(Node* node) - { - JSCell* function = getJSFunction(valueOfJSConstant(node)); - ASSERT(function); - return jsCast<JSFunction*>(function); - } - static const char *opName(NodeType); StructureSet* addStructureSet(const StructureSet& structureSet) { - ASSERT(structureSet.size()); + for (Structure* structure : structureSet) + registerStructure(structure); m_structureSet.append(structureSet); return &m_structureSet.last(); } - StructureTransitionData* addStructureTransitionData(const StructureTransitionData& structureTransitionData) - { - m_structureTransitionData.append(structureTransitionData); - return &m_structureTransitionData.last(); - } - JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin) { return m_codeBlock->globalObjectFor(codeOrigin); @@ -406,9 +344,9 @@ public: ScriptExecutable* executableFor(InlineCallFrame* inlineCallFrame) { if (!inlineCallFrame) - return m_codeBlock->ownerExecutable(); + return m_codeBlock->ownerScriptExecutable(); - return inlineCallFrame->executable.get(); + return inlineCallFrame->baselineCodeBlock->ownerScriptExecutable(); } ScriptExecutable* executableFor(const CodeOrigin& codeOrigin) @@ -432,7 +370,7 @@ public: { if (!codeOrigin.inlineCallFrame) return m_codeBlock->isStrictMode(); - return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->isStrictMode(); + return codeOrigin.inlineCallFrame->isStrictMode(); } ECMAMode ecmaModeFor(CodeOrigin codeOrigin) @@ -442,8 +380,7 @@ public: bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin& codeOrigin) { - return m_plan.watchpoints.isStillValid( - globalObjectFor(codeOrigin)->masqueradesAsUndefinedWatchpoint()); + return globalObjectFor(codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid(); } bool hasGlobalExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind) @@ -458,128 +395,11 @@ public: bool hasExitSite(Node* node, ExitKind exitKind) { - return hasExitSite(node->codeOrigin, exitKind); - } - - VirtualRegister argumentsRegisterFor(InlineCallFrame* inlineCallFrame) - { - if (!inlineCallFrame) - return m_profiledBlock->argumentsRegister(); - - return VirtualRegister(baselineCodeBlockForInlineCallFrame( - inlineCallFrame)->argumentsRegister().offset() + - inlineCallFrame->stackOffset); - } - - VirtualRegister argumentsRegisterFor(const CodeOrigin& codeOrigin) - { - return argumentsRegisterFor(codeOrigin.inlineCallFrame); - } - - VirtualRegister machineArgumentsRegisterFor(InlineCallFrame* inlineCallFrame) - { - if (!inlineCallFrame) - return m_codeBlock->argumentsRegister(); - - return inlineCallFrame->argumentsRegister; - } - - VirtualRegister machineArgumentsRegisterFor(const CodeOrigin& codeOrigin) - { - return machineArgumentsRegisterFor(codeOrigin.inlineCallFrame); - } - - VirtualRegister uncheckedArgumentsRegisterFor(InlineCallFrame* inlineCallFrame) - { - if (!inlineCallFrame) - return m_profiledBlock->uncheckedArgumentsRegister(); - - CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame); - if (!codeBlock->usesArguments()) - return VirtualRegister(); - - return VirtualRegister(codeBlock->argumentsRegister().offset() + - 
inlineCallFrame->stackOffset); - } - - VirtualRegister uncheckedArgumentsRegisterFor(const CodeOrigin& codeOrigin) - { - return uncheckedArgumentsRegisterFor(codeOrigin.inlineCallFrame); - } - - VirtualRegister activationRegister() - { - return m_profiledBlock->activationRegister(); - } - - VirtualRegister uncheckedActivationRegister() - { - return m_profiledBlock->uncheckedActivationRegister(); - } - - VirtualRegister machineActivationRegister() - { - return m_profiledBlock->activationRegister(); - } - - VirtualRegister uncheckedMachineActivationRegister() - { - return m_profiledBlock->uncheckedActivationRegister(); - } - - ValueProfile* valueProfileFor(Node* node) - { - if (!node) - return 0; - - CodeBlock* profiledBlock = baselineCodeBlockFor(node->codeOrigin); - - if (node->op() == GetArgument) - return profiledBlock->valueProfileForArgument(node->local().toArgument()); - - if (node->hasLocal(*this)) { - if (m_form == SSA) - return 0; - if (!node->local().isArgument()) - return 0; - int argument = node->local().toArgument(); - if (node->variableAccessData() != m_arguments[argument]->variableAccessData()) - return 0; - return profiledBlock->valueProfileForArgument(argument); - } - - if (node->hasHeapPrediction()) - return profiledBlock->valueProfileForBytecodeOffset(node->codeOrigin.bytecodeIndex); - - return 0; - } - - MethodOfGettingAValueProfile methodOfGettingAValueProfileFor(Node* node) - { - if (!node) - return MethodOfGettingAValueProfile(); - - CodeBlock* profiledBlock = baselineCodeBlockFor(node->codeOrigin); - - if (node->op() == GetLocal) { - return MethodOfGettingAValueProfile::fromLazyOperand( - profiledBlock, - LazyOperandValueProfileKey( - node->codeOrigin.bytecodeIndex, node->local())); - } - - return MethodOfGettingAValueProfile(valueProfileFor(node)); - } - - bool needsActivation() const - { - return m_codeBlock->needsFullScopeChain() && m_codeBlock->codeType() != GlobalCode; + return hasExitSite(node->origin.semantic, exitKind); } - bool usesArguments() const - { - return m_codeBlock->usesArguments(); - } + ValueProfile* valueProfileFor(Node*); + MethodOfGettingAValueProfile methodOfGettingAValueProfileFor(Node*); BlockIndex numBlocks() const { return m_blocks.size(); } BasicBlock* block(BlockIndex blockIndex) const { return m_blocks[blockIndex].get(); } @@ -593,7 +413,7 @@ public: void killBlock(BlockIndex blockIndex) { - m_blocks[blockIndex].clear(); + m_blocks[blockIndex] = nullptr; } void killBlock(BasicBlock* basicBlock) @@ -605,76 +425,10 @@ public: void killUnreachableBlocks(); - bool isPredictedNumerical(Node* node) - { - return isNumerical(node->child1().useKind()) && isNumerical(node->child2().useKind()); - } - - // Note that a 'true' return does not actually mean that the ByVal access clobbers nothing. - // It really means that it will not clobber the entire world. It's still up to you to - // carefully consider things like: - // - PutByVal definitely changes the array it stores to, and may even change its length. - // - PutByOffset definitely changes the object it stores to. - // - and so on. 
- bool byValIsPure(Node* node) - { - switch (node->arrayMode().type()) { - case Array::Generic: - return false; - case Array::Int32: - case Array::Double: - case Array::Contiguous: - case Array::ArrayStorage: - return !node->arrayMode().isOutOfBounds(); - case Array::SlowPutArrayStorage: - return !node->arrayMode().mayStoreToHole(); - case Array::String: - return node->op() == GetByVal && node->arrayMode().isInBounds(); -#if USE(JSVALUE32_64) - case Array::Arguments: - if (node->op() == GetByVal) - return true; - return false; -#endif // USE(JSVALUE32_64) - default: - return true; - } - } - - bool clobbersWorld(Node* node) - { - if (node->flags() & NodeClobbersWorld) - return true; - if (!(node->flags() & NodeMightClobber)) - return false; - switch (node->op()) { - case GetByVal: - case PutByValDirect: - case PutByVal: - case PutByValAlias: - return !byValIsPure(node); - case ToString: - switch (node->child1().useKind()) { - case StringObjectUse: - case StringOrStringObjectUse: - return false; - case CellUse: - case UntypedUse: - return true; - default: - RELEASE_ASSERT_NOT_REACHED(); - return true; - } - default: - RELEASE_ASSERT_NOT_REACHED(); - return true; // If by some oddity we hit this case in release build it's safer to have CSE assume the worst. - } - } - void determineReachability(); void resetReachability(); - void resetExitStates(); + void computeRefCounts(); unsigned varArgNumChildren(Node* node) { @@ -702,7 +456,7 @@ public: return node->children.child(index); } - void voteNode(Node* node, unsigned ballot) + void voteNode(Node* node, unsigned ballot, float weight = 1) { switch (node->op()) { case ValueToInt32: @@ -714,35 +468,35 @@ public: } if (node->op() == GetLocal) - node->variableAccessData()->vote(ballot); + node->variableAccessData()->vote(ballot, weight); } - void voteNode(Edge edge, unsigned ballot) + void voteNode(Edge edge, unsigned ballot, float weight = 1) { - voteNode(edge.node(), ballot); + voteNode(edge.node(), ballot, weight); } - void voteChildren(Node* node, unsigned ballot) + void voteChildren(Node* node, unsigned ballot, float weight = 1) { if (node->flags() & NodeHasVarArgs) { for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) { if (!!m_varArgChildren[childIdx]) - voteNode(m_varArgChildren[childIdx], ballot); + voteNode(m_varArgChildren[childIdx], ballot, weight); } return; } if (!node->child1()) return; - voteNode(node->child1(), ballot); + voteNode(node->child1(), ballot, weight); if (!node->child2()) return; - voteNode(node->child2(), ballot); + voteNode(node->child2(), ballot, weight); if (!node->child3()) return; - voteNode(node->child3(), ballot); + voteNode(node->child3(), ballot, weight); } template<typename T> // T = Node* or Edge @@ -777,32 +531,282 @@ public: void invalidateCFG(); + void clearFlagsOnAllNodes(NodeFlags); + void clearReplacements(); + void clearEpochs(); void initializeNodeOwners(); - void getBlocksInDepthFirstOrder(Vector<BasicBlock*>& result); + BlockList blocksInPreOrder(); + BlockList blocksInPostOrder(); + + class NaturalBlockIterable { + public: + NaturalBlockIterable() + : m_graph(nullptr) + { + } + + NaturalBlockIterable(Graph& graph) + : m_graph(&graph) + { + } + + class iterator { + public: + iterator() + : m_graph(nullptr) + , m_index(0) + { + } + + iterator(Graph& graph, BlockIndex index) + : m_graph(&graph) + , m_index(findNext(index)) + { + } + + BasicBlock *operator*() + { + return m_graph->block(m_index); + } + + iterator& operator++() + { + m_index = 
findNext(m_index + 1); + return *this; + } + + bool operator==(const iterator& other) const + { + return m_index == other.m_index; + } + + bool operator!=(const iterator& other) const + { + return !(*this == other); + } + + private: + BlockIndex findNext(BlockIndex index) + { + while (index < m_graph->numBlocks() && !m_graph->block(index)) + index++; + return index; + } + + Graph* m_graph; + BlockIndex m_index; + }; + + iterator begin() + { + return iterator(*m_graph, 0); + } + + iterator end() + { + return iterator(*m_graph, m_graph->numBlocks()); + } + + private: + Graph* m_graph; + }; + + NaturalBlockIterable blocksInNaturalOrder() + { + return NaturalBlockIterable(*this); + } + + template<typename ChildFunctor> + void doToChildrenWithNode(Node* node, const ChildFunctor& functor) + { + DFG_NODE_DO_TO_CHILDREN(*this, node, functor); + } + + template<typename ChildFunctor> + void doToChildren(Node* node, const ChildFunctor& functor) + { + doToChildrenWithNode( + node, + [&functor] (Node*, Edge& edge) { + functor(edge); + }); + } + + bool uses(Node* node, Node* child) + { + bool result = false; + doToChildren(node, [&] (Edge edge) { result |= edge == child; }); + return result; + } Profiler::Compilation* compilation() { return m_plan.compilation.get(); } DesiredIdentifiers& identifiers() { return m_plan.identifiers; } DesiredWatchpoints& watchpoints() { return m_plan.watchpoints; } - DesiredStructureChains& chains() { return m_plan.chains; } + + // Returns false if the key is already invalid or unwatchable. If this is a Presence condition, + // this also makes it cheap to query if the condition holds. Also makes sure that the GC knows + // what's going on. + bool watchCondition(const ObjectPropertyCondition&); + + // Checks if it's known that loading from the given object at the given offset is fine. This is + // computed by tracking which conditions we track with watchCondition(). + bool isSafeToLoad(JSObject* base, PropertyOffset); + + void registerInferredType(const InferredType::Descriptor& type) + { + if (type.structure()) + registerStructure(type.structure()); + } + + // Tells us what inferred type we are able to prove the property to have now and in the future. + InferredType::Descriptor inferredTypeFor(const PropertyTypeKey&); + InferredType::Descriptor inferredTypeForProperty(Structure* structure, UniquedStringImpl* uid) + { + return inferredTypeFor(PropertyTypeKey(structure, uid)); + } + + AbstractValue inferredValueForProperty( + const StructureSet& base, UniquedStringImpl* uid, StructureClobberState = StructuresAreWatched); + + // This uses either constant property inference or property type inference to derive a good abstract + // value for some property accessed with the given abstract value base. + AbstractValue inferredValueForProperty( + const AbstractValue& base, UniquedStringImpl* uid, PropertyOffset, StructureClobberState); FullBytecodeLiveness& livenessFor(CodeBlock*); FullBytecodeLiveness& livenessFor(InlineCallFrame*); + + // Quickly query if a single local is live at the given point. This is faster than calling + // forAllLiveInBytecode() if you will only query one local. But, if you want to know all of the + // locals live, then calling this for each local is much slower than forAllLiveInBytecode(). bool isLiveInBytecode(VirtualRegister, CodeOrigin); + // Quickly get all of the non-argument locals live at the given point. This doesn't give you + // any arguments because those are all presumed live. You can call forAllLiveInBytecode() to + // also get the arguments. 
This is much faster than calling isLiveInBytecode() for each local. + template<typename Functor> + void forAllLocalsLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor) + { + // Support for not redundantly reporting arguments. Necessary because in case of a varargs + // call, only the callee knows that arguments are live while in the case of a non-varargs + // call, both callee and caller will see the variables live. + VirtualRegister exclusionStart; + VirtualRegister exclusionEnd; + + CodeOrigin* codeOriginPtr = &codeOrigin; + + for (;;) { + InlineCallFrame* inlineCallFrame = codeOriginPtr->inlineCallFrame; + VirtualRegister stackOffset(inlineCallFrame ? inlineCallFrame->stackOffset : 0); + + if (inlineCallFrame) { + if (inlineCallFrame->isClosureCall) + functor(stackOffset + JSStack::Callee); + if (inlineCallFrame->isVarargs()) + functor(stackOffset + JSStack::ArgumentCount); + } + + CodeBlock* codeBlock = baselineCodeBlockFor(inlineCallFrame); + FullBytecodeLiveness& fullLiveness = livenessFor(codeBlock); + const FastBitVector& liveness = fullLiveness.getLiveness(codeOriginPtr->bytecodeIndex); + for (unsigned relativeLocal = codeBlock->m_numCalleeLocals; relativeLocal--;) { + VirtualRegister reg = stackOffset + virtualRegisterForLocal(relativeLocal); + + // Don't report if our callee already reported. + if (reg >= exclusionStart && reg < exclusionEnd) + continue; + + if (liveness.get(relativeLocal)) + functor(reg); + } + + if (!inlineCallFrame) + break; + + // Arguments are always live. This would be redundant if it wasn't for our + // op_call_varargs inlining. See the comment above. + exclusionStart = stackOffset + CallFrame::argumentOffsetIncludingThis(0); + exclusionEnd = stackOffset + CallFrame::argumentOffsetIncludingThis(inlineCallFrame->arguments.size()); + + // We will always have a "this" argument and exclusionStart should be a smaller stack + // offset than exclusionEnd. + ASSERT(exclusionStart < exclusionEnd); + + for (VirtualRegister reg = exclusionStart; reg < exclusionEnd; reg += 1) + functor(reg); + + codeOriginPtr = inlineCallFrame->getCallerSkippingTailCalls(); + + // The first inline call frame could be an inline tail call + if (!codeOriginPtr) + break; + } + } + + // Get a BitVector of all of the non-argument locals live right now. This is mostly useful if + // you want to compare two sets of live locals from two different CodeOrigins. + BitVector localsLiveInBytecode(CodeOrigin); + + // Tells you all of the arguments and locals live at the given CodeOrigin. This is a small + // extension to forAllLocalsLiveInBytecode(), since all arguments are always presumed live. + template<typename Functor> + void forAllLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor) + { + forAllLocalsLiveInBytecode(codeOrigin, functor); + + // Report all arguments as being live. 
+ for (unsigned argument = block(0)->variablesAtHead.numberOfArguments(); argument--;) + functor(virtualRegisterForArgument(argument)); + } + + BytecodeKills& killsFor(CodeBlock*); + BytecodeKills& killsFor(InlineCallFrame*); + unsigned frameRegisterCount(); + unsigned stackPointerOffset(); unsigned requiredRegisterCountForExit(); unsigned requiredRegisterCountForExecutionAndExit(); - JSActivation* tryGetActivation(Node*); - WriteBarrierBase<Unknown>* tryGetRegisters(Node*); + JSValue tryGetConstantProperty(JSValue base, const StructureSet&, PropertyOffset); + JSValue tryGetConstantProperty(JSValue base, Structure*, PropertyOffset); + JSValue tryGetConstantProperty(JSValue base, const StructureAbstractValue&, PropertyOffset); + JSValue tryGetConstantProperty(const AbstractValue&, PropertyOffset); + + JSValue tryGetConstantClosureVar(JSValue base, ScopeOffset); + JSValue tryGetConstantClosureVar(const AbstractValue&, ScopeOffset); + JSValue tryGetConstantClosureVar(Node*, ScopeOffset); - JSArrayBufferView* tryGetFoldableView(Node*); - JSArrayBufferView* tryGetFoldableView(Node*, ArrayMode); - JSArrayBufferView* tryGetFoldableViewForChild1(Node*); + JSArrayBufferView* tryGetFoldableView(JSValue); + JSArrayBufferView* tryGetFoldableView(JSValue, ArrayMode arrayMode); + void registerFrozenValues(); + + virtual void visitChildren(SlotVisitor&) override; + + NO_RETURN_DUE_TO_CRASH void handleAssertionFailure( + std::nullptr_t, const char* file, int line, const char* function, + const char* assertion); + NO_RETURN_DUE_TO_CRASH void handleAssertionFailure( + Node*, const char* file, int line, const char* function, + const char* assertion); + NO_RETURN_DUE_TO_CRASH void handleAssertionFailure( + BasicBlock*, const char* file, int line, const char* function, + const char* assertion); + + bool hasDebuggerEnabled() const { return m_hasDebuggerEnabled; } + + void ensureDominators(); + void ensurePrePostNumbering(); + void ensureNaturalLoops(); + + // This function only makes sense to call after bytecode parsing + // because it queries the m_hasExceptionHandlers boolean whose value + // is only fully determined after bytecode parsing. + bool willCatchExceptionInMachineFrame(CodeOrigin, CodeOrigin& opCatchOriginOut, HandlerInfo*& catchHandlerOut); + VM& m_vm; Plan& m_plan; CodeBlock* m_codeBlock; @@ -810,54 +814,110 @@ public: NodeAllocator& m_allocator; - Operands<AbstractValue> m_mustHandleAbstractValues; - Vector< RefPtr<BasicBlock> , 8> m_blocks; Vector<Edge, 16> m_varArgChildren; - Vector<StorageAccessData> m_storageAccessData; + + HashMap<EncodedJSValue, FrozenValue*, EncodedJSValueHash, EncodedJSValueHashTraits> m_frozenValueMap; + Bag<FrozenValue> m_frozenValues; + + Vector<uint32_t> m_uint32ValuesInUse; + + Bag<StorageAccessData> m_storageAccessData; + + // In CPS, this is all of the SetArgument nodes for the arguments in the machine code block + // that survived DCE. All of them except maybe "this" will survive DCE, because of the Flush + // nodes. + // + // In SSA, this is all of the GetStack nodes for the arguments in the machine code block that + // may have some speculation in the prologue and survived DCE. Note that to get the speculation + // for an argument in SSA, you must use m_argumentFormats, since we still have to speculate + // even if the argument got killed. For example: + // + // function foo(x) { + // var tmp = x + 1; + // } + // + // Assume that x is always int during profiling. The ArithAdd for "x + 1" will be dead and will + // have a proven check for the edge to "x". 
So, we will not insert a Check node and we will + kill the GetStack for "x". But, we must do the int check in the prologue, because that's the + thing we used to allow DCE of ArithAdd. Otherwise the add could be impure: + // + // var o = { + // valueOf: function() { do side effects; } + // }; + // foo(o); + // + // If we DCE the ArithAdd and we remove the int check on x, then this won't do the side + // effects. Vector<Node*, 8> m_arguments; + + // In CPS, this is meaningless. In SSA, this is the argument speculation that we've locked in. + Vector<FlushFormat> m_argumentFormats; + SegmentedVector<VariableAccessData, 16> m_variableAccessData; SegmentedVector<ArgumentPosition, 8> m_argumentPositions; SegmentedVector<StructureSet, 16> m_structureSet; - SegmentedVector<StructureTransitionData, 8> m_structureTransitionData; + Bag<Transition> m_transitions; SegmentedVector<NewArrayBufferData, 4> m_newArrayBufferData; - SegmentedVector<SwitchData, 4> m_switchData; + Bag<BranchData> m_branchData; + Bag<SwitchData> m_switchData; + Bag<MultiGetByOffsetData> m_multiGetByOffsetData; + Bag<MultiPutByOffsetData> m_multiPutByOffsetData; + Bag<ObjectMaterializationData> m_objectMaterializationData; + Bag<CallVarargsData> m_callVarargsData; + Bag<LoadVarargsData> m_loadVarargsData; + Bag<StackAccessData> m_stackAccessData; Vector<InlineVariableData, 4> m_inlineVariableData; - OwnPtr<InlineCallFrameSet> m_inlineCallFrames; HashMap<CodeBlock*, std::unique_ptr<FullBytecodeLiveness>> m_bytecodeLiveness; - bool m_hasArguments; - HashSet<ExecutableBase*> m_executablesWhoseArgumentsEscaped; - BitVector m_lazyVars; - Dominators m_dominators; - NaturalLoops m_naturalLoops; + HashMap<CodeBlock*, std::unique_ptr<BytecodeKills>> m_bytecodeKills; + HashSet<std::pair<JSObject*, PropertyOffset>> m_safeToLoad; + HashMap<PropertyTypeKey, InferredType::Descriptor> m_inferredTypes; + std::unique_ptr<Dominators> m_dominators; + std::unique_ptr<PrePostNumbering> m_prePostNumbering; + std::unique_ptr<NaturalLoops> m_naturalLoops; + std::unique_ptr<CFG> m_cfg; unsigned m_localVars; unsigned m_nextMachineLocal; unsigned m_parameterSlots; - int m_machineCaptureStart; - std::unique_ptr<SlowArgument[]> m_slowArguments; + +#if USE(JSVALUE32_64) + std::unordered_map<int64_t, double*> m_doubleConstantsMap; + std::unique_ptr<Bag<double>> m_doubleConstants; +#endif OptimizationFixpointState m_fixpointState; + StructureRegistrationState m_structureRegistrationState; GraphForm m_form; UnificationState m_unificationState; + PlanStage m_planStage { PlanStage::Initial }; RefCountState m_refCountState; + bool m_hasDebuggerEnabled; + bool m_hasExceptionHandlers { false }; private: - + + bool isStringPrototypeMethodSane(JSObject* stringPrototype, Structure* stringPrototypeStructure, UniquedStringImpl*); + void handleSuccessor(Vector<BasicBlock*, 16>& worklist, BasicBlock*, BasicBlock* successor); - void addForDepthFirstSort(Vector<BasicBlock*>& result, Vector<BasicBlock*, 16>& worklist, HashSet<BasicBlock*>& seen, BasicBlock*); - AddSpeculationMode addImmediateShouldSpeculateInt32(Node* add, bool variableShouldSpeculateInt32, Node* immediate) + AddSpeculationMode addImmediateShouldSpeculateInt32(Node* add, bool variableShouldSpeculateInt32, Node* operand, Node* immediate, RareCaseProfilingSource source) { ASSERT(immediate->hasConstant()); - JSValue immediateValue = immediate->valueOfJSConstant(m_codeBlock); - if (!immediateValue.isNumber()) + JSValue immediateValue = immediate->asJSValue(); + if (!immediateValue.isNumber() && 
!immediateValue.isBoolean()) return DontSpeculateInt32; if (!variableShouldSpeculateInt32) return DontSpeculateInt32; + + // Integer constants can be typed Double if they are written like a double in the source code (e.g. 42.0). + // In that case, we stay conservative unless the other operand was explicitly typed as integer. + NodeFlags operandResultType = operand->result(); + if (operandResultType != NodeResultInt32 && immediateValue.isDouble()) + return DontSpeculateInt32; - if (immediateValue.isInt32()) - return add->canSpeculateInt32() ? SpeculateInt32 : DontSpeculateInt32; + if (immediateValue.isBoolean() || jsNumber(immediateValue.asNumber()).isInt32()) + return add->canSpeculateInt32(source) ? SpeculateInt32 : DontSpeculateInt32; double doubleImmediate = immediateValue.asDouble(); const double twoToThe48 = 281474976710656.0; @@ -866,62 +926,8 @@ private: return bytecodeCanTruncateInteger(add->arithNodeFlags()) ? SpeculateInt32AndTruncateConstants : DontSpeculateInt32; } - - bool mulImmediateShouldSpeculateInt32(Node* mul, Node* variable, Node* immediate) - { - ASSERT(immediate->hasConstant()); - - JSValue immediateValue = immediate->valueOfJSConstant(m_codeBlock); - if (!immediateValue.isInt32()) - return false; - - if (!variable->shouldSpeculateInt32ForArithmetic()) - return false; - - int32_t intImmediate = immediateValue.asInt32(); - // Doubles have a 53 bit mantissa so we expect a multiplication of 2^31 (the highest - // magnitude possible int32 value) and any value less than 2^22 to not result in any - // rounding in a double multiplication - hence it will be equivalent to an integer - // multiplication, if we are doing int32 truncation afterwards (which is what - // canSpeculateInt32() implies). - const int32_t twoToThe22 = 1 << 22; - if (intImmediate <= -twoToThe22 || intImmediate >= twoToThe22) - return mul->canSpeculateInt32() && !nodeMayOverflow(mul->arithNodeFlags()); - - return mul->canSpeculateInt32(); - } }; -#define DFG_NODE_DO_TO_CHILDREN(graph, node, thingToDo) do { \ - Node* _node = (node); \ - if (_node->flags() & NodeHasVarArgs) { \ - for (unsigned _childIdx = _node->firstChild(); \ - _childIdx < _node->firstChild() + _node->numChildren(); \ - _childIdx++) { \ - if (!!(graph).m_varArgChildren[_childIdx]) \ - thingToDo(_node, (graph).m_varArgChildren[_childIdx]); \ - } \ - } else { \ - if (!_node->child1()) { \ - ASSERT( \ - !_node->child2() \ - && !_node->child3()); \ - break; \ - } \ - thingToDo(_node, _node->child1()); \ - \ - if (!_node->child2()) { \ - ASSERT(!_node->child3()); \ - break; \ - } \ - thingToDo(_node, _node->child2()); \ - \ - if (!_node->child3()) \ - break; \ - thingToDo(_node, _node->child3()); \ - } \ - } while (false) - } } // namespace JSC::DFG #endif diff --git a/Source/JavaScriptCore/dfg/DFGGraphSafepoint.cpp b/Source/JavaScriptCore/dfg/DFGGraphSafepoint.cpp new file mode 100644 index 000000000..e021e99bc --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGGraphSafepoint.cpp @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGGraphSafepoint.h" + +#if ENABLE(DFG_JIT) + +#include "DFGGraph.h" +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +GraphSafepoint::GraphSafepoint(Graph& graph, Safepoint::Result& result) + : m_safepoint(graph.m_plan, result) +{ + m_safepoint.add(&graph); + m_safepoint.begin(); +} + +GraphSafepoint::~GraphSafepoint() { } + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGFlushLivenessAnalysisPhase.h b/Source/JavaScriptCore/dfg/DFGGraphSafepoint.h index 4d7b3c429..1759b6e12 100644 --- a/Source/JavaScriptCore/dfg/DFGFlushLivenessAnalysisPhase.h +++ b/Source/JavaScriptCore/dfg/DFGGraphSafepoint.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,26 +23,29 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef DFGFlushLivenessAnalysisPhase_h -#define DFGFlushLivenessAnalysisPhase_h - -#include <wtf/Platform.h> +#ifndef DFGGraphSafepoint_h +#define DFGGraphSafepoint_h #if ENABLE(DFG_JIT) -#include "DFGCommon.h" +#include "DFGSafepoint.h" namespace JSC { namespace DFG { class Graph; -// Computes BasicBlock::ssa->flushFormatAtHead - -bool performFlushLivenessAnalysis(Graph&); +class GraphSafepoint { +public: + GraphSafepoint(Graph&, Safepoint::Result&); + ~GraphSafepoint(); + +private: + Safepoint m_safepoint; +}; } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) -#endif // DFGFlushLivenessAnalysisPhase_h +#endif // DFGGraphSafepoint_h diff --git a/Source/JavaScriptCore/dfg/DFGHeapLocation.cpp b/Source/JavaScriptCore/dfg/DFGHeapLocation.cpp new file mode 100644 index 000000000..55d84cf56 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGHeapLocation.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGHeapLocation.h" + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +void HeapLocation::dump(PrintStream& out) const +{ + out.print(m_kind, ":", m_heap); + + if (!m_base) + return; + + out.print("[", m_base); + if (m_index) + out.print(", ", m_index); + out.print("]"); +} + +} } // namespace JSC::DFG + +namespace WTF { + +using namespace JSC::DFG; + +void printInternal(PrintStream& out, LocationKind kind) +{ + switch (kind) { + case InvalidLocationKind: + out.print("InvalidLocationKind"); + return; + + case InvalidationPointLoc: + out.print("InvalidationPointLoc"); + return; + + case IsObjectOrNullLoc: + out.print("IsObjectOrNullLoc"); + return; + + case IsFunctionLoc: + out.print("IsFunctionLoc"); + return; + + case GetterLoc: + out.print("GetterLoc"); + return; + + case SetterLoc: + out.print("SetterLoc"); + return; + + case StackLoc: + out.print("StackLoc"); + return; + + case StackPayloadLoc: + out.print("StackPayloadLoc"); + return; + + case ArrayLengthLoc: + out.print("ArrayLengthLoc"); + return; + + case ButterflyLoc: + out.print("ButterflyLoc"); + return; + + case ButterflyReadOnlyLoc: + out.print("ButterflyReadOnlyLoc"); + return; + + case CheckTypeInfoFlagsLoc: + out.print("CheckTypeInfoFlagsLoc"); + return; + + case OverridesHasInstanceLoc: + out.print("OverridesHasInstanceLoc"); + return; + + case ClosureVariableLoc: + out.print("ClosureVariableLoc"); + return; + + case DirectArgumentsLoc: + out.print("DirectArgumentsLoc"); + return; + + case GlobalVariableLoc: + out.print("GlobalVariableLoc"); + return; + + case HasIndexedPropertyLoc: + out.print("HasIndexedPropertyLoc"); + return; + + case IndexedPropertyLoc: + out.print("IndexedPropertyLoc"); + return; + + case IndexedPropertyStorageLoc: + out.print("IndexedPropertyStorageLoc"); + return; + + case InstanceOfLoc: + out.print("InstanceOfLoc"); + return; + + case NamedPropertyLoc: + out.print("NamedPropertyLoc"); + return; + + case TypedArrayByteOffsetLoc: + out.print("TypedArrayByteOffsetLoc"); + return; + + case VarInjectionWatchpointLoc: + out.print("VarInjectionWatchpointLoc"); + return; + + case StructureLoc: + out.print("StructureLoc"); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGHeapLocation.h b/Source/JavaScriptCore/dfg/DFGHeapLocation.h new file mode 100644 index 000000000..b85fa2604 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGHeapLocation.h @@ -0,0 +1,166 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1.
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGHeapLocation_h +#define DFGHeapLocation_h + +#if ENABLE(DFG_JIT) + +#include "DFGAbstractHeap.h" +#include "DFGLazyNode.h" +#include "DFGNode.h" + +namespace JSC { namespace DFG { + +enum LocationKind { + InvalidLocationKind, + + ArrayLengthLoc, + ButterflyLoc, + ButterflyReadOnlyLoc, + CheckTypeInfoFlagsLoc, + OverridesHasInstanceLoc, + ClosureVariableLoc, + DirectArgumentsLoc, + GetterLoc, + GlobalVariableLoc, + HasIndexedPropertyLoc, + IndexedPropertyLoc, + IndexedPropertyStorageLoc, + InstanceOfLoc, + InvalidationPointLoc, + IsFunctionLoc, + IsObjectOrNullLoc, + NamedPropertyLoc, + SetterLoc, + StructureLoc, + TypedArrayByteOffsetLoc, + VarInjectionWatchpointLoc, + StackLoc, + StackPayloadLoc +}; + +class HeapLocation { +public: + HeapLocation( + LocationKind kind = InvalidLocationKind, + AbstractHeap heap = AbstractHeap(), + Node* base = nullptr, LazyNode index = LazyNode()) + : m_kind(kind) + , m_heap(heap) + , m_base(base) + , m_index(index) + { + ASSERT((kind == InvalidLocationKind) == !heap); + ASSERT(!!m_heap || !m_base); + ASSERT(m_base || !m_index); + } + + HeapLocation(LocationKind kind, AbstractHeap heap, Node* base, Node* index) + : HeapLocation(kind, heap, base, LazyNode(index)) + { + } + + HeapLocation(LocationKind kind, AbstractHeap heap, Edge base, Edge index = Edge()) + : HeapLocation(kind, heap, base.node(), index.node()) + { + } + + HeapLocation(WTF::HashTableDeletedValueType) + : m_kind(InvalidLocationKind) + , m_heap(WTF::HashTableDeletedValue) + , m_base(nullptr) + , m_index(nullptr) + { + } + + bool operator!() const { return !m_heap; } + + LocationKind kind() const { return m_kind; } + AbstractHeap heap() const { return m_heap; } + Node* base() const { return m_base; } + LazyNode index() const { return m_index; } + + unsigned hash() const + { + return m_kind + m_heap.hash() + m_index.hash() + m_kind; + } + + bool operator==(const HeapLocation& other) const + { + return m_kind == other.m_kind + && m_heap == other.m_heap + && m_base == other.m_base + && m_index == other.m_index; + } + + bool isHashTableDeletedValue() const + { + return m_heap.isHashTableDeletedValue(); + } + + void dump(PrintStream& out) const; + +private: + LocationKind m_kind; + AbstractHeap m_heap; + Node* m_base; + LazyNode m_index; +}; + +struct HeapLocationHash { + static unsigned hash(const HeapLocation& key) { return key.hash(); } + static bool equal(const HeapLocation& a, const 
HeapLocation& b) { return a == b; } + static const bool safeToCompareToEmptyOrDeleted = true; +}; + +} } // namespace JSC::DFG + +namespace WTF { + +void printInternal(PrintStream&, JSC::DFG::LocationKind); + +template<typename T> struct DefaultHash; +template<> struct DefaultHash<JSC::DFG::HeapLocation> { + typedef JSC::DFG::HeapLocationHash Hash; +}; + +template<typename T> struct HashTraits; +template<> struct HashTraits<JSC::DFG::HeapLocation> : SimpleClassHashTraits<JSC::DFG::HeapLocation> { + static const bool emptyValueIsZero = false; +}; + +} // namespace WTF + +namespace JSC { namespace DFG { + +typedef HashMap<HeapLocation, LazyNode> ImpureMap; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGHeapLocation_h + diff --git a/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.cpp b/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.cpp index 468c68f84..e5d88d2d5 100644 --- a/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.cpp +++ b/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,12 +31,14 @@ #include "CodeBlock.h" #include "DFGBasicBlock.h" #include "GetByIdStatus.h" -#include "Operations.h" +#include "JSCInlines.h" #include "PutByIdStatus.h" #include "StringObject.h" namespace JSC { namespace DFG { +static const bool verbose = false; + InPlaceAbstractState::InPlaceAbstractState(Graph& graph) : m_graph(graph) , m_variables(m_graph.m_codeBlock->numParameters(), graph.m_localVars) @@ -58,36 +60,18 @@ void InPlaceAbstractState::beginBasicBlock(BasicBlock* basicBlock) forNode(basicBlock->at(i)).clear(); m_variables = basicBlock->valuesAtHead; - m_haveStructures = false; - for (size_t i = 0; i < m_variables.numberOfArguments(); ++i) { - if (m_variables.argument(i).hasClobberableState()) { - m_haveStructures = true; - break; - } - } - for (size_t i = 0; i < m_variables.numberOfLocals(); ++i) { - if (m_variables.local(i).hasClobberableState()) { - m_haveStructures = true; - break; - } - } if (m_graph.m_form == SSA) { - HashMap<Node*, AbstractValue>::iterator iter = basicBlock->ssa->valuesAtHead.begin(); - HashMap<Node*, AbstractValue>::iterator end = basicBlock->ssa->valuesAtHead.end(); - for (; iter != end; ++iter) { - forNode(iter->key) = iter->value; - if (iter->value.hasClobberableState()) - m_haveStructures = true; - } + for (auto& entry : basicBlock->ssa->valuesAtHead) + forNode(entry.key) = entry.value; } - basicBlock->cfaShouldRevisit = false; basicBlock->cfaHasVisited = true; m_block = basicBlock; m_isValid = true; m_foundConstants = false; m_branchDirection = InvalidBranchDirection; + m_structureClobberState = basicBlock->cfaStructureClobberStateAtHead; } static void setLiveValues(HashMap<Node*, AbstractValue>& values, HashSet<Node*>& live) @@ -106,37 +90,44 @@ void InPlaceAbstractState::initialize() root->cfaShouldRevisit = true; root->cfaHasVisited = false; root->cfaFoundConstants = false; + root->cfaStructureClobberStateAtHead = StructuresAreWatched; + root->cfaStructureClobberStateAtTail = StructuresAreWatched; for (size_t i = 0; i < root->valuesAtHead.numberOfArguments(); ++i) { root->valuesAtTail.argument(i).clear(); - if (m_graph.m_form == SSA) { - root->valuesAtHead.argument(i).makeHeapTop(); - continue; - } - - Node* node = root->variablesAtHead.argument(i); - 
ASSERT(node->op() == SetArgument); - if (!node->variableAccessData()->shouldUnboxIfPossible()) { - root->valuesAtHead.argument(i).makeHeapTop(); - continue; + + FlushFormat format; + if (m_graph.m_form == SSA) + format = m_graph.m_argumentFormats[i]; + else { + Node* node = m_graph.m_arguments[i]; + if (!node) + format = FlushedJSValue; + else { + ASSERT(node->op() == SetArgument); + format = node->variableAccessData()->flushFormat(); + } } - SpeculatedType prediction = - node->variableAccessData()->argumentAwarePrediction(); - if (isInt32Speculation(prediction)) + switch (format) { + case FlushedInt32: root->valuesAtHead.argument(i).setType(SpecInt32); - else if (isBooleanSpeculation(prediction)) + break; + case FlushedBoolean: root->valuesAtHead.argument(i).setType(SpecBoolean); - else if (isCellSpeculation(prediction)) - root->valuesAtHead.argument(i).setType(SpecCell); - else - root->valuesAtHead.argument(i).makeHeapTop(); + break; + case FlushedCell: + root->valuesAtHead.argument(i).setType(m_graph, SpecCell); + break; + case FlushedJSValue: + root->valuesAtHead.argument(i).makeBytecodeTop(); + break; + default: + DFG_CRASH(m_graph, nullptr, "Bad flush format for argument"); + break; + } } for (size_t i = 0; i < root->valuesAtHead.numberOfLocals(); ++i) { - Node* node = root->variablesAtHead.local(i); - if (node && node->variableAccessData()->isCaptured()) - root->valuesAtHead.local(i).makeHeapTop(); - else - root->valuesAtHead.local(i).clear(); + root->valuesAtHead.local(i).clear(); root->valuesAtTail.local(i).clear(); } for (BlockIndex blockIndex = 1 ; blockIndex < m_graph.numBlocks(); ++blockIndex) { @@ -147,6 +138,8 @@ void InPlaceAbstractState::initialize() block->cfaShouldRevisit = false; block->cfaHasVisited = false; block->cfaFoundConstants = false; + block->cfaStructureClobberStateAtHead = StructuresAreWatched; + block->cfaStructureClobberStateAtTail = StructuresAreWatched; for (size_t i = 0; i < block->valuesAtHead.numberOfArguments(); ++i) { block->valuesAtHead.argument(i).clear(); block->valuesAtTail.argument(i).clear(); @@ -155,16 +148,6 @@ void InPlaceAbstractState::initialize() block->valuesAtHead.local(i).clear(); block->valuesAtTail.local(i).clear(); } - if (!block->isOSRTarget) - continue; - if (block->bytecodeBegin != m_graph.m_plan.osrEntryBytecodeIndex) - continue; - for (size_t i = 0; i < m_graph.m_mustHandleAbstractValues.size(); ++i) { - AbstractValue value = m_graph.m_mustHandleAbstractValues[i]; - int operand = m_graph.m_mustHandleAbstractValues.operandForIndex(i); - block->valuesAtHead.operand(operand).merge(value); - } - block->cfaShouldRevisit = true; } if (m_graph.m_form == SSA) { for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { @@ -177,7 +160,7 @@ void InPlaceAbstractState::initialize() } } -bool InPlaceAbstractState::endBasicBlock(MergeMode mergeMode) +bool InPlaceAbstractState::endBasicBlock() { ASSERT(m_block); @@ -192,48 +175,41 @@ bool InPlaceAbstractState::endBasicBlock(MergeMode mergeMode) return false; } - bool changed = false; + bool changed = checkAndSet(block->cfaStructureClobberStateAtTail, m_structureClobberState); - if (mergeMode != DontMerge || !ASSERT_DISABLED) { - switch (m_graph.m_form) { - case ThreadedCPS: { - for (size_t argument = 0; argument < block->variablesAtTail.numberOfArguments(); ++argument) { - AbstractValue& destination = block->valuesAtTail.argument(argument); - changed |= mergeStateAtTail(destination, m_variables.argument(argument), block->variablesAtTail.argument(argument)); - } - - for 
(size_t local = 0; local < block->variablesAtTail.numberOfLocals(); ++local) { - AbstractValue& destination = block->valuesAtTail.local(local); - changed |= mergeStateAtTail(destination, m_variables.local(local), block->variablesAtTail.local(local)); - } - break; + switch (m_graph.m_form) { + case ThreadedCPS: { + for (size_t argument = 0; argument < block->variablesAtTail.numberOfArguments(); ++argument) { + AbstractValue& destination = block->valuesAtTail.argument(argument); + changed |= mergeStateAtTail(destination, m_variables.argument(argument), block->variablesAtTail.argument(argument)); } - - case SSA: { - for (size_t i = 0; i < block->valuesAtTail.size(); ++i) - changed |= block->valuesAtTail[i].merge(m_variables[i]); - - HashSet<Node*>::iterator iter = block->ssa->liveAtTail.begin(); - HashSet<Node*>::iterator end = block->ssa->liveAtTail.end(); - for (; iter != end; ++iter) { - Node* node = *iter; - changed |= block->ssa->valuesAtTail.find(node)->value.merge(forNode(node)); - } - break; + + for (size_t local = 0; local < block->variablesAtTail.numberOfLocals(); ++local) { + AbstractValue& destination = block->valuesAtTail.local(local); + changed |= mergeStateAtTail(destination, m_variables.local(local), block->variablesAtTail.local(local)); } - - default: - RELEASE_ASSERT_NOT_REACHED(); + break; + } + + case SSA: { + for (size_t i = 0; i < block->valuesAtTail.size(); ++i) + changed |= block->valuesAtTail[i].merge(m_variables[i]); + + HashSet<Node*>::iterator iter = block->ssa->liveAtTail.begin(); + HashSet<Node*>::iterator end = block->ssa->liveAtTail.end(); + for (; iter != end; ++iter) { + Node* node = *iter; + changed |= block->ssa->valuesAtTail.find(node)->value.merge(forNode(node)); } + break; } - - ASSERT(mergeMode != DontMerge || !changed); - + + default: + RELEASE_ASSERT_NOT_REACHED(); + } + reset(); - if (mergeMode != MergeToSuccessors) - return changed; - return mergeToSuccessors(block); } @@ -242,6 +218,7 @@ void InPlaceAbstractState::reset() m_block = 0; m_isValid = false; m_branchDirection = InvalidBranchDirection; + m_structureClobberState = StructuresAreWatched; } bool InPlaceAbstractState::mergeStateAtTail(AbstractValue& destination, AbstractValue& inVariable, Node* node) @@ -251,46 +228,31 @@ bool InPlaceAbstractState::mergeStateAtTail(AbstractValue& destination, Abstract AbstractValue source; - if (node->variableAccessData()->isCaptured()) { - // If it's captured then we know that whatever value was stored into the variable last is the - // one we care about. This is true even if the variable at tail is dead, which might happen if - // the last thing we did to the variable was a GetLocal and then ended up now using the - // GetLocal's result. - + switch (node->op()) { + case Phi: + case SetArgument: + case PhantomLocal: + case Flush: + // The block transfers the value from head to tail. source = inVariable; - } else { - switch (node->op()) { - case Phi: - case SetArgument: - case PhantomLocal: - case Flush: - // The block transfers the value from head to tail. - source = inVariable; - break; + break; - case GetLocal: - // The block refines the value with additional speculations. - source = forNode(node); - break; + case GetLocal: + // The block refines the value with additional speculations. + source = forNode(node); + break; - case SetLocal: - // The block sets the variable, and potentially refines it, both - // before and after setting it. 
- source = forNode(node->child1()); - if (node->variableAccessData()->flushFormat() == FlushedDouble) { - ASSERT(!(source.m_type & ~SpecFullNumber)); - ASSERT(!!(source.m_type & ~SpecDouble) == !!(source.m_type & SpecMachineInt)); - if (!(source.m_type & ~SpecDouble)) { - source.merge(SpecInt52AsDouble); - source.filter(SpecDouble); - } - } - break; + case SetLocal: + // The block sets the variable, and potentially refines it, both + // before and after setting it. + source = forNode(node->child1()); + if (node->variableAccessData()->flushFormat() == FlushedDouble) + RELEASE_ASSERT(!(source.m_type & ~SpecFullDouble)); + break; - default: - RELEASE_ASSERT_NOT_REACHED(); - break; - } + default: + RELEASE_ASSERT_NOT_REACHED(); + break; } if (destination == source) { @@ -308,11 +270,17 @@ bool InPlaceAbstractState::mergeStateAtTail(AbstractValue& destination, Abstract bool InPlaceAbstractState::merge(BasicBlock* from, BasicBlock* to) { + if (verbose) + dataLog(" Merging from ", pointerDump(from), " to ", pointerDump(to), "\n"); ASSERT(from->variablesAtTail.numberOfArguments() == to->variablesAtHead.numberOfArguments()); ASSERT(from->variablesAtTail.numberOfLocals() == to->variablesAtHead.numberOfLocals()); bool changed = false; + changed |= checkAndSet( + to->cfaStructureClobberStateAtHead, + DFG::merge(from->cfaStructureClobberStateAtTail, to->cfaStructureClobberStateAtHead)); + switch (m_graph.m_form) { case ThreadedCPS: { for (size_t argument = 0; argument < from->variablesAtTail.numberOfArguments(); ++argument) { @@ -330,13 +298,16 @@ bool InPlaceAbstractState::merge(BasicBlock* from, BasicBlock* to) case SSA: { for (size_t i = from->valuesAtTail.size(); i--;) changed |= to->valuesAtHead[i].merge(from->valuesAtTail[i]); - - HashSet<Node*>::iterator iter = to->ssa->liveAtHead.begin(); - HashSet<Node*>::iterator end = to->ssa->liveAtHead.end(); - for (; iter != end; ++iter) { - Node* node = *iter; - changed |= to->ssa->valuesAtHead.find(node)->value.merge( + + for (auto& entry : to->ssa->valuesAtHead) { + Node* node = entry.key; + if (verbose) + dataLog(" Merging for ", node, ": from ", from->ssa->valuesAtTail.find(node)->value, " to ", entry.value, "\n"); + changed |= entry.value.merge( from->ssa->valuesAtTail.find(node)->value); + + if (verbose) + dataLog(" Result: ", entry.value, "\n"); } break; } @@ -349,6 +320,8 @@ bool InPlaceAbstractState::merge(BasicBlock* from, BasicBlock* to) if (!to->cfaHasVisited) changed = true; + if (verbose) + dataLog(" Will revisit: ", changed, "\n"); to->cfaShouldRevisit |= changed; return changed; @@ -356,23 +329,23 @@ bool InPlaceAbstractState::merge(BasicBlock* from, BasicBlock* to) inline bool InPlaceAbstractState::mergeToSuccessors(BasicBlock* basicBlock) { - Node* terminal = basicBlock->last(); + Node* terminal = basicBlock->terminal(); ASSERT(terminal->isTerminal()); switch (terminal->op()) { case Jump: { ASSERT(basicBlock->cfaBranchDirection == InvalidBranchDirection); - return merge(basicBlock, terminal->takenBlock()); + return merge(basicBlock, terminal->targetBlock()); } case Branch: { ASSERT(basicBlock->cfaBranchDirection != InvalidBranchDirection); bool changed = false; if (basicBlock->cfaBranchDirection != TakeFalse) - changed |= merge(basicBlock, terminal->takenBlock()); + changed |= merge(basicBlock, terminal->branchData()->taken.block); if (basicBlock->cfaBranchDirection != TakeTrue) - changed |= merge(basicBlock, terminal->notTakenBlock()); + changed |= merge(basicBlock, terminal->branchData()->notTaken.block); return changed; } @@ -381,13 
+354,16 @@ inline bool InPlaceAbstractState::mergeToSuccessors(BasicBlock* basicBlock) // we're not. However I somehow doubt that this will ever be a big deal. ASSERT(basicBlock->cfaBranchDirection == InvalidBranchDirection); SwitchData* data = terminal->switchData(); - bool changed = merge(basicBlock, data->fallThrough); + bool changed = merge(basicBlock, data->fallThrough.block); for (unsigned i = data->cases.size(); i--;) - changed |= merge(basicBlock, data->cases[i].target); + changed |= merge(basicBlock, data->cases[i].target.block); return changed; } case Return: + case TailCall: + case TailCallVarargs: + case TailCallForwardVarargs: case Unreachable: ASSERT(basicBlock->cfaBranchDirection == InvalidBranchDirection); return false; diff --git a/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.h b/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.h index f0f2a46d5..10220cdf6 100644 --- a/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.h +++ b/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.h @@ -26,21 +26,19 @@ #ifndef DFGInPlaceAbstractState_h #define DFGInPlaceAbstractState_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGAbstractValue.h" #include "DFGBranchDirection.h" #include "DFGGraph.h" -#include "DFGMergeMode.h" #include "DFGNode.h" namespace JSC { namespace DFG { class InPlaceAbstractState { + WTF_MAKE_FAST_ALLOCATED; public: - InPlaceAbstractState(Graph& graph); + InPlaceAbstractState(Graph&); ~InPlaceAbstractState(); @@ -77,26 +75,14 @@ public: // Finish abstractly executing a basic block. If MergeToTail or // MergeToSuccessors is passed, then this merges everything we have // learned about how the state changes during this block's execution into - // the block's data structures. There are three return modes, depending - // on the value of mergeMode: - // - // DontMerge: - // Always returns false. - // - // MergeToTail: - // Returns true if the state of the block at the tail was changed. - // This means that you must call mergeToSuccessors(), and if that - // returns true, then you must revisit (at least) the successor - // blocks. False will always be returned if the block is terminal - // (i.e. ends in Throw or Return, or has a ForceOSRExit inside it). + // the block's data structures. // - // MergeToSuccessors: - // Returns true if the state of the block at the tail was changed, - // and, if the state at the heads of successors was changed. - // A true return means that you must revisit (at least) the successor - // blocks. This also sets cfaShouldRevisit to true for basic blocks - // that must be visited next. - bool endBasicBlock(MergeMode); + // Returns true if the state of the block at the tail was changed, + // and, if the state at the heads of successors was changed. + // A true return means that you must revisit (at least) the successor + // blocks. This also sets cfaShouldRevisit to true for basic blocks + // that must be visited next. + bool endBasicBlock(); // Reset the AbstractState. This throws away any results, and at this point // you can safely call beginBasicBlock() on any basic block. @@ -105,6 +91,9 @@ public: // Did the last executed node clobber the world? bool didClobber() const { return m_didClobber; } + // Are structures currently clobbered? + StructureClobberState structureClobberState() const { return m_structureClobberState; } + // Is the execution state still valid? This will be false if execute() has // returned false previously. 
bool isValid() const { return m_isValid; } @@ -124,11 +113,16 @@ public: // Methods intended to be called from AbstractInterpreter. void setDidClobber(bool didClobber) { m_didClobber = didClobber; } + void setStructureClobberState(StructureClobberState value) { m_structureClobberState = value; } void setIsValid(bool isValid) { m_isValid = isValid; } void setBranchDirection(BranchDirection branchDirection) { m_branchDirection = branchDirection; } + + // This method is evil - it causes a huge maintenance headache and there is a gross amount of + // code devoted to it. It would be much nicer to just always run the constant folder on each + // block. But, the last time we did it, it was a 1% SunSpider regression: + // https://bugs.webkit.org/show_bug.cgi?id=133947 + // So, we should probably keep this method. void setFoundConstants(bool foundConstants) { m_foundConstants = foundConstants; } - bool haveStructures() const { return m_haveStructures; } // It's always safe to return true. - void setHaveStructures(bool haveStructures) { m_haveStructures = haveStructures; } private: bool mergeStateAtTail(AbstractValue& destination, AbstractValue& inVariable, Node*); @@ -140,11 +134,11 @@ private: Operands<AbstractValue> m_variables; BasicBlock* m_block; - bool m_haveStructures; bool m_foundConstants; bool m_isValid; bool m_didClobber; + StructureClobberState m_structureClobberState; BranchDirection m_branchDirection; // This is only set for blocks that end in Branch and that execute to completion (i.e. m_isValid == true). }; diff --git a/Source/JavaScriptCore/dfg/DFGInferredTypeCheck.cpp b/Source/JavaScriptCore/dfg/DFGInferredTypeCheck.cpp new file mode 100644 index 000000000..1951c79c1 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGInferredTypeCheck.cpp @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "DFGInferredTypeCheck.h" + +#if ENABLE(DFG_JIT) + +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +void insertInferredTypeCheck( + InsertionSet& insertionSet, unsigned nodeIndex, NodeOrigin origin, Node* baseNode, + const InferredType::Descriptor& type) +{ + insertionSet.graph().registerInferredType(type); + + switch (type.kind()) { + case InferredType::Bottom: + insertionSet.insertNode(nodeIndex, SpecNone, ForceOSRExit, origin); + return; + + case InferredType::Boolean: + insertionSet.insertNode(nodeIndex, SpecNone, Check, origin, Edge(baseNode, BooleanUse)); + return; + + case InferredType::Other: + insertionSet.insertNode(nodeIndex, SpecNone, Check, origin, Edge(baseNode, OtherUse)); + return; + + case InferredType::Int32: + insertionSet.insertNode(nodeIndex, SpecNone, Check, origin, Edge(baseNode, Int32Use)); + return; + + case InferredType::Number: + insertionSet.insertNode(nodeIndex, SpecNone, Check, origin, Edge(baseNode, NumberUse)); + return; + + case InferredType::String: + insertionSet.insertNode(nodeIndex, SpecNone, Check, origin, Edge(baseNode, StringUse)); + return; + + case InferredType::Symbol: + insertionSet.insertNode(nodeIndex, SpecNone, Check, origin, Edge(baseNode, SymbolUse)); + return; + + case InferredType::ObjectWithStructure: + insertionSet.insertNode( + nodeIndex, SpecNone, CheckStructure, origin, + OpInfo(insertionSet.graph().addStructureSet(type.structure())), + Edge(baseNode, CellUse)); + return; + + case InferredType::ObjectWithStructureOrOther: + insertionSet.insertNode( + nodeIndex, SpecNone, CheckStructure, origin, + OpInfo(insertionSet.graph().addStructureSet(type.structure())), + Edge(baseNode, CellOrOtherUse)); + return; + + case InferredType::Object: + insertionSet.insertNode(nodeIndex, SpecNone, Check, origin, Edge(baseNode, ObjectUse)); + return; + + case InferredType::ObjectOrOther: + insertionSet.insertNode(nodeIndex, SpecNone, Check, origin, Edge(baseNode, ObjectOrOtherUse)); + return; + + case InferredType::Top: + return; + } + + DFG_CRASH(insertionSet.graph(), baseNode, "Bad inferred type"); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGInferredTypeCheck.h b/Source/JavaScriptCore/dfg/DFGInferredTypeCheck.h new file mode 100644 index 000000000..b6b69c737 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGInferredTypeCheck.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGInferredTypeCheck_h +#define DFGInferredTypeCheck_h + +#if ENABLE(DFG_JIT) + +#include "DFGInsertionSet.h" +#include "InferredType.h" + +namespace JSC { namespace DFG { + +// Inserts a type check that ensures that baseNode has the given inferred type. +void insertInferredTypeCheck( + InsertionSet&, unsigned nodeIndex, NodeOrigin, Node* baseNode, const InferredType::Descriptor&); + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGInferredTypeCheck_h + diff --git a/Source/JavaScriptCore/dfg/DFGInsertionSet.cpp b/Source/JavaScriptCore/dfg/DFGInsertionSet.cpp new file mode 100644 index 000000000..8878d7c1e --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGInsertionSet.cpp @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGInsertionSet.h" + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +void InsertionSet::insertSlow(const Insertion& insertion) +{ + ASSERT(!m_insertions.isEmpty()); + ASSERT(m_insertions.last().index() > insertion.index()); + + for (size_t index = m_insertions.size() - 1; index--;) { + if (m_insertions[index].index() <= insertion.index()) { + m_insertions.insert(index + 1, insertion); + return; + } + } + + m_insertions.insert(0, insertion); +} + +void InsertionSet::execute(BasicBlock* block) +{ + executeInsertions(*block, m_insertions); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGInsertionSet.h b/Source/JavaScriptCore/dfg/DFGInsertionSet.h index 8d76c4566..57ff55fef 100644 --- a/Source/JavaScriptCore/dfg/DFGInsertionSet.h +++ b/Source/JavaScriptCore/dfg/DFGInsertionSet.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. 
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGInsertionSet_h #define DFGInsertionSet_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGGraph.h" @@ -45,10 +43,18 @@ public: { } + Graph& graph() { return m_graph; } + + // Adds another code insertion. It's expected that you'll usually insert things in order. If + // you don't, this function will perform a linear search to find the largest insertion point + // at which insertion order would be preserved. This is essentially equivalent to if you did + // a stable sort on the insertions. Node* insert(const Insertion& insertion) { - ASSERT(!m_insertions.size() || m_insertions.last().index() <= insertion.index()); - m_insertions.append(insertion); + if (LIKELY(!m_insertions.size() || m_insertions.last().index() <= insertion.index())) + m_insertions.append(insertion); + else + insertSlow(insertion); return insertion.element(); } @@ -57,19 +63,77 @@ public: return insert(Insertion(index, element)); } -#define DFG_DEFINE_INSERT_NODE(templatePre, templatePost, typeParams, valueParamsComma, valueParams, valueArgs) \ - templatePre typeParams templatePost Node* insertNode(size_t index, SpeculatedType type valueParamsComma valueParams) \ - { \ - return insert(index, m_graph.addNode(type valueParamsComma valueArgs)); \ + template<typename... Params> + Node* insertNode(size_t index, SpeculatedType type, Params... params) + { + return insert(index, m_graph.addNode(type, params...)); + } + + Node* insertConstant( + size_t index, NodeOrigin origin, FrozenValue* value, + NodeType op = JSConstant) + { + return insertNode( + index, speculationFromValue(value->value()), op, origin, OpInfo(value)); + } + + Edge insertConstantForUse( + size_t index, NodeOrigin origin, FrozenValue* value, UseKind useKind) + { + NodeType op; + if (isDouble(useKind)) + op = DoubleConstant; + else if (useKind == Int52RepUse) + op = Int52Constant; + else + op = JSConstant; + return Edge(insertConstant(index, origin, value, op), useKind); } - DFG_VARIADIC_TEMPLATE_FUNCTION(DFG_DEFINE_INSERT_NODE) -#undef DFG_DEFINE_INSERT_NODE - void execute(BasicBlock* block) + Node* insertConstant(size_t index, NodeOrigin origin, JSValue value, NodeType op = JSConstant) { - executeInsertions(*block, m_insertions); + return insertConstant(index, origin, m_graph.freeze(value), op); } + + Edge insertConstantForUse(size_t index, NodeOrigin origin, JSValue value, UseKind useKind) + { + return insertConstantForUse(index, origin, m_graph.freeze(value), useKind); + } + + Edge insertBottomConstantForUse(size_t index, NodeOrigin origin, UseKind useKind) + { + if (isDouble(useKind)) + return insertConstantForUse(index, origin, jsNumber(PNaN), useKind); + if (useKind == Int52RepUse) + return insertConstantForUse(index, origin, jsNumber(0), useKind); + return insertConstantForUse(index, origin, jsUndefined(), useKind); + } + + Node* insertCheck(size_t index, NodeOrigin origin, AdjacencyList children) + { + children = children.justChecks(); + if (children.isEmpty()) + return nullptr; + return insertNode(index, SpecNone, Check, origin, children); + } + + Node* insertCheck(size_t index, Node* node) + { + return insertCheck(index, node->origin, node->children); + } + + Node* insertCheck(size_t index, NodeOrigin origin, Edge edge) + { + if (edge.willHaveCheck()) + return insertNode(index, SpecNone, Check, origin, edge); + 
return nullptr; + } + + void execute(BasicBlock* block); + private: + void insertSlow(const Insertion&); + Graph& m_graph; Vector<Insertion, 8> m_insertions; }; diff --git a/Source/JavaScriptCore/dfg/DFGIntegerCheckCombiningPhase.cpp b/Source/JavaScriptCore/dfg/DFGIntegerCheckCombiningPhase.cpp new file mode 100644 index 000000000..b9f1ebad5 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGIntegerCheckCombiningPhase.cpp @@ -0,0 +1,401 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGIntegerCheckCombiningPhase.h" + +#if ENABLE(DFG_JIT) + +#include "DFGGraph.h" +#include "DFGInsertionSet.h" +#include "DFGPhase.h" +#include "DFGPredictionPropagationPhase.h" +#include "DFGVariableAccessDataDump.h" +#include "JSCInlines.h" +#include <unordered_map> +#include <wtf/HashMethod.h> + +namespace JSC { namespace DFG { + +namespace { + +static const bool verbose = false; + +enum RangeKind { + InvalidRangeKind, + + // This means we did ArithAdd with CheckOverflow. + Addition, + + // This means we did CheckInBounds on some length. 
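+ // The key is the length node being checked against; the source is the index with any constant + // addend peeled off.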
+ ArrayBounds +}; + +struct RangeKey { + RangeKey() + : m_kind(InvalidRangeKind) + , m_key(nullptr) + { + } + + static RangeKey addition(Edge edge) + { + RangeKey result; + result.m_kind = Addition; + result.m_source = edge.sanitized(); + result.m_key = 0; + return result; + } + + static RangeKey arrayBounds(Edge edge, Node* key) + { + RangeKey result; + result.m_kind = ArrayBounds; + result.m_source = edge.sanitized(); + result.m_key = key; + return result; + } + + bool operator!() const { return m_kind == InvalidRangeKind; } + + unsigned hash() const + { + return m_kind + m_source.hash() + PtrHash<Node*>::hash(m_key); + } + + bool operator==(const RangeKey& other) const + { + return m_kind == other.m_kind + && m_source == other.m_source + && m_key == other.m_key; + } + + void dump(PrintStream& out) const + { + switch (m_kind) { + case InvalidRangeKind: + out.print("InvalidRangeKind("); + break; + case Addition: + out.print("Addition("); + break; + case ArrayBounds: + out.print("ArrayBounds("); + break; + } + out.print(m_source, ", ", m_key, ")"); + } + + RangeKind m_kind; + Edge m_source; + Node* m_key; +}; + +struct RangeKeyAndAddend { + RangeKeyAndAddend() + : m_addend(0) + { + } + + RangeKeyAndAddend(RangeKey key, int32_t addend) + : m_key(key) + , m_addend(addend) + { + } + + bool operator!() const { return !m_key && !m_addend; } + + void dump(PrintStream& out) const + { + out.print(m_key, " + ", m_addend); + } + + RangeKey m_key; + int32_t m_addend; +}; + +struct Range { + Range() + : m_minBound(0) + , m_maxBound(0) + , m_count(0) + , m_hoisted(false) + { + } + + void dump(PrintStream& out) const + { + out.print("(", m_minBound, " @", m_minOrigin, ") .. (", m_maxBound, " @", m_maxOrigin, "), count = ", m_count, ", hoisted = ", m_hoisted); + } + + int32_t m_minBound; + CodeOrigin m_minOrigin; + int32_t m_maxBound; + CodeOrigin m_maxOrigin; + unsigned m_count; // If this is zero then the bounds won't necessarily make sense. + bool m_hoisted; +}; + +} // anonymous namespace + +class IntegerCheckCombiningPhase : public Phase { +public: + IntegerCheckCombiningPhase(Graph& graph) + : Phase(graph, "integer check combining") + , m_insertionSet(graph) + { + } + + bool run() + { + ASSERT(m_graph.m_form == SSA); + + m_changed = false; + + for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) + handleBlock(blockIndex); + + return m_changed; + } + +private: + void handleBlock(BlockIndex blockIndex) + { + BasicBlock* block = m_graph.block(blockIndex); + if (!block) + return; + + m_map.clear(); + + // First we collect Ranges. If operations within the range have enough redundancy, + // we hoist. And then we remove additions and checks that fall within the max range. 
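+ // For example, if a block contains CheckInBounds(@i + 1, @len) and CheckInBounds(@i + 3, @len), both + // map to the key (@i, @len) with addends 1 and 3. We hoist one pair of checks on @i + 1 and @i + 3 to + // the first occurrence and then remove the original checks, since any @i + k with 1 <= k <= 3 is in + // bounds whenever those two are (isValid() rejects ranges whose bounds differ by 2^31 or more, so the + // in-between additions cannot wrap).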
+ + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + Node* node = block->at(nodeIndex); + RangeKeyAndAddend data = rangeKeyAndAddend(node); + if (verbose) + dataLog("For ", node, ": ", data, "\n"); + if (!data) + continue; + + Range& range = m_map[data.m_key]; + if (verbose) + dataLog(" Range: ", range, "\n"); + if (range.m_count) { + if (data.m_addend > range.m_maxBound) { + range.m_maxBound = data.m_addend; + range.m_maxOrigin = node->origin.semantic; + } else if (data.m_addend < range.m_minBound) { + range.m_minBound = data.m_addend; + range.m_minOrigin = node->origin.semantic; + } + } else { + range.m_maxBound = data.m_addend; + range.m_minBound = data.m_addend; + range.m_minOrigin = node->origin.semantic; + range.m_maxOrigin = node->origin.semantic; + } + range.m_count++; + if (verbose) + dataLog(" New range: ", range, "\n"); + } + + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + Node* node = block->at(nodeIndex); + RangeKeyAndAddend data = rangeKeyAndAddend(node); + if (!data) + continue; + Range range = m_map[data.m_key]; + if (!isValid(data.m_key, range)) + continue; + + // Do the hoisting. + if (!range.m_hoisted) { + NodeOrigin minOrigin = node->origin.withSemantic(range.m_minOrigin); + NodeOrigin maxOrigin = node->origin.withSemantic(range.m_maxOrigin); + + switch (data.m_key.m_kind) { + case Addition: { + if (range.m_minBound < 0) + insertAdd(nodeIndex, minOrigin, data.m_key.m_source, range.m_minBound); + if (range.m_maxBound > 0) + insertAdd(nodeIndex, maxOrigin, data.m_key.m_source, range.m_maxBound); + break; + } + + case ArrayBounds: { + Node* minNode; + Node* maxNode; + + if (!data.m_key.m_source) { + minNode = 0; + maxNode = m_insertionSet.insertConstant( + nodeIndex, maxOrigin, jsNumber(range.m_maxBound)); + } else { + minNode = insertAdd( + nodeIndex, minOrigin, data.m_key.m_source, range.m_minBound, + Arith::Unchecked); + maxNode = insertAdd( + nodeIndex, maxOrigin, data.m_key.m_source, range.m_maxBound, + Arith::Unchecked); + } + + if (minNode) { + m_insertionSet.insertNode( + nodeIndex, SpecNone, CheckInBounds, node->origin, + Edge(minNode, Int32Use), Edge(data.m_key.m_key, Int32Use)); + } + m_insertionSet.insertNode( + nodeIndex, SpecNone, CheckInBounds, node->origin, + Edge(maxNode, Int32Use), Edge(data.m_key.m_key, Int32Use)); + break; + } + + default: + RELEASE_ASSERT_NOT_REACHED(); + } + + m_changed = true; + m_map[data.m_key].m_hoisted = true; + } + + // Do the elimination. 
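+ // The hoisted nodes above already perform the extreme checks, so the ArithAdd here can run + // unchecked and the CheckInBounds here can simply be removed.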
+ switch (data.m_key.m_kind) { + case Addition: + node->setArithMode(Arith::Unchecked); + m_changed = true; + break; + + case ArrayBounds: + node->remove(); + m_changed = true; + break; + + default: + RELEASE_ASSERT_NOT_REACHED(); + } + } + + m_insertionSet.execute(block); + } + + RangeKeyAndAddend rangeKeyAndAddend(Node* node) + { + switch (node->op()) { + case ArithAdd: { + if (node->arithMode() != Arith::CheckOverflow + && node->arithMode() != Arith::CheckOverflowAndNegativeZero) + break; + if (!node->child2()->isInt32Constant()) + break; + return RangeKeyAndAddend( + RangeKey::addition(node->child1()), + node->child2()->asInt32()); + } + + case CheckInBounds: { + Edge source; + int32_t addend; + Node* key = node->child2().node(); + + Edge index = node->child1(); + + if (index->isInt32Constant()) { + source = Edge(); + addend = index->asInt32(); + } else if ( + index->op() == ArithAdd + && index->isBinaryUseKind(Int32Use) + && index->child2()->isInt32Constant()) { + source = index->child1(); + addend = index->child2()->asInt32(); + } else { + source = index; + addend = 0; + } + + return RangeKeyAndAddend(RangeKey::arrayBounds(source, key), addend); + } + + default: + break; + } + + return RangeKeyAndAddend(); + } + + bool isValid(const RangeKey& key, const Range& range) + { + if (range.m_count < 2) + return false; + + switch (key.m_kind) { + case ArrayBounds: { + // Have to do this carefully because C++ compilers are too smart. But all we're really doing is detecting if + // the difference between the bounds is 2^31 or more. If it was, then we'd have to worry about wrap-around. + // The way we'd like to write this expression is (range.m_maxBound - range.m_minBound) >= 0, but that is a + // signed subtraction and compare, which allows the C++ compiler to do anything it wants in case of + // wrap-around. + uint32_t maxBound = range.m_maxBound; + uint32_t minBound = range.m_minBound; + uint32_t unsignedDifference = maxBound - minBound; + return !(unsignedDifference >> 31); + } + + default: + return true; + } + } + + Node* insertAdd( + unsigned nodeIndex, NodeOrigin origin, Edge source, int32_t addend, + Arith::Mode arithMode = Arith::CheckOverflow) + { + if (!addend) + return source.node(); + return m_insertionSet.insertNode( + nodeIndex, source->prediction(), source->result(), + ArithAdd, origin, OpInfo(arithMode), source, + m_insertionSet.insertConstantForUse( + nodeIndex, origin, jsNumber(addend), source.useKind())); + } + + typedef std::unordered_map<RangeKey, Range, HashMethod<RangeKey>> RangeMap; + RangeMap m_map; + + InsertionSet m_insertionSet; + bool m_changed; +}; + +bool performIntegerCheckCombining(Graph& graph) +{ + SamplingRegion samplingRegion("DFG Integer Check Combining Phase"); + return runPhase<IntegerCheckCombiningPhase>(graph); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.h b/Source/JavaScriptCore/dfg/DFGIntegerCheckCombiningPhase.h index e8a24019e..6abec0355 100644 --- a/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.h +++ b/Source/JavaScriptCore/dfg/DFGIntegerCheckCombiningPhase.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,10 +23,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef DFGArgumentsSimplificationPhase_h -#define DFGArgumentsSimplificationPhase_h - -#include <wtf/Platform.h> +#ifndef DFGIntegerCheckCombiningPhase_h +#define DFGIntegerCheckCombiningPhase_h #if ENABLE(DFG_JIT) @@ -34,16 +32,13 @@ namespace JSC { namespace DFG { class Graph; -// Simplifies reflective uses of the Arguments object: -// -// Inlined arguments.length -> constant -// Inlined arguments[constant] -> GetLocalUnlinked +// Removes overflow checks and out-of-bounds checks by hoisting them. -bool performArgumentsSimplification(Graph&); +bool performIntegerCheckCombining(Graph&); } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) -#endif // DFGArgumentsSimplificationPhase_h +#endif // DFGIntegerCheckCombiningPhase_h diff --git a/Source/JavaScriptCore/dfg/DFGIntegerRangeOptimizationPhase.cpp b/Source/JavaScriptCore/dfg/DFGIntegerRangeOptimizationPhase.cpp new file mode 100644 index 000000000..02aa134d8 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGIntegerRangeOptimizationPhase.cpp @@ -0,0 +1,1780 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGIntegerRangeOptimizationPhase.h" + +#if ENABLE(DFG_JIT) + +#include "DFGBlockMapInlines.h" +#include "DFGBlockSet.h" +#include "DFGGraph.h" +#include "DFGInsertionSet.h" +#include "DFGPhase.h" +#include "DFGPredictionPropagationPhase.h" +#include "DFGVariableAccessDataDump.h" +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +namespace { + +const bool verbose = false; + +int64_t clampedSumImpl() { return 0; } + +template<typename... Args> +int64_t clampedSumImpl(int left, Args... args) +{ + return static_cast<int64_t>(left) + clampedSumImpl(args...); +} + +template<typename... Args> +int clampedSum(Args... args) +{ + int64_t result = clampedSumImpl(args...); + return static_cast<int>(std::min( + static_cast<int64_t>(std::numeric_limits<int>::max()), + std::max( + static_cast<int64_t>(std::numeric_limits<int>::min()), + result))); +} + +bool isGeneralOffset(int offset) +{ + return offset >= -1 && offset <= 1; +} + +class Relationship { +public: + enum Kind { + LessThan, + Equal, + NotEqual, + GreaterThan + }; + + // Some relationships provide more information than others. 
When a relationship provides more + // information, it is less vague. + static unsigned vagueness(Kind kind) + { + switch (kind) { + case Equal: + return 0; + case LessThan: + case GreaterThan: + return 1; + case NotEqual: + return 2; + } + RELEASE_ASSERT_NOT_REACHED(); + return 0; + } + + static const unsigned minVagueness = 0; + static const unsigned maxVagueness = 2; + + static Kind flipped(Kind kind) + { + switch (kind) { + case LessThan: + return GreaterThan; + case Equal: + return Equal; + case NotEqual: + return NotEqual; + case GreaterThan: + return LessThan; + } + RELEASE_ASSERT_NOT_REACHED(); + return kind; + } + + Relationship() + : m_left(nullptr) + , m_right(nullptr) + , m_kind(Equal) + , m_offset(0) + { + } + + Relationship(Node* left, Node* right, Kind kind, int offset = 0) + : m_left(left) + , m_right(right) + , m_kind(kind) + , m_offset(offset) + { + RELEASE_ASSERT(m_left); + RELEASE_ASSERT(m_right); + RELEASE_ASSERT(m_left != m_right); + } + + static Relationship safeCreate(Node* left, Node* right, Kind kind, int offset = 0) + { + if (!left || !right || left == right) + return Relationship(); + return Relationship(left, right, kind, offset); + } + + explicit operator bool() const { return m_left; } + + Node* left() const { return m_left; } + Node* right() const { return m_right; } + Kind kind() const { return m_kind; } + int offset() const { return m_offset; } + + unsigned vagueness() const { return vagueness(kind()); } + + Relationship flipped() const + { + if (!*this) + return Relationship(); + + // This should return Relationship() if -m_offset overflows. For example: + // + // @a > @b - 2**31 + // + // If we flip it we get: + // + // @b < @a + 2**31 + // + // Except that the sign gets flipped since it's INT_MIN: + // + // @b < @a - 2**31 + // + // And that makes no sense. To see how little sense it makes, consider: + // + // @a > @zero - 2**31 + // + // We would flip it to mean: + // + // @zero < @a - 2**31 + // + // Which is absurd. 
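+        //
+        // Concretely, the failure mode is that -INT_MIN is not representable: negating an
+        // offset of -2**31 leaves it at -2**31, so the flipped relationship would keep the
+        // wrong sign. Hence the explicit bail-out below.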
+ + if (m_offset == std::numeric_limits<int>::min()) + return Relationship(); + + return Relationship(m_right, m_left, flipped(m_kind), -m_offset); + } + + Relationship inverse() const + { + if (!*this) + return *this; + + switch (m_kind) { + case Equal: + return Relationship(m_left, m_right, NotEqual, m_offset); + case NotEqual: + return Relationship(m_left, m_right, Equal, m_offset); + case LessThan: + if (sumOverflows<int>(m_offset, -1)) + return Relationship(); + return Relationship(m_left, m_right, GreaterThan, m_offset - 1); + case GreaterThan: + if (sumOverflows<int>(m_offset, 1)) + return Relationship(); + return Relationship(m_left, m_right, LessThan, m_offset + 1); + } + + RELEASE_ASSERT_NOT_REACHED(); + } + + bool isCanonical() const { return m_left < m_right; } + + Relationship canonical() const + { + if (isCanonical()) + return *this; + return flipped(); + } + + bool sameNodesAs(const Relationship& other) const + { + return m_left == other.m_left + && m_right == other.m_right; + } + + bool operator==(const Relationship& other) const + { + return sameNodesAs(other) + && m_kind == other.m_kind + && m_offset == other.m_offset; + } + + bool operator!=(const Relationship& other) const + { + return !(*this == other); + } + + bool operator<(const Relationship& other) const + { + if (m_left != other.m_left) + return m_left < other.m_left; + if (m_right != other.m_right) + return m_right < other.m_right; + if (m_kind != other.m_kind) + return m_kind < other.m_kind; + return m_offset < other.m_offset; + } + + // If possible, returns a form of this relationship where the given node is the left + // side. Returns a null relationship if this relationship cannot say anything about this + // node. + Relationship forNode(Node* node) const + { + if (m_left == node) + return *this; + if (m_right == node) + return flipped(); + return Relationship(); + } + + void setLeft(Node* left) + { + RELEASE_ASSERT(left != m_right); + m_left = left; + } + bool addToOffset(int offset) + { + if (sumOverflows<int>(m_offset, offset)) + return false; + m_offset += offset; + return true; + } + + // Attempts to create relationships that summarize the union of this relationship and + // the other relationship. Relationships are returned by calling the functor with the newly + // created relationships. No relationships are created to indicate TOP. This is used + // for merging the current relationship-at-head for some pair of nodes and a new + // relationship-at-head being proposed by a predecessor. We wish to create a new + // relationship that is true whenever either of them are true, which ensuring that we don't + // do this forever. Anytime we create a relationship that is not equal to either of the + // previous ones, we will cause the analysis fixpoint to reexecute. + // + // If *this and other are identical, we just pass it to the functor. + // + // If they are different, we pick from a finite set of "general" relationships: + // + // Eq: this == other + C, where C is -1, 0, or 1. + // Lt: this < other + C, where C is -1, 0, or 1. + // Gt: this > other + C, where C is -1, 0, or 1. + // Ne: this != other + C, where C is -1, 0, or 1. + // TOP: the null relationship. + // + // Constraining C to -1,0,1 is necessary to ensure that the set of general relationships is + // finite. This finite set of relationships forms a pretty simple lattice where a + // relA->relB means "relB is more general than relA". For example, this<other+1 is more + // general than this==other. 
I'll leave it as an exercise for the reader to see that a + // graph between the 13 general relationships is indeed a lattice. The fact that the set of + // general relationships is a finite lattice ensures monotonicity of the fixpoint, since + // any merge over not-identical relationships returns a relationship that is closer to the + // TOP relationship than either of the original relationships. Here's how convergence is + // achieved for any pair of relationships over the same nodes: + // + // - If they are identical, then returning *this means that we won't be responsible for + // causing another fixpoint iteration. Once all merges reach this point, we're done. + // + // - If they are different, then we pick the most constraining of the 13 general + // relationships that is true if either *this or other are true. This means that if the + // relationships are not identical, the merged relationship will be closer to TOP than + // either of the originals. Returning a different relationship means that we will be + // responsible for the fixpoint to reloop, but we can only do this at most 13 times since + // that's how "deep" the general relationship lattice is. + // + // Note that C being constrained to -1,0,1 also ensures that we never have to return a + // combination of Lt and Gt, as in for example this<other+C && this>other-D. The only possible + // values of C and D where this would work are -1 and 1, but in that case we just say + // this==other. That said, the logic for merging two == relationships, like this==other+C || + // this==other+D is to attempt to create these two relationships: this>other+min(C,D)-1 && + // this<other+max(C,D)+1. But only one of these relationships will belong to the set of general + // relationships. + // + // Here's an example of this in action: + // + // for (var i = a; ; ++i) { } + // + // Without C being constrained to -1,0,1, we could end up looping forever: first we'd say + // that i==a, then we might say that i<a+2, then i<a+3, then i<a+4, etc. We won't do this + // because i<a+2 is not a valid general relationship: so when we merge i==a from the first + // iteration and i==a+1 from the second iteration, we create i>a-1 and i<a+2 but then + // realize that only i>a-1 is a valid general relationship. This gives us exactly what we + // want: a statement that i>=a. + // + // However, this may return a pair of relationships when merging relationships involving + // constants. For example, if given: + // + // @x == @c + // @x == @d + // + // where @c and @d are constants, then this may pass two relationships to the functor: + // + // @x > min(@c, @d) - 1 + // @x < max(@c, @d) + 1 + // + // This still allows for convergence, because just as when merging relationships over + // variables, this always picks from a set of general relationships. Hence although this may + // produce two relationships as a result of the merge, the total number of relationships that + // can be present at head of block is limited by O(graph.size^2). + template<typename Functor> + void merge(const Relationship& other, const Functor& functor) const + { + // Handle the super obvious case first. + if (*this == other) { + functor(*this); + return; + } + + if (m_left != other.m_left) + return; + + if (m_right != other.m_right) { + mergeConstantsImpl(other, functor); + return; + } + + ASSERT(sameNodesAs(other)); + + // This does some interesting permutations to reduce the amount of duplicate code. 
For + // example: + // + // initially: @a != @b, @a > @b + // @b != @a, @b < @a + // @b < @a, @b != @a + // finally: @b != a, @b < @a + // + // Another example: + // + // initially: @a < @b, @a != @b + // finally: @a != @b, @a < @b + + Relationship a = *this; + Relationship b = other; + bool needFlip = false; + + // Get rid of GreaterThan. + if (a.m_kind == GreaterThan || b.m_kind == GreaterThan) { + a = a.flipped(); + b = b.flipped(); + + // In rare cases, we might not be able to flip. Just give up on life in those + // cases. + if (!a || !b) + return; + + needFlip = true; + + // If we still have GreaterThan, then it means that we started with @a < @b and + // @a > @b. That's pretty much always a tautology; we don't attempt to do smart + // things for that case for now. + if (a.m_kind == GreaterThan || b.m_kind == GreaterThan) + return; + } + + // Make sure that if we have a LessThan, then it's first. + if (b.m_kind == LessThan) + std::swap(a, b); + + // Make sure that if we have a NotEqual, then it's first. + if (b.m_kind == NotEqual) + std::swap(a, b); + + Relationship result = a.mergeImpl(b); + if (!result) + return; + + if (needFlip) + result = result.flipped(); + + functor(result); + } + + // Attempts to construct one Relationship that adequately summarizes the intersection of + // this and other. Returns a null relationship if the filtration should be expressed as two + // different relationships. Returning null is always safe because relationship lists in + // this phase always imply intersection. So, you could soundly skip calling this method and + // just put both relationships into the list. But, that could lead the fixpoint to diverge. + // Hence this will attempt to combine the two relationships into one as a convergence hack. + // In some cases, it will do something conservative. It's always safe for this to return + // *this, or to return other. It'll do that sometimes, mainly to accelerate convergence for + // things that we don't think are important enough to slow down the analysis. + Relationship filter(const Relationship& other) const + { + // We are only interested in merging relationships over the same nodes. + ASSERT(sameNodesAs(other)); + + if (*this == other) + return *this; + + // From here we can assume that the two relationships are not identical. Usually we use + // this to assume that we have different offsets anytime that everything but the offset + // is identical. + + // We want equality to take precedent over everything else, and we don't want multiple + // independent claims of equality. That would just be a contradiction. When it does + // happen, we will be conservative in the sense that we will pick one. + if (m_kind == Equal) + return *this; + if (other.m_kind == Equal) + return other; + + // Useful helper for flipping. + auto filterFlipped = [&] () -> Relationship { + // If we cannot flip, then just conservatively return *this. + Relationship a = flipped(); + Relationship b = other.flipped(); + if (!a || !b) + return *this; + Relationship result = a.filter(b); + if (!result) + return Relationship(); + result = result.flipped(); + if (!result) + return *this; + return result; + }; + + if (m_kind == NotEqual) { + if (other.m_kind == NotEqual) { + // We could do something smarter here. We could even keep both NotEqual's. We + // would need to make sure that we correctly collapsed them when merging. But + // for now, we just pick one of them and hope for the best. 
+ return *this; + } + + if (other.m_kind == GreaterThan) { + // Implement this in terms of NotEqual.filter(LessThan). + return filterFlipped(); + } + + ASSERT(other.m_kind == LessThan); + // We have two claims: + // @a != @b + C + // @a < @b + D + // + // If C >= D, then the NotEqual is redundant. + // If C < D - 1, then we could keep both, but for now we just keep the LessThan. + // If C == D - 1, then the LessThan can be turned into: + // + // @a < @b + C + // + // Note that C == this.m_offset, D == other.m_offset. + + if (m_offset == other.m_offset - 1) + return Relationship(m_left, m_right, LessThan, m_offset); + + return other; + } + + if (other.m_kind == NotEqual) + return other.filter(*this); + + if (m_kind == LessThan) { + if (other.m_kind == LessThan) { + return Relationship( + m_left, m_right, LessThan, std::min(m_offset, other.m_offset)); + } + + ASSERT(other.m_kind == GreaterThan); + if (sumOverflows<int>(m_offset, -1)) + return Relationship(); + if (sumOverflows<int>(other.m_offset, 1)) + return Relationship(); + if (m_offset - 1 == other.m_offset + 1) + return Relationship(m_left, m_right, Equal, m_offset - 1); + + return Relationship(); + } + + ASSERT(m_kind == GreaterThan); + return filterFlipped(); + } + + // Come up with a relationship that is the best description of this && other, provided that left() is + // the same and right() is a constant. Also requires that this is at least as vague as other. It may + // return this or it may return something else, but whatever it returns, it will have the same nodes as + // this. This is not automatically done by filter() because it currently only makes sense to call this + // during a very particular part of setOneSide(). + Relationship filterConstant(const Relationship& other) const + { + ASSERT(m_left == other.m_left); + ASSERT(m_right->isInt32Constant()); + ASSERT(other.m_right->isInt32Constant()); + ASSERT(vagueness() >= other.vagueness()); + + if (vagueness() == other.vagueness()) + return *this; + + int thisRight = m_right->asInt32(); + int otherRight = other.m_right->asInt32(); + + // Ignore funny business. + if (sumOverflows<int>(otherRight, other.m_offset)) + return *this; + + int otherEffectiveRight = otherRight + other.m_offset; + + switch (other.m_kind) { + case Equal: + // Return a version of *this that is Equal to other's constant. + return Relationship(m_left, m_right, Equal, otherEffectiveRight - thisRight); + + case LessThan: + case GreaterThan: + ASSERT(m_kind == NotEqual); + // We could do smart things here. But we don't currently have an example of when it would be + // valuable. Note that you have to be careful. We could refine NotEqual to LessThan, but only + // if the LessThan subsumes the NotEqual. 
+ return *this; + + case NotEqual: + RELEASE_ASSERT_NOT_REACHED(); + return Relationship(); + } + + RELEASE_ASSERT_NOT_REACHED(); + return Relationship(); + } + + int minValueOfLeft() const + { + if (m_left->isInt32Constant()) + return m_left->asInt32(); + + if (m_kind == LessThan || m_kind == NotEqual) + return std::numeric_limits<int>::min(); + + int minRightValue = std::numeric_limits<int>::min(); + if (m_right->isInt32Constant()) + minRightValue = m_right->asInt32(); + + if (m_kind == GreaterThan) + return clampedSum(minRightValue, m_offset, 1); + ASSERT(m_kind == Equal); + return clampedSum(minRightValue, m_offset); + } + + int maxValueOfLeft() const + { + if (m_left->isInt32Constant()) + return m_left->asInt32(); + + if (m_kind == GreaterThan || m_kind == NotEqual) + return std::numeric_limits<int>::max(); + + int maxRightValue = std::numeric_limits<int>::max(); + if (m_right->isInt32Constant()) + maxRightValue = m_right->asInt32(); + + if (m_kind == LessThan) + return clampedSum(maxRightValue, m_offset, -1); + ASSERT(m_kind == Equal); + return clampedSum(maxRightValue, m_offset); + } + + void dump(PrintStream& out) const + { + // This prints out the relationship without any whitespace, like @x<@y+42. This + // optimizes for the clarity of a list of relationships. It's easier to read something + // like [@1<@2+3, @4==@5-6] than it would be if there was whitespace inside the + // relationships. + + if (!*this) { + out.print("null"); + return; + } + + out.print(m_left); + switch (m_kind) { + case LessThan: + out.print("<"); + break; + case Equal: + out.print("=="); + break; + case NotEqual: + out.print("!="); + break; + case GreaterThan: + out.print(">"); + break; + } + out.print(m_right); + if (m_offset > 0) + out.print("+", m_offset); + else if (m_offset < 0) + out.print("-", -static_cast<int64_t>(m_offset)); + } + +private: + Relationship mergeImpl(const Relationship& other) const + { + ASSERT(sameNodesAs(other)); + ASSERT(m_kind != GreaterThan); + ASSERT(other.m_kind != GreaterThan); + ASSERT(*this != other); + + // The purpose of this method is to guarantee that: + // + // - We avoid having more than one Relationship over the same two nodes. Therefore, if + // the merge could be expressed as two Relationships, we prefer to instead pick the + // less precise single Relationship form even if that means TOP. + // + // - If the difference between two Relationships is just the m_offset, then we create a + // Relationship that has an offset of -1, 0, or 1. This is an essential convergence + // hack. We need -1 and 1 to support <= and >=. + + // From here we can assume that the two relationships are not identical. Usually we use + // this to assume that we have different offsets anytime that everything but the offset + // is identical. + + if (m_kind == NotEqual) { + if (other.m_kind == NotEqual) + return Relationship(); // Different offsets, so tautology. + + if (other.m_kind == Equal) { + if (m_offset != other.m_offset) { + // Saying that you might be B when you've already said that you're anything + // but A, where A and B are different, is a tautology. You could just say + // that you're anything but A. Adding "(a == b + 1)" to "(a != b + 5)" has + // no value. + return *this; + } + // Otherwise, same offsets: we're saying that you're either A or you're not + // equal to A. 
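+                // That disjunction covers every possible value, so the merged claim is a
+                // tautology, i.e. TOP.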
+ + return Relationship(); + } + + RELEASE_ASSERT(other.m_kind == LessThan); + // We have these claims, and we're merging them: + // @a != @b + C + // @a < @b + D + // + // If we have C == D, then the merge is clearly just the NotEqual. + // If we have C < D, then the merge is a tautology. + // If we have C > D, then we could keep both claims, but we are cheap, so we + // don't. We just use the NotEqual. + + if (m_offset < other.m_offset) + return Relationship(); + + return *this; + } + + if (m_kind == LessThan) { + if (other.m_kind == LessThan) { + // Figure out what offset to select to merge them. The appropriate offsets are + // -1, 0, or 1. + + // First figure out what offset we'd like to use. + int bestOffset = std::max(m_offset, other.m_offset); + + // We have something like @a < @b + 2. We can't represent this under the + // -1,0,1 rule. + if (isGeneralOffset(bestOffset)) + return Relationship(m_left, m_right, LessThan, std::max(bestOffset, -1)); + + return Relationship(); + } + + // The only thing left is Equal. We would have eliminated the GreaterThan's, and + // if we merge LessThan and NotEqual, the NotEqual always comes first. + RELEASE_ASSERT(other.m_kind == Equal); + + // This is the really interesting case. We have: + // + // @a < @b + C + // + // and: + // + // @a == @b + D + // + // Therefore we'd like to return: + // + // @a < @b + max(C, D + 1) + + int bestOffset = std::max(m_offset, other.m_offset + 1); + + // We have something like @a < @b + 2. We can't do it. + if (isGeneralOffset(bestOffset)) + return Relationship(m_left, m_right, LessThan, std::max(bestOffset, -1)); + + return Relationship(); + } + + // The only thing left is Equal, since we would have gotten rid of the GreaterThan's. + RELEASE_ASSERT(m_kind == Equal); + + // We would never see NotEqual, because those always come first. We would never + // see GreaterThan, because we would have eliminated those. We would never see + // LessThan, because those always come first. + + RELEASE_ASSERT(other.m_kind == Equal); + // We have @a == @b + C and @a == @b + D, where C != D. Turn this into some + // inequality that involves a constant that is -1,0,1. Note that we will never have + // lessThan and greaterThan because the constants are constrained to -1,0,1. The only + // way for both of them to be valid is a<b+1 and a>b-1, but then we would have said + // a==b. + + Relationship lessThan; + Relationship greaterThan; + + int lessThanEqOffset = std::max(m_offset, other.m_offset); + if (lessThanEqOffset >= -2 && lessThanEqOffset <= 0) { + lessThan = Relationship( + m_left, other.m_right, LessThan, lessThanEqOffset + 1); + + ASSERT(isGeneralOffset(lessThan.offset())); + } + + int greaterThanEqOffset = std::min(m_offset, other.m_offset); + if (greaterThanEqOffset >= 0 && greaterThanEqOffset <= 2) { + greaterThan = Relationship( + m_left, other.m_right, GreaterThan, greaterThanEqOffset - 1); + + ASSERT(isGeneralOffset(greaterThan.offset())); + } + + if (lessThan) { + // Both relationships cannot be valid; see above. + RELEASE_ASSERT(!greaterThan); + + return lessThan; + } + + return greaterThan; + } + + template<typename Functor> + void mergeConstantsImpl(const Relationship& other, const Functor& functor) const + { + ASSERT(m_left == other.m_left); + + // Only deal with constant right. + if (!m_right->isInt32Constant() || !other.m_right->isInt32Constant()) + return; + + // What follows is a fairly conservative merge. 
We could tune this phase to come up with + // all possible inequalities between variables and constants, but we focus mainly on cheap + // cases for now. + + // Here are some of the the arrangements we can merge usefully assuming @c < @d: + // + // @x == @c || @x == @d => @x >= c && @x <= @d + // @x >= @c || @x <= @d => TOP + // @x == @c || @x != @d => @x != @d + + int thisRight = m_right->asInt32(); + int otherRight = other.m_right->asInt32(); + + // Ignore funny business. + if (sumOverflows<int>(thisRight, m_offset)) + return; + if (sumOverflows<int>(otherRight, other.m_offset)) + return; + + int thisEffectiveRight = thisRight + m_offset; + int otherEffectiveRight = otherRight + other.m_offset; + + auto makeUpper = [&] (int64_t upper) { + if (upper <= thisRight) { + // We want m_right + offset to be equal to upper. Hence we want offset to cancel + // with m_right. But there's more to it, since we want +1 to turn the LessThan into + // a LessThanOrEqual, and we want to make sure we don't end up with non-general + // offsets. + int offset = static_cast<int>(std::max( + static_cast<int64_t>(1) + upper - static_cast<int64_t>(thisRight), + static_cast<int64_t>(-1))); + functor(Relationship(m_left, m_right, LessThan, offset)); + } + if (upper <= otherRight) { + int offset = static_cast<int>(std::max( + static_cast<int64_t>(1) + upper - static_cast<int64_t>(otherRight), + static_cast<int64_t>(-1))); + functor(Relationship(m_left, other.m_right, LessThan, offset)); + } + }; + auto makeLower = [&] (int64_t lower) { + if (lower >= thisRight) { + // We want m_right + offset to be equal to lower. Hence we want offset to cancel with + // m_right. But there's more to it, since we want -1 to turn the GreaterThan into a + // GreaterThanOrEqual, and we want to make sure we don't end up with non-general + // offsets. + int offset = static_cast<int>(std::min( + static_cast<int64_t>(-1) + lower - static_cast<int64_t>(thisRight), + static_cast<int64_t>(1))); + functor(Relationship(m_left, m_right, GreaterThan, offset)); + } + if (lower >= otherRight) { + int offset = static_cast<int>(std::min( + static_cast<int64_t>(-1) + lower - static_cast<int64_t>(otherRight), + static_cast<int64_t>(1))); + functor(Relationship(m_left, other.m_right, GreaterThan, offset)); + } + }; + + switch (m_kind) { + case Equal: { + switch (other.m_kind) { + case Equal: { + if (thisEffectiveRight == otherEffectiveRight) { + // This probably won't arise often. We can keep whichever relationship is general. + if (isGeneralOffset(m_offset)) + functor(*this); + if (isGeneralOffset(other.m_offset)) + functor(other); + return; + } + + // What follows is the only case where a merge will create more rules than what it + // started with. This is fine for convergence because the LessThan/GreaterThan + // rules that this creates are general (i.e. have small offsets) and they never + // spawn more rules upon subsequent merging. + + makeUpper(std::max(thisEffectiveRight, otherEffectiveRight)); + makeLower(std::min(thisEffectiveRight, otherEffectiveRight)); + return; + } + + case LessThan: { + // Either the LessThan condition subsumes the equality, or the LessThan condition + // and equality merge together to create a looser LessThan condition. + + // This is @x == thisEffectiveRight + // Other is: @x < otherEffectiveRight + + // We want to create @x <= upper. Figure out the value of upper. 
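+                // For example, merging @x == @c (this, with @c holding 5) with @x < @d (other,
+                // with @d holding 10) gives upper = max(5, 10 - 1) = 9; makeUpper() then emits
+                // @x < @d + 0, i.e. the desired @x <= 9, using only a general (-1..1) offset.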
+ makeUpper(std::max( + static_cast<int64_t>(thisEffectiveRight), + static_cast<int64_t>(otherEffectiveRight) - 1)); + return; + } + + case GreaterThan: { + // Opposite of the LessThan case, above. + + // This is: @x == thisEffectiveRight + // Other is: @x > otherEffectiveRight + + makeLower(std::min( + static_cast<int64_t>(thisEffectiveRight), + static_cast<int64_t>(otherEffectiveRight) + 1)); + return; + } + + case NotEqual: { + // We keep the NotEqual so long as it doesn't contradict our Equal. + if (otherEffectiveRight == thisEffectiveRight) + return; + + // But, we only keep the NotEqual if it is general. This simplifies reasoning about + // convergence: merging never introduces a new rule unless that rule is general. + if (!isGeneralOffset(other.m_offset)) + return; + + functor(other); + return; + } } + + RELEASE_ASSERT_NOT_REACHED(); + return; + } + + case LessThan: { + switch (other.m_kind) { + case Equal: { + other.mergeConstantsImpl(*this, functor); + return; + } + + case LessThan: { + makeUpper(std::max( + static_cast<int64_t>(thisEffectiveRight) - 1, + static_cast<int64_t>(otherEffectiveRight) - 1)); + return; + } + + case GreaterThan: { + // We have a claim that @x > @c || @x < @d. If @d > @c, this is the tautology. If + // @d <= @c, it's sort of uninteresting. Just ignore this. + return; + } + + case NotEqual: { + // We have a claim that @x < @c || @x != @d. This isn't interesting. + return; + } } + + RELEASE_ASSERT_NOT_REACHED(); + return; + } + + case GreaterThan: { + switch (other.m_kind) { + case Equal: { + other.mergeConstantsImpl(*this, functor); + return; + } + + case LessThan: { + // Not interesting, see above. + return; + } + + case GreaterThan: { + makeLower(std::min( + static_cast<int64_t>(thisEffectiveRight) + 1, + static_cast<int64_t>(otherEffectiveRight) + 1)); + return; + } + + case NotEqual: { + // Not interesting, see above. + return; + } } + + RELEASE_ASSERT_NOT_REACHED(); + return; + } + + case NotEqual: { + if (other.m_kind == Equal) + other.mergeConstantsImpl(*this, functor); + return; + } } + + RELEASE_ASSERT_NOT_REACHED(); + } + + Node* m_left; + Node* m_right; + Kind m_kind; + int m_offset; // This offset can be arbitrarily large. +}; + +typedef HashMap<Node*, Vector<Relationship>> RelationshipMap; + +class IntegerRangeOptimizationPhase : public Phase { +public: + IntegerRangeOptimizationPhase(Graph& graph) + : Phase(graph, "integer range optimization") + , m_zero(nullptr) + , m_relationshipsAtHead(graph) + , m_insertionSet(graph) + { + } + + bool run() + { + ASSERT(m_graph.m_form == SSA); + + // Before we do anything, make sure that we have a zero constant at the top. + for (Node* node : *m_graph.block(0)) { + if (node->isInt32Constant() && !node->asInt32()) { + m_zero = node; + break; + } + } + if (!m_zero) { + m_zero = m_insertionSet.insertConstant(0, m_graph.block(0)->at(0)->origin, jsNumber(0)); + m_insertionSet.execute(m_graph.block(0)); + } + + if (verbose) { + dataLog("Graph before integer range optimization:\n"); + m_graph.dump(); + } + + // This performs a fixpoint over the blocks in reverse post-order. Logically, we + // maintain a list of relationships at each point in the program. The list should be + // read as an intersection. For example if we have {rel1, rel2, ..., relN}, you should + // read this as: + // + // TOP && rel1 && rel2 && ... 
&& relN
+        //
+        // This allows us to express things like:
+        //
+        //     @a > @b - 42 && @a < @b + 25
+        //
+        // But not things like:
+        //
+        //     @a < @b - 42 || @a > @b + 25
+        //
+        // We merge two lists by merging each relationship in one list with each relationship
+        // in the other list. Merging two relationships will yield a relationship list; as with
+        // all such lists it is an intersection. Merging relationships over different variables
+        // always yields the empty list (i.e. TOP). This merge style is sound because if we
+        // have:
+        //
+        //     (A && B && C) || (D && E && F)
+        //
+        // Then a valid merge is just one that will return true if A, B, C are all true, or
+        // that will return true if D, E, F are all true. Our merge style essentially does:
+        //
+        //     (A || D) && (A || E) && (A || F) && (B || D) && (B || E) && (B || F) &&
+        //         (C || D) && (C || E) && (C || F)
+        //
+        // If A && B && C is true, then this returns true. If D && E && F is true, this also
+        // returns true.
+        //
+        // While this appears at first like a kind of expression explosion, in practice it
+        // isn't. The code that handles this knows that the merge of two relationships over
+        // different variables is TOP (i.e. the empty list). For example, if A above is @a < @b
+        // and B above is @c > @d, where @a, @b, @c, and @d are different nodes, the merge will
+        // yield nothing. In fact, the merge algorithm will skip such merges entirely because
+        // the relationship lists are actually keyed by node.
+        //
+        // Note that it's always safe to drop any relationship from the relationship list.
+        // This merely increases the likelihood of the "expression" yielding true, i.e. being
+        // closer to TOP. Optimizations are only performed if we can establish that the
+        // expression implied by the relationship list is false for all of those cases where
+        // some check would have failed.
+        //
+        // There is no notion of BOTTOM because we treat blocks that haven't had their
+        // state-at-head set as a special case: we just transfer all live relationships to such
+        // a block. After the head of a block is set, we perform the merging as above. In all
+        // other places where we would ordinarily need BOTTOM, we approximate it by having some
+        // non-BOTTOM relationship.
+
+        BlockList postOrder = m_graph.blocksInPostOrder();
+
+        // This loop analyzes the IR to give us m_relationshipsAtHead for each block. This
+        // may reexecute blocks many times, but it is guaranteed to converge. The state of
+        // the relationshipsAtHead over any pair of nodes converges monotonically towards the
+        // TOP relationship (i.e. no relationships in the relationship list). The merge rule
+        // between the current relationshipsAtHead and the relationships being propagated
+        // from a predecessor ensures monotonicity by converting disagreements into one of a
+        // small set of "general" relationships. There are 12 such relationships, plus TOP. See
+        // the comment above Relationship::merge() for details.
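+        // As a concrete example of what this fixpoint computes: for code shaped like
+        //
+        //     for (var i = 0; i < array.length; ++i) array[i];
+        //
+        // the relationships at the head of the loop body converge to something like
+        // @i > @zero - 1 (i.e. @i >= 0) and @i < @length + 0, which is exactly what the
+        // transformation loop further down needs in order to remove the CheckInBounds on
+        // array[i] and the overflow check on ++i.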
+ bool changed = true; + while (changed) { + changed = false; + for (unsigned postOrderIndex = postOrder.size(); postOrderIndex--;) { + BasicBlock* block = postOrder[postOrderIndex]; + DFG_ASSERT( + m_graph, nullptr, + block == m_graph.block(0) || m_seenBlocks.contains(block)); + + m_relationships = m_relationshipsAtHead[block]; + + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + Node* node = block->at(nodeIndex); + if (verbose) + dataLog("Analysis: at ", node, ": ", listDump(sortedRelationships()), "\n"); + executeNode(node); + } + + // Now comes perhaps the most important piece of cleverness: if we Branch, and + // the predicate involves some relation over integers, we propagate different + // information to the taken and notTaken paths. This handles: + // - Branch on int32. + // - Branch on LogicalNot on int32. + // - Branch on compare on int32's. + // - Branch on LogicalNot of compare on int32's. + Node* terminal = block->terminal(); + bool alreadyMerged = false; + if (terminal->op() == Branch) { + Relationship relationshipForTrue; + BranchData* branchData = terminal->branchData(); + + bool invert = false; + if (terminal->child1()->op() == LogicalNot) { + terminal = terminal->child1().node(); + invert = true; + } + + if (terminal->child1().useKind() == Int32Use) { + relationshipForTrue = Relationship::safeCreate( + terminal->child1().node(), m_zero, Relationship::NotEqual, 0); + } else { + Node* compare = terminal->child1().node(); + switch (compare->op()) { + case CompareEq: + case CompareStrictEq: + case CompareLess: + case CompareLessEq: + case CompareGreater: + case CompareGreaterEq: { + if (!compare->isBinaryUseKind(Int32Use)) + break; + + switch (compare->op()) { + case CompareEq: + case CompareStrictEq: + relationshipForTrue = Relationship::safeCreate( + compare->child1().node(), compare->child2().node(), + Relationship::Equal, 0); + break; + case CompareLess: + relationshipForTrue = Relationship::safeCreate( + compare->child1().node(), compare->child2().node(), + Relationship::LessThan, 0); + break; + case CompareLessEq: + relationshipForTrue = Relationship::safeCreate( + compare->child1().node(), compare->child2().node(), + Relationship::LessThan, 1); + break; + case CompareGreater: + relationshipForTrue = Relationship::safeCreate( + compare->child1().node(), compare->child2().node(), + Relationship::GreaterThan, 0); + break; + case CompareGreaterEq: + relationshipForTrue = Relationship::safeCreate( + compare->child1().node(), compare->child2().node(), + Relationship::GreaterThan, -1); + break; + default: + DFG_CRASH(m_graph, compare, "Invalid comparison node type"); + break; + } + break; + } + + default: + break; + } + } + + if (invert) + relationshipForTrue = relationshipForTrue.inverse(); + + if (relationshipForTrue) { + RelationshipMap forTrue = m_relationships; + RelationshipMap forFalse = m_relationships; + + if (verbose) + dataLog("Dealing with true:\n"); + setRelationship(forTrue, relationshipForTrue); + if (Relationship relationshipForFalse = relationshipForTrue.inverse()) { + if (verbose) + dataLog("Dealing with false:\n"); + setRelationship(forFalse, relationshipForFalse); + } + + changed |= mergeTo(forTrue, branchData->taken.block); + changed |= mergeTo(forFalse, branchData->notTaken.block); + alreadyMerged = true; + } + } + + if (!alreadyMerged) { + for (BasicBlock* successor : block->successors()) + changed |= mergeTo(m_relationships, successor); + } + } + } + + // Now we transform the code based on the results computed in the previous 
loop. + changed = false; + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + m_relationships = m_relationshipsAtHead[block]; + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + Node* node = block->at(nodeIndex); + if (verbose) + dataLog("Transformation: at ", node, ": ", listDump(sortedRelationships()), "\n"); + + // This ends up being pretty awkward to write because we need to decide if we + // optimize by using the relationships before the operation, but we need to + // call executeNode() before we optimize. + switch (node->op()) { + case ArithAbs: { + if (node->child1().useKind() != Int32Use) + break; + + auto iter = m_relationships.find(node->child1().node()); + if (iter == m_relationships.end()) + break; + + int minValue = std::numeric_limits<int>::min(); + int maxValue = std::numeric_limits<int>::max(); + for (Relationship relationship : iter->value) { + minValue = std::max(minValue, relationship.minValueOfLeft()); + maxValue = std::min(maxValue, relationship.maxValueOfLeft()); + } + + executeNode(block->at(nodeIndex)); + + if (minValue >= 0) { + node->convertToIdentityOn(node->child1().node()); + changed = true; + break; + } + if (maxValue <= 0) { + node->convertToArithNegate(); + if (minValue > std::numeric_limits<int>::min()) + node->setArithMode(Arith::Unchecked); + changed = true; + break; + } + if (minValue > std::numeric_limits<int>::min()) { + node->setArithMode(Arith::Unchecked); + changed = true; + break; + } + + break; + } + case ArithAdd: { + if (!node->isBinaryUseKind(Int32Use)) + break; + if (node->arithMode() != Arith::CheckOverflow) + break; + if (!node->child2()->isInt32Constant()) + break; + + auto iter = m_relationships.find(node->child1().node()); + if (iter == m_relationships.end()) + break; + + int minValue = std::numeric_limits<int>::min(); + int maxValue = std::numeric_limits<int>::max(); + for (Relationship relationship : iter->value) { + minValue = std::max(minValue, relationship.minValueOfLeft()); + maxValue = std::min(maxValue, relationship.maxValueOfLeft()); + } + + if (verbose) + dataLog(" minValue = ", minValue, ", maxValue = ", maxValue, "\n"); + + if (sumOverflows<int>(minValue, node->child2()->asInt32()) || + sumOverflows<int>(maxValue, node->child2()->asInt32())) + break; + + if (verbose) + dataLog(" It's in bounds.\n"); + + executeNode(block->at(nodeIndex)); + node->setArithMode(Arith::Unchecked); + changed = true; + break; + } + + case CheckInBounds: { + auto iter = m_relationships.find(node->child1().node()); + if (iter == m_relationships.end()) + break; + + bool nonNegative = false; + bool lessThanLength = false; + for (Relationship relationship : iter->value) { + if (relationship.minValueOfLeft() >= 0) + nonNegative = true; + + if (relationship.right() == node->child2()) { + if (relationship.kind() == Relationship::Equal + && relationship.offset() < 0) + lessThanLength = true; + + if (relationship.kind() == Relationship::LessThan + && relationship.offset() <= 0) + lessThanLength = true; + } + } + + if (nonNegative && lessThanLength) { + executeNode(block->at(nodeIndex)); + node->remove(); + changed = true; + } + break; + } + + case GetByVal: { + if (node->arrayMode().type() != Array::Undecided) + break; + + auto iter = m_relationships.find(node->child2().node()); + if (iter == m_relationships.end()) + break; + + int minValue = std::numeric_limits<int>::min(); + for (Relationship relationship : iter->value) + minValue = std::max(minValue, relationship.minValueOfLeft()); + + if (minValue < 0) + break; + + 
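+                    // At this point the index is proven non-negative. An Array::Undecided
+                    // access is expected to have no indexed storage to read from, so the only
+                    // possible result is undefined, and the GetByVal can be folded to a
+                    // constant.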
executeNode(block->at(nodeIndex)); + m_graph.convertToConstant(node, jsUndefined()); + changed = true; + break; + } + + default: + break; + } + + executeNode(block->at(nodeIndex)); + } + } + + return changed; + } + +private: + void executeNode(Node* node) + { + switch (node->op()) { + case CheckInBounds: { + setRelationship(Relationship::safeCreate(node->child1().node(), node->child2().node(), Relationship::LessThan)); + setRelationship(Relationship::safeCreate(node->child1().node(), m_zero, Relationship::GreaterThan, -1)); + break; + } + + case ArithAbs: { + if (node->child1().useKind() != Int32Use) + break; + setRelationship(Relationship(node, m_zero, Relationship::GreaterThan, -1)); + break; + } + + case ArithAdd: { + // We're only interested in int32 additions and we currently only know how to + // handle the non-wrapping ones. + if (!node->isBinaryUseKind(Int32Use)) + break; + + // FIXME: We could handle the unchecked arithmetic case. We just do it don't right + // now. + if (node->arithMode() != Arith::CheckOverflow) + break; + + // Handle add: @value + constant. + if (!node->child2()->isInt32Constant()) + break; + + int offset = node->child2()->asInt32(); + + // We add a relationship for @add == @value + constant, and then we copy the + // relationships for @value. This gives us a one-deep view of @value's existing + // relationships, which matches the one-deep search in setRelationship(). + + setRelationship( + Relationship(node, node->child1().node(), Relationship::Equal, offset)); + + auto iter = m_relationships.find(node->child1().node()); + if (iter != m_relationships.end()) { + Vector<Relationship> toAdd; + for (Relationship relationship : iter->value) { + // We have: + // add: ArithAdd(@x, C) + // @x op @y + D + // + // The following certainly holds: + // @x == @add - C + // + // Which allows us to substitute: + // @add - C op @y + D + // + // And then carry the C over: + // @add op @y + D + C + + Relationship newRelationship = relationship; + ASSERT(newRelationship.left() == node->child1().node()); + + if (newRelationship.right() == node) + continue; + newRelationship.setLeft(node); + if (newRelationship.addToOffset(offset)) + toAdd.append(newRelationship); + } + for (Relationship relationship : toAdd) + setRelationship(relationship, 0); + } + + // Now we want to establish that both the input and the output of the addition are + // within a particular range of integers. + + if (offset > 0) { + // If we have "add: @value + 1" then we know that @value <= max - 1, i.e. that + // @value < max. + if (!sumOverflows<int>(std::numeric_limits<int>::max(), -offset, 1)) { + setRelationship( + Relationship::safeCreate( + node->child1().node(), m_zero, Relationship::LessThan, + std::numeric_limits<int>::max() - offset + 1), + 0); + } + + // If we have "add: @value + 1" then we know that @add >= min + 1, i.e. that + // @add > min. + if (!sumOverflows<int>(std::numeric_limits<int>::min(), offset, -1)) { + setRelationship( + Relationship( + node, m_zero, Relationship::GreaterThan, + std::numeric_limits<int>::min() + offset - 1), + 0); + } + } + + if (offset < 0 && offset != std::numeric_limits<int>::min()) { + // If we have "add: @value - 1" then we know that @value >= min + 1, i.e. that + // @value > min. 
+ if (!sumOverflows<int>(std::numeric_limits<int>::min(), offset, -1)) { + setRelationship( + Relationship::safeCreate( + node->child1().node(), m_zero, Relationship::GreaterThan, + std::numeric_limits<int>::min() + offset - 1), + 0); + } + + // If we have "add: @value + 1" then we know that @add <= max - 1, i.e. that + // @add < max. + if (!sumOverflows<int>(std::numeric_limits<int>::max(), -offset, 1)) { + setRelationship( + Relationship( + node, m_zero, Relationship::LessThan, + std::numeric_limits<int>::max() - offset + 1), + 0); + } + } + break; + } + + case GetArrayLength: { + setRelationship(Relationship(node, m_zero, Relationship::GreaterThan, -1)); + break; + } + + case Upsilon: { + setRelationship( + Relationship::safeCreate( + node->child1().node(), node->phi(), Relationship::Equal, 0)); + + auto iter = m_relationships.find(node->child1().node()); + if (iter != m_relationships.end()) { + Vector<Relationship> toAdd; + for (Relationship relationship : iter->value) { + Relationship newRelationship = relationship; + if (node->phi() == newRelationship.right()) + continue; + newRelationship.setLeft(node->phi()); + toAdd.append(newRelationship); + } + for (Relationship relationship : toAdd) + setRelationship(relationship); + } + break; + } + + default: + break; + } + } + + void setRelationship(Relationship relationship, unsigned timeToLive = 1) + { + setRelationship(m_relationships, relationship, timeToLive); + } + + void setRelationship( + RelationshipMap& relationshipMap, Relationship relationship, unsigned timeToLive = 1) + { + setOneSide(relationshipMap, relationship, timeToLive); + setOneSide(relationshipMap, relationship.flipped(), timeToLive); + } + + void setOneSide( + RelationshipMap& relationshipMap, Relationship relationship, unsigned timeToLive = 1) + { + if (!relationship) + return; + + if (verbose) + dataLog(" Setting: ", relationship, " (ttl = ", timeToLive, ")\n"); + + auto result = relationshipMap.add( + relationship.left(), Vector<Relationship>()); + Vector<Relationship>& relationships = result.iterator->value; + + if (relationship.right()->isInt32Constant()) { + // We want to do some work to refine relationships over constants. This is necessary because + // when we introduce a constant into the IR, we don't automatically create relationships + // between that constant and the other constants. That means that when we do introduce + // relationships between a non-constant and a constant, we need to check the other + // relationships between that non-constant and other constants to see if we can make some + // refinements. Possible constant statement filtrations: + // + // - @x == @c and @x != @d, where @c > @d: + // @x == @c and @x > @d + // + // but actually we are more aggressive: + // + // - @x == @c and @x op @d where @c == @d + k + // @x == @c and @x == @d + k + // + // And this is also possible: + // + // - @x > @c and @x != @d where @c == @d + k and k >= 0 + // + // @x > @c and @x > @d + k + // + // So, here's what we should do depending on the kind of relationship we're introducing: + // + // Equal constant: Find all LessThan, NotEqual, and GreaterThan constant operations and refine + // them to be Equal constant. Don't worry about contradictions. + // + // LessThan, GreaterThan constant: See if there is any Equal constant, and if so, refine to + // that. Otherwise, find all NotEqual constant operations and refine them to be LessThan or + // GreaterThan constant if possible. 
+            //
+            // NotEqual constant: See if there is any Equal constant, and if so, refine to that.
+            // Otherwise, see if there is any LessThan or GreaterThan constant operation, and if
+            // so, attempt to refine to that.
+            //
+            // Seems that the key thing is to have a filterConstant() operation that returns a refined
+            // version of *this based on other. The code here accomplishes this by using the vagueness
+            // index (Relationship::vagueness()) to first find less vague relationships and refine this one
+            // using them, and then find more vague relationships and refine those to this.
+
+            if (relationship.vagueness() != Relationship::minVagueness) {
+                // We're not minimally vague (maximally specific), so try to refine ourselves based on what
+                // we already know.
+                for (Relationship& otherRelationship : relationships) {
+                    if (otherRelationship.vagueness() < relationship.vagueness()
+                        && otherRelationship.right()->isInt32Constant()) {
+                        Relationship newRelationship = relationship.filterConstant(otherRelationship);
+                        if (verbose && newRelationship != relationship)
+                            dataLog(" Refined to: ", newRelationship, " based on ", otherRelationship, "\n");
+                        relationship = newRelationship;
+                    }
+                }
+            }
+
+            if (relationship.vagueness() != Relationship::maxVagueness) {
+                // We're not maximally vague (minimally specific), so try to refine other relationships
+                // based on this one.
+                for (Relationship& otherRelationship : relationships) {
+                    if (otherRelationship.vagueness() > relationship.vagueness()
+                        && otherRelationship.right()->isInt32Constant()) {
+                        Relationship newRelationship = otherRelationship.filterConstant(relationship);
+                        if (verbose && newRelationship != otherRelationship)
+                            dataLog(" Refined ", otherRelationship, " to: ", newRelationship, "\n");
+                        otherRelationship = newRelationship;
+                    }
+                }
+            }
+        }
+
+        Vector<Relationship> toAdd;
+        bool found = false;
+        for (Relationship& otherRelationship : relationships) {
+            if (otherRelationship.sameNodesAs(relationship)) {
+                if (Relationship filtered = otherRelationship.filter(relationship)) {
+                    ASSERT(filtered.left() == relationship.left());
+                    otherRelationship = filtered;
+                    found = true;
+                }
+            }
+
+            // FIXME: Also add filtration over statements about constants. For example, if we have
+            // @x == @c and @x != @d, where @d > @c, then we want to turn @x != @d into @x < @d.
+
+            if (timeToLive && otherRelationship.kind() == Relationship::Equal) {
+                if (verbose)
+                    dataLog(" Considering: ", otherRelationship, "\n");
+
+                // We have:
+                //     @a op @b + C
+                //     @a == @c + D
+                //
+                // This implies:
+                //     @c + D op @b + C
+                //     @c op @b + C - D
+                //
+                // Where: @a == relationship.left(), @b == relationship.right(),
+                //        @a == otherRelationship.left(), @c == otherRelationship.right().
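+                // For example, if the relationship being set is @a < @b + 7 and we already
+                // know @a == @c + 2, then @c + 2 < @b + 7, i.e. @c < @b + 5, which is what
+                // gets built below and re-added with one less timeToLive.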
+ + if (otherRelationship.offset() != std::numeric_limits<int>::min()) { + Relationship newRelationship = relationship; + if (newRelationship.right() != otherRelationship.right()) { + newRelationship.setLeft(otherRelationship.right()); + if (newRelationship.addToOffset(-otherRelationship.offset())) + toAdd.append(newRelationship); + } + } + } + } + + if (!found) + relationships.append(relationship); + + for (Relationship anotherRelationship : toAdd) { + ASSERT(timeToLive); + setOneSide(relationshipMap, anotherRelationship, timeToLive - 1); + } + } + + bool mergeTo(RelationshipMap& relationshipMap, BasicBlock* target) + { + if (verbose) { + dataLog("Merging to ", pointerDump(target), ":\n"); + dataLog(" Incoming: ", listDump(sortedRelationships(relationshipMap)), "\n"); + dataLog(" At head: ", listDump(sortedRelationships(m_relationshipsAtHead[target])), "\n"); + } + + if (m_seenBlocks.add(target)) { + // This is a new block. We copy subject to liveness pruning. + auto isLive = [&] (Node* node) { + if (node == m_zero) + return true; + return target->ssa->liveAtHead.contains(node); + }; + + for (auto& entry : relationshipMap) { + if (!isLive(entry.key)) + continue; + + Vector<Relationship> values; + for (Relationship relationship : entry.value) { + ASSERT(relationship.left() == entry.key); + if (isLive(relationship.right())) { + if (verbose) + dataLog(" Propagating ", relationship, "\n"); + values.append(relationship); + } + } + + std::sort(values.begin(), values.end()); + m_relationshipsAtHead[target].add(entry.key, values); + } + return true; + } + + // Merge by intersecting. We have no notion of BOTTOM, so we use the omission of + // relationships for a pair of nodes to mean TOP. The reason why we don't need BOTTOM + // is (1) we just overapproximate contradictions and (2) a value never having been + // assigned would only happen if we have not processed the node's predecessor. We + // shouldn't process blocks until we have processed the block's predecessor because we + // are using reverse postorder. + Vector<Node*> toRemove; + bool changed = false; + for (auto& entry : m_relationshipsAtHead[target]) { + auto iter = relationshipMap.find(entry.key); + if (iter == relationshipMap.end()) { + toRemove.append(entry.key); + changed = true; + continue; + } + + Vector<Relationship> mergedRelationships; + for (Relationship targetRelationship : entry.value) { + for (Relationship sourceRelationship : iter->value) { + if (verbose) + dataLog(" Merging ", targetRelationship, " and ", sourceRelationship, ":\n"); + targetRelationship.merge( + sourceRelationship, + [&] (Relationship newRelationship) { + if (verbose) + dataLog(" Got ", newRelationship, "\n"); + + // We need to filter() to avoid exponential explosion of identical + // relationships. We do this here to avoid making setOneSide() do + // more work, since we expect setOneSide() will be called more + // frequently. Here's an example. At some point someone might start + // with two relationships like @a > @b - C and @a < @b + D. Then + // someone does a setRelationship() passing something that turns + // both of these into @a == @b. Now we have @a == @b duplicated. + // Let's say that this duplicate @a == @b ends up at the head of a + // loop. If we didn't have this rule, then the loop would propagate + // duplicate @a == @b's onto the existing duplicate @a == @b's. + // There would be four pairs of @a == @b, each of which would + // create a new @a == @b. 
Now we'd have four of these duplicates + // and the next time around we'd have 8, then 16, etc. We avoid + // this here by doing this filtration. That might be a bit of + // overkill, since it's probably just the identical duplicate + // relationship case we want' to avoid. But, I'll keep this until + // we have evidence that this is a performance problem. Remember - + // we are already dealing with a list that is pruned down to + // relationships with identical left operand. It shouldn't be a + // large list. + bool found = false; + for (Relationship& existingRelationship : mergedRelationships) { + if (existingRelationship.sameNodesAs(newRelationship)) { + Relationship filtered = + existingRelationship.filter(newRelationship); + if (filtered) { + existingRelationship = filtered; + found = true; + break; + } + } + } + + if (!found) + mergedRelationships.append(newRelationship); + }); + } + } + std::sort(mergedRelationships.begin(), mergedRelationships.end()); + if (entry.value == mergedRelationships) + continue; + + entry.value = mergedRelationships; + changed = true; + } + for (Node* node : toRemove) + m_relationshipsAtHead[target].remove(node); + + return changed; + } + + Vector<Relationship> sortedRelationships(const RelationshipMap& relationships) + { + Vector<Relationship> result; + for (auto& entry : relationships) + result.appendVector(entry.value); + std::sort(result.begin(), result.end()); + return result; + } + + Vector<Relationship> sortedRelationships() + { + return sortedRelationships(m_relationships); + } + + Node* m_zero; + RelationshipMap m_relationships; + BlockSet m_seenBlocks; + BlockMap<RelationshipMap> m_relationshipsAtHead; + InsertionSet m_insertionSet; +}; + +} // anonymous namespace + +bool performIntegerRangeOptimization(Graph& graph) +{ + SamplingRegion samplingRegion("DFG Integer Range Optimization Phase"); + return runPhase<IntegerRangeOptimizationPhase>(graph); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGIntegerRangeOptimizationPhase.h b/Source/JavaScriptCore/dfg/DFGIntegerRangeOptimizationPhase.h new file mode 100644 index 000000000..fe0615e39 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGIntegerRangeOptimizationPhase.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGIntegerRangeOptimizationPhase_h +#define DFGIntegerRangeOptimizationPhase_h + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +class Graph; + +// Removes overflow checks and out-of-bounds checks by doing a forward flow analysis to prove +// inequalities. It will remove the overflow and bounds checks in loops like: +// +// for (var i = 0; i < array.length; ++i) array[i]; + +bool performIntegerRangeOptimization(Graph&); + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGIntegerRangeOptimizationPhase_h + diff --git a/Source/JavaScriptCore/dfg/DFGInvalidationPointInjectionPhase.cpp b/Source/JavaScriptCore/dfg/DFGInvalidationPointInjectionPhase.cpp index d71a7cbda..9228001cc 100644 --- a/Source/JavaScriptCore/dfg/DFGInvalidationPointInjectionPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGInvalidationPointInjectionPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,11 +28,12 @@ #if ENABLE(DFG_JIT) +#include "DFGBlockSetInlines.h" #include "DFGClobberize.h" #include "DFGGraph.h" #include "DFGInsertionSet.h" #include "DFGPhase.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -50,7 +51,7 @@ public: { ASSERT(m_graph.m_form != SSA); - BitVector blocksThatNeedInvalidationPoints; + BlockSet blocksThatNeedInvalidationPoints; for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { BasicBlock* block = m_graph.block(blockIndex); @@ -63,17 +64,13 @@ public: // Note: this assumes that control flow occurs at bytecode instruction boundaries. 
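The integer-range phase whose interface appears above builds on facts of the form "left < right + offset"; setOneSide() and mergeTo() earlier in the diff manipulate exactly such facts. Below is a minimal, hypothetical sketch of that offset arithmetic using stand-in types, not the real DFG Relationship/Node classes, just to make the bookkeeping concrete.

#include <climits>
#include <string>

// Hypothetical stand-in for a fact of the form "left < right + offset".
struct Fact {
    std::string left;
    std::string right;
    int offset;
};

// If we know "a < b + c1" and "b < d + c2", we may conclude "a < d + (c1 + c2)",
// but only when the summed offset does not overflow; the real code likewise
// refuses to build a relationship when addToOffset() fails.
bool combine(const Fact& ab, const Fact& bd, Fact& out)
{
    if (ab.right != bd.left)
        return false;
    long long sum = static_cast<long long>(ab.offset) + static_cast<long long>(bd.offset);
    if (sum < INT_MIN || sum > INT_MAX)
        return false;
    out = { ab.left, bd.right, static_cast<int>(sum) };
    return true;
}

The overflow guard matters because a fact with a wrapped-around offset would "prove" inequalities that do not hold, which is why producing a new relationship is conditional on the offset addition succeeding.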
if (m_originThatHadFire.isSet()) { for (unsigned i = block->numSuccessors(); i--;) - blocksThatNeedInvalidationPoints.set(block->successor(i)->index); + blocksThatNeedInvalidationPoints.add(block->successor(i)); } m_insertionSet.execute(block); } - - for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { - if (!blocksThatNeedInvalidationPoints.get(blockIndex)) - continue; - - BasicBlock* block = m_graph.block(blockIndex); + + for (BasicBlock* block : blocksThatNeedInvalidationPoints.iterable(m_graph)) { insertInvalidationCheck(0, block->at(0)); m_insertionSet.execute(block); } @@ -84,18 +81,18 @@ public: private: void handle(unsigned nodeIndex, Node* node) { - if (m_originThatHadFire.isSet() && m_originThatHadFire != node->codeOrigin) { + if (m_originThatHadFire.isSet() && m_originThatHadFire != node->origin.forExit) { insertInvalidationCheck(nodeIndex, node); m_originThatHadFire = CodeOrigin(); } if (writesOverlap(m_graph, node, Watchpoint_fire)) - m_originThatHadFire = node->codeOrigin; + m_originThatHadFire = node->origin.forExit; } void insertInvalidationCheck(unsigned nodeIndex, Node* node) { - m_insertionSet.insertNode(nodeIndex, SpecNone, InvalidationPoint, node->codeOrigin); + m_insertionSet.insertNode(nodeIndex, SpecNone, InvalidationPoint, node->origin); } CodeOrigin m_originThatHadFire; diff --git a/Source/JavaScriptCore/dfg/DFGInvalidationPointInjectionPhase.h b/Source/JavaScriptCore/dfg/DFGInvalidationPointInjectionPhase.h index a135fdc3a..4c49cc96a 100644 --- a/Source/JavaScriptCore/dfg/DFGInvalidationPointInjectionPhase.h +++ b/Source/JavaScriptCore/dfg/DFGInvalidationPointInjectionPhase.h @@ -26,8 +26,6 @@ #ifndef DFGInvalidationPointInjectionPhase_h #define DFGInvalidationPointInjectionPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) namespace JSC { namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGJITCode.cpp b/Source/JavaScriptCore/dfg/DFGJITCode.cpp index c53653f8f..75549133c 100644 --- a/Source/JavaScriptCore/dfg/DFGJITCode.cpp +++ b/Source/JavaScriptCore/dfg/DFGJITCode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,11 +29,17 @@ #if ENABLE(DFG_JIT) #include "CodeBlock.h" +#include "JSCInlines.h" +#include "TrackedReferences.h" namespace JSC { namespace DFG { JITCode::JITCode() : DirectJITCode(DFGJIT) +#if ENABLE(FTL_JIT) + , osrEntryRetry(0) + , abandonOSREntry(false) +#endif // ENABLE(FTL_JIT) { } @@ -77,23 +83,40 @@ void JITCode::reconstruct( reconstruct(codeBlock, codeOrigin, streamIndex, recoveries); result = Operands<JSValue>(OperandsLike, recoveries); - for (size_t i = result.size(); i--;) { - int operand = result.operandForIndex(i); - - if (operandIsArgument(operand) - && !VirtualRegister(operand).toArgument() - && codeBlock->codeType() == FunctionCode - && codeBlock->specializationKind() == CodeForConstruct) { - // Ugh. If we're in a constructor, the 'this' argument may hold garbage. It will - // also never be used. It doesn't matter what we put into the value for this, - // but it has to be an actual value that can be grokked by subsequent DFG passes, - // so we sanitize it here by turning it into Undefined. 
- result[i] = jsUndefined(); - continue; - } - + for (size_t i = result.size(); i--;) result[i] = recoveries[i].recover(exec); +} + +RegisterSet JITCode::liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock* codeBlock, CallSiteIndex callSiteIndex) +{ + for (OSRExit& exit : osrExit) { + if (exit.isExceptionHandler() && exit.m_exceptionHandlerCallSiteIndex.bits() == callSiteIndex.bits()) { + Operands<ValueRecovery> valueRecoveries; + reconstruct(codeBlock, exit.m_codeOrigin, exit.m_streamIndex, valueRecoveries); + RegisterSet liveAtOSRExit; + for (size_t index = 0; index < valueRecoveries.size(); ++index) { + const ValueRecovery& recovery = valueRecoveries[index]; + if (recovery.isInRegisters()) { + if (recovery.isInGPR()) + liveAtOSRExit.set(recovery.gpr()); + else if (recovery.isInFPR()) + liveAtOSRExit.set(recovery.fpr()); +#if USE(JSVALUE32_64) + else if (recovery.isInJSValueRegs()) { + liveAtOSRExit.set(recovery.payloadGPR()); + liveAtOSRExit.set(recovery.tagGPR()); + } +#endif + else + RELEASE_ASSERT_NOT_REACHED(); + } + } + + return liveAtOSRExit; + } } + + return RegisterSet(); } #if ENABLE(FTL_JIT) @@ -156,6 +179,7 @@ void JITCode::setOptimizationThresholdBasedOnCompilationResult( switch (result) { case CompilationSuccessful: optimizeNextInvocation(codeBlock); + codeBlock->baselineVersion()->m_hasBeenCompiledWithFTL = true; return; case CompilationFailed: dontOptimizeAnytimeSoon(codeBlock); @@ -179,6 +203,30 @@ void JITCode::setOptimizationThresholdBasedOnCompilationResult( } #endif // ENABLE(FTL_JIT) +void JITCode::validateReferences(const TrackedReferences& trackedReferences) +{ + common.validateReferences(trackedReferences); + + for (OSREntryData& entry : osrEntry) { + for (unsigned i = entry.m_expectedValues.size(); i--;) + entry.m_expectedValues[i].validateReferences(trackedReferences); + } + + minifiedDFG.validateReferences(trackedReferences); +} + +Optional<CodeOrigin> JITCode::findPC(CodeBlock*, void* pc) +{ + for (OSRExit& exit : osrExit) { + if (ExecutableMemoryHandle* handle = exit.m_code.executableMemory()) { + if (handle->start() <= pc && pc < handle->end()) + return Optional<CodeOrigin>(exit.m_codeOriginForExitProfile); + } + } + + return Nullopt; +} + } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGJITCode.h b/Source/JavaScriptCore/dfg/DFGJITCode.h index 0e771e046..243e353a3 100644 --- a/Source/JavaScriptCore/dfg/DFGJITCode.h +++ b/Source/JavaScriptCore/dfg/DFGJITCode.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,10 +26,9 @@ #ifndef DFGJITCode_h #define DFGJITCode_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) +#include "CodeBlock.h" #include "CompilationResult.h" #include "DFGCommonData.h" #include "DFGMinifiedGraph.h" @@ -40,7 +39,11 @@ #include "JITCode.h" #include <wtf/SegmentedVector.h> -namespace JSC { namespace DFG { +namespace JSC { + +class TrackedReferences; + +namespace DFG { class JITCompiler; @@ -109,7 +112,20 @@ public: void setOptimizationThresholdBasedOnCompilationResult(CodeBlock*, CompilationResult); #endif // ENABLE(FTL_JIT) + void validateReferences(const TrackedReferences&) override; + void shrinkToFit(); + + RegisterSet liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock*, CallSiteIndex) override; +#if ENABLE(FTL_JIT) + CodeBlock* osrEntryBlock() { return m_osrEntryBlock.get(); } + void setOSREntryBlock(VM& vm, const JSCell* owner, CodeBlock* osrEntryBlock) { m_osrEntryBlock.set(vm, owner, osrEntryBlock); } + void clearOSREntryBlock() { m_osrEntryBlock.clear(); } +#endif + + static ptrdiff_t commonDataOffset() { return OBJECT_OFFSETOF(JITCode, common); } + + Optional<CodeOrigin> findPC(CodeBlock*, void* pc) override; private: friend class JITCompiler; // Allow JITCompiler to call setCodeRef(). @@ -122,8 +138,12 @@ public: DFG::VariableEventStream variableEventStream; DFG::MinifiedGraph minifiedDFG; #if ENABLE(FTL_JIT) - ExecutionCounter tierUpCounter; - RefPtr<CodeBlock> osrEntryBlock; + uint8_t nestedTriggerIsSet { 0 }; + uint8_t neverExecutedEntry { 1 }; + UpperTierExecutionCounter tierUpCounter; + WriteBarrier<CodeBlock> m_osrEntryBlock; + unsigned osrEntryRetry; + bool abandonOSREntry; #endif // ENABLE(FTL_JIT) }; diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp index 2934d2ba9..758f5cdd4 100644 --- a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp +++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -40,8 +40,10 @@ #include "DFGSpeculativeJIT.h" #include "DFGThunks.h" #include "JSCJSValueInlines.h" -#include "VM.h" #include "LinkBuffer.h" +#include "MaxFrameExtentForSlowPathCall.h" +#include "JSCInlines.h" +#include "VM.h" namespace JSC { namespace DFG { @@ -50,9 +52,10 @@ JITCompiler::JITCompiler(Graph& dfg) , m_graph(dfg) , m_jitCode(adoptRef(new JITCode())) , m_blockHeads(dfg.numBlocks()) + , m_pcToCodeOriginMapBuilder(dfg.m_vm) { - if (shouldShowDisassembly() || m_graph.m_vm.m_perBytecodeProfiler) - m_disassembler = adoptPtr(new Disassembler(dfg)); + if (shouldDumpDisassembly() || m_graph.m_vm.m_perBytecodeProfiler) + m_disassembler = std::make_unique<Disassembler>(dfg); } JITCompiler::~JITCompiler() @@ -83,6 +86,7 @@ void JITCompiler::linkOSRExits() failureJumps.link(this); else info.m_replacementDestination = label(); + jitAssertHasValidCallFrame(); store32(TrustedImm32(i), &vm()->osrExitIndex); exit.setPatchableCodeOffset(patchableJump()); @@ -92,15 +96,27 @@ void JITCompiler::linkOSRExits() void JITCompiler::compileEntry() { // This code currently matches the old JIT. 
In the function header we need to - // pop the return address (since we do not allow any recursion on the machine - // stack), and perform a fast stack check. + // save return address and call frame via the prologue and perform a fast stack check. // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292 // We'll need to convert the remaining cti_ style calls (specifically the stack // check) which will be dependent on stack layout. (We'd need to account for this in // both normal return code and when jumping to an exception handler). - preserveReturnAddressAfterCall(GPRInfo::regT2); - emitPutReturnPCToCallFrameHeader(GPRInfo::regT2); - emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock); + emitFunctionPrologue(); + emitPutToCallFrameHeader(m_codeBlock, JSStack::CodeBlock); +} + +void JITCompiler::compileSetupRegistersForEntry() +{ + emitSaveCalleeSaves(); + emitMaterializeTagCheckRegisters(); +} + +void JITCompiler::compileEntryExecutionFlag() +{ +#if ENABLE(FTL_JIT) + if (m_graph.m_plan.canTierUpAndOSREnter) + store8(TrustedImm32(0), &m_jitCode->neverExecutedEntry); +#endif // ENABLE(FTL_JIT) } void JITCompiler::compileBody() @@ -114,32 +130,44 @@ void JITCompiler::compileBody() void JITCompiler::compileExceptionHandlers() { - if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty()) - return; - - Jump doLookup; - if (!m_exceptionChecksWithCallFrameRollback.empty()) { m_exceptionChecksWithCallFrameRollback.link(this); - emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR0); - doLookup = jump(); + + copyCalleeSavesToVMCalleeSavesBuffer(); + + // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*). + move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); + addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister); + +#if CPU(X86) + // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer! + poke(GPRInfo::argumentGPR0); + poke(GPRInfo::argumentGPR1, 1); +#endif + m_calls.append(CallLinkRecord(call(), lookupExceptionHandlerFromCallerFrame)); + + jumpToExceptionHandler(); } - if (!m_exceptionChecks.empty()) + if (!m_exceptionChecks.empty()) { m_exceptionChecks.link(this); - // lookupExceptionHandler is passed one argument, the exec (the CallFrame*). - move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + copyCalleeSavesToVMCalleeSavesBuffer(); - if (doLookup.isSet()) - doLookup.link(this); + // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*). + move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); #if CPU(X86) - // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer! - poke(GPRInfo::argumentGPR0); + // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer! 
+ poke(GPRInfo::argumentGPR0); + poke(GPRInfo::argumentGPR1, 1); #endif - m_calls.append(CallLinkRecord(call(), lookupExceptionHandler)); - jumpToExceptionHandler(); + m_calls.append(CallLinkRecord(call(), lookupExceptionHandler)); + + jumpToExceptionHandler(); + } } void JITCompiler::link(LinkBuffer& linkBuffer) @@ -148,15 +176,18 @@ void JITCompiler::link(LinkBuffer& linkBuffer) m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount(); m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit(); - if (!m_graph.m_inlineCallFrames->isEmpty()) - m_jitCode->common.inlineCallFrames = m_graph.m_inlineCallFrames.release(); + if (!m_graph.m_plan.inlineCallFrames->isEmpty()) + m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames; - m_jitCode->common.machineCaptureStart = m_graph.m_machineCaptureStart; - m_jitCode->common.slowArguments = std::move(m_graph.m_slowArguments); +#if USE(JSVALUE32_64) + m_jitCode->common.doubleConstants = WTFMove(m_graph.m_doubleConstants); +#endif + + m_graph.registerFrozenValues(); BitVector usedJumpTables; - for (unsigned i = m_graph.m_switchData.size(); i--;) { - SwitchData& data = m_graph.m_switchData[i]; + for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) { + SwitchData& data = **iter; if (!data.didUseJumpTable) continue; @@ -167,14 +198,14 @@ void JITCompiler::link(LinkBuffer& linkBuffer) usedJumpTables.set(data.switchTableIndex); SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex); - table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough->index]); + table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]); table.ctiOffsets.grow(table.branchOffsets.size()); for (unsigned j = table.ctiOffsets.size(); j--;) table.ctiOffsets[j] = table.ctiDefault; for (unsigned j = data.cases.size(); j--;) { SwitchCase& myCase = data.cases[j]; - table.ctiOffsets[myCase.value.switchLookupValue() - table.min] = - linkBuffer.locationOf(m_blockHeads[myCase.target->index]); + table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] = + linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]); } } @@ -188,8 +219,8 @@ void JITCompiler::link(LinkBuffer& linkBuffer) // NOTE: we cannot clear string switch tables because (1) we're running concurrently // and we cannot deref StringImpl's and (2) it would be weird to deref those // StringImpl's since we refer to them. 
- for (unsigned i = m_graph.m_switchData.size(); i--;) { - SwitchData& data = m_graph.m_switchData[i]; + for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) { + SwitchData& data = **switchDataIter; if (!data.didUseJumpTable) continue; @@ -197,7 +228,7 @@ void JITCompiler::link(LinkBuffer& linkBuffer) continue; StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex); - table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough->index]); + table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]); StringJumpTable::StringOffsetTable::iterator iter; StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end(); for (iter = table.offsetTable.begin(); iter != end; ++iter) @@ -206,7 +237,7 @@ void JITCompiler::link(LinkBuffer& linkBuffer) SwitchCase& myCase = data.cases[j]; iter = table.offsetTable.find(myCase.value.stringImpl()); RELEASE_ASSERT(iter != end); - iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target->index]); + iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]); } } @@ -228,17 +259,13 @@ void JITCompiler::link(LinkBuffer& linkBuffer) info.patch.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label())); } - m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size()); for (unsigned i = 0; i < m_jsCalls.size(); ++i) { - CallLinkInfo& info = m_codeBlock->callLinkInfo(i); - info.callType = m_jsCalls[i].m_callType; - info.isDFG = true; - info.codeOrigin = m_jsCalls[i].m_codeOrigin; - linkBuffer.link(m_jsCalls[i].m_slowCall, FunctionPtr((m_vm->getCTIStub(info.callType == CallLinkInfo::Construct ? linkConstructThunkGenerator : linkCallThunkGenerator)).code().executableAddress())); - info.callReturnLocation = linkBuffer.locationOfNearCall(m_jsCalls[i].m_slowCall); - info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck); - info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall); - info.calleeGPR = static_cast<unsigned>(m_jsCalls[i].m_callee); + JSCallRecord& record = m_jsCalls[i]; + CallLinkInfo& info = *record.m_info; + linkBuffer.link(record.m_slowCall, FunctionPtr(m_vm->getCTIStub(linkCallThunkGenerator).code().executableAddress())); + info.setCallLocations(linkBuffer.locationOfNearCall(record.m_slowCall), + linkBuffer.locationOf(record.m_targetToCheck), + linkBuffer.locationOfNearCall(record.m_fastCall)); } MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator); @@ -266,9 +293,30 @@ void JITCompiler::link(LinkBuffer& linkBuffer) } } else ASSERT(!m_exitSiteLabels.size()); - + m_jitCode->common.compilation = m_graph.compilation(); + // Link new DFG exception handlers and remove baseline JIT handlers. + m_codeBlock->clearExceptionHandlers(); + for (unsigned i = 0; i < m_exceptionHandlerOSRExitCallSites.size(); i++) { + OSRExitCompilationInfo& info = m_exceptionHandlerOSRExitCallSites[i].exitInfo; + if (info.m_replacementDestination.isSet()) { + // If this is is *not* set, it means that we already jumped to the OSR exit in pure generated control flow. + // i.e, we explicitly emitted an exceptionCheck that we know will be caught in this machine frame. + // If this *is set*, it means we will be landing at this code location from genericUnwind from an + // exception thrown in a child call frame. 
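The handler rewiring here hinges on one detail: each catchable DFG call site gets its own handler-table entry covering exactly one CallSiteIndex, so genericUnwind can map a throwing child call frame back to the matching OSR-exit landing pad. A hedged sketch of just that index-to-range mapping, using a hypothetical helper rather than JSC API, is below; the start/end values correspond to the [callSite, callSite + 1) range built in the loop that follows.

// Hypothetical illustration of the one-call-site-wide handler ranges.
struct HandlerRange {
    unsigned start; // inclusive
    unsigned end;   // exclusive
};

HandlerRange handlerRangeForCallSite(unsigned callSiteBits)
{
    return { callSiteBits, callSiteBits + 1 };
}

bool rangeCoversCallSite(const HandlerRange& range, unsigned callSiteBits)
{
    return callSiteBits >= range.start && callSiteBits < range.end;
}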
+ CodeLocationLabel catchLabel = linkBuffer.locationOf(info.m_replacementDestination); + HandlerInfo newExceptionHandler = m_exceptionHandlerOSRExitCallSites[i].baselineExceptionHandler; + CallSiteIndex callSite = m_exceptionHandlerOSRExitCallSites[i].callSiteIndex; + newExceptionHandler.start = callSite.bits(); + newExceptionHandler.end = callSite.bits() + 1; + newExceptionHandler.nativeCode = catchLabel; + m_codeBlock->appendExceptionHandler(newExceptionHandler); + } + } + + if (m_pcToCodeOriginMapBuilder.didBuildMapping()) + m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), linkBuffer)); } void JITCompiler::compile() @@ -277,12 +325,35 @@ void JITCompiler::compile() setStartOfCode(); compileEntry(); - m_speculative = adoptPtr(new SpeculativeJIT(*this)); + m_speculative = std::make_unique<SpeculativeJIT>(*this); + + // Plant a check that sufficient space is available in the JSStack. + addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1); + Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1); + + addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister); + checkStackPointerAlignment(); + compileSetupRegistersForEntry(); + compileEntryExecutionFlag(); compileBody(); setEndOfMainPath(); + // === Footer code generation === + // + // Generate the stack overflow handling; if the stack check in the entry head fails, + // we need to call out to a helper function to throw the StackOverflowError. + stackOverflow.link(this); + + emitStoreCodeOrigin(CodeOrigin(0)); + + if (maxFrameExtentForSlowPathCall) + addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister); + + m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock); + // Generate slow path code. - m_speculative->runSlowPathGenerators(); + m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder); + m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin()); compileExceptionHandlers(); linkOSRExits(); @@ -290,26 +361,23 @@ void JITCompiler::compile() // Create OSR entry trampolines if necessary. m_speculative->createOSREntries(); setEndOfCode(); -} -void JITCompiler::link() -{ - OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail)); + auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail); if (linkBuffer->didFailToAllocate()) { - m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan)); + m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan); return; } link(*linkBuffer); m_speculative->linkOSREntries(*linkBuffer); - + m_jitCode->shrinkToFit(); codeBlock()->shrinkToFit(CodeBlock::LateShrink); disassemble(*linkBuffer); - m_graph.m_plan.finalizer = adoptPtr(new JITFinalizer( - m_graph.m_plan, m_jitCode.release(), linkBuffer.release())); + m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>( + m_graph.m_plan, m_jitCode.release(), WTFMove(linkBuffer)); } void JITCompiler::compileFunction() @@ -325,30 +393,36 @@ void JITCompiler::compileFunction() // so enter after this. Label fromArityCheck(this); // Plant a check that sufficient space is available in the JSStack. 
- addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit()).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1); - Jump stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), GPRInfo::regT1); - // Return here after stack check. - Label fromStackCheck = label(); + addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1); + Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1); + + // Move the stack pointer down to accommodate locals + addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister); + checkStackPointerAlignment(); + compileSetupRegistersForEntry(); + compileEntryExecutionFlag(); // === Function body code generation === - m_speculative = adoptPtr(new SpeculativeJIT(*this)); + m_speculative = std::make_unique<SpeculativeJIT>(*this); compileBody(); setEndOfMainPath(); // === Function footer code generation === // - // Generate code to perform the slow stack check (if the fast one in + // Generate code to perform the stack overflow handling (if the stack check in // the function header fails), and generate the entry point with arity check. // - // Generate the stack check; if the fast check in the function head fails, - // we need to call out to a helper function to check whether more space is available. - // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions). - stackCheck.link(this); + // Generate the stack overflow handling; if the stack check in the function head fails, + // we need to call out to a helper function to throw the StackOverflowError. + stackOverflow.link(this); emitStoreCodeOrigin(CodeOrigin(0)); - m_speculative->callOperationWithCallFrameRollbackOnException(operationStackCheck, m_codeBlock); - jump(fromStackCheck); + + if (maxFrameExtentForSlowPathCall) + addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister); + + m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock); // The fast entry point into a function does not check the correct number of arguments // have been passed to the call (we only use the fast entry point where we can statically @@ -361,14 +435,20 @@ void JITCompiler::compileFunction() load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1); branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this); emitStoreCodeOrigin(CodeOrigin(0)); + if (maxFrameExtentForSlowPathCall) + addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister); m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0); - branchTest32(Zero, GPRInfo::regT0).linkTo(fromArityCheck, this); + if (maxFrameExtentForSlowPathCall) + addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister); + branchTest32(Zero, GPRInfo::returnValueGPR).linkTo(fromArityCheck, this); emitStoreCodeOrigin(CodeOrigin(0)); + move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0); m_callArityFixup = call(); jump(fromArityCheck); // Generate slow path code. 
- m_speculative->runSlowPathGenerators(); + m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder); + m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin()); compileExceptionHandlers(); linkOSRExits(); @@ -376,14 +456,11 @@ void JITCompiler::compileFunction() // Create OSR entry trampolines if necessary. m_speculative->createOSREntries(); setEndOfCode(); -} -void JITCompiler::linkFunction() -{ // === Link === - OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail)); + auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail); if (linkBuffer->didFailToAllocate()) { - m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan)); + m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan); return; } link(*linkBuffer); @@ -392,25 +469,165 @@ void JITCompiler::linkFunction() m_jitCode->shrinkToFit(); codeBlock()->shrinkToFit(CodeBlock::LateShrink); - linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixup)).code().executableAddress())); + linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixupGenerator)).code().executableAddress())); disassemble(*linkBuffer); MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck); - m_graph.m_plan.finalizer = adoptPtr(new JITFinalizer( - m_graph.m_plan, m_jitCode.release(), linkBuffer.release(), withArityCheck)); + m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>( + m_graph.m_plan, m_jitCode.release(), WTFMove(linkBuffer), withArityCheck); } void JITCompiler::disassemble(LinkBuffer& linkBuffer) { - if (shouldShowDisassembly()) + if (shouldDumpDisassembly()) { m_disassembler->dump(linkBuffer); + linkBuffer.didAlreadyDisassemble(); + } if (m_graph.m_plan.compilation) m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer); } +#if USE(JSVALUE32_64) +void* JITCompiler::addressOfDoubleConstant(Node* node) +{ + double value = node->asNumber(); + int64_t valueBits = bitwise_cast<int64_t>(value); + auto it = m_graph.m_doubleConstantsMap.find(valueBits); + if (it != m_graph.m_doubleConstantsMap.end()) + return it->second; + + if (!m_graph.m_doubleConstants) + m_graph.m_doubleConstants = std::make_unique<Bag<double>>(); + + double* addressInConstantPool = m_graph.m_doubleConstants->add(); + *addressInConstantPool = value; + m_graph.m_doubleConstantsMap[valueBits] = addressInConstantPool; + return addressInConstantPool; +} +#endif + +void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer) +{ + // OSR entry is not allowed into blocks deemed unreachable by control flow analysis. + if (!basicBlock.intersectionOfCFAHasVisited) + return; + + OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead)); + + entry->m_expectedValues = basicBlock.intersectionOfPastValuesAtHead; + + // Fix the expected values: in our protocol, a dead variable will have an expected + // value of (None, []). But the old JIT may stash some values there. So we really + // need (Top, TOP). 
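In other words, a slot that is dead at the entry point must not block OSR entry merely because the baseline JIT left a stale value in it; its expectation is widened to heap-top, while live slots keep the intersection of past values at the block head. A hypothetical, much-simplified sketch of that policy (not the real AbstractValue machinery) is shown here, mirroring the makeHeapTop() calls in the loop that follows.

// Toy stand-in for an OSR-entry expectation.
enum class Expected { None, HeapTop, Int32, Double };

// Dead slots accept anything (heap-top); live slots keep their proven expectation.
Expected expectationForOSREntrySlot(bool liveAtEntry, Expected proven)
{
    if (!liveAtEntry)
        return Expected::HeapTop;
    return proven;
}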
+ for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
+ Node* node = basicBlock.variablesAtHead.argument(argument);
+ if (!node || !node->shouldGenerate())
+ entry->m_expectedValues.argument(argument).makeHeapTop();
+ }
+ for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
+ Node* node = basicBlock.variablesAtHead.local(local);
+ if (!node || !node->shouldGenerate())
+ entry->m_expectedValues.local(local).makeHeapTop();
+ else {
+ VariableAccessData* variable = node->variableAccessData();
+ entry->m_machineStackUsed.set(variable->machineLocal().toLocal());
+
+ switch (variable->flushFormat()) {
+ case FlushedDouble:
+ entry->m_localsForcedDouble.set(local);
+ break;
+ case FlushedInt52:
+ entry->m_localsForcedMachineInt.set(local);
+ break;
+ default:
+ break;
+ }
+
+ if (variable->local() != variable->machineLocal()) {
+ entry->m_reshufflings.append(
+ OSREntryReshuffling(
+ variable->local().offset(), variable->machineLocal().offset()));
+ }
+ }
+ }
+
+ entry->m_reshufflings.shrinkToFit();
+}
+
+void JITCompiler::appendExceptionHandlingOSRExit(ExitKind kind, unsigned eventStreamIndex, CodeOrigin opCatchOrigin, HandlerInfo* exceptionHandler, CallSiteIndex callSite, MacroAssembler::JumpList jumpsToFail)
+{
+ OSRExit exit(kind, JSValueRegs(), graph().methodOfGettingAValueProfileFor(nullptr), m_speculative.get(), eventStreamIndex);
+ exit.m_codeOrigin = opCatchOrigin;
+ exit.m_exceptionHandlerCallSiteIndex = callSite;
+ OSRExitCompilationInfo& exitInfo = appendExitInfo(jumpsToFail);
+ jitCode()->appendOSRExit(exit);
+ m_exceptionHandlerOSRExitCallSites.append(ExceptionHandlingOSRExitInfo { exitInfo, *exceptionHandler, callSite });
+}
+
+void JITCompiler::exceptionCheck()
+{
+ // It's important that we use origin.forExit here. Consider what happens if we hoist
+ // string addition outside a loop, and we exit at the point of that concatenation
+ // because of an out-of-memory exception.
+ // If the original loop had a try/catch around the string concatenation and we "catch"
+ // that exception inside the loop, then the loop's induction variable will be undefined
+ // in the OSR exit value recovery. It's more defensible for the string concatenation,
+ // then, to not be caught by the for loop's try/catch.
+ // Here is the program I'm speaking about:
+ //
+ // >>>> let's presume "c = a + b" gets hoisted here.
+ // for (var i = 0; i < length; i++) {
+ // try {
+ // c = a + b
+ // } catch(e) {
+ // If we threw an out-of-memory error and we caught the exception
+ // right here, then "i" would almost certainly be undefined, which
+ // would make no sense.
+ // ...
+ // }
+ // }
+ CodeOrigin opCatchOrigin;
+ HandlerInfo* exceptionHandler;
+ bool willCatchException = m_graph.willCatchExceptionInMachineFrame(m_speculative->m_currentNode->origin.forExit, opCatchOrigin, exceptionHandler);
+ if (willCatchException) {
+ unsigned streamIndex = m_speculative->m_outOfLineStreamIndex != UINT_MAX ? m_speculative->m_outOfLineStreamIndex : m_speculative->m_stream->size();
+ MacroAssembler::Jump hadException = emitNonPatchableExceptionCheck();
+ // We assume here that this is called after callOperation()/appendCall() is called.
+ appendExceptionHandlingOSRExit(ExceptionCheck, streamIndex, opCatchOrigin, exceptionHandler, m_jitCode->common.lastCallSite(), hadException); + } else + m_exceptionChecks.append(emitExceptionCheck()); +} + +CallSiteIndex JITCompiler::recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(const CodeOrigin& callSiteCodeOrigin, unsigned eventStreamIndex) +{ + CodeOrigin opCatchOrigin; + HandlerInfo* exceptionHandler; + bool willCatchException = m_graph.willCatchExceptionInMachineFrame(callSiteCodeOrigin, opCatchOrigin, exceptionHandler); + CallSiteIndex callSite = addCallSite(callSiteCodeOrigin); + if (willCatchException) + appendExceptionHandlingOSRExit(GenericUnwind, eventStreamIndex, opCatchOrigin, exceptionHandler, callSite); + return callSite; +} + +void JITCompiler::setEndOfMainPath() +{ + m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), m_speculative->m_origin.semantic); + if (LIKELY(!m_disassembler)) + return; + m_disassembler->setEndOfMainPath(labelIgnoringWatchpoints()); +} + +void JITCompiler::setEndOfCode() +{ + m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin()); + if (LIKELY(!m_disassembler)) + return; + m_disassembler->setEndOfCode(labelIgnoringWatchpoints()); +} + } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.h b/Source/JavaScriptCore/dfg/DFGJITCompiler.h index b582cea4f..71f4141e4 100644 --- a/Source/JavaScriptCore/dfg/DFGJITCompiler.h +++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,7 +29,6 @@ #if ENABLE(DFG_JIT) #include "CCallHelpers.h" -#include "CallFrameInlines.h" #include "CodeBlock.h" #include "DFGDisassembler.h" #include "DFGGraph.h" @@ -39,10 +38,12 @@ #include "DFGRegisterBank.h" #include "FPRInfo.h" #include "GPRInfo.h" +#include "HandlerInfo.h" #include "JITCode.h" #include "JITInlineCacheGenerator.h" #include "LinkBuffer.h" #include "MacroAssembler.h" +#include "PCToCodeOriginMap.h" #include "TempRegisterSet.h" namespace JSC { @@ -112,15 +113,13 @@ public: void compile(); void compileFunction(); - void link(); - void linkFunction(); - // Accessors for properties. Graph& graph() { return m_graph; } // Methods to set labels for the disassembler. 
void setStartOfCode() { + m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), CodeOrigin(0, nullptr)); if (LIKELY(!m_disassembler)) return; m_disassembler->setStartOfCode(labelIgnoringWatchpoints()); @@ -140,25 +139,23 @@ public: m_disassembler->setForNode(node, labelIgnoringWatchpoints()); } - void setEndOfMainPath() - { - if (LIKELY(!m_disassembler)) - return; - m_disassembler->setEndOfMainPath(labelIgnoringWatchpoints()); - } + void setEndOfMainPath(); + void setEndOfCode(); - void setEndOfCode() + CallSiteIndex addCallSite(CodeOrigin codeOrigin) { - if (LIKELY(!m_disassembler)) - return; - m_disassembler->setEndOfCode(labelIgnoringWatchpoints()); + return m_jitCode->common.addCodeOrigin(codeOrigin); } - + void emitStoreCodeOrigin(CodeOrigin codeOrigin) { - unsigned index = m_jitCode->common.addCodeOrigin(codeOrigin); - unsigned locationBits = CallFrame::Location::encodeAsCodeOriginIndex(index); - store32(TrustedImm32(locationBits), tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount))); + CallSiteIndex callSite = addCallSite(codeOrigin); + emitStoreCallSiteIndex(callSite); + } + + void emitStoreCallSiteIndex(CallSiteIndex callSite) + { + store32(TrustedImm32(callSite.bits()), tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount))); } // Add a call out from JIT code, without an exception check. @@ -169,15 +166,7 @@ public: return functionCall; } - void exceptionCheck(Jump jumpToHandler) - { - m_exceptionChecks.append(jumpToHandler); - } - - void exceptionCheck() - { - m_exceptionChecks.append(emitExceptionCheck()); - } + void exceptionCheck(); void exceptionCheckWithCallFrameRollback() { @@ -187,6 +176,7 @@ public: // Add a call out from JIT code, with a fast exception check that tests if the return value is zero. void fastExceptionCheck() { + callExceptionFuzz(); m_exceptionChecks.append(branchTestPtr(Zero, GPRInfo::returnValueGPR)); } @@ -199,12 +189,7 @@ public: } #if USE(JSVALUE32_64) - void* addressOfDoubleConstant(Node* node) - { - ASSERT(m_graph.isNumberConstant(node)); - unsigned constantIndex = node->constantNumber(); - return &(codeBlock()->constantRegister(FirstConstantRegisterIndex + constantIndex)); - } + void* addressOfDoubleConstant(Node*); #endif void addGetById(const JITGetByIdGenerator& gen, SlowPathGenerator* slowPath) @@ -221,10 +206,15 @@ public: { m_ins.append(record); } + + unsigned currentJSCallIndex() const + { + return m_jsCalls.size(); + } - void addJSCall(Call fastCall, Call slowCall, DataLabelPtr targetToCheck, CallLinkInfo::CallType callType, GPRReg callee, CodeOrigin codeOrigin) + void addJSCall(Call fastCall, Call slowCall, DataLabelPtr targetToCheck, CallLinkInfo* info) { - m_jsCalls.append(JSCallRecord(fastCall, slowCall, targetToCheck, callType, callee, codeOrigin)); + m_jsCalls.append(JSCallRecord(fastCall, slowCall, targetToCheck, info)); } void addWeakReference(JSCell* target) @@ -245,62 +235,36 @@ public: addWeakReference(weakPtr); return result; } - - void noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer) + + template<typename T> + Jump branchWeakStructure(RelationalCondition cond, T left, Structure* weakStructure) { - // OSR entry is not allowed into blocks deemed unreachable by control flow analysis. 
- if (!basicBlock.cfaHasVisited) - return; - - OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead)); - - entry->m_expectedValues = basicBlock.valuesAtHead; - - // Fix the expected values: in our protocol, a dead variable will have an expected - // value of (None, []). But the old JIT may stash some values there. So we really - // need (Top, TOP). - for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) { - Node* node = basicBlock.variablesAtHead.argument(argument); - if (!node || !node->shouldGenerate()) - entry->m_expectedValues.argument(argument).makeHeapTop(); - } - for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) { - Node* node = basicBlock.variablesAtHead.local(local); - if (!node || !node->shouldGenerate()) - entry->m_expectedValues.local(local).makeHeapTop(); - else { - VariableAccessData* variable = node->variableAccessData(); - switch (variable->flushFormat()) { - case FlushedDouble: - entry->m_localsForcedDouble.set(local); - break; - case FlushedInt52: - entry->m_localsForcedMachineInt.set(local); - break; - default: - break; - } - - if (variable->local() != variable->machineLocal()) { - entry->m_reshufflings.append( - OSREntryReshuffling( - variable->local().offset(), variable->machineLocal().offset())); - } - } - } - - entry->m_reshufflings.shrinkToFit(); +#if USE(JSVALUE64) + Jump result = branch32(cond, left, TrustedImm32(weakStructure->id())); + addWeakReference(weakStructure); + return result; +#else + return branchWeakPtr(cond, left, weakStructure); +#endif } + + void noticeOSREntry(BasicBlock&, JITCompiler::Label blockHead, LinkBuffer&); - PassRefPtr<JITCode> jitCode() { return m_jitCode; } + RefPtr<JITCode> jitCode() { return m_jitCode; } Vector<Label>& blockHeads() { return m_blockHeads; } + CallSiteIndex recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(const CodeOrigin&, unsigned eventStreamIndex); + + PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder() { return m_pcToCodeOriginMapBuilder; } + private: friend class OSRExitJumpPlaceholder; // Internal implementation to compile. void compileEntry(); + void compileSetupRegistersForEntry(); + void compileEntryExecutionFlag(); void compileBody(); void link(LinkBuffer&); @@ -308,11 +272,13 @@ private: void compileExceptionHandlers(); void linkOSRExits(); void disassemble(LinkBuffer&); - + + void appendExceptionHandlingOSRExit(ExitKind, unsigned eventStreamIndex, CodeOrigin, HandlerInfo* exceptionHandler, CallSiteIndex, MacroAssembler::JumpList jumpsToFail = MacroAssembler::JumpList()); + // The dataflow graph currently being generated. 
Graph& m_graph; - OwnPtr<Disassembler> m_disassembler; + std::unique_ptr<Disassembler> m_disassembler; RefPtr<JITCode> m_jitCode; @@ -325,22 +291,18 @@ private: Vector<Label> m_blockHeads; struct JSCallRecord { - JSCallRecord(Call fastCall, Call slowCall, DataLabelPtr targetToCheck, CallLinkInfo::CallType callType, GPRReg callee, CodeOrigin codeOrigin) + JSCallRecord(Call fastCall, Call slowCall, DataLabelPtr targetToCheck, CallLinkInfo* info) : m_fastCall(fastCall) , m_slowCall(slowCall) , m_targetToCheck(targetToCheck) - , m_callType(callType) - , m_callee(callee) - , m_codeOrigin(codeOrigin) + , m_info(info) { } Call m_fastCall; Call m_slowCall; DataLabelPtr m_targetToCheck; - CallLinkInfo::CallType m_callType; - GPRReg m_callee; - CodeOrigin m_codeOrigin; + CallLinkInfo* m_info; }; Vector<InlineCacheWrapper<JITGetByIdGenerator>, 4> m_getByIds; @@ -350,9 +312,17 @@ private: SegmentedVector<OSRExitCompilationInfo, 4> m_exitCompilationInfo; Vector<Vector<Label>> m_exitSiteLabels; + struct ExceptionHandlingOSRExitInfo { + OSRExitCompilationInfo& exitInfo; + HandlerInfo baselineExceptionHandler; + CallSiteIndex callSiteIndex; + }; + Vector<ExceptionHandlingOSRExitInfo> m_exceptionHandlerOSRExitCallSites; + Call m_callArityFixup; Label m_arityCheck; - OwnPtr<SpeculativeJIT> m_speculative; + std::unique_ptr<SpeculativeJIT> m_speculative; + PCToCodeOriginMapBuilder m_pcToCodeOriginMapBuilder; }; } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp b/Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp index b7ea594c6..1132fc9b7 100644 --- a/Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp +++ b/Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,15 +29,18 @@ #if ENABLE(DFG_JIT) #include "CodeBlock.h" +#include "CodeBlockWithJITType.h" #include "DFGCommon.h" #include "DFGPlan.h" +#include "JSCInlines.h" +#include "ProfilerDatabase.h" namespace JSC { namespace DFG { -JITFinalizer::JITFinalizer(Plan& plan, PassRefPtr<JITCode> jitCode, PassOwnPtr<LinkBuffer> linkBuffer, MacroAssemblerCodePtr withArityCheck) +JITFinalizer::JITFinalizer(Plan& plan, PassRefPtr<JITCode> jitCode, std::unique_ptr<LinkBuffer> linkBuffer, MacroAssemblerCodePtr withArityCheck) : Finalizer(plan) , m_jitCode(jitCode) - , m_linkBuffer(linkBuffer) + , m_linkBuffer(WTFMove(linkBuffer)) , m_withArityCheck(withArityCheck) { } @@ -46,10 +49,18 @@ JITFinalizer::~JITFinalizer() { } +size_t JITFinalizer::codeSize() +{ + return m_linkBuffer->size(); +} + bool JITFinalizer::finalize() { - m_jitCode->initializeCodeRef(m_linkBuffer->finalizeCodeWithoutDisassembly()); - m_plan.codeBlock->setJITCode(m_jitCode, MacroAssemblerCodePtr()); + m_jitCode->initializeCodeRef( + FINALIZE_DFG_CODE(*m_linkBuffer, ("DFG JIT code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock, JITCode::DFGJIT)).data())), + MacroAssemblerCodePtr()); + + m_plan.codeBlock->setJITCode(m_jitCode); finalizeCommon(); @@ -59,8 +70,10 @@ bool JITFinalizer::finalize() bool JITFinalizer::finalizeFunction() { RELEASE_ASSERT(!m_withArityCheck.isEmptyValue()); - m_jitCode->initializeCodeRef(m_linkBuffer->finalizeCodeWithoutDisassembly()); - m_plan.codeBlock->setJITCode(m_jitCode, m_withArityCheck); + m_jitCode->initializeCodeRef( + FINALIZE_DFG_CODE(*m_linkBuffer, ("DFG JIT code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock, JITCode::DFGJIT)).data())), + m_withArityCheck); + m_plan.codeBlock->setJITCode(m_jitCode); finalizeCommon(); @@ -70,11 +83,14 @@ bool JITFinalizer::finalizeFunction() void JITFinalizer::finalizeCommon() { #if ENABLE(FTL_JIT) - m_jitCode->optimizeAfterWarmUp(m_plan.codeBlock.get()); + m_jitCode->optimizeAfterWarmUp(m_plan.codeBlock); #endif // ENABLE(FTL_JIT) if (m_plan.compilation) m_plan.vm.m_perBytecodeProfiler->addCompilation(m_plan.compilation); + + if (!m_plan.willTryToTierUp) + m_plan.codeBlock->baselineVersion()->m_didFailFTLCompilation = true; } } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGJITFinalizer.h b/Source/JavaScriptCore/dfg/DFGJITFinalizer.h index 5c7c82b66..110442fe4 100644 --- a/Source/JavaScriptCore/dfg/DFGJITFinalizer.h +++ b/Source/JavaScriptCore/dfg/DFGJITFinalizer.h @@ -26,8 +26,6 @@ #ifndef DFGJITFinalizer_h #define DFGJITFinalizer_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGFinalizer.h" @@ -39,9 +37,10 @@ namespace JSC { namespace DFG { class JITFinalizer : public Finalizer { public: - JITFinalizer(Plan&, PassRefPtr<JITCode>, PassOwnPtr<LinkBuffer>, MacroAssemblerCodePtr withArityCheck = MacroAssemblerCodePtr(MacroAssemblerCodePtr::EmptyValue)); + JITFinalizer(Plan&, PassRefPtr<JITCode>, std::unique_ptr<LinkBuffer>, MacroAssemblerCodePtr withArityCheck = MacroAssemblerCodePtr(MacroAssemblerCodePtr::EmptyValue)); virtual ~JITFinalizer(); + virtual size_t codeSize() override; virtual bool finalize() override; virtual bool finalizeFunction() override; @@ -49,7 +48,7 @@ private: void finalizeCommon(); RefPtr<JITCode> m_jitCode; - OwnPtr<LinkBuffer> m_linkBuffer; + std::unique_ptr<LinkBuffer> m_linkBuffer; MacroAssemblerCodePtr m_withArityCheck; }; diff --git 
a/Source/JavaScriptCore/dfg/DFGJumpReplacement.cpp b/Source/JavaScriptCore/dfg/DFGJumpReplacement.cpp index 033985e88..533752997 100644 --- a/Source/JavaScriptCore/dfg/DFGJumpReplacement.cpp +++ b/Source/JavaScriptCore/dfg/DFGJumpReplacement.cpp @@ -29,13 +29,14 @@ #if ENABLE(DFG_JIT) #include "MacroAssembler.h" +#include "JSCInlines.h" #include "Options.h" namespace JSC { namespace DFG { void JumpReplacement::fire() { - if (Options::showDisassembly()) + if (Options::dumpDisassembly()) dataLogF("Firing jump replacement watchpoint from %p, to %p.\n", m_source.dataLocation(), m_destination.dataLocation()); MacroAssembler::replaceWithJump(m_source, m_destination); } diff --git a/Source/JavaScriptCore/dfg/DFGLICMPhase.cpp b/Source/JavaScriptCore/dfg/DFGLICMPhase.cpp index 64651309e..f38a44021 100644 --- a/Source/JavaScriptCore/dfg/DFGLICMPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGLICMPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -36,9 +36,10 @@ #include "DFGEdgeDominates.h" #include "DFGGraph.h" #include "DFGInsertionSet.h" +#include "DFGNaturalLoops.h" #include "DFGPhase.h" #include "DFGSafeToExecute.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -46,7 +47,7 @@ namespace { struct LoopData { LoopData() - : preHeader(0) + : preHeader(nullptr) { } @@ -62,18 +63,24 @@ class LICMPhase : public Phase { public: LICMPhase(Graph& graph) : Phase(graph, "LICM") + , m_state(graph) , m_interpreter(graph, m_state) { } bool run() { - ASSERT(m_graph.m_form == SSA); + DFG_ASSERT(m_graph, nullptr, m_graph.m_form == SSA); - m_graph.m_dominators.computeIfNecessary(m_graph); - m_graph.m_naturalLoops.computeIfNecessary(m_graph); + m_graph.ensureDominators(); + m_graph.ensureNaturalLoops(); + + if (verbose) { + dataLog("Graph before LICM:\n"); + m_graph.dump(); + } - m_data.resize(m_graph.m_naturalLoops.numLoops()); + m_data.resize(m_graph.m_naturalLoops->numLoops()); // Figure out the set of things each loop writes to, not including blocks that // belong to inner loops. We fix this later. @@ -81,37 +88,83 @@ public: BasicBlock* block = m_graph.block(blockIndex); if (!block) continue; - const NaturalLoop* loop = m_graph.m_naturalLoops.innerMostLoopOf(block); + + // Skip blocks that are proved to not execute. + // FIXME: This shouldn't be needed. + // https://bugs.webkit.org/show_bug.cgi?id=128584 + if (!block->cfaHasVisited) + continue; + + const NaturalLoop* loop = m_graph.m_naturalLoops->innerMostLoopOf(block); if (!loop) continue; LoopData& data = m_data[loop->index()]; - for (unsigned nodeIndex = block->size(); nodeIndex--;) - addWrites(m_graph, block->at(nodeIndex), data.writes); + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + Node* node = block->at(nodeIndex); + + // Don't look beyond parts of the code that definitely always exit. + // FIXME: This shouldn't be needed. + // https://bugs.webkit.org/show_bug.cgi?id=128584 + if (node->op() == ForceOSRExit) + break; + + addWrites(m_graph, node, data.writes); + } } // For each loop: // - Identify its pre-header. // - Make sure its outer loops know what it clobbers. 
- for (unsigned loopIndex = m_graph.m_naturalLoops.numLoops(); loopIndex--;) { - const NaturalLoop& loop = m_graph.m_naturalLoops.loop(loopIndex); + for (unsigned loopIndex = m_graph.m_naturalLoops->numLoops(); loopIndex--;) { + const NaturalLoop& loop = m_graph.m_naturalLoops->loop(loopIndex); LoopData& data = m_data[loop.index()]; + for ( - const NaturalLoop* outerLoop = m_graph.m_naturalLoops.innerMostOuterLoop(loop); + const NaturalLoop* outerLoop = m_graph.m_naturalLoops->innerMostOuterLoop(loop); outerLoop; - outerLoop = m_graph.m_naturalLoops.innerMostOuterLoop(*outerLoop)) + outerLoop = m_graph.m_naturalLoops->innerMostOuterLoop(*outerLoop)) m_data[outerLoop->index()].writes.addAll(data.writes); BasicBlock* header = loop.header(); - BasicBlock* preHeader = 0; + BasicBlock* preHeader = nullptr; + unsigned numberOfPreHeaders = 0; // We're cool if this is 1. + + // This is guaranteed because we expect the CFG not to have unreachable code. Therefore, a + // loop header must have a predecessor. (Also, we don't allow the root block to be a loop, + // which cuts out the one other way of having a loop header with only one predecessor.) + DFG_ASSERT(m_graph, header->at(0), header->predecessors.size() > 1); + for (unsigned i = header->predecessors.size(); i--;) { BasicBlock* predecessor = header->predecessors[i]; - if (m_graph.m_dominators.dominates(header, predecessor)) + if (m_graph.m_dominators->dominates(header, predecessor)) continue; - RELEASE_ASSERT(!preHeader || preHeader == predecessor); + preHeader = predecessor; + ++numberOfPreHeaders; } - - RELEASE_ASSERT(preHeader->last()->op() == Jump); + + // We need to validate the pre-header. There are a bunch of things that could be wrong + // about it: + // + // - There might be more than one. This means that pre-header creation either did not run, + // or some CFG transformation destroyed the pre-headers. + // + // - It may not be legal to exit at the pre-header. That would be a real bummer. Currently, + // LICM assumes that it can always hoist checks. See + // https://bugs.webkit.org/show_bug.cgi?id=148545. Though even with that fixed, we anyway + // would need to check if it's OK to exit at the pre-header since if we can't then we + // would have to restrict hoisting to non-exiting nodes. + + if (numberOfPreHeaders != 1) + continue; + + // This is guaranteed because the header has multiple predecessors and critical edges are + // broken. Therefore the predecessors must all have one successor, which implies that they + // must end in a Jump. + DFG_ASSERT(m_graph, preHeader->terminal(), preHeader->terminal()->op() == Jump); + + if (!preHeader->terminal()->origin.exitOK) + continue; data.preHeader = preHeader; } @@ -122,6 +175,7 @@ public: // We try to hoist to the outer-most loop that permits it. Hoisting is valid if: // - The node doesn't write anything. // - The node doesn't read anything that the loop writes. + // - The preHeader is valid (i.e. it passed the validation above). // - The preHeader's state at tail makes the node safe to execute. // - The loop's children all belong to nodes that strictly dominate the loop header. // - The preHeader's state at tail is still valid. This is mostly to save compile @@ -133,17 +187,10 @@ public: // // For maximum profit, we walk blocks in DFS order to ensure that we generally // tend to hoist dominators before dominatees. 
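Concretely, when all of the conditions above hold for a node, the transformation is the classic one: a computation that writes nothing and reads nothing the loop writes is evaluated once in the pre-header instead of on every iteration. A schematic before/after in plain C++ follows; it is an illustration of the idea, not code from the patch.

#include <cstddef>
#include <vector>

long sumBeforeLICM(const std::vector<long>& values, long a, long b)
{
    long sum = 0;
    for (std::size_t i = 0; i < values.size(); ++i)
        sum += values[i] + a * b; // loop-invariant multiply recomputed each iteration
    return sum;
}

long sumAfterLICM(const std::vector<long>& values, long a, long b)
{
    long hoisted = a * b; // computed once in the pre-header
    long sum = 0;
    for (std::size_t i = 0; i < values.size(); ++i)
        sum += values[i] + hoisted;
    return sum;
}

Unlike this source-level rewrite, hoisted DFG nodes may carry checks that can OSR-exit from the pre-header, which is why the phase insists on a single, exit-OK pre-header before hoisting anything.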
- Vector<BasicBlock*> depthFirst; - m_graph.getBlocksInDepthFirstOrder(depthFirst); Vector<const NaturalLoop*> loopStack; bool changed = false; - for ( - unsigned depthFirstIndex = 0; - depthFirstIndex < depthFirst.size(); - ++depthFirstIndex) { - - BasicBlock* block = depthFirst[depthFirstIndex]; - const NaturalLoop* loop = m_graph.m_naturalLoops.innerMostLoopOf(block); + for (BasicBlock* block : m_graph.blocksInPreOrder()) { + const NaturalLoop* loop = m_graph.m_naturalLoops->innerMostLoopOf(block); if (!loop) continue; @@ -151,7 +198,7 @@ public: for ( const NaturalLoop* current = loop; current; - current = m_graph.m_naturalLoops.innerMostOuterLoop(*current)) + current = m_graph.m_naturalLoops->innerMostOuterLoop(*current)) loopStack.append(current); // Remember: the loop stack has the inner-most loop at index 0, so if we want @@ -188,6 +235,12 @@ private: { Node* node = nodeRef; LoopData& data = m_data[loop->index()]; + + if (!data.preHeader) { + if (verbose) + dataLog(" Not hoisting ", node, " because the pre-header is invalid.\n"); + return false; + } if (!data.preHeader->cfaDidFinish) { if (verbose) @@ -203,6 +256,47 @@ private: return false; } + // FIXME: At this point if the hoisting of the full node fails but the node has type checks, + // we could still hoist just the checks. + // https://bugs.webkit.org/show_bug.cgi?id=144525 + + // FIXME: If a node has a type check - even something like a CheckStructure - then we should + // only hoist the node if we know that it will execute on every loop iteration or if we know + // that the type check will always succeed at the loop pre-header through some other means + // (like looking at prediction propagation results). Otherwise, we might make a mistake like + // this: + // + // var o = ...; // sometimes null and sometimes an object with structure S1. + // for (...) { + // if (o) + // ... = o.f; // CheckStructure and GetByOffset, which we will currently hoist. + // } + // + // When we encounter such code, we'll hoist the CheckStructure and GetByOffset and then we + // will have a recompile. We'll then end up thinking that the get_by_id needs to be + // polymorphic, which is false. + // + // We can counter this by either having a control flow equivalence check, or by consulting + // prediction propagation to see if the check would always succeed. Prediction propagation + // would not be enough for things like: + // + // var p = ...; // some boolean predicate + // var o = {}; + // if (p) + // o.f = 42; + // for (...) { + // if (p) + // ... = o.f; + // } + // + // Prediction propagation can't tell us anything about the structure, and the CheckStructure + // will appear to be hoistable because the loop doesn't clobber structures. The cell check + // in the CheckStructure will be hoistable though, since prediction propagation can tell us + // that o is always SpecFinalObject. In cases like this, control flow equivalence is the + // only effective guard. + // + // https://bugs.webkit.org/show_bug.cgi?id=144527 + if (readsOverlap(m_graph, node, data.writes)) { if (verbose) { dataLog( @@ -226,18 +320,21 @@ private: " Hoisting ", node, " from ", *fromBlock, " to ", *data.preHeader, "\n"); } - - data.preHeader->insertBeforeLast(node); - node->misc.owner = data.preHeader; - node->codeOriginForExitTarget = data.preHeader->last()->codeOriginForExitTarget; + + // FIXME: We should adjust the Check: flags on the edges of node. There are phases that assume + // that those flags are correct even if AI is stale. 
+ // https://bugs.webkit.org/show_bug.cgi?id=148544 + data.preHeader->insertBeforeTerminal(node); + node->owner = data.preHeader; + NodeOrigin originalOrigin = node->origin; + node->origin = data.preHeader->terminal()->origin.withSemantic(node->origin.semantic); // Modify the states at the end of the preHeader of the loop we hoisted to, - // and all pre-headers inside the loop. - // FIXME: This could become a scalability bottleneck. Fortunately, most loops - // are small and anyway we rapidly skip over basic blocks here. + // and all pre-headers inside the loop. This isn't a stability bottleneck right now + // because most loops are small and most blocks belong to few loops. for (unsigned bodyIndex = loop->size(); bodyIndex--;) { BasicBlock* subBlock = loop->at(bodyIndex); - const NaturalLoop* subLoop = m_graph.m_naturalLoops.headerOf(subBlock); + const NaturalLoop* subLoop = m_graph.m_naturalLoops->headerOf(subBlock); if (!subLoop) continue; BasicBlock* subPreHeader = m_data[subLoop->index()].preHeader; @@ -250,9 +347,9 @@ private: // It just so happens that all of the nodes we currently know how to hoist // don't have var-arg children. That may change and then we can fix this // code. But for now we just assert that's the case. - RELEASE_ASSERT(!(node->flags() & NodeHasVarArgs)); + DFG_ASSERT(m_graph, node, !(node->flags() & NodeHasVarArgs)); - nodeRef = m_graph.addNode(SpecNone, Phantom, node->codeOrigin, node->children); + nodeRef = m_graph.addNode(SpecNone, Check, originalOrigin, node->children); return true; } diff --git a/Source/JavaScriptCore/dfg/DFGLICMPhase.h b/Source/JavaScriptCore/dfg/DFGLICMPhase.h index 601918bfd..9c717f23a 100644 --- a/Source/JavaScriptCore/dfg/DFGLICMPhase.h +++ b/Source/JavaScriptCore/dfg/DFGLICMPhase.h @@ -26,8 +26,6 @@ #ifndef DFGLICMPhase_h #define DFGLICMPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) namespace JSC { namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGLazyJSValue.cpp b/Source/JavaScriptCore/dfg/DFGLazyJSValue.cpp index 251c9a7c0..6011490c9 100644 --- a/Source/JavaScriptCore/dfg/DFGLazyJSValue.cpp +++ b/Source/JavaScriptCore/dfg/DFGLazyJSValue.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,7 +28,7 @@ #if ENABLE(DFG_JIT) -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -36,14 +36,14 @@ JSValue LazyJSValue::getValue(VM& vm) const { switch (m_kind) { case KnownValue: - return value(); + return value()->value(); case SingleCharacterString: return jsSingleCharacterString(&vm, u.character); case KnownStringImpl: return jsString(&vm, u.stringImpl); } RELEASE_ASSERT_NOT_REACHED(); - return value(); + return JSValue(); } static TriState equalToSingleCharacter(JSValue value, UChar character) @@ -81,11 +81,11 @@ TriState LazyJSValue::strictEqual(const LazyJSValue& other) const case KnownValue: switch (other.m_kind) { case KnownValue: - return JSValue::pureStrictEqual(value(), other.value()); + return JSValue::pureStrictEqual(value()->value(), other.value()->value()); case SingleCharacterString: - return equalToSingleCharacter(value(), other.character()); + return equalToSingleCharacter(value()->value(), other.character()); case KnownStringImpl: - return equalToStringImpl(value(), other.stringImpl()); + return equalToStringImpl(value()->value(), other.stringImpl()); } break; case SingleCharacterString: @@ -113,11 +113,41 @@ TriState LazyJSValue::strictEqual(const LazyJSValue& other) const return FalseTriState; } +uintptr_t LazyJSValue::switchLookupValue(SwitchKind kind) const +{ + // NB. Not every kind of JSValue will be able to give you a switch lookup + // value, and this method will assert, or do bad things, if you use it + // for a kind of value that can't. + switch (m_kind) { + case KnownValue: + switch (kind) { + case SwitchImm: + return value()->value().asInt32(); + case SwitchCell: + return bitwise_cast<uintptr_t>(value()->value().asCell()); + default: + RELEASE_ASSERT_NOT_REACHED(); + return 0; + } + case SingleCharacterString: + switch (kind) { + case SwitchChar: + return character(); + default: + RELEASE_ASSERT_NOT_REACHED(); + return 0; + } + default: + RELEASE_ASSERT_NOT_REACHED(); + return 0; + } +} + void LazyJSValue::dumpInContext(PrintStream& out, DumpContext* context) const { switch (m_kind) { case KnownValue: - value().dumpInContext(out, context); + value()->dumpInContext(out, context); return; case SingleCharacterString: out.print("Lazy:SingleCharacterString("); diff --git a/Source/JavaScriptCore/dfg/DFGLazyJSValue.h b/Source/JavaScriptCore/dfg/DFGLazyJSValue.h index 37a07266d..a1231db04 100644 --- a/Source/JavaScriptCore/dfg/DFGLazyJSValue.h +++ b/Source/JavaScriptCore/dfg/DFGLazyJSValue.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,11 +26,10 @@ #ifndef DFGLazyJSValue_h #define DFGLazyJSValue_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) -#include "JSCJSValue.h" +#include "DFGCommon.h" +#include "DFGFrozenValue.h" #include <wtf/text/StringImpl.h> namespace JSC { namespace DFG { @@ -46,10 +45,10 @@ enum LazinessKind { class LazyJSValue { public: - LazyJSValue(JSValue value = JSValue()) + LazyJSValue(FrozenValue* value = FrozenValue::emptySingleton()) : m_kind(KnownValue) { - u.value = JSValue::encode(value); + u.value = value; } static LazyJSValue singleCharacterString(UChar character) @@ -68,19 +67,19 @@ public: return result; } - JSValue tryGetValue() const + FrozenValue* tryGetValue(Graph&) const { if (m_kind == KnownValue) return value(); - return JSValue(); + return nullptr; } JSValue getValue(VM&) const; - JSValue value() const + FrozenValue* value() const { ASSERT(m_kind == KnownValue); - return JSValue::decode(u.value); + return u.value; } UChar character() const @@ -97,28 +96,14 @@ public: TriState strictEqual(const LazyJSValue& other) const; - unsigned switchLookupValue() const - { - // NB. Not every kind of JSValue will be able to give you a switch lookup - // value, and this method will assert, or do bad things, if you use it - // for a kind of value that can't. - switch (m_kind) { - case KnownValue: - return value().asInt32(); - case SingleCharacterString: - return character(); - default: - RELEASE_ASSERT_NOT_REACHED(); - return 0; - } - } + uintptr_t switchLookupValue(SwitchKind) const; void dump(PrintStream&) const; void dumpInContext(PrintStream&, DumpContext*) const; private: union { - EncodedJSValue value; + FrozenValue* value; UChar character; StringImpl* stringImpl; } u; diff --git a/Source/JavaScriptCore/dfg/DFGLazyNode.cpp b/Source/JavaScriptCore/dfg/DFGLazyNode.cpp new file mode 100644 index 000000000..c8d0940e7 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGLazyNode.cpp @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "DFGLazyNode.h" + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +void LazyNode::dump(PrintStream& out) const +{ + if (!*this) + out.print("LazyNode:0"); + else { + if (isNode()) + out.print("LazyNode:@", asNode()->index()); + else + out.print("LazyNode:FrozenValue(", Graph::opName(op()), ", ", pointerDump(asValue()), ")"); + } +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGLazyNode.h b/Source/JavaScriptCore/dfg/DFGLazyNode.h new file mode 100644 index 000000000..ffd572f85 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGLazyNode.h @@ -0,0 +1,187 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef DFGLazyNode_h +#define DFGLazyNode_h + +#if ENABLE(DFG_JIT) + +#include "DFGCommon.h" +#include "DFGInsertionSet.h" +#include <wtf/PrintStream.h> + +namespace JSC { namespace DFG { + +class LazyNode { +public: + static const size_t jsConstantTag = 0; + static const size_t doubleConstantTag = 1; + static const size_t int52ConstantTag = 2; + + static const uintptr_t tagMask = 0x3; + static const uintptr_t pointerMask = ~tagMask; + + explicit LazyNode(Node* node = nullptr) + : m_node(node) + , m_value(reinterpret_cast<uintptr_t>(nullptr)) + { + if (node && node->isConstant()) + setFrozenValue(node->constant(), node->op()); + } + + explicit LazyNode(FrozenValue* value, NodeType op = JSConstant) + : m_node(nullptr) + , m_value(reinterpret_cast<uintptr_t>(nullptr)) + { + setFrozenValue(value, op); + } + + LazyNode(std::nullptr_t) + : m_node(nullptr) + , m_value(reinterpret_cast<uintptr_t>(nullptr)) + { + } + + LazyNode(WTF::HashTableDeletedValueType) + : m_node(reinterpret_cast<Node*>(-1)) + { + } + + void setNode(Node* node) + { + m_node = node; + if (node && node->isConstant()) + setFrozenValue(node->constant(), node->op()); + } + + bool isHashTableDeletedValue() const { return m_node == reinterpret_cast<Node*>(-1); } + + bool isNode() const { return m_node; } + + NodeType op() const + { + if (m_node) + return m_node->op(); + + switch (m_value & tagMask) { + case jsConstantTag: + return JSConstant; + case doubleConstantTag: + return DoubleConstant; + case int52ConstantTag: + return Int52Constant; + default: + RELEASE_ASSERT_NOT_REACHED(); + } + } + + Node* asNode() const + { + ASSERT(m_node || !asValue()); + return m_node; + } + + FrozenValue* asValue() const + { + return reinterpret_cast<FrozenValue*>(m_value & pointerMask); + } + + unsigned hash() const + { + if (asValue()) + return WTF::PtrHash<FrozenValue*>::hash(asValue()); + return WTF::PtrHash<Node*>::hash(m_node); + } + + bool operator==(const LazyNode& other) const + { + if (asValue() || other.asValue()) + return m_value == other.m_value; + return m_node == other.m_node; + } + + bool operator!=(const LazyNode& other) const + { + return !(*this == other); + } + + Node* ensureIsNode(InsertionSet& insertionSet, BasicBlock* block, unsigned nodeIndex) + { + if (!m_node) + m_node = insertionSet.insertConstant(nodeIndex, block->at(nodeIndex)->origin, asValue(), op()); + + return asNode(); + } + + Node* operator->() const { return asNode(); } + + Node& operator*() const { return *asNode(); } + + bool operator!() const { return !asValue() && !asNode(); } + + explicit operator bool() const { return !!*this; } + + void dump(PrintStream& out) const; + +private: + void setFrozenValue(FrozenValue* value, NodeType op) + { + ASSERT(value); + m_value = reinterpret_cast<uintptr_t>(value); + ASSERT(m_value == (m_value & pointerMask)); + switch (op) { + case JSConstant: + m_value |= jsConstantTag; + break; + case DoubleConstant: + m_value |= doubleConstantTag; + break; + case Int52Constant: + m_value |= int52ConstantTag; + break; + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } + } + + Node* m_node; + uintptr_t m_value; +}; + +} } // namespace JSC::DFG + +namespace WTF { + +template<typename T> struct HashTraits; +template<> struct HashTraits<JSC::DFG::LazyNode> : SimpleClassHashTraits<JSC::DFG::LazyNode> { + static const bool emptyValueIsZero = true; +}; + +} // namespace WTF + +#endif // ENABLE(DFG_JIT) + +#endif // DFGLazyNode_h diff --git a/Source/JavaScriptCore/dfg/DFGLiveCatchVariablePreservationPhase.cpp 
b/Source/JavaScriptCore/dfg/DFGLiveCatchVariablePreservationPhase.cpp new file mode 100644 index 000000000..ea0cffd76 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGLiveCatchVariablePreservationPhase.cpp @@ -0,0 +1,174 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGLiveCatchVariablePreservationPhase.h" + +#if ENABLE(DFG_JIT) + +#include "DFGBasicBlockInlines.h" +#include "DFGGraph.h" +#include "DFGInsertionSet.h" +#include "DFGPhase.h" +#include "FullBytecodeLiveness.h" +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +class LiveCatchVariablePreservationPhase : public Phase { +public: + LiveCatchVariablePreservationPhase(Graph& graph) + : Phase(graph, "live catch variable preservation phase") + { + } + + bool run() + { + if (!m_graph.m_hasExceptionHandlers) + return true; + + DFG_ASSERT(m_graph, nullptr, m_graph.m_form == LoadStore); + + m_currentBlockLiveness.resize(m_graph.block(0)->variablesAtTail.numberOfLocals()); + + InsertionSet insertionSet(m_graph); + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + handleBlock(block, insertionSet); + insertionSet.execute(block); + } + + return true; + } + + bool willCatchException(CodeOrigin origin) + { + unsigned bytecodeIndexToCheck = origin.bytecodeIndex; + m_currentBlockLiveness.clearAll(); + + while (1) { + InlineCallFrame* inlineCallFrame = origin.inlineCallFrame; + CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame); + if (HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeIndexToCheck)) { + unsigned catchBytecodeIndex = handler->target; + m_graph.forAllLocalsLiveInBytecode(CodeOrigin(catchBytecodeIndex, inlineCallFrame), [&] (VirtualRegister operand) { + m_currentBlockLiveness.set(operand.toLocal(), true); + }); + return true; + } + + if (!inlineCallFrame) + return false; + + bytecodeIndexToCheck = inlineCallFrame->directCaller.bytecodeIndex; + origin = inlineCallFrame->directCaller; + } + } + + void handleBlock(BasicBlock* block, InsertionSet& insertionSet) + { + // Because precise jump targets ensures that the start of a "try" block is its + // own basic block, we will never have two "try" statements in the same DFG + // basic block. 
Therefore, checking the first node in the block is sufficient + // to checking if we're in a try block. + if (!willCatchException(block->at(0)->origin.semantic)) + return; + + Operands<VariableAccessData*> currentBlockAccessData(block->variablesAtTail.numberOfArguments(), block->variablesAtTail.numberOfLocals(), nullptr); + HashSet<InlineCallFrame*> seenInlineCallFrames; + + { + for (unsigned i = 0; i < block->size(); i++) { + Node* node = block->at(i); + bool isPrimordialSetArgument = node->op() == SetArgument && node->local().isArgument() && node == m_graph.m_arguments[node->local().toArgument()]; + InlineCallFrame* inlineCallFrame = node->origin.semantic.inlineCallFrame; + if (inlineCallFrame) + seenInlineCallFrames.add(inlineCallFrame); + + if (node->op() == SetLocal || (node->op() == SetArgument && !isPrimordialSetArgument)) { + VirtualRegister operand = node->local(); + + int stackOffset = inlineCallFrame ? inlineCallFrame->stackOffset : 0; + if ((operand.isLocal() && m_currentBlockLiveness.get(operand.toLocal())) + || (operand.offset() == stackOffset + CallFrame::thisArgumentOffset())) { + + VariableAccessData* variableAccessData = currentBlockAccessData.operand(operand); + if (!variableAccessData) + variableAccessData = newVariableAccessData(operand); + + insertionSet.insertNode(i, SpecNone, + Flush, node->origin, OpInfo(variableAccessData)); + } + } + + if (node->hasVariableAccessData(m_graph)) + currentBlockAccessData.operand(node->local()) = node->variableAccessData(); + } + } + + // Insert Flush for everything at the end of the block. + { + NodeOrigin origin = block->at(block->size() - 1)->origin; + auto preserveLivenessAtEndOfBlock = [&] (VirtualRegister operand, bool alwaysInsert) { + if ((operand.isLocal() && m_currentBlockLiveness.get(operand.toLocal())) + || operand.isArgument() + || alwaysInsert) { + VariableAccessData* accessData = currentBlockAccessData.operand(operand); + if (!accessData) + accessData = newVariableAccessData(operand); + + currentBlockAccessData.operand(operand) = accessData; + + insertionSet.insertNode(block->size(), SpecNone, + Flush, origin, OpInfo(accessData)); + } + }; + for (unsigned local = 0; local < block->variablesAtTail.numberOfLocals(); local++) + preserveLivenessAtEndOfBlock(virtualRegisterForLocal(local), false); + for (InlineCallFrame* inlineCallFrame : seenInlineCallFrames) + preserveLivenessAtEndOfBlock(VirtualRegister(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset()), true); + preserveLivenessAtEndOfBlock(VirtualRegister(CallFrame::thisArgumentOffset()), true); + } + } + + VariableAccessData* newVariableAccessData(VirtualRegister operand) + { + ASSERT(!operand.isConstant()); + + m_graph.m_variableAccessData.append(VariableAccessData(operand)); + return &m_graph.m_variableAccessData.last(); + } + + FastBitVector m_currentBlockLiveness; +}; + +bool performLiveCatchVariablePreservationPhase(Graph& graph) +{ + SamplingRegion samplingRegion("DFG Live Catch Variables Preservation Phase"); + return runPhase<LiveCatchVariablePreservationPhase>(graph); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGLiveCatchVariablePreservationPhase.h b/Source/JavaScriptCore/dfg/DFGLiveCatchVariablePreservationPhase.h new file mode 100644 index 000000000..2491c9c9a --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGLiveCatchVariablePreservationPhase.h @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGLiveCatchVariablePreservationPhase_h +#define DFGLiveCatchVariablePreservationPhase_h + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +class Graph; + +// This phase ensures that we maintain liveness for locals +// that are live in the "catch" block. Because a "catch" +// block will not be in the control flow graph, we need to ensure +// anything live inside the "catch" block in bytecode will maintain +// liveness inside the "try" block for an OSR exit from the "try" +// block into the "catch" block in the case of an exception being thrown. +// +// The mechanism currently used to demonstrate liveness to OSR exit +// is ensuring all variables live in a "catch" are flushed to the +// stack inside the "try" block. + +bool performLiveCatchVariablePreservationPhase(Graph&); + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGLiveCatchVariablePreservationPhase_h diff --git a/Source/JavaScriptCore/dfg/DFGLivenessAnalysisPhase.cpp b/Source/JavaScriptCore/dfg/DFGLivenessAnalysisPhase.cpp index 65c4105bc..94c398da1 100644 --- a/Source/JavaScriptCore/dfg/DFGLivenessAnalysisPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGLivenessAnalysisPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -32,7 +32,7 @@ #include "DFGGraph.h" #include "DFGInsertionSet.h" #include "DFGPhase.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -57,6 +57,7 @@ public: BasicBlock* block = m_graph.block(blockIndex); if (!block) continue; + block->ssa->liveAtTailIsDirty = true; block->ssa->liveAtHead.clear(); block->ssa->liveAtTail.clear(); } @@ -68,12 +69,11 @@ public: } while (m_changed); if (!m_graph.block(0)->ssa->liveAtHead.isEmpty()) { - dataLog( - "Bad liveness analysis result: live at root is not empty: ", - nodeListDump(m_graph.block(0)->ssa->liveAtHead), "\n"); - dataLog("IR at time of error:\n"); - m_graph.dump(); - CRASH(); + DFG_CRASH( + m_graph, nullptr, + toCString( + "Bad liveness analysis result: live at root is not empty: ", + nodeListDump(m_graph.block(0)->ssa->liveAtHead)).data()); } return true; @@ -85,11 +85,12 @@ private: BasicBlock* block = m_graph.block(blockIndex); if (!block) return; - - // FIXME: It's likely that this can be improved, for static analyses that use - // HashSets. https://bugs.webkit.org/show_bug.cgi?id=118455 + + if (!block->ssa->liveAtTailIsDirty) + return; + block->ssa->liveAtTailIsDirty = false; + m_live = block->ssa->liveAtTail; - for (unsigned nodeIndex = block->size(); nodeIndex--;) { Node* node = block->at(nodeIndex); @@ -131,13 +132,17 @@ private: } } - if (m_live == block->ssa->liveAtHead) - return; - - m_changed = true; - block->ssa->liveAtHead = m_live; - for (unsigned i = block->predecessors.size(); i--;) - block->predecessors[i]->ssa->liveAtTail.add(m_live.begin(), m_live.end()); + for (Node* node : m_live) { + if (!block->ssa->liveAtHead.contains(node)) { + m_changed = true; + for (unsigned i = block->predecessors.size(); i--;) { + BasicBlock* predecessor = block->predecessors[i]; + if (predecessor->ssa->liveAtTail.add(node).isNewEntry) + predecessor->ssa->liveAtTailIsDirty = true; + } + } + } + block->ssa->liveAtHead = WTFMove(m_live); } void addChildUse(Node*, Edge& edge) diff --git a/Source/JavaScriptCore/dfg/DFGLivenessAnalysisPhase.h b/Source/JavaScriptCore/dfg/DFGLivenessAnalysisPhase.h index 806611235..28b72853c 100644 --- a/Source/JavaScriptCore/dfg/DFGLivenessAnalysisPhase.h +++ b/Source/JavaScriptCore/dfg/DFGLivenessAnalysisPhase.h @@ -26,8 +26,6 @@ #ifndef DFGLivenessAnalysisPhase_h #define DFGLivenessAnalysisPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGCommon.h" diff --git a/Source/JavaScriptCore/dfg/DFGLongLivedState.cpp b/Source/JavaScriptCore/dfg/DFGLongLivedState.cpp index 26dc16c7c..7a2f2f1f0 100644 --- a/Source/JavaScriptCore/dfg/DFGLongLivedState.cpp +++ b/Source/JavaScriptCore/dfg/DFGLongLivedState.cpp @@ -28,6 +28,8 @@ #if ENABLE(DFG_JIT) +#include "JSCInlines.h" + namespace JSC { namespace DFG { LongLivedState::LongLivedState() diff --git a/Source/JavaScriptCore/dfg/DFGLongLivedState.h b/Source/JavaScriptCore/dfg/DFGLongLivedState.h index 9eb676885..77fab0407 100644 --- a/Source/JavaScriptCore/dfg/DFGLongLivedState.h +++ b/Source/JavaScriptCore/dfg/DFGLongLivedState.h @@ -26,8 +26,6 @@ #ifndef DFGLongLivedState_h #define DFGLongLivedState_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGNodeAllocator.h" diff --git a/Source/JavaScriptCore/dfg/DFGLoopPreHeaderCreationPhase.cpp b/Source/JavaScriptCore/dfg/DFGLoopPreHeaderCreationPhase.cpp index 507e00ff5..30f44fc8f 100644 --- 
a/Source/JavaScriptCore/dfg/DFGLoopPreHeaderCreationPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGLoopPreHeaderCreationPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,22 +30,80 @@ #include "DFGBasicBlockInlines.h" #include "DFGBlockInsertionSet.h" +#include "DFGDominators.h" #include "DFGGraph.h" +#include "DFGNaturalLoops.h" #include "DFGPhase.h" -#include "Operations.h" +#include "JSCInlines.h" #include <wtf/HashMap.h> namespace JSC { namespace DFG { BasicBlock* createPreHeader(Graph& graph, BlockInsertionSet& insertionSet, BasicBlock* block) { - BasicBlock* preHeader = insertionSet.insertBefore(block); + // FIXME: If we run this utility on SSA IR, then we may end up with a bizarre arrangement of + // Upsilons and Phis, like: + // + // BB#1: + // Upsilon(@a, ^p) + // Jump(#3) + // + // BB#2: + // Upsilon(@b, ^p) + // Jump(#3) + // + // BB#3: + // Jump(#4) + // + // BB#4: + // p: Phi() + // + // Notice how the Upsilons are not in the predecessor of the Phi anymore. It's not clear if this + // would be bad. Probably not, but it's weird anyway. We should add a validation rule, and we + // should implement a Upsilon/Phi canonicalization that handles this by calling into the + // SSACalculator and treating the Upsilons as Defs and rebuilding the Phis from scratch. + // + // https://bugs.webkit.org/show_bug.cgi?id=148587 + + // Determine a good frequency for the pre-header. It's definitely not the frequency of the loop body. + // Instead, we use the max of the frequencies of the loop body's non-loop predecessors. + float frequency = 0; + for (BasicBlock* predecessor : block->predecessors) { + if (graph.m_dominators->dominates(block, predecessor)) + continue; + frequency = std::max(frequency, predecessor->executionCount); + } + BasicBlock* preHeader = insertionSet.insertBefore(block, frequency); + + // FIXME: It would be great if we put some effort into enabling exitOK at this origin, if it + // happens to be unset. It might not be set because the loop header (i.e. "block") has Phis in it. + // Phis have to have exitOK=false. There are a few ways to try to set exitOK: + // + // - Regenerate an exit origin by proving that we are at an exit origin boundary. If all of the + // predecessors' terminals have different exit origins than the exit origin of head of block, + // then we can leverage the assumption that exit origin boundaries can always exit. We could + // extend this further, and say that we will set exitOK even if a predecessor's terminal has the + // same exit origin, but so long it hadn't done anything that clobbers exit since the start of + // the origin. + // + // - Analyze the Phi's and MovHint's at the head of block. If prior to the ExitOK there are only + // Phi's and MovHint's, we could "roll them back" by proving that for each of the MovHints, the + // referenced Phi has a child that dominates the pre-header, and that child is the node that is + // OSR-available at the local being MovHinted. + // + // Note that there are some obviously wrong ways to try to set exitOK. For example, we cannot + // simply use the origin of our predecessors, since in bytecode that could be *any* kind of + // instruction. It may not even be a control flow construct, if we had lowered some non-control + // bytecode operation into DFG IR that has control flow. 
Hence, we really do need to try to use the + // origin of the head of the loop header. + // + // https://bugs.webkit.org/show_bug.cgi?id=148586 preHeader->appendNode( - graph, SpecNone, Jump, block->at(0)->codeOrigin, OpInfo(block)); + graph, SpecNone, Jump, block->at(0)->origin, OpInfo(block)); for (unsigned predecessorIndex = 0; predecessorIndex < block->predecessors.size(); predecessorIndex++) { BasicBlock* predecessor = block->predecessors[predecessorIndex]; - if (graph.m_dominators.dominates(block, predecessor)) + if (graph.m_dominators->dominates(block, predecessor)) continue; block->predecessors[predecessorIndex--] = block->predecessors.last(); block->predecessors.removeLast(); @@ -72,26 +130,45 @@ public: bool run() { - m_graph.m_dominators.computeIfNecessary(m_graph); - m_graph.m_naturalLoops.computeIfNecessary(m_graph); + m_graph.ensureDominators(); + m_graph.ensureNaturalLoops(); - for (unsigned loopIndex = m_graph.m_naturalLoops.numLoops(); loopIndex--;) { - const NaturalLoop& loop = m_graph.m_naturalLoops.loop(loopIndex); + for (unsigned loopIndex = m_graph.m_naturalLoops->numLoops(); loopIndex--;) { + const NaturalLoop& loop = m_graph.m_naturalLoops->loop(loopIndex); BasicBlock* existingPreHeader = 0; bool needsNewPreHeader = false; for (unsigned predecessorIndex = loop.header()->predecessors.size(); predecessorIndex--;) { BasicBlock* predecessor = loop.header()->predecessors[predecessorIndex]; - if (m_graph.m_dominators.dominates(loop.header(), predecessor)) + if (m_graph.m_dominators->dominates(loop.header(), predecessor)) continue; if (!existingPreHeader) { existingPreHeader = predecessor; continue; } - if (existingPreHeader == predecessor) - continue; + // We won't have duplicate entries in the predecessors list. + DFG_ASSERT(m_graph, nullptr, existingPreHeader != predecessor); needsNewPreHeader = true; break; } + + // This phase should only be run on a DFG where unreachable blocks have been pruned. + // We also don't allow loops back to root. This means that every loop header has got + // to have a pre-header. + DFG_ASSERT(m_graph, nullptr, existingPreHeader); + + // We are looking at the predecessors of a loop header. A loop header has to have + // some predecessor other than the pre-header. We must have broken critical edges + // because that is the DFG SSA convention. Therefore, each predecessor of the loop + // header must have only one successor. + DFG_ASSERT(m_graph, nullptr, existingPreHeader->terminal()->op() == Jump); + + // A pre-header is most useful if it's possible to exit from its terminal. Hence + // if the terminal of the existing pre-header doesn't allow for exit, but the first + // origin of the loop header does, then we should create a new pre-header. 
+ if (!needsNewPreHeader && loop.header()->at(0)->origin.exitOK + && !existingPreHeader->terminal()->origin.exitOK) + needsNewPreHeader = true; + if (!needsNewPreHeader) continue; diff --git a/Source/JavaScriptCore/dfg/DFGLoopPreHeaderCreationPhase.h b/Source/JavaScriptCore/dfg/DFGLoopPreHeaderCreationPhase.h index a229875c8..bdd62167d 100644 --- a/Source/JavaScriptCore/dfg/DFGLoopPreHeaderCreationPhase.h +++ b/Source/JavaScriptCore/dfg/DFGLoopPreHeaderCreationPhase.h @@ -26,8 +26,6 @@ #ifndef DFGLoopPreHeaderCreationPhase_h #define DFGLoopPreHeaderCreationPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) namespace JSC { namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGMaximalFlushInsertionPhase.cpp b/Source/JavaScriptCore/dfg/DFGMaximalFlushInsertionPhase.cpp new file mode 100644 index 000000000..a63442781 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGMaximalFlushInsertionPhase.cpp @@ -0,0 +1,159 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGMaximalFlushInsertionPhase.h" + +#if ENABLE(DFG_JIT) + +#include "DFGBasicBlockInlines.h" +#include "DFGGraph.h" +#include "DFGInsertionSet.h" +#include "DFGPhase.h" +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +class MaximalFlushInsertionPhase : public Phase { +public: + MaximalFlushInsertionPhase(Graph& graph) + : Phase(graph, "maximal flush insertion phase") + { + } + + bool run() + { + DFG_ASSERT(m_graph, nullptr, m_graph.m_form == LoadStore); + + InsertionSet insertionSet(m_graph); + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + treatRegularBlock(block, insertionSet); + insertionSet.execute(block); + } + + treatRootBlock(m_graph.block(0), insertionSet); + insertionSet.execute(m_graph.block(0)); + + return true; + } + + void treatRegularBlock(BasicBlock* block, InsertionSet& insertionSet) + { + Operands<VariableAccessData*> currentBlockAccessData(block->variablesAtTail.numberOfArguments(), block->variablesAtTail.numberOfLocals(), nullptr); + // Insert a Flush before every SetLocal to properly pattern the graph such that + // any range between SetLocal and Flush has access to the local on the stack. 
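As an illustration of the invariant this buys, here is a hypothetical block before and after the phase (loc1 is an arbitrary local; this is sketched IR, not output from the patch):

    // Before maximal flush insertion:
    //     SetLocal(loc1, @x)
    //     ...                      (nodes that want to observe loc1 on the stack)
    //     SetLocal(loc1, @y)
    //
    // After it:
    //     Flush(loc1)              <- inserted before the first SetLocal
    //     SetLocal(loc1, @x)
    //     ...                      (loc1 is now observably on the stack here)
    //     Flush(loc1)              <- inserted before the next SetLocal
    //     SetLocal(loc1, @y)
    //     Flush(loc1), Flush(loc2), ..., Flush(arg0), ...   <- every local and argument flushed at block end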
+ { + for (unsigned i = 0; i < block->size(); i++) { + Node* node = block->at(i); + bool isPrimordialSetArgument = node->op() == SetArgument && node->local().isArgument() && node == m_graph.m_arguments[node->local().toArgument()]; + if (node->op() == SetLocal || (node->op() == SetArgument && !isPrimordialSetArgument)) { + VirtualRegister operand = node->local(); + VariableAccessData* flushAccessData = currentBlockAccessData.operand(operand); + if (!flushAccessData) + flushAccessData = newVariableAccessData(operand); + + insertionSet.insertNode(i, SpecNone, + Flush, node->origin, OpInfo(flushAccessData)); + } + + if (node->hasVariableAccessData(m_graph)) + currentBlockAccessData.operand(node->local()) = node->variableAccessData(); + } + } + + // Flush everything at the end of the block. + { + NodeOrigin origin = block->at(block->size() - 1)->origin; + auto insertFlushAtEnd = [&] (VirtualRegister operand) { + VariableAccessData* accessData = currentBlockAccessData.operand(operand); + if (!accessData) + accessData = newVariableAccessData(operand); + + currentBlockAccessData.operand(operand) = accessData; + + insertionSet.insertNode(block->size(), SpecNone, + Flush, origin, OpInfo(accessData)); + }; + + for (unsigned i = 0; i < block->variablesAtTail.numberOfLocals(); i++) + insertFlushAtEnd(virtualRegisterForLocal(i)); + for (unsigned i = 0; i < block->variablesAtTail.numberOfArguments(); i++) + insertFlushAtEnd(virtualRegisterForArgument(i)); + } + } + + void treatRootBlock(BasicBlock* block, InsertionSet& insertionSet) + { + Operands<VariableAccessData*> initialAccessData(block->variablesAtTail.numberOfArguments(), block->variablesAtTail.numberOfLocals(), nullptr); + Operands<Node*> initialAccessNodes(block->variablesAtTail.numberOfArguments(), block->variablesAtTail.numberOfLocals(), nullptr); + for (unsigned i = 0; i < block->size(); i++) { + Node* node = block->at(i); + if (!node->hasVariableAccessData(m_graph)) + continue; + + VirtualRegister operand = node->local(); + if (initialAccessData.operand(operand)) + continue; + + DFG_ASSERT(m_graph, node, node->op() != SetLocal); // We should have inserted a Flush before this! + initialAccessData.operand(operand) = node->variableAccessData(); + initialAccessNodes.operand(operand) = node; + } + + // We want every Flush to be able to reach backwards to + // a SetLocal. Doing this in the root block achieves this goal. + NodeOrigin origin = block->at(0)->origin; + Node* undefined = insertionSet.insertConstant(0, origin, jsUndefined()); + + for (unsigned i = 0; i < block->variablesAtTail.numberOfLocals(); i++) { + VirtualRegister operand = virtualRegisterForLocal(i); + VariableAccessData* accessData; + DFG_ASSERT(m_graph, nullptr, initialAccessNodes.operand(operand)->op() == Flush); // We should have inserted a Flush before any SetLocal/SetArgument for the local that we are analyzing now. 
+ accessData = initialAccessData.operand(operand); + DFG_ASSERT(m_graph, nullptr, accessData); + insertionSet.insertNode(0, SpecNone, + SetLocal, origin, OpInfo(accessData), Edge(undefined)); + } + } + + + VariableAccessData* newVariableAccessData(VirtualRegister operand) + { + ASSERT(!operand.isConstant()); + + m_graph.m_variableAccessData.append(VariableAccessData(operand)); + return &m_graph.m_variableAccessData.last(); + } +}; + +bool performMaximalFlushInsertion(Graph& graph) +{ + SamplingRegion samplingRegion("DFG Flush Everything Insertion Phase"); + return runPhase<MaximalFlushInsertionPhase>(graph); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGMaximalFlushInsertionPhase.h b/Source/JavaScriptCore/dfg/DFGMaximalFlushInsertionPhase.h new file mode 100644 index 000000000..5b4f97983 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGMaximalFlushInsertionPhase.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGMaximalFlushInsertionPhase_h +#define DFGMaximalFlushInsertionPhase_h + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +class Graph; + +// This phase ensures we keep all locals/arguments flushed. +// What this means is: any node inbetween a SetLocal and a Flush should +// be able to observe that particular local on the stack. This phase patterns +// the graph by inserting a Flush before each SetLocal such that the Flush +// we inserted can do a backwards search through all paths in the CFG and +// reach a SetLocal. +// .... +// SetLocal(locX) +// < +// | +// ... We ensure that locX is available on the stack to any nodes in this region that may ask for locX. +// | +// > +// Flush(locX) +// SetLocal(locX) + +bool performMaximalFlushInsertion(Graph&); + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGMaximalFlushInsertionPhase_h diff --git a/Source/JavaScriptCore/dfg/DFGMayExit.cpp b/Source/JavaScriptCore/dfg/DFGMayExit.cpp new file mode 100644 index 000000000..daa29339d --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGMayExit.cpp @@ -0,0 +1,189 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGMayExit.h" + +#if ENABLE(DFG_JIT) + +#include "DFGGraph.h" +#include "DFGNode.h" +#include "Operations.h" + +namespace JSC { namespace DFG { + +namespace { + +class EdgeMayExit { +public: + EdgeMayExit() + : m_result(false) + { + } + + void operator()(Node*, Edge edge) + { + // FIXME: Maybe this should call mayHaveTypeCheck(edge.useKind()) instead. + // https://bugs.webkit.org/show_bug.cgi?id=148545 + if (edge.willHaveCheck()) { + m_result = true; + return; + } + + switch (edge.useKind()) { + // These are shady because nodes that have these use kinds will typically exit for + // unrelated reasons. For example CompareEq doesn't usually exit, but if it uses ObjectUse + // then it will. + case ObjectUse: + case ObjectOrOtherUse: + m_result = true; + break; + + // These are shady because they check the structure even if the type of the child node + // passes the StringObject type filter. + case StringObjectUse: + case StringOrStringObjectUse: + m_result = true; + break; + + default: + break; + } + } + + bool result() const { return m_result; } + +private: + bool m_result; +}; + +} // anonymous namespace + +ExitMode mayExit(Graph& graph, Node* node) +{ + ExitMode result = DoesNotExit; + + switch (node->op()) { + // This is a carefully curated list of nodes that definitely do not exit. We try to be very + // conservative when maintaining this list, because adding new node types to it doesn't + // generally make things a lot better but it might introduce subtle bugs. 
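For orientation (an illustration, not part of the patch): the three ExitMode values, documented in DFGMayExit.h further down in this diff, answer two different questions for a caller. A minimal sketch of how a client is expected to interpret the result:

    ExitMode mode = mayExit(graph, node);

    // "Can this node replay its exit origin?" - what matters for store elimination
    // and for placing nodes at ExitInvalid positions. Only Exits answers yes.
    bool mayReexecuteExitOrigin = (mode == Exits);

    // "Can this node trigger any OSR exit at all, including by throwing?" - what
    // matters to phases that track OSR-visible state, such as the MovHint removal
    // phase later in this patch. Here ExitsForExceptions counts too.
    bool mayUseOSRExitMachinery = (mode != DoesNotExit);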
+ case SetArgument: + case JSConstant: + case DoubleConstant: + case Int52Constant: + case MovHint: + case SetLocal: + case Flush: + case Phantom: + case Check: + case Identity: + case GetLocal: + case LoopHint: + case Phi: + case Upsilon: + case ZombieHint: + case ExitOK: + case BottomValue: + case PutHint: + case PhantomNewObject: + case PutStack: + case KillStack: + case GetStack: + case GetCallee: + case GetArgumentCount: + case GetRestLength: + case GetScope: + case PhantomLocal: + case CountExecution: + case Jump: + case Branch: + case Unreachable: + case DoubleRep: + case Int52Rep: + case ValueRep: + case ExtractOSREntryLocal: + case LogicalNot: + case NotifyWrite: + case PutStructure: + case StoreBarrier: + case PutByOffset: + case PutClosureVar: + break; + + case StrCat: + case Call: + case Construct: + case CallVarargs: + case ConstructVarargs: + case CallForwardVarargs: + case ConstructForwardVarargs: + case MaterializeCreateActivation: + case MaterializeNewObject: + case NewFunction: + case NewArrowFunction: + case NewGeneratorFunction: + case NewStringObject: + case CreateActivation: + result = ExitsForExceptions; + break; + + default: + // If in doubt, return true. + return Exits; + } + + EdgeMayExit functor; + DFG_NODE_DO_TO_CHILDREN(graph, node, functor); + if (functor.result()) + result = Exits; + + return result; +} + +} } // namespace JSC::DFG + +namespace WTF { + +using namespace JSC::DFG; + +void printInternal(PrintStream& out, ExitMode mode) +{ + switch (mode) { + case DoesNotExit: + out.print("DoesNotExit"); + return; + case ExitsForExceptions: + out.print("ExitsForExceptions"); + return; + case Exits: + out.print("Exits"); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF + +#endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGMayExit.h b/Source/JavaScriptCore/dfg/DFGMayExit.h new file mode 100644 index 000000000..7cece0d40 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGMayExit.h @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef DFGMayExit_h +#define DFGMayExit_h + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +class Graph; +struct Node; + +// A *very* conservative approximation of whether or not a node could possibly exit. Usually +// returns true except in cases where we obviously don't expect an exit. + +enum ExitMode { + // The node does not exit at all. This means that it's legal to eliminate the first store in a + // program like: + // + // global = 1 // first store + // DoesNotExit(); // let's assume that this also doesn't read "global" + // global = 2 // second store + // + // It's legal to eliminate the first one since nobody will see it; the second store is always + // executed right after. + DoesNotExit, + + // The node will exit, but only by properly throwing exceptions. A proper exception throw will + // divert execution to the matching op_catch and will not reexecute the exit origin. This means + // that the store elimination optimization above would be illegal, but the following would be OK: + // + // SideEffect(..., exit: bc#42) + // ExitsForExceptions(..., exit: #bc42, ExitInvalid) + // + // In particular, it's OK for a node that reports ExitsForExceptions to be executed in a context + // where !Node::origin.exitOK. That's because this node will not exit in a manner that could lead + // to the reexecution of SideEffect(). + ExitsForExceptions, + + // The node will exit to the exit origin. This means that we cannot do store elimination like for + // DoesNotExit and also we cannot place this node in an ExitInvalid context, since this will exit + // in a manner that will cause the reexecution of all previous operations within this exit origin. + Exits +}; + +// FIXME: This currently consumes the Check: flag produced by AI, and will claim that something doesn't +// exit if the Check: flag was cleared. This makes it hard to use mayExit() for things like hoisting +// (for example in LICM), since that wants to know if the node would exit if it was moved somewhere +// else. +// https://bugs.webkit.org/show_bug.cgi?id=148545 + +ExitMode mayExit(Graph&, Node*); + +} } // namespace JSC::DFG + +namespace WTF { + +class PrintStream; + +void printInternal(PrintStream&, JSC::DFG::ExitMode); + +} // namespace WTF + +#endif // ENABLE(DFG_JIT) + +#endif // DFGMayExit_h + diff --git a/Source/JavaScriptCore/dfg/DFGMinifiedGraph.cpp b/Source/JavaScriptCore/dfg/DFGMinifiedGraph.cpp new file mode 100644 index 000000000..61f331f10 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGMinifiedGraph.cpp @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGMinifiedGraph.h" + +#if ENABLE(DFG_JIT) + +#include "JSCInlines.h" +#include "TrackedReferences.h" + +namespace JSC { namespace DFG { + +void MinifiedGraph::prepareAndShrink() +{ + std::sort(m_list.begin(), m_list.end(), MinifiedNode::compareByNodeIndex); + m_list.shrinkToFit(); +} + +void MinifiedGraph::validateReferences(const TrackedReferences& trackedReferences) +{ + for (MinifiedNode& node : m_list) { + if (node.hasConstant()) + trackedReferences.check(node.constant()); + } +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGMinifiedGraph.h b/Source/JavaScriptCore/dfg/DFGMinifiedGraph.h index 892a20648..4fc44f8cc 100644 --- a/Source/JavaScriptCore/dfg/DFGMinifiedGraph.h +++ b/Source/JavaScriptCore/dfg/DFGMinifiedGraph.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGMinifiedGraph_h #define DFGMinifiedGraph_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGMinifiedNode.h" @@ -35,7 +33,11 @@ #include <wtf/StdLibExtras.h> #include <wtf/Vector.h> -namespace JSC { namespace DFG { +namespace JSC { + +class TrackedReferences; + +namespace DFG { class MinifiedGraph { public: @@ -52,11 +54,9 @@ public: m_list.append(node); } - void prepareAndShrink() - { - std::sort(m_list.begin(), m_list.end(), MinifiedNode::compareByNodeIndex); - m_list.shrinkToFit(); - } + void prepareAndShrink(); + + void validateReferences(const TrackedReferences&); private: Vector<MinifiedNode> m_list; diff --git a/Source/JavaScriptCore/dfg/DFGMinifiedID.h b/Source/JavaScriptCore/dfg/DFGMinifiedID.h index 24ea25645..bdb312d81 100644 --- a/Source/JavaScriptCore/dfg/DFGMinifiedID.h +++ b/Source/JavaScriptCore/dfg/DFGMinifiedID.h @@ -26,10 +26,6 @@ #ifndef DFGMinifiedID_h #define DFGMinifiedID_h -#include <wtf/Platform.h> - -#if ENABLE(DFG_JIT) - #include "DFGCommon.h" #include <wtf/HashMap.h> #include <wtf/PrintStream.h> @@ -39,6 +35,7 @@ namespace JSC { namespace DFG { class Graph; class MinifiedNode; class ValueSource; +struct Node; class MinifiedID { public: @@ -100,11 +97,11 @@ template<> struct DefaultHash<JSC::DFG::MinifiedID> { }; template<typename T> struct HashTraits; -template<> struct HashTraits<JSC::DFG::MinifiedID> : SimpleClassHashTraits<JSC::DFG::MinifiedID> { }; +template<> struct HashTraits<JSC::DFG::MinifiedID> : SimpleClassHashTraits<JSC::DFG::MinifiedID> { + static const bool emptyValueIsZero = false; +}; } // namespace WTF -#endif // ENABLE(DFG_JIT) - #endif // DFGMinifiedID_h diff --git a/Source/JavaScriptCore/dfg/DFGMinifiedNode.cpp b/Source/JavaScriptCore/dfg/DFGMinifiedNode.cpp index 802cb2984..80795c2fe 100644 --- a/Source/JavaScriptCore/dfg/DFGMinifiedNode.cpp +++ b/Source/JavaScriptCore/dfg/DFGMinifiedNode.cpp @@ -1,5 +1,5 @@ /* - * Copyright 
(C) 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,6 +29,7 @@ #if ENABLE(DFG_JIT) #include "DFGNode.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -38,13 +39,11 @@ MinifiedNode MinifiedNode::fromNode(Node* node) MinifiedNode result; result.m_id = MinifiedID(node); result.m_op = node->op(); - if (hasConstantNumber(node->op())) - result.m_info = node->constantNumber(); - else if (hasWeakConstant(node->op())) - result.m_info = bitwise_cast<uintptr_t>(node->weakConstant()); + if (hasConstant(node->op())) + result.m_info = JSValue::encode(node->asJSValue()); else { - ASSERT(node->op() == PhantomArguments); - result.m_info = 0; + ASSERT(node->op() == PhantomDirectArguments || node->op() == PhantomClonedArguments); + result.m_info = bitwise_cast<uintptr_t>(node->origin.semantic.inlineCallFrame); } return result; } diff --git a/Source/JavaScriptCore/dfg/DFGMinifiedNode.h b/Source/JavaScriptCore/dfg/DFGMinifiedNode.h index afea6aeed..29798bc22 100644 --- a/Source/JavaScriptCore/dfg/DFGMinifiedNode.h +++ b/Source/JavaScriptCore/dfg/DFGMinifiedNode.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2014, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGMinifiedNode_h #define DFGMinifiedNode_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGCommon.h" @@ -42,8 +40,10 @@ inline bool belongsInMinifiedGraph(NodeType type) { switch (type) { case JSConstant: - case WeakJSConstant: - case PhantomArguments: + case Int52Constant: + case DoubleConstant: + case PhantomDirectArguments: + case PhantomClonedArguments: return true; default: return false; @@ -59,22 +59,18 @@ public: MinifiedID id() const { return m_id; } NodeType op() const { return m_op; } - bool hasConstant() const { return hasConstantNumber() || hasWeakConstant(); } - - bool hasConstantNumber() const { return hasConstantNumber(m_op); } + bool hasConstant() const { return hasConstant(m_op); } - unsigned constantNumber() const + JSValue constant() const { - ASSERT(hasConstantNumber(m_op)); - return m_info; + return JSValue::decode(bitwise_cast<EncodedJSValue>(m_info)); } - bool hasWeakConstant() const { return hasWeakConstant(m_op); } + bool hasInlineCallFrame() const { return hasInlineCallFrame(m_op); } - JSCell* weakConstant() const + InlineCallFrame* inlineCallFrame() const { - ASSERT(hasWeakConstant(m_op)); - return bitwise_cast<JSCell*>(m_info); + return bitwise_cast<InlineCallFrame*>(static_cast<uintptr_t>(m_info)); } static MinifiedID getID(MinifiedNode* node) { return node->id(); } @@ -84,17 +80,18 @@ public: } private: - static bool hasConstantNumber(NodeType type) + static bool hasConstant(NodeType type) { - return type == JSConstant; + return type == JSConstant || type == Int52Constant || type == DoubleConstant; } - static bool hasWeakConstant(NodeType type) + + static bool hasInlineCallFrame(NodeType type) { - return type == WeakJSConstant; + return type == PhantomDirectArguments || type == PhantomClonedArguments; } MinifiedID m_id; - uintptr_t m_info; + uint64_t m_info; NodeType m_op; }; diff --git a/Source/JavaScriptCore/dfg/DFGMovHintRemovalPhase.cpp b/Source/JavaScriptCore/dfg/DFGMovHintRemovalPhase.cpp 
new file mode 100644 index 000000000..72be41f07 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGMovHintRemovalPhase.cpp @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGMovHintRemovalPhase.h" + +#if ENABLE(DFG_JIT) + +#include "BytecodeLivenessAnalysisInlines.h" +#include "DFGEpoch.h" +#include "DFGForAllKills.h" +#include "DFGGraph.h" +#include "DFGInsertionSet.h" +#include "DFGMayExit.h" +#include "DFGPhase.h" +#include "JSCInlines.h" +#include "OperandsInlines.h" + +namespace JSC { namespace DFG { + +namespace { + +bool verbose = false; + +class MovHintRemovalPhase : public Phase { +public: + MovHintRemovalPhase(Graph& graph) + : Phase(graph, "MovHint removal") + , m_state(OperandsLike, graph.block(0)->variablesAtHead) + , m_changed(false) + { + } + + bool run() + { + if (verbose) { + dataLog("Graph before MovHint removal:\n"); + m_graph.dump(); + } + + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) + handleBlock(block); + + return m_changed; + } + +private: + void handleBlock(BasicBlock* block) + { + if (verbose) + dataLog("Handing block ", pointerDump(block), "\n"); + + // A MovHint is unnecessary if the local dies before it is used. We answer this question by + // maintaining the current exit epoch, and associating an epoch with each local. When a + // local dies, it gets the current exit epoch. If a MovHint occurs in the same epoch as its + // local, then it means there was no exit between the local's death and the MovHint - i.e. + // the MovHint is unnecessary. + + Epoch currentEpoch = Epoch::first(); + + m_state.fill(Epoch()); + m_graph.forAllLiveInBytecode( + block->terminal()->origin.forExit, + [&] (VirtualRegister reg) { + m_state.operand(reg) = currentEpoch; + }); + + if (verbose) + dataLog(" Locals: ", m_state, "\n"); + + // Assume that blocks after us exit. 
+ currentEpoch.bump(); + + for (unsigned nodeIndex = block->size(); nodeIndex--;) { + Node* node = block->at(nodeIndex); + + if (node->op() == MovHint) { + Epoch localEpoch = m_state.operand(node->unlinkedLocal()); + if (verbose) + dataLog(" At ", node, ": current = ", currentEpoch, ", local = ", localEpoch, "\n"); + if (!localEpoch || localEpoch == currentEpoch) { + node->setOpAndDefaultFlags(ZombieHint); + node->child1() = Edge(); + m_changed = true; + } + m_state.operand(node->unlinkedLocal()) = Epoch(); + } + + if (mayExit(m_graph, node) != DoesNotExit) + currentEpoch.bump(); + + if (nodeIndex) { + forAllKilledOperands( + m_graph, block->at(nodeIndex - 1), node, + [&] (VirtualRegister reg) { + // This function is a bit sloppy - it might claim to kill a local even if + // it's still live after. We need to protect against that. + if (!!m_state.operand(reg)) + return; + + if (verbose) + dataLog(" Killed operand at ", node, ": ", reg, "\n"); + m_state.operand(reg) = currentEpoch; + }); + } + } + } + + Operands<Epoch> m_state; + bool m_changed; +}; + +} // anonymous namespace + +bool performMovHintRemoval(Graph& graph) +{ + SamplingRegion samplingRegion("DFG MovHint Removal Phase"); + return runPhase<MovHintRemovalPhase>(graph); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGMovHintRemovalPhase.h b/Source/JavaScriptCore/dfg/DFGMovHintRemovalPhase.h new file mode 100644 index 000000000..dd4c20626 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGMovHintRemovalPhase.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGMovHintRemovalPhase_h +#define DFGMovHintRemovalPhase_h + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +class Graph; + +// Cleans up unnecessary MovHints. A MovHint is necessary if the variable dies before there is an +// exit. 
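The epoch bookkeeping that MovHintRemovalPhase::handleBlock() performs above can be illustrated with a small standalone sketch. This is not DFG code: OpKind, Op, and the toy block below are hypothetical stand-ins for the real IR, "Exit" stands for any operation that may exit, and "Use" marks the point where a local dies in bytecode. The removable case is the "unnecessary" one, as in the .cpp comment above: the local dies before any exit could observe the hinted value, so the MovHint's local and the current exit epoch match (or the local is never live below the MovHint at all). The real phase additionally seeds the epochs of locals live at the block terminal and resets a local's epoch at each MovHint; both are omitted here for brevity.

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <vector>

    // Hypothetical stand-ins for the real IR; not JSC types.
    enum class OpKind { MovHint, Exit, Use };

    struct Op {
        OpKind kind;
        int local; // which local a MovHint/Use refers to; ignored for Exit
    };

    int main()
    {
        // Forward order: MovHint(x); Exit; Use(x) kills x; MovHint(x); Use(x) kills x.
        std::vector<Op> block = {
            { OpKind::MovHint, 0 }, // necessary: the Exit below may observe x before x dies
            { OpKind::Exit,    0 },
            { OpKind::Use,     0 }, // x dies here
            { OpKind::MovHint, 0 }, // unnecessary: x dies below with no Exit in between
            { OpKind::Use,     0 }, // x dies here
        };

        uint64_t currentEpoch = 1;          // bumped at every operation that may exit
        std::map<int, uint64_t> deathEpoch; // local -> epoch in which it next dies; 0 = dead below

        for (size_t i = block.size(); i--;) {
            const Op& op = block[i];
            switch (op.kind) {
            case OpKind::MovHint: {
                uint64_t localEpoch = deathEpoch[op.local];
                // Same epoch as the local's death (or never live below) => no exit in between
                // => the MovHint is unnecessary, mirroring the check in handleBlock() above.
                bool unnecessary = !localEpoch || localEpoch == currentEpoch;
                std::printf("MovHint at index %zu is %s\n", i, unnecessary ? "unnecessary" : "necessary");
                break;
            }
            case OpKind::Exit:
                currentEpoch++;
                break;
            case OpKind::Use:
                deathEpoch[op.local] = currentEpoch; // walking backwards, this is where the local dies
                break;
            }
        }
        return 0;
    }

Running the sketch reports the MovHint at index 3 as unnecessary (its local dies again before any exit) and the one at index 0 as necessary (an exit sits between it and the local's death), which is exactly the distinction the phase uses before converting a hint to ZombieHint.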
+ +bool performMovHintRemoval(Graph&); + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGMovHintRemovalPhase_h diff --git a/Source/JavaScriptCore/dfg/DFGMultiGetByOffsetData.cpp b/Source/JavaScriptCore/dfg/DFGMultiGetByOffsetData.cpp new file mode 100644 index 000000000..d1a111be7 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGMultiGetByOffsetData.cpp @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGMultiGetByOffsetData.h" + +#if ENABLE(DFG_JIT) + +#include "DFGFrozenValue.h" +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +void GetByOffsetMethod::dumpInContext(PrintStream& out, DumpContext* context) const +{ + out.print(m_kind, ":"); + switch (m_kind) { + case Invalid: + out.print("<none>"); + return; + case Constant: + out.print(pointerDumpInContext(constant(), context)); + return; + case Load: + out.print(offset()); + return; + case LoadFromPrototype: + out.print(offset(), "@", pointerDumpInContext(prototype(), context)); + return; + } +} + +void GetByOffsetMethod::dump(PrintStream& out) const +{ + dumpInContext(out, nullptr); +} + +void MultiGetByOffsetCase::dumpInContext(PrintStream& out, DumpContext* context) const +{ + out.print(inContext(m_set, context), ":", inContext(m_method, context)); +} + +void MultiGetByOffsetCase::dump(PrintStream& out) const +{ + dumpInContext(out, nullptr); +} + +} } // namespace JSC::DFG + +namespace WTF { + +using namespace JSC::DFG; + +void printInternal(PrintStream& out, GetByOffsetMethod::Kind kind) +{ + switch (kind) { + case GetByOffsetMethod::Invalid: + out.print("Invalid"); + return; + case GetByOffsetMethod::Constant: + out.print("Constant"); + return; + case GetByOffsetMethod::Load: + out.print("Load"); + return; + case GetByOffsetMethod::LoadFromPrototype: + out.print("LoadFromPrototype"); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGMultiGetByOffsetData.h b/Source/JavaScriptCore/dfg/DFGMultiGetByOffsetData.h new file mode 100644 index 000000000..72680bf61 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGMultiGetByOffsetData.h @@ -0,0 +1,154 @@ +/* + * Copyright (C) 2015 Apple Inc. 
All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGMultiGetByOffsetData_h +#define DFGMultiGetByOffsetData_h + +#if ENABLE(DFG_JIT) + +#include "DumpContext.h" +#include "JSObject.h" +#include "StructureSet.h" + +namespace JSC { namespace DFG { + +class FrozenValue; + +class GetByOffsetMethod { +public: + enum Kind { + Invalid, + Constant, + Load, + LoadFromPrototype + }; + + GetByOffsetMethod() + : m_kind(Invalid) + { + } + + static GetByOffsetMethod constant(FrozenValue* value) + { + GetByOffsetMethod result; + result.m_kind = Constant; + result.u.constant = value; + return result; + } + + static GetByOffsetMethod load(PropertyOffset offset) + { + GetByOffsetMethod result; + result.m_kind = Load; + result.u.load.offset = offset; + return result; + } + + static GetByOffsetMethod loadFromPrototype(FrozenValue* prototype, PropertyOffset offset) + { + GetByOffsetMethod result; + result.m_kind = LoadFromPrototype; + result.u.load.prototype = prototype; + result.u.load.offset = offset; + return result; + } + + bool operator!() const { return m_kind == Invalid; } + + Kind kind() const { return m_kind; } + + FrozenValue* constant() const + { + ASSERT(kind() == Constant); + return u.constant; + } + + FrozenValue* prototype() const + { + ASSERT(kind() == LoadFromPrototype); + return u.load.prototype; + } + + PropertyOffset offset() const + { + ASSERT(kind() == Load || kind() == LoadFromPrototype); + return u.load.offset; + } + + void dumpInContext(PrintStream&, DumpContext*) const; + void dump(PrintStream&) const; + +private: + union { + FrozenValue* constant; + struct { + FrozenValue* prototype; + PropertyOffset offset; + } load; + } u; + Kind m_kind; +}; + +class MultiGetByOffsetCase { +public: + MultiGetByOffsetCase() + { + } + + MultiGetByOffsetCase(const StructureSet& set, const GetByOffsetMethod& method) + : m_set(set) + , m_method(method) + { + } + + StructureSet& set() { return m_set; } + const StructureSet& set() const { return m_set; } + const GetByOffsetMethod& method() const { return m_method; } + + void dumpInContext(PrintStream&, DumpContext*) const; + void dump(PrintStream&) const; + +private: + StructureSet m_set; + GetByOffsetMethod m_method; +}; + +struct MultiGetByOffsetData { + unsigned identifierNumber; + 
Vector<MultiGetByOffsetCase, 2> cases; +}; + +} } // namespace JSC::DFG + +namespace WTF { + +void printInternal(PrintStream&, JSC::DFG::GetByOffsetMethod::Kind); + +} // namespace WTF + +#endif // ENABLE(DFG_JIT) + +#endif // DFGMultiGetByOffsetData_h + diff --git a/Source/JavaScriptCore/dfg/DFGNaturalLoops.cpp b/Source/JavaScriptCore/dfg/DFGNaturalLoops.cpp index 848917f70..89ca68b9c 100644 --- a/Source/JavaScriptCore/dfg/DFGNaturalLoops.cpp +++ b/Source/JavaScriptCore/dfg/DFGNaturalLoops.cpp @@ -29,6 +29,7 @@ #if ENABLE(DFG_JIT) #include "DFGGraph.h" +#include "JSCInlines.h" #include <wtf/CommaPrinter.h> namespace JSC { namespace DFG { @@ -41,11 +42,10 @@ void NaturalLoop::dump(PrintStream& out) const out.print("]"); } -NaturalLoops::NaturalLoops() { } -NaturalLoops::~NaturalLoops() { } - -void NaturalLoops::compute(Graph& graph) +NaturalLoops::NaturalLoops(Graph& graph) { + ASSERT(graph.m_dominators); + // Implement the classic dominator-based natural loop finder. The first // step is to find all control flow edges A -> B where B dominates A. // Then B is a loop header and A is a backward branching block. We will @@ -56,11 +56,9 @@ void NaturalLoops::compute(Graph& graph) static const bool verbose = false; - graph.m_dominators.computeIfNecessary(graph); - if (verbose) { dataLog("Dominators:\n"); - graph.m_dominators.dump(graph, WTF::dataFile()); + graph.m_dominators->dump(WTF::dataFile()); } m_loops.resize(0); @@ -72,7 +70,7 @@ void NaturalLoops::compute(Graph& graph) for (unsigned i = block->numSuccessors(); i--;) { BasicBlock* successor = block->successor(i); - if (!graph.m_dominators.dominates(successor, block)) + if (!graph.m_dominators->dominates(successor, block)) continue; bool found = false; for (unsigned j = m_loops.size(); j--;) { @@ -195,6 +193,8 @@ void NaturalLoops::compute(Graph& graph) dataLog("Results: ", *this, "\n"); } +NaturalLoops::~NaturalLoops() { } + Vector<const NaturalLoop*> NaturalLoops::loopsOf(BasicBlock* block) const { Vector<const NaturalLoop*> result; diff --git a/Source/JavaScriptCore/dfg/DFGNaturalLoops.h b/Source/JavaScriptCore/dfg/DFGNaturalLoops.h index 7ad0b0bdc..8454d0cb5 100644 --- a/Source/JavaScriptCore/dfg/DFGNaturalLoops.h +++ b/Source/JavaScriptCore/dfg/DFGNaturalLoops.h @@ -26,13 +26,13 @@ #ifndef DFGNaturalLoops_h #define DFGNaturalLoops_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) -#include "DFGAnalysis.h" #include "DFGBasicBlock.h" #include "DFGCommon.h" +#include "DFGDominators.h" +#include <wtf/FastMalloc.h> +#include <wtf/Noncopyable.h> namespace JSC { namespace DFG { @@ -90,21 +90,19 @@ private: unsigned m_index; }; -class NaturalLoops : public Analysis<NaturalLoops> { +class NaturalLoops { + WTF_MAKE_NONCOPYABLE(NaturalLoops); + WTF_MAKE_FAST_ALLOCATED; public: - NaturalLoops(); + NaturalLoops(Graph&); ~NaturalLoops(); - void compute(Graph&); - unsigned numLoops() const { - ASSERT(isValid()); return m_loops.size(); } const NaturalLoop& loop(unsigned i) const { - ASSERT(isValid()); return m_loops[i]; } @@ -112,7 +110,6 @@ public: // loop it belongs to. 
const NaturalLoop* headerOf(BasicBlock* block) const { - ASSERT(isValid()); const NaturalLoop* loop = innerMostLoopOf(block); if (!loop) return 0; @@ -127,7 +124,6 @@ public: const NaturalLoop* innerMostLoopOf(BasicBlock* block) const { - ASSERT(isValid()); unsigned index = block->innerMostLoopIndices[0]; if (index == UINT_MAX) return 0; @@ -136,7 +132,6 @@ public: const NaturalLoop* innerMostOuterLoop(const NaturalLoop& loop) const { - ASSERT(isValid()); if (loop.m_outerLoopIndex == UINT_MAX) return 0; return &m_loops[loop.m_outerLoopIndex]; @@ -144,7 +139,6 @@ public: bool belongsTo(BasicBlock* block, const NaturalLoop& candidateLoop) const { - ASSERT(isValid()); // It's faster to do this test using the loop itself, if it's small. if (candidateLoop.size() < 4) return candidateLoop.contains(block); @@ -156,6 +150,14 @@ public: return false; } + unsigned loopDepth(BasicBlock* block) const + { + unsigned depth = 0; + for (const NaturalLoop* loop = innerMostLoopOf(block); loop; loop = innerMostOuterLoop(*loop)) + depth++; + return depth; + } + // Return the indices of all loops this belongs to. Vector<const NaturalLoop*> loopsOf(BasicBlock*) const; diff --git a/Source/JavaScriptCore/dfg/DFGNode.cpp b/Source/JavaScriptCore/dfg/DFGNode.cpp index bf43f29a4..6a9853424 100644 --- a/Source/JavaScriptCore/dfg/DFGNode.cpp +++ b/Source/JavaScriptCore/dfg/DFGNode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,9 +30,40 @@ #include "DFGGraph.h" #include "DFGNodeAllocator.h" +#include "DFGPromotedHeapLocation.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { +bool MultiPutByOffsetData::writesStructures() const +{ + for (unsigned i = variants.size(); i--;) { + if (variants[i].writesStructures()) + return true; + } + return false; +} + +bool MultiPutByOffsetData::reallocatesStorage() const +{ + for (unsigned i = variants.size(); i--;) { + if (variants[i].reallocatesStorage()) + return true; + } + return false; +} + +void BranchTarget::dump(PrintStream& out) const +{ + if (!block) + return; + + out.print(*block); + + if (count == count) // If the count is not NaN, then print it. 
+ out.print("/w:", count); +} + unsigned Node::index() const { return NodeAllocator::allocatorOf(this)->indexOf(this); @@ -44,7 +75,6 @@ bool Node::hasVariableAccessData(Graph& graph) case Phi: return graph.m_form != SSA; case GetLocal: - case GetArgument: case SetLocal: case SetArgument: case Flush: @@ -55,6 +85,119 @@ bool Node::hasVariableAccessData(Graph& graph) } } +void Node::remove() +{ + ASSERT(!(flags() & NodeHasVarArgs)); + + children = children.justChecks(); + + setOpAndDefaultFlags(Check); +} + +void Node::convertToIdentity() +{ + RELEASE_ASSERT(child1()); + RELEASE_ASSERT(!child2()); + NodeFlags result = canonicalResultRepresentation(this->result()); + setOpAndDefaultFlags(Identity); + setResult(result); +} + +void Node::convertToIdentityOn(Node* child) +{ + children.reset(); + child1() = child->defaultEdge(); + NodeFlags output = canonicalResultRepresentation(this->result()); + NodeFlags input = canonicalResultRepresentation(child->result()); + if (output == input) { + setOpAndDefaultFlags(Identity); + setResult(output); + return; + } + switch (output) { + case NodeResultDouble: + setOpAndDefaultFlags(DoubleRep); + switch (input) { + case NodeResultInt52: + child1().setUseKind(Int52RepUse); + return; + case NodeResultJS: + child1().setUseKind(NumberUse); + return; + default: + RELEASE_ASSERT_NOT_REACHED(); + return; + } + case NodeResultInt52: + setOpAndDefaultFlags(Int52Rep); + switch (input) { + case NodeResultDouble: + child1().setUseKind(DoubleRepMachineIntUse); + return; + case NodeResultJS: + child1().setUseKind(MachineIntUse); + return; + default: + RELEASE_ASSERT_NOT_REACHED(); + return; + } + case NodeResultJS: + setOpAndDefaultFlags(ValueRep); + switch (input) { + case NodeResultDouble: + child1().setUseKind(DoubleRepUse); + return; + case NodeResultInt52: + child1().setUseKind(Int52RepUse); + return; + default: + RELEASE_ASSERT_NOT_REACHED(); + return; + } + default: + RELEASE_ASSERT_NOT_REACHED(); + return; + } +} + +void Node::convertToPutHint(const PromotedLocationDescriptor& descriptor, Node* base, Node* value) +{ + m_op = PutHint; + m_opInfo = descriptor.imm1().m_value; + m_opInfo2 = descriptor.imm2().m_value; + child1() = base->defaultEdge(); + child2() = value->defaultEdge(); + child3() = Edge(); +} + +void Node::convertToPutStructureHint(Node* structure) +{ + ASSERT(m_op == PutStructure); + ASSERT(structure->castConstant<Structure*>() == transition()->next); + convertToPutHint(StructurePLoc, child1().node(), structure); +} + +void Node::convertToPutByOffsetHint() +{ + ASSERT(m_op == PutByOffset); + convertToPutHint( + PromotedLocationDescriptor(NamedPropertyPLoc, storageAccessData().identifierNumber), + child2().node(), child3().node()); +} + +void Node::convertToPutClosureVarHint() +{ + ASSERT(m_op == PutClosureVar); + convertToPutHint( + PromotedLocationDescriptor(ClosureVarPLoc, scopeOffset().offset()), + child1().node(), child2().node()); +} + +PromotedLocationDescriptor Node::promotedLocationDescriptor() +{ + return PromotedLocationDescriptor(static_cast<PromotedLocationKind>(m_opInfo), m_opInfo2); +} + } } // namespace JSC::DFG namespace WTF { @@ -74,6 +217,9 @@ void printInternal(PrintStream& out, SwitchKind kind) case SwitchString: out.print("SwitchString"); return; + case SwitchCell: + out.print("SwitchCell"); + return; } RELEASE_ASSERT_NOT_REACHED(); } @@ -85,7 +231,10 @@ void printInternal(PrintStream& out, Node* node) return; } out.print("@", node->index()); - out.print(AbbreviatedSpeculationDump(node->prediction())); + if 
(node->hasDoubleResult()) + out.print("<Double>"); + else if (node->hasInt52Result()) + out.print("<Int52>"); } } // namespace WTF diff --git a/Source/JavaScriptCore/dfg/DFGNode.h b/Source/JavaScriptCore/dfg/DFGNode.h index 55a9ede8d..e29374f04 100644 --- a/Source/JavaScriptCore/dfg/DFGNode.h +++ b/Source/JavaScriptCore/dfg/DFGNode.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011-2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,44 +26,57 @@ #ifndef DFGNode_h #define DFGNode_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) +#include "BasicBlockLocation.h" #include "CodeBlock.h" -#include "CodeOrigin.h" #include "DFGAbstractValue.h" #include "DFGAdjacencyList.h" #include "DFGArithMode.h" #include "DFGArrayMode.h" #include "DFGCommon.h" +#include "DFGEpoch.h" #include "DFGLazyJSValue.h" +#include "DFGMultiGetByOffsetData.h" #include "DFGNodeFlags.h" +#include "DFGNodeOrigin.h" #include "DFGNodeType.h" +#include "DFGObjectMaterializationData.h" +#include "DFGTransition.h" +#include "DFGUseKind.h" #include "DFGVariableAccessData.h" +#include "GetByIdVariant.h" #include "JSCJSValue.h" #include "Operands.h" +#include "PutByIdVariant.h" #include "SpeculatedType.h" #include "StructureSet.h" +#include "TypeLocation.h" #include "ValueProfile.h" #include <wtf/ListDump.h> namespace JSC { namespace DFG { class Graph; +class PromotedLocationDescriptor; struct BasicBlock; -struct StructureTransitionData { - Structure* previousStructure; - Structure* newStructure; - - StructureTransitionData() { } +struct StorageAccessData { + PropertyOffset offset; + unsigned identifierNumber; + + // This needs to know the inferred type. For puts, this is necessary because we need to remember + // what check is needed. For gets, this is necessary because otherwise AI might forget what type is + // guaranteed. 
+ InferredType::Descriptor inferredType; +}; + +struct MultiPutByOffsetData { + unsigned identifierNumber; + Vector<PutByIdVariant, 2> variants; - StructureTransitionData(Structure* previousStructure, Structure* newStructure) - : previousStructure(previousStructure) - , newStructure(newStructure) - { - } + bool writesStructures() const; + bool reallocatesStorage() const; }; struct NewArrayBufferData { @@ -72,6 +85,55 @@ struct NewArrayBufferData { IndexingType indexingType; }; +struct BranchTarget { + BranchTarget() + : block(0) + , count(PNaN) + { + } + + explicit BranchTarget(BasicBlock* block) + : block(block) + , count(PNaN) + { + } + + void setBytecodeIndex(unsigned bytecodeIndex) + { + block = bitwise_cast<BasicBlock*>(static_cast<uintptr_t>(bytecodeIndex)); + } + unsigned bytecodeIndex() const { return bitwise_cast<uintptr_t>(block); } + + void dump(PrintStream&) const; + + BasicBlock* block; + float count; +}; + +struct BranchData { + static BranchData withBytecodeIndices( + unsigned takenBytecodeIndex, unsigned notTakenBytecodeIndex) + { + BranchData result; + result.taken.block = bitwise_cast<BasicBlock*>(static_cast<uintptr_t>(takenBytecodeIndex)); + result.notTaken.block = bitwise_cast<BasicBlock*>(static_cast<uintptr_t>(notTakenBytecodeIndex)); + return result; + } + + unsigned takenBytecodeIndex() const { return taken.bytecodeIndex(); } + unsigned notTakenBytecodeIndex() const { return notTaken.bytecodeIndex(); } + + BasicBlock*& forCondition(bool condition) + { + if (condition) + return taken.block; + return notTaken.block; + } + + BranchTarget taken; + BranchTarget notTaken; +}; + // The SwitchData and associated data structures duplicate the information in // JumpTable. The DFG may ultimately end up using the JumpTable, though it may // instead decide to do something different - this is entirely up to the DFG. @@ -85,7 +147,6 @@ struct NewArrayBufferData { // values. struct SwitchCase { SwitchCase() - : target(0) { } @@ -99,20 +160,12 @@ struct SwitchCase { { SwitchCase result; result.value = value; - result.target = bitwise_cast<BasicBlock*>(static_cast<uintptr_t>(bytecodeIndex)); + result.target.setBytecodeIndex(bytecodeIndex); return result; } - unsigned targetBytecodeIndex() const { return bitwise_cast<uintptr_t>(target); } - LazyJSValue value; - BasicBlock* target; -}; - -enum SwitchKind { - SwitchImm, - SwitchChar, - SwitchString + BranchTarget target; }; struct SwitchData { @@ -120,30 +173,57 @@ struct SwitchData { // constructing this should make sure to initialize everything they // care about manually. SwitchData() - : fallThrough(0) - , kind(static_cast<SwitchKind>(-1)) + : kind(static_cast<SwitchKind>(-1)) , switchTableIndex(UINT_MAX) , didUseJumpTable(false) { } - void setFallThroughBytecodeIndex(unsigned bytecodeIndex) - { - fallThrough = bitwise_cast<BasicBlock*>(static_cast<uintptr_t>(bytecodeIndex)); - } - unsigned fallThroughBytecodeIndex() const { return bitwise_cast<uintptr_t>(fallThrough); } - Vector<SwitchCase> cases; - BasicBlock* fallThrough; + BranchTarget fallThrough; SwitchKind kind; unsigned switchTableIndex; bool didUseJumpTable; }; +struct CallVarargsData { + int firstVarArgOffset; +}; + +struct LoadVarargsData { + VirtualRegister start; // Local for the first element. This is the first actual argument, not this. + VirtualRegister count; // Local for the count. + VirtualRegister machineStart; + VirtualRegister machineCount; + unsigned offset; // Which array element to start with. Usually this is 0. 
+ unsigned mandatoryMinimum; // The number of elements on the stack that must be initialized; if the array is too short then the missing elements must get undefined. Does not include "this". + unsigned limit; // Maximum number of elements to load. Includes "this". +}; + +struct StackAccessData { + StackAccessData() + : format(DeadFlush) + { + } + + StackAccessData(VirtualRegister local, FlushFormat format) + : local(local) + , format(format) + { + } + + VirtualRegister local; + VirtualRegister machineLocal; + FlushFormat format; + + FlushedAt flushedAt() { return FlushedAt(format, machineLocal); } +}; + // This type used in passing an immediate argument to Node constructor; // distinguishes an immediate value (typically an index into a CodeBlock data structure - // a constant index, argument, or identifier) from a Node*. struct OpInfo { + OpInfo() : m_value(0) { } explicit OpInfo(int32_t value) : m_value(static_cast<uintptr_t>(value)) { } explicit OpInfo(uint32_t value) : m_value(static_cast<uintptr_t>(value)) { } #if OS(DARWIN) || USE(JSVALUE64) @@ -161,78 +241,112 @@ struct Node { Node() { } - Node(NodeType op, CodeOrigin codeOrigin, const AdjacencyList& children) - : codeOrigin(codeOrigin) - , codeOriginForExitTarget(codeOrigin) + Node(NodeType op, NodeOrigin nodeOrigin, const AdjacencyList& children) + : origin(nodeOrigin) , children(children) , m_virtualRegister(VirtualRegister()) , m_refCount(1) , m_prediction(SpecNone) + , owner(nullptr) { - misc.replacement = 0; + m_misc.replacement = nullptr; setOpAndDefaultFlags(op); } // Construct a node with up to 3 children, no immediate value. - Node(NodeType op, CodeOrigin codeOrigin, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge()) - : codeOrigin(codeOrigin) - , codeOriginForExitTarget(codeOrigin) + Node(NodeType op, NodeOrigin nodeOrigin, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge()) + : origin(nodeOrigin) , children(AdjacencyList::Fixed, child1, child2, child3) , m_virtualRegister(VirtualRegister()) , m_refCount(1) , m_prediction(SpecNone) , m_opInfo(0) , m_opInfo2(0) + , owner(nullptr) + { + m_misc.replacement = nullptr; + setOpAndDefaultFlags(op); + ASSERT(!(m_flags & NodeHasVarArgs)); + } + + // Construct a node with up to 3 children, no immediate value. + Node(NodeFlags result, NodeType op, NodeOrigin nodeOrigin, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge()) + : origin(nodeOrigin) + , children(AdjacencyList::Fixed, child1, child2, child3) + , m_virtualRegister(VirtualRegister()) + , m_refCount(1) + , m_prediction(SpecNone) + , m_opInfo(0) + , m_opInfo2(0) + , owner(nullptr) + { + m_misc.replacement = nullptr; + setOpAndDefaultFlags(op); + setResult(result); + ASSERT(!(m_flags & NodeHasVarArgs)); + } + + // Construct a node with up to 3 children and an immediate value. + Node(NodeType op, NodeOrigin nodeOrigin, OpInfo imm, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge()) + : origin(nodeOrigin) + , children(AdjacencyList::Fixed, child1, child2, child3) + , m_virtualRegister(VirtualRegister()) + , m_refCount(1) + , m_prediction(SpecNone) + , m_opInfo(imm.m_value) + , m_opInfo2(0) + , owner(nullptr) { - misc.replacement = 0; + m_misc.replacement = nullptr; setOpAndDefaultFlags(op); ASSERT(!(m_flags & NodeHasVarArgs)); } // Construct a node with up to 3 children and an immediate value. 
- Node(NodeType op, CodeOrigin codeOrigin, OpInfo imm, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge()) - : codeOrigin(codeOrigin) - , codeOriginForExitTarget(codeOrigin) + Node(NodeFlags result, NodeType op, NodeOrigin nodeOrigin, OpInfo imm, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge()) + : origin(nodeOrigin) , children(AdjacencyList::Fixed, child1, child2, child3) , m_virtualRegister(VirtualRegister()) , m_refCount(1) , m_prediction(SpecNone) , m_opInfo(imm.m_value) , m_opInfo2(0) + , owner(nullptr) { - misc.replacement = 0; + m_misc.replacement = nullptr; setOpAndDefaultFlags(op); + setResult(result); ASSERT(!(m_flags & NodeHasVarArgs)); } // Construct a node with up to 3 children and two immediate values. - Node(NodeType op, CodeOrigin codeOrigin, OpInfo imm1, OpInfo imm2, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge()) - : codeOrigin(codeOrigin) - , codeOriginForExitTarget(codeOrigin) + Node(NodeType op, NodeOrigin nodeOrigin, OpInfo imm1, OpInfo imm2, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge()) + : origin(nodeOrigin) , children(AdjacencyList::Fixed, child1, child2, child3) , m_virtualRegister(VirtualRegister()) , m_refCount(1) , m_prediction(SpecNone) , m_opInfo(imm1.m_value) , m_opInfo2(imm2.m_value) + , owner(nullptr) { - misc.replacement = 0; + m_misc.replacement = nullptr; setOpAndDefaultFlags(op); ASSERT(!(m_flags & NodeHasVarArgs)); } // Construct a node with a variable number of children and two immediate values. - Node(VarArgTag, NodeType op, CodeOrigin codeOrigin, OpInfo imm1, OpInfo imm2, unsigned firstChild, unsigned numChildren) - : codeOrigin(codeOrigin) - , codeOriginForExitTarget(codeOrigin) + Node(VarArgTag, NodeType op, NodeOrigin nodeOrigin, OpInfo imm1, OpInfo imm2, unsigned firstChild, unsigned numChildren) + : origin(nodeOrigin) , children(AdjacencyList::Variable, firstChild, numChildren) , m_virtualRegister(VirtualRegister()) , m_refCount(1) , m_prediction(SpecNone) , m_opInfo(imm1.m_value) , m_opInfo2(imm2.m_value) + , owner(nullptr) { - misc.replacement = 0; + m_misc.replacement = nullptr; setOpAndDefaultFlags(op); ASSERT(m_flags & NodeHasVarArgs); } @@ -255,7 +369,6 @@ struct Node { bool mergeFlags(NodeFlags flags) { - ASSERT(!(flags & NodeDoesNotExit)); NodeFlags newFlags = m_flags | flags; if (newFlags == m_flags) return false; @@ -265,7 +378,6 @@ struct Node { bool filterFlags(NodeFlags flags) { - ASSERT(flags & NodeDoesNotExit); NodeFlags newFlags = m_flags & flags; if (newFlags == m_flags) return false; @@ -278,104 +390,108 @@ struct Node { return filterFlags(~flags); } + void setResult(NodeFlags result) + { + ASSERT(!(result & ~NodeResultMask)); + clearFlags(NodeResultMask); + mergeFlags(result); + } + + NodeFlags result() const + { + return flags() & NodeResultMask; + } + void setOpAndDefaultFlags(NodeType op) { m_op = op; m_flags = defaultFlags(op); } - void convertToPhantom() + void remove(); + + void convertToCheckStructure(StructureSet* set) { - setOpAndDefaultFlags(Phantom); + setOpAndDefaultFlags(CheckStructure); + m_opInfo = bitwise_cast<uintptr_t>(set); } - void convertToPhantomUnchecked() + void convertToCheckStructureImmediate(Node* structure) { - setOpAndDefaultFlags(Phantom); + ASSERT(op() == CheckStructure); + m_op = CheckStructureImmediate; + children.setChild1(Edge(structure, CellUse)); } - - void convertToIdentity() + + void replaceWith(Node* other) { - RELEASE_ASSERT(child1()); - RELEASE_ASSERT(!child2()); - setOpAndDefaultFlags(Identity); + remove(); + 
setReplacement(other); } + void convertToIdentity(); + void convertToIdentityOn(Node*); + bool mustGenerate() { return m_flags & NodeMustGenerate; } - void setCanExit(bool exits) - { - if (exits) - m_flags &= ~NodeDoesNotExit; - else - m_flags |= NodeDoesNotExit; - } - - bool canExit() - { - return !(m_flags & NodeDoesNotExit); - } - bool isConstant() { - return op() == JSConstant; - } - - bool isWeakConstant() - { - return op() == WeakJSConstant; - } - - bool isStronglyProvedConstantIn(InlineCallFrame* inlineCallFrame) - { - return !!(flags() & NodeIsStaticConstant) - && codeOrigin.inlineCallFrame == inlineCallFrame; - } - - bool isStronglyProvedConstantIn(const CodeOrigin& codeOrigin) - { - return isStronglyProvedConstantIn(codeOrigin.inlineCallFrame); - } - - bool isPhantomArguments() - { - return op() == PhantomArguments; + switch (op()) { + case JSConstant: + case DoubleConstant: + case Int52Constant: + return true; + default: + return false; + } } bool hasConstant() { switch (op()) { case JSConstant: - case WeakJSConstant: - case PhantomArguments: + case DoubleConstant: + case Int52Constant: + return true; + + case PhantomDirectArguments: + case PhantomClonedArguments: + // These pretend to be the empty value constant for the benefit of the DFG backend, which + // otherwise wouldn't take kindly to a node that doesn't compute a value. return true; + default: return false; } } - unsigned constantNumber() + FrozenValue* constant() { - ASSERT(isConstant()); - return m_opInfo; - } - - void convertToConstant(unsigned constantNumber) - { - m_op = JSConstant; - m_flags &= ~(NodeMustGenerate | NodeMightClobber | NodeClobbersWorld); - m_opInfo = constantNumber; - children.reset(); + ASSERT(hasConstant()); + + if (op() == PhantomDirectArguments || op() == PhantomClonedArguments) { + // These pretend to be the empty value constant for the benefit of the DFG backend, which + // otherwise wouldn't take kindly to a node that doesn't compute a value. + return FrozenValue::emptySingleton(); + } + + return bitwise_cast<FrozenValue*>(m_opInfo); } - void convertToWeakConstant(JSCell* cell) + // Don't call this directly - use Graph::convertToConstant() instead! 
+ void convertToConstant(FrozenValue* value) { - m_op = WeakJSConstant; - m_flags &= ~(NodeMustGenerate | NodeMightClobber | NodeClobbersWorld); - m_opInfo = bitwise_cast<uintptr_t>(cell); + if (hasDoubleResult()) + m_op = DoubleConstant; + else if (hasInt52Result()) + m_op = Int52Constant; + else + m_op = JSConstant; + m_flags &= ~NodeMustGenerate; + m_opInfo = bitwise_cast<uintptr_t>(value); children.reset(); } @@ -384,54 +500,121 @@ struct Node { ASSERT(op() == GetIndexedPropertyStorage); m_op = ConstantStoragePointer; m_opInfo = bitwise_cast<uintptr_t>(pointer); + children.reset(); } void convertToGetLocalUnlinked(VirtualRegister local) { m_op = GetLocalUnlinked; - m_flags &= ~(NodeMustGenerate | NodeMightClobber | NodeClobbersWorld); + m_flags &= ~NodeMustGenerate; m_opInfo = local.offset(); m_opInfo2 = VirtualRegister().offset(); children.reset(); } - void convertToStructureTransitionWatchpoint(Structure* structure) + void convertToPutStack(StackAccessData* data) { - ASSERT(m_op == CheckStructure || m_op == ArrayifyToStructure); - ASSERT(!child2()); - ASSERT(!child3()); - m_opInfo = bitwise_cast<uintptr_t>(structure); - m_op = StructureTransitionWatchpoint; + m_op = PutStack; + m_flags |= NodeMustGenerate; + m_opInfo = bitwise_cast<uintptr_t>(data); + m_opInfo2 = 0; } - void convertToStructureTransitionWatchpoint() + void convertToGetStack(StackAccessData* data) { - convertToStructureTransitionWatchpoint(structureSet().singletonStructure()); + m_op = GetStack; + m_flags &= ~NodeMustGenerate; + m_opInfo = bitwise_cast<uintptr_t>(data); + m_opInfo2 = 0; + children.reset(); } - void convertToGetByOffset(unsigned storageAccessDataIndex, Edge storage) + void convertToGetByOffset(StorageAccessData& data, Edge storage) { - ASSERT(m_op == GetById || m_op == GetByIdFlush); - m_opInfo = storageAccessDataIndex; + ASSERT(m_op == GetById || m_op == GetByIdFlush || m_op == MultiGetByOffset); + m_opInfo = bitwise_cast<uintptr_t>(&data); children.setChild2(children.child1()); children.child2().setUseKind(KnownCellUse); children.setChild1(storage); m_op = GetByOffset; - m_flags &= ~NodeClobbersWorld; + m_flags &= ~NodeMustGenerate; + } + + void convertToMultiGetByOffset(MultiGetByOffsetData* data) + { + ASSERT(m_op == GetById || m_op == GetByIdFlush); + m_opInfo = bitwise_cast<intptr_t>(data); + child1().setUseKind(CellUse); + m_op = MultiGetByOffset; + ASSERT(m_flags & NodeMustGenerate); } - void convertToPutByOffset(unsigned storageAccessDataIndex, Edge storage) + void convertToPutByOffset(StorageAccessData& data, Edge storage) { - ASSERT(m_op == PutById || m_op == PutByIdDirect); - m_opInfo = storageAccessDataIndex; + ASSERT(m_op == PutById || m_op == PutByIdDirect || m_op == PutByIdFlush || m_op == MultiPutByOffset); + m_opInfo = bitwise_cast<uintptr_t>(&data); children.setChild3(children.child2()); children.setChild2(children.child1()); children.setChild1(storage); m_op = PutByOffset; - m_flags &= ~NodeClobbersWorld; } - void convertToPhantomLocal() + void convertToMultiPutByOffset(MultiPutByOffsetData* data) + { + ASSERT(m_op == PutById || m_op == PutByIdDirect || m_op == PutByIdFlush); + m_opInfo = bitwise_cast<intptr_t>(data); + m_op = MultiPutByOffset; + } + + void convertToPutHint(const PromotedLocationDescriptor&, Node* base, Node* value); + + void convertToPutByOffsetHint(); + void convertToPutStructureHint(Node* structure); + void convertToPutClosureVarHint(); + + void convertToPhantomNewObject() + { + ASSERT(m_op == NewObject || m_op == MaterializeNewObject); + m_op = PhantomNewObject; 
+ m_flags &= ~NodeHasVarArgs; + m_flags |= NodeMustGenerate; + m_opInfo = 0; + m_opInfo2 = 0; + children = AdjacencyList(); + } + + void convertToPhantomNewFunction() + { + ASSERT(m_op == NewFunction || m_op == NewArrowFunction || m_op == NewGeneratorFunction); + m_op = PhantomNewFunction; + m_flags |= NodeMustGenerate; + m_opInfo = 0; + m_opInfo2 = 0; + children = AdjacencyList(); + } + + void convertToPhantomNewGeneratorFunction() + { + ASSERT(m_op == NewGeneratorFunction); + m_op = PhantomNewGeneratorFunction; + m_flags |= NodeMustGenerate; + m_opInfo = 0; + m_opInfo2 = 0; + children = AdjacencyList(); + } + + void convertToPhantomCreateActivation() + { + ASSERT(m_op == CreateActivation || m_op == MaterializeCreateActivation); + m_op = PhantomCreateActivation; + m_flags &= ~NodeHasVarArgs; + m_flags |= NodeMustGenerate; + m_opInfo = 0; + m_opInfo2 = 0; + children = AdjacencyList(); + } + + void convertPhantomToPhantomLocal() { ASSERT(m_op == Phantom && (child1()->op() == Phi || child1()->op() == SetLocal || child1()->op() == SetArgument)); m_op = PhantomLocal; @@ -439,6 +622,13 @@ struct Node { children.setChild1(Edge()); } + void convertFlushToPhantomLocal() + { + ASSERT(m_op == Flush); + m_op = PhantomLocal; + children = AdjacencyList(); + } + void convertToGetLocal(VariableAccessData* variable, Node* phi) { ASSERT(m_op == GetLocalUnlinked); @@ -453,53 +643,112 @@ struct Node { ASSERT(m_op == ToPrimitive); m_op = ToString; } + + void convertToArithSqrt() + { + ASSERT(m_op == ArithPow); + child2() = Edge(); + m_op = ArithSqrt; + } + + void convertToArithNegate() + { + ASSERT(m_op == ArithAbs && child1().useKind() == Int32Use); + m_op = ArithNegate; + } - JSCell* weakConstant() + JSValue asJSValue() + { + return constant()->value(); + } + + bool isInt32Constant() + { + return isConstant() && constant()->value().isInt32(); + } + + int32_t asInt32() + { + return asJSValue().asInt32(); + } + + uint32_t asUInt32() + { + return asInt32(); + } + + bool isDoubleConstant() { - ASSERT(op() == WeakJSConstant); - return bitwise_cast<JSCell*>(m_opInfo); + return isConstant() && constant()->value().isDouble(); + } + + bool isNumberConstant() + { + return isConstant() && constant()->value().isNumber(); } - JSValue valueOfJSConstant(CodeBlock* codeBlock) + double asNumber() { - switch (op()) { - case WeakJSConstant: - return JSValue(weakConstant()); - case JSConstant: - return codeBlock->constantRegister(FirstConstantRegisterIndex + constantNumber()).get(); - case PhantomArguments: - return JSValue(); - default: - RELEASE_ASSERT_NOT_REACHED(); - return JSValue(); // Have to return something in release mode. 
- } + return asJSValue().asNumber(); + } + + bool isMachineIntConstant() + { + return isConstant() && constant()->value().isMachineInt(); + } + + int64_t asMachineInt() + { + return asJSValue().asMachineInt(); + } + + bool isBooleanConstant() + { + return isConstant() && constant()->value().isBoolean(); + } + + bool asBoolean() + { + return constant()->value().asBoolean(); } - bool isInt32Constant(CodeBlock* codeBlock) + bool isUndefinedOrNullConstant() { - return isConstant() && valueOfJSConstant(codeBlock).isInt32(); + return isConstant() && constant()->value().isUndefinedOrNull(); } - - bool isDoubleConstant(CodeBlock* codeBlock) + + bool isCellConstant() { - bool result = isConstant() && valueOfJSConstant(codeBlock).isDouble(); - if (result) - ASSERT(!isInt32Constant(codeBlock)); - return result; + return isConstant() && constant()->value() && constant()->value().isCell(); + } + + JSCell* asCell() + { + return constant()->value().asCell(); + } + + template<typename T> + T dynamicCastConstant() + { + if (!isCellConstant()) + return nullptr; + return jsDynamicCast<T>(asCell()); } - bool isNumberConstant(CodeBlock* codeBlock) + template<typename T> + T castConstant() { - bool result = isConstant() && valueOfJSConstant(codeBlock).isNumber(); - ASSERT(result == (isInt32Constant(codeBlock) || isDoubleConstant(codeBlock))); + T result = dynamicCastConstant<T>(); + RELEASE_ASSERT(result); return result; } - - bool isBooleanConstant(CodeBlock* codeBlock) + + JSValue initializationValueForActivation() const { - return isConstant() && valueOfJSConstant(codeBlock).isBoolean(); + ASSERT(op() == CreateActivation); + return bitwise_cast<FrozenValue*>(m_opInfo2)->value(); } - + bool containsMovHint() { switch (op()) { @@ -517,6 +766,16 @@ struct Node { return hasVariableAccessData(graph); } + // This is useful for debugging code, where a node that should have a variable + // access data doesn't have one because it hasn't been initialized yet. 
+ VariableAccessData* tryGetVariableAccessData() + { + VariableAccessData* result = reinterpret_cast<VariableAccessData*>(m_opInfo); + if (!result) + return 0; + return result->find(); + } + VariableAccessData* variableAccessData() { return reinterpret_cast<VariableAccessData*>(m_opInfo)->find(); @@ -539,6 +798,7 @@ struct Node { case ExtractOSREntryLocal: case MovHint: case ZombieHint: + case KillStack: return true; default: return false; @@ -568,6 +828,23 @@ struct Node { return VirtualRegister(m_opInfo2); } + bool hasStackAccessData() + { + switch (op()) { + case PutStack: + case GetStack: + return true; + default: + return false; + } + } + + StackAccessData* stackAccessData() + { + ASSERT(hasStackAccessData()); + return bitwise_cast<StackAccessData*>(m_opInfo); + } + bool hasPhi() { return op() == Upsilon; @@ -581,14 +858,7 @@ struct Node { bool isStoreBarrier() { - switch (op()) { - case StoreBarrier: - case ConditionalStoreBarrier: - case StoreBarrierWithNullCheck: - return true; - default: - return false; - } + return op() == StoreBarrier; } bool hasIdentifier() @@ -597,7 +867,11 @@ struct Node { case GetById: case GetByIdFlush: case PutById: + case PutByIdFlush: case PutByIdDirect: + case PutGetterById: + case PutSetterById: + case PutGetterSetterById: return true; default: return false; @@ -609,26 +883,44 @@ struct Node { ASSERT(hasIdentifier()); return m_opInfo; } - - bool hasArithNodeFlags() + + bool hasAccessorAttributes() { switch (op()) { - case UInt32ToNumber: - case ArithAdd: - case ArithSub: - case ArithNegate: - case ArithMul: - case ArithAbs: - case ArithMin: - case ArithMax: - case ArithMod: - case ArithDiv: - case ValueAdd: + case PutGetterById: + case PutSetterById: + case PutGetterSetterById: + case PutGetterByVal: + case PutSetterByVal: return true; default: return false; } } + + int32_t accessorAttributes() + { + ASSERT(hasAccessorAttributes()); + switch (op()) { + case PutGetterById: + case PutSetterById: + case PutGetterSetterById: + return m_opInfo2; + case PutGetterByVal: + case PutSetterByVal: + return m_opInfo; + default: + RELEASE_ASSERT_NOT_REACHED(); + return 0; + } + } + + bool hasPromotedLocationDescriptor() + { + return op() == PutHint; + } + + PromotedLocationDescriptor promotedLocationDescriptor(); // This corrects the arithmetic node flags, so that irrelevant bits are // ignored. In particular, anything other than ArithMul does not need @@ -636,11 +928,16 @@ struct Node { NodeFlags arithNodeFlags() { NodeFlags result = m_flags & NodeArithFlagsMask; - if (op() == ArithMul || op() == ArithDiv || op() == ArithMod || op() == ArithNegate || op() == DoubleAsInt32) + if (op() == ArithMul || op() == ArithDiv || op() == ArithMod || op() == ArithNegate || op() == ArithPow || op() == ArithRound || op() == ArithFloor || op() == ArithCeil || op() == DoubleAsInt32) return result; return result & ~NodeBytecodeNeedsNegZero; } - + + bool mayHaveNonIntResult() + { + return m_flags & NodeMayHaveNonIntResult; + } + bool hasConstantBuffer() { return op() == NewArrayBuffer; @@ -673,7 +970,15 @@ struct Node { return false; } } - + + // Return the indexing type that an array allocation *wants* to use. It may end up using a different + // type if we're having a bad time. 
You can determine the actual indexing type by asking the global + // object: + // + // m_graph.globalObjectFor(node->origin.semantic)->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()) + // + // This will give you a Structure*, and that will have some indexing type that may be different from + // the this one. IndexingType indexingType() { ASSERT(hasIndexingType()); @@ -717,6 +1022,9 @@ struct Node { m_opInfo = indexingType; } + // FIXME: We really should be able to inline code that uses NewRegexp. That means + // using something other than the index into the CodeBlock here. + // https://bugs.webkit.org/show_bug.cgi?id=154808 bool hasRegexpIndex() { return op() == NewRegexp; @@ -728,55 +1036,120 @@ struct Node { return m_opInfo; } - bool hasVarNumber() + bool hasScopeOffset() { return op() == GetClosureVar || op() == PutClosureVar; } - int varNumber() + ScopeOffset scopeOffset() { - ASSERT(hasVarNumber()); - return m_opInfo; + ASSERT(hasScopeOffset()); + return ScopeOffset(m_opInfo); + } + + bool hasDirectArgumentsOffset() + { + return op() == GetFromArguments || op() == PutToArguments; + } + + DirectArgumentsOffset capturedArgumentsOffset() + { + ASSERT(hasDirectArgumentsOffset()); + return DirectArgumentsOffset(m_opInfo); } bool hasRegisterPointer() { - return op() == GetGlobalVar || op() == PutGlobalVar; + return op() == GetGlobalVar || op() == GetGlobalLexicalVariable || op() == PutGlobalVariable; } - WriteBarrier<Unknown>* registerPointer() + WriteBarrier<Unknown>* variablePointer() { return bitwise_cast<WriteBarrier<Unknown>*>(m_opInfo); } + bool hasCallVarargsData() + { + switch (op()) { + case CallVarargs: + case CallForwardVarargs: + case TailCallVarargs: + case TailCallForwardVarargs: + case TailCallVarargsInlinedCaller: + case TailCallForwardVarargsInlinedCaller: + case ConstructVarargs: + case ConstructForwardVarargs: + return true; + default: + return false; + } + } + + CallVarargsData* callVarargsData() + { + ASSERT(hasCallVarargsData()); + return bitwise_cast<CallVarargsData*>(m_opInfo); + } + + bool hasLoadVarargsData() + { + return op() == LoadVarargs || op() == ForwardVarargs; + } + + LoadVarargsData* loadVarargsData() + { + ASSERT(hasLoadVarargsData()); + return bitwise_cast<LoadVarargsData*>(m_opInfo); + } + bool hasResult() { - return m_flags & NodeResultMask; + return !!result(); } bool hasInt32Result() { - return (m_flags & NodeResultMask) == NodeResultInt32; + return result() == NodeResultInt32; + } + + bool hasInt52Result() + { + return result() == NodeResultInt52; } bool hasNumberResult() { - return (m_flags & NodeResultMask) == NodeResultNumber; + return result() == NodeResultNumber; + } + + bool hasDoubleResult() + { + return result() == NodeResultDouble; } bool hasJSResult() { - return (m_flags & NodeResultMask) == NodeResultJS; + return result() == NodeResultJS; } bool hasBooleanResult() { - return (m_flags & NodeResultMask) == NodeResultBoolean; + return result() == NodeResultBoolean; } bool hasStorageResult() { - return (m_flags & NodeResultMask) == NodeResultStorage; + return result() == NodeResultStorage; + } + + UseKind defaultUseKind() + { + return useKindForResult(result()); + } + + Edge defaultEdge() + { + return Edge(this, defaultUseKind()); } bool isJump() @@ -801,6 +1174,9 @@ struct Node { case Branch: case Switch: case Return: + case TailCall: + case TailCallVarargs: + case TailCallForwardVarargs: case Unreachable: return true; default: @@ -808,40 +1184,30 @@ struct Node { } } - unsigned takenBytecodeOffsetDuringParsing() + bool 
isFunctionTerminal() { - ASSERT(isBranch() || isJump()); - return m_opInfo; - } + if (isTerminal() && !numSuccessors()) + return true; - unsigned notTakenBytecodeOffsetDuringParsing() - { - ASSERT(isBranch()); - return m_opInfo2; - } - - void setTakenBlock(BasicBlock* block) - { - ASSERT(isBranch() || isJump()); - m_opInfo = bitwise_cast<uintptr_t>(block); + return false; } - - void setNotTakenBlock(BasicBlock* block) + + unsigned targetBytecodeOffsetDuringParsing() { - ASSERT(isBranch()); - m_opInfo2 = bitwise_cast<uintptr_t>(block); + ASSERT(isJump()); + return m_opInfo; } - - BasicBlock*& takenBlock() + + BasicBlock*& targetBlock() { - ASSERT(isBranch() || isJump()); + ASSERT(isJump()); return *bitwise_cast<BasicBlock**>(&m_opInfo); } - BasicBlock*& notTakenBlock() + BranchData* branchData() { ASSERT(isBranch()); - return *bitwise_cast<BasicBlock**>(&m_opInfo2); + return bitwise_cast<BranchData*>(m_opInfo); } SwitchData* switchData() @@ -868,44 +1234,131 @@ struct Node { { if (isSwitch()) { if (index < switchData()->cases.size()) - return switchData()->cases[index].target; + return switchData()->cases[index].target.block; RELEASE_ASSERT(index == switchData()->cases.size()); - return switchData()->fallThrough; + return switchData()->fallThrough.block; } switch (index) { case 0: - return takenBlock(); + if (isJump()) + return targetBlock(); + return branchData()->taken.block; case 1: - return notTakenBlock(); + return branchData()->notTaken.block; default: RELEASE_ASSERT_NOT_REACHED(); - return takenBlock(); + return targetBlock(); + } + } + + class SuccessorsIterable { + public: + SuccessorsIterable() + : m_terminal(nullptr) + { + } + + SuccessorsIterable(Node* terminal) + : m_terminal(terminal) + { + } + + class iterator { + public: + iterator() + : m_terminal(nullptr) + , m_index(UINT_MAX) + { + } + + iterator(Node* terminal, unsigned index) + : m_terminal(terminal) + , m_index(index) + { + } + + BasicBlock* operator*() + { + return m_terminal->successor(m_index); + } + + iterator& operator++() + { + m_index++; + return *this; + } + + bool operator==(const iterator& other) const + { + return m_index == other.m_index; + } + + bool operator!=(const iterator& other) const + { + return !(*this == other); + } + private: + Node* m_terminal; + unsigned m_index; + }; + + iterator begin() + { + return iterator(m_terminal, 0); } + + iterator end() + { + return iterator(m_terminal, m_terminal->numSuccessors()); + } + + size_t size() const { return m_terminal->numSuccessors(); } + BasicBlock* at(size_t index) const { return m_terminal->successor(index); } + BasicBlock* operator[](size_t index) const { return at(index); } + + private: + Node* m_terminal; + }; + + SuccessorsIterable successors() + { + return SuccessorsIterable(this); } BasicBlock*& successorForCondition(bool condition) { - ASSERT(isBranch()); - return condition ? 
takenBlock() : notTakenBlock(); + return branchData()->forCondition(condition); } bool hasHeapPrediction() { switch (op()) { + case ArithRound: + case ArithFloor: + case ArithCeil: + case GetDirectPname: case GetById: case GetByIdFlush: case GetByVal: - case GetMyArgumentByVal: - case GetMyArgumentByValSafe: case Call: + case TailCallInlinedCaller: case Construct: + case CallVarargs: + case TailCallVarargsInlinedCaller: + case ConstructVarargs: + case CallForwardVarargs: + case TailCallForwardVarargsInlinedCaller: case GetByOffset: + case MultiGetByOffset: case GetClosureVar: + case GetFromArguments: case ArrayPop: case ArrayPush: case RegExpExec: case RegExpTest: case GetGlobalVar: + case GetGlobalLexicalVariable: + case StringReplace: return true; default: return false; @@ -917,61 +1370,56 @@ struct Node { ASSERT(hasHeapPrediction()); return static_cast<SpeculatedType>(m_opInfo2); } - - bool predictHeap(SpeculatedType prediction) + + void setHeapPrediction(SpeculatedType prediction) { ASSERT(hasHeapPrediction()); - - return mergeSpeculation(m_opInfo2, prediction); + m_opInfo2 = prediction; } - bool hasFunction() + bool hasCellOperand() { switch (op()) { - case CheckFunction: - case AllocationProfileWatchpoint: + case CheckCell: + case OverridesHasInstance: + case NewFunction: + case NewArrowFunction: + case NewGeneratorFunction: + case CreateActivation: + case MaterializeCreateActivation: return true; default: return false; } } - JSCell* function() - { - ASSERT(hasFunction()); - JSCell* result = reinterpret_cast<JSFunction*>(m_opInfo); - ASSERT(JSValue(result).isFunction()); - return result; - } - - bool hasExecutable() + FrozenValue* cellOperand() { - return op() == CheckExecutable; + ASSERT(hasCellOperand()); + return reinterpret_cast<FrozenValue*>(m_opInfo); } - ExecutableBase* executable() + template<typename T> + T castOperand() { - return jsCast<ExecutableBase*>(reinterpret_cast<JSCell*>(m_opInfo)); + return cellOperand()->cast<T>(); } - bool hasVariableWatchpointSet() + void setCellOperand(FrozenValue* value) { - return op() == NotifyWrite || op() == VariableWatchpoint; + ASSERT(hasCellOperand()); + m_opInfo = bitwise_cast<uintptr_t>(value); } - VariableWatchpointSet* variableWatchpointSet() + bool hasWatchpointSet() { - return reinterpret_cast<VariableWatchpointSet*>(m_opInfo); + return op() == NotifyWrite; } - bool hasTypedArray() + WatchpointSet* watchpointSet() { - return op() == TypedArrayWatchpoint; - } - - JSArrayBufferView* typedArray() - { - return reinterpret_cast<JSArrayBufferView*>(m_opInfo); + ASSERT(hasWatchpointSet()); + return reinterpret_cast<WatchpointSet*>(m_opInfo); } bool hasStoragePointer() @@ -981,14 +1429,36 @@ struct Node { void* storagePointer() { + ASSERT(hasStoragePointer()); return reinterpret_cast<void*>(m_opInfo); } - bool hasStructureTransitionData() + bool hasUidOperand() + { + return op() == CheckIdent; + } + + UniquedStringImpl* uidOperand() + { + ASSERT(hasUidOperand()); + return reinterpret_cast<UniquedStringImpl*>(m_opInfo); + } + + bool hasTypeInfoOperand() + { + return op() == CheckTypeInfoFlags; + } + + unsigned typeInfoOperand() + { + ASSERT(hasTypeInfoOperand() && m_opInfo <= UCHAR_MAX); + return static_cast<unsigned>(m_opInfo); + } + + bool hasTransition() { switch (op()) { case PutStructure: - case PhantomPutStructure: case AllocatePropertyStorage: case ReallocatePropertyStorage: return true; @@ -997,16 +1467,18 @@ struct Node { } } - StructureTransitionData& structureTransitionData() + Transition* transition() { - 
ASSERT(hasStructureTransitionData()); - return *reinterpret_cast<StructureTransitionData*>(m_opInfo); + ASSERT(hasTransition()); + return reinterpret_cast<Transition*>(m_opInfo); } bool hasStructureSet() { switch (op()) { case CheckStructure: + case CheckStructureImmediate: + case MaterializeNewObject: return true; default: return false; @@ -1022,7 +1494,6 @@ struct Node { bool hasStructure() { switch (op()) { - case StructureTransitionWatchpoint: case ArrayifyToStructure: case NewObject: case NewStringObject: @@ -1040,47 +1511,140 @@ struct Node { bool hasStorageAccessData() { - return op() == GetByOffset || op() == PutByOffset; + switch (op()) { + case GetByOffset: + case PutByOffset: + case GetGetterSetterByOffset: + return true; + default: + return false; + } } - unsigned storageAccessDataIndex() + StorageAccessData& storageAccessData() { ASSERT(hasStorageAccessData()); - return m_opInfo; + return *bitwise_cast<StorageAccessData*>(m_opInfo); } - bool hasFunctionDeclIndex() + bool hasMultiGetByOffsetData() { - return op() == NewFunction - || op() == NewFunctionNoCheck; + return op() == MultiGetByOffset; } - unsigned functionDeclIndex() + MultiGetByOffsetData& multiGetByOffsetData() { - ASSERT(hasFunctionDeclIndex()); - return m_opInfo; + ASSERT(hasMultiGetByOffsetData()); + return *reinterpret_cast<MultiGetByOffsetData*>(m_opInfo); } - bool hasFunctionExprIndex() + bool hasMultiPutByOffsetData() { - return op() == NewFunctionExpression; + return op() == MultiPutByOffset; } - unsigned functionExprIndex() + MultiPutByOffsetData& multiPutByOffsetData() { - ASSERT(hasFunctionExprIndex()); - return m_opInfo; + ASSERT(hasMultiPutByOffsetData()); + return *reinterpret_cast<MultiPutByOffsetData*>(m_opInfo); } - bool hasSymbolTable() + bool hasObjectMaterializationData() { - return op() == FunctionReentryWatchpoint; + switch (op()) { + case MaterializeNewObject: + case MaterializeCreateActivation: + return true; + + default: + return false; + } } - SymbolTable* symbolTable() + ObjectMaterializationData& objectMaterializationData() { - ASSERT(hasSymbolTable()); - return reinterpret_cast<SymbolTable*>(m_opInfo); + ASSERT(hasObjectMaterializationData()); + return *reinterpret_cast<ObjectMaterializationData*>(m_opInfo2); + } + + bool isObjectAllocation() + { + switch (op()) { + case NewObject: + case MaterializeNewObject: + return true; + default: + return false; + } + } + + bool isPhantomObjectAllocation() + { + switch (op()) { + case PhantomNewObject: + return true; + default: + return false; + } + } + + bool isActivationAllocation() + { + switch (op()) { + case CreateActivation: + case MaterializeCreateActivation: + return true; + default: + return false; + } + } + + bool isPhantomActivationAllocation() + { + switch (op()) { + case PhantomCreateActivation: + return true; + default: + return false; + } + } + + bool isFunctionAllocation() + { + switch (op()) { + case NewArrowFunction: + case NewFunction: + case NewGeneratorFunction: + return true; + default: + return false; + } + } + + bool isPhantomFunctionAllocation() + { + switch (op()) { + case PhantomNewFunction: + case PhantomNewGeneratorFunction: + return true; + default: + return false; + } + } + + bool isPhantomAllocation() + { + switch (op()) { + case PhantomNewObject: + case PhantomDirectArguments: + case PhantomClonedArguments: + case PhantomNewFunction: + case PhantomNewGeneratorFunction: + case PhantomCreateActivation: + return true; + default: + return false; + } } bool hasArrayMode() @@ -1099,6 +1663,7 @@ struct Node { case 
ArrayifyToStructure: case ArrayPush: case ArrayPop: + case HasIndexedProperty: return true; default: return false; @@ -1125,6 +1690,7 @@ struct Node { bool hasArithMode() { switch (op()) { + case ArithAbs: case ArithAdd: case ArithSub: case ArithNegate: @@ -1149,6 +1715,23 @@ struct Node { { m_opInfo = mode; } + + bool hasArithRoundingMode() + { + return op() == ArithRound || op() == ArithFloor || op() == ArithCeil; + } + + Arith::RoundingMode arithRoundingMode() + { + ASSERT(hasArithRoundingMode()); + return static_cast<Arith::RoundingMode>(m_opInfo); + } + + void setArithRoundingMode(Arith::RoundingMode mode) + { + ASSERT(hasArithRoundingMode()); + m_opInfo = static_cast<uintptr_t>(mode); + } bool hasVirtualRegister() { @@ -1184,19 +1767,9 @@ struct Node { return m_refCount; } - bool willHaveCodeGenOrOSR() + bool isSemanticallySkippable() { - switch (op()) { - case SetLocal: - case MovHint: - case ZombieHint: - case PhantomArguments: - return true; - case Phantom: - return child1().useKindUnchecked() != UntypedUse || child2().useKindUnchecked() != UntypedUse || child3().useKindUnchecked() != UntypedUse; - default: - return shouldGenerate(); - } + return op() == CountExecution; } unsigned refCount() @@ -1263,9 +1836,25 @@ struct Node { return child1().useKind(); } + bool isBinaryUseKind(UseKind left, UseKind right) + { + return child1().useKind() == left && child2().useKind() == right; + } + bool isBinaryUseKind(UseKind useKind) { - return child1().useKind() == useKind && child2().useKind() == useKind; + return isBinaryUseKind(useKind, useKind); + } + + Edge childFor(UseKind useKind) + { + if (child1().useKind() == useKind) + return child1(); + if (child2().useKind() == useKind) + return child2(); + if (child3().useKind() == useKind) + return child3(); + return Edge(); } SpeculatedType prediction() @@ -1283,29 +1872,34 @@ struct Node { return isInt32Speculation(prediction()); } - bool shouldSpeculateInt32ForArithmetic() + bool sawBooleans() { - return isInt32SpeculationForArithmetic(prediction()); + return !!(prediction() & SpecBoolean); } - bool shouldSpeculateInt32ExpectingDefined() + bool shouldSpeculateInt32OrBoolean() { - return isInt32SpeculationExpectingDefined(prediction()); + return isInt32OrBooleanSpeculation(prediction()); } - bool shouldSpeculateMachineInt() + bool shouldSpeculateInt32ForArithmetic() { - return isMachineIntSpeculation(prediction()); + return isInt32SpeculationForArithmetic(prediction()); } - bool shouldSpeculateMachineIntForArithmetic() + bool shouldSpeculateInt32OrBooleanForArithmetic() { - return isMachineIntSpeculationForArithmetic(prediction()); + return isInt32OrBooleanSpeculationForArithmetic(prediction()); } - bool shouldSpeculateMachineIntExpectingDefined() + bool shouldSpeculateInt32OrBooleanExpectingDefined() { - return isMachineIntSpeculationExpectingDefined(prediction()); + return isInt32OrBooleanSpeculationExpectingDefined(prediction()); + } + + bool shouldSpeculateMachineInt() + { + return isMachineIntSpeculation(prediction()); } bool shouldSpeculateDouble() @@ -1313,9 +1907,9 @@ struct Node { return isDoubleSpeculation(prediction()); } - bool shouldSpeculateDoubleForArithmetic() + bool shouldSpeculateDoubleReal() { - return isDoubleSpeculationForArithmetic(prediction()); + return isDoubleRealSpeculation(prediction()); } bool shouldSpeculateNumber() @@ -1323,26 +1917,51 @@ struct Node { return isFullNumberSpeculation(prediction()); } - bool shouldSpeculateNumberExpectingDefined() + bool shouldSpeculateNumberOrBoolean() { - return 
isFullNumberSpeculationExpectingDefined(prediction()); + return isFullNumberOrBooleanSpeculation(prediction()); + } + + bool shouldSpeculateNumberOrBooleanExpectingDefined() + { + return isFullNumberOrBooleanSpeculationExpectingDefined(prediction()); } bool shouldSpeculateBoolean() { return isBooleanSpeculation(prediction()); } + + bool shouldSpeculateOther() + { + return isOtherSpeculation(prediction()); + } + + bool shouldSpeculateMisc() + { + return isMiscSpeculation(prediction()); + } bool shouldSpeculateStringIdent() { return isStringIdentSpeculation(prediction()); } + + bool shouldSpeculateNotStringVar() + { + return isNotStringVarSpeculation(prediction()); + } bool shouldSpeculateString() { return isStringSpeculation(prediction()); } + bool shouldSpeculateStringOrOther() + { + return isStringOrOtherSpeculation(prediction()); + } + bool shouldSpeculateStringObject() { return isStringObjectSpeculation(prediction()); @@ -1352,6 +1971,16 @@ struct Node { { return isStringOrStringObjectSpeculation(prediction()); } + + bool shouldSpeculateRegExpObject() + { + return isRegExpObjectSpeculation(prediction()); + } + + bool shouldSpeculateSymbol() + { + return isSymbolSpeculation(prediction()); + } bool shouldSpeculateFinalObject() { @@ -1368,9 +1997,14 @@ struct Node { return isArraySpeculation(prediction()); } - bool shouldSpeculateArguments() + bool shouldSpeculateDirectArguments() + { + return isDirectArgumentsSpeculation(prediction()); + } + + bool shouldSpeculateScopedArguments() { - return isArgumentsSpeculation(prediction()); + return isScopedArgumentsSpeculation(prediction()); } bool shouldSpeculateInt8Array() @@ -1438,6 +2072,36 @@ struct Node { return isCellSpeculation(prediction()); } + bool shouldSpeculateCellOrOther() + { + return isCellOrOtherSpeculation(prediction()); + } + + bool shouldSpeculateNotCell() + { + return isNotCellSpeculation(prediction()); + } + + bool shouldSpeculateUntypedForArithmetic() + { + return isUntypedSpeculationForArithmetic(prediction()); + } + + static bool shouldSpeculateUntypedForArithmetic(Node* op1, Node* op2) + { + return op1->shouldSpeculateUntypedForArithmetic() || op2->shouldSpeculateUntypedForArithmetic(); + } + + bool shouldSpeculateUntypedForBitOps() + { + return isUntypedSpeculationForBitOps(prediction()); + } + + static bool shouldSpeculateUntypedForBitOps(Node* op1, Node* op2) + { + return op1->shouldSpeculateUntypedForBitOps() || op2->shouldSpeculateUntypedForBitOps(); + } + static bool shouldSpeculateBoolean(Node* op1, Node* op2) { return op1->shouldSpeculateBoolean() && op2->shouldSpeculateBoolean(); @@ -1448,44 +2112,49 @@ struct Node { return op1->shouldSpeculateInt32() && op2->shouldSpeculateInt32(); } - static bool shouldSpeculateInt32ForArithmetic(Node* op1, Node* op2) + static bool shouldSpeculateInt32OrBoolean(Node* op1, Node* op2) { - return op1->shouldSpeculateInt32ForArithmetic() && op2->shouldSpeculateInt32ForArithmetic(); + return op1->shouldSpeculateInt32OrBoolean() + && op2->shouldSpeculateInt32OrBoolean(); } - static bool shouldSpeculateInt32ExpectingDefined(Node* op1, Node* op2) + static bool shouldSpeculateInt32OrBooleanForArithmetic(Node* op1, Node* op2) { - return op1->shouldSpeculateInt32ExpectingDefined() && op2->shouldSpeculateInt32ExpectingDefined(); + return op1->shouldSpeculateInt32OrBooleanForArithmetic() + && op2->shouldSpeculateInt32OrBooleanForArithmetic(); } - static bool shouldSpeculateMachineInt(Node* op1, Node* op2) + static bool shouldSpeculateInt32OrBooleanExpectingDefined(Node* op1, Node* op2) { - 
return op1->shouldSpeculateMachineInt() && op2->shouldSpeculateMachineInt(); + return op1->shouldSpeculateInt32OrBooleanExpectingDefined() + && op2->shouldSpeculateInt32OrBooleanExpectingDefined(); } - static bool shouldSpeculateMachineIntForArithmetic(Node* op1, Node* op2) + static bool shouldSpeculateMachineInt(Node* op1, Node* op2) { - return op1->shouldSpeculateMachineIntForArithmetic() && op2->shouldSpeculateMachineIntForArithmetic(); + return op1->shouldSpeculateMachineInt() && op2->shouldSpeculateMachineInt(); } - static bool shouldSpeculateMachineIntExpectingDefined(Node* op1, Node* op2) + static bool shouldSpeculateNumber(Node* op1, Node* op2) { - return op1->shouldSpeculateMachineIntExpectingDefined() && op2->shouldSpeculateMachineIntExpectingDefined(); + return op1->shouldSpeculateNumber() && op2->shouldSpeculateNumber(); } - static bool shouldSpeculateDoubleForArithmetic(Node* op1, Node* op2) + static bool shouldSpeculateNumberOrBoolean(Node* op1, Node* op2) { - return op1->shouldSpeculateDoubleForArithmetic() && op2->shouldSpeculateDoubleForArithmetic(); + return op1->shouldSpeculateNumberOrBoolean() + && op2->shouldSpeculateNumberOrBoolean(); } - static bool shouldSpeculateNumber(Node* op1, Node* op2) + static bool shouldSpeculateNumberOrBooleanExpectingDefined(Node* op1, Node* op2) { - return op1->shouldSpeculateNumber() && op2->shouldSpeculateNumber(); + return op1->shouldSpeculateNumberOrBooleanExpectingDefined() + && op2->shouldSpeculateNumberOrBooleanExpectingDefined(); } - - static bool shouldSpeculateNumberExpectingDefined(Node* op1, Node* op2) + + static bool shouldSpeculateSymbol(Node* op1, Node* op2) { - return op1->shouldSpeculateNumberExpectingDefined() && op2->shouldSpeculateNumberExpectingDefined(); + return op1->shouldSpeculateSymbol() && op2->shouldSpeculateSymbol(); } static bool shouldSpeculateFinalObject(Node* op1, Node* op2) @@ -1498,16 +2167,81 @@ struct Node { return op1->shouldSpeculateArray() && op2->shouldSpeculateArray(); } - bool canSpeculateInt32() + bool canSpeculateInt32(RareCaseProfilingSource source) + { + return nodeCanSpeculateInt32(arithNodeFlags(), source); + } + + bool canSpeculateInt52(RareCaseProfilingSource source) + { + return nodeCanSpeculateInt52(arithNodeFlags(), source); + } + + RareCaseProfilingSource sourceFor(PredictionPass pass) { - return nodeCanSpeculateInt32(arithNodeFlags()); + if (pass == PrimaryPass || child1()->sawBooleans() || (child2() && child2()->sawBooleans())) + return DFGRareCase; + return AllRareCases; } - bool canSpeculateInt52() + bool canSpeculateInt32(PredictionPass pass) { - return nodeCanSpeculateInt52(arithNodeFlags()); + return canSpeculateInt32(sourceFor(pass)); } + bool canSpeculateInt52(PredictionPass pass) + { + return canSpeculateInt52(sourceFor(pass)); + } + + bool hasTypeLocation() + { + return op() == ProfileType; + } + + TypeLocation* typeLocation() + { + ASSERT(hasTypeLocation()); + return reinterpret_cast<TypeLocation*>(m_opInfo); + } + + bool hasBasicBlockLocation() + { + return op() == ProfileControlFlow; + } + + BasicBlockLocation* basicBlockLocation() + { + ASSERT(hasBasicBlockLocation()); + return reinterpret_cast<BasicBlockLocation*>(m_opInfo); + } + + Node* replacement() const + { + return m_misc.replacement; + } + + void setReplacement(Node* replacement) + { + m_misc.replacement = replacement; + } + + Epoch epoch() const + { + return Epoch::fromUnsigned(m_misc.epoch); + } + + void setEpoch(Epoch epoch) + { + m_misc.epoch = epoch.toUnsigned(); + } + + unsigned numberOfArgumentsToSkip() + 
{ + ASSERT(op() == CopyRest || op() == GetRestLength); + return static_cast<unsigned>(m_opInfo); + } + void dumpChildren(PrintStream& out) { if (!child1()) @@ -1522,12 +2256,9 @@ struct Node { } // NB. This class must have a trivial destructor. - - // Used for determining what bytecode this came from. This is important for - // debugging, exceptions, and even basic execution semantics. - CodeOrigin codeOrigin; - // Code origin for where the node exits to. - CodeOrigin codeOriginForExitTarget; + + NodeOrigin origin; + // References to up to 3 children, or links to a variable length set of children. AdjacencyList children; @@ -1550,20 +2281,22 @@ public: AbstractValue value; // Miscellaneous data that is usually meaningless, but can hold some analysis results - // if you ask right. For example, if you do Graph::initializeNodeOwners(), misc.owner + // if you ask right. For example, if you do Graph::initializeNodeOwners(), Node::owner // will tell you which basic block a node belongs to. You cannot rely on this persisting // across transformations unless you do the maintenance work yourself. Other phases use - // misc.replacement, but they do so manually: first you do Graph::clearReplacements() - // and then you set, and use, replacement's yourself. + // Node::replacement, but they do so manually: first you do Graph::clearReplacements() + // and then you set, and use, replacement's yourself. Same thing for epoch. // // Bottom line: don't use these fields unless you initialize them yourself, or by // calling some appropriate methods that initialize them the way you want. Otherwise, // these fields are meaningless. +private: union { Node* replacement; - BasicBlock* owner; - bool needsBarrier; - } misc; + unsigned epoch; + } m_misc; +public: + BasicBlock* owner; }; inline bool nodeComparator(Node* a, Node* b) diff --git a/Source/JavaScriptCore/dfg/DFGNodeAllocator.h b/Source/JavaScriptCore/dfg/DFGNodeAllocator.h index afd72e584..e9024cc19 100644 --- a/Source/JavaScriptCore/dfg/DFGNodeAllocator.h +++ b/Source/JavaScriptCore/dfg/DFGNodeAllocator.h @@ -26,8 +26,6 @@ #ifndef DFGNodeAllocator_h #define DFGNodeAllocator_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGAllocator.h" diff --git a/Source/JavaScriptCore/dfg/DFGNodeFlags.cpp b/Source/JavaScriptCore/dfg/DFGNodeFlags.cpp index 396ca6119..79f4b4302 100644 --- a/Source/JavaScriptCore/dfg/DFGNodeFlags.cpp +++ b/Source/JavaScriptCore/dfg/DFGNodeFlags.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,6 +28,7 @@ #if ENABLE(DFG_JIT) +#include "JSCInlines.h" #include <wtf/CommaPrinter.h> #include <wtf/StringPrintStream.h> @@ -46,9 +47,15 @@ void dumpNodeFlags(PrintStream& actualOut, NodeFlags flags) case NodeResultNumber: out.print(comma, "Number"); break; + case NodeResultDouble: + out.print(comma, "Double"); + break; case NodeResultInt32: out.print(comma, "Int32"); break; + case NodeResultInt52: + out.print(comma, "Int52"); + break; case NodeResultBoolean: out.print(comma, "Boolean"); break; @@ -67,12 +74,6 @@ void dumpNodeFlags(PrintStream& actualOut, NodeFlags flags) if (flags & NodeHasVarArgs) out.print(comma, "VarArgs"); - if (flags & NodeClobbersWorld) - out.print(comma, "Clobbers"); - - if (flags & NodeMightClobber) - out.print(comma, "MightClobber"); - if (flags & NodeResultMask) { if (!(flags & NodeBytecodeUsesAsNumber) && !(flags & NodeBytecodeNeedsNegZero)) out.print(comma, "PureInt"); @@ -83,18 +84,33 @@ void dumpNodeFlags(PrintStream& actualOut, NodeFlags flags) if (flags & NodeBytecodeUsesAsOther) out.print(comma, "UseAsOther"); } + + if (flags & NodeMayHaveNonIntResult) + out.print(comma, "MayHaveNonIntResult"); + + if (flags & NodeMayOverflowInt52) + out.print(comma, "MayOverflowInt52"); + + if (flags & NodeMayOverflowInt32InBaseline) + out.print(comma, "MayOverflowInt32InBaseline"); + + if (flags & NodeMayOverflowInt32InDFG) + out.print(comma, "MayOverflowInt32InDFG"); + + if (flags & NodeMayNegZeroInBaseline) + out.print(comma, "MayNegZeroInBaseline"); - if (flags & NodeMayOverflow) - out.print(comma, "MayOverflow"); - - if (flags & NodeMayNegZero) - out.print(comma, "MayNegZero"); + if (flags & NodeMayNegZeroInDFG) + out.print(comma, "MayNegZeroInDFG"); if (flags & NodeBytecodeUsesAsInt) out.print(comma, "UseAsInt"); + + if (flags & NodeBytecodeUsesAsArrayIndex) + out.print(comma, "ReallyWantsInt"); - if (!(flags & NodeDoesNotExit)) - out.print(comma, "CanExit"); + if (flags & NodeIsFlushed) + out.print(comma, "IsFlushed"); CString string = out.toCString(); if (!string.length()) diff --git a/Source/JavaScriptCore/dfg/DFGNodeFlags.h b/Source/JavaScriptCore/dfg/DFGNodeFlags.h index d68f0587b..626b9bb2b 100644 --- a/Source/JavaScriptCore/dfg/DFGNodeFlags.h +++ b/Source/JavaScriptCore/dfg/DFGNodeFlags.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGNodeFlags_h #define DFGNodeFlags_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include <wtf/PrintStream.h> @@ -40,35 +38,39 @@ namespace JSC { namespace DFG { #define NodeResultMask 0x0007 #define NodeResultJS 0x0001 #define NodeResultNumber 0x0002 -#define NodeResultInt32 0x0003 -#define NodeResultInt52 0x0004 -#define NodeResultBoolean 0x0005 -#define NodeResultStorage 0x0006 +#define NodeResultDouble 0x0003 +#define NodeResultInt32 0x0004 +#define NodeResultInt52 0x0005 +#define NodeResultBoolean 0x0006 +#define NodeResultStorage 0x0007 #define NodeMustGenerate 0x0008 // set on nodes that have side effects, and may not trivially be removed by DCE. 
#define NodeHasVarArgs 0x0010 -#define NodeClobbersWorld 0x0020 -#define NodeMightClobber 0x0040 - -#define NodeBehaviorMask 0x0180 -#define NodeMayOverflow 0x0080 -#define NodeMayNegZero 0x0100 + +#define NodeBehaviorMask 0x07e0 +#define NodeMayHaveNonIntResult 0x0020 +#define NodeMayOverflowInt52 0x0040 +#define NodeMayOverflowInt32InBaseline 0x0080 +#define NodeMayOverflowInt32InDFG 0x0100 +#define NodeMayNegZeroInBaseline 0x0200 +#define NodeMayNegZeroInDFG 0x0400 -#define NodeBytecodeBackPropMask 0x1E00 +#define NodeBytecodeBackPropMask 0xf800 #define NodeBytecodeUseBottom 0x0000 -#define NodeBytecodeUsesAsNumber 0x0200 // The result of this computation may be used in a context that observes fractional, or bigger-than-int32, results. -#define NodeBytecodeNeedsNegZero 0x0400 // The result of this computation may be used in a context that observes -0. -#define NodeBytecodeUsesAsOther 0x0800 // The result of this computation may be used in a context that distinguishes between NaN and other things (like undefined). +#define NodeBytecodeUsesAsNumber 0x0800 // The result of this computation may be used in a context that observes fractional, or bigger-than-int32, results. +#define NodeBytecodeNeedsNegZero 0x1000 // The result of this computation may be used in a context that observes -0. +#define NodeBytecodeUsesAsOther 0x2000 // The result of this computation may be used in a context that distinguishes between NaN and other things (like undefined). #define NodeBytecodeUsesAsValue (NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeUsesAsOther) -#define NodeBytecodeUsesAsInt 0x1000 // The result of this computation is known to be used in a context that prefers, but does not require, integer values. +#define NodeBytecodeUsesAsInt 0x4000 // The result of this computation is known to be used in a context that prefers, but does not require, integer values. +#define NodeBytecodeUsesAsArrayIndex 0x8000 // The result of this computation is known to be used in a context that strongly prefers integer values, to the point that we should avoid using doubles if at all possible. #define NodeArithFlagsMask (NodeBehaviorMask | NodeBytecodeBackPropMask) -#define NodeDoesNotExit 0x2000 // This flag is negated to make it natural for the default to be that a node does exit. +#define NodeIsFlushed 0x10000 // Computed by CPSRethreadingPhase, will tell you which local nodes are backwards-reachable from a Flush. -#define NodeRelevantToOSR 0x4000 - -#define NodeIsStaticConstant 0x8000 // Used only by the parser, to determine if a constant arose statically and hence could be folded at parse-time. +#define NodeMiscFlag1 0x20000 +#define NodeMiscFlag2 0x40000 +#define NodeMiscFlag3 0x80000 typedef uint32_t NodeFlags; @@ -87,35 +89,87 @@ static inline bool bytecodeCanIgnoreNegativeZero(NodeFlags flags) return !(flags & NodeBytecodeNeedsNegZero); } -static inline bool nodeMayOverflow(NodeFlags flags) +enum RareCaseProfilingSource { + BaselineRareCase, // Comes from slow case counting in the baseline JIT. + DFGRareCase, // Comes from OSR exit profiles. 
+ AllRareCases +}; + +static inline bool nodeMayOverflowInt52(NodeFlags flags, RareCaseProfilingSource) +{ + return !!(flags & NodeMayOverflowInt52); +} + +static inline bool nodeMayOverflowInt32(NodeFlags flags, RareCaseProfilingSource source) { - return !!(flags & NodeMayOverflow); + NodeFlags mask = 0; + switch (source) { + case BaselineRareCase: + mask = NodeMayOverflowInt32InBaseline; + break; + case DFGRareCase: + mask = NodeMayOverflowInt32InDFG; + break; + case AllRareCases: + mask = NodeMayOverflowInt32InBaseline | NodeMayOverflowInt32InDFG; + break; + } + return !!(flags & mask); } -static inline bool nodeMayNegZero(NodeFlags flags) +static inline bool nodeMayNegZero(NodeFlags flags, RareCaseProfilingSource source) { - return !!(flags & NodeMayNegZero); + NodeFlags mask = 0; + switch (source) { + case BaselineRareCase: + mask = NodeMayNegZeroInBaseline; + break; + case DFGRareCase: + mask = NodeMayNegZeroInDFG; + break; + case AllRareCases: + mask = NodeMayNegZeroInBaseline | NodeMayNegZeroInDFG; + break; + } + return !!(flags & mask); } -static inline bool nodeCanSpeculateInt32(NodeFlags flags) +static inline bool nodeCanSpeculateInt32(NodeFlags flags, RareCaseProfilingSource source) { - if (nodeMayOverflow(flags)) + if (nodeMayOverflowInt32(flags, source)) return !bytecodeUsesAsNumber(flags); - if (nodeMayNegZero(flags)) + if (nodeMayNegZero(flags, source)) return bytecodeCanIgnoreNegativeZero(flags); return true; } -static inline bool nodeCanSpeculateInt52(NodeFlags flags) +static inline bool nodeCanSpeculateInt52(NodeFlags flags, RareCaseProfilingSource source) { - if (nodeMayNegZero(flags)) + if (nodeMayOverflowInt52(flags, source)) + return false; + + if (nodeMayNegZero(flags, source)) return bytecodeCanIgnoreNegativeZero(flags); return true; } +// FIXME: Get rid of this. +// https://bugs.webkit.org/show_bug.cgi?id=131689 +static inline NodeFlags canonicalResultRepresentation(NodeFlags flags) +{ + switch (flags) { + case NodeResultDouble: + case NodeResultInt52: + case NodeResultStorage: + return flags; + default: + return NodeResultJS; + } +} + void dumpNodeFlags(PrintStream&, NodeFlags); MAKE_PRINT_ADAPTOR(NodeFlagsDump, NodeFlags, dumpNodeFlags); diff --git a/Source/JavaScriptCore/dfg/DFGNodeOrigin.cpp b/Source/JavaScriptCore/dfg/DFGNodeOrigin.cpp new file mode 100644 index 000000000..5c086d78f --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGNodeOrigin.cpp @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGNodeOrigin.h" + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +void NodeOrigin::dump(PrintStream& out) const +{ + out.print("{semantic: ", semantic, ", forExit: ", forExit, ", exitOK: ", exitOK, "}"); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGNodeOrigin.h b/Source/JavaScriptCore/dfg/DFGNodeOrigin.h new file mode 100644 index 000000000..6697a76dd --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGNodeOrigin.h @@ -0,0 +1,131 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef DFGNodeOrigin_h +#define DFGNodeOrigin_h + +#if ENABLE(DFG_JIT) + +#include "CodeOrigin.h" +#include "DFGClobbersExitState.h" + +namespace JSC { namespace DFG { + +class Graph; +struct Node; + +struct NodeOrigin { + NodeOrigin() { } + + NodeOrigin(CodeOrigin semantic, CodeOrigin forExit, bool exitOK) + : semantic(semantic) + , forExit(forExit) + , exitOK(exitOK) + { + } + + bool isSet() const + { + ASSERT(semantic.isSet() == forExit.isSet()); + return semantic.isSet(); + } + + NodeOrigin withSemantic(CodeOrigin semantic) const + { + if (!isSet()) + return NodeOrigin(); + + NodeOrigin result = *this; + if (semantic.isSet()) + result.semantic = semantic; + return result; + } + + NodeOrigin withForExitAndExitOK(CodeOrigin forExit, bool exitOK) const + { + if (!isSet()) + return NodeOrigin(); + + NodeOrigin result = *this; + if (forExit.isSet()) + result.forExit = forExit; + result.exitOK = exitOK; + return result; + } + + NodeOrigin withExitOK(bool value) const + { + NodeOrigin result = *this; + result.exitOK = value; + return result; + } + + NodeOrigin withInvalidExit() const + { + return withExitOK(false); + } + + NodeOrigin takeValidExit(bool& canExit) const + { + return withExitOK(exitOK & std::exchange(canExit, false)); + } + + NodeOrigin forInsertingAfter(Graph& graph, Node* node) const + { + NodeOrigin result = *this; + if (exitOK && clobbersExitState(graph, node)) + result.exitOK = false; + return result; + } + + bool operator==(const NodeOrigin& other) const + { + return semantic == other.semantic + && forExit == other.forExit + && exitOK == other.exitOK; + } + + bool operator!=(const NodeOrigin& other) const + { + return !(*this == other); + } + + void dump(PrintStream&) const; + + // Used for determining what bytecode this came from. This is important for + // debugging, exceptions, and even basic execution semantics. + CodeOrigin semantic; + // Code origin for where the node exits to. + CodeOrigin forExit; + // Whether or not it is legal to exit here. + bool exitOK { false }; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGNodeOrigin_h + diff --git a/Source/JavaScriptCore/dfg/DFGNodeType.h b/Source/JavaScriptCore/dfg/DFGNodeType.h index 3d2f80bcf..4035dd940 100644 --- a/Source/JavaScriptCore/dfg/DFGNodeType.h +++ b/Source/JavaScriptCore/dfg/DFGNodeType.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved. + * Copyright (C) 2012-2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGNodeType_h #define DFGNodeType_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGNodeFlags.h" @@ -37,11 +35,11 @@ namespace JSC { namespace DFG { // This macro defines a set of information about all known node types, used to populate NodeId, NodeType below. #define FOR_EACH_DFG_OP(macro) \ /* A constant in the CodeBlock's constant pool. */\ - macro(JSConstant, NodeResultJS | NodeDoesNotExit) \ + macro(JSConstant, NodeResultJS) \ \ - /* A constant not in the CodeBlock's constant pool. Uses get patched to jumps that exit the */\ - /* code block. */\ - macro(WeakJSConstant, NodeResultJS | NodeDoesNotExit) \ + /* Constants with specific representations. */\ + macro(DoubleConstant, NodeResultDouble) \ + macro(Int52Constant, NodeResultInt52) \ \ /* Marker to indicate that an operation was optimized entirely and all that is left */\ /* is to make one node alias another. 
CSE will later usually eliminate this node, */\ @@ -52,21 +50,34 @@ namespace JSC { namespace DFG { macro(ToThis, NodeResultJS) \ macro(CreateThis, NodeResultJS) /* Note this is not MustGenerate since we're returning it anyway. */ \ macro(GetCallee, NodeResultJS) \ + macro(GetArgumentCount, NodeResultInt32) \ \ /* Nodes for local variable access. These nodes are linked together using Phi nodes. */\ /* Any two nodes that are part of the same Phi graph will share the same */\ - /* VariableAccessData, and thus will share predictions. */\ - macro(GetLocal, NodeResultJS) \ + /* VariableAccessData, and thus will share predictions. FIXME: We should come up with */\ + /* better names for a lot of these. https://bugs.webkit.org/show_bug.cgi?id=137307. */\ + /* Note that GetLocal is MustGenerate because it's our only way of knowing that some other */\ + /* basic block might have read a local variable in bytecode. We only remove GetLocals if it */\ + /* is redundant because of an earlier GetLocal or SetLocal in the same block. We could make */\ + /* these not MustGenerate and use a more sophisticated analysis to insert PhantomLocals in */\ + /* the same way that we insert Phantoms. That's hard and probably not profitable. See */\ + /* https://bugs.webkit.org/show_bug.cgi?id=144086 */\ + macro(GetLocal, NodeResultJS | NodeMustGenerate) \ macro(SetLocal, 0) \ - macro(MovHint, NodeDoesNotExit) \ - macro(ZombieHint, NodeDoesNotExit) \ - macro(GetArgument, NodeResultJS | NodeMustGenerate) \ + \ + macro(PutStack, NodeMustGenerate) \ + macro(KillStack, NodeMustGenerate) \ + macro(GetStack, NodeResultJS) \ + \ + macro(MovHint, NodeMustGenerate) \ + macro(ZombieHint, NodeMustGenerate) \ + macro(ExitOK, NodeMustGenerate) /* Indicates that exit state is intact. */ \ macro(Phantom, NodeMustGenerate) \ - macro(Check, 0) /* Used if we want just a type check but not liveness. DCE eithers kills this or converts it to Phantom. */\ - macro(Upsilon, NodeDoesNotExit | NodeRelevantToOSR) \ - macro(Phi, NodeDoesNotExit | NodeRelevantToOSR) \ - macro(Flush, NodeMustGenerate | NodeDoesNotExit) \ - macro(PhantomLocal, NodeMustGenerate | NodeDoesNotExit) \ + macro(Check, NodeMustGenerate) /* Used if we want just a type check but not liveness. Non-checking uses will be removed. */\ + macro(Upsilon, 0) \ + macro(Phi, 0) \ + macro(Flush, NodeMustGenerate) \ + macro(PhantomLocal, NodeMustGenerate) \ \ /* Hint that this is where bytecode thinks is a good place to OSR. Note that this */\ /* will exist even in inlined loops. This has no execution semantics but it must */\ @@ -81,6 +92,7 @@ namespace JSC { namespace DFG { /* Tier-up checks from the DFG to the FTL. */\ macro(CheckTierUpInLoop, NodeMustGenerate) \ macro(CheckTierUpAndOSREnter, NodeMustGenerate) \ + macro(CheckTierUpWithNestedTriggerAndOSREnter, NodeMustGenerate) \ macro(CheckTierUpAtReturn, NodeMustGenerate) \ \ /* Get the value of a local variable, without linking into the VariableAccessData */\ @@ -89,7 +101,7 @@ namespace JSC { namespace DFG { macro(GetLocalUnlinked, NodeResultJS) \ \ /* Marker for an argument being set at the prologue of a function. */\ - macro(SetArgument, NodeDoesNotExit) \ + macro(SetArgument, 0) \ \ /* Marker of location in the IR where we may possibly perform jump replacement to */\ /* invalidate this code block. */\ @@ -106,101 +118,122 @@ namespace JSC { namespace DFG { macro(ValueToInt32, NodeResultInt32) \ /* Used to box the result of URShift nodes (result has range 0..2^32-1). 
*/\ macro(UInt32ToNumber, NodeResultNumber) \ + /* Converts booleans to numbers but passes everything else through. */\ + macro(BooleanToNumber, NodeResultJS) \ \ - /* Used to cast known integers to doubles, so as to separate the double form */\ - /* of the value from the integer form. */\ - macro(Int32ToDouble, NodeResultNumber) \ - /* Used to speculate that a double value is actually an integer. */\ + /* Attempt to truncate a double to int32; this will exit if it can't do it. */\ macro(DoubleAsInt32, NodeResultInt32) \ - /* Used to separate representation and register allocation of Int52's represented */\ - /* as values. */\ - macro(Int52ToValue, NodeResultJS) \ - macro(Int52ToDouble, NodeResultNumber) \ - \ - /* Nodes for arithmetic operations. */\ - macro(ArithAdd, NodeResultNumber) \ - macro(ArithSub, NodeResultNumber) \ - macro(ArithNegate, NodeResultNumber) \ - macro(ArithMul, NodeResultNumber) \ + \ + /* Change the representation of a value. */\ + macro(DoubleRep, NodeResultDouble) \ + macro(Int52Rep, NodeResultInt52) \ + macro(ValueRep, NodeResultJS) \ + \ + /* Bogus type asserting node. Useful for testing, disappears during Fixup. */\ + macro(FiatInt52, NodeResultJS) \ + \ + /* Nodes for arithmetic operations. Note that if they do checks other than just type checks, */\ + /* then they are MustGenerate. This is probably stricter than it needs to be - for example */\ + /* they won't do checks if they are speculated double. Also, we could kill these if we do it */\ + /* before AI starts eliminating downstream operations based on proofs, for example in the */\ + /* case of "var tmp = a + b; return (tmp | 0) == tmp;". If a, b are speculated integer then */\ + /* this is only true if we do the overflow check - hence the need to keep it alive. More */\ + /* generally, we need to keep alive any operation whose checks cause filtration in AI. */\ + macro(ArithAdd, NodeResultNumber | NodeMustGenerate) \ + macro(ArithClz32, NodeResultInt32) \ + macro(ArithSub, NodeResultNumber | NodeMustGenerate) \ + macro(ArithNegate, NodeResultNumber | NodeMustGenerate) \ + macro(ArithMul, NodeResultNumber | NodeMustGenerate) \ macro(ArithIMul, NodeResultInt32) \ - macro(ArithDiv, NodeResultNumber) \ - macro(ArithMod, NodeResultNumber) \ - macro(ArithAbs, NodeResultNumber) \ + macro(ArithDiv, NodeResultNumber | NodeMustGenerate) \ + macro(ArithMod, NodeResultNumber | NodeMustGenerate) \ + macro(ArithAbs, NodeResultNumber | NodeMustGenerate) \ macro(ArithMin, NodeResultNumber) \ macro(ArithMax, NodeResultNumber) \ + macro(ArithFRound, NodeResultNumber) \ + macro(ArithPow, NodeResultNumber) \ + macro(ArithRandom, NodeResultDouble | NodeMustGenerate) \ + macro(ArithRound, NodeResultNumber) \ + macro(ArithFloor, NodeResultNumber) \ + macro(ArithCeil, NodeResultNumber) \ macro(ArithSqrt, NodeResultNumber) \ macro(ArithSin, NodeResultNumber) \ macro(ArithCos, NodeResultNumber) \ + macro(ArithLog, NodeResultNumber) \ \ /* Add of values may either be arithmetic, or result in string concatenation. */\ - macro(ValueAdd, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \ + macro(ValueAdd, NodeResultJS | NodeMustGenerate) \ + \ + /* Add of values that always convers its inputs to strings. May have two or three kids. */\ + macro(StrCat, NodeResultJS | NodeMustGenerate) \ \ /* Property access. */\ /* PutByValAlias indicates a 'put' aliases a prior write to the same property. */\ /* Since a put to 'length' may invalidate optimizations here, */\ /* this must be the directly subsequent property put. 
Note that PutByVal */\ /* opcodes use VarArgs beause they may have up to 4 children. */\ - macro(GetByVal, NodeResultJS | NodeMustGenerate | NodeMightClobber) \ - macro(PutByValDirect, NodeMustGenerate | NodeHasVarArgs | NodeMightClobber) \ - macro(PutByVal, NodeMustGenerate | NodeHasVarArgs | NodeMightClobber) \ - macro(PutByValAlias, NodeMustGenerate | NodeHasVarArgs | NodeMightClobber) \ - macro(GetById, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \ - macro(GetByIdFlush, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \ - macro(PutById, NodeMustGenerate | NodeClobbersWorld) \ - macro(PutByIdDirect, NodeMustGenerate | NodeClobbersWorld) \ + macro(GetByVal, NodeResultJS | NodeMustGenerate) \ + macro(GetMyArgumentByVal, NodeResultJS | NodeMustGenerate) \ + macro(LoadVarargs, NodeMustGenerate) \ + macro(ForwardVarargs, NodeMustGenerate) \ + macro(PutByValDirect, NodeMustGenerate | NodeHasVarArgs) \ + macro(PutByVal, NodeMustGenerate | NodeHasVarArgs) \ + macro(PutByValAlias, NodeMustGenerate | NodeHasVarArgs) \ + macro(GetById, NodeResultJS | NodeMustGenerate) \ + macro(GetByIdFlush, NodeResultJS | NodeMustGenerate) \ + macro(PutById, NodeMustGenerate) \ + macro(PutByIdFlush, NodeMustGenerate) \ + macro(PutByIdDirect, NodeMustGenerate) \ + macro(PutGetterById, NodeMustGenerate) \ + macro(PutSetterById, NodeMustGenerate) \ + macro(PutGetterSetterById, NodeMustGenerate) \ + macro(PutGetterByVal, NodeMustGenerate) \ + macro(PutSetterByVal, NodeMustGenerate) \ macro(CheckStructure, NodeMustGenerate) \ - macro(CheckExecutable, NodeMustGenerate) \ - /* Transition watchpoints are a contract between the party setting the watchpoint */\ - /* and the runtime system, where the party promises that the child object once had */\ - /* the structure being watched, and the runtime system in turn promises that the */\ - /* watchpoint will be turned into an OSR exit if any object with that structure */\ - /* ever transitions to a different structure. Hence, the child object must have */\ - /* previously had a CheckStructure executed on it or we're dealing with an object */\ - /* constant (WeakJSConstant) and the object was known to have that structure at */\ - /* compile-time. In the latter case this means that no structure checks have to be */\ - /* performed for this object by JITted code. In the former case this means that*/\ - /* the object's structure does not need to be rechecked due to side-effecting */\ - /* (clobbering) operations. */\ - macro(StructureTransitionWatchpoint, NodeMustGenerate) \ + macro(GetExecutable, NodeResultJS) \ macro(PutStructure, NodeMustGenerate) \ - macro(PhantomPutStructure, NodeMustGenerate | NodeDoesNotExit) \ - macro(AllocatePropertyStorage, NodeMustGenerate | NodeDoesNotExit | NodeResultStorage) \ - macro(ReallocatePropertyStorage, NodeMustGenerate | NodeDoesNotExit | NodeResultStorage) \ + macro(AllocatePropertyStorage, NodeMustGenerate | NodeResultStorage) \ + macro(ReallocatePropertyStorage, NodeMustGenerate | NodeResultStorage) \ macro(GetButterfly, NodeResultStorage) \ + macro(GetButterflyReadOnly, NodeResultStorage) /* A node used to replace GetButterfly at the bitter end of compilation. 
*/\ macro(CheckArray, NodeMustGenerate) \ macro(Arrayify, NodeMustGenerate) \ macro(ArrayifyToStructure, NodeMustGenerate) \ macro(GetIndexedPropertyStorage, NodeResultStorage) \ macro(ConstantStoragePointer, NodeResultStorage) \ - macro(TypedArrayWatchpoint, NodeMustGenerate) \ + macro(GetGetter, NodeResultJS) \ + macro(GetSetter, NodeResultJS) \ macro(GetByOffset, NodeResultJS) \ + macro(GetGetterSetterByOffset, NodeResultJS) \ + macro(MultiGetByOffset, NodeResultJS | NodeMustGenerate) \ macro(PutByOffset, NodeMustGenerate) \ + macro(MultiPutByOffset, NodeMustGenerate) \ macro(GetArrayLength, NodeResultInt32) \ macro(GetTypedArrayByteOffset, NodeResultInt32) \ macro(GetScope, NodeResultJS) \ - macro(GetMyScope, NodeResultJS) \ - macro(SkipTopScope, NodeResultJS) \ macro(SkipScope, NodeResultJS) \ - macro(GetClosureRegisters, NodeResultStorage) \ macro(GetClosureVar, NodeResultJS) \ macro(PutClosureVar, NodeMustGenerate) \ macro(GetGlobalVar, NodeResultJS) \ - macro(PutGlobalVar, NodeMustGenerate) \ + macro(GetGlobalLexicalVariable, NodeResultJS) \ + macro(PutGlobalVariable, NodeMustGenerate) \ macro(NotifyWrite, NodeMustGenerate) \ - macro(VariableWatchpoint, NodeMustGenerate) \ macro(VarInjectionWatchpoint, NodeMustGenerate) \ - macro(FunctionReentryWatchpoint, NodeMustGenerate) \ - macro(CheckFunction, NodeMustGenerate) \ - macro(AllocationProfileWatchpoint, NodeMustGenerate) \ + macro(CheckCell, NodeMustGenerate) \ + macro(CheckNotEmpty, NodeMustGenerate) \ + macro(CheckBadCell, NodeMustGenerate) \ macro(CheckInBounds, NodeMustGenerate) \ + macro(CheckIdent, NodeMustGenerate) \ + macro(CheckTypeInfoFlags, NodeMustGenerate) /* Takes an OpInfo with the flags you want to test are set */\ \ /* Optimizations for array mutation. */\ - macro(ArrayPush, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \ - macro(ArrayPop, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \ + macro(ArrayPush, NodeResultJS | NodeMustGenerate) \ + macro(ArrayPop, NodeResultJS | NodeMustGenerate) \ \ /* Optimizations for regular expression matching. */\ macro(RegExpExec, NodeResultJS | NodeMustGenerate) \ macro(RegExpTest, NodeResultJS | NodeMustGenerate) \ + macro(StringReplace, NodeResultJS | NodeMustGenerate) \ \ /* Optimizations for string access */ \ macro(StringCharCodeAt, NodeResultInt32) \ @@ -208,68 +241,84 @@ namespace JSC { namespace DFG { macro(StringFromCharCode, NodeResultJS) \ \ /* Nodes for comparison operations. */\ - macro(CompareLess, NodeResultBoolean | NodeMustGenerate | NodeClobbersWorld) \ - macro(CompareLessEq, NodeResultBoolean | NodeMustGenerate | NodeClobbersWorld) \ - macro(CompareGreater, NodeResultBoolean | NodeMustGenerate | NodeClobbersWorld) \ - macro(CompareGreaterEq, NodeResultBoolean | NodeMustGenerate | NodeClobbersWorld) \ - macro(CompareEq, NodeResultBoolean | NodeMustGenerate | NodeClobbersWorld) \ - macro(CompareEqConstant, NodeResultBoolean) \ + macro(CompareLess, NodeResultBoolean | NodeMustGenerate) \ + macro(CompareLessEq, NodeResultBoolean | NodeMustGenerate) \ + macro(CompareGreater, NodeResultBoolean | NodeMustGenerate) \ + macro(CompareGreaterEq, NodeResultBoolean | NodeMustGenerate) \ + macro(CompareEq, NodeResultBoolean | NodeMustGenerate) \ macro(CompareStrictEq, NodeResultBoolean) \ - macro(CompareStrictEqConstant, NodeResultBoolean) \ \ /* Calls. 
*/\ - macro(Call, NodeResultJS | NodeMustGenerate | NodeHasVarArgs | NodeClobbersWorld) \ - macro(Construct, NodeResultJS | NodeMustGenerate | NodeHasVarArgs | NodeClobbersWorld) \ + macro(Call, NodeResultJS | NodeMustGenerate | NodeHasVarArgs) \ + macro(Construct, NodeResultJS | NodeMustGenerate | NodeHasVarArgs) \ + macro(CallVarargs, NodeResultJS | NodeMustGenerate) \ + macro(CallForwardVarargs, NodeResultJS | NodeMustGenerate) \ + macro(ConstructVarargs, NodeResultJS | NodeMustGenerate) \ + macro(ConstructForwardVarargs, NodeResultJS | NodeMustGenerate) \ + macro(TailCallInlinedCaller, NodeResultJS | NodeMustGenerate | NodeHasVarArgs) \ + macro(TailCallVarargsInlinedCaller, NodeResultJS | NodeMustGenerate) \ + macro(TailCallForwardVarargsInlinedCaller, NodeResultJS | NodeMustGenerate) \ \ /* Allocations. */\ macro(NewObject, NodeResultJS) \ macro(NewArray, NodeResultJS | NodeHasVarArgs) \ - macro(NewArrayWithSize, NodeResultJS) \ + macro(NewArrayWithSize, NodeResultJS | NodeMustGenerate) \ macro(NewArrayBuffer, NodeResultJS) \ - macro(NewTypedArray, NodeResultJS | NodeClobbersWorld | NodeMustGenerate) \ + macro(NewTypedArray, NodeResultJS | NodeMustGenerate) \ macro(NewRegexp, NodeResultJS) \ + /* Rest Parameter */\ + macro(GetRestLength, NodeResultInt32) \ + macro(CopyRest, NodeMustGenerate) \ + \ + /* Support for allocation sinking. */\ + macro(PhantomNewObject, NodeResultJS | NodeMustGenerate) \ + macro(PutHint, NodeMustGenerate) \ + macro(CheckStructureImmediate, NodeMustGenerate) \ + macro(MaterializeNewObject, NodeResultJS | NodeHasVarArgs) \ + macro(PhantomNewFunction, NodeResultJS | NodeMustGenerate) \ + macro(PhantomNewGeneratorFunction, NodeResultJS | NodeMustGenerate) \ + macro(PhantomCreateActivation, NodeResultJS | NodeMustGenerate) \ + macro(MaterializeCreateActivation, NodeResultJS | NodeHasVarArgs) \ \ /* Nodes for misc operations. */\ macro(Breakpoint, NodeMustGenerate) \ macro(ProfileWillCall, NodeMustGenerate) \ macro(ProfileDidCall, NodeMustGenerate) \ - macro(CheckHasInstance, NodeMustGenerate) \ + macro(OverridesHasInstance, NodeMustGenerate | NodeResultBoolean) \ macro(InstanceOf, NodeResultBoolean) \ + macro(InstanceOfCustom, NodeMustGenerate | NodeResultBoolean) \ macro(IsUndefined, NodeResultBoolean) \ macro(IsBoolean, NodeResultBoolean) \ macro(IsNumber, NodeResultBoolean) \ macro(IsString, NodeResultBoolean) \ macro(IsObject, NodeResultBoolean) \ + macro(IsObjectOrNull, NodeResultBoolean) \ macro(IsFunction, NodeResultBoolean) \ macro(TypeOf, NodeResultJS) \ macro(LogicalNot, NodeResultBoolean) \ - macro(ToPrimitive, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \ - macro(ToString, NodeResultJS | NodeMustGenerate | NodeMightClobber) \ + macro(ToPrimitive, NodeResultJS | NodeMustGenerate) \ + macro(ToString, NodeResultJS | NodeMustGenerate) \ + macro(CallStringConstructor, NodeResultJS | NodeMustGenerate) \ macro(NewStringObject, NodeResultJS) \ macro(MakeRope, NodeResultJS) \ - macro(In, NodeResultBoolean | NodeMustGenerate | NodeClobbersWorld) \ + macro(In, NodeResultBoolean | NodeMustGenerate) \ + macro(ProfileType, NodeMustGenerate) \ + macro(ProfileControlFlow, NodeMustGenerate) \ \ - /* Nodes used for activations. Activation support works by having it anchored at */\ - /* epilgoues via TearOffActivation, and all CreateActivation nodes kept alive by */\ - /* being threaded with each other. */\ macro(CreateActivation, NodeResultJS) \ - macro(TearOffActivation, NodeMustGenerate) \ - \ - /* Nodes used for arguments. 
Similar to activation support, only it makes even less */\ - /* sense. */\ - macro(CreateArguments, NodeResultJS) \ - macro(PhantomArguments, NodeResultJS | NodeDoesNotExit) \ - macro(TearOffArguments, NodeMustGenerate) \ - macro(GetMyArgumentsLength, NodeResultJS | NodeMustGenerate) \ - macro(GetMyArgumentByVal, NodeResultJS | NodeMustGenerate) \ - macro(GetMyArgumentsLengthSafe, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \ - macro(GetMyArgumentByValSafe, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \ - macro(CheckArgumentsNotCreated, NodeMustGenerate) \ \ - /* Nodes for creating functions. */\ - macro(NewFunctionNoCheck, NodeResultJS) \ + macro(CreateDirectArguments, NodeResultJS) \ + macro(PhantomDirectArguments, NodeResultJS | NodeMustGenerate) \ + macro(CreateScopedArguments, NodeResultJS) \ + macro(CreateClonedArguments, NodeResultJS) \ + macro(PhantomClonedArguments, NodeResultJS | NodeMustGenerate) \ + macro(GetFromArguments, NodeResultJS) \ + macro(PutToArguments, NodeMustGenerate) \ + \ macro(NewFunction, NodeResultJS) \ - macro(NewFunctionExpression, NodeResultJS) \ + \ + macro(NewArrowFunction, NodeResultJS) \ + macro(NewGeneratorFunction, NodeResultJS) \ \ /* These aren't terminals but always exit */ \ macro(Throw, NodeMustGenerate) \ @@ -280,6 +329,9 @@ namespace JSC { namespace DFG { macro(Branch, NodeMustGenerate) \ macro(Switch, NodeMustGenerate) \ macro(Return, NodeMustGenerate) \ + macro(TailCall, NodeMustGenerate | NodeHasVarArgs) \ + macro(TailCallVarargs, NodeMustGenerate) \ + macro(TailCallForwardVarargs, NodeMustGenerate) \ macro(Unreachable, NodeMustGenerate) \ \ /* Count execution. */\ @@ -290,13 +342,26 @@ namespace JSC { namespace DFG { /* different compiler. */\ macro(ForceOSRExit, NodeMustGenerate) \ \ - /* Checks the watchdog timer. If the timer has fired, we OSR exit to the */ \ - /* baseline JIT to redo the watchdog timer check, and service the timer. */ \ + /* Vends a bottom JS value. It is invalid to ever execute this. Useful for cases */\ + /* where we know that we would have exited but we'd like to still track the control */\ + /* flow. */\ + macro(BottomValue, NodeResultJS) \ + \ + /* Checks the watchdog timer. If the timer has fired, we call operation operationHandleWatchdogTimer*/ \ macro(CheckWatchdogTimer, NodeMustGenerate) \ /* Write barriers ! */\ macro(StoreBarrier, NodeMustGenerate) \ - macro(ConditionalStoreBarrier, NodeMustGenerate) \ - macro(StoreBarrierWithNullCheck, NodeMustGenerate) \ + \ + /* For-in enumeration opcodes */\ + macro(GetEnumerableLength, NodeMustGenerate | NodeResultJS) \ + macro(HasIndexedProperty, NodeResultBoolean) \ + macro(HasStructureProperty, NodeResultBoolean) \ + macro(HasGenericProperty, NodeResultBoolean) \ + macro(GetDirectPname, NodeMustGenerate | NodeHasVarArgs | NodeResultJS) \ + macro(GetPropertyEnumerator, NodeMustGenerate | NodeResultJS) \ + macro(GetEnumeratorStructurePname, NodeMustGenerate | NodeResultJS) \ + macro(GetEnumeratorGenericPname, NodeMustGenerate | NodeResultJS) \ + macro(ToIndexString, NodeResultJS) // This enum generates a monotonically increasing id for all Node types, // and is used by the subsequent enum to fill out the id (as accessed via the NodeIdMask). 
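The FOR_EACH_DFG_OP list above is an X-macro: every node type is declared exactly once together with its default NodeFlags, and the list is then expanded multiple times (to generate the monotonically increasing NodeType ids mentioned in the trailing comment, and elsewhere to drive flag tables and dump names). A minimal sketch of that expansion pattern, using a made-up three-op list rather than the real JSC one, might look like this:

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical, much smaller op list in the same X-macro style as FOR_EACH_DFG_OP.
#define FOR_EACH_DEMO_OP(macro) \
    macro(JSConstant, 0x0001) \
    macro(ArithAdd, 0x000a) \
    macro(Branch, 0x0008)

// First expansion: a monotonically increasing id for each op.
enum DemoNodeType : std::uint16_t {
#define DEMO_DEFINE_OP(op, flags) Demo##op,
    FOR_EACH_DEMO_OP(DEMO_DEFINE_OP)
#undef DEMO_DEFINE_OP
    DemoLastNodeType
};

// Second expansion: the default flags associated with each op.
static const std::uint32_t demoDefaultFlags[] = {
#define DEMO_OP_FLAGS(op, flags) flags,
    FOR_EACH_DEMO_OP(DEMO_OP_FLAGS)
#undef DEMO_OP_FLAGS
};

// Third expansion: a printable name for each op, for dump/debug output.
static const char* demoOpName(DemoNodeType type)
{
    switch (type) {
#define DEMO_OP_NAME(op, flags) case Demo##op: return #op;
    FOR_EACH_DEMO_OP(DEMO_OP_NAME)
#undef DEMO_OP_NAME
    default:
        return "<unknown>";
    }
}

int main()
{
    for (int i = 0; i < DemoLastNodeType; ++i) {
        std::printf("%-12s default flags 0x%04x\n",
            demoOpName(static_cast<DemoNodeType>(i)),
            static_cast<unsigned>(demoDefaultFlags[i]));
    }
    return 0;
}
```

The real macro is used the same way, only with the full op list and the NodeFlags combinations shown in this diff (for example, NodeMustGenerate now replaces the removed NodeClobbersWorld/NodeMightClobber annotations).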
diff --git a/Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.cpp b/Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.cpp index a64963581..5e7fb3b8f 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -32,7 +32,7 @@ #include "DFGGraph.h" #include "DFGInsertionSet.h" #include "DFGPhase.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -51,29 +51,22 @@ public: BasicBlock* block = m_graph.block(blockIndex); if (!block) continue; - block->ssa->availabilityAtHead.fill(Availability()); - block->ssa->availabilityAtTail.fill(Availability()); + block->ssa->availabilityAtHead.clear(); + block->ssa->availabilityAtTail.clear(); } BasicBlock* root = m_graph.block(0); - for (unsigned argument = root->ssa->availabilityAtHead.numberOfArguments(); argument--;) { - root->ssa->availabilityAtHead.argument(argument) = - Availability::unavailable().withFlush( - FlushedAt(FlushedJSValue, virtualRegisterForArgument(argument))); + root->ssa->availabilityAtHead.m_locals.fill(Availability::unavailable()); + for (unsigned argument = m_graph.m_argumentFormats.size(); argument--;) { + FlushedAt flushedAt = FlushedAt( + m_graph.m_argumentFormats[argument], + virtualRegisterForArgument(argument)); + root->ssa->availabilityAtHead.m_locals.argument(argument) = Availability(flushedAt); } - for (unsigned local = root->ssa->availabilityAtHead.numberOfLocals(); local--;) - root->ssa->availabilityAtHead.local(local) = Availability::unavailable(); - - if (m_graph.m_plan.mode == FTLForOSREntryMode) { - for (unsigned local = m_graph.m_profiledBlock->m_numCalleeRegisters; local--;) { - root->ssa->availabilityAtHead.local(local) = - Availability::unavailable().withFlush( - FlushedAt(FlushedJSValue, virtualRegisterForLocal(local))); - } - } - + // This could be made more efficient by processing blocks in reverse postorder. 
- Operands<Availability> availability; + + LocalOSRAvailabilityCalculator calculator; bool changed; do { changed = false; @@ -83,55 +76,22 @@ public: if (!block) continue; - availability = block->ssa->availabilityAtHead; + calculator.beginBlock(block); - for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { - Node* node = block->at(nodeIndex); - - switch (node->op()) { - case SetLocal: { - VariableAccessData* variable = node->variableAccessData(); - availability.operand(variable->local()) = - Availability(node->child1().node(), variable->flushedAt()); - break; - } - - case GetArgument: { - VariableAccessData* variable = node->variableAccessData(); - availability.operand(variable->local()) = - Availability(node, variable->flushedAt()); - break; - } - - case MovHint: { - availability.operand(node->unlinkedLocal()) = - Availability(node->child1().node()); - break; - } - - case ZombieHint: { - availability.operand(node->unlinkedLocal()) = - Availability::unavailable(); - break; - } - - default: - break; - } - } + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) + calculator.executeNode(block->at(nodeIndex)); - if (availability == block->ssa->availabilityAtTail) + if (calculator.m_availability == block->ssa->availabilityAtTail) continue; - block->ssa->availabilityAtTail = availability; + block->ssa->availabilityAtTail = calculator.m_availability; changed = true; for (unsigned successorIndex = block->numSuccessors(); successorIndex--;) { BasicBlock* successor = block->successor(successorIndex); - for (unsigned i = availability.size(); i--;) { - successor->ssa->availabilityAtHead[i] = availability[i].merge( - successor->ssa->availabilityAtHead[i]); - } + successor->ssa->availabilityAtHead.merge(calculator.m_availability); + successor->ssa->availabilityAtHead.pruneByLiveness( + m_graph, successor->at(0)->origin.forExit); } } } while (changed); @@ -146,6 +106,110 @@ bool performOSRAvailabilityAnalysis(Graph& graph) return runPhase<OSRAvailabilityAnalysisPhase>(graph); } +LocalOSRAvailabilityCalculator::LocalOSRAvailabilityCalculator() +{ +} + +LocalOSRAvailabilityCalculator::~LocalOSRAvailabilityCalculator() +{ +} + +void LocalOSRAvailabilityCalculator::beginBlock(BasicBlock* block) +{ + m_availability = block->ssa->availabilityAtHead; +} + +void LocalOSRAvailabilityCalculator::endBlock(BasicBlock* block) +{ + m_availability = block->ssa->availabilityAtTail; +} + +void LocalOSRAvailabilityCalculator::executeNode(Node* node) +{ + switch (node->op()) { + case PutStack: { + StackAccessData* data = node->stackAccessData(); + m_availability.m_locals.operand(data->local).setFlush(data->flushedAt()); + break; + } + + case KillStack: { + m_availability.m_locals.operand(node->unlinkedLocal()).setFlush(FlushedAt(ConflictingFlush)); + break; + } + + case GetStack: { + StackAccessData* data = node->stackAccessData(); + m_availability.m_locals.operand(data->local) = Availability(node, data->flushedAt()); + break; + } + + case MovHint: { + m_availability.m_locals.operand(node->unlinkedLocal()).setNode(node->child1().node()); + break; + } + + case ZombieHint: { + m_availability.m_locals.operand(node->unlinkedLocal()).setNodeUnavailable(); + break; + } + + case LoadVarargs: + case ForwardVarargs: { + LoadVarargsData* data = node->loadVarargsData(); + m_availability.m_locals.operand(data->count) = + Availability(FlushedAt(FlushedInt32, data->machineCount)); + for (unsigned i = data->limit; i--;) { + m_availability.m_locals.operand(VirtualRegister(data->start.offset() + i)) = + 
Availability(FlushedAt(FlushedJSValue, VirtualRegister(data->machineStart.offset() + i))); + } + break; + } + + case PhantomDirectArguments: + case PhantomClonedArguments: { + InlineCallFrame* inlineCallFrame = node->origin.semantic.inlineCallFrame; + if (!inlineCallFrame) { + // We don't need to record anything about how the arguments are to be recovered. It's just a + // given that we can read them from the stack. + break; + } + + if (inlineCallFrame->isVarargs()) { + // Record how to read each argument and the argument count. + Availability argumentCount = + m_availability.m_locals.operand(inlineCallFrame->stackOffset + JSStack::ArgumentCount); + + m_availability.m_heap.set(PromotedHeapLocation(ArgumentCountPLoc, node), argumentCount); + } + + if (inlineCallFrame->isClosureCall) { + Availability callee = m_availability.m_locals.operand( + inlineCallFrame->stackOffset + JSStack::Callee); + m_availability.m_heap.set(PromotedHeapLocation(ArgumentsCalleePLoc, node), callee); + } + + for (unsigned i = 0; i < inlineCallFrame->arguments.size() - 1; ++i) { + Availability argument = m_availability.m_locals.operand( + inlineCallFrame->stackOffset + CallFrame::argumentOffset(i)); + + m_availability.m_heap.set(PromotedHeapLocation(ArgumentPLoc, node, i), argument); + } + break; + } + + case PutHint: { + m_availability.m_heap.set( + PromotedHeapLocation(node->child1().node(), node->promotedLocationDescriptor()), + Availability(node->child2().node())); + break; + } + + default: + break; + } +} + } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.h b/Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.h index 28bf505da..28ad891fa 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.h +++ b/Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,10 +26,9 @@ #ifndef DFGOSRAvailabilityAnalysisPhase_h #define DFGOSRAvailabilityAnalysisPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) +#include "DFGBasicBlock.h" #include "DFGCommon.h" namespace JSC { namespace DFG { @@ -37,10 +36,27 @@ namespace JSC { namespace DFG { class Graph; // Computes BasicBlock::ssa->availabiltiyAtHead/Tail. This is a forward flow type inference -// over MovHints and SetLocals. +// over MovHints and SetLocals. This analysis is run directly by the Plan for preparing for +// lowering to B3 IR, but it can also be used as a utility. Note that if you run it before +// stack layout, all of the flush availability will omit the virtual register - but it will +// tell you the format. bool performOSRAvailabilityAnalysis(Graph&); +// Local calculator for figuring out the availability at any node in a basic block. Requires +// having run the availability analysis. +class LocalOSRAvailabilityCalculator { +public: + LocalOSRAvailabilityCalculator(); + ~LocalOSRAvailabilityCalculator(); + + void beginBlock(BasicBlock*); + void endBlock(BasicBlock*); // Useful if you want to get data for the end of the block. You don't need to call this if you did beginBlock() and then executeNode() for every node. 
+ void executeNode(Node*); + + AvailabilityMap m_availability; +}; + } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp index 2efb008d0..80e688027 100644 --- a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013, 2014, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -34,17 +34,73 @@ #include "DFGNode.h" #include "JIT.h" #include "JSStackInlines.h" -#include "Operations.h" +#include "JSCInlines.h" +#include <wtf/CommaPrinter.h> namespace JSC { namespace DFG { +void OSREntryData::dumpInContext(PrintStream& out, DumpContext* context) const +{ + out.print("bc#", m_bytecodeIndex, ", machine code offset = ", m_machineCodeOffset); + out.print(", stack rules = ["); + + auto printOperand = [&] (VirtualRegister reg) { + out.print(inContext(m_expectedValues.operand(reg), context), " ("); + VirtualRegister toReg; + bool overwritten = false; + for (OSREntryReshuffling reshuffling : m_reshufflings) { + if (reg == VirtualRegister(reshuffling.fromOffset)) { + toReg = VirtualRegister(reshuffling.toOffset); + break; + } + if (reg == VirtualRegister(reshuffling.toOffset)) + overwritten = true; + } + if (!overwritten && !toReg.isValid()) + toReg = reg; + if (toReg.isValid()) { + if (toReg.isLocal() && !m_machineStackUsed.get(toReg.toLocal())) + out.print("ignored"); + else + out.print("maps to ", toReg); + } else + out.print("overwritten"); + if (reg.isLocal() && m_localsForcedDouble.get(reg.toLocal())) + out.print(", forced double"); + if (reg.isLocal() && m_localsForcedMachineInt.get(reg.toLocal())) + out.print(", forced machine int"); + out.print(")"); + }; + + CommaPrinter comma; + for (size_t argumentIndex = m_expectedValues.numberOfArguments(); argumentIndex--;) { + out.print(comma, "arg", argumentIndex, ":"); + printOperand(virtualRegisterForArgument(argumentIndex)); + } + for (size_t localIndex = 0; localIndex < m_expectedValues.numberOfLocals(); ++localIndex) { + out.print(comma, "loc", localIndex, ":"); + printOperand(virtualRegisterForLocal(localIndex)); + } + + out.print("], machine stack used = ", m_machineStackUsed); +} + +void OSREntryData::dump(PrintStream& out) const +{ + dumpInContext(out, nullptr); +} + +SUPPRESS_ASAN void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIndex) { ASSERT(JITCode::isOptimizingJIT(codeBlock->jitType())); ASSERT(codeBlock->alternative()); ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT); ASSERT(!codeBlock->jitCodeMap()); - + + if (!Options::useOSREntryToDFG()) + return 0; + if (Options::verboseOSR()) { dataLog( "DFG OSR in ", *codeBlock->alternative(), " -> ", *codeBlock, @@ -52,6 +108,12 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn } VM* vm = &exec->vm(); + + sanitizeStackForVM(vm); + + if (bytecodeIndex) + codeBlock->ownerScriptExecutable()->setDidTryToEnterInLoop(true); + if (codeBlock->jitType() != JITCode::DFGJIT) { RELEASE_ASSERT(codeBlock->jitType() == JITCode::FTLJIT); @@ -124,7 +186,7 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn JSValue value; if (!argument) - value = exec->hostThisValue(); + value = exec->thisValue(); else value = exec->argument(argument - 1); 
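The OSREntryData::dumpInContext code above classifies each register for the dump: follow the reshuffle whose source is this register to see where its value ends up, report "overwritten" if another reshuffle targets it first, and "ignored" if the destination local is not marked in m_machineStackUsed. The same decision, extracted into a standalone function over plain ints (a simplified mirror of the shown control flow, with the isLocal distinction dropped):

#include <climits>
#include <cstdio>
#include <set>
#include <string>
#include <vector>

struct Reshuffle { int fromOffset; int toOffset; };

// Where does the value currently in `reg` end up after reshuffling, and does anyone care?
static std::string classify(int reg, const std::vector<Reshuffle>& reshufflings, const std::set<int>& machineStackUsed)
{
    int toReg = INT_MIN;                 // "invalid"
    bool overwritten = false;
    for (const Reshuffle& r : reshufflings) {
        if (reg == r.fromOffset) { toReg = r.toOffset; break; }
        if (reg == r.toOffset) overwritten = true;
    }
    if (!overwritten && toReg == INT_MIN)
        toReg = reg;                     // untouched: it stays where it is
    if (toReg == INT_MIN)
        return "overwritten";
    if (!machineStackUsed.count(toReg))
        return "ignored";
    return "maps to " + std::to_string(toReg);
}

int main()
{
    std::vector<Reshuffle> moves = { { -1, -3 }, { -2, -1 } };
    std::set<int> used = { -1, -2 };
    for (int reg : { -1, -2, -3 })
        std::printf("reg %d: %s\n", reg, classify(reg, moves, used).c_str());
    return 0;
}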
@@ -141,33 +203,33 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn for (size_t local = 0; local < entry->m_expectedValues.numberOfLocals(); ++local) { int localOffset = virtualRegisterForLocal(local).offset(); if (entry->m_localsForcedDouble.get(local)) { - if (!exec->registers()[localOffset].jsValue().isNumber()) { + if (!exec->registers()[localOffset].asanUnsafeJSValue().isNumber()) { if (Options::verboseOSR()) { dataLog( " OSR failed because variable ", localOffset, " is ", - exec->registers()[localOffset].jsValue(), ", expected number.\n"); + exec->registers()[localOffset].asanUnsafeJSValue(), ", expected number.\n"); } return 0; } continue; } if (entry->m_localsForcedMachineInt.get(local)) { - if (!exec->registers()[localOffset].jsValue().isMachineInt()) { + if (!exec->registers()[localOffset].asanUnsafeJSValue().isMachineInt()) { if (Options::verboseOSR()) { dataLog( " OSR failed because variable ", localOffset, " is ", - exec->registers()[localOffset].jsValue(), ", expected ", + exec->registers()[localOffset].asanUnsafeJSValue(), ", expected ", "machine int.\n"); } return 0; } continue; } - if (!entry->m_expectedValues.local(local).validate(exec->registers()[localOffset].jsValue())) { + if (!entry->m_expectedValues.local(local).validate(exec->registers()[localOffset].asanUnsafeJSValue())) { if (Options::verboseOSR()) { dataLog( " OSR failed because variable ", localOffset, " is ", - exec->registers()[localOffset].jsValue(), ", expected ", + exec->registers()[localOffset].asanUnsafeJSValue(), ", expected ", entry->m_expectedValues.local(local), ".\n"); } return 0; @@ -181,7 +243,8 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn // it seems silly: you'd be diverting the program to error handling when it // would have otherwise just kept running albeit less quickly. - if (!vm->interpreter->stack().grow(&exec->registers()[virtualRegisterForLocal(jitCode->common.requiredRegisterCountForExecutionAndExit()).offset()])) { + unsigned frameSizeForCheck = jitCode->common.requiredRegisterCountForExecutionAndExit(); + if (!vm->interpreter->stack().ensureCapacityFor(&exec->registers()[virtualRegisterForLocal(frameSizeForCheck - 1).offset()])) { if (Options::verboseOSR()) dataLogF(" OSR failed because stack growth failed.\n"); return 0; @@ -189,36 +252,87 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn if (Options::verboseOSR()) dataLogF(" OSR should succeed.\n"); + + // At this point we're committed to entering. We will do some work to set things up, + // but we also rely on our caller recognizing that when we return a non-null pointer, + // that means that we're already past the point of no return and we must succeed at + // entering. - // 3) Perform data format conversions. - for (size_t local = 0; local < entry->m_expectedValues.numberOfLocals(); ++local) { - if (entry->m_localsForcedDouble.get(local)) - *bitwise_cast<double*>(exec->registers() + virtualRegisterForLocal(local).offset()) = exec->registers()[virtualRegisterForLocal(local).offset()].jsValue().asNumber(); - if (entry->m_localsForcedMachineInt.get(local)) - *bitwise_cast<int64_t*>(exec->registers() + virtualRegisterForLocal(local).offset()) = exec->registers()[virtualRegisterForLocal(local).offset()].jsValue().asMachineInt() << JSValue::int52ShiftAmount; + // 3) Set up the data in the scratch buffer and perform data format conversions. 
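Before committing to entry, the hunk above validates every live local against what the DFG compiled for: locals forced to double must currently hold numbers, locals forced to machine int must hold values representable as int52, and everything else must pass the recorded expected-value check; any mismatch returns null so execution stays in the baseline JIT. A self-contained sketch of that gatekeeping step, with an invented value model and the expected-value check reduced to a comment:

#include <cmath>
#include <cstdio>
#include <vector>

struct Value { bool isNumber; double number; };
enum class Constraint { None, ForcedDouble, ForcedMachineInt };

// Rough int52 range check, standing in for "is this representable as a machine int?"
static bool fitsInInt52(double d)
{
    if (!std::isfinite(d) || d != std::trunc(d))
        return false;
    return d >= -(double)(1ll << 51) && d < (double)(1ll << 51);
}

// Returns false ("stay in the baseline JIT") on the first local that fails its check.
static bool validateLocals(const std::vector<Value>& locals, const std::vector<Constraint>& constraints)
{
    for (size_t i = 0; i < locals.size(); ++i) {
        const Value& v = locals[i];
        switch (constraints[i]) {
        case Constraint::ForcedDouble:
            if (!v.isNumber) return false;
            break;
        case Constraint::ForcedMachineInt:
            if (!v.isNumber || !fitsInInt52(v.number)) return false;
            break;
        case Constraint::None:
            break;  // the real code also validates against the profiled expected value here
        }
    }
    return true;
}

int main()
{
    std::vector<Value> locals = { { true, 1.5 }, { true, 42 } };
    std::vector<Constraint> constraints = { Constraint::ForcedDouble, Constraint::ForcedMachineInt };
    std::printf("OSR entry %s\n", validateLocals(locals, constraints) ? "should succeed" : "failed");
    return 0;
}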
+ + unsigned frameSize = jitCode->common.frameRegisterCount; + unsigned baselineFrameSize = entry->m_expectedValues.numberOfLocals(); + unsigned maxFrameSize = std::max(frameSize, baselineFrameSize); + + Register* scratch = bitwise_cast<Register*>(vm->scratchBufferForSize(sizeof(Register) * (2 + JSStack::CallFrameHeaderSize + maxFrameSize))->dataBuffer()); + + *bitwise_cast<size_t*>(scratch + 0) = frameSize; + + void* targetPC = codeBlock->jitCode()->executableAddressAtOffset(entry->m_machineCodeOffset); + if (Options::verboseOSR()) + dataLogF(" OSR using target PC %p.\n", targetPC); + RELEASE_ASSERT(targetPC); + *bitwise_cast<void**>(scratch + 1) = targetPC; + + Register* pivot = scratch + 2 + JSStack::CallFrameHeaderSize; + + for (int index = -JSStack::CallFrameHeaderSize; index < static_cast<int>(baselineFrameSize); ++index) { + VirtualRegister reg(-1 - index); + + if (reg.isLocal()) { + if (entry->m_localsForcedDouble.get(reg.toLocal())) { + *bitwise_cast<double*>(pivot + index) = exec->registers()[reg.offset()].asanUnsafeJSValue().asNumber(); + continue; + } + + if (entry->m_localsForcedMachineInt.get(reg.toLocal())) { + *bitwise_cast<int64_t*>(pivot + index) = exec->registers()[reg.offset()].asanUnsafeJSValue().asMachineInt() << JSValue::int52ShiftAmount; + continue; + } + } + + pivot[index] = exec->registers()[reg.offset()].asanUnsafeJSValue(); } // 4) Reshuffle those registers that need reshuffling. - - Vector<EncodedJSValue> temporaryLocals(entry->m_reshufflings.size()); - EncodedJSValue* registers = bitwise_cast<EncodedJSValue*>(exec->registers()); + Vector<JSValue> temporaryLocals(entry->m_reshufflings.size()); for (unsigned i = entry->m_reshufflings.size(); i--;) - temporaryLocals[i] = registers[entry->m_reshufflings[i].fromOffset]; + temporaryLocals[i] = pivot[VirtualRegister(entry->m_reshufflings[i].fromOffset).toLocal()].asanUnsafeJSValue(); for (unsigned i = entry->m_reshufflings.size(); i--;) - registers[entry->m_reshufflings[i].toOffset] = temporaryLocals[i]; - - // 5) Fix the call frame. + pivot[VirtualRegister(entry->m_reshufflings[i].toOffset).toLocal()] = temporaryLocals[i]; - exec->setCodeBlock(codeBlock); + // 5) Clear those parts of the call frame that the DFG ain't using. This helps GC on + // some programs by eliminating some stale pointer pathologies. + for (unsigned i = frameSize; i--;) { + if (entry->m_machineStackUsed.get(i)) + continue; + pivot[i] = JSValue(); + } + + // 6) Copy our callee saves to buffer. +#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0 + RegisterAtOffsetList* registerSaveLocations = codeBlock->calleeSaveRegisters(); + RegisterAtOffsetList* allCalleeSaves = vm->getAllCalleeSaveRegisterOffsets(); + RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs()); + + unsigned registerCount = registerSaveLocations->size(); + for (unsigned i = 0; i < registerCount; i++) { + RegisterAtOffset currentEntry = registerSaveLocations->at(i); + if (dontSaveRegisters.get(currentEntry.reg())) + continue; + RegisterAtOffset* vmCalleeSavesEntry = allCalleeSaves->find(currentEntry.reg()); + + *(bitwise_cast<intptr_t*>(pivot - 1) - currentEntry.offsetAsIndex()) = vm->calleeSaveRegistersBuffer[vmCalleeSavesEntry->offsetAsIndex()]; + } +#endif - // 6) Find and return the destination machine code address. + // 7) Fix the call frame to have the right code block. 
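The rewritten entry path above no longer converts values in place on the stack; it builds a side buffer that the OSR entry thunk consumes: slot 0 holds the DFG frame size, slot 1 the target machine PC, and the frame contents (call-frame header plus locals) follow, addressed through a pivot pointer. Forced doubles are stored as raw IEEE bits, forced machine ints as the value shifted into int52 form, and machine-stack slots the DFG never uses are cleared. A rough, self-contained model of that layout (the header size and the int52 shift amount are assumed stand-ins, not JSC constants):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

using Slot = uint64_t;                    // toy stand-in for a JSC Register slot

enum class Format { JSValue, ForcedDouble, ForcedMachineInt };
struct Local { Format format; double number; uint64_t encodedJSValue; };

constexpr int kHeaderSlots = 4;           // stand-in for JSStack::CallFrameHeaderSize
constexpr int kInt52Shift = 12;           // assumed stand-in for JSValue::int52ShiftAmount

static std::vector<Slot> buildEntryBuffer(size_t dfgFrameSize, const std::vector<Local>& baselineLocals,
    const std::vector<bool>& machineStackUsed, void* targetPC)
{
    size_t maxFrame = std::max(dfgFrameSize, baselineLocals.size());
    std::vector<Slot> scratch(2 + kHeaderSlots + maxFrame, 0);

    scratch[0] = dfgFrameSize;                                 // slot 0: frame register count
    std::memcpy(&scratch[1], &targetPC, sizeof targetPC);      // slot 1: machine code target

    Slot* pivot = scratch.data() + 2 + kHeaderSlots;           // header lives at pivot[-kHeaderSlots..-1]

    for (size_t i = 0; i < baselineLocals.size(); ++i) {
        const Local& local = baselineLocals[i];
        switch (local.format) {
        case Format::ForcedDouble:                             // raw IEEE bits, no boxing
            std::memcpy(&pivot[i], &local.number, sizeof(double));
            break;
        case Format::ForcedMachineInt:                         // int52: integer shifted into place
            pivot[i] = static_cast<uint64_t>(static_cast<int64_t>(local.number) << kInt52Shift);
            break;
        case Format::JSValue:
            pivot[i] = local.encodedJSValue;
            break;
        }
    }

    for (size_t i = 0; i < dfgFrameSize; ++i)                  // clear slots the DFG does not use
        if (i >= machineStackUsed.size() || !machineStackUsed[i])
            pivot[i] = 0;
    return scratch;
}

int main()
{
    std::vector<Local> locals = { { Format::ForcedDouble, 3.5, 0 }, { Format::ForcedMachineInt, 7, 0 } };
    std::vector<bool> used = { true, true, false };
    auto buffer = buildEntryBuffer(3, locals, used, reinterpret_cast<void*>(0x1234));
    std::printf("frame size slot = %llu, buffer slots = %zu\n",
        (unsigned long long)buffer[0], buffer.size());
    return 0;
}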
- void* result = codeBlock->jitCode()->executableAddressAtOffset(entry->m_machineCodeOffset); + *bitwise_cast<CodeBlock**>(pivot - 1 - JSStack::CodeBlock) = codeBlock; if (Options::verboseOSR()) - dataLogF(" OSR returning machine code address %p.\n", result); - - return result; + dataLogF(" OSR returning data buffer %p.\n", scratch); + return scratch; } } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGOSREntry.h b/Source/JavaScriptCore/dfg/DFGOSREntry.h index edca84bff..04aaabfee 100644 --- a/Source/JavaScriptCore/dfg/DFGOSREntry.h +++ b/Source/JavaScriptCore/dfg/DFGOSREntry.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -59,6 +59,10 @@ struct OSREntryData { BitVector m_localsForcedDouble; BitVector m_localsForcedMachineInt; Vector<OSREntryReshuffling> m_reshufflings; + BitVector m_machineStackUsed; + + void dumpInContext(PrintStream&, DumpContext*) const; + void dump(PrintStream&) const; }; inline unsigned getOSREntryDataBytecodeIndex(OSREntryData* osrEntryData) @@ -66,6 +70,8 @@ inline unsigned getOSREntryDataBytecodeIndex(OSREntryData* osrEntryData) return osrEntryData->m_bytecodeIndex; } +// Returns a pointer to a data buffer that the OSR entry thunk will recognize and +// parse. If this returns null, it means void* prepareOSREntry(ExecState*, CodeBlock*, unsigned bytecodeIndex); #else inline void* prepareOSREntry(ExecState*, CodeBlock*, unsigned) { return 0; } diff --git a/Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.cpp b/Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.cpp index 4f82d15fa..5e5a1504c 100644 --- a/Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -33,7 +33,7 @@ #include "DFGGraph.h" #include "DFGLoopPreHeaderCreationPhase.h" #include "DFGPhase.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -54,7 +54,7 @@ public: RELEASE_ASSERT(bytecodeIndex != UINT_MAX); // Needed by createPreHeader(). - m_graph.m_dominators.computeIfNecessary(m_graph); + m_graph.ensureDominators(); CodeBlock* baseline = m_graph.m_profiledBlock; @@ -63,9 +63,12 @@ public: BasicBlock* block = m_graph.block(blockIndex); if (!block) continue; + unsigned nodeIndex = 0; Node* firstNode = block->at(0); + while (firstNode->isSemanticallySkippable()) + firstNode = block->at(++nodeIndex); if (firstNode->op() == LoopHint - && firstNode->codeOrigin == CodeOrigin(bytecodeIndex)) { + && firstNode->origin.semantic == CodeOrigin(bytecodeIndex)) { target = block; break; } @@ -80,46 +83,59 @@ public: BlockInsertionSet insertionSet(m_graph); - BasicBlock* newRoot = insertionSet.insert(0); - CodeOrigin codeOrigin = target->at(0)->codeOrigin; + // We say that the execution count of the entry block is 1, because we know for sure + // that this must be the case. Under our definition of executionCount, "1" means "once + // per invocation". 
We could have said NaN here, since that would ask any clients of + // executionCount to use best judgement - but that seems unnecessary since we know for + // sure what the executionCount should be in this case. + BasicBlock* newRoot = insertionSet.insert(0, 1); + + // We'd really like to use an unset origin, but ThreadedCPS won't allow that. + NodeOrigin origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), false); - for (int argument = 0; argument < baseline->numParameters(); ++argument) { - Node* oldNode = target->variablesAtHead.argument(argument); - if (!oldNode) { - // Just for sanity, always have a SetArgument even if it's not needed. - oldNode = m_graph.m_arguments[argument]; - } - Node* node = newRoot->appendNode( - m_graph, SpecNone, SetArgument, codeOrigin, - OpInfo(oldNode->variableAccessData())); - m_graph.m_arguments[argument] = node; - } - Vector<Node*> locals(baseline->m_numCalleeRegisters); - for (int local = 0; local < baseline->m_numCalleeRegisters; ++local) { + Vector<Node*> locals(baseline->m_numCalleeLocals); + for (int local = 0; local < baseline->m_numCalleeLocals; ++local) { Node* previousHead = target->variablesAtHead.local(local); if (!previousHead) continue; VariableAccessData* variable = previousHead->variableAccessData(); locals[local] = newRoot->appendNode( - m_graph, variable->prediction(), ExtractOSREntryLocal, codeOrigin, + m_graph, variable->prediction(), ExtractOSREntryLocal, origin, OpInfo(variable->local().offset())); newRoot->appendNode( - m_graph, SpecNone, MovHint, codeOrigin, OpInfo(variable->local().offset()), + m_graph, SpecNone, MovHint, origin, OpInfo(variable->local().offset()), Edge(locals[local])); } - for (int local = 0; local < baseline->m_numCalleeRegisters; ++local) { + + // Now use the origin of the target, since it's not OK to exit, and we will probably hoist + // type checks to here. + origin = target->at(0)->origin; + + for (int argument = 0; argument < baseline->numParameters(); ++argument) { + Node* oldNode = target->variablesAtHead.argument(argument); + if (!oldNode) { + // Just for sanity, always have a SetArgument even if it's not needed. 
+ oldNode = m_graph.m_arguments[argument]; + } + Node* node = newRoot->appendNode( + m_graph, SpecNone, SetArgument, origin, + OpInfo(oldNode->variableAccessData())); + m_graph.m_arguments[argument] = node; + } + + for (int local = 0; local < baseline->m_numCalleeLocals; ++local) { Node* previousHead = target->variablesAtHead.local(local); if (!previousHead) continue; VariableAccessData* variable = previousHead->variableAccessData(); Node* node = locals[local]; newRoot->appendNode( - m_graph, SpecNone, SetLocal, codeOrigin, OpInfo(variable), Edge(node)); + m_graph, SpecNone, SetLocal, origin, OpInfo(variable), Edge(node)); } newRoot->appendNode( - m_graph, SpecNone, Jump, codeOrigin, + m_graph, SpecNone, Jump, origin, OpInfo(createPreHeader(m_graph, insertionSet, target))); insertionSet.execute(); diff --git a/Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.h b/Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.h index a76372126..2b9beba47 100644 --- a/Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.h +++ b/Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.h @@ -26,8 +26,6 @@ #ifndef DFGOSREntrypointCreationPhase_h #define DFGOSREntrypointCreationPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) namespace JSC { namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp index 538a85a01..b95d4093e 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp @@ -30,20 +30,26 @@ #include "AssemblyHelpers.h" #include "DFGGraph.h" +#include "DFGMayExit.h" #include "DFGSpeculativeJIT.h" -#include "JSCellInlines.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { OSRExit::OSRExit(ExitKind kind, JSValueSource jsValueSource, MethodOfGettingAValueProfile valueProfile, SpeculativeJIT* jit, unsigned streamIndex, unsigned recoveryIndex) - : OSRExitBase(kind, jit->m_codeOriginForExitTarget, jit->m_codeOriginForExitProfile) + : OSRExitBase(kind, jit->m_origin.forExit, jit->m_origin.semantic) , m_jsValueSource(jsValueSource) , m_valueProfile(valueProfile) , m_patchableCodeOffset(0) , m_recoveryIndex(recoveryIndex) , m_streamIndex(streamIndex) { - ASSERT(m_codeOrigin.isSet()); + bool canExit = jit->m_origin.exitOK; + if (!canExit && jit->m_currentNode) { + ExitMode exitMode = mayExit(jit->m_jit.graph(), jit->m_currentNode); + canExit = exitMode == ExitMode::Exits || exitMode == ExitMode::ExitsForExceptions; + } + DFG_ASSERT(jit->m_jit.graph(), jit->m_currentNode, canExit); } void OSRExit::setPatchableCodeOffset(MacroAssembler::PatchableJump check) diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.h b/Source/JavaScriptCore/dfg/DFGOSRExit.h index d40efe4e0..a14d54f84 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExit.h +++ b/Source/JavaScriptCore/dfg/DFGOSRExit.h @@ -26,15 +26,12 @@ #ifndef DFGOSRExit_h #define DFGOSRExit_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "CodeOrigin.h" #include "DFGCommon.h" #include "DFGExitProfile.h" #include "DFGOSRExitBase.h" -#include "DFGValueRecoveryOverride.h" #include "GPRInfo.h" #include "MacroAssembler.h" #include "MethodOfGettingAValueProfile.h" @@ -96,19 +93,23 @@ struct OSRExit : public OSRExitBase { unsigned m_patchableCodeOffset; unsigned m_recoveryIndex; - + void setPatchableCodeOffset(MacroAssembler::PatchableJump); MacroAssembler::Jump getPatchableCodeOffsetAsJump() const; CodeLocationJump codeLocationForRepatch(CodeBlock*) const; void correctJump(LinkBuffer&); unsigned m_streamIndex; - - 
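The entrypoint creation phase in the hunk above picks its target by scanning each block, skipping any leading nodes that are semantically skippable, and checking whether the first real node is a LoopHint whose semantic origin matches the requested bytecode index. A standalone sketch of that search over toy node and block structures (a model, not the real Graph API):

#include <cstdio>
#include <vector>

enum class Op { LoopHint, Other };

struct Node {
    Op op;
    unsigned bytecodeIndex;
    bool semanticallySkippable;   // e.g. hints and checks with no user-visible effect
};

struct Block { std::vector<Node> nodes; };

// Returns the index of the block whose first non-skippable node is a LoopHint at `bytecodeIndex`,
// or -1 if there is none (in which case entrypoint creation gives up).
static int findTargetBlock(const std::vector<Block>& blocks, unsigned bytecodeIndex)
{
    for (size_t blockIndex = 0; blockIndex < blocks.size(); ++blockIndex) {
        const Block& block = blocks[blockIndex];
        size_t nodeIndex = 0;
        while (nodeIndex < block.nodes.size() && block.nodes[nodeIndex].semanticallySkippable)
            ++nodeIndex;
        if (nodeIndex == block.nodes.size())
            continue;
        const Node& firstNode = block.nodes[nodeIndex];
        if (firstNode.op == Op::LoopHint && firstNode.bytecodeIndex == bytecodeIndex)
            return static_cast<int>(blockIndex);
    }
    return -1;
}

int main()
{
    std::vector<Block> blocks(2);
    blocks[0].nodes = { { Op::Other, 0, true }, { Op::Other, 0, false } };
    blocks[1].nodes = { { Op::Other, 0, true }, { Op::LoopHint, 42, false } };
    std::printf("target block = %d\n", findTargetBlock(blocks, 42));
    return 0;
}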
RefPtr<ValueRecoveryOverride> m_valueRecoveryOverride; + void considerAddingAsFrequentExitSite(CodeBlock* profiledCodeBlock) + { + OSRExitBase::considerAddingAsFrequentExitSite(profiledCodeBlock, ExitFromDFG); + } }; struct SpeculationFailureDebugInfo { CodeBlock* codeBlock; + ExitKind kind; + unsigned bytecodeOffset; }; } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitBase.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitBase.cpp index ebfd27f2e..0197f2c18 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitBase.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSRExitBase.cpp @@ -31,19 +31,18 @@ #include "CodeBlock.h" #include "DFGBasicBlock.h" #include "DFGNode.h" -#include "Operations.h" +#include "InlineCallFrame.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { -bool OSRExitBase::considerAddingAsFrequentExitSiteSlow(CodeBlock* profiledCodeBlock) +void OSRExitBase::considerAddingAsFrequentExitSiteSlow(CodeBlock* profiledCodeBlock, ExitingJITType jitType) { CodeBlock* sourceProfiledCodeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock( m_codeOriginForExitProfile, profiledCodeBlock); - if (!sourceProfiledCodeBlock) - return false; - return sourceProfiledCodeBlock->addFrequentExitSite( - FrequentExitSite(m_codeOriginForExitProfile.bytecodeIndex, m_kind)); + if (sourceProfiledCodeBlock) + sourceProfiledCodeBlock->addFrequentExitSite(FrequentExitSite(m_codeOriginForExitProfile.bytecodeIndex, m_kind, jitType)); } } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitBase.h b/Source/JavaScriptCore/dfg/DFGOSRExitBase.h index ee1d69de7..78510c163 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitBase.h +++ b/Source/JavaScriptCore/dfg/DFGOSRExitBase.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGOSRExitBase_h #define DFGOSRExitBase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "CodeOrigin.h" @@ -57,16 +55,30 @@ struct OSRExitBase { CodeOrigin m_codeOrigin; CodeOrigin m_codeOriginForExitProfile; + CallSiteIndex m_exceptionHandlerCallSiteIndex; + + ALWAYS_INLINE bool isExceptionHandler() const + { + return m_kind == ExceptionCheck || m_kind == GenericUnwind; + } + + // True if this exit is used as an exception handler for unwinding. This happens to only be set when + // isExceptionHandler is true, but all this actually means is that the OSR exit will assume that the + // machine state is as it would be coming out of genericUnwind. 
+ ALWAYS_INLINE bool isGenericUnwindHandler() const + { + return m_kind == GenericUnwind; + } - bool considerAddingAsFrequentExitSite(CodeBlock* profiledCodeBlock) +protected: + void considerAddingAsFrequentExitSite(CodeBlock* profiledCodeBlock, ExitingJITType jitType) { - if (!m_count) - return false; - return considerAddingAsFrequentExitSiteSlow(profiledCodeBlock); + if (m_count) + considerAddingAsFrequentExitSiteSlow(profiledCodeBlock, jitType); } private: - bool considerAddingAsFrequentExitSiteSlow(CodeBlock* profiledCodeBlock); + void considerAddingAsFrequentExitSiteSlow(CodeBlock* profiledCodeBlock, ExitingJITType); }; } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompilationInfo.h b/Source/JavaScriptCore/dfg/DFGOSRExitCompilationInfo.h index 9eeb4532d..75f1d21c4 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitCompilationInfo.h +++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompilationInfo.h @@ -26,8 +26,6 @@ #ifndef DFGOSRExitCompilationInfo_h #define DFGOSRExitCompilationInfo_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "CodeOrigin.h" diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp index f8c9fb067..1f3f98baf 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011-2013, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -34,23 +34,93 @@ #include "DFGOSRExitPreparation.h" #include "LinkBuffer.h" #include "OperandsInlines.h" -#include "Operations.h" -#include "RepatchBuffer.h" +#include "JSCInlines.h" #include <wtf/StringPrintStream.h> namespace JSC { namespace DFG { +void OSRExitCompiler::emitRestoreArguments(const Operands<ValueRecovery>& operands) +{ + HashMap<MinifiedID, int> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand. 
+ for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; + int operand = operands.operandForIndex(index); + + if (recovery.technique() != DirectArgumentsThatWereNotCreated + && recovery.technique() != ClonedArgumentsThatWereNotCreated) + continue; + + MinifiedID id = recovery.nodeID(); + auto iter = alreadyAllocatedArguments.find(id); + if (iter != alreadyAllocatedArguments.end()) { + JSValueRegs regs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT1); + m_jit.loadValue(CCallHelpers::addressFor(iter->value), regs); + m_jit.storeValue(regs, CCallHelpers::addressFor(operand)); + continue; + } + + InlineCallFrame* inlineCallFrame = + m_jit.codeBlock()->jitCode()->dfg()->minifiedDFG.at(id)->inlineCallFrame(); + + int stackOffset; + if (inlineCallFrame) + stackOffset = inlineCallFrame->stackOffset; + else + stackOffset = 0; + + if (!inlineCallFrame || inlineCallFrame->isClosureCall) { + m_jit.loadPtr( + AssemblyHelpers::addressFor(stackOffset + JSStack::Callee), + GPRInfo::regT0); + } else { + m_jit.move( + AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeRecovery.constant().asCell()), + GPRInfo::regT0); + } + + if (!inlineCallFrame || inlineCallFrame->isVarargs()) { + m_jit.load32( + AssemblyHelpers::payloadFor(stackOffset + JSStack::ArgumentCount), + GPRInfo::regT1); + } else { + m_jit.move( + AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), + GPRInfo::regT1); + } + + m_jit.setupArgumentsWithExecState( + AssemblyHelpers::TrustedImmPtr(inlineCallFrame), GPRInfo::regT0, GPRInfo::regT1); + switch (recovery.technique()) { + case DirectArgumentsThatWereNotCreated: + m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateDirectArgumentsDuringExit)), GPRInfo::nonArgGPR0); + break; + case ClonedArgumentsThatWereNotCreated: + m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateClonedArgumentsDuringExit)), GPRInfo::nonArgGPR0); + break; + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } + m_jit.call(GPRInfo::nonArgGPR0); + m_jit.storeCell(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(operand)); + + alreadyAllocatedArguments.add(id, operand); + } +} + extern "C" { void compileOSRExit(ExecState* exec) { SamplingRegion samplingRegion("DFG OSR Exit Compilation"); + + if (exec->vm().callFrameForCatch) + RELEASE_ASSERT(exec->vm().callFrameForCatch == exec); CodeBlock* codeBlock = exec->codeBlock(); - ASSERT(codeBlock); ASSERT(codeBlock->jitType() == JITCode::DFGJIT); - + VM* vm = &exec->vm(); // It's sort of preferable that we don't GC while in here. Anyways, doing so wouldn't @@ -60,18 +130,18 @@ void compileOSRExit(ExecState* exec) uint32_t exitIndex = vm->osrExitIndex; OSRExit& exit = codeBlock->jitCode()->dfg()->osrExit[exitIndex]; + if (vm->callFrameForCatch) + ASSERT(exit.m_kind == GenericUnwind); + if (exit.isExceptionHandler()) + ASSERT(!!vm->exception()); + + prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin); // Compute the value recoveries. Operands<ValueRecovery> operands; codeBlock->jitCode()->dfg()->variableEventStream.reconstruct(codeBlock, exit.m_codeOrigin, codeBlock->jitCode()->dfg()->minifiedDFG, exit.m_streamIndex, operands); - // There may be an override, for forward speculations. 
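emitRestoreArguments above materializes each elided arguments object at most once: recoveries are keyed by the MinifiedID of the phantom node, and later operands that refer to the same node simply copy the already-stored object instead of calling the create operation again. The same memoize-then-copy structure in standalone form (toy types; the real code emits machine code to do this):

#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

struct Recovery { bool needsArgumentsObject; int nodeID; int operand; };

// Stand-in for the slow-path call that allocates a DirectArguments/ClonedArguments object.
static std::string createArgumentsObject(int nodeID)
{
    return "arguments#" + std::to_string(nodeID);
}

static void restoreArguments(const std::vector<Recovery>& recoveries, std::unordered_map<int, std::string>& stack)
{
    std::unordered_map<int, int> alreadyAllocated;   // phantom node ID -> operand that holds the object
    for (const Recovery& recovery : recoveries) {
        if (!recovery.needsArgumentsObject)
            continue;
        auto iter = alreadyAllocated.find(recovery.nodeID);
        if (iter != alreadyAllocated.end()) {
            stack[recovery.operand] = stack[iter->second];   // copy the object created earlier
            continue;
        }
        stack[recovery.operand] = createArgumentsObject(recovery.nodeID);
        alreadyAllocated.emplace(recovery.nodeID, recovery.operand);
    }
}

int main()
{
    std::vector<Recovery> recoveries = {
        { true, 7, -4 },
        { true, 7, -9 },    // same phantom node: reuses the object created for operand -4
    };
    std::unordered_map<int, std::string> stack;
    restoreArguments(recoveries, stack);
    std::printf("%s / %s\n", stack[-4].c_str(), stack[-9].c_str());
    return 0;
}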
- if (!!exit.m_valueRecoveryOverride) { - operands.setOperand( - exit.m_valueRecoveryOverride->operand, exit.m_valueRecoveryOverride->recovery); - } - SpeculationRecovery* recovery = 0; if (exit.m_recoveryIndex != UINT_MAX) recovery = &codeBlock->jitCode()->dfg()->speculationRecovery[exit.m_recoveryIndex]; @@ -80,6 +150,15 @@ void compileOSRExit(ExecState* exec) CCallHelpers jit(vm, codeBlock); OSRExitCompiler exitCompiler(jit); + if (exit.m_kind == GenericUnwind) { + // We are acting as a defacto op_catch because we arrive here from genericUnwind(). + // So, we must restore our call frame and stack pointer. + jit.restoreCalleeSavesFromVMCalleeSavesBuffer(); + jit.loadPtr(vm->addressOfCallFrameForCatch(), GPRInfo::callFrameRegister); + jit.addPtr(CCallHelpers::TrustedImm32(codeBlock->stackPointerOffset() * sizeof(Register)), + GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister); + } + jit.jitAssertHasValidCallFrame(); if (vm->m_perBytecodeProfiler && codeBlock->jitCode()->dfgCommon()->compilation) { @@ -88,15 +167,15 @@ void compileOSRExit(ExecState* exec) Profiler::OSRExit* profilerExit = compilation->addOSRExit( exitIndex, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin), - exit.m_kind, isWatchpoint(exit.m_kind)); + exit.m_kind, exit.m_kind == UncountableInvalidation); jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress())); } - + exitCompiler.compileExit(exit, operands, recovery); - LinkBuffer patchBuffer(*vm, &jit, codeBlock); + LinkBuffer patchBuffer(*vm, jit, codeBlock); exit.m_code = FINALIZE_CODE_IF( - shouldShowDisassembly(), + shouldDumpDisassembly() || Options::verboseOSR(), patchBuffer, ("DFG OSR exit #%u (%s, %s) from %s, with operands = %s", exitIndex, toCString(exit.m_codeOrigin).data(), @@ -104,10 +183,7 @@ void compileOSRExit(ExecState* exec) toCString(ignoringContext<DumpContext>(operands)).data())); } - { - RepatchBuffer repatchBuffer(codeBlock); - repatchBuffer.relink(exit.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code())); - } + MacroAssembler::repatchJump(exit.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code())); vm->osrExitJumpDestination = exit.m_code.code().executableAddress(); } diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h index cbaafcc1e..cb262d427 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h +++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2015 Apple Inc. All rights reserved. 
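compileOSRExit above is reached only the first time a given exit actually fires: it reconstructs the value recoveries from the variable event stream, compiles a dedicated exit stub, and then repatches the original jump so later failures go straight to the stub. A toy model of that compile-once-then-patch pattern, with a function pointer standing in for the patched machine-code jump:

#include <cstdio>
#include <vector>

using ExitHandler = void (*)(unsigned exitIndex);

static void uncompiledExitThunk(unsigned exitIndex);

struct OSRExitStub {
    ExitHandler handler = uncompiledExitThunk;   // starts out pointing at the lazy-compile thunk
};

static std::vector<OSRExitStub> g_exits(2);

static void compiledExit(unsigned exitIndex)
{
    std::printf("exit %u: running previously compiled exit stub\n", exitIndex);
}

static void uncompiledExitThunk(unsigned exitIndex)
{
    std::printf("exit %u: compiling exit stub on first use\n", exitIndex);
    g_exits[exitIndex].handler = compiledExit;   // "repatch the jump" to the generated code
    compiledExit(exitIndex);
}

int main()
{
    g_exits[0].handler(0);   // first failure: compiles, then runs
    g_exits[0].handler(0);   // subsequent failures: go straight to the compiled stub
    return 0;
}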
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGOSRExitCompiler_h #define DFGOSRExitCompiler_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "AssemblyHelpers.h" @@ -51,28 +49,9 @@ public: void compileExit(const OSRExit&, const Operands<ValueRecovery>&, SpeculationRecovery*); private: -#if !ASSERT_DISABLED - static unsigned badIndex() { return static_cast<unsigned>(-1); }; -#endif - - void initializePoisoned(unsigned size) - { -#if ASSERT_DISABLED - m_poisonScratchIndices.resize(size); -#else - m_poisonScratchIndices.fill(badIndex(), size); -#endif - } - - unsigned poisonIndex(unsigned index) - { - unsigned result = m_poisonScratchIndices[index]; - ASSERT(result != badIndex()); - return result; - } + void emitRestoreArguments(const Operands<ValueRecovery>&); CCallHelpers& m_jit; - Vector<unsigned> m_poisonScratchIndices; }; extern "C" { diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp index 9402d115e..e419941dd 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,23 +31,25 @@ #include "DFGOperations.h" #include "DFGOSRExitCompilerCommon.h" #include "DFGSpeculativeJIT.h" -#include "Operations.h" +#include "JSCInlines.h" #include <wtf/DataLog.h> namespace JSC { namespace DFG { void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery) { - // 1) Pro-forma stuff. + // Pro-forma stuff. if (Options::printEachOSRExit()) { SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo; debugInfo->codeBlock = m_jit.codeBlock(); + debugInfo->kind = exit.m_kind; + debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex; m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo); } - // 2) Perform speculation recovery. This only comes into play when an operation - // starts mutating state before verifying the speculation it has already made. + // Perform speculation recovery. This only comes into play when an operation + // starts mutating state before verifying the speculation it has already made. if (recovery) { switch (recovery->type()) { @@ -63,7 +65,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov } } - // 3) Refine some value profile, if appropriate. + // Refine some value profile, if appropriate. 
if (!!exit.m_jsValueSource) { if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) { @@ -100,13 +102,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2); scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1); -#if CPU(ARM64) - m_jit.pushToSave(scratch1); - m_jit.pushToSave(scratch2); -#else m_jit.push(scratch1); m_jit.push(scratch2); -#endif GPRReg value; if (exit.m_jsValueSource.isAddress()) { @@ -115,20 +112,15 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov } else value = exit.m_jsValueSource.payloadGPR(); - m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1); - m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure()); + m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1); + m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructureID()); m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1); m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2); m_jit.lshift32(scratch1, scratch2); m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes())); -#if CPU(ARM64) - m_jit.popToRestore(scratch2); - m_jit.popToRestore(scratch1); -#else m_jit.pop(scratch2); m_jit.pop(scratch1); -#endif } } @@ -139,22 +131,14 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov // Save a register so we can use it. GPRReg scratch = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base()); -#if CPU(ARM64) - m_jit.pushToSave(scratch); -#else m_jit.push(scratch); -#endif m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch); m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag); m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch); m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload); -#if CPU(ARM64) - m_jit.popToRestore(scratch); -#else m_jit.pop(scratch); -#endif } else if (exit.m_jsValueSource.hasKnownTag()) { m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag); m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload); @@ -168,7 +152,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov // Do a simplified OSR exit. See DFGOSRExitCompiler64.cpp's comment regarding how and wny we // do this simple approach. - // 4) Save all state from GPRs into the scratch buffer. + // Save all state from GPRs into the scratch buffer. ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * operands.size()); EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0; @@ -201,12 +185,13 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov // Now all GPRs are free to reuse. - // 5) Save all state from FPRs into the scratch buffer. + // Save all state from FPRs into the scratch buffer. 
for (size_t index = 0; index < operands.size(); ++index) { const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { + case UnboxedDoubleInFPR: case InFPR: m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0); m_jit.storeDouble(recovery.fpr(), MacroAssembler::Address(GPRInfo::regT0)); @@ -219,9 +204,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov // Now all FPRs are free to reuse. - // 6) Save all state from the stack into the scratch buffer. For simplicity we - // do this even for state that's already in the right place on the stack. - // It makes things simpler later. + // Save all state from the stack into the scratch buffer. For simplicity we + // do this even for state that's already in the right place on the stack. + // It makes things simpler later. for (size_t index = 0; index < operands.size(); ++index) { const ValueRecovery& recovery = operands[index]; @@ -251,19 +236,36 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov } } - // 7) Do all data format conversions and store the results into the stack. + // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit. This + // could toast some stack that the DFG used. We need to do it before storing to stack offsets + // used by baseline. + m_jit.addPtr( + CCallHelpers::TrustedImm32( + -m_jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)), + CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister); - bool haveArguments = false; + // Restore the DFG callee saves and then save the ones the baseline JIT uses. + m_jit.emitRestoreCalleeSaves(); + m_jit.emitSaveCalleeSavesFor(m_jit.baselineCodeBlock()); + + if (exit.isExceptionHandler()) + m_jit.copyCalleeSavesToVMCalleeSavesBuffer(); + + // Do all data format conversions and store the results into the stack. 
for (size_t index = 0; index < operands.size(); ++index) { const ValueRecovery& recovery = operands[index]; - int operand = operands.operandForIndex(index); - + VirtualRegister reg = operands.virtualRegisterForIndex(index); + + if (reg.isLocal() && reg.toLocal() < static_cast<int>(m_jit.baselineCodeBlock()->calleeSaveSpaceAsVirtualRegisters())) + continue; + + int operand = reg.offset(); + switch (recovery.technique()) { case InPair: - case InFPR: case DisplacedInJSStack: - case DoubleDisplacedInJSStack: + case InFPR: m_jit.load32( &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag, GPRInfo::regT0); @@ -278,6 +280,14 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov AssemblyHelpers::payloadFor(operand)); break; + case UnboxedDoubleInFPR: + case DoubleDisplacedInJSStack: + m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0); + m_jit.loadDouble(MacroAssembler::Address(GPRInfo::regT0), FPRInfo::fpRegT0); + m_jit.purifyNaN(FPRInfo::fpRegT0); + m_jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor(operand)); + break; + case UnboxedInt32InGPR: case Int32DisplacedInJSStack: m_jit.load32( @@ -326,14 +336,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov AssemblyHelpers::payloadFor(operand)); break; - case ArgumentsThatWereNotCreated: - haveArguments = true; - m_jit.store32( - AssemblyHelpers::TrustedImm32(JSValue().tag()), - AssemblyHelpers::tagFor(operand)); - m_jit.store32( - AssemblyHelpers::TrustedImm32(JSValue().payload()), - AssemblyHelpers::payloadFor(operand)); + case DirectArgumentsThatWereNotCreated: + case ClonedArgumentsThatWereNotCreated: + // Don't do this, yet. break; default: @@ -341,127 +346,57 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov } } - // 8) Adjust the old JIT's execute counter. Since we are exiting OSR, we know - // that all new calls into this code will go to the new JIT, so the execute - // counter only affects call frames that performed OSR exit and call frames - // that were still executing the old JIT at the time of another call frame's - // OSR exit. We want to ensure that the following is true: + // Now that things on the stack are recovered, do the arguments recovery. We assume that arguments + // recoveries don't recursively refer to each other. But, we don't try to assume that they only + // refer to certain ranges of locals. Hence why we need to do this here, once the stack is sensible. + // Note that we also roughly assume that the arguments might still be materialized outside of its + // inline call frame scope - but for now the DFG wouldn't do that. + + emitRestoreArguments(operands); + + // Adjust the old JIT's execute counter. Since we are exiting OSR, we know + // that all new calls into this code will go to the new JIT, so the execute + // counter only affects call frames that performed OSR exit and call frames + // that were still executing the old JIT at the time of another call frame's + // OSR exit. We want to ensure that the following is true: // - // (a) Code the performs an OSR exit gets a chance to reenter optimized - // code eventually, since optimized code is faster. But we don't - // want to do such reentery too aggressively (see (c) below). + // (a) Code the performs an OSR exit gets a chance to reenter optimized + // code eventually, since optimized code is faster. But we don't + // want to do such reentery too aggressively (see (c) below). 
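The UnboxedDoubleInFPR / DoubleDisplacedInJSStack cases above run the recovered value through purifyNaN before storing it back: any NaN is collapsed to one canonical bit pattern so that an arbitrary NaN payload cannot masquerade as a tagged value in the JSValue encoding. A self-contained sketch of that purification (the canonical pattern used here is the standard quiet NaN, which is what JSC's PNaN amounts to):

#include <cstdint>
#include <cstdio>
#include <cstring>

static double purifyNaN(double value)
{
    if (value == value)
        return value;                                   // not a NaN: leave untouched
    uint64_t pureNaNBits = 0x7ff8000000000000ull;       // canonical quiet NaN
    double pure;
    std::memcpy(&pure, &pureNaNBits, sizeof pure);
    return pure;
}

int main()
{
    uint64_t impureBits = 0xfff0000012345678ull;        // a NaN with a nonzero payload
    double impure;
    std::memcpy(&impure, &impureBits, sizeof impure);
    double pure = purifyNaN(impure);
    uint64_t pureBits;
    std::memcpy(&pureBits, &pure, sizeof pureBits);
    std::printf("before: %016llx  after: %016llx\n",
        (unsigned long long)impureBits, (unsigned long long)pureBits);
    return 0;
}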
// - // (b) If there is code on the call stack that is still running the old - // JIT's code and has never OSR'd, then it should get a chance to - // perform OSR entry despite the fact that we've exited. + // (b) If there is code on the call stack that is still running the old + // JIT's code and has never OSR'd, then it should get a chance to + // perform OSR entry despite the fact that we've exited. // - // (c) Code the performs an OSR exit should not immediately retry OSR - // entry, since both forms of OSR are expensive. OSR entry is - // particularly expensive. + // (c) Code the performs an OSR exit should not immediately retry OSR + // entry, since both forms of OSR are expensive. OSR entry is + // particularly expensive. // - // (d) Frequent OSR failures, even those that do not result in the code - // running in a hot loop, result in recompilation getting triggered. + // (d) Frequent OSR failures, even those that do not result in the code + // running in a hot loop, result in recompilation getting triggered. // - // To ensure (c), we'd like to set the execute counter to - // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger - // (a) and (b), since then every OSR exit would delay the opportunity for - // every call frame to perform OSR entry. Essentially, if OSR exit happens - // frequently and the function has few loops, then the counter will never - // become non-negative and OSR entry will never be triggered. OSR entry - // will only happen if a loop gets hot in the old JIT, which does a pretty - // good job of ensuring (a) and (b). But that doesn't take care of (d), - // since each speculation failure would reset the execute counter. - // So we check here if the number of speculation failures is significantly - // larger than the number of successes (we want 90% success rate), and if - // there have been a large enough number of failures. If so, we set the - // counter to 0; otherwise we set the counter to - // counterValueForOptimizeAfterWarmUp(). + // To ensure (c), we'd like to set the execute counter to + // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger + // (a) and (b), since then every OSR exit would delay the opportunity for + // every call frame to perform OSR entry. Essentially, if OSR exit happens + // frequently and the function has few loops, then the counter will never + // become non-negative and OSR entry will never be triggered. OSR entry + // will only happen if a loop gets hot in the old JIT, which does a pretty + // good job of ensuring (a) and (b). But that doesn't take care of (d), + // since each speculation failure would reset the execute counter. + // So we check here if the number of speculation failures is significantly + // larger than the number of successes (we want 90% success rate), and if + // there have been a large enough number of failures. If so, we set the + // counter to 0; otherwise we set the counter to + // counterValueForOptimizeAfterWarmUp(). handleExitCounts(m_jit, exit); - // 9) Reify inlined call frames. + // Reify inlined call frames. reifyInlinedCallFrames(m_jit, exit); - // 10) Create arguments if necessary and place them into the appropriate aliased - // registers. 
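handleExitCounts implements the policy this long comment describes: if speculation failures make up noticeably more than 10% of executions and have happened often enough, the baseline execute counter is zeroed so reoptimization is considered soon; otherwise it is pushed back to the optimize-after-warm-up threshold. A sketch of that decision, with invented thresholds standing in for the real tunable options:

#include <cstdint>
#include <cstdio>

// Illustrative policy only; the real thresholds live in JSC's Options and ExecutionCounter.
struct ExitCountingPolicy {
    uint32_t minimumFailuresBeforeGivingUp = 100;    // assumed stand-in
    double requiredSuccessRate = 0.9;                // "we want 90% success rate"
};

// Returns the value to reset the baseline execute counter to after an OSR exit.
static int32_t counterAfterOSRExit(uint32_t osrExitCount, uint32_t executionCount,
    int32_t counterValueForOptimizeAfterWarmUp, const ExitCountingPolicy& policy = {})
{
    bool tooManyFailures = osrExitCount >= policy.minimumFailuresBeforeGivingUp
        && osrExitCount > (1.0 - policy.requiredSuccessRate) * executionCount;
    if (tooManyFailures)
        return 0;                                    // act soon: consider recompilation
    return counterValueForOptimizeAfterWarmUp;       // back off: wait for warm-up again
}

int main()
{
    std::printf("%d\n", counterAfterOSRExit(5, 1000, -1000));    // rare exits: back off
    std::printf("%d\n", counterAfterOSRExit(500, 1000, -1000));  // frequent exits: act now
    return 0;
}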
- - if (haveArguments) { - HashSet<InlineCallFrame*, DefaultHash<InlineCallFrame*>::Hash, - NullableHashTraits<InlineCallFrame*>> didCreateArgumentsObject; - - for (size_t index = 0; index < operands.size(); ++index) { - const ValueRecovery& recovery = operands[index]; - if (recovery.technique() != ArgumentsThatWereNotCreated) - continue; - int operand = operands.operandForIndex(index); - // Find the right inline call frame. - InlineCallFrame* inlineCallFrame = 0; - for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame; - current; - current = current->caller.inlineCallFrame) { - if (current->stackOffset >= operand) { - inlineCallFrame = current; - break; - } - } - - if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments()) - continue; - VirtualRegister argumentsRegister = m_jit.baselineArgumentsRegisterFor(inlineCallFrame); - if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) { - // We know this call frame optimized out an arguments object that - // the baseline JIT would have created. Do that creation now. - if (inlineCallFrame) { - m_jit.setupArgumentsWithExecState( - AssemblyHelpers::TrustedImmPtr(inlineCallFrame)); - m_jit.move( - AssemblyHelpers::TrustedImmPtr( - bitwise_cast<void*>(operationCreateInlinedArguments)), - GPRInfo::nonArgGPR0); - } else { - m_jit.setupArgumentsExecState(); - m_jit.move( - AssemblyHelpers::TrustedImmPtr( - bitwise_cast<void*>(operationCreateArguments)), - GPRInfo::nonArgGPR0); - } - m_jit.call(GPRInfo::nonArgGPR0); - m_jit.store32( - AssemblyHelpers::TrustedImm32(JSValue::CellTag), - AssemblyHelpers::tagFor(argumentsRegister)); - m_jit.store32( - GPRInfo::returnValueGPR, - AssemblyHelpers::payloadFor(argumentsRegister)); - m_jit.store32( - AssemblyHelpers::TrustedImm32(JSValue::CellTag), - AssemblyHelpers::tagFor(unmodifiedArgumentsRegister(argumentsRegister))); - m_jit.store32( - GPRInfo::returnValueGPR, - AssemblyHelpers::payloadFor(unmodifiedArgumentsRegister(argumentsRegister))); - m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms. - } - - m_jit.load32(AssemblyHelpers::payloadFor(argumentsRegister), GPRInfo::regT0); - m_jit.store32( - AssemblyHelpers::TrustedImm32(JSValue::CellTag), - AssemblyHelpers::tagFor(operand)); - m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor(operand)); - } - } - -#if ENABLE(GGC) - // 11) Write barrier the owner executable because we're jumping into a different block. - for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) { - CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin); - m_jit.move(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock->ownerExecutable()), GPRInfo::nonArgGPR0); - SpeculativeJIT::osrWriteBarrier(m_jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1, GPRInfo::nonArgGPR2); - if (!codeOrigin.inlineCallFrame) - break; - } -#endif - - // 12) And finish. + // And finish. adjustAndJumpToTarget(m_jit, exit); } diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp index 219a5e68a..6999e5cfb 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,7 +31,7 @@ #include "DFGOperations.h" #include "DFGOSRExitCompilerCommon.h" #include "DFGSpeculativeJIT.h" -#include "Operations.h" +#include "JSCInlines.h" #include "VirtualRegister.h" #include <wtf/DataLog.h> @@ -40,16 +40,20 @@ namespace JSC { namespace DFG { void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery) { - // 1) Pro-forma stuff. + m_jit.jitAssertTagsInPlace(); + + // Pro-forma stuff. if (Options::printEachOSRExit()) { SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo; debugInfo->codeBlock = m_jit.codeBlock(); + debugInfo->kind = exit.m_kind; + debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex; m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo); } - // 2) Perform speculation recovery. This only comes into play when an operation - // starts mutating state before verifying the speculation it has already made. + // Perform speculation recovery. This only comes into play when an operation + // starts mutating state before verifying the speculation it has already made. if (recovery) { switch (recovery->type()) { @@ -67,7 +71,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov } } - // 3) Refine some array and/or value profile, if appropriate. + // Refine some array and/or value profile, if appropriate. if (!!exit.m_jsValueSource) { if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) { @@ -93,13 +97,13 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister); scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1); -#if CPU(ARM64) - m_jit.pushToSave(scratch1); - m_jit.pushToSave(scratch2); -#else - m_jit.push(scratch1); - m_jit.push(scratch2); -#endif + if (isARM64()) { + m_jit.pushToSave(scratch1); + m_jit.pushToSave(scratch2); + } else { + m_jit.push(scratch1); + m_jit.push(scratch2); + } GPRReg value; if (exit.m_jsValueSource.isAddress()) { @@ -108,20 +112,20 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov } else value = exit.m_jsValueSource.gpr(); - m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1); - m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure()); - m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1); + m_jit.load32(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1); + m_jit.store32(scratch1, arrayProfile->addressOfLastSeenStructureID()); + m_jit.load8(AssemblyHelpers::Address(value, JSCell::indexingTypeOffset()), scratch1); m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2); m_jit.lshift32(scratch1, scratch2); m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes())); -#if CPU(ARM64) - m_jit.popToRestore(scratch2); - m_jit.popToRestore(scratch1); -#else - m_jit.pop(scratch2); - m_jit.pop(scratch1); -#endif + if (isARM64()) { + m_jit.popToRestore(scratch2); + m_jit.popToRestore(scratch1); + } else { + m_jit.pop(scratch2); + m_jit.pop(scratch1); + } } } @@ -175,7 +179,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov // variable" from "how was it represented", which will make it more difficult to add // features in the future and it will make it harder 
to reason about bugs. - // 4) Save all state from GPRs into the scratch buffer. + // Save all state from GPRs into the scratch buffer. ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * operands.size()); EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0; @@ -198,16 +202,17 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov } // And voila, all GPRs are free to reuse. - - // 5) Save all state from FPRs into the scratch buffer. + + // Save all state from FPRs into the scratch buffer. for (size_t index = 0; index < operands.size(); ++index) { const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { + case UnboxedDoubleInFPR: case InFPR: m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0); - m_jit.storeDouble(recovery.fpr(), GPRInfo::regT0); + m_jit.storeDouble(recovery.fpr(), MacroAssembler::Address(GPRInfo::regT0)); break; default: @@ -217,9 +222,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov // Now, all FPRs are also free. - // 6) Save all state from the stack into the scratch buffer. For simplicity we - // do this even for state that's already in the right place on the stack. - // It makes things simpler later. + // Save all state from the stack into the scratch buffer. For simplicity we + // do this even for state that's already in the right place on the stack. + // It makes things simpler later. for (size_t index = 0; index < operands.size(); ++index) { const ValueRecovery& recovery = operands[index]; @@ -240,21 +245,43 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov break; } } + + // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit. This + // could toast some stack that the DFG used. We need to do it before storing to stack offsets + // used by baseline. + m_jit.addPtr( + CCallHelpers::TrustedImm32( + -m_jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)), + CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister); - // 7) Do all data format conversions and store the results into the stack. - - bool haveArguments = false; + // Restore the DFG callee saves and then save the ones the baseline JIT uses. + m_jit.emitRestoreCalleeSaves(); + m_jit.emitSaveCalleeSavesFor(m_jit.baselineCodeBlock()); + + // The tag registers are needed to materialize recoveries below. + m_jit.emitMaterializeTagCheckRegisters(); + + if (exit.isExceptionHandler()) + m_jit.copyCalleeSavesToVMCalleeSavesBuffer(); + + // Do all data format conversions and store the results into the stack. 
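On 64-bit, the exit path materializes the tag registers because recovered values are re-boxed into the unified 64-bit JSValue encoding before being written back to the baseline frame; boxDouble, for instance, turns raw double bits into a pattern that cannot collide with tagged integers or pointers. The following is a self-contained sketch of a NaN-boxing layout in that spirit; the constants are illustrative stand-ins, not a claim about JSC's exact encoding:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Illustrative 64-bit value encoding: pointers keep their top 16 bits clear, int32s carry an
// all-ones top tag, and doubles are stored with their bit pattern offset so they land in between.
constexpr uint64_t kNumberTag = 0xffff000000000000ull;     // assumed stand-in for a TagTypeNumber-style tag
constexpr uint64_t kDoubleOffset = 0x0001000000000000ull;  // assumed stand-in for a double-encode offset

static uint64_t boxInt32(int32_t value)
{
    return kNumberTag | static_cast<uint32_t>(value);
}

static uint64_t boxDouble(double value)                    // assumes NaNs were purified beforehand
{
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof bits);
    return bits + kDoubleOffset;
}

static double unboxDouble(uint64_t boxed)
{
    uint64_t bits = boxed - kDoubleOffset;
    double value;
    std::memcpy(&value, &bits, sizeof value);
    return value;
}

int main()
{
    uint64_t i = boxInt32(-7);
    uint64_t d = boxDouble(2.5);
    std::printf("int box: %016llx  double box: %016llx  unboxed: %g\n",
        (unsigned long long)i, (unsigned long long)d, unboxDouble(d));
    return 0;
}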
for (size_t index = 0; index < operands.size(); ++index) { const ValueRecovery& recovery = operands[index]; - int operand = operands.operandForIndex(index); - + VirtualRegister reg = operands.virtualRegisterForIndex(index); + + if (reg.isLocal() && reg.toLocal() < static_cast<int>(m_jit.baselineCodeBlock()->calleeSaveSpaceAsVirtualRegisters())) + continue; + + int operand = reg.offset(); + switch (recovery.technique()) { case InGPR: case UnboxedCellInGPR: case DisplacedInJSStack: case CellDisplacedInJSStack: case BooleanDisplacedInJSStack: + case InFPR: m_jit.load64(scratch + index, GPRInfo::regT0); m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand)); break; @@ -283,10 +310,11 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand)); break; - case InFPR: + case UnboxedDoubleInFPR: case DoubleDisplacedInJSStack: m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0); - m_jit.loadDouble(GPRInfo::regT0, FPRInfo::fpRegT0); + m_jit.loadDouble(MacroAssembler::Address(GPRInfo::regT0), FPRInfo::fpRegT0); + m_jit.purifyNaN(FPRInfo::fpRegT0); m_jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0); m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand)); break; @@ -297,125 +325,68 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov AssemblyHelpers::addressFor(operand)); break; - case ArgumentsThatWereNotCreated: - haveArguments = true; - // We can't restore this yet but we can make sure that the stack appears - // sane. - m_jit.store64( - AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue())), - AssemblyHelpers::addressFor(operand)); + case DirectArgumentsThatWereNotCreated: + case ClonedArgumentsThatWereNotCreated: + // Don't do this, yet. break; default: + RELEASE_ASSERT_NOT_REACHED(); break; } } + + // Now that things on the stack are recovered, do the arguments recovery. We assume that arguments + // recoveries don't recursively refer to each other. But, we don't try to assume that they only + // refer to certain ranges of locals. Hence why we need to do this here, once the stack is sensible. + // Note that we also roughly assume that the arguments might still be materialized outside of its + // inline call frame scope - but for now the DFG wouldn't do that. + + emitRestoreArguments(operands); - // 8) Adjust the old JIT's execute counter. Since we are exiting OSR, we know - // that all new calls into this code will go to the new JIT, so the execute - // counter only affects call frames that performed OSR exit and call frames - // that were still executing the old JIT at the time of another call frame's - // OSR exit. We want to ensure that the following is true: + // Adjust the old JIT's execute counter. Since we are exiting OSR, we know + // that all new calls into this code will go to the new JIT, so the execute + // counter only affects call frames that performed OSR exit and call frames + // that were still executing the old JIT at the time of another call frame's + // OSR exit. We want to ensure that the following is true: // - // (a) Code the performs an OSR exit gets a chance to reenter optimized - // code eventually, since optimized code is faster. But we don't - // want to do such reentery too aggressively (see (c) below). + // (a) Code the performs an OSR exit gets a chance to reenter optimized + // code eventually, since optimized code is faster. 
But we don't + // want to do such reentery too aggressively (see (c) below). // - // (b) If there is code on the call stack that is still running the old - // JIT's code and has never OSR'd, then it should get a chance to - // perform OSR entry despite the fact that we've exited. + // (b) If there is code on the call stack that is still running the old + // JIT's code and has never OSR'd, then it should get a chance to + // perform OSR entry despite the fact that we've exited. // - // (c) Code the performs an OSR exit should not immediately retry OSR - // entry, since both forms of OSR are expensive. OSR entry is - // particularly expensive. + // (c) Code the performs an OSR exit should not immediately retry OSR + // entry, since both forms of OSR are expensive. OSR entry is + // particularly expensive. // - // (d) Frequent OSR failures, even those that do not result in the code - // running in a hot loop, result in recompilation getting triggered. + // (d) Frequent OSR failures, even those that do not result in the code + // running in a hot loop, result in recompilation getting triggered. // - // To ensure (c), we'd like to set the execute counter to - // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger - // (a) and (b), since then every OSR exit would delay the opportunity for - // every call frame to perform OSR entry. Essentially, if OSR exit happens - // frequently and the function has few loops, then the counter will never - // become non-negative and OSR entry will never be triggered. OSR entry - // will only happen if a loop gets hot in the old JIT, which does a pretty - // good job of ensuring (a) and (b). But that doesn't take care of (d), - // since each speculation failure would reset the execute counter. - // So we check here if the number of speculation failures is significantly - // larger than the number of successes (we want 90% success rate), and if - // there have been a large enough number of failures. If so, we set the - // counter to 0; otherwise we set the counter to - // counterValueForOptimizeAfterWarmUp(). + // To ensure (c), we'd like to set the execute counter to + // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger + // (a) and (b), since then every OSR exit would delay the opportunity for + // every call frame to perform OSR entry. Essentially, if OSR exit happens + // frequently and the function has few loops, then the counter will never + // become non-negative and OSR entry will never be triggered. OSR entry + // will only happen if a loop gets hot in the old JIT, which does a pretty + // good job of ensuring (a) and (b). But that doesn't take care of (d), + // since each speculation failure would reset the execute counter. + // So we check here if the number of speculation failures is significantly + // larger than the number of successes (we want 90% success rate), and if + // there have been a large enough number of failures. If so, we set the + // counter to 0; otherwise we set the counter to + // counterValueForOptimizeAfterWarmUp(). handleExitCounts(m_jit, exit); - // 9) Reify inlined call frames. + // Reify inlined call frames. reifyInlinedCallFrames(m_jit, exit); - - // 10) Create arguments if necessary and place them into the appropriate aliased - // registers. 
- - if (haveArguments) { - HashSet<InlineCallFrame*, DefaultHash<InlineCallFrame*>::Hash, - NullableHashTraits<InlineCallFrame*>> didCreateArgumentsObject; - - for (size_t index = 0; index < operands.size(); ++index) { - const ValueRecovery& recovery = operands[index]; - if (recovery.technique() != ArgumentsThatWereNotCreated) - continue; - int operand = operands.operandForIndex(index); - // Find the right inline call frame. - InlineCallFrame* inlineCallFrame = 0; - for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame; - current; - current = current->caller.inlineCallFrame) { - if (current->stackOffset >= operand) { - inlineCallFrame = current; - break; - } - } - - if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments()) - continue; - VirtualRegister argumentsRegister = m_jit.baselineArgumentsRegisterFor(inlineCallFrame); - if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) { - // We know this call frame optimized out an arguments object that - // the baseline JIT would have created. Do that creation now. - if (inlineCallFrame) { - m_jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0); - m_jit.setupArguments(GPRInfo::regT0); - } else - m_jit.setupArgumentsExecState(); - m_jit.move( - AssemblyHelpers::TrustedImmPtr( - bitwise_cast<void*>(operationCreateArguments)), - GPRInfo::nonArgGPR0); - m_jit.call(GPRInfo::nonArgGPR0); - m_jit.store64(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(argumentsRegister)); - m_jit.store64( - GPRInfo::returnValueGPR, - AssemblyHelpers::addressFor(unmodifiedArgumentsRegister(argumentsRegister))); - m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms. - } - - m_jit.load64(AssemblyHelpers::addressFor(argumentsRegister), GPRInfo::regT0); - m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand)); - } - } - -#if ENABLE(GGC) - // 11) Write barrier the owner executable because we're jumping into a different block. - for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) { - CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin); - m_jit.move(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock->ownerExecutable()), GPRInfo::nonArgGPR0); - SpeculativeJIT::osrWriteBarrier(m_jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1, GPRInfo::nonArgGPR2); - if (!codeOrigin.inlineCallFrame) - break; - } -#endif - // 12) And finish. + // And finish. adjustAndJumpToTarget(m_jit, exit); } diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp index 9f84a2968..00bbe8b4e 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,16 +28,20 @@ #if ENABLE(DFG_JIT) -#include "Arguments.h" +#include "DFGJITCode.h" #include "DFGOperations.h" +#include "JIT.h" #include "JSCJSValueInlines.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit) { jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count)); + + if (!exitKindMayJettison(exit.m_kind)) + return; jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT0); @@ -52,20 +56,55 @@ void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit) AssemblyHelpers::GreaterThanOrEqual, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()), AssemblyHelpers::TrustedImm32(0)); + + // We want to figure out if there's a possibility that we're in a loop. For the outermost + // code block in the inline stack, we handle this appropriately by having the loop OSR trigger + // check the exit count of the replacement of the CodeBlock from which we are OSRing. The + // problem is the inlined functions, which might also have loops, but whose baseline versions + // don't know where to look for the exit count. Figure out if those loops are severe enough + // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger. + // Otherwise, we should use the normal reoptimization trigger. + + AssemblyHelpers::JumpList loopThreshold; + + for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame) { + loopThreshold.append( + jit.branchTest8( + AssemblyHelpers::NonZero, + AssemblyHelpers::AbsoluteAddress( + inlineCallFrame->baselineCodeBlock->ownerScriptExecutable()->addressOfDidTryToEnterInLoop()))); + } + + jit.move( + AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()), + GPRInfo::regT1); + + if (!loopThreshold.empty()) { + AssemblyHelpers::Jump done = jit.jump(); + + loopThreshold.link(&jit); + jit.move( + AssemblyHelpers::TrustedImm32( + jit.codeBlock()->exitCountThresholdForReoptimizationFromLoop()), + GPRInfo::regT1); - tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization())); + done.link(&jit); + } + + tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1); reoptimizeNow.link(&jit); // Reoptimize as soon as possible. 
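// Illustrative sketch, not part of this patch: the decision that the exit-count
// code above implements, written as plain C++. All names and the way the inputs
// are obtained are hypothetical; only the shape of the logic follows
// handleExitCounts().
static bool shouldReoptimizeAfterOSRExit(
    int32_t baselineExecuteCounter, unsigned exitCount,
    bool anyInlinedFrameTriedLoopOSR, unsigned exitThreshold, unsigned loopExitThreshold)
{
    // A non-negative execute counter means the baseline replacement is already
    // due for optimization, so reoptimize immediately.
    if (baselineExecuteCounter >= 0)
        return true;
    // Frames that tried to OSR-enter from a loop get the dedicated loop threshold.
    unsigned threshold = anyInlinedFrameTriedLoopOSR ? loopExitThreshold : exitThreshold;
    // Otherwise only reoptimize once exits clearly outnumber what we tolerate;
    // below the threshold we instead re-arm the baseline warm-up counter.
    return exitCount > threshold;
}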
#if !NUMBER_OF_ARGUMENT_REGISTERS jit.poke(GPRInfo::regT0); + jit.poke(AssemblyHelpers::TrustedImmPtr(&exit), 1); #else jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0); - ASSERT(GPRInfo::argumentGPR0 != GPRInfo::regT1); + jit.move(AssemblyHelpers::TrustedImmPtr(&exit), GPRInfo::argumentGPR1); #endif - jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::regT1); - jit.call(GPRInfo::regT1); + jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::nonArgGPR0); + jit.call(GPRInfo::nonArgGPR0); AssemblyHelpers::Jump doneAdjusting = jit.jump(); tooFewFails.link(&jit); @@ -74,105 +113,202 @@ void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit) int32_t activeThreshold = jit.baselineCodeBlock()->adjustedCounterValue( Options::thresholdForOptimizeAfterLongWarmUp()); - int32_t targetValue = ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt( + int32_t targetValue = applyMemoryUsageHeuristicsAndConvertToInt( activeThreshold, jit.baselineCodeBlock()); - int32_t clippedValue = - ExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue); + int32_t clippedValue; + switch (jit.codeBlock()->jitType()) { + case JITCode::DFGJIT: + clippedValue = BaselineExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue); + break; + case JITCode::FTLJIT: + clippedValue = UpperTierExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue); + break; + default: + RELEASE_ASSERT_NOT_REACHED(); +#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE) + clippedValue = 0; // Make some compilers, and mhahnenberg, happy. +#endif + break; + } jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter())); jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold())); - jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount())); + jit.store32(AssemblyHelpers::TrustedImm32(formattedTotalExecutionCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount())); doneAdjusting.link(&jit); } void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit) { + // FIXME: We shouldn't leave holes on the stack when performing an OSR exit + // in presence of inlined tail calls. 
+ // https://bugs.webkit.org/show_bug.cgi?id=147511 ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT); jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock)); - CodeOrigin codeOrigin; - for (codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) { - InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame; - CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin); - CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(inlineCallFrame->caller); - unsigned callBytecodeIndex = inlineCallFrame->caller.bytecodeIndex; - CallLinkInfo& callLinkInfo = baselineCodeBlockForCaller->getCallLinkInfo(callBytecodeIndex); - - void* jumpTarget = callLinkInfo.callReturnLocation.executableAddress(); + const CodeOrigin* codeOrigin; + for (codeOrigin = &exit.m_codeOrigin; codeOrigin && codeOrigin->inlineCallFrame; codeOrigin = codeOrigin->inlineCallFrame->getCallerSkippingTailCalls()) { + InlineCallFrame* inlineCallFrame = codeOrigin->inlineCallFrame; + CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(*codeOrigin); + InlineCallFrame::Kind trueCallerCallKind; + CodeOrigin* trueCaller = inlineCallFrame->getCallerSkippingTailCalls(&trueCallerCallKind); + GPRReg callerFrameGPR = GPRInfo::callFrameRegister; - GPRReg callerFrameGPR; - if (inlineCallFrame->caller.inlineCallFrame) { - jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3); + if (!trueCaller) { + ASSERT(inlineCallFrame->isTail()); + jit.loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT3); + jit.storePtr(GPRInfo::regT3, AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset())); + jit.loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()), GPRInfo::regT3); callerFrameGPR = GPRInfo::regT3; - } else - callerFrameGPR = GPRInfo::callFrameRegister; - -#if USE(JSVALUE64) + } else { + CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(*trueCaller); + unsigned callBytecodeIndex = trueCaller->bytecodeIndex; + void* jumpTarget = nullptr; + + switch (trueCallerCallKind) { + case InlineCallFrame::Call: + case InlineCallFrame::Construct: + case InlineCallFrame::CallVarargs: + case InlineCallFrame::ConstructVarargs: + case InlineCallFrame::TailCall: + case InlineCallFrame::TailCallVarargs: { + CallLinkInfo* callLinkInfo = + baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex); + RELEASE_ASSERT(callLinkInfo); + + jumpTarget = callLinkInfo->callReturnLocation().executableAddress(); + break; + } + + case InlineCallFrame::GetterCall: + case InlineCallFrame::SetterCall: { + StructureStubInfo* stubInfo = + baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex)); + RELEASE_ASSERT(stubInfo); + + jumpTarget = stubInfo->callReturnLocation.labelAtOffset( + stubInfo->patch.deltaCallToDone).executableAddress(); + break; + } + + default: + RELEASE_ASSERT_NOT_REACHED(); + } + + if (trueCaller->inlineCallFrame) { + jit.addPtr( + AssemblyHelpers::TrustedImm32(trueCaller->inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), + GPRInfo::callFrameRegister, + GPRInfo::regT3); + callerFrameGPR = GPRInfo::regT3; + } + + jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), 
AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset())); + } + jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock))); - if (!inlineCallFrame->isClosureCall) - jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()->scope()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain))); + + // Restore the inline call frame's callee save registers. + // If this inlined frame is a tail call that will return back to the original caller, we need to + // copy the prior contents of the tag registers already saved for the outer frame to this frame. + jit.emitSaveOrCopyCalleeSavesFor( + baselineCodeBlock, + static_cast<VirtualRegister>(inlineCallFrame->stackOffset), + trueCaller ? AssemblyHelpers::UseExistingTagRegisterContents : AssemblyHelpers::CopyBaselineCalleeSavedRegistersFromBaseFrame, + GPRInfo::regT2); + + if (!inlineCallFrame->isVarargs()) + jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount))); +#if USE(JSVALUE64) jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset())); - jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset())); - uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex); + uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex).bits(); jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount))); - jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount))); if (!inlineCallFrame->isClosureCall) jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee))); - - // Leave the captured arguments in regT3. 
- if (baselineCodeBlock->usesArguments()) - jit.loadPtr(AssemblyHelpers::addressFor(VirtualRegister(inlineCallFrame->stackOffset + unmodifiedArgumentsRegister(baselineCodeBlock->argumentsRegister()).offset())), GPRInfo::regT3); #else // USE(JSVALUE64) // so this is the 32-bit part - jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock))); - jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain))); - if (!inlineCallFrame->isClosureCall) - jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain))); jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset())); - jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset())); - Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin.bytecodeIndex; - uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction); + Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin->bytecodeIndex; + uint32_t locationBits = CallSiteIndex(instruction).bits(); jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount))); - jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount))); jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee))); if (!inlineCallFrame->isClosureCall) jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee))); - - // Leave the captured arguments in regT3. 
- if (baselineCodeBlock->usesArguments()) - jit.loadPtr(AssemblyHelpers::payloadFor(VirtualRegister(inlineCallFrame->stackOffset + unmodifiedArgumentsRegister(baselineCodeBlock->argumentsRegister()).offset())), GPRInfo::regT3); #endif // USE(JSVALUE64) // ending the #else part, so directly above is the 32-bit part - - if (baselineCodeBlock->usesArguments()) { - AssemblyHelpers::Jump noArguments = jit.branchTestPtr(AssemblyHelpers::Zero, GPRInfo::regT3); - jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0); - jit.storePtr(GPRInfo::regT0, AssemblyHelpers::Address(GPRInfo::regT3, Arguments::offsetOfRegisters())); - noArguments.link(&jit); - } } + // Don't need to set the toplevel code origin if we only did inline tail calls + if (codeOrigin) { #if USE(JSVALUE64) - uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex); + uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex).bits(); #else - Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin.bytecodeIndex; - uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction); + Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin->bytecodeIndex; + uint32_t locationBits = CallSiteIndex(instruction).bits(); #endif - jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(JSStack::ArgumentCount))); + jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(JSStack::ArgumentCount))); + } +} + +static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch) +{ + AssemblyHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(owner); + + // We need these extra slots because setupArgumentsWithExecState will use poke on x86. +#if CPU(X86) + jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister); +#endif + + jit.setupArgumentsWithExecState(owner); + jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch); + jit.call(scratch); + +#if CPU(X86) + jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister); +#endif + + ownerIsRememberedOrInEden.link(&jit); } void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit) { + jit.move( + AssemblyHelpers::TrustedImmPtr( + jit.codeBlock()->baselineAlternative()), GPRInfo::argumentGPR1); + osrWriteBarrier(jit, GPRInfo::argumentGPR1, GPRInfo::nonArgGPR0); + + // We barrier all inlined frames -- and not just the current inline stack -- + // because we don't know which inlined function owns the value profile that + // we'll update when we exit. In the case of "f() { a(); b(); }", if both + // a and b are inlined, we might exit inside b due to a bad value loaded + // from a. + // FIXME: MethodOfGettingAValueProfile should remember which CodeBlock owns + // the value profile. 
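// Illustrative sketch, not part of this patch or of JSC: the generational fast
// path that osrWriteBarrier() above relies on, with hypothetical names.
// jumpIfIsRememberedOrInEden() is the emitted-code counterpart of the early
// return below.
struct SketchCell {
    bool rememberedOrInEden; // newly allocated, or already queued for re-scan
};
static inline void sketchWriteBarrier(SketchCell* owner, void (*slowPath)(SketchCell*))
{
    // Nothing to do if the collector will visit this owner anyway.
    if (owner->rememberedOrInEden)
        return;
    // Otherwise record the old-generation owner so the next eden collection
    // re-scans it (operationOSRWriteBarrier plays this role in the patch).
    slowPath(owner);
}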
+ InlineCallFrameSet* inlineCallFrames = jit.codeBlock()->jitCode()->dfgCommon()->inlineCallFrames.get(); + if (inlineCallFrames) { + for (InlineCallFrame* inlineCallFrame : *inlineCallFrames) { + jit.move( + AssemblyHelpers::TrustedImmPtr( + inlineCallFrame->baselineCodeBlock.get()), GPRInfo::argumentGPR1); + osrWriteBarrier(jit, GPRInfo::argumentGPR1, GPRInfo::nonArgGPR0); + } + } + if (exit.m_codeOrigin.inlineCallFrame) jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister); - CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(exit.m_codeOrigin); - Vector<BytecodeAndMachineOffset>& decodedCodeMap = jit.decodedCodeMapFor(baselineCodeBlock); + CodeBlock* codeBlockForExit = jit.baselineCodeBlockFor(exit.m_codeOrigin); + Vector<BytecodeAndMachineOffset>& decodedCodeMap = jit.decodedCodeMapFor(codeBlockForExit); BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex); ASSERT(mapping); ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex); - void* jumpTarget = baselineCodeBlock->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset); + void* jumpTarget = codeBlockForExit->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset); + + jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(codeBlockForExit) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister); + if (exit.isExceptionHandler()) { + // Since we're jumping to op_catch, we need to set callFrameForCatch. + jit.storePtr(GPRInfo::callFrameRegister, jit.vm()->addressOfCallFrameForCatch()); + } jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2); jit.jump(GPRInfo::regT2); diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h b/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h index 8ceb8b6d4..bbb22e013 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h +++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,12 +26,14 @@ #ifndef DFGOSRExitCompilerCommon_h #define DFGOSRExitCompilerCommon_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "CCallHelpers.h" #include "DFGOSRExit.h" +#include "DFGCommonData.h" +#include "DFGJITCode.h" +#include "FTLJITCode.h" +#include "RegisterSet.h" namespace JSC { namespace DFG { @@ -39,6 +41,92 @@ void handleExitCounts(CCallHelpers&, const OSRExitBase&); void reifyInlinedCallFrames(CCallHelpers&, const OSRExitBase&); void adjustAndJumpToTarget(CCallHelpers&, const OSRExitBase&); +template <typename JITCodeType> +void adjustFrameAndStackInOSRExitCompilerThunk(MacroAssembler& jit, VM* vm, JITCode::JITType jitType) +{ + ASSERT(jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT); + + bool isFTLOSRExit = jitType == JITCode::FTLJIT; + RegisterSet registersToPreserve; + registersToPreserve.set(GPRInfo::regT0); + if (isFTLOSRExit) { + // FTL can use the scratch registers for values. The code below uses + // the scratch registers. We need to preserve them before doing anything. 
+ registersToPreserve.merge(RegisterSet::macroScratchRegisters()); + } + + size_t scratchSize = sizeof(void*) * registersToPreserve.numberOfSetGPRs(); + if (isFTLOSRExit) + scratchSize += sizeof(void*); + + ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(scratchSize); + char* buffer = static_cast<char*>(scratchBuffer->dataBuffer()); + + jit.pushToSave(GPRInfo::regT1); + jit.move(MacroAssembler::TrustedImmPtr(buffer), GPRInfo::regT1); + + unsigned storeOffset = 0; + registersToPreserve.forEach([&](Reg reg) { + jit.storePtr(reg.gpr(), MacroAssembler::Address(GPRInfo::regT1, storeOffset)); + storeOffset += sizeof(void*); + }); + + if (isFTLOSRExit) { + // FTL OSRExits are entered via the code FTLExitThunkGenerator emits which does + // pushToSaveImmediateWithoutTouchRegisters with the OSR exit index. We need to load + // that top value and then push it back when we reset our SP. + jit.loadPtr(MacroAssembler::Address(MacroAssembler::stackPointerRegister, MacroAssembler::pushToSaveByteOffset()), GPRInfo::regT0); + jit.storePtr(GPRInfo::regT0, MacroAssembler::Address(GPRInfo::regT1, registersToPreserve.numberOfSetGPRs() * sizeof(void*))); + } + jit.popToRestore(GPRInfo::regT1); + + // We need to reset FP in the case of an exception. + jit.loadPtr(vm->addressOfCallFrameForCatch(), GPRInfo::regT0); + MacroAssembler::Jump didNotHaveException = jit.branchTestPtr(MacroAssembler::Zero, GPRInfo::regT0); + jit.move(GPRInfo::regT0, GPRInfo::callFrameRegister); + didNotHaveException.link(&jit); + // We need to make sure SP is correct in case of an exception. + jit.loadPtr(MacroAssembler::Address(GPRInfo::callFrameRegister, JSStack::CodeBlock * static_cast<int>(sizeof(Register))), GPRInfo::regT0); + jit.loadPtr(MacroAssembler::Address(GPRInfo::regT0, CodeBlock::jitCodeOffset()), GPRInfo::regT0); + jit.addPtr(MacroAssembler::TrustedImm32(JITCodeType::commonDataOffset()), GPRInfo::regT0); + jit.load32(MacroAssembler::Address(GPRInfo::regT0, CommonData::frameRegisterCountOffset()), GPRInfo::regT0); + // This does virtualRegisterForLocal(frameRegisterCount - 1)*sizeof(Register) where: + // virtualRegisterForLocal(frameRegisterCount - 1) + // = VirtualRegister::localToOperand(frameRegisterCount - 1) + // = -1 - (frameRegisterCount - 1) + // = -frameRegisterCount + jit.neg32(GPRInfo::regT0); + jit.mul32(MacroAssembler::TrustedImm32(sizeof(Register)), GPRInfo::regT0, GPRInfo::regT0); +#if USE(JSVALUE64) + jit.signExtend32ToPtr(GPRInfo::regT0, GPRInfo::regT0); +#endif + jit.addPtr(GPRInfo::callFrameRegister, GPRInfo::regT0); + jit.move(GPRInfo::regT0, MacroAssembler::stackPointerRegister); + + if (isFTLOSRExit) { + // Leave space for saving the OSR Exit Index. + jit.subPtr(MacroAssembler::TrustedImm32(MacroAssembler::pushToSaveByteOffset()), MacroAssembler::stackPointerRegister); + } + jit.pushToSave(GPRInfo::regT1); + + jit.move(MacroAssembler::TrustedImmPtr(buffer), GPRInfo::regT1); + if (isFTLOSRExit) { + // FTL OSRExits are entered via FTLExitThunkGenerator code with does + // pushToSaveImmediateWithoutTouchRegisters. We need to load that top + // register and then store it back when we have our SP back to a safe value. 
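// (Aside, illustrative only and not part of this patch: plugging made-up numbers
// into the virtualRegisterForLocal() computation above, with frameRegisterCount
// == 40 and sizeof(Register) == 8, regT0 becomes -40, then -40 * 8 = -320, so the
// new stack pointer is callFrameRegister - 320, i.e. just beyond the optimized
// frame's locals -- a safe SP for the rest of the exit ramp to run with.)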
+ jit.loadPtr(MacroAssembler::Address(GPRInfo::regT1, registersToPreserve.numberOfSetGPRs() * sizeof(void*)), GPRInfo::regT0); + jit.storePtr(GPRInfo::regT0, MacroAssembler::Address(MacroAssembler::stackPointerRegister, MacroAssembler::pushToSaveByteOffset())); + } + + unsigned loadOffset = 0; + registersToPreserve.forEach([&](Reg reg) { + jit.loadPtr(MacroAssembler::Address(GPRInfo::regT1, loadOffset), reg.gpr()); + loadOffset += sizeof(void*); + }); + jit.popToRestore(GPRInfo::regT1); +} + + } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitFuzz.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitFuzz.cpp new file mode 100644 index 000000000..570a6a02b --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGOSRExitFuzz.cpp @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGOSRExitFuzz.h" + +#include "TestRunnerUtils.h" + +namespace JSC { namespace DFG { + +unsigned g_numberOfStaticOSRExitFuzzChecks; +unsigned g_numberOfOSRExitFuzzChecks; + +} // namespace DFG + +unsigned numberOfStaticOSRExitFuzzChecks() +{ + return DFG::g_numberOfStaticOSRExitFuzzChecks; +} + +unsigned numberOfOSRExitFuzzChecks() +{ + return DFG::g_numberOfOSRExitFuzzChecks; +} + +} // namespace JSC + + diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitFuzz.h b/Source/JavaScriptCore/dfg/DFGOSRExitFuzz.h new file mode 100644 index 000000000..8121f1c3e --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGOSRExitFuzz.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGOSRExitFuzz_h +#define DFGOSRExitFuzz_h + +#include "Options.h" + +namespace JSC { namespace DFG { + +extern unsigned g_numberOfStaticOSRExitFuzzChecks; + +inline bool doOSRExitFuzzing() +{ + if (!Options::useOSRExitFuzz()) + return false; + + g_numberOfStaticOSRExitFuzzChecks++; + if (unsigned atStatic = Options::fireOSRExitFuzzAtStatic()) + return atStatic == g_numberOfStaticOSRExitFuzzChecks; + + return true; +} + +// DFG- and FTL-generated code will query this on every speculation. +extern unsigned g_numberOfOSRExitFuzzChecks; + +} } // namespace JSC::DFG + +#endif // DFGOSRExitFuzz_h + diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitJumpPlaceholder.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitJumpPlaceholder.cpp index fec99ec9a..59780544d 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitJumpPlaceholder.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSRExitJumpPlaceholder.cpp @@ -30,6 +30,7 @@ #include "DFGJITCompiler.h" #include "DFGSpeculativeJIT.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitJumpPlaceholder.h b/Source/JavaScriptCore/dfg/DFGOSRExitJumpPlaceholder.h index 4e016a406..57cf7834a 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitJumpPlaceholder.h +++ b/Source/JavaScriptCore/dfg/DFGOSRExitJumpPlaceholder.h @@ -26,8 +26,6 @@ #ifndef DFGOSRExitJumpPlaceholder_h #define DFGOSRExitJumpPlaceholder_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGCommon.h" diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp index 98e58a101..ba2a0da11 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -32,7 +32,7 @@ #include "Executable.h" #include "JIT.h" #include "JITCode.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -41,17 +41,14 @@ void prepareCodeOriginForOSRExit(ExecState* exec, CodeOrigin codeOrigin) VM& vm = exec->vm(); DeferGC deferGC(vm.heap); - for (; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) { - FunctionExecutable* executable = - static_cast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get()); - CodeBlock* codeBlock = executable->baselineCodeBlockFor( - codeOrigin.inlineCallFrame->isCall ? 
CodeForCall : CodeForConstruct); - + for (; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->directCaller) { + CodeBlock* codeBlock = codeOrigin.inlineCallFrame->baselineCodeBlock.get(); if (codeBlock->jitType() == JSC::JITCode::BaselineJIT) continue; + ASSERT(codeBlock->jitType() == JSC::JITCode::InterpreterThunk); JIT::compile(&vm, codeBlock, JITCompilationMustSucceed); - codeBlock->install(); + codeBlock->ownerScriptExecutable()->installCode(codeBlock); } } diff --git a/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp b/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp new file mode 100644 index 000000000..743a314d4 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp @@ -0,0 +1,2220 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGObjectAllocationSinkingPhase.h" + +#if ENABLE(DFG_JIT) + +#include "DFGBlockMapInlines.h" +#include "DFGClobbersExitState.h" +#include "DFGCombinedLiveness.h" +#include "DFGGraph.h" +#include "DFGInsertionSet.h" +#include "DFGLazyNode.h" +#include "DFGLivenessAnalysisPhase.h" +#include "DFGOSRAvailabilityAnalysisPhase.h" +#include "DFGPhase.h" +#include "DFGPromotedHeapLocation.h" +#include "DFGSSACalculator.h" +#include "DFGValidate.h" +#include "JSCInlines.h" + +#include <list> + +namespace JSC { namespace DFG { + +namespace { + +bool verbose = false; + +// In order to sink object cycles, we use a points-to analysis coupled +// with an escape analysis. This analysis is actually similar to an +// abstract interpreter focused on local allocations and ignoring +// everything else. +// +// We represent the local heap using two mappings: +// +// - A set of the local allocations present in the function, where +// each of those have a further mapping from +// PromotedLocationDescriptor to local allocations they must point +// to. +// +// - A "pointer" mapping from nodes to local allocations, if they must +// be equal to said local allocation and are currently live. This +// can be because the node is the actual node that created the +// allocation, or any other node that must currently point to it - +// we don't make a difference. 
+// +// The following graph is a motivation for why we separate allocations +// from pointers: +// +// Block #0 +// 0: NewObject({}) +// 1: NewObject({}) +// -: PutByOffset(@0, @1, x) +// -: PutStructure(@0, {x:0}) +// 2: GetByOffset(@0, x) +// -: Jump(#1) +// +// Block #1 +// -: Return(@2) +// +// Here, we need to remember in block #1 that @2 points to a local +// allocation with appropriate fields and structures information +// (because we should be able to place a materialization on top of +// block #1 here), even though @1 is dead. We *could* just keep @1 +// artificially alive here, but there is no real reason to do it: +// after all, by the end of block #0, @1 and @2 should be completely +// interchangeable, and there is no reason for us to artificially make +// @1 more important. +// +// An important point to consider to understand this separation is +// that we should think of the local heap as follow: we have a +// bunch of nodes that are pointers to "allocations" that live +// someplace on the heap, and those allocations can have pointers in +// between themselves as well. We shouldn't care about whatever +// names we give to the allocations ; what matters when +// comparing/merging two heaps is the isomorphism/comparison between +// the allocation graphs as seen by the nodes. +// +// For instance, in the following graph: +// +// Block #0 +// 0: NewObject({}) +// -: Branch(#1, #2) +// +// Block #1 +// 1: NewObject({}) +// -: PutByOffset(@0, @1, x) +// -: PutStructure(@0, {x:0}) +// -: Jump(#3) +// +// Block #2 +// 2: NewObject({}) +// -: PutByOffset(@2, undefined, x) +// -: PutStructure(@2, {x:0}) +// -: PutByOffset(@0, @2, x) +// -: PutStructure(@0, {x:0}) +// -: Jump(#3) +// +// Block #3 +// -: Return(@0) +// +// we should think of the heaps at tail of blocks #1 and #2 as being +// exactly the same, even though one has @0.x pointing to @1 and the +// other has @0.x pointing to @2, because in essence this should not +// be different from the graph where we hoisted @1 and @2 into a +// single allocation in block #0. We currently will not handle this +// case, because we merge allocations based on the node they are +// coming from, but this is only a technicality for the sake of +// simplicity that shouldn't hide the deeper idea outlined here. 
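// Illustrative sketch, not part of this patch: a stripped-down model of the
// allocation/pointer split described above, using hypothetical names. The real
// phase tracks much more (structures, descriptors, escape state); this only
// shows why allocations are keyed by their identifier node rather than by the
// liveness of that node.
#include <unordered_map>

struct Node; // stands in for a DFG node; only used as an identity here

struct SketchAllocation {
    // field descriptor -> identifier of the local allocation it must point to
    std::unordered_map<int, Node*> fields;
};

struct SketchLocalHeap {
    // allocation identifier -> metadata; stays alive while anything reaches it
    std::unordered_map<Node*, SketchAllocation> allocations;
    // live node -> allocation identifier it must currently point to
    std::unordered_map<Node*, Node*> pointers;

    // Killing a dead pointer (e.g. @1 at the end of block #0 above) does not kill
    // the allocation: it remains reachable through @0's field and through @2.
    void killPointer(Node* node) { pointers.erase(node); }
};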
+ +class Allocation { +public: + // We use Escaped as a special allocation kind because when we + // decide to sink an allocation, we still need to keep track of it + // once it is escaped if it still has pointers to it in order to + // replace any use of those pointers by the corresponding + // materialization + enum class Kind { Escaped, Object, Activation, Function, ArrowFunction, GeneratorFunction }; + + explicit Allocation(Node* identifier = nullptr, Kind kind = Kind::Escaped) + : m_identifier(identifier) + , m_kind(kind) + { + } + + + const HashMap<PromotedLocationDescriptor, Node*>& fields() const + { + return m_fields; + } + + Node* get(PromotedLocationDescriptor descriptor) + { + return m_fields.get(descriptor); + } + + Allocation& set(PromotedLocationDescriptor descriptor, Node* value) + { + // Pointing to anything else than an unescaped local + // allocation is represented by simply not having the + // field + if (value) + m_fields.set(descriptor, value); + else + m_fields.remove(descriptor); + return *this; + } + + void remove(PromotedLocationDescriptor descriptor) + { + set(descriptor, nullptr); + } + + bool hasStructures() const + { + switch (kind()) { + case Kind::Object: + return true; + + default: + return false; + } + } + + Allocation& setStructures(const StructureSet& structures) + { + ASSERT(hasStructures() && !structures.isEmpty()); + m_structures = structures; + return *this; + } + + Allocation& mergeStructures(const StructureSet& structures) + { + ASSERT(hasStructures() || structures.isEmpty()); + m_structures.merge(structures); + return *this; + } + + Allocation& filterStructures(const StructureSet& structures) + { + ASSERT(hasStructures()); + m_structures.filter(structures); + return *this; + } + + const StructureSet& structures() const + { + return m_structures; + } + + Node* identifier() const { return m_identifier; } + + Kind kind() const { return m_kind; } + + bool isEscapedAllocation() const + { + return kind() == Kind::Escaped; + } + + bool isObjectAllocation() const + { + return m_kind == Kind::Object; + } + + bool isActivationAllocation() const + { + return m_kind == Kind::Activation; + } + + bool isFunctionAllocation() const + { + return m_kind == Kind::Function || m_kind == Kind::ArrowFunction || m_kind == Kind::GeneratorFunction; + } + + bool operator==(const Allocation& other) const + { + return m_identifier == other.m_identifier + && m_kind == other.m_kind + && m_fields == other.m_fields + && m_structures == other.m_structures; + } + + bool operator!=(const Allocation& other) const + { + return !(*this == other); + } + + void dump(PrintStream& out) const + { + dumpInContext(out, nullptr); + } + + void dumpInContext(PrintStream& out, DumpContext* context) const + { + switch (m_kind) { + case Kind::Escaped: + out.print("Escaped"); + break; + + case Kind::Object: + out.print("Object"); + break; + + case Kind::Function: + out.print("Function"); + break; + + case Kind::ArrowFunction: + out.print("ArrowFunction"); + break; + + case Kind::GeneratorFunction: + out.print("GeneratorFunction"); + break; + + case Kind::Activation: + out.print("Activation"); + break; + } + out.print("Allocation("); + if (!m_structures.isEmpty()) + out.print(inContext(m_structures, context)); + if (!m_fields.isEmpty()) { + if (!m_structures.isEmpty()) + out.print(", "); + out.print(mapDump(m_fields, " => #", ", ")); + } + out.print(")"); + } + +private: + Node* m_identifier; // This is the actual node that created the allocation + Kind m_kind; + HashMap<PromotedLocationDescriptor, 
Node*> m_fields; + StructureSet m_structures; +}; + +class LocalHeap { +public: + Allocation& newAllocation(Node* node, Allocation::Kind kind) + { + ASSERT(!m_pointers.contains(node) && !isAllocation(node)); + m_pointers.add(node, node); + return m_allocations.set(node, Allocation(node, kind)).iterator->value; + } + + bool isAllocation(Node* identifier) const + { + return m_allocations.contains(identifier); + } + + // Note that this is fundamentally different from + // onlyLocalAllocation() below. getAllocation() takes as argument + // a node-as-identifier, that is, an allocation node. This + // allocation node doesn't have to be alive; it may only be + // pointed to by other nodes or allocation fields. + // For instance, in the following graph: + // + // Block #0 + // 0: NewObject({}) + // 1: NewObject({}) + // -: PutByOffset(@0, @1, x) + // -: PutStructure(@0, {x:0}) + // 2: GetByOffset(@0, x) + // -: Jump(#1) + // + // Block #1 + // -: Return(@2) + // + // At head of block #1, the only reachable allocation is #@1, + // which can be reached through node @2. Thus, getAllocation(#@1) + // contains the appropriate metadata for this allocation, but + // onlyLocalAllocation(@1) is null, as @1 is no longer a pointer + // to #@1 (since it is dead). Conversely, onlyLocalAllocation(@2) + // is the same as getAllocation(#@1), while getAllocation(#@2) + // does not make sense since @2 is not an allocation node. + // + // This is meant to be used when the node is already known to be + // an identifier (i.e. an allocation) - probably because it was + // found as value of a field or pointer in the current heap, or + // was the result of a call to follow(). In any other cases (such + // as when doing anything while traversing the graph), the + // appropriate function to call is probably onlyLocalAllocation. + Allocation& getAllocation(Node* identifier) + { + auto iter = m_allocations.find(identifier); + ASSERT(iter != m_allocations.end()); + return iter->value; + } + + void newPointer(Node* node, Node* identifier) + { + ASSERT(!m_allocations.contains(node) && !m_pointers.contains(node)); + ASSERT(isAllocation(identifier)); + m_pointers.add(node, identifier); + } + + // follow solves the points-to problem. Given a live node, which + // may be either an allocation itself or a heap read (e.g. a + // GetByOffset node), it returns the corresponding allocation + // node, if there is one. If the argument node is neither an + // allocation or a heap read, or may point to different nodes, + // nullptr will be returned. Note that a node that points to + // different nodes can never point to an unescaped local + // allocation. + Node* follow(Node* node) const + { + auto iter = m_pointers.find(node); + ASSERT(iter == m_pointers.end() || m_allocations.contains(iter->value)); + return iter == m_pointers.end() ? nullptr : iter->value; + } + + Node* follow(PromotedHeapLocation location) const + { + const Allocation& base = m_allocations.find(location.base())->value; + auto iter = base.fields().find(location.descriptor()); + + if (iter == base.fields().end()) + return nullptr; + + return iter->value; + } + + // onlyLocalAllocation find the corresponding allocation metadata + // for any live node. onlyLocalAllocation(node) is essentially + // getAllocation(follow(node)), with appropriate null handling. 
+ Allocation* onlyLocalAllocation(Node* node) + { + Node* identifier = follow(node); + if (!identifier) + return nullptr; + + return &getAllocation(identifier); + } + + Allocation* onlyLocalAllocation(PromotedHeapLocation location) + { + Node* identifier = follow(location); + if (!identifier) + return nullptr; + + return &getAllocation(identifier); + } + + // This allows us to store the escapees only when necessary. If + // set, the current escapees can be retrieved at any time using + // takeEscapees(), which will clear the cached set of escapees; + // otherwise the heap won't remember escaping allocations. + void setWantEscapees() + { + m_wantEscapees = true; + } + + HashMap<Node*, Allocation> takeEscapees() + { + return WTFMove(m_escapees); + } + + void escape(Node* node) + { + Node* identifier = follow(node); + if (!identifier) + return; + + escapeAllocation(identifier); + } + + void merge(const LocalHeap& other) + { + assertIsValid(); + other.assertIsValid(); + ASSERT(!m_wantEscapees); + + if (!reached()) { + ASSERT(other.reached()); + *this = other; + return; + } + + HashSet<Node*> toEscape; + + for (auto& allocationEntry : other.m_allocations) + m_allocations.add(allocationEntry.key, allocationEntry.value); + for (auto& allocationEntry : m_allocations) { + auto allocationIter = other.m_allocations.find(allocationEntry.key); + + // If we have it and they don't, it died for them but we + // are keeping it alive from another field somewhere. + // There is nothing to do - we will be escaped + // automatically when we handle that other field. + // This will also happen for allocation that we have and + // they don't, and all of those will get pruned. + if (allocationIter == other.m_allocations.end()) + continue; + + if (allocationEntry.value.kind() != allocationIter->value.kind()) { + toEscape.add(allocationEntry.key); + for (const auto& fieldEntry : allocationIter->value.fields()) + toEscape.add(fieldEntry.value); + } else { + mergePointerSets( + allocationEntry.value.fields(), allocationIter->value.fields(), + [&] (Node* identifier) { + toEscape.add(identifier); + }, + [&] (PromotedLocationDescriptor field) { + allocationEntry.value.remove(field); + }); + allocationEntry.value.mergeStructures(allocationIter->value.structures()); + } + } + + mergePointerSets(m_pointers, other.m_pointers, + [&] (Node* identifier) { + toEscape.add(identifier); + }, + [&] (Node* field) { + m_pointers.remove(field); + }); + + for (Node* identifier : toEscape) + escapeAllocation(identifier); + + if (!ASSERT_DISABLED) { + for (const auto& entry : m_allocations) + ASSERT_UNUSED(entry, entry.value.isEscapedAllocation() || other.m_allocations.contains(entry.key)); + } + + // If there is no remaining pointer to an allocation, we can + // remove it. This should only happen for escaped allocations, + // because we only merge liveness-pruned heaps in the first + // place. 
+ prune(); + + assertIsValid(); + } + + void pruneByLiveness(const HashSet<Node*>& live) + { + Vector<Node*> toRemove; + for (const auto& entry : m_pointers) { + if (!live.contains(entry.key)) + toRemove.append(entry.key); + } + for (Node* node : toRemove) + m_pointers.remove(node); + + prune(); + } + + void assertIsValid() const + { + if (ASSERT_DISABLED) + return; + + // Pointers should point to an actual allocation + for (const auto& entry : m_pointers) { + ASSERT_UNUSED(entry, entry.value); + ASSERT(m_allocations.contains(entry.value)); + } + + for (const auto& allocationEntry : m_allocations) { + // Fields should point to an actual allocation + for (const auto& fieldEntry : allocationEntry.value.fields()) { + ASSERT_UNUSED(fieldEntry, fieldEntry.value); + ASSERT(m_allocations.contains(fieldEntry.value)); + } + } + } + + bool operator==(const LocalHeap& other) const + { + assertIsValid(); + other.assertIsValid(); + return m_allocations == other.m_allocations + && m_pointers == other.m_pointers; + } + + bool operator!=(const LocalHeap& other) const + { + return !(*this == other); + } + + const HashMap<Node*, Allocation>& allocations() const + { + return m_allocations; + } + + const HashMap<Node*, Node*>& pointers() const + { + return m_pointers; + } + + void dump(PrintStream& out) const + { + out.print(" Allocations:\n"); + for (const auto& entry : m_allocations) + out.print(" #", entry.key, ": ", entry.value, "\n"); + out.print(" Pointers:\n"); + for (const auto& entry : m_pointers) + out.print(" ", entry.key, " => #", entry.value, "\n"); + } + + bool reached() const + { + return m_reached; + } + + void setReached() + { + m_reached = true; + } + +private: + // When we merge two heaps, we escape all fields of allocations, + // unless they point to the same thing in both heaps. + // The reason for this is that it allows us not to do extra work + // for diamond graphs where we would otherwise have to check + // whether we have a single definition or not, which would be + // cumbersome. 
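// (A concrete instance of the rule above, illustrative only: if at a merge point
// one predecessor's heap has @0.x pointing to @1 and the other has @0.x pointing
// to @3, the merged heap escapes both @1 and @3 and drops the x entry for @0,
// rather than trying to prove that the two targets are interchangeable.)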
+ // + // Note that we should try to unify nodes even when they are not + // from the same allocation; for instance we should be able to + // completely eliminate all allocations from the following graph: + // + // Block #0 + // 0: NewObject({}) + // -: Branch(#1, #2) + // + // Block #1 + // 1: NewObject({}) + // -: PutByOffset(@1, "left", val) + // -: PutStructure(@1, {val:0}) + // -: PutByOffset(@0, @1, x) + // -: PutStructure(@0, {x:0}) + // -: Jump(#3) + // + // Block #2 + // 2: NewObject({}) + // -: PutByOffset(@2, "right", val) + // -: PutStructure(@2, {val:0}) + // -: PutByOffset(@0, @2, x) + // -: PutStructure(@0, {x:0}) + // -: Jump(#3) + // + // Block #3: + // 3: GetByOffset(@0, x) + // 4: GetByOffset(@3, val) + // -: Return(@4) + template<typename Key, typename EscapeFunctor, typename RemoveFunctor> + void mergePointerSets( + const HashMap<Key, Node*>& my, const HashMap<Key, Node*>& their, + const EscapeFunctor& escape, const RemoveFunctor& remove) + { + Vector<Key> toRemove; + for (const auto& entry : my) { + auto iter = their.find(entry.key); + if (iter == their.end()) { + toRemove.append(entry.key); + escape(entry.value); + } else if (iter->value != entry.value) { + toRemove.append(entry.key); + escape(entry.value); + escape(iter->value); + } + } + for (const auto& entry : their) { + if (my.contains(entry.key)) + continue; + escape(entry.value); + } + for (Key key : toRemove) + remove(key); + } + + void escapeAllocation(Node* identifier) + { + Allocation& allocation = getAllocation(identifier); + if (allocation.isEscapedAllocation()) + return; + + Allocation unescaped = WTFMove(allocation); + allocation = Allocation(unescaped.identifier(), Allocation::Kind::Escaped); + + for (const auto& entry : unescaped.fields()) + escapeAllocation(entry.value); + + if (m_wantEscapees) + m_escapees.add(unescaped.identifier(), WTFMove(unescaped)); + } + + void prune() + { + HashSet<Node*> reachable; + for (const auto& entry : m_pointers) + reachable.add(entry.value); + + // Repeatedly mark as reachable allocations in fields of other + // reachable allocations + { + Vector<Node*> worklist; + worklist.appendRange(reachable.begin(), reachable.end()); + + while (!worklist.isEmpty()) { + Node* identifier = worklist.takeLast(); + Allocation& allocation = m_allocations.find(identifier)->value; + for (const auto& entry : allocation.fields()) { + if (reachable.add(entry.value).isNewEntry) + worklist.append(entry.value); + } + } + } + + // Remove unreachable allocations + { + Vector<Node*> toRemove; + for (const auto& entry : m_allocations) { + if (!reachable.contains(entry.key)) + toRemove.append(entry.key); + } + for (Node* identifier : toRemove) + m_allocations.remove(identifier); + } + } + + bool m_reached = false; + HashMap<Node*, Node*> m_pointers; + HashMap<Node*, Allocation> m_allocations; + + bool m_wantEscapees = false; + HashMap<Node*, Allocation> m_escapees; +}; + +class ObjectAllocationSinkingPhase : public Phase { +public: + ObjectAllocationSinkingPhase(Graph& graph) + : Phase(graph, "object allocation elimination") + , m_pointerSSA(graph) + , m_allocationSSA(graph) + , m_insertionSet(graph) + { + } + + bool run() + { + ASSERT(m_graph.m_form == SSA); + ASSERT(m_graph.m_fixpointState == FixpointNotConverged); + + if (!performSinking()) + return false; + + if (verbose) { + dataLog("Graph after elimination:\n"); + m_graph.dump(); + } + + return true; + } + +private: + bool performSinking() + { + m_graph.computeRefCounts(); + m_graph.initializeNodeOwners(); + m_graph.ensureDominators(); + 
performLivenessAnalysis(m_graph);
+ performOSRAvailabilityAnalysis(m_graph);
+ m_combinedLiveness = CombinedLiveness(m_graph);
+
+ CString graphBeforeSinking;
+ if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
+ StringPrintStream out;
+ m_graph.dump(out);
+ graphBeforeSinking = out.toCString();
+ }
+
+ if (verbose) {
+ dataLog("Graph before elimination:\n");
+ m_graph.dump();
+ }
+
+ performAnalysis();
+
+ if (!determineSinkCandidates())
+ return false;
+
+ if (verbose) {
+ for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
+ dataLog("Heap at head of ", *block, ": \n", m_heapAtHead[block]);
+ dataLog("Heap at tail of ", *block, ": \n", m_heapAtTail[block]);
+ }
+ }
+
+ promoteLocalHeap();
+
+ if (Options::validateGraphAtEachPhase())
+ DFG::validate(m_graph, DumpGraph, graphBeforeSinking);
+ return true;
+ }
+
+ void performAnalysis()
+ {
+ m_heapAtHead = BlockMap<LocalHeap>(m_graph);
+ m_heapAtTail = BlockMap<LocalHeap>(m_graph);
+
+ bool changed;
+ do {
+ if (verbose)
+ dataLog("Doing iteration of escape analysis.\n");
+ changed = false;
+
+ for (BasicBlock* block : m_graph.blocksInPreOrder()) {
+ m_heapAtHead[block].setReached();
+ m_heap = m_heapAtHead[block];
+
+ for (Node* node : *block) {
+ handleNode(
+ node,
+ [] (PromotedHeapLocation, LazyNode) { },
+ [&] (PromotedHeapLocation) -> Node* {
+ return nullptr;
+ });
+ }
+
+ if (m_heap == m_heapAtTail[block])
+ continue;
+
+ m_heapAtTail[block] = m_heap;
+ changed = true;
+
+ m_heap.assertIsValid();
+
+ // We keep only pointers that are live, and only
+ // allocations that are either live, pointed to by a
+ // live pointer, or (recursively) stored in a field of
+ // a live allocation.
+ //
+ // This means we can accidentally leak non-dominating
+ // nodes into the successor. However, due to the
+ // non-dominance property, we are guaranteed that the
+ // successor has at least one predecessor that is not
+ // dominated either: this means any reference to a
+ // non-dominating allocation in the successor will
+ // trigger an escape and get pruned during the merge.
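+ //
+ // As a purely illustrative sketch (block and node numbers here are
+ // made up): suppose block #1 does
+ //
+ //    1: NewObject({})
+ //    -: PutByOffset(@0, @1, f)
+ //
+ // where @0 is an allocation that is live into block #3, and block #3
+ // is also reachable through block #2, which never sees @1. Then @1 can
+ // survive the pruning below (it is reachable from the live allocation
+ // @0) even though it does not dominate block #3; but when block #3
+ // merges its two predecessors, the field f is either missing or points
+ // to something else on the other edge, so @1 gets escaped (and then
+ // pruned) during LocalHeap::merge(), exactly as described above.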
+ m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]); + + for (BasicBlock* successorBlock : block->successors()) + m_heapAtHead[successorBlock].merge(m_heap); + } + } while (changed); + } + + template<typename WriteFunctor, typename ResolveFunctor> + void handleNode( + Node* node, + const WriteFunctor& heapWrite, + const ResolveFunctor& heapResolve) + { + m_heap.assertIsValid(); + ASSERT(m_heap.takeEscapees().isEmpty()); + + Allocation* target = nullptr; + HashMap<PromotedLocationDescriptor, LazyNode> writes; + PromotedLocationDescriptor exactRead; + + switch (node->op()) { + case NewObject: + target = &m_heap.newAllocation(node, Allocation::Kind::Object); + target->setStructures(node->structure()); + writes.add( + StructurePLoc, LazyNode(m_graph.freeze(node->structure()))); + break; + + case NewFunction: + case NewArrowFunction: + case NewGeneratorFunction: { + if (isStillValid(node->castOperand<FunctionExecutable*>()->singletonFunction())) { + m_heap.escape(node->child1().node()); + break; + } + + if (node->op() == NewGeneratorFunction) + target = &m_heap.newAllocation(node, Allocation::Kind::GeneratorFunction); + else if (node->op() == NewArrowFunction) + target = &m_heap.newAllocation(node, Allocation::Kind::ArrowFunction); + else + target = &m_heap.newAllocation(node, Allocation::Kind::Function); + writes.add(FunctionExecutablePLoc, LazyNode(node->cellOperand())); + writes.add(FunctionActivationPLoc, LazyNode(node->child1().node())); + break; + } + + case CreateActivation: { + if (isStillValid(node->castOperand<SymbolTable*>()->singletonScope())) { + m_heap.escape(node->child1().node()); + break; + } + target = &m_heap.newAllocation(node, Allocation::Kind::Activation); + writes.add(ActivationSymbolTablePLoc, LazyNode(node->cellOperand())); + writes.add(ActivationScopePLoc, LazyNode(node->child1().node())); + { + SymbolTable* symbolTable = node->castOperand<SymbolTable*>(); + ConcurrentJITLocker locker(symbolTable->m_lock); + LazyNode initialValue(m_graph.freeze(node->initializationValueForActivation())); + for (auto iter = symbolTable->begin(locker), end = symbolTable->end(locker); iter != end; ++iter) { + writes.add( + PromotedLocationDescriptor(ClosureVarPLoc, iter->value.scopeOffset().offset()), + initialValue); + } + } + break; + } + + case PutStructure: + target = m_heap.onlyLocalAllocation(node->child1().node()); + if (target && target->isObjectAllocation()) { + writes.add(StructurePLoc, LazyNode(m_graph.freeze(JSValue(node->transition()->next)))); + target->setStructures(node->transition()->next); + } else + m_heap.escape(node->child1().node()); + break; + + case CheckStructure: { + Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node()); + if (allocation && allocation->isObjectAllocation()) { + allocation->filterStructures(node->structureSet()); + if (Node* value = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc))) + node->convertToCheckStructureImmediate(value); + } else + m_heap.escape(node->child1().node()); + break; + } + + case GetByOffset: + case GetGetterSetterByOffset: + target = m_heap.onlyLocalAllocation(node->child2().node()); + if (target && target->isObjectAllocation()) { + unsigned identifierNumber = node->storageAccessData().identifierNumber; + exactRead = PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber); + } else { + m_heap.escape(node->child1().node()); + m_heap.escape(node->child2().node()); + } + break; + + case MultiGetByOffset: { + Allocation* allocation = 
m_heap.onlyLocalAllocation(node->child1().node()); + if (allocation && allocation->isObjectAllocation()) { + MultiGetByOffsetData& data = node->multiGetByOffsetData(); + StructureSet validStructures; + bool hasInvalidStructures = false; + for (const auto& multiGetByOffsetCase : data.cases) { + if (!allocation->structures().overlaps(multiGetByOffsetCase.set())) + continue; + + switch (multiGetByOffsetCase.method().kind()) { + case GetByOffsetMethod::LoadFromPrototype: // We need to escape those + case GetByOffsetMethod::Constant: // We don't really have a way of expressing this + hasInvalidStructures = true; + break; + + case GetByOffsetMethod::Load: // We're good + validStructures.merge(multiGetByOffsetCase.set()); + break; + + default: + RELEASE_ASSERT_NOT_REACHED(); + } + } + if (hasInvalidStructures) { + m_heap.escape(node->child1().node()); + break; + } + unsigned identifierNumber = data.identifierNumber; + PromotedHeapLocation location(NamedPropertyPLoc, allocation->identifier(), identifierNumber); + if (Node* value = heapResolve(location)) { + if (allocation->structures().isSubsetOf(validStructures)) + node->replaceWith(value); + else { + Node* structure = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc)); + ASSERT(structure); + allocation->filterStructures(validStructures); + node->convertToCheckStructure(m_graph.addStructureSet(allocation->structures())); + node->convertToCheckStructureImmediate(structure); + node->setReplacement(value); + } + } else if (!allocation->structures().isSubsetOf(validStructures)) { + // Even though we don't need the result here, we still need + // to make the call to tell our caller that we could need + // the StructurePLoc. + // The reason for this is that when we decide not to sink a + // node, we will still lower any read to its fields before + // it escapes (which are usually reads across a function + // call that DFGClobberize can't handle) - but we only do + // this for PromotedHeapLocations that we have seen read + // during the analysis! 
+ heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc)); + allocation->filterStructures(validStructures); + } + Node* identifier = allocation->get(location.descriptor()); + if (identifier) + m_heap.newPointer(node, identifier); + } else + m_heap.escape(node->child1().node()); + break; + } + + case PutByOffset: + target = m_heap.onlyLocalAllocation(node->child2().node()); + if (target && target->isObjectAllocation()) { + unsigned identifierNumber = node->storageAccessData().identifierNumber; + writes.add( + PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber), + LazyNode(node->child3().node())); + } else { + m_heap.escape(node->child1().node()); + m_heap.escape(node->child2().node()); + m_heap.escape(node->child3().node()); + } + break; + + case GetClosureVar: + target = m_heap.onlyLocalAllocation(node->child1().node()); + if (target && target->isActivationAllocation()) { + exactRead = + PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset()); + } else + m_heap.escape(node->child1().node()); + break; + + case PutClosureVar: + target = m_heap.onlyLocalAllocation(node->child1().node()); + if (target && target->isActivationAllocation()) { + writes.add( + PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset()), + LazyNode(node->child2().node())); + } else { + m_heap.escape(node->child1().node()); + m_heap.escape(node->child2().node()); + } + break; + + case SkipScope: + target = m_heap.onlyLocalAllocation(node->child1().node()); + if (target && target->isActivationAllocation()) + exactRead = ActivationScopePLoc; + else + m_heap.escape(node->child1().node()); + break; + + case GetExecutable: + target = m_heap.onlyLocalAllocation(node->child1().node()); + if (target && target->isFunctionAllocation()) + exactRead = FunctionExecutablePLoc; + else + m_heap.escape(node->child1().node()); + break; + + case GetScope: + target = m_heap.onlyLocalAllocation(node->child1().node()); + if (target && target->isFunctionAllocation()) + exactRead = FunctionActivationPLoc; + else + m_heap.escape(node->child1().node()); + break; + + case Check: + m_graph.doToChildren( + node, + [&] (Edge edge) { + if (edge.willNotHaveCheck()) + return; + + if (alreadyChecked(edge.useKind(), SpecObject)) + return; + + m_heap.escape(edge.node()); + }); + break; + + case MovHint: + case PutHint: + // Handled by OSR availability analysis + break; + + default: + m_graph.doToChildren( + node, + [&] (Edge edge) { + m_heap.escape(edge.node()); + }); + break; + } + + if (exactRead) { + ASSERT(target); + ASSERT(writes.isEmpty()); + if (Node* value = heapResolve(PromotedHeapLocation(target->identifier(), exactRead))) { + ASSERT(!value->replacement()); + node->replaceWith(value); + } + Node* identifier = target->get(exactRead); + if (identifier) + m_heap.newPointer(node, identifier); + } + + for (auto entry : writes) { + ASSERT(target); + if (entry.value.isNode()) + target->set(entry.key, m_heap.follow(entry.value.asNode())); + else + target->remove(entry.key); + heapWrite(PromotedHeapLocation(target->identifier(), entry.key), entry.value); + } + + m_heap.assertIsValid(); + } + + bool determineSinkCandidates() + { + m_sinkCandidates.clear(); + m_materializationToEscapee.clear(); + m_materializationSiteToMaterializations.clear(); + m_materializationSiteToRecoveries.clear(); + + // Logically we wish to consider every allocation and sink + // it. However, it is probably not profitable to sink an + // allocation that will always escape. 
So, we only sink an + // allocation if one of the following is true: + // + // 1) There exists a basic block with only backwards outgoing + // edges (or no outgoing edges) in which the node wasn't + // materialized. This is meant to catch + // effectively-infinite loops in which we don't need to + // have allocated the object. + // + // 2) There exists a basic block at the tail of which the node + // is dead and not materialized. + // + // 3) The sum of execution counts of the materializations is + // less than the sum of execution counts of the original + // node. + // + // We currently implement only rule #2. + // FIXME: Implement the two other rules. + // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1) + // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3) + // + // However, these rules allow for a sunk object to be put into + // a non-sunk one, which we don't support. We could solve this + // by supporting PutHints on local allocations, making these + // objects only partially correct, and we would need to adapt + // the OSR availability analysis and OSR exit to handle + // this. This would be totally doable, but would create a + // super rare, and thus bug-prone, code path. + // So, instead, we need to implement one of the following + // closure rules: + // + // 1) If we put a sink candidate into a local allocation that + // is not a sink candidate, change our minds and don't + // actually sink the sink candidate. + // + // 2) If we put a sink candidate into a local allocation, that + // allocation becomes a sink candidate as well. + // + // We currently choose to implement closure rule #2. + HashMap<Node*, Vector<Node*>> dependencies; + bool hasUnescapedReads = false; + for (BasicBlock* block : m_graph.blocksInPreOrder()) { + m_heap = m_heapAtHead[block]; + + for (Node* node : *block) { + handleNode( + node, + [&] (PromotedHeapLocation location, LazyNode value) { + if (!value.isNode()) + return; + + Allocation* allocation = m_heap.onlyLocalAllocation(value.asNode()); + if (allocation && !allocation->isEscapedAllocation()) + dependencies.add(allocation->identifier(), Vector<Node*>()).iterator->value.append(location.base()); + }, + [&] (PromotedHeapLocation) -> Node* { + hasUnescapedReads = true; + return nullptr; + }); + } + + // The sink candidates are initially the unescaped + // allocations dying at tail of blocks + HashSet<Node*> allocations; + for (const auto& entry : m_heap.allocations()) { + if (!entry.value.isEscapedAllocation()) + allocations.add(entry.key); + } + + m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]); + + for (Node* identifier : allocations) { + if (!m_heap.isAllocation(identifier)) + m_sinkCandidates.add(identifier); + } + } + + // Ensure that the set of sink candidates is closed for put operations + Vector<Node*> worklist; + worklist.appendRange(m_sinkCandidates.begin(), m_sinkCandidates.end()); + + while (!worklist.isEmpty()) { + for (Node* identifier : dependencies.get(worklist.takeLast())) { + if (m_sinkCandidates.add(identifier).isNewEntry) + worklist.append(identifier); + } + } + + if (m_sinkCandidates.isEmpty()) + return hasUnescapedReads; + + if (verbose) + dataLog("Candidates: ", listDump(m_sinkCandidates), "\n"); + + // Create the materialization nodes + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + m_heap = m_heapAtHead[block]; + m_heap.setWantEscapees(); + + for (Node* node : *block) { + handleNode( + node, + [] (PromotedHeapLocation, LazyNode) { }, + [] (PromotedHeapLocation) -> Node* { + return nullptr; + 
});
+ auto escapees = m_heap.takeEscapees();
+ if (!escapees.isEmpty())
+ placeMaterializations(escapees, node);
+ }
+
+ m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);
+
+ {
+ HashMap<Node*, Allocation> escapingOnEdge;
+ for (const auto& entry : m_heap.allocations()) {
+ if (entry.value.isEscapedAllocation())
+ continue;
+
+ bool mustEscape = false;
+ for (BasicBlock* successorBlock : block->successors()) {
+ if (!m_heapAtHead[successorBlock].isAllocation(entry.key)
+ || m_heapAtHead[successorBlock].getAllocation(entry.key).isEscapedAllocation())
+ mustEscape = true;
+ }
+
+ if (mustEscape)
+ escapingOnEdge.add(entry.key, entry.value);
+ }
+ placeMaterializations(WTFMove(escapingOnEdge), block->terminal());
+ }
+ }
+
+ return hasUnescapedReads || !m_sinkCandidates.isEmpty();
+ }
+
+ void placeMaterializations(HashMap<Node*, Allocation> escapees, Node* where)
+ {
+ // We don't create materializations if the escapee is not a
+ // sink candidate
+ Vector<Node*> toRemove;
+ for (const auto& entry : escapees) {
+ if (!m_sinkCandidates.contains(entry.key))
+ toRemove.append(entry.key);
+ }
+ for (Node* identifier : toRemove)
+ escapees.remove(identifier);
+
+ if (escapees.isEmpty())
+ return;
+
+ // First collect the hints that will be needed when the node
+ // we materialize is still stored into other unescaped sink candidates
+ Vector<PromotedHeapLocation> hints;
+ for (const auto& entry : m_heap.allocations()) {
+ if (escapees.contains(entry.key))
+ continue;
+
+ for (const auto& field : entry.value.fields()) {
+ ASSERT(m_sinkCandidates.contains(entry.key) || !escapees.contains(field.value));
+ if (escapees.contains(field.value) && !field.key.neededForMaterialization())
+ hints.append(PromotedHeapLocation(entry.key, field.key));
+ }
+ }
+
+ // Now we need to order the materialization. Any order is
+ // valid (as long as we materialize a node first if it is
+ // needed for the materialization of another node, e.g. a
+ // function's activation must be materialized before the
+ // function itself), but we want to try minimizing the number
+ // of times we have to place Puts to close cycles after a
+ // materialization. In other words, we are trying to find the
+ // minimum number of materializations to remove from the
+ // materialization graph to make it a DAG, known as the
+ // feedback vertex set problem. Unfortunately, this is an
+ // NP-hard problem, which we don't want to solve exactly.
+ //
+ // Instead, we use a simple greedy procedure that proceeds as
+ // follows:
+ //  - While there is at least one node with no outgoing edge
+ //    amongst the remaining materializations, materialize it
+ //    first
+ //
+ //  - Similarly, while there is at least one node with no
+ //    incoming edge amongst the remaining materializations,
+ //    materialize it last.
+ //
+ //  - When both previous conditions are false, we have an
+ //    actual cycle, and we need to pick a node to
+ //    materialize. We try greedily to remove the "pressure" on
+ //    the remaining nodes by choosing the node with maximum
+ //    |incoming edges| * |outgoing edges| as a measure of how
+ //    "central" to the graph it is. We materialize it first,
+ //    so that all the recoveries will be Puts of things into
+ //    it (rather than Puts of the materialization into other
+ //    objects), which means we will have a single
+ //    StoreBarrier.
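+ //
+ // As a purely illustrative sketch (the node numbers are made up),
+ // suppose two escaping sink candidates point at each other through
+ // ordinary named properties:
+ //
+ //    1: NewObject({})
+ //    2: NewObject({})
+ //    -: PutByOffset(@1, @2, x)
+ //    -: PutByOffset(@2, @1, y)
+ //
+ // Neither allocation has an empty set of incoming or outgoing edges,
+ // so we fall into the cycle-breaking case below and pick one of them
+ // (say @1) to materialize first. Once @1 is removed from the
+ // dependency graphs, @2 has no outgoing edges left and is
+ // materialized next. The field y of @2 can then reference @1's
+ // materialization directly, while the field x of @1 is closed
+ // afterwards with a recovery, i.e. a PutByOffset storing @2's
+ // materialization into @1.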
+ + + // Compute dependencies between materializations + HashMap<Node*, HashSet<Node*>> dependencies; + HashMap<Node*, HashSet<Node*>> reverseDependencies; + HashMap<Node*, HashSet<Node*>> forMaterialization; + for (const auto& entry : escapees) { + auto& myDependencies = dependencies.add(entry.key, HashSet<Node*>()).iterator->value; + auto& myDependenciesForMaterialization = forMaterialization.add(entry.key, HashSet<Node*>()).iterator->value; + reverseDependencies.add(entry.key, HashSet<Node*>()); + for (const auto& field : entry.value.fields()) { + if (escapees.contains(field.value) && field.value != entry.key) { + myDependencies.add(field.value); + reverseDependencies.add(field.value, HashSet<Node*>()).iterator->value.add(entry.key); + if (field.key.neededForMaterialization()) + myDependenciesForMaterialization.add(field.value); + } + } + } + + // Helper function to update the materialized set and the + // dependencies + HashSet<Node*> materialized; + auto materialize = [&] (Node* identifier) { + materialized.add(identifier); + for (Node* dep : dependencies.get(identifier)) + reverseDependencies.find(dep)->value.remove(identifier); + for (Node* rdep : reverseDependencies.get(identifier)) { + dependencies.find(rdep)->value.remove(identifier); + forMaterialization.find(rdep)->value.remove(identifier); + } + dependencies.remove(identifier); + reverseDependencies.remove(identifier); + forMaterialization.remove(identifier); + }; + + // Nodes without remaining unmaterialized fields will be + // materialized first - amongst the remaining unmaterialized + // nodes + std::list<Allocation> toMaterialize; + auto firstPos = toMaterialize.begin(); + auto materializeFirst = [&] (Allocation&& allocation) { + materialize(allocation.identifier()); + // We need to insert *after* the current position + if (firstPos != toMaterialize.end()) + ++firstPos; + firstPos = toMaterialize.insert(firstPos, WTFMove(allocation)); + }; + + // Nodes that no other unmaterialized node points to will be + // materialized last - amongst the remaining unmaterialized + // nodes + auto lastPos = toMaterialize.end(); + auto materializeLast = [&] (Allocation&& allocation) { + materialize(allocation.identifier()); + lastPos = toMaterialize.insert(lastPos, WTFMove(allocation)); + }; + + // These are the promoted locations that contains some of the + // allocations we are currently escaping. If they are a location on + // some other allocation we are currently materializing, we will need + // to "recover" their value with a real put once the corresponding + // allocation is materialized; if they are a location on some other + // not-yet-materialized allocation, we will need a PutHint. + Vector<PromotedHeapLocation> toRecover; + + // This loop does the actual cycle breaking + while (!escapees.isEmpty()) { + materialized.clear(); + + // Materialize nodes that won't require recoveries if we can + for (auto& entry : escapees) { + if (!forMaterialization.find(entry.key)->value.isEmpty()) + continue; + + if (dependencies.find(entry.key)->value.isEmpty()) { + materializeFirst(WTFMove(entry.value)); + continue; + } + + if (reverseDependencies.find(entry.key)->value.isEmpty()) { + materializeLast(WTFMove(entry.value)); + continue; + } + } + + // We reach this only if there is an actual cycle that needs + // breaking. Because we do not want to solve a NP-hard problem + // here, we just heuristically pick a node and materialize it + // first. 
+ if (materialized.isEmpty()) { + uint64_t maxEvaluation = 0; + Allocation* bestAllocation; + for (auto& entry : escapees) { + if (!forMaterialization.find(entry.key)->value.isEmpty()) + continue; + + uint64_t evaluation = + static_cast<uint64_t>(dependencies.get(entry.key).size()) * reverseDependencies.get(entry.key).size(); + if (evaluation > maxEvaluation) { + maxEvaluation = evaluation; + bestAllocation = &entry.value; + } + } + RELEASE_ASSERT(maxEvaluation > 0); + + materializeFirst(WTFMove(*bestAllocation)); + } + RELEASE_ASSERT(!materialized.isEmpty()); + + for (Node* identifier : materialized) + escapees.remove(identifier); + } + + materialized.clear(); + + HashSet<Node*> escaped; + for (const Allocation& allocation : toMaterialize) + escaped.add(allocation.identifier()); + for (const Allocation& allocation : toMaterialize) { + for (const auto& field : allocation.fields()) { + if (escaped.contains(field.value) && !materialized.contains(field.value)) + toRecover.append(PromotedHeapLocation(allocation.identifier(), field.key)); + } + materialized.add(allocation.identifier()); + } + + Vector<Node*>& materializations = m_materializationSiteToMaterializations.add( + where, Vector<Node*>()).iterator->value; + + for (const Allocation& allocation : toMaterialize) { + Node* materialization = createMaterialization(allocation, where); + materializations.append(materialization); + m_materializationToEscapee.add(materialization, allocation.identifier()); + } + + if (!toRecover.isEmpty()) { + m_materializationSiteToRecoveries.add( + where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(toRecover); + } + + // The hints need to be after the "real" recoveries so that we + // don't hint not-yet-complete objects + if (!hints.isEmpty()) { + m_materializationSiteToRecoveries.add( + where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(hints); + } + } + + Node* createMaterialization(const Allocation& allocation, Node* where) + { + // FIXME: This is the only place where we actually use the + // fact that an allocation's identifier is indeed the node + // that created the allocation. + switch (allocation.kind()) { + case Allocation::Kind::Object: { + ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add(); + StructureSet* set = m_graph.addStructureSet(allocation.structures()); + + return m_graph.addNode( + allocation.identifier()->prediction(), Node::VarArg, MaterializeNewObject, + where->origin.withSemantic(allocation.identifier()->origin.semantic), + OpInfo(set), OpInfo(data), 0, 0); + } + + case Allocation::Kind::ArrowFunction: + case Allocation::Kind::GeneratorFunction: + case Allocation::Kind::Function: { + FrozenValue* executable = allocation.identifier()->cellOperand(); + + NodeType nodeType = + allocation.kind() == Allocation::Kind::ArrowFunction ? NewArrowFunction : + allocation.kind() == Allocation::Kind::GeneratorFunction ? 
NewGeneratorFunction : NewFunction; + + return m_graph.addNode( + allocation.identifier()->prediction(), nodeType, + where->origin.withSemantic( + allocation.identifier()->origin.semantic), + OpInfo(executable)); + break; + } + + case Allocation::Kind::Activation: { + ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add(); + FrozenValue* symbolTable = allocation.identifier()->cellOperand(); + + return m_graph.addNode( + allocation.identifier()->prediction(), Node::VarArg, MaterializeCreateActivation, + where->origin.withSemantic( + allocation.identifier()->origin.semantic), + OpInfo(symbolTable), OpInfo(data), 0, 0); + } + + default: + DFG_CRASH(m_graph, allocation.identifier(), "Bad allocation kind"); + } + } + + void promoteLocalHeap() + { + // Collect the set of heap locations that we will be operating + // over. + HashSet<PromotedHeapLocation> locations; + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + m_heap = m_heapAtHead[block]; + + for (Node* node : *block) { + handleNode( + node, + [&] (PromotedHeapLocation location, LazyNode) { + // If the location is not on a sink candidate, + // we only sink it if it is read + if (m_sinkCandidates.contains(location.base())) + locations.add(location); + }, + [&] (PromotedHeapLocation location) -> Node* { + locations.add(location); + return nullptr; + }); + } + } + + // Figure out which locations belong to which allocations. + m_locationsForAllocation.clear(); + for (PromotedHeapLocation location : locations) { + auto result = m_locationsForAllocation.add( + location.base(), + Vector<PromotedHeapLocation>()); + ASSERT(!result.iterator->value.contains(location)); + result.iterator->value.append(location); + } + + m_pointerSSA.reset(); + m_allocationSSA.reset(); + + // Collect the set of "variables" that we will be sinking. 
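+ //
+ // As a purely illustrative sketch: if a sink candidate is
+ //
+ //    1: NewObject({})
+ //    -: PutByOffset(@1, val, p)
+ //
+ // then @1 itself gets a variable in m_allocationSSA, tracking which
+ // materialization of @1 is current, while the promoted heap location
+ // (@1, NamedPropertyPLoc:p) gets a variable in m_pointerSSA, tracking
+ // the current value stored in that field.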
+ m_locationToVariable.clear(); + m_nodeToVariable.clear(); + Vector<Node*> indexToNode; + Vector<PromotedHeapLocation> indexToLocation; + + for (Node* index : m_sinkCandidates) { + SSACalculator::Variable* variable = m_allocationSSA.newVariable(); + m_nodeToVariable.add(index, variable); + ASSERT(indexToNode.size() == variable->index()); + indexToNode.append(index); + } + + for (PromotedHeapLocation location : locations) { + SSACalculator::Variable* variable = m_pointerSSA.newVariable(); + m_locationToVariable.add(location, variable); + ASSERT(indexToLocation.size() == variable->index()); + indexToLocation.append(location); + } + + // We insert all required constants at top of block 0 so that + // they are inserted only once and we don't clutter the graph + // with useless constants everywhere + HashMap<FrozenValue*, Node*> lazyMapping; + if (!m_bottom) + m_bottom = m_insertionSet.insertConstant(0, m_graph.block(0)->at(0)->origin, jsNumber(1927)); + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + m_heap = m_heapAtHead[block]; + + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + Node* node = block->at(nodeIndex); + + // Some named properties can be added conditionally, + // and that would necessitate bottoms + for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) { + if (location.kind() != NamedPropertyPLoc) + continue; + + SSACalculator::Variable* variable = m_locationToVariable.get(location); + m_pointerSSA.newDef(variable, block, m_bottom); + } + + for (Node* materialization : m_materializationSiteToMaterializations.get(node)) { + Node* escapee = m_materializationToEscapee.get(materialization); + m_allocationSSA.newDef(m_nodeToVariable.get(escapee), block, materialization); + } + + if (m_sinkCandidates.contains(node)) + m_allocationSSA.newDef(m_nodeToVariable.get(node), block, node); + + handleNode( + node, + [&] (PromotedHeapLocation location, LazyNode value) { + if (!locations.contains(location)) + return; + + Node* nodeValue; + if (value.isNode()) + nodeValue = value.asNode(); + else { + auto iter = lazyMapping.find(value.asValue()); + if (iter != lazyMapping.end()) + nodeValue = iter->value; + else { + nodeValue = value.ensureIsNode( + m_insertionSet, m_graph.block(0), 0); + lazyMapping.add(value.asValue(), nodeValue); + } + } + + SSACalculator::Variable* variable = m_locationToVariable.get(location); + m_pointerSSA.newDef(variable, block, nodeValue); + }, + [] (PromotedHeapLocation) -> Node* { + return nullptr; + }); + } + } + m_insertionSet.execute(m_graph.block(0)); + + // Run the SSA calculators to create Phis + m_pointerSSA.computePhis( + [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* { + PromotedHeapLocation location = indexToLocation[variable->index()]; + + // Don't create Phi nodes for fields of dead allocations + if (!m_heapAtHead[block].isAllocation(location.base())) + return nullptr; + + // Don't create Phi nodes once we are escaped + if (m_heapAtHead[block].getAllocation(location.base()).isEscapedAllocation()) + return nullptr; + + // If we point to a single allocation, we will + // directly use its materialization + if (m_heapAtHead[block].follow(location)) + return nullptr; + + Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit()); + phiNode->mergeFlags(NodeResultJS); + return phiNode; + }); + + m_allocationSSA.computePhis( + [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* { + Node* identifier = indexToNode[variable->index()]; + + // 
Don't create Phi nodes for dead allocations + if (!m_heapAtHead[block].isAllocation(identifier)) + return nullptr; + + // Don't create Phi nodes until we are escaped + if (!m_heapAtHead[block].getAllocation(identifier).isEscapedAllocation()) + return nullptr; + + Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit()); + phiNode->mergeFlags(NodeResultJS); + return phiNode; + }); + + // Place Phis in the right places, replace all uses of any load with the appropriate + // value, and create the materialization nodes. + LocalOSRAvailabilityCalculator availabilityCalculator; + m_graph.clearReplacements(); + for (BasicBlock* block : m_graph.blocksInPreOrder()) { + m_heap = m_heapAtHead[block]; + availabilityCalculator.beginBlock(block); + + // These mapping tables are intended to be lazy. If + // something is omitted from the table, it means that + // there haven't been any local stores to the promoted + // heap location (or any local materialization). + m_localMapping.clear(); + m_escapeeToMaterialization.clear(); + + // Insert the Phi functions that we had previously + // created. + for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(block)) { + SSACalculator::Variable* variable = phiDef->variable(); + m_insertionSet.insert(0, phiDef->value()); + + PromotedHeapLocation location = indexToLocation[variable->index()]; + m_localMapping.set(location, phiDef->value()); + + if (m_sinkCandidates.contains(location.base())) { + m_insertionSet.insert( + 0, + location.createHint( + m_graph, block->at(0)->origin.withInvalidExit(), phiDef->value())); + } + } + + for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(block)) { + SSACalculator::Variable* variable = phiDef->variable(); + m_insertionSet.insert(0, phiDef->value()); + + Node* identifier = indexToNode[variable->index()]; + m_escapeeToMaterialization.add(identifier, phiDef->value()); + bool canExit = false; + insertOSRHintsForUpdate( + 0, block->at(0)->origin, canExit, + availabilityCalculator.m_availability, identifier, phiDef->value()); + } + + if (verbose) { + dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n"); + dataLog("Local materializations at ", pointerDump(block), ": ", mapDump(m_escapeeToMaterialization), "\n"); + } + + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + Node* node = block->at(nodeIndex); + bool canExit = true; + bool nextCanExit = node->origin.exitOK; + for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) { + if (location.kind() != NamedPropertyPLoc) + continue; + + m_localMapping.set(location, m_bottom); + + if (m_sinkCandidates.contains(node)) { + m_insertionSet.insert( + nodeIndex + 1, + location.createHint( + m_graph, node->origin.takeValidExit(nextCanExit), m_bottom)); + } + } + + for (Node* materialization : m_materializationSiteToMaterializations.get(node)) { + materialization->origin.exitOK &= canExit; + Node* escapee = m_materializationToEscapee.get(materialization); + populateMaterialization(block, materialization, escapee); + m_escapeeToMaterialization.set(escapee, materialization); + m_insertionSet.insert(nodeIndex, materialization); + if (verbose) + dataLog("Materializing ", escapee, " => ", materialization, " at ", node, "\n"); + } + + for (PromotedHeapLocation location : m_materializationSiteToRecoveries.get(node)) + m_insertionSet.insert(nodeIndex, createRecovery(block, location, node, canExit)); + + // We need to put the OSR hints after the recoveries, + // because we only want 
the hints once the object is + // complete + for (Node* materialization : m_materializationSiteToMaterializations.get(node)) { + Node* escapee = m_materializationToEscapee.get(materialization); + insertOSRHintsForUpdate( + nodeIndex, node->origin, canExit, + availabilityCalculator.m_availability, escapee, materialization); + } + + if (node->origin.exitOK && !canExit) { + // We indicate that the exit state is fine now. It is OK because we updated the + // state above. We need to indicate this manually because the validation doesn't + // have enough information to infer that the exit state is fine. + m_insertionSet.insertNode(nodeIndex, SpecNone, ExitOK, node->origin); + } + + if (m_sinkCandidates.contains(node)) + m_escapeeToMaterialization.set(node, node); + + availabilityCalculator.executeNode(node); + + bool desiredNextExitOK = node->origin.exitOK && !clobbersExitState(m_graph, node); + + bool doLower = false; + handleNode( + node, + [&] (PromotedHeapLocation location, LazyNode value) { + if (!locations.contains(location)) + return; + + Node* nodeValue; + if (value.isNode()) + nodeValue = value.asNode(); + else + nodeValue = lazyMapping.get(value.asValue()); + + nodeValue = resolve(block, nodeValue); + + m_localMapping.set(location, nodeValue); + + if (!m_sinkCandidates.contains(location.base())) + return; + + doLower = true; + + m_insertionSet.insert( + nodeIndex + 1, + location.createHint( + m_graph, node->origin.takeValidExit(nextCanExit), nodeValue)); + }, + [&] (PromotedHeapLocation location) -> Node* { + return resolve(block, location); + }); + + if (!nextCanExit && desiredNextExitOK) { + // We indicate that the exit state is fine now. We need to do this because we + // emitted hints that appear to invalidate the exit state. + m_insertionSet.insertNode(nodeIndex + 1, SpecNone, ExitOK, node->origin); + } + + if (m_sinkCandidates.contains(node) || doLower) { + switch (node->op()) { + case NewObject: + node->convertToPhantomNewObject(); + break; + + case NewArrowFunction: + case NewFunction: + node->convertToPhantomNewFunction(); + break; + + case NewGeneratorFunction: + node->convertToPhantomNewGeneratorFunction(); + break; + + case CreateActivation: + node->convertToPhantomCreateActivation(); + break; + + default: + node->remove(); + break; + } + } + + m_graph.doToChildren( + node, + [&] (Edge& edge) { + edge.setNode(resolve(block, edge.node())); + }); + } + + // Gotta drop some Upsilons. 
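+ // Every Phi created above lives at the head of a successor block and
+ // needs an Upsilon in each predecessor to supply the value flowing
+ // along that edge. As a purely illustrative sketch: if a successor
+ // has a Phi for the promoted field x of a sunk allocation, this block
+ // emits, just before its terminal, something like
+ //
+ //    -: Upsilon(<value of x resolved in this block>, ^Phi)
+ //
+ // and likewise for the allocation Phis, using the current
+ // materialization of the allocation as the incoming value.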
+ NodeAndIndex terminal = block->findTerminal(); + size_t upsilonInsertionPoint = terminal.index; + NodeOrigin upsilonOrigin = terminal.node->origin; + for (BasicBlock* successorBlock : block->successors()) { + for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(successorBlock)) { + Node* phiNode = phiDef->value(); + SSACalculator::Variable* variable = phiDef->variable(); + PromotedHeapLocation location = indexToLocation[variable->index()]; + Node* incoming = resolve(block, location); + + m_insertionSet.insertNode( + upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin, + OpInfo(phiNode), incoming->defaultEdge()); + } + + for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(successorBlock)) { + Node* phiNode = phiDef->value(); + SSACalculator::Variable* variable = phiDef->variable(); + Node* incoming = getMaterialization(block, indexToNode[variable->index()]); + + m_insertionSet.insertNode( + upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin, + OpInfo(phiNode), incoming->defaultEdge()); + } + } + + m_insertionSet.execute(block); + } + } + + Node* resolve(BasicBlock* block, PromotedHeapLocation location) + { + // If we are currently pointing to a single local allocation, + // simply return the associated materialization. + if (Node* identifier = m_heap.follow(location)) + return getMaterialization(block, identifier); + + if (Node* result = m_localMapping.get(location)) + return result; + + // This implies that there is no local mapping. Find a non-local mapping. + SSACalculator::Def* def = m_pointerSSA.nonLocalReachingDef( + block, m_locationToVariable.get(location)); + ASSERT(def); + ASSERT(def->value()); + + Node* result = def->value(); + + ASSERT(!result->replacement()); + + m_localMapping.add(location, result); + return result; + } + + Node* resolve(BasicBlock* block, Node* node) + { + // If we are currently pointing to a single local allocation, + // simply return the associated materialization. + if (Node* identifier = m_heap.follow(node)) + return getMaterialization(block, identifier); + + if (node->replacement()) + node = node->replacement(); + ASSERT(!node->replacement()); + + return node; + } + + Node* getMaterialization(BasicBlock* block, Node* identifier) + { + ASSERT(m_heap.isAllocation(identifier)); + if (!m_sinkCandidates.contains(identifier)) + return identifier; + + if (Node* materialization = m_escapeeToMaterialization.get(identifier)) + return materialization; + + SSACalculator::Def* def = m_allocationSSA.nonLocalReachingDef( + block, m_nodeToVariable.get(identifier)); + ASSERT(def && def->value()); + m_escapeeToMaterialization.add(identifier, def->value()); + ASSERT(!def->value()->replacement()); + return def->value(); + } + + void insertOSRHintsForUpdate(unsigned nodeIndex, NodeOrigin origin, bool& canExit, AvailabilityMap& availability, Node* escapee, Node* materialization) + { + // We need to follow() the value in the heap. + // Consider the following graph: + // + // Block #0 + // 0: NewObject({}) + // 1: NewObject({}) + // -: PutByOffset(@0, @1, x:0) + // -: PutStructure(@0, {x:0}) + // 2: GetByOffset(@0, x:0) + // -: MovHint(@2, loc1) + // -: Branch(#1, #2) + // + // Block #1 + // 3: Call(f, @1) + // 4: Return(@0) + // + // Block #2 + // -: Return(undefined) + // + // We need to materialize @1 at @3, and when doing so we need + // to insert a MovHint for the materialization into loc1 as + // well. 
+ // In order to do this, we say that we need to insert an + // update hint for any availability whose node resolve()s to + // the materialization. + for (auto entry : availability.m_heap) { + if (!entry.value.hasNode()) + continue; + if (m_heap.follow(entry.value.node()) != escapee) + continue; + + m_insertionSet.insert( + nodeIndex, + entry.key.createHint(m_graph, origin.takeValidExit(canExit), materialization)); + } + + for (unsigned i = availability.m_locals.size(); i--;) { + if (!availability.m_locals[i].hasNode()) + continue; + if (m_heap.follow(availability.m_locals[i].node()) != escapee) + continue; + + int operand = availability.m_locals.operandForIndex(i); + m_insertionSet.insertNode( + nodeIndex, SpecNone, MovHint, origin.takeValidExit(canExit), OpInfo(operand), + materialization->defaultEdge()); + } + } + + void populateMaterialization(BasicBlock* block, Node* node, Node* escapee) + { + Allocation& allocation = m_heap.getAllocation(escapee); + switch (node->op()) { + case MaterializeNewObject: { + ObjectMaterializationData& data = node->objectMaterializationData(); + unsigned firstChild = m_graph.m_varArgChildren.size(); + + Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee); + + PromotedHeapLocation structure(StructurePLoc, allocation.identifier()); + ASSERT(locations.contains(structure)); + + m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse)); + + for (PromotedHeapLocation location : locations) { + switch (location.kind()) { + case StructurePLoc: + ASSERT(location == structure); + break; + + case NamedPropertyPLoc: { + ASSERT(location.base() == allocation.identifier()); + data.m_properties.append(PhantomPropertyValue(location.info())); + Node* value = resolve(block, location); + if (m_sinkCandidates.contains(value)) + m_graph.m_varArgChildren.append(m_bottom); + else + m_graph.m_varArgChildren.append(value); + break; + } + + default: + DFG_CRASH(m_graph, node, "Bad location kind"); + } + } + + node->children = AdjacencyList( + AdjacencyList::Variable, + firstChild, m_graph.m_varArgChildren.size() - firstChild); + break; + } + + case MaterializeCreateActivation: { + ObjectMaterializationData& data = node->objectMaterializationData(); + + unsigned firstChild = m_graph.m_varArgChildren.size(); + + Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee); + + PromotedHeapLocation symbolTable(ActivationSymbolTablePLoc, allocation.identifier()); + ASSERT(locations.contains(symbolTable)); + ASSERT(node->cellOperand() == resolve(block, symbolTable)->constant()); + m_graph.m_varArgChildren.append(Edge(resolve(block, symbolTable), KnownCellUse)); + + PromotedHeapLocation scope(ActivationScopePLoc, allocation.identifier()); + ASSERT(locations.contains(scope)); + m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse)); + + for (PromotedHeapLocation location : locations) { + switch (location.kind()) { + case ActivationScopePLoc: { + ASSERT(location == scope); + break; + } + + case ActivationSymbolTablePLoc: { + ASSERT(location == symbolTable); + break; + } + + case ClosureVarPLoc: { + ASSERT(location.base() == allocation.identifier()); + data.m_properties.append(PhantomPropertyValue(location.info())); + Node* value = resolve(block, location); + if (m_sinkCandidates.contains(value)) + m_graph.m_varArgChildren.append(m_bottom); + else + m_graph.m_varArgChildren.append(value); + break; + } + + default: + DFG_CRASH(m_graph, node, "Bad location kind"); + } + } + + node->children = AdjacencyList( + 
AdjacencyList::Variable, + firstChild, m_graph.m_varArgChildren.size() - firstChild); + break; + } + + case NewFunction: + case NewArrowFunction: + case NewGeneratorFunction: { + Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee); + ASSERT(locations.size() == 2); + + PromotedHeapLocation executable(FunctionExecutablePLoc, allocation.identifier()); + ASSERT_UNUSED(executable, locations.contains(executable)); + + PromotedHeapLocation activation(FunctionActivationPLoc, allocation.identifier()); + ASSERT(locations.contains(activation)); + + node->child1() = Edge(resolve(block, activation), KnownCellUse); + break; + } + + default: + DFG_CRASH(m_graph, node, "Bad materialize op"); + } + } + + Node* createRecovery(BasicBlock* block, PromotedHeapLocation location, Node* where, bool& canExit) + { + if (verbose) + dataLog("Recovering ", location, " at ", where, "\n"); + ASSERT(location.base()->isPhantomAllocation()); + Node* base = getMaterialization(block, location.base()); + Node* value = resolve(block, location); + + NodeOrigin origin = where->origin.withSemantic(base->origin.semantic); + + if (verbose) + dataLog("Base is ", base, " and value is ", value, "\n"); + + if (base->isPhantomAllocation()) { + return PromotedHeapLocation(base, location.descriptor()).createHint( + m_graph, origin.takeValidExit(canExit), value); + } + + switch (location.kind()) { + case NamedPropertyPLoc: { + Allocation& allocation = m_heap.getAllocation(location.base()); + + Vector<Structure*> structures; + structures.appendRange(allocation.structures().begin(), allocation.structures().end()); + unsigned identifierNumber = location.info(); + UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; + + std::sort( + structures.begin(), + structures.end(), + [uid] (Structure *a, Structure* b) -> bool { + return a->getConcurrently(uid) < b->getConcurrently(uid); + }); + + PropertyOffset firstOffset = structures[0]->getConcurrently(uid); + + if (firstOffset == structures.last()->getConcurrently(uid)) { + Node* storage = base; + // FIXME: When we decide to sink objects with a + // property storage, we should handle non-inline offsets. + RELEASE_ASSERT(isInlineOffset(firstOffset)); + + StorageAccessData* data = m_graph.m_storageAccessData.add(); + data->offset = firstOffset; + data->identifierNumber = identifierNumber; + + return m_graph.addNode( + SpecNone, + PutByOffset, + origin.takeValidExit(canExit), + OpInfo(data), + Edge(storage, KnownCellUse), + Edge(base, KnownCellUse), + value->defaultEdge()); + } + + MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add(); + data->identifierNumber = identifierNumber; + + { + PropertyOffset currentOffset = firstOffset; + StructureSet currentSet; + for (Structure* structure : structures) { + PropertyOffset offset = structure->getConcurrently(uid); + if (offset != currentOffset) { + // Because our analysis treats MultiPutByOffset like an escape, we only have to + // deal with storing results that would have been previously stored by PutByOffset + // nodes. Those nodes were guarded by the appropriate type checks. This means that + // at this point, we can simply trust that the incoming value has the right type + // for whatever structure we are using. 
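+ //
+ // As a purely illustrative sketch: if the remaining structure set is
+ // { S1, S2, S3 } and the property lives at offset 0 in S1 and S2 but
+ // at offset 1 in S3, the sort above groups S1 and S2 first and this
+ // loop emits two variants:
+ //
+ //    PutByIdVariant::replace({ S1, S2 }, 0, InferredType::Top)
+ //    PutByIdVariant::replace({ S3 }, 1, InferredType::Top)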
+ data->variants.append( + PutByIdVariant::replace(currentSet, currentOffset, InferredType::Top)); + currentOffset = offset; + currentSet.clear(); + } + currentSet.add(structure); + } + data->variants.append( + PutByIdVariant::replace(currentSet, currentOffset, InferredType::Top)); + } + + return m_graph.addNode( + SpecNone, + MultiPutByOffset, + origin.takeValidExit(canExit), + OpInfo(data), + Edge(base, KnownCellUse), + value->defaultEdge()); + break; + } + + case ClosureVarPLoc: { + return m_graph.addNode( + SpecNone, + PutClosureVar, + origin.takeValidExit(canExit), + OpInfo(location.info()), + Edge(base, KnownCellUse), + value->defaultEdge()); + break; + } + + default: + DFG_CRASH(m_graph, base, "Bad location kind"); + break; + } + } + + // This is a great way of asking value->isStillValid() without having to worry about getting + // different answers. It turns out that this analysis works OK regardless of what this + // returns but breaks badly if this changes its mind for any particular InferredValue. This + // method protects us from that. + bool isStillValid(InferredValue* value) + { + return m_validInferredValues.add(value, value->isStillValid()).iterator->value; + } + + SSACalculator m_pointerSSA; + SSACalculator m_allocationSSA; + HashSet<Node*> m_sinkCandidates; + HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable; + HashMap<Node*, SSACalculator::Variable*> m_nodeToVariable; + HashMap<PromotedHeapLocation, Node*> m_localMapping; + HashMap<Node*, Node*> m_escapeeToMaterialization; + InsertionSet m_insertionSet; + CombinedLiveness m_combinedLiveness; + + HashMap<InferredValue*, bool> m_validInferredValues; + + HashMap<Node*, Node*> m_materializationToEscapee; + HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations; + HashMap<Node*, Vector<PromotedHeapLocation>> m_materializationSiteToRecoveries; + + HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation; + + BlockMap<LocalHeap> m_heapAtHead; + BlockMap<LocalHeap> m_heapAtTail; + LocalHeap m_heap; + + Node* m_bottom = nullptr; +}; + +} + +bool performObjectAllocationSinking(Graph& graph) +{ + SamplingRegion samplingRegion("DFG Object Allocation Sinking Phase"); + return runPhase<ObjectAllocationSinkingPhase>(graph); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.h b/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.h new file mode 100644 index 000000000..b400d4e69 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGObjectAllocationSinkingPhase_h
+#define DFGObjectAllocationSinkingPhase_h
+
+#if ENABLE(DFG_JIT)
+
+namespace JSC { namespace DFG {
+
+class Graph;
+
+// Eliminates allocations that are never used except
+// locally. This will insert phantom allocations and store hints so
+// that OSR exit can materialize the objects. Replaces all uses of the
+// objects' fields with SSA data flow. This phase is able to handle cyclic allocation graphs.
+
+bool performObjectAllocationSinking(Graph&);
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGObjectAllocationSinkingPhase_h
diff --git a/Source/JavaScriptCore/dfg/DFGObjectMaterializationData.cpp b/Source/JavaScriptCore/dfg/DFGObjectMaterializationData.cpp
new file mode 100644
index 000000000..3abdbe696
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGObjectMaterializationData.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */ + +#include "config.h" +#include "DFGObjectMaterializationData.h" + +#if ENABLE(DFG_JIT) + +#include <wtf/ListDump.h> + +namespace JSC { namespace DFG { + +void PhantomPropertyValue::dump(PrintStream& out) const +{ + out.print("id", m_identifierNumber); +} + +void ObjectMaterializationData::dump(PrintStream& out) const +{ + out.print("[", listDump(m_properties), "]"); +} + +float ObjectMaterializationData::oneWaySimilarityScore( + const ObjectMaterializationData& other) const +{ + unsigned numHits = 0; + for (PhantomPropertyValue value : m_properties) { + if (other.m_properties.contains(value)) + numHits++; + } + return static_cast<float>(numHits) / static_cast<float>(m_properties.size()); +} + +float ObjectMaterializationData::similarityScore(const ObjectMaterializationData& other) const +{ + return std::min(oneWaySimilarityScore(other), other.oneWaySimilarityScore(*this)); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGObjectMaterializationData.h b/Source/JavaScriptCore/dfg/DFGObjectMaterializationData.h new file mode 100644 index 000000000..1c4febe00 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGObjectMaterializationData.h @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGObjectMaterializationData_h +#define DFGObjectMaterializationData_h + +#if ENABLE(DFG_JIT) + +#include <limits.h> +#include <wtf/MathExtras.h> +#include <wtf/PrintStream.h> +#include <wtf/Vector.h> + +namespace JSC { namespace DFG { + +struct PhantomPropertyValue { + PhantomPropertyValue() + : m_identifierNumber(UINT_MAX) + { + } + + PhantomPropertyValue(unsigned identifierNumber) + : m_identifierNumber(identifierNumber) + { + } + + unsigned m_identifierNumber; + + bool operator==(const PhantomPropertyValue& other) const + { + return m_identifierNumber == other.m_identifierNumber; + } + + void dump(PrintStream&) const; +}; + +struct ObjectMaterializationData { + // Determines the meaning of the passed nodes. + Vector<PhantomPropertyValue> m_properties; + + void dump(PrintStream&) const; + + // The fraction of my properties that the other data has. 
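+ // For instance (purely illustrative): if this data lists identifier
+ // numbers { 1, 2, 3 } and the other lists { 1, 2, 4 }, the one-way
+ // score in either direction is 2/3, and so is the similarity score
+ // below.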
+ float oneWaySimilarityScore(const ObjectMaterializationData&) const; + + // The minimum of the two possible one-way scores. + float similarityScore(const ObjectMaterializationData&) const; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGObjectMaterializationData_h + diff --git a/Source/JavaScriptCore/dfg/DFGOperations.cpp b/Source/JavaScriptCore/dfg/DFGOperations.cpp index efe19a4f6..77137275f 100644 --- a/Source/JavaScriptCore/dfg/DFGOperations.cpp +++ b/Source/JavaScriptCore/dfg/DFGOperations.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013-2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,17 +26,19 @@ #include "config.h" #include "DFGOperations.h" -#include "Arguments.h" #include "ButterflyInlines.h" +#include "ClonedArguments.h" #include "CodeBlock.h" #include "CommonSlowPaths.h" #include "CopiedSpaceInlines.h" #include "DFGDriver.h" +#include "DFGJITCode.h" #include "DFGOSRExit.h" #include "DFGThunks.h" #include "DFGToFTLDeferredCompilationCallback.h" #include "DFGToFTLForOSREntryDeferredCompilationCallback.h" #include "DFGWorklist.h" +#include "DirectArguments.h" #include "FTLForOSREntryJITCode.h" #include "FTLOSREntry.h" #include "HostCallReturnValue.h" @@ -44,16 +46,16 @@ #include "Interpreter.h" #include "JIT.h" #include "JITExceptions.h" -#include "JITOperationWrappers.h" -#include "JSActivation.h" -#include "VM.h" -#include "JSNameScope.h" -#include "NameInstance.h" +#include "JSCInlines.h" +#include "JSLexicalEnvironment.h" #include "ObjectConstructor.h" -#include "Operations.h" #include "Repatch.h" +#include "ScopedArguments.h" #include "StringConstructor.h" +#include "Symbol.h" +#include "TypeProfilerLog.h" #include "TypedArrayInlines.h" +#include "VM.h" #include <wtf/InlineASM.h> #if ENABLE(JIT) @@ -66,6 +68,7 @@ static inline void putByVal(ExecState* exec, JSValue baseValue, uint32_t index, { VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); + ASSERT(isIndex(index)); if (direct) { RELEASE_ASSERT(baseValue.isObject()); asObject(baseValue)->putDirectIndex(exec, index, value, 0, strict ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow); @@ -78,7 +81,7 @@ static inline void putByVal(ExecState* exec, JSValue baseValue, uint32_t index, return; } - object->methodTable()->putByIndex(object, exec, index, value, strict); + object->methodTable(vm)->putByIndex(object, exec, index, value, strict); return; } @@ -96,6 +99,8 @@ ALWAYS_INLINE static void JIT_OPERATION operationPutByValInternal(ExecState* exe JSValue value = JSValue::decode(encodedValue); if (LIKELY(property.isUInt32())) { + // Despite its name, JSValue::isUInt32 will return true only for positive boxed int32_t; all those values are valid array indices. 
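The assertion that follows leans on a simple range argument: isIndex() only rejects the UINT32_MAX "not an index" sentinel (at least in this era of JSC), while a boxed positive int32 is at most INT32_MAX, so anything that passes isUInt32() is automatically a valid index. A minimal compile-time restatement of that reasoning, for illustration only:

    #include <cstdint>
    #include <limits>

    static_assert(static_cast<uint32_t>(std::numeric_limits<int32_t>::max())
            < std::numeric_limits<uint32_t>::max(),
        "a positive int32, viewed as uint32, can never be the not-an-index sentinel");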
+ ASSERT(isIndex(property.asUInt32())); putByVal<strict, direct>(exec, baseValue, property.asUInt32(), value); return; } @@ -103,32 +108,26 @@ ALWAYS_INLINE static void JIT_OPERATION operationPutByValInternal(ExecState* exe if (property.isDouble()) { double propertyAsDouble = property.asDouble(); uint32_t propertyAsUInt32 = static_cast<uint32_t>(propertyAsDouble); - if (propertyAsDouble == propertyAsUInt32) { + if (propertyAsDouble == propertyAsUInt32 && isIndex(propertyAsUInt32)) { putByVal<strict, direct>(exec, baseValue, propertyAsUInt32, value); return; } } - if (isName(property)) { - PutPropertySlot slot(baseValue, strict); - if (direct) { - RELEASE_ASSERT(baseValue.isObject()); - asObject(baseValue)->putDirect(*vm, jsCast<NameInstance*>(property.asCell())->privateName(), value, slot); - } else - baseValue.put(exec, jsCast<NameInstance*>(property.asCell())->privateName(), value, slot); + // Don't put to an object if toString throws an exception. + auto propertyName = property.toPropertyKey(exec); + if (vm->exception()) return; - } - // Don't put to an object if toString throws an exception. - Identifier ident(exec, property.toString(exec)->value(exec)); - if (!vm->exception()) { - PutPropertySlot slot(baseValue, strict); - if (direct) { - RELEASE_ASSERT(baseValue.isObject()); - asObject(baseValue)->putDirect(*vm, jsCast<NameInstance*>(property.asCell())->privateName(), value, slot); - } else - baseValue.put(exec, ident, value, slot); - } + PutPropertySlot slot(baseValue, strict); + if (direct) { + RELEASE_ASSERT(baseValue.isObject()); + if (Optional<uint32_t> index = parseIndex(propertyName)) + asObject(baseValue)->putDirectIndex(exec, index.value(), value, 0, strict ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow); + else + asObject(baseValue)->putDirect(*vm, propertyName, value, slot); + } else + baseValue.put(exec, propertyName, value, slot); } template<typename ViewClass> @@ -137,70 +136,12 @@ char* newTypedArrayWithSize(ExecState* exec, Structure* structure, int32_t size) VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); if (size < 0) { - vm.throwException(exec, createRangeError(exec, "Requested length is negative")); + vm.throwException(exec, createRangeError(exec, ASCIILiteral("Requested length is negative"))); return 0; } return bitwise_cast<char*>(ViewClass::create(exec, structure, size)); } -template<typename ViewClass> -char* newTypedArrayWithOneArgument( - ExecState* exec, Structure* structure, EncodedJSValue encodedValue) -{ - VM& vm = exec->vm(); - NativeCallFrameTracer tracer(&vm, exec); - - JSValue value = JSValue::decode(encodedValue); - - if (JSArrayBuffer* jsBuffer = jsDynamicCast<JSArrayBuffer*>(value)) { - RefPtr<ArrayBuffer> buffer = jsBuffer->impl(); - - if (buffer->byteLength() % ViewClass::elementSize) { - vm.throwException(exec, createRangeError(exec, "ArrayBuffer length minus the byteOffset is not a multiple of the element size")); - return 0; - } - return bitwise_cast<char*>( - ViewClass::create( - exec, structure, buffer, 0, buffer->byteLength() / ViewClass::elementSize)); - } - - if (JSObject* object = jsDynamicCast<JSObject*>(value)) { - unsigned length = object->get(exec, vm.propertyNames->length).toUInt32(exec); - if (exec->hadException()) - return 0; - - ViewClass* result = ViewClass::createUninitialized(exec, structure, length); - if (!result) - return 0; - - if (!result->set(exec, object, 0, length)) - return 0; - - return bitwise_cast<char*>(result); - } - - int length; - if (value.isInt32()) - length = value.asInt32(); - 
else if (!value.isNumber()) { - vm.throwException(exec, createTypeError(exec, "Invalid array length argument")); - return 0; - } else { - length = static_cast<int>(value.asNumber()); - if (length != value.asNumber()) { - vm.throwException(exec, createTypeError(exec, "Invalid array length argument (fractional lengths not allowed)")); - return 0; - } - } - - if (length < 0) { - vm.throwException(exec, createRangeError(exec, "Requested length is negative")); - return 0; - } - - return bitwise_cast<char*>(ViewClass::create(exec, structure, length)); -} - extern "C" { EncodedJSValue JIT_OPERATION operationToThis(ExecState* exec, EncodedJSValue encodedOp) @@ -221,15 +162,88 @@ EncodedJSValue JIT_OPERATION operationToThisStrict(ExecState* exec, EncodedJSVal JSCell* JIT_OPERATION operationCreateThis(ExecState* exec, JSObject* constructor, int32_t inlineCapacity) { + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + + return constructEmptyObject(exec, jsCast<JSFunction*>(constructor)->rareData(exec, inlineCapacity)->objectAllocationProfile()->structure()); +} + +EncodedJSValue JIT_OPERATION operationValueBitAnd(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) +{ VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); -#if !ASSERT_DISABLED - ConstructData constructData; - ASSERT(jsCast<JSFunction*>(constructor)->methodTable()->getConstructData(jsCast<JSFunction*>(constructor), constructData) == ConstructTypeJS); -#endif - - return constructEmptyObject(exec, jsCast<JSFunction*>(constructor)->allocationProfile(exec, inlineCapacity)->structure()); + JSValue op1 = JSValue::decode(encodedOp1); + JSValue op2 = JSValue::decode(encodedOp2); + + int32_t a = op1.toInt32(exec); + int32_t b = op2.toInt32(exec); + return JSValue::encode(jsNumber(a & b)); +} + +EncodedJSValue JIT_OPERATION operationValueBitOr(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) +{ + VM* vm = &exec->vm(); + NativeCallFrameTracer tracer(vm, exec); + + JSValue op1 = JSValue::decode(encodedOp1); + JSValue op2 = JSValue::decode(encodedOp2); + + int32_t a = op1.toInt32(exec); + int32_t b = op2.toInt32(exec); + return JSValue::encode(jsNumber(a | b)); +} + +EncodedJSValue JIT_OPERATION operationValueBitXor(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) +{ + VM* vm = &exec->vm(); + NativeCallFrameTracer tracer(vm, exec); + + JSValue op1 = JSValue::decode(encodedOp1); + JSValue op2 = JSValue::decode(encodedOp2); + + int32_t a = op1.toInt32(exec); + int32_t b = op2.toInt32(exec); + return JSValue::encode(jsNumber(a ^ b)); +} + +EncodedJSValue JIT_OPERATION operationValueBitLShift(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) +{ + VM* vm = &exec->vm(); + NativeCallFrameTracer tracer(vm, exec); + + JSValue op1 = JSValue::decode(encodedOp1); + JSValue op2 = JSValue::decode(encodedOp2); + + int32_t a = op1.toInt32(exec); + uint32_t b = op2.toUInt32(exec); + return JSValue::encode(jsNumber(a << (b & 0x1f))); +} + +EncodedJSValue JIT_OPERATION operationValueBitRShift(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) +{ + VM* vm = &exec->vm(); + NativeCallFrameTracer tracer(vm, exec); + + JSValue op1 = JSValue::decode(encodedOp1); + JSValue op2 = JSValue::decode(encodedOp2); + + int32_t a = op1.toInt32(exec); + uint32_t b = op2.toUInt32(exec); + return JSValue::encode(jsNumber(a >> (b & 0x1f))); +} + +EncodedJSValue JIT_OPERATION operationValueBitURShift(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue 
encodedOp2) +{ + VM* vm = &exec->vm(); + NativeCallFrameTracer tracer(vm, exec); + + JSValue op1 = JSValue::decode(encodedOp1); + JSValue op2 = JSValue::decode(encodedOp2); + + uint32_t a = op1.toUInt32(exec); + uint32_t b = op2.toUInt32(exec); + return JSValue::encode(jsNumber(static_cast<int32_t>(a >> (b & 0x1f)))); } EncodedJSValue JIT_OPERATION operationValueAdd(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) @@ -259,7 +273,46 @@ EncodedJSValue JIT_OPERATION operationValueAddNotNumber(ExecState* exec, Encoded return JSValue::encode(jsAddSlowCase(exec, op1, op2)); } -static inline EncodedJSValue getByVal(ExecState* exec, JSCell* base, uint32_t index) +EncodedJSValue JIT_OPERATION operationValueDiv(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) +{ + VM* vm = &exec->vm(); + NativeCallFrameTracer tracer(vm, exec); + + JSValue op1 = JSValue::decode(encodedOp1); + JSValue op2 = JSValue::decode(encodedOp2); + + double a = op1.toNumber(exec); + double b = op2.toNumber(exec); + return JSValue::encode(jsNumber(a / b)); +} + +EncodedJSValue JIT_OPERATION operationValueMul(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) +{ + VM* vm = &exec->vm(); + NativeCallFrameTracer tracer(vm, exec); + + JSValue op1 = JSValue::decode(encodedOp1); + JSValue op2 = JSValue::decode(encodedOp2); + + double a = op1.toNumber(exec); + double b = op2.toNumber(exec); + return JSValue::encode(jsNumber(a * b)); +} + +EncodedJSValue JIT_OPERATION operationValueSub(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) +{ + VM* vm = &exec->vm(); + NativeCallFrameTracer tracer(vm, exec); + + JSValue op1 = JSValue::decode(encodedOp1); + JSValue op2 = JSValue::decode(encodedOp2); + + double a = op1.toNumber(exec); + double b = op2.toNumber(exec); + return JSValue::encode(jsNumber(a - b)); +} + +static ALWAYS_INLINE EncodedJSValue getByVal(ExecState* exec, JSCell* base, uint32_t index) { VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); @@ -278,8 +331,8 @@ static inline EncodedJSValue getByVal(ExecState* exec, JSCell* base, uint32_t in EncodedJSValue JIT_OPERATION operationGetByVal(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedProperty) { - VM* vm = &exec->vm(); - NativeCallFrameTracer tracer(vm, exec); + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); JSValue baseValue = JSValue::decode(encodedBase); JSValue property = JSValue::decode(encodedProperty); @@ -292,25 +345,32 @@ EncodedJSValue JIT_OPERATION operationGetByVal(ExecState* exec, EncodedJSValue e } else if (property.isDouble()) { double propertyAsDouble = property.asDouble(); uint32_t propertyAsUInt32 = static_cast<uint32_t>(propertyAsDouble); - if (propertyAsUInt32 == propertyAsDouble) + if (propertyAsUInt32 == propertyAsDouble && isIndex(propertyAsUInt32)) return getByVal(exec, base, propertyAsUInt32); } else if (property.isString()) { - if (JSValue result = base->fastGetOwnProperty(exec, asString(property)->value(exec))) - return JSValue::encode(result); + Structure& structure = *base->structure(vm); + if (JSCell::canUseFastGetOwnProperty(structure)) { + if (RefPtr<AtomicStringImpl> existingAtomicString = asString(property)->toExistingAtomicString(exec)) { + if (JSValue result = base->fastGetOwnProperty(vm, structure, existingAtomicString.get())) + return JSValue::encode(result); + } + } } } - if (isName(property)) - return JSValue::encode(baseValue.get(exec, jsCast<NameInstance*>(property.asCell())->privateName())); - - Identifier 
ident(exec, property.toString(exec)->value(exec)); - return JSValue::encode(baseValue.get(exec, ident)); + baseValue.requireObjectCoercible(exec); + if (vm.exception()) + return JSValue::encode(jsUndefined()); + auto propertyName = property.toPropertyKey(exec); + if (vm.exception()) + return JSValue::encode(jsUndefined()); + return JSValue::encode(baseValue.get(exec, propertyName)); } EncodedJSValue JIT_OPERATION operationGetByValCell(ExecState* exec, JSCell* base, EncodedJSValue encodedProperty) { - VM* vm = &exec->vm(); - NativeCallFrameTracer tracer(vm, exec); + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); JSValue property = JSValue::decode(encodedProperty); @@ -322,15 +382,19 @@ EncodedJSValue JIT_OPERATION operationGetByValCell(ExecState* exec, JSCell* base if (propertyAsUInt32 == propertyAsDouble) return getByVal(exec, base, propertyAsUInt32); } else if (property.isString()) { - if (JSValue result = base->fastGetOwnProperty(exec, asString(property)->value(exec))) - return JSValue::encode(result); + Structure& structure = *base->structure(vm); + if (JSCell::canUseFastGetOwnProperty(structure)) { + if (RefPtr<AtomicStringImpl> existingAtomicString = asString(property)->toExistingAtomicString(exec)) { + if (JSValue result = base->fastGetOwnProperty(vm, structure, existingAtomicString.get())) + return JSValue::encode(result); + } + } } - if (isName(property)) - return JSValue::encode(JSValue(base).get(exec, jsCast<NameInstance*>(property.asCell())->privateName())); - - Identifier ident(exec, property.toString(exec)->value(exec)); - return JSValue::encode(JSValue(base).get(exec, ident)); + auto propertyName = property.toPropertyKey(exec); + if (vm.exception()) + return JSValue::encode(jsUndefined()); + return JSValue::encode(JSValue(base).get(exec, propertyName)); } ALWAYS_INLINE EncodedJSValue getByValCellInt(ExecState* exec, JSCell* base, int32_t index) @@ -391,8 +455,8 @@ void JIT_OPERATION operationPutByValCellNonStrict(ExecState* exec, JSCell* cell, void JIT_OPERATION operationPutByValBeyondArrayBoundsStrict(ExecState* exec, JSObject* array, int32_t index, EncodedJSValue encodedValue) { - VM* vm = &exec->vm(); - NativeCallFrameTracer tracer(vm, exec); + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); if (index >= 0) { array->putByIndexInline(exec, index, JSValue::decode(encodedValue), true); @@ -556,8 +620,31 @@ EncodedJSValue JIT_OPERATION operationRegExpExec(ExecState* exec, JSCell* base, if (!base->inherits(RegExpObject::info())) return throwVMTypeError(exec); - ASSERT(argument->isString() || argument->isObject()); - JSString* input = argument->isString() ? 
asString(argument) : asObject(argument)->toString(exec); + JSString* input; + if (argument->isString()) + input = asString(argument); + else { + input = JSValue(argument).toStringOrNull(exec); + if (!input) + return JSValue::encode(jsUndefined()); + } + return JSValue::encode(asRegExpObject(base)->exec(exec, input)); +} + +EncodedJSValue JIT_OPERATION operationRegExpExecGeneric(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedArgument) +{ + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + + JSValue base = JSValue::decode(encodedBase); + JSValue argument = JSValue::decode(encodedArgument); + + if (!base.inherits(RegExpObject::info())) + return throwVMTypeError(exec); + + JSString* input = argument.toStringOrNull(exec); + if (!input) + return JSValue::encode(jsUndefined()); return JSValue::encode(asRegExpObject(base)->exec(exec, input)); } @@ -571,8 +658,33 @@ size_t JIT_OPERATION operationRegExpTest(ExecState* exec, JSCell* base, JSCell* return false; } - ASSERT(argument->isString() || argument->isObject()); - JSString* input = argument->isString() ? asString(argument) : asObject(argument)->toString(exec); + JSString* input; + if (argument->isString()) + input = asString(argument); + else { + input = JSValue(argument).toStringOrNull(exec); + if (!input) + return false; + } + return asRegExpObject(base)->test(exec, input); +} + +size_t JIT_OPERATION operationRegExpTestGeneric(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedArgument) +{ + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + + JSValue base = JSValue::decode(encodedBase); + JSValue argument = JSValue::decode(encodedArgument); + + if (!base.inherits(RegExpObject::info())) { + throwTypeError(exec); + return false; + } + + JSString* input = argument.toStringOrNull(exec); + if (!input) + return false; return asRegExpObject(base)->test(exec, input); } @@ -633,7 +745,9 @@ char* JIT_OPERATION operationNewArrayWithSize(ExecState* exec, Structure* arrayS if (UNLIKELY(size < 0)) return bitwise_cast<char*>(exec->vm().throwException(exec, createRangeError(exec, ASCIILiteral("Array size is not a small enough positive integer.")))); - return bitwise_cast<char*>(JSArray::create(*vm, arrayStructure, size)); + JSArray* result = JSArray::create(*vm, arrayStructure, size); + result->butterfly(); // Ensure that the backing store is in to-space. 
+ return bitwise_cast<char*>(result); } char* JIT_OPERATION operationNewArrayBuffer(ExecState* exec, Structure* arrayStructure, size_t start, size_t size) @@ -652,7 +766,7 @@ char* JIT_OPERATION operationNewInt8ArrayWithSize( char* JIT_OPERATION operationNewInt8ArrayWithOneArgument( ExecState* exec, Structure* structure, EncodedJSValue encodedValue) { - return newTypedArrayWithOneArgument<JSInt8Array>(exec, structure, encodedValue); + return reinterpret_cast<char*>(constructGenericTypedArrayViewWithArguments<JSInt8Array>(exec, structure, encodedValue, 0, Nullopt)); } char* JIT_OPERATION operationNewInt16ArrayWithSize( @@ -664,7 +778,7 @@ char* JIT_OPERATION operationNewInt16ArrayWithSize( char* JIT_OPERATION operationNewInt16ArrayWithOneArgument( ExecState* exec, Structure* structure, EncodedJSValue encodedValue) { - return newTypedArrayWithOneArgument<JSInt16Array>(exec, structure, encodedValue); + return reinterpret_cast<char*>(constructGenericTypedArrayViewWithArguments<JSInt16Array>(exec, structure, encodedValue, 0, Nullopt)); } char* JIT_OPERATION operationNewInt32ArrayWithSize( @@ -676,7 +790,7 @@ char* JIT_OPERATION operationNewInt32ArrayWithSize( char* JIT_OPERATION operationNewInt32ArrayWithOneArgument( ExecState* exec, Structure* structure, EncodedJSValue encodedValue) { - return newTypedArrayWithOneArgument<JSInt32Array>(exec, structure, encodedValue); + return reinterpret_cast<char*>(constructGenericTypedArrayViewWithArguments<JSInt32Array>(exec, structure, encodedValue, 0, Nullopt)); } char* JIT_OPERATION operationNewUint8ArrayWithSize( @@ -688,7 +802,7 @@ char* JIT_OPERATION operationNewUint8ArrayWithSize( char* JIT_OPERATION operationNewUint8ArrayWithOneArgument( ExecState* exec, Structure* structure, EncodedJSValue encodedValue) { - return newTypedArrayWithOneArgument<JSUint8Array>(exec, structure, encodedValue); + return reinterpret_cast<char*>(constructGenericTypedArrayViewWithArguments<JSUint8Array>(exec, structure, encodedValue, 0, Nullopt)); } char* JIT_OPERATION operationNewUint8ClampedArrayWithSize( @@ -700,7 +814,7 @@ char* JIT_OPERATION operationNewUint8ClampedArrayWithSize( char* JIT_OPERATION operationNewUint8ClampedArrayWithOneArgument( ExecState* exec, Structure* structure, EncodedJSValue encodedValue) { - return newTypedArrayWithOneArgument<JSUint8ClampedArray>(exec, structure, encodedValue); + return reinterpret_cast<char*>(constructGenericTypedArrayViewWithArguments<JSUint8ClampedArray>(exec, structure, encodedValue, 0, Nullopt)); } char* JIT_OPERATION operationNewUint16ArrayWithSize( @@ -712,7 +826,7 @@ char* JIT_OPERATION operationNewUint16ArrayWithSize( char* JIT_OPERATION operationNewUint16ArrayWithOneArgument( ExecState* exec, Structure* structure, EncodedJSValue encodedValue) { - return newTypedArrayWithOneArgument<JSUint16Array>(exec, structure, encodedValue); + return reinterpret_cast<char*>(constructGenericTypedArrayViewWithArguments<JSUint16Array>(exec, structure, encodedValue, 0, Nullopt)); } char* JIT_OPERATION operationNewUint32ArrayWithSize( @@ -724,7 +838,7 @@ char* JIT_OPERATION operationNewUint32ArrayWithSize( char* JIT_OPERATION operationNewUint32ArrayWithOneArgument( ExecState* exec, Structure* structure, EncodedJSValue encodedValue) { - return newTypedArrayWithOneArgument<JSUint32Array>(exec, structure, encodedValue); + return reinterpret_cast<char*>(constructGenericTypedArrayViewWithArguments<JSUint32Array>(exec, structure, encodedValue, 0, Nullopt)); } char* JIT_OPERATION operationNewFloat32ArrayWithSize( @@ -736,7 +850,7 @@ char* 
JIT_OPERATION operationNewFloat32ArrayWithSize( char* JIT_OPERATION operationNewFloat32ArrayWithOneArgument( ExecState* exec, Structure* structure, EncodedJSValue encodedValue) { - return newTypedArrayWithOneArgument<JSFloat32Array>(exec, structure, encodedValue); + return reinterpret_cast<char*>(constructGenericTypedArrayViewWithArguments<JSFloat32Array>(exec, structure, encodedValue, 0, Nullopt)); } char* JIT_OPERATION operationNewFloat64ArrayWithSize( @@ -748,82 +862,197 @@ char* JIT_OPERATION operationNewFloat64ArrayWithSize( char* JIT_OPERATION operationNewFloat64ArrayWithOneArgument( ExecState* exec, Structure* structure, EncodedJSValue encodedValue) { - return newTypedArrayWithOneArgument<JSFloat64Array>(exec, structure, encodedValue); + return reinterpret_cast<char*>(constructGenericTypedArrayViewWithArguments<JSFloat64Array>(exec, structure, encodedValue, 0, Nullopt)); } -JSCell* JIT_OPERATION operationCreateInlinedArguments( - ExecState* exec, InlineCallFrame* inlineCallFrame) +JSCell* JIT_OPERATION operationCreateActivationDirect(ExecState* exec, Structure* structure, JSScope* scope, SymbolTable* table, EncodedJSValue initialValueEncoded) { + JSValue initialValue = JSValue::decode(initialValueEncoded); + ASSERT(initialValue == jsUndefined() || initialValue == jsTDZValue()); VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); - // NB: This needs to be exceedingly careful with top call frame tracking, since it - // may be called from OSR exit, while the state of the call stack is bizarre. - Arguments* result = Arguments::create(vm, exec, inlineCallFrame); - ASSERT(!vm.exception()); + return JSLexicalEnvironment::create(vm, structure, scope, table, initialValue); +} + +JSCell* JIT_OPERATION operationCreateDirectArguments(ExecState* exec, Structure* structure, int32_t length, int32_t minCapacity) +{ + VM& vm = exec->vm(); + NativeCallFrameTracer target(&vm, exec); + DirectArguments* result = DirectArguments::create( + vm, structure, length, std::max(length, minCapacity)); + // The caller will store to this object without barriers. Most likely, at this point, this is + // still a young object and so no barriers are needed. But it's good to be careful anyway, + // since the GC should be allowed to do crazy (like pretenuring, for example). + vm.heap.writeBarrier(result); return result; } -void JIT_OPERATION operationTearOffInlinedArguments( - ExecState* exec, JSCell* argumentsCell, JSCell* activationCell, InlineCallFrame* inlineCallFrame) +JSCell* JIT_OPERATION operationCreateScopedArguments(ExecState* exec, Structure* structure, Register* argumentStart, int32_t length, JSFunction* callee, JSLexicalEnvironment* scope) { - ASSERT_UNUSED(activationCell, !activationCell); // Currently, we don't inline functions with activations. - jsCast<Arguments*>(argumentsCell)->tearOff(exec, inlineCallFrame); + VM& vm = exec->vm(); + NativeCallFrameTracer target(&vm, exec); + + // We could pass the ScopedArgumentsTable* as an argument. We currently don't because I + // didn't feel like changing the max number of arguments for a slow path call from 6 to 7. 
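    // (Aside for readers of this diff; the mapping is approximate and not spelled out by the
    // patch itself: these helpers back the different flavors of arguments objects the DFG can
    // allocate. Roughly, DirectArguments is used when no formal parameter is captured by a
    // closure, ScopedArguments forwards captured parameters through the enclosing
    // JSLexicalEnvironment via the ScopedArgumentsTable fetched below, and ClonedArguments is
    // a plain object snapshot used for cases such as strict-mode 'arguments'.)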
+ ScopedArgumentsTable* table = scope->symbolTable()->arguments(); + + return ScopedArguments::createByCopyingFrom( + vm, structure, argumentStart, length, callee, table, scope); } -EncodedJSValue JIT_OPERATION operationGetArgumentByVal(ExecState* exec, int32_t argumentsRegister, int32_t index) +JSCell* JIT_OPERATION operationCreateClonedArguments(ExecState* exec, Structure* structure, Register* argumentStart, int32_t length, JSFunction* callee) { VM& vm = exec->vm(); - NativeCallFrameTracer tracer(&vm, exec); + NativeCallFrameTracer target(&vm, exec); + return ClonedArguments::createByCopyingFrom( + exec, structure, argumentStart, length, callee); +} + +JSCell* JIT_OPERATION operationCreateDirectArgumentsDuringExit(ExecState* exec, InlineCallFrame* inlineCallFrame, JSFunction* callee, int32_t argumentCount) +{ + VM& vm = exec->vm(); + NativeCallFrameTracer target(&vm, exec); + + DeferGCForAWhile deferGC(vm.heap); + + CodeBlock* codeBlock; + if (inlineCallFrame) + codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame); + else + codeBlock = exec->codeBlock(); + + unsigned length = argumentCount - 1; + unsigned capacity = std::max(length, static_cast<unsigned>(codeBlock->numParameters() - 1)); + DirectArguments* result = DirectArguments::create( + vm, codeBlock->globalObject()->directArgumentsStructure(), length, capacity); + + result->callee().set(vm, result, callee); + + Register* arguments = + exec->registers() + (inlineCallFrame ? inlineCallFrame->stackOffset : 0) + + CallFrame::argumentOffset(0); + for (unsigned i = length; i--;) + result->setIndexQuickly(vm, i, arguments[i].jsValue()); + + return result; +} - JSValue argumentsValue = exec->uncheckedR(argumentsRegister).jsValue(); +JSCell* JIT_OPERATION operationCreateClonedArgumentsDuringExit(ExecState* exec, InlineCallFrame* inlineCallFrame, JSFunction* callee, int32_t argumentCount) +{ + VM& vm = exec->vm(); + NativeCallFrameTracer target(&vm, exec); + + DeferGCForAWhile deferGC(vm.heap); + + CodeBlock* codeBlock; + if (inlineCallFrame) + codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame); + else + codeBlock = exec->codeBlock(); + + unsigned length = argumentCount - 1; + ClonedArguments* result = ClonedArguments::createEmpty( + vm, codeBlock->globalObject()->outOfBandArgumentsStructure(), callee); + + Register* arguments = + exec->registers() + (inlineCallFrame ? inlineCallFrame->stackOffset : 0) + + CallFrame::argumentOffset(0); + for (unsigned i = length; i--;) + result->putDirectIndex(exec, i, arguments[i].jsValue()); - // If there are no arguments, and we're accessing out of bounds, then we have to create the - // arguments in case someone has installed a getter on a numeric property. 
- if (!argumentsValue) - exec->uncheckedR(argumentsRegister) = argumentsValue = Arguments::create(exec->vm(), exec); + result->putDirect(vm, vm.propertyNames->length, jsNumber(length)); - return JSValue::encode(argumentsValue.get(exec, index)); + return result; +} + +void JIT_OPERATION operationCopyRest(ExecState* exec, JSCell* arrayAsCell, Register* argumentStart, unsigned numberOfParamsToSkip, unsigned arraySize) +{ + ASSERT(arraySize); + JSArray* array = jsCast<JSArray*>(arrayAsCell); + ASSERT(arraySize == array->length()); + array->setLength(exec, arraySize); + for (unsigned i = 0; i < arraySize; i++) + array->putDirectIndex(exec, i, argumentStart[i + numberOfParamsToSkip].jsValue()); } -EncodedJSValue JIT_OPERATION operationGetInlinedArgumentByVal( - ExecState* exec, int32_t argumentsRegister, InlineCallFrame* inlineCallFrame, int32_t index) +size_t JIT_OPERATION operationObjectIsObject(ExecState* exec, JSGlobalObject* globalObject, JSCell* object) { VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); - JSValue argumentsValue = exec->uncheckedR(argumentsRegister).jsValue(); + ASSERT(jsDynamicCast<JSObject*>(object)); - // If there are no arguments, and we're accessing out of bounds, then we have to create the - // arguments in case someone has installed a getter on a numeric property. - if (!argumentsValue) { - exec->uncheckedR(argumentsRegister) = argumentsValue = - Arguments::create(exec->vm(), exec, inlineCallFrame); + if (object->structure(vm)->masqueradesAsUndefined(globalObject)) + return false; + if (object->type() == JSFunctionType) + return false; + if (object->inlineTypeFlags() & TypeOfShouldCallGetCallData) { + CallData callData; + if (object->methodTable(vm)->getCallData(object, callData) != CallTypeNone) + return false; } - return JSValue::encode(argumentsValue.get(exec, index)); + return true; } -JSCell* JIT_OPERATION operationNewFunctionNoCheck(ExecState* exec, JSCell* functionExecutable) +size_t JIT_OPERATION operationObjectIsFunction(ExecState* exec, JSGlobalObject* globalObject, JSCell* object) { - ASSERT(functionExecutable->inherits(FunctionExecutable::info())); VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); - return JSFunction::create(vm, static_cast<FunctionExecutable*>(functionExecutable), exec->scope()); -} -size_t JIT_OPERATION operationIsObject(ExecState* exec, EncodedJSValue value) -{ - return jsIsObjectType(exec, JSValue::decode(value)); + ASSERT(jsDynamicCast<JSObject*>(object)); + + if (object->structure(vm)->masqueradesAsUndefined(globalObject)) + return false; + if (object->type() == JSFunctionType) + return true; + if (object->inlineTypeFlags() & TypeOfShouldCallGetCallData) { + CallData callData; + if (object->methodTable(vm)->getCallData(object, callData) != CallTypeNone) + return true; + } + + return false; } -size_t JIT_OPERATION operationIsFunction(EncodedJSValue value) +JSCell* JIT_OPERATION operationTypeOfObject(ExecState* exec, JSGlobalObject* globalObject, JSCell* object) { - return jsIsFunctionType(JSValue::decode(value)); + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + + ASSERT(jsDynamicCast<JSObject*>(object)); + + if (object->structure(vm)->masqueradesAsUndefined(globalObject)) + return vm.smallStrings.undefinedString(); + if (object->type() == JSFunctionType) + return vm.smallStrings.functionString(); + if (object->inlineTypeFlags() & TypeOfShouldCallGetCallData) { + CallData callData; + if (object->methodTable(vm)->getCallData(object, callData) != CallTypeNone) + return 
vm.smallStrings.functionString(); + } + + return vm.smallStrings.objectString(); } -JSCell* JIT_OPERATION operationTypeOf(ExecState* exec, JSCell* value) +int32_t JIT_OPERATION operationTypeOfObjectAsTypeofType(ExecState* exec, JSGlobalObject* globalObject, JSCell* object) { - return jsTypeStringForValue(exec, JSValue(value)).asCell(); + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + + ASSERT(jsDynamicCast<JSObject*>(object)); + + if (object->structure(vm)->masqueradesAsUndefined(globalObject)) + return static_cast<int32_t>(TypeofType::Undefined); + if (object->type() == JSFunctionType) + return static_cast<int32_t>(TypeofType::Function); + if (object->inlineTypeFlags() & TypeOfShouldCallGetCallData) { + CallData callData; + if (object->methodTable(vm)->getCallData(object, callData) != CallTypeNone) + return static_cast<int32_t>(TypeofType::Function); + } + + return static_cast<int32_t>(TypeofType::Object); } char* JIT_OPERATION operationAllocatePropertyStorageWithInitialCapacity(ExecState* exec) @@ -900,17 +1129,6 @@ char* JIT_OPERATION operationEnsureContiguous(ExecState* exec, JSCell* cell) return reinterpret_cast<char*>(asObject(cell)->ensureContiguous(vm).data()); } -char* JIT_OPERATION operationRageEnsureContiguous(ExecState* exec, JSCell* cell) -{ - VM& vm = exec->vm(); - NativeCallFrameTracer tracer(&vm, exec); - - if (!cell->isObject()) - return 0; - - return reinterpret_cast<char*>(asObject(cell)->rageEnsureContiguous(vm).data()); -} - char* JIT_OPERATION operationEnsureArrayStorage(ExecState* exec, JSCell* cell) { VM& vm = exec->vm(); @@ -962,6 +1180,22 @@ JSCell* JIT_OPERATION operationToString(ExecState* exec, EncodedJSValue value) return JSValue::decode(value).toString(exec); } +JSCell* JIT_OPERATION operationCallStringConstructorOnCell(ExecState* exec, JSCell* cell) +{ + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + + return stringConstructor(exec, cell); +} + +JSCell* JIT_OPERATION operationCallStringConstructor(ExecState* exec, EncodedJSValue value) +{ + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + + return stringConstructor(exec, JSValue::decode(value)); +} + JSCell* JIT_OPERATION operationMakeRope2(ExecState* exec, JSString* left, JSString* right) { VM& vm = exec->vm(); @@ -988,6 +1222,44 @@ JSCell* JIT_OPERATION operationMakeRope3(ExecState* exec, JSString* a, JSString* return JSRopeString::create(vm, a, b, c); } +JSCell* JIT_OPERATION operationStrCat2(ExecState* exec, EncodedJSValue a, EncodedJSValue b) +{ + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + + JSString* str1 = JSValue::decode(a).toString(exec); + ASSERT(!vm.exception()); // Impossible, since we must have been given primitives. + JSString* str2 = JSValue::decode(b).toString(exec); + ASSERT(!vm.exception()); + + if (sumOverflows<int32_t>(str1->length(), str2->length())) { + throwOutOfMemoryError(exec); + return nullptr; + } + + return JSRopeString::create(vm, str1, str2); +} + +JSCell* JIT_OPERATION operationStrCat3(ExecState* exec, EncodedJSValue a, EncodedJSValue b, EncodedJSValue c) +{ + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + + JSString* str1 = JSValue::decode(a).toString(exec); + ASSERT(!vm.exception()); // Impossible, since we must have been given primitives. 
+ JSString* str2 = JSValue::decode(b).toString(exec); + ASSERT(!vm.exception()); + JSString* str3 = JSValue::decode(c).toString(exec); + ASSERT(!vm.exception()); + + if (sumOverflows<int32_t>(str1->length(), str2->length(), str3->length())) { + throwOutOfMemoryError(exec); + return nullptr; + } + + return JSRopeString::create(vm, str1, str2, str3); +} + char* JIT_OPERATION operationFindSwitchImmTargetForDouble( ExecState* exec, EncodedJSValue encodedValue, size_t tableIndex) { @@ -1010,12 +1282,64 @@ char* JIT_OPERATION operationSwitchString(ExecState* exec, size_t tableIndex, JS return static_cast<char*>(exec->codeBlock()->stringSwitchJumpTable(tableIndex).ctiForValue(string->value(exec).impl()).executableAddress()); } -void JIT_OPERATION operationInvalidate(ExecState* exec, VariableWatchpointSet* set) +int32_t JIT_OPERATION operationSwitchStringAndGetBranchOffset(ExecState* exec, size_t tableIndex, JSString* string) { VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); - set->invalidate(); + return exec->codeBlock()->stringSwitchJumpTable(tableIndex).offsetForValue(string->value(exec).impl(), std::numeric_limits<int32_t>::min()); +} + +char* JIT_OPERATION operationGetButterfly(ExecState* exec, JSCell* cell) +{ + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + + return bitwise_cast<char*>(jsCast<JSObject*>(cell)->butterfly()); +} + +char* JIT_OPERATION operationGetArrayBufferVector(ExecState* exec, JSCell* cell) +{ + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + + return bitwise_cast<char*>(jsCast<JSArrayBufferView*>(cell)->vector()); +} + +void JIT_OPERATION operationNotifyWrite(ExecState* exec, WatchpointSet* set) +{ + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + + set->touch("Executed NotifyWrite"); +} + +void JIT_OPERATION operationThrowStackOverflowForVarargs(ExecState* exec) +{ + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + throwStackOverflowError(exec); +} + +int32_t JIT_OPERATION operationSizeOfVarargs(ExecState* exec, EncodedJSValue encodedArguments, int32_t firstVarArgOffset) +{ + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + JSValue arguments = JSValue::decode(encodedArguments); + + return sizeOfVarargs(exec, arguments, firstVarArgOffset); +} + +void JIT_OPERATION operationLoadVarargs(ExecState* exec, int32_t firstElementDest, EncodedJSValue encodedArguments, int32_t offset, int32_t length, int32_t mandatoryMinimum) +{ + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + JSValue arguments = JSValue::decode(encodedArguments); + + loadVarargs(exec, VirtualRegister(firstElementDest), arguments, offset, length); + + for (int32_t i = length; i < mandatoryMinimum; ++i) + exec->r(firstElementDest + i) = jsUndefined(); } double JIT_OPERATION operationFModOnInts(int32_t a, int32_t b) @@ -1023,6 +1347,13 @@ double JIT_OPERATION operationFModOnInts(int32_t a, int32_t b) return fmod(a, b); } +#if USE(JSVALUE32_64) +double JIT_OPERATION operationRandom(JSGlobalObject* globalObject) +{ + return globalObject->weakRandomNumber(); +} +#endif + JSCell* JIT_OPERATION operationStringFromCharCode(ExecState* exec, int32_t op1) { VM* vm = &exec->vm(); @@ -1030,13 +1361,31 @@ JSCell* JIT_OPERATION operationStringFromCharCode(ExecState* exec, int32_t op1) return JSC::stringFromCharCode(exec, op1); } -size_t JIT_OPERATION dfgConvertJSValueToInt32(ExecState* exec, EncodedJSValue value) +EncodedJSValue JIT_OPERATION operationStringFromCharCodeUntyped(ExecState* exec, EncodedJSValue 
encodedValue) { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - - // toInt32/toUInt32 return the same value; we want the value zero extended to fill the register. - return JSValue::decode(value).toUInt32(exec); + JSValue charValue = JSValue::decode(encodedValue); + int32_t chInt = charValue.toUInt32(exec); + return JSValue::encode(JSC::stringFromCharCode(exec, chInt)); +} + +int64_t JIT_OPERATION operationConvertBoxedDoubleToInt52(EncodedJSValue encodedValue) +{ + JSValue value = JSValue::decode(encodedValue); + if (!value.isDouble()) + return JSValue::notInt52; + return tryConvertToInt52(value.asDouble()); +} + +int64_t JIT_OPERATION operationConvertDoubleToInt52(double value) +{ + return tryConvertToInt52(value); +} + +void JIT_OPERATION operationProcessTypeProfilerLogDFG(ExecState* exec) +{ + exec->vm().typeProfilerLog()->processLogEntries(ASCIILiteral("Log Full, called from inside DFG.")); } void JIT_OPERATION debugOperationPrintSpeculationFailure(ExecState* exec, void* debugInfoRaw, void* scratch) @@ -1047,8 +1396,8 @@ void JIT_OPERATION debugOperationPrintSpeculationFailure(ExecState* exec, void* SpeculationFailureDebugInfo* debugInfo = static_cast<SpeculationFailureDebugInfo*>(debugInfoRaw); CodeBlock* codeBlock = debugInfo->codeBlock; CodeBlock* alternative = codeBlock->alternative(); - dataLog( - "Speculation failure in ", *codeBlock, " with "); + dataLog("Speculation failure in ", *codeBlock); + dataLog(" @ exit #", vm->osrExitIndex, " (bc#", debugInfo->bytecodeOffset, ", ", exitKindToString(debugInfo->kind), ") with "); if (alternative) { dataLog( "executeCounter = ", alternative->jitExecuteCounter(), @@ -1077,7 +1426,7 @@ void JIT_OPERATION debugOperationPrintSpeculationFailure(ExecState* exec, void* dataLog("\n"); } -extern "C" void JIT_OPERATION triggerReoptimizationNow(CodeBlock* codeBlock) +extern "C" void JIT_OPERATION triggerReoptimizationNow(CodeBlock* codeBlock, OSRExitBase* exit) { // It's sort of preferable that we don't GC while in here. Anyways, doing so wouldn't // really be profitable. @@ -1101,13 +1450,21 @@ extern "C" void JIT_OPERATION triggerReoptimizationNow(CodeBlock* codeBlock) ASSERT(codeBlock->hasOptimizedReplacement()); CodeBlock* optimizedCodeBlock = codeBlock->replacement(); ASSERT(JITCode::isOptimizingJIT(optimizedCodeBlock->jitType())); + + bool didTryToEnterIntoInlinedLoops = false; + for (InlineCallFrame* inlineCallFrame = exit->m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame) { + if (inlineCallFrame->baselineCodeBlock->ownerScriptExecutable()->didTryToEnterInLoop()) { + didTryToEnterIntoInlinedLoops = true; + break; + } + } // In order to trigger reoptimization, one of two things must have happened: // 1) We exited more than some number of times. // 2) We exited and got stuck in a loop, and now we're exiting again. 
bool didExitABunch = optimizedCodeBlock->shouldReoptimizeNow(); bool didGetStuckInLoop = - codeBlock->checkIfOptimizationThresholdReached() + (codeBlock->checkIfOptimizationThresholdReached() || didTryToEnterIntoInlinedLoops) && optimizedCodeBlock->shouldReoptimizeFromLoopNow(); if (!didExitABunch && !didGetStuckInLoop) { @@ -1117,40 +1474,32 @@ extern "C" void JIT_OPERATION triggerReoptimizationNow(CodeBlock* codeBlock) return; } - optimizedCodeBlock->jettison(CountReoptimization); + optimizedCodeBlock->jettison(Profiler::JettisonDueToOSRExit, CountReoptimization); } #if ENABLE(FTL_JIT) -void JIT_OPERATION triggerTierUpNow(ExecState* exec) +static bool shouldTriggerFTLCompile(CodeBlock* codeBlock, JITCode* jitCode) { - VM* vm = &exec->vm(); - NativeCallFrameTracer tracer(vm, exec); - DeferGC deferGC(vm->heap); - CodeBlock* codeBlock = exec->codeBlock(); - - JITCode* jitCode = codeBlock->jitCode()->dfg(); - - if (Options::verboseOSR()) { - dataLog( - *codeBlock, ": Entered triggerTierUpNow with executeCounter = ", - jitCode->tierUpCounter, "\n"); - } - if (codeBlock->baselineVersion()->m_didFailFTLCompilation) { if (Options::verboseOSR()) dataLog("Deferring FTL-optimization of ", *codeBlock, " indefinitely because there was an FTL failure.\n"); jitCode->dontOptimizeAnytimeSoon(codeBlock); - return; + return false; } - - if (!jitCode->checkIfOptimizationThresholdReached(codeBlock)) { + + if (!codeBlock->hasOptimizedReplacement() + && !jitCode->checkIfOptimizationThresholdReached(codeBlock)) { if (Options::verboseOSR()) dataLog("Choosing not to FTL-optimize ", *codeBlock, " yet.\n"); - return; + return false; } - + return true; +} + +static void triggerFTLReplacementCompile(VM* vm, CodeBlock* codeBlock, JITCode* jitCode) +{ Worklist::State worklistState; - if (Worklist* worklist = vm->worklist.get()) { + if (Worklist* worklist = existingGlobalFTLWorklistOrNull()) { worklistState = worklist->completeAllReadyPlansForVM( *vm, CompilationKey(codeBlock->baselineVersion(), FTLMode)); } else @@ -1179,18 +1528,26 @@ void JIT_OPERATION triggerTierUpNow(ExecState* exec) // We need to compile the code. compile( - *vm, codeBlock->newReplacement().get(), FTLMode, UINT_MAX, Operands<JSValue>(), - ToFTLDeferredCompilationCallback::create(codeBlock), vm->ensureWorklist()); + *vm, codeBlock->newReplacement(), codeBlock, FTLMode, UINT_MAX, + Operands<JSValue>(), ToFTLDeferredCompilationCallback::create()); + + // If we reached here, the counter has not be reset. Do that now. 
+ jitCode->setOptimizationThresholdBasedOnCompilationResult( + codeBlock, CompilationDeferred); } -char* JIT_OPERATION triggerOSREntryNow( - ExecState* exec, int32_t bytecodeIndex, int32_t streamIndex) +static void triggerTierUpNowCommon(ExecState* exec, bool inLoop) { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); DeferGC deferGC(vm->heap); CodeBlock* codeBlock = exec->codeBlock(); + if (codeBlock->jitType() != JITCode::DFGJIT) { + dataLog("Unexpected code block in DFG->FTL tier-up: ", *codeBlock, "\n"); + RELEASE_ASSERT_NOT_REACHED(); + } + JITCode* jitCode = codeBlock->jitCode()->dfg(); if (Options::verboseOSR()) { @@ -1198,147 +1555,138 @@ char* JIT_OPERATION triggerOSREntryNow( *codeBlock, ": Entered triggerTierUpNow with executeCounter = ", jitCode->tierUpCounter, "\n"); } + if (inLoop) + jitCode->nestedTriggerIsSet = 1; + + if (shouldTriggerFTLCompile(codeBlock, jitCode)) + triggerFTLReplacementCompile(vm, codeBlock, jitCode); +} + +void JIT_OPERATION triggerTierUpNow(ExecState* exec) +{ + triggerTierUpNowCommon(exec, false); +} + +void JIT_OPERATION triggerTierUpNowInLoop(ExecState* exec) +{ + triggerTierUpNowCommon(exec, true); +} + +char* JIT_OPERATION triggerOSREntryNow( + ExecState* exec, int32_t bytecodeIndex, int32_t streamIndex) +{ + VM* vm = &exec->vm(); + NativeCallFrameTracer tracer(vm, exec); + DeferGC deferGC(vm->heap); + CodeBlock* codeBlock = exec->codeBlock(); - if (codeBlock->baselineVersion()->m_didFailFTLCompilation) { - if (Options::verboseOSR()) - dataLog("Deferring FTL-optimization of ", *codeBlock, " indefinitely because there was an FTL failure.\n"); - jitCode->dontOptimizeAnytimeSoon(codeBlock); - return 0; + if (codeBlock->jitType() != JITCode::DFGJIT) { + dataLog("Unexpected code block in DFG->FTL tier-up: ", *codeBlock, "\n"); + RELEASE_ASSERT_NOT_REACHED(); } - if (!jitCode->checkIfOptimizationThresholdReached(codeBlock)) { - if (Options::verboseOSR()) - dataLog("Choosing not to FTL-optimize ", *codeBlock, " yet.\n"); - return 0; + JITCode* jitCode = codeBlock->jitCode()->dfg(); + jitCode->nestedTriggerIsSet = 0; + + if (Options::verboseOSR()) { + dataLog( + *codeBlock, ": Entered triggerOSREntryNow with executeCounter = ", + jitCode->tierUpCounter, "\n"); + } + + // - If we don't have an FTL code block, then try to compile one. + // - If we do have an FTL code block, then try to enter for a while. + // - If we couldn't enter for a while, then trigger OSR entry. + + if (!shouldTriggerFTLCompile(codeBlock, jitCode)) + return nullptr; + + if (!jitCode->neverExecutedEntry) { + triggerFTLReplacementCompile(vm, codeBlock, jitCode); + + if (!codeBlock->hasOptimizedReplacement()) + return nullptr; + + if (jitCode->osrEntryRetry < Options::ftlOSREntryRetryThreshold()) { + jitCode->osrEntryRetry++; + return nullptr; + } } + // It's time to try to compile code for OSR entry. 
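    // (Summary of the control flow that follows, added for readability; the code below is
    // authoritative:
    //   - Worklist::Compiling: an FTLForOSREntryMode plan is still in flight, so defer the
    //     thresholds and return null.
    //   - An osrEntryBlock() already exists: try FTL::prepareOSREntry(); after enough failed
    //     attempts, discard the entry block and start over.
    //   - Worklist::Compiled: the previous attempt failed and the thresholds were already set.
    //   - Otherwise: reconstruct the must-handle values and submit a fresh FTLForOSREntryMode
    //     compile.)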
Worklist::State worklistState; - if (Worklist* worklist = vm->worklist.get()) { + if (Worklist* worklist = existingGlobalFTLWorklistOrNull()) { worklistState = worklist->completeAllReadyPlansForVM( *vm, CompilationKey(codeBlock->baselineVersion(), FTLForOSREntryMode)); } else worklistState = Worklist::NotKnown; if (worklistState == Worklist::Compiling) { - ASSERT(!jitCode->osrEntryBlock); jitCode->setOptimizationThresholdBasedOnCompilationResult( codeBlock, CompilationDeferred); - return 0; + return nullptr; } - if (CodeBlock* entryBlock = jitCode->osrEntryBlock.get()) { + if (CodeBlock* entryBlock = jitCode->osrEntryBlock()) { void* address = FTL::prepareOSREntry( exec, codeBlock, entryBlock, bytecodeIndex, streamIndex); - if (address) { - jitCode->optimizeSoon(codeBlock); + if (address) return static_cast<char*>(address); + + if (jitCode->osrEntryRetry < Options::ftlOSREntryRetryThreshold()) { + jitCode->osrEntryRetry++; + return nullptr; } - + FTL::ForOSREntryJITCode* entryCode = entryBlock->jitCode()->ftlForOSREntry(); entryCode->countEntryFailure(); if (entryCode->entryFailureCount() < Options::ftlOSREntryFailureCountForReoptimization()) { - jitCode->optimizeSoon(codeBlock); - return 0; + return nullptr; } // OSR entry failed. Oh no! This implies that we need to retry. We retry // without exponential backoff and we only do this for the entry code block. - jitCode->osrEntryBlock.clear(); - - jitCode->optimizeAfterWarmUp(codeBlock); - return 0; + jitCode->clearOSREntryBlock(); + jitCode->osrEntryRetry = 0; + return nullptr; } if (worklistState == Worklist::Compiled) { // This means that compilation failed and we already set the thresholds. if (Options::verboseOSR()) dataLog("Code block ", *codeBlock, " was compiled but it doesn't have an optimized replacement.\n"); - return 0; + return nullptr; } - // The first order of business is to trigger a for-entry compile. + // We aren't compiling and haven't compiled anything for OSR entry. So, try to compile + // something. Operands<JSValue> mustHandleValues; jitCode->reconstruct( exec, codeBlock, CodeOrigin(bytecodeIndex), streamIndex, mustHandleValues); - CompilationResult forEntryResult = DFG::compile( - *vm, codeBlock->newReplacement().get(), FTLForOSREntryMode, bytecodeIndex, - mustHandleValues, ToFTLForOSREntryDeferredCompilationCallback::create(codeBlock), - vm->ensureWorklist()); - - // But we also want to trigger a replacement compile. Of course, we don't want to - // trigger it if we don't need to. Note that this is kind of weird because we might - // have just finished an FTL compile and that compile failed or was invalidated. - // But this seems uncommon enough that we sort of don't care. It's certainly sound - // to fire off another compile right now so long as we're not already compiling and - // we don't already have an optimized replacement. Note, we don't do this for - // obviously bad cases like global code, where we know that there is a slim chance - // of this code being invoked ever again. 
- CompilationKey keyForReplacement(codeBlock->baselineVersion(), FTLMode); - if (codeBlock->codeType() != GlobalCode - && !codeBlock->hasOptimizedReplacement() - && (!vm->worklist.get() - || vm->worklist->compilationState(keyForReplacement) == Worklist::NotKnown)) { - compile( - *vm, codeBlock->newReplacement().get(), FTLMode, UINT_MAX, Operands<JSValue>(), - ToFTLDeferredCompilationCallback::create(codeBlock), vm->ensureWorklist()); + CodeBlock* replacementCodeBlock = codeBlock->newReplacement(); + CompilationResult forEntryResult = compile( + *vm, replacementCodeBlock, codeBlock, FTLForOSREntryMode, bytecodeIndex, + mustHandleValues, ToFTLForOSREntryDeferredCompilationCallback::create()); + + if (jitCode->neverExecutedEntry) + triggerFTLReplacementCompile(vm, codeBlock, jitCode); + + if (forEntryResult != CompilationSuccessful) { + jitCode->setOptimizationThresholdBasedOnCompilationResult( + codeBlock, CompilationDeferred); + return nullptr; } - - if (forEntryResult != CompilationSuccessful) - return 0; - + // It's possible that the for-entry compile already succeeded. In that case OSR // entry will succeed unless we ran out of stack. It's not clear what we should do. // We signal to try again after a while if that happens. void* address = FTL::prepareOSREntry( - exec, codeBlock, jitCode->osrEntryBlock.get(), bytecodeIndex, streamIndex); - if (address) - jitCode->optimizeSoon(codeBlock); - else - jitCode->optimizeAfterWarmUp(codeBlock); + exec, codeBlock, jitCode->osrEntryBlock(), bytecodeIndex, streamIndex); return static_cast<char*>(address); } -// FIXME: Make calls work well. Currently they're a pure regression. -// https://bugs.webkit.org/show_bug.cgi?id=113621 -EncodedJSValue JIT_OPERATION operationFTLCall(ExecState* exec) -{ - ExecState* callerExec = exec->callerFrame(); - - VM* vm = &callerExec->vm(); - NativeCallFrameTracer tracer(vm, callerExec); - - JSValue callee = exec->calleeAsValue(); - CallData callData; - CallType callType = getCallData(callee, callData); - if (callType == CallTypeNone) { - vm->throwException(callerExec, createNotAFunctionError(callerExec, callee)); - return JSValue::encode(jsUndefined()); - } - - return JSValue::encode(call(callerExec, callee, callType, callData, exec->thisValue(), exec)); -} - -// FIXME: Make calls work well. Currently they're a pure regression. -// https://bugs.webkit.org/show_bug.cgi?id=113621 -EncodedJSValue JIT_OPERATION operationFTLConstruct(ExecState* exec) -{ - ExecState* callerExec = exec->callerFrame(); - - VM* vm = &callerExec->vm(); - NativeCallFrameTracer tracer(vm, callerExec); - - JSValue callee = exec->calleeAsValue(); - ConstructData constructData; - ConstructType constructType = getConstructData(callee, constructData); - if (constructType == ConstructTypeNone) { - vm->throwException(callerExec, createNotAFunctionError(callerExec, callee)); - return JSValue::encode(jsUndefined()); - } - - return JSValue::encode(construct(callerExec, callee, constructType, constructData, exec)); -} #endif // ENABLE(FTL_JIT) } // extern "C" diff --git a/Source/JavaScriptCore/dfg/DFGOperations.h b/Source/JavaScriptCore/dfg/DFGOperations.h index 996bbedab..ca6424baf 100644 --- a/Source/JavaScriptCore/dfg/DFGOperations.h +++ b/Source/JavaScriptCore/dfg/DFGOperations.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,24 +28,33 @@ #if ENABLE(DFG_JIT) -#include "DFGJITCompiler.h" #include "JITOperations.h" #include "PutKind.h" -namespace JSC { +namespace JSC { namespace DFG { -namespace DFG { +struct OSRExitBase; extern "C" { JSCell* JIT_OPERATION operationStringFromCharCode(ExecState*, int32_t) WTF_INTERNAL; +EncodedJSValue JIT_OPERATION operationStringFromCharCodeUntyped(ExecState*, EncodedJSValue) WTF_INTERNAL; // These routines are provide callbacks out to C++ implementations of operations too complex to JIT. JSCell* JIT_OPERATION operationCreateThis(ExecState*, JSObject* constructor, int32_t inlineCapacity) WTF_INTERNAL; EncodedJSValue JIT_OPERATION operationToThis(ExecState*, EncodedJSValue encodedOp1) WTF_INTERNAL; EncodedJSValue JIT_OPERATION operationToThisStrict(ExecState*, EncodedJSValue encodedOp1) WTF_INTERNAL; +EncodedJSValue JIT_OPERATION operationValueBitAnd(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL; +EncodedJSValue JIT_OPERATION operationValueBitOr(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL; +EncodedJSValue JIT_OPERATION operationValueBitXor(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL; +EncodedJSValue JIT_OPERATION operationValueBitLShift(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL; +EncodedJSValue JIT_OPERATION operationValueBitRShift(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL; +EncodedJSValue JIT_OPERATION operationValueBitURShift(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL; EncodedJSValue JIT_OPERATION operationValueAdd(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL; EncodedJSValue JIT_OPERATION operationValueAddNotNumber(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL; +EncodedJSValue JIT_OPERATION operationValueDiv(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL; +EncodedJSValue JIT_OPERATION operationValueMul(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL; +EncodedJSValue JIT_OPERATION operationValueSub(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL; EncodedJSValue JIT_OPERATION operationGetByVal(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedProperty) WTF_INTERNAL; EncodedJSValue JIT_OPERATION operationGetByValCell(ExecState*, JSCell*, EncodedJSValue encodedProperty) WTF_INTERNAL; EncodedJSValue JIT_OPERATION operationGetByValArrayInt(ExecState*, JSArray*, int32_t) WTF_INTERNAL; @@ -93,19 +102,24 @@ EncodedJSValue JIT_OPERATION operationArrayPushDouble(ExecState*, double value, EncodedJSValue JIT_OPERATION operationArrayPop(ExecState*, JSArray*) WTF_INTERNAL; EncodedJSValue JIT_OPERATION operationArrayPopAndRecoverLength(ExecState*, JSArray*) WTF_INTERNAL; EncodedJSValue JIT_OPERATION operationRegExpExec(ExecState*, JSCell*, JSCell*) WTF_INTERNAL; +EncodedJSValue JIT_OPERATION operationRegExpExecGeneric(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL; // These comparisons return a boolean within a size_t such that the value is zero extended to fill the register. 
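// (Widening the boolean to size_t is deliberate: a plain bool return leaves the upper bits of
// the return register unspecified under common C ABIs, while DFG-generated code wants to test
// or reuse the whole register without an extra zero-extension, so these helpers return 0 or 1
// zero-extended to register width.)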
size_t JIT_OPERATION operationRegExpTest(ExecState*, JSCell*, JSCell*) WTF_INTERNAL; +size_t JIT_OPERATION operationRegExpTestGeneric(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL; size_t JIT_OPERATION operationCompareStrictEqCell(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL; size_t JIT_OPERATION operationCompareStrictEq(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL; -JSCell* JIT_OPERATION operationCreateInlinedArguments(ExecState*, InlineCallFrame*) WTF_INTERNAL; -void JIT_OPERATION operationTearOffInlinedArguments(ExecState*, JSCell*, JSCell*, InlineCallFrame*) WTF_INTERNAL; -EncodedJSValue JIT_OPERATION operationGetInlinedArgumentByVal(ExecState*, int32_t, InlineCallFrame*, int32_t) WTF_INTERNAL; -EncodedJSValue JIT_OPERATION operationGetArgumentByVal(ExecState*, int32_t, int32_t) WTF_INTERNAL; -JSCell* JIT_OPERATION operationNewFunctionNoCheck(ExecState*, JSCell*) WTF_INTERNAL; +JSCell* JIT_OPERATION operationCreateActivationDirect(ExecState*, Structure*, JSScope*, SymbolTable*, EncodedJSValue); +JSCell* JIT_OPERATION operationCreateDirectArguments(ExecState*, Structure*, int32_t length, int32_t minCapacity); +JSCell* JIT_OPERATION operationCreateDirectArgumentsDuringExit(ExecState*, InlineCallFrame*, JSFunction*, int32_t argumentCount); +JSCell* JIT_OPERATION operationCreateScopedArguments(ExecState*, Structure*, Register* argumentStart, int32_t length, JSFunction* callee, JSLexicalEnvironment*); +JSCell* JIT_OPERATION operationCreateClonedArgumentsDuringExit(ExecState*, InlineCallFrame*, JSFunction*, int32_t argumentCount); +JSCell* JIT_OPERATION operationCreateClonedArguments(ExecState*, Structure*, Register* argumentStart, int32_t length, JSFunction* callee); +void JIT_OPERATION operationCopyRest(ExecState*, JSCell*, Register* argumentStart, unsigned numberOfParamsToSkip, unsigned arraySize); double JIT_OPERATION operationFModOnInts(int32_t, int32_t) WTF_INTERNAL; -size_t JIT_OPERATION operationIsObject(ExecState*, EncodedJSValue) WTF_INTERNAL; -size_t JIT_OPERATION operationIsFunction(EncodedJSValue) WTF_INTERNAL; -JSCell* JIT_OPERATION operationTypeOf(ExecState*, JSCell*) WTF_INTERNAL; +size_t JIT_OPERATION operationObjectIsObject(ExecState*, JSGlobalObject*, JSCell*) WTF_INTERNAL; +size_t JIT_OPERATION operationObjectIsFunction(ExecState*, JSGlobalObject*, JSCell*) WTF_INTERNAL; +JSCell* JIT_OPERATION operationTypeOfObject(ExecState*, JSGlobalObject*, JSCell*) WTF_INTERNAL; +int32_t JIT_OPERATION operationTypeOfObjectAsTypeofType(ExecState*, JSGlobalObject*, JSCell*) WTF_INTERNAL; char* JIT_OPERATION operationAllocatePropertyStorageWithInitialCapacity(ExecState*) WTF_INTERNAL; char* JIT_OPERATION operationAllocatePropertyStorage(ExecState*, size_t newSize) WTF_INTERNAL; char* JIT_OPERATION operationReallocateButterflyToHavePropertyStorageWithInitialCapacity(ExecState*, JSObject*) WTF_INTERNAL; @@ -113,7 +127,6 @@ char* JIT_OPERATION operationReallocateButterflyToGrowPropertyStorage(ExecState* char* JIT_OPERATION operationEnsureInt32(ExecState*, JSCell*); char* JIT_OPERATION operationEnsureDouble(ExecState*, JSCell*); char* JIT_OPERATION operationEnsureContiguous(ExecState*, JSCell*); -char* JIT_OPERATION operationRageEnsureContiguous(ExecState*, JSCell*); char* JIT_OPERATION operationEnsureArrayStorage(ExecState*, JSCell*); StringImpl* JIT_OPERATION operationResolveRope(ExecState*, JSString*); JSString* JIT_OPERATION operationSingleCharacterString(ExecState*, int32_t); @@ -121,29 +134,38 @@ JSString* 
JIT_OPERATION operationSingleCharacterString(ExecState*, int32_t); JSCell* JIT_OPERATION operationNewStringObject(ExecState*, JSString*, Structure*); JSCell* JIT_OPERATION operationToStringOnCell(ExecState*, JSCell*); JSCell* JIT_OPERATION operationToString(ExecState*, EncodedJSValue); +JSCell* JIT_OPERATION operationCallStringConstructorOnCell(ExecState*, JSCell*); +JSCell* JIT_OPERATION operationCallStringConstructor(ExecState*, EncodedJSValue); JSCell* JIT_OPERATION operationMakeRope2(ExecState*, JSString*, JSString*); JSCell* JIT_OPERATION operationMakeRope3(ExecState*, JSString*, JSString*, JSString*); +JSCell* JIT_OPERATION operationStrCat2(ExecState*, EncodedJSValue, EncodedJSValue); +JSCell* JIT_OPERATION operationStrCat3(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue); char* JIT_OPERATION operationFindSwitchImmTargetForDouble(ExecState*, EncodedJSValue, size_t tableIndex); char* JIT_OPERATION operationSwitchString(ExecState*, size_t tableIndex, JSString*); -void JIT_OPERATION operationInvalidate(ExecState*, VariableWatchpointSet*); +int32_t JIT_OPERATION operationSwitchStringAndGetBranchOffset(ExecState*, size_t tableIndex, JSString*); +char* JIT_OPERATION operationGetButterfly(ExecState*, JSCell*); +char* JIT_OPERATION operationGetArrayBufferVector(ExecState*, JSCell*); +void JIT_OPERATION operationNotifyWrite(ExecState*, WatchpointSet*); +void JIT_OPERATION operationThrowStackOverflowForVarargs(ExecState*) WTF_INTERNAL; +int32_t JIT_OPERATION operationSizeOfVarargs(ExecState*, EncodedJSValue arguments, int32_t firstVarArgOffset); +void JIT_OPERATION operationLoadVarargs(ExecState*, int32_t firstElementDest, EncodedJSValue arguments, int32_t offset, int32_t length, int32_t mandatoryMinimum); -#if ENABLE(FTL_JIT) -// FIXME: Make calls work well. Currently they're a pure regression. -// https://bugs.webkit.org/show_bug.cgi?id=113621 -EncodedJSValue JIT_OPERATION operationFTLCall(ExecState*) WTF_INTERNAL; -EncodedJSValue JIT_OPERATION operationFTLConstruct(ExecState*) WTF_INTERNAL; -#endif // ENABLE(FTL_JIT) +int64_t JIT_OPERATION operationConvertBoxedDoubleToInt52(EncodedJSValue); +int64_t JIT_OPERATION operationConvertDoubleToInt52(double); -// These operations implement the implicitly called ToInt32 and ToBoolean conversions from ES5. -// This conversion returns an int32_t within a size_t such that the value is zero extended to fill the register. -size_t JIT_OPERATION dfgConvertJSValueToInt32(ExecState*, EncodedJSValue) WTF_INTERNAL; +void JIT_OPERATION operationProcessTypeProfilerLogDFG(ExecState*) WTF_INTERNAL; void JIT_OPERATION debugOperationPrintSpeculationFailure(ExecState*, void*, void*) WTF_INTERNAL; -void JIT_OPERATION triggerReoptimizationNow(CodeBlock*) WTF_INTERNAL; +void JIT_OPERATION triggerReoptimizationNow(CodeBlock*, OSRExitBase*) WTF_INTERNAL; + +#if USE(JSVALUE32_64) +double JIT_OPERATION operationRandom(JSGlobalObject*); +#endif #if ENABLE(FTL_JIT) void JIT_OPERATION triggerTierUpNow(ExecState*) WTF_INTERNAL; +void JIT_OPERATION triggerTierUpNowInLoop(ExecState*) WTF_INTERNAL; char* JIT_OPERATION triggerOSREntryNow(ExecState*, int32_t bytecodeIndex, int32_t streamIndex) WTF_INTERNAL; #endif // ENABLE(FTL_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGPhantomInsertionPhase.cpp b/Source/JavaScriptCore/dfg/DFGPhantomInsertionPhase.cpp new file mode 100644 index 000000000..02b1958ce --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGPhantomInsertionPhase.cpp @@ -0,0 +1,191 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGPhantomInsertionPhase.h" + +#if ENABLE(DFG_JIT) + +#include "BytecodeLivenessAnalysisInlines.h" +#include "DFGForAllKills.h" +#include "DFGGraph.h" +#include "DFGInsertionSet.h" +#include "DFGMayExit.h" +#include "DFGPhase.h" +#include "DFGPredictionPropagationPhase.h" +#include "DFGVariableAccessDataDump.h" +#include "JSCInlines.h" +#include "OperandsInlines.h" + +namespace JSC { namespace DFG { + +namespace { + +bool verbose = false; + +class PhantomInsertionPhase : public Phase { +public: + PhantomInsertionPhase(Graph& graph) + : Phase(graph, "phantom insertion") + , m_insertionSet(graph) + , m_values(OperandsLike, graph.block(0)->variablesAtHead) + { + } + + bool run() + { + // We assume that DCE has already run. If we run before DCE then we think that all + // SetLocals execute, which is inaccurate. That causes us to insert too few Phantoms. + DFG_ASSERT(m_graph, nullptr, m_graph.m_refCountState == ExactRefCount); + + if (verbose) { + dataLog("Graph before Phantom insertion:\n"); + m_graph.dump(); + } + + m_graph.clearEpochs(); + + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) + handleBlock(block); + + if (verbose) { + dataLog("Graph after Phantom insertion:\n"); + m_graph.dump(); + } + + return true; + } + +private: + void handleBlock(BasicBlock* block) + { + // FIXME: For blocks that have low register pressure, it would make the most sense to + // simply insert Phantoms at the last point possible since that would obviate the need to + // query bytecode liveness: + // + // - If we MovHint @x into loc42 then put a Phantom on the last MovHinted value in loc42. + // - At the end of the block put Phantoms for each MovHinted value. + // + // This will definitely not work if there are any phantom allocations. For those blocks + // where this would be legal, it remains to be seen how profitable it would be even if there + // was high register pressure. After all, a Phantom would cause a spill but it wouldn't + // cause a fill. 
+ // + // https://bugs.webkit.org/show_bug.cgi?id=144524 + + m_values.fill(nullptr); + + Epoch currentEpoch = Epoch::first(); + unsigned lastExitingIndex = 0; + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + Node* node = block->at(nodeIndex); + if (verbose) + dataLog("Considering ", node, "\n"); + + switch (node->op()) { + case MovHint: + m_values.operand(node->unlinkedLocal()) = node->child1().node(); + break; + + case ZombieHint: + m_values.operand(node->unlinkedLocal()) = nullptr; + break; + + case SetLocal: + case GetLocal: + case SetArgument: + m_values.operand(node->local()) = nullptr; + break; + + default: + break; + } + + if (mayExit(m_graph, node) != DoesNotExit) { + currentEpoch.bump(); + lastExitingIndex = nodeIndex; + } + + m_graph.doToChildren( + node, + [&] (Edge edge) { + edge->setEpoch(currentEpoch); + }); + + node->setEpoch(currentEpoch); + + forAllKilledOperands( + m_graph, node, block->tryAt(nodeIndex + 1), + [&] (VirtualRegister reg) { + if (verbose) + dataLog(" Killed operand: ", reg, "\n"); + + Node* killedNode = m_values.operand(reg); + if (!killedNode) + return; + + // We only need to insert a Phantom if the node hasn't been used since the last + // exit, and was born before the last exit. + if (killedNode->epoch() == currentEpoch) + return; + + if (verbose) { + dataLog( + " Inserting Phantom on ", killedNode, " after ", + block->at(lastExitingIndex), "\n"); + } + + // We have exact ref counts, so creating a new use means that we have to + // increment the ref count. + killedNode->postfixRef(); + + Node* lastExitingNode = block->at(lastExitingIndex); + + m_insertionSet.insertNode( + lastExitingIndex + 1, SpecNone, Phantom, + lastExitingNode->origin.forInsertingAfter(m_graph, lastExitingNode), + killedNode->defaultEdge()); + }); + } + + m_insertionSet.execute(block); + } + + InsertionSet m_insertionSet; + Operands<Node*> m_values; +}; + +} // anonymous namespace + +bool performPhantomInsertion(Graph& graph) +{ + SamplingRegion samplingRegion("DFG Phantom Insertion Phase"); + return runPhase<PhantomInsertionPhase>(graph); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGPhantomInsertionPhase.h b/Source/JavaScriptCore/dfg/DFGPhantomInsertionPhase.h new file mode 100644 index 000000000..902975b31 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGPhantomInsertionPhase.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGPhantomInsertionPhase_h +#define DFGPhantomInsertionPhase_h + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +class Graph; + +// Inserts Phantoms based on bytecode liveness. + +bool performPhantomInsertion(Graph&); + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGPhantomInsertionPhase_h diff --git a/Source/JavaScriptCore/dfg/DFGPhase.cpp b/Source/JavaScriptCore/dfg/DFGPhase.cpp index 32e039ec5..b225531a4 100644 --- a/Source/JavaScriptCore/dfg/DFGPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGPhase.cpp @@ -29,13 +29,26 @@ #if ENABLE(DFG_JIT) #include "DFGValidate.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { +void Phase::validate() +{ + DFG::validate(m_graph, DumpGraph, m_graphDumpBeforePhase); +} + void Phase::beginPhase() { - if (!shouldDumpGraphAtEachPhase()) + if (Options::verboseValidationFailure()) { + StringPrintStream out; + m_graph.dump(out); + m_graphDumpBeforePhase = out.toCString(); + } + + if (!shouldDumpGraphAtEachPhase(m_graph.m_plan.mode)) return; + dataLog("Beginning DFG phase ", m_name, ".\n"); dataLog("Before ", m_name, ":\n"); m_graph.dump(); @@ -45,7 +58,7 @@ void Phase::endPhase() { if (!Options::validateGraphAtEachPhase()) return; - validate(m_graph, DumpGraph); + validate(); } } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGPhase.h b/Source/JavaScriptCore/dfg/DFGPhase.h index 6de043bbd..e92049c43 100644 --- a/Source/JavaScriptCore/dfg/DFGPhase.h +++ b/Source/JavaScriptCore/dfg/DFGPhase.h @@ -26,8 +26,6 @@ #ifndef DFGPhase_h #define DFGPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGCommon.h" @@ -51,6 +49,8 @@ public: const char* name() const { return m_name; } + Graph& graph() { return m_graph; } + // Each phase must have a run() method. protected: @@ -60,6 +60,9 @@ protected: VM& vm() { return m_graph.m_vm; } CodeBlock* codeBlock() { return m_graph.m_codeBlock; } CodeBlock* profiledBlock() { return m_graph.m_profiledBlock; } + + // This runs validation, and uses the graph dump before the phase if possible. + void validate(); const char* m_name; @@ -67,13 +70,15 @@ private: // Call these hooks when starting and finishing. void beginPhase(); void endPhase(); + + CString m_graphDumpBeforePhase; }; template<typename PhaseType> bool runAndLog(PhaseType& phase) { bool result = phase.run(); - if (result && logCompilationChanges()) + if (result && logCompilationChanges(phase.graph().m_plan.mode)) dataLogF("Phase %s changed the IR.\n", phase.name()); return result; } diff --git a/Source/JavaScriptCore/dfg/DFGPhiChildren.cpp b/Source/JavaScriptCore/dfg/DFGPhiChildren.cpp new file mode 100644 index 000000000..de078d088 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGPhiChildren.cpp @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGPhiChildren.h" + +#if ENABLE(DFG_JIT) + +#include "DFGGraph.h" + +namespace JSC { namespace DFG { + +PhiChildren::PhiChildren() +{ +} + +PhiChildren::PhiChildren(Graph& graph) +{ + for (BasicBlock* block : graph.blocksInNaturalOrder()) { + for (Node* node : *block) { + if (node->op() != Upsilon) + continue; + + m_children.add(node->phi(), List()).iterator->value.append(node); + } + } +} + +PhiChildren::~PhiChildren() +{ +} + +const PhiChildren::List& PhiChildren::upsilonsOf(Node* node) const +{ + ASSERT(node->op() == Phi); + return m_children.find(node)->value; +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGPhiChildren.h b/Source/JavaScriptCore/dfg/DFGPhiChildren.h new file mode 100644 index 000000000..808512ed1 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGPhiChildren.h @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef DFGPhiChildren_h +#define DFGPhiChildren_h + +#if ENABLE(DFG_JIT) + +#include "DFGNode.h" +#include <wtf/HashSet.h> +#include <wtf/Vector.h> + +namespace JSC { namespace DFG { + +class Graph; + +class PhiChildren { +public: + typedef Vector<Node*, 3> List; + + PhiChildren(); + PhiChildren(Graph&); + ~PhiChildren(); + + // The list of Upsilons that point to the children of the Phi. + const List& upsilonsOf(Node*) const; + + template<typename Functor> + void forAllIncomingValues(Node* node, const Functor& functor) + { + for (Node* upsilon : upsilonsOf(node)) + functor(upsilon->child1().node()); + } + + // This walks the Phi graph. + template<typename Functor> + void forAllTransitiveIncomingValues(Node* node, const Functor& functor) + { + if (node->op() != Phi) { + functor(node); + return; + } + HashSet<Node*> seen; + Vector<Node*> worklist; + seen.add(node); + worklist.append(node); + while (!worklist.isEmpty()) { + Node* currentNode = worklist.takeLast(); + forAllIncomingValues( + currentNode, + [&] (Node* incomingNode) { + if (incomingNode->op() == Phi) { + if (seen.add(incomingNode).isNewEntry) + worklist.append(incomingNode); + } else + functor(incomingNode); + }); + } + } + +private: + HashMap<Node*, List> m_children; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGPhiChildren_h + diff --git a/Source/JavaScriptCore/dfg/DFGPlan.cpp b/Source/JavaScriptCore/dfg/DFGPlan.cpp index 735f5ffa2..a4a269808 100644 --- a/Source/JavaScriptCore/dfg/DFGPlan.cpp +++ b/Source/JavaScriptCore/dfg/DFGPlan.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,43 +28,58 @@ #if ENABLE(DFG_JIT) -#include "DFGArgumentsSimplificationPhase.h" +#include "DFGArgumentsEliminationPhase.h" #include "DFGBackwardsPropagationPhase.h" #include "DFGByteCodeParser.h" #include "DFGCFAPhase.h" #include "DFGCFGSimplificationPhase.h" #include "DFGCPSRethreadingPhase.h" #include "DFGCSEPhase.h" +#include "DFGCleanUpPhase.h" #include "DFGConstantFoldingPhase.h" +#include "DFGConstantHoistingPhase.h" +#include "DFGCopyBarrierOptimizationPhase.h" #include "DFGCriticalEdgeBreakingPhase.h" #include "DFGDCEPhase.h" #include "DFGFailedFinalizer.h" -#include "DFGFlushLivenessAnalysisPhase.h" #include "DFGFixupPhase.h" +#include "DFGGraphSafepoint.h" +#include "DFGIntegerCheckCombiningPhase.h" +#include "DFGIntegerRangeOptimizationPhase.h" #include "DFGInvalidationPointInjectionPhase.h" #include "DFGJITCompiler.h" #include "DFGLICMPhase.h" +#include "DFGLiveCatchVariablePreservationPhase.h" #include "DFGLivenessAnalysisPhase.h" #include "DFGLoopPreHeaderCreationPhase.h" +#include "DFGMaximalFlushInsertionPhase.h" +#include "DFGMovHintRemovalPhase.h" #include "DFGOSRAvailabilityAnalysisPhase.h" #include "DFGOSREntrypointCreationPhase.h" +#include "DFGObjectAllocationSinkingPhase.h" +#include "DFGPhantomInsertionPhase.h" #include "DFGPredictionInjectionPhase.h" #include "DFGPredictionPropagationPhase.h" -#include "DFGResurrectionForValidationPhase.h" +#include "DFGPutStackSinkingPhase.h" #include "DFGSSAConversionPhase.h" #include "DFGSSALoweringPhase.h" #include "DFGStackLayoutPhase.h" -#include "DFGStoreBarrierElisionPhase.h" +#include "DFGStaticExecutionCountEstimationPhase.h" +#include "DFGStoreBarrierInsertionPhase.h" #include "DFGStrengthReductionPhase.h" 
+#include "DFGStructureRegistrationPhase.h" #include "DFGTierUpCheckInjectionPhase.h" #include "DFGTypeCheckHoistingPhase.h" #include "DFGUnificationPhase.h" #include "DFGValidate.h" +#include "DFGVarargsForwardingPhase.h" #include "DFGVirtualRegisterAllocationPhase.h" #include "DFGWatchpointCollectionPhase.h" #include "Debugger.h" +#include "JSCInlines.h" #include "OperandsInlines.h" -#include "Operations.h" +#include "ProfilerDatabase.h" +#include "TrackedReferences.h" #include <wtf/CurrentTime.h> #if ENABLE(FTL_JIT) @@ -72,17 +87,23 @@ #include "FTLCompile.h" #include "FTLFail.h" #include "FTLLink.h" -#include "FTLLowerDFGToLLVM.h" +#include "FTLLowerDFGToB3.h" #include "FTLState.h" -#include "InitializeLLVM.h" #endif namespace JSC { namespace DFG { -static void dumpAndVerifyGraph(Graph& graph, const char* text) +namespace { + +double totalDFGCompileTime; +double totalFTLCompileTime; +double totalFTLDFGCompileTime; +double totalFTLB3CompileTime; + +void dumpAndVerifyGraph(Graph& graph, const char* text, bool forceDump = false) { GraphDumpMode modeForFinalValidate = DumpGraph; - if (verboseCompilationEnabled()) { + if (verboseCompilationEnabled(graph.m_plan.mode) || forceDump) { dataLog(text, "\n"); graph.dump(); modeForFinalValidate = DontDumpGraph; @@ -91,18 +112,39 @@ static void dumpAndVerifyGraph(Graph& graph, const char* text) validate(graph, modeForFinalValidate); } -Plan::Plan( - PassRefPtr<CodeBlock> passedCodeBlock, CompilationMode mode, - unsigned osrEntryBytecodeIndex, const Operands<JSValue>& mustHandleValues) +Profiler::CompilationKind profilerCompilationKindForMode(CompilationMode mode) +{ + switch (mode) { + case InvalidCompilationMode: + RELEASE_ASSERT_NOT_REACHED(); + return Profiler::DFG; + case DFGMode: + return Profiler::DFG; + case FTLMode: + return Profiler::FTL; + case FTLForOSREntryMode: + return Profiler::FTLForOSREntry; + } + RELEASE_ASSERT_NOT_REACHED(); + return Profiler::DFG; +} + +} // anonymous namespace + +Plan::Plan(CodeBlock* passedCodeBlock, CodeBlock* profiledDFGCodeBlock, + CompilationMode mode, unsigned osrEntryBytecodeIndex, + const Operands<JSValue>& mustHandleValues) : vm(*passedCodeBlock->vm()) , codeBlock(passedCodeBlock) + , profiledDFGCodeBlock(profiledDFGCodeBlock) , mode(mode) , osrEntryBytecodeIndex(osrEntryBytecodeIndex) , mustHandleValues(mustHandleValues) - , compilation(codeBlock->vm()->m_perBytecodeProfiler ? adoptRef(new Profiler::Compilation(codeBlock->vm()->m_perBytecodeProfiler->ensureBytecodesFor(codeBlock.get()), Profiler::DFG)) : 0) - , identifiers(codeBlock.get()) - , weakReferences(codeBlock.get()) - , isCompiled(false) + , compilation(codeBlock->vm()->m_perBytecodeProfiler ? 
adoptRef(new Profiler::Compilation(codeBlock->vm()->m_perBytecodeProfiler->ensureBytecodesFor(codeBlock), profilerCompilationKindForMode(mode))) : 0) + , inlineCallFrames(adoptRef(new InlineCallFrameSet())) + , identifiers(codeBlock) + , weakReferences(codeBlock) + , stage(Preparing) { } @@ -110,23 +152,54 @@ Plan::~Plan() { } -void Plan::compileInThread(LongLivedState& longLivedState) +bool Plan::computeCompileTimes() const { + return reportCompileTimes() + || Options::reportTotalCompileTimes(); +} + +bool Plan::reportCompileTimes() const +{ + return Options::reportCompileTimes() + || (Options::reportFTLCompileTimes() && isFTL(mode)); +} + +void Plan::compileInThread(LongLivedState& longLivedState, ThreadData* threadData) +{ + this->threadData = threadData; + double before = 0; - if (Options::reportCompileTimes()) - before = currentTimeMS(); + CString codeBlockName; + if (computeCompileTimes()) + before = monotonicallyIncreasingTimeMS(); + if (reportCompileTimes()) + codeBlockName = toCString(*codeBlock); SamplingRegion samplingRegion("DFG Compilation (Plan)"); CompilationScope compilationScope; - if (logCompilationChanges()) + if (logCompilationChanges(mode)) dataLog("DFG(Plan) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n"); CompilationPath path = compileInThreadImpl(longLivedState); - RELEASE_ASSERT(finalizer); + RELEASE_ASSERT(path == CancelPath || finalizer); + RELEASE_ASSERT((path == CancelPath) == (stage == Cancelled)); - if (Options::reportCompileTimes()) { + double after = 0; + if (computeCompileTimes()) + after = monotonicallyIncreasingTimeMS(); + + if (Options::reportTotalCompileTimes()) { + if (isFTL(mode)) { + totalFTLCompileTime += after - before; + totalFTLDFGCompileTime += m_timeBeforeFTL - before; + totalFTLB3CompileTime += after - m_timeBeforeFTL; + } else + totalDFGCompileTime += after - before; + } + + if (reportCompileTimes()) { const char* pathName; switch (path) { case FailPath: @@ -138,22 +211,26 @@ void Plan::compileInThread(LongLivedState& longLivedState) case FTLPath: pathName = "FTL"; break; + case CancelPath: + pathName = "Cancelled"; + break; default: RELEASE_ASSERT_NOT_REACHED(); +#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE) pathName = ""; +#endif break; } - double now = currentTimeMS(); - dataLog("Optimized ", *codeBlock->alternative(), " using ", mode, " with ", pathName, " in ", now - before, " ms"); + dataLog("Optimized ", codeBlockName, " using ", mode, " with ", pathName, " into ", finalizer ? 
finalizer->codeSize() : 0, " bytes in ", after - before, " ms"); if (path == FTLPath) - dataLog(" (DFG: ", beforeFTL - before, ", LLVM: ", now - beforeFTL, ")"); + dataLog(" (DFG: ", m_timeBeforeFTL - before, ", B3: ", after - m_timeBeforeFTL, ")"); dataLog(".\n"); } } Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState) { - if (verboseCompilationEnabled() && osrEntryBytecodeIndex != UINT_MAX) { + if (verboseCompilationEnabled(mode) && osrEntryBytecodeIndex != UINT_MAX) { dataLog("\n"); dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n"); dataLog("\n"); @@ -162,9 +239,11 @@ Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState) Graph dfg(vm, *this, longLivedState); if (!parse(dfg)) { - finalizer = adoptPtr(new FailedFinalizer(*this)); + finalizer = std::make_unique<FailedFinalizer>(*this); return FailPath; } + + codeBlock->setCalleeSaveRegisters(RegisterSet::dfgCalleeSaveRegisters()); // By this point the DFG bytecode parser will have potentially mutated various tables // in the CodeBlock. This is a good time to perform an early shrink, which is more @@ -175,14 +254,26 @@ Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState) if (validationEnabled()) validate(dfg); + if (Options::dumpGraphAfterParsing()) { + dataLog("Graph after parsing:\n"); + dfg.dump(); + } + + performLiveCatchVariablePreservationPhase(dfg); + + if (Options::useMaximalFlushInsertionPhase()) + performMaximalFlushInsertion(dfg); + performCPSRethreading(dfg); performUnification(dfg); performPredictionInjection(dfg); + performStaticExecutionCountEstimation(dfg); + if (mode == FTLForOSREntryMode) { bool result = performOSREntrypointCreation(dfg); if (!result) { - finalizer = adoptPtr(new FailedFinalizer(*this)); + finalizer = std::make_unique<FailedFinalizer>(*this); return FailPath; } performCPSRethreading(dfg); @@ -194,66 +285,91 @@ Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState) performBackwardsPropagation(dfg); performPredictionPropagation(dfg); performFixup(dfg); + performStructureRegistration(dfg); performInvalidationPointInjection(dfg); performTypeCheckHoisting(dfg); - unsigned count = 1; dfg.m_fixpointState = FixpointNotConverged; - for (;; ++count) { - if (logCompilationChanges()) - dataLogF("DFG beginning optimization fixpoint iteration #%u.\n", count); - bool changed = false; + + // For now we're back to avoiding a fixpoint. Note that we've ping-ponged on this decision + // many times. For maximum throughput, it's best to fixpoint. But the throughput benefit is + // small and not likely to show up in FTL anyway. On the other hand, not fixpointing means + // that the compiler compiles more quickly. We want the third tier to compile quickly, which + // not fixpointing accomplishes; and the fourth tier shouldn't need a fixpoint. 
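// [Editorial sketch, not part of this patch.] "Fixpointing" here refers to the driver loop
// removed in this hunk (the rest of its body is deleted just below): it reran the reduction
// phases until none of them reported a change. Reconstructed from the removed lines, it had
// roughly this shape:
//
//     unsigned count = 1;
//     for (;; ++count) {
//         bool changed = false;
//         changed |= performStrengthReduction(dfg);
//         performCFA(dfg);
//         changed |= performConstantFolding(dfg);
//         changed |= performArgumentsSimplification(dfg);
//         changed |= performCFGSimplification(dfg);
//         changed |= performCSE(dfg);
//         if (!changed)
//             break;
//         performCPSRethreading(dfg);
//     }
//
// The replacement below runs each phase exactly once, trading a small amount of generated-code
// quality for faster compiles in the third tier.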
+ if (validationEnabled()) + validate(dfg); - if (validationEnabled()) - validate(dfg); + performStrengthReduction(dfg); + performLocalCSE(dfg); + performCPSRethreading(dfg); + performCFA(dfg); + performConstantFolding(dfg); + bool changed = false; + changed |= performCFGSimplification(dfg); + changed |= performLocalCSE(dfg); + + if (validationEnabled()) + validate(dfg); + + performCPSRethreading(dfg); + if (!isFTL(mode)) { + // Only run this if we're not FTLing, because currently for a LoadVarargs that is forwardable and + // in a non-varargs inlined call frame, this will generate ForwardVarargs while the FTL + // ArgumentsEliminationPhase will create a sequence of GetStack+PutStacks. The GetStack+PutStack + // sequence then gets sunk, eliminating anything that looks like an escape for subsequent phases, + // while the ForwardVarargs doesn't get simplified until later (or not at all) and looks like an + // escape for all of the arguments. This then disables object allocation sinking. + // + // So, for now, we just disable this phase for the FTL. + // + // If we wanted to enable it, we'd have to do any of the following: + // - Enable ForwardVarargs->GetStack+PutStack strength reduction, and have that run before + // PutStack sinking and object allocation sinking. + // - Make VarargsForwarding emit a GetLocal+SetLocal sequence, that we can later turn into + // GetStack+PutStack. + // + // But, it's not super valuable to enable those optimizations, since the FTL + // ArgumentsEliminationPhase does everything that this phase does, and it doesn't introduce this + // pathology. - changed |= performStrengthReduction(dfg); + changed |= performVarargsForwarding(dfg); // Do this after CFG simplification and CPS rethreading. + } + if (changed) { performCFA(dfg); - changed |= performConstantFolding(dfg); - changed |= performArgumentsSimplification(dfg); - changed |= performCFGSimplification(dfg); - changed |= performCSE(dfg); - - if (!changed) - break; - - performCPSRethreading(dfg); + performConstantFolding(dfg); } - if (logCompilationChanges()) - dataLogF("DFG optimization fixpoint converged in %u iterations.\n", count); - - dfg.m_fixpointState = FixpointConverged; - - performStoreBarrierElision(dfg); - performStoreElimination(dfg); - // If we're doing validation, then run some analyses, to give them an opportunity // to self-validate. Now is as good a time as any to do this. 
if (validationEnabled()) { - dfg.m_dominators.computeIfNecessary(dfg); - dfg.m_naturalLoops.computeIfNecessary(dfg); + dfg.ensureDominators(); + dfg.ensureNaturalLoops(); + dfg.ensurePrePostNumbering(); } switch (mode) { case DFGMode: { + dfg.m_fixpointState = FixpointConverged; + performTierUpCheckInjection(dfg); + performFastStoreBarrierInsertion(dfg); + performCleanUp(dfg); performCPSRethreading(dfg); performDCE(dfg); + if (Options::useCopyBarrierOptimization()) + performCopyBarrierOptimization(dfg); + performPhantomInsertion(dfg); performStackLayout(dfg); performVirtualRegisterAllocation(dfg); performWatchpointCollection(dfg); dumpAndVerifyGraph(dfg, "Graph after optimization:"); JITCompiler dataFlowJIT(dfg); - if (codeBlock->codeType() == FunctionCode) { + if (codeBlock->codeType() == FunctionCode) dataFlowJIT.compileFunction(); - dataFlowJIT.linkFunction(); - } else { + else dataFlowJIT.compile(); - dataFlowJIT.link(); - } return DFGPath; } @@ -262,53 +378,121 @@ Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState) case FTLForOSREntryMode: { #if ENABLE(FTL_JIT) if (FTL::canCompile(dfg) == FTL::CannotCompile) { - finalizer = adoptPtr(new FailedFinalizer(*this)); + finalizer = std::make_unique<FailedFinalizer>(*this); return FailPath; } + performCleanUp(dfg); // Reduce the graph size a bit. performCriticalEdgeBreaking(dfg); - performLoopPreHeaderCreation(dfg); + if (Options::createPreHeaders()) + performLoopPreHeaderCreation(dfg); performCPSRethreading(dfg); performSSAConversion(dfg); performSSALowering(dfg); + + // Ideally, these would be run to fixpoint with the object allocation sinking phase. + performArgumentsElimination(dfg); + if (Options::usePutStackSinking()) + performPutStackSinking(dfg); + + performConstantHoisting(dfg); + performGlobalCSE(dfg); + performLivenessAnalysis(dfg); + performIntegerRangeOptimization(dfg); performLivenessAnalysis(dfg); performCFA(dfg); + performConstantFolding(dfg); + performCleanUp(dfg); // Reduce the graph size a lot. + changed = false; + changed |= performStrengthReduction(dfg); + if (Options::useObjectAllocationSinking()) { + changed |= performCriticalEdgeBreaking(dfg); + changed |= performObjectAllocationSinking(dfg); + } + if (changed) { + // State-at-tail and state-at-head will be invalid if we did strength reduction since + // it might increase live ranges. + performLivenessAnalysis(dfg); + performCFA(dfg); + performConstantFolding(dfg); + } + + // Currently, this relies on pre-headers still being valid. That precludes running CFG + // simplification before it, unless we re-created the pre-headers. There wouldn't be anything + // wrong with running LICM earlier, if we wanted to put other CFG transforms above this point. + // Alternatively, we could run loop pre-header creation after SSA conversion - but if we did that + // then we'd need to do some simple SSA fix-up. performLICM(dfg); - performCSE(dfg); + + performCleanUp(dfg); + performIntegerCheckCombining(dfg); + performGlobalCSE(dfg); + + // At this point we're not allowed to do any further code motion because our reasoning + // about code motion assumes that it's OK to insert GC points in random places. + dfg.m_fixpointState = FixpointConverged; + performLivenessAnalysis(dfg); performCFA(dfg); - if (Options::validateFTLOSRExitLiveness()) - performResurrectionForValidation(dfg); - performDCE(dfg); // We rely on this to convert dead SetLocals into the appropriate hint, and to kill dead code that won't be recognized as dead by LLVM. 
+ performGlobalStoreBarrierInsertion(dfg); + if (Options::useMovHintRemoval()) + performMovHintRemoval(dfg); + performCleanUp(dfg); + performDCE(dfg); // We rely on this to kill dead code that won't be recognized as dead by B3. + if (Options::useCopyBarrierOptimization()) + performCopyBarrierOptimization(dfg); performStackLayout(dfg); performLivenessAnalysis(dfg); - performFlushLivenessAnalysis(dfg); performOSRAvailabilityAnalysis(dfg); performWatchpointCollection(dfg); - dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:"); + if (FTL::canCompile(dfg) == FTL::CannotCompile) { + finalizer = std::make_unique<FailedFinalizer>(*this); + return FailPath; + } + + dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldDumpDisassembly(mode)); + + // Flash a safepoint in case the GC wants some action. + Safepoint::Result safepointResult; + { + GraphSafepoint safepoint(dfg, safepointResult); + } + if (safepointResult.didGetCancelled()) + return CancelPath; + + FTL::State state(dfg); + FTL::lowerDFGToB3(state); - initializeLLVM(); + if (computeCompileTimes()) + m_timeBeforeFTL = monotonicallyIncreasingTimeMS(); - FTL::State state(dfg); - FTL::lowerDFGToLLVM(state); + if (Options::b3AlwaysFailsBeforeCompile()) { + FTL::fail(state); + return FTLPath; + } - if (Options::reportCompileTimes()) - beforeFTL = currentTimeMS(); + FTL::compile(state, safepointResult); + if (safepointResult.didGetCancelled()) + return CancelPath; - if (Options::llvmAlwaysFailsBeforeCompile()) { + if (Options::b3AlwaysFailsBeforeLink()) { FTL::fail(state); return FTLPath; } - FTL::compile(state); + if (state.allocationFailed) { + FTL::fail(state); + return FTLPath; + } - if (Options::llvmAlwaysFailsBeforeLink()) { + FTL::link(state); + + if (state.allocationFailed) { FTL::fail(state); return FTLPath; } - FTL::link(state); return FTLPath; #else RELEASE_ASSERT_NOT_REACHED(); @@ -324,37 +508,52 @@ Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState) bool Plan::isStillValid() { - return watchpoints.areStillValid() - && chains.areStillValid(); + CodeBlock* replacement = codeBlock->replacement(); + if (!replacement) + return false; + // FIXME: This is almost certainly not necessary. There's no way for the baseline + // code to be replaced during a compilation, except if we delete the plan, in which + // case we wouldn't be here. + // https://bugs.webkit.org/show_bug.cgi?id=132707 + if (codeBlock->alternative() != replacement->baselineVersion()) + return false; + if (!watchpoints.areStillValid()) + return false; + return true; } void Plan::reallyAdd(CommonData* commonData) { - watchpoints.reallyAdd(codeBlock.get(), *commonData); + watchpoints.reallyAdd(codeBlock, *commonData); identifiers.reallyAdd(vm, commonData); weakReferences.reallyAdd(vm, commonData); transitions.reallyAdd(vm, commonData); - writeBarriers.trigger(vm); +} + +void Plan::notifyCompiling() +{ + stage = Compiling; +} + +void Plan::notifyCompiled() +{ + stage = Compiled; } void Plan::notifyReady() { - callback->compilationDidBecomeReadyAsynchronously(codeBlock.get()); - isCompiled = true; + callback->compilationDidBecomeReadyAsynchronously(codeBlock, profiledDFGCodeBlock); + stage = Ready; } CompilationResult Plan::finalizeWithoutNotifyingCallback() { + // We will establish new references from the code block to things. So, we need a barrier. 
+ vm.heap.writeBarrier(codeBlock); + if (!isStillValid()) return CompilationInvalidated; - if (vm.enabledProfiler()) - return CompilationInvalidated; - - Debugger* debugger = codeBlock->globalObject()->debugger(); - if (debugger && (debugger->isStepping() || codeBlock->baselineAlternative()->hasDebuggerRequests())) - return CompilationInvalidated; - bool result; if (codeBlock->codeType() == FunctionCode) result = finalizer->finalizeFunction(); @@ -366,12 +565,27 @@ CompilationResult Plan::finalizeWithoutNotifyingCallback() reallyAdd(codeBlock->jitCode()->dfgCommon()); + if (validationEnabled()) { + TrackedReferences trackedReferences; + + for (WriteBarrier<JSCell>& reference : codeBlock->jitCode()->dfgCommon()->weakReferences) + trackedReferences.add(reference.get()); + for (WriteBarrier<Structure>& reference : codeBlock->jitCode()->dfgCommon()->weakStructureReferences) + trackedReferences.add(reference.get()); + for (WriteBarrier<Unknown>& constant : codeBlock->constants()) + trackedReferences.add(constant.get()); + + // Check that any other references that we have anywhere in the JITCode are also + // tracked either strongly or weakly. + codeBlock->jitCode()->validateReferences(trackedReferences); + } + return CompilationSuccessful; } void Plan::finalizeAndNotifyCallback() { - callback->compilationDidComplete(codeBlock.get(), finalizeWithoutNotifyingCallback()); + callback->compilationDidComplete(codeBlock, profiledDFGCodeBlock, finalizeWithoutNotifyingCallback()); } CompilationKey Plan::key() @@ -379,6 +593,83 @@ CompilationKey Plan::key() return CompilationKey(codeBlock->alternative(), mode); } +void Plan::rememberCodeBlocks() +{ + // Compilation writes lots of values to a CodeBlock without performing + // an explicit barrier. So, we need to be pessimistic and assume that + // all our CodeBlocks must be visited during GC. 
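// [Editorial note, not part of this patch.] These barriers re-grey the CodeBlocks for a
// generational/concurrent collector: the compiler thread stored new pointers into them without
// per-store barriers, so they must be rescanned on the next GC. A generic illustration of the
// idea (the helper name is hypothetical; only writeBarrier itself appears in this patch):
//
//     // After mutating `cell` in bulk without per-store barriers, hand it back to the GC so
//     // its fields get revisited during the next marking cycle.
//     void barrierAfterBulkWrites(Heap& heap, JSCell* cell)
//     {
//         heap.writeBarrier(cell); // conservative: rescan the whole cell
//     }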
+ + Heap::heap(codeBlock)->writeBarrier(codeBlock); + Heap::heap(codeBlock)->writeBarrier(codeBlock->alternative()); + if (profiledDFGCodeBlock) + Heap::heap(profiledDFGCodeBlock)->writeBarrier(profiledDFGCodeBlock); +} + +void Plan::checkLivenessAndVisitChildren(SlotVisitor& visitor) +{ + if (!isKnownToBeLiveDuringGC()) + return; + + for (unsigned i = mustHandleValues.size(); i--;) + visitor.appendUnbarrieredValue(&mustHandleValues[i]); + + visitor.appendUnbarrieredReadOnlyPointer(codeBlock); + visitor.appendUnbarrieredReadOnlyPointer(codeBlock->alternative()); + visitor.appendUnbarrieredReadOnlyPointer(profiledDFGCodeBlock); + + if (inlineCallFrames) { + for (auto* inlineCallFrame : *inlineCallFrames) { + ASSERT(inlineCallFrame->baselineCodeBlock.get()); + visitor.appendUnbarrieredReadOnlyPointer(inlineCallFrame->baselineCodeBlock.get()); + } + } + + weakReferences.visitChildren(visitor); + transitions.visitChildren(visitor); +} + +bool Plan::isKnownToBeLiveDuringGC() +{ + if (stage == Cancelled) + return false; + if (!Heap::isMarked(codeBlock->ownerExecutable())) + return false; + if (!Heap::isMarked(codeBlock->alternative())) + return false; + if (!!profiledDFGCodeBlock && !Heap::isMarked(profiledDFGCodeBlock)) + return false; + return true; +} + +void Plan::cancel() +{ + codeBlock = nullptr; + profiledDFGCodeBlock = nullptr; + mustHandleValues.clear(); + compilation = nullptr; + finalizer = nullptr; + inlineCallFrames = nullptr; + watchpoints = DesiredWatchpoints(); + identifiers = DesiredIdentifiers(); + weakReferences = DesiredWeakReferences(); + transitions = DesiredTransitions(); + callback = nullptr; + stage = Cancelled; +} + +HashMap<CString, double> Plan::compileTimeStats() +{ + HashMap<CString, double> result; + if (Options::reportTotalCompileTimes()) { + result.add("Compile Time", totalDFGCompileTime + totalFTLCompileTime); + result.add("DFG Compile Time", totalDFGCompileTime); + result.add("FTL Compile Time", totalFTLCompileTime); + result.add("FTL (DFG) Compile Time", totalFTLDFGCompileTime); + result.add("FTL (B3) Compile Time", totalFTLB3CompileTime); + } + return result; +} + } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGPlan.h b/Source/JavaScriptCore/dfg/DFGPlan.h index a60269798..091e3cb18 100644 --- a/Source/JavaScriptCore/dfg/DFGPlan.h +++ b/Source/JavaScriptCore/dfg/DFGPlan.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,83 +26,107 @@ #ifndef DFGPlan_h #define DFGPlan_h -#include <wtf/Platform.h> - #include "CompilationResult.h" #include "DFGCompilationKey.h" #include "DFGCompilationMode.h" #include "DFGDesiredIdentifiers.h" -#include "DFGDesiredStructureChains.h" #include "DFGDesiredTransitions.h" #include "DFGDesiredWatchpoints.h" #include "DFGDesiredWeakReferences.h" -#include "DFGDesiredWriteBarriers.h" #include "DFGFinalizer.h" #include "DeferredCompilationCallback.h" #include "Operands.h" #include "ProfilerCompilation.h" +#include <wtf/HashMap.h> #include <wtf/ThreadSafeRefCounted.h> +#include <wtf/text/CString.h> namespace JSC { class CodeBlock; +class SlotVisitor; namespace DFG { class LongLivedState; +class ThreadData; #if ENABLE(DFG_JIT) struct Plan : public ThreadSafeRefCounted<Plan> { Plan( - PassRefPtr<CodeBlock>, CompilationMode, unsigned osrEntryBytecodeIndex, + CodeBlock* codeBlockToCompile, CodeBlock* profiledDFGCodeBlock, + CompilationMode, unsigned osrEntryBytecodeIndex, const Operands<JSValue>& mustHandleValues); ~Plan(); - - void compileInThread(LongLivedState&); + + void compileInThread(LongLivedState&, ThreadData*); CompilationResult finalizeWithoutNotifyingCallback(); void finalizeAndNotifyCallback(); + void notifyCompiling(); + void notifyCompiled(); void notifyReady(); CompilationKey key(); + void rememberCodeBlocks(); + void checkLivenessAndVisitChildren(SlotVisitor&); + bool isKnownToBeLiveDuringGC(); + void cancel(); + VM& vm; - RefPtr<CodeBlock> codeBlock; + + // These can be raw pointers because we visit them during every GC in checkLivenessAndVisitChildren. + CodeBlock* codeBlock; + CodeBlock* profiledDFGCodeBlock; + CompilationMode mode; const unsigned osrEntryBytecodeIndex; Operands<JSValue> mustHandleValues; + + ThreadData* threadData; RefPtr<Profiler::Compilation> compilation; - OwnPtr<Finalizer> finalizer; + std::unique_ptr<Finalizer> finalizer; + RefPtr<InlineCallFrameSet> inlineCallFrames; DesiredWatchpoints watchpoints; DesiredIdentifiers identifiers; - DesiredStructureChains chains; DesiredWeakReferences weakReferences; - DesiredWriteBarriers writeBarriers; DesiredTransitions transitions; - - double beforeFTL; - bool isCompiled; + bool willTryToTierUp { false }; + bool canTierUpAndOSREnter { false }; + + enum Stage { Preparing, Compiling, Compiled, Ready, Cancelled }; + Stage stage; RefPtr<DeferredCompilationCallback> callback; + JS_EXPORT_PRIVATE static HashMap<CString, double> compileTimeStats(); + private: - enum CompilationPath { FailPath, DFGPath, FTLPath }; + bool computeCompileTimes() const; + bool reportCompileTimes() const; + + enum CompilationPath { FailPath, DFGPath, FTLPath, CancelPath }; CompilationPath compileInThreadImpl(LongLivedState&); bool isStillValid(); void reallyAdd(CommonData*); + + double m_timeBeforeFTL; }; #else // ENABLE(DFG_JIT) class Plan : public RefCounted<Plan> { // Dummy class to allow !ENABLE(DFG_JIT) to build. +public: + static HashMap<CString, double> compileTimeStats() { return HashMap<CString, double>(); } }; #endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGResurrectionForValidationPhase.cpp b/Source/JavaScriptCore/dfg/DFGPrePostNumbering.cpp index 4c5f6949c..4b7923190 100644 --- a/Source/JavaScriptCore/dfg/DFGResurrectionForValidationPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGPrePostNumbering.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. 
All rights reserved. + * Copyright (C) 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -24,56 +24,65 @@ */ #include "config.h" -#include "DFGResurrectionForValidationPhase.h" +#include "DFGPrePostNumbering.h" #if ENABLE(DFG_JIT) -#include "DFGBasicBlockInlines.h" +#include "DFGBlockMapInlines.h" +#include "DFGBlockWorklist.h" #include "DFGGraph.h" -#include "DFGInsertionSet.h" -#include "DFGPhase.h" -#include "Operations.h" namespace JSC { namespace DFG { -class ResurrectionForValidationPhase : public Phase { -public: - ResurrectionForValidationPhase(Graph& graph) - : Phase(graph, "resurrection for validation") - { - } +PrePostNumbering::PrePostNumbering(Graph& graph) +{ + m_map = BlockMap<Numbering>(graph); - bool run() - { - InsertionSet insertionSet(m_graph); - - for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) - continue; - - for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { - Node* node = block->at(nodeIndex); - if (!node->hasResult()) - continue; - insertionSet.insertNode( - nodeIndex + 1, SpecNone, Phantom, node->codeOrigin, Edge(node)); - } - - insertionSet.execute(block); + PostOrderBlockWorklist worklist; + worklist.push(graph.block(0)); + unsigned nextPreNumber = 0; + unsigned nextPostNumber = 0; + while (BlockWithOrder item = worklist.pop()) { + switch (item.order) { + case VisitOrder::Pre: + m_map[item.node].m_preNumber = nextPreNumber++; + worklist.pushPost(item.node); + for (BasicBlock* successor : item.node->successors()) + worklist.push(successor); + break; + case VisitOrder::Post: + m_map[item.node].m_postNumber = nextPostNumber++; + break; } - - return true; } -}; +} + +PrePostNumbering::~PrePostNumbering() { } + +} } // namespace JSC::DFG + +namespace WTF { + +using namespace JSC::DFG; -bool performResurrectionForValidation(Graph& graph) +void printInternal(PrintStream& out, EdgeKind kind) { - SamplingRegion samplingRegion("DFG Resurrection For Validation Phase"); - return runPhase<ResurrectionForValidationPhase>(graph); + switch (kind) { + case ForwardEdge: + out.print("ForwardEdge"); + return; + case CrossEdge: + out.print("CrossEdge"); + return; + case BackEdge: + out.print("BackEdge"); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); } -} } // namespace JSC::DFG +} // namespace WTF #endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGPrePostNumbering.h b/Source/JavaScriptCore/dfg/DFGPrePostNumbering.h new file mode 100644 index 000000000..de8282c4a --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGPrePostNumbering.h @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGPrePostNumbering_h +#define DFGPrePostNumbering_h + +#if ENABLE(DFG_JIT) + +#include "DFGBasicBlock.h" +#include "DFGBlockMap.h" +#include <wtf/FastMalloc.h> +#include <wtf/Noncopyable.h> + +namespace JSC { namespace DFG { + +enum EdgeKind { + ForwardEdge, + CrossEdge, + BackEdge +}; + +class PrePostNumbering { + WTF_MAKE_NONCOPYABLE(PrePostNumbering); + WTF_MAKE_FAST_ALLOCATED; +public: + PrePostNumbering(Graph&); + ~PrePostNumbering(); + + unsigned preNumber(BasicBlock* block) const { return m_map[block].m_preNumber; } + unsigned postNumber(BasicBlock* block) const { return m_map[block].m_postNumber; } + + // Is from a strict ancestor of to? + bool isStrictAncestorOf(BasicBlock* from, BasicBlock* to) const + { + return preNumber(from) < preNumber(to) + && postNumber(from) > postNumber(to); + } + + bool isAncestorOf(BasicBlock* from, BasicBlock* to) const + { + return from == to || isStrictAncestorOf(from, to); + } + + bool isStrictDescendantOf(BasicBlock* from, BasicBlock* to) const + { + return isStrictAncestorOf(to, from); + } + + bool isDescendantOf(BasicBlock* from, BasicBlock* to) const + { + return isAncestorOf(to, from); + } + + // This will give a bogus answer if there is actually no such edge. If you want to determine + // if there is any such edge, you have to do it yourself. + EdgeKind edgeKind(BasicBlock* from, BasicBlock* to) const + { + if (isStrictDescendantOf(to, from)) + return ForwardEdge; + + if (isAncestorOf(to, from)) + return BackEdge; + + return CrossEdge; + } + +private: + struct Numbering { + unsigned m_preNumber; + unsigned m_postNumber; + }; + + BlockMap<Numbering> m_map; +}; + +} } // namespace JSC::DFG + +namespace WTF { + +void printInternal(PrintStream&, JSC::DFG::EdgeKind); + +} // namespace WTF + +#endif // ENABLE(DFG_JIT) + +#endif // DFGPrePostNumbering_h + diff --git a/Source/JavaScriptCore/dfg/DFGPreciseLocalClobberize.h b/Source/JavaScriptCore/dfg/DFGPreciseLocalClobberize.h new file mode 100644 index 000000000..3a7d716db --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGPreciseLocalClobberize.h @@ -0,0 +1,177 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGPreciseLocalClobberize_h +#define DFGPreciseLocalClobberize_h + +#if ENABLE(DFG_JIT) + +#include "DFGClobberize.h" +#include "DFGMayExit.h" + +namespace JSC { namespace DFG { + +template<typename ReadFunctor, typename WriteFunctor, typename DefFunctor> +class PreciseLocalClobberizeAdaptor { +public: + PreciseLocalClobberizeAdaptor( + Graph& graph, Node* node, + const ReadFunctor& read, const WriteFunctor& write, const DefFunctor& def) + : m_graph(graph) + , m_node(node) + , m_read(read) + , m_unconditionalWrite(write) + , m_def(def) + { + } + + void read(AbstractHeap heap) + { + if (heap.kind() == Stack) { + if (heap.payload().isTop()) { + readTop(); + return; + } + + callIfAppropriate(m_read, VirtualRegister(heap.payload().value32())); + return; + } + + if (heap.overlaps(Stack)) { + readTop(); + return; + } + } + + void write(AbstractHeap heap) + { + // We expect stack writes to already be precisely characterized by DFG::clobberize(). + if (heap.kind() == Stack) { + RELEASE_ASSERT(!heap.payload().isTop()); + callIfAppropriate(m_unconditionalWrite, VirtualRegister(heap.payload().value32())); + return; + } + + RELEASE_ASSERT(!heap.overlaps(Stack)); + } + + void def(PureValue) + { + // PureValue defs never have anything to do with locals, so ignore this. + } + + void def(HeapLocation location, LazyNode node) + { + if (location.kind() != StackLoc) + return; + + RELEASE_ASSERT(location.heap().kind() == Stack); + + m_def(VirtualRegister(location.heap().payload().value32()), node); + } + +private: + template<typename Functor> + void callIfAppropriate(const Functor& functor, VirtualRegister operand) + { + if (operand.isLocal() && static_cast<unsigned>(operand.toLocal()) >= m_graph.block(0)->variablesAtHead.numberOfLocals()) + return; + + if (operand.isArgument() && !operand.isHeader() && static_cast<unsigned>(operand.toArgument()) >= m_graph.block(0)->variablesAtHead.numberOfArguments()) + return; + + functor(operand); + } + + void readTop() + { + switch (m_node->op()) { + case GetMyArgumentByVal: + case ForwardVarargs: + case CallForwardVarargs: + case ConstructForwardVarargs: + case TailCallForwardVarargs: + case TailCallForwardVarargsInlinedCaller: { + InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame; + if (!inlineCallFrame) { + // Read the outermost arguments and argument count. 
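// [Editorial illustration, not part of this patch.] For the outermost (non-inlined) case handled
// in this branch, the loop below walks the machine argument slots, skipping `this` at argument
// index 0. For a hypothetical codeBlock with numParameters() == 3, the reads reported are:
//
//     m_read(virtualRegisterForArgument(2));
//     m_read(virtualRegisterForArgument(1));
//     m_read(VirtualRegister(JSStack::ArgumentCount)); // the frame's argument-count header slot
//
// The inlined case further down reports the same argument registers offset by the inline call
// frame's stackOffset.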
+ for (unsigned i = m_graph.m_codeBlock->numParameters(); i-- > 1;) + m_read(virtualRegisterForArgument(i)); + m_read(VirtualRegister(JSStack::ArgumentCount)); + break; + } + + for (unsigned i = inlineCallFrame->arguments.size(); i-- > 1;) + m_read(VirtualRegister(inlineCallFrame->stackOffset + virtualRegisterForArgument(i).offset())); + if (inlineCallFrame->isVarargs()) + m_read(VirtualRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount)); + break; + } + + default: { + // All of the outermost arguments, except this, are definitely read. + for (unsigned i = m_graph.m_codeBlock->numParameters(); i-- > 1;) + m_read(virtualRegisterForArgument(i)); + + // The stack header is read. + for (unsigned i = 0; i < JSStack::ThisArgument; ++i) + m_read(VirtualRegister(i)); + + // Read all of the inline arguments and call frame headers that we didn't already capture. + for (InlineCallFrame* inlineCallFrame = m_node->origin.semantic.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->getCallerInlineFrameSkippingTailCalls()) { + for (unsigned i = inlineCallFrame->arguments.size(); i-- > 1;) + m_read(VirtualRegister(inlineCallFrame->stackOffset + virtualRegisterForArgument(i).offset())); + if (inlineCallFrame->isClosureCall) + m_read(VirtualRegister(inlineCallFrame->stackOffset + JSStack::Callee)); + if (inlineCallFrame->isVarargs()) + m_read(VirtualRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount)); + } + break; + } } + } + + Graph& m_graph; + Node* m_node; + const ReadFunctor& m_read; + const WriteFunctor& m_unconditionalWrite; + const DefFunctor& m_def; +}; + +template<typename ReadFunctor, typename WriteFunctor, typename DefFunctor> +void preciseLocalClobberize( + Graph& graph, Node* node, + const ReadFunctor& read, const WriteFunctor& write, const DefFunctor& def) +{ + PreciseLocalClobberizeAdaptor<ReadFunctor, WriteFunctor, DefFunctor> + adaptor(graph, node, read, write, def); + clobberize(graph, node, adaptor); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGPreciseLocalClobberize_h + diff --git a/Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.cpp b/Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.cpp index 21da5fe0a..d9a39f90f 100644 --- a/Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.cpp @@ -31,7 +31,7 @@ #include "DFGBasicBlockInlines.h" #include "DFGGraph.h" #include "DFGPhase.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.h b/Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.h index 00f04a3d3..232f8bfe5 100644 --- a/Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.h +++ b/Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.h @@ -26,8 +26,6 @@ #ifndef DFGPredictionInjectionPhase_h #define DFGPredictionInjectionPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) namespace JSC { namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp index d859849a3..9283ae302 100644 --- a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved. + * Copyright (C) 2011-2016 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,21 +30,10 @@ #include "DFGGraph.h" #include "DFGPhase.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { -SpeculatedType resultOfToPrimitive(SpeculatedType type) -{ - if (type & SpecObject) { - // Objects get turned into strings. So if the input has hints of objectness, - // the output will have hinsts of stringiness. - return mergeSpeculations(type & ~SpecObject, SpecString); - } - - return type; -} - class PredictionPropagationPhase : public Phase { public: PredictionPropagationPhase(Graph& graph) @@ -56,9 +45,31 @@ public: { ASSERT(m_graph.m_form == ThreadedCPS); ASSERT(m_graph.m_unificationState == GloballyUnified); - - // 1) propagate predictions + propagateThroughArgumentPositions(); + + m_pass = PrimaryPass; + propagateToFixpoint(); + + m_pass = RareCasePass; + propagateToFixpoint(); + + m_pass = DoubleVotingPass; + do { + m_changed = false; + doRoundOfDoubleVoting(); + if (!m_changed) + break; + m_changed = false; + propagateForward(); + } while (m_changed); + + return true; + } + +private: + void propagateToFixpoint() + { do { m_changed = false; @@ -75,22 +86,8 @@ public: m_changed = false; propagateBackward(); } while (m_changed); - - // 2) repropagate predictions while doing double voting. - - do { - m_changed = false; - doRoundOfDoubleVoting(); - if (!m_changed) - break; - m_changed = false; - propagateForward(); - } while (m_changed); - - return true; } -private: bool setPrediction(SpeculatedType prediction) { ASSERT(m_currentNode->hasResult()); @@ -113,11 +110,14 @@ private: SpeculatedType speculatedDoubleTypeForPrediction(SpeculatedType value) { - if (!isFullNumberSpeculation(value)) - return SpecDouble; - if (value & SpecDoubleNaN) - return SpecDouble; - return SpecDoubleReal; + SpeculatedType result = SpecDoubleReal; + if (value & SpecDoubleImpureNaN) + result |= SpecDoubleImpureNaN; + if (value & SpecDoublePureNaN) + result |= SpecDoublePureNaN; + if (!isFullNumberOrBooleanSpeculation(value)) + result |= SpecDoublePureNaN; + return result; } SpeculatedType speculatedDoubleTypeForPredictions(SpeculatedType left, SpeculatedType right) @@ -132,19 +132,23 @@ private: bool changed = false; switch (op) { - case JSConstant: - case WeakJSConstant: { - SpeculatedType type = speculationFromValue(m_graph.valueOfJSConstant(node)); - if (type == SpecInt52AsDouble) + case JSConstant: { + SpeculatedType type = speculationFromValue(node->asJSValue()); + if (type == SpecInt52AsDouble && enableInt52()) type = SpecInt52; changed |= setPrediction(type); break; } + case DoubleConstant: { + SpeculatedType type = speculationFromValue(node->asJSValue()); + changed |= setPrediction(type); + break; + } case GetLocal: { VariableAccessData* variable = node->variableAccessData(); SpeculatedType prediction = variable->prediction(); - if (variable->shouldNeverUnbox() && (prediction & SpecInt52)) + if (!variable->couldRepresentInt52() && (prediction & SpecInt52)) prediction = (prediction | SpecInt52AsDouble) & ~SpecInt52; if (prediction) changed |= mergePrediction(prediction); @@ -163,7 +167,8 @@ private: case BitRShift: case BitLShift: case BitURShift: - case ArithIMul: { + case ArithIMul: + case ArithClz32: { changed |= setPrediction(SpecInt32); break; } @@ -172,17 +177,60 @@ private: case ArrayPush: case RegExpExec: case RegExpTest: + case StringReplace: case GetById: case GetByIdFlush: - case GetMyArgumentByValSafe: case 
GetByOffset: + case MultiGetByOffset: + case GetDirectPname: case Call: + case TailCallInlinedCaller: case Construct: + case CallVarargs: + case TailCallVarargsInlinedCaller: + case ConstructVarargs: + case CallForwardVarargs: + case ConstructForwardVarargs: + case TailCallForwardVarargsInlinedCaller: case GetGlobalVar: - case GetClosureVar: { + case GetGlobalLexicalVariable: + case GetClosureVar: + case GetFromArguments: { changed |= setPrediction(node->getHeapPrediction()); break; } + + case GetGetterSetterByOffset: + case GetExecutable: { + changed |= setPrediction(SpecCellOther); + break; + } + + case GetGetter: + case GetSetter: + case GetCallee: + case NewArrowFunction: + case NewFunction: + case NewGeneratorFunction: { + changed |= setPrediction(SpecFunction); + break; + } + + case GetArgumentCount: { + changed |= setPrediction(SpecInt32); + break; + } + + case GetRestLength: { + changed |= setPrediction(SpecInt32); + break; + } + + case GetTypedArrayByteOffset: + case GetArrayLength: { + changed |= setPrediction(SpecInt32); + break; + } case StringCharCodeAt: { changed |= setPrediction(SpecInt32); @@ -192,7 +240,7 @@ private: case UInt32ToNumber: { // FIXME: Support Int52. // https://bugs.webkit.org/show_bug.cgi?id=125704 - if (nodeCanSpeculateInt32(node->arithNodeFlags())) + if (node->canSpeculateInt32(m_pass)) changed |= mergePrediction(SpecInt32); else changed |= mergePrediction(SpecBytecodeNumber); @@ -204,28 +252,31 @@ private: SpeculatedType right = node->child2()->prediction(); if (left && right) { - if (isFullNumberSpeculationExpectingDefined(left) && isFullNumberSpeculationExpectingDefined(right)) { - if (m_graph.addSpeculationMode(node) != DontSpeculateInt32) + if (isFullNumberOrBooleanSpeculationExpectingDefined(left) + && isFullNumberOrBooleanSpeculationExpectingDefined(right)) { + if (m_graph.addSpeculationMode(node, m_pass) != DontSpeculateInt32) changed |= mergePrediction(SpecInt32); else if (m_graph.addShouldSpeculateMachineInt(node)) changed |= mergePrediction(SpecInt52); else changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right)); - } else if (!(left & SpecFullNumber) || !(right & SpecFullNumber)) { + } else if ( + !(left & (SpecFullNumber | SpecBoolean)) + || !(right & (SpecFullNumber | SpecBoolean))) { // left or right is definitely something other than a number. 
changed |= mergePrediction(SpecString); } else - changed |= mergePrediction(SpecString | SpecInt32 | SpecDouble); + changed |= mergePrediction(SpecString | SpecInt32 | SpecBytecodeDouble); } break; } - + case ArithAdd: { SpeculatedType left = node->child1()->prediction(); SpeculatedType right = node->child2()->prediction(); if (left && right) { - if (m_graph.addSpeculationMode(node) != DontSpeculateInt32) + if (m_graph.addSpeculationMode(node, m_pass) != DontSpeculateInt32) changed |= mergePrediction(SpecInt32); else if (m_graph.addShouldSpeculateMachineInt(node)) changed |= mergePrediction(SpecInt52); @@ -238,23 +289,27 @@ private: case ArithSub: { SpeculatedType left = node->child1()->prediction(); SpeculatedType right = node->child2()->prediction(); - + if (left && right) { - if (m_graph.addSpeculationMode(node) != DontSpeculateInt32) - changed |= mergePrediction(SpecInt32); - else if (m_graph.addShouldSpeculateMachineInt(node)) - changed |= mergePrediction(SpecInt52); - else - changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right)); + if (isFullNumberOrBooleanSpeculationExpectingDefined(left) + && isFullNumberOrBooleanSpeculationExpectingDefined(right)) { + if (m_graph.addSpeculationMode(node, m_pass) != DontSpeculateInt32) + changed |= mergePrediction(SpecInt32); + else if (m_graph.addShouldSpeculateMachineInt(node)) + changed |= mergePrediction(SpecInt52); + else + changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right)); + } else + changed |= mergePrediction(SpecInt32 | SpecBytecodeDouble); } break; } - + case ArithNegate: if (node->child1()->prediction()) { - if (m_graph.negateShouldSpeculateInt32(node)) + if (m_graph.unaryArithShouldSpeculateInt32(node, m_pass)) changed |= mergePrediction(SpecInt32); - else if (m_graph.negateShouldSpeculateMachineInt(node)) + else if (m_graph.unaryArithShouldSpeculateMachineInt(node, m_pass)) changed |= mergePrediction(SpecInt52); else changed |= mergePrediction(speculatedDoubleTypeForPrediction(node->child1()->prediction())); @@ -267,8 +322,8 @@ private: SpeculatedType right = node->child2()->prediction(); if (left && right) { - if (Node::shouldSpeculateInt32ForArithmetic(node->child1().node(), node->child2().node()) - && nodeCanSpeculateInt32(node->arithNodeFlags())) + if (Node::shouldSpeculateInt32OrBooleanForArithmetic(node->child1().node(), node->child2().node()) + && node->canSpeculateInt32(m_pass)) changed |= mergePrediction(SpecInt32); else changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right)); @@ -281,55 +336,71 @@ private: SpeculatedType right = node->child2()->prediction(); if (left && right) { - if (m_graph.mulShouldSpeculateInt32(node)) - changed |= mergePrediction(SpecInt32); - else if (m_graph.mulShouldSpeculateMachineInt(node)) - changed |= mergePrediction(SpecInt52); - else - changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right)); - } - break; - } - - case ArithDiv: { - SpeculatedType left = node->child1()->prediction(); - SpeculatedType right = node->child2()->prediction(); - - if (left && right) { - if (Node::shouldSpeculateInt32ForArithmetic(node->child1().node(), node->child2().node()) - && nodeCanSpeculateInt32(node->arithNodeFlags())) - changed |= mergePrediction(SpecInt32); - else - changed |= mergePrediction(SpecDouble); + if (isFullNumberOrBooleanSpeculationExpectingDefined(left) + && isFullNumberOrBooleanSpeculationExpectingDefined(right)) { + if (m_graph.binaryArithShouldSpeculateInt32(node, m_pass)) + changed |= 
mergePrediction(SpecInt32); + else if (m_graph.binaryArithShouldSpeculateMachineInt(node, m_pass)) + changed |= mergePrediction(SpecInt52); + else + changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right)); + } else { + if (node->mayHaveNonIntResult()) + changed |= mergePrediction(SpecInt32 | SpecBytecodeDouble); + else + changed |= mergePrediction(SpecInt32); + } } break; } - + + case ArithDiv: case ArithMod: { SpeculatedType left = node->child1()->prediction(); SpeculatedType right = node->child2()->prediction(); if (left && right) { - if (Node::shouldSpeculateInt32ForArithmetic(node->child1().node(), node->child2().node()) - && nodeCanSpeculateInt32(node->arithNodeFlags())) - changed |= mergePrediction(SpecInt32); - else - changed |= mergePrediction(SpecDouble); + if (isFullNumberOrBooleanSpeculationExpectingDefined(left) + && isFullNumberOrBooleanSpeculationExpectingDefined(right)) { + if (m_graph.binaryArithShouldSpeculateInt32(node, m_pass)) + changed |= mergePrediction(SpecInt32); + else + changed |= mergePrediction(SpecBytecodeDouble); + } else + changed |= mergePrediction(SpecInt32 | SpecBytecodeDouble); } break; } - + + case ArithPow: case ArithSqrt: + case ArithFRound: case ArithSin: - case ArithCos: { - changed |= setPrediction(SpecDouble); + case ArithCos: + case ArithLog: { + changed |= setPrediction(SpecBytecodeDouble); break; } - + + case ArithRandom: { + changed |= setPrediction(SpecDoubleReal); + break; + } + + case ArithRound: + case ArithFloor: + case ArithCeil: { + if (isInt32OrBooleanSpeculation(node->getHeapPrediction()) && m_graph.roundShouldSpeculateInt32(node, m_pass)) + changed |= setPrediction(SpecInt32); + else + changed |= setPrediction(SpecBytecodeDouble); + break; + } + case ArithAbs: { SpeculatedType child = node->child1()->prediction(); - if (isInt32SpeculationForArithmetic(child) - && nodeCanSpeculateInt32(node->arithNodeFlags())) + if (isInt32OrBooleanSpeculationForArithmetic(child) + && node->canSpeculateInt32(m_pass)) changed |= mergePrediction(SpecInt32); else changed |= mergePrediction(speculatedDoubleTypeForPrediction(child)); @@ -342,51 +413,77 @@ private: case CompareGreater: case CompareGreaterEq: case CompareEq: - case CompareEqConstant: case CompareStrictEq: - case CompareStrictEqConstant: + case OverridesHasInstance: case InstanceOf: + case InstanceOfCustom: case IsUndefined: case IsBoolean: case IsNumber: case IsString: case IsObject: + case IsObjectOrNull: case IsFunction: { changed |= setPrediction(SpecBoolean); break; } case TypeOf: { - changed |= setPrediction(SpecString); + changed |= setPrediction(SpecStringIdent); break; } case GetByVal: { if (!node->child1()->prediction()) break; - if (!node->getHeapPrediction()) - break; - if (node->child1()->shouldSpeculateFloat32Array() - || node->child1()->shouldSpeculateFloat64Array()) - changed |= mergePrediction(SpecDouble); - else if (node->child1()->shouldSpeculateUint32Array()) { - if (isInt32Speculation(node->getHeapPrediction())) + ArrayMode arrayMode = node->arrayMode().refine( + m_graph, node, + node->child1()->prediction(), + node->child2()->prediction(), + SpecNone); + + switch (arrayMode.type()) { + case Array::Int32: + if (arrayMode.isOutOfBounds()) + changed |= mergePrediction(node->getHeapPrediction() | SpecInt32); + else changed |= mergePrediction(SpecInt32); + break; + case Array::Double: + if (arrayMode.isOutOfBounds()) + changed |= mergePrediction(node->getHeapPrediction() | SpecDoubleReal); else - changed |= mergePrediction(SpecInt52); - } else + changed |= 
mergePrediction(SpecDoubleReal); + break; + case Array::Float32Array: + case Array::Float64Array: + changed |= mergePrediction(SpecFullDouble); + break; + case Array::Uint32Array: + if (isInt32SpeculationForArithmetic(node->getHeapPrediction())) + changed |= mergePrediction(SpecInt32); + else if (enableInt52()) + changed |= mergePrediction(SpecMachineInt); + else + changed |= mergePrediction(SpecInt32 | SpecInt52AsDouble); + break; + case Array::Int8Array: + case Array::Uint8Array: + case Array::Int16Array: + case Array::Uint16Array: + case Array::Int32Array: + changed |= mergePrediction(SpecInt32); + break; + default: changed |= mergePrediction(node->getHeapPrediction()); + break; + } break; } - case GetMyArgumentsLengthSafe: { - changed |= setPrediction(SpecInt32); - break; - } - - case GetClosureRegisters: - case GetButterfly: + case GetButterfly: + case GetButterflyReadOnly: case GetIndexedPropertyStorage: case AllocatePropertyStorage: case ReallocatePropertyStorage: { @@ -395,29 +492,69 @@ private: } case ToThis: { + // ToThis in methods for primitive types should speculate primitive types in strict mode. + ECMAMode ecmaMode = m_graph.executableFor(node->origin.semantic)->isStrictMode() ? StrictMode : NotStrictMode; + if (ecmaMode == StrictMode) { + if (node->child1()->shouldSpeculateBoolean()) { + changed |= mergePrediction(SpecBoolean); + break; + } + + if (node->child1()->shouldSpeculateInt32()) { + changed |= mergePrediction(SpecInt32); + break; + } + + if (enableInt52() && node->child1()->shouldSpeculateMachineInt()) { + changed |= mergePrediction(SpecMachineInt); + break; + } + + if (node->child1()->shouldSpeculateNumber()) { + changed |= mergePrediction(SpecMachineInt); + break; + } + + if (node->child1()->shouldSpeculateSymbol()) { + changed |= mergePrediction(SpecSymbol); + break; + } + + if (node->child1()->shouldSpeculateStringIdent()) { + changed |= mergePrediction(SpecStringIdent); + break; + } + + if (node->child1()->shouldSpeculateString()) { + changed |= mergePrediction(SpecString); + break; + } + } else { + if (node->child1()->shouldSpeculateString()) { + changed |= mergePrediction(SpecStringObject); + break; + } + } + SpeculatedType prediction = node->child1()->prediction(); if (prediction) { if (prediction & ~SpecObject) { - prediction &= SpecObject; - prediction = mergeSpeculations(prediction, SpecObjectOther); + // Wrapper objects are created only in sloppy mode. 
+ if (ecmaMode != StrictMode) { + prediction &= SpecObject; + prediction = mergeSpeculations(prediction, SpecObjectOther); + } } changed |= mergePrediction(prediction); } break; } - case GetMyScope: - case SkipTopScope: case SkipScope: { changed |= setPrediction(SpecObjectOther); break; } - case GetCallee: { - changed |= setPrediction(SpecFunction); - break; - } - case CreateThis: case NewObject: { changed |= setPrediction(SpecFinalObject); @@ -436,7 +573,11 @@ private: break; } - case NewRegexp: + case NewRegexp: { + changed |= setPrediction(SpecRegExpObject); + break; + } + case CreateActivation: { changed |= setPrediction(SpecObjectOther); break; @@ -448,8 +589,10 @@ private: break; } case StringCharAt: + case CallStringConstructor: case ToString: - case MakeRope: { + case MakeRope: + case StrCat: { changed |= setPrediction(SpecString); break; } @@ -466,50 +609,66 @@ private: break; } - case CreateArguments: { - changed |= setPrediction(SpecArguments); + case CreateDirectArguments: { + changed |= setPrediction(SpecDirectArguments); break; } - case NewFunction: { - SpeculatedType child = node->child1()->prediction(); - if (child & SpecEmpty) - changed |= mergePrediction((child & ~SpecEmpty) | SpecFunction); - else - changed |= mergePrediction(child); + case CreateScopedArguments: { + changed |= setPrediction(SpecScopedArguments); break; } - case NewFunctionNoCheck: - case NewFunctionExpression: { - changed |= setPrediction(SpecFunction); + case CreateClonedArguments: { + changed |= setPrediction(SpecObjectOther); break; } + case FiatInt52: { + RELEASE_ASSERT(enableInt52()); + changed |= setPrediction(SpecMachineInt); + break; + } + case PutByValAlias: - case GetArrayLength: - case GetTypedArrayByteOffset: - case Int32ToDouble: case DoubleAsInt32: case GetLocalUnlinked: - case GetMyArgumentsLength: - case GetMyArgumentByVal: - case PhantomPutStructure: - case PhantomArguments: case CheckArray: + case CheckTypeInfoFlags: case Arrayify: case ArrayifyToStructure: case CheckTierUpInLoop: case CheckTierUpAtReturn: case CheckTierUpAndOSREnter: + case CheckTierUpWithNestedTriggerAndOSREnter: case InvalidationPoint: - case Int52ToValue: - case Int52ToDouble: case CheckInBounds: - case ValueToInt32: { + case ValueToInt32: + case DoubleRep: + case ValueRep: + case Int52Rep: + case Int52Constant: + case Identity: + case BooleanToNumber: + case PhantomNewObject: + case PhantomNewFunction: + case PhantomNewGeneratorFunction: + case PhantomCreateActivation: + case PhantomDirectArguments: + case PhantomClonedArguments: + case GetMyArgumentByVal: + case ForwardVarargs: + case PutHint: + case CheckStructureImmediate: + case MaterializeNewObject: + case MaterializeCreateActivation: + case PutStack: + case KillStack: + case StoreBarrier: + case GetStack: { // This node should never be visible at this stage of compilation. It is // inserted by fixup(), which follows this phase. - RELEASE_ASSERT_NOT_REACHED(); + DFG_CRASH(m_graph, node, "Unexpected node during prediction propagation"); break; } @@ -520,69 +679,101 @@ private: break; case Upsilon: - case GetArgument: // These don't get inserted until we go into SSA. 
RELEASE_ASSERT_NOT_REACHED(); break; - + case GetScope: changed |= setPrediction(SpecObjectOther); break; - + case In: changed |= setPrediction(SpecBoolean); break; - case Identity: - changed |= mergePrediction(node->child1()->prediction()); + case GetEnumerableLength: { + changed |= setPrediction(SpecInt32); + break; + } + case HasGenericProperty: + case HasStructureProperty: + case HasIndexedProperty: { + changed |= setPrediction(SpecBoolean); + break; + } + case GetPropertyEnumerator: { + changed |= setPrediction(SpecCell); break; + } + case GetEnumeratorStructurePname: { + changed |= setPrediction(SpecCell | SpecOther); + break; + } + case GetEnumeratorGenericPname: { + changed |= setPrediction(SpecCell | SpecOther); + break; + } + case ToIndexString: { + changed |= setPrediction(SpecString); + break; + } #ifndef NDEBUG // These get ignored because they don't return anything. - case StoreBarrier: - case ConditionalStoreBarrier: - case StoreBarrierWithNullCheck: case PutByValDirect: case PutByVal: case PutClosureVar: + case PutToArguments: case Return: + case TailCall: + case TailCallVarargs: + case TailCallForwardVarargs: case Throw: case PutById: + case PutByIdFlush: case PutByIdDirect: case PutByOffset: + case MultiPutByOffset: + case PutGetterById: + case PutSetterById: + case PutGetterSetterById: + case PutGetterByVal: + case PutSetterByVal: case DFG::Jump: case Branch: case Switch: case Breakpoint: case ProfileWillCall: case ProfileDidCall: - case CheckHasInstance: + case ProfileType: + case ProfileControlFlow: case ThrowReferenceError: case ForceOSRExit: case SetArgument: case CheckStructure: - case CheckExecutable: - case StructureTransitionWatchpoint: - case CheckFunction: + case CheckCell: + case CheckNotEmpty: + case CheckIdent: + case CheckBadCell: case PutStructure: - case TearOffActivation: - case TearOffArguments: - case CheckArgumentsNotCreated: - case VariableWatchpoint: case VarInjectionWatchpoint: - case AllocationProfileWatchpoint: case Phantom: case Check: - case PutGlobalVar: + case PutGlobalVariable: case CheckWatchdogTimer: case Unreachable: case LoopHint: case NotifyWrite: - case FunctionReentryWatchpoint: - case TypedArrayWatchpoint: case ConstantStoragePointer: case MovHint: case ZombieHint: + case ExitOK: + case LoadVarargs: + case CopyRest: + break; + + // This gets ignored because it only pretends to produce a value. + case BottomValue: + break; // This gets ignored because it already has a prediction. @@ -635,8 +826,14 @@ private: } } - void doDoubleVoting(Node* node) + void doDoubleVoting(Node* node, float weight) { + // Loop pre-headers created by OSR entrypoint creation may have NaN weight to indicate + // that we actually don't know their weight. Assume that they execute once. This turns + // out to be an OK assumption since the pre-header doesn't have any meaningful code.
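The weight sanitization just below relies on NaN being the only floating-point value that compares unequal to itself, so the expression weight != weight is a dependency-free NaN test. A minimal standalone sketch of the same idiom; the helper name is invented for illustration and is not part of the DFG:

    #include <cassert>
    #include <cmath>

    // Hypothetical helper mirroring the check below: NaN is the only value for
    // which (w != w) holds, so this behaves like std::isnan(weight).
    static float sanitizedWeight(float weight)
    {
        if (weight != weight) // true only when weight is NaN
            return 1;         // treat an unknown weight as "executes once"
        return weight;
    }

    int main()
    {
        assert(sanitizedWeight(NAN) == 1);
        assert(sanitizedWeight(2.5f) == 2.5f);
        return 0;
    }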
+ if (weight != weight) + weight = 1; + switch (node->op()) { case ValueAdd: case ArithAdd: @@ -646,15 +843,16 @@ private: DoubleBallot ballot; - if (isFullNumberSpeculationExpectingDefined(left) && isFullNumberSpeculationExpectingDefined(right) - && !m_graph.addShouldSpeculateInt32(node) + if (isFullNumberSpeculation(left) + && isFullNumberSpeculation(right) + && !m_graph.addShouldSpeculateInt32(node, m_pass) && !m_graph.addShouldSpeculateMachineInt(node)) ballot = VoteDouble; else ballot = VoteValue; - m_graph.voteNode(node->child1(), ballot); - m_graph.voteNode(node->child2(), ballot); + m_graph.voteNode(node->child1(), ballot, weight); + m_graph.voteNode(node->child2(), ballot, weight); break; } @@ -664,15 +862,16 @@ private: DoubleBallot ballot; - if (isFullNumberSpeculation(left) && isFullNumberSpeculation(right) - && !m_graph.mulShouldSpeculateInt32(node) - && !m_graph.mulShouldSpeculateMachineInt(node)) + if (isFullNumberSpeculation(left) + && isFullNumberSpeculation(right) + && !m_graph.binaryArithShouldSpeculateInt32(node, m_pass) + && !m_graph.binaryArithShouldSpeculateMachineInt(node, m_pass)) ballot = VoteDouble; else ballot = VoteValue; - m_graph.voteNode(node->child1(), ballot); - m_graph.voteNode(node->child2(), ballot); + m_graph.voteNode(node->child1(), ballot, weight); + m_graph.voteNode(node->child2(), ballot, weight); break; } @@ -685,41 +884,47 @@ private: DoubleBallot ballot; - if (isFullNumberSpeculation(left) && isFullNumberSpeculation(right) - && !(Node::shouldSpeculateInt32ForArithmetic(node->child1().node(), node->child2().node()) && node->canSpeculateInt32())) + if (isFullNumberSpeculation(left) + && isFullNumberSpeculation(right) + && !m_graph.binaryArithShouldSpeculateInt32(node, m_pass)) ballot = VoteDouble; else ballot = VoteValue; - m_graph.voteNode(node->child1(), ballot); - m_graph.voteNode(node->child2(), ballot); + m_graph.voteNode(node->child1(), ballot, weight); + m_graph.voteNode(node->child2(), ballot, weight); break; } case ArithAbs: DoubleBallot ballot; - if (!(node->child1()->shouldSpeculateInt32ForArithmetic() && node->canSpeculateInt32())) + if (node->child1()->shouldSpeculateNumber() + && !m_graph.unaryArithShouldSpeculateInt32(node, m_pass)) ballot = VoteDouble; else ballot = VoteValue; - m_graph.voteNode(node->child1(), ballot); + m_graph.voteNode(node->child1(), ballot, weight); break; case ArithSqrt: case ArithCos: case ArithSin: - m_graph.voteNode(node->child1(), VoteDouble); + case ArithLog: + if (node->child1()->shouldSpeculateNumber()) + m_graph.voteNode(node->child1(), VoteDouble, weight); + else + m_graph.voteNode(node->child1(), VoteValue, weight); break; case SetLocal: { SpeculatedType prediction = node->child1()->prediction(); if (isDoubleSpeculation(prediction)) - node->variableAccessData()->vote(VoteDouble); + node->variableAccessData()->vote(VoteDouble, weight); else if ( !isFullNumberSpeculation(prediction) || isInt32Speculation(prediction) || isMachineIntSpeculation(prediction)) - node->variableAccessData()->vote(VoteValue); + node->variableAccessData()->vote(VoteValue, weight); break; } @@ -729,14 +934,14 @@ private: Edge child1 = m_graph.varArgChild(node, 0); Edge child2 = m_graph.varArgChild(node, 1); Edge child3 = m_graph.varArgChild(node, 2); - m_graph.voteNode(child1, VoteValue); - m_graph.voteNode(child2, VoteValue); + m_graph.voteNode(child1, VoteValue, weight); + m_graph.voteNode(child2, VoteValue, weight); switch (node->arrayMode().type()) { case Array::Double: - m_graph.voteNode(child3, VoteDouble); + 
m_graph.voteNode(child3, VoteDouble, weight); break; default: - m_graph.voteNode(child3, VoteValue); + m_graph.voteNode(child3, VoteValue, weight); break; } break; @@ -747,7 +952,7 @@ private: break; default: - m_graph.voteChildren(node, VoteValue); + m_graph.voteChildren(node, VoteValue, weight); break; } } @@ -763,7 +968,7 @@ private: ASSERT(block->isReachable); for (unsigned i = 0; i < block->size(); ++i) { m_currentNode = block->at(i); - doDoubleVoting(m_currentNode); + doDoubleVoting(m_currentNode, block->executionCount); } } for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i) { @@ -772,8 +977,7 @@ private: continue; m_changed |= variableAccessData->tallyVotesForShouldUseDoubleFormat(); } - for (unsigned i = 0; i < m_graph.m_argumentPositions.size(); ++i) - m_changed |= m_graph.m_argumentPositions[i].mergeArgumentPredictionAwareness(); + propagateThroughArgumentPositions(); for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i) { VariableAccessData* variableAccessData = &m_graph.m_variableAccessData[i]; if (!variableAccessData->isRoot()) @@ -782,8 +986,29 @@ private: } } + void propagateThroughArgumentPositions() + { + for (unsigned i = 0; i < m_graph.m_argumentPositions.size(); ++i) + m_changed |= m_graph.m_argumentPositions[i].mergeArgumentPredictionAwareness(); + } + + SpeculatedType resultOfToPrimitive(SpeculatedType type) + { + if (type & SpecObject) { + // We try to be optimistic here about StringObjects since it's unlikely that + // someone overrides the valueOf or toString methods. + if (type & SpecStringObject && m_graph.canOptimizeStringObjectAccess(m_currentNode->origin.semantic)) + return mergeSpeculations(type & ~SpecObject, SpecString); + + return mergeSpeculations(type & ~SpecObject, SpecPrimitive); + } + + return type; + } + Node* m_currentNode; bool m_changed; + PredictionPass m_pass; // We use different logic for considering predictions depending on how far along we are in propagation. }; bool performPredictionPropagation(Graph& graph) diff --git a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.h b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.h index 29fe8455e..082295f32 100644 --- a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.h +++ b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.h @@ -26,8 +26,6 @@ #ifndef DFGPredictionPropagationPhase_h #define DFGPredictionPropagationPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "SpeculatedType.h" @@ -48,7 +46,7 @@ class Graph; bool performPredictionPropagation(Graph&); // Helper used for FixupPhase for computing the predicted type of a ToPrimitive. -SpeculatedType resultOfToPrimitive(SpeculatedType type); +SpeculatedType resultOfToPrimitive(SpeculatedType); } } // namespace JSC::DFG::Phase diff --git a/Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.cpp b/Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.cpp new file mode 100644 index 000000000..24f69770e --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.cpp @@ -0,0 +1,113 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGPromotedHeapLocation.h" + +#if ENABLE(DFG_JIT) + +#include "DFGGraph.h" +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +void PromotedLocationDescriptor::dump(PrintStream& out) const +{ + out.print(m_kind, "(", m_info, ")"); +} + +Node* PromotedHeapLocation::createHint(Graph& graph, NodeOrigin origin, Node* value) +{ + return graph.addNode( + SpecNone, PutHint, origin, OpInfo(descriptor().imm1()), OpInfo(descriptor().imm2()), + base()->defaultEdge(), value->defaultEdge()); +} + +void PromotedHeapLocation::dump(PrintStream& out) const +{ + out.print(kind(), "(", m_base, ", ", info(), ")"); +} + +} } // namespace JSC::DFG + +namespace WTF { + +using namespace JSC::DFG; + +void printInternal(PrintStream& out, PromotedLocationKind kind) +{ + switch (kind) { + case InvalidPromotedLocationKind: + out.print("InvalidPromotedLocationKind"); + return; + + case StructurePLoc: + out.print("StructurePLoc"); + return; + + case ActivationSymbolTablePLoc: + out.print("ActivationSymbolTablePLoc"); + return; + + case NamedPropertyPLoc: + out.print("NamedPropertyPLoc"); + return; + + case ArgumentPLoc: + out.print("ArgumentPLoc"); + return; + + case ArgumentCountPLoc: + out.print("ArgumentCountPLoc"); + return; + + case ArgumentsCalleePLoc: + out.print("ArgumentsCalleePLoc"); + return; + + case FunctionExecutablePLoc: + out.print("FunctionExecutablePLoc"); + return; + + case FunctionActivationPLoc: + out.print("FunctionActivationPLoc"); + return; + + case ActivationScopePLoc: + out.print("ActivationScopePLoc"); + return; + + case ClosureVarPLoc: + out.print("ClosureVarPLoc"); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF; + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.h b/Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.h new file mode 100644 index 000000000..b4e3c2bf5 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.h @@ -0,0 +1,220 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGPromotedHeapLocation_h +#define DFGPromotedHeapLocation_h + +#if ENABLE(DFG_JIT) + +#include "DFGNode.h" +#include <wtf/PrintStream.h> + +namespace JSC { namespace DFG { + +enum PromotedLocationKind { + InvalidPromotedLocationKind, + + StructurePLoc, + ActivationSymbolTablePLoc, + NamedPropertyPLoc, + ArgumentPLoc, + ArgumentCountPLoc, + ArgumentsCalleePLoc, + + FunctionExecutablePLoc, + FunctionActivationPLoc, + ActivationScopePLoc, + ClosureVarPLoc +}; + +class PromotedLocationDescriptor { +public: + PromotedLocationDescriptor( + PromotedLocationKind kind = InvalidPromotedLocationKind, unsigned info = 0) + : m_kind(kind) + , m_info(info) + { + } + + PromotedLocationDescriptor(WTF::HashTableDeletedValueType) + : m_kind(InvalidPromotedLocationKind) + , m_info(1) + { + } + + bool operator!() const { return m_kind == InvalidPromotedLocationKind; } + + explicit operator bool() const { return !!*this; } + + PromotedLocationKind kind() const { return m_kind; } + unsigned info() const { return m_info; } + + OpInfo imm1() const { return OpInfo(static_cast<uint32_t>(m_kind)); } + OpInfo imm2() const { return OpInfo(static_cast<uint32_t>(m_info)); } + + unsigned hash() const + { + return m_kind + m_info; + } + + bool operator==(const PromotedLocationDescriptor& other) const + { + return m_kind == other.m_kind + && m_info == other.m_info; + } + + bool operator!=(const PromotedLocationDescriptor& other) const + { + return !(*this == other); + } + + bool isHashTableDeletedValue() const + { + return m_kind == InvalidPromotedLocationKind && m_info; + } + + bool neededForMaterialization() const + { + switch (kind()) { + case NamedPropertyPLoc: + case ClosureVarPLoc: + return false; + + default: + return true; + } + } + + void dump(PrintStream& out) const; + +private: + PromotedLocationKind m_kind; + unsigned m_info; +}; + +struct PromotedLocationDescriptorHash { + static unsigned hash(const PromotedLocationDescriptor& key) { return key.hash(); } + static bool equal(const PromotedLocationDescriptor& a, const PromotedLocationDescriptor& b) { return a == b; } + static const bool safeToCompareToEmptyOrDeleted = true; +}; + +class PromotedHeapLocation { +public: + PromotedHeapLocation( + PromotedLocationKind kind = InvalidPromotedLocationKind, + Node* base = nullptr, unsigned info = 0) + : m_base(base) + , m_meta(kind, info) + { + } + + PromotedHeapLocation( + PromotedLocationKind kind, Edge base, unsigned info = 0) + : PromotedHeapLocation(kind, base.node(), info) + { + } + + PromotedHeapLocation(Node* base, 
PromotedLocationDescriptor meta) + : m_base(base) + , m_meta(meta) + { + } + + PromotedHeapLocation(WTF::HashTableDeletedValueType) + : m_base(nullptr) + , m_meta(InvalidPromotedLocationKind, 1) + { + } + + Node* createHint(Graph&, NodeOrigin, Node* value); + + bool operator!() const { return kind() == InvalidPromotedLocationKind; } + + PromotedLocationKind kind() const { return m_meta.kind(); } + Node* base() const { return m_base; } + unsigned info() const { return m_meta.info(); } + PromotedLocationDescriptor descriptor() const { return m_meta; } + + unsigned hash() const + { + return m_meta.hash() + WTF::PtrHash<Node*>::hash(m_base); + } + + bool operator==(const PromotedHeapLocation& other) const + { + return m_base == other.m_base + && m_meta == other.m_meta; + } + + bool isHashTableDeletedValue() const + { + return m_meta.isHashTableDeletedValue(); + } + + void dump(PrintStream& out) const; + +private: + Node* m_base; + PromotedLocationDescriptor m_meta; +}; + +struct PromotedHeapLocationHash { + static unsigned hash(const PromotedHeapLocation& key) { return key.hash(); } + static bool equal(const PromotedHeapLocation& a, const PromotedHeapLocation& b) { return a == b; } + static const bool safeToCompareToEmptyOrDeleted = true; +}; + +} } // namespace JSC::DFG + +namespace WTF { + +void printInternal(PrintStream&, JSC::DFG::PromotedLocationKind); + +template<typename T> struct DefaultHash; +template<> struct DefaultHash<JSC::DFG::PromotedHeapLocation> { + typedef JSC::DFG::PromotedHeapLocationHash Hash; +}; + +template<typename T> struct HashTraits; +template<> struct HashTraits<JSC::DFG::PromotedHeapLocation> : SimpleClassHashTraits<JSC::DFG::PromotedHeapLocation> { + static const bool emptyValueIsZero = false; +}; + +template<typename T> struct DefaultHash; +template<> struct DefaultHash<JSC::DFG::PromotedLocationDescriptor> { + typedef JSC::DFG::PromotedLocationDescriptorHash Hash; +}; + +template<typename T> struct HashTraits; +template<> struct HashTraits<JSC::DFG::PromotedLocationDescriptor> : SimpleClassHashTraits<JSC::DFG::PromotedLocationDescriptor> { + static const bool emptyValueIsZero = false; +}; + +} // namespace WTF + +#endif // ENABLE(DFG_JIT) + +#endif // DFGPromotedHeapLocation_h + diff --git a/Source/JavaScriptCore/dfg/DFGPropertyTypeKey.h b/Source/JavaScriptCore/dfg/DFGPropertyTypeKey.h new file mode 100644 index 000000000..65732b798 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGPropertyTypeKey.h @@ -0,0 +1,129 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGPropertyTypeKey_h +#define DFGPropertyTypeKey_h + +#if ENABLE(DFG_JIT) + +#include "Structure.h" +#include <wtf/HashMap.h> + +namespace JSC { namespace DFG { + +// This object is a key for finding a property's type. It's a tuple of Structure* and UniquedStringImpl*. + +class PropertyTypeKey { +public: + PropertyTypeKey() + : m_structure(nullptr) + , m_uid(nullptr) + { + } + + PropertyTypeKey(Structure* structure, UniquedStringImpl* uid) + : m_structure(structure) + , m_uid(uid) + { + } + + PropertyTypeKey(WTF::HashTableDeletedValueType) + : m_structure(nullptr) + , m_uid(deletedUID()) + { + } + + explicit operator bool() const { return m_structure && m_uid; } + + Structure* structure() const { return m_structure; } + UniquedStringImpl* uid() const { return m_uid; } + + bool operator==(const PropertyTypeKey& other) const + { + return m_structure == other.m_structure + && m_uid == other.m_uid; + } + + bool operator!=(const PropertyTypeKey& other) const + { + return !(*this == other); + } + + unsigned hash() const + { + return WTF::PtrHash<Structure*>::hash(m_structure) + WTF::PtrHash<UniquedStringImpl*>::hash(m_uid); + } + + bool isHashTableDeletedValue() const + { + return !m_structure && m_uid == deletedUID(); + } + + void dumpInContext(PrintStream& out, DumpContext* context) const + { + out.print(pointerDumpInContext(m_structure, context), "+", m_uid); + } + + void dump(PrintStream& out) const + { + dumpInContext(out, nullptr); + } + +private: + static UniquedStringImpl* deletedUID() + { + return bitwise_cast<UniquedStringImpl*>(static_cast<intptr_t>(1)); + } + + Structure* m_structure; + UniquedStringImpl* m_uid; +}; + +struct PropertyTypeKeyHash { + static unsigned hash(const PropertyTypeKey& key) { return key.hash(); } + static bool equal(const PropertyTypeKey& a, const PropertyTypeKey& b) { return a == b; } + static const bool safeToCompareToEmptyOrDeleted = true; +}; + +} } // namespace JSC::DFG + +namespace WTF { + +template<typename T> struct DefaultHash; +template<> struct DefaultHash<JSC::DFG::PropertyTypeKey> { + typedef JSC::DFG::PropertyTypeKeyHash Hash; +}; + +template<typename T> struct HashTraits; +template<> struct HashTraits<JSC::DFG::PropertyTypeKey> : SimpleClassHashTraits<JSC::DFG::PropertyTypeKey> { + static const bool emptyValueIsZero = false; +}; + +} // namespace WTF + +#endif // ENABLE(DFG_JIT) + +#endif // DFGPropertyTypeKey_h + diff --git a/Source/JavaScriptCore/dfg/DFGPureValue.cpp b/Source/JavaScriptCore/dfg/DFGPureValue.cpp new file mode 100644 index 000000000..4c9f60c06 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGPureValue.cpp @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGPureValue.h" + +#if ENABLE(DFG_JIT) + +#include "DFGGraph.h" + +namespace JSC { namespace DFG { + +void PureValue::dump(PrintStream& out) const +{ + out.print(Graph::opName(op())); + out.print("("); + CommaPrinter comma; + for (unsigned i = 0; i < AdjacencyList::Size; ++i) { + if (children().child(i)) + out.print(comma, children().child(i)); + } + if (m_info) + out.print(comma, m_info); + out.print(")"); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGPureValue.h b/Source/JavaScriptCore/dfg/DFGPureValue.h new file mode 100644 index 000000000..e7d6a3db4 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGPureValue.h @@ -0,0 +1,145 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef DFGPureValue_h +#define DFGPureValue_h + +#if ENABLE(DFG_JIT) + +#include "DFGNode.h" + +namespace JSC { namespace DFG { + +class PureValue { +public: + PureValue() + : m_op(LastNodeType) + , m_info(0) + { + } + + PureValue(NodeType op, const AdjacencyList& children, uintptr_t info) + : m_op(op) + , m_children(children.sanitized()) + , m_info(info) + { + ASSERT(!(defaultFlags(op) & NodeHasVarArgs)); + } + + PureValue(NodeType op, const AdjacencyList& children, const void* ptr) + : PureValue(op, children, bitwise_cast<uintptr_t>(ptr)) + { + } + + PureValue(NodeType op, const AdjacencyList& children) + : PureValue(op, children, static_cast<uintptr_t>(0)) + { + } + + PureValue(Node* node, uintptr_t info) + : PureValue(node->op(), node->children, info) + { + } + + PureValue(Node* node, const void* ptr) + : PureValue(node->op(), node->children, ptr) + { + } + + PureValue(Node* node) + : PureValue(node->op(), node->children) + { + } + + PureValue(WTF::HashTableDeletedValueType) + : m_op(LastNodeType) + , m_info(1) + { + } + + bool operator!() const { return m_op == LastNodeType && !m_info; } + + NodeType op() const { return m_op; } + const AdjacencyList& children() const { return m_children; } + uintptr_t info() const { return m_info; } + + unsigned hash() const + { + return WTF::IntHash<int>::hash(static_cast<int>(m_op)) + m_children.hash() + m_info; + } + + bool operator==(const PureValue& other) const + { + return m_op == other.m_op + && m_children == other.m_children + && m_info == other.m_info; + } + + bool isHashTableDeletedValue() const + { + return m_op == LastNodeType && m_info; + } + + void dump(PrintStream& out) const; + +private: + NodeType m_op; + AdjacencyList m_children; + uintptr_t m_info; +}; + +struct PureValueHash { + static unsigned hash(const PureValue& key) { return key.hash(); } + static bool equal(const PureValue& a, const PureValue& b) { return a == b; } + static const bool safeToCompareToEmptyOrDeleted = true; +}; + +} } // namespace JSC::DFG + +namespace WTF { + +template<typename T> struct DefaultHash; +template<> struct DefaultHash<JSC::DFG::PureValue> { + typedef JSC::DFG::PureValueHash Hash; +}; + +template<typename T> struct HashTraits; +template<> struct HashTraits<JSC::DFG::PureValue> : SimpleClassHashTraits<JSC::DFG::PureValue> { + static const bool emptyValueIsZero = false; +}; + +} // namespace WTF + +namespace JSC { namespace DFG { + +typedef HashMap<PureValue, Node*> PureMap; +typedef HashMap<PureValue, Vector<Node*>> PureMultiMap; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGPureValue_h + diff --git a/Source/JavaScriptCore/dfg/DFGPutStackSinkingPhase.cpp b/Source/JavaScriptCore/dfg/DFGPutStackSinkingPhase.cpp new file mode 100644 index 000000000..6b0bb0763 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGPutStackSinkingPhase.cpp @@ -0,0 +1,593 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGPutStackSinkingPhase.h" + +#if ENABLE(DFG_JIT) + +#include "DFGBlockMapInlines.h" +#include "DFGGraph.h" +#include "DFGInsertionSet.h" +#include "DFGPhase.h" +#include "DFGPreciseLocalClobberize.h" +#include "DFGSSACalculator.h" +#include "DFGValidate.h" +#include "JSCInlines.h" +#include "OperandsInlines.h" + +namespace JSC { namespace DFG { + +namespace { + +bool verbose = false; + +class PutStackSinkingPhase : public Phase { +public: + PutStackSinkingPhase(Graph& graph) + : Phase(graph, "PutStack sinking") + { + } + + bool run() + { + // FIXME: One of the problems of this approach is that it will create a duplicate Phi graph + // for sunken PutStacks in the presence of interesting control flow merges, and where the + // value being PutStack'd is also otherwise live in the DFG code. We could work around this + // by doing the sinking over CPS, or maybe just by doing really smart hoisting. It's also + // possible that the duplicate Phi graph can be deduplicated by B3. It would be best if we + // could observe that there is already a Phi graph in place that does what we want. In + // principle if we have a request to place a Phi at a particular place, we could just check + // if there is already a Phi that does what we want. Because PutStackSinkingPhase runs just + // after SSA conversion, we have almost a guarantee that the Phi graph we produce here would + // be trivially redundant to the one we already have. + + // FIXME: This phase doesn't adequately use KillStacks. KillStack can be viewed as a def. + // This is mostly inconsequential; it would be a bug to have a local live at a KillStack. + // More important is that KillStack should swallow any deferral. After a KillStack, the + // local should behave like a TOP deferral because it would be invalid for anyone to trust + // the stack. It's not clear to me if this is important or not. + // https://bugs.webkit.org/show_bug.cgi?id=145296 + + if (verbose) { + dataLog("Graph before PutStack sinking:\n"); + m_graph.dump(); + } + + m_graph.ensureDominators(); + + SSACalculator ssaCalculator(m_graph); + InsertionSet insertionSet(m_graph); + + // First figure out where various locals are live. 
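The liveness computation below is a standard backward dataflow fixpoint: a local is live at the head of a block if it is read before being overwritten, and a block's live-at-head set flows into the live-at-tail sets of its predecessors. A toy, block-granular sketch of that fixpoint follows; the names and data structures are invented for illustration, whereas the real phase works node by node through preciseLocalClobberize:

    #include <array>
    #include <bitset>
    #include <cassert>
    #include <cstddef>

    int main()
    {
        // Two blocks, B0 -> B1. B0 writes local 0 and reads local 1; B1 reads local 0.
        constexpr std::size_t numLocals = 2;
        std::array<std::bitset<numLocals>, 2> liveAtHead, liveAtTail, reads, writes;
        writes[0].set(0);
        reads[0].set(1);
        reads[1].set(0);

        bool changed;
        do {
            changed = false;
            for (int b = 1; b >= 0; --b) {
                // live-at-head = (live-at-tail minus writes) union reads
                std::bitset<numLocals> live = liveAtTail[b];
                live &= ~writes[b];
                live |= reads[b];
                if (live != liveAtHead[b]) {
                    liveAtHead[b] = live;
                    changed = true;
                }
            }
            liveAtTail[0] |= liveAtHead[1]; // propagate B1's needs to its predecessor B0
        } while (changed);

        assert(liveAtHead[1].test(0));  // local 0 is live into B1
        assert(!liveAtHead[0].test(0)); // but not into B0, where it is written
        assert(liveAtHead[0].test(1));  // local 1 is live into B0
        return 0;
    }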
+ BlockMap<Operands<bool>> liveAtHead(m_graph); + BlockMap<Operands<bool>> liveAtTail(m_graph); + + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + liveAtHead[block] = Operands<bool>(OperandsLike, block->variablesAtHead); + liveAtTail[block] = Operands<bool>(OperandsLike, block->variablesAtHead); + + liveAtHead[block].fill(false); + liveAtTail[block].fill(false); + } + + bool changed; + do { + changed = false; + + for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { + BasicBlock* block = m_graph.block(blockIndex); + if (!block) + continue; + + Operands<bool> live = liveAtTail[block]; + for (unsigned nodeIndex = block->size(); nodeIndex--;) { + Node* node = block->at(nodeIndex); + if (verbose) + dataLog("Live at ", node, ": ", live, "\n"); + + Vector<VirtualRegister, 4> reads; + Vector<VirtualRegister, 4> writes; + auto escapeHandler = [&] (VirtualRegister operand) { + if (operand.isHeader()) + return; + if (verbose) + dataLog(" ", operand, " is live at ", node, "\n"); + reads.append(operand); + }; + + auto writeHandler = [&] (VirtualRegister operand) { + RELEASE_ASSERT(node->op() == PutStack || node->op() == LoadVarargs || node->op() == ForwardVarargs); + writes.append(operand); + }; + + preciseLocalClobberize( + m_graph, node, escapeHandler, writeHandler, + [&] (VirtualRegister, LazyNode) { }); + + for (VirtualRegister operand : writes) + live.operand(operand) = false; + for (VirtualRegister operand : reads) + live.operand(operand) = true; + } + + if (live == liveAtHead[block]) + continue; + + liveAtHead[block] = live; + changed = true; + + for (BasicBlock* predecessor : block->predecessors) { + for (size_t i = live.size(); i--;) + liveAtTail[predecessor][i] |= live[i]; + } + } + + } while (changed); + + // All of the arguments should be live at head of root. Note that we may find that some + // locals are live at head of root. This seems wrong but isn't. This will happen for example + // if the function accesses closure variable #42 for some other function and we either don't + // have variable #42 at all or we haven't set it at root, for whatever reason. Basically this + // arises since our aliasing for closure variables is conservatively based on variable number + // and ignores the owning symbol table. We should probably fix this eventually and make our + // aliasing more precise. + // + // For our purposes here, the imprecision in the aliasing is harmless. It just means that we + // may not do as much Phi pruning as we wanted. + for (size_t i = liveAtHead.atIndex(0).numberOfArguments(); i--;) + DFG_ASSERT(m_graph, nullptr, liveAtHead.atIndex(0).argument(i)); + + // Next identify where we would want to sink PutStacks to. We say that there is a deferred + // flush if we had a PutStack with a given FlushFormat but it hasn't been materialized yet. + // Deferrals have the following lattice; but it's worth noting that the TOP part of the + // lattice serves an entirely different purpose than the rest of the lattice: it just means + // that we're in a region of code where nobody should have been relying on the value. The + // rest of the lattice means that we either have a PutStack that is deferred (i.e. still + // needs to be executed) or there isn't one (because we've already executed it). + // + // Bottom: + // Represented as DeadFlush. + // Means that all previous PutStacks have been executed so there is nothing deferred.
+ // During merging this is subordinate to the other kinds of deferrals, because it + // represents the fact that we've already executed all necessary PutStacks. This implies + // that there *had* been some PutStacks that we should have executed. + // + // Top: + // Represented as ConflictingFlush. + // Represents the fact that we know, via forward flow, that there isn't any value in the + // given local that anyone should have been relying on. This comes into play at the + // prologue (because in SSA form at the prologue no local has any value) or when we merge + // deferrals for different formats. A lexical scope in which a local had some semantic + // meaning will by this point share the same format; if we had stores from different + // lexical scopes that got merged together then we may have a conflicting format. Hence + // a conflicting format proves that we're no longer in an area in which the variable was + // in scope. Note that this is all approximate and only precise enough to later answer + // questions pertinent to sinking. For example, this doesn't always detect when a local + // is no longer semantically relevant - we may well have a deferral from inside some + // inlined call survive outside of that inlined code, and this is generally OK. In the + // worst case it means that we might think that a deferral that is actually dead must + // still be executed. But we usually catch that with liveness, though that's not + // guaranteed since liveness is conservative. + // + // What Top does give us is a way to detect situations where we both don't need to care + // about a deferral and there is no way that we could reason about it anyway. If we merged + // deferrals for different formats then we wouldn't know the format to use. So, we use + // Top in that case because that's also a case where we know that we can ignore the + // deferral. + // + // Deferral with a concrete format: + // Represented by format values other than DeadFlush or ConflictingFlush. + // Represents the fact that the original code would have done a PutStack but we haven't + // identified an operation that would have observed that PutStack. + // + // We need to be precise about liveness in this phase because not doing so + // could cause us to insert a PutStack before a node we thought may escape a + // value that it doesn't really escape. Sinking this PutStack above such a node may + // cause us to insert a GetStack that we forward to the Phi we're feeding into the + // sunken PutStack. Inserting such a GetStack could cause us to load garbage and + // can confuse the AI to claim untrue things (like that the program will exit when + // it really won't).
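For illustration, a self-contained sketch of how a join over the deferral lattice described above could behave, with DeadFlush acting as bottom and ConflictingFlush as top. It reuses the enumerator names for readability but is not the actual DFG merge code:

    #include <cassert>

    enum Format { DeadFlush, FlushedInt32, FlushedDouble, FlushedJSValue, ConflictingFlush };

    // Bottom (DeadFlush) is absorbed by anything; two different concrete formats,
    // or anything merged with top, collapse to ConflictingFlush.
    static Format mergeDeferral(Format a, Format b)
    {
        if (a == b)
            return a;
        if (a == DeadFlush)
            return b;
        if (b == DeadFlush)
            return a;
        return ConflictingFlush;
    }

    int main()
    {
        assert(mergeDeferral(DeadFlush, FlushedInt32) == FlushedInt32);
        assert(mergeDeferral(FlushedInt32, FlushedDouble) == ConflictingFlush);
        assert(mergeDeferral(ConflictingFlush, FlushedJSValue) == ConflictingFlush);
        return 0;
    }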
+ BlockMap<Operands<FlushFormat>> deferredAtHead(m_graph); + BlockMap<Operands<FlushFormat>> deferredAtTail(m_graph); + + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + deferredAtHead[block] = + Operands<FlushFormat>(OperandsLike, block->variablesAtHead); + deferredAtTail[block] = + Operands<FlushFormat>(OperandsLike, block->variablesAtHead); + } + + for (unsigned local = deferredAtHead.atIndex(0).numberOfLocals(); local--;) + deferredAtHead.atIndex(0).local(local) = ConflictingFlush; + + do { + changed = false; + + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + Operands<FlushFormat> deferred = deferredAtHead[block]; + + for (Node* node : *block) { + if (verbose) + dataLog("Deferred at ", node, ":", deferred, "\n"); + + if (node->op() == GetStack) { + // Handle the case that the input doesn't match our requirements. This is + // really a bug, but it's a benign one if we simply don't run this phase. + // It usually arises because of patterns like: + // + // if (thing) + // PutStack() + // ... + // if (thing) + // GetStack() + // + // Or: + // + // if (never happens) + // GetStack() + // + // Because this phase runs early in SSA, it should be sensible to enforce + // that no such code pattern has arisen yet. So, when validation is + // enabled, we assert that we aren't seeing this. But with validation + // disabled we silently let this fly and we just abort this phase. + // FIXME: Get rid of all remaining cases of conflicting GetStacks. + // https://bugs.webkit.org/show_bug.cgi?id=150398 + + bool isConflicting = + deferred.operand(node->stackAccessData()->local) == ConflictingFlush; + + if (validationEnabled()) + DFG_ASSERT(m_graph, node, !isConflicting); + + if (isConflicting) { + // Oh noes! Abort!! + return false; + } + + // A GetStack doesn't affect anything, since we know which local we are reading + // from. + continue; + } else if (node->op() == PutStack) { + VirtualRegister operand = node->stackAccessData()->local; + deferred.operand(operand) = node->stackAccessData()->format; + continue; + } + + auto escapeHandler = [&] (VirtualRegister operand) { + if (verbose) + dataLog("For ", node, " escaping ", operand, "\n"); + if (operand.isHeader()) + return; + // We will materialize just before any reads. + deferred.operand(operand) = DeadFlush; + }; + + auto writeHandler = [&] (VirtualRegister operand) { + RELEASE_ASSERT(node->op() == LoadVarargs || node->op() == ForwardVarargs); + deferred.operand(operand) = DeadFlush; + }; + + preciseLocalClobberize( + m_graph, node, escapeHandler, writeHandler, + [&] (VirtualRegister, LazyNode) { }); + } + + if (deferred == deferredAtTail[block]) + continue; + + deferredAtTail[block] = deferred; + changed = true; + + for (BasicBlock* successor : block->successors()) { + for (size_t i = deferred.size(); i--;) { + if (verbose) + dataLog("Considering ", VirtualRegister(deferred.operandForIndex(i)), " at ", pointerDump(block), "->", pointerDump(successor), ": ", deferred[i], " and ", deferredAtHead[successor][i], " merges to "); + + deferredAtHead[successor][i] = + merge(deferredAtHead[successor][i], deferred[i]); + + if (verbose) + dataLog(deferredAtHead[successor][i], "\n"); + } + } + } + + } while (changed); + + // We wish to insert PutStacks at all of the materialization points, which are defined + // implicitly as the places where we set deferred to Dead while it was previously not Dead. 
+ // To do this, we may need to build some Phi functions to handle stuff like this: + // + // Before: + // + // if (p) + // PutStack(r42, @x) + // else + // PutStack(r42, @y) + // + // After: + // + // if (p) + // Upsilon(@x, ^z) + // else + // Upsilon(@y, ^z) + // z: Phi() + // PutStack(r42, @z) + // + // This means that we have an SSACalculator::Variable for each local, and a Def is any + // PutStack in the original program. The original PutStacks will simply vanish. + + Operands<SSACalculator::Variable*> operandToVariable( + OperandsLike, m_graph.block(0)->variablesAtHead); + Vector<VirtualRegister> indexToOperand; + for (size_t i = m_graph.block(0)->variablesAtHead.size(); i--;) { + VirtualRegister operand(m_graph.block(0)->variablesAtHead.operandForIndex(i)); + + SSACalculator::Variable* variable = ssaCalculator.newVariable(); + operandToVariable.operand(operand) = variable; + ASSERT(indexToOperand.size() == variable->index()); + indexToOperand.append(operand); + } + + HashSet<Node*> putStacksToSink; + + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + for (Node* node : *block) { + switch (node->op()) { + case PutStack: + putStacksToSink.add(node); + ssaCalculator.newDef( + operandToVariable.operand(node->stackAccessData()->local), + block, node->child1().node()); + break; + case GetStack: + ssaCalculator.newDef( + operandToVariable.operand(node->stackAccessData()->local), + block, node); + break; + default: + break; + } + } + } + + ssaCalculator.computePhis( + [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* { + VirtualRegister operand = indexToOperand[variable->index()]; + + if (!liveAtHead[block].operand(operand)) + return nullptr; + + FlushFormat format = deferredAtHead[block].operand(operand); + + // We could have an invalid deferral because liveness is imprecise. 
+ if (!isConcrete(format)) + return nullptr; + + if (verbose) + dataLog("Adding Phi for ", operand, " at ", pointerDump(block), "\n"); + + Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit()); + phiNode->mergeFlags(resultFor(format)); + return phiNode; + }); + + Operands<Node*> mapping(OperandsLike, m_graph.block(0)->variablesAtHead); + Operands<FlushFormat> deferred; + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + mapping.fill(nullptr); + + for (size_t i = mapping.size(); i--;) { + VirtualRegister operand(mapping.operandForIndex(i)); + + SSACalculator::Variable* variable = operandToVariable.operand(operand); + SSACalculator::Def* def = ssaCalculator.reachingDefAtHead(block, variable); + if (!def) + continue; + + mapping.operand(operand) = def->value(); + } + + if (verbose) + dataLog("Mapping at top of ", pointerDump(block), ": ", mapping, "\n"); + + for (SSACalculator::Def* phiDef : ssaCalculator.phisForBlock(block)) { + VirtualRegister operand = indexToOperand[phiDef->variable()->index()]; + + insertionSet.insert(0, phiDef->value()); + + if (verbose) + dataLog(" Mapping ", operand, " to ", phiDef->value(), "\n"); + mapping.operand(operand) = phiDef->value(); + } + + deferred = deferredAtHead[block]; + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + Node* node = block->at(nodeIndex); + if (verbose) + dataLog("Deferred at ", node, ":", deferred, "\n"); + + switch (node->op()) { + case PutStack: { + StackAccessData* data = node->stackAccessData(); + VirtualRegister operand = data->local; + deferred.operand(operand) = data->format; + if (verbose) + dataLog(" Mapping ", operand, " to ", node->child1().node(), " at ", node, "\n"); + mapping.operand(operand) = node->child1().node(); + break; + } + + case GetStack: { + StackAccessData* data = node->stackAccessData(); + FlushFormat format = deferred.operand(data->local); + if (!isConcrete(format)) { + DFG_ASSERT( + m_graph, node, + deferred.operand(data->local) != ConflictingFlush); + + // This means there is no deferral. No deferral means that the most + // authoritative value for this stack slot is what is stored in the stack. So, + // keep the GetStack. + mapping.operand(data->local) = node; + break; + } + + // We have a concrete deferral, which means a PutStack that hasn't executed yet. It + // would have stored a value with a certain format. That format must match our + // format. But more importantly, we can simply use the value that the PutStack would + // have stored and get rid of the GetStack. + DFG_ASSERT(m_graph, node, format == data->format); + + Node* incoming = mapping.operand(data->local); + node->child1() = incoming->defaultEdge(); + node->convertToIdentity(); + break; + } + + default: { + auto escapeHandler = [&] (VirtualRegister operand) { + if (verbose) + dataLog("For ", node, " escaping ", operand, "\n"); + + if (operand.isHeader()) + return; + + FlushFormat format = deferred.operand(operand); + if (!isConcrete(format)) { + // It's dead now, rather than conflicting. + deferred.operand(operand) = DeadFlush; + return; + } + + // Gotta insert a PutStack. 
+ if (verbose) + dataLog("Inserting a PutStack for ", operand, " at ", node, "\n"); + + Node* incoming = mapping.operand(operand); + DFG_ASSERT(m_graph, node, incoming); + + insertionSet.insertNode( + nodeIndex, SpecNone, PutStack, node->origin, + OpInfo(m_graph.m_stackAccessData.add(operand, format)), + Edge(incoming, uncheckedUseKindFor(format))); + + deferred.operand(operand) = DeadFlush; + }; + + auto writeHandler = [&] (VirtualRegister operand) { + // LoadVarargs and ForwardVarargs are unconditional writes to the stack + // locations they claim to write to. They do not read from the stack + // locations they write to. This makes those stack locations dead right + // before a LoadVarargs/ForwardVarargs. This means we should never sink + // PutStacks right to this point. + RELEASE_ASSERT(node->op() == LoadVarargs || node->op() == ForwardVarargs); + deferred.operand(operand) = DeadFlush; + }; + + preciseLocalClobberize( + m_graph, node, escapeHandler, writeHandler, + [&] (VirtualRegister, LazyNode) { }); + break; + } } + } + + NodeAndIndex terminal = block->findTerminal(); + size_t upsilonInsertionPoint = terminal.index; + NodeOrigin upsilonOrigin = terminal.node->origin; + for (BasicBlock* successorBlock : block->successors()) { + for (SSACalculator::Def* phiDef : ssaCalculator.phisForBlock(successorBlock)) { + Node* phiNode = phiDef->value(); + SSACalculator::Variable* variable = phiDef->variable(); + VirtualRegister operand = indexToOperand[variable->index()]; + if (verbose) + dataLog("Creating Upsilon for ", operand, " at ", pointerDump(block), "->", pointerDump(successorBlock), "\n"); + FlushFormat format = deferredAtHead[successorBlock].operand(operand); + DFG_ASSERT(m_graph, nullptr, isConcrete(format)); + UseKind useKind = uncheckedUseKindFor(format); + + // We need to get a value for the stack slot. This phase doesn't really have a + // good way of determining if a stack location got clobbered. It just knows if + // there is a deferral. The lack of a deferral might mean that a PutStack or + // GetStack had never happened, or it might mean that the value was read, or + // that it was written. It's OK for us to make some bad decisions here, since + // GCSE will clean it up anyway. + Node* incoming; + if (isConcrete(deferred.operand(operand))) { + incoming = mapping.operand(operand); + DFG_ASSERT(m_graph, phiNode, incoming); + } else { + // Issue a GetStack to get the value. This might introduce some redundancy + // into the code, but if it's bad enough, GCSE will clean it up. + incoming = insertionSet.insertNode( + upsilonInsertionPoint, SpecNone, GetStack, upsilonOrigin, + OpInfo(m_graph.m_stackAccessData.add(operand, format))); + incoming->setResult(resultFor(format)); + } + + insertionSet.insertNode( + upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin, + OpInfo(phiNode), Edge(incoming, useKind)); + } + } + + insertionSet.execute(block); + } + + // Finally eliminate the sunken PutStacks by turning them into Checks. This keeps whatever + // type check they were doing. 
+ for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + Node* node = block->at(nodeIndex); + + if (!putStacksToSink.contains(node)) + continue; + + node->remove(); + } + } + + if (verbose) { + dataLog("Graph after PutStack sinking:\n"); + m_graph.dump(); + } + + return true; + } +}; + +} // anonymous namespace + +bool performPutStackSinking(Graph& graph) +{ + SamplingRegion samplingRegion("DFG PutStack Sinking Phase"); + return runPhase<PutStackSinkingPhase>(graph); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGPutStackSinkingPhase.h b/Source/JavaScriptCore/dfg/DFGPutStackSinkingPhase.h new file mode 100644 index 000000000..24bbb81f0 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGPutStackSinkingPhase.h @@ -0,0 +1,46 @@ + /* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGPutStackSinkingPhase_h +#define DFGPutStackSinkingPhase_h + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +class Graph; + +// Sinks PutStacks to the absolute latest point where they can possibly happen, which is usually +// side-effects that may observe them. This eliminates PutStacks if it sinks them past the point of +// their deaths. + +bool performPutStackSinking(Graph&); + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGPutStackSinkingPhase_h + diff --git a/Source/JavaScriptCore/dfg/DFGSSACalculator.cpp b/Source/JavaScriptCore/dfg/DFGSSACalculator.cpp new file mode 100644 index 000000000..899fa15f9 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGSSACalculator.cpp @@ -0,0 +1,150 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGSSACalculator.h" + +#if ENABLE(DFG_JIT) + +#include "DFGBlockMapInlines.h" +#include <wtf/CommaPrinter.h> +#include <wtf/ListDump.h> + +namespace JSC { namespace DFG { + +void SSACalculator::Variable::dump(PrintStream& out) const +{ + out.print("var", m_index); +} + +void SSACalculator::Variable::dumpVerbose(PrintStream& out) const +{ + dump(out); + if (!m_blocksWithDefs.isEmpty()) { + out.print("(defs: "); + CommaPrinter comma; + for (BasicBlock* block : m_blocksWithDefs) + out.print(comma, *block); + out.print(")"); + } +} + +void SSACalculator::Def::dump(PrintStream& out) const +{ + out.print("def(", *m_variable, ", ", *m_block, ", ", m_value, ")"); +} + +SSACalculator::SSACalculator(Graph& graph) + : m_data(graph) + , m_graph(graph) +{ +} + +SSACalculator::~SSACalculator() +{ +} + +void SSACalculator::reset() +{ + m_variables.clear(); + m_defs.clear(); + m_phis.clear(); + for (BlockIndex blockIndex = m_data.size(); blockIndex--;) { + m_data[blockIndex].m_defs.clear(); + m_data[blockIndex].m_phis.clear(); + } +} + +SSACalculator::Variable* SSACalculator::newVariable() +{ + return &m_variables.alloc(Variable(m_variables.size())); +} + +SSACalculator::Def* SSACalculator::newDef(Variable* variable, BasicBlock* block, Node* value) +{ + Def* def = m_defs.add(Def(variable, block, value)); + auto result = m_data[block].m_defs.add(variable, def); + if (result.isNewEntry) + variable->m_blocksWithDefs.append(block); + else + result.iterator->value = def; + return def; +} + +SSACalculator::Def* SSACalculator::nonLocalReachingDef(BasicBlock* block, Variable* variable) +{ + return reachingDefAtTail(m_graph.m_dominators->idom(block), variable); +} + +SSACalculator::Def* SSACalculator::reachingDefAtTail(BasicBlock* block, Variable* variable) +{ + for (; block; block = m_graph.m_dominators->idom(block)) { + if (Def* def = m_data[block].m_defs.get(variable)) + return def; + } + return nullptr; +} + +void SSACalculator::dump(PrintStream& out) const +{ + out.print("<Variables: ["); + CommaPrinter comma; + for (unsigned i = 0; i < m_variables.size(); ++i) { + out.print(comma); + m_variables[i].dumpVerbose(out); + } + out.print("], Defs: ["); + comma = CommaPrinter(); + for (Def* def : const_cast<SSACalculator*>(this)->m_defs) + out.print(comma, *def); + out.print("], Phis: ["); + comma = CommaPrinter(); + for (Def* def : const_cast<SSACalculator*>(this)->m_phis) + out.print(comma, *def); + out.print("], Block data: ["); + comma = CommaPrinter(); + for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { + BasicBlock* block = m_graph.block(blockIndex); + if (!block) + continue; + + out.print(comma, *block, "=>("); + out.print("Defs: {"); + CommaPrinter innerComma; + for (auto entry : 
m_data[block].m_defs) + out.print(innerComma, *entry.key, "->", *entry.value); + out.print("}, Phis: {"); + innerComma = CommaPrinter(); + for (Def* def : m_data[block].m_phis) + out.print(innerComma, *def); + out.print("})"); + } + out.print("]>"); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGSSACalculator.h b/Source/JavaScriptCore/dfg/DFGSSACalculator.h new file mode 100644 index 000000000..66e305fa8 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGSSACalculator.h @@ -0,0 +1,263 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGSSACalculator_h +#define DFGSSACalculator_h + +#if ENABLE(DFG_JIT) + +#include "DFGDominators.h" +#include "DFGGraph.h" + +namespace JSC { namespace DFG { + +// SSACalculator provides a reusable tool for using the Cytron, Ferrante, Rosen, Wegman, and +// Zadeck "Efficiently Computing Static Single Assignment Form and the Control Dependence Graph" +// (TOPLAS'91) algorithm for computing SSA. SSACalculator doesn't magically do everything for you +// but it maintains the major data structures and handles most of the non-local reasoning. Here's +// the workflow of using SSACalculator to execute this algorithm: +// +// 0) Create a fresh SSACalculator instance. You will need this instance only for as long as +// you're not yet done computing SSA. +// +// 1) Create an SSACalculator::Variable for every variable that you want to do Phi insertion +// on. SSACalculator::Variable::index() is a dense indexing of the Variables that you +// created, so you can easily use a Vector to map the SSACalculator::Variables to your +// variables. +// +// 2) Create a SSACalculator::Def for every assignment to those variables. A Def knows about the +// variable, the block, and the DFG::Node* that has the value being put into the variable. +// Note that creating a Def in block B for variable V if block B already has a def for variable +// V will overwrite the previous Def's DFG::Node* value. This enables you to create Defs by +// processing basic blocks in forward order. If a block has multiple Defs of a variable, this +// "just works" because each block will then remember the last Def of each variable. 
+// +// 3) Call SSACalculator::computePhis(). This takes a functor that will create the Phi nodes. The +// functor returns either the Phi node it created, or nullptr, if it chooses to prune. (As an +// aside, it's always sound not to prune, and the safest reason for pruning is liveness.) The +// computePhis() code will record the created Phi nodes as Defs, and it will separately record +// the list of Phis inserted at each block. It's OK for the functor you pass here to modify the +// DFG::Graph on the fly, but the easiest way to write this is to just create the Phi nodes by +// doing Graph::addNode() and return them. It's then best to insert all Phi nodes for a block +// in bulk as part of the pass you do below, in step (4). +// +// 4) Modify the graph to create the SSA data flow. For each block, this should: +// +// 4.0) Compute the set of reaching defs (aka available values) for each variable by calling +// SSACalculator::reachingDefAtHead() for each variable. Record this in a local table that +// will be incrementally updated as you proceed through the block in forward order in the +// next steps: +// +// FIXME: It might be better to compute reaching defs for all live variables in one go, to +// avoid doing repeated dom tree traversals. +// https://bugs.webkit.org/show_bug.cgi?id=136610 +// +// 4.1) Insert all of the Phi nodes for the block by using SSACalculator::phisForBlock(), and +// record those Phi nodes as being available values. +// +// 4.2) Process the block in forward order. For each load from a variable, replace it with the +// available SSA value for that variable. For each store, delete it and record the stored +// value as being available. +// +// Note that you have two options of how to replace loads with SSA values. You can replace +// the load with an Identity node; this will end up working fairly naturally so long as +// you run GCSE after your phase. Or, you can replace all uses of the load with the SSA +// value yourself (using the Graph::performSubstitution() idiom), but that requires that +// your loop over basic blocks proceeds in the appropriate graph order, for example +// preorder. +// +// FIXME: Make it easier to do this, that doesn't involve rerunning GCSE. +// https://bugs.webkit.org/show_bug.cgi?id=136639 +// +// 4.3) Insert Upsilons at the end of the current block for the corresponding Phis in each successor block. +// Use the available values table to decide the source value for each Phi's variable. Note that +// you could also use SSACalculator::reachingDefAtTail() instead of the available values table, +// though your local available values table is likely to be more efficient. +// +// The most obvious use of SSACalculator is for the CPS->SSA conversion itself, but it's meant to +// also be used for SSA update and for things like the promotion of heap fields to local SSA +// variables. 
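As a self-contained illustration of step (3): the Phi-placement question that computePhis() answers (via Dominators::forAllBlocksInPrunedIteratedDominanceFrontierOf, as seen in the class below) is which blocks sit in the iterated dominance frontier of the blocks that define the variable. The toy below shows that computation on a plain diamond CFG; ToyCFG and its numbering are invented for this sketch (they are not DFG types), and the short dominance-frontier walk follows the Cooper-Harvey-Kennedy formulation rather than the algorithm from the paper cited above.

#include <cstdio>
#include <set>
#include <vector>

// Toy CFG: blocks are indices, idom[b] is b's immediate dominator (-1 for the entry block).
struct ToyCFG {
    std::vector<std::vector<int>> predecessors;
    std::vector<int> idom;
};

// Dominance frontiers via the Cooper-Harvey-Kennedy walk: for every join block,
// walk each predecessor up the dominator tree until the join's idom, adding the
// join to the frontier of every block visited on the way.
static std::vector<std::set<int>> dominanceFrontiers(const ToyCFG& cfg)
{
    std::vector<std::set<int>> frontiers(cfg.idom.size());
    for (size_t b = 0; b < cfg.predecessors.size(); ++b) {
        if (cfg.predecessors[b].size() < 2)
            continue;
        for (int runner : cfg.predecessors[b]) {
            while (runner != cfg.idom[b]) {
                frontiers[runner].insert(static_cast<int>(b));
                runner = cfg.idom[runner];
            }
        }
    }
    return frontiers;
}

// Iterated dominance frontier of the def blocks: exactly the blocks that need a
// (possibly pruned) Phi for the variable defined in those blocks.
static std::set<int> phiBlocks(const ToyCFG& cfg, const std::set<int>& defBlocks)
{
    std::vector<std::set<int>> frontiers = dominanceFrontiers(cfg);
    std::set<int> result;
    std::vector<int> worklist(defBlocks.begin(), defBlocks.end());
    while (!worklist.empty()) {
        int block = worklist.back();
        worklist.pop_back();
        for (int frontierBlock : frontiers[block]) {
            if (result.insert(frontierBlock).second)
                worklist.push_back(frontierBlock);
        }
    }
    return result;
}

int main()
{
    // Diamond: 0 -> {1, 2}, 1 -> 3, 2 -> 3; the variable is assigned in blocks 1 and 2.
    ToyCFG cfg;
    cfg.predecessors = { {}, { 0 }, { 0 }, { 1, 2 } };
    cfg.idom = { -1, 0, 0, 0 };

    for (int block : phiBlocks(cfg, { 1, 2 }))
        std::printf("Phi needed at head of block %d\n", block); // prints: block 3
    return 0;
}

In the real phase, the functor handed to computePhis() can still return nullptr for such a block (for example when the variable is not live there), which is the pruning described above.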
+ +class SSACalculator { +public: + SSACalculator(Graph&); + ~SSACalculator(); + + void reset(); + + class Variable { + public: + unsigned index() const { return m_index; } + + void dump(PrintStream&) const; + void dumpVerbose(PrintStream&) const; + + private: + friend class SSACalculator; + + Variable() + : m_index(UINT_MAX) + { + } + + Variable(unsigned index) + : m_index(index) + { + } + + BlockList m_blocksWithDefs; + unsigned m_index; + }; + + class Def { + public: + Variable* variable() const { return m_variable; } + BasicBlock* block() const { return m_block; } + + Node* value() const { return m_value; } + + void dump(PrintStream&) const; + + private: + friend class SSACalculator; + + Def() + : m_variable(nullptr) + , m_block(nullptr) + , m_value(nullptr) + { + } + + Def(Variable* variable, BasicBlock* block, Node* value) + : m_variable(variable) + , m_block(block) + , m_value(value) + { + } + + Variable* m_variable; + BasicBlock* m_block; + Node* m_value; + }; + + Variable* newVariable(); + Def* newDef(Variable*, BasicBlock*, Node*); + + Variable* variable(unsigned index) { return &m_variables[index]; } + + // The PhiInsertionFunctor takes a Variable and a BasicBlock and either inserts a Phi and + // returns the Node for that Phi, or it decides that it's not worth it to insert a Phi at that + // block because of some additional pruning condition (typically liveness) and returns + // nullptr. If a non-null Node* is returned, a new Def is created, so that + // nonLocalReachingDef() will find it later. Note that it is generally always sound to not + // prune any Phis (that is, to always have the functor insert a Phi and never return nullptr). + template<typename PhiInsertionFunctor> + void computePhis(const PhiInsertionFunctor& functor) + { + DFG_ASSERT(m_graph, nullptr, m_graph.m_dominators); + + for (Variable& variable : m_variables) { + m_graph.m_dominators->forAllBlocksInPrunedIteratedDominanceFrontierOf( + variable.m_blocksWithDefs, + [&] (BasicBlock* block) -> bool { + Node* phiNode = functor(&variable, block); + if (!phiNode) + return false; + + BlockData& data = m_data[block]; + Def* phiDef = m_phis.add(Def(&variable, block, phiNode)); + data.m_phis.append(phiDef); + + // Note that it's possible to have a block that looks like this before SSA + // conversion: + // + // label: + // print(x); + // ... + // x = 42; + // goto label; + // + // And it may look like this after SSA conversion: + // + // label: + // x1: Phi() + // ... + // Upsilon(42, ^x1) + // goto label; + // + // In this case, we will want to insert a Phi in this block, and the block + // will already have a Def for the variable. When this happens, we don't want + // the Phi to override the original Def, since the Phi is at the top, the + // original Def in the m_defs table would have been at the bottom, and we want + // m_defs to tell us about defs at tail. + // + // So, we rely on the fact that HashMap::add() does nothing if the key was + // already present. + data.m_defs.add(&variable, phiDef); + return true; + }); + } + } + + const Vector<Def*>& phisForBlock(BasicBlock* block) + { + return m_data[block].m_phis; + } + + // Ignores defs within the given block; it assumes that you've taken care of those + // yourself. + Def* nonLocalReachingDef(BasicBlock*, Variable*); + Def* reachingDefAtHead(BasicBlock* block, Variable* variable) + { + return nonLocalReachingDef(block, variable); + } + + // Considers the def within the given block, but only works at the tail of the block. 
+ Def* reachingDefAtTail(BasicBlock*, Variable*); + + void dump(PrintStream&) const; + +private: + SegmentedVector<Variable> m_variables; + Bag<Def> m_defs; + + Bag<Def> m_phis; + + struct BlockData { + HashMap<Variable*, Def*> m_defs; + Vector<Def*> m_phis; + }; + + BlockMap<BlockData> m_data; + + Graph& m_graph; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGSSACalculator_h + diff --git a/Source/JavaScriptCore/dfg/DFGSSAConversionPhase.cpp b/Source/JavaScriptCore/dfg/DFGSSAConversionPhase.cpp index 57fc09529..de100687b 100644 --- a/Source/JavaScriptCore/dfg/DFGSSAConversionPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGSSAConversionPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -32,7 +32,9 @@ #include "DFGGraph.h" #include "DFGInsertionSet.h" #include "DFGPhase.h" -#include "Operations.h" +#include "DFGSSACalculator.h" +#include "DFGVariableAccessDataDump.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -42,8 +44,8 @@ class SSAConversionPhase : public Phase { public: SSAConversionPhase(Graph& graph) : Phase(graph, "SSA conversion") + , m_calculator(graph) , m_insertionSet(graph) - , m_changed(false) { } @@ -51,315 +53,318 @@ public: { RELEASE_ASSERT(m_graph.m_form == ThreadedCPS); - // Figure out which SetLocal's need flushing. Need to do this while the - // Phi graph is still intact. - for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) - continue; - for (unsigned nodeIndex = block->size(); nodeIndex--;) { - Node* node = block->at(nodeIndex); - if (node->op() != Flush) - continue; - addFlushedLocalOp(node); - } + m_graph.clearReplacements(); + m_graph.ensureDominators(); + + if (verbose) { + dataLog("Graph before SSA transformation:\n"); + m_graph.dump(); } - while (!m_flushedLocalOpWorklist.isEmpty()) { - Node* node = m_flushedLocalOpWorklist.takeLast(); - ASSERT(m_flushedLocalOps.contains(node)); - DFG_NODE_DO_TO_CHILDREN(m_graph, node, addFlushedLocalEdge); + + // Create a SSACalculator::Variable for every root VariableAccessData. + for (VariableAccessData& variable : m_graph.m_variableAccessData) { + if (!variable.isRoot()) + continue; + + SSACalculator::Variable* ssaVariable = m_calculator.newVariable(); + ASSERT(ssaVariable->index() == m_variableForSSAIndex.size()); + m_variableForSSAIndex.append(&variable); + m_ssaVariableForVariable.add(&variable, ssaVariable); } - // Eliminate all duplicate or self-pointing Phi edges. This means that - // we transform: - // - // p: Phi(@n1, @n2, @n3) - // - // into: - // - // p: Phi(@x) - // - // if each @ni in {@n1, @n2, @n3} is either equal to @p to is equal - // to @x, for exactly one other @x. Additionally, trivial Phis (i.e. - // p: Phi(@x)) are forwarded, so that if have an edge to such @p, we - // replace it with @x. This loop does this for Phis only; later we do - // such forwarding for Phi references found in other nodes. - // - // See Aycock and Horspool in CC'00 for a better description of what - // we're doing here. 
- do { - m_changed = false; - for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) - continue; - for (unsigned phiIndex = block->phis.size(); phiIndex--;) { - Node* phi = block->phis[phiIndex]; - if (phi->variableAccessData()->isCaptured()) - continue; - forwardPhiChildren(phi); - deduplicateChildren(phi); - } - } - } while (m_changed); - - // For each basic block, for each local live at the head of that block, - // figure out what node we should be referring to instead of that local. - // If it turns out to be a non-trivial Phi, make sure that we create an - // SSA Phi and Upsilons in predecessor blocks. We reuse - // BasicBlock::variablesAtHead for tracking which nodes to refer to. + // Find all SetLocals and create Defs for them. We handle SetArgument by creating a + // GetLocal, and recording the flush format. for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { BasicBlock* block = m_graph.block(blockIndex); if (!block) continue; - for (unsigned i = block->variablesAtHead.size(); i--;) { - Node* node = block->variablesAtHead[i]; - if (!node) + // Must process the block in forward direction because we want to see the last + // assignment for every local. + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + Node* node = block->at(nodeIndex); + if (node->op() != SetLocal && node->op() != SetArgument) continue; VariableAccessData* variable = node->variableAccessData(); - if (variable->isCaptured()) { - // Poison this entry in variablesAtHead because we don't - // want anyone to try to refer to it, if the variable is - // captured. - block->variablesAtHead[i] = 0; - continue; - } - - switch (node->op()) { - case Phi: - case SetArgument: - break; - case Flush: - case GetLocal: - case PhantomLocal: - node = node->child1().node(); - break; - default: - RELEASE_ASSERT_NOT_REACHED(); - } - RELEASE_ASSERT(node->op() == Phi || node->op() == SetArgument); - bool isFlushed = m_flushedLocalOps.contains(node); - - if (node->op() == Phi) { - Edge edge = node->children.justOneChild(); - if (edge) - node = edge.node(); // It's something from a different basic block. - else { - // It's a non-trivial Phi. - FlushFormat format = variable->flushFormat(); - NodeFlags result = resultFor(format); - UseKind useKind = useKindFor(format); - - node = m_insertionSet.insertNode(0, SpecNone, Phi, CodeOrigin()); - node->mergeFlags(result); - RELEASE_ASSERT((node->flags() & NodeResultMask) == result); - - for (unsigned j = block->predecessors.size(); j--;) { - BasicBlock* predecessor = block->predecessors[j]; - predecessor->appendNonTerminal( - m_graph, SpecNone, Upsilon, predecessor->last()->codeOrigin, - OpInfo(node), Edge(predecessor->variablesAtTail[i], useKind)); - } - - if (isFlushed) { - // Do nothing. For multiple reasons. - - // Reason #1: If the local is flushed then we don't need to bother - // with a MovHint since every path to this point in the code will - // have flushed the bytecode variable using a SetLocal and hence - // the Availability::flushedAt() will agree, and that will be - // sufficient for figuring out how to recover the variable's value. - - // Reason #2: If we had inserted a MovHint and the Phi function had - // died (because the only user of the value was the "flush" - i.e. - // some asynchronous runtime thingy) then the MovHint would turn - // into a ZombieHint, which would fool us into thinking that the - // variable is dead. 
- - // Reason #3: If we had inserted a MovHint then even if the Phi - // stayed alive, we would still end up generating inefficient code - // since we would be telling the OSR exit compiler to use some SSA - // value for the bytecode variable rather than just telling it that - // the value was already on the stack. - } else { - m_insertionSet.insertNode( - 0, SpecNone, MovHint, CodeOrigin(), - OpInfo(variable->local().offset()), Edge(node)); - } - } + Node* childNode; + if (node->op() == SetLocal) + childNode = node->child1().node(); + else { + ASSERT(node->op() == SetArgument); + childNode = m_insertionSet.insertNode( + nodeIndex, node->variableAccessData()->prediction(), + GetStack, node->origin, + OpInfo(m_graph.m_stackAccessData.add(variable->local(), variable->flushFormat()))); + if (!ASSERT_DISABLED) + m_argumentGetters.add(childNode); + m_argumentMapping.add(node, childNode); } - block->variablesAtHead[i] = node; + m_calculator.newDef( + m_ssaVariableForVariable.get(variable), block, childNode); } - + m_insertionSet.execute(block); } + // Decide where Phis are to be inserted. This creates the Phi's but doesn't insert them + // yet. We will later know where to insert them because SSACalculator is such a bro. + m_calculator.computePhis( + [&] (SSACalculator::Variable* ssaVariable, BasicBlock* block) -> Node* { + VariableAccessData* variable = m_variableForSSAIndex[ssaVariable->index()]; + + // Prune by liveness. This doesn't buy us much other than compile times. + Node* headNode = block->variablesAtHead.operand(variable->local()); + if (!headNode) + return nullptr; + + // There is the possibiltiy of "rebirths". The SSA calculator will already prune + // rebirths for the same VariableAccessData. But it will not be able to prune + // rebirths that arose from the same local variable number but a different + // VariableAccessData. We do that pruning here. + // + // Here's an example of a rebirth that this would catch: + // + // var x; + // if (foo) { + // if (bar) { + // x = 42; + // } else { + // x = 43; + // } + // print(x); + // x = 44; + // } else { + // x = 45; + // } + // print(x); // Without this check, we'd have a Phi for x = 42|43 here. + // + // FIXME: Consider feeding local variable numbers, not VariableAccessData*'s, as + // the "variables" for SSACalculator. That would allow us to eliminate this + // special case. + // https://bugs.webkit.org/show_bug.cgi?id=136641 + if (headNode->variableAccessData() != variable) + return nullptr; + + Node* phiNode = m_graph.addNode( + variable->prediction(), Phi, block->at(0)->origin.withInvalidExit()); + FlushFormat format = variable->flushFormat(); + NodeFlags result = resultFor(format); + phiNode->mergeFlags(result); + return phiNode; + }); + if (verbose) { - dataLog("Variables at head after SSA Phi insertion:\n"); - for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) - continue; - dataLog(" ", *block, ": ", block->variablesAtHead, "\n"); - } + dataLog("Computed Phis, about to transform the graph.\n"); + dataLog("\n"); + dataLog("Graph:\n"); + m_graph.dump(); + dataLog("\n"); + dataLog("Mappings:\n"); + for (unsigned i = 0; i < m_variableForSSAIndex.size(); ++i) + dataLog(" ", i, ": ", VariableAccessDataDump(m_graph, m_variableForSSAIndex[i]), "\n"); + dataLog("\n"); + dataLog("SSA calculator: ", m_calculator, "\n"); } - // At this point variablesAtHead in each block refers to either: + // Do the bulk of the SSA conversion. 
For each block, this tracks the operand->Node + // mapping based on a combination of what the SSACalculator tells us, and us walking over + // the block in forward order. We use our own data structure, valueForOperand, for + // determining the local mapping, but we rely on SSACalculator for the non-local mapping. // - // 1) A new SSA phi in the current block. - // 2) A SetArgument, which will soon get converted into a GetArgument. - // 3) An old CPS phi in a different block. + // This does three things at once: // - // We don't have to do anything for (1) and (2), but we do need to - // do a replacement for (3). - - // Clear all replacements, since other phases may have used them. - m_graph.clearReplacements(); - - // For all of the old CPS Phis, figure out what they correspond to in SSA. - for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) - continue; - for (unsigned phiIndex = block->phis.size(); phiIndex--;) { - Node* phi = block->phis[phiIndex]; - if (verbose) { - dataLog( - "Considering ", phi, ", for r", phi->local(), - ", and its replacement in ", *block, ", ", - block->variablesAtHead.operand(phi->local()), "\n"); + // - Inserts the Phis in all of the places where they need to go. We've already created + // them and they are accounted for in the SSACalculator's data structures, but we + // haven't inserted them yet, mostly because we want to insert all of a block's Phis in + // one go to amortize the cost of node insertion. + // + // - Create and insert Upsilons. + // + // - Convert all of the preexisting SSA nodes (other than the old CPS Phi nodes) into SSA + // form by replacing as follows: + // + // - MovHint has KillLocal prepended to it. + // + // - GetLocal die and get replaced with references to the node specified by + // valueForOperand. + // + // - SetLocal turns into PutStack if it's flushed, or turns into a Check otherwise. + // + // - Flush loses its children and turns into a Phantom. + // + // - PhantomLocal becomes Phantom, and its child is whatever is specified by + // valueForOperand. + // + // - SetArgument is removed. Note that GetStack nodes have already been inserted. + Operands<Node*> valueForOperand(OperandsLike, m_graph.block(0)->variablesAtHead); + for (BasicBlock* block : m_graph.blocksInPreOrder()) { + valueForOperand.clear(); + + // CPS will claim that the root block has all arguments live. But we have already done + // the first step of SSA conversion: argument locals are no longer live at head; + // instead we have GetStack nodes for extracting the values of arguments. So, we + // skip the at-head available value calculation for the root block. + if (block != m_graph.block(0)) { + for (size_t i = valueForOperand.size(); i--;) { + Node* nodeAtHead = block->variablesAtHead[i]; + if (!nodeAtHead) + continue; + + VariableAccessData* variable = nodeAtHead->variableAccessData(); + + if (verbose) + dataLog("Considering live variable ", VariableAccessDataDump(m_graph, variable), " at head of block ", *block, "\n"); + + SSACalculator::Variable* ssaVariable = m_ssaVariableForVariable.get(variable); + SSACalculator::Def* def = m_calculator.reachingDefAtHead(block, ssaVariable); + if (!def) { + // If we are required to insert a Phi, then we won't have a reaching def + // at head. + continue; + } + + Node* node = def->value(); + if (node->replacement()) { + // This will occur when a SetLocal had a GetLocal as its source. 
The + // GetLocal would get replaced with an actual SSA value by the time we get + // here. Note that the SSA value with which the GetLocal got replaced + // would not in turn have a replacement. + node = node->replacement(); + ASSERT(!node->replacement()); + } + if (verbose) + dataLog("Mapping: ", VirtualRegister(valueForOperand.operandForIndex(i)), " -> ", node, "\n"); + valueForOperand[i] = node; } - phi->misc.replacement = block->variablesAtHead.operand(phi->local()); - } - } - - // Now make sure that all variablesAtHead in each block points to the - // canonical SSA value. Prior to this, variablesAtHead[local] may point to - // an old CPS Phi in a different block. - for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) - continue; - for (size_t i = block->variablesAtHead.size(); i--;) { - Node* node = block->variablesAtHead[i]; - if (!node) - continue; - while (node->misc.replacement) - node = node->misc.replacement; - block->variablesAtHead[i] = node; - } - } - - if (verbose) { - dataLog("Variables at head after convergence:\n"); - for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) - continue; - dataLog(" ", *block, ": ", block->variablesAtHead, "\n"); } - } - - // Convert operations over locals into operations over SSA nodes. - // - GetLocal over captured variables lose their phis. - // - GetLocal over uncaptured variables die and get replaced with references - // to the node specified by variablesAtHead. - // - SetLocal gets NodeMustGenerate if it's flushed, or turns into a - // Check otherwise. - // - Flush loses its children but remains, because we want to know when a - // flushed SetLocal's value is no longer needed. This also makes it simpler - // to reason about the format of a local, since we can just do a backwards - // analysis (see FlushLivenessAnalysisPhase). As part of the backwards - // analysis, we say that the type of a local can be either int32, double, - // value, or dead. - // - PhantomLocal becomes Phantom, and its child is whatever is specified - // by variablesAtHead. - // - SetArgument turns into GetArgument unless it's a captured variable. - // - Upsilons get their children fixed to refer to the true value of that local - // at the end of the block. Prior to this loop, Upsilons will refer to - // variableAtTail[operand], which may be any of Flush, PhantomLocal, GetLocal, - // SetLocal, SetArgument, or Phi. We accomplish this by setting the - // replacement pointers of all of those nodes to refer to either - // variablesAtHead[operand], or the child of the SetLocal. - for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { - BasicBlock* block = m_graph.block(blockIndex); - if (!block) - continue; - for (unsigned phiIndex = block->phis.size(); phiIndex--;) { - block->phis[phiIndex]->misc.replacement = - block->variablesAtHead.operand(block->phis[phiIndex]->local()); + // Insert Phis by asking the calculator what phis there are in this block. Also update + // valueForOperand with those Phis. For Phis associated with variables that are not + // flushed, we also insert a MovHint. 
+ size_t phiInsertionPoint = 0; + for (SSACalculator::Def* phiDef : m_calculator.phisForBlock(block)) { + VariableAccessData* variable = m_variableForSSAIndex[phiDef->variable()->index()]; + + m_insertionSet.insert(phiInsertionPoint, phiDef->value()); + valueForOperand.operand(variable->local()) = phiDef->value(); + + m_insertionSet.insertNode( + phiInsertionPoint, SpecNone, MovHint, block->at(0)->origin.withInvalidExit(), + OpInfo(variable->local().offset()), phiDef->value()->defaultEdge()); } - for (unsigned nodeIndex = block->size(); nodeIndex--;) - ASSERT(!block->at(nodeIndex)->misc.replacement); + + if (block->at(0)->origin.exitOK) + m_insertionSet.insertNode(phiInsertionPoint, SpecNone, ExitOK, block->at(0)->origin); for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { Node* node = block->at(nodeIndex); + if (verbose) { + dataLog("Processing node ", node, ":\n"); + m_graph.dump(WTF::dataFile(), " ", node); + } + m_graph.performSubstitution(node); switch (node->op()) { + case MovHint: { + m_insertionSet.insertNode( + nodeIndex, SpecNone, KillStack, node->origin, + OpInfo(node->unlinkedLocal().offset())); + node->origin.exitOK = false; // KillStack clobbers exit. + break; + } + case SetLocal: { VariableAccessData* variable = node->variableAccessData(); - if (variable->isCaptured() || m_flushedLocalOps.contains(node)) - node->mergeFlags(NodeMustGenerate); - else - node->setOpAndDefaultFlags(Check); - node->misc.replacement = node->child1().node(); // Only for Upsilons. + Node* child = node->child1().node(); + + if (!!(node->flags() & NodeIsFlushed)) { + node->convertToPutStack( + m_graph.m_stackAccessData.add( + variable->local(), variable->flushFormat())); + } else + node->remove(); + + if (verbose) + dataLog("Mapping: ", variable->local(), " -> ", child, "\n"); + valueForOperand.operand(variable->local()) = child; + break; + } + + case GetStack: { + ASSERT(m_argumentGetters.contains(node)); + valueForOperand.operand(node->stackAccessData()->local) = node; break; } case GetLocal: { - // It seems tempting to just do forwardPhi(GetLocal), except that we - // could have created a new (SSA) Phi, and the GetLocal could still be - // referring to an old (CPS) Phi. Uses variablesAtHead to tell us what - // to refer to. - node->children.reset(); VariableAccessData* variable = node->variableAccessData(); - if (variable->isCaptured()) - break; - node->convertToPhantom(); - node->misc.replacement = block->variablesAtHead.operand(variable->local()); + node->children.reset(); + + node->remove(); + if (verbose) + dataLog("Replacing node ", node, " with ", valueForOperand.operand(variable->local()), "\n"); + node->setReplacement(valueForOperand.operand(variable->local())); break; } case Flush: { node->children.reset(); - // This is only for Upsilons. An Upsilon will only refer to a Flush if - // there were no SetLocals or GetLocals in the block. - node->misc.replacement = block->variablesAtHead.operand(node->local()); + node->remove(); break; } case PhantomLocal: { + ASSERT(node->child1().useKind() == UntypedUse); VariableAccessData* variable = node->variableAccessData(); - if (variable->isCaptured()) - break; - node->child1().setNode(block->variablesAtHead.operand(variable->local())); - node->convertToPhantom(); - // This is only for Upsilons. An Upsilon will only refer to a - // PhantomLocal if there were no SetLocals or GetLocals in the block. 
- node->misc.replacement = block->variablesAtHead.operand(variable->local()); + node->child1() = valueForOperand.operand(variable->local())->defaultEdge(); + node->remove(); break; } case SetArgument: { - VariableAccessData* variable = node->variableAccessData(); - if (variable->isCaptured()) - break; - node->setOpAndDefaultFlags(GetArgument); - node->mergeFlags(resultFor(node->variableAccessData()->flushFormat())); + node->remove(); break; } - + default: break; } } + + // We want to insert Upsilons just before the end of the block. On the surface this + // seems dangerous because the Upsilon will have a checking UseKind. But, we will not + // actually be performing the check at the point of the Upsilon; the check will + // already have been performed at the point where the original SetLocal was. + NodeAndIndex terminal = block->findTerminal(); + size_t upsilonInsertionPoint = terminal.index; + NodeOrigin upsilonOrigin = terminal.node->origin; + for (unsigned successorIndex = block->numSuccessors(); successorIndex--;) { + BasicBlock* successorBlock = block->successor(successorIndex); + for (SSACalculator::Def* phiDef : m_calculator.phisForBlock(successorBlock)) { + Node* phiNode = phiDef->value(); + SSACalculator::Variable* ssaVariable = phiDef->variable(); + VariableAccessData* variable = m_variableForSSAIndex[ssaVariable->index()]; + FlushFormat format = variable->flushFormat(); + + // We can use an unchecked use kind because the SetLocal was turned into a Check. + // We have to use an unchecked use because at least sometimes, the end of the block + // is not exitOK. + UseKind useKind = uncheckedUseKindFor(format); + + m_insertionSet.insertNode( + upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin, + OpInfo(phiNode), Edge( + valueForOperand.operand(variable->local()), + useKind)); + } + } + + m_insertionSet.execute(block); } // Free all CPS phis and reset variables vectors. @@ -374,95 +379,39 @@ public: block->variablesAtTail.clear(); block->valuesAtHead.clear(); block->valuesAtHead.clear(); - block->ssa = adoptPtr(new BasicBlock::SSAData(block)); + block->ssa = std::make_unique<BasicBlock::SSAData>(block); } - m_graph.m_arguments.clear(); + m_graph.m_argumentFormats.resize(m_graph.m_arguments.size()); + for (unsigned i = m_graph.m_arguments.size(); i--;) { + FlushFormat format = FlushedJSValue; + + Node* node = m_argumentMapping.get(m_graph.m_arguments[i]); + + RELEASE_ASSERT(node); + format = node->stackAccessData()->format; + + m_graph.m_argumentFormats[i] = format; + m_graph.m_arguments[i] = node; // Record the load that loads the arguments for the benefit of exit profiling. 
+ } m_graph.m_form = SSA; - return true; - } -private: - void forwardPhiChildren(Node* node) - { - for (unsigned i = 0; i < AdjacencyList::Size; ++i) { - Edge& edge = node->children.child(i); - if (!edge) - break; - m_changed |= forwardPhiEdge(edge); - } - } - - Node* forwardPhi(Node* node) - { - for (;;) { - switch (node->op()) { - case Phi: { - Edge edge = node->children.justOneChild(); - if (!edge) - return node; - node = edge.node(); - break; - } - case GetLocal: - case SetLocal: - if (node->variableAccessData()->isCaptured()) - return node; - node = node->child1().node(); - break; - default: - return node; - } + if (verbose) { + dataLog("Graph after SSA transformation:\n"); + m_graph.dump(); } - } - - bool forwardPhiEdge(Edge& edge) - { - Node* newNode = forwardPhi(edge.node()); - if (newNode == edge.node()) - return false; - edge.setNode(newNode); + return true; } - - void deduplicateChildren(Node* node) - { - for (unsigned i = 0; i < AdjacencyList::Size; ++i) { - Edge edge = node->children.child(i); - if (!edge) - break; - if (edge == node) { - node->children.removeEdge(i--); - m_changed = true; - continue; - } - for (unsigned j = i + 1; j < AdjacencyList::Size; ++j) { - if (node->children.child(j) == edge) { - node->children.removeEdge(j--); - m_changed = true; - } - } - } - } - - void addFlushedLocalOp(Node* node) - { - if (m_flushedLocalOps.contains(node)) - return; - m_flushedLocalOps.add(node); - m_flushedLocalOpWorklist.append(node); - } - void addFlushedLocalEdge(Node*, Edge edge) - { - addFlushedLocalOp(edge.node()); - } - +private: + SSACalculator m_calculator; InsertionSet m_insertionSet; - HashSet<Node*> m_flushedLocalOps; - Vector<Node*> m_flushedLocalOpWorklist; - bool m_changed; + HashMap<VariableAccessData*, SSACalculator::Variable*> m_ssaVariableForVariable; + HashMap<Node*, Node*> m_argumentMapping; + HashSet<Node*> m_argumentGetters; + Vector<VariableAccessData*> m_variableForSSAIndex; }; bool performSSAConversion(Graph& graph) diff --git a/Source/JavaScriptCore/dfg/DFGSSAConversionPhase.h b/Source/JavaScriptCore/dfg/DFGSSAConversionPhase.h index 2fa5ff41a..027f8156f 100644 --- a/Source/JavaScriptCore/dfg/DFGSSAConversionPhase.h +++ b/Source/JavaScriptCore/dfg/DFGSSAConversionPhase.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGSSAConversionPhase_h #define DFGSSAConversionPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) namespace JSC { namespace DFG { @@ -36,10 +34,9 @@ class Graph; // Convert ThreadedCPS form into SSA form. This results in a form that has: // -// - Roughly minimal Phi's. We use the Aycock & Horspool fixpoint for -// converting the CPS maximal Phis into SSA minimal Phis, with the caveat -// that irreducible control flow may result in some missed opportunities -// for Phi reduction. +// - Minimal Phi's. We use the the Cytron et al (TOPLAS'91) algorithm for +// Phi insertion. Most of the algorithm is implemented in SSACalculator +// and Dominators. // // - No uses of GetLocal/SetLocal except for captured variables and flushes. // After this, any remaining SetLocal means Flush. PhantomLocals become @@ -84,12 +81,6 @@ class Graph; // the caveat that the Phi predecessor block lists would have to be // updated). 
// -// The easiest way to convert from this SSA form into a different SSA -// form is to redo SSA conversion for Phi functions. That is, treat each -// Phi in our IR as a non-SSA variable in the foreign IR (so, as an -// alloca in LLVM IR, for example); the Upsilons that refer to the Phi -// become stores and the Phis themselves become loads. -// // Fun fact: Upsilon is so named because it comes before Phi in the // alphabet. It can be written as "Y". diff --git a/Source/JavaScriptCore/dfg/DFGSSALoweringPhase.cpp b/Source/JavaScriptCore/dfg/DFGSSALoweringPhase.cpp index 51d5fd0e4..c4b67a361 100644 --- a/Source/JavaScriptCore/dfg/DFGSSALoweringPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGSSALoweringPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -32,7 +32,7 @@ #include "DFGGraph.h" #include "DFGInsertionSet.h" #include "DFGPhase.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -69,36 +69,49 @@ private: { switch (m_node->op()) { case GetByVal: + case HasIndexedProperty: lowerBoundsCheck(m_node->child1(), m_node->child2(), m_node->child3()); break; case PutByVal: - case PutByValDirect: - lowerBoundsCheck( - m_graph.varArgChild(m_node, 0), - m_graph.varArgChild(m_node, 1), - m_graph.varArgChild(m_node, 3)); + case PutByValDirect: { + Edge base = m_graph.varArgChild(m_node, 0); + Edge index = m_graph.varArgChild(m_node, 1); + Edge storage = m_graph.varArgChild(m_node, 3); + if (lowerBoundsCheck(base, index, storage)) + break; + + if (m_node->arrayMode().typedArrayType() != NotTypedArray && m_node->arrayMode().isOutOfBounds()) { + Node* length = m_insertionSet.insertNode( + m_nodeIndex, SpecInt32, GetArrayLength, m_node->origin, + OpInfo(m_node->arrayMode().asWord()), base, storage); + + m_graph.varArgChild(m_node, 4) = Edge(length, KnownInt32Use); + break; + } break; + } default: break; } } - void lowerBoundsCheck(Edge base, Edge index, Edge storage) + bool lowerBoundsCheck(Edge base, Edge index, Edge storage) { if (!m_node->arrayMode().permitsBoundsCheckLowering()) - return; + return false; if (!m_node->arrayMode().lengthNeedsStorage()) storage = Edge(); Node* length = m_insertionSet.insertNode( - m_nodeIndex, SpecInt32, GetArrayLength, m_node->codeOrigin, + m_nodeIndex, SpecInt32, GetArrayLength, m_node->origin, OpInfo(m_node->arrayMode().asWord()), base, storage); m_insertionSet.insertNode( - m_nodeIndex, SpecInt32, CheckInBounds, m_node->codeOrigin, + m_nodeIndex, SpecInt32, CheckInBounds, m_node->origin, index, Edge(length, KnownInt32Use)); + return true; } InsertionSet m_insertionSet; diff --git a/Source/JavaScriptCore/dfg/DFGSafeToExecute.h b/Source/JavaScriptCore/dfg/DFGSafeToExecute.h index b6cd5dc08..5e7597790 100644 --- a/Source/JavaScriptCore/dfg/DFGSafeToExecute.h +++ b/Source/JavaScriptCore/dfg/DFGSafeToExecute.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. + * Copyright (C) 2013-2016 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGSafeToExecute_h #define DFGSafeToExecute_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGGraph.h" @@ -48,29 +46,40 @@ public: switch (edge.useKind()) { case UntypedUse: case Int32Use: - case RealNumberUse: + case DoubleRepUse: + case DoubleRepRealUse: + case Int52RepUse: case NumberUse: + case RealNumberUse: case BooleanUse: case CellUse: + case CellOrOtherUse: case ObjectUse: + case FunctionUse: case FinalObjectUse: + case RegExpObjectUse: case ObjectOrOtherUse: case StringIdentUse: case StringUse: + case StringOrOtherUse: + case SymbolUse: case StringObjectUse: case StringOrStringObjectUse: + case NotStringVarUse: case NotCellUse: case OtherUse: + case MiscUse: case MachineIntUse: + case DoubleRepMachineIntUse: return; case KnownInt32Use: if (m_state.forNode(edge).m_type & ~SpecInt32) m_result = false; return; - - case KnownNumberUse: - if (m_state.forNode(edge).m_type & ~SpecFullNumber) + + case KnownBooleanUse: + if (m_state.forNode(edge).m_type & ~SpecBoolean) m_result = false; return; @@ -83,6 +92,11 @@ public: if (m_state.forNode(edge).m_type & ~SpecString) m_result = false; return; + + case KnownPrimitiveUse: + if (m_state.forNode(edge).m_type & ~(SpecHeapTop & ~SpecObject)) + m_result = false; + return; case LastUseKind: RELEASE_ASSERT_NOT_REACHED(); @@ -99,8 +113,15 @@ private: // Determines if it's safe to execute a node within the given abstract state. This may // return false conservatively. If it returns true, then you can hoist the given node -// up to the given point and expect that it will not crash. This doesn't guarantee that -// the node will produce the result you wanted other than not crashing. +// up to the given point and expect that it will not crash. It also guarantees that the +// node will not produce a malformed JSValue or object pointer when executed in the +// given state. But this doesn't guarantee that the node will produce the result you +// wanted. For example, you may have a GetByOffset from a prototype that only makes +// semantic sense if you've also checked that some nearer prototype doesn't also have +// a property of the same name. This could still return true even if that check hadn't +// been performed in the given abstract state. That's fine though: the load can still +// safely execute before that check, so long as that check continues to guard any +// user-observable things done to the loaded value. template<typename AbstractStateType> bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node) { @@ -109,18 +130,27 @@ bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node) if (!safeToExecuteEdge.result()) return false; + // NOTE: This tends to lie when it comes to effectful nodes, because it knows that they aren't going to + // get hoisted anyway. 
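(As an illustrative sketch, not code from this patch: the contract documented above is what allows a hoisting pass to query safeToExecute() at a prospective destination. Here stateAtDestination is a hypothetical AbstractState computed at, say, a loop pre-header.)

    template<typename AbstractStateType>
    bool mayHoistTo(AbstractStateType& stateAtDestination, Graph& graph, Node* node)
    {
        // A true result only rules out crashes and malformed values at the new
        // position; the semantic validity of the node's result still depends on
        // whatever checks remain in place at its original position.
        return safeToExecute(stateAtDestination, graph, node);
    }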
+ switch (node->op()) { case JSConstant: - case WeakJSConstant: + case DoubleConstant: + case Int52Constant: case Identity: case ToThis: case CreateThis: case GetCallee: + case GetArgumentCount: + case GetRestLength: case GetLocal: case SetLocal: + case PutStack: + case KillStack: + case GetStack: case MovHint: case ZombieHint: - case GetArgument: + case ExitOK: case Phantom: case Upsilon: case Phi: @@ -136,9 +166,9 @@ bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node) case BitURShift: case ValueToInt32: case UInt32ToNumber: - case Int32ToDouble: case DoubleAsInt32: case ArithAdd: + case ArithClz32: case ArithSub: case ArithNegate: case ArithMul: @@ -148,33 +178,46 @@ bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node) case ArithAbs: case ArithMin: case ArithMax: + case ArithPow: + case ArithRandom: case ArithSqrt: + case ArithFRound: + case ArithRound: + case ArithFloor: + case ArithCeil: case ArithSin: case ArithCos: + case ArithLog: case ValueAdd: case GetById: case GetByIdFlush: case PutById: + case PutByIdFlush: case PutByIdDirect: + case PutGetterById: + case PutSetterById: + case PutGetterSetterById: + case PutGetterByVal: + case PutSetterByVal: case CheckStructure: - case CheckExecutable: + case GetExecutable: case GetButterfly: + case GetButterflyReadOnly: case CheckArray: case Arrayify: case ArrayifyToStructure: case GetScope: - case GetMyScope: - case SkipTopScope: case SkipScope: - case GetClosureRegisters: case GetClosureVar: case PutClosureVar: case GetGlobalVar: - case PutGlobalVar: - case VariableWatchpoint: + case GetGlobalLexicalVariable: + case PutGlobalVariable: case VarInjectionWatchpoint: - case CheckFunction: - case AllocationProfileWatchpoint: + case CheckCell: + case CheckBadCell: + case CheckNotEmpty: + case CheckIdent: case RegExpExec: case RegExpTest: case CompareLess: @@ -182,11 +225,17 @@ bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node) case CompareGreater: case CompareGreaterEq: case CompareEq: - case CompareEqConstant: case CompareStrictEq: - case CompareStrictEqConstant: case Call: + case TailCallInlinedCaller: case Construct: + case CallVarargs: + case TailCallVarargsInlinedCaller: + case TailCallForwardVarargsInlinedCaller: + case ConstructVarargs: + case LoadVarargs: + case CallForwardVarargs: + case ConstructForwardVarargs: case NewObject: case NewArray: case NewArrayWithSize: @@ -195,38 +244,44 @@ bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node) case Breakpoint: case ProfileWillCall: case ProfileDidCall: - case CheckHasInstance: + case ProfileType: + case ProfileControlFlow: + case CheckTypeInfoFlags: + case OverridesHasInstance: case InstanceOf: + case InstanceOfCustom: case IsUndefined: case IsBoolean: case IsNumber: case IsString: case IsObject: + case IsObjectOrNull: case IsFunction: case TypeOf: case LogicalNot: case ToPrimitive: case ToString: + case StrCat: + case CallStringConstructor: case NewStringObject: case MakeRope: case In: case CreateActivation: - case TearOffActivation: - case CreateArguments: - case PhantomArguments: - case TearOffArguments: - case GetMyArgumentsLength: - case GetMyArgumentByVal: - case GetMyArgumentsLengthSafe: - case GetMyArgumentByValSafe: - case CheckArgumentsNotCreated: - case NewFunctionNoCheck: + case CreateDirectArguments: + case CreateScopedArguments: + case CreateClonedArguments: + case GetFromArguments: + case PutToArguments: + case NewArrowFunction: case NewFunction: - case NewFunctionExpression: + case NewGeneratorFunction: 
case Jump: case Branch: case Switch: case Return: + case TailCall: + case TailCallVarargs: + case TailCallForwardVarargs: case Throw: case ThrowReferenceError: case CountExecution: @@ -239,21 +294,52 @@ bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node) case CheckTierUpInLoop: case CheckTierUpAtReturn: case CheckTierUpAndOSREnter: + case CheckTierUpWithNestedTriggerAndOSREnter: case LoopHint: - case Int52ToDouble: - case Int52ToValue: case StoreBarrier: - case ConditionalStoreBarrier: - case StoreBarrierWithNullCheck: case InvalidationPoint: case NotifyWrite: - case FunctionReentryWatchpoint: - case TypedArrayWatchpoint: case CheckInBounds: case ConstantStoragePointer: case Check: + case MultiPutByOffset: + case ValueRep: + case DoubleRep: + case Int52Rep: + case BooleanToNumber: + case FiatInt52: + case GetGetter: + case GetSetter: + case GetEnumerableLength: + case HasGenericProperty: + case HasStructureProperty: + case HasIndexedProperty: + case GetDirectPname: + case GetPropertyEnumerator: + case GetEnumeratorStructurePname: + case GetEnumeratorGenericPname: + case ToIndexString: + case PhantomNewObject: + case PhantomNewFunction: + case PhantomNewGeneratorFunction: + case PhantomCreateActivation: + case PutHint: + case CheckStructureImmediate: + case MaterializeNewObject: + case MaterializeCreateActivation: + case PhantomDirectArguments: + case PhantomClonedArguments: + case GetMyArgumentByVal: + case ForwardVarargs: + case CopyRest: + case StringReplace: return true; - + + case BottomValue: + // If in doubt, assume that this isn't safe to execute, just because we have no way of + // compiling this node. + return false; + case GetByVal: case GetIndexedPropertyStorage: case GetArrayLength: @@ -272,22 +358,62 @@ bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node) return node->arrayMode().modeForPut().alreadyChecked( graph, node, state.forNode(graph.varArgChild(node, 0))); - case StructureTransitionWatchpoint: - return state.forNode(node->child1()).m_futurePossibleStructure.isSubsetOf( - StructureSet(node->structure())); - case PutStructure: - case PhantomPutStructure: case AllocatePropertyStorage: case ReallocatePropertyStorage: - return state.forNode(node->child1()).m_currentKnownStructure.isSubsetOf( - StructureSet(node->structureTransitionData().previousStructure)); + return state.forNode(node->child1()).m_structure.isSubsetOf( + StructureSet(node->transition()->previous)); case GetByOffset: - case PutByOffset: - return state.forNode(node->child1()).m_currentKnownStructure.isValidOffset( - graph.m_storageAccessData[node->storageAccessDataIndex()].offset); + case GetGetterSetterByOffset: + case PutByOffset: { + PropertyOffset offset = node->storageAccessData().offset; + + if (state.structureClobberState() == StructuresAreWatched) { + if (JSObject* knownBase = node->child1()->dynamicCastConstant<JSObject*>()) { + if (graph.isSafeToLoad(knownBase, offset)) + return true; + } + } + StructureAbstractValue& value = state.forNode(node->child1()).m_structure; + if (value.isInfinite()) + return false; + for (unsigned i = value.size(); i--;) { + if (!value[i]->isValidOffset(offset)) + return false; + } + return true; + } + + case MultiGetByOffset: { + // We can't always guarantee that the MultiGetByOffset is safe to execute if it + // contains loads from prototypes. If the load requires a check in IR, which is rare, then + // we currently claim that we don't know if it's safe to execute because finding that + // check in the abstract state would be hard. 
If the load requires watchpoints, we just + // check if we're not in a clobbered state (i.e. in between a side effect and an + // invalidation point). + for (const MultiGetByOffsetCase& getCase : node->multiGetByOffsetData().cases) { + GetByOffsetMethod method = getCase.method(); + switch (method.kind()) { + case GetByOffsetMethod::Invalid: + RELEASE_ASSERT_NOT_REACHED(); + break; + case GetByOffsetMethod::Constant: // OK because constants are always safe to execute. + case GetByOffsetMethod::Load: // OK because the MultiGetByOffset has its own checks for loading from self. + break; + case GetByOffsetMethod::LoadFromPrototype: + // Only OK if the state isn't clobbered. That's almost always the case. + if (state.structureClobberState() != StructuresAreWatched) + return false; + if (!graph.isSafeToLoad(method.prototype()->cast<JSObject*>(), method.offset())) + return false; + break; + } + } + return true; + } + case LastNodeType: RELEASE_ASSERT_NOT_REACHED(); return false; diff --git a/Source/JavaScriptCore/dfg/DFGSafepoint.cpp b/Source/JavaScriptCore/dfg/DFGSafepoint.cpp new file mode 100644 index 000000000..11ba5ad9b --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGSafepoint.cpp @@ -0,0 +1,127 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "DFGSafepoint.h" + +#if ENABLE(DFG_JIT) + +#include "DFGPlan.h" +#include "DFGScannable.h" +#include "DFGThreadData.h" +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +Safepoint::Result::~Result() +{ + RELEASE_ASSERT(m_wasChecked); +} + +bool Safepoint::Result::didGetCancelled() +{ + m_wasChecked = true; + return m_didGetCancelled; +} + +Safepoint::Safepoint(Plan& plan, Result& result) + : m_plan(plan) + , m_didCallBegin(false) + , m_result(result) +{ + RELEASE_ASSERT(result.m_wasChecked); + result.m_wasChecked = false; + result.m_didGetCancelled = false; +} + +Safepoint::~Safepoint() +{ + RELEASE_ASSERT(m_didCallBegin); + if (ThreadData* data = m_plan.threadData) { + RELEASE_ASSERT(data->m_safepoint == this); + data->m_rightToRun.lock(); + data->m_safepoint = nullptr; + } +} + +void Safepoint::add(Scannable* scannable) +{ + RELEASE_ASSERT(!m_didCallBegin); + m_scannables.append(scannable); +} + +void Safepoint::begin() +{ + RELEASE_ASSERT(!m_didCallBegin); + m_didCallBegin = true; + if (ThreadData* data = m_plan.threadData) { + RELEASE_ASSERT(!data->m_safepoint); + data->m_safepoint = this; + data->m_rightToRun.unlock(); + } +} + +void Safepoint::checkLivenessAndVisitChildren(SlotVisitor& visitor) +{ + RELEASE_ASSERT(m_didCallBegin); + + if (m_result.m_didGetCancelled) + return; // We were cancelled during a previous GC! + + if (!isKnownToBeLiveDuringGC()) + return; + + for (unsigned i = m_scannables.size(); i--;) + m_scannables[i]->visitChildren(visitor); +} + +bool Safepoint::isKnownToBeLiveDuringGC() +{ + RELEASE_ASSERT(m_didCallBegin); + + if (m_result.m_didGetCancelled) + return true; // We were cancelled during a previous GC, so let's not mess with it this time around - pretend it's live and move on. + + return m_plan.isKnownToBeLiveDuringGC(); +} + +void Safepoint::cancel() +{ + RELEASE_ASSERT(m_didCallBegin); + RELEASE_ASSERT(!m_result.m_didGetCancelled); // We cannot get cancelled twice because subsequent GCs will think that we're alive and they will not do anything to us. + + m_plan.cancel(); + m_result.m_didGetCancelled = true; +} + +VM& Safepoint::vm() const +{ + return m_plan.vm; +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGAnalysis.h b/Source/JavaScriptCore/dfg/DFGSafepoint.h index 1a49a8f51..96f4b8ecd 100644 --- a/Source/JavaScriptCore/dfg/DFGAnalysis.h +++ b/Source/JavaScriptCore/dfg/DFGSafepoint.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,53 +23,67 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef DFGAnalysis_h -#define DFGAnalysis_h - -#include <wtf/Platform.h> +#ifndef DFGSafepoint_h +#define DFGSafepoint_h #if ENABLE(DFG_JIT) -namespace JSC { namespace DFG { +#include <wtf/Vector.h> + +namespace JSC { + +class SlotVisitor; +class VM; -class Graph; +namespace DFG { -// Use this as a mixin for DFG analyses. The analysis itself implements a public -// compute(Graph&) method. Clients call computeIfNecessary() when they want -// results. 
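(As an illustrative sketch, not code from this patch: the protocol implemented above, as a compiler thread would drive it. 'plan' and 'scannableState' stand for a DFG::Plan and a Scannable owned by the caller.)

    Safepoint::Result safepointResult;
    {
        Safepoint safepoint(plan, safepointResult);
        safepoint.add(&scannableState); // let a concurrent GC visit our state
        safepoint.begin();              // releases the thread's right-to-run lock
        // ... long-running work that must not touch the JS heap goes here ...
    }                                   // ~Safepoint() re-acquires the right to run
    if (safepointResult.didGetCancelled())
        return;                         // a GC cancelled the plan while we were parked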
+class Scannable; +struct Plan; -template<typename T> -class Analysis { +class Safepoint { public: - Analysis() - : m_valid(false) - { - } + class Result { + public: + Result() + : m_didGetCancelled(false) + , m_wasChecked(true) + { + } + + ~Result(); + + bool didGetCancelled(); + + private: + friend class Safepoint; + + bool m_didGetCancelled; + bool m_wasChecked; + }; + + Safepoint(Plan&, Result&); + ~Safepoint(); + + void add(Scannable*); - void invalidate() - { - m_valid = false; - } + void begin(); - void computeIfNecessary(Graph& graph) - { - if (m_valid) - return; - // Set to true early, since the analysis may choose to call its own methods in - // compute() and it may want to ASSERT() validity in those methods. - m_valid = true; - static_cast<T*>(this)->compute(graph); - } + void checkLivenessAndVisitChildren(SlotVisitor&); + bool isKnownToBeLiveDuringGC(); + void cancel(); - bool isValid() const { return m_valid; } + VM& vm() const; private: - bool m_valid; + Plan& m_plan; + Vector<Scannable*> m_scannables; + bool m_didCallBegin; + Result& m_result; }; } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) -#endif // DFGAnalysis_h +#endif // DFGSafepoint_h diff --git a/Source/JavaScriptCore/dfg/DFGSaneStringGetByValSlowPathGenerator.h b/Source/JavaScriptCore/dfg/DFGSaneStringGetByValSlowPathGenerator.h index b9198472b..568c87895 100644 --- a/Source/JavaScriptCore/dfg/DFGSaneStringGetByValSlowPathGenerator.h +++ b/Source/JavaScriptCore/dfg/DFGSaneStringGetByValSlowPathGenerator.h @@ -26,8 +26,6 @@ #ifndef DFGSaneStringGetByValSlowPathGenerator_h #define DFGSaneStringGetByValSlowPathGenerator_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGCommon.h" @@ -78,6 +76,7 @@ protected: GPRReg canTrample = SpeculativeJIT::pickCanTrample(extractResult(m_resultRegs)); for (unsigned i = m_plans.size(); i--;) jit->silentFill(m_plans[i], canTrample); + jit->m_jit.exceptionCheck(); jumpTo(jit); } diff --git a/Source/JavaScriptCore/dfg/DFGScannable.h b/Source/JavaScriptCore/dfg/DFGScannable.h new file mode 100644 index 000000000..6b85cc024 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGScannable.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef DFGScannable_h +#define DFGScannable_h + +#if ENABLE(DFG_JIT) + +namespace JSC { + +class SlotVisitor; + +namespace DFG { + +class Scannable { +public: + Scannable() { } + virtual ~Scannable() { } + + virtual void visitChildren(SlotVisitor&) = 0; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGScannable_h + diff --git a/Source/JavaScriptCore/dfg/DFGScoreBoard.h b/Source/JavaScriptCore/dfg/DFGScoreBoard.h index 15af609a9..c8795c8a4 100644 --- a/Source/JavaScriptCore/dfg/DFGScoreBoard.h +++ b/Source/JavaScriptCore/dfg/DFGScoreBoard.h @@ -55,21 +55,27 @@ public: assertClear(); } + void sortFree() + { + std::sort(m_free.begin(), m_free.end()); + } + void assertClear() { -#if !ASSERT_DISABLED + if (ASSERT_DISABLED) + return; + // For every entry in the used list the use count of the virtual register should be zero, or max, due to it being a preserved local. for (size_t i = 0; i < m_used.size(); ++i) - ASSERT(!m_used[i] || m_used[i] == max()); + RELEASE_ASSERT(!m_used[i] || m_used[i] == max()); // For every entry in the free list, the use count should be zero. for (size_t i = 0; i < m_free.size(); ++i) - ASSERT(!m_used[m_free[i]]); + RELEASE_ASSERT(!m_used[m_free[i]]); // There must not be duplicates in the free list. for (size_t i = 0; i < m_free.size(); ++i) { for (size_t j = i + 1; j < m_free.size(); ++j) - ASSERT(m_free[i] != m_free[j]); + RELEASE_ASSERT(m_free[i] != m_free[j]); } -#endif } VirtualRegister allocate() diff --git a/Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h b/Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h index 31945cea0..8de98d88d 100644 --- a/Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h +++ b/Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h @@ -26,8 +26,6 @@ #ifndef DFGSilentRegisterSavePlan_h #define DFGSilentRegisterSavePlan_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGCommon.h" diff --git a/Source/JavaScriptCore/dfg/DFGSlowPathGenerator.h b/Source/JavaScriptCore/dfg/DFGSlowPathGenerator.h index 34d3631ea..e595c383c 100644 --- a/Source/JavaScriptCore/dfg/DFGSlowPathGenerator.h +++ b/Source/JavaScriptCore/dfg/DFGSlowPathGenerator.h @@ -26,15 +26,12 @@ #ifndef DFGSlowPathGenerator_h #define DFGSlowPathGenerator_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGCommon.h" #include "DFGSilentRegisterSavePlan.h" #include "DFGSpeculativeJIT.h" #include <wtf/FastMalloc.h> -#include <wtf/PassOwnPtr.h> namespace JSC { namespace DFG { @@ -43,6 +40,8 @@ class SlowPathGenerator { public: SlowPathGenerator(SpeculativeJIT* jit) : m_currentNode(jit->m_currentNode) + , m_streamIndex(jit->m_stream->size()) + , m_origin(jit->m_origin) { } virtual ~SlowPathGenerator() { } @@ -50,9 +49,12 @@ public: { m_label = jit->m_jit.label(); jit->m_currentNode = m_currentNode; + jit->m_outOfLineStreamIndex = m_streamIndex; + jit->m_origin = m_origin; generateInternal(jit); + jit->m_outOfLineStreamIndex = UINT_MAX; if (!ASSERT_DISABLED) - jit->m_jit.breakpoint(); // make sure that the generator jumps back to somewhere + jit->m_jit.abortWithReason(DFGSlowPathGeneratorFellThrough); } MacroAssembler::Label label() const { return m_label; } virtual MacroAssembler::Call call() const @@ -60,10 +62,15 @@ public: RELEASE_ASSERT_NOT_REACHED(); // By default slow path generators don't have a call. 
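(As an illustrative sketch, not code from this patch: the hunks that follow thread an ExceptionCheckRequirement parameter through the slowPathCall() helpers, so a call site whose operation cannot throw can skip the per-call exception check. operationFoo, slowCase and the register names below are assumed for illustration.)

    addSlowPathGenerator(slowPathCall(
        slowCase, this, operationFoo, resultGPR, argGPR,
        NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));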
return MacroAssembler::Call(); } + + const NodeOrigin& origin() const { return m_origin; } + protected: virtual void generateInternal(SpeculativeJIT*) = 0; MacroAssembler::Label m_label; Node* m_currentNode; + unsigned m_streamIndex; + NodeOrigin m_origin; }; template<typename JumpType> @@ -91,15 +98,21 @@ protected: MacroAssembler::Label m_to; }; +enum class ExceptionCheckRequirement { + CheckNeeded, + CheckNotNeeded +}; + template<typename JumpType, typename FunctionType, typename ResultType> class CallSlowPathGenerator : public JumpingSlowPathGenerator<JumpType> { public: CallSlowPathGenerator( JumpType from, SpeculativeJIT* jit, FunctionType function, - SpillRegistersMode spillMode, ResultType result) + SpillRegistersMode spillMode, ExceptionCheckRequirement requirement, ResultType result) : JumpingSlowPathGenerator<JumpType>(from, jit) , m_function(function) , m_spillMode(spillMode) + , m_exceptionCheckRequirement(requirement) , m_result(result) { if (m_spillMode == NeedToSpill) @@ -133,11 +146,14 @@ protected: for (unsigned i = m_plans.size(); i--;) jit->silentFill(m_plans[i], canTrample); } + if (m_exceptionCheckRequirement == ExceptionCheckRequirement::CheckNeeded) + jit->m_jit.exceptionCheck(); this->jumpTo(jit); } FunctionType m_function; SpillRegistersMode m_spillMode; + ExceptionCheckRequirement m_exceptionCheckRequirement; ResultType m_result; MacroAssembler::Call m_call; Vector<SilentRegisterSavePlan, 2> m_plans; @@ -149,9 +165,9 @@ class CallResultAndNoArgumentsSlowPathGenerator public: CallResultAndNoArgumentsSlowPathGenerator( JumpType from, SpeculativeJIT* jit, FunctionType function, - SpillRegistersMode spillMode, ResultType result) + SpillRegistersMode spillMode, ExceptionCheckRequirement requirement, ResultType result) : CallSlowPathGenerator<JumpType, FunctionType, ResultType>( - from, jit, function, spillMode, result) + from, jit, function, spillMode, requirement, result) { } @@ -172,9 +188,9 @@ class CallResultAndOneArgumentSlowPathGenerator public: CallResultAndOneArgumentSlowPathGenerator( JumpType from, SpeculativeJIT* jit, FunctionType function, - SpillRegistersMode spillMode, ResultType result, ArgumentType1 argument1) + SpillRegistersMode spillMode, ExceptionCheckRequirement requirement, ResultType result, ArgumentType1 argument1) : CallSlowPathGenerator<JumpType, FunctionType, ResultType>( - from, jit, function, spillMode, result) + from, jit, function, spillMode, requirement, result) , m_argument1(argument1) { } @@ -198,10 +214,10 @@ class CallResultAndTwoArgumentsSlowPathGenerator public: CallResultAndTwoArgumentsSlowPathGenerator( JumpType from, SpeculativeJIT* jit, FunctionType function, - SpillRegistersMode spillMode, ResultType result, ArgumentType1 argument1, + SpillRegistersMode spillMode, ExceptionCheckRequirement requirement, ResultType result, ArgumentType1 argument1, ArgumentType2 argument2) : CallSlowPathGenerator<JumpType, FunctionType, ResultType>( - from, jit, function, spillMode, result) + from, jit, function, spillMode, requirement, result) , m_argument1(argument1) , m_argument2(argument2) { @@ -227,10 +243,10 @@ class CallResultAndThreeArgumentsSlowPathGenerator public: CallResultAndThreeArgumentsSlowPathGenerator( JumpType from, SpeculativeJIT* jit, FunctionType function, - SpillRegistersMode spillMode, ResultType result, ArgumentType1 argument1, + SpillRegistersMode spillMode, ExceptionCheckRequirement requirement, ResultType result, ArgumentType1 argument1, ArgumentType2 argument2, ArgumentType3 argument3) : 
CallSlowPathGenerator<JumpType, FunctionType, ResultType>( - from, jit, function, spillMode, result) + from, jit, function, spillMode, requirement, result) , m_argument1(argument1) , m_argument2(argument2) , m_argument3(argument3) @@ -262,10 +278,10 @@ class CallResultAndFourArgumentsSlowPathGenerator public: CallResultAndFourArgumentsSlowPathGenerator( JumpType from, SpeculativeJIT* jit, FunctionType function, - SpillRegistersMode spillMode, ResultType result, ArgumentType1 argument1, + SpillRegistersMode spillMode, ExceptionCheckRequirement requirement, ResultType result, ArgumentType1 argument1, ArgumentType2 argument2, ArgumentType3 argument3, ArgumentType4 argument4) : CallSlowPathGenerator<JumpType, FunctionType, ResultType>( - from, jit, function, spillMode, result) + from, jit, function, spillMode, requirement, result) , m_argument1(argument1) , m_argument2(argument2) , m_argument3(argument3) @@ -299,11 +315,11 @@ class CallResultAndFiveArgumentsSlowPathGenerator public: CallResultAndFiveArgumentsSlowPathGenerator( JumpType from, SpeculativeJIT* jit, FunctionType function, - SpillRegistersMode spillMode, ResultType result, ArgumentType1 argument1, + SpillRegistersMode spillMode, ExceptionCheckRequirement requirement, ResultType result, ArgumentType1 argument1, ArgumentType2 argument2, ArgumentType3 argument3, ArgumentType4 argument4, ArgumentType5 argument5) : CallSlowPathGenerator<JumpType, FunctionType, ResultType>( - from, jit, function, spillMode, result) + from, jit, function, spillMode, requirement, result) , m_argument1(argument1) , m_argument2(argument2) , m_argument3(argument3) @@ -331,94 +347,77 @@ protected: }; template<typename JumpType, typename FunctionType, typename ResultType> -inline PassOwnPtr<SlowPathGenerator> slowPathCall( +inline std::unique_ptr<SlowPathGenerator> slowPathCall( JumpType from, SpeculativeJIT* jit, FunctionType function, - ResultType result, SpillRegistersMode spillMode = NeedToSpill) + ResultType result, SpillRegistersMode spillMode = NeedToSpill, ExceptionCheckRequirement requirement = ExceptionCheckRequirement::CheckNeeded) { - return adoptPtr( - new CallResultAndNoArgumentsSlowPathGenerator< - JumpType, FunctionType, ResultType>( - from, jit, function, spillMode, result)); + return std::make_unique<CallResultAndNoArgumentsSlowPathGenerator<JumpType, FunctionType, ResultType>>( + from, jit, function, spillMode, requirement, result); } template< typename JumpType, typename FunctionType, typename ResultType, typename ArgumentType1> -inline PassOwnPtr<SlowPathGenerator> slowPathCall( +inline std::unique_ptr<SlowPathGenerator> slowPathCall( JumpType from, SpeculativeJIT* jit, FunctionType function, ResultType result, ArgumentType1 argument1, - SpillRegistersMode spillMode = NeedToSpill) + SpillRegistersMode spillMode = NeedToSpill, ExceptionCheckRequirement requirement = ExceptionCheckRequirement::CheckNeeded) { - return adoptPtr( - new CallResultAndOneArgumentSlowPathGenerator< - JumpType, FunctionType, ResultType, ArgumentType1>( - from, jit, function, spillMode, result, argument1)); + return std::make_unique<CallResultAndOneArgumentSlowPathGenerator<JumpType, FunctionType, ResultType, ArgumentType1>>( + from, jit, function, spillMode, requirement, result, argument1); } template< typename JumpType, typename FunctionType, typename ResultType, typename ArgumentType1, typename ArgumentType2> -inline PassOwnPtr<SlowPathGenerator> slowPathCall( +inline std::unique_ptr<SlowPathGenerator> slowPathCall( JumpType from, SpeculativeJIT* jit, FunctionType 
function, ResultType result, ArgumentType1 argument1, ArgumentType2 argument2, - SpillRegistersMode spillMode = NeedToSpill) + SpillRegistersMode spillMode = NeedToSpill, ExceptionCheckRequirement requirement = ExceptionCheckRequirement::CheckNeeded) { - return adoptPtr( - new CallResultAndTwoArgumentsSlowPathGenerator< - JumpType, FunctionType, ResultType, ArgumentType1, ArgumentType2>( - from, jit, function, spillMode, result, argument1, argument2)); + return std::make_unique<CallResultAndTwoArgumentsSlowPathGenerator<JumpType, FunctionType, ResultType, ArgumentType1, ArgumentType2>>( + from, jit, function, spillMode, requirement, result, argument1, argument2); } template< typename JumpType, typename FunctionType, typename ResultType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3> -inline PassOwnPtr<SlowPathGenerator> slowPathCall( +inline std::unique_ptr<SlowPathGenerator> slowPathCall( JumpType from, SpeculativeJIT* jit, FunctionType function, ResultType result, ArgumentType1 argument1, ArgumentType2 argument2, - ArgumentType3 argument3, SpillRegistersMode spillMode = NeedToSpill) + ArgumentType3 argument3, SpillRegistersMode spillMode = NeedToSpill, ExceptionCheckRequirement requirement = ExceptionCheckRequirement::CheckNeeded) { - return adoptPtr( - new CallResultAndThreeArgumentsSlowPathGenerator< - JumpType, FunctionType, ResultType, ArgumentType1, ArgumentType2, - ArgumentType3>( - from, jit, function, spillMode, result, argument1, argument2, - argument3)); + return std::make_unique<CallResultAndThreeArgumentsSlowPathGenerator<JumpType, FunctionType, ResultType, ArgumentType1, ArgumentType2, + ArgumentType3>>(from, jit, function, spillMode, requirement, result, argument1, argument2, argument3); } template< typename JumpType, typename FunctionType, typename ResultType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3, typename ArgumentType4> -inline PassOwnPtr<SlowPathGenerator> slowPathCall( +inline std::unique_ptr<SlowPathGenerator> slowPathCall( JumpType from, SpeculativeJIT* jit, FunctionType function, ResultType result, ArgumentType1 argument1, ArgumentType2 argument2, ArgumentType3 argument3, ArgumentType4 argument4, - SpillRegistersMode spillMode = NeedToSpill) + SpillRegistersMode spillMode = NeedToSpill, ExceptionCheckRequirement requirement = ExceptionCheckRequirement::CheckNeeded) { - return adoptPtr( - new CallResultAndFourArgumentsSlowPathGenerator< - JumpType, FunctionType, ResultType, ArgumentType1, ArgumentType2, - ArgumentType3, ArgumentType4>( - from, jit, function, spillMode, result, argument1, argument2, - argument3, argument4)); + return std::make_unique<CallResultAndFourArgumentsSlowPathGenerator<JumpType, FunctionType, ResultType, ArgumentType1, ArgumentType2, + ArgumentType3, ArgumentType4>>(from, jit, function, spillMode, requirement, result, argument1, argument2, argument3, argument4); } template< typename JumpType, typename FunctionType, typename ResultType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3, typename ArgumentType4, typename ArgumentType5> -inline PassOwnPtr<SlowPathGenerator> slowPathCall( +inline std::unique_ptr<SlowPathGenerator> slowPathCall( JumpType from, SpeculativeJIT* jit, FunctionType function, ResultType result, ArgumentType1 argument1, ArgumentType2 argument2, ArgumentType3 argument3, ArgumentType4 argument4, ArgumentType5 argument5, - SpillRegistersMode spillMode = NeedToSpill) + SpillRegistersMode spillMode = NeedToSpill, ExceptionCheckRequirement requirement 
= ExceptionCheckRequirement::CheckNeeded) { - return adoptPtr( - new CallResultAndFiveArgumentsSlowPathGenerator< - JumpType, FunctionType, ResultType, ArgumentType1, ArgumentType2, - ArgumentType3, ArgumentType4, ArgumentType5>( - from, jit, function, spillMode, result, argument1, argument2, - argument3, argument4, argument5)); + return std::make_unique<CallResultAndFiveArgumentsSlowPathGenerator<JumpType, FunctionType, ResultType, ArgumentType1, ArgumentType2, + ArgumentType3, ArgumentType4, ArgumentType5>>(from, jit, function, spillMode, requirement, result, argument1, argument2, argument3, + argument4, argument5); } template<typename JumpType, typename DestinationType, typename SourceType, unsigned numberOfAssignments> @@ -451,37 +450,31 @@ private: }; template<typename JumpType, typename DestinationType, typename SourceType, unsigned numberOfAssignments> -inline PassOwnPtr<SlowPathGenerator> slowPathMove( +inline std::unique_ptr<SlowPathGenerator> slowPathMove( JumpType from, SpeculativeJIT* jit, SourceType source[numberOfAssignments], DestinationType destination[numberOfAssignments]) { - return adoptPtr( - new AssigningSlowPathGenerator< - JumpType, DestinationType, SourceType, numberOfAssignments>( - from, jit, destination, source)); + return std::make_unique<AssigningSlowPathGenerator<JumpType, DestinationType, SourceType, numberOfAssignments>>( + from, jit, destination, source); } template<typename JumpType, typename DestinationType, typename SourceType> -inline PassOwnPtr<SlowPathGenerator> slowPathMove( +inline std::unique_ptr<SlowPathGenerator> slowPathMove( JumpType from, SpeculativeJIT* jit, SourceType source, DestinationType destination) { SourceType sourceArray[1] = { source }; DestinationType destinationArray[1] = { destination }; - return adoptPtr( - new AssigningSlowPathGenerator< - JumpType, DestinationType, SourceType, 1>( - from, jit, destinationArray, sourceArray)); + return std::make_unique<AssigningSlowPathGenerator<JumpType, DestinationType, SourceType, 1>>( + from, jit, destinationArray, sourceArray); } template<typename JumpType, typename DestinationType, typename SourceType> -inline PassOwnPtr<SlowPathGenerator> slowPathMove( +inline std::unique_ptr<SlowPathGenerator> slowPathMove( JumpType from, SpeculativeJIT* jit, SourceType source1, DestinationType destination1, SourceType source2, DestinationType destination2) { SourceType sourceArray[2] = { source1, source2 }; DestinationType destinationArray[2] = { destination1, destination2 }; - return adoptPtr( - new AssigningSlowPathGenerator< - JumpType, DestinationType, SourceType, 2>( - from, jit, destinationArray, sourceArray)); + return std::make_unique<AssigningSlowPathGenerator<JumpType, DestinationType, SourceType, 2>>( + from, jit, destinationArray, sourceArray); } } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp index ee64f721f..de2025ada 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011-2016 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,15 +28,31 @@ #if ENABLE(DFG_JIT) -#include "Arguments.h" +#include "BinarySwitch.h" #include "DFGAbstractInterpreterInlines.h" #include "DFGArrayifySlowPathGenerator.h" -#include "DFGBinarySwitch.h" #include "DFGCallArrayAllocatorSlowPathGenerator.h" +#include "DFGCallCreateDirectArgumentsSlowPathGenerator.h" +#include "DFGMayExit.h" +#include "DFGOSRExitFuzz.h" #include "DFGSaneStringGetByValSlowPathGenerator.h" #include "DFGSlowPathGenerator.h" -#include "JSCJSValueInlines.h" +#include "DirectArguments.h" +#include "JITAddGenerator.h" +#include "JITBitAndGenerator.h" +#include "JITBitOrGenerator.h" +#include "JITBitXorGenerator.h" +#include "JITDivGenerator.h" +#include "JITLeftShiftGenerator.h" +#include "JITMulGenerator.h" +#include "JITRightShiftGenerator.h" +#include "JITSubGenerator.h" +#include "JSCInlines.h" +#include "JSEnvironmentRecord.h" +#include "JSGeneratorFunction.h" +#include "JSLexicalEnvironment.h" #include "LinkBuffer.h" +#include "ScopedArguments.h" #include "ScratchRegisterAllocator.h" #include "WriteBarrierBuffer.h" #include <wtf/MathExtras.h> @@ -47,13 +63,13 @@ SpeculativeJIT::SpeculativeJIT(JITCompiler& jit) : m_compileOkay(true) , m_jit(jit) , m_currentNode(0) + , m_lastGeneratedNode(LastNodeType) , m_indexInBlock(0) , m_generationInfo(m_jit.graph().frameRegisterCount()) , m_state(m_jit.graph()) , m_interpreter(m_jit.graph(), m_state) , m_stream(&jit.jitCode()->variableEventStream) , m_minifiedGraph(&jit.jitCode()->minifiedDFG) - , m_isCheckingArgumentTypes(false) { } @@ -84,12 +100,12 @@ void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, if (hasDouble(structure->indexingType()) && numElements < vectorLength) { #if USE(JSVALUE64) - m_jit.move(TrustedImm64(bitwise_cast<int64_t>(QNaN)), scratchGPR); + m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR); for (unsigned i = numElements; i < vectorLength; ++i) m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i)); #else EncodedValueDescriptor value; - value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, QNaN)); + value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN)); for (unsigned i = numElements; i < vectorLength; ++i) { m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); @@ -100,18 +116,102 @@ void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, // I want a slow path that also loads out the storage pointer, and that's // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot // of work for a very small piece of functionality. 
:-/ - addSlowPathGenerator(adoptPtr( - new CallArrayAllocatorSlowPathGenerator( - slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR, - structure, numElements))); + addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>( + slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR, + structure, numElements)); +} + +void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis) +{ + if (inlineCallFrame && !inlineCallFrame->isVarargs()) + m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR); + else { + VirtualRegister argumentCountRegister; + if (!inlineCallFrame) + argumentCountRegister = VirtualRegister(JSStack::ArgumentCount); + else + argumentCountRegister = inlineCallFrame->argumentCountRegister; + m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR); + if (!includeThis) + m_jit.sub32(TrustedImm32(1), lengthGPR); + } +} + +void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis) +{ + emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis); +} + +void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR) +{ + if (origin.inlineCallFrame) { + if (origin.inlineCallFrame->isClosureCall) { + m_jit.loadPtr( + JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()), + calleeGPR); + } else { + m_jit.move( + TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()), + calleeGPR); + } + } else + m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR); +} + +void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR) +{ + m_jit.addPtr( + TrustedImm32( + JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))), + GPRInfo::callFrameRegister, startGPR); +} + +MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck() +{ + if (!doOSRExitFuzzing()) + return MacroAssembler::Jump(); + + MacroAssembler::Jump result; + + m_jit.pushToSave(GPRInfo::regT0); + m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0); + m_jit.add32(TrustedImm32(1), GPRInfo::regT0); + m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks); + unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter(); + unsigned at = Options::fireOSRExitFuzzAt(); + if (at || atOrAfter) { + unsigned threshold; + MacroAssembler::RelationalCondition condition; + if (atOrAfter) { + threshold = atOrAfter; + condition = MacroAssembler::Below; + } else { + threshold = at; + condition = MacroAssembler::NotEqual; + } + MacroAssembler::Jump ok = m_jit.branch32( + condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold)); + m_jit.popToRestore(GPRInfo::regT0); + result = m_jit.jump(); + ok.link(&m_jit); + } + m_jit.popToRestore(GPRInfo::regT0); + + return result; } void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail) { if (!m_compileOkay) return; - ASSERT(m_isCheckingArgumentTypes || m_canExit); - m_jit.appendExitInfo(jumpToFail); + JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck(); + if (fuzzJump.isSet()) { + JITCompiler::JumpList jumpsToFail; + jumpsToFail.append(fuzzJump); + jumpsToFail.append(jumpToFail); + m_jit.appendExitInfo(jumpsToFail); + } else + m_jit.appendExitInfo(jumpToFail); m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size())); } @@ -119,8 +219,14 @@ void 
SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource { if (!m_compileOkay) return; - ASSERT(m_isCheckingArgumentTypes || m_canExit); - m_jit.appendExitInfo(jumpsToFail); + JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck(); + if (fuzzJump.isSet()) { + JITCompiler::JumpList myJumpsToFail; + myJumpsToFail.append(jumpsToFail); + myJumpsToFail.append(fuzzJump); + m_jit.appendExitInfo(myJumpsToFail); + } else + m_jit.appendExitInfo(jumpsToFail); m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size())); } @@ -128,7 +234,6 @@ OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSo { if (!m_compileOkay) return OSRExitJumpPlaceholder(); - ASSERT(m_isCheckingArgumentTypes || m_canExit); unsigned index = m_jit.jitCode()->osrExit.size(); m_jit.appendExitInfo(); m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size())); @@ -137,19 +242,16 @@ OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSo OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse) { - ASSERT(m_isCheckingArgumentTypes || m_canExit); return speculationCheck(kind, jsValueSource, nodeUse.node()); } void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail) { - ASSERT(m_isCheckingArgumentTypes || m_canExit); speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail); } void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail) { - ASSERT(m_isCheckingArgumentTypes || m_canExit); speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail); } @@ -157,7 +259,6 @@ void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource { if (!m_compileOkay) return; - ASSERT(m_isCheckingArgumentTypes || m_canExit); unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery); m_jit.appendExitInfo(jumpToFail); m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex)); @@ -165,7 +266,6 @@ void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery) { - ASSERT(m_isCheckingArgumentTypes || m_canExit); speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery); } @@ -173,7 +273,6 @@ void SpeculativeJIT::emitInvalidationPoint(Node* node) { if (!m_compileOkay) return; - ASSERT(m_canExit); OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList()); m_jit.jitCode()->appendOSRExit(OSRExit( UncountableInvalidation, JSValueSource(), @@ -186,24 +285,24 @@ void SpeculativeJIT::emitInvalidationPoint(Node* node) void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node) { - ASSERT(m_isCheckingArgumentTypes || m_canExit); if (!m_compileOkay) return; speculationCheck(kind, jsValueRegs, node, m_jit.jump()); m_compileOkay = false; + if (verboseCompilationEnabled()) + dataLog("Bailing compilation.\n"); } void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse) { - 
ASSERT(m_isCheckingArgumentTypes || m_canExit); terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node()); } -void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail) +void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind) { ASSERT(needsTypeCheck(edge, typesPassedThrough)); m_interpreter.filter(edge, typesPassedThrough); - speculationCheck(BadType, source, edge.node(), jumpToFail); + speculationCheck(exitKind, source, edge.node(), jumpToFail); } RegisterSet SpeculativeJIT::usedRegisters() @@ -221,20 +320,22 @@ RegisterSet SpeculativeJIT::usedRegisters() result.set(fpr); } - result.merge(RegisterSet::specialRegisters()); + result.merge(RegisterSet::stubUnavailableRegisters()); return result; } -void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator) +void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator) { - m_slowPathGenerators.append(slowPathGenerator); + m_slowPathGenerators.append(WTFMove(slowPathGenerator)); } -void SpeculativeJIT::runSlowPathGenerators() +void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder) { - for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i) + for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i) { + pcToCodeOriginMapBuilder.appendItem(m_jit.label(), m_slowPathGenerators[i]->origin().semantic); m_slowPathGenerators[i]->generate(this); + } } // On Windows we need to wrap fmod; on other platforms we can call it directly. @@ -297,18 +398,20 @@ SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spil ASSERT(info.gpr() == source); ASSERT(isJSInt32(info.registerFormat())); if (node->hasConstant()) { - ASSERT(isInt32Constant(node)); + ASSERT(node->isInt32Constant()); fillAction = SetInt32Constant; } else fillAction = Load32Payload; } else if (registerFormat == DataFormatBoolean) { #if USE(JSVALUE64) RELEASE_ASSERT_NOT_REACHED(); +#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE) fillAction = DoNothingForFill; +#endif #elif USE(JSVALUE32_64) ASSERT(info.gpr() == source); if (node->hasConstant()) { - ASSERT(isBooleanConstant(node)); + ASSERT(node->isBooleanConstant()); fillAction = SetBooleanConstant; } else fillAction = Load32Payload; @@ -316,8 +419,8 @@ SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spil } else if (registerFormat == DataFormatCell) { ASSERT(info.gpr() == source); if (node->hasConstant()) { - JSValue value = valueOfJSConstant(node); - ASSERT_UNUSED(value, value.isCell()); + DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant()); + node->asCell(); // To get the assertion. fillAction = SetCellConstant; } else { #if USE(JSVALUE64) @@ -332,8 +435,6 @@ SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spil } else if (registerFormat == DataFormatInt52) { if (node->hasConstant()) fillAction = SetInt52Constant; - else if (isJSInt32(info.spillFormat()) || info.spillFormat() == DataFormatJS) - fillAction = Load32PayloadConvertToInt52; else if (info.spillFormat() == DataFormatInt52) fillAction = Load64; else if (info.spillFormat() == DataFormatStrictInt52) @@ -341,17 +442,14 @@ SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spil else if (info.spillFormat() == DataFormatNone) fillAction = Load64; else { - // Should never happen. 
Anything that qualifies as an int32 will never - // be turned into a cell (immediate spec fail) or a double (to-double - // conversions involve a separate node). RELEASE_ASSERT_NOT_REACHED(); +#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE) fillAction = Load64; // Make GCC happy. +#endif } } else if (registerFormat == DataFormatStrictInt52) { if (node->hasConstant()) fillAction = SetStrictInt52Constant; - else if (isJSInt32(info.spillFormat()) || info.spillFormat() == DataFormatJS) - fillAction = Load32PayloadSignExtend; else if (info.spillFormat() == DataFormatInt52) fillAction = Load64ShiftInt52Right; else if (info.spillFormat() == DataFormatStrictInt52) @@ -359,26 +457,23 @@ SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spil else if (info.spillFormat() == DataFormatNone) fillAction = Load64; else { - // Should never happen. Anything that qualifies as an int32 will never - // be turned into a cell (immediate spec fail) or a double (to-double - // conversions involve a separate node). RELEASE_ASSERT_NOT_REACHED(); +#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE) fillAction = Load64; // Make GCC happy. +#endif } } else { ASSERT(registerFormat & DataFormatJS); #if USE(JSVALUE64) ASSERT(info.gpr() == source); if (node->hasConstant()) { - if (valueOfJSConstant(node).isCell()) + if (node->isCellConstant()) fillAction = SetTrustedJSConstant; + else fillAction = SetJSConstant; } else if (info.spillFormat() == DataFormatInt32) { ASSERT(registerFormat == DataFormatJSInt32); fillAction = Load32PayloadBoxInt; - } else if (info.spillFormat() == DataFormatDouble) { - ASSERT(registerFormat == DataFormatJSDouble); - fillAction = LoadDoubleBoxDouble; } else fillAction = Load64; #else @@ -432,18 +527,16 @@ SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spil #if USE(JSVALUE64) if (node->hasConstant()) { - ASSERT(isNumberConstant(node)); + node->asNumber(); // To get the assertion. fillAction = SetDoubleConstant; - } else if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) { - // it was already spilled previously and not as a double, which means we need unboxing. - ASSERT(info.spillFormat() & DataFormatJS); - fillAction = LoadJSUnboxDouble; - } else + } else { + ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble); fillAction = LoadDouble; + } #elif USE(JSVALUE32_64) - ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble); + ASSERT(info.registerFormat() == DataFormatDouble); if (node->hasConstant()) { - ASSERT(isNumberConstant(node)); + node->asNumber(); // To get the assertion. 
fillAction = SetDoubleConstant; } else fillAction = LoadDouble; @@ -488,21 +581,21 @@ void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTr case DoNothingForFill: break; case SetInt32Constant: - m_jit.move(Imm32(valueOfInt32Constant(plan.node())), plan.gpr()); + m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr()); break; #if USE(JSVALUE64) case SetInt52Constant: - m_jit.move(Imm64(valueOfJSConstant(plan.node()).asMachineInt() << JSValue::int52ShiftAmount), plan.gpr()); + m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr()); break; case SetStrictInt52Constant: - m_jit.move(Imm64(valueOfJSConstant(plan.node()).asMachineInt()), plan.gpr()); + m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr()); break; #endif // USE(JSVALUE64) case SetBooleanConstant: - m_jit.move(TrustedImm32(valueOfBooleanConstant(plan.node())), plan.gpr()); + m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr()); break; case SetCellConstant: - m_jit.move(TrustedImmPtr(valueOfJSConstant(plan.node()).asCell()), plan.gpr()); + m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr()); break; #if USE(JSVALUE64) case SetTrustedJSConstant: @@ -512,7 +605,7 @@ void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTr m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr()); break; case SetDoubleConstant: - m_jit.move(Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(plan.node()))), canTrample); + m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample); m_jit.move64ToDouble(canTrample, plan.fpr()); break; case Load32PayloadBoxInt: @@ -528,20 +621,12 @@ void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTr m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr()); m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr()); break; - case LoadDoubleBoxDouble: - m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr()); - m_jit.sub64(GPRInfo::tagTypeNumberRegister, plan.gpr()); - break; - case LoadJSUnboxDouble: - m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), canTrample); - unboxDouble(canTrample, plan.fpr()); - break; #else case SetJSConstantTag: - m_jit.move(Imm32(valueOfJSConstant(plan.node()).tag()), plan.gpr()); + m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr()); break; case SetJSConstantPayload: - m_jit.move(Imm32(valueOfJSConstant(plan.node()).payload()), plan.gpr()); + m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr()); break; case SetInt32Tag: m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr()); @@ -553,7 +638,7 @@ void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTr m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr()); break; case SetDoubleConstant: - m_jit.loadDouble(addressOfDoubleConstant(plan.node()), plan.fpr()); + m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr()); break; #endif case Load32Tag: @@ -591,8 +676,10 @@ JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, A switch (arrayMode.arrayClass()) { case Array::OriginalArray: { CRASH(); +#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE) JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG. 
return result; +#endif } case Array::Array: @@ -629,6 +716,9 @@ JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGP case Array::Contiguous: return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape); + case Array::Undecided: + return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape); + case Array::ArrayStorage: case Array::SlowPutArrayStorage: { ASSERT(!arrayMode.isJSArrayWithOriginalStructure()); @@ -688,19 +778,19 @@ void SpeculativeJIT::checkArray(Node* node) const ClassInfo* expectedClassInfo = 0; switch (node->arrayMode().type()) { + case Array::AnyTypedArray: case Array::String: RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:) break; case Array::Int32: case Array::Double: case Array::Contiguous: + case Array::Undecided: case Array::ArrayStorage: case Array::SlowPutArrayStorage: { GPRTemporary temp(this); GPRReg tempGPR = temp.gpr(); - m_jit.loadPtr( - MacroAssembler::Address(baseReg, JSCell::structureOffset()), tempGPR); - m_jit.load8(MacroAssembler::Address(tempGPR, Structure::indexingTypeOffset()), tempGPR); + m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR); speculationCheck( BadIndexingType, JSValueSource::unboxedCell(baseReg), 0, jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode())); @@ -708,19 +798,27 @@ void SpeculativeJIT::checkArray(Node* node) noResult(m_currentNode); return; } - case Array::Arguments: - expectedClassInfo = Arguments::info(); - break; + case Array::DirectArguments: + speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType); + noResult(m_currentNode); + return; + case Array::ScopedArguments: + speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType); + noResult(m_currentNode); + return; default: - expectedClassInfo = classInfoForType(node->arrayMode().typedArrayType()); - break; + speculateCellTypeWithoutTypeFiltering( + node->child1(), baseReg, + typeForTypedArrayType(node->arrayMode().typedArrayType())); + noResult(m_currentNode); + return; } RELEASE_ASSERT(expectedClassInfo); GPRTemporary temp(this); - m_jit.loadPtr( - MacroAssembler::Address(baseReg, JSCell::structureOffset()), temp.gpr()); + GPRTemporary temp2(this); + m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr()); speculationCheck( BadType, JSValueSource::unboxedCell(baseReg), node, m_jit.branchPtr( @@ -750,22 +848,19 @@ void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg) MacroAssembler::JumpList slowPath; if (node->op() == ArrayifyToStructure) { - slowPath.append(m_jit.branchWeakPtr( + slowPath.append(m_jit.branchWeakStructure( JITCompiler::NotEqual, - JITCompiler::Address(baseReg, JSCell::structureOffset()), + JITCompiler::Address(baseReg, JSCell::structureIDOffset()), node->structure())); } else { - m_jit.loadPtr( - MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR); - m_jit.load8( - MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), tempGPR); + MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR); slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode())); } - addSlowPathGenerator(adoptPtr(new ArrayifySlowPathGenerator( - slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR))); + addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>( + slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR)); noResult(m_currentNode); } @@ -849,13 +944,10 @@ void SpeculativeJIT::compileIn(Node* 
node) { SpeculateCellOperand base(this, node->child2()); GPRReg baseGPR = base.gpr(); - - if (isConstant(node->child1().node())) { - JSString* string = - jsDynamicCast<JSString*>(valueOfJSConstant(node->child1().node())); - if (string && string->tryGetValueImpl() - && string->tryGetValueImpl()->isIdentifier()) { - StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(); + + if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) { + if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) { + StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In); GPRTemporary result(this); GPRReg resultGPR = result.gpr(); @@ -865,36 +957,37 @@ void SpeculativeJIT::compileIn(Node* node) MacroAssembler::PatchableJump jump = m_jit.patchableJump(); MacroAssembler::Label done = m_jit.label(); - OwnPtr<SlowPathGenerator> slowPath = slowPathCall( + // Since this block is executed only when the result of string->tryGetValueImpl() is atomic, + // we can cast it to const AtomicStringImpl* safely. + auto slowPath = slowPathCall( jump.m_jump, this, operationInOptimize, JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR, - string->tryGetValueImpl()); + static_cast<const AtomicStringImpl*>(string->tryGetValueImpl())); - stubInfo->codeOrigin = node->codeOrigin; + stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic); + stubInfo->codeOrigin = node->origin.semantic; stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR); stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR); +#if USE(JSVALUE32_64) + stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg); + stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg); +#endif stubInfo->patch.usedRegisters = usedRegisters(); - stubInfo->patch.registersFlushed = false; - + m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo)); - addSlowPathGenerator(slowPath.release()); - + addSlowPathGenerator(WTFMove(slowPath)); + base.use(); - -#if USE(JSVALUE64) - jsValueResult( - resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly); -#else - booleanResult(resultGPR, node, UseChildrenCalledExplicitly); -#endif + + blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly); return; } } - + JSValueOperand key(this, node->child1()); JSValueRegs regs = key.jsValueRegs(); - GPRResult result(this); + GPRFlushedCallResult result(this); GPRReg resultGPR = result.gpr(); base.use(); @@ -904,11 +997,8 @@ void SpeculativeJIT::compileIn(Node* node) callOperation( operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)), baseGPR, regs); -#if USE(JSVALUE64) - jsValueResult(resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly); -#else - booleanResult(resultGPR, node, UseChildrenCalledExplicitly); -#endif + m_jit.exceptionCheck(); + blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly); } bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction) @@ -1042,6 +1132,29 @@ GPRTemporary::GPRTemporary( } #endif // USE(JSVALUE32_64) +JSValueRegsTemporary::JSValueRegsTemporary() { } + +JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit) +#if USE(JSVALUE64) + : m_gpr(jit) +#else + : m_payloadGPR(jit) + , m_tagGPR(jit) +#endif +{ +} + +JSValueRegsTemporary::~JSValueRegsTemporary() { } + +JSValueRegs JSValueRegsTemporary::regs() +{ +#if USE(JSVALUE64) + return JSValueRegs(m_gpr.gpr()); +#else + return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr()); +#endif +} + void 
GPRTemporary::adopt(GPRTemporary& other) { ASSERT(!m_jit); @@ -1079,6 +1192,8 @@ FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, Spe m_fpr = m_jit->reuse(op1.fpr()); else if (m_jit->canReuse(op2.node())) m_fpr = m_jit->reuse(op2.fpr()); + else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr()) + m_fpr = m_jit->reuse(op1.fpr()); else m_fpr = m_jit->fprAllocate(); } @@ -1097,8 +1212,8 @@ FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1) void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition) { - BasicBlock* taken = branchNode->takenBlock(); - BasicBlock* notTaken = branchNode->notTakenBlock(); + BasicBlock* taken = branchNode->branchData()->taken.block; + BasicBlock* notTaken = branchNode->branchData()->notTaken.block; SpeculateDoubleOperand op1(this, node->child1()); SpeculateDoubleOperand op2(this, node->child2()); @@ -1109,8 +1224,8 @@ void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, J void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode) { - BasicBlock* taken = branchNode->takenBlock(); - BasicBlock* notTaken = branchNode->notTakenBlock(); + BasicBlock* taken = branchNode->branchData()->taken.block; + BasicBlock* notTaken = branchNode->branchData()->notTaken.block; MacroAssembler::RelationalCondition condition = MacroAssembler::Equal; @@ -1130,52 +1245,33 @@ void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode) if (masqueradesAsUndefinedWatchpointIsStillValid()) { if (m_state.forNode(node->child1()).m_type & ~SpecObject) { speculationCheck( - BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), - m_jit.branchPtr( - MacroAssembler::Equal, - MacroAssembler::Address(op1GPR, JSCell::structureOffset()), - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR)); } if (m_state.forNode(node->child2()).m_type & ~SpecObject) { speculationCheck( - BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), - m_jit.branchPtr( - MacroAssembler::Equal, - MacroAssembler::Address(op2GPR, JSCell::structureOffset()), - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR)); } } else { - GPRTemporary structure(this); - GPRReg structureGPR = structure.gpr(); - - m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR); if (m_state.forNode(node->child1()).m_type & ~SpecObject) { speculationCheck( BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), - m_jit.branchPtr( - MacroAssembler::Equal, - structureGPR, - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + m_jit.branchIfNotObject(op1GPR)); } speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchTest8( MacroAssembler::NonZero, - MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); - m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR); if (m_state.forNode(node->child2()).m_type & ~SpecObject) { speculationCheck( BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), - m_jit.branchPtr( - MacroAssembler::Equal, - structureGPR, - 
MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + m_jit.branchIfNotObject(op2GPR)); } speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchTest8( MacroAssembler::NonZero, - MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); } @@ -1185,8 +1281,8 @@ void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode) void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition) { - BasicBlock* taken = branchNode->takenBlock(); - BasicBlock* notTaken = branchNode->notTakenBlock(); + BasicBlock* taken = branchNode->branchData()->taken.block; + BasicBlock* notTaken = branchNode->branchData()->notTaken.block; // The branch instruction will branch to the taken block. // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through. @@ -1197,14 +1293,14 @@ void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, notTaken = tmp; } - if (isBooleanConstant(node->child1().node())) { - bool imm = valueOfBooleanConstant(node->child1().node()); + if (node->child1()->isInt32Constant()) { + int32_t imm = node->child1()->asInt32(); SpeculateBooleanOperand op2(this, node->child2()); - branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken); - } else if (isBooleanConstant(node->child2().node())) { + branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken); + } else if (node->child2()->isInt32Constant()) { SpeculateBooleanOperand op1(this, node->child1()); - bool imm = valueOfBooleanConstant(node->child2().node()); - branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken); + int32_t imm = node->child2()->asInt32(); + branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken); } else { SpeculateBooleanOperand op1(this, node->child1()); SpeculateBooleanOperand op2(this, node->child2()); @@ -1216,8 +1312,8 @@ void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition) { - BasicBlock* taken = branchNode->takenBlock(); - BasicBlock* notTaken = branchNode->notTakenBlock(); + BasicBlock* taken = branchNode->branchData()->taken.block; + BasicBlock* notTaken = branchNode->branchData()->notTaken.block; // The branch instruction will branch to the taken block. // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through. 
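Illustrative aside, not part of the patch: the hunks above replace loads of the Structure pointer (and the string-structure comparison) with direct reads of the cell header — branchIfNotObject plus a branchTest8 on JSCell::typeInfoFlagsOffset() — which drops one dependent load per check. A minimal stand-alone sketch of why, with hypothetical field names and flag bits (these are assumptions for illustration, not JSC's real definitions):

```cpp
#include <cstdint>

// Assumed, simplified layouts; real JSC headers differ.
struct Structure {
    uint8_t typeInfoFlags; // old home of the flags byte
};

struct JSCellHeader {
    uint32_t structureID;  // compressed id (what structureIDOffset() reads)
    uint8_t indexingType;  // duplicated into the cell (indexingTypeOffset())
    uint8_t typeInfoFlags; // duplicated into the cell (typeInfoFlagsOffset())
    Structure* structure;  // stand-in for resolving structureID
};

constexpr uint8_t MasqueradesAsUndefined = 1 << 0; // assumed bit position

// Old-style check: two dependent loads (cell -> Structure -> flags).
inline bool masqueradesOld(const JSCellHeader* cell)
{
    return cell->structure->typeInfoFlags & MasqueradesAsUndefined;
}

// New-style check, matching the patched branchTest8 on the cell itself:
// a single byte load, no Structure dereference.
inline bool masqueradesNew(const JSCellHeader* cell)
{
    return cell->typeInfoFlags & MasqueradesAsUndefined;
}
```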
@@ -1228,13 +1324,13 @@ void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JI notTaken = tmp; } - if (isInt32Constant(node->child1().node())) { - int32_t imm = valueOfInt32Constant(node->child1().node()); + if (node->child1()->isInt32Constant()) { + int32_t imm = node->child1()->asInt32(); SpeculateInt32Operand op2(this, node->child2()); branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken); - } else if (isInt32Constant(node->child2().node())) { + } else if (node->child2()->isInt32Constant()) { SpeculateInt32Operand op1(this, node->child1()); - int32_t imm = valueOfInt32Constant(node->child2().node()); + int32_t imm = node->child2()->asInt32(); branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken); } else { SpeculateInt32Operand op1(this, node->child1()); @@ -1260,10 +1356,10 @@ bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::Relationa if (node->isBinaryUseKind(Int32Use)) compilePeepHoleInt32Branch(node, branchNode, condition); #if USE(JSVALUE64) - else if (node->isBinaryUseKind(MachineIntUse)) + else if (node->isBinaryUseKind(Int52RepUse)) compilePeepHoleInt52Branch(node, branchNode, condition); #endif // USE(JSVALUE64) - else if (node->isBinaryUseKind(NumberUse)) + else if (node->isBinaryUseKind(DoubleRepUse)) compilePeepHoleDoubleBranch(node, branchNode, doubleCondition); else if (node->op() == CompareEq) { if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) { @@ -1272,12 +1368,18 @@ bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::Relationa } if (node->isBinaryUseKind(BooleanUse)) compilePeepHoleBooleanBranch(node, branchNode, condition); + else if (node->isBinaryUseKind(SymbolUse)) + compilePeepHoleSymbolEquality(node, branchNode); else if (node->isBinaryUseKind(ObjectUse)) compilePeepHoleObjectEquality(node, branchNode); - else if (node->child1().useKind() == ObjectUse && node->child2().useKind() == ObjectOrOtherUse) + else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode); - else if (node->child1().useKind() == ObjectOrOtherUse && node->child2().useKind() == ObjectUse) + else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode); + else if (!needsTypeCheck(node->child1(), SpecOther)) + nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode); + else if (!needsTypeCheck(node->child2(), SpecOther)) + nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode); else { nonSpeculativePeepholeBranch(node, branchNode, condition, operation); return true; @@ -1317,10 +1419,12 @@ void SpeculativeJIT::compileMovHint(Node* node) m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal())); } -void SpeculativeJIT::bail() +void SpeculativeJIT::bail(AbortReason reason) { + if (verboseCompilationEnabled()) + dataLog("Bailing compilation.\n"); m_compileOkay = true; - m_jit.breakpoint(); + m_jit.abortWithReason(reason, m_lastGeneratedNode); clearGenerationInfo(); } @@ -1335,29 +1439,26 @@ void SpeculativeJIT::compileCurrentBlock() m_jit.blockHeads()[m_block->index] = m_jit.label(); - if (!m_block->cfaHasVisited) { + if (!m_block->intersectionOfCFAHasVisited) { // Don't generate code for basic blocks that are unreachable according to CFA. // But to be sure that nobody has generated a jump to this block, drop in a // breakpoint here. 
- m_jit.breakpoint(); + m_jit.abortWithReason(DFGUnreachableBasicBlock); return; } m_stream->appendAndLog(VariableEvent::reset()); m_jit.jitAssertHasValidCallFrame(); + m_jit.jitAssertTagsInPlace(); + m_jit.jitAssertArgumentCountSane(); - for (size_t i = 0; i < m_block->variablesAtHead.numberOfArguments(); ++i) { - m_stream->appendAndLog( - VariableEvent::setLocal( - virtualRegisterForArgument(i), virtualRegisterForArgument(i), DataFormatJS)); - } - m_state.reset(); m_state.beginBasicBlock(m_block); - for (size_t i = 0; i < m_block->variablesAtHead.numberOfLocals(); ++i) { - Node* node = m_block->variablesAtHead.local(i); + for (size_t i = m_block->variablesAtHead.size(); i--;) { + int operand = m_block->variablesAtHead.operandForIndex(i); + Node* node = m_block->variablesAtHead[i]; if (!node) continue; // No need to record dead SetLocal's. @@ -1365,89 +1466,64 @@ void SpeculativeJIT::compileCurrentBlock() DataFormat format; if (!node->refCount()) continue; // No need to record dead SetLocal's. - else - format = dataFormatFor(variable->flushFormat()); + format = dataFormatFor(variable->flushFormat()); m_stream->appendAndLog( - VariableEvent::setLocal(virtualRegisterForLocal(i), variable->machineLocal(), format)); + VariableEvent::setLocal( + VirtualRegister(operand), + variable->machineLocal(), + format)); } - - m_codeOriginForExitTarget = CodeOrigin(); - m_codeOriginForExitProfile = CodeOrigin(); + + m_origin = NodeOrigin(); for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) { m_currentNode = m_block->at(m_indexInBlock); - // We may have his a contradiction that the CFA was aware of but that the JIT + // We may have hit a contradiction that the CFA was aware of but that the JIT // didn't cause directly. if (!m_state.isValid()) { - bail(); + bail(DFGBailedAtTopOfBlock); return; } - - m_canExit = m_currentNode->canExit(); - bool shouldExecuteEffects = m_interpreter.startExecuting(m_currentNode); + + m_interpreter.startExecuting(); m_jit.setForNode(m_currentNode); - m_codeOriginForExitTarget = m_currentNode->codeOriginForExitTarget; - m_codeOriginForExitProfile = m_currentNode->codeOrigin; - if (!m_currentNode->shouldGenerate()) { - switch (m_currentNode->op()) { - case JSConstant: - m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode)); - break; - - case WeakJSConstant: - m_jit.addWeakReference(m_currentNode->weakConstant()); - m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode)); - break; - - case SetLocal: - RELEASE_ASSERT_NOT_REACHED(); - break; - - case MovHint: - compileMovHint(m_currentNode); - break; - - case ZombieHint: { - recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead); - break; - } + m_origin = m_currentNode->origin; + if (validationEnabled()) + m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits; + m_lastGeneratedNode = m_currentNode->op(); + + ASSERT(m_currentNode->shouldGenerate()); + + if (verboseCompilationEnabled()) { + dataLogF( + "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x", + (int)m_currentNode->index(), + m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset()); + dataLog("\n"); + } - default: - if (belongsInMinifiedGraph(m_currentNode->op())) - m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode)); - break; - } - } else { - - if (verboseCompilationEnabled()) { - dataLogF( - "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x", - (int)m_currentNode->index(), - m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset()); - 
dataLog("\n"); - } - - compile(m_currentNode); + if (Options::validateDFGExceptionHandling() && mayExit(m_jit.graph(), m_currentNode) != DoesNotExit) + m_jit.jitReleaseAssertNoException(); + + m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.label(), m_origin.semantic); + compile(m_currentNode); + + if (belongsInMinifiedGraph(m_currentNode->op())) + m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode)); + #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION) - m_jit.clearRegisterAllocationOffsets(); + m_jit.clearRegisterAllocationOffsets(); #endif - - if (!m_compileOkay) { - bail(); - return; - } - - if (belongsInMinifiedGraph(m_currentNode->op())) { - m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode)); - noticeOSRBirth(m_currentNode); - } + + if (!m_compileOkay) { + bail(DFGBailedAtEndOfNode); + return; } // Make sure that the abstract state is rematerialized for the next node. - if (shouldExecuteEffects) - m_interpreter.executeEffects(m_indexInBlock); + m_interpreter.executeEffects(m_indexInBlock); } // Perform the most basic verification that children have been used correctly. @@ -1464,9 +1540,7 @@ void SpeculativeJIT::compileCurrentBlock() void SpeculativeJIT::checkArgumentTypes() { ASSERT(!m_currentNode); - m_isCheckingArgumentTypes = true; - m_codeOriginForExitTarget = CodeOrigin(0); - m_codeOriginForExitProfile = CodeOrigin(0); + m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true); for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) { Node* node = m_jit.graph().m_arguments[i]; @@ -1529,13 +1603,14 @@ void SpeculativeJIT::checkArgumentTypes() } #endif } - m_isCheckingArgumentTypes = false; + + m_origin = NodeOrigin(); } bool SpeculativeJIT::compile() { checkArgumentTypes(); - + ASSERT(!m_currentNode); for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) { m_jit.setForBlockIndex(blockIndex); @@ -1573,6 +1648,15 @@ void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer) m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer); } ASSERT(osrEntryIndex == m_osrEntryHeads.size()); + + if (verboseCompilationEnabled()) { + DumpContext dumpContext; + dataLog("OSR Entries:\n"); + for (OSREntryData& entryData : m_jit.jitCode()->osrEntry) + dataLog(" ", inContext(entryData, &dumpContext), "\n"); + if (!dumpContext.isEmpty()) + dumpContext.dump(WTF::dataFile()); + } } void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property) @@ -1747,15 +1831,24 @@ void SpeculativeJIT::compileGetByValOnString(Node* node) m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg); #endif - JSGlobalObject* globalObject = m_jit.globalObjectFor(node->codeOrigin); + JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic); if (globalObject->stringPrototypeChainIsSane()) { + // FIXME: This could be captured using a Speculation mode that means "out-of-bounds + // loads return a trivial value". Something like SaneChainOutOfBounds. This should + // speculate that we don't take negative out-of-bounds, or better yet, it should rely + // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative + // indexed properties either. 
+ // https://bugs.webkit.org/show_bug.cgi?id=144668 + m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet()); + m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet()); + #if USE(JSVALUE64) - addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator( - outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg))); + addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>( + outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg)); #else - addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator( + addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>( outOfBounds, this, JSValueRegs(resultTagReg, scratchReg), - baseReg, propertyReg))); + baseReg, propertyReg)); #endif } else { #if USE(JSVALUE64) @@ -1782,7 +1875,27 @@ void SpeculativeJIT::compileGetByValOnString(Node* node) void SpeculativeJIT::compileFromCharCode(Node* node) { - SpeculateStrictInt32Operand property(this, node->child1()); + Edge& child = node->child1(); + if (child.useKind() == UntypedUse) { + JSValueOperand opr(this, child); + JSValueRegs oprRegs = opr.jsValueRegs(); +#if USE(JSVALUE64) + GPRTemporary result(this); + JSValueRegs resultRegs = JSValueRegs(result.gpr()); +#else + GPRTemporary resultTag(this); + GPRTemporary resultPayload(this); + JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr()); +#endif + flushRegisters(); + callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs); + m_jit.exceptionCheck(); + + jsValueResult(resultRegs, node); + return; + } + + SpeculateStrictInt32Operand property(this, child); GPRReg propertyReg = property.gpr(); GPRTemporary smallStrings(this); GPRTemporary scratch(this); @@ -1817,16 +1930,13 @@ GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node) case DataFormatJSCell: case DataFormatJS: case DataFormatJSBoolean: + case DataFormatJSDouble: return GeneratedOperandJSValue; case DataFormatJSInt32: case DataFormatInt32: return GeneratedOperandInteger; - case DataFormatJSDouble: - case DataFormatDouble: - return GeneratedOperandDouble; - default: RELEASE_ASSERT_NOT_REACHED(); return GeneratedOperandTypeUnknown; @@ -1836,16 +1946,8 @@ GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node) void SpeculativeJIT::compileValueToInt32(Node* node) { switch (node->child1().useKind()) { - case Int32Use: { - SpeculateInt32Operand op1(this, node->child1()); - GPRTemporary result(this, Reuse, op1); - m_jit.move(op1.gpr(), result.gpr()); - int32Result(result.gpr(), node, op1.format()); - return; - } - #if USE(JSVALUE64) - case MachineIntUse: { + case Int52RepUse: { SpeculateStrictInt52Operand op1(this, node->child1()); GPRTemporary result(this, Reuse, op1); GPRReg op1GPR = op1.gpr(); @@ -1855,6 +1957,19 @@ void SpeculativeJIT::compileValueToInt32(Node* node) return; } #endif // USE(JSVALUE64) + + case DoubleRepUse: { + GPRTemporary result(this); + SpeculateDoubleOperand op1(this, node->child1()); + FPRReg fpr = op1.fpr(); + GPRReg gpr = result.gpr(); + JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed); + + addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded)); + + int32Result(gpr, node); + return; + } case NumberUse: case NotCellUse: { @@ -1866,18 +1981,6 @@ void 
SpeculativeJIT::compileValueToInt32(Node* node) int32Result(result.gpr(), node, op1.format()); return; } - case GeneratedOperandDouble: { - GPRTemporary result(this); - SpeculateDoubleOperand op1(this, node->child1(), ManualOperandSpeculation); - FPRReg fpr = op1.fpr(); - GPRReg gpr = result.gpr(); - JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed); - - addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr)); - - int32Result(gpr, node); - return; - } case GeneratedOperandJSValue: { GPRTemporary result(this); #if USE(JSVALUE64) @@ -1893,16 +1996,14 @@ void SpeculativeJIT::compileValueToInt32(Node* node) if (node->child1().useKind() == NumberUse) { DFG_TYPE_CHECK( - JSValueRegs(gpr), node->child1(), SpecFullNumber, + JSValueRegs(gpr), node->child1(), SpecBytecodeNumber, m_jit.branchTest64( MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister)); } else { JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister); DFG_TYPE_CHECK( - JSValueRegs(gpr), node->child1(), ~SpecCell, - m_jit.branchTest64( - JITCompiler::Zero, gpr, GPRInfo::tagMaskRegister)); + JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr))); // It's not a cell: so true turns into 1 and all else turns into 0. m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr); @@ -1912,8 +2013,7 @@ void SpeculativeJIT::compileValueToInt32(Node* node) } // First, if we get here we have a double encoded as a JSValue - m_jit.move(gpr, resultGpr); - unboxDouble(resultGpr, fpr); + unboxDouble(gpr, resultGpr, fpr); silentSpillAllRegisters(resultGpr); callOperation(toInt32, resultGpr, fpr); @@ -1949,7 +2049,7 @@ void SpeculativeJIT::compileValueToInt32(Node* node) if (node->child1().useKind() == NumberUse) { DFG_TYPE_CHECK( - JSValueRegs(tagGPR, payloadGPR), node->child1(), SpecFullNumber, + op1.jsValueRegs(), node->child1(), SpecBytecodeNumber, m_jit.branch32( MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag))); @@ -1957,9 +2057,8 @@ void SpeculativeJIT::compileValueToInt32(Node* node) JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag)); DFG_TYPE_CHECK( - JSValueRegs(tagGPR, payloadGPR), node->child1(), ~SpecCell, - m_jit.branch32( - JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::CellTag))); + op1.jsValueRegs(), node->child1(), ~SpecCell, + m_jit.branchIfCell(op1.jsValueRegs())); // It's not a cell: so true turns into 1 and all else turns into 0. JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag)); @@ -1998,17 +2097,6 @@ void SpeculativeJIT::compileValueToInt32(Node* node) return; } - case BooleanUse: { - SpeculateBooleanOperand op1(this, node->child1()); - GPRTemporary result(this, Reuse, op1); - - m_jit.move(op1.gpr(), result.gpr()); - m_jit.and32(JITCompiler::TrustedImm32(1), result.gpr()); - - int32Result(result.gpr(), node); - return; - } - default: ASSERT(!m_compileOkay); return; @@ -2069,70 +2157,220 @@ void SpeculativeJIT::compileDoubleAsInt32(Node* node) int32Result(resultGPR, node); } -void SpeculativeJIT::compileInt32ToDouble(Node* node) +void SpeculativeJIT::compileDoubleRep(Node* node) { - ASSERT(!isInt32Constant(node->child1().node())); // This should have been constant folded. 
- - if (isInt32Speculation(m_state.forNode(node->child1()).m_type)) { - SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation); + switch (node->child1().useKind()) { + case RealNumberUse: { + JSValueOperand op1(this, node->child1(), ManualOperandSpeculation); FPRTemporary result(this); - m_jit.convertInt32ToDouble(op1.gpr(), result.fpr()); - doubleResult(result.fpr(), node); + + JSValueRegs op1Regs = op1.jsValueRegs(); + FPRReg resultFPR = result.fpr(); + +#if USE(JSVALUE64) + GPRTemporary temp(this); + GPRReg tempGPR = temp.gpr(); + m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR); +#else + FPRTemporary temp(this); + FPRReg tempFPR = temp.fpr(); + unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR); +#endif + + JITCompiler::Jump done = m_jit.branchDouble( + JITCompiler::DoubleEqual, resultFPR, resultFPR); + + DFG_TYPE_CHECK( + op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs)); + m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR); + + done.link(&m_jit); + + doubleResult(resultFPR, node); return; } - JSValueOperand op1(this, node->child1(), ManualOperandSpeculation); - FPRTemporary result(this); - + case NotCellUse: + case NumberUse: { + ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded. + + SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type; + if (isInt32Speculation(possibleTypes)) { + SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation); + FPRTemporary result(this); + m_jit.convertInt32ToDouble(op1.gpr(), result.fpr()); + doubleResult(result.fpr(), node); + return; + } + + JSValueOperand op1(this, node->child1(), ManualOperandSpeculation); + FPRTemporary result(this); + #if USE(JSVALUE64) - GPRTemporary temp(this); + GPRTemporary temp(this); - GPRReg op1GPR = op1.gpr(); - GPRReg tempGPR = temp.gpr(); - FPRReg resultFPR = result.fpr(); - - JITCompiler::Jump isInteger = m_jit.branch64( - MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister); - - if (needsTypeCheck(node->child1(), SpecFullNumber)) { - typeCheck( - JSValueRegs(op1GPR), node->child1(), SpecFullNumber, - m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister)); - } + GPRReg op1GPR = op1.gpr(); + GPRReg tempGPR = temp.gpr(); + FPRReg resultFPR = result.fpr(); + JITCompiler::JumpList done; + + JITCompiler::Jump isInteger = m_jit.branch64( + MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister); + + if (node->child1().useKind() == NotCellUse) { + JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister); + JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined)); + + static const double zero = 0; + m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR); + + JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull)); + done.append(isNull); + + DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell, + m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool)))); + + JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse)); + static const double one = 1; + m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR); + done.append(m_jit.jump()); + done.append(isFalse); + + isUndefined.link(&m_jit); + static const double NaN = PNaN; + 
m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR); + done.append(m_jit.jump()); + + isNumber.link(&m_jit); + } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) { + typeCheck( + JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber, + m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister)); + } + + unboxDouble(op1GPR, tempGPR, resultFPR); + done.append(m_jit.jump()); - m_jit.move(op1GPR, tempGPR); - unboxDouble(tempGPR, resultFPR); - JITCompiler::Jump done = m_jit.jump(); + isInteger.link(&m_jit); + m_jit.convertInt32ToDouble(op1GPR, resultFPR); + done.link(&m_jit); +#else // USE(JSVALUE64) -> this is the 32_64 case + FPRTemporary temp(this); - isInteger.link(&m_jit); - m_jit.convertInt32ToDouble(op1GPR, resultFPR); - done.link(&m_jit); -#else - FPRTemporary temp(this); + GPRReg op1TagGPR = op1.tagGPR(); + GPRReg op1PayloadGPR = op1.payloadGPR(); + FPRReg tempFPR = temp.fpr(); + FPRReg resultFPR = result.fpr(); + JITCompiler::JumpList done; - GPRReg op1TagGPR = op1.tagGPR(); - GPRReg op1PayloadGPR = op1.payloadGPR(); - FPRReg tempFPR = temp.fpr(); - FPRReg resultFPR = result.fpr(); + JITCompiler::Jump isInteger = m_jit.branch32( + MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag)); + + if (node->child1().useKind() == NotCellUse) { + JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1)); + JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag)); + + static const double zero = 0; + m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR); + + JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag)); + done.append(isNull); + + DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag))); + + JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1)); + static const double one = 1; + m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR); + done.append(m_jit.jump()); + done.append(isFalse); + + isUndefined.link(&m_jit); + static const double NaN = PNaN; + m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR); + done.append(m_jit.jump()); + + isNumber.link(&m_jit); + } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) { + typeCheck( + JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber, + m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag))); + } + + unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR); + done.append(m_jit.jump()); - JITCompiler::Jump isInteger = m_jit.branch32( - MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag)); + isInteger.link(&m_jit); + m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR); + done.link(&m_jit); +#endif // USE(JSVALUE64) - if (needsTypeCheck(node->child1(), SpecFullNumber)) { - typeCheck( - JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecFullNumber, - m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag))); + doubleResult(resultFPR, node); + return; + } + +#if USE(JSVALUE64) + case Int52RepUse: { + SpeculateStrictInt52Operand value(this, node->child1()); + FPRTemporary result(this); + + GPRReg valueGPR = value.gpr(); + FPRReg resultFPR = result.fpr(); + + m_jit.convertInt64ToDouble(valueGPR, resultFPR); + + 
doubleResult(resultFPR, node); + return; + } +#endif // USE(JSVALUE64) + + default: + RELEASE_ASSERT_NOT_REACHED(); + return; + } +} + +void SpeculativeJIT::compileValueRep(Node* node) +{ + switch (node->child1().useKind()) { + case DoubleRepUse: { + SpeculateDoubleOperand value(this, node->child1()); + JSValueRegsTemporary result(this); + + FPRReg valueFPR = value.fpr(); + JSValueRegs resultRegs = result.regs(); + + // It's very tempting to in-place filter the value to indicate that it's not impure NaN + // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was + // subject to a prior SetLocal, filtering the value would imply that the corresponding + // local was purified. + if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN)) + m_jit.purifyNaN(valueFPR); + + boxDouble(valueFPR, resultRegs); + + jsValueResult(resultRegs, node); + return; + } + +#if USE(JSVALUE64) + case Int52RepUse: { + SpeculateStrictInt52Operand value(this, node->child1()); + GPRTemporary result(this); + + GPRReg valueGPR = value.gpr(); + GPRReg resultGPR = result.gpr(); + + boxInt52(valueGPR, resultGPR, DataFormatStrictInt52); + + jsValueResult(resultGPR, node); + return; + } +#endif // USE(JSVALUE64) + + default: + RELEASE_ASSERT_NOT_REACHED(); + return; } - - unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR); - JITCompiler::Jump done = m_jit.jump(); - - isInteger.link(&m_jit); - m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR); - done.link(&m_jit); -#endif - - doubleResult(resultFPR, node); } static double clampDoubleToByte(double d) @@ -2163,12 +2401,12 @@ static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg sou static const double zero = 0; static const double byteMax = 255; static const double half = 0.5; - jit.loadDouble(&zero, scratch); + jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch); MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch); - jit.loadDouble(&byteMax, scratch); + jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch); MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch); - jit.loadDouble(&half, scratch); + jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch); // FIXME: This should probably just use a floating point round! 
// https://bugs.webkit.org/show_bug.cgi?id=72054 jit.addDouble(source, scratch); @@ -2191,10 +2429,12 @@ JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRRe { if (node->op() == PutByValAlias) return JITCompiler::Jump(); - if (JSArrayBufferView* view = m_jit.graph().tryGetFoldableViewForChild1(node)) { + JSArrayBufferView* view = m_jit.graph().tryGetFoldableView( + m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode()); + if (view) { uint32_t length = view->length(); Node* indexNode = m_jit.graph().child(node, 1).node(); - if (m_jit.graph().isInt32Constant(indexNode) && static_cast<uint32_t>(m_jit.graph().valueOfInt32Constant(indexNode)) < length) + if (indexNode->isInt32Constant() && indexNode->asUInt32() < length) return JITCompiler::Jump(); return m_jit.branch32( MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length)); @@ -2233,13 +2473,13 @@ void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType t switch (elementSize(type)) { case 1: if (isSigned(type)) - m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg); + m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg); else m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg); break; case 2: if (isSigned(type)) - m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg); + m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg); else m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg); break; @@ -2290,7 +2530,7 @@ void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg propert GPRReg valueGPR = InvalidGPRReg; if (valueUse->isConstant()) { - JSValue jsValue = valueOfJSConstant(valueUse.node()); + JSValue jsValue = valueUse->asJSValue(); if (!jsValue.isNumber()) { terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); noResult(node); @@ -2323,7 +2563,7 @@ void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg propert } #if USE(JSVALUE64) - case MachineIntUse: { + case Int52RepUse: { SpeculateStrictInt52Operand valueOp(this, valueUse); GPRTemporary scratch(this); GPRReg scratchReg = scratch.gpr(); @@ -2347,7 +2587,7 @@ void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg propert } #endif // USE(JSVALUE64) - case NumberUse: { + case DoubleRepUse: { if (isClamped(type)) { ASSERT(elementSize(type) == 1); SpeculateDoubleOperand valueOp(this, valueUse); @@ -2371,7 +2611,7 @@ void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg propert MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32( fpr, gpr, MacroAssembler::BranchIfTruncateFailed); - addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr)); + addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded)); fixed.link(&m_jit); value.adopt(result); @@ -2390,6 +2630,10 @@ void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg propert ASSERT(valueGPR != base); ASSERT(valueGPR != storageReg); MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property); + if (node->arrayMode().isInBounds() && outOfBounds.isSet()) { + speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds); + 
outOfBounds = MacroAssembler::Jump(); + } switch (elementSize(type)) { case 1: @@ -2439,11 +2683,6 @@ void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType RELEASE_ASSERT_NOT_REACHED(); } - MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, resultReg, resultReg); - static const double NaN = QNaN; - m_jit.loadDouble(&NaN, resultReg); - notNaN.link(&m_jit); - doubleResult(resultReg, node); } @@ -2465,6 +2704,10 @@ void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg prope ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse))); MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property); + if (node->arrayMode().isInBounds() && outOfBounds.isSet()) { + speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds); + outOfBounds = MacroAssembler::Jump(); + } switch (elementSize(type)) { case 4: { @@ -2484,26 +2727,23 @@ void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg prope noResult(node); } -void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg) +void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg) { // Check that prototype is an object. - m_jit.loadPtr(MacroAssembler::Address(prototypeReg, JSCell::structureOffset()), scratchReg); - speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(scratchReg)); + speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg)); // Initialize scratchReg with the value being checked. m_jit.move(valueReg, scratchReg); // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg. MacroAssembler::Label loop(&m_jit); - m_jit.loadPtr(MacroAssembler::Address(scratchReg, JSCell::structureOffset()), scratchReg); + m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg); + m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg); + MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg); #if USE(JSVALUE64) - m_jit.load64(MacroAssembler::Address(scratchReg, Structure::prototypeOffset()), scratchReg); - MacroAssembler::Jump isInstance = m_jit.branch64(MacroAssembler::Equal, scratchReg, prototypeReg); - m_jit.branchTest64(MacroAssembler::Zero, scratchReg, GPRInfo::tagMaskRegister).linkTo(loop, &m_jit); + m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit); #else - m_jit.load32(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), scratchReg); - MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg); - m_jit.branchTest32(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit); + m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit); #endif // No match - result is false. 
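For orientation, the compileInstanceOfForObject() hunk above still emits the same basic loop as before — walk the value's prototype chain, succeed on a pointer match with the prototype operand, fail when the chain ends — it just fetches the Structure via emitLoadStructure and tests "still a cell" with branchIfCell/branchTestPtr. A plain-C++ model of that loop, with assumed simplified types (the real code emits MacroAssembler branches, not calls):

```cpp
// Sketch of the emitted prototype-chain walk; types are illustrative only.
struct JSObjectModel {
    const JSObjectModel* prototype; // nullptr stands in for the null JSValue
};

inline bool instanceOfForObject(const JSObjectModel* value, const JSObjectModel* prototypeObject)
{
    // Walk up the prototype chain of 'value', comparing against 'prototypeObject'.
    for (const JSObjectModel* current = value->prototype; current; current = current->prototype) {
        if (current == prototypeObject)
            return true; // the "isInstance" branch in the generated code
    }
    return false; // fell off the chain (hit null): "No match - result is false"
}
```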
@@ -2524,6 +2764,17 @@ void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg p putResult.link(&m_jit); } +void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node) +{ + SpeculateCellOperand base(this, node->child1()); + + GPRReg baseGPR = base.gpr(); + + speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand()))); + + noResult(node); +} + void SpeculativeJIT::compileInstanceOf(Node* node) { if (node->child1().useKind() == UntypedUse) { @@ -2534,34 +2785,25 @@ void SpeculativeJIT::compileInstanceOf(Node* node) JSValueOperand value(this, node->child1()); SpeculateCellOperand prototype(this, node->child2()); GPRTemporary scratch(this); + GPRTemporary scratch2(this); GPRReg prototypeReg = prototype.gpr(); GPRReg scratchReg = scratch.gpr(); + GPRReg scratch2Reg = scratch2.gpr(); -#if USE(JSVALUE64) - GPRReg valueReg = value.gpr(); - MacroAssembler::Jump isCell = m_jit.branchTest64(MacroAssembler::Zero, valueReg, GPRInfo::tagMaskRegister); - m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg); -#else - GPRReg valueTagReg = value.tagGPR(); - GPRReg valueReg = value.payloadGPR(); - MacroAssembler::Jump isCell = m_jit.branch32(MacroAssembler::Equal, valueTagReg, TrustedImm32(JSValue::CellTag)); - m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg); -#endif + MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs()); + GPRReg valueReg = value.jsValueRegs().payloadGPR(); + moveFalseTo(scratchReg); MacroAssembler::Jump done = m_jit.jump(); isCell.link(&m_jit); - compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg); + compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg); done.link(&m_jit); -#if USE(JSVALUE64) - jsValueResult(scratchReg, node, DataFormatJSBoolean); -#else - booleanResult(scratchReg, node); -#endif + blessedBooleanResult(scratchReg, node); return; } @@ -2569,51 +2811,449 @@ void SpeculativeJIT::compileInstanceOf(Node* node) SpeculateCellOperand prototype(this, node->child2()); GPRTemporary scratch(this); + GPRTemporary scratch2(this); GPRReg valueReg = value.gpr(); GPRReg prototypeReg = prototype.gpr(); GPRReg scratchReg = scratch.gpr(); + GPRReg scratch2Reg = scratch2.gpr(); - compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg); + compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg); + + blessedBooleanResult(scratchReg, node); +} + +template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction> +void SpeculativeJIT::emitUntypedBitOp(Node* node) +{ + Edge& leftChild = node->child1(); + Edge& rightChild = node->child2(); + + if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) { + JSValueOperand left(this, leftChild); + JSValueOperand right(this, rightChild); + JSValueRegs leftRegs = left.jsValueRegs(); + JSValueRegs rightRegs = right.jsValueRegs(); +#if USE(JSVALUE64) + GPRTemporary result(this); + JSValueRegs resultRegs = JSValueRegs(result.gpr()); +#else + GPRTemporary resultTag(this); + GPRTemporary resultPayload(this); + JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr()); +#endif + flushRegisters(); + callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs); + m_jit.exceptionCheck(); + + jsValueResult(resultRegs, node); + return; + } + + Optional<JSValueOperand> left; + 
Optional<JSValueOperand> right; + + JSValueRegs leftRegs; + JSValueRegs rightRegs; #if USE(JSVALUE64) - jsValueResult(scratchReg, node, DataFormatJSBoolean); + GPRTemporary result(this); + JSValueRegs resultRegs = JSValueRegs(result.gpr()); + GPRTemporary scratch(this); + GPRReg scratchGPR = scratch.gpr(); #else - booleanResult(scratchReg, node); + GPRTemporary resultTag(this); + GPRTemporary resultPayload(this); + JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr()); + GPRReg scratchGPR = resultTag.gpr(); #endif + + SnippetOperand leftOperand; + SnippetOperand rightOperand; + + // The snippet generator does not support both operands being constant. If the left + // operand is already const, we'll ignore the right operand's constness. + if (leftChild->isInt32Constant()) + leftOperand.setConstInt32(leftChild->asInt32()); + else if (rightChild->isInt32Constant()) + rightOperand.setConstInt32(rightChild->asInt32()); + + RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst()); + + if (!leftOperand.isConst()) { + left = JSValueOperand(this, leftChild); + leftRegs = left->jsValueRegs(); + } + if (!rightOperand.isConst()) { + right = JSValueOperand(this, rightChild); + rightRegs = right->jsValueRegs(); + } + + SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR); + gen.generateFastPath(m_jit); + + ASSERT(gen.didEmitFastPath()); + gen.endJumpList().append(m_jit.jump()); + + gen.slowPathJumpList().link(&m_jit); + silentSpillAllRegisters(resultRegs); + + if (leftOperand.isConst()) { + leftRegs = resultRegs; + m_jit.moveValue(leftChild->asJSValue(), leftRegs); + } else if (rightOperand.isConst()) { + rightRegs = resultRegs; + m_jit.moveValue(rightChild->asJSValue(), rightRegs); + } + + callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs); + + silentFillAllRegisters(resultRegs); + m_jit.exceptionCheck(); + + gen.endJumpList().link(&m_jit); + jsValueResult(resultRegs, node); } -void SpeculativeJIT::compileAdd(Node* node) +void SpeculativeJIT::compileBitwiseOp(Node* node) { - switch (node->binaryUseKind()) { - case Int32Use: { - ASSERT(!shouldCheckNegativeZero(node->arithMode())); + NodeType op = node->op(); + Edge& leftChild = node->child1(); + Edge& rightChild = node->child2(); + + if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) { + switch (op) { + case BitAnd: + emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node); + return; + case BitOr: + emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node); + return; + case BitXor: + emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node); + return; + default: + RELEASE_ASSERT_NOT_REACHED(); + } + } + + if (leftChild->isInt32Constant()) { + SpeculateInt32Operand op2(this, rightChild); + GPRTemporary result(this, Reuse, op2); + + bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr()); + + int32Result(result.gpr(), node); + + } else if (rightChild->isInt32Constant()) { + SpeculateInt32Operand op1(this, leftChild); + GPRTemporary result(this, Reuse, op1); + + bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr()); + + int32Result(result.gpr(), node); + + } else { + SpeculateInt32Operand op1(this, leftChild); + SpeculateInt32Operand op2(this, rightChild); + GPRTemporary result(this, Reuse, op1, op2); - if (isNumberConstant(node->child1().node())) { - int32_t imm1 = valueOfInt32Constant(node->child1().node()); - SpeculateInt32Operand op2(this, node->child2()); - GPRTemporary result(this); + GPRReg reg1 = op1.gpr(); 
+ GPRReg reg2 = op2.gpr(); + bitOp(op, reg1, reg2, result.gpr()); + + int32Result(result.gpr(), node); + } +} - if (!shouldCheckOverflow(node->arithMode())) { - m_jit.move(op2.gpr(), result.gpr()); - m_jit.add32(Imm32(imm1), result.gpr()); - } else - speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr())); +void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node) +{ + J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift + ? operationValueBitRShift : operationValueBitURShift; + JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift + ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift; - int32Result(result.gpr(), node); + Edge& leftChild = node->child1(); + Edge& rightChild = node->child2(); + + if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) { + JSValueOperand left(this, leftChild); + JSValueOperand right(this, rightChild); + JSValueRegs leftRegs = left.jsValueRegs(); + JSValueRegs rightRegs = right.jsValueRegs(); +#if USE(JSVALUE64) + GPRTemporary result(this); + JSValueRegs resultRegs = JSValueRegs(result.gpr()); +#else + GPRTemporary resultTag(this); + GPRTemporary resultPayload(this); + JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr()); +#endif + flushRegisters(); + callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs); + m_jit.exceptionCheck(); + + jsValueResult(resultRegs, node); + return; + } + + Optional<JSValueOperand> left; + Optional<JSValueOperand> right; + + JSValueRegs leftRegs; + JSValueRegs rightRegs; + + FPRTemporary leftNumber(this); + FPRReg leftFPR = leftNumber.fpr(); + +#if USE(JSVALUE64) + GPRTemporary result(this); + JSValueRegs resultRegs = JSValueRegs(result.gpr()); + GPRTemporary scratch(this); + GPRReg scratchGPR = scratch.gpr(); + FPRReg scratchFPR = InvalidFPRReg; +#else + GPRTemporary resultTag(this); + GPRTemporary resultPayload(this); + JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr()); + GPRReg scratchGPR = resultTag.gpr(); + FPRTemporary fprScratch(this); + FPRReg scratchFPR = fprScratch.fpr(); +#endif + + SnippetOperand leftOperand; + SnippetOperand rightOperand; + + // The snippet generator does not support both operands being constant. If the left + // operand is already const, we'll ignore the right operand's constness. 
+ if (leftChild->isInt32Constant()) + leftOperand.setConstInt32(leftChild->asInt32()); + else if (rightChild->isInt32Constant()) + rightOperand.setConstInt32(rightChild->asInt32()); + + RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst()); + + if (!leftOperand.isConst()) { + left = JSValueOperand(this, leftChild); + leftRegs = left->jsValueRegs(); + } + if (!rightOperand.isConst()) { + right = JSValueOperand(this, rightChild); + rightRegs = right->jsValueRegs(); + } + + JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, + leftFPR, scratchGPR, scratchFPR, shiftType); + gen.generateFastPath(m_jit); + + ASSERT(gen.didEmitFastPath()); + gen.endJumpList().append(m_jit.jump()); + + gen.slowPathJumpList().link(&m_jit); + silentSpillAllRegisters(resultRegs); + + if (leftOperand.isConst()) { + leftRegs = resultRegs; + m_jit.moveValue(leftChild->asJSValue(), leftRegs); + } else if (rightOperand.isConst()) { + rightRegs = resultRegs; + m_jit.moveValue(rightChild->asJSValue(), rightRegs); + } + + callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs); + + silentFillAllRegisters(resultRegs); + m_jit.exceptionCheck(); + + gen.endJumpList().link(&m_jit); + jsValueResult(resultRegs, node); + return; +} + +void SpeculativeJIT::compileShiftOp(Node* node) +{ + NodeType op = node->op(); + Edge& leftChild = node->child1(); + Edge& rightChild = node->child2(); + + if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) { + switch (op) { + case BitLShift: + emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node); return; + case BitRShift: + case BitURShift: + emitUntypedRightShiftBitOp(node); + return; + default: + RELEASE_ASSERT_NOT_REACHED(); } - - if (isNumberConstant(node->child2().node())) { + } + + if (rightChild->isInt32Constant()) { + SpeculateInt32Operand op1(this, leftChild); + GPRTemporary result(this, Reuse, op1); + + shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr()); + + int32Result(result.gpr(), node); + } else { + // Do not allow shift amount to be used as the result, MacroAssembler does not permit this. 
+ SpeculateInt32Operand op1(this, leftChild); + SpeculateInt32Operand op2(this, rightChild); + GPRTemporary result(this, Reuse, op1); + + GPRReg reg1 = op1.gpr(); + GPRReg reg2 = op2.gpr(); + shiftOp(op, reg1, reg2, result.gpr()); + + int32Result(result.gpr(), node); + } +} + +void SpeculativeJIT::compileValueAdd(Node* node) +{ + Edge& leftChild = node->child1(); + Edge& rightChild = node->child2(); + + if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) { + JSValueOperand left(this, leftChild); + JSValueOperand right(this, rightChild); + JSValueRegs leftRegs = left.jsValueRegs(); + JSValueRegs rightRegs = right.jsValueRegs(); +#if USE(JSVALUE64) + GPRTemporary result(this); + JSValueRegs resultRegs = JSValueRegs(result.gpr()); +#else + GPRTemporary resultTag(this); + GPRTemporary resultPayload(this); + JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr()); +#endif + flushRegisters(); + callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs); + m_jit.exceptionCheck(); + + jsValueResult(resultRegs, node); + return; + } + + Optional<JSValueOperand> left; + Optional<JSValueOperand> right; + + JSValueRegs leftRegs; + JSValueRegs rightRegs; + + FPRTemporary leftNumber(this); + FPRTemporary rightNumber(this); + FPRReg leftFPR = leftNumber.fpr(); + FPRReg rightFPR = rightNumber.fpr(); + +#if USE(JSVALUE64) + GPRTemporary result(this); + JSValueRegs resultRegs = JSValueRegs(result.gpr()); + GPRTemporary scratch(this); + GPRReg scratchGPR = scratch.gpr(); + FPRReg scratchFPR = InvalidFPRReg; +#else + GPRTemporary resultTag(this); + GPRTemporary resultPayload(this); + JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr()); + GPRReg scratchGPR = resultTag.gpr(); + FPRTemporary fprScratch(this); + FPRReg scratchFPR = fprScratch.fpr(); +#endif + + SnippetOperand leftOperand(m_state.forNode(leftChild).resultType()); + SnippetOperand rightOperand(m_state.forNode(rightChild).resultType()); + + // The snippet generator does not support both operands being constant. If the left + // operand is already const, we'll ignore the right operand's constness. 
+ if (leftChild->isInt32Constant()) + leftOperand.setConstInt32(leftChild->asInt32()); + else if (rightChild->isInt32Constant()) + rightOperand.setConstInt32(rightChild->asInt32()); + + ASSERT(!leftOperand.isConst() || !rightOperand.isConst()); + + if (!leftOperand.isConst()) { + left = JSValueOperand(this, leftChild); + leftRegs = left->jsValueRegs(); + } + if (!rightOperand.isConst()) { + right = JSValueOperand(this, rightChild); + rightRegs = right->jsValueRegs(); + } + + JITAddGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, + leftFPR, rightFPR, scratchGPR, scratchFPR); + gen.generateFastPath(m_jit); + + ASSERT(gen.didEmitFastPath()); + gen.endJumpList().append(m_jit.jump()); + + gen.slowPathJumpList().link(&m_jit); + + silentSpillAllRegisters(resultRegs); + + if (leftOperand.isConst()) { + leftRegs = resultRegs; + m_jit.moveValue(leftChild->asJSValue(), leftRegs); + } else if (rightOperand.isConst()) { + rightRegs = resultRegs; + m_jit.moveValue(rightChild->asJSValue(), rightRegs); + } + + callOperation(operationValueAdd, resultRegs, leftRegs, rightRegs); + + silentFillAllRegisters(resultRegs); + m_jit.exceptionCheck(); + + gen.endJumpList().link(&m_jit); + jsValueResult(resultRegs, node); + return; +} + +void SpeculativeJIT::compileInstanceOfCustom(Node* node) +{ + // We could do something smarter here but this case is currently super rare and unless + // Symbol.hasInstance becomes popular will likely remain that way. + + JSValueOperand value(this, node->child1()); + SpeculateCellOperand constructor(this, node->child2()); + JSValueOperand hasInstanceValue(this, node->child3()); + GPRTemporary result(this); + + JSValueRegs valueRegs = value.jsValueRegs(); + GPRReg constructorGPR = constructor.gpr(); + JSValueRegs hasInstanceRegs = hasInstanceValue.jsValueRegs(); + GPRReg resultGPR = result.gpr(); + + MacroAssembler::Jump slowCase = m_jit.jump(); + + addSlowPathGenerator(slowPathCall(slowCase, this, operationInstanceOfCustom, resultGPR, valueRegs, constructorGPR, hasInstanceRegs)); + + unblessedBooleanResult(resultGPR, node); +} + +void SpeculativeJIT::compileArithAdd(Node* node) +{ + switch (node->binaryUseKind()) { + case Int32Use: { + ASSERT(!shouldCheckNegativeZero(node->arithMode())); + + if (node->child2()->isInt32Constant()) { SpeculateInt32Operand op1(this, node->child1()); - int32_t imm2 = valueOfInt32Constant(node->child2().node()); - GPRTemporary result(this); - + int32_t imm2 = node->child2()->asInt32(); + if (!shouldCheckOverflow(node->arithMode())) { - m_jit.move(op1.gpr(), result.gpr()); - m_jit.add32(Imm32(imm2), result.gpr()); - } else - speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr())); + GPRTemporary result(this, Reuse, op1); + m_jit.add32(Imm32(imm2), op1.gpr(), result.gpr()); + int32Result(result.gpr(), node); + return; + } + + GPRTemporary result(this); + speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr())); int32Result(result.gpr(), node); return; @@ -2627,14 +3267,9 @@ void SpeculativeJIT::compileAdd(Node* node) GPRReg gpr2 = op2.gpr(); GPRReg gprResult = result.gpr(); - if (!shouldCheckOverflow(node->arithMode())) { - if (gpr1 == gprResult) - m_jit.add32(gpr2, gprResult); - else { - m_jit.move(gpr2, gprResult); - m_jit.add32(gpr1, gprResult); - } - } else { + if (!shouldCheckOverflow(node->arithMode())) + m_jit.add32(gpr1, gpr2, gprResult); + else { MacroAssembler::Jump check = 
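The Int32Use add above either emits a plain add32 or an overflow-checked branchAdd32 feeding a speculation check. What that check guards is simply that the sum still fits in int32; otherwise the value belongs in the double domain. A hedged standalone sketch using the GCC/Clang overflow builtin, not the patch's code:

    #include <cassert>
    #include <cstdint>

    // Fast path keeps the int32 result; the overflow case models the OSR exit
    // by falling back to a double result.
    static bool addInt32Checked(int32_t a, int32_t b, int32_t& result, double& doubleResult)
    {
        int32_t sum;
        if (!__builtin_add_overflow(a, b, &sum)) {   // GCC/Clang builtin
            result = sum;
            return true;
        }
        doubleResult = static_cast<double>(a) + static_cast<double>(b);
        return false;
    }

    int main()
    {
        int32_t i; double d;
        assert(addInt32Checked(1, 2, i, d) && i == 3);
        assert(!addInt32Checked(INT32_MAX, 1, i, d) && d == 2147483648.0);
        return 0;
    }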
m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult); if (gpr1 == gprResult) @@ -2650,7 +3285,7 @@ void SpeculativeJIT::compileAdd(Node* node) } #if USE(JSVALUE64) - case MachineIntUse: { + case Int52RepUse: { ASSERT(shouldCheckOverflow(node->arithMode())); ASSERT(!shouldCheckNegativeZero(node->arithMode())); @@ -2661,8 +3296,7 @@ void SpeculativeJIT::compileAdd(Node* node) SpeculateWhicheverInt52Operand op1(this, node->child1()); SpeculateWhicheverInt52Operand op2(this, node->child2(), op1); GPRTemporary result(this, Reuse, op1); - m_jit.move(op1.gpr(), result.gpr()); - m_jit.add64(op2.gpr(), result.gpr()); + m_jit.add64(op1.gpr(), op2.gpr(), result.gpr()); int52Result(result.gpr(), node, op1.format()); return; } @@ -2679,7 +3313,7 @@ void SpeculativeJIT::compileAdd(Node* node) } #endif // USE(JSVALUE64) - case NumberUse: { + case DoubleRepUse: { SpeculateDoubleOperand op1(this, node->child1()); SpeculateDoubleOperand op2(this, node->child2()); FPRTemporary result(this, op1, op2); @@ -2727,7 +3361,7 @@ void SpeculativeJIT::compileMakeRope(Node* node) GPRReg scratchGPR = scratch.gpr(); JITCompiler::JumpList slowPath; - MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString)); + MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString)); m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR); emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath); @@ -2741,7 +3375,7 @@ void SpeculativeJIT::compileMakeRope(Node* node) if (!ASSERT_DISABLED) { JITCompiler::Jump ok = m_jit.branch32( JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0)); - m_jit.breakpoint(); + m_jit.abortWithReason(DFGNegativeStringLength); ok.link(&m_jit); } for (unsigned i = 1; i < numOpGPRs; ++i) { @@ -2757,7 +3391,7 @@ void SpeculativeJIT::compileMakeRope(Node* node) if (!ASSERT_DISABLED) { JITCompiler::Jump ok = m_jit.branch32( JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0)); - m_jit.breakpoint(); + m_jit.abortWithReason(DFGNegativeStringLength); ok.link(&m_jit); } m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength())); @@ -2779,15 +3413,26 @@ void SpeculativeJIT::compileMakeRope(Node* node) cellResult(resultGPR, node); } +void SpeculativeJIT::compileArithClz32(Node* node) +{ + ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced a Int32 operand."); + SpeculateInt32Operand value(this, node->child1()); + GPRTemporary result(this, Reuse, value); + GPRReg valueReg = value.gpr(); + GPRReg resultReg = result.gpr(); + m_jit.countLeadingZeros32(valueReg, resultReg); + int32Result(resultReg, node); +} + void SpeculativeJIT::compileArithSub(Node* node) { switch (node->binaryUseKind()) { case Int32Use: { ASSERT(!shouldCheckNegativeZero(node->arithMode())); - if (isNumberConstant(node->child2().node())) { + if (node->child2()->isInt32Constant()) { SpeculateInt32Operand op1(this, node->child1()); - int32_t imm2 = valueOfInt32Constant(node->child2().node()); + int32_t imm2 = node->child2()->asInt32(); GPRTemporary result(this); if (!shouldCheckOverflow(node->arithMode())) { @@ -2802,8 +3447,8 @@ void SpeculativeJIT::compileArithSub(Node* node) return; } - if (isNumberConstant(node->child1().node())) { - int32_t imm1 = valueOfInt32Constant(node->child1().node()); + if 
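compileArithClz32 lowers Math.clz32 to a single countLeadingZeros32 instruction. Its semantics, modeled in plain C++ with an illustrative helper name (note the explicit zero case, since the compiler builtin is undefined for 0):

    #include <cassert>
    #include <cstdint>

    // Count leading zero bits of the uint32 view; clz32(0) is defined as 32.
    static int32_t clz32(uint32_t value)
    {
        if (!value)
            return 32;                   // hardware lzcnt also yields 32 here
        return __builtin_clz(value);     // GCC/Clang builtin, undefined for 0, hence the guard
    }

    int main()
    {
        assert(clz32(1) == 31);
        assert(clz32(0x80000000u) == 0);
        assert(clz32(0) == 32);
        return 0;
    }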
(node->child1()->isInt32Constant()) { + int32_t imm1 = node->child1()->asInt32(); SpeculateInt32Operand op2(this, node->child2()); GPRTemporary result(this); @@ -2832,7 +3477,7 @@ void SpeculativeJIT::compileArithSub(Node* node) } #if USE(JSVALUE64) - case MachineIntUse: { + case Int52RepUse: { ASSERT(shouldCheckOverflow(node->arithMode())); ASSERT(!shouldCheckNegativeZero(node->arithMode())); @@ -2861,7 +3506,7 @@ void SpeculativeJIT::compileArithSub(Node* node) } #endif // USE(JSVALUE64) - case NumberUse: { + case DoubleRepUse: { SpeculateDoubleOperand op1(this, node->child1()); SpeculateDoubleOperand op2(this, node->child2()); FPRTemporary result(this, op1); @@ -2873,7 +3518,58 @@ void SpeculativeJIT::compileArithSub(Node* node) doubleResult(result.fpr(), node); return; } - + + case UntypedUse: { + Edge& leftChild = node->child1(); + Edge& rightChild = node->child2(); + + JSValueOperand left(this, leftChild); + JSValueOperand right(this, rightChild); + + JSValueRegs leftRegs = left.jsValueRegs(); + JSValueRegs rightRegs = right.jsValueRegs(); + + FPRTemporary leftNumber(this); + FPRTemporary rightNumber(this); + FPRReg leftFPR = leftNumber.fpr(); + FPRReg rightFPR = rightNumber.fpr(); + +#if USE(JSVALUE64) + GPRTemporary result(this); + JSValueRegs resultRegs = JSValueRegs(result.gpr()); + GPRTemporary scratch(this); + GPRReg scratchGPR = scratch.gpr(); + FPRReg scratchFPR = InvalidFPRReg; +#else + GPRTemporary resultTag(this); + GPRTemporary resultPayload(this); + JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr()); + GPRReg scratchGPR = resultTag.gpr(); + FPRTemporary fprScratch(this); + FPRReg scratchFPR = fprScratch.fpr(); +#endif + + SnippetOperand leftOperand(m_state.forNode(leftChild).resultType()); + SnippetOperand rightOperand(m_state.forNode(rightChild).resultType()); + + JITSubGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, + leftFPR, rightFPR, scratchGPR, scratchFPR); + gen.generateFastPath(m_jit); + + ASSERT(gen.didEmitFastPath()); + gen.endJumpList().append(m_jit.jump()); + + gen.slowPathJumpList().link(&m_jit); + silentSpillAllRegisters(resultRegs); + callOperation(operationValueSub, resultRegs, leftRegs, rightRegs); + silentFillAllRegisters(resultRegs); + m_jit.exceptionCheck(); + + gen.endJumpList().link(&m_jit); + jsValueResult(resultRegs, node); + return; + } + default: RELEASE_ASSERT_NOT_REACHED(); return; @@ -2906,7 +3602,7 @@ void SpeculativeJIT::compileArithNegate(Node* node) } #if USE(JSVALUE64) - case MachineIntUse: { + case Int52RepUse: { ASSERT(shouldCheckOverflow(node->arithMode())); if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) { @@ -2943,7 +3639,7 @@ void SpeculativeJIT::compileArithNegate(Node* node) } #endif // USE(JSVALUE64) - case NumberUse: { + case DoubleRepUse: { SpeculateDoubleOperand op1(this, node->child1()); FPRTemporary result(this); @@ -2962,6 +3658,38 @@ void SpeculativeJIT::compileArithMul(Node* node) { switch (node->binaryUseKind()) { case Int32Use: { + if (node->child2()->isInt32Constant()) { + SpeculateInt32Operand op1(this, node->child1()); + GPRTemporary result(this); + + int32_t imm = node->child2()->asInt32(); + GPRReg op1GPR = op1.gpr(); + GPRReg resultGPR = result.gpr(); + + if (!shouldCheckOverflow(node->arithMode())) + m_jit.mul32(Imm32(imm), op1GPR, resultGPR); + else { + speculationCheck(Overflow, JSValueRegs(), 0, + m_jit.branchMul32(MacroAssembler::Overflow, op1GPR, Imm32(imm), resultGPR)); + } + + // The only way to create negative zero with a constant is: + // 
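One reason ArithNegate carries overflow and negative-zero speculation at all: negating the int32 values 0 and INT32_MIN produces results (-0 and 2^31) that do not exist in the int32 domain. A small sketch of that boundary, with illustrative names:

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // Returns true when the negation stays an int32; otherwise models the
    // fallback to a double result.
    static bool negateInt32Checked(int32_t x, int32_t& intResult, double& doubleResult)
    {
        if (x == 0 || x == INT32_MIN) {          // 0 -> -0, INT32_MIN -> 2147483648
            doubleResult = -static_cast<double>(x);
            return false;
        }
        intResult = -x;
        return true;
    }

    int main()
    {
        int32_t i; double d;
        assert(!negateInt32Checked(0, i, d) && std::signbit(d));            // -0
        assert(!negateInt32Checked(INT32_MIN, i, d) && d == 2147483648.0);
        assert(negateInt32Checked(5, i, d) && i == -5);
        return 0;
    }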
-negative-op1 * 0. + // -zero-op1 * negative constant. + if (shouldCheckNegativeZero(node->arithMode())) { + if (!imm) + speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Signed, op1GPR)); + else if (imm < 0) { + if (shouldCheckOverflow(node->arithMode())) + speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, resultGPR)); + else + speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, op1GPR)); + } + } + + int32Result(resultGPR, node); + return; + } SpeculateInt32Operand op1(this, node->child1()); SpeculateInt32Operand op2(this, node->child2()); GPRTemporary result(this); @@ -2984,17 +3712,17 @@ void SpeculativeJIT::compileArithMul(Node* node) // Check for negative zero, if the users of this node care about such things. if (shouldCheckNegativeZero(node->arithMode())) { MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr()); - speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0))); - speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0))); + speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Signed, reg1)); + speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Signed, reg2)); resultNonZero.link(&m_jit); } int32Result(result.gpr(), node); return; } - -#if USE(JSVALUE64) - case MachineIntUse: { + +#if USE(JSVALUE64) + case Int52RepUse: { ASSERT(shouldCheckOverflow(node->arithMode())); // This is super clever. We want to do an int52 multiplication and check the @@ -3051,7 +3779,7 @@ void SpeculativeJIT::compileArithMul(Node* node) } #endif // USE(JSVALUE64) - case NumberUse: { + case DoubleRepUse: { SpeculateDoubleOperand op1(this, node->child1()); SpeculateDoubleOperand op2(this, node->child2()); FPRTemporary result(this, op1, op2); @@ -3064,7 +3792,107 @@ void SpeculativeJIT::compileArithMul(Node* node) doubleResult(result.fpr(), node); return; } - + + case UntypedUse: { + Edge& leftChild = node->child1(); + Edge& rightChild = node->child2(); + + if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) { + JSValueOperand left(this, leftChild); + JSValueOperand right(this, rightChild); + JSValueRegs leftRegs = left.jsValueRegs(); + JSValueRegs rightRegs = right.jsValueRegs(); +#if USE(JSVALUE64) + GPRTemporary result(this); + JSValueRegs resultRegs = JSValueRegs(result.gpr()); +#else + GPRTemporary resultTag(this); + GPRTemporary resultPayload(this); + JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr()); +#endif + flushRegisters(); + callOperation(operationValueMul, resultRegs, leftRegs, rightRegs); + m_jit.exceptionCheck(); + + jsValueResult(resultRegs, node); + return; + } + + Optional<JSValueOperand> left; + Optional<JSValueOperand> right; + + JSValueRegs leftRegs; + JSValueRegs rightRegs; + + FPRTemporary leftNumber(this); + FPRTemporary rightNumber(this); + FPRReg leftFPR = leftNumber.fpr(); + FPRReg rightFPR = rightNumber.fpr(); + +#if USE(JSVALUE64) + GPRTemporary result(this); + JSValueRegs resultRegs = JSValueRegs(result.gpr()); + GPRTemporary scratch(this); + GPRReg scratchGPR = scratch.gpr(); + FPRReg scratchFPR = InvalidFPRReg; +#else + GPRTemporary resultTag(this); + GPRTemporary resultPayload(this); + JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr()); + GPRReg scratchGPR = 
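The negative-zero checks above encode the fact that an int32 product of zero really stands for -0 whenever exactly one factor is negative, which is what the branchTest32(Signed, ...) checks catch. A standalone sketch of the condition (illustrative only):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // A zero product denotes -0 exactly when the operands have opposite signs.
    static bool int32ProductIsNegativeZero(int32_t a, int32_t b)
    {
        return (static_cast<int64_t>(a) * b) == 0 && ((a < 0) != (b < 0));
    }

    int main()
    {
        assert(int32ProductIsNegativeZero(-5, 0));
        assert(int32ProductIsNegativeZero(0, -5));
        assert(!int32ProductIsNegativeZero(0, 5));
        assert(!int32ProductIsNegativeZero(-5, 3));
        // The double result agrees:
        assert(std::signbit(-5.0 * 0.0) && !std::signbit(0.0 * 5.0));
        return 0;
    }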
resultTag.gpr(); + FPRTemporary fprScratch(this); + FPRReg scratchFPR = fprScratch.fpr(); +#endif + + SnippetOperand leftOperand(m_state.forNode(leftChild).resultType()); + SnippetOperand rightOperand(m_state.forNode(rightChild).resultType()); + + // The snippet generator does not support both operands being constant. If the left + // operand is already const, we'll ignore the right operand's constness. + if (leftChild->isInt32Constant()) + leftOperand.setConstInt32(leftChild->asInt32()); + else if (rightChild->isInt32Constant()) + rightOperand.setConstInt32(rightChild->asInt32()); + + ASSERT(!leftOperand.isConst() || !rightOperand.isConst()); + + if (!leftOperand.isPositiveConstInt32()) { + left = JSValueOperand(this, leftChild); + leftRegs = left->jsValueRegs(); + } + if (!rightOperand.isPositiveConstInt32()) { + right = JSValueOperand(this, rightChild); + rightRegs = right->jsValueRegs(); + } + + JITMulGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, + leftFPR, rightFPR, scratchGPR, scratchFPR); + gen.generateFastPath(m_jit); + + ASSERT(gen.didEmitFastPath()); + gen.endJumpList().append(m_jit.jump()); + + gen.slowPathJumpList().link(&m_jit); + silentSpillAllRegisters(resultRegs); + + if (leftOperand.isPositiveConstInt32()) { + leftRegs = resultRegs; + m_jit.moveValue(leftChild->asJSValue(), leftRegs); + } else if (rightOperand.isPositiveConstInt32()) { + rightRegs = resultRegs; + m_jit.moveValue(rightChild->asJSValue(), rightRegs); + } + + callOperation(operationValueMul, resultRegs, leftRegs, rightRegs); + + silentFillAllRegisters(resultRegs); + m_jit.exceptionCheck(); + + gen.endJumpList().link(&m_jit); + jsValueResult(resultRegs, node); + return; + } + default: RELEASE_ASSERT_NOT_REACHED(); return; @@ -3144,8 +3972,8 @@ void SpeculativeJIT::compileArithDiv(Node* node) } m_jit.move(op1GPR, eax.gpr()); - m_jit.assembler().cdq(); - m_jit.assembler().idivl_r(op2GPR); + m_jit.x86ConvertToDoubleWord32(); + m_jit.x86Div32(op2GPR); if (op2TempGPR != InvalidGPRReg) unlock(op2TempGPR); @@ -3157,7 +3985,7 @@ void SpeculativeJIT::compileArithDiv(Node* node) done.link(&m_jit); int32Result(eax.gpr(), node); -#elif CPU(APPLE_ARMV7S) +#elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64) SpeculateInt32Operand op1(this, node->child1()); SpeculateInt32Operand op2(this, node->child2()); GPRReg op1GPR = op1.gpr(); @@ -3173,31 +4001,8 @@ void SpeculativeJIT::compileArithDiv(Node* node) numeratorNonZero.link(&m_jit); } - m_jit.assembler().sdiv(quotient.gpr(), op1GPR, op2GPR); - - // Check that there was no remainder. If there had been, then we'd be obligated to - // produce a double result instead. - if (shouldCheckOverflow(node->arithMode())) { - speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr())); - speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR)); - } - - int32Result(quotient.gpr(), node); -#elif CPU(ARM64) - SpeculateInt32Operand op1(this, node->child1()); - SpeculateInt32Operand op2(this, node->child2()); - GPRReg op1GPR = op1.gpr(); - GPRReg op2GPR = op2.gpr(); - GPRTemporary quotient(this); - GPRTemporary multiplyAnswer(this); - - // If the user cares about negative zero, then speculate that we're not about - // to produce negative zero. 
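The integer division fast path may only keep an int32 result when the quotient is exact and well defined; otherwise the engine must produce a double, which is what the remainder and negative-zero speculation checks enforce. A rough standalone model, with illustrative names:

    #include <cassert>
    #include <cstdint>

    // Stays on the int32 path only for a nonzero divisor, no INT32_MIN / -1
    // overflow, an exact quotient, and no -0 result (0 divided by a negative).
    static bool divideInt32Checked(int32_t numerator, int32_t denominator, int32_t& quotient, double& doubleResult)
    {
        if (denominator != 0
            && !(numerator == INT32_MIN && denominator == -1)
            && numerator % denominator == 0
            && !(numerator == 0 && denominator < 0)) {
            quotient = numerator / denominator;
            return true;
        }
        doubleResult = static_cast<double>(numerator) / denominator;
        return false;
    }

    int main()
    {
        int32_t q; double d;
        assert(divideInt32Checked(6, 3, q, d) && q == 2);
        assert(!divideInt32Checked(7, 2, q, d) && d == 3.5);   // inexact quotient needs a double
        assert(!divideInt32Checked(1, 0, q, d));               // 1/0 is Infinity
        return 0;
    }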
- if (shouldCheckNegativeZero(node->arithMode())) { - MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR); - speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0))); - numeratorNonZero.link(&m_jit); - } + if (shouldCheckOverflow(node->arithMode())) + speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branchTest32(MacroAssembler::Zero, op2GPR)); m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR); @@ -3215,7 +4020,7 @@ void SpeculativeJIT::compileArithDiv(Node* node) break; } - case NumberUse: { + case DoubleRepUse: { SpeculateDoubleOperand op1(this, node->child1()); SpeculateDoubleOperand op2(this, node->child2()); FPRTemporary result(this, op1); @@ -3227,7 +4032,117 @@ void SpeculativeJIT::compileArithDiv(Node* node) doubleResult(result.fpr(), node); break; } - + + case UntypedUse: { + Edge& leftChild = node->child1(); + Edge& rightChild = node->child2(); + + if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) { + JSValueOperand left(this, leftChild); + JSValueOperand right(this, rightChild); + JSValueRegs leftRegs = left.jsValueRegs(); + JSValueRegs rightRegs = right.jsValueRegs(); +#if USE(JSVALUE64) + GPRTemporary result(this); + JSValueRegs resultRegs = JSValueRegs(result.gpr()); +#else + GPRTemporary resultTag(this); + GPRTemporary resultPayload(this); + JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr()); +#endif + flushRegisters(); + callOperation(operationValueDiv, resultRegs, leftRegs, rightRegs); + m_jit.exceptionCheck(); + + jsValueResult(resultRegs, node); + return; + } + + Optional<JSValueOperand> left; + Optional<JSValueOperand> right; + + JSValueRegs leftRegs; + JSValueRegs rightRegs; + + FPRTemporary leftNumber(this); + FPRTemporary rightNumber(this); + FPRReg leftFPR = leftNumber.fpr(); + FPRReg rightFPR = rightNumber.fpr(); + FPRTemporary fprScratch(this); + FPRReg scratchFPR = fprScratch.fpr(); + +#if USE(JSVALUE64) + GPRTemporary result(this); + JSValueRegs resultRegs = JSValueRegs(result.gpr()); + GPRTemporary scratch(this); + GPRReg scratchGPR = scratch.gpr(); +#else + GPRTemporary resultTag(this); + GPRTemporary resultPayload(this); + JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr()); + GPRReg scratchGPR = resultTag.gpr(); +#endif + + SnippetOperand leftOperand(m_state.forNode(leftChild).resultType()); + SnippetOperand rightOperand(m_state.forNode(rightChild).resultType()); + + if (leftChild->isInt32Constant()) + leftOperand.setConstInt32(leftChild->asInt32()); +#if USE(JSVALUE64) + else if (leftChild->isDoubleConstant()) + leftOperand.setConstDouble(leftChild->asNumber()); +#endif + + if (leftOperand.isConst()) { + // The snippet generator only supports 1 argument as a constant. + // Ignore the rightChild's const-ness. 
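The newly added branchTest32(Zero, op2GPR) check exists because ARM's sdiv quietly returns 0 for a zero divisor instead of trapping, while the x86 path has to filter the same inputs before idiv, which would fault. A minimal sketch of the two inputs that can never go through the hardware divide directly (illustrative, not the patch's code):

    #include <cassert>
    #include <cstdint>

    static bool int32DivisionIsSafe(int32_t numerator, int32_t denominator)
    {
        if (denominator == 0)
            return false;    // x86 idiv faults; ARM sdiv silently yields 0
        if (numerator == INT32_MIN && denominator == -1)
            return false;    // quotient 2^31 does not fit in int32
        return true;
    }

    int main()
    {
        assert(int32DivisionIsSafe(7, 2));
        assert(!int32DivisionIsSafe(7, 0));
        assert(!int32DivisionIsSafe(INT32_MIN, -1));
        return 0;
    }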
+ } else if (rightChild->isInt32Constant()) + rightOperand.setConstInt32(rightChild->asInt32()); +#if USE(JSVALUE64) + else if (rightChild->isDoubleConstant()) + rightOperand.setConstDouble(rightChild->asNumber()); +#endif + + RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst()); + + if (!leftOperand.isConst()) { + left = JSValueOperand(this, leftChild); + leftRegs = left->jsValueRegs(); + } + if (!rightOperand.isConst()) { + right = JSValueOperand(this, rightChild); + rightRegs = right->jsValueRegs(); + } + + JITDivGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, + leftFPR, rightFPR, scratchGPR, scratchFPR); + gen.generateFastPath(m_jit); + + ASSERT(gen.didEmitFastPath()); + gen.endJumpList().append(m_jit.jump()); + + gen.slowPathJumpList().link(&m_jit); + silentSpillAllRegisters(resultRegs); + + if (leftOperand.isConst()) { + leftRegs = resultRegs; + m_jit.moveValue(leftChild->asJSValue(), leftRegs); + } + if (rightOperand.isConst()) { + rightRegs = resultRegs; + m_jit.moveValue(rightChild->asJSValue(), rightRegs); + } + + callOperation(operationValueDiv, resultRegs, leftRegs, rightRegs); + + silentFillAllRegisters(resultRegs); + m_jit.exceptionCheck(); + + gen.endJumpList().link(&m_jit); + jsValueResult(resultRegs, node); + return; + } + default: RELEASE_ASSERT_NOT_REACHED(); break; @@ -3242,10 +4157,10 @@ void SpeculativeJIT::compileArithMod(Node* node) // (in case of |dividend| < |divisor|), so we speculate it as strict int32. SpeculateStrictInt32Operand op1(this, node->child1()); - if (isInt32Constant(node->child2().node())) { - int32_t divisor = valueOfInt32Constant(node->child2().node()); + if (node->child2()->isInt32Constant()) { + int32_t divisor = node->child2()->asInt32(); if (divisor > 1 && hasOneBitSet(divisor)) { - unsigned logarithm = WTF::fastLog2(divisor); + unsigned logarithm = WTF::fastLog2(static_cast<uint32_t>(divisor)); GPRReg dividendGPR = op1.gpr(); GPRTemporary result(this); GPRReg resultGPR = result.gpr(); @@ -3304,8 +4219,8 @@ void SpeculativeJIT::compileArithMod(Node* node) } #if CPU(X86) || CPU(X86_64) - if (isInt32Constant(node->child2().node())) { - int32_t divisor = valueOfInt32Constant(node->child2().node()); + if (node->child2()->isInt32Constant()) { + int32_t divisor = node->child2()->asInt32(); if (divisor && divisor != -1) { GPRReg op1Gpr = op1.gpr(); @@ -3326,8 +4241,8 @@ void SpeculativeJIT::compileArithMod(Node* node) m_jit.move(op1Gpr, eax.gpr()); m_jit.move(TrustedImm32(divisor), scratchGPR); - m_jit.assembler().cdq(); - m_jit.assembler().idivl_r(scratchGPR); + m_jit.x86ConvertToDoubleWord32(); + m_jit.x86Div32(scratchGPR); if (shouldCheckNegativeZero(node->arithMode())) { JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0)); speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr())); @@ -3417,8 +4332,8 @@ void SpeculativeJIT::compileArithMod(Node* node) } m_jit.move(op1GPR, eax.gpr()); - m_jit.assembler().cdq(); - m_jit.assembler().idivl_r(op2GPR); + m_jit.x86ConvertToDoubleWord32(); + m_jit.x86Div32(op2GPR); if (op2TempGPR != InvalidGPRReg) unlock(op2TempGPR); @@ -3436,7 +4351,7 @@ void SpeculativeJIT::compileArithMod(Node* node) done.link(&m_jit); int32Result(edx.gpr(), node); -#elif CPU(APPLE_ARMV7S) +#elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64) GPRTemporary temp(this); GPRTemporary quotientThenRemainder(this); GPRTemporary multiplyAnswer(this); @@ -3445,38 +4360,27 @@ void 
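The power-of-two fast path above turns x % 2^k into a mask, but only after accounting for the dividend's sign, since the remainder keeps the sign of the dividend. A standalone sketch of the identity (illustrative names; INT32_MIN is ignored for brevity):

    #include <cassert>
    #include <cstdint>

    // d must be a power of two greater than 1, matching the divisor > 1 &&
    // hasOneBitSet(divisor) guard above.
    static int32_t jsModPowerOfTwo(int32_t x, int32_t d)
    {
        int32_t mask = d - 1;
        if (x >= 0)
            return x & mask;
        return -(-x & mask);    // mask the magnitude, then restore the dividend's sign
    }

    int main()
    {
        assert(jsModPowerOfTwo(7, 4) == 3);
        assert(jsModPowerOfTwo(-7, 4) == -3);   // a plain mask would wrongly give 1
        assert(jsModPowerOfTwo(8, 4) == 0);
        assert((-7 & 3) == 1);                  // the naive mask result, for contrast
        return 0;
    }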
SpeculativeJIT::compileArithMod(Node* node) GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr(); GPRReg multiplyAnswerGPR = multiplyAnswer.gpr(); - m_jit.assembler().sdiv(quotientThenRemainderGPR, dividendGPR, divisorGPR); - // FIXME: It seems like there are cases where we don't need this? What if we have - // arithMode() == Arith::Unchecked? - // https://bugs.webkit.org/show_bug.cgi?id=126444 - speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR)); - m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR); - - // If the user cares about negative zero, then speculate that we're not about - // to produce negative zero. - if (shouldCheckNegativeZero(node->arithMode())) { - // Check that we're not about to create negative zero. - JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0)); - speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR)); - numeratorPositive.link(&m_jit); + JITCompiler::JumpList done; + + if (shouldCheckOverflow(node->arithMode())) + speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR)); + else { + JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR); + m_jit.move(divisorGPR, quotientThenRemainderGPR); + done.append(m_jit.jump()); + denominatorNotZero.link(&m_jit); } - int32Result(quotientThenRemainderGPR, node); -#elif CPU(ARM64) - GPRTemporary temp(this); - GPRTemporary quotientThenRemainder(this); - GPRTemporary multiplyAnswer(this); - GPRReg dividendGPR = op1.gpr(); - GPRReg divisorGPR = op2.gpr(); - GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr(); - GPRReg multiplyAnswerGPR = multiplyAnswer.gpr(); - m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR); // FIXME: It seems like there are cases where we don't need this? What if we have // arithMode() == Arith::Unchecked? // https://bugs.webkit.org/show_bug.cgi?id=126444 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR)); +#if HAVE(ARM_IDIV_INSTRUCTIONS) + m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR); +#else m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR); +#endif // If the user cares about negative zero, then speculate that we're not about // to produce negative zero. 
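The negative-zero and zero-divisor handling in this modulo path follows the language's remainder rules: the result takes the dividend's sign, so a zero remainder from a negative dividend is -0, and any modulo by zero is NaN. fmod models the same rules:

    #include <cassert>
    #include <cmath>

    int main()
    {
        assert(std::fmod(-4.0, 2.0) == 0.0 && std::signbit(std::fmod(-4.0, 2.0)));  // -0
        assert(std::fmod(4.0, 2.0) == 0.0 && !std::signbit(std::fmod(4.0, 2.0)));   // +0
        assert(std::fmod(-7.0, 4.0) == -3.0);      // sign follows the dividend
        assert(std::isnan(std::fmod(1.0, 0.0)));   // x % 0 is NaN, hence the divisor check
        return 0;
    }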
@@ -3487,6 +4391,8 @@ void SpeculativeJIT::compileArithMod(Node* node) numeratorPositive.link(&m_jit); } + done.link(&m_jit); + int32Result(quotientThenRemainderGPR, node); #else // not architecture that can do integer division RELEASE_ASSERT_NOT_REACHED(); @@ -3494,7 +4400,7 @@ void SpeculativeJIT::compileArithMod(Node* node) return; } - case NumberUse: { + case DoubleRepUse: { SpeculateDoubleOperand op1(this, node->child1()); SpeculateDoubleOperand op2(this, node->child2()); @@ -3517,6 +4423,207 @@ void SpeculativeJIT::compileArithMod(Node* node) } } +void SpeculativeJIT::compileArithRounding(Node* node) +{ + ASSERT(node->child1().useKind() == DoubleRepUse); + + SpeculateDoubleOperand value(this, node->child1()); + FPRReg valueFPR = value.fpr(); + + auto setResult = [&] (FPRReg resultFPR) { + if (producesInteger(node->arithRoundingMode())) { + GPRTemporary roundedResultAsInt32(this); + FPRTemporary scratch(this); + FPRReg scratchFPR = scratch.fpr(); + GPRReg resultGPR = roundedResultAsInt32.gpr(); + JITCompiler::JumpList failureCases; + m_jit.branchConvertDoubleToInt32(resultFPR, resultGPR, failureCases, scratchFPR, shouldCheckNegativeZero(node->arithRoundingMode())); + speculationCheck(Overflow, JSValueRegs(), node, failureCases); + + int32Result(resultGPR, node); + } else + doubleResult(resultFPR, node); + }; + + if (m_jit.supportsFloatingPointRounding()) { + switch (node->op()) { + case ArithRound: { + FPRTemporary result(this); + FPRReg resultFPR = result.fpr(); + if (producesInteger(node->arithRoundingMode()) && !shouldCheckNegativeZero(node->arithRoundingMode())) { + static const double halfConstant = 0.5; + m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), resultFPR); + m_jit.addDouble(valueFPR, resultFPR); + m_jit.floorDouble(resultFPR, resultFPR); + } else { + m_jit.ceilDouble(valueFPR, resultFPR); + FPRTemporary realPart(this); + FPRReg realPartFPR = realPart.fpr(); + m_jit.subDouble(resultFPR, valueFPR, realPartFPR); + + FPRTemporary scratch(this); + FPRReg scratchFPR = scratch.fpr(); + static const double halfConstant = 0.5; + m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), scratchFPR); + + JITCompiler::Jump shouldUseCeiled = m_jit.branchDouble(JITCompiler::DoubleLessThanOrEqual, realPartFPR, scratchFPR); + static const double oneConstant = -1.0; + m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), scratchFPR); + m_jit.addDouble(scratchFPR, resultFPR); + shouldUseCeiled.link(&m_jit); + } + setResult(resultFPR); + return; + } + + case ArithFloor: { + FPRTemporary rounded(this); + FPRReg resultFPR = rounded.fpr(); + m_jit.floorDouble(valueFPR, resultFPR); + setResult(resultFPR); + return; + } + + case ArithCeil: { + FPRTemporary rounded(this); + FPRReg resultFPR = rounded.fpr(); + m_jit.ceilDouble(valueFPR, resultFPR); + setResult(resultFPR); + return; + } + + default: + RELEASE_ASSERT_NOT_REACHED(); + } + } else { + flushRegisters(); + FPRResult roundedResultAsDouble(this); + FPRReg resultFPR = roundedResultAsDouble.fpr(); + if (node->op() == ArithRound) + callOperation(jsRound, resultFPR, valueFPR); + else if (node->op() == ArithFloor) + callOperation(floor, resultFPR, valueFPR); + else { + ASSERT(node->op() == ArithCeil); + callOperation(ceil, resultFPR, valueFPR); + } + m_jit.exceptionCheck(); + setResult(resultFPR); + } +} + +void SpeculativeJIT::compileArithSqrt(Node* node) +{ + SpeculateDoubleOperand op1(this, node->child1()); + FPRReg op1FPR = op1.fpr(); + + if (!MacroAssembler::supportsFloatingPointSqrt() || 
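ArithRound above implements round-half-toward-positive-infinity: the floor(x + 0.5) shortcut is used when -0 does not matter, and the ceil-and-adjust sequence otherwise, because floor(x + 0.5) cannot produce -0. A standalone sketch of the ceil-based variant (illustrative only):

    #include <cassert>
    #include <cmath>

    // Ceil first; if the distance back to x exceeds one half, step down by one.
    static double jsMathRound(double x)
    {
        double ceiled = std::ceil(x);
        if (ceiled - x > 0.5)
            ceiled -= 1.0;
        return ceiled;
    }

    int main()
    {
        assert(jsMathRound(2.5) == 3.0);
        assert(jsMathRound(-2.5) == -2.0);            // toward +Infinity; C round(-2.5) gives -3
        assert(jsMathRound(2.4) == 2.0);
        assert(std::signbit(jsMathRound(-0.25)));     // -0 survives; floor(x + 0.5) would lose it
        return 0;
    }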
!Options::useArchitectureSpecificOptimizations()) { + flushRegisters(); + FPRResult result(this); + callOperation(sqrt, result.fpr(), op1FPR); + doubleResult(result.fpr(), node); + } else { + FPRTemporary result(this, op1); + m_jit.sqrtDouble(op1.fpr(), result.fpr()); + doubleResult(result.fpr(), node); + } +} + +// For small positive integers , it is worth doing a tiny inline loop to exponentiate the base. +// Every register is clobbered by this helper. +static MacroAssembler::Jump compileArithPowIntegerFastPath(JITCompiler& assembler, FPRReg xOperand, GPRReg yOperand, FPRReg result) +{ + MacroAssembler::JumpList skipFastPath; + skipFastPath.append(assembler.branch32(MacroAssembler::Above, yOperand, MacroAssembler::TrustedImm32(1000))); + + static const double oneConstant = 1.0; + assembler.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), result); + + MacroAssembler::Label startLoop(assembler.label()); + MacroAssembler::Jump exponentIsEven = assembler.branchTest32(MacroAssembler::Zero, yOperand, MacroAssembler::TrustedImm32(1)); + assembler.mulDouble(xOperand, result); + exponentIsEven.link(&assembler); + assembler.mulDouble(xOperand, xOperand); + assembler.rshift32(MacroAssembler::TrustedImm32(1), yOperand); + assembler.branchTest32(MacroAssembler::NonZero, yOperand).linkTo(startLoop, &assembler); + + MacroAssembler::Jump skipSlowPath = assembler.jump(); + skipFastPath.link(&assembler); + + return skipSlowPath; +} + +void SpeculativeJIT::compileArithPow(Node* node) +{ + if (node->child2().useKind() == Int32Use) { + SpeculateDoubleOperand xOperand(this, node->child1()); + SpeculateInt32Operand yOperand(this, node->child2()); + FPRReg xOperandfpr = xOperand.fpr(); + GPRReg yOperandGpr = yOperand.gpr(); + FPRTemporary yOperandfpr(this); + + flushRegisters(); + + FPRResult result(this); + FPRReg resultFpr = result.fpr(); + + FPRTemporary xOperandCopy(this); + FPRReg xOperandCopyFpr = xOperandCopy.fpr(); + m_jit.moveDouble(xOperandfpr, xOperandCopyFpr); + + GPRTemporary counter(this); + GPRReg counterGpr = counter.gpr(); + m_jit.move(yOperandGpr, counterGpr); + + MacroAssembler::Jump skipFallback = compileArithPowIntegerFastPath(m_jit, xOperandCopyFpr, counterGpr, resultFpr); + m_jit.convertInt32ToDouble(yOperandGpr, yOperandfpr.fpr()); + callOperation(operationMathPow, resultFpr, xOperandfpr, yOperandfpr.fpr()); + + skipFallback.link(&m_jit); + doubleResult(resultFpr, node); + return; + } + + SpeculateDoubleOperand xOperand(this, node->child1()); + SpeculateDoubleOperand yOperand(this, node->child2()); + FPRReg xOperandfpr = xOperand.fpr(); + FPRReg yOperandfpr = yOperand.fpr(); + + flushRegisters(); + + FPRResult result(this); + FPRReg resultFpr = result.fpr(); + + FPRTemporary xOperandCopy(this); + FPRReg xOperandCopyFpr = xOperandCopy.fpr(); + + FPRTemporary scratch(this); + FPRReg scratchFpr = scratch.fpr(); + + GPRTemporary yOperandInteger(this); + GPRReg yOperandIntegerGpr = yOperandInteger.gpr(); + MacroAssembler::JumpList failedExponentConversionToInteger; + m_jit.branchConvertDoubleToInt32(yOperandfpr, yOperandIntegerGpr, failedExponentConversionToInteger, scratchFpr, false); + + m_jit.moveDouble(xOperandfpr, xOperandCopyFpr); + MacroAssembler::Jump skipFallback = compileArithPowIntegerFastPath(m_jit, xOperandCopyFpr, yOperandInteger.gpr(), resultFpr); + failedExponentConversionToInteger.link(&m_jit); + + callOperation(operationMathPow, resultFpr, xOperandfpr, yOperandfpr); + skipFallback.link(&m_jit); + doubleResult(resultFpr, node); +} + +void 
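compileArithPowIntegerFastPath is a square-and-multiply loop over the exponent's bits, capped at 1000 so larger exponents go to the generic operationMathPow call instead. The same algorithm as a standalone sketch (illustrative names):

    #include <cassert>
    #include <cstdint>

    // Returns false to model bailing out to the slow path for big exponents.
    static bool powIntegerFastPath(double base, uint32_t exponent, double& result)
    {
        if (exponent > 1000)
            return false;
        double accumulator = 1.0;
        while (exponent) {
            if (exponent & 1)
                accumulator *= base;   // low bit set: fold the current power in
            base *= base;              // square for the next bit
            exponent >>= 1;
        }
        result = accumulator;
        return true;
    }

    int main()
    {
        double r;
        assert(powIntegerFastPath(2.0, 10, r) && r == 1024.0);
        assert(powIntegerFastPath(3.0, 0, r) && r == 1.0);
        assert(!powIntegerFastPath(2.0, 100000, r));
        return 0;
    }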
SpeculativeJIT::compileArithLog(Node* node) +{ + SpeculateDoubleOperand op1(this, node->child1()); + FPRReg op1FPR = op1.fpr(); + flushRegisters(); + FPRResult result(this); + callOperation(log, result.fpr(), op1FPR); + doubleResult(result.fpr(), node); +} + // Returns true if the compare is fused with a subsequent branch. bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation) { @@ -3529,13 +4636,13 @@ bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition con } #if USE(JSVALUE64) - if (node->isBinaryUseKind(MachineIntUse)) { + if (node->isBinaryUseKind(Int52RepUse)) { compileInt52Compare(node, condition); return false; } #endif // USE(JSVALUE64) - if (node->isBinaryUseKind(NumberUse)) { + if (node->isBinaryUseKind(DoubleRepUse)) { compileDoubleCompare(node, doubleCondition); return false; } @@ -3555,107 +4662,51 @@ bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition con compileStringIdentEquality(node); return false; } + + if (node->isBinaryUseKind(SymbolUse)) { + compileSymbolEquality(node); + return false; + } if (node->isBinaryUseKind(ObjectUse)) { compileObjectEquality(node); return false; } - if (node->child1().useKind() == ObjectUse && node->child2().useKind() == ObjectOrOtherUse) { + if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) { compileObjectToObjectOrOtherEquality(node->child1(), node->child2()); return false; } - if (node->child1().useKind() == ObjectOrOtherUse && node->child2().useKind() == ObjectUse) { + if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) { compileObjectToObjectOrOtherEquality(node->child2(), node->child1()); return false; } - } - - nonSpeculativeNonPeepholeCompare(node, condition, operation); - return false; -} -bool SpeculativeJIT::compileStrictEqForConstant(Node* node, Edge value, JSValue constant) -{ - JSValueOperand op1(this, value); - - // FIXME: This code is wrong for the case that the constant is null or undefined, - // and the value is an object that MasqueradesAsUndefined. - // https://bugs.webkit.org/show_bug.cgi?id=109487 - - unsigned branchIndexInBlock = detectPeepHoleBranch(); - if (branchIndexInBlock != UINT_MAX) { - Node* branchNode = m_block->at(branchIndexInBlock); - BasicBlock* taken = branchNode->takenBlock(); - BasicBlock* notTaken = branchNode->notTakenBlock(); - MacroAssembler::RelationalCondition condition = MacroAssembler::Equal; - - // The branch instruction will branch to the taken block. - // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through. - if (taken == nextBlock()) { - condition = MacroAssembler::NotEqual; - BasicBlock* tmp = taken; - taken = notTaken; - notTaken = tmp; + if (!needsTypeCheck(node->child1(), SpecOther)) { + nonSpeculativeNonPeepholeCompareNullOrUndefined(node->child2()); + return false; } -#if USE(JSVALUE64) - branch64(condition, op1.gpr(), MacroAssembler::TrustedImm64(JSValue::encode(constant)), taken); -#else - GPRReg payloadGPR = op1.payloadGPR(); - GPRReg tagGPR = op1.tagGPR(); - if (condition == MacroAssembler::Equal) { - // Drop down if not equal, go elsewhere if equal. - MacroAssembler::Jump notEqual = m_jit.branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag())); - branch32(MacroAssembler::Equal, payloadGPR, MacroAssembler::Imm32(constant.payload()), taken); - notEqual.link(&m_jit); - } else { - // Drop down if equal, go elsehwere if not equal. 
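The new SpecOther fast paths at the end of compare() rely on the rule that loose equality against null or undefined holds only for null and undefined themselves; the removed FIXME is a reminder that objects which masquerade as undefined complicate the full story. A tiny tagged-value model of the reduced check (illustrative only):

    #include <cassert>

    enum class Tag { Undefined, Null, Boolean, Number, String, Object };

    // When one side is statically "Other", x == null collapses to this test
    // (ignoring masquerading objects, which the real code must still consider).
    static bool looseEqualsNullOrUndefined(Tag tag)
    {
        return tag == Tag::Undefined || tag == Tag::Null;
    }

    int main()
    {
        assert(looseEqualsNullOrUndefined(Tag::Null));        // null == undefined is true
        assert(looseEqualsNullOrUndefined(Tag::Undefined));
        assert(!looseEqualsNullOrUndefined(Tag::Number));     // 0 == null is false
        assert(!looseEqualsNullOrUndefined(Tag::String));     // "" == null is false
        return 0;
    }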
- branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag()), taken); - branch32(MacroAssembler::NotEqual, payloadGPR, MacroAssembler::Imm32(constant.payload()), taken); + if (!needsTypeCheck(node->child2(), SpecOther)) { + nonSpeculativeNonPeepholeCompareNullOrUndefined(node->child1()); + return false; } -#endif - - jump(notTaken); - - use(node->child1()); - use(node->child2()); - m_indexInBlock = branchIndexInBlock; - m_currentNode = branchNode; - return true; } - - GPRTemporary result(this); - -#if USE(JSVALUE64) - GPRReg op1GPR = op1.gpr(); - GPRReg resultGPR = result.gpr(); - m_jit.move(MacroAssembler::TrustedImm64(ValueFalse), resultGPR); - MacroAssembler::Jump notEqual = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, MacroAssembler::TrustedImm64(JSValue::encode(constant))); - m_jit.or32(MacroAssembler::TrustedImm32(1), resultGPR); - notEqual.link(&m_jit); - jsValueResult(resultGPR, node, DataFormatJSBoolean); -#else - GPRReg op1PayloadGPR = op1.payloadGPR(); - GPRReg op1TagGPR = op1.tagGPR(); - GPRReg resultGPR = result.gpr(); - m_jit.move(TrustedImm32(0), resultGPR); - MacroAssembler::JumpList notEqual; - notEqual.append(m_jit.branch32(MacroAssembler::NotEqual, op1TagGPR, MacroAssembler::Imm32(constant.tag()))); - notEqual.append(m_jit.branch32(MacroAssembler::NotEqual, op1PayloadGPR, MacroAssembler::Imm32(constant.payload()))); - m_jit.move(TrustedImm32(1), resultGPR); - notEqual.link(&m_jit); - booleanResult(resultGPR, node); -#endif - + + nonSpeculativeNonPeepholeCompare(node, condition, operation); return false; } bool SpeculativeJIT::compileStrictEq(Node* node) { - switch (node->binaryUseKind()) { - case BooleanUse: { + // FIXME: Currently, we have op_jless, op_jgreater etc. But we don't have op_jeq, op_jstricteq etc. + // `==` and `===` operations with branching will be compiled to op_{eq,stricteq} and op_{jfalse,jtrue}. + // In DFG bytecodes, between op_eq and op_jfalse, we have MovHint to store the result of op_eq. + // As a result, detectPeepHoleBranch() never detects peep hole for that case. 
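The peephole code around here repeatedly applies the same layout trick: when the branch's taken block is the next block to be emitted, the condition is inverted and the successors swapped so the likely edge becomes a fall-through. A toy sketch of that decision, with illustrative types:

    #include <cassert>
    #include <string>

    struct Branch { std::string condition; int taken; int notTaken; };

    static Branch layoutBranch(std::string condition, std::string inverted,
                               int taken, int notTaken, int nextBlock)
    {
        if (taken == nextBlock)
            return { inverted, notTaken, taken };   // jump on the inverted test, fall through otherwise
        return { condition, taken, notTaken };
    }

    int main()
    {
        Branch swapped = layoutBranch("Equal", "NotEqual", /*taken*/ 2, /*notTaken*/ 5, /*nextBlock*/ 2);
        assert(swapped.condition == "NotEqual" && swapped.taken == 5);
        Branch kept = layoutBranch("Equal", "NotEqual", 7, 5, 2);
        assert(kept.condition == "Equal" && kept.taken == 7);
        return 0;
    }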
+ // https://bugs.webkit.org/show_bug.cgi?id=149713 + + if (node->isBinaryUseKind(BooleanUse)) { unsigned branchIndexInBlock = detectPeepHoleBranch(); if (branchIndexInBlock != UINT_MAX) { Node* branchNode = m_block->at(branchIndexInBlock); @@ -3670,7 +4721,7 @@ bool SpeculativeJIT::compileStrictEq(Node* node) return false; } - case Int32Use: { + if (node->isBinaryUseKind(Int32Use)) { unsigned branchIndexInBlock = detectPeepHoleBranch(); if (branchIndexInBlock != UINT_MAX) { Node* branchNode = m_block->at(branchIndexInBlock); @@ -3686,7 +4737,7 @@ bool SpeculativeJIT::compileStrictEq(Node* node) } #if USE(JSVALUE64) - case MachineIntUse: { + if (node->isBinaryUseKind(Int52RepUse)) { unsigned branchIndexInBlock = detectPeepHoleBranch(); if (branchIndexInBlock != UINT_MAX) { Node* branchNode = m_block->at(branchIndexInBlock); @@ -3701,8 +4752,8 @@ bool SpeculativeJIT::compileStrictEq(Node* node) return false; } #endif // USE(JSVALUE64) - - case NumberUse: { + + if (node->isBinaryUseKind(DoubleRepUse)) { unsigned branchIndexInBlock = detectPeepHoleBranch(); if (branchIndexInBlock != UINT_MAX) { Node* branchNode = m_block->at(branchIndexInBlock); @@ -3716,18 +4767,63 @@ bool SpeculativeJIT::compileStrictEq(Node* node) compileDoubleCompare(node, MacroAssembler::DoubleEqual); return false; } - - case StringUse: { + + if (node->isBinaryUseKind(SymbolUse)) { + unsigned branchIndexInBlock = detectPeepHoleBranch(); + if (branchIndexInBlock != UINT_MAX) { + Node* branchNode = m_block->at(branchIndexInBlock); + compilePeepHoleSymbolEquality(node, branchNode); + use(node->child1()); + use(node->child2()); + m_indexInBlock = branchIndexInBlock; + m_currentNode = branchNode; + return true; + } + compileSymbolEquality(node); + return false; + } + + if (node->isBinaryUseKind(StringUse)) { compileStringEquality(node); return false; } - - case StringIdentUse: { + + if (node->isBinaryUseKind(StringIdentUse)) { compileStringIdentEquality(node); return false; } - - case ObjectUse: { + + if (node->isBinaryUseKind(ObjectUse, UntypedUse)) { + unsigned branchIndexInBlock = detectPeepHoleBranch(); + if (branchIndexInBlock != UINT_MAX) { + Node* branchNode = m_block->at(branchIndexInBlock); + compilePeepHoleObjectStrictEquality(node->child1(), node->child2(), branchNode); + use(node->child1()); + use(node->child2()); + m_indexInBlock = branchIndexInBlock; + m_currentNode = branchNode; + return true; + } + compileObjectStrictEquality(node->child1(), node->child2()); + return false; + } + + if (node->isBinaryUseKind(UntypedUse, ObjectUse)) { + unsigned branchIndexInBlock = detectPeepHoleBranch(); + if (branchIndexInBlock != UINT_MAX) { + Node* branchNode = m_block->at(branchIndexInBlock); + compilePeepHoleObjectStrictEquality(node->child2(), node->child1(), branchNode); + use(node->child1()); + use(node->child2()); + m_indexInBlock = branchIndexInBlock; + m_currentNode = branchNode; + return true; + } + compileObjectStrictEquality(node->child2(), node->child1()); + return false; + } + + if (node->isBinaryUseKind(ObjectUse)) { unsigned branchIndexInBlock = detectPeepHoleBranch(); if (branchIndexInBlock != UINT_MAX) { Node* branchNode = m_block->at(branchIndexInBlock); @@ -3741,15 +4837,35 @@ bool SpeculativeJIT::compileStrictEq(Node* node) compileObjectEquality(node); return false; } - - case UntypedUse: { - return nonSpeculativeStrictEq(node); + + if (node->isBinaryUseKind(MiscUse, UntypedUse) + || node->isBinaryUseKind(UntypedUse, MiscUse)) { + compileMiscStrictEq(node); + return false; } - - default: - 
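The DoubleRepUse case lowers === to a DoubleEqual comparison, which already carries the right semantics: NaN is unequal to everything including itself, and +0 equals -0. A few sanity checks in plain C++:

    #include <cassert>
    #include <cmath>

    int main()
    {
        double nan = std::nan("");
        assert(!(nan == nan));           // NaN === NaN is false
        assert(0.0 == -0.0);             // +0 === -0 is true
        assert(!(0.1 + 0.2 == 0.3));     // ordinary IEEE comparison, no tolerance
        return 0;
    }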
RELEASE_ASSERT_NOT_REACHED(); + + if (node->isBinaryUseKind(StringIdentUse, NotStringVarUse)) { + compileStringIdentToNotStringVarEquality(node, node->child1(), node->child2()); + return false; + } + + if (node->isBinaryUseKind(NotStringVarUse, StringIdentUse)) { + compileStringIdentToNotStringVarEquality(node, node->child2(), node->child1()); + return false; + } + + if (node->isBinaryUseKind(StringUse, UntypedUse)) { + compileStringToUntypedEquality(node, node->child1(), node->child2()); + return false; + } + + if (node->isBinaryUseKind(UntypedUse, StringUse)) { + compileStringToUntypedEquality(node, node->child2(), node->child1()); return false; } + + RELEASE_ASSERT(node->isBinaryUseKind(UntypedUse)); + return nonSpeculativeStrictEq(node); } void SpeculativeJIT::compileBooleanCompare(Node* node, MacroAssembler::RelationalCondition condition) @@ -3760,44 +4876,66 @@ void SpeculativeJIT::compileBooleanCompare(Node* node, MacroAssembler::Relationa m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr()); - // If we add a DataFormatBool, we should use it here. -#if USE(JSVALUE32_64) - booleanResult(result.gpr(), node); -#else - m_jit.or32(TrustedImm32(ValueFalse), result.gpr()); - jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean); -#endif + unblessedBooleanResult(result.gpr(), node); } -void SpeculativeJIT::compileStringEquality(Node* node) +template<typename Functor> +void SpeculativeJIT::extractStringImplFromBinarySymbols(Edge leftSymbolEdge, Edge rightSymbolEdge, const Functor& functor) { - SpeculateCellOperand left(this, node->child1()); - SpeculateCellOperand right(this, node->child2()); - GPRTemporary length(this); + SpeculateCellOperand left(this, leftSymbolEdge); + SpeculateCellOperand right(this, rightSymbolEdge); GPRTemporary leftTemp(this); GPRTemporary rightTemp(this); - GPRTemporary leftTemp2(this, Reuse, left); - GPRTemporary rightTemp2(this, Reuse, right); - + GPRReg leftGPR = left.gpr(); GPRReg rightGPR = right.gpr(); - GPRReg lengthGPR = length.gpr(); GPRReg leftTempGPR = leftTemp.gpr(); GPRReg rightTempGPR = rightTemp.gpr(); - GPRReg leftTemp2GPR = leftTemp2.gpr(); - GPRReg rightTemp2GPR = rightTemp2.gpr(); - + + speculateSymbol(leftSymbolEdge, leftGPR); + speculateSymbol(rightSymbolEdge, rightGPR); + + m_jit.loadPtr(JITCompiler::Address(leftGPR, Symbol::offsetOfPrivateName()), leftTempGPR); + m_jit.loadPtr(JITCompiler::Address(rightGPR, Symbol::offsetOfPrivateName()), rightTempGPR); + + functor(leftTempGPR, rightTempGPR); +} + +void SpeculativeJIT::compileSymbolEquality(Node* node) +{ + extractStringImplFromBinarySymbols(node->child1(), node->child2(), [&] (GPRReg leftStringImpl, GPRReg rightStringImpl) { + m_jit.comparePtr(JITCompiler::Equal, leftStringImpl, rightStringImpl, leftStringImpl); + unblessedBooleanResult(leftStringImpl, node); + }); +} + +void SpeculativeJIT::compilePeepHoleSymbolEquality(Node* node, Node* branchNode) +{ + BasicBlock* taken = branchNode->branchData()->taken.block; + BasicBlock* notTaken = branchNode->branchData()->notTaken.block; + + extractStringImplFromBinarySymbols(node->child1(), node->child2(), [&] (GPRReg leftStringImpl, GPRReg rightStringImpl) { + if (taken == nextBlock()) { + branchPtr(JITCompiler::NotEqual, leftStringImpl, rightStringImpl, notTaken); + jump(taken); + } else { + branchPtr(JITCompiler::Equal, leftStringImpl, rightStringImpl, taken); + jump(notTaken); + } + }); +} + +void SpeculativeJIT::compileStringEquality( + Node* node, GPRReg leftGPR, GPRReg rightGPR, GPRReg lengthGPR, GPRReg leftTempGPR, + 
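compileSymbolEquality compares the two symbols' private-name pointers, because every symbol owns a distinct private name and symbol equality is pure identity. A rough model using shared ownership for the backing object (names are illustrative, not JSC's):

    #include <cassert>
    #include <memory>

    struct PrivateName {};

    struct SymbolValue {
        std::shared_ptr<PrivateName> privateName;
    };

    // Identity of the backing object, never content.
    static bool symbolsEqual(const SymbolValue& a, const SymbolValue& b)
    {
        return a.privateName.get() == b.privateName.get();
    }

    int main()
    {
        SymbolValue a { std::make_shared<PrivateName>() };
        SymbolValue b { std::make_shared<PrivateName>() };
        SymbolValue aAlias = a;                    // same underlying symbol
        assert(symbolsEqual(a, aAlias));
        assert(!symbolsEqual(a, b));               // distinct symbols never compare equal
        return 0;
    }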
GPRReg rightTempGPR, GPRReg leftTemp2GPR, GPRReg rightTemp2GPR, + JITCompiler::JumpList fastTrue, JITCompiler::JumpList fastFalse) +{ JITCompiler::JumpList trueCase; JITCompiler::JumpList falseCase; JITCompiler::JumpList slowCase; - speculateString(node->child1(), leftGPR); - - // It's safe to branch around the type check below, since proving that the values are - // equal does indeed prove that the right value is a string. - trueCase.append(m_jit.branchPtr(MacroAssembler::Equal, leftGPR, rightGPR)); - - speculateString(node->child2(), rightGPR); + trueCase.append(fastTrue); + falseCase.append(fastFalse); m_jit.load32(MacroAssembler::Address(leftGPR, JSString::offsetOfLength()), lengthGPR); @@ -3839,48 +4977,101 @@ void SpeculativeJIT::compileStringEquality(Node* node) m_jit.branchTest32(MacroAssembler::NonZero, lengthGPR).linkTo(loop, &m_jit); trueCase.link(&m_jit); -#if USE(JSVALUE64) - m_jit.move(TrustedImm64(ValueTrue), leftTempGPR); -#else - m_jit.move(TrustedImm32(true), leftTempGPR); -#endif + moveTrueTo(leftTempGPR); JITCompiler::Jump done = m_jit.jump(); falseCase.link(&m_jit); -#if USE(JSVALUE64) - m_jit.move(TrustedImm64(ValueFalse), leftTempGPR); -#else - m_jit.move(TrustedImm32(false), leftTempGPR); -#endif + moveFalseTo(leftTempGPR); done.link(&m_jit); addSlowPathGenerator( slowPathCall( slowCase, this, operationCompareStringEq, leftTempGPR, leftGPR, rightGPR)); -#if USE(JSVALUE64) - jsValueResult(leftTempGPR, node, DataFormatJSBoolean); -#else - booleanResult(leftTempGPR, node); -#endif + blessedBooleanResult(leftTempGPR, node); } -void SpeculativeJIT::compileStringIdentEquality(Node* node) +void SpeculativeJIT::compileStringEquality(Node* node) { SpeculateCellOperand left(this, node->child1()); SpeculateCellOperand right(this, node->child2()); + GPRTemporary length(this); GPRTemporary leftTemp(this); GPRTemporary rightTemp(this); + GPRTemporary leftTemp2(this, Reuse, left); + GPRTemporary rightTemp2(this, Reuse, right); GPRReg leftGPR = left.gpr(); GPRReg rightGPR = right.gpr(); + GPRReg lengthGPR = length.gpr(); GPRReg leftTempGPR = leftTemp.gpr(); GPRReg rightTempGPR = rightTemp.gpr(); + GPRReg leftTemp2GPR = leftTemp2.gpr(); + GPRReg rightTemp2GPR = rightTemp2.gpr(); + + speculateString(node->child1(), leftGPR); + + // It's safe to branch around the type check below, since proving that the values are + // equal does indeed prove that the right value is a string. 
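The string equality code above is a layered fast path: identical cells are equal, differing lengths are unequal, and only then are characters compared, with operationCompareStringEq handling anything the inline loop cannot (such as ropes). A compact standalone model of the same ordering (illustrative only):

    #include <cassert>
    #include <cstring>
    #include <string>

    static bool stringEqualsFastPath(const std::string* left, const std::string* right)
    {
        if (left == right)
            return true;                              // same cell
        if (left->size() != right->size())
            return false;                             // length check before any character work
        return std::memcmp(left->data(), right->data(), left->size()) == 0;
    }

    int main()
    {
        std::string a = "hello", b = "hello", c = "help!";
        assert(stringEqualsFastPath(&a, &a));
        assert(stringEqualsFastPath(&a, &b));
        assert(!stringEqualsFastPath(&a, &c));
        return 0;
    }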
+ JITCompiler::Jump fastTrue = m_jit.branchPtr(MacroAssembler::Equal, leftGPR, rightGPR); + + speculateString(node->child2(), rightGPR); + + compileStringEquality( + node, leftGPR, rightGPR, lengthGPR, leftTempGPR, rightTempGPR, leftTemp2GPR, + rightTemp2GPR, fastTrue, JITCompiler::Jump()); +} - JITCompiler::JumpList trueCase; - JITCompiler::JumpList falseCase; +void SpeculativeJIT::compileStringToUntypedEquality(Node* node, Edge stringEdge, Edge untypedEdge) +{ + SpeculateCellOperand left(this, stringEdge); + JSValueOperand right(this, untypedEdge, ManualOperandSpeculation); + GPRTemporary length(this); + GPRTemporary leftTemp(this); + GPRTemporary rightTemp(this); + GPRTemporary leftTemp2(this, Reuse, left); + GPRTemporary rightTemp2(this); + + GPRReg leftGPR = left.gpr(); + JSValueRegs rightRegs = right.jsValueRegs(); + GPRReg lengthGPR = length.gpr(); + GPRReg leftTempGPR = leftTemp.gpr(); + GPRReg rightTempGPR = rightTemp.gpr(); + GPRReg leftTemp2GPR = leftTemp2.gpr(); + GPRReg rightTemp2GPR = rightTemp2.gpr(); + + speculateString(stringEdge, leftGPR); + + JITCompiler::JumpList fastTrue; + JITCompiler::JumpList fastFalse; + + fastFalse.append(m_jit.branchIfNotCell(rightRegs)); + + // It's safe to branch around the type check below, since proving that the values are + // equal does indeed prove that the right value is a string. + fastTrue.append(m_jit.branchPtr( + MacroAssembler::Equal, leftGPR, rightRegs.payloadGPR())); + + fastFalse.append(m_jit.branchIfNotString(rightRegs.payloadGPR())); + + compileStringEquality( + node, leftGPR, rightRegs.payloadGPR(), lengthGPR, leftTempGPR, rightTempGPR, leftTemp2GPR, + rightTemp2GPR, fastTrue, fastFalse); +} + +void SpeculativeJIT::compileStringIdentEquality(Node* node) +{ + SpeculateCellOperand left(this, node->child1()); + SpeculateCellOperand right(this, node->child2()); + GPRTemporary leftTemp(this); + GPRTemporary rightTemp(this); + GPRReg leftGPR = left.gpr(); + GPRReg rightGPR = right.gpr(); + GPRReg leftTempGPR = leftTemp.gpr(); + GPRReg rightTempGPR = rightTemp.gpr(); + speculateString(node->child1(), leftGPR); speculateString(node->child2(), rightGPR); @@ -3889,12 +5080,35 @@ void SpeculativeJIT::compileStringIdentEquality(Node* node) m_jit.comparePtr(MacroAssembler::Equal, leftTempGPR, rightTempGPR, leftTempGPR); -#if USE(JSVALUE64) - m_jit.or32(TrustedImm32(ValueFalse), leftTempGPR); - jsValueResult(leftTempGPR, node, DataFormatJSBoolean); -#else - booleanResult(leftTempGPR, node); -#endif + unblessedBooleanResult(leftTempGPR, node); +} + +void SpeculativeJIT::compileStringIdentToNotStringVarEquality( + Node* node, Edge stringEdge, Edge notStringVarEdge) +{ + SpeculateCellOperand left(this, stringEdge); + JSValueOperand right(this, notStringVarEdge, ManualOperandSpeculation); + GPRTemporary leftTemp(this); + GPRTemporary rightTemp(this); + GPRReg leftTempGPR = leftTemp.gpr(); + GPRReg rightTempGPR = rightTemp.gpr(); + GPRReg leftGPR = left.gpr(); + JSValueRegs rightRegs = right.jsValueRegs(); + + speculateString(stringEdge, leftGPR); + speculateStringIdentAndLoadStorage(stringEdge, leftGPR, leftTempGPR); + + moveFalseTo(rightTempGPR); + JITCompiler::JumpList notString; + notString.append(m_jit.branchIfNotCell(rightRegs)); + notString.append(m_jit.branchIfNotString(rightRegs.payloadGPR())); + + speculateStringIdentAndLoadStorage(notStringVarEdge, rightRegs.payloadGPR(), rightTempGPR); + + m_jit.comparePtr(MacroAssembler::Equal, leftTempGPR, rightTempGPR, rightTempGPR); + notString.link(&m_jit); + + 
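compileStringIdentEquality can get away with a single comparePtr because StringIdent operands are atomized: equal contents are guaranteed to share one StringImpl. A sketch of that interning idea with an illustrative atomize() helper:

    #include <cassert>
    #include <string>
    #include <unordered_set>

    // One canonical object per distinct content; element addresses stay stable.
    static const std::string* atomize(const std::string& s)
    {
        static std::unordered_set<std::string> table;
        return &*table.insert(s).first;
    }

    int main()
    {
        const std::string* a = atomize("length");
        const std::string* b = atomize(std::string("len") + "gth");
        const std::string* c = atomize("size");
        assert(a == b);     // same contents, same pointer
        assert(a != c);
        return 0;
    }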
unblessedBooleanResult(rightTempGPR, node); } void SpeculativeJIT::compileStringZeroLength(Node* node) @@ -3911,12 +5125,61 @@ void SpeculativeJIT::compileStringZeroLength(Node* node) // Fetch the length field from the string object. m_jit.test32(MacroAssembler::Zero, MacroAssembler::Address(strGPR, JSString::offsetOfLength()), MacroAssembler::TrustedImm32(-1), eqGPR); -#if USE(JSVALUE64) - m_jit.or32(TrustedImm32(ValueFalse), eqGPR); - jsValueResult(eqGPR, node, DataFormatJSBoolean); -#else - booleanResult(eqGPR, node); -#endif + unblessedBooleanResult(eqGPR, node); +} + +void SpeculativeJIT::compileLogicalNotStringOrOther(Node* node) +{ + JSValueOperand value(this, node->child1(), ManualOperandSpeculation); + GPRTemporary temp(this); + JSValueRegs valueRegs = value.jsValueRegs(); + GPRReg tempGPR = temp.gpr(); + + JITCompiler::Jump notCell = m_jit.branchIfNotCell(valueRegs); + GPRReg cellGPR = valueRegs.payloadGPR(); + DFG_TYPE_CHECK( + valueRegs, node->child1(), (~SpecCell) | SpecString, m_jit.branchIfNotString(cellGPR)); + m_jit.test32( + JITCompiler::Zero, JITCompiler::Address(cellGPR, JSString::offsetOfLength()), + JITCompiler::TrustedImm32(-1), tempGPR); + JITCompiler::Jump done = m_jit.jump(); + notCell.link(&m_jit); + DFG_TYPE_CHECK( + valueRegs, node->child1(), SpecCell | SpecOther, m_jit.branchIfNotOther(valueRegs, tempGPR)); + m_jit.move(TrustedImm32(1), tempGPR); + done.link(&m_jit); + + unblessedBooleanResult(tempGPR, node); +} + +void SpeculativeJIT::emitStringBranch(Edge nodeUse, BasicBlock* taken, BasicBlock* notTaken) +{ + SpeculateCellOperand str(this, nodeUse); + speculateString(nodeUse, str.gpr()); + branchTest32(JITCompiler::NonZero, MacroAssembler::Address(str.gpr(), JSString::offsetOfLength()), taken); + jump(notTaken); + noResult(m_currentNode); +} + +void SpeculativeJIT::emitStringOrOtherBranch(Edge nodeUse, BasicBlock* taken, BasicBlock* notTaken) +{ + JSValueOperand value(this, nodeUse, ManualOperandSpeculation); + GPRTemporary temp(this); + JSValueRegs valueRegs = value.jsValueRegs(); + GPRReg tempGPR = temp.gpr(); + + JITCompiler::Jump notCell = m_jit.branchIfNotCell(valueRegs); + GPRReg cellGPR = valueRegs.payloadGPR(); + DFG_TYPE_CHECK(valueRegs, nodeUse, (~SpecCell) | SpecString, m_jit.branchIfNotString(cellGPR)); + branchTest32( + JITCompiler::Zero, JITCompiler::Address(cellGPR, JSString::offsetOfLength()), + JITCompiler::TrustedImm32(-1), notTaken); + jump(taken, ForceJump); + notCell.link(&m_jit); + DFG_TYPE_CHECK( + valueRegs, nodeUse, SpecCell | SpecOther, m_jit.branchIfNotOther(valueRegs, tempGPR)); + jump(notTaken); + noResult(m_currentNode); } void SpeculativeJIT::compileConstantStoragePointer(Node* node) @@ -3949,9 +5212,11 @@ void SpeculativeJIT::compileGetIndexedPropertyStorage(Node* node) default: ASSERT(isTypedView(node->arrayMode().typedArrayType())); - m_jit.loadPtr( - MacroAssembler::Address(baseReg, JSArrayBufferView::offsetOfVector()), - storageReg); + + JITCompiler::Jump fail = m_jit.loadTypedArrayVector(baseReg, storageReg); + + addSlowPathGenerator( + slowPathCall(fail, this, operationGetArrayBufferVector, storageReg, baseReg)); break; } @@ -3972,9 +5237,15 @@ void SpeculativeJIT::compileGetTypedArrayByteOffset(Node* node) MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfMode()), TrustedImm32(WastefulTypedArray)); - + m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), dataGPR); + m_jit.removeSpaceBits(dataGPR); m_jit.loadPtr(MacroAssembler::Address(baseGPR, 
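compileStringZeroLength and the string branches above all reduce ToBoolean of a string to a test of the length field; the characters never matter. In plain terms:

    #include <cassert>
    #include <string>

    static bool jsToBooleanForString(const std::string& s)
    {
        return !s.empty();
    }

    int main()
    {
        assert(!jsToBooleanForString(""));     // "" is falsy
        assert(jsToBooleanForString("0"));     // "0" is truthy, unlike the number 0
        assert(jsToBooleanForString(" "));     // any non-empty string is truthy
        return 0;
    }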
JSArrayBufferView::offsetOfVector()), vectorGPR); + JITCompiler::JumpList vectorReady; + vectorReady.append(m_jit.branchIfToSpace(vectorGPR)); + vectorReady.append(m_jit.branchIfNotFastTypedArray(baseGPR)); + m_jit.removeSpaceBits(vectorGPR); + vectorReady.link(&m_jit); m_jit.loadPtr(MacroAssembler::Address(dataGPR, Butterfly::offsetOfArrayBuffer()), dataGPR); m_jit.loadPtr(MacroAssembler::Address(dataGPR, ArrayBuffer::offsetOfData()), dataGPR); m_jit.subPtr(dataGPR, vectorGPR); @@ -3985,11 +5256,11 @@ void SpeculativeJIT::compileGetTypedArrayByteOffset(Node* node) m_jit.move(TrustedImmPtr(0), vectorGPR); done.link(&m_jit); - + int32Result(vectorGPR, node); } -void SpeculativeJIT::compileGetByValOnArguments(Node* node) +void SpeculativeJIT::compileGetByValOnDirectArguments(Node* node) { SpeculateCellOperand base(this, node->child1()); SpeculateStrictInt32Operand property(this, node->child2()); @@ -3997,87 +5268,134 @@ void SpeculativeJIT::compileGetByValOnArguments(Node* node) #if USE(JSVALUE32_64) GPRTemporary resultTag(this); #endif - GPRTemporary scratch(this); GPRReg baseReg = base.gpr(); GPRReg propertyReg = property.gpr(); GPRReg resultReg = result.gpr(); #if USE(JSVALUE32_64) GPRReg resultTagReg = resultTag.gpr(); + JSValueRegs resultRegs = JSValueRegs(resultTagReg, resultReg); +#else + JSValueRegs resultRegs = JSValueRegs(resultReg); #endif - GPRReg scratchReg = scratch.gpr(); if (!m_compileOkay) return; - - ASSERT(ArrayMode(Array::Arguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))); - // Two really lame checks. - speculationCheck( - Uncountable, JSValueSource(), 0, - m_jit.branch32( - MacroAssembler::AboveOrEqual, propertyReg, - MacroAssembler::Address(baseReg, Arguments::offsetOfNumArguments()))); + ASSERT(ArrayMode(Array::DirectArguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))); + speculationCheck( - Uncountable, JSValueSource(), 0, + ExoticObjectMode, JSValueSource(), 0, m_jit.branchTestPtr( MacroAssembler::NonZero, - MacroAssembler::Address( - baseReg, Arguments::offsetOfSlowArgumentData()))); - - m_jit.move(propertyReg, resultReg); - m_jit.signExtend32ToPtr(resultReg, resultReg); - m_jit.loadPtr( - MacroAssembler::Address(baseReg, Arguments::offsetOfRegisters()), - scratchReg); + MacroAssembler::Address(baseReg, DirectArguments::offsetOfOverrides()))); + speculationCheck( + ExoticObjectMode, JSValueSource(), 0, + m_jit.branch32( + MacroAssembler::AboveOrEqual, propertyReg, + MacroAssembler::Address(baseReg, DirectArguments::offsetOfLength()))); -#if USE(JSVALUE32_64) - m_jit.load32( + m_jit.loadValue( MacroAssembler::BaseIndex( - scratchReg, resultReg, MacroAssembler::TimesEight, - CallFrame::thisArgumentOffset() * sizeof(Register) + sizeof(Register) + - OBJECT_OFFSETOF(JSValue, u.asBits.tag)), - resultTagReg); - m_jit.load32( - MacroAssembler::BaseIndex( - scratchReg, resultReg, MacroAssembler::TimesEight, - CallFrame::thisArgumentOffset() * sizeof(Register) + sizeof(Register) + - OBJECT_OFFSETOF(JSValue, u.asBits.payload)), - resultReg); - jsValueResult(resultTagReg, resultReg, node); -#else - m_jit.load64( - MacroAssembler::BaseIndex( - scratchReg, resultReg, MacroAssembler::TimesEight, - CallFrame::thisArgumentOffset() * sizeof(Register) + sizeof(Register)), - resultReg); - jsValueResult(resultReg, node); -#endif + baseReg, propertyReg, MacroAssembler::TimesEight, DirectArguments::storageOffset()), + resultRegs); + + jsValueResult(resultRegs, node); } -void SpeculativeJIT::compileGetArgumentsLength(Node* node) 
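The byteOffset computation above subtracts the ArrayBuffer's data pointer from the view's vector pointer, falling back to 0 for views that do not wrap a wasteful buffer. A standalone sketch of that arithmetic with an illustrative struct:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct BufferView {
        const uint8_t* bufferData;   // base of the backing buffer's storage
        const uint8_t* vector;       // first element this view addresses
    };

    static std::size_t byteOffset(const BufferView& view)
    {
        return static_cast<std::size_t>(view.vector - view.bufferData);
    }

    int main()
    {
        std::vector<uint8_t> storage(64);
        BufferView view { storage.data(), storage.data() + 16 };   // like new Uint8Array(buffer, 16)
        assert(byteOffset(view) == 16);
        return 0;
    }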
+void SpeculativeJIT::compileGetByValOnScopedArguments(Node* node) { SpeculateCellOperand base(this, node->child1()); - GPRTemporary result(this, Reuse, base); + SpeculateStrictInt32Operand property(this, node->child2()); + GPRTemporary result(this); +#if USE(JSVALUE32_64) + GPRTemporary resultTag(this); +#endif + GPRTemporary scratch(this); + GPRTemporary scratch2(this); GPRReg baseReg = base.gpr(); + GPRReg propertyReg = property.gpr(); GPRReg resultReg = result.gpr(); +#if USE(JSVALUE32_64) + GPRReg resultTagReg = resultTag.gpr(); + JSValueRegs resultRegs = JSValueRegs(resultTagReg, resultReg); +#else + JSValueRegs resultRegs = JSValueRegs(resultReg); +#endif + GPRReg scratchReg = scratch.gpr(); + GPRReg scratch2Reg = scratch2.gpr(); if (!m_compileOkay) return; - ASSERT(ArrayMode(Array::Arguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))); + ASSERT(ArrayMode(Array::ScopedArguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))); speculationCheck( - Uncountable, JSValueSource(), 0, - m_jit.branchTest8( - MacroAssembler::NonZero, - MacroAssembler::Address(baseReg, Arguments::offsetOfOverrodeLength()))); + ExoticObjectMode, JSValueSource(), nullptr, + m_jit.branch32( + MacroAssembler::AboveOrEqual, propertyReg, + MacroAssembler::Address(baseReg, ScopedArguments::offsetOfTotalLength()))); + m_jit.loadPtr(MacroAssembler::Address(baseReg, ScopedArguments::offsetOfTable()), scratchReg); m_jit.load32( - MacroAssembler::Address(baseReg, Arguments::offsetOfNumArguments()), - resultReg); - int32Result(resultReg, node); + MacroAssembler::Address(scratchReg, ScopedArgumentsTable::offsetOfLength()), scratch2Reg); + + MacroAssembler::Jump overflowArgument = m_jit.branch32( + MacroAssembler::AboveOrEqual, propertyReg, scratch2Reg); + + m_jit.loadPtr(MacroAssembler::Address(baseReg, ScopedArguments::offsetOfScope()), scratch2Reg); + + m_jit.loadPtr( + MacroAssembler::Address(scratchReg, ScopedArgumentsTable::offsetOfArguments()), + scratchReg); + m_jit.load32( + MacroAssembler::BaseIndex(scratchReg, propertyReg, MacroAssembler::TimesFour), + scratchReg); + + speculationCheck( + ExoticObjectMode, JSValueSource(), nullptr, + m_jit.branch32( + MacroAssembler::Equal, scratchReg, TrustedImm32(ScopeOffset::invalidOffset))); + + m_jit.loadValue( + MacroAssembler::BaseIndex( + scratch2Reg, propertyReg, MacroAssembler::TimesEight, + JSEnvironmentRecord::offsetOfVariables()), + resultRegs); + + MacroAssembler::Jump done = m_jit.jump(); + overflowArgument.link(&m_jit); + + m_jit.sub32(propertyReg, scratch2Reg); + m_jit.neg32(scratch2Reg); + + m_jit.loadValue( + MacroAssembler::BaseIndex( + baseReg, scratch2Reg, MacroAssembler::TimesEight, + ScopedArguments::overflowStorageOffset()), + resultRegs); + speculationCheck(ExoticObjectMode, JSValueSource(), nullptr, m_jit.branchIfEmpty(resultRegs)); + + done.link(&m_jit); + + jsValueResult(resultRegs, node); +} + +void SpeculativeJIT::compileGetScope(Node* node) +{ + SpeculateCellOperand function(this, node->child1()); + GPRTemporary result(this, Reuse, function); + m_jit.loadPtr(JITCompiler::Address(function.gpr(), JSFunction::offsetOfScopeChain()), result.gpr()); + cellResult(result.gpr(), node); +} + +void SpeculativeJIT::compileSkipScope(Node* node) +{ + SpeculateCellOperand scope(this, node->child1()); + GPRTemporary result(this, Reuse, scope); + m_jit.loadPtr(JITCompiler::Address(scope.gpr(), JSScope::offsetOfNext()), result.gpr()); + cellResult(result.gpr(), node); } void 
SpeculativeJIT::compileGetArrayLength(Node* node) @@ -4117,12 +5435,56 @@ void SpeculativeJIT::compileGetArrayLength(Node* node) int32Result(resultGPR, node); break; } - case Array::Arguments: { - compileGetArgumentsLength(node); + case Array::DirectArguments: { + SpeculateCellOperand base(this, node->child1()); + GPRTemporary result(this, Reuse, base); + + GPRReg baseReg = base.gpr(); + GPRReg resultReg = result.gpr(); + + if (!m_compileOkay) + return; + + ASSERT(ArrayMode(Array::DirectArguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))); + + speculationCheck( + ExoticObjectMode, JSValueSource(), 0, + m_jit.branchTestPtr( + MacroAssembler::NonZero, + MacroAssembler::Address(baseReg, DirectArguments::offsetOfOverrides()))); + + m_jit.load32( + MacroAssembler::Address(baseReg, DirectArguments::offsetOfLength()), resultReg); + + int32Result(resultReg, node); + break; + } + case Array::ScopedArguments: { + SpeculateCellOperand base(this, node->child1()); + GPRTemporary result(this, Reuse, base); + + GPRReg baseReg = base.gpr(); + GPRReg resultReg = result.gpr(); + + if (!m_compileOkay) + return; + + ASSERT(ArrayMode(Array::ScopedArguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))); + + speculationCheck( + ExoticObjectMode, JSValueSource(), 0, + m_jit.branchTest8( + MacroAssembler::NonZero, + MacroAssembler::Address(baseReg, ScopedArguments::offsetOfOverrodeThings()))); + + m_jit.load32( + MacroAssembler::Address(baseReg, ScopedArguments::offsetOfTotalLength()), resultReg); + + int32Result(resultReg, node); break; } default: { - ASSERT(isTypedView(node->arrayMode().typedArrayType())); + ASSERT(node->arrayMode().isSomeTypedArrayView()); SpeculateCellOperand base(this, node->child1()); GPRTemporary result(this, Reuse, base); GPRReg baseGPR = base.gpr(); @@ -4133,78 +5495,723 @@ void SpeculativeJIT::compileGetArrayLength(Node* node) } } } -void SpeculativeJIT::compileNewFunctionNoCheck(Node* node) +void SpeculativeJIT::compileCheckIdent(Node* node) +{ + SpeculateCellOperand operand(this, node->child1()); + UniquedStringImpl* uid = node->uidOperand(); + if (uid->isSymbol()) { + speculateSymbol(node->child1(), operand.gpr()); + speculationCheck( + BadIdent, JSValueSource(), nullptr, + m_jit.branchPtr( + JITCompiler::NotEqual, + JITCompiler::Address(operand.gpr(), Symbol::offsetOfPrivateName()), + TrustedImmPtr(uid))); + } else { + speculateString(node->child1(), operand.gpr()); + speculateStringIdent(node->child1(), operand.gpr()); + speculationCheck( + BadIdent, JSValueSource(), nullptr, + m_jit.branchPtr( + JITCompiler::NotEqual, + JITCompiler::Address(operand.gpr(), JSString::offsetOfValue()), + TrustedImmPtr(uid))); + } + noResult(node); +} + +template <typename ClassType> void SpeculativeJIT::compileNewFunctionCommon(GPRReg resultGPR, Structure* structure, GPRReg scratch1GPR, GPRReg scratch2GPR, GPRReg scopeGPR, MacroAssembler::JumpList& slowPath, size_t size, FunctionExecutable* executable, ptrdiff_t offsetOfScopeChain, ptrdiff_t offsetOfExecutable, ptrdiff_t offsetOfRareData) +{ + emitAllocateJSObjectWithKnownSize<ClassType>(resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratch1GPR, scratch2GPR, slowPath, size); + + m_jit.storePtr(scopeGPR, JITCompiler::Address(resultGPR, offsetOfScopeChain)); + m_jit.storePtr(TrustedImmPtr(executable), JITCompiler::Address(resultGPR, offsetOfExecutable)); + m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, offsetOfRareData)); +} + +void SpeculativeJIT::compileNewFunction(Node* 
node) +{ + NodeType nodeType = node->op(); + ASSERT(nodeType == NewFunction || nodeType == NewArrowFunction || nodeType == NewGeneratorFunction); + + SpeculateCellOperand scope(this, node->child1()); + GPRReg scopeGPR = scope.gpr(); + + FunctionExecutable* executable = node->castOperand<FunctionExecutable*>(); + + if (executable->singletonFunction()->isStillValid()) { + GPRFlushedCallResult result(this); + GPRReg resultGPR = result.gpr(); + + flushRegisters(); + + if (nodeType == NewGeneratorFunction) + callOperation(operationNewGeneratorFunction, resultGPR, scopeGPR, executable); + else + callOperation(operationNewFunction, resultGPR, scopeGPR, executable); + m_jit.exceptionCheck(); + cellResult(resultGPR, node); + return; + } + + Structure* structure = + nodeType == NewGeneratorFunction ? m_jit.graph().globalObjectFor(node->origin.semantic)->generatorFunctionStructure() : + m_jit.graph().globalObjectFor(node->origin.semantic)->functionStructure(); + + GPRTemporary result(this); + GPRTemporary scratch1(this); + GPRTemporary scratch2(this); + + GPRReg resultGPR = result.gpr(); + GPRReg scratch1GPR = scratch1.gpr(); + GPRReg scratch2GPR = scratch2.gpr(); + + JITCompiler::JumpList slowPath; + + if (nodeType == NewFunction || nodeType == NewArrowFunction) { + compileNewFunctionCommon<JSFunction>(resultGPR, structure, scratch1GPR, scratch2GPR, scopeGPR, slowPath, JSFunction::allocationSize(0), executable, JSFunction::offsetOfScopeChain(), JSFunction::offsetOfExecutable(), JSFunction::offsetOfRareData()); + + addSlowPathGenerator(slowPathCall(slowPath, this, operationNewFunctionWithInvalidatedReallocationWatchpoint, resultGPR, scopeGPR, executable)); + } + + if (nodeType == NewGeneratorFunction) { + compileNewFunctionCommon<JSGeneratorFunction>(resultGPR, structure, scratch1GPR, scratch2GPR, scopeGPR, slowPath, JSGeneratorFunction::allocationSize(0), executable, JSGeneratorFunction::offsetOfScopeChain(), JSGeneratorFunction::offsetOfExecutable(), JSGeneratorFunction::offsetOfRareData()); + + addSlowPathGenerator(slowPathCall(slowPath, this, operationNewGeneratorFunctionWithInvalidatedReallocationWatchpoint, resultGPR, scopeGPR, executable)); + } + + cellResult(resultGPR, node); +} + +void SpeculativeJIT::compileForwardVarargs(Node* node) +{ + LoadVarargsData* data = node->loadVarargsData(); + InlineCallFrame* inlineCallFrame = node->child1()->origin.semantic.inlineCallFrame; + + GPRTemporary length(this); + JSValueRegsTemporary temp(this); + GPRReg lengthGPR = length.gpr(); + JSValueRegs tempRegs = temp.regs(); + + emitGetLength(inlineCallFrame, lengthGPR, /* includeThis = */ true); + if (data->offset) + m_jit.sub32(TrustedImm32(data->offset), lengthGPR); + + speculationCheck( + VarargsOverflow, JSValueSource(), Edge(), m_jit.branch32( + MacroAssembler::Above, + lengthGPR, TrustedImm32(data->limit))); + + m_jit.store32(lengthGPR, JITCompiler::payloadFor(data->machineCount)); + + VirtualRegister sourceStart = JITCompiler::argumentsStart(inlineCallFrame) + data->offset; + VirtualRegister targetStart = data->machineStart; + + m_jit.sub32(TrustedImm32(1), lengthGPR); + + // First have a loop that fills in the undefined slots in case of an arity check failure. 
+ m_jit.move(TrustedImm32(data->mandatoryMinimum), tempRegs.payloadGPR()); + JITCompiler::Jump done = m_jit.branch32(JITCompiler::BelowOrEqual, tempRegs.payloadGPR(), lengthGPR); + + JITCompiler::Label loop = m_jit.label(); + m_jit.sub32(TrustedImm32(1), tempRegs.payloadGPR()); + m_jit.storeTrustedValue( + jsUndefined(), + JITCompiler::BaseIndex( + GPRInfo::callFrameRegister, tempRegs.payloadGPR(), JITCompiler::TimesEight, + targetStart.offset() * sizeof(EncodedJSValue))); + m_jit.branch32(JITCompiler::Above, tempRegs.payloadGPR(), lengthGPR).linkTo(loop, &m_jit); + done.link(&m_jit); + + // And then fill in the actual argument values. + done = m_jit.branchTest32(JITCompiler::Zero, lengthGPR); + + loop = m_jit.label(); + m_jit.sub32(TrustedImm32(1), lengthGPR); + m_jit.loadValue( + JITCompiler::BaseIndex( + GPRInfo::callFrameRegister, lengthGPR, JITCompiler::TimesEight, + sourceStart.offset() * sizeof(EncodedJSValue)), + tempRegs); + m_jit.storeValue( + tempRegs, + JITCompiler::BaseIndex( + GPRInfo::callFrameRegister, lengthGPR, JITCompiler::TimesEight, + targetStart.offset() * sizeof(EncodedJSValue))); + m_jit.branchTest32(JITCompiler::NonZero, lengthGPR).linkTo(loop, &m_jit); + + done.link(&m_jit); + + noResult(node); +} + +void SpeculativeJIT::compileCreateActivation(Node* node) { - GPRResult result(this); + SymbolTable* table = node->castOperand<SymbolTable*>(); + Structure* structure = m_jit.graph().globalObjectFor( + node->origin.semantic)->activationStructure(); + + SpeculateCellOperand scope(this, node->child1()); + GPRReg scopeGPR = scope.gpr(); + JSValue initializationValue = node->initializationValueForActivation(); + ASSERT(initializationValue == jsUndefined() || initializationValue == jsTDZValue()); + + if (table->singletonScope()->isStillValid()) { + GPRFlushedCallResult result(this); + GPRReg resultGPR = result.gpr(); + + flushRegisters(); + +#if USE(JSVALUE64) + callOperation(operationCreateActivationDirect, + resultGPR, structure, scopeGPR, table, TrustedImm64(JSValue::encode(initializationValue))); +#else + callOperation(operationCreateActivationDirect, + resultGPR, structure, scopeGPR, table, TrustedImm32(initializationValue.tag()), TrustedImm32(initializationValue.payload())); +#endif + m_jit.exceptionCheck(); + cellResult(resultGPR, node); + return; + } + + GPRTemporary result(this); + GPRTemporary scratch1(this); + GPRTemporary scratch2(this); + GPRReg resultGPR = result.gpr(); + GPRReg scratch1GPR = scratch1.gpr(); + GPRReg scratch2GPR = scratch2.gpr(); + + JITCompiler::JumpList slowPath; + emitAllocateJSObjectWithKnownSize<JSLexicalEnvironment>( + resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratch1GPR, scratch2GPR, + slowPath, JSLexicalEnvironment::allocationSize(table)); + + // Don't need a memory barriers since we just fast-created the activation, so the + // activation must be young. + m_jit.storePtr(scopeGPR, JITCompiler::Address(resultGPR, JSScope::offsetOfNext())); + m_jit.storePtr( + TrustedImmPtr(table), + JITCompiler::Address(resultGPR, JSLexicalEnvironment::offsetOfSymbolTable())); + + // Must initialize all members to undefined or the TDZ empty value. 
+ for (unsigned i = 0; i < table->scopeSize(); ++i) { + m_jit.storeTrustedValue( + initializationValue, + JITCompiler::Address( + resultGPR, JSLexicalEnvironment::offsetOfVariable(ScopeOffset(i)))); + } + +#if USE(JSVALUE64) + addSlowPathGenerator( + slowPathCall( + slowPath, this, operationCreateActivationDirect, resultGPR, structure, scopeGPR, table, TrustedImm64(JSValue::encode(initializationValue)))); +#else + addSlowPathGenerator( + slowPathCall( + slowPath, this, operationCreateActivationDirect, resultGPR, structure, scopeGPR, table, TrustedImm32(initializationValue.tag()), TrustedImm32(initializationValue.payload()))); +#endif + + cellResult(resultGPR, node); +} + +void SpeculativeJIT::compileCreateDirectArguments(Node* node) +{ + // FIXME: A more effective way of dealing with the argument count and callee is to have + // them be explicit arguments to this node. + // https://bugs.webkit.org/show_bug.cgi?id=142207 + + GPRTemporary result(this); + GPRTemporary scratch1(this); + GPRTemporary scratch2(this); + GPRTemporary length; + GPRReg resultGPR = result.gpr(); + GPRReg scratch1GPR = scratch1.gpr(); + GPRReg scratch2GPR = scratch2.gpr(); + GPRReg lengthGPR = InvalidGPRReg; + JSValueRegs valueRegs = JSValueRegs::withTwoAvailableRegs(scratch1GPR, scratch2GPR); + + unsigned minCapacity = m_jit.graph().baselineCodeBlockFor(node->origin.semantic)->numParameters() - 1; + + unsigned knownLength; + bool lengthIsKnown; // if false, lengthGPR will have the length. + if (node->origin.semantic.inlineCallFrame + && !node->origin.semantic.inlineCallFrame->isVarargs()) { + knownLength = node->origin.semantic.inlineCallFrame->arguments.size() - 1; + lengthIsKnown = true; + } else { + knownLength = UINT_MAX; + lengthIsKnown = false; + + GPRTemporary realLength(this); + length.adopt(realLength); + lengthGPR = length.gpr(); + + VirtualRegister argumentCountRegister; + if (!node->origin.semantic.inlineCallFrame) + argumentCountRegister = VirtualRegister(JSStack::ArgumentCount); + else + argumentCountRegister = node->origin.semantic.inlineCallFrame->argumentCountRegister; + m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR); + m_jit.sub32(TrustedImm32(1), lengthGPR); + } + + Structure* structure = + m_jit.graph().globalObjectFor(node->origin.semantic)->directArgumentsStructure(); + + // Use a different strategy for allocating the object depending on whether we know its + // size statically. 
+ JITCompiler::JumpList slowPath; + if (lengthIsKnown) { + emitAllocateJSObjectWithKnownSize<DirectArguments>( + resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratch1GPR, scratch2GPR, + slowPath, DirectArguments::allocationSize(std::max(knownLength, minCapacity))); + + m_jit.store32( + TrustedImm32(knownLength), + JITCompiler::Address(resultGPR, DirectArguments::offsetOfLength())); + } else { + JITCompiler::Jump tooFewArguments; + if (minCapacity) { + tooFewArguments = + m_jit.branch32(JITCompiler::Below, lengthGPR, TrustedImm32(minCapacity)); + } + m_jit.lshift32(lengthGPR, TrustedImm32(3), scratch1GPR); + m_jit.add32(TrustedImm32(DirectArguments::storageOffset()), scratch1GPR); + if (minCapacity) { + JITCompiler::Jump done = m_jit.jump(); + tooFewArguments.link(&m_jit); + m_jit.move(TrustedImm32(DirectArguments::allocationSize(minCapacity)), scratch1GPR); + done.link(&m_jit); + } + + emitAllocateVariableSizedJSObject<DirectArguments>( + resultGPR, TrustedImmPtr(structure), scratch1GPR, scratch1GPR, scratch2GPR, + slowPath); + + m_jit.store32( + lengthGPR, JITCompiler::Address(resultGPR, DirectArguments::offsetOfLength())); + } + + m_jit.store32( + TrustedImm32(minCapacity), + JITCompiler::Address(resultGPR, DirectArguments::offsetOfMinCapacity())); + + m_jit.storePtr( + TrustedImmPtr(0), JITCompiler::Address(resultGPR, DirectArguments::offsetOfOverrides())); + + if (lengthIsKnown) { + addSlowPathGenerator( + slowPathCall( + slowPath, this, operationCreateDirectArguments, resultGPR, structure, + knownLength, minCapacity)); + } else { + auto generator = std::make_unique<CallCreateDirectArgumentsSlowPathGenerator>( + slowPath, this, resultGPR, structure, lengthGPR, minCapacity); + addSlowPathGenerator(WTFMove(generator)); + } + + if (node->origin.semantic.inlineCallFrame) { + if (node->origin.semantic.inlineCallFrame->isClosureCall) { + m_jit.loadPtr( + JITCompiler::addressFor( + node->origin.semantic.inlineCallFrame->calleeRecovery.virtualRegister()), + scratch1GPR); + } else { + m_jit.move( + TrustedImmPtr( + node->origin.semantic.inlineCallFrame->calleeRecovery.constant().asCell()), + scratch1GPR); + } + } else + m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), scratch1GPR); + + // Don't need a memory barriers since we just fast-created the activation, so the + // activation must be young. 
+ m_jit.storePtr( + scratch1GPR, JITCompiler::Address(resultGPR, DirectArguments::offsetOfCallee())); + + VirtualRegister start = m_jit.argumentsStart(node->origin.semantic); + if (lengthIsKnown) { + for (unsigned i = 0; i < std::max(knownLength, minCapacity); ++i) { + m_jit.loadValue(JITCompiler::addressFor(start + i), valueRegs); + m_jit.storeValue( + valueRegs, JITCompiler::Address(resultGPR, DirectArguments::offsetOfSlot(i))); + } + } else { + JITCompiler::Jump done; + if (minCapacity) { + JITCompiler::Jump startLoop = m_jit.branch32( + JITCompiler::AboveOrEqual, lengthGPR, TrustedImm32(minCapacity)); + m_jit.move(TrustedImm32(minCapacity), lengthGPR); + startLoop.link(&m_jit); + } else + done = m_jit.branchTest32(MacroAssembler::Zero, lengthGPR); + JITCompiler::Label loop = m_jit.label(); + m_jit.sub32(TrustedImm32(1), lengthGPR); + m_jit.loadValue( + JITCompiler::BaseIndex( + GPRInfo::callFrameRegister, lengthGPR, JITCompiler::TimesEight, + start.offset() * static_cast<int>(sizeof(Register))), + valueRegs); + m_jit.storeValue( + valueRegs, + JITCompiler::BaseIndex( + resultGPR, lengthGPR, JITCompiler::TimesEight, + DirectArguments::storageOffset())); + m_jit.branchTest32(MacroAssembler::NonZero, lengthGPR).linkTo(loop, &m_jit); + if (done.isSet()) + done.link(&m_jit); + } + + cellResult(resultGPR, node); +} + +void SpeculativeJIT::compileGetFromArguments(Node* node) +{ + SpeculateCellOperand arguments(this, node->child1()); + JSValueRegsTemporary result(this); + + GPRReg argumentsGPR = arguments.gpr(); + JSValueRegs resultRegs = result.regs(); + + m_jit.loadValue(JITCompiler::Address(argumentsGPR, DirectArguments::offsetOfSlot(node->capturedArgumentsOffset().offset())), resultRegs); + jsValueResult(resultRegs, node); +} + +void SpeculativeJIT::compilePutToArguments(Node* node) +{ + SpeculateCellOperand arguments(this, node->child1()); + JSValueOperand value(this, node->child2()); + + GPRReg argumentsGPR = arguments.gpr(); + JSValueRegs valueRegs = value.jsValueRegs(); + + m_jit.storeValue(valueRegs, JITCompiler::Address(argumentsGPR, DirectArguments::offsetOfSlot(node->capturedArgumentsOffset().offset()))); + noResult(node); +} + +void SpeculativeJIT::compileCreateScopedArguments(Node* node) +{ + SpeculateCellOperand scope(this, node->child1()); + GPRReg scopeGPR = scope.gpr(); + + GPRFlushedCallResult result(this); GPRReg resultGPR = result.gpr(); flushRegisters(); - callOperation( - operationNewFunctionNoCheck, resultGPR, m_jit.codeBlock()->functionDecl(node->functionDeclIndex())); + + // We set up the arguments ourselves, because we have the whole register file and we can + // set them up directly into the argument registers. This also means that we don't have to + // invent a four-argument-register shuffle. + + // Arguments: 0:exec, 1:structure, 2:start, 3:length, 4:callee, 5:scope + + // Do the scopeGPR first, since it might alias an argument register. + m_jit.setupArgument(5, [&] (GPRReg destGPR) { m_jit.move(scopeGPR, destGPR); }); + + // These other things could be done in any order. 
+ m_jit.setupArgument(4, [&] (GPRReg destGPR) { emitGetCallee(node->origin.semantic, destGPR); }); + m_jit.setupArgument(3, [&] (GPRReg destGPR) { emitGetLength(node->origin.semantic, destGPR); }); + m_jit.setupArgument(2, [&] (GPRReg destGPR) { emitGetArgumentStart(node->origin.semantic, destGPR); }); + m_jit.setupArgument( + 1, [&] (GPRReg destGPR) { + m_jit.move( + TrustedImmPtr(m_jit.globalObjectFor(node->origin.semantic)->scopedArgumentsStructure()), + destGPR); + }); + m_jit.setupArgument(0, [&] (GPRReg destGPR) { m_jit.move(GPRInfo::callFrameRegister, destGPR); }); + + appendCallSetResult(operationCreateScopedArguments, resultGPR); + m_jit.exceptionCheck(); + cellResult(resultGPR, node); } -void SpeculativeJIT::compileNewFunctionExpression(Node* node) +void SpeculativeJIT::compileCreateClonedArguments(Node* node) { - GPRResult result(this); + GPRFlushedCallResult result(this); GPRReg resultGPR = result.gpr(); flushRegisters(); - callOperation( - operationNewFunctionNoCheck, - resultGPR, - m_jit.codeBlock()->functionExpr(node->functionExprIndex())); + + // We set up the arguments ourselves, because we have the whole register file and we can + // set them up directly into the argument registers. + + // Arguments: 0:exec, 1:structure, 2:start, 3:length, 4:callee + m_jit.setupArgument(4, [&] (GPRReg destGPR) { emitGetCallee(node->origin.semantic, destGPR); }); + m_jit.setupArgument(3, [&] (GPRReg destGPR) { emitGetLength(node->origin.semantic, destGPR); }); + m_jit.setupArgument(2, [&] (GPRReg destGPR) { emitGetArgumentStart(node->origin.semantic, destGPR); }); + m_jit.setupArgument( + 1, [&] (GPRReg destGPR) { + m_jit.move( + TrustedImmPtr( + m_jit.globalObjectFor(node->origin.semantic)->outOfBandArgumentsStructure()), + destGPR); + }); + m_jit.setupArgument(0, [&] (GPRReg destGPR) { m_jit.move(GPRInfo::callFrameRegister, destGPR); }); + + appendCallSetResult(operationCreateClonedArguments, resultGPR); + m_jit.exceptionCheck(); + cellResult(resultGPR, node); } -bool SpeculativeJIT::compileRegExpExec(Node* node) +void SpeculativeJIT::compileCopyRest(Node* node) { - unsigned branchIndexInBlock = detectPeepHoleBranch(); - if (branchIndexInBlock == UINT_MAX) - return false; - Node* branchNode = m_block->at(branchIndexInBlock); - ASSERT(node->adjustedRefCount() == 1); + ASSERT(node->op() == CopyRest); + + SpeculateCellOperand array(this, node->child1()); + GPRTemporary argumentsStart(this); + SpeculateStrictInt32Operand arrayLength(this, node->child2()); + + GPRReg arrayGPR = array.gpr(); + GPRReg argumentsStartGPR = argumentsStart.gpr(); + GPRReg arrayLengthGPR = arrayLength.gpr(); + + CCallHelpers::Jump done = m_jit.branch32(MacroAssembler::Equal, arrayLengthGPR, TrustedImm32(0)); + + emitGetArgumentStart(node->origin.semantic, argumentsStartGPR); + silentSpillAllRegisters(argumentsStartGPR); + // Arguments: 0:exec, 1:JSCell* array, 2:arguments start, 3:number of arguments to skip, 4:array length + callOperation(operationCopyRest, arrayGPR, argumentsStartGPR, Imm32(node->numberOfArgumentsToSkip()), arrayLengthGPR); + silentFillAllRegisters(argumentsStartGPR); + m_jit.exceptionCheck(); - BasicBlock* taken = branchNode->takenBlock(); - BasicBlock* notTaken = branchNode->notTakenBlock(); + done.link(&m_jit); + + noResult(node); +} + +void SpeculativeJIT::compileGetRestLength(Node* node) +{ + ASSERT(node->op() == GetRestLength); + + GPRTemporary result(this); + GPRReg resultGPR = result.gpr(); + + emitGetLength(node->origin.semantic, resultGPR); + CCallHelpers::Jump hasNonZeroLength = 
m_jit.branch32(MacroAssembler::Above, resultGPR, Imm32(node->numberOfArgumentsToSkip())); + m_jit.move(TrustedImm32(0), resultGPR); + CCallHelpers::Jump done = m_jit.jump(); + hasNonZeroLength.link(&m_jit); + if (node->numberOfArgumentsToSkip()) + m_jit.sub32(TrustedImm32(node->numberOfArgumentsToSkip()), resultGPR); + done.link(&m_jit); + int32Result(resultGPR, node); +} + +void SpeculativeJIT::compileNotifyWrite(Node* node) +{ + WatchpointSet* set = node->watchpointSet(); - bool invert = false; - if (taken == nextBlock()) { - invert = true; - BasicBlock* tmp = taken; - taken = notTaken; - notTaken = tmp; - } + JITCompiler::Jump slowCase = m_jit.branch8( + JITCompiler::NotEqual, + JITCompiler::AbsoluteAddress(set->addressOfState()), + TrustedImm32(IsInvalidated)); + + addSlowPathGenerator( + slowPathCall(slowCase, this, operationNotifyWrite, NoResult, set, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded)); + + noResult(node); +} - SpeculateCellOperand base(this, node->child1()); - SpeculateCellOperand argument(this, node->child2()); - GPRReg baseGPR = base.gpr(); - GPRReg argumentGPR = argument.gpr(); +void SpeculativeJIT::compileIsObjectOrNull(Node* node) +{ + JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); - flushRegisters(); - GPRResult result(this); - callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR); + JSValueOperand value(this, node->child1()); + JSValueRegs valueRegs = value.jsValueRegs(); + + GPRTemporary result(this); + GPRReg resultGPR = result.gpr(); + + JITCompiler::Jump isCell = m_jit.branchIfCell(valueRegs); + + JITCompiler::Jump isNull = m_jit.branchIfEqual(valueRegs, jsNull()); + JITCompiler::Jump isNonNullNonCell = m_jit.jump(); + + isCell.link(&m_jit); + JITCompiler::Jump isFunction = m_jit.branchIfFunction(valueRegs.payloadGPR()); + JITCompiler::Jump notObject = m_jit.branchIfNotObject(valueRegs.payloadGPR()); + + JITCompiler::Jump slowPath = m_jit.branchTest8( + JITCompiler::NonZero, + JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoFlagsOffset()), + TrustedImm32(MasqueradesAsUndefined | TypeOfShouldCallGetCallData)); + + isNull.link(&m_jit); + m_jit.move(TrustedImm32(1), resultGPR); + JITCompiler::Jump done = m_jit.jump(); + + isNonNullNonCell.link(&m_jit); + isFunction.link(&m_jit); + notObject.link(&m_jit); + m_jit.move(TrustedImm32(0), resultGPR); + + addSlowPathGenerator( + slowPathCall( + slowPath, this, operationObjectIsObject, resultGPR, globalObject, + valueRegs.payloadGPR())); + + done.link(&m_jit); + + unblessedBooleanResult(resultGPR, node); +} - branchTest32(invert ? 
JITCompiler::Zero : JITCompiler::NonZero, result.gpr(), taken); - jump(notTaken); +void SpeculativeJIT::compileIsFunction(Node* node) +{ + JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); + + JSValueOperand value(this, node->child1()); + JSValueRegs valueRegs = value.jsValueRegs(); + + GPRTemporary result(this); + GPRReg resultGPR = result.gpr(); + + JITCompiler::Jump notCell = m_jit.branchIfNotCell(valueRegs); + JITCompiler::Jump isFunction = m_jit.branchIfFunction(valueRegs.payloadGPR()); + JITCompiler::Jump notObject = m_jit.branchIfNotObject(valueRegs.payloadGPR()); + + JITCompiler::Jump slowPath = m_jit.branchTest8( + JITCompiler::NonZero, + JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoFlagsOffset()), + TrustedImm32(MasqueradesAsUndefined | TypeOfShouldCallGetCallData)); + + notCell.link(&m_jit); + notObject.link(&m_jit); + m_jit.move(TrustedImm32(0), resultGPR); + JITCompiler::Jump done = m_jit.jump(); + + isFunction.link(&m_jit); + m_jit.move(TrustedImm32(1), resultGPR); + + addSlowPathGenerator( + slowPathCall( + slowPath, this, operationObjectIsFunction, resultGPR, globalObject, + valueRegs.payloadGPR())); + + done.link(&m_jit); + + unblessedBooleanResult(resultGPR, node); +} - use(node->child1()); - use(node->child2()); - m_indexInBlock = branchIndexInBlock; - m_currentNode = branchNode; +void SpeculativeJIT::compileTypeOf(Node* node) +{ + JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); + + JSValueOperand value(this, node->child1()); + JSValueRegs valueRegs = value.jsValueRegs(); + + GPRTemporary result(this); + GPRReg resultGPR = result.gpr(); + + JITCompiler::JumpList done; + JITCompiler::Jump slowPath; + m_jit.emitTypeOf( + valueRegs, resultGPR, + [&] (TypeofType type, bool fallsThrough) { + m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.typeString(type)), resultGPR); + if (!fallsThrough) + done.append(m_jit.jump()); + }, + [&] (JITCompiler::Jump theSlowPath) { + slowPath = theSlowPath; + }); + done.link(&m_jit); - return true; + addSlowPathGenerator( + slowPathCall( + slowPath, this, operationTypeOfObject, resultGPR, globalObject, + valueRegs.payloadGPR())); + + cellResult(resultGPR, node); +} + +void SpeculativeJIT::compileCheckStructure(Node* node, GPRReg cellGPR, GPRReg tempGPR) +{ + ASSERT(node->structureSet().size()); + + if (node->structureSet().size() == 1) { + speculationCheck( + BadCache, JSValueSource::unboxedCell(cellGPR), 0, + m_jit.branchWeakStructure( + JITCompiler::NotEqual, + JITCompiler::Address(cellGPR, JSCell::structureIDOffset()), + node->structureSet()[0])); + } else { + std::unique_ptr<GPRTemporary> structure; + GPRReg structureGPR; + + if (tempGPR == InvalidGPRReg) { + structure = std::make_unique<GPRTemporary>(this); + structureGPR = structure->gpr(); + } else + structureGPR = tempGPR; + + m_jit.load32(JITCompiler::Address(cellGPR, JSCell::structureIDOffset()), structureGPR); + + JITCompiler::JumpList done; + + for (size_t i = 0; i < node->structureSet().size() - 1; ++i) { + done.append( + m_jit.branchWeakStructure(JITCompiler::Equal, structureGPR, node->structureSet()[i])); + } + + speculationCheck( + BadCache, JSValueSource::unboxedCell(cellGPR), 0, + m_jit.branchWeakStructure( + JITCompiler::NotEqual, structureGPR, node->structureSet().last())); + + done.link(&m_jit); + } +} + +void SpeculativeJIT::compileCheckStructure(Node* node) +{ + switch (node->child1().useKind()) { + case CellUse: + case KnownCellUse: { + SpeculateCellOperand cell(this, 
node->child1()); + compileCheckStructure(node, cell.gpr(), InvalidGPRReg); + noResult(node); + return; + } + + case CellOrOtherUse: { + JSValueOperand value(this, node->child1(), ManualOperandSpeculation); + GPRTemporary temp(this); + + JSValueRegs valueRegs = value.jsValueRegs(); + GPRReg tempGPR = temp.gpr(); + + JITCompiler::Jump cell = m_jit.branchIfCell(valueRegs); + DFG_TYPE_CHECK( + valueRegs, node->child1(), SpecCell | SpecOther, + m_jit.branchIfNotOther(valueRegs, tempGPR)); + JITCompiler::Jump done = m_jit.jump(); + cell.link(&m_jit); + compileCheckStructure(node, valueRegs.payloadGPR(), tempGPR); + done.link(&m_jit); + noResult(node); + return; + } + + default: + DFG_CRASH(m_jit.graph(), node, "Bad use kind"); + return; + } } void SpeculativeJIT::compileAllocatePropertyStorage(Node* node) { - if (node->structureTransitionData().previousStructure->couldHaveIndexingHeader()) { + if (node->transition()->previous->couldHaveIndexingHeader()) { SpeculateCellOperand base(this, node->child1()); GPRReg baseGPR = base.gpr(); flushRegisters(); - GPRResult result(this); + GPRFlushedCallResult result(this); callOperation(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity, result.gpr(), baseGPR); + m_jit.exceptionCheck(); storageResult(result.gpr(), node); return; @@ -4216,8 +6223,8 @@ void SpeculativeJIT::compileAllocatePropertyStorage(Node* node) GPRReg baseGPR = base.gpr(); GPRReg scratchGPR1 = scratch1.gpr(); - ASSERT(!node->structureTransitionData().previousStructure->outOfLineCapacity()); - ASSERT(initialOutOfLineCapacity == node->structureTransitionData().newStructure->outOfLineCapacity()); + ASSERT(!node->transition()->previous->outOfLineCapacity()); + ASSERT(initialOutOfLineCapacity == node->transition()->next->outOfLineCapacity()); JITCompiler::Jump slowPath = emitAllocateBasicStorage( @@ -4235,23 +6242,20 @@ void SpeculativeJIT::compileAllocatePropertyStorage(Node* node) void SpeculativeJIT::compileReallocatePropertyStorage(Node* node) { - size_t oldSize = node->structureTransitionData().previousStructure->outOfLineCapacity() * sizeof(JSValue); + size_t oldSize = node->transition()->previous->outOfLineCapacity() * sizeof(JSValue); size_t newSize = oldSize * outOfLineGrowthFactor; - ASSERT(newSize == node->structureTransitionData().newStructure->outOfLineCapacity() * sizeof(JSValue)); + ASSERT(newSize == node->transition()->next->outOfLineCapacity() * sizeof(JSValue)); - if (node->structureTransitionData().previousStructure->couldHaveIndexingHeader()) { + if (node->transition()->previous->couldHaveIndexingHeader()) { SpeculateCellOperand base(this, node->child1()); GPRReg baseGPR = base.gpr(); flushRegisters(); - GPRResult result(this); + GPRFlushedCallResult result(this); callOperation(operationReallocateButterflyToGrowPropertyStorage, result.gpr(), baseGPR, newSize / sizeof(JSValue)); - - MacroAssembler::Jump notNull = m_jit.branchTestPtr(MacroAssembler::NonZero, result.gpr()); - m_jit.breakpoint(); - notNull.link(&m_jit); + m_jit.exceptionCheck(); storageResult(result.gpr(), node); return; @@ -4285,6 +6289,36 @@ void SpeculativeJIT::compileReallocatePropertyStorage(Node* node) storageResult(scratchGPR1, node); } +void SpeculativeJIT::compileGetButterfly(Node* node) +{ + SpeculateCellOperand base(this, node->child1()); + GPRTemporary result(this, Reuse, base); + + GPRReg baseGPR = base.gpr(); + GPRReg resultGPR = result.gpr(); + + m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR); + + switch (node->op()) { + case GetButterfly: + 
addSlowPathGenerator( + slowPathCall( + m_jit.branchIfNotToSpace(resultGPR), + this, operationGetButterfly, resultGPR, baseGPR)); + break; + + case GetButterflyReadOnly: + m_jit.removeSpaceBits(resultGPR); + break; + + default: + DFG_CRASH(m_jit.graph(), node, "Bad node type"); + break; + } + + storageResult(resultGPR, node); +} + GPRReg SpeculativeJIT::temporaryRegisterForPutByVal(GPRTemporary& temporary, ArrayMode arrayMode) { if (!putByValWillNeedExtraRegister(arrayMode)) @@ -4295,7 +6329,7 @@ GPRReg SpeculativeJIT::temporaryRegisterForPutByVal(GPRTemporary& temporary, Arr return temporary.gpr(); } -void SpeculativeJIT::compileToStringOnCell(Node* node) +void SpeculativeJIT::compileToStringOrCallStringConstructorOnCell(Node* node) { SpeculateCellOperand op1(this, node->child1()); GPRReg op1GPR = op1.gpr(); @@ -4316,11 +6350,13 @@ void SpeculativeJIT::compileToStringOnCell(Node* node) case StringOrStringObjectUse: { GPRTemporary result(this); GPRReg resultGPR = result.gpr(); - - m_jit.loadPtr(JITCompiler::Address(op1GPR, JSCell::structureOffset()), resultGPR); - JITCompiler::Jump isString = m_jit.branchPtr( - JITCompiler::Equal, resultGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get())); - + + m_jit.load32(JITCompiler::Address(op1GPR, JSCell::structureIDOffset()), resultGPR); + JITCompiler::Jump isString = m_jit.branchStructure( + JITCompiler::Equal, + resultGPR, + m_jit.vm()->stringStructure.get()); + speculateStringObjectForStructure(node->child1(), resultGPR); m_jit.loadPtr(JITCompiler::Address(op1GPR, JSWrapperObject::internalValueCellOffset()), resultGPR); @@ -4337,7 +6373,7 @@ void SpeculativeJIT::compileToStringOnCell(Node* node) } case CellUse: { - GPRResult result(this); + GPRFlushedCallResult result(this); GPRReg resultGPR = result.gpr(); // We flush registers instead of silent spill/fill because in this mode we @@ -4346,15 +6382,18 @@ void SpeculativeJIT::compileToStringOnCell(Node* node) flushRegisters(); JITCompiler::Jump done; if (node->child1()->prediction() & SpecString) { - JITCompiler::Jump needCall = m_jit.branchPtr( - JITCompiler::NotEqual, - JITCompiler::Address(op1GPR, JSCell::structureOffset()), - TrustedImmPtr(m_jit.vm()->stringStructure.get())); + JITCompiler::Jump needCall = m_jit.branchIfNotString(op1GPR); m_jit.move(op1GPR, resultGPR); done = m_jit.jump(); needCall.link(&m_jit); } - callOperation(operationToStringOnCell, resultGPR, op1GPR); + if (node->op() == ToString) + callOperation(operationToStringOnCell, resultGPR, op1GPR); + else { + ASSERT(node->op() == CallStringConstructor); + callOperation(operationCallStringConstructorOnCell, resultGPR, op1GPR); + } + m_jit.exceptionCheck(); if (done.isSet()) done.link(&m_jit); cellResult(resultGPR, node); @@ -4408,7 +6447,7 @@ void SpeculativeJIT::compileNewStringObject(Node* node) void SpeculativeJIT::compileNewTypedArray(Node* node) { - JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin); + JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); TypedArrayType type = node->typedArrayType(); Structure* structure = globalObject->typedArrayStructure(type); @@ -4484,6 +6523,28 @@ void SpeculativeJIT::compileNewTypedArray(Node* node) cellResult(resultGPR, node); } +void SpeculativeJIT::speculateCellTypeWithoutTypeFiltering( + Edge edge, GPRReg cellGPR, JSType jsType) +{ + speculationCheck( + BadType, JSValueSource::unboxedCell(cellGPR), edge, + m_jit.branch8( + MacroAssembler::NotEqual, + MacroAssembler::Address(cellGPR, JSCell::typeInfoTypeOffset()), + 
MacroAssembler::TrustedImm32(jsType))); +} + +void SpeculativeJIT::speculateCellType( + Edge edge, GPRReg cellGPR, SpeculatedType specType, JSType jsType) +{ + DFG_TYPE_CHECK( + JSValueSource::unboxedCell(cellGPR), edge, specType, + m_jit.branch8( + MacroAssembler::NotEqual, + MacroAssembler::Address(cellGPR, JSCell::typeInfoTypeOffset()), + TrustedImm32(jsType))); +} + void SpeculativeJIT::speculateInt32(Edge edge) { if (!needsTypeCheck(edge, SpecInt32)) @@ -4492,36 +6553,66 @@ void SpeculativeJIT::speculateInt32(Edge edge) (SpeculateInt32Operand(this, edge)).gpr(); } -void SpeculativeJIT::speculateMachineInt(Edge edge) +void SpeculativeJIT::speculateNumber(Edge edge) { -#if USE(JSVALUE64) - if (!needsTypeCheck(edge, SpecMachineInt)) + if (!needsTypeCheck(edge, SpecBytecodeNumber)) return; - (SpeculateWhicheverInt52Operand(this, edge)).gpr(); -#else // USE(JSVALUE64) - UNUSED_PARAM(edge); - UNREACHABLE_FOR_PLATFORM(); -#endif // USE(JSVALUE64) + JSValueOperand value(this, edge, ManualOperandSpeculation); +#if USE(JSVALUE64) + GPRReg gpr = value.gpr(); + typeCheck( + JSValueRegs(gpr), edge, SpecBytecodeNumber, + m_jit.branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister)); +#else + GPRReg tagGPR = value.tagGPR(); + DFG_TYPE_CHECK( + value.jsValueRegs(), edge, ~SpecInt32, + m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag))); + DFG_TYPE_CHECK( + value.jsValueRegs(), edge, SpecBytecodeNumber, + m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag))); +#endif } -void SpeculativeJIT::speculateNumber(Edge edge) +void SpeculativeJIT::speculateRealNumber(Edge edge) { - if (!needsTypeCheck(edge, SpecFullNumber)) + if (!needsTypeCheck(edge, SpecBytecodeRealNumber)) return; - (SpeculateDoubleOperand(this, edge)).fpr(); + JSValueOperand op1(this, edge, ManualOperandSpeculation); + FPRTemporary result(this); + + JSValueRegs op1Regs = op1.jsValueRegs(); + FPRReg resultFPR = result.fpr(); + +#if USE(JSVALUE64) + GPRTemporary temp(this); + GPRReg tempGPR = temp.gpr(); + m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR); +#else + FPRTemporary temp(this); + FPRReg tempFPR = temp.fpr(); + unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR); +#endif + + JITCompiler::Jump done = m_jit.branchDouble( + JITCompiler::DoubleEqual, resultFPR, resultFPR); + + typeCheck(op1Regs, edge, SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs)); + + done.link(&m_jit); } -void SpeculativeJIT::speculateRealNumber(Edge edge) +void SpeculativeJIT::speculateDoubleRepReal(Edge edge) { - if (!needsTypeCheck(edge, SpecFullRealNumber)) + if (!needsTypeCheck(edge, SpecDoubleReal)) return; SpeculateDoubleOperand operand(this, edge); FPRReg fpr = operand.fpr(); - DFG_TYPE_CHECK( - JSValueRegs(), edge, SpecFullRealNumber, + typeCheck( + JSValueRegs(), edge, SpecDoubleReal, m_jit.branchDouble( MacroAssembler::DoubleNotEqualOrUnordered, fpr, fpr)); } @@ -4542,6 +6633,22 @@ void SpeculativeJIT::speculateCell(Edge edge) (SpeculateCellOperand(this, edge)).gpr(); } +void SpeculativeJIT::speculateCellOrOther(Edge edge) +{ + if (!needsTypeCheck(edge, SpecCell | SpecOther)) + return; + + JSValueOperand operand(this, edge, ManualOperandSpeculation); + GPRTemporary temp(this); + GPRReg tempGPR = temp.gpr(); + + MacroAssembler::Jump ok = m_jit.branchIfCell(operand.jsValueRegs()); + DFG_TYPE_CHECK( + operand.jsValueRegs(), edge, SpecCell | SpecOther, + m_jit.branchIfNotOther(operand.jsValueRegs(), tempGPR)); + ok.link(&m_jit); +} 
+ void SpeculativeJIT::speculateObject(Edge edge) { if (!needsTypeCheck(edge, SpecObject)) @@ -4550,10 +6657,16 @@ void SpeculativeJIT::speculateObject(Edge edge) SpeculateCellOperand operand(this, edge); GPRReg gpr = operand.gpr(); DFG_TYPE_CHECK( - JSValueSource::unboxedCell(gpr), edge, SpecObject, m_jit.branchPtr( - MacroAssembler::Equal, - MacroAssembler::Address(gpr, JSCell::structureOffset()), - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + JSValueSource::unboxedCell(gpr), edge, SpecObject, m_jit.branchIfNotObject(gpr)); +} + +void SpeculativeJIT::speculateFunction(Edge edge) +{ + if (!needsTypeCheck(edge, SpecFunction)) + return; + + SpeculateCellOperand operand(this, edge); + speculateCellType(edge, operand.gpr(), SpecFunction, JSFunctionType); } void SpeculativeJIT::speculateFinalObject(Edge edge) @@ -4562,15 +6675,16 @@ void SpeculativeJIT::speculateFinalObject(Edge edge) return; SpeculateCellOperand operand(this, edge); - GPRTemporary structure(this); - GPRReg gpr = operand.gpr(); - GPRReg structureGPR = structure.gpr(); - m_jit.loadPtr(MacroAssembler::Address(gpr, JSCell::structureOffset()), structureGPR); - DFG_TYPE_CHECK( - JSValueSource::unboxedCell(gpr), edge, SpecFinalObject, m_jit.branch8( - MacroAssembler::NotEqual, - MacroAssembler::Address(structureGPR, Structure::typeInfoTypeOffset()), - TrustedImm32(FinalObjectType))); + speculateCellType(edge, operand.gpr(), SpecFinalObject, FinalObjectType); +} + +void SpeculativeJIT::speculateRegExpObject(Edge edge) +{ + if (!needsTypeCheck(edge, SpecRegExpObject)) + return; + + SpeculateCellOperand operand(this, edge); + speculateCellType(edge, operand.gpr(), SpecRegExpObject, RegExpObjectType); } void SpeculativeJIT::speculateObjectOrOther(Edge edge) @@ -4581,68 +6695,52 @@ void SpeculativeJIT::speculateObjectOrOther(Edge edge) JSValueOperand operand(this, edge, ManualOperandSpeculation); GPRTemporary temp(this); GPRReg tempGPR = temp.gpr(); -#if USE(JSVALUE64) - GPRReg gpr = operand.gpr(); - MacroAssembler::Jump notCell = m_jit.branchTest64( - MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister); + MacroAssembler::Jump notCell = m_jit.branchIfNotCell(operand.jsValueRegs()); + GPRReg gpr = operand.jsValueRegs().payloadGPR(); DFG_TYPE_CHECK( - JSValueRegs(gpr), edge, (~SpecCell) | SpecObject, m_jit.branchPtr( - MacroAssembler::Equal, - MacroAssembler::Address(gpr, JSCell::structureOffset()), - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + operand.jsValueRegs(), edge, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(gpr)); MacroAssembler::Jump done = m_jit.jump(); notCell.link(&m_jit); - if (needsTypeCheck(edge, SpecCell | SpecOther)) { - m_jit.move(gpr, tempGPR); - m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), tempGPR); - - typeCheck( - JSValueRegs(gpr), edge, SpecCell | SpecOther, - m_jit.branch64( - MacroAssembler::NotEqual, tempGPR, - MacroAssembler::TrustedImm64(ValueNull))); - } - done.link(&m_jit); -#else - GPRReg tagGPR = operand.tagGPR(); - GPRReg payloadGPR = operand.payloadGPR(); - MacroAssembler::Jump notCell = - m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::CellTag)); DFG_TYPE_CHECK( - JSValueRegs(tagGPR, payloadGPR), edge, (~SpecCell) | SpecObject, m_jit.branchPtr( - MacroAssembler::Equal, - MacroAssembler::Address(payloadGPR, JSCell::structureOffset()), - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); - MacroAssembler::Jump done = m_jit.jump(); - notCell.link(&m_jit); - if (needsTypeCheck(edge, SpecCell | 
SpecOther)) { - m_jit.move(tagGPR, tempGPR); - m_jit.or32(TrustedImm32(1), tempGPR); - - typeCheck( - JSValueRegs(tagGPR, payloadGPR), edge, SpecCell | SpecOther, - m_jit.branch32( - MacroAssembler::NotEqual, tempGPR, - MacroAssembler::TrustedImm32(JSValue::NullTag))); - } + operand.jsValueRegs(), edge, SpecCell | SpecOther, + m_jit.branchIfNotOther(operand.jsValueRegs(), tempGPR)); done.link(&m_jit); -#endif } void SpeculativeJIT::speculateString(Edge edge, GPRReg cell) { DFG_TYPE_CHECK( - JSValueSource::unboxedCell(cell), edge, SpecString, m_jit.branchPtr( - MacroAssembler::NotEqual, - MacroAssembler::Address(cell, JSCell::structureOffset()), - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + JSValueSource::unboxedCell(cell), edge, SpecString | ~SpecCell, m_jit.branchIfNotString(cell)); +} + +void SpeculativeJIT::speculateStringOrOther(Edge edge, JSValueRegs regs, GPRReg scratch) +{ + JITCompiler::Jump notCell = m_jit.branchIfNotCell(regs); + GPRReg cell = regs.payloadGPR(); + DFG_TYPE_CHECK(regs, edge, (~SpecCell) | SpecString, m_jit.branchIfNotString(cell)); + JITCompiler::Jump done = m_jit.jump(); + notCell.link(&m_jit); + DFG_TYPE_CHECK(regs, edge, SpecCell | SpecOther, m_jit.branchIfNotOther(regs, scratch)); + done.link(&m_jit); +} + +void SpeculativeJIT::speculateStringOrOther(Edge edge) +{ + if (!needsTypeCheck(edge, SpecString | SpecOther)) + return; + + JSValueOperand operand(this, edge, ManualOperandSpeculation); + GPRTemporary temp(this); + JSValueRegs regs = operand.jsValueRegs(); + GPRReg tempGPR = temp.gpr(); + speculateStringOrOther(edge, regs, tempGPR); } void SpeculativeJIT::speculateStringIdentAndLoadStorage(Edge edge, GPRReg string, GPRReg storage) { m_jit.loadPtr(MacroAssembler::Address(string, JSString::offsetOfValue()), storage); - if (!needsTypeCheck(edge, SpecStringIdent)) + if (!needsTypeCheck(edge, SpecStringIdent | ~SpecString)) return; speculationCheck( @@ -4652,9 +6750,9 @@ void SpeculativeJIT::speculateStringIdentAndLoadStorage(Edge edge, GPRReg string BadType, JSValueSource::unboxedCell(string), edge, m_jit.branchTest32( MacroAssembler::Zero, MacroAssembler::Address(storage, StringImpl::flagsOffset()), - MacroAssembler::TrustedImm32(StringImpl::flagIsIdentifier()))); + MacroAssembler::TrustedImm32(StringImpl::flagIsAtomic()))); - m_interpreter.filter(edge, SpecStringIdent); + m_interpreter.filter(edge, SpecStringIdent | ~SpecString); } void SpeculativeJIT::speculateStringIdent(Edge edge, GPRReg string) @@ -4688,7 +6786,7 @@ void SpeculativeJIT::speculateString(Edge edge) void SpeculativeJIT::speculateStringObject(Edge edge, GPRReg gpr) { - speculateStringObjectForStructure(edge, JITCompiler::Address(gpr, JSCell::structureOffset())); + speculateStringObjectForStructure(edge, JITCompiler::Address(gpr, JSCell::structureIDOffset())); } void SpeculativeJIT::speculateStringObject(Edge edge) @@ -4714,20 +6812,52 @@ void SpeculativeJIT::speculateStringOrStringObject(Edge edge) GPRReg gpr = operand.gpr(); if (!needsTypeCheck(edge, SpecString | SpecStringObject)) return; + + GPRTemporary structureID(this); + GPRReg structureIDGPR = structureID.gpr(); + + m_jit.load32(JITCompiler::Address(gpr, JSCell::structureIDOffset()), structureIDGPR); + JITCompiler::Jump isString = m_jit.branchStructure( + JITCompiler::Equal, + structureIDGPR, + m_jit.vm()->stringStructure.get()); - GPRTemporary structure(this); - GPRReg structureGPR = structure.gpr(); + speculateStringObjectForStructure(edge, structureIDGPR); - m_jit.loadPtr(JITCompiler::Address(gpr, 
JSCell::structureOffset()), structureGPR); + isString.link(&m_jit); - JITCompiler::Jump isString = m_jit.branchPtr( - JITCompiler::Equal, structureGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get())); + m_interpreter.filter(edge, SpecString | SpecStringObject); +} + +void SpeculativeJIT::speculateNotStringVar(Edge edge) +{ + JSValueOperand operand(this, edge, ManualOperandSpeculation); + GPRTemporary temp(this); + GPRReg tempGPR = temp.gpr(); - speculateStringObjectForStructure(edge, structureGPR); + JITCompiler::Jump notCell = m_jit.branchIfNotCell(operand.jsValueRegs()); + GPRReg cell = operand.jsValueRegs().payloadGPR(); - isString.link(&m_jit); + JITCompiler::Jump notString = m_jit.branchIfNotString(cell); - m_interpreter.filter(edge, SpecString | SpecStringObject); + speculateStringIdentAndLoadStorage(edge, cell, tempGPR); + + notString.link(&m_jit); + notCell.link(&m_jit); +} + +void SpeculativeJIT::speculateSymbol(Edge edge, GPRReg cell) +{ + DFG_TYPE_CHECK(JSValueSource::unboxedCell(cell), edge, SpecSymbol, m_jit.branchIfNotSymbol(cell)); +} + +void SpeculativeJIT::speculateSymbol(Edge edge) +{ + if (!needsTypeCheck(edge, SpecSymbol)) + return; + + SpeculateCellOperand operand(this, edge); + speculateSymbol(edge, operand.gpr()); } void SpeculativeJIT::speculateNotCell(Edge edge) @@ -4735,18 +6865,8 @@ void SpeculativeJIT::speculateNotCell(Edge edge) if (!needsTypeCheck(edge, ~SpecCell)) return; - JSValueOperand operand(this, edge, ManualOperandSpeculation); -#if USE(JSVALUE64) - typeCheck( - JSValueRegs(operand.gpr()), edge, ~SpecCell, - m_jit.branchTest64( - JITCompiler::Zero, operand.gpr(), GPRInfo::tagMaskRegister)); -#else - typeCheck( - JSValueRegs(operand.tagGPR(), operand.payloadGPR()), edge, ~SpecCell, - m_jit.branch32( - JITCompiler::Equal, operand.tagGPR(), TrustedImm32(JSValue::CellTag))); -#endif + JSValueOperand operand(this, edge, ManualOperandSpeculation); + typeCheck(operand.jsValueRegs(), edge, ~SpecCell, m_jit.branchIfCell(operand.jsValueRegs())); } void SpeculativeJIT::speculateOther(Edge edge) @@ -4757,21 +6877,34 @@ void SpeculativeJIT::speculateOther(Edge edge) JSValueOperand operand(this, edge, ManualOperandSpeculation); GPRTemporary temp(this); GPRReg tempGPR = temp.gpr(); -#if USE(JSVALUE64) - m_jit.move(operand.gpr(), tempGPR); - m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), tempGPR); typeCheck( - JSValueRegs(operand.gpr()), edge, SpecOther, - m_jit.branch64( - MacroAssembler::NotEqual, tempGPR, - MacroAssembler::TrustedImm64(ValueNull))); + operand.jsValueRegs(), edge, SpecOther, + m_jit.branchIfNotOther(operand.jsValueRegs(), tempGPR)); +} + +void SpeculativeJIT::speculateMisc(Edge edge, JSValueRegs regs) +{ +#if USE(JSVALUE64) + DFG_TYPE_CHECK( + regs, edge, SpecMisc, + m_jit.branch64(MacroAssembler::Above, regs.gpr(), MacroAssembler::TrustedImm64(TagBitTypeOther | TagBitBool | TagBitUndefined))); #else - m_jit.move(operand.tagGPR(), tempGPR); - m_jit.or32(TrustedImm32(1), tempGPR); - typeCheck( - JSValueRegs(operand.tagGPR(), operand.payloadGPR()), edge, SpecOther, - m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(JSValue::NullTag))); -#endif + DFG_TYPE_CHECK( + regs, edge, ~SpecInt32, + m_jit.branch32(MacroAssembler::Equal, regs.tagGPR(), MacroAssembler::TrustedImm32(JSValue::Int32Tag))); + DFG_TYPE_CHECK( + regs, edge, SpecMisc, + m_jit.branch32(MacroAssembler::Below, regs.tagGPR(), MacroAssembler::TrustedImm32(JSValue::UndefinedTag))); +#endif +} + +void SpeculativeJIT::speculateMisc(Edge edge) +{ + if 
(!needsTypeCheck(edge, SpecMisc)) + return; + + JSValueOperand operand(this, edge, ManualOperandSpeculation); + speculateMisc(edge, operand.jsValueRegs()); } void SpeculativeJIT::speculate(Node*, Edge edge) @@ -4782,8 +6915,11 @@ void SpeculativeJIT::speculate(Node*, Edge edge) case KnownInt32Use: ASSERT(!needsTypeCheck(edge, SpecInt32)); break; - case KnownNumberUse: - ASSERT(!needsTypeCheck(edge, SpecFullNumber)); + case DoubleRepUse: + ASSERT(!needsTypeCheck(edge, SpecFullDouble)); + break; + case Int52RepUse: + ASSERT(!needsTypeCheck(edge, SpecMachineInt)); break; case KnownCellUse: ASSERT(!needsTypeCheck(edge, SpecCell)); @@ -4791,30 +6927,53 @@ void SpeculativeJIT::speculate(Node*, Edge edge) case KnownStringUse: ASSERT(!needsTypeCheck(edge, SpecString)); break; + case KnownPrimitiveUse: + ASSERT(!needsTypeCheck(edge, SpecHeapTop & ~SpecObject)); + break; case Int32Use: speculateInt32(edge); break; - case MachineIntUse: - speculateMachineInt(edge); + case NumberUse: + speculateNumber(edge); break; case RealNumberUse: speculateRealNumber(edge); break; - case NumberUse: - speculateNumber(edge); + case DoubleRepRealUse: + speculateDoubleRepReal(edge); break; +#if USE(JSVALUE64) + case MachineIntUse: + speculateMachineInt(edge); + break; + case DoubleRepMachineIntUse: + speculateDoubleRepMachineInt(edge); + break; +#endif case BooleanUse: speculateBoolean(edge); break; + case KnownBooleanUse: + ASSERT(!needsTypeCheck(edge, SpecBoolean)); + break; case CellUse: speculateCell(edge); break; + case CellOrOtherUse: + speculateCellOrOther(edge); + break; case ObjectUse: speculateObject(edge); break; + case FunctionUse: + speculateFunction(edge); + break; case FinalObjectUse: speculateFinalObject(edge); break; + case RegExpObjectUse: + speculateRegExpObject(edge); + break; case ObjectOrOtherUse: speculateObjectOrOther(edge); break; @@ -4824,18 +6983,30 @@ void SpeculativeJIT::speculate(Node*, Edge edge) case StringUse: speculateString(edge); break; + case StringOrOtherUse: + speculateStringOrOther(edge); + break; + case SymbolUse: + speculateSymbol(edge); + break; case StringObjectUse: speculateStringObject(edge); break; case StringOrStringObjectUse: speculateStringOrStringObject(edge); break; + case NotStringVarUse: + speculateNotStringVar(edge); + break; case NotCellUse: speculateNotCell(edge); break; case OtherUse: speculateOther(edge); break; + case MiscUse: + speculateMisc(edge); + break; default: RELEASE_ASSERT_NOT_REACHED(); break; @@ -4846,10 +7017,11 @@ void SpeculativeJIT::emitSwitchIntJump( SwitchData* data, GPRReg value, GPRReg scratch) { SimpleJumpTable& table = m_jit.codeBlock()->switchJumpTable(data->switchTableIndex); + table.ensureCTITable(); m_jit.sub32(Imm32(table.min), value); addBranch( m_jit.branch32(JITCompiler::AboveOrEqual, value, Imm32(table.ctiOffsets.size())), - data->fallThrough); + data->fallThrough.block); m_jit.move(TrustedImmPtr(table.ctiOffsets.begin()), scratch); m_jit.loadPtr(JITCompiler::BaseIndex(scratch, value, JITCompiler::timesPtr()), scratch); m_jit.jump(scratch); @@ -4883,7 +7055,7 @@ void SpeculativeJIT::emitSwitchImm(Node* node, SwitchData* data) addBranch( m_jit.branchTest64( JITCompiler::Zero, valueRegs.gpr(), GPRInfo::tagTypeNumberRegister), - data->fallThrough); + data->fallThrough.block); silentSpillAllRegisters(scratch); callOperation(operationFindSwitchImmTargetForDouble, scratch, valueRegs.gpr(), data->switchTableIndex); silentFillAllRegisters(scratch); @@ -4897,10 +7069,11 @@ void SpeculativeJIT::emitSwitchImm(Node* node, SwitchData* data) 
m_jit.branch32( JITCompiler::AboveOrEqual, valueRegs.tagGPR(), TrustedImm32(JSValue::LowestTag)), - data->fallThrough); + data->fallThrough.block); silentSpillAllRegisters(scratch); callOperation(operationFindSwitchImmTargetForDouble, scratch, valueRegs, data->switchTableIndex); silentFillAllRegisters(scratch); + m_jit.jump(scratch); #endif noResult(node, UseChildrenCalledExplicitly); @@ -4921,7 +7094,7 @@ void SpeculativeJIT::emitSwitchCharStringJump( MacroAssembler::NotEqual, MacroAssembler::Address(value, JSString::offsetOfLength()), TrustedImm32(1)), - data->fallThrough); + data->fallThrough.block); m_jit.loadPtr(MacroAssembler::Address(value, JSString::offsetOfValue()), scratch); @@ -4975,24 +7148,9 @@ void SpeculativeJIT::emitSwitchChar(Node* node, SwitchData* data) op1.use(); -#if USE(JSVALUE64) - addBranch( - m_jit.branchTest64( - MacroAssembler::NonZero, op1Regs.gpr(), GPRInfo::tagMaskRegister), - data->fallThrough); -#else - addBranch( - m_jit.branch32( - MacroAssembler::NotEqual, op1Regs.tagGPR(), TrustedImm32(JSValue::CellTag)), - data->fallThrough); -#endif + addBranch(m_jit.branchIfNotCell(op1Regs), data->fallThrough.block); - addBranch( - m_jit.branchPtr( - MacroAssembler::NotEqual, - MacroAssembler::Address(op1Regs.payloadGPR(), JSCell::structureOffset()), - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())), - data->fallThrough); + addBranch(m_jit.branchIfNotString(op1Regs.payloadGPR()), data->fallThrough.block); emitSwitchCharStringJump(data, op1Regs.payloadGPR(), tempGPR); noResult(node, UseChildrenCalledExplicitly); @@ -5005,18 +7163,6 @@ void SpeculativeJIT::emitSwitchChar(Node* node, SwitchData* data) } } -bool SpeculativeJIT::StringSwitchCase::operator<( - const SpeculativeJIT::StringSwitchCase& other) const -{ - unsigned minLength = std::min(string->length(), other.string->length()); - for (unsigned i = 0; i < minLength; ++i) { - if (string->at(i) == other.string->at(i)) - continue; - return string->at(i) < other.string->at(i); - } - return string->length() < other.string->length(); -} - namespace { struct CharacterCase { @@ -5047,7 +7193,7 @@ void SpeculativeJIT::emitBinarySwitchStringRecurse( } if (begin == end) { - jump(data->fallThrough, ForceJump); + jump(data->fallThrough.block, ForceJump); return; } @@ -5083,14 +7229,14 @@ void SpeculativeJIT::emitBinarySwitchStringRecurse( dataLog("length = ", minLength, ", commonChars = ", commonChars, ", allLengthsEqual = ", allLengthsEqual, "\n"); if (!allLengthsEqual && alreadyCheckedLength < minLength) - branch32(MacroAssembler::Below, length, Imm32(minLength), data->fallThrough); + branch32(MacroAssembler::Below, length, Imm32(minLength), data->fallThrough.block); if (allLengthsEqual && (alreadyCheckedLength < minLength || !checkedExactLength)) - branch32(MacroAssembler::NotEqual, length, Imm32(minLength), data->fallThrough); + branch32(MacroAssembler::NotEqual, length, Imm32(minLength), data->fallThrough.block); for (unsigned i = numChecked; i < commonChars; ++i) { branch8( MacroAssembler::NotEqual, MacroAssembler::Address(buffer, i), - TrustedImm32(cases[begin].string->at(i)), data->fallThrough); + TrustedImm32(cases[begin].string->at(i)), data->fallThrough.block); } if (minLength == commonChars) { @@ -5160,7 +7306,7 @@ void SpeculativeJIT::emitBinarySwitchStringRecurse( temp, minLength, allLengthsEqual); } - addBranch(binarySwitch.fallThrough(), data->fallThrough); + addBranch(binarySwitch.fallThrough(), data->fallThrough.block); } void SpeculativeJIT::emitSwitchStringOnString(SwitchData* data, GPRReg 
string) @@ -5187,6 +7333,7 @@ void SpeculativeJIT::emitSwitchStringOnString(SwitchData* data, GPRReg string) flushRegisters(); callOperation( operationSwitchString, string, data->switchTableIndex, string); + m_jit.exceptionCheck(); m_jit.jump(string); return; } @@ -5212,7 +7359,7 @@ void SpeculativeJIT::emitSwitchStringOnString(SwitchData* data, GPRReg string) Vector<StringSwitchCase> cases; for (unsigned i = 0; i < data->cases.size(); ++i) { cases.append( - StringSwitchCase(data->cases[i].value.stringImpl(), data->cases[i].target)); + StringSwitchCase(data->cases[i].value.stringImpl(), data->cases[i].target.block)); } std::sort(cases.begin(), cases.end()); @@ -5224,6 +7371,7 @@ void SpeculativeJIT::emitSwitchStringOnString(SwitchData* data, GPRReg string) silentSpillAllRegisters(string); callOperation(operationSwitchString, string, data->switchTableIndex, string); silentFillAllRegisters(string); + m_jit.exceptionCheck(); m_jit.jump(string); } @@ -5248,8 +7396,8 @@ void SpeculativeJIT::emitSwitchString(Node* node, SwitchData* data) BinarySwitch binarySwitch(tempGPR, identifierCaseValues, BinarySwitch::IntPtr); while (binarySwitch.advance(m_jit)) - jump(data->cases[binarySwitch.caseIndex()].target, ForceJump); - addBranch(binarySwitch.fallThrough(), data->fallThrough); + jump(data->cases[binarySwitch.caseIndex()].target.block, ForceJump); + addBranch(binarySwitch.fallThrough(), data->fallThrough.block); noResult(node); break; @@ -5275,24 +7423,9 @@ void SpeculativeJIT::emitSwitchString(Node* node, SwitchData* data) op1.use(); -#if USE(JSVALUE64) - addBranch( - m_jit.branchTest64( - MacroAssembler::NonZero, op1Regs.gpr(), GPRInfo::tagMaskRegister), - data->fallThrough); -#else - addBranch( - m_jit.branch32( - MacroAssembler::NotEqual, op1Regs.tagGPR(), TrustedImm32(JSValue::CellTag)), - data->fallThrough); -#endif + addBranch(m_jit.branchIfNotCell(op1Regs), data->fallThrough.block); - addBranch( - m_jit.branchPtr( - MacroAssembler::NotEqual, - MacroAssembler::Address(op1Regs.payloadGPR(), JSCell::structureOffset()), - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())), - data->fallThrough); + addBranch(m_jit.branchIfNotString(op1Regs.payloadGPR()), data->fallThrough.block); emitSwitchStringOnString(data, op1Regs.payloadGPR()); noResult(node, UseChildrenCalledExplicitly); @@ -5320,6 +7453,10 @@ void SpeculativeJIT::emitSwitch(Node* node) case SwitchString: { emitSwitchString(node, data); return; + } + case SwitchCell: { + DFG_CRASH(m_jit.graph(), node, "Bad switch kind"); + return; } } RELEASE_ASSERT_NOT_REACHED(); } @@ -5338,87 +7475,30 @@ void SpeculativeJIT::linkBranches() } } -#if ENABLE(GGC) void SpeculativeJIT::compileStoreBarrier(Node* node) { - switch (node->op()) { - case ConditionalStoreBarrier: { - compileBaseValueStoreBarrier(node->child1(), node->child2()); - break; - } - - case StoreBarrier: { - SpeculateCellOperand base(this, node->child1()); - GPRTemporary scratch1(this); - GPRTemporary scratch2(this); + ASSERT(node->op() == StoreBarrier); - writeBarrier(base.gpr(), scratch1.gpr(), scratch2.gpr()); - break; - } - - case StoreBarrierWithNullCheck: { - JSValueOperand base(this, node->child1()); - GPRTemporary scratch1(this); - GPRTemporary scratch2(this); + SpeculateCellOperand base(this, node->child1()); + GPRTemporary scratch1(this); + GPRTemporary scratch2(this); -#if USE(JSVALUE64) - JITCompiler::Jump isNull = m_jit.branchTest64(JITCompiler::Zero, base.gpr()); - writeBarrier(base.gpr(), scratch1.gpr(), scratch2.gpr()); -#else - JITCompiler::Jump isNull = 
m_jit.branch32(JITCompiler::Equal, base.tagGPR(), TrustedImm32(JSValue::EmptyValueTag)); - writeBarrier(base.payloadGPR(), scratch1.gpr(), scratch2.gpr()); -#endif - isNull.link(&m_jit); - break; - } - - default: - RELEASE_ASSERT_NOT_REACHED(); - break; - } + writeBarrier(base.gpr(), scratch1.gpr(), scratch2.gpr()); noResult(node); } -JITCompiler::Jump SpeculativeJIT::genericWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2) -{ - jit.move(owner, scratch1); - jit.move(owner, scratch2); - - jit.andPtr(MacroAssembler::TrustedImmPtr(MarkedBlock::blockMask), scratch1); - jit.andPtr(MacroAssembler::TrustedImmPtr(~MarkedBlock::blockMask), scratch2); - - // Shift index -#if USE(JSVALUE64) - jit.rshift64(MacroAssembler::TrustedImm32(MarkedBlock::atomShiftAmount + MarkedBlock::markByteShiftAmount), scratch2); -#else - jit.rshift32(MacroAssembler::TrustedImm32(MarkedBlock::atomShiftAmount + MarkedBlock::markByteShiftAmount), scratch2); -#endif - - // Emit load and branch - return jit.branchTest8(MacroAssembler::Zero, MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::TimesOne, MarkedBlock::offsetOfMarks())); -} - -JITCompiler::Jump SpeculativeJIT::genericWriteBarrier(CCallHelpers& jit, JSCell* owner) -{ - MarkedBlock* block = MarkedBlock::blockFor(owner); - size_t markIndex = (reinterpret_cast<size_t>(owner) & ~MarkedBlock::blockMask) >> (MarkedBlock::atomShiftAmount + MarkedBlock::markByteShiftAmount); - uint8_t* address = reinterpret_cast<uint8_t*>(reinterpret_cast<char*>(block) + MarkedBlock::offsetOfMarks()) + markIndex; - return jit.branchTest8(MacroAssembler::Zero, MacroAssembler::AbsoluteAddress(address)); -} - void SpeculativeJIT::storeToWriteBarrierBuffer(GPRReg cell, GPRReg scratch1, GPRReg scratch2) { ASSERT(scratch1 != scratch2); - WriteBarrierBuffer* writeBarrierBuffer = &m_jit.vm()->heap.m_writeBarrierBuffer; - m_jit.move(TrustedImmPtr(writeBarrierBuffer), scratch1); - m_jit.load32(MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()), scratch2); - JITCompiler::Jump needToFlush = m_jit.branch32(MacroAssembler::AboveOrEqual, scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::capacityOffset())); + WriteBarrierBuffer& writeBarrierBuffer = m_jit.vm()->heap.m_writeBarrierBuffer; + m_jit.load32(writeBarrierBuffer.currentIndexAddress(), scratch2); + JITCompiler::Jump needToFlush = m_jit.branch32(MacroAssembler::AboveOrEqual, scratch2, MacroAssembler::TrustedImm32(writeBarrierBuffer.capacity())); m_jit.add32(TrustedImm32(1), scratch2); - m_jit.store32(scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset())); + m_jit.store32(scratch2, writeBarrierBuffer.currentIndexAddress()); - m_jit.loadPtr(MacroAssembler::Address(scratch1, WriteBarrierBuffer::bufferOffset()), scratch1); + m_jit.move(TrustedImmPtr(writeBarrierBuffer.buffer()), scratch1); // We use an offset of -sizeof(void*) because we already added 1 to scratch2. 
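// Illustrative note (not part of the patch): the arithmetic behind that offset. The add32 above
// leaves scratch2 holding oldIndex + 1, so the BaseIndex address computed on the next line is
//     buffer + (oldIndex + 1) * sizeof(void*) - sizeof(void*)  ==  buffer + oldIndex * sizeof(void*)
// i.e. the cell is stored into the slot that currentIndex referred to before the increment.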
m_jit.storePtr(cell, MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::ScalePtr, static_cast<int32_t>(-sizeof(void*)))); @@ -5432,75 +7512,85 @@ void SpeculativeJIT::storeToWriteBarrierBuffer(GPRReg cell, GPRReg scratch1, GPR done.link(&m_jit); } -void SpeculativeJIT::storeToWriteBarrierBuffer(JSCell* cell, GPRReg scratch1, GPRReg scratch2) +void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg scratch1, GPRReg scratch2) { - ASSERT(scratch1 != scratch2); - WriteBarrierBuffer* writeBarrierBuffer = &m_jit.vm()->heap.m_writeBarrierBuffer; - m_jit.move(TrustedImmPtr(writeBarrierBuffer), scratch1); - m_jit.load32(MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()), scratch2); - JITCompiler::Jump needToFlush = m_jit.branch32(MacroAssembler::AboveOrEqual, scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::capacityOffset())); - - m_jit.add32(TrustedImm32(1), scratch2); - m_jit.store32(scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset())); - - m_jit.loadPtr(MacroAssembler::Address(scratch1, WriteBarrierBuffer::bufferOffset()), scratch1); - // We use an offset of -sizeof(void*) because we already added 1 to scratch2. - m_jit.storePtr(TrustedImmPtr(cell), MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::ScalePtr, static_cast<int32_t>(-sizeof(void*)))); - - JITCompiler::Jump done = m_jit.jump(); - needToFlush.link(&m_jit); - - // Call C slow path - silentSpillAllRegisters(InvalidGPRReg); - callOperation(operationFlushWriteBarrierBuffer, cell); - silentFillAllRegisters(InvalidGPRReg); - - done.link(&m_jit); + JITCompiler::Jump ownerIsRememberedOrInEden = m_jit.jumpIfIsRememberedOrInEden(ownerGPR); + storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2); + ownerIsRememberedOrInEden.link(&m_jit); } -void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, JSCell* value, GPRReg scratch1, GPRReg scratch2) +void SpeculativeJIT::compilePutAccessorById(Node* node) { - if (Heap::isMarked(value)) - return; + SpeculateCellOperand base(this, node->child1()); + SpeculateCellOperand accessor(this, node->child2()); - JITCompiler::Jump definitelyNotMarked = genericWriteBarrier(m_jit, ownerGPR, scratch1, scratch2); - storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2); - definitelyNotMarked.link(&m_jit); + GPRReg baseGPR = base.gpr(); + GPRReg accessorGPR = accessor.gpr(); + + flushRegisters(); + callOperation(node->op() == PutGetterById ? operationPutGetterById : operationPutSetterById, NoResult, baseGPR, identifierUID(node->identifierNumber()), node->accessorAttributes(), accessorGPR); + m_jit.exceptionCheck(); + + noResult(node); } -void SpeculativeJIT::osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2) +void SpeculativeJIT::compilePutGetterSetterById(Node* node) { - JITCompiler::Jump definitelyNotMarked = genericWriteBarrier(jit, owner, scratch1, scratch2); + SpeculateCellOperand base(this, node->child1()); + JSValueOperand getter(this, node->child2()); + JSValueOperand setter(this, node->child3()); - // We need these extra slots because setupArgumentsWithExecState will use poke on x86. 
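As a reading aid for the writeBarrier()/storeToWriteBarrierBuffer() code above, here is a rough, self-contained C++ sketch of the same policy. The Cell/WriteBarrierBuffer stand-ins and the two helper declarations are invented for illustration; they only mirror, and do not reproduce, JSC's Heap interfaces (jumpIfIsRememberedOrInEden, operationFlushWriteBarrierBuffer).

    #include <cstddef>

    struct Cell;                               // stand-in for JSCell
    bool isRememberedOrInEden(const Cell*);    // fast-path test, cf. jumpIfIsRememberedOrInEden()
    void flushWriteBarrierBuffer(Cell*);       // slow path, cf. operationFlushWriteBarrierBuffer

    struct WriteBarrierBuffer {
        Cell** buffer;
        unsigned currentIndex;
        unsigned capacity;
    };

    void writeBarrier(WriteBarrierBuffer& wbb, Cell* owner)
    {
        if (isRememberedOrInEden(owner))
            return;                            // already known to the collector: nothing to record
        if (wbb.currentIndex >= wbb.capacity) {
            flushWriteBarrierBuffer(owner);    // buffer full: drain it and let the runtime record owner
            return;
        }
        ++wbb.currentIndex;
        wbb.buffer[wbb.currentIndex - 1] = owner;  // append into the slot the old index named
    }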
-#if CPU(X86) - jit.subPtr(TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister); -#endif +#if USE(JSVALUE64) + GPRReg baseGPR = base.gpr(); + GPRReg getterGPR = getter.gpr(); + GPRReg setterGPR = setter.gpr(); - jit.setupArgumentsWithExecState(owner); - jit.move(TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch1); - jit.call(scratch1); + flushRegisters(); + callOperation(operationPutGetterSetter, NoResult, baseGPR, identifierUID(node->identifierNumber()), node->accessorAttributes(), getterGPR, setterGPR); +#else + // These JSValues may be JSUndefined OR JSFunction*. + // At that time, + // 1. If the JSValue is JSUndefined, its payload becomes nullptr. + // 2. If the JSValue is JSFunction*, its payload becomes JSFunction*. + // So extract payload and pass it to operationPutGetterSetter. This hack is used as the same way in baseline JIT. + GPRReg baseGPR = base.gpr(); + JSValueRegs getterRegs = getter.jsValueRegs(); + JSValueRegs setterRegs = setter.jsValueRegs(); -#if CPU(X86) - jit.addPtr(TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister); + flushRegisters(); + callOperation(operationPutGetterSetter, NoResult, baseGPR, identifierUID(node->identifierNumber()), node->accessorAttributes(), getterRegs.payloadGPR(), setterRegs.payloadGPR()); #endif + m_jit.exceptionCheck(); - definitelyNotMarked.link(&jit); + noResult(node); } -void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg scratch1, GPRReg scratch2) +void SpeculativeJIT::compilePutAccessorByVal(Node* node) { - JITCompiler::Jump definitelyNotMarked = genericWriteBarrier(m_jit, ownerGPR, scratch1, scratch2); - storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2); - definitelyNotMarked.link(&m_jit); -} + SpeculateCellOperand base(this, node->child1()); + JSValueOperand subscript(this, node->child2()); + SpeculateCellOperand accessor(this, node->child3()); + + auto operation = node->op() == PutGetterByVal ? operationPutGetterByVal : operationPutSetterByVal; +#if USE(JSVALUE64) + GPRReg baseGPR = base.gpr(); + GPRReg subscriptGPR = subscript.gpr(); + GPRReg accessorGPR = accessor.gpr(); + + flushRegisters(); + callOperation(operation, NoResult, baseGPR, subscriptGPR, node->accessorAttributes(), accessorGPR); #else -void SpeculativeJIT::compileStoreBarrier(Node* node) -{ - DFG_NODE_DO_TO_CHILDREN(m_jit.graph(), node, speculate); + GPRReg baseGPR = base.gpr(); + JSValueRegs subscriptRegs = subscript.jsValueRegs(); + GPRReg accessorGPR = accessor.gpr(); + + flushRegisters(); + callOperation(operation, NoResult, baseGPR, subscriptRegs.tagGPR(), subscriptRegs.payloadGPR(), node->accessorAttributes(), accessorGPR); +#endif + m_jit.exceptionCheck(); + noResult(node); } -#endif // ENABLE(GGC) } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h index 3534c7b15..63a43960e 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved. + * Copyright (C) 2011-2016 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGSpeculativeJIT_h #define DFGSpeculativeJIT_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGAbstractInterpreter.h" @@ -41,6 +39,7 @@ #include "JITOperations.h" #include "MarkedAllocator.h" #include "PutKind.h" +#include "SpillRegistersMode.h" #include "ValueRecovery.h" #include "VirtualRegister.h" @@ -56,7 +55,7 @@ class SpeculateDoubleOperand; class SpeculateCellOperand; class SpeculateBooleanOperand; -enum GeneratedOperandType { GeneratedOperandTypeUnknown, GeneratedOperandInteger, GeneratedOperandDouble, GeneratedOperandJSValue}; +enum GeneratedOperandType { GeneratedOperandTypeUnknown, GeneratedOperandInteger, GeneratedOperandJSValue}; inline GPRReg extractResult(GPRReg result) { return result; } #if USE(JSVALUE64) @@ -77,6 +76,8 @@ inline NoResultTag extractResult(NoResultTag) { return NoResult; } // to propagate type information (including information that has // only speculatively been asserted) through the dataflow. class SpeculativeJIT { + WTF_MAKE_FAST_ALLOCATED; + friend struct OSRExit; private: typedef JITCompiler::TrustedImm32 TrustedImm32; @@ -119,6 +120,7 @@ public: ~SpeculativeJIT(); bool compile(); + void createOSREntries(); void linkOSREntries(LinkBuffer&); @@ -161,7 +163,11 @@ public: // and its machine registers may be reused. bool canReuse(Node* node) { - return generationInfo(node).canReuse(); + return generationInfo(node).useCount() == 1; + } + bool canReuse(Node* nodeA, Node* nodeB) + { + return nodeA == nodeB && generationInfo(nodeA).useCount() == 2; } bool canReuse(Edge nodeUse) { @@ -189,7 +195,6 @@ public: if (spillMe.isValid()) { #if USE(JSVALUE32_64) GenerationInfo& info = generationInfoFromVirtualRegister(spillMe); - RELEASE_ASSERT(info.registerFormat() != DataFormatJSDouble); if ((info.registerFormat() & DataFormatJS)) m_gprs.release(info.tagGPR() == gpr ? 
info.payloadGPR() : info.tagGPR()); #endif @@ -264,7 +269,7 @@ public: else if (registerFormat != DataFormatNone) m_gprs.release(info.gpr()); #elif USE(JSVALUE32_64) - if (registerFormat == DataFormatDouble || registerFormat == DataFormatJSDouble) + if (registerFormat == DataFormatDouble) m_fprs.release(info.fpr()); else if (registerFormat & DataFormatJS) { m_gprs.release(info.tagGPR()); @@ -286,22 +291,15 @@ public: } bool masqueradesAsUndefinedWatchpointIsStillValid() { - return masqueradesAsUndefinedWatchpointIsStillValid(m_currentNode->codeOrigin); + return masqueradesAsUndefinedWatchpointIsStillValid(m_currentNode->origin.semantic); } -#if ENABLE(GGC) void storeToWriteBarrierBuffer(GPRReg cell, GPRReg scratch1, GPRReg scratch2); - void storeToWriteBarrierBuffer(JSCell*, GPRReg scratch1, GPRReg scratch2); - static JITCompiler::Jump genericWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2); - static JITCompiler::Jump genericWriteBarrier(CCallHelpers& jit, JSCell* owner); - static void osrWriteBarrier(CCallHelpers&, GPRReg owner, GPRReg scratch1, GPRReg scratch2); void writeBarrier(GPRReg owner, GPRReg scratch1, GPRReg scratch2); - void writeBarrier(GPRReg owner, JSCell* value, GPRReg scratch1, GPRReg scratch2); void writeBarrier(GPRReg owner, GPRReg value, Edge valueUse, GPRReg scratch1, GPRReg scratch2); - void writeBarrier(JSCell* owner, GPRReg value, Edge valueUse, GPRReg scratch1, GPRReg scratch2); -#endif + void compileStoreBarrier(Node*); static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg) @@ -319,12 +317,12 @@ public: GPRReg fillSpeculateBoolean(Edge); GeneratedOperandType checkGeneratedTypeForToInt32(Node*); - void addSlowPathGenerator(PassOwnPtr<SlowPathGenerator>); - void runSlowPathGenerators(); + void addSlowPathGenerator(std::unique_ptr<SlowPathGenerator>); + void runSlowPathGenerators(PCToCodeOriginMapBuilder&); void compile(Node*); void noticeOSRBirth(Node*); - void bail(); + void bail(AbortReason); void compileCurrentBlock(); void checkArgumentTypes(); @@ -389,6 +387,14 @@ public: { silentSpillAllRegisters(InvalidGPRReg, InvalidGPRReg, exclude); } + void silentSpillAllRegisters(JSValueRegs exclude) + { +#if USE(JSVALUE64) + silentSpillAllRegisters(exclude.payloadGPR()); +#else + silentSpillAllRegisters(exclude.payloadGPR(), exclude.tagGPR()); +#endif + } static GPRReg pickCanTrample(GPRReg exclude) { @@ -406,7 +412,12 @@ public: return GPRInfo::regT0; } -#if USE(JSVALUE32_64) +#if USE(JSVALUE64) + static GPRReg pickCanTrample(JSValueRegs exclude) + { + return pickCanTrample(exclude.payloadGPR()); + } +#else static GPRReg pickCanTrample(JSValueRegs exclude) { GPRReg result = GPRInfo::regT0; @@ -441,9 +452,9 @@ public: { return m_jit.boxDouble(fpr, gpr); } - FPRReg unboxDouble(GPRReg gpr, FPRReg fpr) + FPRReg unboxDouble(GPRReg gpr, GPRReg resultGPR, FPRReg fpr) { - return m_jit.unboxDouble(gpr, fpr); + return m_jit.unboxDouble(gpr, resultGPR, fpr); } GPRReg boxDouble(FPRReg fpr) { @@ -461,6 +472,10 @@ public: m_jit.unboxDouble(tagGPR, payloadGPR, fpr, scratchFPR); } #endif + void boxDouble(FPRReg fpr, JSValueRegs regs) + { + m_jit.boxDouble(fpr, regs); + } // Spill a VirtualRegister to the JSStack. void spill(VirtualRegister spillMe) @@ -530,11 +545,10 @@ public: return; } - case DataFormatDouble: - case DataFormatJSDouble: { + case DataFormatDouble: { // On JSVALUE32_64 boxing a double is a no-op. 
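// Illustrative note: on JSVALUE32_64 a double's raw 64-bit pattern is already a valid boxed
// JSValue (the high word serves as the tag, and the real tag values occupy otherwise unused
// NaN bit patterns), so the storeDouble() below writes a complete JSValue to the stack slot and
// this patch records the spill as DataFormatDouble rather than the old DataFormatJSDouble.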
m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe)); - info.spill(*m_stream, spillMe, DataFormatJSDouble); + info.spill(*m_stream, spillMe, DataFormatDouble); return; } @@ -555,31 +569,9 @@ public: bool isKnownNotInteger(Node* node) { return !(m_state.forNode(node).m_type & SpecInt32); } bool isKnownNotNumber(Node* node) { return !(m_state.forNode(node).m_type & SpecFullNumber); } bool isKnownNotCell(Node* node) { return !(m_state.forNode(node).m_type & SpecCell); } + bool isKnownNotOther(Node* node) { return !(m_state.forNode(node).m_type & SpecOther); } - // Checks/accessors for constant values. - bool isConstant(Node* node) { return m_jit.graph().isConstant(node); } - bool isJSConstant(Node* node) { return m_jit.graph().isJSConstant(node); } - bool isInt32Constant(Node* node) { return m_jit.graph().isInt32Constant(node); } - bool isDoubleConstant(Node* node) { return m_jit.graph().isDoubleConstant(node); } - bool isNumberConstant(Node* node) { return m_jit.graph().isNumberConstant(node); } - bool isBooleanConstant(Node* node) { return m_jit.graph().isBooleanConstant(node); } - bool isFunctionConstant(Node* node) { return m_jit.graph().isFunctionConstant(node); } - int32_t valueOfInt32Constant(Node* node) { return m_jit.graph().valueOfInt32Constant(node); } - double valueOfNumberConstant(Node* node) { return m_jit.graph().valueOfNumberConstant(node); } -#if USE(JSVALUE32_64) - void* addressOfDoubleConstant(Node* node) { return m_jit.addressOfDoubleConstant(node); } -#endif - JSValue valueOfJSConstant(Node* node) { return m_jit.graph().valueOfJSConstant(node); } - bool valueOfBooleanConstant(Node* node) { return m_jit.graph().valueOfBooleanConstant(node); } - JSFunction* valueOfFunctionConstant(Node* node) { return m_jit.graph().valueOfFunctionConstant(node); } - bool isNullConstant(Node* node) - { - if (!isConstant(node)) - return false; - return valueOfJSConstant(node).isNull(); - } - - StringImpl* identifierUID(unsigned index) + UniquedStringImpl* identifierUID(unsigned index) { return m_jit.graph().identifiers()[index]; } @@ -601,7 +593,6 @@ public: } } -#ifndef NDEBUG // Used to ASSERT flushRegisters() has been called prior to // calling out from JIT code to a C helper function. bool isFlushed() @@ -616,12 +607,11 @@ public: } return true; } -#endif #if USE(JSVALUE64) - MacroAssembler::Imm64 valueOfJSConstantAsImm64(Node* node) + static MacroAssembler::Imm64 valueOfJSConstantAsImm64(Node* node) { - return MacroAssembler::Imm64(JSValue::encode(valueOfJSConstant(node))); + return MacroAssembler::Imm64(JSValue::encode(node->asJSValue())); } #endif @@ -706,7 +696,7 @@ public: } // Check if the lastNode is a branch on this node. - Node* lastNode = m_block->last(); + Node* lastNode = m_block->terminal(); return lastNode->op() == Branch && lastNode->child1() == m_currentNode ? 
m_block->size() - 1 : UINT_MAX; } @@ -715,19 +705,18 @@ public: #if USE(JSVALUE64) void cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill); - void cachedPutById(CodeOrigin, GPRReg base, GPRReg value, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump()); + void cachedPutById(CodeOrigin, GPRReg base, GPRReg value, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill); #elif USE(JSVALUE32_64) void cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill); - void cachedPutById(CodeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump()); + void cachedPutById(CodeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill); #endif void compileIn(Node*); void compileBaseValueStoreBarrier(Edge& baseEdge, Edge& valueEdge); - void nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert = false); - void nonSpeculativePeepholeBranchNull(Edge operand, Node* branchNode, bool invert = false); - bool nonSpeculativeCompareNull(Node*, Edge operand, bool invert = false); + void nonSpeculativeNonPeepholeCompareNullOrUndefined(Edge operand); + void nonSpeculativePeepholeBranchNullOrUndefined(Edge operand, Node* branchNode); void nonSpeculativePeepholeBranch(Node*, Node* branchNode, MacroAssembler::RelationalCondition, S_JITOperation_EJJ helperFunction); void nonSpeculativeNonPeepholeCompare(Node*, MacroAssembler::RelationalCondition, S_JITOperation_EJJ helperFunction); @@ -737,59 +726,12 @@ public: void nonSpeculativeNonPeepholeStrictEq(Node*, bool invert = false); bool nonSpeculativeStrictEq(Node*, bool invert = false); - void compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchAndResultReg); + void compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchAndResultReg, GPRReg scratch2Reg); void compileInstanceOf(Node*); + void compileInstanceOfCustom(Node*); - ptrdiff_t calleeFrameOffset(int numArgs) - { - return virtualRegisterForLocal(m_jit.graph().m_nextMachineLocal + JSStack::CallFrameHeaderSize + numArgs).offset() * sizeof(Register); - } - - // Access to our fixed callee CallFrame. - MacroAssembler::Address calleeFrameSlot(int numArgs, int slot) - { - return MacroAssembler::Address(GPRInfo::callFrameRegister, calleeFrameOffset(numArgs) + sizeof(Register) * slot); - } - - // Access to our fixed callee CallFrame. 
- MacroAssembler::Address calleeArgumentSlot(int numArgs, int argument) - { - return calleeFrameSlot(numArgs, virtualRegisterForArgument(argument).offset()); - } - - MacroAssembler::Address calleeFrameTagSlot(int numArgs, int slot) - { - return calleeFrameSlot(numArgs, slot).withOffset(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)); - } - - MacroAssembler::Address calleeFramePayloadSlot(int numArgs, int slot) - { - return calleeFrameSlot(numArgs, slot).withOffset(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)); - } - - MacroAssembler::Address calleeArgumentTagSlot(int numArgs, int argument) - { - return calleeArgumentSlot(numArgs, argument).withOffset(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)); - } - - MacroAssembler::Address calleeArgumentPayloadSlot(int numArgs, int argument) - { - return calleeArgumentSlot(numArgs, argument).withOffset(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)); - } - - MacroAssembler::Address calleeFrameCallerFrame(int numArgs) - { - return calleeFrameSlot(numArgs, 0).withOffset(CallFrame::callerFrameOffset()); - } - void emitCall(Node*); - int32_t framePointerOffsetToGetActivationRegisters() - { - return m_jit.codeBlock()->framePointerOffsetToGetActivationRegisters( - m_jit.graph().m_machineCaptureStart); - } - // Called once a node has completed code generation but prior to setting // its result, to free up its children. (This must happen prior to setting // the nodes result, since the node may have the same VirtualRegister as @@ -860,15 +802,20 @@ public: GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); info.initCell(node, node->refCount(), reg); } - void booleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren) + void blessedBooleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren) { - if (mode == CallUseChildren) - useChildren(node); - - VirtualRegister virtualRegister = node->virtualRegister(); - m_gprs.retain(reg, virtualRegister, SpillOrderBoolean); - GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); - info.initBoolean(node, node->refCount(), reg); +#if USE(JSVALUE64) + jsValueResult(reg, node, DataFormatJSBoolean, mode); +#else + booleanResult(reg, node, mode); +#endif + } + void unblessedBooleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren) + { +#if USE(JSVALUE64) + blessBoolean(reg); +#endif + blessedBooleanResult(reg, node, mode); } #if USE(JSVALUE64) void jsValueResult(GPRReg reg, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren) @@ -889,6 +836,16 @@ public: jsValueResult(reg, node, DataFormatJS, mode); } #elif USE(JSVALUE32_64) + void booleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren) + { + if (mode == CallUseChildren) + useChildren(node); + + VirtualRegister virtualRegister = node->virtualRegister(); + m_gprs.retain(reg, virtualRegister, SpillOrderBoolean); + GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); + info.initBoolean(node, node->refCount(), reg); + } void jsValueResult(GPRReg tag, GPRReg payload, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren) { if (mode == CallUseChildren) @@ -905,6 +862,14 @@ public: jsValueResult(tag, payload, node, DataFormatJS, mode); } #endif + void jsValueResult(JSValueRegs regs, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren) + { +#if USE(JSVALUE64) + jsValueResult(regs.gpr(), node, format, mode); +#else + 
jsValueResult(regs.tagGPR(), regs.payloadGPR(), node, format, mode); +#endif + } void storageResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren) { if (mode == CallUseChildren) @@ -927,7 +892,7 @@ public: } void initConstantInfo(Node* node) { - ASSERT(isInt32Constant(node) || isNumberConstant(node) || isJSConstant(node)); + ASSERT(node->hasConstant()); generationInfo(node).initConstant(node, node->refCount()); } @@ -937,186 +902,241 @@ public: // machine registers, and delegate the calling convention specific // decision as to how to fill the regsiters to setupArguments* methods. + JITCompiler::Call callOperation(V_JITOperation_E operation) + { + m_jit.setupArgumentsExecState(); + return appendCall(operation); + } JITCompiler::Call callOperation(P_JITOperation_E operation, GPRReg result) { m_jit.setupArgumentsExecState(); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(P_JITOperation_EC operation, GPRReg result, GPRReg cell) { m_jit.setupArgumentsWithExecState(cell); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(P_JITOperation_EO operation, GPRReg result, GPRReg object) { m_jit.setupArgumentsWithExecState(object); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(P_JITOperation_EOS operation, GPRReg result, GPRReg object, size_t size) { m_jit.setupArgumentsWithExecState(object, TrustedImmPtr(size)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(P_JITOperation_EOZ operation, GPRReg result, GPRReg object, int32_t size) { m_jit.setupArgumentsWithExecState(object, TrustedImmPtr(size)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(C_JITOperation_EOZ operation, GPRReg result, GPRReg object, int32_t size) { m_jit.setupArgumentsWithExecState(object, TrustedImmPtr(static_cast<size_t>(size))); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(P_JITOperation_EPS operation, GPRReg result, GPRReg old, size_t size) { m_jit.setupArgumentsWithExecState(old, TrustedImmPtr(size)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(P_JITOperation_ES operation, GPRReg result, size_t size) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(size)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(P_JITOperation_ESJss operation, GPRReg result, size_t index, GPRReg arg1) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(index), arg1); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(P_JITOperation_ESt operation, GPRReg result, Structure* structure) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(P_JITOperation_EStZ operation, GPRReg 
result, Structure* structure, GPRReg arg2) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), arg2); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(P_JITOperation_EStZ operation, GPRReg result, Structure* structure, size_t arg2) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImm32(arg2)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(P_JITOperation_EStZ operation, GPRReg result, GPRReg arg1, GPRReg arg2) { m_jit.setupArgumentsWithExecState(arg1, arg2); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(P_JITOperation_EStPS operation, GPRReg result, Structure* structure, void* pointer, size_t size) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImmPtr(pointer), TrustedImmPtr(size)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(P_JITOperation_EStSS operation, GPRReg result, Structure* structure, size_t index, size_t size) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImmPtr(index), TrustedImmPtr(size)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(C_JITOperation_E operation, GPRReg result) { m_jit.setupArgumentsExecState(); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(C_JITOperation_EC operation, GPRReg result, GPRReg arg1) { m_jit.setupArgumentsWithExecState(arg1); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(C_JITOperation_EC operation, GPRReg result, JSCell* cell) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(C_JITOperation_ECZ operation, GPRReg result, GPRReg arg1, GPRReg arg2) + { + m_jit.setupArgumentsWithExecState(arg1, arg2); + return appendCallSetResult(operation, result); } - JITCompiler::Call callOperation(C_JITOperation_ECC operation, GPRReg result, GPRReg arg1, JSCell* cell) + JITCompiler::Call callOperation(C_JITOperation_ECZC operation, GPRReg result, GPRReg arg1, GPRReg arg2, GPRReg arg3) + { + m_jit.setupArgumentsWithExecState(arg1, arg2, arg3); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(C_JITOperation_EJscC operation, GPRReg result, GPRReg arg1, JSCell* cell) { m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(C_JITOperation_EIcf operation, GPRReg result, InlineCallFrame* inlineCallFrame) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(inlineCallFrame)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(C_JITOperation_ESt operation, GPRReg result, Structure* structure) { 
m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); + } + +#if USE(JSVALUE64) + JITCompiler::Call callOperation(C_JITOperation_EStJscSymtabJ operation, GPRReg result, Structure* structure, GPRReg scope, SymbolTable* table, TrustedImm64 initialValue) + { + m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), scope, TrustedImmPtr(table), initialValue); + return appendCallSetResult(operation, result); + } +#else + JITCompiler::Call callOperation(C_JITOperation_EStJscSymtabJ operation, GPRReg result, Structure* structure, GPRReg scope, SymbolTable* table, TrustedImm32 tag, TrustedImm32 payload) + { + m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), scope, TrustedImmPtr(table), payload, tag); + return appendCallSetResult(operation, result); + } +#endif + JITCompiler::Call callOperation(C_JITOperation_EStZ operation, GPRReg result, Structure* structure, unsigned knownLength) + { + m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImm32(knownLength)); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(C_JITOperation_EStZZ operation, GPRReg result, Structure* structure, unsigned knownLength, unsigned minCapacity) + { + m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImm32(knownLength), TrustedImm32(minCapacity)); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(C_JITOperation_EStZ operation, GPRReg result, Structure* structure, GPRReg length) + { + m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), length); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(C_JITOperation_EStZZ operation, GPRReg result, Structure* structure, GPRReg length, unsigned minCapacity) + { + m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), length, TrustedImm32(minCapacity)); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(C_JITOperation_EJssSt operation, GPRReg result, GPRReg arg1, Structure* structure) { m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(structure)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(C_JITOperation_EJssJss operation, GPRReg result, GPRReg arg1, GPRReg arg2) { m_jit.setupArgumentsWithExecState(arg1, arg2); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(C_JITOperation_EJssJssJss operation, GPRReg result, GPRReg arg1, GPRReg arg2, GPRReg arg3) { m_jit.setupArgumentsWithExecState(arg1, arg2, arg3); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(S_JITOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2) { m_jit.setupArgumentsWithExecState(arg1, arg2); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); + } + + JITCompiler::Call callOperation(S_JITOperation_EGC operation, GPRReg result, JSGlobalObject* globalObject, GPRReg arg2) + { + m_jit.setupArgumentsWithExecState(TrustedImmPtr(globalObject), arg2); + return appendCallSetResult(operation, result); + } + + JITCompiler::Call callOperation(C_JITOperation_EGC operation, GPRReg result, JSGlobalObject* globalObject, GPRReg arg2) + 
{ + m_jit.setupArgumentsWithExecState(TrustedImmPtr(globalObject), arg2); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(Jss_JITOperation_EZ operation, GPRReg result, GPRReg arg1) { m_jit.setupArgumentsWithExecState(arg1); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(V_JITOperation_EC operation, GPRReg arg1) { m_jit.setupArgumentsWithExecState(arg1); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } JITCompiler::Call callOperation(V_JITOperation_EC operation, JSCell* arg1) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(arg1)); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } JITCompiler::Call callOperation(V_JITOperation_ECIcf operation, GPRReg arg1, InlineCallFrame* inlineCallFrame) { m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(inlineCallFrame)); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } JITCompiler::Call callOperation(V_JITOperation_ECCIcf operation, GPRReg arg1, GPRReg arg2, InlineCallFrame* inlineCallFrame) { m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(inlineCallFrame)); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } JITCompiler::Call callOperation(V_JITOperation_ECZ operation, GPRReg arg1, int arg2) { m_jit.setupArgumentsWithExecState(arg1, TrustedImm32(arg2)); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } JITCompiler::Call callOperation(V_JITOperation_ECC operation, GPRReg arg1, GPRReg arg2) { m_jit.setupArgumentsWithExecState(arg1, arg2); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } JITCompiler::Call callOperation(V_JITOperation_ECC operation, GPRReg arg1, JSCell* arg2) { m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(arg2)); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } JITCompiler::Call callOperation(V_JITOperation_ECC operation, JSCell* arg1, GPRReg arg2) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(arg1), arg2); - return appendCallWithExceptionCheck(operation); - } - - JITCompiler::Call callOperation(V_JITOperation_EVws operation, VariableWatchpointSet* watchpointSet) - { - m_jit.setupArgumentsWithExecState(TrustedImmPtr(watchpointSet)); return appendCall(operation); } @@ -1131,31 +1151,22 @@ public: m_jit.setupArgumentsExecState(); return appendCallWithCallFrameRollbackOnExceptionSetResult(operation, result); } - - template<typename FunctionType, typename ArgumentType1> - JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1) - { - return callOperation(operation, arg1); - } - template<typename FunctionType, typename ArgumentType1, typename ArgumentType2> - JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2) - { - return callOperation(operation, arg1, arg2); - } - template<typename FunctionType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3> - JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2, ArgumentType3 arg3) + JITCompiler::Call callOperation(Z_JITOperation_EC operation, GPRReg result, GPRReg arg1) { - return callOperation(operation, arg1, arg2, arg3); + m_jit.setupArgumentsWithExecState(arg1); + return appendCallSetResult(operation, result); } - template<typename 
FunctionType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3, typename ArgumentType4> - JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2, ArgumentType3 arg3, ArgumentType4 arg4) + + JITCompiler::Call callOperation(V_JITOperation_ECIZC operation, GPRReg regOp1, UniquedStringImpl* identOp2, int32_t op3, GPRReg regOp4) { - return callOperation(operation, arg1, arg2, arg3, arg4); + m_jit.setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), TrustedImm32(op3), regOp4); + return appendCall(operation); } - template<typename FunctionType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3, typename ArgumentType4, typename ArgumentType5> - JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2, ArgumentType3 arg3, ArgumentType4 arg4, ArgumentType5 arg5) + + template<typename FunctionType, typename... Args> + JITCompiler::Call callOperation(FunctionType operation, NoResultTag, Args... args) { - return callOperation(operation, arg1, arg2, arg3, arg4, arg5); + return callOperation(operation, args...); } JITCompiler::Call callOperation(D_JITOperation_ZZ operation, FPRReg result, GPRReg arg1, GPRReg arg2) @@ -1173,32 +1184,61 @@ public: m_jit.setupArguments(arg1, arg2); return appendCallSetResult(operation, result); } - JITCompiler::Call callOperation(I_JITOperation_EJss operation, GPRReg result, GPRReg arg1) + JITCompiler::Call callOperation(T_JITOperation_EJss operation, GPRReg result, GPRReg arg1) { m_jit.setupArgumentsWithExecState(arg1); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(C_JITOperation_EJscZ operation, GPRReg result, GPRReg arg1, int32_t arg2) + { + m_jit.setupArgumentsWithExecState(arg1, TrustedImm32(arg2)); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(C_JITOperation_EZ operation, GPRReg result, GPRReg arg1) { m_jit.setupArgumentsWithExecState(arg1); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(C_JITOperation_EZ operation, GPRReg result, int32_t arg1) { m_jit.setupArgumentsWithExecState(TrustedImm32(arg1)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); + } + + JITCompiler::Call callOperation(J_JITOperation_EJscC operation, GPRReg result, GPRReg arg1, JSCell* cell) + { + m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell)); + return appendCallSetResult(operation, result); + } + + JITCompiler::Call callOperation(J_JITOperation_EJscCJ operation, GPRReg result, GPRReg arg1, JSCell* cell, GPRReg arg2) + { + m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell), arg2); + return appendCallSetResult(operation, result); + } + + JITCompiler::Call callOperation(V_JITOperation_EWs operation, WatchpointSet* watchpointSet) + { + m_jit.setupArgumentsWithExecState(TrustedImmPtr(watchpointSet)); + return appendCall(operation); + } + + JITCompiler::Call callOperation(V_JITOperation_ECRUiUi operation, GPRReg arg1, GPRReg arg2, Imm32 arg3, GPRReg arg4) + { + m_jit.setupArgumentsWithExecState(arg1, arg2, arg3.asTrustedImm32(), arg4); + return appendCall(operation); } #if USE(JSVALUE64) JITCompiler::Call callOperation(J_JITOperation_E operation, GPRReg result) { m_jit.setupArgumentsExecState(); - return 
appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_EP operation, GPRReg result, void* pointer) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(Z_JITOperation_D operation, GPRReg result, FPRReg arg1) { @@ -1207,90 +1247,115 @@ public: m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result); return call; } - JITCompiler::Call callOperation(J_JITOperation_EI operation, GPRReg result, StringImpl* uid) + JITCompiler::Call callOperation(Q_JITOperation_J operation, GPRReg result, GPRReg value) + { + m_jit.setupArguments(value); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(Q_JITOperation_D operation, GPRReg result, FPRReg value) + { + m_jit.setupArguments(value); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(J_JITOperation_EI operation, GPRReg result, UniquedStringImpl* uid) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(uid)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_EA operation, GPRReg result, GPRReg arg1) { m_jit.setupArgumentsWithExecState(arg1); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_EAZ operation, GPRReg result, GPRReg arg1, GPRReg arg2) { m_jit.setupArgumentsWithExecState(arg1, arg2); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_EJssZ operation, GPRReg result, GPRReg arg1, GPRReg arg2) { m_jit.setupArgumentsWithExecState(arg1, arg2); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_EPS operation, GPRReg result, void* pointer, size_t size) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_ESS operation, GPRReg result, int startConstant, int numConstants) { m_jit.setupArgumentsWithExecState(TrustedImm32(startConstant), TrustedImm32(numConstants)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_EPP operation, GPRReg result, GPRReg arg1, void* pointer) { m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_EC operation, GPRReg result, JSCell* cell) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } - JITCompiler::Call callOperation(J_JITOperation_ESsiCI operation, GPRReg result, StructureStubInfo* stubInfo, GPRReg arg1, const StringImpl* uid) + JITCompiler::Call callOperation(J_JITOperation_ECZ operation, GPRReg result, GPRReg arg1, GPRReg arg2) + { + 
m_jit.setupArgumentsWithExecState(arg1, arg2); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(J_JITOperation_ESsiCI operation, GPRReg result, StructureStubInfo* stubInfo, GPRReg arg1, const UniquedStringImpl* uid) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, TrustedImmPtr(uid)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } - JITCompiler::Call callOperation(J_JITOperation_ESsiJI operation, GPRReg result, StructureStubInfo* stubInfo, GPRReg arg1, StringImpl* uid) + JITCompiler::Call callOperation(J_JITOperation_ESsiJI operation, GPRReg result, StructureStubInfo* stubInfo, GPRReg arg1, UniquedStringImpl* uid) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, TrustedImmPtr(uid)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_EDA operation, GPRReg result, FPRReg arg1, GPRReg arg2) { m_jit.setupArgumentsWithExecState(arg1, arg2); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(J_JITOperation_EJC operation, GPRReg result, GPRReg arg1, GPRReg arg2) + { + m_jit.setupArgumentsWithExecState(arg1, arg2); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(J_JITOperation_EJZ operation, GPRReg result, GPRReg arg1, GPRReg arg2) + { + m_jit.setupArgumentsWithExecState(arg1, arg2); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_EJA operation, GPRReg result, GPRReg arg1, GPRReg arg2) { m_jit.setupArgumentsWithExecState(arg1, arg2); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_EP operation, GPRReg result, GPRReg arg1) { m_jit.setupArgumentsWithExecState(arg1); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_EZ operation, GPRReg result, GPRReg arg1) { m_jit.setupArgumentsWithExecState(arg1); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_EZ operation, GPRReg result, int32_t arg1) { m_jit.setupArgumentsWithExecState(TrustedImm32(arg1)); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_EZZ operation, GPRReg result, int32_t arg1, GPRReg arg2) { m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), arg2); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_EZIcfZ operation, GPRReg result, int32_t arg1, InlineCallFrame* inlineCallFrame, GPRReg arg2) { m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), TrustedImmPtr(inlineCallFrame), arg2); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(P_JITOperation_EJS operation, GPRReg result, GPRReg value, size_t index) @@ -1302,13 +1367,38 @@ public: JITCompiler::Call callOperation(P_JITOperation_EStJ operation, GPRReg result, Structure* 
structure, GPRReg arg2) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), arg2); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(C_JITOperation_EJ operation, GPRReg result, GPRReg arg1) { m_jit.setupArgumentsWithExecState(arg1); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(C_JITOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2) + { + m_jit.setupArgumentsWithExecState(arg1, arg2); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(C_JITOperation_EJJC operation, GPRReg result, GPRReg arg1, GPRReg arg2, GPRReg arg3) + { + m_jit.setupArgumentsWithExecState(arg1, arg2, arg3); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(C_JITOperation_EJJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2, GPRReg arg3) + { + m_jit.setupArgumentsWithExecState(arg1, arg2, arg3); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(C_JITOperation_EJZ operation, GPRReg result, GPRReg arg1, GPRReg arg2) + { + m_jit.setupArgumentsWithExecState(arg1, arg2); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(C_JITOperation_EJZC operation, GPRReg result, GPRReg arg1, GPRReg arg2, GPRReg arg3) + { + m_jit.setupArgumentsWithExecState(arg1, arg2, arg3); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(S_JITOperation_J operation, GPRReg result, GPRReg arg1) { @@ -1318,107 +1408,150 @@ public: JITCompiler::Call callOperation(S_JITOperation_EJ operation, GPRReg result, GPRReg arg1) { m_jit.setupArgumentsWithExecState(arg1); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(J_JITOperation_EJ operation, JSValueRegs result, JSValueRegs arg1) + { + return callOperation(operation, result.payloadGPR(), arg1.payloadGPR()); } JITCompiler::Call callOperation(J_JITOperation_EJ operation, GPRReg result, GPRReg arg1) { m_jit.setupArgumentsWithExecState(arg1); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(S_JITOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2) { m_jit.setupArgumentsWithExecState(arg1, arg2); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_EPP operation, GPRReg result, GPRReg arg1, GPRReg arg2) { m_jit.setupArgumentsWithExecState(arg1, arg2); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2) { m_jit.setupArgumentsWithExecState(arg1, arg2); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_EJJ operation, GPRReg result, GPRReg arg1, MacroAssembler::TrustedImm32 imm) { m_jit.setupArgumentsWithExecState(arg1, MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(imm.m_value)))); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } 
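// Illustrative call site (assumed, not taken from this patch) for the overload just above: when
// one operand of a binary operation is a compile-time constant, e.g.
//
//     callOperation(operationValueAdd, resultGPR, op1GPR, TrustedImm32(1));
//
// the constant is boxed right here via JSValue::encode(jsNumber(imm.m_value)), so the C helper
// still receives two EncodedJSValue arguments even though only one of them lived in a register.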
JITCompiler::Call callOperation(J_JITOperation_EJJ operation, GPRReg result, MacroAssembler::TrustedImm32 imm, GPRReg arg2) { m_jit.setupArgumentsWithExecState(MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(imm.m_value))), arg2); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(J_JITOperation_EJJ operation, JSValueRegs result, JSValueRegs arg1, JSValueRegs arg2) + { + return callOperation(operation, result.payloadGPR(), arg1.payloadGPR(), arg2.payloadGPR()); } JITCompiler::Call callOperation(J_JITOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2) { m_jit.setupArgumentsWithExecState(arg1, arg2); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_ECJ operation, GPRReg result, GPRReg arg1, GPRReg arg2) { m_jit.setupArgumentsWithExecState(arg1, arg2); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_ECJ operation, GPRReg result, GPRReg arg1, JSValueRegs arg2) { m_jit.setupArgumentsWithExecState(arg1, arg2.gpr()); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(V_JITOperation_EOZD operation, GPRReg arg1, GPRReg arg2, FPRReg arg3) { m_jit.setupArgumentsWithExecState(arg1, arg2, arg3); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } JITCompiler::Call callOperation(V_JITOperation_EJ operation, GPRReg arg1) { m_jit.setupArgumentsWithExecState(arg1); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } JITCompiler::Call callOperation(V_JITOperation_EJPP operation, GPRReg arg1, GPRReg arg2, void* pointer) { m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(pointer)); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } - JITCompiler::Call callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, GPRReg arg1, GPRReg arg2, StringImpl* uid) + JITCompiler::Call callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, GPRReg arg1, GPRReg arg2, UniquedStringImpl* uid) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, arg2, TrustedImmPtr(uid)); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } JITCompiler::Call callOperation(V_JITOperation_EJJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3) { m_jit.setupArgumentsWithExecState(arg1, arg2, arg3); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } JITCompiler::Call callOperation(V_JITOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3) { m_jit.setupArgumentsWithExecState(arg1, arg2, arg3); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } JITCompiler::Call callOperation(V_JITOperation_EOZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3) { m_jit.setupArgumentsWithExecState(arg1, arg2, arg3); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } JITCompiler::Call callOperation(V_JITOperation_ECJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3) { m_jit.setupArgumentsWithExecState(arg1, arg2, arg3); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } - JITCompiler::Call 
callOperation(D_JITOperation_EJ operation, FPRReg result, GPRReg arg1) + JITCompiler::Call callOperation(Z_JITOperation_EJZZ operation, GPRReg result, GPRReg arg1, unsigned arg2, unsigned arg3) { - m_jit.setupArgumentsWithExecState(arg1); - return appendCallWithExceptionCheckSetResult(operation, result); + m_jit.setupArgumentsWithExecState(arg1, TrustedImm32(arg2), TrustedImm32(arg3)); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(F_JITOperation_EFJZZ operation, GPRReg result, GPRReg arg1, GPRReg arg2, unsigned arg3, GPRReg arg4) + { + m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImm32(arg3), arg4); + return appendCallSetResult(operation, result); + } + + JITCompiler::Call callOperation(Z_JITOperation_EJOJ operation, GPRReg result, GPRReg arg1, GPRReg arg2, GPRReg arg3) + { + m_jit.setupArgumentsWithExecState(arg1, arg2, arg3); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(Z_JITOperation_EJOJ operation, GPRReg result, JSValueRegs arg1, GPRReg arg2, JSValueRegs arg3) + { + return callOperation(operation, result, arg1.payloadGPR(), arg2, arg3.payloadGPR()); } + JITCompiler::Call callOperation(Z_JITOperation_EJZ operation, GPRReg result, GPRReg arg1, unsigned arg2) + { + m_jit.setupArgumentsWithExecState(arg1, TrustedImm32(arg2)); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(V_JITOperation_EZJZZZ operation, unsigned arg1, GPRReg arg2, unsigned arg3, GPRReg arg4, unsigned arg5) + { + m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), arg2, TrustedImm32(arg3), arg4, TrustedImm32(arg5)); + return appendCall(operation); + } + JITCompiler::Call callOperation(V_JITOperation_ECJZC operation, GPRReg regOp1, GPRReg regOp2, int32_t op3, GPRReg regOp4) + { + m_jit.setupArgumentsWithExecState(regOp1, regOp2, TrustedImm32(op3), regOp4); + return appendCall(operation); + } + JITCompiler::Call callOperation(V_JITOperation_ECIZJJ operation, GPRReg regOp1, UniquedStringImpl* identOp2, int32_t op3, GPRReg regOp4, GPRReg regOp5) + { + m_jit.setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), TrustedImm32(op3), regOp4, regOp5); + return appendCall(operation); + } #else // USE(JSVALUE32_64) -// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When being compiled in ARM EABI, it must be aligned even-numbered register (r0, r2 or [sp]). -// To avoid assemblies from using wrong registers, let's occupy r1 or r3 with a dummy argument when necessary. +// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When being compiled in ARM EABI, it must be aligned on an even-numbered register (r0, r2 or [sp]). +// To prevent the assembler from using wrong registers, let's occupy r1 or r3 with a dummy argument when necessary. 
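// For illustration, a sketch of what the macro defined just below achieves on an ARM EABI
// target (no new behavior is being described): a JSVALUE32_64 call such as
//     m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
// marshals r0 = ExecState*, r1 = TrustedImm32(0) (the dummy), r2/r3 = payload/tag, so the
// 64-bit EncodedJSValue starts on the even-numbered register that the ABI requires.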
#if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS) #define EABI_32BIT_DUMMY_ARG TrustedImm32(0), #else @@ -1434,6 +1567,11 @@ public: #define SH4_32BIT_DUMMY_ARG #endif + JITCompiler::Call callOperation(D_JITOperation_G operation, FPRReg result, JSGlobalObject* globalObject) + { + m_jit.setupArguments(TrustedImmPtr(globalObject)); + return appendCallSetResult(operation, result); + } JITCompiler::Call callOperation(Z_JITOperation_D operation, GPRReg result, FPRReg arg1) { prepareForExternalCall(); @@ -1445,123 +1583,152 @@ public: JITCompiler::Call callOperation(J_JITOperation_E operation, GPRReg resultTag, GPRReg resultPayload) { m_jit.setupArgumentsExecState(); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, void* pointer) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer)); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_EPP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, void* pointer) { m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer)); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1) { m_jit.setupArgumentsWithExecState(arg1); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } - JITCompiler::Call callOperation(J_JITOperation_EI operation, GPRReg resultTag, GPRReg resultPayload, StringImpl* uid) + JITCompiler::Call callOperation(J_JITOperation_EI operation, GPRReg resultTag, GPRReg resultPayload, UniquedStringImpl* uid) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(uid)); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_EA operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1) { m_jit.setupArgumentsWithExecState(arg1); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_EAZ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2) { m_jit.setupArgumentsWithExecState(arg1, arg2); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); + } + JITCompiler::Call callOperation(J_JITOperation_EJ operation, JSValueRegs result, JSValueRegs arg1) + { + return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1.tagGPR(), arg1.payloadGPR()); + } + JITCompiler::Call callOperation(J_JITOperation_EJ operation, GPRReg resultPayload, GPRReg resultTag, GPRReg arg1) + { + m_jit.setupArgumentsWithExecState(arg1); + return appendCallSetResult(operation, resultPayload, resultTag); + } + JITCompiler::Call callOperation(J_JITOperation_EJC operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2) + { + 
m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_EJssZ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2) { m_jit.setupArgumentsWithExecState(arg1, arg2); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_EPS operation, GPRReg resultTag, GPRReg resultPayload, void* pointer, size_t size) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size)); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_ESS operation, GPRReg resultTag, GPRReg resultPayload, int startConstant, int numConstants) { m_jit.setupArgumentsWithExecState(TrustedImm32(startConstant), TrustedImm32(numConstants)); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, void* pointer) { m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImmPtr(pointer)); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2) { m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_EC operation, GPRReg resultTag, GPRReg resultPayload, JSCell* cell) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell)); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); + } + JITCompiler::Call callOperation(J_JITOperation_ECZ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2) + { + m_jit.setupArgumentsWithExecState(arg1, arg2); + return appendCallSetResult(operation, resultPayload, resultTag); } - JITCompiler::Call callOperation(J_JITOperation_ESsiCI operation, GPRReg resultTag, GPRReg resultPayload, StructureStubInfo* stubInfo, GPRReg arg1, const StringImpl* uid) + JITCompiler::Call callOperation(J_JITOperation_EJscC operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, JSCell* cell) + { + m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell)); + return appendCallSetResult(operation, resultPayload, resultTag); + } + JITCompiler::Call callOperation(J_JITOperation_EJscCJ operation, GPRReg result, GPRReg arg1, JSCell* cell, GPRReg arg2Tag, GPRReg arg2Payload) + { + m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell), EABI_32BIT_DUMMY_ARG arg2Payload, arg2Tag); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(J_JITOperation_ESsiCI operation, GPRReg resultTag, GPRReg resultPayload, StructureStubInfo* stubInfo, GPRReg arg1, const UniquedStringImpl* uid) 
{ m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, TrustedImmPtr(uid)); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } - JITCompiler::Call callOperation(J_JITOperation_ESsiJI operation, GPRReg resultTag, GPRReg resultPayload, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, StringImpl* uid) + JITCompiler::Call callOperation(J_JITOperation_ESsiJI operation, GPRReg resultTag, GPRReg resultPayload, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, UniquedStringImpl* uid) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, arg1Tag, TrustedImmPtr(uid)); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } - JITCompiler::Call callOperation(J_JITOperation_ESsiJI operation, GPRReg resultTag, GPRReg resultPayload, StructureStubInfo* stubInfo, int32_t arg1Tag, GPRReg arg1Payload, StringImpl* uid) + JITCompiler::Call callOperation(J_JITOperation_ESsiJI operation, GPRReg resultTag, GPRReg resultPayload, StructureStubInfo* stubInfo, int32_t arg1Tag, GPRReg arg1Payload, UniquedStringImpl* uid) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, TrustedImm32(arg1Tag), TrustedImmPtr(uid)); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_EDA operation, GPRReg resultTag, GPRReg resultPayload, FPRReg arg1, GPRReg arg2) { m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1, arg2); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_EJA operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2) { m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_EJA operation, GPRReg resultTag, GPRReg resultPayload, TrustedImm32 arg1Tag, GPRReg arg1Payload, GPRReg arg2) { m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_EJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload) { m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_EZ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1) { m_jit.setupArgumentsWithExecState(arg1); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_EZ operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1) { m_jit.setupArgumentsWithExecState(TrustedImm32(arg1)); - return 
appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_EZIcfZ operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1, InlineCallFrame* inlineCallFrame, GPRReg arg2) { m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), TrustedImmPtr(inlineCallFrame), arg2); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_EZZ operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1, GPRReg arg2) { m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), arg2); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(P_JITOperation_EJS operation, GPRReg result, JSValueRegs value, size_t index) @@ -1573,113 +1740,164 @@ public: JITCompiler::Call callOperation(P_JITOperation_EStJ operation, GPRReg result, Structure* structure, GPRReg arg2Tag, GPRReg arg2Payload) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), arg2Payload, arg2Tag); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(C_JITOperation_EJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload) { m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } - JITCompiler::Call callOperation(S_JITOperation_J operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload) + + JITCompiler::Call callOperation(C_JITOperation_EJJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload) { - m_jit.setupArguments(arg1Payload, arg1Tag); + m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2Payload, arg2Tag); return appendCallSetResult(operation, result); } + + JITCompiler::Call callOperation(C_JITOperation_EJJJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload, GPRReg arg3Tag, GPRReg arg3Payload) + { + m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2Payload, arg2Tag, arg3Payload, arg3Tag); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(S_JITOperation_EJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload) { m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(S_JITOperation_EJJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload) { m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag); - return appendCallWithExceptionCheckSetResult(operation, result); + return appendCallSetResult(operation, result); } JITCompiler::Call callOperation(J_JITOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload) { m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag); - return 
appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, MacroAssembler::TrustedImm32 imm) { m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG imm, TrustedImm32(JSValue::Int32Tag)); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, MacroAssembler::TrustedImm32 imm, GPRReg arg2Tag, GPRReg arg2Payload) { m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG imm, TrustedImm32(JSValue::Int32Tag), SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); + } + JITCompiler::Call callOperation(J_JITOperation_EJJ operation, JSValueRegs result, JSValueRegs arg1, JSValueRegs arg2) + { + return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1.tagGPR(), arg1.payloadGPR(), arg2.tagGPR(), arg2.payloadGPR()); } JITCompiler::Call callOperation(J_JITOperation_ECJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload) { m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); + } + JITCompiler::Call callOperation(J_JITOperation_ECJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2Payload) + { + m_jit.setupArgumentsWithExecState(arg1, arg2Payload, MacroAssembler::TrustedImm32(JSValue::CellTag)); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(J_JITOperation_ECJ operation, JSValueRegs result, GPRReg arg1, JSValueRegs arg2) { m_jit.setupArgumentsWithExecState(arg1, arg2.payloadGPR(), arg2.tagGPR()); - return appendCallWithExceptionCheckSetResult(operation, result.payloadGPR(), result.tagGPR()); + return appendCallSetResult(operation, result.payloadGPR(), result.tagGPR()); } JITCompiler::Call callOperation(J_JITOperation_ECC operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2) { m_jit.setupArgumentsWithExecState(arg1, arg2); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallSetResult(operation, resultPayload, resultTag); } JITCompiler::Call callOperation(V_JITOperation_EOZD operation, GPRReg arg1, GPRReg arg2, FPRReg arg3) { m_jit.setupArgumentsWithExecState(arg1, arg2, EABI_32BIT_DUMMY_ARG arg3); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } JITCompiler::Call callOperation(V_JITOperation_EJ operation, GPRReg arg1Tag, GPRReg arg1Payload) { - m_jit.setupArgumentsWithExecState(arg1Tag, arg1Payload); - return appendCallWithExceptionCheck(operation); + m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag); + return appendCall(operation); } JITCompiler::Call callOperation(V_JITOperation_EJPP operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, void* pointer) { m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2, TrustedImmPtr(pointer)); - 
return appendCallWithExceptionCheck(operation); + return appendCall(operation); } - JITCompiler::Call callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Payload, StringImpl* uid) + JITCompiler::Call callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Payload, UniquedStringImpl* uid) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, arg1Tag, arg2Payload, TrustedImm32(JSValue::CellTag), TrustedImmPtr(uid)); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } JITCompiler::Call callOperation(V_JITOperation_ECJJ operation, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, GPRReg arg3Tag, GPRReg arg3Payload) { m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, arg3Payload, arg3Tag); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } JITCompiler::Call callOperation(V_JITOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload) { m_jit.setupArgumentsWithExecState(arg1, arg2, EABI_32BIT_DUMMY_ARG SH4_32BIT_DUMMY_ARG arg3Payload, arg3Tag); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } JITCompiler::Call callOperation(V_JITOperation_EOZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload) { m_jit.setupArgumentsWithExecState(arg1, arg2, EABI_32BIT_DUMMY_ARG SH4_32BIT_DUMMY_ARG arg3Payload, arg3Tag); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } JITCompiler::Call callOperation(V_JITOperation_EOZJ operation, GPRReg arg1, GPRReg arg2, TrustedImm32 arg3Tag, GPRReg arg3Payload) { m_jit.setupArgumentsWithExecState(arg1, arg2, EABI_32BIT_DUMMY_ARG SH4_32BIT_DUMMY_ARG arg3Payload, arg3Tag); - return appendCallWithExceptionCheck(operation); + return appendCall(operation); } - JITCompiler::Call callOperation(D_JITOperation_EJ operation, FPRReg result, GPRReg arg1Tag, GPRReg arg1Payload) + JITCompiler::Call callOperation(Z_JITOperation_EJOJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload) { - m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag); - return appendCallWithExceptionCheckSetResult(operation, result); + m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2, EABI_32BIT_DUMMY_ARG arg3Payload, arg3Tag); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(Z_JITOperation_EJOJ operation, GPRReg result, JSValueRegs arg1, GPRReg arg2, JSValueRegs arg3) + { + return callOperation(operation, result, arg1.tagGPR(), arg1.payloadGPR(), arg2, arg3.tagGPR(), arg3.payloadGPR()); } + JITCompiler::Call callOperation(Z_JITOperation_EJZZ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, unsigned arg2, unsigned arg3) + { + m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImm32(arg2), TrustedImm32(arg3)); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(F_JITOperation_EFJZZ operation, GPRReg result, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, unsigned arg3, GPRReg arg4) + { + m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, TrustedImm32(arg3), arg4); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(Z_JITOperation_EJZ operation, GPRReg result, GPRReg arg1Tag, GPRReg 
arg1Payload, unsigned arg2) + { + m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImm32(arg2)); + return appendCallSetResult(operation, result); + } + JITCompiler::Call callOperation(V_JITOperation_EZJZZZ operation, unsigned arg1, GPRReg arg2Tag, GPRReg arg2Payload, unsigned arg3, GPRReg arg4, unsigned arg5) + { + m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), arg2Payload, arg2Tag, TrustedImm32(arg3), arg4, TrustedImm32(arg5)); + return appendCall(operation); + } + JITCompiler::Call callOperation(V_JITOperation_ECJZC operation, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, int32_t arg3, GPRReg arg4) + { + m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, TrustedImm32(arg3), arg4); + return appendCall(operation); + } + JITCompiler::Call callOperation(V_JITOperation_ECIZCC operation, GPRReg arg1, UniquedStringImpl* identOp2, int32_t op3, GPRReg arg4, GPRReg arg5) + { + m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identOp2), TrustedImm32(op3), arg4, arg5); + return appendCall(operation); + } #undef EABI_32BIT_DUMMY_ARG #undef SH4_32BIT_DUMMY_ARG @@ -1750,30 +1968,19 @@ public: void prepareForExternalCall() { } #endif - // These methods add call instructions, with optional exception checks & setting results. - JITCompiler::Call appendCallWithExceptionCheck(const FunctionPtr& function) + // These methods add call instructions, optionally setting results, and optionally rolling back the call frame on an exception. + JITCompiler::Call appendCall(const FunctionPtr& function) { prepareForExternalCall(); - m_jit.emitStoreCodeOrigin(m_currentNode->codeOrigin); - JITCompiler::Call call = m_jit.appendCall(function); - m_jit.exceptionCheck(); - return call; + m_jit.emitStoreCodeOrigin(m_currentNode->origin.semantic); + return m_jit.appendCall(function); } JITCompiler::Call appendCallWithCallFrameRollbackOnException(const FunctionPtr& function) { - prepareForExternalCall(); - m_jit.emitStoreCodeOrigin(m_currentNode->codeOrigin); - JITCompiler::Call call = m_jit.appendCall(function); + JITCompiler::Call call = appendCall(function); m_jit.exceptionCheckWithCallFrameRollback(); return call; } - JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, GPRReg result) - { - JITCompiler::Call call = appendCallWithExceptionCheck(function); - if ((result != InvalidGPRReg) && (result != GPRInfo::returnValueGPR)) - m_jit.move(GPRInfo::returnValueGPR, result); - return call; - } JITCompiler::Call appendCallWithCallFrameRollbackOnExceptionSetResult(const FunctionPtr& function, GPRReg result) { JITCompiler::Call call = appendCallWithCallFrameRollbackOnException(function); @@ -1783,35 +1990,18 @@ public: } JITCompiler::Call appendCallSetResult(const FunctionPtr& function, GPRReg result) { - prepareForExternalCall(); - m_jit.emitStoreCodeOrigin(m_currentNode->codeOrigin); - JITCompiler::Call call = m_jit.appendCall(function); + JITCompiler::Call call = appendCall(function); if (result != InvalidGPRReg) m_jit.move(GPRInfo::returnValueGPR, result); return call; } - JITCompiler::Call appendCall(const FunctionPtr& function) + JITCompiler::Call appendCallSetResult(const FunctionPtr& function, GPRReg result1, GPRReg result2) { - prepareForExternalCall(); - m_jit.emitStoreCodeOrigin(m_currentNode->codeOrigin); - return m_jit.appendCall(function); - } - JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, GPRReg result1, GPRReg result2) - { - JITCompiler::Call call = 
appendCallWithExceptionCheck(function); + JITCompiler::Call call = appendCall(function); m_jit.setupResults(result1, result2); return call; } #if CPU(X86) - JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result) - { - JITCompiler::Call call = appendCallWithExceptionCheck(function); - if (result != InvalidFPRReg) { - m_jit.assembler().fstpl(0, JITCompiler::stackPointerRegister); - m_jit.loadDouble(JITCompiler::stackPointerRegister, result); - } - return call; - } JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result) { JITCompiler::Call call = appendCall(function); @@ -1822,13 +2012,6 @@ public: return call; } #elif CPU(ARM) && !CPU(ARM_HARDFP) - JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result) - { - JITCompiler::Call call = appendCallWithExceptionCheck(function); - if (result != InvalidFPRReg) - m_jit.assembler().vmov(result, GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2); - return call; - } JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result) { JITCompiler::Call call = appendCall(function); @@ -1837,13 +2020,6 @@ public: return call; } #else // CPU(X86_64) || (CPU(ARM) && CPU(ARM_HARDFP)) || CPU(ARM64) || CPU(MIPS) || CPU(SH4) - JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result) - { - JITCompiler::Call call = appendCallWithExceptionCheck(function); - if (result != InvalidFPRReg) - m_jit.moveDouble(FPRInfo::returnValueFPR, result); - return call; - } JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result) { JITCompiler::Call call = appendCall(function); @@ -1947,17 +2123,6 @@ public: void dump(const char* label = 0); - bool isInteger(Node* node) - { - if (node->hasInt32Result()) - return true; - - if (isInt32Constant(node)) - return true; - - return generationInfo(node).isJSInt32(); - } - bool betterUseStrictInt52(Node* node) { return !generationInfo(node).isInt52(); @@ -1974,16 +2139,34 @@ public: void compilePeepHoleBooleanBranch(Node*, Node* branchNode, JITCompiler::RelationalCondition); void compilePeepHoleDoubleBranch(Node*, Node* branchNode, JITCompiler::DoubleCondition); void compilePeepHoleObjectEquality(Node*, Node* branchNode); + void compilePeepHoleObjectStrictEquality(Edge objectChild, Edge otherChild, Node* branchNode); void compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode); void compileObjectEquality(Node*); + void compileObjectStrictEquality(Edge objectChild, Edge otherChild); void compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild); void compileObjectOrOtherLogicalNot(Edge value); void compileLogicalNot(Node*); + void compileLogicalNotStringOrOther(Node*); + void compileStringEquality( + Node*, GPRReg leftGPR, GPRReg rightGPR, GPRReg lengthGPR, + GPRReg leftTempGPR, GPRReg rightTempGPR, GPRReg leftTemp2GPR, + GPRReg rightTemp2GPR, JITCompiler::JumpList fastTrue, + JITCompiler::JumpList fastSlow); void compileStringEquality(Node*); void compileStringIdentEquality(Node*); + void compileStringToUntypedEquality(Node*, Edge stringEdge, Edge untypedEdge); + void compileStringIdentToNotStringVarEquality(Node*, Edge stringEdge, Edge notStringVarEdge); void compileStringZeroLength(Node*); + void compileMiscStrictEq(Node*); + + template<typename Functor> + void extractStringImplFromBinarySymbols(Edge leftSymbolEdge, Edge rightSymbolEdge, const Functor&); + void compileSymbolEquality(Node*); + 
void compilePeepHoleSymbolEquality(Node*, Node* branchNode); void emitObjectOrOtherBranch(Edge value, BasicBlock* taken, BasicBlock* notTaken); + void emitStringBranch(Edge value, BasicBlock* taken, BasicBlock* notTaken); + void emitStringOrOtherBranch(Edge value, BasicBlock* taken, BasicBlock* notTaken); void emitBranch(Node*); struct StringSwitchCase { @@ -1995,7 +2178,10 @@ public: { } - bool operator<(const StringSwitchCase& other) const; + bool operator<(const StringSwitchCase& other) const + { + return stringLessThan(*string, *other.string); + } StringImpl* string; BasicBlock* target; @@ -2013,7 +2199,7 @@ public: void emitSwitchString(Node*, SwitchData*); void emitSwitch(Node*); - void compileToStringOnCell(Node*); + void compileToStringOrCallStringConstructorOnCell(Node*); void compileNewStringObject(Node*); void compileNewTypedArray(Node*); @@ -2023,12 +2209,11 @@ public: void compileBooleanCompare(Node*, MacroAssembler::RelationalCondition); void compileDoubleCompare(Node*, MacroAssembler::DoubleCondition); - bool compileStrictEqForConstant(Node*, Edge value, JSValue constant); - bool compileStrictEq(Node*); void compileAllocatePropertyStorage(Node*); void compileReallocatePropertyStorage(Node*); + void compileGetButterfly(Node*); #if USE(JSVALUE32_64) template<typename BaseOperandType, typename PropertyOperandType, typename ValueOperandType, typename TagType> @@ -2049,22 +2234,47 @@ public: void compileGetByValOnString(Node*); void compileFromCharCode(Node*); - void compileGetByValOnArguments(Node*); - void compileGetArgumentsLength(Node*); + void compileGetByValOnDirectArguments(Node*); + void compileGetByValOnScopedArguments(Node*); + void compileGetScope(Node*); + void compileSkipScope(Node*); + void compileGetArrayLength(Node*); + + void compileCheckTypeInfoFlags(Node*); + void compileCheckIdent(Node*); + + void compileValueRep(Node*); + void compileDoubleRep(Node*); void compileValueToInt32(Node*); void compileUInt32ToNumber(Node*); void compileDoubleAsInt32(Node*); - void compileInt32ToDouble(Node*); - void compileAdd(Node*); + + template<typename SnippetGenerator, J_JITOperation_EJJ slowPathFunction> + void emitUntypedBitOp(Node*); + void compileBitwiseOp(Node*); + + void emitUntypedRightShiftBitOp(Node*); + void compileShiftOp(Node*); + + void compileValueAdd(Node*); + void compileArithAdd(Node*); void compileMakeRope(Node*); + void compileArithClz32(Node*); void compileArithSub(Node*); void compileArithNegate(Node*); void compileArithMul(Node*); void compileArithDiv(Node*); void compileArithMod(Node*); + void compileArithPow(Node*); + void compileArithRounding(Node*); + void compileArithFloor(Node*); + void compileArithCeil(Node*); + void compileArithRandom(Node*); + void compileArithSqrt(Node*); + void compileArithLog(Node*); void compileConstantStoragePointer(Node*); void compileGetIndexedPropertyStorage(Node*); JITCompiler::Jump jumpForTypedArrayOutOfBounds(Node*, GPRReg baseGPR, GPRReg indexGPR); @@ -2074,9 +2284,31 @@ public: void compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node*, TypedArrayType); void compileGetByValOnFloatTypedArray(Node*, TypedArrayType); void compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node*, TypedArrayType); - void compileNewFunctionNoCheck(Node*); - void compileNewFunctionExpression(Node*); + template <typename ClassType> void compileNewFunctionCommon(GPRReg, Structure*, GPRReg, GPRReg, GPRReg, MacroAssembler::JumpList&, size_t, FunctionExecutable*, ptrdiff_t, ptrdiff_t, ptrdiff_t); + void 
compileNewFunction(Node*); + void compileForwardVarargs(Node*); + void compileCreateActivation(Node*); + void compileCreateDirectArguments(Node*); + void compileGetFromArguments(Node*); + void compilePutToArguments(Node*); + void compileCreateScopedArguments(Node*); + void compileCreateClonedArguments(Node*); + void compileCopyRest(Node*); + void compileGetRestLength(Node*); + void compileNotifyWrite(Node*); bool compileRegExpExec(Node*); + void compileIsObjectOrNull(Node*); + void compileIsFunction(Node*); + void compileTypeOf(Node*); + void compileCheckStructure(Node*, GPRReg cellGPR, GPRReg tempGPR); + void compileCheckStructure(Node*); + void compilePutAccessorById(Node*); + void compilePutGetterSetterById(Node*); + void compilePutAccessorByVal(Node*); + + void moveTrueTo(GPRReg); + void moveFalseTo(GPRReg); + void blessBoolean(GPRReg); // size can be an immediate or a register, and must be in bytes. If size is a register, // it must be a different register than resultGPR. Emits code that place a pointer to @@ -2090,7 +2322,7 @@ public: #ifndef NDEBUG m_jit.move(size, resultGPR); MacroAssembler::Jump nonZeroSize = m_jit.branchTest32(MacroAssembler::NonZero, resultGPR); - m_jit.breakpoint(); + m_jit.abortWithReason(DFGBasicStorageAllocatorZeroSize); nonZeroSize.link(&m_jit); #endif @@ -2102,14 +2334,18 @@ public: return slowPath; } - + // Allocator for a cell of a specific size. template <typename StructureType> // StructureType can be GPR or ImmPtr. void emitAllocateJSCell(GPRReg resultGPR, GPRReg allocatorGPR, StructureType structure, GPRReg scratchGPR, MacroAssembler::JumpList& slowPath) { - m_jit.loadPtr(MacroAssembler::Address(allocatorGPR, MarkedAllocator::offsetOfFreeListHead()), resultGPR); - slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, resultGPR)); + if (Options::forceGCSlowPaths()) + slowPath.append(m_jit.jump()); + else { + m_jit.loadPtr(MacroAssembler::Address(allocatorGPR, MarkedAllocator::offsetOfFreeListHead()), resultGPR); + slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, resultGPR)); + } // The object is half-allocated: we have what we know is a fresh object, but // it's still on the GC's free list. @@ -2117,7 +2353,7 @@ public: m_jit.storePtr(scratchGPR, MacroAssembler::Address(allocatorGPR, MarkedAllocator::offsetOfFreeListHead())); // Initialize the object's Structure. - m_jit.storePtr(structure, MacroAssembler::Address(resultGPR, JSCell::structureOffset())); + m_jit.emitStoreStructureWithTypeInfo(structure, resultGPR, scratchGPR); } // Allocator for an object of a specific size. @@ -2131,30 +2367,69 @@ public: m_jit.storePtr(storage, MacroAssembler::Address(resultGPR, JSObject::butterflyOffset())); } - // Convenience allocator for a buit-in object. template <typename ClassType, typename StructureType, typename StorageType> // StructureType and StorageType can be GPR or ImmPtr. 
- void emitAllocateJSObject(GPRReg resultGPR, StructureType structure, StorageType storage, - GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath) + void emitAllocateJSObjectWithKnownSize( + GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1, + GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath, size_t size) { - MarkedAllocator* allocator = 0; - size_t size = ClassType::allocationSize(0); - if (ClassType::needsDestruction && ClassType::hasImmortalStructure) - allocator = &m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(size); - else if (ClassType::needsDestruction) - allocator = &m_jit.vm()->heap.allocatorForObjectWithNormalDestructor(size); - else - allocator = &m_jit.vm()->heap.allocatorForObjectWithoutDestructor(size); + MarkedAllocator* allocator = &m_jit.vm()->heap.allocatorForObjectOfType<ClassType>(size); m_jit.move(TrustedImmPtr(allocator), scratchGPR1); emitAllocateJSObject(resultGPR, scratchGPR1, structure, storage, scratchGPR2, slowPath); } - void emitAllocateJSArray(GPRReg resultGPR, Structure*, GPRReg storageGPR, unsigned numElements); + // Convenience allocator for a built-in object. + template <typename ClassType, typename StructureType, typename StorageType> // StructureType and StorageType can be GPR or ImmPtr. + void emitAllocateJSObject(GPRReg resultGPR, StructureType structure, StorageType storage, + GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath) + { + emitAllocateJSObjectWithKnownSize<ClassType>( + resultGPR, structure, storage, scratchGPR1, scratchGPR2, slowPath, + ClassType::allocationSize(0)); + } -#if USE(JSVALUE64) - JITCompiler::Jump convertToDouble(GPRReg value, FPRReg result, GPRReg tmp); -#elif USE(JSVALUE32_64) - JITCompiler::Jump convertToDouble(JSValueOperand&, FPRReg result); -#endif + template <typename ClassType, typename StructureType> // StructureType and StorageType can be GPR or ImmPtr. 
+ void emitAllocateVariableSizedJSObject(GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath) + { + static_assert(!(MarkedSpace::preciseStep & (MarkedSpace::preciseStep - 1)), "MarkedSpace::preciseStep must be a power of two."); + static_assert(!(MarkedSpace::impreciseStep & (MarkedSpace::impreciseStep - 1)), "MarkedSpace::impreciseStep must be a power of two."); + + MarkedSpace::Subspace& subspace = m_jit.vm()->heap.subspaceForObjectOfType<ClassType>(); + m_jit.add32(TrustedImm32(MarkedSpace::preciseStep - 1), allocationSize); + MacroAssembler::Jump notSmall = m_jit.branch32(MacroAssembler::AboveOrEqual, allocationSize, TrustedImm32(MarkedSpace::preciseCutoff)); + m_jit.rshift32(allocationSize, TrustedImm32(getLSBSet(MarkedSpace::preciseStep)), scratchGPR1); + m_jit.mul32(TrustedImm32(sizeof(MarkedAllocator)), scratchGPR1, scratchGPR1); + m_jit.addPtr(MacroAssembler::TrustedImmPtr(&subspace.preciseAllocators[0]), scratchGPR1); + + MacroAssembler::Jump selectedSmallSpace = m_jit.jump(); + notSmall.link(&m_jit); + slowPath.append(m_jit.branch32(MacroAssembler::AboveOrEqual, allocationSize, TrustedImm32(MarkedSpace::impreciseCutoff))); + m_jit.rshift32(allocationSize, TrustedImm32(getLSBSet(MarkedSpace::impreciseStep)), scratchGPR1); + m_jit.mul32(TrustedImm32(sizeof(MarkedAllocator)), scratchGPR1, scratchGPR1); + m_jit.addPtr(MacroAssembler::TrustedImmPtr(&subspace.impreciseAllocators[0]), scratchGPR1); + + selectedSmallSpace.link(&m_jit); + + emitAllocateJSObject(resultGPR, scratchGPR1, structure, TrustedImmPtr(0), scratchGPR2, slowPath); + } + + template <typename T> + void emitAllocateDestructibleObject(GPRReg resultGPR, Structure* structure, + GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath) + { + emitAllocateJSObject<T>(resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR1, scratchGPR2, slowPath); + m_jit.storePtr(TrustedImmPtr(structure->classInfo()), MacroAssembler::Address(resultGPR, JSDestructibleObject::classInfoOffset())); + } + + void emitAllocateJSArray(GPRReg resultGPR, Structure*, GPRReg storageGPR, unsigned numElements); + + void emitGetLength(InlineCallFrame*, GPRReg lengthGPR, bool includeThis = false); + void emitGetLength(CodeOrigin, GPRReg lengthGPR, bool includeThis = false); + void emitGetCallee(CodeOrigin, GPRReg calleeGPR); + void emitGetArgumentStart(CodeOrigin, GPRReg startGPR); + + // Generate an OSR exit fuzz check. Returns Jump() if OSR exit fuzz is not enabled, or if + // it's in training mode. + MacroAssembler::Jump emitOSRExitFuzzCheck(); // Add a speculation check. void speculationCheck(ExitKind, JSValueSource, Node*, MacroAssembler::Jump jumpToFail); @@ -2177,29 +2452,47 @@ public: // Helpers for performing type checks on an edge stored in the given registers. 
bool needsTypeCheck(Edge edge, SpeculatedType typesPassedThrough) { return m_interpreter.needsTypeCheck(edge, typesPassedThrough); } - void typeCheck(JSValueSource, Edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail); - + void typeCheck(JSValueSource, Edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind = BadType); + + void speculateCellTypeWithoutTypeFiltering(Edge, GPRReg cellGPR, JSType); + void speculateCellType(Edge, GPRReg cellGPR, SpeculatedType, JSType); + void speculateInt32(Edge); +#if USE(JSVALUE64) + void convertMachineInt(Edge, GPRReg resultGPR); void speculateMachineInt(Edge); + void speculateDoubleRepMachineInt(Edge); +#endif // USE(JSVALUE64) void speculateNumber(Edge); void speculateRealNumber(Edge); + void speculateDoubleRepReal(Edge); void speculateBoolean(Edge); void speculateCell(Edge); + void speculateCellOrOther(Edge); void speculateObject(Edge); + void speculateFunction(Edge); void speculateFinalObject(Edge); + void speculateRegExpObject(Edge); void speculateObjectOrOther(Edge); void speculateString(Edge edge, GPRReg cell); void speculateStringIdentAndLoadStorage(Edge edge, GPRReg string, GPRReg storage); void speculateStringIdent(Edge edge, GPRReg string); void speculateStringIdent(Edge); void speculateString(Edge); + void speculateStringOrOther(Edge, JSValueRegs, GPRReg scratch); + void speculateStringOrOther(Edge); + void speculateNotStringVar(Edge); template<typename StructureLocationType> void speculateStringObjectForStructure(Edge, StructureLocationType); void speculateStringObject(Edge, GPRReg); void speculateStringObject(Edge); void speculateStringOrStringObject(Edge); + void speculateSymbol(Edge, GPRReg cell); + void speculateSymbol(Edge); void speculateNotCell(Edge); void speculateOther(Edge); + void speculateMisc(Edge, JSValueRegs); + void speculateMisc(Edge); void speculate(Node*, Edge); JITCompiler::Jump jumpSlowForUnwantedArrayMode(GPRReg tempWithIndexingTypeReg, ArrayMode, IndexingType); @@ -2250,7 +2543,7 @@ public: // The current node being generated. BasicBlock* m_block; Node* m_currentNode; - bool m_canExit; + NodeType m_lastGeneratedNode; unsigned m_indexInBlock; // Virtual and physical register maps. 
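    // A minimal usage sketch for the type-check helpers declared above (illustrative, not taken
    // from this patch; register and constant names are approximate): a JSVALUE32_64 fill of an
    // Int32 edge typically reduces to
    //     typeCheck(
    //         JSValueRegs(tagGPR, payloadGPR), edge, SpecInt32,
    //         m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::Int32Tag)));
    // where the jump taken on a tag mismatch becomes an OSR exit for that edge.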
Vector<GenerationInfo, 32> m_generationInfo; @@ -2271,8 +2564,7 @@ public: }; Vector<BranchRecord, 8> m_branches; - CodeOrigin m_codeOriginForExitTarget; - CodeOrigin m_codeOriginForExitProfile; + NodeOrigin m_origin; InPlaceAbstractState m_state; AbstractInterpreter<InPlaceAbstractState> m_interpreter; @@ -2280,10 +2572,9 @@ public: VariableEventStream* m_stream; MinifiedGraph* m_minifiedGraph; - bool m_isCheckingArgumentTypes; - - Vector<OwnPtr<SlowPathGenerator>, 8> m_slowPathGenerators; + Vector<std::unique_ptr<SlowPathGenerator>, 8> m_slowPathGenerators; Vector<SilentRegisterSavePlan> m_plans; + unsigned m_outOfLineStreamIndex { UINT_MAX }; }; @@ -2308,6 +2599,8 @@ public: #endif { ASSERT(m_jit); + if (!edge) + return; ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == UntypedUse); #if USE(JSVALUE64) if (jit->isFilled(node())) @@ -2320,8 +2613,36 @@ public: #endif } + explicit JSValueOperand(JSValueOperand&& other) + : m_jit(other.m_jit) + , m_edge(other.m_edge) + { +#if USE(JSVALUE64) + m_gprOrInvalid = other.m_gprOrInvalid; +#elif USE(JSVALUE32_64) + m_register.pair.tagGPR = InvalidGPRReg; + m_register.pair.payloadGPR = InvalidGPRReg; + m_isDouble = other.m_isDouble; + + if (m_edge) { + if (m_isDouble) + m_register.fpr = other.m_register.fpr; + else + m_register.pair = other.m_register.pair; + } +#endif + other.m_edge = Edge(); +#if USE(JSVALUE64) + other.m_gprOrInvalid = InvalidGPRReg; +#elif USE(JSVALUE32_64) + other.m_isDouble = false; +#endif + } + ~JSValueOperand() { + if (!m_edge) + return; #if USE(JSVALUE64) ASSERT(m_gprOrInvalid != InvalidGPRReg); m_jit->unlock(m_gprOrInvalid); @@ -2502,6 +2823,8 @@ public: m_gpr = m_jit->reuse(op1.gpr()); else if (m_jit->canReuse(op2.node())) m_gpr = m_jit->reuse(op2.gpr()); + else if (m_jit->canReuse(op1.node(), op2.node()) && op1.gpr() == op2.gpr()) + m_gpr = m_jit->reuse(op1.gpr()); else m_gpr = m_jit->allocate(); } @@ -2527,6 +2850,23 @@ private: GPRReg m_gpr; }; +class JSValueRegsTemporary { +public: + JSValueRegsTemporary(); + JSValueRegsTemporary(SpeculativeJIT*); + ~JSValueRegsTemporary(); + + JSValueRegs regs(); + +private: +#if USE(JSVALUE64) + GPRTemporary m_gpr; +#else + GPRTemporary m_payloadGPR; + GPRTemporary m_tagGPR; +#endif +}; + class FPRTemporary { public: FPRTemporary(SpeculativeJIT*); @@ -2564,18 +2904,18 @@ private: // // These classes lock the result of a call to a C++ helper function. -class GPRResult : public GPRTemporary { +class GPRFlushedCallResult : public GPRTemporary { public: - GPRResult(SpeculativeJIT* jit) + GPRFlushedCallResult(SpeculativeJIT* jit) : GPRTemporary(jit, GPRInfo::returnValueGPR) { } }; #if USE(JSVALUE32_64) -class GPRResult2 : public GPRTemporary { +class GPRFlushedCallResult2 : public GPRTemporary { public: - GPRResult2(SpeculativeJIT* jit) + GPRFlushedCallResult2(SpeculativeJIT* jit) : GPRTemporary(jit, GPRInfo::returnValueGPR2) { } @@ -2716,12 +3056,12 @@ private: // Gives you a canonical Int52 (i.e. it's left-shifted by 16, low bits zero). class SpeculateInt52Operand { public: - explicit SpeculateInt52Operand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) + explicit SpeculateInt52Operand(SpeculativeJIT* jit, Edge edge) : m_jit(jit) , m_edge(edge) , m_gprOrInvalid(InvalidGPRReg) { - ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == MachineIntUse); + RELEASE_ASSERT(edge.useKind() == Int52RepUse); if (jit->isFilled(node())) gpr(); } @@ -2763,12 +3103,12 @@ private: // Gives you a strict Int52 (i.e. 
the payload is in the low 48 bits, high 16 bits are sign-extended). class SpeculateStrictInt52Operand { public: - explicit SpeculateStrictInt52Operand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) + explicit SpeculateStrictInt52Operand(SpeculativeJIT* jit, Edge edge) : m_jit(jit) , m_edge(edge) , m_gprOrInvalid(InvalidGPRReg) { - ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == MachineIntUse); + RELEASE_ASSERT(edge.useKind() == Int52RepUse); if (jit->isFilled(node())) gpr(); } @@ -2811,35 +3151,35 @@ enum OppositeShiftTag { OppositeShift }; class SpeculateWhicheverInt52Operand { public: - explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) + explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge) : m_jit(jit) , m_edge(edge) , m_gprOrInvalid(InvalidGPRReg) , m_strict(jit->betterUseStrictInt52(edge)) { - ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == MachineIntUse); + RELEASE_ASSERT(edge.useKind() == Int52RepUse); if (jit->isFilled(node())) gpr(); } - explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge, const SpeculateWhicheverInt52Operand& other, OperandSpeculationMode mode = AutomaticOperandSpeculation) + explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge, const SpeculateWhicheverInt52Operand& other) : m_jit(jit) , m_edge(edge) , m_gprOrInvalid(InvalidGPRReg) , m_strict(other.m_strict) { - ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == MachineIntUse); + RELEASE_ASSERT(edge.useKind() == Int52RepUse); if (jit->isFilled(node())) gpr(); } - explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge, OppositeShiftTag, const SpeculateWhicheverInt52Operand& other, OperandSpeculationMode mode = AutomaticOperandSpeculation) + explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge, OppositeShiftTag, const SpeculateWhicheverInt52Operand& other) : m_jit(jit) , m_edge(edge) , m_gprOrInvalid(InvalidGPRReg) , m_strict(!other.m_strict) { - ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == MachineIntUse); + RELEASE_ASSERT(edge.useKind() == Int52RepUse); if (jit->isFilled(node())) gpr(); } @@ -2888,13 +3228,13 @@ private: class SpeculateDoubleOperand { public: - explicit SpeculateDoubleOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) + explicit SpeculateDoubleOperand(SpeculativeJIT* jit, Edge edge) : m_jit(jit) , m_edge(edge) , m_fprOrInvalid(InvalidFPRReg) { ASSERT(m_jit); - ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || isDouble(edge.useKind())); + RELEASE_ASSERT(isDouble(edge.useKind())); if (jit->isFilled(node())) fpr(); } @@ -2994,7 +3334,7 @@ public: , m_gprOrInvalid(InvalidGPRReg) { ASSERT(m_jit); - ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BooleanUse); + ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BooleanUse || edge.useKind() == KnownBooleanUse); if (jit->isFilled(node())) gpr(); } @@ -3037,25 +3377,28 @@ template<typename StructureLocationType> void SpeculativeJIT::speculateStringObjectForStructure(Edge edge, StructureLocationType structureLocation) { Structure* stringObjectStructure = - m_jit.globalObjectFor(m_currentNode->codeOrigin)->stringObjectStructure(); + m_jit.globalObjectFor(m_currentNode->origin.semantic)->stringObjectStructure(); - if 
(!m_state.forNode(edge).m_currentKnownStructure.isSubsetOf(StructureSet(stringObjectStructure))) { + if (!m_state.forNode(edge).m_structure.isSubsetOf(StructureSet(stringObjectStructure))) { speculationCheck( NotStringObject, JSValueRegs(), 0, - m_jit.branchPtr( - JITCompiler::NotEqual, structureLocation, TrustedImmPtr(stringObjectStructure))); + m_jit.branchStructure( + JITCompiler::NotEqual, structureLocation, stringObjectStructure)); } } -#define DFG_TYPE_CHECK(source, edge, typesPassedThrough, jumpToFail) do { \ +#define DFG_TYPE_CHECK_WITH_EXIT_KIND(exitKind, source, edge, typesPassedThrough, jumpToFail) do { \ JSValueSource _dtc_source = (source); \ Edge _dtc_edge = (edge); \ SpeculatedType _dtc_typesPassedThrough = typesPassedThrough; \ if (!needsTypeCheck(_dtc_edge, _dtc_typesPassedThrough)) \ break; \ - typeCheck(_dtc_source, _dtc_edge, _dtc_typesPassedThrough, (jumpToFail)); \ + typeCheck(_dtc_source, _dtc_edge, _dtc_typesPassedThrough, (jumpToFail), exitKind); \ } while (0) +#define DFG_TYPE_CHECK(source, edge, typesPassedThrough, jumpToFail) \ + DFG_TYPE_CHECK_WITH_EXIT_KIND(BadType, source, edge, typesPassedThrough, jumpToFail) + } } // namespace JSC::DFG #endif diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp index bc21f929f..97e924df2 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved. + * Copyright (C) 2011-2016 Apple Inc. All rights reserved. * Copyright (C) 2011 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,14 +30,22 @@ #if ENABLE(DFG_JIT) #include "ArrayPrototype.h" +#include "CallFrameShuffler.h" #include "DFGAbstractInterpreterInlines.h" #include "DFGCallArrayAllocatorSlowPathGenerator.h" #include "DFGOperations.h" #include "DFGSlowPathGenerator.h" #include "Debugger.h" -#include "JSActivation.h" +#include "DirectArguments.h" +#include "GetterSetter.h" +#include "JSEnvironmentRecord.h" +#include "JSLexicalEnvironment.h" +#include "JSPropertyNameEnumerator.h" #include "ObjectPrototype.h" -#include "Operations.h" +#include "JSCInlines.h" +#include "SetupVarargsFrame.h" +#include "TypeProfilerLog.h" +#include "Watchdog.h" namespace JSC { namespace DFG { @@ -57,11 +65,12 @@ bool SpeculativeJIT::fillJSValue(Edge edge, GPRReg& tagGPR, GPRReg& payloadGPR, if (edge->hasConstant()) { tagGPR = allocate(); payloadGPR = allocate(); - m_jit.move(Imm32(valueOfJSConstant(edge.node()).tag()), tagGPR); - m_jit.move(Imm32(valueOfJSConstant(edge.node()).payload()), payloadGPR); + JSValue value = edge->asJSValue(); + m_jit.move(Imm32(value.tag()), tagGPR); + m_jit.move(Imm32(value.payload()), payloadGPR); m_gprs.retain(tagGPR, virtualRegister, SpillOrderConstant); m_gprs.retain(payloadGPR, virtualRegister, SpillOrderConstant); - info.fillJSValue(*m_stream, tagGPR, payloadGPR, isInt32Constant(edge.node()) ? 
DataFormatJSInt32 : DataFormatJS); + info.fillJSValue(*m_stream, tagGPR, payloadGPR, DataFormatJS); } else { DataFormat spillFormat = info.spillFormat(); ASSERT(spillFormat != DataFormatNone && spillFormat != DataFormatStorage); @@ -106,7 +115,7 @@ bool SpeculativeJIT::fillJSValue(Edge edge, GPRReg& tagGPR, GPRReg& payloadGPR, m_gprs.lock(gpr); } tagGPR = allocate(); - uint32_t tag = JSValue::EmptyValueTag; + int32_t tag = JSValue::EmptyValueTag; DataFormat fillFormat = DataFormatJS; switch (info.registerFormat()) { case DataFormatInt32: @@ -134,20 +143,6 @@ bool SpeculativeJIT::fillJSValue(Edge edge, GPRReg& tagGPR, GPRReg& payloadGPR, } case DataFormatJSDouble: - case DataFormatDouble: { - FPRReg oldFPR = info.fpr(); - m_fprs.lock(oldFPR); - tagGPR = allocate(); - payloadGPR = allocate(); - boxDouble(oldFPR, tagGPR, payloadGPR); - m_fprs.unlock(oldFPR); - m_fprs.release(oldFPR); - m_gprs.retain(tagGPR, virtualRegister, SpillOrderJS); - m_gprs.retain(payloadGPR, virtualRegister, SpillOrderJS); - info.fillJSValue(*m_stream, tagGPR, payloadGPR, DataFormatJS); - return true; - } - case DataFormatJS: case DataFormatJSInt32: case DataFormatJSCell: @@ -160,6 +155,7 @@ bool SpeculativeJIT::fillJSValue(Edge edge, GPRReg& tagGPR, GPRReg& payloadGPR, } case DataFormatStorage: + case DataFormatDouble: // this type currently never occurs RELEASE_ASSERT_NOT_REACHED(); @@ -169,12 +165,36 @@ bool SpeculativeJIT::fillJSValue(Edge edge, GPRReg& tagGPR, GPRReg& payloadGPR, } } -void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode) +void SpeculativeJIT::cachedGetById( + CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, + unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode) { + // This is a hacky fix for when the register allocator decides to alias the base payload with the result tag. This only happens + // in the case of GetByIdFlush, which has a relatively expensive register allocation story already so we probably don't need to + // trip over one move instruction. + if (basePayloadGPR == resultTagGPR) { + RELEASE_ASSERT(basePayloadGPR != resultPayloadGPR); + + if (baseTagGPROrNone == resultPayloadGPR) { + m_jit.swap(basePayloadGPR, baseTagGPROrNone); + baseTagGPROrNone = resultTagGPR; + } else + m_jit.move(basePayloadGPR, resultPayloadGPR); + basePayloadGPR = resultPayloadGPR; + } + + RegisterSet usedRegisters = this->usedRegisters(); + if (spillMode == DontSpill) { + // We've already flushed registers to the stack, we don't need to spill these. 
+ usedRegisters.set(JSValueRegs(baseTagGPROrNone, basePayloadGPR), false); + usedRegisters.set(JSValueRegs(resultTagGPR, resultPayloadGPR), false); + } + + CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size()); JITGetByIdGenerator gen( - m_jit.codeBlock(), codeOrigin, usedRegisters(), GPRInfo::callFrameRegister, + m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, JSValueRegs(baseTagGPROrNone, basePayloadGPR), - JSValueRegs(resultTagGPR, resultPayloadGPR), spillMode != NeedToSpill); + JSValueRegs(resultTagGPR, resultPayloadGPR)); gen.generateFastPath(m_jit); @@ -182,8 +202,8 @@ void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNon if (slowPathTarget.isSet()) slowCases.append(slowPathTarget); slowCases.append(gen.slowPathJump()); - - OwnPtr<SlowPathGenerator> slowPath; + + std::unique_ptr<SlowPathGenerator> slowPath; if (baseTagGPROrNone == InvalidGPRReg) { slowPath = slowPathCall( slowCases, this, operationGetByIdOptimize, @@ -196,17 +216,24 @@ void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNon JSValueRegs(resultTagGPR, resultPayloadGPR), gen.stubInfo(), baseTagGPROrNone, basePayloadGPR, identifierUID(identifierNumber)); } - + m_jit.addGetById(gen, slowPath.get()); - addSlowPathGenerator(slowPath.release()); + addSlowPathGenerator(WTFMove(slowPath)); } -void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget) +void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode) { + RegisterSet usedRegisters = this->usedRegisters(); + if (spillMode == DontSpill) { + // We've already flushed registers to the stack, we don't need to spill these. 
+ usedRegisters.set(basePayloadGPR, false); + usedRegisters.set(JSValueRegs(valueTagGPR, valuePayloadGPR), false); + } + CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size()); JITPutByIdGenerator gen( - m_jit.codeBlock(), codeOrigin, usedRegisters(), GPRInfo::callFrameRegister, + m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, JSValueRegs::payloadOnly(basePayloadGPR), JSValueRegs(valueTagGPR, valuePayloadGPR), - scratchGPR, false, m_jit.ecmaModeFor(codeOrigin), putKind); + scratchGPR, m_jit.ecmaModeFor(codeOrigin), putKind); gen.generateFastPath(m_jit); @@ -215,17 +242,17 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, slowCases.append(slowPathTarget); slowCases.append(gen.slowPathJump()); - OwnPtr<SlowPathGenerator> slowPath = slowPathCall( + auto slowPath = slowPathCall( slowCases, this, gen.slowPathFunction(), NoResult, gen.stubInfo(), valueTagGPR, valuePayloadGPR, basePayloadGPR, identifierUID(identifierNumber)); m_jit.addPutById(gen, slowPath.get()); - addSlowPathGenerator(slowPath.release()); + addSlowPathGenerator(WTFMove(slowPath)); } -void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert) +void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNullOrUndefined(Edge operand) { - JSValueOperand arg(this, operand); + JSValueOperand arg(this, operand, ManualOperandSpeculation); GPRReg argTagGPR = arg.tagGPR(); GPRReg argPayloadGPR = arg.payloadGPR(); @@ -236,29 +263,32 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool inv JITCompiler::Jump notMasqueradesAsUndefined; if (masqueradesAsUndefinedWatchpointIsStillValid()) { if (!isKnownCell(operand.node())) - notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag)); - - m_jit.move(invert ? TrustedImm32(1) : TrustedImm32(0), resultPayloadGPR); + notCell = m_jit.branchIfNotCell(arg.jsValueRegs()); + + m_jit.move(TrustedImm32(0), resultPayloadGPR); notMasqueradesAsUndefined = m_jit.jump(); } else { GPRTemporary localGlobalObject(this); GPRTemporary remoteGlobalObject(this); if (!isKnownCell(operand.node())) - notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag)); - - m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureOffset()), resultPayloadGPR); - JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::NonZero, JITCompiler::Address(resultPayloadGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined)); + notCell = m_jit.branchIfNotCell(arg.jsValueRegs()); + + JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8( + JITCompiler::NonZero, + JITCompiler::Address(argPayloadGPR, JSCell::typeInfoFlagsOffset()), + JITCompiler::TrustedImm32(MasqueradesAsUndefined)); - m_jit.move(invert ? 
TrustedImm32(1) : TrustedImm32(0), resultPayloadGPR); + m_jit.move(TrustedImm32(0), resultPayloadGPR); notMasqueradesAsUndefined = m_jit.jump(); isMasqueradesAsUndefined.link(&m_jit); GPRReg localGlobalObjectGPR = localGlobalObject.gpr(); GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr(); - m_jit.move(JITCompiler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)), localGlobalObjectGPR); + m_jit.move(JITCompiler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR); + m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureIDOffset()), resultPayloadGPR); m_jit.loadPtr(JITCompiler::Address(resultPayloadGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR); - m_jit.compare32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, resultPayloadGPR); + m_jit.compare32(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, resultPayloadGPR); } if (!isKnownCell(operand.node())) { @@ -267,9 +297,8 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool inv notCell.link(&m_jit); // null or undefined? COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag); - m_jit.move(argTagGPR, resultPayloadGPR); - m_jit.or32(TrustedImm32(1), resultPayloadGPR); - m_jit.compare32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultPayloadGPR, TrustedImm32(JSValue::NullTag), resultPayloadGPR); + m_jit.or32(TrustedImm32(1), argTagGPR, resultPayloadGPR); + m_jit.compare32(JITCompiler::Equal, resultPayloadGPR, TrustedImm32(JSValue::NullTag), resultPayloadGPR); done.link(&m_jit); } @@ -279,11 +308,12 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool inv booleanResult(resultPayloadGPR, m_currentNode); } -void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, Node* branchNode, bool invert) +void SpeculativeJIT::nonSpeculativePeepholeBranchNullOrUndefined(Edge operand, Node* branchNode) { - BasicBlock* taken = branchNode->takenBlock(); - BasicBlock* notTaken = branchNode->notTakenBlock(); - + BasicBlock* taken = branchNode->branchData()->taken.block; + BasicBlock* notTaken = branchNode->branchData()->notTaken.block; + + bool invert = false; if (taken == nextBlock()) { invert = !invert; BasicBlock* tmp = taken; @@ -291,7 +321,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, Node* branch notTaken = tmp; } - JSValueOperand arg(this, operand); + JSValueOperand arg(this, operand, ManualOperandSpeculation); GPRReg argTagGPR = arg.tagGPR(); GPRReg argPayloadGPR = arg.payloadGPR(); @@ -302,22 +332,25 @@ void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, Node* branch if (masqueradesAsUndefinedWatchpointIsStillValid()) { if (!isKnownCell(operand.node())) - notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag)); - + notCell = m_jit.branchIfNotCell(arg.jsValueRegs()); + jump(invert ? taken : notTaken, ForceJump); } else { GPRTemporary localGlobalObject(this); GPRTemporary remoteGlobalObject(this); if (!isKnownCell(operand.node())) - notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag)); - - m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureOffset()), resultGPR); - branchTest8(JITCompiler::Zero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), invert ? 
taken : notTaken); + notCell = m_jit.branchIfNotCell(arg.jsValueRegs()); + + branchTest8(JITCompiler::Zero, + JITCompiler::Address(argPayloadGPR, JSCell::typeInfoFlagsOffset()), + JITCompiler::TrustedImm32(MasqueradesAsUndefined), + invert ? taken : notTaken); GPRReg localGlobalObjectGPR = localGlobalObject.gpr(); GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr(); - m_jit.move(TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)), localGlobalObjectGPR); + m_jit.move(TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR); + m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureIDOffset()), resultGPR); m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR); branchPtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, invert ? notTaken : taken); } @@ -328,41 +361,17 @@ void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, Node* branch notCell.link(&m_jit); // null or undefined? COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag); - m_jit.move(argTagGPR, resultGPR); - m_jit.or32(TrustedImm32(1), resultGPR); + m_jit.or32(TrustedImm32(1), argTagGPR, resultGPR); branch32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(JSValue::NullTag), taken); } jump(notTaken); } -bool SpeculativeJIT::nonSpeculativeCompareNull(Node* node, Edge operand, bool invert) -{ - unsigned branchIndexInBlock = detectPeepHoleBranch(); - if (branchIndexInBlock != UINT_MAX) { - Node* branchNode = m_block->at(branchIndexInBlock); - - ASSERT(node->adjustedRefCount() == 1); - - nonSpeculativePeepholeBranchNull(operand, branchNode, invert); - - use(node->child1()); - use(node->child2()); - m_indexInBlock = branchIndexInBlock; - m_currentNode = branchNode; - - return true; - } - - nonSpeculativeNonPeepholeCompareNull(operand, invert); - - return false; -} - void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction) { - BasicBlock* taken = branchNode->takenBlock(); - BasicBlock* notTaken = branchNode->notTakenBlock(); + BasicBlock* taken = branchNode->branchData()->taken.block; + BasicBlock* notTaken = branchNode->branchData()->notTaken.block; JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero; @@ -386,7 +395,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, JITCompiler::JumpList slowPath; if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) { - GPRResult result(this); + GPRFlushedCallResult result(this); GPRReg resultGPR = result.gpr(); arg1.use(); @@ -394,6 +403,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, flushRegisters(); callOperation(helperFunction, resultGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR); + m_jit.exceptionCheck(); branchTest32(callResultCondition, resultGPR, taken); } else { @@ -417,6 +427,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, silentSpillAllRegisters(resultGPR); callOperation(helperFunction, resultGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR); + m_jit.exceptionCheck(); silentFillAllRegisters(resultGPR); branchTest32(callResultCondition, resultGPR, taken); @@ -438,7 +449,7 @@ public: S_JITOperation_EJJ function, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg 
arg2Tag, GPRReg arg2Payload) : CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg>( - from, jit, function, NeedToSpill, result) + from, jit, function, NeedToSpill, ExceptionCheckRequirement::CheckNeeded, result) , m_arg1Tag(arg1Tag) , m_arg1Payload(arg1Payload) , m_arg2Tag(arg2Tag) @@ -477,7 +488,7 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler JITCompiler::JumpList slowPath; if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) { - GPRResult result(this); + GPRFlushedCallResult result(this); GPRReg resultPayloadGPR = result.gpr(); arg1.use(); @@ -485,6 +496,7 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler flushRegisters(); callOperation(helperFunction, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR); + m_jit.exceptionCheck(); booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly); } else { @@ -502,10 +514,9 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler m_jit.compare32(cond, arg1PayloadGPR, arg2PayloadGPR, resultPayloadGPR); if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) { - addSlowPathGenerator(adoptPtr( - new CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>( + addSlowPathGenerator(std::make_unique<CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>>( slowPath, this, helperFunction, resultPayloadGPR, arg1TagGPR, - arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR))); + arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR)); } booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly); @@ -514,8 +525,8 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode, bool invert) { - BasicBlock* taken = branchNode->takenBlock(); - BasicBlock* notTaken = branchNode->notTakenBlock(); + BasicBlock* taken = branchNode->branchData()->taken.block; + BasicBlock* notTaken = branchNode->branchData()->notTaken.block; // The branch instruction will branch to the taken block. // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through. @@ -546,6 +557,7 @@ void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode silentSpillAllRegisters(resultPayloadGPR); callOperation(operationCompareStrictEqCell, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR); + m_jit.exceptionCheck(); silentFillAllRegisters(resultPayloadGPR); branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR, taken); @@ -554,6 +566,7 @@ void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode silentSpillAllRegisters(resultPayloadGPR); callOperation(operationCompareStrictEq, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR); + m_jit.exceptionCheck(); silentFillAllRegisters(resultPayloadGPR); branchTest32(invert ? 
JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR, taken); @@ -590,6 +603,7 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert) silentSpillAllRegisters(resultPayloadGPR); callOperation(operationCompareStrictEqCell, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR); + m_jit.exceptionCheck(); silentFillAllRegisters(resultPayloadGPR); m_jit.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR); @@ -601,6 +615,7 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert) silentSpillAllRegisters(resultPayloadGPR); callOperation(operationCompareStrictEq, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR); silentFillAllRegisters(resultPayloadGPR); + m_jit.exceptionCheck(); m_jit.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR); } @@ -608,92 +623,308 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert) booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly); } -void SpeculativeJIT::emitCall(Node* node) +void SpeculativeJIT::compileMiscStrictEq(Node* node) { - if (node->op() != Call) - ASSERT(node->op() == Construct); - - // For constructors, the this argument is not passed but we have to make space - // for it. - int dummyThisArgument = node->op() == Call ? 0 : 1; - - CallLinkInfo::CallType callType = node->op() == Call ? CallLinkInfo::Call : CallLinkInfo::Construct; + JSValueOperand op1(this, node->child1(), ManualOperandSpeculation); + JSValueOperand op2(this, node->child2(), ManualOperandSpeculation); + GPRTemporary result(this); + + if (node->child1().useKind() == MiscUse) + speculateMisc(node->child1(), op1.jsValueRegs()); + if (node->child2().useKind() == MiscUse) + speculateMisc(node->child2(), op2.jsValueRegs()); + + m_jit.move(TrustedImm32(0), result.gpr()); + JITCompiler::Jump notEqual = m_jit.branch32(JITCompiler::NotEqual, op1.tagGPR(), op2.tagGPR()); + m_jit.compare32(JITCompiler::Equal, op1.payloadGPR(), op2.payloadGPR(), result.gpr()); + notEqual.link(&m_jit); + booleanResult(result.gpr(), node); +} - Edge calleeEdge = m_jit.graph().m_varArgChildren[node->firstChild()]; - JSValueOperand callee(this, calleeEdge); - GPRReg calleeTagGPR = callee.tagGPR(); - GPRReg calleePayloadGPR = callee.payloadGPR(); - use(calleeEdge); +void SpeculativeJIT::emitCall(Node* node) +{ + CallLinkInfo::CallType callType; + bool isVarargs = false; + bool isForwardVarargs = false; + bool isTail = false; + bool isEmulatedTail = false; + switch (node->op()) { + case Call: + callType = CallLinkInfo::Call; + break; + case TailCall: + callType = CallLinkInfo::TailCall; + isTail = true; + break; + case TailCallInlinedCaller: + callType = CallLinkInfo::Call; + isEmulatedTail = true; + break; + case Construct: + callType = CallLinkInfo::Construct; + break; + case CallVarargs: + callType = CallLinkInfo::CallVarargs; + isVarargs = true; + break; + case TailCallVarargs: + callType = CallLinkInfo::TailCallVarargs; + isVarargs = true; + isTail = true; + break; + case TailCallVarargsInlinedCaller: + callType = CallLinkInfo::CallVarargs; + isVarargs = true; + isEmulatedTail = true; + break; + case ConstructVarargs: + callType = CallLinkInfo::ConstructVarargs; + isVarargs = true; + break; + case CallForwardVarargs: + callType = CallLinkInfo::CallVarargs; + isForwardVarargs = true; + break; + case TailCallForwardVarargs: + callType = CallLinkInfo::TailCallVarargs; + isTail = true; + isForwardVarargs = true; + break; + case TailCallForwardVarargsInlinedCaller: + 
callType = CallLinkInfo::CallVarargs; + isEmulatedTail = true; + isForwardVarargs = true; + break; + case ConstructForwardVarargs: + callType = CallLinkInfo::ConstructVarargs; + isForwardVarargs = true; + break; + default: + DFG_CRASH(m_jit.graph(), node, "bad node type"); + break; + } - // The call instruction's first child is either the function (normal call) or the - // receiver (method call). subsequent children are the arguments. - int numPassedArgs = node->numChildren() - 1; + Edge calleeEdge = m_jit.graph().child(node, 0); + GPRReg calleeTagGPR; + GPRReg calleePayloadGPR; + CallFrameShuffleData shuffleData; - int numArgs = numPassedArgs + dummyThisArgument; - - m_jit.store32(MacroAssembler::TrustedImm32(numArgs), calleeFramePayloadSlot(numArgs, JSStack::ArgumentCount)); - m_jit.storePtr(GPRInfo::callFrameRegister, calleeFrameCallerFrame(numArgs)); - m_jit.store32(calleePayloadGPR, calleeFramePayloadSlot(numArgs, JSStack::Callee)); - m_jit.store32(calleeTagGPR, calleeFrameTagSlot(numArgs, JSStack::Callee)); + // Gotta load the arguments somehow. Varargs is trickier. + if (isVarargs || isForwardVarargs) { + CallVarargsData* data = node->callVarargsData(); - for (int i = 0; i < numPassedArgs; i++) { - Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i]; - JSValueOperand arg(this, argEdge); - GPRReg argTagGPR = arg.tagGPR(); - GPRReg argPayloadGPR = arg.payloadGPR(); - use(argEdge); + GPRReg resultGPR; + unsigned numUsedStackSlots = m_jit.graph().m_nextMachineLocal; + + if (isForwardVarargs) { + flushRegisters(); + use(node->child2()); + + GPRReg scratchGPR1; + GPRReg scratchGPR2; + GPRReg scratchGPR3; + + scratchGPR1 = JITCompiler::selectScratchGPR(); + scratchGPR2 = JITCompiler::selectScratchGPR(scratchGPR1); + scratchGPR3 = JITCompiler::selectScratchGPR(scratchGPR1, scratchGPR2); + + m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR2); + JITCompiler::JumpList slowCase; + emitSetupVarargsFrameFastCase(m_jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, node->child2()->origin.semantic.inlineCallFrame, data->firstVarArgOffset, slowCase); + JITCompiler::Jump done = m_jit.jump(); + slowCase.link(&m_jit); + callOperation(operationThrowStackOverflowForVarargs); + m_jit.exceptionCheck(); + m_jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow); + done.link(&m_jit); + resultGPR = scratchGPR2; + } else { + GPRReg argumentsPayloadGPR; + GPRReg argumentsTagGPR; + GPRReg scratchGPR1; + GPRReg scratchGPR2; + GPRReg scratchGPR3; + + auto loadArgumentsGPR = [&] (GPRReg reservedGPR) { + if (reservedGPR != InvalidGPRReg) + lock(reservedGPR); + JSValueOperand arguments(this, node->child2()); + argumentsTagGPR = arguments.tagGPR(); + argumentsPayloadGPR = arguments.payloadGPR(); + if (reservedGPR != InvalidGPRReg) + unlock(reservedGPR); + flushRegisters(); + + scratchGPR1 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, reservedGPR); + scratchGPR2 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, scratchGPR1, reservedGPR); + scratchGPR3 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, scratchGPR1, scratchGPR2, reservedGPR); + }; + + loadArgumentsGPR(InvalidGPRReg); + + DFG_ASSERT(m_jit.graph(), node, isFlushed()); - m_jit.store32(argTagGPR, calleeArgumentTagSlot(numArgs, i + dummyThisArgument)); - m_jit.store32(argPayloadGPR, calleeArgumentPayloadSlot(numArgs, i + dummyThisArgument)); + // Right now, arguments is in argumentsTagGPR/argumentsPayloadGPR and the register file is + // flushed. 
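The non-forwarding varargs path above follows a two-call protocol: first a runtime call sizes the callee frame, then, after the arguments operand has been reloaded (the sizing call may have clobbered the registers holding it), a second runtime call copies the arguments into the reserved frame. A rough, self-contained sketch of that ordering; sizeFrameForVarargs and setupVarargsFrame are invented stand-ins for operationSizeFrameForVarargs and operationSetupVarargsFrame, and the sketch treats the frame size as a plain byte count:

    #include <cstddef>

    // Invented stand-ins for the runtime calls; bodies are trivial placeholders.
    static std::size_t sizeFrameForVarargs(const void*, unsigned) { return 64; }
    static void setupVarargsFrame(char*, const void*, unsigned) {}

    static char* varargsCallSketch(const void* argumentsObject, unsigned firstVarArgOffset, char* stackTop)
    {
        // 1. Measure: how large must the callee frame be? This is a call, so
        //    registers holding the arguments operand may be clobbered.
        std::size_t frameSize = sizeFrameForVarargs(argumentsObject, firstVarArgOffset);

        // 2. Reserve the frame below the current stack usage (the real code also
        //    rounds to the stack alignment and leaves room for CallerFrameAndPC).
        char* newFrame = stackTop - frameSize;

        // 3. Reload the arguments operand (loadArgumentsGPR above), then populate
        //    the reserved frame with the spread-out arguments.
        setupVarargsFrame(newFrame, argumentsObject, firstVarArgOffset);
        return newFrame;
    }
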
+ callOperation(operationSizeFrameForVarargs, GPRInfo::returnValueGPR, argumentsTagGPR, argumentsPayloadGPR, numUsedStackSlots, data->firstVarArgOffset); + m_jit.exceptionCheck(); + + // Now we have the argument count of the callee frame, but we've lost the arguments operand. + // Reconstruct the arguments operand while preserving the callee frame. + loadArgumentsGPR(GPRInfo::returnValueGPR); + m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR1); + emitSetVarargsFrame(m_jit, GPRInfo::returnValueGPR, false, scratchGPR1, scratchGPR1); + m_jit.addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 6 * sizeof(void*)))), scratchGPR1, JITCompiler::stackPointerRegister); + + callOperation(operationSetupVarargsFrame, GPRInfo::returnValueGPR, scratchGPR1, argumentsTagGPR, argumentsPayloadGPR, data->firstVarArgOffset, GPRInfo::returnValueGPR); + m_jit.exceptionCheck(); + resultGPR = GPRInfo::returnValueGPR; + } + + m_jit.addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), resultGPR, JITCompiler::stackPointerRegister); + + DFG_ASSERT(m_jit.graph(), node, isFlushed()); + + // We don't need the arguments array anymore. + if (isVarargs) + use(node->child2()); + + // Now set up the "this" argument. + JSValueOperand thisArgument(this, node->child3()); + GPRReg thisArgumentTagGPR = thisArgument.tagGPR(); + GPRReg thisArgumentPayloadGPR = thisArgument.payloadGPR(); + thisArgument.use(); + + m_jit.store32(thisArgumentTagGPR, JITCompiler::calleeArgumentTagSlot(0)); + m_jit.store32(thisArgumentPayloadGPR, JITCompiler::calleeArgumentPayloadSlot(0)); + } else { + // The call instruction's first child is either the function (normal call) or the + // receiver (method call). subsequent children are the arguments. + int numPassedArgs = node->numChildren() - 1; + + if (node->op() == TailCall) { + JSValueOperand callee(this, calleeEdge); + calleeTagGPR = callee.tagGPR(); + calleePayloadGPR = callee.payloadGPR(); + use(calleeEdge); + + shuffleData.numLocals = m_jit.graph().frameRegisterCount(); + shuffleData.callee = ValueRecovery::inPair(calleeTagGPR, calleePayloadGPR); + shuffleData.args.resize(numPassedArgs); + + for (int i = 0; i < numPassedArgs; ++i) { + Edge argEdge = m_jit.graph().varArgChild(node, i + 1); + GenerationInfo& info = generationInfo(argEdge.node()); + use(argEdge); + shuffleData.args[i] = info.recovery(argEdge->virtualRegister()); + } + } else { + m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs), m_jit.calleeFramePayloadSlot(JSStack::ArgumentCount)); + + for (int i = 0; i < numPassedArgs; i++) { + Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i]; + JSValueOperand arg(this, argEdge); + GPRReg argTagGPR = arg.tagGPR(); + GPRReg argPayloadGPR = arg.payloadGPR(); + use(argEdge); + + m_jit.store32(argTagGPR, m_jit.calleeArgumentTagSlot(i)); + m_jit.store32(argPayloadGPR, m_jit.calleeArgumentPayloadSlot(i)); + } + } } - flushRegisters(); + if (node->op() != TailCall) { + JSValueOperand callee(this, calleeEdge); + calleeTagGPR = callee.tagGPR(); + calleePayloadGPR = callee.payloadGPR(); + use(calleeEdge); + m_jit.store32(calleePayloadGPR, m_jit.calleeFramePayloadSlot(JSStack::Callee)); + m_jit.store32(calleeTagGPR, m_jit.calleeFrameTagSlot(JSStack::Callee)); + + if (!isTail) + flushRegisters(); + } - GPRResult resultPayload(this); - GPRResult2 resultTag(this); + GPRFlushedCallResult resultPayload(this); + GPRFlushedCallResult2 resultTag(this); GPRReg resultPayloadGPR = resultPayload.gpr(); GPRReg resultTagGPR = resultTag.gpr(); 
JITCompiler::DataLabelPtr targetToCheck; JITCompiler::JumpList slowPath; - m_jit.emitStoreCodeOrigin(node->codeOrigin); - - m_jit.addPtr(TrustedImm32(calleeFrameOffset(numArgs)), GPRInfo::callFrameRegister); + CodeOrigin staticOrigin = node->origin.semantic; + ASSERT(!isTail || !staticOrigin.inlineCallFrame || !staticOrigin.inlineCallFrame->getCallerSkippingTailCalls()); + ASSERT(!isEmulatedTail || (staticOrigin.inlineCallFrame && staticOrigin.inlineCallFrame->getCallerSkippingTailCalls())); + CodeOrigin dynamicOrigin = + isEmulatedTail ? *staticOrigin.inlineCallFrame->getCallerSkippingTailCalls() : staticOrigin; + CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(dynamicOrigin, m_stream->size()); + m_jit.emitStoreCallSiteIndex(callSite); - slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, calleeTagGPR, TrustedImm32(JSValue::CellTag))); + CallLinkInfo* info = m_jit.codeBlock()->addCallLinkInfo(); + + slowPath.append(m_jit.branchIfNotCell(JSValueRegs(calleeTagGPR, calleePayloadGPR))); slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleePayloadGPR, targetToCheck)); - m_jit.loadPtr(MacroAssembler::Address(calleePayloadGPR, OBJECT_OFFSETOF(JSFunction, m_scope)), resultPayloadGPR); - m_jit.storePtr(resultPayloadGPR, MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); - m_jit.store32(MacroAssembler::TrustedImm32(JSValue::CellTag), MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); - JITCompiler::Call fastCall = m_jit.nearCall(); + if (isTail) { + if (node->op() == TailCall) { + info->setFrameShuffleData(shuffleData); + CallFrameShuffler(m_jit, shuffleData).prepareForTailCall(); + } else { + m_jit.emitRestoreCalleeSaves(); + m_jit.prepareForTailCallSlow(); + } + } + + JITCompiler::Call fastCall = isTail ? 
m_jit.nearTailCall() : m_jit.nearCall(); JITCompiler::Jump done = m_jit.jump(); slowPath.link(&m_jit); - // Callee payload needs to be in regT0, tag in regT1 - if (calleeTagGPR == GPRInfo::regT0) { - if (calleePayloadGPR == GPRInfo::regT1) - m_jit.swap(GPRInfo::regT1, GPRInfo::regT0); - else { - m_jit.move(calleeTagGPR, GPRInfo::regT1); + if (node->op() == TailCall) { + CallFrameShuffler callFrameShuffler(m_jit, shuffleData); + callFrameShuffler.setCalleeJSValueRegs(JSValueRegs( + GPRInfo::regT1, GPRInfo::regT0)); + callFrameShuffler.prepareForSlowPath(); + } else { + // Callee payload needs to be in regT0, tag in regT1 + if (calleeTagGPR == GPRInfo::regT0) { + if (calleePayloadGPR == GPRInfo::regT1) + m_jit.swap(GPRInfo::regT1, GPRInfo::regT0); + else { + m_jit.move(calleeTagGPR, GPRInfo::regT1); + m_jit.move(calleePayloadGPR, GPRInfo::regT0); + } + } else { m_jit.move(calleePayloadGPR, GPRInfo::regT0); + m_jit.move(calleeTagGPR, GPRInfo::regT1); } - } else { - m_jit.move(calleePayloadGPR, GPRInfo::regT0); - m_jit.move(calleeTagGPR, GPRInfo::regT1); + + if (isTail) + m_jit.emitRestoreCalleeSaves(); } + + m_jit.move(MacroAssembler::TrustedImmPtr(info), GPRInfo::regT2); JITCompiler::Call slowCall = m_jit.nearCall(); done.link(&m_jit); - m_jit.setupResults(resultPayloadGPR, resultTagGPR); + if (isTail) + m_jit.abortWithReason(JITDidReturnFromTailCall); + else { + m_jit.setupResults(resultPayloadGPR, resultTagGPR); - jsValueResult(resultTagGPR, resultPayloadGPR, node, DataFormatJS, UseChildrenCalledExplicitly); + jsValueResult(resultTagGPR, resultPayloadGPR, node, DataFormatJS, UseChildrenCalledExplicitly); + // After the calls are done, we need to reestablish our stack + // pointer. We rely on this for varargs calls, calls with arity + // mismatch (the callframe is slided) and tail calls. 
+ m_jit.addPtr(TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, JITCompiler::stackPointerRegister); + } - m_jit.addJSCall(fastCall, slowCall, targetToCheck, callType, calleePayloadGPR, node->codeOrigin); + info->setUpCall(callType, node->origin.semantic, calleePayloadGPR); + m_jit.addJSCall(fastCall, slowCall, targetToCheck, info); } template<bool strict> @@ -702,22 +933,23 @@ GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnF AbstractValue& value = m_state.forNode(edge); SpeculatedType type = value.m_type; ASSERT(edge.useKind() != KnownInt32Use || !(value.m_type & ~SpecInt32)); + m_interpreter.filter(value, SpecInt32); + if (value.isClear()) { + terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); + returnFormat = DataFormatInt32; + return allocate(); + } + VirtualRegister virtualRegister = edge->virtualRegister(); GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); switch (info.registerFormat()) { case DataFormatNone: { - if ((edge->hasConstant() && !isInt32Constant(edge.node())) || info.spillFormat() == DataFormatDouble) { - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); - returnFormat = DataFormatInt32; - return allocate(); - } - if (edge->hasConstant()) { - ASSERT(isInt32Constant(edge.node())); + ASSERT(edge->isInt32Constant()); GPRReg gpr = allocate(); - m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(edge.node())), gpr); + m_jit.move(MacroAssembler::Imm32(edge->asInt32()), gpr); m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); info.fillInt32(*m_stream, gpr); returnFormat = DataFormatInt32; @@ -725,6 +957,7 @@ GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnF } DataFormat spillFormat = info.spillFormat(); + ASSERT_UNUSED(spillFormat, (spillFormat & DataFormatJS) || spillFormat == DataFormatInt32); // If we know this was spilled as an integer we can fill without checking. 
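The new preamble in fillSpeculateInt32Internal above makes the speculation contract explicit: intersect what the abstract interpreter knows about the edge with SpecInt32, and if the intersection is empty this code path can never be reached under the speculation, so it terminates (an unconditional OSR exit) and hands back a freshly allocated register that will never be read. A schematic version of that filter-then-terminate pattern, with toy types standing in for AbstractValue and SpeculatedType:

    #include <cstdint>

    using SpeculatedTypeBits = std::uint32_t;
    constexpr SpeculatedTypeBits SpecInt32Bit = 1u << 0;   // toy bit, not the real encoding

    struct ToyAbstractValue {
        SpeculatedTypeBits type;
        void filter(SpeculatedTypeBits mask) { type &= mask; }
        bool isClear() const { return !type; }
    };

    // Returns true if speculation can proceed; false corresponds to
    // terminateSpeculativeExecution(...) followed by returning a dummy register.
    static bool filterForInt32Speculation(ToyAbstractValue& value)
    {
        value.filter(SpecInt32Bit);
        return !value.isClear();
    }
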
@@ -765,16 +998,12 @@ GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnF return gpr; } - case DataFormatDouble: case DataFormatCell: case DataFormatBoolean: case DataFormatJSDouble: case DataFormatJSCell: case DataFormatJSBoolean: - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); - returnFormat = DataFormatInt32; - return allocate(); - + case DataFormatDouble: case DataFormatStorage: default: RELEASE_ASSERT_NOT_REACHED(); @@ -797,136 +1026,34 @@ GPRReg SpeculativeJIT::fillSpeculateInt32Strict(Edge edge) FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge) { - AbstractValue& value = m_state.forNode(edge); - SpeculatedType type = value.m_type; - ASSERT(edge.useKind() != KnownNumberUse || !(value.m_type & ~SpecFullNumber)); - m_interpreter.filter(value, SpecFullNumber); + ASSERT(isDouble(edge.useKind())); + ASSERT(edge->hasDoubleResult()); VirtualRegister virtualRegister = edge->virtualRegister(); GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); if (info.registerFormat() == DataFormatNone) { if (edge->hasConstant()) { - if (isInt32Constant(edge.node())) { - GPRReg gpr = allocate(); - m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(edge.node())), gpr); - m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); - info.fillInt32(*m_stream, gpr); - unlock(gpr); - } else if (isNumberConstant(edge.node())) { - FPRReg fpr = fprAllocate(); - m_jit.loadDouble(addressOfDoubleConstant(edge.node()), fpr); - m_fprs.retain(fpr, virtualRegister, SpillOrderConstant); - info.fillDouble(*m_stream, fpr); - return fpr; - } else { - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); - return fprAllocate(); - } - } else { - DataFormat spillFormat = info.spillFormat(); - ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInt32); - if (spillFormat == DataFormatJSDouble || spillFormat == DataFormatDouble) { - FPRReg fpr = fprAllocate(); - m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr); - m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled); - info.fillDouble(*m_stream, fpr); - return fpr; - } - + RELEASE_ASSERT(edge->isNumberConstant()); FPRReg fpr = fprAllocate(); - JITCompiler::Jump hasUnboxedDouble; - - if (spillFormat != DataFormatJSInt32 && spillFormat != DataFormatInt32) { - JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)); - if (type & ~SpecFullNumber) - speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), edge, m_jit.branch32(MacroAssembler::AboveOrEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::LowestTag))); - m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr); - hasUnboxedDouble = m_jit.jump(); - - isInteger.link(&m_jit); - } - - m_jit.convertInt32ToDouble(JITCompiler::payloadFor(virtualRegister), fpr); - - if (hasUnboxedDouble.isSet()) - hasUnboxedDouble.link(&m_jit); - - m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled); + m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(edge.node())), fpr); + m_fprs.retain(fpr, virtualRegister, SpillOrderConstant); info.fillDouble(*m_stream, fpr); - info.killSpilled(); return fpr; } - } - - switch (info.registerFormat()) { - case DataFormatJS: - case DataFormatJSInt32: { - GPRReg tagGPR = info.tagGPR(); - GPRReg payloadGPR = info.payloadGPR(); + + RELEASE_ASSERT(info.spillFormat() == DataFormatDouble); FPRReg fpr = fprAllocate(); - - m_gprs.lock(tagGPR); - m_gprs.lock(payloadGPR); - - 
JITCompiler::Jump hasUnboxedDouble; - - if (info.registerFormat() != DataFormatJSInt32) { - FPRTemporary scratch(this); - JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag)); - if (type & ~SpecFullNumber) - speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), edge, m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag))); - unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr()); - hasUnboxedDouble = m_jit.jump(); - isInteger.link(&m_jit); - } - - m_jit.convertInt32ToDouble(payloadGPR, fpr); - - if (hasUnboxedDouble.isSet()) - hasUnboxedDouble.link(&m_jit); - - m_gprs.release(tagGPR); - m_gprs.release(payloadGPR); - m_gprs.unlock(tagGPR); - m_gprs.unlock(payloadGPR); - m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); + m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr); + m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled); info.fillDouble(*m_stream, fpr); - info.killSpilled(); return fpr; } - case DataFormatInt32: { - FPRReg fpr = fprAllocate(); - GPRReg gpr = info.gpr(); - m_gprs.lock(gpr); - m_jit.convertInt32ToDouble(gpr, fpr); - m_gprs.unlock(gpr); - return fpr; - } - - case DataFormatJSDouble: - case DataFormatDouble: { - FPRReg fpr = info.fpr(); - m_fprs.lock(fpr); - return fpr; - } - - case DataFormatNone: - case DataFormatStorage: - RELEASE_ASSERT_NOT_REACHED(); - - case DataFormatCell: - case DataFormatJSCell: - case DataFormatBoolean: - case DataFormatJSBoolean: - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); - return fprAllocate(); - - default: - RELEASE_ASSERT_NOT_REACHED(); - return InvalidFPRReg; - } + RELEASE_ASSERT(info.registerFormat() == DataFormatDouble); + FPRReg fpr = info.fpr(); + m_fprs.lock(fpr); + return fpr; } GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge) @@ -934,33 +1061,38 @@ GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge) AbstractValue& value = m_state.forNode(edge); SpeculatedType type = value.m_type; ASSERT((edge.useKind() != KnownCellUse && edge.useKind() != KnownStringUse) || !(value.m_type & ~SpecCell)); + m_interpreter.filter(value, SpecCell); + if (value.isClear()) { + terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); + return allocate(); + } + VirtualRegister virtualRegister = edge->virtualRegister(); GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); switch (info.registerFormat()) { case DataFormatNone: { - if (info.spillFormat() == DataFormatInt32 || info.spillFormat() == DataFormatDouble) { - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); - return allocate(); - } - if (edge->hasConstant()) { - JSValue jsValue = valueOfJSConstant(edge.node()); + JSValue jsValue = edge->asJSValue(); GPRReg gpr = allocate(); - if (jsValue.isCell()) { - m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); - m_jit.move(MacroAssembler::TrustedImmPtr(jsValue.asCell()), gpr); - info.fillCell(*m_stream, gpr); - return gpr; - } - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); + m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); + m_jit.move(MacroAssembler::TrustedImmPtr(jsValue.asCell()), gpr); + info.fillCell(*m_stream, gpr); return gpr; } ASSERT((info.spillFormat() & DataFormatJS) || info.spillFormat() == DataFormatCell); - if (type & ~SpecCell) - speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), edge, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag))); + if (type & 
~SpecCell) { + speculationCheck( + BadType, + JSValueSource(JITCompiler::addressFor(virtualRegister)), + edge, + m_jit.branch32( + MacroAssembler::NotEqual, + JITCompiler::tagFor(virtualRegister), + TrustedImm32(JSValue::CellTag))); + } GPRReg gpr = allocate(); m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr); m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); @@ -980,8 +1112,11 @@ GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge) GPRReg payloadGPR = info.payloadGPR(); m_gprs.lock(tagGPR); m_gprs.lock(payloadGPR); - if (type & ~SpecCell) - speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), edge, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::CellTag))); + if (type & ~SpecCell) { + speculationCheck( + BadType, JSValueRegs(tagGPR, payloadGPR), edge, + m_jit.branchIfNotCell(info.jsValueRegs())); + } m_gprs.unlock(tagGPR); m_gprs.release(tagGPR); m_gprs.release(payloadGPR); @@ -993,12 +1128,9 @@ GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge) case DataFormatJSInt32: case DataFormatInt32: case DataFormatJSDouble: - case DataFormatDouble: case DataFormatJSBoolean: case DataFormatBoolean: - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); - return allocate(); - + case DataFormatDouble: case DataFormatStorage: RELEASE_ASSERT_NOT_REACHED(); @@ -1012,27 +1144,25 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge) { AbstractValue& value = m_state.forNode(edge); SpeculatedType type = value.m_type; + ASSERT(edge.useKind() != KnownBooleanUse || !(value.m_type & ~SpecBoolean)); + m_interpreter.filter(value, SpecBoolean); + if (value.isClear()) { + terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); + return allocate(); + } + VirtualRegister virtualRegister = edge->virtualRegister(); GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); switch (info.registerFormat()) { case DataFormatNone: { - if (info.spillFormat() == DataFormatInt32 || info.spillFormat() == DataFormatDouble) { - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); - return allocate(); - } - if (edge->hasConstant()) { - JSValue jsValue = valueOfJSConstant(edge.node()); + JSValue jsValue = edge->asJSValue(); GPRReg gpr = allocate(); - if (jsValue.isBoolean()) { - m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); - m_jit.move(MacroAssembler::TrustedImm32(jsValue.asBoolean()), gpr); - info.fillBoolean(*m_stream, gpr); - return gpr; - } - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); + m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); + m_jit.move(MacroAssembler::TrustedImm32(jsValue.asBoolean()), gpr); + info.fillBoolean(*m_stream, gpr); return gpr; } @@ -1074,12 +1204,9 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge) case DataFormatJSInt32: case DataFormatInt32: case DataFormatJSDouble: - case DataFormatDouble: case DataFormatJSCell: case DataFormatCell: - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); - return allocate(); - + case DataFormatDouble: case DataFormatStorage: RELEASE_ASSERT_NOT_REACHED(); @@ -1089,31 +1216,8 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge) } } -JITCompiler::Jump SpeculativeJIT::convertToDouble(JSValueOperand& op, FPRReg result) -{ - FPRTemporary scratch(this); - - GPRReg opPayloadGPR = op.payloadGPR(); - GPRReg opTagGPR = op.tagGPR(); - FPRReg scratchFPR = scratch.fpr(); - - JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, opTagGPR, TrustedImm32(JSValue::Int32Tag)); - JITCompiler::Jump notNumber = 
m_jit.branch32(MacroAssembler::AboveOrEqual, opPayloadGPR, TrustedImm32(JSValue::LowestTag)); - - unboxDouble(opTagGPR, opPayloadGPR, result, scratchFPR); - JITCompiler::Jump done = m_jit.jump(); - - isInteger.link(&m_jit); - m_jit.convertInt32ToDouble(opPayloadGPR, result); - - done.link(&m_jit); - - return notNumber; -} - void SpeculativeJIT::compileBaseValueStoreBarrier(Edge& baseEdge, Edge& valueEdge) { -#if ENABLE(GGC) ASSERT(!isKnownNotCell(valueEdge.node())); SpeculateCellOperand base(this, baseEdge); @@ -1122,10 +1226,6 @@ void SpeculativeJIT::compileBaseValueStoreBarrier(Edge& baseEdge, Edge& valueEdg GPRTemporary scratch2(this); writeBarrier(base.gpr(), value.tagGPR(), valueEdge, scratch1.gpr(), scratch2.gpr()); -#else - UNUSED_PARAM(baseEdge); - UNUSED_PARAM(valueEdge); -#endif } void SpeculativeJIT::compileObjectEquality(Node* node) @@ -1137,41 +1237,24 @@ void SpeculativeJIT::compileObjectEquality(Node* node) if (masqueradesAsUndefinedWatchpointIsStillValid()) { DFG_TYPE_CHECK( - JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchPtr( - MacroAssembler::Equal, - MacroAssembler::Address(op1GPR, JSCell::structureOffset()), - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR)); DFG_TYPE_CHECK( - JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchPtr( - MacroAssembler::Equal, - MacroAssembler::Address(op2GPR, JSCell::structureOffset()), - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR)); } else { - GPRTemporary structure(this); - GPRReg structureGPR = structure.gpr(); - - m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR); DFG_TYPE_CHECK( - JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchPtr( - MacroAssembler::Equal, - structureGPR, - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); - speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), + JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR)); + speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchTest8( MacroAssembler::NonZero, - MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); - m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR); DFG_TYPE_CHECK( - JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchPtr( - MacroAssembler::Equal, - structureGPR, - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); - speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), + JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR)); + speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchTest8( - MacroAssembler::NonZero, - MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::NonZero, + MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); } @@ -1188,6 +1271,57 @@ void SpeculativeJIT::compileObjectEquality(Node* node) booleanResult(resultPayloadGPR, node); } +void 
SpeculativeJIT::compileObjectStrictEquality(Edge objectChild, Edge otherChild) +{ + SpeculateCellOperand op1(this, objectChild); + JSValueOperand op2(this, otherChild); + + GPRReg op1GPR = op1.gpr(); + GPRReg op2GPR = op2.payloadGPR(); + + DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); + + GPRTemporary resultPayload(this, Reuse, op1); + GPRReg resultPayloadGPR = resultPayload.gpr(); + + MacroAssembler::Jump op2CellJump = m_jit.branchIfCell(op2.jsValueRegs()); + + m_jit.move(TrustedImm32(0), resultPayloadGPR); + MacroAssembler::Jump op2NotCellJump = m_jit.jump(); + + // At this point we know that we can perform a straight-forward equality comparison on pointer + // values because we are doing strict equality. + op2CellJump.link(&m_jit); + m_jit.compare32(MacroAssembler::Equal, op1GPR, op2GPR, resultPayloadGPR); + + op2NotCellJump.link(&m_jit); + booleanResult(resultPayloadGPR, m_currentNode); +} + +void SpeculativeJIT::compilePeepHoleObjectStrictEquality(Edge objectChild, Edge otherChild, Node* branchNode) +{ + BasicBlock* taken = branchNode->branchData()->taken.block; + BasicBlock* notTaken = branchNode->branchData()->notTaken.block; + + SpeculateCellOperand op1(this, objectChild); + JSValueOperand op2(this, otherChild); + + GPRReg op1GPR = op1.gpr(); + GPRReg op2GPR = op2.payloadGPR(); + + DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); + + branch32(MacroAssembler::NotEqual, op2.tagGPR(), TrustedImm32(JSValue::CellTag), notTaken); + + if (taken == nextBlock()) { + branch32(MacroAssembler::NotEqual, op1GPR, op2GPR, notTaken); + jump(taken); + } else { + branch32(MacroAssembler::Equal, op1GPR, op2GPR, taken); + jump(notTaken); + } +} + void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild) { SpeculateCellOperand op1(this, leftChild); @@ -1198,66 +1332,39 @@ void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge r GPRReg op2TagGPR = op2.tagGPR(); GPRReg op2PayloadGPR = op2.payloadGPR(); GPRReg resultGPR = result.gpr(); - GPRTemporary structure; - GPRReg structureGPR = InvalidGPRReg; bool masqueradesAsUndefinedWatchpointValid = masqueradesAsUndefinedWatchpointIsStillValid(); - if (!masqueradesAsUndefinedWatchpointValid) { - // The masquerades as undefined case will use the structure register, so allocate it here. - // Do this at the top of the function to avoid branching around a register allocation. 
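The new compileObjectStrictEquality above leans on a simple fact: under strict equality, a value that is speculated to be an object is equal to another value only when that value is the very same cell, so after ruling out the not-a-cell case a single payload (pointer) comparison decides the result. A tiny model of that decision, with a toy JSValue type:

    struct ToyJSValue { bool isCell; const void* payload; };

    // One side is already known (speculated) to be an object; the other is arbitrary.
    static bool objectStrictEquals(const void* knownObject, const ToyJSValue& other)
    {
        if (!other.isCell)
            return false;                       // an object is never strictly equal to a non-cell
        return other.payload == knownObject;    // same cell <=> strictly equal
    }
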
- GPRTemporary realStructure(this); - structure.adopt(realStructure); - structureGPR = structure.gpr(); - } - if (masqueradesAsUndefinedWatchpointValid) { DFG_TYPE_CHECK( - JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr( - MacroAssembler::Equal, - MacroAssembler::Address(op1GPR, JSCell::structureOffset()), - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); } else { - m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR); DFG_TYPE_CHECK( - JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr( - MacroAssembler::Equal, - structureGPR, - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild, m_jit.branchTest8( MacroAssembler::NonZero, - MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); } // It seems that most of the time when programs do a == b where b may be either null/undefined // or an object, b is usually an object. Balance the branches to make that case fast. - MacroAssembler::Jump rightNotCell = - m_jit.branch32(MacroAssembler::NotEqual, op2TagGPR, TrustedImm32(JSValue::CellTag)); + MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(op2.jsValueRegs()); // We know that within this branch, rightChild must be a cell. if (masqueradesAsUndefinedWatchpointValid) { DFG_TYPE_CHECK( - JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, - m_jit.branchPtr( - MacroAssembler::Equal, - MacroAssembler::Address(op2PayloadGPR, JSCell::structureOffset()), - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2PayloadGPR)); } else { - m_jit.loadPtr(MacroAssembler::Address(op2PayloadGPR, JSCell::structureOffset()), structureGPR); DFG_TYPE_CHECK( - JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, - m_jit.branchPtr( - MacroAssembler::Equal, - structureGPR, - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2PayloadGPR)); speculationCheck(BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, m_jit.branchTest8( MacroAssembler::NonZero, - MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::Address(op2PayloadGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); } @@ -1272,8 +1379,7 @@ void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge r // We know that within this branch, rightChild must not be a cell. Check if that is enough to // prove that it is either null or undefined. 
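For the == null/undefined comparisons in these hunks, the shape of the emitted code depends on the "masquerades as undefined" watchpoint. While the watchpoint holds, proving the operand is an object is a single branchIfNotObject check; once it has fired, the code must also test the cell's type-info flags and, for a masquerading cell, compare its structure's global object with the global object of the code being compiled, because an object only masquerades as undefined for code from its own global object. A schematic summary of that decision (toy types, not the emitted assembly):

    struct ToyCell {
        bool isObject;
        bool masqueradesAsUndefined;        // mirrors the MasqueradesAsUndefined type-info flag
        const void* globalObject;           // mirrors structure->globalObject()
    };

    static bool behavesLikeUndefined(const ToyCell& cell, const void* compilationGlobalObject,
                                     bool watchpointStillValid)
    {
        if (!cell.isObject)
            return false;                   // handled by the SpecObject type check instead
        if (watchpointStillValid)
            return false;                   // fast path: nothing can masquerade yet
        if (!cell.masqueradesAsUndefined)
            return false;                   // typeInfoFlags test
        return cell.globalObject == compilationGlobalObject;   // slow case
    }
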
if (needsTypeCheck(rightChild, SpecCell | SpecOther)) { - m_jit.move(op2TagGPR, resultGPR); - m_jit.or32(TrustedImm32(1), resultGPR); + m_jit.or32(TrustedImm32(1), op2TagGPR, resultGPR); typeCheck( JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, SpecCell | SpecOther, @@ -1294,8 +1400,8 @@ void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge r void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode) { - BasicBlock* taken = branchNode->takenBlock(); - BasicBlock* notTaken = branchNode->notTakenBlock(); + BasicBlock* taken = branchNode->branchData()->taken.block; + BasicBlock* notTaken = branchNode->branchData()->notTaken.block; SpeculateCellOperand op1(this, leftChild); JSValueOperand op2(this, rightChild, ManualOperandSpeculation); @@ -1305,65 +1411,40 @@ void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild GPRReg op2TagGPR = op2.tagGPR(); GPRReg op2PayloadGPR = op2.payloadGPR(); GPRReg resultGPR = result.gpr(); - GPRTemporary structure; - GPRReg structureGPR = InvalidGPRReg; bool masqueradesAsUndefinedWatchpointValid = masqueradesAsUndefinedWatchpointIsStillValid(); - if (!masqueradesAsUndefinedWatchpointValid) { - // The masquerades as undefined case will use the structure register, so allocate it here. - // Do this at the top of the function to avoid branching around a register allocation. - GPRTemporary realStructure(this); - structure.adopt(realStructure); - structureGPR = structure.gpr(); - } - if (masqueradesAsUndefinedWatchpointValid) { DFG_TYPE_CHECK( - JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr( - MacroAssembler::Equal, - MacroAssembler::Address(op1GPR, JSCell::structureOffset()), - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); } else { - m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR); DFG_TYPE_CHECK( - JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr( - MacroAssembler::Equal, - structureGPR, - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild, m_jit.branchTest8( MacroAssembler::NonZero, - MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); } // It seems that most of the time when programs do a == b where b may be either null/undefined // or an object, b is usually an object. Balance the branches to make that case fast. - MacroAssembler::Jump rightNotCell = - m_jit.branch32(MacroAssembler::NotEqual, op2TagGPR, TrustedImm32(JSValue::CellTag)); + MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(op2.jsValueRegs()); // We know that within this branch, rightChild must be a cell. 
if (masqueradesAsUndefinedWatchpointValid) { DFG_TYPE_CHECK( JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, - m_jit.branchPtr( - MacroAssembler::Equal, - MacroAssembler::Address(op2PayloadGPR, JSCell::structureOffset()), - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + m_jit.branchIfNotObject(op2PayloadGPR)); } else { - m_jit.loadPtr(MacroAssembler::Address(op2PayloadGPR, JSCell::structureOffset()), structureGPR); DFG_TYPE_CHECK( JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, - m_jit.branchPtr( - MacroAssembler::Equal, - structureGPR, - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + m_jit.branchIfNotObject(op2PayloadGPR)); speculationCheck(BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, m_jit.branchTest8( MacroAssembler::NonZero, - MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::Address(op2PayloadGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); } @@ -1380,8 +1461,7 @@ void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild jump(notTaken, ForceJump); rightNotCell.link(&m_jit); - m_jit.move(op2TagGPR, resultGPR); - m_jit.or32(TrustedImm32(1), resultGPR); + m_jit.or32(TrustedImm32(1), op2TagGPR, resultGPR); typeCheck( JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, SpecCell | SpecOther, @@ -1440,35 +1520,28 @@ void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse) structureGPR = structure.gpr(); } - MacroAssembler::Jump notCell = m_jit.branch32(MacroAssembler::NotEqual, valueTagGPR, TrustedImm32(JSValue::CellTag)); + MacroAssembler::Jump notCell = m_jit.branchIfNotCell(value.jsValueRegs()); if (masqueradesAsUndefinedWatchpointValid) { DFG_TYPE_CHECK( JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject, - m_jit.branchPtr( - MacroAssembler::Equal, - MacroAssembler::Address(valuePayloadGPR, JSCell::structureOffset()), - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + m_jit.branchIfNotObject(valuePayloadGPR)); } else { - m_jit.loadPtr(MacroAssembler::Address(valuePayloadGPR, JSCell::structureOffset()), structureGPR); - DFG_TYPE_CHECK( JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject, - m_jit.branchPtr( - MacroAssembler::Equal, - structureGPR, - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + m_jit.branchIfNotObject(valuePayloadGPR)); MacroAssembler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8( MacroAssembler::Zero, - MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::Address(valuePayloadGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(MasqueradesAsUndefined)); + m_jit.loadPtr(MacroAssembler::Address(valuePayloadGPR, JSCell::structureIDOffset()), structureGPR); speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, m_jit.branchPtr( MacroAssembler::Equal, MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()), - MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)))); + MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)))); isNotMasqueradesAsUndefined.link(&m_jit); } @@ -1479,8 +1552,7 @@ void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse) COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag); if (needsTypeCheck(nodeUse, SpecCell 
| SpecOther)) { - m_jit.move(valueTagGPR, resultPayloadGPR); - m_jit.or32(TrustedImm32(1), resultPayloadGPR); + m_jit.or32(TrustedImm32(1), valueTagGPR, resultPayloadGPR); typeCheck( JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, SpecCell | SpecOther, m_jit.branch32( @@ -1498,7 +1570,8 @@ void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse) void SpeculativeJIT::compileLogicalNot(Node* node) { switch (node->child1().useKind()) { - case BooleanUse: { + case BooleanUse: + case KnownBooleanUse: { SpeculateBooleanOperand value(this, node->child1()); GPRTemporary result(this, Reuse, value); m_jit.xor32(TrustedImm32(1), value.gpr(), result.gpr()); @@ -1519,7 +1592,7 @@ void SpeculativeJIT::compileLogicalNot(Node* node) return; } - case NumberUse: { + case DoubleRepUse: { SpeculateDoubleOperand value(this, node->child1()); FPRTemporary scratch(this); GPRTemporary resultPayload(this); @@ -1547,7 +1620,7 @@ void SpeculativeJIT::compileLogicalNot(Node* node) addSlowPathGenerator( slowPathCall( slowCase, this, operationConvertJSValueToBoolean, resultPayloadGPR, arg1TagGPR, - arg1PayloadGPR)); + arg1PayloadGPR, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded)); m_jit.xor32(TrustedImm32(1), resultPayloadGPR); booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly); @@ -1556,6 +1629,9 @@ void SpeculativeJIT::compileLogicalNot(Node* node) case StringUse: return compileStringZeroLength(node); + case StringOrOtherUse: + return compileLogicalNotStringOrOther(node); + default: RELEASE_ASSERT_NOT_REACHED(); break; @@ -1570,31 +1646,27 @@ void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, Ba GPRReg valuePayloadGPR = value.payloadGPR(); GPRReg scratchGPR = scratch.gpr(); - MacroAssembler::Jump notCell = m_jit.branch32(MacroAssembler::NotEqual, valueTagGPR, TrustedImm32(JSValue::CellTag)); + MacroAssembler::Jump notCell = m_jit.branchIfNotCell(value.jsValueRegs()); if (masqueradesAsUndefinedWatchpointIsStillValid()) { DFG_TYPE_CHECK( JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject, - m_jit.branchPtr( - MacroAssembler::Equal, - MacroAssembler::Address(valuePayloadGPR, JSCell::structureOffset()), - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + m_jit.branchIfNotObject(valuePayloadGPR)); } else { - m_jit.loadPtr(MacroAssembler::Address(valuePayloadGPR, JSCell::structureOffset()), scratchGPR); - DFG_TYPE_CHECK( JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject, - m_jit.branchPtr( - MacroAssembler::Equal, - scratchGPR, - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + m_jit.branchIfNotObject(valuePayloadGPR)); - JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::Zero, MacroAssembler::Address(scratchGPR, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); + JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8( + JITCompiler::Zero, + MacroAssembler::Address(valuePayloadGPR, JSCell::typeInfoFlagsOffset()), + TrustedImm32(MasqueradesAsUndefined)); + m_jit.loadPtr(MacroAssembler::Address(valuePayloadGPR, JSCell::structureIDOffset()), scratchGPR); speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, m_jit.branchPtr( MacroAssembler::Equal, MacroAssembler::Address(scratchGPR, Structure::globalObjectOffset()), - MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)))); + 
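Editor's note: the COMPILE_ASSERT and the new three-operand or32 above lean on two small tag tricks that are easy to miss in the diff: OR-ing a tag word with 1 folds undefined and null onto a single value, and XOR-ing a boolean payload with 1 inverts it. A minimal standalone sketch; the concrete tag values below are illustrative, only the asserted property (UndefinedTag | 1) == NullTag matters:

    #include <cassert>
    #include <cstdint>

    constexpr int32_t NullTag      = -3; // illustrative values chosen so that
    constexpr int32_t UndefinedTag = -4; // (UndefinedTag | 1) == NullTag holds

    inline bool isUndefinedOrNull(int32_t tag)
    {
        // One OR plus one compare covers both tags, matching the
        // or32(TrustedImm32(1), tagGPR, resultGPR) / branch32(NotEqual, ..., NullTag)
        // sequence emitted above.
        return (tag | 1) == NullTag;
    }

    inline int32_t logicalNotBoolean(int32_t boolPayload) // payload is 0 or 1
    {
        // xor32(TrustedImm32(1), value, result): flips the boolean payload.
        return boolPayload ^ 1;
    }

    int main()
    {
        assert(isUndefinedOrNull(NullTag));
        assert(isUndefinedOrNull(UndefinedTag));
        assert(!isUndefinedOrNull(0));
        assert(logicalNotBoolean(1) == 0 && logicalNotBoolean(0) == 1);
    }

The three-operand or32(imm, src, dst) form also removes the separate move the old code needed before it could clobber a register with the OR result.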
MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)))); isNotMasqueradesAsUndefined.link(&m_jit); } @@ -1604,8 +1676,7 @@ void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, Ba COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag); if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) { - m_jit.move(valueTagGPR, scratchGPR); - m_jit.or32(TrustedImm32(1), scratchGPR); + m_jit.or32(TrustedImm32(1), valueTagGPR, scratchGPR); typeCheck( JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, SpecCell | SpecOther, m_jit.branch32(MacroAssembler::NotEqual, scratchGPR, TrustedImm32(JSValue::NullTag))); @@ -1618,11 +1689,12 @@ void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, Ba void SpeculativeJIT::emitBranch(Node* node) { - BasicBlock* taken = node->takenBlock(); - BasicBlock* notTaken = node->notTakenBlock(); + BasicBlock* taken = node->branchData()->taken.block; + BasicBlock* notTaken = node->branchData()->notTaken.block; switch (node->child1().useKind()) { - case BooleanUse: { + case BooleanUse: + case KnownBooleanUse: { SpeculateBooleanOperand value(this, node->child1()); MacroAssembler::ResultCondition condition = MacroAssembler::NonZero; @@ -1644,8 +1716,18 @@ void SpeculativeJIT::emitBranch(Node* node) emitObjectOrOtherBranch(node->child1(), taken, notTaken); return; } - - case NumberUse: + + case StringUse: { + emitStringBranch(node->child1(), taken, notTaken); + return; + } + + case StringOrOtherUse: { + emitStringOrOtherBranch(node->child1(), taken, notTaken); + return; + } + + case DoubleRepUse: case Int32Use: { if (node->child1().useKind() == Int32Use) { bool invert = false; @@ -1786,39 +1868,43 @@ void SpeculativeJIT::compile(Node* node) switch (op) { case JSConstant: - initConstantInfo(node); - break; - - case PhantomArguments: - initConstantInfo(node); - break; - - case WeakJSConstant: - m_jit.addWeakReference(node->weakConstant()); + case DoubleConstant: + case PhantomDirectArguments: + case PhantomClonedArguments: initConstantInfo(node); break; case Identity: { - RELEASE_ASSERT_NOT_REACHED(); + speculate(node, node->child1()); + switch (node->child1().useKind()) { + case DoubleRepUse: + case DoubleRepRealUse: { + SpeculateDoubleOperand op(this, node->child1()); + doubleResult(op.fpr(), node); + break; + } + case Int52RepUse: + case MachineIntUse: + case DoubleRepMachineIntUse: { + RELEASE_ASSERT_NOT_REACHED(); + break; + } + default: { + JSValueOperand op(this, node->child1()); + jsValueResult(op.tagGPR(), op.payloadGPR(), node); + break; + } + } // switch break; } case GetLocal: { - SpeculatedType prediction = node->variableAccessData()->prediction(); AbstractValue& value = m_state.variables().operand(node->local()); - // If we have no prediction for this local, then don't attempt to compile. - if (prediction == SpecNone) { - terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); - break; - } - // If the CFA is tracking this variable and it found that the variable // cannot have been assigned, then don't attempt to proceed. if (value.isClear()) { - // FIXME: We should trap instead. 
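Editor's note: the recurring change in the hunks above is that the string-structure comparison and the typeInfoFlags load now operate on the cell header itself (branchIfNotObject, JSCell::typeInfoFlagsOffset) instead of going through the Structure, and the Structure is only fetched on the rare path where the MasqueradesAsUndefined flag is actually set, to compare its global object against the compiling code's global object. A sketch of that shape with stand-in types and illustrative constants, not JSC's real layout:

    #include <cassert>
    #include <cstdint>

    struct GlobalObject {};
    struct Structure { GlobalObject* globalObject; };

    struct Cell {
        Structure* structure;   // reached via the structure ID in the real engine
        uint8_t type;           // inline copy of the Structure's type
        uint8_t typeInfoFlags;  // inline copy of the type-info flags
    };

    constexpr uint8_t kFirstObjectType        = 0x20; // hypothetical threshold
    constexpr uint8_t kMasqueradesAsUndefined = 0x01; // hypothetical flag bit

    // branchIfNotObject: a single byte compare on the cell, no structure load.
    inline bool isObject(const Cell& cell) { return cell.type >= kFirstObjectType; }

    // Flag byte first; the structure and its global object are loaded only when
    // the flag is set, which is the ordering the new code emits.
    bool behavesLikeUndefined(const Cell& cell, GlobalObject* compilingGlobalObject)
    {
        if (!(cell.typeInfoFlags & kMasqueradesAsUndefined))
            return false;
        return cell.structure->globalObject == compilingGlobalObject;
    }

    int main()
    {
        GlobalObject g, other;
        Structure s { &g };
        Cell plainObject { &s, 0x30, 0 };
        Cell masquerader { &s, 0x30, kMasqueradesAsUndefined };
        assert(isObject(plainObject));
        assert(!behavesLikeUndefined(plainObject, &g));
        assert(behavesLikeUndefined(masquerader, &g));
        assert(!behavesLikeUndefined(masquerader, &other));
    }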
- // https://bugs.webkit.org/show_bug.cgi?id=110383 - terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); + m_compileOkay = false; break; } @@ -1868,8 +1954,7 @@ void SpeculativeJIT::compile(Node* node) break; } - case FlushedJSValue: - case FlushedArguments: { + case FlushedJSValue: { GPRTemporary result(this); GPRTemporary tag(this); m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr()); @@ -1900,13 +1985,23 @@ void SpeculativeJIT::compile(Node* node) break; } - case MovHint: - case ZombieHint: - case Check: { - RELEASE_ASSERT_NOT_REACHED(); + case MovHint: { + compileMovHint(m_currentNode); + noResult(node); + break; + } + + case ZombieHint: { + recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead); + noResult(node); break; } + case ExitOK: { + noResult(node); + break; + } + case SetLocal: { switch (node->variableAccessData()->flushFormat()) { case FlushedDouble: { @@ -1945,16 +2040,7 @@ void SpeculativeJIT::compile(Node* node) break; } - case FlushedJSValue: - case FlushedArguments: { - if (generationInfoFromVirtualRegister(node->child1()->virtualRegister()).registerFormat() == DataFormatDouble) { - SpeculateDoubleOperand value(this, node->child1(), ManualOperandSpeculation); - m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node->machineLocal())); - noResult(node); - recordSetLocal(DataFormatDouble); - break; - } - + case FlushedJSValue: { JSValueOperand value(this, node->child1()); m_jit.store32(value.payloadGPR(), JITCompiler::payloadFor(node->machineLocal())); m_jit.store32(value.tagGPR(), JITCompiler::tagFor(node->machineLocal())); @@ -1975,60 +2061,19 @@ void SpeculativeJIT::compile(Node* node) // But it may be profitable to use this as a hook to run speculation checks // on arguments, thereby allowing us to trivially eliminate such checks if // the argument is not used. + recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat())); break; case BitAnd: case BitOr: case BitXor: - if (isInt32Constant(node->child1().node())) { - SpeculateInt32Operand op2(this, node->child2()); - GPRTemporary result(this, Reuse, op2); - - bitOp(op, valueOfInt32Constant(node->child1().node()), op2.gpr(), result.gpr()); - - int32Result(result.gpr(), node); - } else if (isInt32Constant(node->child2().node())) { - SpeculateInt32Operand op1(this, node->child1()); - GPRTemporary result(this, Reuse, op1); - - bitOp(op, valueOfInt32Constant(node->child2().node()), op1.gpr(), result.gpr()); - - int32Result(result.gpr(), node); - } else { - SpeculateInt32Operand op1(this, node->child1()); - SpeculateInt32Operand op2(this, node->child2()); - GPRTemporary result(this, Reuse, op1, op2); - - GPRReg reg1 = op1.gpr(); - GPRReg reg2 = op2.gpr(); - bitOp(op, reg1, reg2, result.gpr()); - - int32Result(result.gpr(), node); - } + compileBitwiseOp(node); break; case BitRShift: case BitLShift: case BitURShift: - if (isInt32Constant(node->child2().node())) { - SpeculateInt32Operand op1(this, node->child1()); - GPRTemporary result(this, Reuse, op1); - - shiftOp(op, op1.gpr(), valueOfInt32Constant(node->child2().node()) & 0x1f, result.gpr()); - - int32Result(result.gpr(), node); - } else { - // Do not allow shift amount to be used as the result, MacroAssembler does not permit this. 
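Editor's note: the constant-shift special case removed above (now folded into compileShiftOp) masks the immediate with 0x1f, which matches the JavaScript semantics of <<, >> and >>>: only the low five bits of the shift count are used. A small sketch of that behaviour:

    #include <cassert>
    #include <cstdint>

    int32_t jsLeftShift(int32_t value, int32_t count)
    {
        return value << (count & 0x1f); // a count of 32 behaves like 0, 33 like 1, ...
    }

    int main()
    {
        assert(jsLeftShift(1, 3) == 8);
        assert(jsLeftShift(1, 32) == 1); // count masked to 0
        assert(jsLeftShift(1, 33) == 2); // count masked to 1
    }

The removed comment about the shift amount not being reusable as the result register is a MacroAssembler restriction; the shared compileShiftOp helper has to respect the same constraint.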
- SpeculateInt32Operand op1(this, node->child1()); - SpeculateInt32Operand op2(this, node->child2()); - GPRTemporary result(this, Reuse, op1); - - GPRReg reg1 = op1.gpr(); - GPRReg reg2 = op2.gpr(); - shiftOp(op, reg1, reg2, result.gpr()); - - int32Result(result.gpr(), node); - } + compileShiftOp(node); break; case UInt32ToNumber: { @@ -2046,35 +2091,58 @@ void SpeculativeJIT::compile(Node* node) break; } - case Int32ToDouble: { - compileInt32ToDouble(node); + case DoubleRep: { + compileDoubleRep(node); break; } - case ValueAdd: { - JSValueOperand op1(this, node->child1()); - JSValueOperand op2(this, node->child2()); + case ValueRep: { + compileValueRep(node); + break; + } + + case ValueAdd: + compileValueAdd(node); + break; + + case StrCat: { + JSValueOperand op1(this, node->child1(), ManualOperandSpeculation); + JSValueOperand op2(this, node->child2(), ManualOperandSpeculation); + JSValueOperand op3(this, node->child3(), ManualOperandSpeculation); GPRReg op1TagGPR = op1.tagGPR(); GPRReg op1PayloadGPR = op1.payloadGPR(); GPRReg op2TagGPR = op2.tagGPR(); GPRReg op2PayloadGPR = op2.payloadGPR(); + GPRReg op3TagGPR; + GPRReg op3PayloadGPR; + if (node->child3()) { + op3TagGPR = op3.tagGPR(); + op3PayloadGPR = op3.payloadGPR(); + } else { + op3TagGPR = InvalidGPRReg; + op3PayloadGPR = InvalidGPRReg; + } flushRegisters(); - - GPRResult2 resultTag(this); - GPRResult resultPayload(this); - if (isKnownNotNumber(node->child1().node()) || isKnownNotNumber(node->child2().node())) - callOperation(operationValueAddNotNumber, resultTag.gpr(), resultPayload.gpr(), op1TagGPR, op1PayloadGPR, op2TagGPR, op2PayloadGPR); + + GPRFlushedCallResult result(this); + if (node->child3()) + callOperation(operationStrCat3, result.gpr(), op1TagGPR, op1PayloadGPR, op2TagGPR, op2PayloadGPR, op3TagGPR, op3PayloadGPR); else - callOperation(operationValueAdd, resultTag.gpr(), resultPayload.gpr(), op1TagGPR, op1PayloadGPR, op2TagGPR, op2PayloadGPR); + callOperation(operationStrCat2, result.gpr(), op1TagGPR, op1PayloadGPR, op2TagGPR, op2PayloadGPR); + m_jit.exceptionCheck(); - jsValueResult(resultTag.gpr(), resultPayload.gpr(), node); + cellResult(result.gpr(), node); break; } case ArithAdd: - compileAdd(node); + compileArithAdd(node); + break; + + case ArithClz32: + compileArithClz32(node); break; case MakeRope: @@ -2103,6 +2171,11 @@ void SpeculativeJIT::compile(Node* node) break; } + case ArithPow: { + compileArithPow(node); + break; + } + case ArithAbs: { switch (node->child1().useKind()) { case Int32Use: { @@ -2114,13 +2187,13 @@ void SpeculativeJIT::compile(Node* node) m_jit.rshift32(result.gpr(), MacroAssembler::TrustedImm32(31), scratch.gpr()); m_jit.add32(scratch.gpr(), result.gpr()); m_jit.xor32(scratch.gpr(), result.gpr()); - speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Equal, result.gpr(), MacroAssembler::TrustedImm32(1 << 31))); + speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Signed, result.gpr())); int32Result(result.gpr(), node); break; } - case NumberUse: { + case DoubleRepUse: { SpeculateDoubleOperand op1(this, node->child1()); FPRTemporary result(this); @@ -2162,7 +2235,7 @@ void SpeculativeJIT::compile(Node* node) break; } - case NumberUse: { + case DoubleRepUse: { SpeculateDoubleOperand op1(this, node->child1()); SpeculateDoubleOperand op2(this, node->child2()); FPRTemporary result(this, op1); @@ -2206,17 +2279,32 @@ void SpeculativeJIT::compile(Node* node) } break; } - - case ArithSqrt: { + + case ArithSqrt: + compileArithSqrt(node); + 
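Editor's note: the Int32 ArithAbs sequence above (rshift32 by 31, add32, xor32) is the classic branchless absolute value, and the overflow check is tightened from comparing against 1 << 31 to a plain sign test, since INT32_MIN is the only input whose "absolute value" still comes out negative. A sketch of the same logic, using unsigned arithmetic to keep the C++ well defined:

    #include <cassert>
    #include <climits>
    #include <cstdint>

    // Returns false in the case where the JIT takes the Overflow speculation check,
    // i.e. when x == INT32_MIN.
    bool int32Abs(int32_t x, int32_t& out)
    {
        uint32_t ux = static_cast<uint32_t>(x);
        uint32_t mask = static_cast<uint32_t>(x >> 31); // arithmetic shift, as rshift32 does: 0 or 0xffffffff
        uint32_t result = (ux + mask) ^ mask;           // add32 + xor32: branchless |x| in two's complement
        if (static_cast<int32_t>(result) < 0)           // branchTest32(Signed, result): only INT32_MIN stays negative
            return false;
        out = static_cast<int32_t>(result);
        return true;
    }

    int main()
    {
        int32_t r;
        assert(int32Abs(-5, r) && r == 5);
        assert(int32Abs(7, r) && r == 7);
        assert(!int32Abs(INT32_MIN, r)); // -(INT32_MIN) is not representable in int32
    }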
break; + + case ArithFRound: { SpeculateDoubleOperand op1(this, node->child1()); FPRTemporary result(this, op1); - m_jit.sqrtDouble(op1.fpr(), result.fpr()); + m_jit.convertDoubleToFloat(op1.fpr(), result.fpr()); + m_jit.convertFloatToDouble(result.fpr(), result.fpr()); doubleResult(result.fpr(), node); break; } + case ArithRandom: + compileArithRandom(node); + break; + + case ArithRound: + case ArithFloor: + case ArithCeil: + compileArithRounding(node); + break; + case ArithSin: { SpeculateDoubleOperand op1(this, node->child1()); FPRReg op1FPR = op1.fpr(); @@ -2241,6 +2329,10 @@ void SpeculativeJIT::compile(Node* node) break; } + case ArithLog: + compileArithLog(node); + break; + case LogicalNot: compileLogicalNot(node); break; @@ -2264,23 +2356,12 @@ void SpeculativeJIT::compile(Node* node) if (compare(node, JITCompiler::GreaterThanOrEqual, JITCompiler::DoubleGreaterThanOrEqual, operationCompareGreaterEq)) return; break; - - case CompareEqConstant: - ASSERT(isNullConstant(node->child2().node())); - if (nonSpeculativeCompareNull(node, node->child1())) - return; - break; case CompareEq: if (compare(node, JITCompiler::Equal, JITCompiler::DoubleEqual, operationCompareEq)) return; break; - case CompareStrictEqConstant: - if (compileStrictEqForConstant(node, node->child1(), valueOfJSConstant(node->child2().node()))) - return; - break; - case CompareStrictEq: if (compileStrictEq(node)) return; @@ -2318,8 +2399,30 @@ void SpeculativeJIT::compile(Node* node) case Array::SelectUsingPredictions: case Array::ForceExit: RELEASE_ASSERT_NOT_REACHED(); +#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE) terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); +#endif + break; + case Array::Undecided: { + SpeculateStrictInt32Operand index(this, node->child2()); + GPRTemporary resultTag(this, Reuse, index); + GPRTemporary resultPayload(this); + + GPRReg indexGPR = index.gpr(); + GPRReg resultTagGPR = resultTag.gpr(); + GPRReg resultPayloadGPR = resultPayload.gpr(); + + use(node->child1()); + index.use(); + + speculationCheck(OutOfBounds, JSValueRegs(), node, + m_jit.branch32(MacroAssembler::LessThan, indexGPR, MacroAssembler::TrustedImm32(0))); + + m_jit.move(MacroAssembler::TrustedImm32(JSValue::UndefinedTag), resultTagGPR); + m_jit.move(MacroAssembler::TrustedImm32(0), resultPayloadGPR); + jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly); break; + } case Array::Generic: { SpeculateCellOperand base(this, node->child1()); // Save a register, speculate cell. We'll probably be right. 
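Editor's note: the new ArithFRound case above implements Math.fround by narrowing to single precision and widening back (convertDoubleToFloat followed by convertFloatToDouble). The equivalent computation in plain C++:

    #include <cassert>
    #include <cmath>

    double fround(double x)
    {
        // Round to the nearest float, then widen back to double.
        return static_cast<double>(static_cast<float>(x));
    }

    int main()
    {
        assert(fround(0.5) == 0.5);        // exactly representable at single precision
        assert(fround(0.1) != 0.1);        // 0.1 loses precision at single width
        assert(std::isnan(fround(NAN)));   // NaN stays NaN
    }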
JSValueOperand property(this, node->child2()); @@ -2328,9 +2431,10 @@ void SpeculativeJIT::compile(Node* node) GPRReg propertyPayloadGPR = property.payloadGPR(); flushRegisters(); - GPRResult2 resultTag(this); - GPRResult resultPayload(this); + GPRFlushedCallResult2 resultTag(this); + GPRFlushedCallResult resultPayload(this); callOperation(operationGetByValCell, resultTag.gpr(), resultPayload.gpr(), baseGPR, propertyTagGPR, propertyPayloadGPR); + m_jit.exceptionCheck(); jsValueResult(resultTag.gpr(), resultPayload.gpr(), node); break; @@ -2351,21 +2455,46 @@ void SpeculativeJIT::compile(Node* node) GPRTemporary resultPayload(this); if (node->arrayMode().type() == Array::Int32) { + ASSERT(!node->arrayMode().isSaneChain()); + speculationCheck( OutOfBounds, JSValueRegs(), 0, m_jit.branch32( MacroAssembler::Equal, - MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), + MacroAssembler::BaseIndex( + storageReg, propertyReg, MacroAssembler::TimesEight, TagOffset), TrustedImm32(JSValue::EmptyValueTag))); - m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload.gpr()); + m_jit.load32( + MacroAssembler::BaseIndex( + storageReg, propertyReg, MacroAssembler::TimesEight, PayloadOffset), + resultPayload.gpr()); int32Result(resultPayload.gpr(), node); break; } GPRTemporary resultTag(this); - m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag.gpr()); - speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag))); - m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload.gpr()); + m_jit.load32( + MacroAssembler::BaseIndex( + storageReg, propertyReg, MacroAssembler::TimesEight, TagOffset), + resultTag.gpr()); + m_jit.load32( + MacroAssembler::BaseIndex( + storageReg, propertyReg, MacroAssembler::TimesEight, PayloadOffset), + resultPayload.gpr()); + if (node->arrayMode().isSaneChain()) { + JITCompiler::Jump notHole = m_jit.branch32( + MacroAssembler::NotEqual, resultTag.gpr(), + TrustedImm32(JSValue::EmptyValueTag)); + m_jit.move(TrustedImm32(JSValue::UndefinedTag), resultTag.gpr()); + m_jit.move(TrustedImm32(0), resultPayload.gpr()); + notHole.link(&m_jit); + } else { + speculationCheck( + LoadFromHole, JSValueRegs(), 0, + m_jit.branch32( + MacroAssembler::Equal, resultTag.gpr(), + TrustedImm32(JSValue::EmptyValueTag))); + } jsValueResult(resultTag.gpr(), resultPayload.gpr(), node); break; } @@ -2520,8 +2649,11 @@ void SpeculativeJIT::compile(Node* node) case Array::String: compileGetByValOnString(node); break; - case Array::Arguments: - compileGetByValOnArguments(node); + case Array::DirectArguments: + compileGetByValOnDirectArguments(node); + break; + case Array::ScopedArguments: + compileGetByValOnScopedArguments(node); break; default: { TypedArrayType type = node->arrayMode().typedArrayType(); @@ -2548,8 +2680,10 @@ void SpeculativeJIT::compile(Node* node) case Array::SelectUsingPredictions: case Array::ForceExit: RELEASE_ASSERT_NOT_REACHED(); +#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE) terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); alreadyHandled = true; +#endif break; case Array::Generic: { ASSERT(node->op() == PutByVal || node->op() == 
PutByValDirect); @@ -2568,6 +2702,7 @@ void SpeculativeJIT::compile(Node* node) callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict, baseGPR, propertyTagGPR, propertyPayloadGPR, valueTagGPR, valuePayloadGPR); else callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict, baseGPR, propertyTagGPR, propertyPayloadGPR, valueTagGPR, valuePayloadGPR); + m_jit.exceptionCheck(); noResult(node); alreadyHandled = true; @@ -2698,12 +2833,6 @@ void SpeculativeJIT::compile(Node* node) break; } - case Array::Arguments: - // FIXME: we could at some point make this work. Right now we're assuming that the register - // pressure would be too great. - RELEASE_ASSERT_NOT_REACHED(); - break; - default: { TypedArrayType type = arrayMode.typedArrayType(); if (isInt(type)) @@ -2715,50 +2844,69 @@ void SpeculativeJIT::compile(Node* node) } case RegExpExec: { - if (compileRegExpExec(node)) - return; - - if (!node->adjustedRefCount()) { + if (node->child1().useKind() == CellUse + && node->child2().useKind() == CellUse) { SpeculateCellOperand base(this, node->child1()); SpeculateCellOperand argument(this, node->child2()); GPRReg baseGPR = base.gpr(); GPRReg argumentGPR = argument.gpr(); - + flushRegisters(); - GPRResult result(this); - callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR); - - // Must use jsValueResult because otherwise we screw up register - // allocation, which thinks that this node has a result. - booleanResult(result.gpr(), node); + GPRFlushedCallResult2 resultTag(this); + GPRFlushedCallResult resultPayload(this); + callOperation(operationRegExpExec, resultTag.gpr(), resultPayload.gpr(), baseGPR, argumentGPR); + m_jit.exceptionCheck(); + + jsValueResult(resultTag.gpr(), resultPayload.gpr(), node); break; } - - SpeculateCellOperand base(this, node->child1()); - SpeculateCellOperand argument(this, node->child2()); - GPRReg baseGPR = base.gpr(); - GPRReg argumentGPR = argument.gpr(); + + JSValueOperand base(this, node->child1()); + JSValueOperand argument(this, node->child2()); + GPRReg baseTagGPR = base.tagGPR(); + GPRReg basePayloadGPR = base.payloadGPR(); + GPRReg argumentTagGPR = argument.tagGPR(); + GPRReg argumentPayloadGPR = argument.payloadGPR(); flushRegisters(); - GPRResult2 resultTag(this); - GPRResult resultPayload(this); - callOperation(operationRegExpExec, resultTag.gpr(), resultPayload.gpr(), baseGPR, argumentGPR); + GPRFlushedCallResult2 resultTag(this); + GPRFlushedCallResult resultPayload(this); + callOperation(operationRegExpExecGeneric, resultTag.gpr(), resultPayload.gpr(), baseTagGPR, basePayloadGPR, argumentTagGPR, argumentPayloadGPR); + m_jit.exceptionCheck(); jsValueResult(resultTag.gpr(), resultPayload.gpr(), node); break; } case RegExpTest: { - SpeculateCellOperand base(this, node->child1()); - SpeculateCellOperand argument(this, node->child2()); - GPRReg baseGPR = base.gpr(); - GPRReg argumentGPR = argument.gpr(); + if (node->child1().useKind() == CellUse + && node->child2().useKind() == CellUse) { + SpeculateCellOperand base(this, node->child1()); + SpeculateCellOperand argument(this, node->child2()); + GPRReg baseGPR = base.gpr(); + GPRReg argumentGPR = argument.gpr(); + + flushRegisters(); + GPRFlushedCallResult result(this); + callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR); + m_jit.exceptionCheck(); + + booleanResult(result.gpr(), node); + break; + } + + JSValueOperand base(this, node->child1()); + 
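Editor's note: the indexed loads in the GetByVal hunks a little further up assume the 32-bit element layout: each butterfly slot is eight bytes, read as a payload word at PayloadOffset and a tag word at TagOffset, and a hole carries the empty-value tag. The new isSaneChain branch folds a hole to undefined in line instead of taking a LoadFromHole exit. A sketch with illustrative tag values and stand-in types:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    constexpr int32_t EmptyValueTag = -6; // illustrative
    constexpr int32_t UndefinedTag  = -4; // illustrative
    constexpr int32_t Int32Tag      = -1; // illustrative

    struct Slot {
        int32_t payload; // PayloadOffset
        int32_t tag;     // TagOffset
    };

    // Default path: a hole triggers an OSR exit, modelled here as returning false.
    bool loadChecked(const std::vector<Slot>& storage, size_t i, Slot& out)
    {
        if (storage[i].tag == EmptyValueTag)
            return false; // speculationCheck(LoadFromHole, ...)
        out = storage[i];
        return true;
    }

    // Sane-chain path: a hole is folded to undefined with no exit.
    Slot loadSaneChain(const std::vector<Slot>& storage, size_t i)
    {
        Slot v = storage[i];
        if (v.tag == EmptyValueTag)
            return Slot { 0, UndefinedTag };
        return v;
    }

    int main()
    {
        std::vector<Slot> storage = { { 42, Int32Tag }, { 0, EmptyValueTag } };
        Slot out;
        assert(loadChecked(storage, 0, out) && out.payload == 42);
        assert(!loadChecked(storage, 1, out));
        assert(loadSaneChain(storage, 1).tag == UndefinedTag);
    }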
JSValueOperand argument(this, node->child2()); + GPRReg baseTagGPR = base.tagGPR(); + GPRReg basePayloadGPR = base.payloadGPR(); + GPRReg argumentTagGPR = argument.tagGPR(); + GPRReg argumentPayloadGPR = argument.payloadGPR(); flushRegisters(); - GPRResult result(this); - callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR); + GPRFlushedCallResult result(this); + callOperation(operationRegExpTestGeneric, result.gpr(), baseTagGPR, basePayloadGPR, argumentTagGPR, argumentPayloadGPR); + m_jit.exceptionCheck(); - // If we add a DataFormatBool, we should use it here. booleanResult(result.gpr(), node); break; } @@ -2826,7 +2974,7 @@ void SpeculativeJIT::compile(Node* node) FPRReg valueFPR = value.fpr(); DFG_TYPE_CHECK( - JSValueRegs(), node->child2(), SpecFullRealNumber, + JSValueRegs(), node->child2(), SpecDoubleReal, m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, valueFPR, valueFPR)); m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR); @@ -2942,7 +3090,7 @@ void SpeculativeJIT::compile(Node* node) MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight), tempFPR); MacroAssembler::Jump slowCase = m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempFPR, tempFPR); - JSValue nan = JSValue(JSValue::EncodeAsDouble, QNaN); + JSValue nan = JSValue(JSValue::EncodeAsDouble, PNaN); m_jit.store32( MacroAssembler::TrustedImm32(nan.u.asBits.tag), MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); @@ -3012,7 +3160,7 @@ void SpeculativeJIT::compile(Node* node) } case DFG::Jump: { - jump(node->takenBlock()); + jump(node->targetBlock()); noResult(node); break; } @@ -3047,12 +3195,8 @@ void SpeculativeJIT::compile(Node* node) } } - // Grab the return address. - m_jit.emitGetReturnPCFromCallFrameHeaderPtr(GPRInfo::regT2); - // Restore our caller's "r". - m_jit.emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::callFrameRegister); - // Return. - m_jit.restoreReturnAddressBeforeReturn(GPRInfo::regT2); + m_jit.emitRestoreCalleeSaves(); + m_jit.emitFunctionEpilogue(); m_jit.ret(); noResult(node); @@ -3067,6 +3211,60 @@ void SpeculativeJIT::compile(Node* node) break; } + case BooleanToNumber: { + switch (node->child1().useKind()) { + case BooleanUse: { + SpeculateBooleanOperand value(this, node->child1()); + GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add). 
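Editor's note: the SpecDoubleReal checks above (branchDouble with DoubleNotEqualOrUnordered comparing a value against itself) use the standard NaN self-comparison: x == x is false exactly when x is NaN, so a single compare is enough to keep NaN out of Int32/Double shaped array storage. A sketch:

    #include <cassert>
    #include <cmath>

    inline bool isRealDouble(double x)
    {
        // x == x fails only for NaN; "unordered" in the branch condition covers
        // the same case at the instruction level.
        return x == x;
    }

    int main()
    {
        assert(isRealDouble(1.5));
        assert(isRealDouble(-0.0));
        assert(!isRealDouble(NAN));
        assert(!isRealDouble(std::nan("1")));
    }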
+ + m_jit.move(value.gpr(), result.gpr()); + + int32Result(result.gpr(), node); + break; + } + + case UntypedUse: { + JSValueOperand value(this, node->child1()); + + if (!m_interpreter.needsTypeCheck(node->child1(), SpecBoolInt32 | SpecBoolean)) { + GPRTemporary result(this); + + GPRReg valueGPR = value.payloadGPR(); + GPRReg resultGPR = result.gpr(); + + m_jit.move(valueGPR, resultGPR); + int32Result(result.gpr(), node); + break; + } + + GPRTemporary resultTag(this); + GPRTemporary resultPayload(this); + + GPRReg valueTagGPR = value.tagGPR(); + GPRReg valuePayloadGPR = value.payloadGPR(); + GPRReg resultTagGPR = resultTag.gpr(); + GPRReg resultPayloadGPR = resultPayload.gpr(); + + m_jit.move(valuePayloadGPR, resultPayloadGPR); + JITCompiler::Jump isBoolean = m_jit.branch32( + JITCompiler::Equal, valueTagGPR, TrustedImm32(JSValue::BooleanTag)); + m_jit.move(valueTagGPR, resultTagGPR); + JITCompiler::Jump done = m_jit.jump(); + isBoolean.link(&m_jit); + m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTagGPR); + done.link(&m_jit); + + jsValueResult(resultTagGPR, resultPayloadGPR, node); + break; + } + + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } + break; + } + case ToPrimitive: { RELEASE_ASSERT(node->child1().useKind() == UntypedUse); JSValueOperand op1(this, node->child1()); @@ -3080,59 +3278,57 @@ void SpeculativeJIT::compile(Node* node) op1.use(); - if (!(m_state.forNode(node->child1()).m_type & ~(SpecFullNumber | SpecBoolean))) { - m_jit.move(op1TagGPR, resultTagGPR); - m_jit.move(op1PayloadGPR, resultPayloadGPR); - } else { - MacroAssembler::Jump alreadyPrimitive = m_jit.branch32(MacroAssembler::NotEqual, op1TagGPR, TrustedImm32(JSValue::CellTag)); - MacroAssembler::Jump notPrimitive = m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op1PayloadGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())); - - alreadyPrimitive.link(&m_jit); - m_jit.move(op1TagGPR, resultTagGPR); - m_jit.move(op1PayloadGPR, resultPayloadGPR); - - addSlowPathGenerator( - slowPathCall( - notPrimitive, this, operationToPrimitive, - JSValueRegs(resultTagGPR, resultPayloadGPR), op1TagGPR, op1PayloadGPR)); - } + MacroAssembler::Jump alreadyPrimitive = m_jit.branchIfNotCell(op1.jsValueRegs()); + MacroAssembler::Jump notPrimitive = m_jit.branchIfObject(op1PayloadGPR); + + alreadyPrimitive.link(&m_jit); + m_jit.move(op1TagGPR, resultTagGPR); + m_jit.move(op1PayloadGPR, resultPayloadGPR); + + addSlowPathGenerator( + slowPathCall( + notPrimitive, this, operationToPrimitive, + JSValueRegs(resultTagGPR, resultPayloadGPR), op1TagGPR, op1PayloadGPR)); jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly); break; } - case ToString: { + case ToString: + case CallStringConstructor: { if (node->child1().useKind() == UntypedUse) { JSValueOperand op1(this, node->child1()); GPRReg op1PayloadGPR = op1.payloadGPR(); GPRReg op1TagGPR = op1.tagGPR(); - GPRResult result(this); + GPRFlushedCallResult result(this); GPRReg resultGPR = result.gpr(); flushRegisters(); JITCompiler::Jump done; if (node->child1()->prediction() & SpecString) { - JITCompiler::Jump slowPath1 = m_jit.branch32( - JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::CellTag)); - JITCompiler::Jump slowPath2 = m_jit.branchPtr( - JITCompiler::NotEqual, - JITCompiler::Address(op1PayloadGPR, JSCell::structureOffset()), - TrustedImmPtr(m_jit.vm()->stringStructure.get())); + JITCompiler::Jump slowPath1 = m_jit.branchIfNotCell(op1.jsValueRegs()); + JITCompiler::Jump 
slowPath2 = m_jit.branchIfNotString(op1PayloadGPR); m_jit.move(op1PayloadGPR, resultGPR); done = m_jit.jump(); slowPath1.link(&m_jit); slowPath2.link(&m_jit); } - callOperation(operationToString, resultGPR, op1TagGPR, op1PayloadGPR); + if (op == ToString) + callOperation(operationToString, resultGPR, op1TagGPR, op1PayloadGPR); + else { + ASSERT(op == CallStringConstructor); + callOperation(operationCallStringConstructor, resultGPR, op1TagGPR, op1PayloadGPR); + } + m_jit.exceptionCheck(); if (done.isSet()) done.link(&m_jit); cellResult(resultGPR, node); break; } - compileToStringOnCell(node); + compileToStringOrCallStringConstructorOnCell(node); break; } @@ -3142,8 +3338,8 @@ void SpeculativeJIT::compile(Node* node) } case NewArray: { - JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin); - if (!globalObject->isHavingABadTime() && !hasArrayStorage(node->indexingType())) { + JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); + if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(node->indexingType())) { Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()); ASSERT(structure->indexingType() == node->indexingType()); ASSERT( @@ -3178,7 +3374,7 @@ void SpeculativeJIT::compile(Node* node) SpeculateDoubleOperand operand(this, use); FPRReg opFPR = operand.fpr(); DFG_TYPE_CHECK( - JSValueRegs(), use, SpecFullRealNumber, + JSValueRegs(), use, SpecDoubleReal, m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, opFPR, opFPR)); m_jit.storeDouble(opFPR, MacroAssembler::Address(storageGPR, sizeof(double) * operandIdx)); @@ -3217,9 +3413,10 @@ void SpeculativeJIT::compile(Node* node) if (!node->numChildren()) { flushRegisters(); - GPRResult result(this); + GPRFlushedCallResult result(this); callOperation( operationNewEmptyArray, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())); + m_jit.exceptionCheck(); cellResult(result.gpr(), node); break; } @@ -3246,7 +3443,7 @@ void SpeculativeJIT::compile(Node* node) JSValueRegs(), use, SpecFullRealNumber, m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, opFPR, opFPR)); - m_jit.storeDouble(opFPR, reinterpret_cast<char*>(buffer + operandIdx)); + m_jit.storeDouble(opFPR, TrustedImmPtr(reinterpret_cast<char*>(buffer + operandIdx))); break; } case ALL_INT32_INDEXING_TYPES: { @@ -3292,11 +3489,12 @@ void SpeculativeJIT::compile(Node* node) m_jit.storePtr(TrustedImmPtr(scratchSize), scratch.gpr()); } - GPRResult result(this); + GPRFlushedCallResult result(this); callOperation( operationNewArray, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()), static_cast<void*>(buffer), node->numChildren()); + m_jit.exceptionCheck(); if (scratchSize) { GPRTemporary scratch(this); @@ -3310,8 +3508,8 @@ void SpeculativeJIT::compile(Node* node) } case NewArrayWithSize: { - JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin); - if (!globalObject->isHavingABadTime() && !hasArrayStorage(node->indexingType())) { + JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); + if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(node->indexingType())) { SpeculateStrictInt32Operand size(this, node->child1()); GPRTemporary result(this); GPRTemporary storage(this); @@ -3325,7 +3523,7 @@ void SpeculativeJIT::compile(Node* node) GPRReg scratch2GPR = scratch2.gpr(); MacroAssembler::JumpList slowCases; - 
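Editor's note: the ToString / CallStringConstructor hunk above adds an inline fast path: when profiling predicts a string, the code checks "is a cell" and "is a string" and, if both hold, reuses the operand cell with no call; only other values reach operationToString or operationCallStringConstructor, which is now followed by an explicit exceptionCheck(). A sketch of that control flow with stand-in types, not JSC's classes:

    #include <cassert>
    #include <string>

    struct JSString { std::string value; };

    struct Value {
        bool isCell = false;
        JSString* string = nullptr; // non-null only when the cell is a string
        double number = 0;
    };

    // Stands in for the operationToString / operationCallStringConstructor call.
    JSString* operationToStringSlow(const Value& v, JSString& scratch)
    {
        scratch.value = std::to_string(v.number);
        return &scratch;
    }

    JSString* toString(const Value& v, JSString& scratch)
    {
        // Fast path mirroring branchIfNotCell + branchIfNotString: an existing
        // string cell is returned as-is, with no runtime call.
        if (v.isCell && v.string)
            return v.string;
        return operationToStringSlow(v, scratch); // the JIT follows this with exceptionCheck()
    }

    int main()
    {
        JSString s { "hi" }, scratch;
        Value str { true, &s, 0 };
        Value num { false, nullptr, 4 };
        assert(toString(str, scratch) == &s);
        assert(toString(num, scratch)->value == "4.000000");
    }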
slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_SPARSE_ARRAY_INDEX))); + slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH))); ASSERT((1 << 3) == sizeof(JSValue)); m_jit.move(sizeGPR, scratchGPR); @@ -3341,7 +3539,7 @@ void SpeculativeJIT::compile(Node* node) m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength())); if (hasDouble(node->indexingType())) { - JSValue nan = JSValue(JSValue::EncodeAsDouble, QNaN); + JSValue nan = JSValue(JSValue::EncodeAsDouble, PNaN); m_jit.move(sizeGPR, scratchGPR); MacroAssembler::Jump done = m_jit.branchTest32(MacroAssembler::Zero, scratchGPR); @@ -3353,12 +3551,11 @@ void SpeculativeJIT::compile(Node* node) done.link(&m_jit); } - addSlowPathGenerator(adoptPtr( - new CallArrayAllocatorWithVariableSizeSlowPathGenerator( + addSlowPathGenerator(std::make_unique<CallArrayAllocatorWithVariableSizeSlowPathGenerator>( slowCases, this, operationNewArrayWithSize, resultGPR, globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()), globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage), - sizeGPR))); + sizeGPR)); cellResult(resultGPR, node); break; @@ -3367,10 +3564,10 @@ void SpeculativeJIT::compile(Node* node) SpeculateStrictInt32Operand size(this, node->child1()); GPRReg sizeGPR = size.gpr(); flushRegisters(); - GPRResult result(this); + GPRFlushedCallResult result(this); GPRReg resultGPR = result.gpr(); GPRReg structureGPR = selectScratchGPR(sizeGPR); - MacroAssembler::Jump bigLength = m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_SPARSE_ARRAY_INDEX)); + MacroAssembler::Jump bigLength = m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)); m_jit.move(TrustedImmPtr(globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())), structureGPR); MacroAssembler::Jump done = m_jit.jump(); bigLength.link(&m_jit); @@ -3378,14 +3575,15 @@ void SpeculativeJIT::compile(Node* node) done.link(&m_jit); callOperation( operationNewArrayWithSize, resultGPR, structureGPR, sizeGPR); + m_jit.exceptionCheck(); cellResult(resultGPR, node); break; } case NewArrayBuffer: { - JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin); + JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); IndexingType indexingType = node->indexingType(); - if (!globalObject->isHavingABadTime() && !hasArrayStorage(indexingType)) { + if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(indexingType)) { unsigned numElements = node->numConstants(); GPRTemporary result(this); @@ -3420,9 +3618,10 @@ void SpeculativeJIT::compile(Node* node) } flushRegisters(); - GPRResult result(this); + GPRFlushedCallResult result(this); callOperation(operationNewArrayBuffer, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()), node->startConstant(), node->numConstants()); + m_jit.exceptionCheck(); cellResult(result.gpr(), node); break; @@ -3440,14 +3639,15 @@ void SpeculativeJIT::compile(Node* node) flushRegisters(); - GPRResult result(this); + GPRFlushedCallResult result(this); GPRReg resultGPR = result.gpr(); - JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin); + JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); callOperation( 
operationNewTypedArrayWithOneArgumentForType(node->typedArrayType()), resultGPR, globalObject->typedArrayStructure(node->typedArrayType()), argumentTagGPR, argumentPayloadGPR); + m_jit.exceptionCheck(); cellResult(resultGPR, node); break; @@ -3461,10 +3661,11 @@ void SpeculativeJIT::compile(Node* node) case NewRegexp: { flushRegisters(); - GPRResult resultPayload(this); - GPRResult2 resultTag(this); + GPRFlushedCallResult resultPayload(this); + GPRFlushedCallResult2 resultTag(this); callOperation(operationNewRegexp, resultTag.gpr(), resultPayload.gpr(), m_jit.codeBlock()->regexp(node->regexpIndex())); + m_jit.exceptionCheck(); // FIXME: make the callOperation above explicitly return a cell result, or jitAssert the tag is a cell tag. cellResult(resultPayload.gpr(), node); @@ -3482,18 +3683,15 @@ void SpeculativeJIT::compile(Node* node) GPRReg tempTagGPR = tempTag.gpr(); MacroAssembler::JumpList slowCases; - slowCases.append(m_jit.branch32( - MacroAssembler::NotEqual, thisValueTagGPR, TrustedImm32(JSValue::CellTag))); - m_jit.loadPtr( - MacroAssembler::Address(thisValuePayloadGPR, JSCell::structureOffset()), tempGPR); + slowCases.append(m_jit.branchIfNotCell(thisValue.jsValueRegs())); slowCases.append(m_jit.branch8( MacroAssembler::NotEqual, - MacroAssembler::Address(tempGPR, Structure::typeInfoTypeOffset()), + MacroAssembler::Address(thisValuePayloadGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(FinalObjectType))); m_jit.move(thisValuePayloadGPR, tempGPR); m_jit.move(thisValueTagGPR, tempTagGPR); J_JITOperation_EJ function; - if (m_jit.graph().executableFor(node->codeOrigin)->isStrictMode()) + if (m_jit.graph().executableFor(node->origin.semantic)->isStrictMode()) function = operationToThisStrict; else function = operationToThis; @@ -3524,11 +3722,16 @@ void SpeculativeJIT::compile(Node* node) GPRReg allocatorGPR = allocator.gpr(); GPRReg structureGPR = structure.gpr(); GPRReg scratchGPR = scratch.gpr(); + // Rare data is only used to access the allocator & structure + // We can avoid using an additional GPR this way + GPRReg rareDataGPR = structureGPR; MacroAssembler::JumpList slowPath; - m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR); - m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureGPR); + m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfRareData()), rareDataGPR); + slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, rareDataGPR)); + m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR); + m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureGPR); slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, allocatorGPR)); emitAllocateJSObject(resultGPR, allocatorGPR, structureGPR, TrustedImmPtr(0), scratchGPR, slowPath); @@ -3538,12 +3741,6 @@ void SpeculativeJIT::compile(Node* node) break; } - case AllocationProfileWatchpoint: - case TypedArrayWatchpoint: { - noResult(node); - break; - } - case NewObject: { GPRTemporary result(this); GPRTemporary allocator(this); @@ -3575,90 +3772,44 @@ void SpeculativeJIT::compile(Node* node) break; } - case GetScope: { - SpeculateCellOperand function(this, node->child1()); - GPRTemporary result(this, Reuse, function); - 
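Editor's note: in the NewArrayWithSize hunk above, the inline allocation path writes the public and vector lengths into the butterfly and, for double-shaped arrays, prefills every slot with the canonical NaN pattern (PNaN, renamed from QNaN in this patch) so an untouched slot reads back as a hole; sizes at or above MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH fall through to the runtime allocator. A sketch with stand-in types and an illustrative threshold:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    constexpr uint32_t kStorageConstructionThreshold = 100000; // stand-in for MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH

    struct DoubleButterfly {
        uint32_t publicLength;
        uint32_t vectorLength;
        std::vector<double> slots;
    };

    // Returns false when the request must take the slow (runtime allocator) path.
    bool newDoubleArrayInline(uint32_t size, DoubleButterfly& out)
    {
        if (size >= kStorageConstructionThreshold)
            return false;                      // slowCases in the hunk above
        out.publicLength = size;
        out.vectorLength = size;
        out.slots.assign(size, std::nan(""));  // prefill with the canonical NaN
        return true;
    }

    int main()
    {
        DoubleButterfly b;
        assert(newDoubleArrayInline(4, b));
        assert(b.publicLength == 4 && std::isnan(b.slots[0]));
        assert(!newDoubleArrayInline(kStorageConstructionThreshold, b));
    }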
m_jit.loadPtr(JITCompiler::Address(function.gpr(), JSFunction::offsetOfScopeChain()), result.gpr()); - cellResult(result.gpr(), node); - break; - } - - case GetMyScope: { + case GetArgumentCount: { GPRTemporary result(this); - GPRReg resultGPR = result.gpr(); - - m_jit.loadPtr(JITCompiler::payloadFor(JSStack::ScopeChain), resultGPR); - cellResult(resultGPR, node); + m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), result.gpr()); + int32Result(result.gpr(), node); break; } - case SkipTopScope: { - SpeculateCellOperand scope(this, node->child1()); - GPRTemporary result(this, Reuse, scope); - GPRReg resultGPR = result.gpr(); - m_jit.move(scope.gpr(), resultGPR); - JITCompiler::Jump activationNotCreated = - m_jit.branchTestPtr( - JITCompiler::Zero, - JITCompiler::payloadFor( - static_cast<VirtualRegister>(m_jit.graph().machineActivationRegister()))); - m_jit.loadPtr(JITCompiler::Address(resultGPR, JSScope::offsetOfNext()), resultGPR); - activationNotCreated.link(&m_jit); - cellResult(resultGPR, node); + case GetScope: + compileGetScope(node); break; - } - - case SkipScope: { - SpeculateCellOperand scope(this, node->child1()); - GPRTemporary result(this, Reuse, scope); - m_jit.loadPtr(JITCompiler::Address(scope.gpr(), JSScope::offsetOfNext()), result.gpr()); - cellResult(result.gpr(), node); + + case SkipScope: + compileSkipScope(node); break; - } - - case GetClosureRegisters: { - if (WriteBarrierBase<Unknown>* registers = m_jit.graph().tryGetRegisters(node->child1().node())) { - GPRTemporary result(this); - GPRReg resultGPR = result.gpr(); - m_jit.move(TrustedImmPtr(registers), resultGPR); - storageResult(resultGPR, node); - break; - } - SpeculateCellOperand scope(this, node->child1()); - GPRTemporary result(this); - GPRReg scopeGPR = scope.gpr(); - GPRReg resultGPR = result.gpr(); - - m_jit.loadPtr(JITCompiler::Address(scopeGPR, JSVariableObject::offsetOfRegisters()), resultGPR); - storageResult(resultGPR, node); - break; - } case GetClosureVar: { - StorageOperand registers(this, node->child1()); + SpeculateCellOperand base(this, node->child1()); GPRTemporary resultTag(this); GPRTemporary resultPayload(this); - GPRReg registersGPR = registers.gpr(); + GPRReg baseGPR = base.gpr(); GPRReg resultTagGPR = resultTag.gpr(); GPRReg resultPayloadGPR = resultPayload.gpr(); - m_jit.load32(JITCompiler::Address(registersGPR, node->varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); - m_jit.load32(JITCompiler::Address(registersGPR, node->varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR); + m_jit.load32(JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset()) + TagOffset), resultTagGPR); + m_jit.load32(JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset()) + PayloadOffset), resultPayloadGPR); jsValueResult(resultTagGPR, resultPayloadGPR, node); break; } + case PutClosureVar: { - StorageOperand registers(this, node->child2()); - JSValueOperand value(this, node->child3()); - GPRTemporary scratchRegister(this); + SpeculateCellOperand base(this, node->child1()); + JSValueOperand value(this, node->child2()); - GPRReg registersGPR = registers.gpr(); + GPRReg baseGPR = base.gpr(); GPRReg valueTagGPR = value.tagGPR(); GPRReg valuePayloadGPR = value.payloadGPR(); - speculate(node, node->child1()); - - m_jit.store32(valueTagGPR, JITCompiler::Address(registersGPR, node->varNumber() * sizeof(Register) + 
OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); - m_jit.store32(valuePayloadGPR, JITCompiler::Address(registersGPR, node->varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); + m_jit.store32(valueTagGPR, JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset()) + TagOffset)); + m_jit.store32(valuePayloadGPR, JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset()) + PayloadOffset)); noResult(node); break; } @@ -3669,8 +3820,8 @@ void SpeculativeJIT::compile(Node* node) switch (node->child1().useKind()) { case CellUse: { SpeculateCellOperand base(this, node->child1()); - GPRTemporary resultTag(this, Reuse, base); - GPRTemporary resultPayload(this); + GPRTemporary resultTag(this); + GPRTemporary resultPayload(this, Reuse, base); GPRReg baseGPR = base.gpr(); GPRReg resultTagGPR = resultTag.gpr(); @@ -3678,7 +3829,7 @@ void SpeculativeJIT::compile(Node* node) base.use(); - cachedGetById(node->codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber()); + cachedGetById(node->origin.semantic, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber()); jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly); break; @@ -3686,8 +3837,8 @@ void SpeculativeJIT::compile(Node* node) case UntypedUse: { JSValueOperand base(this, node->child1()); - GPRTemporary resultTag(this, Reuse, base, TagWord); - GPRTemporary resultPayload(this); + GPRTemporary resultTag(this); + GPRTemporary resultPayload(this, Reuse, base, TagWord); GPRReg baseTagGPR = base.tagGPR(); GPRReg basePayloadGPR = base.payloadGPR(); @@ -3696,9 +3847,9 @@ void SpeculativeJIT::compile(Node* node) base.use(); - JITCompiler::Jump notCell = m_jit.branch32(JITCompiler::NotEqual, baseTagGPR, TrustedImm32(JSValue::CellTag)); + JITCompiler::Jump notCell = m_jit.branchIfNotCell(base.jsValueRegs()); - cachedGetById(node->codeOrigin, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), notCell); + cachedGetById(node->origin.semantic, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), notCell); jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly); break; @@ -3723,16 +3874,16 @@ void SpeculativeJIT::compile(Node* node) GPRReg baseGPR = base.gpr(); - GPRResult resultTag(this); - GPRResult2 resultPayload(this); - GPRReg resultTagGPR = resultTag.gpr(); + GPRFlushedCallResult resultPayload(this); + GPRFlushedCallResult2 resultTag(this); GPRReg resultPayloadGPR = resultPayload.gpr(); + GPRReg resultTagGPR = resultTag.gpr(); base.use(); flushRegisters(); - cachedGetById(node->codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), JITCompiler::Jump(), DontSpill); + cachedGetById(node->origin.semantic, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), JITCompiler::Jump(), DontSpill); jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly); break; @@ -3743,18 +3894,18 @@ void SpeculativeJIT::compile(Node* node) GPRReg baseTagGPR = base.tagGPR(); GPRReg basePayloadGPR = base.payloadGPR(); - GPRResult resultTag(this); - GPRResult2 resultPayload(this); - GPRReg resultTagGPR = resultTag.gpr(); + GPRFlushedCallResult resultPayload(this); + GPRFlushedCallResult2 resultTag(this); GPRReg resultPayloadGPR = resultPayload.gpr(); + GPRReg resultTagGPR = resultTag.gpr(); base.use(); 
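Editor's note: the GetClosureVar / PutClosureVar hunks just above switch from indexing a separately loaded register file (GetClosureRegisters plus varNumber) to addressing the variable directly inside the environment record at offsetOfVariable(scopeOffset), still as two 32-bit accesses (tag word and payload word) on this port. A sketch with stand-in types:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Slot {
        int32_t payload; // PayloadOffset
        int32_t tag;     // TagOffset
    };

    struct EnvironmentRecord {
        std::vector<Slot> variables; // indexed by scope offset
    };

    Slot getClosureVar(const EnvironmentRecord& scope, size_t scopeOffset)
    {
        return scope.variables[scopeOffset]; // two 32-bit loads in the JIT
    }

    void putClosureVar(EnvironmentRecord& scope, size_t scopeOffset, Slot value)
    {
        scope.variables[scopeOffset] = value; // two 32-bit stores in the JIT
    }

    int main()
    {
        EnvironmentRecord scope { { { 0, 0 }, { 0, 0 } } };
        putClosureVar(scope, 1, Slot { 42, -1 });
        assert(getClosureVar(scope, 1).payload == 42);
    }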
flushRegisters(); - JITCompiler::Jump notCell = m_jit.branch32(JITCompiler::NotEqual, baseTagGPR, TrustedImm32(JSValue::CellTag)); + JITCompiler::Jump notCell = m_jit.branchIfNotCell(base.jsValueRegs()); - cachedGetById(node->codeOrigin, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), notCell, DontSpill); + cachedGetById(node->origin.semantic, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), notCell, DontSpill); jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly); break; @@ -3771,91 +3922,54 @@ void SpeculativeJIT::compile(Node* node) compileGetArrayLength(node); break; - case CheckFunction: { - SpeculateCellOperand function(this, node->child1()); - speculationCheck(BadFunction, JSValueSource::unboxedCell(function.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, function.gpr(), node->function())); + case CheckCell: { + SpeculateCellOperand cell(this, node->child1()); + speculationCheck(BadCell, JSValueSource::unboxedCell(cell.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, cell.gpr(), node->cellOperand()->cell())); noResult(node); break; } - case CheckExecutable: { - SpeculateCellOperand function(this, node->child1()); - speculationCheck(BadExecutable, JSValueSource::unboxedCell(function.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, JITCompiler::Address(function.gpr(), JSFunction::offsetOfExecutable()), node->executable())); + case CheckNotEmpty: { + JSValueOperand operand(this, node->child1()); + GPRReg tagGPR = operand.tagGPR(); + speculationCheck(TDZFailure, JSValueSource(), nullptr, m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::EmptyValueTag))); noResult(node); break; } - - case CheckStructure: { - SpeculateCellOperand base(this, node->child1()); - - ASSERT(node->structureSet().size()); - - if (node->structureSet().size() == 1) { - speculationCheck( - BadCache, JSValueSource::unboxedCell(base.gpr()), 0, - m_jit.branchWeakPtr( - JITCompiler::NotEqual, - JITCompiler::Address(base.gpr(), JSCell::structureOffset()), - node->structureSet()[0])); - } else { - GPRTemporary structure(this); - - m_jit.loadPtr(JITCompiler::Address(base.gpr(), JSCell::structureOffset()), structure.gpr()); - - JITCompiler::JumpList done; - - for (size_t i = 0; i < node->structureSet().size() - 1; ++i) - done.append(m_jit.branchWeakPtr(JITCompiler::Equal, structure.gpr(), node->structureSet()[i])); - - speculationCheck( - BadCache, JSValueSource::unboxedCell(base.gpr()), 0, - m_jit.branchWeakPtr( - JITCompiler::NotEqual, structure.gpr(), node->structureSet().last())); - - done.link(&m_jit); - } - - noResult(node); + + case CheckIdent: + compileCheckIdent(node); break; - } - - case StructureTransitionWatchpoint: { - // There is a fascinating question here of what to do about array profiling. - // We *could* try to tell the OSR exit about where the base of the access is. - // The DFG will have kept it alive, though it may not be in a register, and - // we shouldn't really load it since that could be a waste. For now though, - // we'll just rely on the fact that when a watchpoint fires then that's - // quite a hint already. 
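Editor's note: the new CheckNotEmpty case above checks that a binding has been initialized: on 32-bit an uninitialized (temporal-dead-zone) binding holds the empty value, so the whole check is a single tag compare that exits with TDZFailure when it matches. A sketch with illustrative tag values:

    #include <cassert>
    #include <cstdint>

    constexpr int32_t EmptyValueTag = -6; // illustrative
    constexpr int32_t Int32Tag      = -1; // illustrative

    struct Value { int32_t payload; int32_t tag; };

    // Returns false in the case where the JIT would take the TDZFailure exit.
    bool checkNotEmpty(const Value& v)
    {
        return v.tag != EmptyValueTag;
    }

    int main()
    {
        assert(checkNotEmpty(Value { 1, Int32Tag }));
        assert(!checkNotEmpty(Value { 0, EmptyValueTag }));
    }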
- - m_jit.addWeakReference(node->structure()); - -#if !ASSERT_DISABLED - SpeculateCellOperand op1(this, node->child1()); - JITCompiler::Jump isOK = m_jit.branchPtr(JITCompiler::Equal, JITCompiler::Address(op1.gpr(), JSCell::structureOffset()), TrustedImmPtr(node->structure())); - m_jit.breakpoint(); - isOK.link(&m_jit); -#else - speculateCell(node->child1()); -#endif - noResult(node); + case GetExecutable: { + SpeculateCellOperand function(this, node->child1()); + GPRTemporary result(this, Reuse, function); + GPRReg functionGPR = function.gpr(); + GPRReg resultGPR = result.gpr(); + speculateCellType(node->child1(), functionGPR, SpecFunction, JSFunctionType); + m_jit.loadPtr(JITCompiler::Address(functionGPR, JSFunction::offsetOfExecutable()), resultGPR); + cellResult(resultGPR, node); break; } - case PhantomPutStructure: { - ASSERT(isKnownCell(node->child1().node())); - m_jit.jitCode()->common.notifyCompilingStructureTransition(m_jit.graph().m_plan, m_jit.codeBlock(), node); - noResult(node); + case CheckStructure: { + compileCheckStructure(node); break; } - + case PutStructure: { + Structure* oldStructure = node->transition()->previous; + Structure* newStructure = node->transition()->next; + m_jit.jitCode()->common.notifyCompilingStructureTransition(m_jit.graph().m_plan, m_jit.codeBlock(), node); SpeculateCellOperand base(this, node->child1()); GPRReg baseGPR = base.gpr(); - m_jit.storePtr(MacroAssembler::TrustedImmPtr(node->structureTransitionData().newStructure), MacroAssembler::Address(baseGPR, JSCell::structureOffset())); + ASSERT_UNUSED(oldStructure, oldStructure->indexingType() == newStructure->indexingType()); + ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type()); + ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags()); + m_jit.storePtr(MacroAssembler::TrustedImmPtr(newStructure), MacroAssembler::Address(baseGPR, JSCell::structureIDOffset())); noResult(node); break; @@ -3869,18 +3983,10 @@ void SpeculativeJIT::compile(Node* node) compileReallocatePropertyStorage(node); break; - case GetButterfly: { - SpeculateCellOperand base(this, node->child1()); - GPRTemporary result(this, Reuse, base); - - GPRReg baseGPR = base.gpr(); - GPRReg resultGPR = result.gpr(); - - m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR); - - storageResult(resultGPR, node); + case GetButterfly: + case GetButterflyReadOnly: + compileGetButterfly(node); break; - } case GetIndexedPropertyStorage: { compileGetIndexedPropertyStorage(node); @@ -3906,7 +4012,7 @@ void SpeculativeJIT::compile(Node* node) GPRReg resultTagGPR = resultTag.gpr(); GPRReg resultPayloadGPR = resultPayload.gpr(); - StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node->storageAccessDataIndex()]; + StorageAccessData& storageAccessData = node->storageAccessData(); m_jit.load32(JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR); m_jit.load32(JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); @@ -3915,6 +4021,47 @@ void SpeculativeJIT::compile(Node* node) break; } + case GetGetterSetterByOffset: { + StorageOperand storage(this, node->child1()); + GPRTemporary resultPayload(this); + + GPRReg storageGPR = storage.gpr(); + GPRReg resultPayloadGPR = resultPayload.gpr(); + + StorageAccessData& storageAccessData = 
node->storageAccessData(); + + m_jit.load32(JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR); + + cellResult(resultPayloadGPR, node); + break; + } + + case GetGetter: { + SpeculateCellOperand op1(this, node->child1()); + GPRTemporary result(this, Reuse, op1); + + GPRReg op1GPR = op1.gpr(); + GPRReg resultGPR = result.gpr(); + + m_jit.loadPtr(JITCompiler::Address(op1GPR, GetterSetter::offsetOfGetter()), resultGPR); + + cellResult(resultGPR, node); + break; + } + + case GetSetter: { + SpeculateCellOperand op1(this, node->child1()); + GPRTemporary result(this, Reuse, op1); + + GPRReg op1GPR = op1.gpr(); + GPRReg resultGPR = result.gpr(); + + m_jit.loadPtr(JITCompiler::Address(op1GPR, GetterSetter::offsetOfSetter()), resultGPR); + + cellResult(resultGPR, node); + break; + } + case PutByOffset: { StorageOperand storage(this, node->child1()); JSValueOperand value(this, node->child3()); @@ -3925,7 +4072,7 @@ void SpeculativeJIT::compile(Node* node) speculate(node, node->child2()); - StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node->storageAccessDataIndex()]; + StorageAccessData& storageAccessData = node->storageAccessData(); m_jit.storePtr(valueTagGPR, JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); m_jit.storePtr(valuePayloadGPR, JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); @@ -3933,6 +4080,23 @@ void SpeculativeJIT::compile(Node* node) noResult(node); break; } + + case PutByIdFlush: { + SpeculateCellOperand base(this, node->child1()); + JSValueOperand value(this, node->child2()); + GPRTemporary scratch(this); + + GPRReg baseGPR = base.gpr(); + GPRReg valueTagGPR = value.tagGPR(); + GPRReg valuePayloadGPR = value.payloadGPR(); + GPRReg scratchGPR = scratch.gpr(); + flushRegisters(); + + cachedPutById(node->origin.semantic, baseGPR, valueTagGPR, valuePayloadGPR, scratchGPR, node->identifierNumber(), NotDirect, MacroAssembler::Jump(), DontSpill); + + noResult(node); + break; + } case PutById: { SpeculateCellOperand base(this, node->child1()); @@ -3944,7 +4108,7 @@ void SpeculativeJIT::compile(Node* node) GPRReg valuePayloadGPR = value.payloadGPR(); GPRReg scratchGPR = scratch.gpr(); - cachedPutById(node->codeOrigin, baseGPR, valueTagGPR, valuePayloadGPR, scratchGPR, node->identifierNumber(), NotDirect); + cachedPutById(node->origin.semantic, baseGPR, valueTagGPR, valuePayloadGPR, scratchGPR, node->identifierNumber(), NotDirect); noResult(node); break; @@ -3960,17 +4124,35 @@ void SpeculativeJIT::compile(Node* node) GPRReg valuePayloadGPR = value.payloadGPR(); GPRReg scratchGPR = scratch.gpr(); - cachedPutById(node->codeOrigin, baseGPR, valueTagGPR, valuePayloadGPR, scratchGPR, node->identifierNumber(), Direct); + cachedPutById(node->origin.semantic, baseGPR, valueTagGPR, valuePayloadGPR, scratchGPR, node->identifierNumber(), Direct); noResult(node); break; } + case PutGetterById: + case PutSetterById: { + compilePutAccessorById(node); + break; + } + + case PutGetterSetterById: { + compilePutGetterSetterById(node); + break; + } + + case PutGetterByVal: + case PutSetterByVal: { + compilePutAccessorByVal(node); + break; + } + + case GetGlobalLexicalVariable: case GetGlobalVar: { GPRTemporary resultPayload(this); GPRTemporary resultTag(this); - 
m_jit.move(TrustedImmPtr(node->registerPointer()), resultPayload.gpr()); + m_jit.move(TrustedImmPtr(node->variablePointer()), resultPayload.gpr()); m_jit.load32(JITCompiler::Address(resultPayload.gpr(), OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTag.gpr()); m_jit.load32(JITCompiler::Address(resultPayload.gpr(), OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayload.gpr()); @@ -3978,91 +4160,72 @@ void SpeculativeJIT::compile(Node* node) break; } - case PutGlobalVar: { - JSValueOperand value(this, node->child1()); + case PutGlobalVariable: { + JSValueOperand value(this, node->child2()); // FIXME: if we happen to have a spare register - and _ONLY_ if we happen to have // a spare register - a good optimization would be to put the register pointer into // a register and then do a zero offset store followed by a four-offset store (or // vice-versa depending on endianness). - m_jit.store32(value.tagGPR(), node->registerPointer()->tagPointer()); - m_jit.store32(value.payloadGPR(), node->registerPointer()->payloadPointer()); + m_jit.store32(value.tagGPR(), node->variablePointer()->tagPointer()); + m_jit.store32(value.payloadGPR(), node->variablePointer()->payloadPointer()); noResult(node); break; } case NotifyWrite: { - VariableWatchpointSet* set = node->variableWatchpointSet(); - - JSValueOperand value(this, node->child1()); - GPRReg valueTagGPR = value.tagGPR(); - GPRReg valuePayloadGPR = value.payloadGPR(); - - GPRTemporary temp(this); - GPRReg tempGPR = temp.gpr(); - - m_jit.load8(set->addressOfState(), tempGPR); - - JITCompiler::JumpList ready; - - ready.append(m_jit.branch32(JITCompiler::Equal, tempGPR, TrustedImm32(IsInvalidated))); - - if (set->state() == ClearWatchpoint) { - JITCompiler::Jump isWatched = - m_jit.branch32(JITCompiler::NotEqual, tempGPR, TrustedImm32(ClearWatchpoint)); - - m_jit.store32(valueTagGPR, &set->addressOfInferredValue()->u.asBits.tag); - m_jit.store32(valuePayloadGPR, &set->addressOfInferredValue()->u.asBits.payload); - m_jit.store8(TrustedImm32(IsWatched), set->addressOfState()); - ready.append(m_jit.jump()); - - isWatched.link(&m_jit); - } + compileNotifyWrite(node); + break; + } - JITCompiler::Jump definitelyNotEqual = m_jit.branch32( - JITCompiler::NotEqual, - JITCompiler::AbsoluteAddress(&set->addressOfInferredValue()->u.asBits.payload), - valuePayloadGPR); - ready.append(m_jit.branch32( - JITCompiler::Equal, - JITCompiler::AbsoluteAddress(&set->addressOfInferredValue()->u.asBits.tag), - valueTagGPR)); - definitelyNotEqual.link(&m_jit); - - JITCompiler::Jump slowCase = m_jit.branchTest8( - JITCompiler::NonZero, JITCompiler::AbsoluteAddress(set->addressOfSetIsNotEmpty())); - m_jit.store8(TrustedImm32(IsInvalidated), set->addressOfState()); - m_jit.store32( - TrustedImm32(JSValue::EmptyValueTag), - &set->addressOfInferredValue()->u.asBits.tag); - m_jit.store32( - TrustedImm32(0), &set->addressOfInferredValue()->u.asBits.payload); - - ready.link(&m_jit); - - addSlowPathGenerator( - slowPathCall(slowCase, this, operationInvalidate, NoResult, set)); - + case VarInjectionWatchpoint: { noResult(node); break; } - case VarInjectionWatchpoint: - case VariableWatchpoint: { - noResult(node); + case CheckTypeInfoFlags: { + compileCheckTypeInfoFlags(node); break; } - case CheckHasInstance: { + case OverridesHasInstance: { + + Node* hasInstanceValueNode = node->child2().node(); + JSFunction* defaultHasInstanceFunction = jsCast<JSFunction*>(node->cellOperand()->value()); + + MacroAssembler::Jump notDefaulthasInstanceValue; + 
MacroAssembler::Jump hasInstanceValueNotCell; SpeculateCellOperand base(this, node->child1()); - GPRTemporary structure(this); + JSValueOperand hasInstanceValue(this, node->child2()); + GPRTemporary result(this); - // Speculate that base 'ImplementsDefaultHasInstance'. - m_jit.loadPtr(MacroAssembler::Address(base.gpr(), JSCell::structureOffset()), structure.gpr()); - speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(structure.gpr(), Structure::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance))); + GPRReg resultGPR = result.gpr(); - noResult(node); + // If we have proven that the constructor's Symbol.hasInstance will always be the one on + // Function.prototype[Symbol.hasInstance] then we don't need a runtime check here. We don't worry + // about the case where the constructor's Symbol.hasInstance is a constant but is not the default + // one as fixup should have converted this check to true. + ASSERT(!hasInstanceValueNode->isCellConstant() || defaultHasInstanceFunction == hasInstanceValueNode->asCell()); + if (!hasInstanceValueNode->isCellConstant()) { + + JSValueRegs hasInstanceValueRegs = hasInstanceValue.jsValueRegs(); + hasInstanceValueNotCell = m_jit.branchIfNotCell(hasInstanceValueRegs); + notDefaulthasInstanceValue = m_jit.branchPtr(MacroAssembler::NotEqual, hasInstanceValueRegs.payloadGPR(), TrustedImmPtr(defaultHasInstanceFunction)); + } + + // Check that constructor 'ImplementsDefaultHasInstance'. + m_jit.test8(MacroAssembler::Zero, MacroAssembler::Address(base.gpr(), JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance), resultGPR); + MacroAssembler::Jump done = m_jit.jump(); + + if (!hasInstanceValueNode->isCellConstant()) { + hasInstanceValueNotCell.link(&m_jit); + notDefaulthasInstanceValue.link(&m_jit); + moveTrueTo(resultGPR); + } + + done.link(&m_jit); + booleanResult(resultGPR, node); break; } @@ -4071,13 +4234,18 @@ void SpeculativeJIT::compile(Node* node) break; } + case InstanceOfCustom: { + compileInstanceOfCustom(node); + break; + } + case IsUndefined: { JSValueOperand value(this, node->child1()); GPRTemporary result(this); GPRTemporary localGlobalObject(this); GPRTemporary remoteGlobalObject(this); - JITCompiler::Jump isCell = m_jit.branch32(JITCompiler::Equal, value.tagGPR(), JITCompiler::TrustedImm32(JSValue::CellTag)); + JITCompiler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs()); m_jit.compare32(JITCompiler::Equal, value.tagGPR(), TrustedImm32(JSValue::UndefinedTag), result.gpr()); JITCompiler::Jump done = m_jit.jump(); @@ -4088,15 +4256,18 @@ void SpeculativeJIT::compile(Node* node) m_jit.move(TrustedImm32(0), result.gpr()); notMasqueradesAsUndefined = m_jit.jump(); } else { - m_jit.loadPtr(JITCompiler::Address(value.payloadGPR(), JSCell::structureOffset()), result.gpr()); - JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::NonZero, JITCompiler::Address(result.gpr(), Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); + JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8( + JITCompiler::NonZero, + JITCompiler::Address(value.payloadGPR(), JSCell::typeInfoFlagsOffset()), + TrustedImm32(MasqueradesAsUndefined)); m_jit.move(TrustedImm32(0), result.gpr()); notMasqueradesAsUndefined = m_jit.jump(); isMasqueradesAsUndefined.link(&m_jit); GPRReg localGlobalObjectGPR = localGlobalObject.gpr(); GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr(); - 
m_jit.move(TrustedImmPtr(m_jit.globalObjectFor(node->codeOrigin)), localGlobalObjectGPR); + m_jit.move(TrustedImmPtr(m_jit.globalObjectFor(node->origin.semantic)), localGlobalObjectGPR); + m_jit.loadPtr(JITCompiler::Address(value.payloadGPR(), JSCell::structureIDOffset()), result.gpr()); m_jit.loadPtr(JITCompiler::Address(result.gpr(), Structure::globalObjectOffset()), remoteGlobalObjectGPR); m_jit.compare32(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, result.gpr()); } @@ -4130,10 +4301,12 @@ void SpeculativeJIT::compile(Node* node) JSValueOperand value(this, node->child1()); GPRTemporary result(this, Reuse, value, TagWord); - JITCompiler::Jump isNotCell = m_jit.branch32(JITCompiler::NotEqual, value.tagGPR(), JITCompiler::TrustedImm32(JSValue::CellTag)); + JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(value.jsValueRegs()); - m_jit.loadPtr(JITCompiler::Address(value.payloadGPR(), JSCell::structureOffset()), result.gpr()); - m_jit.compare8(JITCompiler::Equal, JITCompiler::Address(result.gpr(), Structure::typeInfoTypeOffset()), TrustedImm32(StringType), result.gpr()); + m_jit.compare8(JITCompiler::Equal, + JITCompiler::Address(value.payloadGPR(), JSCell::typeInfoTypeOffset()), + TrustedImm32(StringType), + result.gpr()); JITCompiler::Jump done = m_jit.jump(); isNotCell.link(&m_jit); @@ -4146,86 +4319,35 @@ void SpeculativeJIT::compile(Node* node) case IsObject: { JSValueOperand value(this, node->child1()); - GPRReg valueTagGPR = value.tagGPR(); - GPRReg valuePayloadGPR = value.payloadGPR(); - GPRResult result(this); - GPRReg resultGPR = result.gpr(); - flushRegisters(); - callOperation(operationIsObject, resultGPR, valueTagGPR, valuePayloadGPR); + GPRTemporary result(this, Reuse, value, TagWord); + + JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(value.jsValueRegs()); + + m_jit.compare8(JITCompiler::AboveOrEqual, + JITCompiler::Address(value.payloadGPR(), JSCell::typeInfoTypeOffset()), + TrustedImm32(ObjectType), + result.gpr()); + JITCompiler::Jump done = m_jit.jump(); + + isNotCell.link(&m_jit); + m_jit.move(TrustedImm32(0), result.gpr()); + + done.link(&m_jit); booleanResult(result.gpr(), node); break; } + case IsObjectOrNull: { + compileIsObjectOrNull(node); + break; + } + case IsFunction: { - JSValueOperand value(this, node->child1()); - GPRReg valueTagGPR = value.tagGPR(); - GPRReg valuePayloadGPR = value.payloadGPR(); - GPRResult result(this); - GPRReg resultGPR = result.gpr(); - flushRegisters(); - callOperation(operationIsFunction, resultGPR, valueTagGPR, valuePayloadGPR); - booleanResult(result.gpr(), node); + compileIsFunction(node); break; } case TypeOf: { - JSValueOperand value(this, node->child1(), ManualOperandSpeculation); - GPRReg tagGPR = value.tagGPR(); - GPRReg payloadGPR = value.payloadGPR(); - GPRTemporary temp(this); - GPRReg tempGPR = temp.gpr(); - GPRResult result(this); - GPRReg resultGPR = result.gpr(); - JITCompiler::JumpList doneJumps; - - flushRegisters(); - - ASSERT(node->child1().useKind() == UntypedUse || node->child1().useKind() == CellUse || node->child1().useKind() == StringUse); - - JITCompiler::Jump isNotCell = m_jit.branch32(JITCompiler::NotEqual, tagGPR, JITCompiler::TrustedImm32(JSValue::CellTag)); - if (node->child1().useKind() != UntypedUse) - DFG_TYPE_CHECK(JSValueRegs(tagGPR, payloadGPR), node->child1(), SpecCell, isNotCell); - - if (!node->child1()->shouldSpeculateObject() || node->child1().useKind() == StringUse) { - m_jit.loadPtr(JITCompiler::Address(payloadGPR, JSCell::structureOffset()), tempGPR); - 
JITCompiler::Jump notString = m_jit.branch8(JITCompiler::NotEqual, JITCompiler::Address(tempGPR, Structure::typeInfoTypeOffset()), TrustedImm32(StringType)); - if (node->child1().useKind() == StringUse) - DFG_TYPE_CHECK(JSValueRegs(tagGPR, payloadGPR), node->child1(), SpecString, notString); - m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.stringString()), resultGPR); - doneJumps.append(m_jit.jump()); - if (node->child1().useKind() != StringUse) { - notString.link(&m_jit); - callOperation(operationTypeOf, resultGPR, payloadGPR); - doneJumps.append(m_jit.jump()); - } - } else { - callOperation(operationTypeOf, resultGPR, payloadGPR); - doneJumps.append(m_jit.jump()); - } - - if (node->child1().useKind() == UntypedUse) { - isNotCell.link(&m_jit); - - m_jit.add32(TrustedImm32(1), tagGPR, tempGPR); - JITCompiler::Jump notNumber = m_jit.branch32(JITCompiler::AboveOrEqual, tempGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1)); - m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.numberString()), resultGPR); - doneJumps.append(m_jit.jump()); - notNumber.link(&m_jit); - - JITCompiler::Jump notUndefined = m_jit.branch32(JITCompiler::NotEqual, tagGPR, TrustedImm32(JSValue::UndefinedTag)); - m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.undefinedString()), resultGPR); - doneJumps.append(m_jit.jump()); - notUndefined.link(&m_jit); - - JITCompiler::Jump notNull = m_jit.branch32(JITCompiler::NotEqual, tagGPR, TrustedImm32(JSValue::NullTag)); - m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.objectString()), resultGPR); - doneJumps.append(m_jit.jump()); - notNull.link(&m_jit); - - // Only boolean left - m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.booleanString()), resultGPR); - } - doneJumps.link(&m_jit); - cellResult(resultGPR, node); + compileTypeOf(node); break; } @@ -4233,414 +4355,444 @@ void SpeculativeJIT::compile(Node* node) break; case Call: + case TailCall: + case TailCallInlinedCaller: case Construct: + case CallVarargs: + case TailCallVarargs: + case TailCallVarargsInlinedCaller: + case ConstructVarargs: + case CallForwardVarargs: + case TailCallForwardVarargs: + case TailCallForwardVarargsInlinedCaller: + case ConstructForwardVarargs: emitCall(node); break; - case CreateActivation: { - JSValueOperand value(this, node->child1()); - GPRTemporary result(this, Reuse, value, PayloadWord); + case LoadVarargs: { + LoadVarargsData* data = node->loadVarargsData(); - GPRReg valueTagGPR = value.tagGPR(); - GPRReg valuePayloadGPR = value.payloadGPR(); - GPRReg resultGPR = result.gpr(); + GPRReg argumentsTagGPR; + GPRReg argumentsPayloadGPR; + { + JSValueOperand arguments(this, node->child1()); + argumentsTagGPR = arguments.tagGPR(); + argumentsPayloadGPR = arguments.payloadGPR(); + flushRegisters(); + } - m_jit.move(valuePayloadGPR, resultGPR); + callOperation(operationSizeOfVarargs, GPRInfo::returnValueGPR, argumentsTagGPR, argumentsPayloadGPR, data->offset); + m_jit.exceptionCheck(); - JITCompiler::Jump notCreated = m_jit.branch32(JITCompiler::Equal, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag)); + lock(GPRInfo::returnValueGPR); + { + JSValueOperand arguments(this, node->child1()); + argumentsTagGPR = arguments.tagGPR(); + argumentsPayloadGPR = arguments.payloadGPR(); + flushRegisters(); + } + unlock(GPRInfo::returnValueGPR); - addSlowPathGenerator( - slowPathCall( - notCreated, this, operationCreateActivation, resultGPR, - framePointerOffsetToGetActivationRegisters())); + // FIXME: There is a chance that we will call an effectful length property twice. 
This is safe + // from the standpoint of the VM's integrity, but it's subtly wrong from a spec compliance + // standpoint. The best solution would be one where we can exit *into* the op_call_varargs right + // past the sizing. + // https://bugs.webkit.org/show_bug.cgi?id=141448 + + GPRReg argCountIncludingThisGPR = + JITCompiler::selectScratchGPR(GPRInfo::returnValueGPR, argumentsTagGPR, argumentsPayloadGPR); - cellResult(resultGPR, node); - break; - } + m_jit.add32(TrustedImm32(1), GPRInfo::returnValueGPR, argCountIncludingThisGPR); + speculationCheck( + VarargsOverflow, JSValueSource(), Edge(), m_jit.branch32( + MacroAssembler::Above, + argCountIncludingThisGPR, + TrustedImm32(data->limit))); + + m_jit.store32(argCountIncludingThisGPR, JITCompiler::payloadFor(data->machineCount)); + + callOperation(operationLoadVarargs, data->machineStart.offset(), argumentsTagGPR, argumentsPayloadGPR, data->offset, GPRInfo::returnValueGPR, data->mandatoryMinimum); + m_jit.exceptionCheck(); - case FunctionReentryWatchpoint: { noResult(node); break; } - case CreateArguments: { - JSValueOperand value(this, node->child1()); - GPRTemporary result(this, Reuse, value, PayloadWord); - - GPRReg valueTagGPR = value.tagGPR(); - GPRReg valuePayloadGPR = value.payloadGPR(); - GPRReg resultGPR = result.gpr(); + case ForwardVarargs: { + compileForwardVarargs(node); + break; + } - m_jit.move(valuePayloadGPR, resultGPR); + case CreateActivation: { + compileCreateActivation(node); + break; + } - JITCompiler::Jump notCreated = m_jit.branch32(JITCompiler::Equal, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag)); + case CreateDirectArguments: { + compileCreateDirectArguments(node); + break; + } - if (node->codeOrigin.inlineCallFrame) { - addSlowPathGenerator( - slowPathCall( - notCreated, this, operationCreateInlinedArguments, resultGPR, - node->codeOrigin.inlineCallFrame)); - } else { - addSlowPathGenerator( - slowPathCall(notCreated, this, operationCreateArguments, resultGPR)); - } + case GetFromArguments: { + compileGetFromArguments(node); + break; + } - cellResult(resultGPR, node); + case PutToArguments: { + compilePutToArguments(node); break; } - case TearOffActivation: { - JSValueOperand activationValue(this, node->child1()); - GPRTemporary scratch(this); + case CreateScopedArguments: { + compileCreateScopedArguments(node); + break; + } - GPRReg activationValueTagGPR = activationValue.tagGPR(); - GPRReg activationValuePayloadGPR = activationValue.payloadGPR(); - GPRReg scratchGPR = scratch.gpr(); + case CreateClonedArguments: { + compileCreateClonedArguments(node); + break; + } - JITCompiler::Jump notCreated = m_jit.branch32(JITCompiler::Equal, activationValueTagGPR, TrustedImm32(JSValue::EmptyValueTag)); - - SymbolTable* symbolTable = m_jit.symbolTableFor(node->codeOrigin); - int registersOffset = JSActivation::registersOffset(symbolTable); - - int bytecodeCaptureStart = symbolTable->captureStart(); - int machineCaptureStart = m_jit.graph().m_machineCaptureStart; - for (int i = symbolTable->captureCount(); i--;) { - m_jit.loadPtr( - JITCompiler::Address( - GPRInfo::callFrameRegister, (machineCaptureStart - i) * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), - scratchGPR); - m_jit.storePtr( - scratchGPR, JITCompiler::Address( - activationValuePayloadGPR, registersOffset + (bytecodeCaptureStart - i) * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); - m_jit.loadPtr( - JITCompiler::Address( - GPRInfo::callFrameRegister, (machineCaptureStart - i) * sizeof(Register) + 
OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), - scratchGPR); - m_jit.storePtr( - scratchGPR, JITCompiler::Address( - activationValuePayloadGPR, registersOffset + (bytecodeCaptureStart - i) * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); - } - m_jit.addPtr(TrustedImm32(registersOffset), activationValuePayloadGPR, scratchGPR); - m_jit.storePtr(scratchGPR, JITCompiler::Address(activationValuePayloadGPR, JSActivation::offsetOfRegisters())); - - notCreated.link(&m_jit); - noResult(node); + case CopyRest: { + compileCopyRest(node); break; } - - case TearOffArguments: { - JSValueOperand unmodifiedArgumentsValue(this, node->child1()); - JSValueOperand activationValue(this, node->child2()); - GPRReg unmodifiedArgumentsValuePayloadGPR = unmodifiedArgumentsValue.payloadGPR(); - GPRReg activationValuePayloadGPR = activationValue.payloadGPR(); - - JITCompiler::Jump created = m_jit.branchTest32( - JITCompiler::NonZero, unmodifiedArgumentsValuePayloadGPR); - - if (node->codeOrigin.inlineCallFrame) { - addSlowPathGenerator( - slowPathCall( - created, this, operationTearOffInlinedArguments, NoResult, - unmodifiedArgumentsValuePayloadGPR, activationValuePayloadGPR, node->codeOrigin.inlineCallFrame)); - } else { - addSlowPathGenerator( - slowPathCall( - created, this, operationTearOffArguments, NoResult, - unmodifiedArgumentsValuePayloadGPR, activationValuePayloadGPR)); - } - - noResult(node); + + case GetRestLength: { + compileGetRestLength(node); break; } - - case CheckArgumentsNotCreated: { - ASSERT(!isEmptySpeculation( - m_state.variables().operand( - m_jit.graph().argumentsRegisterFor(node->codeOrigin)).m_type)); - speculationCheck( - Uncountable, JSValueRegs(), 0, - m_jit.branch32( - JITCompiler::NotEqual, - JITCompiler::tagFor(m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin)), - TrustedImm32(JSValue::EmptyValueTag))); - noResult(node); + + case NewFunction: + case NewArrowFunction: + case NewGeneratorFunction: + compileNewFunction(node); + break; + + case In: + compileIn(node); + break; + + case StoreBarrier: { + compileStoreBarrier(node); break; } - - case GetMyArgumentsLength: { - GPRTemporary result(this); + + case GetEnumerableLength: { + SpeculateCellOperand enumerator(this, node->child1()); + GPRFlushedCallResult result(this); GPRReg resultGPR = result.gpr(); - - if (!isEmptySpeculation( - m_state.variables().operand( - m_jit.graph().argumentsRegisterFor(node->codeOrigin)).m_type)) { - speculationCheck( - ArgumentsEscaped, JSValueRegs(), 0, - m_jit.branch32( - JITCompiler::NotEqual, - JITCompiler::tagFor(m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin)), - TrustedImm32(JSValue::EmptyValueTag))); - } - - ASSERT(!node->codeOrigin.inlineCallFrame); - m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), resultGPR); - m_jit.sub32(TrustedImm32(1), resultGPR); + + m_jit.load32(MacroAssembler::Address(enumerator.gpr(), JSPropertyNameEnumerator::indexedLengthOffset()), resultGPR); int32Result(resultGPR, node); break; } - - case GetMyArgumentsLengthSafe: { + case HasGenericProperty: { + JSValueOperand base(this, node->child1()); + SpeculateCellOperand property(this, node->child2()); + GPRFlushedCallResult resultPayload(this); + GPRFlushedCallResult2 resultTag(this); + GPRReg basePayloadGPR = base.payloadGPR(); + GPRReg baseTagGPR = base.tagGPR(); + GPRReg resultPayloadGPR = resultPayload.gpr(); + GPRReg resultTagGPR = resultTag.gpr(); + + flushRegisters(); + callOperation(operationHasGenericProperty, resultTagGPR, resultPayloadGPR, 
baseTagGPR, basePayloadGPR, property.gpr()); + m_jit.exceptionCheck(); + booleanResult(resultPayloadGPR, node); + break; + } + case HasStructureProperty: { + JSValueOperand base(this, node->child1()); + SpeculateCellOperand property(this, node->child2()); + SpeculateCellOperand enumerator(this, node->child3()); GPRTemporary resultPayload(this); GPRTemporary resultTag(this); + + GPRReg baseTagGPR = base.tagGPR(); + GPRReg basePayloadGPR = base.payloadGPR(); + GPRReg propertyGPR = property.gpr(); GPRReg resultPayloadGPR = resultPayload.gpr(); GPRReg resultTagGPR = resultTag.gpr(); - - JITCompiler::Jump created = m_jit.branch32( - JITCompiler::NotEqual, - JITCompiler::tagFor(m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin)), - TrustedImm32(JSValue::EmptyValueTag)); - - if (node->codeOrigin.inlineCallFrame) { - m_jit.move( - Imm32(node->codeOrigin.inlineCallFrame->arguments.size() - 1), - resultPayloadGPR); - } else { - m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), resultPayloadGPR); - m_jit.sub32(TrustedImm32(1), resultPayloadGPR); - } - m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTagGPR); - - // FIXME: the slow path generator should perform a forward speculation that the - // result is an integer. For now we postpone the speculation by having this return - // a JSValue. - - addSlowPathGenerator( - slowPathCall( - created, this, operationGetArgumentsLength, - JSValueRegs(resultTagGPR, resultPayloadGPR), - m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin).offset())); - - jsValueResult(resultTagGPR, resultPayloadGPR, node); + + m_jit.load32(MacroAssembler::Address(basePayloadGPR, JSCell::structureIDOffset()), resultTagGPR); + MacroAssembler::Jump wrongStructure = m_jit.branch32(MacroAssembler::NotEqual, + resultTagGPR, + MacroAssembler::Address(enumerator.gpr(), JSPropertyNameEnumerator::cachedStructureIDOffset())); + + moveTrueTo(resultPayloadGPR); + MacroAssembler::Jump done = m_jit.jump(); + + done.link(&m_jit); + + addSlowPathGenerator(slowPathCall(wrongStructure, this, operationHasGenericProperty, resultTagGPR, resultPayloadGPR, baseTagGPR, basePayloadGPR, propertyGPR)); + booleanResult(resultPayloadGPR, node); break; } - - case GetMyArgumentByVal: { - SpeculateStrictInt32Operand index(this, node->child1()); + case HasIndexedProperty: { + SpeculateCellOperand base(this, node->child1()); + SpeculateInt32Operand index(this, node->child2()); GPRTemporary resultPayload(this); GPRTemporary resultTag(this); + + GPRReg baseGPR = base.gpr(); GPRReg indexGPR = index.gpr(); GPRReg resultPayloadGPR = resultPayload.gpr(); GPRReg resultTagGPR = resultTag.gpr(); - - if (!isEmptySpeculation( - m_state.variables().operand( - m_jit.graph().argumentsRegisterFor(node->codeOrigin)).m_type)) { - speculationCheck( - ArgumentsEscaped, JSValueRegs(), 0, - m_jit.branch32( - JITCompiler::NotEqual, - JITCompiler::tagFor(m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin)), - TrustedImm32(JSValue::EmptyValueTag))); - } - - m_jit.add32(TrustedImm32(1), indexGPR, resultPayloadGPR); + + MacroAssembler::JumpList slowCases; + ArrayMode mode = node->arrayMode(); + switch (mode.type()) { + case Array::Int32: + case Array::Contiguous: { + ASSERT(!!node->child3()); + StorageOperand storage(this, node->child3()); + GPRTemporary scratch(this); - if (node->codeOrigin.inlineCallFrame) { - speculationCheck( - Uncountable, JSValueRegs(), 0, - m_jit.branch32( - JITCompiler::AboveOrEqual, - resultPayloadGPR, - Imm32(node->codeOrigin.inlineCallFrame->arguments.size()))); - } else { - 
speculationCheck( - Uncountable, JSValueRegs(), 0, - m_jit.branch32( - JITCompiler::AboveOrEqual, - resultPayloadGPR, - JITCompiler::payloadFor(JSStack::ArgumentCount))); + GPRReg storageGPR = storage.gpr(); + GPRReg scratchGPR = scratch.gpr(); + + slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()))); + m_jit.load32(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), scratchGPR); + slowCases.append(m_jit.branch32(MacroAssembler::Equal, scratchGPR, TrustedImm32(JSValue::EmptyValueTag))); + break; } - - JITCompiler::JumpList slowArgument; - JITCompiler::JumpList slowArgumentOutOfBounds; - if (m_jit.symbolTableFor(node->codeOrigin)->slowArguments()) { - RELEASE_ASSERT(!node->codeOrigin.inlineCallFrame); - const SlowArgument* slowArguments = m_jit.graph().m_slowArguments.get(); - slowArgumentOutOfBounds.append( - m_jit.branch32( - JITCompiler::AboveOrEqual, indexGPR, - Imm32(m_jit.symbolTableFor(node->codeOrigin)->parameterCount()))); + case Array::Double: { + ASSERT(!!node->child3()); + StorageOperand storage(this, node->child3()); + FPRTemporary scratch(this); + FPRReg scratchFPR = scratch.fpr(); + GPRReg storageGPR = storage.gpr(); - COMPILE_ASSERT(sizeof(SlowArgument) == 8, SlowArgument_size_is_eight_bytes); - m_jit.move(ImmPtr(slowArguments), resultPayloadGPR); - m_jit.load32( - JITCompiler::BaseIndex( - resultPayloadGPR, indexGPR, JITCompiler::TimesEight, - OBJECT_OFFSETOF(SlowArgument, index)), - resultPayloadGPR); + slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()))); + m_jit.loadDouble(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), scratchFPR); + slowCases.append(m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, scratchFPR, scratchFPR)); + break; + } + case Array::ArrayStorage: { + ASSERT(!!node->child3()); + StorageOperand storage(this, node->child3()); + GPRTemporary scratch(this); - m_jit.load32( - JITCompiler::BaseIndex( - GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight, - OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), - resultTagGPR); - m_jit.load32( - JITCompiler::BaseIndex( - GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight, - OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), - resultPayloadGPR); - slowArgument.append(m_jit.jump()); + GPRReg storageGPR = storage.gpr(); + GPRReg scratchGPR = scratch.gpr(); + + slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()))); + m_jit.load32(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), scratchGPR); + slowCases.append(m_jit.branch32(MacroAssembler::Equal, scratchGPR, TrustedImm32(JSValue::EmptyValueTag))); + break; } - slowArgumentOutOfBounds.link(&m_jit); - - m_jit.load32( - JITCompiler::BaseIndex( - GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight, - m_jit.offsetOfArgumentsIncludingThis(node->codeOrigin) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), - resultTagGPR); - m_jit.load32( - JITCompiler::BaseIndex( - GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight, - m_jit.offsetOfArgumentsIncludingThis(node->codeOrigin) + OBJECT_OFFSETOF(EncodedValueDescriptor, 
asBits.payload)), - resultPayloadGPR); - - slowArgument.link(&m_jit); - jsValueResult(resultTagGPR, resultPayloadGPR, node); + default: { + slowCases.append(m_jit.jump()); + break; + } + } + + moveTrueTo(resultPayloadGPR); + MacroAssembler::Jump done = m_jit.jump(); + + addSlowPathGenerator(slowPathCall(slowCases, this, operationHasIndexedProperty, resultTagGPR, resultPayloadGPR, baseGPR, indexGPR)); + + done.link(&m_jit); + booleanResult(resultPayloadGPR, node); break; } - case GetMyArgumentByValSafe: { - SpeculateStrictInt32Operand index(this, node->child1()); + case GetDirectPname: { + Edge& baseEdge = m_jit.graph().varArgChild(node, 0); + Edge& propertyEdge = m_jit.graph().varArgChild(node, 1); + + SpeculateCellOperand base(this, baseEdge); + SpeculateCellOperand property(this, propertyEdge); + GPRReg baseGPR = base.gpr(); + GPRReg propertyGPR = property.gpr(); + +#if CPU(X86) + GPRFlushedCallResult resultPayload(this); + GPRFlushedCallResult2 resultTag(this); + GPRTemporary scratch(this); + + GPRReg resultTagGPR = resultTag.gpr(); + GPRReg resultPayloadGPR = resultPayload.gpr(); + GPRReg scratchGPR = scratch.gpr(); + + // Not enough registers on X86 for this code, so always use the slow path. + flushRegisters(); + m_jit.move(MacroAssembler::TrustedImm32(JSValue::CellTag), scratchGPR); + callOperation(operationGetByValCell, resultTagGPR, resultPayloadGPR, baseGPR, scratchGPR, propertyGPR); + m_jit.exceptionCheck(); +#else GPRTemporary resultPayload(this); GPRTemporary resultTag(this); - GPRReg indexGPR = index.gpr(); - GPRReg resultPayloadGPR = resultPayload.gpr(); + GPRTemporary scratch(this); + GPRReg resultTagGPR = resultTag.gpr(); - - JITCompiler::JumpList slowPath; + GPRReg resultPayloadGPR = resultPayload.gpr(); + GPRReg scratchGPR = scratch.gpr(); + + Edge& indexEdge = m_jit.graph().varArgChild(node, 2); + Edge& enumeratorEdge = m_jit.graph().varArgChild(node, 3); + + SpeculateInt32Operand index(this, indexEdge); + SpeculateCellOperand enumerator(this, enumeratorEdge); + + GPRReg indexGPR = index.gpr(); + GPRReg enumeratorGPR = enumerator.gpr(); + + MacroAssembler::JumpList slowPath; + + // Check the structure + m_jit.load32(MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()), scratchGPR); slowPath.append( m_jit.branch32( - JITCompiler::NotEqual, - JITCompiler::tagFor(m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin)), - TrustedImm32(JSValue::EmptyValueTag))); - - m_jit.add32(TrustedImm32(1), indexGPR, resultPayloadGPR); - if (node->codeOrigin.inlineCallFrame) { - slowPath.append( - m_jit.branch32( - JITCompiler::AboveOrEqual, - resultPayloadGPR, - Imm32(node->codeOrigin.inlineCallFrame->arguments.size()))); - } else { - slowPath.append( - m_jit.branch32( - JITCompiler::AboveOrEqual, - resultPayloadGPR, - JITCompiler::payloadFor(JSStack::ArgumentCount))); - } + MacroAssembler::NotEqual, + scratchGPR, + MacroAssembler::Address( + enumeratorGPR, JSPropertyNameEnumerator::cachedStructureIDOffset()))); - JITCompiler::JumpList slowArgument; - JITCompiler::JumpList slowArgumentOutOfBounds; - if (m_jit.symbolTableFor(node->codeOrigin)->slowArguments()) { - RELEASE_ASSERT(!node->codeOrigin.inlineCallFrame); - const SlowArgument* slowArguments = m_jit.graph().m_slowArguments.get(); - slowArgumentOutOfBounds.append( - m_jit.branch32( - JITCompiler::AboveOrEqual, indexGPR, - Imm32(m_jit.symbolTableFor(node->codeOrigin)->parameterCount()))); + // Compute the offset + // If index is less than the enumerator's cached inline storage, then it's an inline access + 
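// A compact model of the property-storage split this offset computation handles:
// slots below the inline capacity live directly in the object, the rest live in
// the out-of-line butterfly at negated indices. Slot counts, field names and the
// exact indexing below are schematic stand-ins, not JSC's real layout.
#include <cstdint>

using JSValueBits = uint64_t;

struct SketchObject {
    JSValueBits* butterfly = nullptr;   // out-of-line storage, addressed downwards from its base
    unsigned inlineCapacity = 4;        // number of slots held inside the cell itself
    JSValueBits inlineStorage[4] = {};

    JSValueBits loadByEnumeratorIndex(unsigned index) const
    {
        if (index < inlineCapacity)
            return inlineStorage[index];        // inline fast case
        int outOfLine = static_cast<int>(index - inlineCapacity);
        return butterfly[-outOfLine - 1];       // first out-of-line slot sits just below the butterfly pointer
    }
};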
MacroAssembler::Jump outOfLineAccess = m_jit.branch32(MacroAssembler::AboveOrEqual, + indexGPR, MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset())); - COMPILE_ASSERT(sizeof(SlowArgument) == 8, SlowArgument_size_is_eight_bytes); - m_jit.move(ImmPtr(slowArguments), resultPayloadGPR); - m_jit.load32( - JITCompiler::BaseIndex( - resultPayloadGPR, indexGPR, JITCompiler::TimesEight, - OBJECT_OFFSETOF(SlowArgument, index)), - resultPayloadGPR); - m_jit.load32( - JITCompiler::BaseIndex( - GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight, - OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), - resultTagGPR); - m_jit.load32( - JITCompiler::BaseIndex( - GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight, - OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), - resultPayloadGPR); - slowArgument.append(m_jit.jump()); - } - slowArgumentOutOfBounds.link(&m_jit); - - m_jit.load32( - JITCompiler::BaseIndex( - GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight, - m_jit.offsetOfArgumentsIncludingThis(node->codeOrigin) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), - resultTagGPR); - m_jit.load32( - JITCompiler::BaseIndex( - GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight, - m_jit.offsetOfArgumentsIncludingThis(node->codeOrigin) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), - resultPayloadGPR); - - if (node->codeOrigin.inlineCallFrame) { - addSlowPathGenerator( - slowPathCall( - slowPath, this, operationGetInlinedArgumentByVal, - JSValueRegs(resultTagGPR, resultPayloadGPR), - m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin).offset(), - node->codeOrigin.inlineCallFrame, indexGPR)); - } else { - addSlowPathGenerator( - slowPathCall( - slowPath, this, operationGetArgumentByVal, - JSValueRegs(resultTagGPR, resultPayloadGPR), - m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin).offset(), - indexGPR)); - } + m_jit.move(indexGPR, scratchGPR); + m_jit.signExtend32ToPtr(scratchGPR, scratchGPR); + m_jit.load32(MacroAssembler::BaseIndex(baseGPR, scratchGPR, MacroAssembler::TimesEight, JSObject::offsetOfInlineStorage() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagGPR); + m_jit.load32(MacroAssembler::BaseIndex(baseGPR, scratchGPR, MacroAssembler::TimesEight, JSObject::offsetOfInlineStorage() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadGPR); + + MacroAssembler::Jump done = m_jit.jump(); - slowArgument.link(&m_jit); + // Otherwise it's out of line + outOfLineAccess.link(&m_jit); + m_jit.move(indexGPR, scratchGPR); + m_jit.sub32(MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), scratchGPR); + m_jit.neg32(scratchGPR); + m_jit.signExtend32ToPtr(scratchGPR, scratchGPR); + // We use resultPayloadGPR as a temporary here. We have to make sure clobber it after getting the + // value out of indexGPR and enumeratorGPR because resultPayloadGPR could reuse either of those registers. 
+ m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), resultPayloadGPR); + slowPath.append(m_jit.branchIfNotToSpace(resultPayloadGPR)); + int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue); + m_jit.load32(MacroAssembler::BaseIndex(resultPayloadGPR, scratchGPR, MacroAssembler::TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagGPR); + m_jit.load32(MacroAssembler::BaseIndex(resultPayloadGPR, scratchGPR, MacroAssembler::TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadGPR); + + done.link(&m_jit); + + addSlowPathGenerator(slowPathCall(slowPath, this, operationGetByValCell, resultTagGPR, resultPayloadGPR, baseGPR, propertyGPR)); +#endif + jsValueResult(resultTagGPR, resultPayloadGPR, node); break; } - - case NewFunctionNoCheck: - compileNewFunctionNoCheck(node); + case GetPropertyEnumerator: { + SpeculateCellOperand base(this, node->child1()); + GPRFlushedCallResult result(this); + GPRReg resultGPR = result.gpr(); + + flushRegisters(); + callOperation(operationGetPropertyEnumerator, resultGPR, base.gpr()); + m_jit.exceptionCheck(); + cellResult(resultGPR, node); break; - - case NewFunction: { - JSValueOperand value(this, node->child1()); - GPRTemporary resultTag(this, Reuse, value, TagWord); - GPRTemporary resultPayload(this, Reuse, value, PayloadWord); - - GPRReg valueTagGPR = value.tagGPR(); - GPRReg valuePayloadGPR = value.payloadGPR(); + } + case GetEnumeratorStructurePname: + case GetEnumeratorGenericPname: { + SpeculateCellOperand enumerator(this, node->child1()); + SpeculateInt32Operand index(this, node->child2()); + GPRTemporary scratch(this); + GPRTemporary resultPayload(this); + GPRTemporary resultTag(this); + + GPRReg enumeratorGPR = enumerator.gpr(); + GPRReg indexGPR = index.gpr(); + GPRReg scratchGPR = scratch.gpr(); GPRReg resultTagGPR = resultTag.gpr(); GPRReg resultPayloadGPR = resultPayload.gpr(); - - m_jit.move(valuePayloadGPR, resultPayloadGPR); - m_jit.move(valueTagGPR, resultTagGPR); - - JITCompiler::Jump notCreated = m_jit.branch32(JITCompiler::Equal, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag)); - - addSlowPathGenerator( - slowPathCall( - notCreated, this, operationNewFunction, JSValueRegs(resultTagGPR, resultPayloadGPR), - m_jit.codeBlock()->functionDecl(node->functionDeclIndex()))); - + + MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, indexGPR, + MacroAssembler::Address(enumeratorGPR, (op == GetEnumeratorStructurePname) + ? 
JSPropertyNameEnumerator::endStructurePropertyIndexOffset() + : JSPropertyNameEnumerator::endGenericPropertyIndexOffset())); + + m_jit.move(MacroAssembler::TrustedImm32(JSValue::NullTag), resultTagGPR); + m_jit.move(MacroAssembler::TrustedImm32(0), resultPayloadGPR); + + MacroAssembler::Jump done = m_jit.jump(); + inBounds.link(&m_jit); + + m_jit.loadPtr(MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), scratchGPR); + m_jit.loadPtr(MacroAssembler::BaseIndex(scratchGPR, indexGPR, MacroAssembler::ScalePtr), resultPayloadGPR); + m_jit.move(MacroAssembler::TrustedImm32(JSValue::CellTag), resultTagGPR); + + done.link(&m_jit); jsValueResult(resultTagGPR, resultPayloadGPR, node); break; } - - case NewFunctionExpression: - compileNewFunctionExpression(node); - break; - - case In: - compileIn(node); + case ToIndexString: { + SpeculateInt32Operand index(this, node->child1()); + GPRFlushedCallResult result(this); + GPRReg resultGPR = result.gpr(); + + flushRegisters(); + callOperation(operationToIndexString, resultGPR, index.gpr()); + m_jit.exceptionCheck(); + cellResult(resultGPR, node); break; + } + case ProfileType: { + JSValueOperand value(this, node->child1()); + GPRTemporary scratch1(this); + GPRTemporary scratch2(this); + GPRTemporary scratch3(this); - case StoreBarrier: - case ConditionalStoreBarrier: - case StoreBarrierWithNullCheck: { - compileStoreBarrier(node); + GPRReg scratch1GPR = scratch1.gpr(); + GPRReg scratch2GPR = scratch2.gpr(); + GPRReg scratch3GPR = scratch3.gpr(); + + JITCompiler::Jump isTDZValue = m_jit.branch32(JITCompiler::Equal, value.tagGPR(), TrustedImm32(JSValue::EmptyValueTag)); + + // Load the TypeProfilerLog into Scratch2. + TypeProfilerLog* cachedTypeProfilerLog = m_jit.vm()->typeProfilerLog(); + m_jit.move(TrustedImmPtr(cachedTypeProfilerLog), scratch2GPR); + + // Load the next LogEntry into Scratch1. + m_jit.loadPtr(MacroAssembler::Address(scratch2GPR, TypeProfilerLog::currentLogEntryOffset()), scratch1GPR); + + // Store the JSValue onto the log entry. + m_jit.store32(value.tagGPR(), MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); + m_jit.store32(value.payloadGPR(), MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); + + // Store the structureID of the cell if valueGPR is a cell, otherwise, store 0 on the log entry. + MacroAssembler::Jump isNotCell = m_jit.branchIfNotCell(value.jsValueRegs()); + m_jit.load32(MacroAssembler::Address(value.payloadGPR(), JSCell::structureIDOffset()), scratch3GPR); + m_jit.store32(scratch3GPR, MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::structureIDOffset())); + MacroAssembler::Jump skipIsCell = m_jit.jump(); + isNotCell.link(&m_jit); + m_jit.store32(TrustedImm32(0), MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::structureIDOffset())); + skipIsCell.link(&m_jit); + + // Store the typeLocation on the log entry. + TypeLocation* cachedTypeLocation = node->typeLocation(); + m_jit.move(TrustedImmPtr(cachedTypeLocation), scratch3GPR); + m_jit.storePtr(scratch3GPR, MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::locationOffset())); + + // Increment the current log entry. 
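// Schematic model of the type-profiler log that the ProfileType case writes into:
// a fixed-size entry array plus a cursor, drained by a runtime call once the cursor
// reaches the end. Field names and the capacity here are illustrative only.
#include <cstdint>
#include <cstddef>

struct LogEntry {
    uint64_t value;        // the observed JSValue bits
    uint32_t structureID;  // 0 when the value was not a cell
    const void* location;  // the TypeLocation that recorded it
};

struct TypeLog {
    static const size_t capacity = 1024;
    LogEntry entries[capacity];
    LogEntry* cursor = entries;

    void record(uint64_t value, uint32_t structureID, const void* location,
                void (*processFullLog)(TypeLog&))
    {
        *cursor = { value, structureID, location };
        ++cursor;                            // "increment the current log entry"
        if (cursor == entries + capacity) {  // the branchPtr against logEndPtr()
            processFullLog(*this);           // slow path, like operationProcessTypeProfilerLogDFG
            cursor = entries;
        }
    }
};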
+ m_jit.addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), scratch1GPR); + m_jit.storePtr(scratch1GPR, MacroAssembler::Address(scratch2GPR, TypeProfilerLog::currentLogEntryOffset())); + MacroAssembler::Jump clearLog = m_jit.branchPtr(MacroAssembler::Equal, scratch1GPR, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr())); + addSlowPathGenerator( + slowPathCall(clearLog, this, operationProcessTypeProfilerLogDFG, NoResult)); + + isTDZValue.link(&m_jit); + + noResult(node); + break; + } + case ProfileControlFlow: { + GPRTemporary scratch1(this); + BasicBlockLocation* basicBlockLocation = node->basicBlockLocation(); + basicBlockLocation->emitExecuteCode(m_jit, scratch1.gpr()); + noResult(node); break; } @@ -4653,19 +4805,24 @@ void SpeculativeJIT::compile(Node* node) emitInvalidationPoint(node); break; - case CheckWatchdogTimer: - speculationCheck( - WatchdogTimerFired, JSValueRegs(), 0, - m_jit.branchTest8( - JITCompiler::NonZero, - JITCompiler::AbsoluteAddress(m_jit.vm()->watchdog.timerDidFireAddress()))); + case CheckWatchdogTimer: { + ASSERT(m_jit.vm()->watchdog()); + GPRTemporary unused(this); + GPRReg unusedGPR = unused.gpr(); + + JITCompiler::Jump timerDidFire = m_jit.branchTest8(JITCompiler::NonZero, + JITCompiler::AbsoluteAddress(m_jit.vm()->watchdog()->timerDidFireAddress())); + + addSlowPathGenerator(slowPathCall(timerDidFire, this, operationHandleWatchdogTimer, unusedGPR)); break; + } case CountExecution: m_jit.add64(TrustedImm32(1), MacroAssembler::AbsoluteAddress(node->executionCounter()->address())); break; case Phantom: + case Check: DFG_NODE_DO_TO_CHILDREN(m_jit.graph(), node, speculate); noResult(node); break; @@ -4678,7 +4835,8 @@ void SpeculativeJIT::compile(Node* node) // This is a no-op. noResult(node); break; - + + case Unreachable: RELEASE_ASSERT_NOT_REACHED(); break; @@ -4686,16 +4844,34 @@ void SpeculativeJIT::compile(Node* node) case LastNodeType: case Phi: case Upsilon: - case GetArgument: case ExtractOSREntryLocal: case CheckTierUpInLoop: case CheckTierUpAtReturn: case CheckTierUpAndOSREnter: - case Int52ToDouble: - case Int52ToValue: + case CheckTierUpWithNestedTriggerAndOSREnter: + case Int52Rep: + case FiatInt52: + case Int52Constant: case CheckInBounds: case ArithIMul: - RELEASE_ASSERT_NOT_REACHED(); + case MultiGetByOffset: + case MultiPutByOffset: + case CheckBadCell: + case BottomValue: + case PhantomNewObject: + case PhantomNewFunction: + case PhantomNewGeneratorFunction: + case PhantomCreateActivation: + case PutHint: + case CheckStructureImmediate: + case MaterializeNewObject: + case MaterializeCreateActivation: + case PutStack: + case KillStack: + case GetStack: + case GetMyArgumentByVal: + case StringReplace: + DFG_CRASH(m_jit.graph(), node, "unexpected node in DFG backend"); break; } @@ -4706,35 +4882,45 @@ void SpeculativeJIT::compile(Node* node) use(node); } -#if ENABLE(GGC) void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueTagGPR, Edge valueUse, GPRReg scratch1, GPRReg scratch2) { JITCompiler::Jump isNotCell; if (!isKnownCell(valueUse.node())) isNotCell = m_jit.branch32(JITCompiler::NotEqual, valueTagGPR, JITCompiler::TrustedImm32(JSValue::CellTag)); - JITCompiler::Jump definitelyNotMarked = genericWriteBarrier(m_jit, ownerGPR, scratch1, scratch2); + JITCompiler::Jump ownerIsRememberedOrInEden = m_jit.jumpIfIsRememberedOrInEden(ownerGPR); storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2); - definitelyNotMarked.link(&m_jit); + ownerIsRememberedOrInEden.link(&m_jit); if (!isKnownCell(valueUse.node())) isNotCell.link(&m_jit); } 
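// The 32-bit write barrier above now keys off a per-cell state check
// (jumpIfIsRememberedOrInEden) rather than the old mark-bit test: only an old,
// already-scanned owner needs to be recorded. A minimal standalone sketch of that
// fast path, using simplified stand-ins for JSC's cell state and write-barrier
// buffer (the real types, states and layouts differ):
#include <cstdint>
#include <vector>

enum class CellState : uint8_t { New, OldRemembered, OldScanned };

struct Cell { CellState state = CellState::New; };

struct WriteBarrierBuffer {
    std::vector<Cell*> pending; // the real buffer is fixed-size and flushed when full
    void add(Cell* owner) { pending.push_back(owner); }
};

inline void writeBarrier(WriteBarrierBuffer& buffer, Cell* owner, Cell* storedValue)
{
    if (!storedValue)
        return; // storing a non-cell never needs a barrier
    if (owner->state != CellState::OldScanned)
        return; // owner is remembered or in eden: nothing to do
    owner->state = CellState::OldRemembered; // remember the owner exactly once
    buffer.add(owner);                       // revisit it during the next eden collection
}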
-void SpeculativeJIT::writeBarrier(JSCell* owner, GPRReg valueTagGPR, Edge valueUse, GPRReg scratch1, GPRReg scratch2) +void SpeculativeJIT::moveTrueTo(GPRReg gpr) { - JITCompiler::Jump isNotCell; - if (!isKnownCell(valueUse.node())) - isNotCell = m_jit.branch32(JITCompiler::NotEqual, valueTagGPR, JITCompiler::TrustedImm32(JSValue::CellTag)); + m_jit.move(TrustedImm32(1), gpr); +} - JITCompiler::Jump definitelyNotMarked = genericWriteBarrier(m_jit, owner); - storeToWriteBarrierBuffer(owner, scratch1, scratch2); - definitelyNotMarked.link(&m_jit); +void SpeculativeJIT::moveFalseTo(GPRReg gpr) +{ + m_jit.move(TrustedImm32(0), gpr); +} - if (!isKnownCell(valueUse.node())) - isNotCell.link(&m_jit); +void SpeculativeJIT::blessBoolean(GPRReg) +{ +} + +void SpeculativeJIT::compileArithRandom(Node* node) +{ + JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); + + flushRegisters(); + + FPRResult result(this); + callOperation(operationRandom, result.fpr(), globalObject); + // operationRandom does not raise any exception. + doubleResult(result.fpr(), node); } -#endif // ENABLE(GGC) #endif diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp index ea9f88613..dbcdfb5f7 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved. + * Copyright (C) 2011-2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,15 +28,24 @@ #if ENABLE(DFG_JIT) -#include "Arguments.h" #include "ArrayPrototype.h" +#include "CallFrameShuffler.h" #include "DFGAbstractInterpreterInlines.h" #include "DFGCallArrayAllocatorSlowPathGenerator.h" #include "DFGOperations.h" #include "DFGSlowPathGenerator.h" #include "Debugger.h" -#include "JSCJSValueInlines.h" +#include "DirectArguments.h" +#include "GetterSetter.h" +#include "JSCInlines.h" +#include "JSEnvironmentRecord.h" +#include "JSLexicalEnvironment.h" +#include "JSPropertyNameEnumerator.h" #include "ObjectPrototype.h" +#include "SetupVarargsFrame.h" +#include "SpillRegistersMode.h" +#include "TypeProfilerLog.h" +#include "Watchdog.h" namespace JSC { namespace DFG { @@ -78,21 +87,9 @@ GPRReg SpeculativeJIT::fillJSValue(Edge edge) GPRReg gpr = allocate(); if (edge->hasConstant()) { - if (isInt32Constant(edge.node())) { - info.fillJSValue(*m_stream, gpr, DataFormatJSInt32); - JSValue jsValue = jsNumber(valueOfInt32Constant(edge.node())); - m_jit.move(MacroAssembler::Imm64(JSValue::encode(jsValue)), gpr); - } else if (isNumberConstant(edge.node())) { - info.fillJSValue(*m_stream, gpr, DataFormatJSDouble); - JSValue jsValue(JSValue::EncodeAsDouble, valueOfNumberConstant(edge.node())); - m_jit.move(MacroAssembler::Imm64(JSValue::encode(jsValue)), gpr); - } else { - ASSERT(isJSConstant(edge.node())); - JSValue jsValue = valueOfJSConstant(edge.node()); - m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr); - info.fillJSValue(*m_stream, gpr, DataFormatJS); - } - + JSValue jsValue = edge->asJSValue(); + m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr); + info.fillJSValue(*m_stream, gpr, DataFormatJS); m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); } else { DataFormat spillFormat = info.spillFormat(); @@ -105,21 +102,9 @@ GPRReg SpeculativeJIT::fillJSValue(Edge edge) 
break; } - case DataFormatInt52: - case DataFormatStrictInt52: { - m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr); - boxInt52(gpr, gpr, spillFormat); - return gpr; - } - default: m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr); - if (spillFormat == DataFormatDouble) { - // Need to box the double, since we want a JSValue. - m_jit.sub64(GPRInfo::tagTypeNumberRegister, gpr); - spillFormat = DataFormatJSDouble; - } else - RELEASE_ASSERT(spillFormat & DataFormatJS); + DFG_ASSERT(m_jit.graph(), m_currentNode, spillFormat & DataFormatJS); break; } info.fillJSValue(*m_stream, gpr, spillFormat); @@ -142,28 +127,6 @@ GPRReg SpeculativeJIT::fillJSValue(Edge edge) return gpr; } - case DataFormatDouble: { - FPRReg fpr = info.fpr(); - GPRReg gpr = boxDouble(fpr); - - // Update all info - info.fillJSValue(*m_stream, gpr, DataFormatJSDouble); - m_fprs.release(fpr); - m_gprs.retain(gpr, virtualRegister, SpillOrderJS); - - return gpr; - } - - case DataFormatInt52: - case DataFormatStrictInt52: { - GPRReg gpr = info.gpr(); - lock(gpr); - GPRReg resultGPR = allocate(); - boxInt52(gpr, resultGPR, info.registerFormat()); - unlock(gpr); - return resultGPR; - } - case DataFormatCell: // No retag required on JSVALUE64! case DataFormatJS: @@ -178,20 +141,29 @@ GPRReg SpeculativeJIT::fillJSValue(Edge edge) case DataFormatBoolean: case DataFormatStorage: + case DataFormatDouble: + case DataFormatInt52: // this type currently never occurs - RELEASE_ASSERT_NOT_REACHED(); + DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format"); default: - RELEASE_ASSERT_NOT_REACHED(); + DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format"); return InvalidGPRReg; } } void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode) { + CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size()); + RegisterSet usedRegisters = this->usedRegisters(); + if (spillMode == DontSpill) { + // We've already flushed registers to the stack, we don't need to spill these. 
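// The DontSpill branch above simply drops the operand registers from the set the
// inline cache's slow path must preserve: everything live was already flushed to
// the stack, so saving those registers again around the slow call would be wasted
// work. A rough sketch of that idea with a made-up RegisterSet type (not JSC's
// actual API):
#include <bitset>

using Reg = unsigned;

struct RegisterSet {
    std::bitset<32> bits;
    void set(Reg r, bool value) { bits.set(r, value); }
    bool contains(Reg r) const { return bits.test(r); }
};

enum SpillMode { NeedToSpill, DontSpill };

RegisterSet registersToPreserveAroundIC(RegisterSet used, Reg baseReg, Reg resultReg, SpillMode mode)
{
    if (mode == DontSpill) {
        // Caller already flushed live values; the IC operands carry nothing worth saving.
        used.set(baseReg, false);
        used.set(resultReg, false);
    }
    return used;
}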
+ usedRegisters.set(baseGPR, false); + usedRegisters.set(resultGPR, false); + } JITGetByIdGenerator gen( - m_jit.codeBlock(), codeOrigin, usedRegisters(), GPRInfo::callFrameRegister, - JSValueRegs(baseGPR), JSValueRegs(resultGPR), spillMode != NeedToSpill); + m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, JSValueRegs(baseGPR), + JSValueRegs(resultGPR)); gen.generateFastPath(m_jit); JITCompiler::JumpList slowCases; @@ -199,20 +171,28 @@ void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg slowCases.append(slowPathTarget); slowCases.append(gen.slowPathJump()); - OwnPtr<SlowPathGenerator> slowPath = slowPathCall( + auto slowPath = slowPathCall( slowCases, this, operationGetByIdOptimize, resultGPR, gen.stubInfo(), baseGPR, identifierUID(identifierNumber), spillMode); m_jit.addGetById(gen, slowPath.get()); - addSlowPathGenerator(slowPath.release()); + addSlowPathGenerator(WTFMove(slowPath)); } -void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg valueGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget) +void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg valueGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode) { + CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size()); + RegisterSet usedRegisters = this->usedRegisters(); + if (spillMode == DontSpill) { + // We've already flushed registers to the stack, we don't need to spill these. + usedRegisters.set(baseGPR, false); + usedRegisters.set(valueGPR, false); + } + JITPutByIdGenerator gen( - m_jit.codeBlock(), codeOrigin, usedRegisters(), GPRInfo::callFrameRegister, - JSValueRegs(baseGPR), JSValueRegs(valueGPR), scratchGPR, false, - m_jit.ecmaModeFor(codeOrigin), putKind); + m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, JSValueRegs(baseGPR), + JSValueRegs(valueGPR), scratchGPR, m_jit.ecmaModeFor(codeOrigin), putKind); + gen.generateFastPath(m_jit); JITCompiler::JumpList slowCases; @@ -220,152 +200,132 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg slowCases.append(slowPathTarget); slowCases.append(gen.slowPathJump()); - OwnPtr<SlowPathGenerator> slowPath = slowPathCall( + auto slowPath = slowPathCall( slowCases, this, gen.slowPathFunction(), NoResult, gen.stubInfo(), valueGPR, baseGPR, identifierUID(identifierNumber)); m_jit.addPutById(gen, slowPath.get()); - addSlowPathGenerator(slowPath.release()); + addSlowPathGenerator(WTFMove(slowPath)); } -void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert) +void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNullOrUndefined(Edge operand) { - JSValueOperand arg(this, operand); + ASSERT_WITH_MESSAGE(!masqueradesAsUndefinedWatchpointIsStillValid() || !isKnownCell(operand.node()), "The Compare should have been eliminated, it is known to be always false."); + + JSValueOperand arg(this, operand, ManualOperandSpeculation); GPRReg argGPR = arg.gpr(); - GPRTemporary result(this, Reuse, arg); + GPRTemporary result(this); GPRReg resultGPR = result.gpr(); - - JITCompiler::Jump notCell; - - JITCompiler::Jump notMasqueradesAsUndefined; - if (masqueradesAsUndefinedWatchpointIsStillValid()) { - if (!isKnownCell(operand.node())) - notCell = m_jit.branchTest64(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister); - m_jit.move(invert ? 
TrustedImm32(1) : TrustedImm32(0), resultGPR); - notMasqueradesAsUndefined = m_jit.jump(); + m_jit.move(TrustedImm32(0), resultGPR); + + JITCompiler::JumpList done; + if (masqueradesAsUndefinedWatchpointIsStillValid()) { + if (!isKnownNotCell(operand.node())) + done.append(m_jit.branchIfCell(JSValueRegs(argGPR))); } else { GPRTemporary localGlobalObject(this); GPRTemporary remoteGlobalObject(this); + GPRTemporary scratch(this); + JITCompiler::Jump notCell; if (!isKnownCell(operand.node())) - notCell = m_jit.branchTest64(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister); - - m_jit.loadPtr(JITCompiler::Address(argGPR, JSCell::structureOffset()), resultGPR); - JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::NonZero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined)); - - m_jit.move(invert ? TrustedImm32(1) : TrustedImm32(0), resultGPR); - notMasqueradesAsUndefined = m_jit.jump(); + notCell = m_jit.branchIfNotCell(JSValueRegs(argGPR)); + + JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8( + JITCompiler::Zero, + JITCompiler::Address(argGPR, JSCell::typeInfoFlagsOffset()), + JITCompiler::TrustedImm32(MasqueradesAsUndefined)); + done.append(isNotMasqueradesAsUndefined); - isMasqueradesAsUndefined.link(&m_jit); GPRReg localGlobalObjectGPR = localGlobalObject.gpr(); GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr(); - m_jit.move(JITCompiler::TrustedImmPtr(m_jit.graph().globalObjectFor(operand->codeOrigin)), localGlobalObjectGPR); + m_jit.move(JITCompiler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR); + m_jit.emitLoadStructure(argGPR, resultGPR, scratch.gpr()); m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR); - m_jit.comparePtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, resultGPR); + m_jit.comparePtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, resultGPR); + done.append(m_jit.jump()); + if (!isKnownCell(operand.node())) + notCell.link(&m_jit); } - if (!isKnownCell(operand.node())) { - JITCompiler::Jump done = m_jit.jump(); - - notCell.link(&m_jit); - + if (!isKnownNotOther(operand.node())) { m_jit.move(argGPR, resultGPR); m_jit.and64(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR); - m_jit.compare64(invert ? 
JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(ValueNull), resultGPR); - - done.link(&m_jit); + m_jit.compare64(JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(ValueNull), resultGPR); } - - notMasqueradesAsUndefined.link(&m_jit); + + done.link(&m_jit); m_jit.or32(TrustedImm32(ValueFalse), resultGPR); jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean); } -void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, Node* branchNode, bool invert) +void SpeculativeJIT::nonSpeculativePeepholeBranchNullOrUndefined(Edge operand, Node* branchNode) { - BasicBlock* taken = branchNode->takenBlock(); - BasicBlock* notTaken = branchNode->notTakenBlock(); - - if (taken == nextBlock()) { - invert = !invert; - BasicBlock* tmp = taken; - taken = notTaken; - notTaken = tmp; - } + ASSERT_WITH_MESSAGE(!masqueradesAsUndefinedWatchpointIsStillValid() || !isKnownCell(operand.node()), "The Compare should have been eliminated, it is known to be always false."); + + BasicBlock* taken = branchNode->branchData()->taken.block; + BasicBlock* notTaken = branchNode->branchData()->notTaken.block; - JSValueOperand arg(this, operand); + JSValueOperand arg(this, operand, ManualOperandSpeculation); GPRReg argGPR = arg.gpr(); GPRTemporary result(this, Reuse, arg); GPRReg resultGPR = result.gpr(); - - JITCompiler::Jump notCell; - - if (masqueradesAsUndefinedWatchpointIsStillValid()) { - if (!isKnownCell(operand.node())) - notCell = m_jit.branchTest64(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister); - jump(invert ? taken : notTaken, ForceJump); + // First, handle the case where "operand" is a cell. + if (masqueradesAsUndefinedWatchpointIsStillValid()) { + if (!isKnownNotCell(operand.node())) { + JITCompiler::Jump isCell = m_jit.branchIfCell(JSValueRegs(argGPR)); + addBranch(isCell, notTaken); + } } else { GPRTemporary localGlobalObject(this); GPRTemporary remoteGlobalObject(this); + GPRTemporary scratch(this); + JITCompiler::Jump notCell; if (!isKnownCell(operand.node())) - notCell = m_jit.branchTest64(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister); - - m_jit.loadPtr(JITCompiler::Address(argGPR, JSCell::structureOffset()), resultGPR); - branchTest8(JITCompiler::Zero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), invert ? taken : notTaken); + notCell = m_jit.branchIfNotCell(JSValueRegs(argGPR)); + + branchTest8(JITCompiler::Zero, + JITCompiler::Address(argGPR, JSCell::typeInfoFlagsOffset()), + JITCompiler::TrustedImm32(MasqueradesAsUndefined), notTaken); GPRReg localGlobalObjectGPR = localGlobalObject.gpr(); GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr(); - m_jit.move(TrustedImmPtr(m_jit.graph().globalObjectFor(operand->codeOrigin)), localGlobalObjectGPR); + m_jit.move(TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR); + m_jit.emitLoadStructure(argGPR, resultGPR, scratch.gpr()); m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR); - branchPtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, invert ? 
notTaken : taken); + branchPtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, taken); + + if (!isKnownCell(operand.node())) { + jump(notTaken, ForceJump); + notCell.link(&m_jit); + } } - - if (!isKnownCell(operand.node())) { - jump(notTaken, ForceJump); - - notCell.link(&m_jit); - + + if (isKnownNotOther(operand.node())) + jump(notTaken); + else { + JITCompiler::RelationalCondition condition = JITCompiler::Equal; + if (taken == nextBlock()) { + condition = JITCompiler::NotEqual; + std::swap(taken, notTaken); + } m_jit.move(argGPR, resultGPR); m_jit.and64(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR); - branch64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm64(ValueNull), taken); - } - - jump(notTaken); -} - -bool SpeculativeJIT::nonSpeculativeCompareNull(Node* node, Edge operand, bool invert) -{ - unsigned branchIndexInBlock = detectPeepHoleBranch(); - if (branchIndexInBlock != UINT_MAX) { - Node* branchNode = m_block->at(branchIndexInBlock); - - RELEASE_ASSERT(node->adjustedRefCount() == 1); - - nonSpeculativePeepholeBranchNull(operand, branchNode, invert); - - use(node->child1()); - use(node->child2()); - m_indexInBlock = branchIndexInBlock; - m_currentNode = branchNode; - - return true; + branch64(condition, resultGPR, JITCompiler::TrustedImm64(ValueNull), taken); + jump(notTaken); } - - nonSpeculativeNonPeepholeCompareNull(operand, invert); - - return false; } void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction) { - BasicBlock* taken = branchNode->takenBlock(); - BasicBlock* notTaken = branchNode->notTakenBlock(); + BasicBlock* taken = branchNode->branchData()->taken.block; + BasicBlock* notTaken = branchNode->branchData()->notTaken.block; JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero; @@ -387,7 +347,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, JITCompiler::JumpList slowPath; if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) { - GPRResult result(this); + GPRFlushedCallResult result(this); GPRReg resultGPR = result.gpr(); arg1.use(); @@ -395,6 +355,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, flushRegisters(); callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR); + m_jit.exceptionCheck(); branchTest32(callResultCondition, resultGPR, taken); } else { @@ -419,6 +380,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, silentSpillAllRegisters(resultGPR); callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR); silentFillAllRegisters(resultGPR); + m_jit.exceptionCheck(); branchTest32(callResultCondition, resultGPR, taken); } @@ -438,7 +400,7 @@ public: JumpType from, SpeculativeJIT* jit, S_JITOperation_EJJ function, GPRReg result, GPRReg arg1, GPRReg arg2) : CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg>( - from, jit, function, NeedToSpill, result) + from, jit, function, NeedToSpill, ExceptionCheckRequirement::CheckNeeded, result) , m_arg1(arg1) , m_arg2(arg2) { @@ -470,7 +432,7 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler JITCompiler::JumpList slowPath; if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) { - GPRResult result(this); + GPRFlushedCallResult result(this); GPRReg resultGPR = result.gpr(); arg1.use(); @@ -478,6 +440,7 @@ void 
SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler flushRegisters(); callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR); + m_jit.exceptionCheck(); m_jit.or32(TrustedImm32(ValueFalse), resultGPR); jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly); @@ -497,9 +460,8 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler m_jit.or32(TrustedImm32(ValueFalse), resultGPR); if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) { - addSlowPathGenerator(adoptPtr( - new CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>( - slowPath, this, helperFunction, resultGPR, arg1GPR, arg2GPR))); + addSlowPathGenerator(std::make_unique<CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>>( + slowPath, this, helperFunction, resultGPR, arg1GPR, arg2GPR)); } jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly); @@ -508,8 +470,8 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode, bool invert) { - BasicBlock* taken = branchNode->takenBlock(); - BasicBlock* notTaken = branchNode->notTakenBlock(); + BasicBlock* taken = branchNode->branchData()->taken.block; + BasicBlock* notTaken = branchNode->branchData()->notTaken.block; // The branch instruction will branch to the taken block. // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through. @@ -539,6 +501,7 @@ void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode silentSpillAllRegisters(resultGPR); callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR); silentFillAllRegisters(resultGPR); + m_jit.exceptionCheck(); branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR, taken); } else { @@ -565,6 +528,7 @@ void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode silentSpillAllRegisters(resultGPR); callOperation(operationCompareStrictEq, resultGPR, arg1GPR, arg2GPR); silentFillAllRegisters(resultGPR); + m_jit.exceptionCheck(); branchTest32(invert ? 
JITCompiler::Zero : JITCompiler::NonZero, resultGPR, taken); } @@ -600,6 +564,7 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert) silentSpillAllRegisters(resultGPR); callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR); silentFillAllRegisters(resultGPR); + m_jit.exceptionCheck(); m_jit.and64(JITCompiler::TrustedImm32(1), resultGPR); m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR); @@ -629,11 +594,9 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert) m_jit.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert))), resultGPR); - addSlowPathGenerator( - adoptPtr( - new CompareAndBoxBooleanSlowPathGenerator<MacroAssembler::JumpList>( + addSlowPathGenerator(std::make_unique<CompareAndBoxBooleanSlowPathGenerator<MacroAssembler::JumpList>>( slowPathCases, this, operationCompareStrictEq, resultGPR, arg1GPR, - arg2GPR))); + arg2GPR)); done.link(&m_jit); } @@ -641,74 +604,282 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert) jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly); } -void SpeculativeJIT::emitCall(Node* node) +void SpeculativeJIT::compileMiscStrictEq(Node* node) { - if (node->op() != Call) - RELEASE_ASSERT(node->op() == Construct); - - // For constructors, the this argument is not passed but we have to make space - // for it. - int dummyThisArgument = node->op() == Call ? 0 : 1; - - CallLinkInfo::CallType callType = node->op() == Call ? CallLinkInfo::Call : CallLinkInfo::Construct; - - Edge calleeEdge = m_jit.graph().m_varArgChildren[node->firstChild()]; - JSValueOperand callee(this, calleeEdge); - GPRReg calleeGPR = callee.gpr(); - use(calleeEdge); - - // The call instruction's first child is the function; the subsequent children are the - // arguments. 
- int numPassedArgs = node->numChildren() - 1; + JSValueOperand op1(this, node->child1(), ManualOperandSpeculation); + JSValueOperand op2(this, node->child2(), ManualOperandSpeculation); + GPRTemporary result(this); - int numArgs = numPassedArgs + dummyThisArgument; + if (node->child1().useKind() == MiscUse) + speculateMisc(node->child1(), op1.jsValueRegs()); + if (node->child2().useKind() == MiscUse) + speculateMisc(node->child2(), op2.jsValueRegs()); - m_jit.store32(MacroAssembler::TrustedImm32(numArgs), calleeFramePayloadSlot(numArgs, JSStack::ArgumentCount)); - m_jit.store64(GPRInfo::callFrameRegister, calleeFrameCallerFrame(numArgs)); - m_jit.store64(calleeGPR, calleeFrameSlot(numArgs, JSStack::Callee)); + m_jit.compare64(JITCompiler::Equal, op1.gpr(), op2.gpr(), result.gpr()); + m_jit.or32(TrustedImm32(ValueFalse), result.gpr()); + jsValueResult(result.gpr(), node, DataFormatJSBoolean); +} + +void SpeculativeJIT::emitCall(Node* node) +{ + CallLinkInfo::CallType callType; + bool isVarargs = false; + bool isForwardVarargs = false; + bool isTail = false; + bool isEmulatedTail = false; + switch (node->op()) { + case Call: + callType = CallLinkInfo::Call; + break; + case TailCall: + callType = CallLinkInfo::TailCall; + isTail = true; + break; + case TailCallInlinedCaller: + callType = CallLinkInfo::Call; + isEmulatedTail = true; + break; + case Construct: + callType = CallLinkInfo::Construct; + break; + case CallVarargs: + callType = CallLinkInfo::CallVarargs; + isVarargs = true; + break; + case TailCallVarargs: + callType = CallLinkInfo::TailCallVarargs; + isVarargs = true; + isTail = true; + break; + case TailCallVarargsInlinedCaller: + callType = CallLinkInfo::CallVarargs; + isVarargs = true; + isEmulatedTail = true; + break; + case ConstructVarargs: + callType = CallLinkInfo::ConstructVarargs; + isVarargs = true; + break; + case CallForwardVarargs: + callType = CallLinkInfo::CallVarargs; + isForwardVarargs = true; + break; + case ConstructForwardVarargs: + callType = CallLinkInfo::ConstructVarargs; + isForwardVarargs = true; + break; + case TailCallForwardVarargs: + callType = CallLinkInfo::TailCallVarargs; + isTail = true; + isForwardVarargs = true; + break; + case TailCallForwardVarargsInlinedCaller: + callType = CallLinkInfo::CallVarargs; + isEmulatedTail = true; + isForwardVarargs = true; + break; + default: + DFG_CRASH(m_jit.graph(), node, "bad node type"); + break; + } + + GPRReg calleeGPR; + CallFrameShuffleData shuffleData; - for (int i = 0; i < numPassedArgs; i++) { - Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i]; - JSValueOperand arg(this, argEdge); - GPRReg argGPR = arg.gpr(); - use(argEdge); + // Gotta load the arguments somehow. Varargs is trickier. 
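// Illustrative aside, not part of the patch: a condensed, representative view of the
// classification the new emitCall() switch above performs, from DFG call opcodes to a
// CallLinkInfo call type plus the tail/varargs flags consumed below. The enum, struct
// and function names in this sketch are invented for the example; only the mapping is
// taken from the switch itself, and only a subset of the opcodes is shown.
namespace sketch {

enum class Op { Call, TailCall, TailCallInlinedCaller, Construct, CallVarargs, CallForwardVarargs };

struct CallKind {
    const char* linkInfoType; // which CallLinkInfo::CallType would be recorded
    bool isTail;              // a real tail call: the caller's frame is replaced
    bool isEmulatedTail;      // tail-call semantics inside an inlined caller
    bool isVarargs;           // the argument count is only known at run time
    bool isForwardVarargs;    // the caller's own arguments are forwarded
};

inline CallKind classify(Op op)
{
    switch (op) {
    case Op::Call:                  return { "Call",        false, false, false, false };
    case Op::TailCall:              return { "TailCall",    true,  false, false, false };
    case Op::TailCallInlinedCaller: return { "Call",        false, true,  false, false };
    case Op::Construct:             return { "Construct",   false, false, false, false };
    case Op::CallVarargs:           return { "CallVarargs", false, false, true,  false };
    case Op::CallForwardVarargs:    return { "CallVarargs", false, false, false, true  };
    }
    return { "Call", false, false, false, false };
}

} // namespace sketch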
+ if (isVarargs || isForwardVarargs) { + CallVarargsData* data = node->callVarargsData(); + + GPRReg resultGPR; + unsigned numUsedStackSlots = m_jit.graph().m_nextMachineLocal; + + if (isForwardVarargs) { + flushRegisters(); + use(node->child2()); + + GPRReg scratchGPR1; + GPRReg scratchGPR2; + GPRReg scratchGPR3; + + scratchGPR1 = JITCompiler::selectScratchGPR(); + scratchGPR2 = JITCompiler::selectScratchGPR(scratchGPR1); + scratchGPR3 = JITCompiler::selectScratchGPR(scratchGPR1, scratchGPR2); + + m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR2); + JITCompiler::JumpList slowCase; + emitSetupVarargsFrameFastCase(m_jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, node->child2()->origin.semantic.inlineCallFrame, data->firstVarArgOffset, slowCase); + JITCompiler::Jump done = m_jit.jump(); + slowCase.link(&m_jit); + callOperation(operationThrowStackOverflowForVarargs); + m_jit.exceptionCheck(); + m_jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow); + done.link(&m_jit); + resultGPR = scratchGPR2; + } else { + GPRReg argumentsGPR; + GPRReg scratchGPR1; + GPRReg scratchGPR2; + GPRReg scratchGPR3; + + auto loadArgumentsGPR = [&] (GPRReg reservedGPR) { + if (reservedGPR != InvalidGPRReg) + lock(reservedGPR); + JSValueOperand arguments(this, node->child2()); + argumentsGPR = arguments.gpr(); + if (reservedGPR != InvalidGPRReg) + unlock(reservedGPR); + flushRegisters(); + + scratchGPR1 = JITCompiler::selectScratchGPR(argumentsGPR, reservedGPR); + scratchGPR2 = JITCompiler::selectScratchGPR(argumentsGPR, scratchGPR1, reservedGPR); + scratchGPR3 = JITCompiler::selectScratchGPR(argumentsGPR, scratchGPR1, scratchGPR2, reservedGPR); + }; + + loadArgumentsGPR(InvalidGPRReg); + + DFG_ASSERT(m_jit.graph(), node, isFlushed()); + + // Right now, arguments is in argumentsGPR and the register file is flushed. + callOperation(operationSizeFrameForVarargs, GPRInfo::returnValueGPR, argumentsGPR, numUsedStackSlots, data->firstVarArgOffset); + m_jit.exceptionCheck(); + + // Now we have the argument count of the callee frame, but we've lost the arguments operand. + // Reconstruct the arguments operand while preserving the callee frame. + loadArgumentsGPR(GPRInfo::returnValueGPR); + m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR1); + emitSetVarargsFrame(m_jit, GPRInfo::returnValueGPR, false, scratchGPR1, scratchGPR1); + m_jit.addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), scratchGPR1, JITCompiler::stackPointerRegister); + + callOperation(operationSetupVarargsFrame, GPRInfo::returnValueGPR, scratchGPR1, argumentsGPR, data->firstVarArgOffset, GPRInfo::returnValueGPR); + m_jit.exceptionCheck(); + resultGPR = GPRInfo::returnValueGPR; + } + + m_jit.addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), resultGPR, JITCompiler::stackPointerRegister); - m_jit.store64(argGPR, calleeArgumentSlot(numArgs, i + dummyThisArgument)); + DFG_ASSERT(m_jit.graph(), node, isFlushed()); + + // We don't need the arguments array anymore. + if (isVarargs) + use(node->child2()); + + // Now set up the "this" argument. + JSValueOperand thisArgument(this, node->child3()); + GPRReg thisArgumentGPR = thisArgument.gpr(); + thisArgument.use(); + + m_jit.store64(thisArgumentGPR, JITCompiler::calleeArgumentSlot(0)); + } else { + // The call instruction's first child is the function; the subsequent children are the + // arguments. 
+ int numPassedArgs = node->numChildren() - 1; + + if (node->op() == TailCall) { + Edge calleeEdge = m_jit.graph().child(node, 0); + JSValueOperand callee(this, calleeEdge); + calleeGPR = callee.gpr(); + callee.use(); + + shuffleData.tagTypeNumber = GPRInfo::tagTypeNumberRegister; + shuffleData.numLocals = m_jit.graph().frameRegisterCount(); + shuffleData.callee = ValueRecovery::inGPR(calleeGPR, DataFormatJS); + shuffleData.args.resize(numPassedArgs); + + for (int i = 0; i < numPassedArgs; ++i) { + Edge argEdge = m_jit.graph().varArgChild(node, i + 1); + GenerationInfo& info = generationInfo(argEdge.node()); + use(argEdge); + shuffleData.args[i] = info.recovery(argEdge->virtualRegister()); + } + + shuffleData.setupCalleeSaveRegisters(m_jit.codeBlock()); + } else { + m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs), JITCompiler::calleeFramePayloadSlot(JSStack::ArgumentCount)); + + for (int i = 0; i < numPassedArgs; i++) { + Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i]; + JSValueOperand arg(this, argEdge); + GPRReg argGPR = arg.gpr(); + use(argEdge); + + m_jit.store64(argGPR, JITCompiler::calleeArgumentSlot(i)); + } + } } - flushRegisters(); + if (node->op() != TailCall) { + Edge calleeEdge = m_jit.graph().child(node, 0); + JSValueOperand callee(this, calleeEdge); + calleeGPR = callee.gpr(); + callee.use(); + m_jit.store64(calleeGPR, JITCompiler::calleeFrameSlot(JSStack::Callee)); - GPRResult result(this); - GPRReg resultGPR = result.gpr(); + flushRegisters(); + } - JITCompiler::DataLabelPtr targetToCheck; - JITCompiler::JumpList slowPath; + CodeOrigin staticOrigin = node->origin.semantic; + ASSERT(!isTail || !staticOrigin.inlineCallFrame || !staticOrigin.inlineCallFrame->getCallerSkippingTailCalls()); + ASSERT(!isEmulatedTail || (staticOrigin.inlineCallFrame && staticOrigin.inlineCallFrame->getCallerSkippingTailCalls())); + CodeOrigin dynamicOrigin = + isEmulatedTail ? *staticOrigin.inlineCallFrame->getCallerSkippingTailCalls() : staticOrigin; - m_jit.emitStoreCodeOrigin(node->codeOrigin); - - m_jit.addPtr(TrustedImm32(calleeFrameOffset(numArgs)), GPRInfo::callFrameRegister); + CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(dynamicOrigin, m_stream->size()); + m_jit.emitStoreCallSiteIndex(callSite); - slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(0))); + CallLinkInfo* callLinkInfo = m_jit.codeBlock()->addCallLinkInfo(); - m_jit.loadPtr(MacroAssembler::Address(calleeGPR, OBJECT_OFFSETOF(JSFunction, m_scope)), resultGPR); - m_jit.store64(resultGPR, MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain)); + JITCompiler::DataLabelPtr targetToCheck; + JITCompiler::Jump slowPath = m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(0)); + + if (isTail) { + if (node->op() == TailCall) { + callLinkInfo->setFrameShuffleData(shuffleData); + CallFrameShuffler(m_jit, shuffleData).prepareForTailCall(); + } else { + m_jit.emitRestoreCalleeSaves(); + m_jit.prepareForTailCallSlow(); + } + } + + JITCompiler::Call fastCall = isTail ? 
m_jit.nearTailCall() : m_jit.nearCall(); - JITCompiler::Call fastCall = m_jit.nearCall(); - JITCompiler::Jump done = m_jit.jump(); - + slowPath.link(&m_jit); - - m_jit.move(calleeGPR, GPRInfo::regT0); // Callee needs to be in regT0 + + if (node->op() == TailCall) { + CallFrameShuffler callFrameShuffler(m_jit, shuffleData); + callFrameShuffler.setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT0)); + callFrameShuffler.prepareForSlowPath(); + } else { + m_jit.move(calleeGPR, GPRInfo::regT0); // Callee needs to be in regT0 + + if (isTail) + m_jit.emitRestoreCalleeSaves(); // This needs to happen after we moved calleeGPR to regT0 + } + + m_jit.move(MacroAssembler::TrustedImmPtr(callLinkInfo), GPRInfo::regT2); // Link info needs to be in regT2 JITCompiler::Call slowCall = m_jit.nearCall(); - + done.link(&m_jit); - - m_jit.move(GPRInfo::returnValueGPR, resultGPR); - - jsValueResult(resultGPR, m_currentNode, DataFormatJS, UseChildrenCalledExplicitly); - - m_jit.addJSCall(fastCall, slowCall, targetToCheck, callType, calleeGPR, m_currentNode->codeOrigin); + + if (isTail) + m_jit.abortWithReason(JITDidReturnFromTailCall); + else { + GPRFlushedCallResult result(this); + GPRReg resultGPR = result.gpr(); + m_jit.move(GPRInfo::returnValueGPR, resultGPR); + + jsValueResult(resultGPR, m_currentNode, DataFormatJS, UseChildrenCalledExplicitly); + + // After the calls are done, we need to reestablish our stack + // pointer. We rely on this for varargs calls, calls with arity + // mismatch (the callframe is slided) and tail calls. + m_jit.addPtr(TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, JITCompiler::stackPointerRegister); + } + + callLinkInfo->setUpCall(callType, m_currentNode->origin.semantic, calleeGPR); + m_jit.addJSCall(fastCall, slowCall, targetToCheck, callLinkInfo); } // Clang should allow unreachable [[clang::fallthrough]] in template functions if any template expansion uses it @@ -725,24 +896,25 @@ GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnF AbstractValue& value = m_state.forNode(edge); SpeculatedType type = value.m_type; ASSERT(edge.useKind() != KnownInt32Use || !(value.m_type & ~SpecInt32)); + m_interpreter.filter(value, SpecInt32); + if (value.isClear()) { + terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); + returnFormat = DataFormatInt32; + return allocate(); + } + VirtualRegister virtualRegister = edge->virtualRegister(); GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); switch (info.registerFormat()) { case DataFormatNone: { - if ((edge->hasConstant() && !isInt32Constant(edge.node())) || info.spillFormat() == DataFormatDouble) { - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); - returnFormat = DataFormatInt32; - return allocate(); - } - GPRReg gpr = allocate(); if (edge->hasConstant()) { m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); - ASSERT(isInt32Constant(edge.node())); - m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(edge.node())), gpr); + ASSERT(edge->isInt32Constant()); + m_jit.move(MacroAssembler::Imm32(edge->asInt32()), gpr); info.fillInt32(*m_stream, gpr); returnFormat = DataFormatInt32; return gpr; @@ -750,7 +922,7 @@ GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnF DataFormat spillFormat = info.spillFormat(); - RELEASE_ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInt32 || spillFormat == DataFormatInt52 || spillFormat == DataFormatStrictInt52); + DFG_ASSERT(m_jit.graph(), 
m_currentNode, (spillFormat & DataFormatJS) || spillFormat == DataFormatInt32); m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); @@ -771,36 +943,6 @@ GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnF returnFormat = DataFormatJSInt32; return gpr; } - if (spillFormat == DataFormatInt52 || spillFormat == DataFormatStrictInt52) { - // Generally, this can only happen if we've already proved that the - // value is an int32. That's because if a value originated as a JSValue - // then we would speculate that it's an int32 before representing it as - // an int52. Otherwise, if we knowingly produced an int52, then we would - // be boxing it into a value using Int52ToValue. This assertion is valid - // only because Int52 is something that we introduce at prediction time. - // However: we may have an int32-producing node replaced by an - // int52-producing node due to CSE. So we must do a check. - RELEASE_ASSERT(!(type & ~SpecMachineInt)); - if (type & SpecInt52) { - GPRReg temp = allocate(); - m_jit.signExtend32ToPtr(gpr, temp); - // Currently, we can't supply value profiling information here. :-/ - speculationCheck( - BadType, JSValueRegs(), 0, - m_jit.branch64(MacroAssembler::NotEqual, gpr, temp)); - unlock(temp); - } - if (spillFormat == DataFormatStrictInt52) - m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr); - else { - m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr); - m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr); - m_jit.zeroExtend32ToPtr(gpr, gpr); - } - info.fillInt32(*m_stream, gpr); - returnFormat = DataFormatInt32; - return gpr; - } m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr); // Fill as JSValue, and fall through. @@ -810,7 +952,7 @@ GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnF } case DataFormatJS: { - RELEASE_ASSERT(!(type & SpecInt52)); + DFG_ASSERT(m_jit.graph(), m_currentNode, !(type & SpecInt52)); // Check the value is an integer. GPRReg gpr = info.gpr(); m_gprs.lock(gpr); @@ -859,61 +1001,19 @@ GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnF return gpr; } - case DataFormatStrictInt52: - case DataFormatInt52: { - GPRReg gpr = info.gpr(); - GPRReg result; - DataFormat oldFormat = info.registerFormat(); - if (m_gprs.isLocked(gpr)) { - result = allocate(); - m_jit.move(gpr, result); - } else { - lock(gpr); - info.fillInt32(*m_stream, gpr); - result = gpr; - } - RELEASE_ASSERT(!(type & ~SpecMachineInt)); - if (oldFormat == DataFormatInt52) - m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), result); - if (type & SpecInt52) { - GPRReg temp = allocate(); - m_jit.signExtend32ToPtr(result, temp); - // Currently, we can't supply value profiling information here. 
:-/ - speculationCheck( - BadType, JSValueRegs(), 0, - m_jit.branch64(MacroAssembler::NotEqual, result, temp)); - unlock(temp); - } - m_jit.zeroExtend32ToPtr(result, result); - returnFormat = DataFormatInt32; - return gpr; - } - - case DataFormatDouble: - case DataFormatJSDouble: { - if (edge->hasConstant() && isInt32Constant(edge.node())) { - GPRReg gpr = allocate(); - ASSERT(isInt32Constant(edge.node())); - m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(edge.node())), gpr); - returnFormat = DataFormatInt32; - return gpr; - } - FALLTHROUGH; - } + case DataFormatJSDouble: case DataFormatCell: case DataFormatBoolean: case DataFormatJSCell: - case DataFormatJSBoolean: { - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); - returnFormat = DataFormatInt32; - return allocate(); - } - + case DataFormatJSBoolean: + case DataFormatDouble: case DataFormatStorage: - RELEASE_ASSERT_NOT_REACHED(); + case DataFormatInt52: + case DataFormatStrictInt52: + DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format"); default: - RELEASE_ASSERT_NOT_REACHED(); + DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format"); return InvalidGPRReg; } } @@ -930,7 +1030,7 @@ GPRReg SpeculativeJIT::fillSpeculateInt32Strict(Edge edge) { DataFormat mustBeDataFormatInt32; GPRReg result = fillSpeculateInt32Internal<true>(edge, mustBeDataFormatInt32); - RELEASE_ASSERT(mustBeDataFormatInt32 == DataFormatInt32); + DFG_ASSERT(m_jit.graph(), m_currentNode, mustBeDataFormatInt32 == DataFormatInt32); return result; } @@ -938,22 +1038,22 @@ GPRReg SpeculativeJIT::fillSpeculateInt52(Edge edge, DataFormat desiredFormat) { ASSERT(desiredFormat == DataFormatInt52 || desiredFormat == DataFormatStrictInt52); AbstractValue& value = m_state.forNode(edge); - SpeculatedType type = value.m_type; + m_interpreter.filter(value, SpecMachineInt); + if (value.isClear()) { + terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); + return allocate(); + } + VirtualRegister virtualRegister = edge->virtualRegister(); GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); switch (info.registerFormat()) { case DataFormatNone: { - if ((edge->hasConstant() && !valueOfJSConstant(edge.node()).isMachineInt()) || info.spillFormat() == DataFormatDouble) { - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); - return allocate(); - } - GPRReg gpr = allocate(); if (edge->hasConstant()) { - JSValue jsValue = valueOfJSConstant(edge.node()); + JSValue jsValue = edge->asJSValue(); ASSERT(jsValue.isMachineInt()); m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); int64_t value = jsValue.asMachineInt(); @@ -966,82 +1066,21 @@ GPRReg SpeculativeJIT::fillSpeculateInt52(Edge edge, DataFormat desiredFormat) DataFormat spillFormat = info.spillFormat(); - RELEASE_ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInt32 || spillFormat == DataFormatInt52 || spillFormat == DataFormatStrictInt52); + DFG_ASSERT(m_jit.graph(), m_currentNode, spillFormat == DataFormatInt52 || spillFormat == DataFormatStrictInt52); m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); - if (spillFormat == DataFormatJSInt32 || spillFormat == DataFormatInt32) { - // If we know this was spilled as an integer we can fill without checking. 
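// Illustrative aside, not part of the patch: the two Int52 register formats that the
// spill/fill code in this function converts between. DataFormatStrictInt52 holds the
// plain machine integer, while DataFormatInt52 holds the same value shifted left by
// JSValue::int52ShiftAmount, which is why the conversions here are a single
// lshift64/rshift64. The numeric shift amount below is an assumption made for the
// sketch; the real code only ever uses the named constant.
#include <cassert>
#include <cstdint>

constexpr int kAssumedInt52ShiftAmount = 16; // stands in for JSValue::int52ShiftAmount

inline int64_t strictInt52ToInt52(int64_t strictValue) { return strictValue << kAssumedInt52ShiftAmount; }
inline int64_t int52ToStrictInt52(int64_t shiftedValue) { return shiftedValue >> kAssumedInt52ShiftAmount; }

int main()
{
    int64_t value = 0x123456789LL;                                  // comfortably inside 52 bits
    assert(int52ToStrictInt52(strictInt52ToInt52(value)) == value); // the round trip the fills rely on
    return 0;
}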
- m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr); - m_jit.signExtend32ToPtr(gpr, gpr); - if (desiredFormat == DataFormatStrictInt52) { - info.fillStrictInt52(*m_stream, gpr); - return gpr; - } - m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr); - info.fillInt52(*m_stream, gpr); - return gpr; - } - if (spillFormat == DataFormatInt52 || spillFormat == DataFormatStrictInt52) { - m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr); - if (desiredFormat == DataFormatStrictInt52) { - if (spillFormat == DataFormatInt52) - m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr); - info.fillStrictInt52(*m_stream, gpr); - return gpr; - } - if (spillFormat == DataFormatStrictInt52) - m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr); - info.fillInt52(*m_stream, gpr); - return gpr; - } m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr); - - // Fill as JSValue, and fall through. - info.fillJSValue(*m_stream, gpr, DataFormatJSInt32); - m_gprs.unlock(gpr); - FALLTHROUGH; - } - - case DataFormatJS: { - // Check the value is an integer. Note that we would *like* to unbox an Int52 - // at this point but this is too costly. We only *prove* that this is an Int52 - // even though we check if it's an int32. - GPRReg gpr = info.gpr(); - GPRReg result; - if (m_gprs.isLocked(gpr)) { - result = allocate(); - m_jit.move(gpr, result); - } else { - m_gprs.lock(gpr); - result = gpr; - } - if (type & ~SpecInt32) - speculationCheck(BadType, JSValueRegs(result), edge, m_jit.branch64(MacroAssembler::Below, result, GPRInfo::tagTypeNumberRegister)); - if (result == gpr) // The not-already-locked, so fill in-place, case. - info.fillInt52(*m_stream, gpr, desiredFormat); - m_jit.signExtend32ToPtr(result, result); - if (desiredFormat == DataFormatInt52) - m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), result); - return result; - } - - case DataFormatInt32: - case DataFormatJSInt32: { - GPRReg gpr = info.gpr(); - GPRReg result; - if (m_gprs.isLocked(gpr)) { - result = allocate(); - m_jit.move(gpr, result); - } else { - m_gprs.lock(gpr); - info.fillInt52(*m_stream, gpr, desiredFormat); - result = gpr; + if (desiredFormat == DataFormatStrictInt52) { + if (spillFormat == DataFormatInt52) + m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr); + info.fillStrictInt52(*m_stream, gpr); + return gpr; } - m_jit.signExtend32ToPtr(result, result); - if (desiredFormat == DataFormatInt52) - m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), result); - return result; + if (spillFormat == DataFormatStrictInt52) + m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr); + info.fillInt52(*m_stream, gpr); + return gpr; } case DataFormatStrictInt52: { @@ -1078,66 +1117,32 @@ GPRReg SpeculativeJIT::fillSpeculateInt52(Edge edge, DataFormat desiredFormat) return gpr; } - case DataFormatDouble: - case DataFormatJSDouble: - if (edge->hasConstant()) { - JSValue jsValue = valueOfJSConstant(edge.node()); - if (jsValue.isMachineInt()) { - int64_t value = jsValue.asMachineInt(); - if (desiredFormat == DataFormatInt52) - value = value << JSValue::int52ShiftAmount; - GPRReg gpr = allocate(); - m_jit.move(MacroAssembler::Imm64(value), gpr); - return gpr; - } - } - FALLTHROUGH; - case DataFormatCell: - case DataFormatBoolean: - case DataFormatJSCell: - case DataFormatJSBoolean: { - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); - return allocate(); - } - - case DataFormatStorage: - RELEASE_ASSERT_NOT_REACHED(); - default: - RELEASE_ASSERT_NOT_REACHED(); + 
DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format"); return InvalidGPRReg; } } FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge) { - AbstractValue& value = m_state.forNode(edge); - SpeculatedType type = value.m_type; - ASSERT(edge.useKind() != KnownNumberUse || !(value.m_type & ~SpecFullNumber)); - m_interpreter.filter(value, SpecFullNumber); + ASSERT(edge.useKind() == DoubleRepUse || edge.useKind() == DoubleRepRealUse || edge.useKind() == DoubleRepMachineIntUse); + ASSERT(edge->hasDoubleResult()); VirtualRegister virtualRegister = edge->virtualRegister(); GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); if (info.registerFormat() == DataFormatNone) { if (edge->hasConstant()) { - GPRReg gpr = allocate(); - - if (isInt32Constant(edge.node())) { - FPRReg fpr = fprAllocate(); - m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(static_cast<double>(valueOfInt32Constant(edge.node())))), gpr); - m_jit.move64ToDouble(gpr, fpr); - unlock(gpr); - - // Don't fill double here since that will lead to confusion: the - // register allocator will now think that this is a double while - // everyone else thinks it's an integer. - return fpr; - } - if (isNumberConstant(edge.node())) { + if (edge->isNumberConstant()) { FPRReg fpr = fprAllocate(); - m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(edge.node()))), gpr); - m_jit.move64ToDouble(gpr, fpr); - unlock(gpr); + int64_t doubleAsInt = reinterpretDoubleToInt64(edge->asNumber()); + if (!doubleAsInt) + m_jit.moveZeroToDouble(fpr); + else { + GPRReg gpr = allocate(); + m_jit.move(MacroAssembler::Imm64(doubleAsInt), gpr); + m_jit.move64ToDouble(gpr, fpr); + unlock(gpr); + } m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); info.fillDouble(*m_stream, fpr); @@ -1148,159 +1153,24 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge) } DataFormat spillFormat = info.spillFormat(); - switch (spillFormat) { - case DataFormatDouble: { - FPRReg fpr = fprAllocate(); - m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr); - m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); - info.fillDouble(*m_stream, fpr); - return fpr; - } - - case DataFormatInt32: { - GPRReg gpr = allocate(); - - m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); - m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr); - info.fillInt32(*m_stream, gpr); - unlock(gpr); - break; - } - - case DataFormatInt52: { - GPRReg gpr = allocate(); - m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); - m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr); - info.fillInt52(*m_stream, gpr); - unlock(gpr); - break; + if (spillFormat != DataFormatDouble) { + DFG_CRASH( + m_jit.graph(), m_currentNode, toCString( + "Expected ", edge, " to have double format but instead it is spilled as ", + dataFormatToString(spillFormat)).data()); } - - case DataFormatStrictInt52: { - GPRReg gpr = allocate(); - m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); - m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr); - info.fillStrictInt52(*m_stream, gpr); - unlock(gpr); - break; - } - - default: - GPRReg gpr = allocate(); - - RELEASE_ASSERT(spillFormat & DataFormatJS); - m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); - m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr); - info.fillJSValue(*m_stream, gpr, spillFormat); - unlock(gpr); - break; - } - } - - switch (info.registerFormat()) { - case DataFormatNone: // Should have filled, above. 
- case DataFormatBoolean: // This type never occurs. - case DataFormatStorage: - RELEASE_ASSERT_NOT_REACHED(); - - case DataFormatCell: - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); - return fprAllocate(); - - case DataFormatJSCell: - case DataFormatJS: - case DataFormatJSBoolean: { - GPRReg jsValueGpr = info.gpr(); - m_gprs.lock(jsValueGpr); - FPRReg fpr = fprAllocate(); - GPRReg tempGpr = allocate(); - - JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister); - - if (type & ~SpecFullNumber) - speculationCheck(BadType, JSValueRegs(jsValueGpr), edge, m_jit.branchTest64(MacroAssembler::Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister)); - - // First, if we get here we have a double encoded as a JSValue - m_jit.move(jsValueGpr, tempGpr); - unboxDouble(tempGpr, fpr); - JITCompiler::Jump hasUnboxedDouble = m_jit.jump(); - - // Finally, handle integers. - isInteger.link(&m_jit); - m_jit.convertInt32ToDouble(jsValueGpr, fpr); - hasUnboxedDouble.link(&m_jit); - - m_gprs.release(jsValueGpr); - m_gprs.unlock(jsValueGpr); - m_gprs.unlock(tempGpr); - m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); - info.fillDouble(*m_stream, fpr); - info.killSpilled(); - return fpr; - } - - case DataFormatJSInt32: - case DataFormatInt32: { - FPRReg fpr = fprAllocate(); - GPRReg gpr = info.gpr(); - m_gprs.lock(gpr); - m_jit.convertInt32ToDouble(gpr, fpr); - m_gprs.unlock(gpr); - return fpr; - } - - case DataFormatInt52: { - FPRReg fpr = fprAllocate(); - GPRReg gpr = info.gpr(); - m_gprs.lock(gpr); - GPRReg temp = allocate(); - m_jit.move(gpr, temp); - m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), temp); - m_jit.convertInt64ToDouble(temp, fpr); - unlock(temp); - m_gprs.unlock(gpr); - return fpr; - } - - case DataFormatStrictInt52: { - FPRReg fpr = fprAllocate(); - GPRReg gpr = info.gpr(); - m_gprs.lock(gpr); - m_jit.convertInt64ToDouble(gpr, fpr); - m_gprs.unlock(gpr); - return fpr; - } - - // Unbox the double - case DataFormatJSDouble: { - GPRReg gpr = info.gpr(); + DFG_ASSERT(m_jit.graph(), m_currentNode, spillFormat == DataFormatDouble); FPRReg fpr = fprAllocate(); - if (m_gprs.isLocked(gpr)) { - // Make sure we don't trample gpr if it is in use. 
- GPRReg temp = allocate(); - m_jit.move(gpr, temp); - unboxDouble(temp, fpr); - unlock(temp); - } else - unboxDouble(gpr, fpr); - - m_gprs.release(gpr); + m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr); m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); - info.fillDouble(*m_stream, fpr); return fpr; } - case DataFormatDouble: { - FPRReg fpr = info.fpr(); - m_fprs.lock(fpr); - return fpr; - } - - default: - RELEASE_ASSERT_NOT_REACHED(); - return InvalidFPRReg; - } + DFG_ASSERT(m_jit.graph(), m_currentNode, info.registerFormat() == DataFormatDouble); + FPRReg fpr = info.fpr(); + m_fprs.lock(fpr); + return fpr; } GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge) @@ -1308,37 +1178,34 @@ GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge) AbstractValue& value = m_state.forNode(edge); SpeculatedType type = value.m_type; ASSERT((edge.useKind() != KnownCellUse && edge.useKind() != KnownStringUse) || !(value.m_type & ~SpecCell)); + m_interpreter.filter(value, SpecCell); + if (value.isClear()) { + terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); + return allocate(); + } + VirtualRegister virtualRegister = edge->virtualRegister(); GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); switch (info.registerFormat()) { case DataFormatNone: { - if (info.spillFormat() == DataFormatInt32 || info.spillFormat() == DataFormatDouble) { - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); - return allocate(); - } - GPRReg gpr = allocate(); if (edge->hasConstant()) { - JSValue jsValue = valueOfJSConstant(edge.node()); - if (jsValue.isCell()) { - m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); - m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr); - info.fillJSValue(*m_stream, gpr, DataFormatJSCell); - return gpr; - } - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); + JSValue jsValue = edge->asJSValue(); + m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); + m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr); + info.fillJSValue(*m_stream, gpr, DataFormatJSCell); return gpr; } - RELEASE_ASSERT(info.spillFormat() & DataFormatJS); + m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr); info.fillJSValue(*m_stream, gpr, DataFormatJS); if (type & ~SpecCell) - speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister)); + speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchIfNotCell(JSValueRegs(gpr))); info.fillJSValue(*m_stream, gpr, DataFormatJSCell); return gpr; } @@ -1348,8 +1215,8 @@ GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge) GPRReg gpr = info.gpr(); m_gprs.lock(gpr); if (!ASSERT_DISABLED) { - MacroAssembler::Jump checkCell = m_jit.branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagMaskRegister); - m_jit.breakpoint(); + MacroAssembler::Jump checkCell = m_jit.branchIfCell(JSValueRegs(gpr)); + m_jit.abortWithReason(DFGIsNotCell); checkCell.link(&m_jit); } return gpr; @@ -1359,7 +1226,7 @@ GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge) GPRReg gpr = info.gpr(); m_gprs.lock(gpr); if (type & ~SpecCell) - speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister)); + speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchIfNotCell(JSValueRegs(gpr))); info.fillJSValue(*m_stream, gpr, DataFormatJSCell); return gpr; } @@ -1367,20 +1234,16 @@ GPRReg 
SpeculativeJIT::fillSpeculateCell(Edge edge) case DataFormatJSInt32: case DataFormatInt32: case DataFormatJSDouble: - case DataFormatDouble: case DataFormatJSBoolean: case DataFormatBoolean: - case DataFormatInt52: - case DataFormatStrictInt52: { - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); - return allocate(); - } - + case DataFormatDouble: case DataFormatStorage: - RELEASE_ASSERT_NOT_REACHED(); + case DataFormatInt52: + case DataFormatStrictInt52: + DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format"); default: - RELEASE_ASSERT_NOT_REACHED(); + DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format"); return InvalidGPRReg; } } @@ -1389,31 +1252,29 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge) { AbstractValue& value = m_state.forNode(edge); SpeculatedType type = value.m_type; + ASSERT(edge.useKind() != KnownBooleanUse || !(value.m_type & ~SpecBoolean)); + m_interpreter.filter(value, SpecBoolean); + if (value.isClear()) { + terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); + return allocate(); + } + VirtualRegister virtualRegister = edge->virtualRegister(); GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); switch (info.registerFormat()) { case DataFormatNone: { - if (info.spillFormat() == DataFormatInt32 || info.spillFormat() == DataFormatDouble) { - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); - return allocate(); - } - GPRReg gpr = allocate(); if (edge->hasConstant()) { - JSValue jsValue = valueOfJSConstant(edge.node()); - if (jsValue.isBoolean()) { - m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); - m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr); - info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean); - return gpr; - } - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); + JSValue jsValue = edge->asJSValue(); + m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); + m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr); + info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean); return gpr; } - RELEASE_ASSERT(info.spillFormat() & DataFormatJS); + DFG_ASSERT(m_jit.graph(), m_currentNode, info.spillFormat() & DataFormatJS); m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr); @@ -1449,47 +1310,22 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge) case DataFormatJSInt32: case DataFormatInt32: case DataFormatJSDouble: - case DataFormatDouble: case DataFormatJSCell: case DataFormatCell: - case DataFormatInt52: - case DataFormatStrictInt52: { - terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); - return allocate(); - } - + case DataFormatDouble: case DataFormatStorage: - RELEASE_ASSERT_NOT_REACHED(); + case DataFormatInt52: + case DataFormatStrictInt52: + DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format"); default: - RELEASE_ASSERT_NOT_REACHED(); + DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format"); return InvalidGPRReg; } } -JITCompiler::Jump SpeculativeJIT::convertToDouble(GPRReg value, FPRReg result, GPRReg tmp) -{ - JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, value, GPRInfo::tagTypeNumberRegister); - - JITCompiler::Jump notNumber = m_jit.branchTest64(MacroAssembler::Zero, value, GPRInfo::tagTypeNumberRegister); - - m_jit.move(value, tmp); - unboxDouble(tmp, result); - - JITCompiler::Jump done = m_jit.jump(); - - isInteger.link(&m_jit); - - m_jit.convertInt32ToDouble(value, result); - - 
done.link(&m_jit); - - return notNumber; -} - void SpeculativeJIT::compileBaseValueStoreBarrier(Edge& baseEdge, Edge& valueEdge) { -#if ENABLE(GGC) ASSERT(!isKnownNotCell(valueEdge.node())); SpeculateCellOperand base(this, baseEdge); @@ -1498,10 +1334,6 @@ void SpeculativeJIT::compileBaseValueStoreBarrier(Edge& baseEdge, Edge& valueEdg GPRTemporary scratch2(this); writeBarrier(base.gpr(), value.gpr(), valueEdge, scratch1.gpr(), scratch2.gpr()); -#else - UNUSED_PARAM(baseEdge); - UNUSED_PARAM(valueEdge); -#endif } void SpeculativeJIT::compileObjectEquality(Node* node) @@ -1516,41 +1348,24 @@ void SpeculativeJIT::compileObjectEquality(Node* node) if (masqueradesAsUndefinedWatchpointIsStillValid()) { DFG_TYPE_CHECK( - JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchPtr( - MacroAssembler::Equal, - MacroAssembler::Address(op1GPR, JSCell::structureOffset()), - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR)); DFG_TYPE_CHECK( - JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchPtr( - MacroAssembler::Equal, - MacroAssembler::Address(op2GPR, JSCell::structureOffset()), - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR)); } else { - GPRTemporary structure(this); - GPRReg structureGPR = structure.gpr(); - - m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR); DFG_TYPE_CHECK( - JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchPtr( - MacroAssembler::Equal, - structureGPR, - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR)); speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchTest8( MacroAssembler::NonZero, - MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); - m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR); DFG_TYPE_CHECK( - JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchPtr( - MacroAssembler::Equal, - structureGPR, - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR)); speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchTest8( MacroAssembler::NonZero, - MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); } @@ -1564,6 +1379,47 @@ void SpeculativeJIT::compileObjectEquality(Node* node) jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean); } +void SpeculativeJIT::compileObjectStrictEquality(Edge objectChild, Edge otherChild) +{ + SpeculateCellOperand op1(this, objectChild); + JSValueOperand op2(this, otherChild); + GPRTemporary result(this); + + GPRReg op1GPR = op1.gpr(); + GPRReg op2GPR = op2.gpr(); + GPRReg resultGPR = result.gpr(); + + DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); + + // At this point we know that we can perform a 
straight-forward equality comparison on pointer + // values because we are doing strict equality. + m_jit.compare64(MacroAssembler::Equal, op1GPR, op2GPR, resultGPR); + m_jit.or32(TrustedImm32(ValueFalse), resultGPR); + jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean); +} + +void SpeculativeJIT::compilePeepHoleObjectStrictEquality(Edge objectChild, Edge otherChild, Node* branchNode) +{ + BasicBlock* taken = branchNode->branchData()->taken.block; + BasicBlock* notTaken = branchNode->branchData()->notTaken.block; + + SpeculateCellOperand op1(this, objectChild); + JSValueOperand op2(this, otherChild); + + GPRReg op1GPR = op1.gpr(); + GPRReg op2GPR = op2.gpr(); + + DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); + + if (taken == nextBlock()) { + branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR, notTaken); + jump(taken); + } else { + branchPtr(MacroAssembler::Equal, op1GPR, op2GPR, taken); + jump(notTaken); + } +} + void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild) { SpeculateCellOperand op1(this, leftChild); @@ -1573,63 +1429,38 @@ void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge r GPRReg op1GPR = op1.gpr(); GPRReg op2GPR = op2.gpr(); GPRReg resultGPR = result.gpr(); - GPRTemporary structure; - GPRReg structureGPR = InvalidGPRReg; bool masqueradesAsUndefinedWatchpointValid = masqueradesAsUndefinedWatchpointIsStillValid(); - if (!masqueradesAsUndefinedWatchpointValid) { - // The masquerades as undefined case will use the structure register, so allocate it here. - // Do this at the top of the function to avoid branching around a register allocation. - GPRTemporary realStructure(this); - structure.adopt(realStructure); - structureGPR = structure.gpr(); - } - if (masqueradesAsUndefinedWatchpointValid) { DFG_TYPE_CHECK( - JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr( - MacroAssembler::Equal, - MacroAssembler::Address(op1GPR, JSCell::structureOffset()), - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); } else { - m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR); DFG_TYPE_CHECK( - JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr( - MacroAssembler::Equal, - structureGPR, - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild, m_jit.branchTest8( MacroAssembler::NonZero, - MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); } // It seems that most of the time when programs do a == b where b may be either null/undefined // or an object, b is usually an object. Balance the branches to make that case fast. - MacroAssembler::Jump rightNotCell = - m_jit.branchTest64(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister); + MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(JSValueRegs(op2GPR)); // We know that within this branch, rightChild must be a cell. 
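// Illustrative aside, not part of the patch: the predicate behind the MasqueradesAsUndefined
// checks used throughout this file when a cell is compared against null/undefined. The types
// below are stand-ins invented for the sketch; only the decision logic mirrors the generated
// code, which reads the flag from the cell's type-info byte and, in the non-speculative
// compare paths, reaches the global object through the cell's Structure. In speculative paths
// like the one here the compiler instead exits when the flag is set.
struct GlobalObject {};

struct Cell {
    bool masqueradesAsUndefined;  // the MasqueradesAsUndefined type-info flag
    GlobalObject* globalObject;   // reached via emitLoadStructure + Structure::globalObjectOffset()
};

inline bool cellEqualsNullOrUndefined(const Cell& cell, const GlobalObject* compilingGlobalObject)
{
    if (!cell.masqueradesAsUndefined)
        return false;                                  // the common fast path
    // A masquerader only compares equal to undefined/null when observed by code that
    // belongs to the same global object as the object itself.
    return cell.globalObject == compilingGlobalObject;
}

int main()
{
    GlobalObject pageA, pageB;
    Cell masquerader { true, &pageA };
    return (cellEqualsNullOrUndefined(masquerader, &pageA)
        && !cellEqualsNullOrUndefined(masquerader, &pageB)) ? 0 : 1;
}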
if (masqueradesAsUndefinedWatchpointValid) { DFG_TYPE_CHECK( - JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr( - MacroAssembler::Equal, - MacroAssembler::Address(op2GPR, JSCell::structureOffset()), - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2GPR)); } else { - m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR); DFG_TYPE_CHECK( - JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr( - MacroAssembler::Equal, - structureGPR, - MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2GPR)); speculationCheck(BadType, JSValueRegs(op2GPR), rightChild, m_jit.branchTest8( MacroAssembler::NonZero, - MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); } @@ -1666,8 +1497,8 @@ void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge r void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode) { - BasicBlock* taken = branchNode->takenBlock(); - BasicBlock* notTaken = branchNode->notTakenBlock(); + BasicBlock* taken = branchNode->branchData()->taken.block; + BasicBlock* notTaken = branchNode->branchData()->notTaken.block; SpeculateCellOperand op1(this, leftChild); JSValueOperand op2(this, rightChild, ManualOperandSpeculation); @@ -1676,63 +1507,38 @@ void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild GPRReg op1GPR = op1.gpr(); GPRReg op2GPR = op2.gpr(); GPRReg resultGPR = result.gpr(); - GPRTemporary structure; - GPRReg structureGPR = InvalidGPRReg; - bool masqueradesAsUndefinedWatchpointValid = + bool masqueradesAsUndefinedWatchpointValid = masqueradesAsUndefinedWatchpointIsStillValid(); - if (!masqueradesAsUndefinedWatchpointValid) { - // The masquerades as undefined case will use the structure register, so allocate it here. - // Do this at the top of the function to avoid branching around a register allocation. 
- GPRTemporary realStructure(this);
- structure.adopt(realStructure);
- structureGPR = structure.gpr();
- }
-
if (masqueradesAsUndefinedWatchpointValid) {
DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
- MacroAssembler::Equal,
- MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
- MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
} else {
- m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
DFG_TYPE_CHECK(
- JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
- MacroAssembler::Equal,
- structureGPR,
- MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));

speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild, m_jit.branchTest8(
MacroAssembler::NonZero,
- MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
+ MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
}

// It seems that most of the time when programs do a == b where b may be either null/undefined
// or an object, b is usually an object. Balance the branches to make that case fast.
- MacroAssembler::Jump rightNotCell =
- m_jit.branchTest64(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister);
+ MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(JSValueRegs(op2GPR));

// We know that within this branch, rightChild must be a cell.
if (masqueradesAsUndefinedWatchpointValid) {
DFG_TYPE_CHECK(
- JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr(
- MacroAssembler::Equal,
- MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
- MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2GPR));
} else {
- m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
DFG_TYPE_CHECK(
- JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr(
- MacroAssembler::Equal,
- structureGPR,
- MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2GPR));

speculationCheck(BadType, JSValueRegs(op2GPR), rightChild, m_jit.branchTest8(
MacroAssembler::NonZero,
- MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
+ MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
}
@@ -1763,15 +1569,34 @@ void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild

void SpeculativeJIT::compileInt32Compare(Node* node, MacroAssembler::RelationalCondition condition)
{
- SpeculateInt32Operand op1(this, node->child1());
- SpeculateInt32Operand op2(this, node->child2());
- GPRTemporary result(this, Reuse, op1, op2);
-
- m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());
-
- // If we add a DataFormatBool, we should use it here.
- m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
- jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
+ if (node->child1()->isInt32Constant()) {
+ SpeculateInt32Operand op2(this, node->child2());
+ GPRTemporary result(this, Reuse, op2);
+ int32_t imm = node->child1()->asInt32();
+ m_jit.compare32(condition, JITCompiler::Imm32(imm), op2.gpr(), result.gpr());
+
+ // If we add a DataFormatBool, we should use it here.
+ m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
+ jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
+ } else if (node->child2()->isInt32Constant()) {
+ SpeculateInt32Operand op1(this, node->child1());
+ GPRTemporary result(this, Reuse, op1);
+ int32_t imm = node->child2()->asInt32();
+ m_jit.compare32(condition, op1.gpr(), JITCompiler::Imm32(imm), result.gpr());
+
+ // If we add a DataFormatBool, we should use it here.
+ m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
+ jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
+ } else {
+ SpeculateInt32Operand op1(this, node->child1());
+ SpeculateInt32Operand op2(this, node->child2());
+ GPRTemporary result(this, Reuse, op1, op2);
+ m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());
+
+ // If we add a DataFormatBool, we should use it here.
+ m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
+ jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
+ }
}

void SpeculativeJIT::compileInt52Compare(Node* node, MacroAssembler::RelationalCondition condition)
@@ -1789,8 +1614,8 @@ void SpeculativeJIT::compileInt52Compare(Node* node, MacroAssembler::RelationalC

void SpeculativeJIT::compilePeepHoleInt52Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
- BasicBlock* taken = branchNode->takenBlock();
- BasicBlock* notTaken = branchNode->notTakenBlock();
+ BasicBlock* taken = branchNode->branchData()->taken.block;
+ BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

// The branch instruction will branch to the taken block.
// If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
@@ -1830,6 +1655,8 @@ void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse)
GPRReg resultGPR = result.gpr();
GPRTemporary structure;
GPRReg structureGPR = InvalidGPRReg;
+ GPRTemporary scratch;
+ GPRReg scratchGPR = InvalidGPRReg;

bool masqueradesAsUndefinedWatchpointValid =
masqueradesAsUndefinedWatchpointIsStillValid();
@@ -1838,37 +1665,33 @@ void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse)
// The masquerades as undefined case will use the structure register, so allocate it here.
// Do this at the top of the function to avoid branching around a register allocation.
GPRTemporary realStructure(this);
+ GPRTemporary realScratch(this);
structure.adopt(realStructure);
+ scratch.adopt(realScratch);
structureGPR = structure.gpr();
+ scratchGPR = scratch.gpr();
}

- MacroAssembler::Jump notCell = m_jit.branchTest64(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
+ MacroAssembler::Jump notCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR));
if (masqueradesAsUndefinedWatchpointValid) {
DFG_TYPE_CHECK(
- JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchPtr(
- MacroAssembler::Equal,
- MacroAssembler::Address(valueGPR, JSCell::structureOffset()),
- MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(valueGPR));
} else {
- m_jit.loadPtr(MacroAssembler::Address(valueGPR, JSCell::structureOffset()), structureGPR);
-
DFG_TYPE_CHECK(
- JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchPtr(
- MacroAssembler::Equal,
- structureGPR,
- MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(valueGPR));

MacroAssembler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(
MacroAssembler::Zero,
- MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
+ MacroAssembler::Address(valueGPR, JSCell::typeInfoFlagsOffset()),
MacroAssembler::TrustedImm32(MasqueradesAsUndefined));

+ m_jit.emitLoadStructure(valueGPR, structureGPR, scratchGPR);
speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
m_jit.branchPtr(
MacroAssembler::Equal,
MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()),
- MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin))));
+ MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic))));

isNotMasqueradesAsUndefined.link(&m_jit);
}
@@ -1910,7 +1733,7 @@ void SpeculativeJIT::compileLogicalNot(Node* node)
return;
}

- case NumberUse: {
+ case DoubleRepUse: {
SpeculateDoubleOperand value(this, node->child1());
FPRTemporary scratch(this);
GPRTemporary result(this);
@@ -1922,7 +1745,8 @@ void SpeculativeJIT::compileLogicalNot(Node* node)
return;
}

- case BooleanUse: {
+ case BooleanUse:
+ case KnownBooleanUse: {
if (!needsTypeCheck(node->child1(), SpecBoolean)) {
SpeculateBooleanOperand value(this, node->child1());
GPRTemporary result(this, Reuse, value);
@@ -1963,7 +1787,7 @@ void SpeculativeJIT::compileLogicalNot(Node* node)
JITCompiler::Jump slowCase = m_jit.branchTest64(JITCompiler::NonZero, resultGPR, TrustedImm32(static_cast<int32_t>(~1)));

addSlowPathGenerator(
- slowPathCall(slowCase, this, operationConvertJSValueToBoolean, resultGPR, arg1GPR));
+ slowPathCall(slowCase, this, operationConvertJSValueToBoolean, resultGPR, arg1GPR, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));

m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), resultGPR);
jsValueResult(resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly);
@@ -1972,8 +1796,11 @@ void SpeculativeJIT::compileLogicalNot(Node* node)
case StringUse:
return compileStringZeroLength(node);

+ case StringOrOtherUse:
+ return compileLogicalNotStringOrOther(node);
+
default:
- RELEASE_ASSERT_NOT_REACHED();
+ DFG_CRASH(m_jit.graph(), node, "Bad use kind");
break;
}
}
@@ -1982,32 +1809,36 @@ void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, Ba
{
JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
GPRTemporary scratch(this);
+ GPRTemporary structure;
GPRReg valueGPR = value.gpr();
GPRReg scratchGPR = scratch.gpr();
-
- MacroAssembler::Jump notCell = m_jit.branchTest64(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
+ GPRReg structureGPR = InvalidGPRReg;
+
+ if (!masqueradesAsUndefinedWatchpointIsStillValid()) {
+ GPRTemporary realStructure(this);
+ structure.adopt(realStructure);
+ structureGPR = structure.gpr();
+ }
+
+ MacroAssembler::Jump notCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR));
if (masqueradesAsUndefinedWatchpointIsStillValid()) {
DFG_TYPE_CHECK(
- JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchPtr(
- MacroAssembler::Equal,
- MacroAssembler::Address(valueGPR, JSCell::structureOffset()),
- MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(valueGPR));
} else {
- m_jit.loadPtr(MacroAssembler::Address(valueGPR, JSCell::structureOffset()), scratchGPR);
-
DFG_TYPE_CHECK(
- JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchPtr(
- MacroAssembler::Equal,
- scratchGPR,
- MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(valueGPR));

- JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::Zero, MacroAssembler::Address(scratchGPR, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(
+ JITCompiler::Zero,
+ MacroAssembler::Address(valueGPR, JSCell::typeInfoFlagsOffset()),
+ TrustedImm32(MasqueradesAsUndefined));
+ m_jit.emitLoadStructure(valueGPR, structureGPR, scratchGPR);
speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
m_jit.branchPtr(
MacroAssembler::Equal,
- MacroAssembler::Address(scratchGPR, Structure::globalObjectOffset()),
- MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin))));
+ MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic))));

isNotMasqueradesAsUndefined.link(&m_jit);
}
@@ -2029,8 +1860,8 @@ void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, Ba

void SpeculativeJIT::emitBranch(Node* node)
{
- BasicBlock* taken = node->takenBlock();
- BasicBlock* notTaken = node->notTakenBlock();
+ BasicBlock* taken = node->branchData()->taken.block;
+ BasicBlock* notTaken = node->branchData()->notTaken.block;

switch (node->child1().useKind()) {
case ObjectOrOtherUse: {
@@ -2039,7 +1870,7 @@ void SpeculativeJIT::emitBranch(Node* node)
}

case Int32Use:
- case NumberUse: {
+ case DoubleRepUse: {
if (node->child1().useKind() == Int32Use) {
bool invert = false;
@@ -2064,12 +1895,23 @@ void SpeculativeJIT::emitBranch(Node* node)
return;
}

+ case StringUse: {
+ emitStringBranch(node->child1(), taken, notTaken);
+ return;
+ }
+
+ case StringOrOtherUse: {
+ emitStringOrOtherBranch(node->child1(), taken, notTaken);
+ return;
+ }
+
case UntypedUse:
- case BooleanUse: {
+ case BooleanUse:
+ case KnownBooleanUse: {
JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
GPRReg valueGPR = value.gpr();

- if (node->child1().useKind() == BooleanUse) {
+ if (node->child1().useKind() == BooleanUse || node->child1().useKind() == KnownBooleanUse) {
if (!needsTypeCheck(node->child1(), SpecBoolean)) {
MacroAssembler::ResultCondition
condition = MacroAssembler::NonZero; @@ -2118,7 +1960,7 @@ void SpeculativeJIT::emitBranch(Node* node) } default: - RELEASE_ASSERT_NOT_REACHED(); + DFG_CRASH(m_jit.graph(), m_currentNode, "Bad use kind"); } } @@ -2132,40 +1974,50 @@ void SpeculativeJIT::compile(Node* node) switch (op) { case JSConstant: + case DoubleConstant: + case Int52Constant: + case PhantomDirectArguments: + case PhantomClonedArguments: initConstantInfo(node); break; - case PhantomArguments: - initConstantInfo(node); - break; - - case WeakJSConstant: - m_jit.addWeakReference(node->weakConstant()); - initConstantInfo(node); - break; - case Identity: { - // CSE should always eliminate this. - RELEASE_ASSERT_NOT_REACHED(); + speculate(node, node->child1()); + switch (node->child1().useKind()) { + case DoubleRepUse: + case DoubleRepRealUse: + case DoubleRepMachineIntUse: { + SpeculateDoubleOperand op(this, node->child1()); + FPRTemporary scratch(this, op); + m_jit.moveDouble(op.fpr(), scratch.fpr()); + doubleResult(scratch.fpr(), node); + break; + } + case Int52RepUse: { + SpeculateInt52Operand op(this, node->child1()); + GPRTemporary result(this, Reuse, op); + m_jit.move(op.gpr(), result.gpr()); + int52Result(result.gpr(), node); + break; + } + default: { + JSValueOperand op(this, node->child1()); + GPRTemporary result(this, Reuse, op); + m_jit.move(op.gpr(), result.gpr()); + jsValueResult(result.gpr(), node); + break; + } + } // switch break; } case GetLocal: { - SpeculatedType prediction = node->variableAccessData()->prediction(); AbstractValue& value = m_state.variables().operand(node->local()); - // If we have no prediction for this local, then don't attempt to compile. - if (prediction == SpecNone) { - terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); - break; - } - // If the CFA is tracking this variable and it found that the variable // cannot have been assigned, then don't attempt to proceed. if (value.isClear()) { - // FIXME: We should trap instead. - // https://bugs.webkit.org/show_bug.cgi?id=110383 - terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); + m_compileOkay = false; break; } @@ -2233,13 +2085,23 @@ void SpeculativeJIT::compile(Node* node) break; } - case MovHint: - case ZombieHint: - case Check: { - RELEASE_ASSERT_NOT_REACHED(); + case MovHint: { + compileMovHint(m_currentNode); + noResult(node); + break; + } + + case ZombieHint: { + recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead); + noResult(node); break; } - + + case ExitOK: { + noResult(node); + break; + } + case SetLocal: { switch (node->variableAccessData()->flushFormat()) { case FlushedDouble: { @@ -2286,8 +2148,7 @@ void SpeculativeJIT::compile(Node* node) break; } - case FlushedJSValue: - case FlushedArguments: { + case FlushedJSValue: { JSValueOperand value(this, node->child1()); m_jit.store64(value.gpr(), JITCompiler::addressFor(node->machineLocal())); noResult(node); @@ -2296,7 +2157,7 @@ void SpeculativeJIT::compile(Node* node) } default: - RELEASE_ASSERT_NOT_REACHED(); + DFG_CRASH(m_jit.graph(), node, "Bad flush format"); break; } @@ -2308,60 +2169,19 @@ void SpeculativeJIT::compile(Node* node) // But it may be profitable to use this as a hook to run speculation checks // on arguments, thereby allowing us to trivially eliminate such checks if // the argument is not used. 
+ recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat())); break; case BitAnd: case BitOr: case BitXor: - if (isInt32Constant(node->child1().node())) { - SpeculateInt32Operand op2(this, node->child2()); - GPRTemporary result(this, Reuse, op2); - - bitOp(op, valueOfInt32Constant(node->child1().node()), op2.gpr(), result.gpr()); - - int32Result(result.gpr(), node); - } else if (isInt32Constant(node->child2().node())) { - SpeculateInt32Operand op1(this, node->child1()); - GPRTemporary result(this, Reuse, op1); - - bitOp(op, valueOfInt32Constant(node->child2().node()), op1.gpr(), result.gpr()); - - int32Result(result.gpr(), node); - } else { - SpeculateInt32Operand op1(this, node->child1()); - SpeculateInt32Operand op2(this, node->child2()); - GPRTemporary result(this, Reuse, op1, op2); - - GPRReg reg1 = op1.gpr(); - GPRReg reg2 = op2.gpr(); - bitOp(op, reg1, reg2, result.gpr()); - - int32Result(result.gpr(), node); - } + compileBitwiseOp(node); break; case BitRShift: case BitLShift: case BitURShift: - if (isInt32Constant(node->child2().node())) { - SpeculateInt32Operand op1(this, node->child1()); - GPRTemporary result(this, Reuse, op1); - - shiftOp(op, op1.gpr(), valueOfInt32Constant(node->child2().node()) & 0x1f, result.gpr()); - - int32Result(result.gpr(), node); - } else { - // Do not allow shift amount to be used as the result, MacroAssembler does not permit this. - SpeculateInt32Operand op1(this, node->child1()); - SpeculateInt32Operand op2(this, node->child2()); - GPRTemporary result(this, Reuse, op1); - - GPRReg reg1 = op1.gpr(); - GPRReg reg2 = op2.gpr(); - shiftOp(op, reg1, reg2, result.gpr()); - - int32Result(result.gpr(), node); - } + compileShiftOp(node); break; case UInt32ToNumber: { @@ -2379,48 +2199,101 @@ void SpeculativeJIT::compile(Node* node) break; } - case Int32ToDouble: { - compileInt32ToDouble(node); + case DoubleRep: { + compileDoubleRep(node); break; } - case Int52ToValue: { - JSValueOperand operand(this, node->child1()); - GPRTemporary result(this, Reuse, operand); - m_jit.move(operand.gpr(), result.gpr()); - jsValueResult(result.gpr(), node); + case ValueRep: { + compileValueRep(node); break; } - case Int52ToDouble: { - SpeculateDoubleOperand operand(this, node->child1()); - FPRTemporary result(this, operand); - m_jit.moveDouble(operand.fpr(), result.fpr()); - doubleResult(result.fpr(), node); + case Int52Rep: { + switch (node->child1().useKind()) { + case Int32Use: { + SpeculateInt32Operand operand(this, node->child1()); + GPRTemporary result(this, Reuse, operand); + + m_jit.signExtend32ToPtr(operand.gpr(), result.gpr()); + + strictInt52Result(result.gpr(), node); + break; + } + + case MachineIntUse: { + GPRTemporary result(this); + GPRReg resultGPR = result.gpr(); + + convertMachineInt(node->child1(), resultGPR); + + strictInt52Result(resultGPR, node); + break; + } + + case DoubleRepMachineIntUse: { + SpeculateDoubleOperand value(this, node->child1()); + FPRReg valueFPR = value.fpr(); + + GPRFlushedCallResult result(this); + GPRReg resultGPR = result.gpr(); + + flushRegisters(); + + callOperation(operationConvertDoubleToInt52, resultGPR, valueFPR); + + DFG_TYPE_CHECK_WITH_EXIT_KIND(Int52Overflow, + JSValueRegs(), node->child1(), SpecInt52AsDouble, + m_jit.branch64( + JITCompiler::Equal, resultGPR, + JITCompiler::TrustedImm64(JSValue::notInt52))); + + strictInt52Result(resultGPR, node); + break; + } + + default: + DFG_CRASH(m_jit.graph(), node, "Bad use kind"); + } break; } - - case ValueAdd: { - JSValueOperand op1(this, node->child1()); - 
JSValueOperand op2(this, node->child2()); + + case ValueAdd: + compileValueAdd(node); + break; + + case StrCat: { + JSValueOperand op1(this, node->child1(), ManualOperandSpeculation); + JSValueOperand op2(this, node->child2(), ManualOperandSpeculation); + JSValueOperand op3(this, node->child3(), ManualOperandSpeculation); GPRReg op1GPR = op1.gpr(); GPRReg op2GPR = op2.gpr(); + GPRReg op3GPR; + if (node->child3()) + op3GPR = op3.gpr(); + else + op3GPR = InvalidGPRReg; flushRegisters(); - - GPRResult result(this); - if (isKnownNotNumber(node->child1().node()) || isKnownNotNumber(node->child2().node())) - callOperation(operationValueAddNotNumber, result.gpr(), op1GPR, op2GPR); + + GPRFlushedCallResult result(this); + if (node->child3()) + callOperation(operationStrCat3, result.gpr(), op1GPR, op2GPR, op3GPR); else - callOperation(operationValueAdd, result.gpr(), op1GPR, op2GPR); + callOperation(operationStrCat2, result.gpr(), op1GPR, op2GPR); + m_jit.exceptionCheck(); - jsValueResult(result.gpr(), node); + cellResult(result.gpr(), node); break; } - + case ArithAdd: - compileAdd(node); + compileArithAdd(node); + break; + + case ArithClz32: + compileArithClz32(node); break; case MakeRope: @@ -2460,12 +2333,13 @@ void SpeculativeJIT::compile(Node* node) m_jit.rshift32(result.gpr(), MacroAssembler::TrustedImm32(31), scratch.gpr()); m_jit.add32(scratch.gpr(), result.gpr()); m_jit.xor32(scratch.gpr(), result.gpr()); - speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Equal, result.gpr(), MacroAssembler::TrustedImm32(1 << 31))); + if (shouldCheckOverflow(node->arithMode())) + speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Signed, result.gpr())); int32Result(result.gpr(), node); break; } - case NumberUse: { + case DoubleRepUse: { SpeculateDoubleOperand op1(this, node->child1()); FPRTemporary result(this); @@ -2475,7 +2349,7 @@ void SpeculativeJIT::compile(Node* node) } default: - RELEASE_ASSERT_NOT_REACHED(); + DFG_CRASH(m_jit.graph(), node, "Bad use kind"); break; } break; @@ -2503,7 +2377,7 @@ void SpeculativeJIT::compile(Node* node) break; } - case NumberUse: { + case DoubleRepUse: { SpeculateDoubleOperand op1(this, node->child1()); SpeculateDoubleOperand op2(this, node->child2()); FPRTemporary result(this, op1); @@ -2542,22 +2416,41 @@ void SpeculativeJIT::compile(Node* node) } default: - RELEASE_ASSERT_NOT_REACHED(); + DFG_CRASH(m_jit.graph(), node, "Bad use kind"); break; } break; } - - case ArithSqrt: { + + case ArithPow: + compileArithPow(node); + break; + + case ArithSqrt: + compileArithSqrt(node); + break; + + case ArithFRound: { SpeculateDoubleOperand op1(this, node->child1()); FPRTemporary result(this, op1); - m_jit.sqrtDouble(op1.fpr(), result.fpr()); + m_jit.convertDoubleToFloat(op1.fpr(), result.fpr()); + m_jit.convertFloatToDouble(result.fpr(), result.fpr()); doubleResult(result.fpr(), node); break; } - + + case ArithRandom: + compileArithRandom(node); + break; + + case ArithRound: + case ArithFloor: + case ArithCeil: + compileArithRounding(node); + break; + case ArithSin: { SpeculateDoubleOperand op1(this, node->child1()); FPRReg op1FPR = op1.fpr(); @@ -2582,6 +2475,10 @@ void SpeculativeJIT::compile(Node* node) break; } + case ArithLog: + compileArithLog(node); + break; + case LogicalNot: compileLogicalNot(node); break; @@ -2605,23 +2502,12 @@ void SpeculativeJIT::compile(Node* node) if (compare(node, JITCompiler::GreaterThanOrEqual, JITCompiler::DoubleGreaterThanOrEqual, operationCompareGreaterEq)) return; break; - - case 
CompareEqConstant: - ASSERT(isNullConstant(node->child2().node())); - if (nonSpeculativeCompareNull(node, node->child1())) - return; - break; case CompareEq: if (compare(node, JITCompiler::Equal, JITCompiler::DoubleEqual, operationCompareEq)) return; break; - case CompareStrictEqConstant: - if (compileStrictEqForConstant(node, node->child1(), valueOfJSConstant(node->child2().node()))) - return; - break; - case CompareStrictEq: if (compileStrictEq(node)) return; @@ -2658,9 +2544,24 @@ void SpeculativeJIT::compile(Node* node) switch (node->arrayMode().type()) { case Array::SelectUsingPredictions: case Array::ForceExit: - RELEASE_ASSERT_NOT_REACHED(); - terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); + DFG_CRASH(m_jit.graph(), node, "Bad array mode type"); + break; + case Array::Undecided: { + SpeculateStrictInt32Operand index(this, node->child2()); + GPRTemporary result(this, Reuse, index); + GPRReg indexGPR = index.gpr(); + GPRReg resultGPR = result.gpr(); + + use(node->child1()); + index.use(); + + speculationCheck(OutOfBounds, JSValueRegs(), node, + m_jit.branch32(MacroAssembler::LessThan, indexGPR, MacroAssembler::TrustedImm32(0))); + + m_jit.move(MacroAssembler::TrustedImm64(ValueUndefined), resultGPR); + jsValueResult(resultGPR, node, UseChildrenCalledExplicitly); break; + } case Array::Generic: { JSValueOperand base(this, node->child1()); JSValueOperand property(this, node->child2()); @@ -2668,8 +2569,9 @@ void SpeculativeJIT::compile(Node* node) GPRReg propertyGPR = property.gpr(); flushRegisters(); - GPRResult result(this); + GPRFlushedCallResult result(this); callOperation(operationGetByVal, result.gpr(), baseGPR, propertyGPR); + m_jit.exceptionCheck(); jsValueResult(result.gpr(), node); break; @@ -2690,7 +2592,17 @@ void SpeculativeJIT::compile(Node* node) GPRTemporary result(this); m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.gpr()); - speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchTest64(MacroAssembler::Zero, result.gpr())); + if (node->arrayMode().isSaneChain()) { + ASSERT(node->arrayMode().type() == Array::Contiguous); + JITCompiler::Jump notHole = m_jit.branchTest64( + MacroAssembler::NonZero, result.gpr()); + m_jit.move(TrustedImm64(JSValue::encode(jsUndefined())), result.gpr()); + notHole.link(&m_jit); + } else { + speculationCheck( + LoadFromHole, JSValueRegs(), 0, + m_jit.branchTest64(MacroAssembler::Zero, result.gpr())); + } jsValueResult(result.gpr(), node, node->arrayMode().type() == Array::Int32 ? 
DataFormatJSInt32 : DataFormatJS); break; } @@ -2833,8 +2745,11 @@ void SpeculativeJIT::compile(Node* node) case Array::String: compileGetByValOnString(node); break; - case Array::Arguments: - compileGetByValOnArguments(node); + case Array::DirectArguments: + compileGetByValOnDirectArguments(node); + break; + case Array::ScopedArguments: + compileGetByValOnScopedArguments(node); break; default: { TypedArrayType type = node->arrayMode().typedArrayType(); @@ -2860,12 +2775,10 @@ void SpeculativeJIT::compile(Node* node) switch (arrayMode.type()) { case Array::SelectUsingPredictions: case Array::ForceExit: - RELEASE_ASSERT_NOT_REACHED(); - terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); - alreadyHandled = true; + DFG_CRASH(m_jit.graph(), node, "Bad array mode type"); break; case Array::Generic: { - RELEASE_ASSERT(node->op() == PutByVal); + DFG_ASSERT(m_jit.graph(), node, node->op() == PutByVal || node->op() == PutByValDirect); JSValueOperand arg1(this, child1); JSValueOperand arg2(this, child2); @@ -2875,9 +2788,10 @@ void SpeculativeJIT::compile(Node* node) GPRReg arg3GPR = arg3.gpr(); flushRegisters(); if (node->op() == PutByValDirect) - callOperation(m_jit.isStrictModeFor(node->codeOrigin) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict, arg1GPR, arg2GPR, arg3GPR); + callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict, arg1GPR, arg2GPR, arg3GPR); else - callOperation(m_jit.isStrictModeFor(node->codeOrigin) ? operationPutByValStrict : operationPutByValNonStrict, arg1GPR, arg2GPR, arg3GPR); + callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict, arg1GPR, arg2GPR, arg3GPR); + m_jit.exceptionCheck(); noResult(node); alreadyHandled = true; @@ -3066,47 +2980,6 @@ void SpeculativeJIT::compile(Node* node) break; } - case Array::Arguments: { - JSValueOperand value(this, child3); - GPRTemporary scratch(this); - GPRTemporary scratch2(this); - - GPRReg valueReg = value.gpr(); - GPRReg scratchReg = scratch.gpr(); - GPRReg scratch2Reg = scratch2.gpr(); - - if (!m_compileOkay) - return; - - // Two really lame checks. 
- speculationCheck( - Uncountable, JSValueSource(), 0, - m_jit.branch32( - MacroAssembler::AboveOrEqual, propertyReg, - MacroAssembler::Address(baseReg, Arguments::offsetOfNumArguments()))); - speculationCheck( - Uncountable, JSValueSource(), 0, - m_jit.branchTestPtr( - MacroAssembler::NonZero, - MacroAssembler::Address( - baseReg, Arguments::offsetOfSlowArgumentData()))); - - m_jit.move(propertyReg, scratch2Reg); - m_jit.signExtend32ToPtr(scratch2Reg, scratch2Reg); - m_jit.loadPtr( - MacroAssembler::Address(baseReg, Arguments::offsetOfRegisters()), - scratchReg); - - m_jit.store64( - valueReg, - MacroAssembler::BaseIndex( - scratchReg, scratch2Reg, MacroAssembler::TimesEight, - CallFrame::thisArgumentOffset() * sizeof(Register) + sizeof(Register))); - - noResult(node); - break; - } - default: { TypedArrayType type = arrayMode.typedArrayType(); if (isInt(type)) @@ -3119,46 +2992,64 @@ void SpeculativeJIT::compile(Node* node) } case RegExpExec: { - if (compileRegExpExec(node)) - return; - if (!node->adjustedRefCount()) { + if (node->child1().useKind() == CellUse + && node->child2().useKind() == CellUse) { SpeculateCellOperand base(this, node->child1()); SpeculateCellOperand argument(this, node->child2()); GPRReg baseGPR = base.gpr(); GPRReg argumentGPR = argument.gpr(); - + flushRegisters(); - GPRResult result(this); - callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR); - - // Must use jsValueResult because otherwise we screw up register - // allocation, which thinks that this node has a result. + GPRFlushedCallResult result(this); + callOperation(operationRegExpExec, result.gpr(), baseGPR, argumentGPR); + m_jit.exceptionCheck(); + jsValueResult(result.gpr(), node); break; } - - SpeculateCellOperand base(this, node->child1()); - SpeculateCellOperand argument(this, node->child2()); + + JSValueOperand base(this, node->child1()); + JSValueOperand argument(this, node->child2()); GPRReg baseGPR = base.gpr(); GPRReg argumentGPR = argument.gpr(); flushRegisters(); - GPRResult result(this); - callOperation(operationRegExpExec, result.gpr(), baseGPR, argumentGPR); + GPRFlushedCallResult result(this); + callOperation(operationRegExpExecGeneric, result.gpr(), baseGPR, argumentGPR); + m_jit.exceptionCheck(); jsValueResult(result.gpr(), node); break; } case RegExpTest: { - SpeculateCellOperand base(this, node->child1()); - SpeculateCellOperand argument(this, node->child2()); + if (node->child1().useKind() == CellUse + && node->child2().useKind() == CellUse) { + SpeculateCellOperand base(this, node->child1()); + SpeculateCellOperand argument(this, node->child2()); + GPRReg baseGPR = base.gpr(); + GPRReg argumentGPR = argument.gpr(); + + flushRegisters(); + GPRFlushedCallResult result(this); + callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR); + m_jit.exceptionCheck(); + + // If we add a DataFormatBool, we should use it here. + m_jit.or32(TrustedImm32(ValueFalse), result.gpr()); + jsValueResult(result.gpr(), node, DataFormatJSBoolean); + break; + } + + JSValueOperand base(this, node->child1()); + JSValueOperand argument(this, node->child2()); GPRReg baseGPR = base.gpr(); GPRReg argumentGPR = argument.gpr(); flushRegisters(); - GPRResult result(this); - callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR); + GPRFlushedCallResult result(this); + callOperation(operationRegExpTestGeneric, result.gpr(), baseGPR, argumentGPR); + m_jit.exceptionCheck(); // If we add a DataFormatBool, we should use it here. 
m_jit.or32(TrustedImm32(ValueFalse), result.gpr()); @@ -3200,7 +3091,7 @@ void SpeculativeJIT::compile(Node* node) addSlowPathGenerator( slowPathCall( - slowPath, this, operationArrayPush, NoResult, storageLengthGPR, + slowPath, this, operationArrayPush, storageLengthGPR, valueGPR, baseGPR)); jsValueResult(storageLengthGPR, node); @@ -3212,7 +3103,7 @@ void SpeculativeJIT::compile(Node* node) FPRReg valueFPR = value.fpr(); DFG_TYPE_CHECK( - JSValueRegs(), node->child2(), SpecFullRealNumber, + JSValueRegs(), node->child2(), SpecDoubleReal, m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, valueFPR, valueFPR)); m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR); @@ -3224,7 +3115,7 @@ void SpeculativeJIT::compile(Node* node) addSlowPathGenerator( slowPathCall( - slowPath, this, operationArrayPushDouble, NoResult, storageLengthGPR, + slowPath, this, operationArrayPushDouble, storageLengthGPR, valueFPR, baseGPR)); jsValueResult(storageLengthGPR, node); @@ -3299,7 +3190,7 @@ void SpeculativeJIT::compile(Node* node) // FIXME: This would not have to be here if changing the publicLength also zeroed the values between the old // length and the new length. m_jit.store64( - MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight)); + MacroAssembler::TrustedImm64(bitwise_cast<int64_t>(PNaN)), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight)); slowCase = m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempFPR, tempFPR); boxDouble(tempFPR, valueGPR); } else { @@ -3366,7 +3257,7 @@ void SpeculativeJIT::compile(Node* node) } case DFG::Jump: { - jump(node->takenBlock()); + jump(node->targetBlock()); noResult(node); break; } @@ -3388,12 +3279,8 @@ void SpeculativeJIT::compile(Node* node) JSValueOperand op1(this, node->child1()); m_jit.move(op1.gpr(), GPRInfo::returnValueGPR); - // Grab the return address. - m_jit.emitGetReturnPCFromCallFrameHeaderPtr(GPRInfo::regT1); - // Restore our caller's "r". - m_jit.emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::callFrameRegister); - // Return. - m_jit.restoreReturnAddressBeforeReturn(GPRInfo::regT1); + m_jit.emitRestoreCalleeSaves(); + m_jit.emitFunctionEpilogue(); m_jit.ret(); noResult(node); @@ -3408,8 +3295,56 @@ void SpeculativeJIT::compile(Node* node) break; } + case BooleanToNumber: { + switch (node->child1().useKind()) { + case BooleanUse: { + JSValueOperand value(this, node->child1(), ManualOperandSpeculation); + GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add). 
+ + m_jit.move(value.gpr(), result.gpr()); + m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr()); + DFG_TYPE_CHECK( + JSValueRegs(value.gpr()), node->child1(), SpecBoolean, m_jit.branchTest64( + JITCompiler::NonZero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1)))); + + int32Result(result.gpr(), node); + break; + } + + case UntypedUse: { + JSValueOperand value(this, node->child1()); + GPRTemporary result(this); + + if (!m_interpreter.needsTypeCheck(node->child1(), SpecBoolInt32 | SpecBoolean)) { + m_jit.move(value.gpr(), result.gpr()); + m_jit.and32(TrustedImm32(1), result.gpr()); + int32Result(result.gpr(), node); + break; + } + + m_jit.move(value.gpr(), result.gpr()); + m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr()); + JITCompiler::Jump isBoolean = m_jit.branchTest64( + JITCompiler::Zero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1))); + m_jit.move(value.gpr(), result.gpr()); + JITCompiler::Jump done = m_jit.jump(); + isBoolean.link(&m_jit); + m_jit.or64(GPRInfo::tagTypeNumberRegister, result.gpr()); + done.link(&m_jit); + + jsValueResult(result.gpr(), node); + break; + } + + default: + DFG_CRASH(m_jit.graph(), node, "Bad use kind"); + break; + } + break; + } + case ToPrimitive: { - RELEASE_ASSERT(node->child1().useKind() == UntypedUse); + DFG_ASSERT(m_jit.graph(), node, node->child1().useKind() == UntypedUse); JSValueOperand op1(this, node->child1()); GPRTemporary result(this, Reuse, op1); @@ -3418,54 +3353,53 @@ void SpeculativeJIT::compile(Node* node) op1.use(); - if (!(m_state.forNode(node->child1()).m_type & ~(SpecFullNumber | SpecBoolean))) - m_jit.move(op1GPR, resultGPR); - else { - MacroAssembler::Jump alreadyPrimitive = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagMaskRegister); - MacroAssembler::Jump notPrimitive = m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op1GPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())); - - alreadyPrimitive.link(&m_jit); - m_jit.move(op1GPR, resultGPR); - - addSlowPathGenerator( - slowPathCall(notPrimitive, this, operationToPrimitive, resultGPR, op1GPR)); - } + MacroAssembler::Jump alreadyPrimitive = m_jit.branchIfNotCell(JSValueRegs(op1GPR)); + MacroAssembler::Jump notPrimitive = m_jit.branchIfObject(op1GPR); + + alreadyPrimitive.link(&m_jit); + m_jit.move(op1GPR, resultGPR); + + addSlowPathGenerator( + slowPathCall(notPrimitive, this, operationToPrimitive, resultGPR, op1GPR)); jsValueResult(resultGPR, node, UseChildrenCalledExplicitly); break; } - case ToString: { + case ToString: + case CallStringConstructor: { if (node->child1().useKind() == UntypedUse) { JSValueOperand op1(this, node->child1()); GPRReg op1GPR = op1.gpr(); - GPRResult result(this); + GPRFlushedCallResult result(this); GPRReg resultGPR = result.gpr(); flushRegisters(); JITCompiler::Jump done; if (node->child1()->prediction() & SpecString) { - JITCompiler::Jump slowPath1 = m_jit.branchTest64( - JITCompiler::NonZero, op1GPR, GPRInfo::tagMaskRegister); - JITCompiler::Jump slowPath2 = m_jit.branchPtr( - JITCompiler::NotEqual, - JITCompiler::Address(op1GPR, JSCell::structureOffset()), - TrustedImmPtr(m_jit.vm()->stringStructure.get())); + JITCompiler::Jump slowPath1 = m_jit.branchIfNotCell(JSValueRegs(op1GPR)); + JITCompiler::Jump slowPath2 = m_jit.branchIfNotString(op1GPR); m_jit.move(op1GPR, resultGPR); done = m_jit.jump(); slowPath1.link(&m_jit); slowPath2.link(&m_jit); } - callOperation(operationToString, resultGPR, op1GPR); + if (op 
== ToString) + callOperation(operationToString, resultGPR, op1GPR); + else { + ASSERT(op == CallStringConstructor); + callOperation(operationCallStringConstructor, resultGPR, op1GPR); + } + m_jit.exceptionCheck(); if (done.isSet()) done.link(&m_jit); cellResult(resultGPR, node); break; } - compileToStringOnCell(node); + compileToStringOrCallStringConstructorOnCell(node); break; } @@ -3475,10 +3409,10 @@ void SpeculativeJIT::compile(Node* node) } case NewArray: { - JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin); - if (!globalObject->isHavingABadTime() && !hasArrayStorage(node->indexingType())) { + JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); + if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(node->indexingType())) { Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()); - RELEASE_ASSERT(structure->indexingType() == node->indexingType()); + DFG_ASSERT(m_jit.graph(), node, structure->indexingType() == node->indexingType()); ASSERT( hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) @@ -3511,7 +3445,7 @@ void SpeculativeJIT::compile(Node* node) SpeculateDoubleOperand operand(this, use); FPRReg opFPR = operand.fpr(); DFG_TYPE_CHECK( - JSValueRegs(), use, SpecFullRealNumber, + JSValueRegs(), use, SpecDoubleReal, m_jit.branchDouble( MacroAssembler::DoubleNotEqualOrUnordered, opFPR, opFPR)); m_jit.storeDouble(opFPR, MacroAssembler::Address(storageGPR, sizeof(double) * operandIdx)); @@ -3549,8 +3483,9 @@ void SpeculativeJIT::compile(Node* node) if (!node->numChildren()) { flushRegisters(); - GPRResult result(this); + GPRFlushedCallResult result(this); callOperation(operationNewEmptyArray, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())); + m_jit.exceptionCheck(); cellResult(result.gpr(), node); break; } @@ -3576,7 +3511,7 @@ void SpeculativeJIT::compile(Node* node) FPRReg opFPR = operand.fpr(); GPRReg scratchGPR = scratch.gpr(); DFG_TYPE_CHECK( - JSValueRegs(), use, SpecFullRealNumber, + JSValueRegs(), use, SpecDoubleReal, m_jit.branchDouble( MacroAssembler::DoubleNotEqualOrUnordered, opFPR, opFPR)); m_jit.boxDouble(opFPR, scratchGPR); @@ -3628,11 +3563,12 @@ void SpeculativeJIT::compile(Node* node) m_jit.storePtr(TrustedImmPtr(scratchSize), scratch.gpr()); } - GPRResult result(this); + GPRFlushedCallResult result(this); callOperation( operationNewArray, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()), static_cast<void*>(buffer), node->numChildren()); + m_jit.exceptionCheck(); if (scratchSize) { GPRTemporary scratch(this); @@ -3646,8 +3582,8 @@ void SpeculativeJIT::compile(Node* node) } case NewArrayWithSize: { - JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin); - if (!globalObject->isHavingABadTime() && !hasArrayStorage(node->indexingType())) { + JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); + if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(node->indexingType())) { SpeculateStrictInt32Operand size(this, node->child1()); GPRTemporary result(this); GPRTemporary storage(this); @@ -3661,7 +3597,7 @@ void SpeculativeJIT::compile(Node* node) GPRReg scratch2GPR = scratch2.gpr(); MacroAssembler::JumpList slowCases; - slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_SPARSE_ARRAY_INDEX))); + 
slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH))); ASSERT((1 << 3) == sizeof(JSValue)); m_jit.move(sizeGPR, scratchGPR); @@ -3671,13 +3607,13 @@ void SpeculativeJIT::compile(Node* node) emitAllocateBasicStorage(resultGPR, storageGPR)); m_jit.subPtr(scratchGPR, storageGPR); Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()); - emitAllocateJSObject<JSArray>(resultGPR, ImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases); + emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases); m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())); m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength())); if (hasDouble(node->indexingType())) { - m_jit.move(TrustedImm64(bitwise_cast<int64_t>(QNaN)), scratchGPR); + m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR); m_jit.move(sizeGPR, scratch2GPR); MacroAssembler::Jump done = m_jit.branchTest32(MacroAssembler::Zero, scratch2GPR); MacroAssembler::Label loop = m_jit.label(); @@ -3687,12 +3623,11 @@ void SpeculativeJIT::compile(Node* node) done.link(&m_jit); } - addSlowPathGenerator(adoptPtr( - new CallArrayAllocatorWithVariableSizeSlowPathGenerator( + addSlowPathGenerator(std::make_unique<CallArrayAllocatorWithVariableSizeSlowPathGenerator>( slowCases, this, operationNewArrayWithSize, resultGPR, globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()), globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage), - sizeGPR))); + sizeGPR)); cellResult(resultGPR, node); break; @@ -3701,24 +3636,25 @@ void SpeculativeJIT::compile(Node* node) SpeculateStrictInt32Operand size(this, node->child1()); GPRReg sizeGPR = size.gpr(); flushRegisters(); - GPRResult result(this); + GPRFlushedCallResult result(this); GPRReg resultGPR = result.gpr(); GPRReg structureGPR = selectScratchGPR(sizeGPR); - MacroAssembler::Jump bigLength = m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_SPARSE_ARRAY_INDEX)); + MacroAssembler::Jump bigLength = m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)); m_jit.move(TrustedImmPtr(globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())), structureGPR); MacroAssembler::Jump done = m_jit.jump(); bigLength.link(&m_jit); m_jit.move(TrustedImmPtr(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage)), structureGPR); done.link(&m_jit); callOperation(operationNewArrayWithSize, resultGPR, structureGPR, sizeGPR); + m_jit.exceptionCheck(); cellResult(resultGPR, node); break; } case NewArrayBuffer: { - JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin); + JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); IndexingType indexingType = node->indexingType(); - if (!globalObject->isHavingABadTime() && !hasArrayStorage(indexingType)) { + if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(indexingType)) { unsigned numElements = node->numConstants(); GPRTemporary result(this); @@ -3729,7 +3665,7 @@ void SpeculativeJIT::compile(Node* node) emitAllocateJSArray(resultGPR, globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingType), storageGPR, numElements); - RELEASE_ASSERT(indexingType & IsArray); + 
DFG_ASSERT(m_jit.graph(), node, indexingType & IsArray); JSValue* data = m_jit.codeBlock()->constantBuffer(node->startConstant()); if (indexingType == ArrayWithDouble) { for (unsigned index = 0; index < node->numConstants(); ++index) { @@ -3751,9 +3687,10 @@ void SpeculativeJIT::compile(Node* node) } flushRegisters(); - GPRResult result(this); + GPRFlushedCallResult result(this); callOperation(operationNewArrayBuffer, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()), node->startConstant(), node->numConstants()); + m_jit.exceptionCheck(); cellResult(result.gpr(), node); break; @@ -3770,20 +3707,21 @@ void SpeculativeJIT::compile(Node* node) flushRegisters(); - GPRResult result(this); + GPRFlushedCallResult result(this); GPRReg resultGPR = result.gpr(); - JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin); + JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); callOperation( operationNewTypedArrayWithOneArgumentForType(node->typedArrayType()), resultGPR, globalObject->typedArrayStructure(node->typedArrayType()), argumentGPR); + m_jit.exceptionCheck(); cellResult(resultGPR, node); break; } default: - RELEASE_ASSERT_NOT_REACHED(); + DFG_CRASH(m_jit.graph(), node, "Bad use kind"); break; } break; @@ -3791,9 +3729,13 @@ void SpeculativeJIT::compile(Node* node) case NewRegexp: { flushRegisters(); - GPRResult result(this); + GPRFlushedCallResult result(this); + // FIXME: We really should be able to inline code that uses NewRegexp. That means not + // reaching into the CodeBlock here. + // https://bugs.webkit.org/show_bug.cgi?id=154808 callOperation(operationNewRegexp, result.gpr(), m_jit.codeBlock()->regexp(node->regexpIndex())); + m_jit.exceptionCheck(); cellResult(result.gpr(), node); break; @@ -3807,17 +3749,14 @@ void SpeculativeJIT::compile(Node* node) GPRReg tempGPR = temp.gpr(); MacroAssembler::JumpList slowCases; - slowCases.append(m_jit.branchTest64( - MacroAssembler::NonZero, thisValueGPR, GPRInfo::tagMaskRegister)); - m_jit.loadPtr( - MacroAssembler::Address(thisValueGPR, JSCell::structureOffset()), tempGPR); + slowCases.append(m_jit.branchIfNotCell(JSValueRegs(thisValueGPR))); slowCases.append(m_jit.branch8( MacroAssembler::NotEqual, - MacroAssembler::Address(tempGPR, Structure::typeInfoTypeOffset()), + MacroAssembler::Address(thisValueGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(FinalObjectType))); m_jit.move(thisValueGPR, tempGPR); J_JITOperation_EJ function; - if (m_jit.graph().executableFor(node->codeOrigin)->isStrictMode()) + if (m_jit.graph().executableFor(node->origin.semantic)->isStrictMode()) function = operationToThisStrict; else function = operationToThis; @@ -3846,11 +3785,16 @@ void SpeculativeJIT::compile(Node* node) GPRReg allocatorGPR = allocator.gpr(); GPRReg structureGPR = structure.gpr(); GPRReg scratchGPR = scratch.gpr(); + // Rare data is only used to access the allocator & structure + // We can avoid using an additional GPR this way + GPRReg rareDataGPR = structureGPR; MacroAssembler::JumpList slowPath; - m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR); - m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureGPR); + m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfRareData()), rareDataGPR); + slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, 
rareDataGPR)); + m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR); + m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureGPR); slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, allocatorGPR)); emitAllocateJSObject(resultGPR, allocatorGPR, structureGPR, TrustedImmPtr(0), scratchGPR, slowPath); @@ -3860,12 +3804,6 @@ void SpeculativeJIT::compile(Node* node) break; } - case AllocationProfileWatchpoint: - case TypedArrayWatchpoint: { - noResult(node); - break; - } - case NewObject: { GPRTemporary result(this); GPRTemporary allocator(this); @@ -3897,85 +3835,44 @@ void SpeculativeJIT::compile(Node* node) break; } - case GetScope: { - SpeculateCellOperand function(this, node->child1()); - GPRTemporary result(this, Reuse, function); - m_jit.loadPtr(JITCompiler::Address(function.gpr(), JSFunction::offsetOfScopeChain()), result.gpr()); - cellResult(result.gpr(), node); + case GetArgumentCount: { + GPRTemporary result(this); + m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), result.gpr()); + int32Result(result.gpr(), node); break; } - - case GetMyScope: { - GPRTemporary result(this); - GPRReg resultGPR = result.gpr(); - m_jit.loadPtr(JITCompiler::addressFor(JSStack::ScopeChain), resultGPR); - cellResult(resultGPR, node); + case GetRestLength: { + compileGetRestLength(node); break; } - case SkipTopScope: { - SpeculateCellOperand scope(this, node->child1()); - GPRTemporary result(this, Reuse, scope); - GPRReg resultGPR = result.gpr(); - m_jit.move(scope.gpr(), resultGPR); - JITCompiler::Jump activationNotCreated = - m_jit.branchTest64( - JITCompiler::Zero, - JITCompiler::addressFor( - static_cast<VirtualRegister>(m_jit.graph().machineActivationRegister()))); - m_jit.loadPtr(JITCompiler::Address(resultGPR, JSScope::offsetOfNext()), resultGPR); - activationNotCreated.link(&m_jit); - cellResult(resultGPR, node); + case GetScope: + compileGetScope(node); break; - } - - case SkipScope: { - SpeculateCellOperand scope(this, node->child1()); - GPRTemporary result(this, Reuse, scope); - m_jit.loadPtr(JITCompiler::Address(scope.gpr(), JSScope::offsetOfNext()), result.gpr()); - cellResult(result.gpr(), node); + + case SkipScope: + compileSkipScope(node); break; - } - - case GetClosureRegisters: { - if (WriteBarrierBase<Unknown>* registers = m_jit.graph().tryGetRegisters(node->child1().node())) { - GPRTemporary result(this); - GPRReg resultGPR = result.gpr(); - m_jit.move(TrustedImmPtr(registers), resultGPR); - storageResult(resultGPR, node); - break; - } - SpeculateCellOperand scope(this, node->child1()); - GPRTemporary result(this); - GPRReg scopeGPR = scope.gpr(); - GPRReg resultGPR = result.gpr(); - - m_jit.loadPtr(JITCompiler::Address(scopeGPR, JSVariableObject::offsetOfRegisters()), resultGPR); - storageResult(resultGPR, node); - break; - } case GetClosureVar: { - StorageOperand registers(this, node->child1()); + SpeculateCellOperand base(this, node->child1()); GPRTemporary result(this); - GPRReg registersGPR = registers.gpr(); + GPRReg baseGPR = base.gpr(); GPRReg resultGPR = result.gpr(); - m_jit.load64(JITCompiler::Address(registersGPR, node->varNumber() * sizeof(Register)), resultGPR); + m_jit.load64(JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset())), resultGPR); jsValueResult(resultGPR, node); break; } case PutClosureVar: { - 
StorageOperand registers(this, node->child2()); - JSValueOperand value(this, node->child3()); + SpeculateCellOperand base(this, node->child1()); + JSValueOperand value(this, node->child2()); - GPRReg registersGPR = registers.gpr(); + GPRReg baseGPR = base.gpr(); GPRReg valueGPR = value.gpr(); - speculate(node, node->child1()); - - m_jit.store64(valueGPR, JITCompiler::Address(registersGPR, node->varNumber() * sizeof(Register))); + m_jit.store64(valueGPR, JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset()))); noResult(node); break; } @@ -3992,7 +3889,7 @@ void SpeculativeJIT::compile(Node* node) base.use(); - cachedGetById(node->codeOrigin, baseGPR, resultGPR, node->identifierNumber()); + cachedGetById(node->origin.semantic, baseGPR, resultGPR, node->identifierNumber()); jsValueResult(resultGPR, node, UseChildrenCalledExplicitly); break; @@ -4007,16 +3904,16 @@ void SpeculativeJIT::compile(Node* node) base.use(); - JITCompiler::Jump notCell = m_jit.branchTest64(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister); + JITCompiler::Jump notCell = m_jit.branchIfNotCell(JSValueRegs(baseGPR)); - cachedGetById(node->codeOrigin, baseGPR, resultGPR, node->identifierNumber(), notCell); + cachedGetById(node->origin.semantic, baseGPR, resultGPR, node->identifierNumber(), notCell); jsValueResult(resultGPR, node, UseChildrenCalledExplicitly); break; } default: - RELEASE_ASSERT_NOT_REACHED(); + DFG_CRASH(m_jit.graph(), node, "Bad use kind"); break; } break; @@ -4033,7 +3930,7 @@ void SpeculativeJIT::compile(Node* node) SpeculateCellOperand base(this, node->child1()); GPRReg baseGPR = base.gpr(); - GPRResult result(this); + GPRFlushedCallResult result(this); GPRReg resultGPR = result.gpr(); @@ -4041,7 +3938,7 @@ void SpeculativeJIT::compile(Node* node) flushRegisters(); - cachedGetById(node->codeOrigin, baseGPR, resultGPR, node->identifierNumber(), JITCompiler::Jump(), DontSpill); + cachedGetById(node->origin.semantic, baseGPR, resultGPR, node->identifierNumber(), JITCompiler::Jump(), DontSpill); jsValueResult(resultGPR, node, UseChildrenCalledExplicitly); break; @@ -4051,22 +3948,22 @@ void SpeculativeJIT::compile(Node* node) JSValueOperand base(this, node->child1()); GPRReg baseGPR = base.gpr(); - GPRResult result(this); + GPRFlushedCallResult result(this); GPRReg resultGPR = result.gpr(); base.use(); flushRegisters(); - JITCompiler::Jump notCell = m_jit.branchTest64(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister); + JITCompiler::Jump notCell = m_jit.branchIfNotCell(JSValueRegs(baseGPR)); - cachedGetById(node->codeOrigin, baseGPR, resultGPR, node->identifierNumber(), notCell, DontSpill); + cachedGetById(node->origin.semantic, baseGPR, resultGPR, node->identifierNumber(), notCell, DontSpill); jsValueResult(resultGPR, node, UseChildrenCalledExplicitly); break; } default: - RELEASE_ASSERT_NOT_REACHED(); + DFG_CRASH(m_jit.graph(), node, "Bad use kind"); break; } break; @@ -4076,99 +3973,54 @@ void SpeculativeJIT::compile(Node* node) compileGetArrayLength(node); break; - case CheckFunction: { - SpeculateCellOperand function(this, node->child1()); - speculationCheck(BadFunction, JSValueSource::unboxedCell(function.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, function.gpr(), node->function())); + case CheckCell: { + SpeculateCellOperand cell(this, node->child1()); + speculationCheck(BadCell, JSValueSource::unboxedCell(cell.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, cell.gpr(), node->cellOperand()->cell())); 
noResult(node); break; } - - case CheckExecutable: { - SpeculateCellOperand function(this, node->child1()); - speculationCheck(BadExecutable, JSValueSource::unboxedCell(function.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, JITCompiler::Address(function.gpr(), JSFunction::offsetOfExecutable()), node->executable())); + + case CheckNotEmpty: { + JSValueOperand operand(this, node->child1()); + GPRReg gpr = operand.gpr(); + speculationCheck(TDZFailure, JSValueSource(), nullptr, m_jit.branchTest64(JITCompiler::Zero, gpr)); noResult(node); break; } - - case CheckStructure: { - SpeculateCellOperand base(this, node->child1()); - - ASSERT(node->structureSet().size()); - - ExitKind exitKind; - if (node->child1()->op() == WeakJSConstant) - exitKind = BadWeakConstantCache; - else - exitKind = BadCache; - - if (node->structureSet().size() == 1) { - speculationCheck( - exitKind, JSValueSource::unboxedCell(base.gpr()), 0, - m_jit.branchWeakPtr( - JITCompiler::NotEqual, - JITCompiler::Address(base.gpr(), JSCell::structureOffset()), - node->structureSet()[0])); - } else { - GPRTemporary structure(this); - - m_jit.loadPtr(JITCompiler::Address(base.gpr(), JSCell::structureOffset()), structure.gpr()); - - JITCompiler::JumpList done; - - for (size_t i = 0; i < node->structureSet().size() - 1; ++i) - done.append(m_jit.branchWeakPtr(JITCompiler::Equal, structure.gpr(), node->structureSet()[i])); - - speculationCheck( - exitKind, JSValueSource::unboxedCell(base.gpr()), 0, - m_jit.branchWeakPtr( - JITCompiler::NotEqual, structure.gpr(), node->structureSet().last())); - - done.link(&m_jit); - } - - noResult(node); + + case CheckIdent: + compileCheckIdent(node); break; - } - - case StructureTransitionWatchpoint: { - // There is a fascinating question here of what to do about array profiling. - // We *could* try to tell the OSR exit about where the base of the access is. - // The DFG will have kept it alive, though it may not be in a register, and - // we shouldn't really load it since that could be a waste. For now though, - // we'll just rely on the fact that when a watchpoint fires then that's - // quite a hint already. 
- - m_jit.addWeakReference(node->structure()); -#if !ASSERT_DISABLED - SpeculateCellOperand op1(this, node->child1()); - JITCompiler::Jump isOK = m_jit.branchPtr(JITCompiler::Equal, JITCompiler::Address(op1.gpr(), JSCell::structureOffset()), TrustedImmPtr(node->structure())); - m_jit.breakpoint(); - isOK.link(&m_jit); -#else - speculateCell(node->child1()); -#endif - - noResult(node); + case GetExecutable: { + SpeculateCellOperand function(this, node->child1()); + GPRTemporary result(this, Reuse, function); + GPRReg functionGPR = function.gpr(); + GPRReg resultGPR = result.gpr(); + speculateCellType(node->child1(), functionGPR, SpecFunction, JSFunctionType); + m_jit.loadPtr(JITCompiler::Address(functionGPR, JSFunction::offsetOfExecutable()), resultGPR); + cellResult(resultGPR, node); break; } - case PhantomPutStructure: { - ASSERT(isKnownCell(node->child1().node())); - m_jit.jitCode()->common.notifyCompilingStructureTransition(m_jit.graph().m_plan, m_jit.codeBlock(), node); - noResult(node); + case CheckStructure: { + compileCheckStructure(node); break; } case PutStructure: { + Structure* oldStructure = node->transition()->previous; + Structure* newStructure = node->transition()->next; + m_jit.jitCode()->common.notifyCompilingStructureTransition(m_jit.graph().m_plan, m_jit.codeBlock(), node); SpeculateCellOperand base(this, node->child1()); - GPRTemporary scratch1(this); - GPRTemporary scratch2(this); GPRReg baseGPR = base.gpr(); - m_jit.storePtr(MacroAssembler::TrustedImmPtr(node->structureTransitionData().newStructure), MacroAssembler::Address(baseGPR, JSCell::structureOffset())); + ASSERT_UNUSED(oldStructure, oldStructure->indexingType() == newStructure->indexingType()); + ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type()); + ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags()); + m_jit.store32(MacroAssembler::TrustedImm32(newStructure->id()), MacroAssembler::Address(baseGPR, JSCell::structureIDOffset())); noResult(node); break; @@ -4182,18 +4034,10 @@ void SpeculativeJIT::compile(Node* node) compileReallocatePropertyStorage(node); break; - case GetButterfly: { - SpeculateCellOperand base(this, node->child1()); - GPRTemporary result(this, Reuse, base); - - GPRReg baseGPR = base.gpr(); - GPRReg resultGPR = result.gpr(); - - m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR); - - storageResult(resultGPR, node); + case GetButterfly: + case GetButterflyReadOnly: + compileGetButterfly(node); break; - } case GetIndexedPropertyStorage: { compileGetIndexedPropertyStorage(node); @@ -4210,14 +4054,15 @@ void SpeculativeJIT::compile(Node* node) break; } - case GetByOffset: { + case GetByOffset: + case GetGetterSetterByOffset: { StorageOperand storage(this, node->child1()); GPRTemporary result(this, Reuse, storage); GPRReg storageGPR = storage.gpr(); GPRReg resultGPR = result.gpr(); - StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node->storageAccessDataIndex()]; + StorageAccessData& storageAccessData = node->storageAccessData(); m_jit.load64(JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset)), resultGPR); @@ -4225,24 +4070,64 @@ void SpeculativeJIT::compile(Node* node) break; } + case GetGetter: { + SpeculateCellOperand op1(this, node->child1()); + GPRTemporary result(this, Reuse, op1); + + GPRReg op1GPR = op1.gpr(); + GPRReg resultGPR = result.gpr(); + + m_jit.loadPtr(JITCompiler::Address(op1GPR, GetterSetter::offsetOfGetter()), resultGPR); + 
+ cellResult(resultGPR, node); + break; + } + + case GetSetter: { + SpeculateCellOperand op1(this, node->child1()); + GPRTemporary result(this, Reuse, op1); + + GPRReg op1GPR = op1.gpr(); + GPRReg resultGPR = result.gpr(); + + m_jit.loadPtr(JITCompiler::Address(op1GPR, GetterSetter::offsetOfSetter()), resultGPR); + + cellResult(resultGPR, node); + break; + } + case PutByOffset: { StorageOperand storage(this, node->child1()); JSValueOperand value(this, node->child3()); - GPRTemporary scratch1(this); - GPRTemporary scratch2(this); GPRReg storageGPR = storage.gpr(); GPRReg valueGPR = value.gpr(); speculate(node, node->child2()); - StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node->storageAccessDataIndex()]; + StorageAccessData& storageAccessData = node->storageAccessData(); m_jit.store64(valueGPR, JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset))); noResult(node); break; } + + case PutByIdFlush: { + SpeculateCellOperand base(this, node->child1()); + JSValueOperand value(this, node->child2()); + GPRTemporary scratch(this); + + GPRReg baseGPR = base.gpr(); + GPRReg valueGPR = value.gpr(); + GPRReg scratchGPR = scratch.gpr(); + flushRegisters(); + + cachedPutById(node->origin.semantic, baseGPR, valueGPR, scratchGPR, node->identifierNumber(), NotDirect, MacroAssembler::Jump(), DontSpill); + + noResult(node); + break; + } case PutById: { SpeculateCellOperand base(this, node->child1()); @@ -4253,7 +4138,7 @@ void SpeculativeJIT::compile(Node* node) GPRReg valueGPR = value.gpr(); GPRReg scratchGPR = scratch.gpr(); - cachedPutById(node->codeOrigin, baseGPR, valueGPR, scratchGPR, node->identifierNumber(), NotDirect); + cachedPutById(node->origin.semantic, baseGPR, valueGPR, scratchGPR, node->identifierNumber(), NotDirect); noResult(node); break; @@ -4268,90 +4153,94 @@ void SpeculativeJIT::compile(Node* node) GPRReg valueGPR = value.gpr(); GPRReg scratchGPR = scratch.gpr(); - cachedPutById(node->codeOrigin, baseGPR, valueGPR, scratchGPR, node->identifierNumber(), Direct); + cachedPutById(node->origin.semantic, baseGPR, valueGPR, scratchGPR, node->identifierNumber(), Direct); noResult(node); break; } + case PutGetterById: + case PutSetterById: { + compilePutAccessorById(node); + break; + } + + case PutGetterSetterById: { + compilePutGetterSetterById(node); + break; + } + + case PutGetterByVal: + case PutSetterByVal: { + compilePutAccessorByVal(node); + break; + } + + case GetGlobalLexicalVariable: case GetGlobalVar: { GPRTemporary result(this); - m_jit.load64(node->registerPointer(), result.gpr()); + m_jit.load64(node->variablePointer(), result.gpr()); jsValueResult(result.gpr(), node); break; } - case PutGlobalVar: { - JSValueOperand value(this, node->child1()); + case PutGlobalVariable: { + JSValueOperand value(this, node->child2()); - m_jit.store64(value.gpr(), node->registerPointer()); + m_jit.store64(value.gpr(), node->variablePointer()); noResult(node); break; } case NotifyWrite: { - VariableWatchpointSet* set = node->variableWatchpointSet(); - - JSValueOperand value(this, node->child1()); - GPRReg valueGPR = value.gpr(); - - GPRTemporary temp(this); - GPRReg tempGPR = temp.gpr(); - - m_jit.load8(set->addressOfState(), tempGPR); - - JITCompiler::JumpList ready; - - ready.append(m_jit.branch32(JITCompiler::Equal, tempGPR, TrustedImm32(IsInvalidated))); - - if (set->state() == ClearWatchpoint) { - JITCompiler::Jump isWatched = - m_jit.branch32(JITCompiler::NotEqual, tempGPR, TrustedImm32(ClearWatchpoint)); - - m_jit.store64(valueGPR, 
set->addressOfInferredValue()); - m_jit.store8(TrustedImm32(IsWatched), set->addressOfState()); - ready.append(m_jit.jump()); - - isWatched.link(&m_jit); - } - - ready.append(m_jit.branch64( - JITCompiler::Equal, - JITCompiler::AbsoluteAddress(set->addressOfInferredValue()), valueGPR)); - - JITCompiler::Jump slowCase = m_jit.branchTest8( - JITCompiler::NonZero, JITCompiler::AbsoluteAddress(set->addressOfSetIsNotEmpty())); - m_jit.store8(TrustedImm32(IsInvalidated), set->addressOfState()); - m_jit.move(TrustedImm64(JSValue::encode(JSValue())), tempGPR); - m_jit.store64(tempGPR, set->addressOfInferredValue()); + compileNotifyWrite(node); + break; + } - ready.link(&m_jit); - - addSlowPathGenerator( - slowPathCall(slowCase, this, operationInvalidate, NoResult, set)); - + case VarInjectionWatchpoint: { noResult(node); break; } - case VarInjectionWatchpoint: - case VariableWatchpoint: { - noResult(node); + case CheckTypeInfoFlags: { + compileCheckTypeInfoFlags(node); break; } - case CheckHasInstance: { + case OverridesHasInstance: { + + Node* hasInstanceValueNode = node->child2().node(); + JSFunction* defaultHasInstanceFunction = jsCast<JSFunction*>(node->cellOperand()->value()); + + MacroAssembler::Jump notDefault; SpeculateCellOperand base(this, node->child1()); - GPRTemporary structure(this); + JSValueOperand hasInstanceValue(this, node->child2()); + GPRTemporary result(this); + + GPRReg resultGPR = result.gpr(); - // Speculate that base 'ImplementsDefaultHasInstance'. - m_jit.loadPtr(MacroAssembler::Address(base.gpr(), JSCell::structureOffset()), structure.gpr()); - speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(structure.gpr(), Structure::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance))); + // If we have proven that the constructor's Symbol.hasInstance will always be the one on Function.prototype[Symbol.hasInstance] + // then we don't need a runtime check here. We don't worry about the case where the constructor's Symbol.hasInstance is a constant + // but is not the default one as fixup should have converted this check to true. + ASSERT(!hasInstanceValueNode->isCellConstant() || defaultHasInstanceFunction == hasInstanceValueNode->asCell()); + if (!hasInstanceValueNode->isCellConstant()) + notDefault = m_jit.branchPtr(MacroAssembler::NotEqual, hasInstanceValue.gpr(), TrustedImmPtr(defaultHasInstanceFunction)); - noResult(node); + // Check that base 'ImplementsDefaultHasInstance'. 
+ m_jit.test8(MacroAssembler::Zero, MacroAssembler::Address(base.gpr(), JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance), resultGPR); + m_jit.or32(TrustedImm32(ValueFalse), resultGPR); + MacroAssembler::Jump done = m_jit.jump(); + + if (notDefault.isSet()) { + notDefault.link(&m_jit); + moveTrueTo(resultGPR); + } + + done.link(&m_jit); + jsValueResult(resultGPR, node, DataFormatJSBoolean); break; } @@ -4359,14 +4248,20 @@ void SpeculativeJIT::compile(Node* node) compileInstanceOf(node); break; } + + case InstanceOfCustom: { + compileInstanceOfCustom(node); + break; + } case IsUndefined: { JSValueOperand value(this, node->child1()); GPRTemporary result(this); GPRTemporary localGlobalObject(this); GPRTemporary remoteGlobalObject(this); + GPRTemporary scratch(this); - JITCompiler::Jump isCell = m_jit.branchTest64(JITCompiler::Zero, value.gpr(), GPRInfo::tagMaskRegister); + JITCompiler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs()); m_jit.compare64(JITCompiler::Equal, value.gpr(), TrustedImm32(ValueUndefined), result.gpr()); JITCompiler::Jump done = m_jit.jump(); @@ -4377,15 +4272,18 @@ void SpeculativeJIT::compile(Node* node) m_jit.move(TrustedImm32(0), result.gpr()); notMasqueradesAsUndefined = m_jit.jump(); } else { - m_jit.loadPtr(JITCompiler::Address(value.gpr(), JSCell::structureOffset()), result.gpr()); - JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::NonZero, JITCompiler::Address(result.gpr(), Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); + JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8( + JITCompiler::NonZero, + JITCompiler::Address(value.gpr(), JSCell::typeInfoFlagsOffset()), + TrustedImm32(MasqueradesAsUndefined)); m_jit.move(TrustedImm32(0), result.gpr()); notMasqueradesAsUndefined = m_jit.jump(); isMasqueradesAsUndefined.link(&m_jit); GPRReg localGlobalObjectGPR = localGlobalObject.gpr(); GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr(); - m_jit.move(TrustedImmPtr(m_jit.globalObjectFor(node->codeOrigin)), localGlobalObjectGPR); + m_jit.move(TrustedImmPtr(m_jit.globalObjectFor(node->origin.semantic)), localGlobalObjectGPR); + m_jit.emitLoadStructure(value.gpr(), result.gpr(), scratch.gpr()); m_jit.loadPtr(JITCompiler::Address(result.gpr(), Structure::globalObjectOffset()), remoteGlobalObjectGPR); m_jit.comparePtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, result.gpr()); } @@ -4423,10 +4321,12 @@ void SpeculativeJIT::compile(Node* node) JSValueOperand value(this, node->child1()); GPRTemporary result(this, Reuse, value); - JITCompiler::Jump isNotCell = m_jit.branchTest64(JITCompiler::NonZero, value.gpr(), GPRInfo::tagMaskRegister); + JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(value.jsValueRegs()); - m_jit.loadPtr(JITCompiler::Address(value.gpr(), JSCell::structureOffset()), result.gpr()); - m_jit.compare8(JITCompiler::Equal, JITCompiler::Address(result.gpr(), Structure::typeInfoTypeOffset()), TrustedImm32(StringType), result.gpr()); + m_jit.compare8(JITCompiler::Equal, + JITCompiler::Address(value.gpr(), JSCell::typeInfoTypeOffset()), + TrustedImm32(StringType), + result.gpr()); m_jit.or32(TrustedImm32(ValueFalse), result.gpr()); JITCompiler::Jump done = m_jit.jump(); @@ -4437,87 +4337,40 @@ void SpeculativeJIT::compile(Node* node) jsValueResult(result.gpr(), node, DataFormatJSBoolean); break; } - + case IsObject: { JSValueOperand value(this, node->child1()); - GPRReg valueGPR = value.gpr(); - GPRResult result(this); - GPRReg 
resultGPR = result.gpr(); - flushRegisters(); - callOperation(operationIsObject, resultGPR, valueGPR); - m_jit.or32(TrustedImm32(ValueFalse), resultGPR); + GPRTemporary result(this, Reuse, value); + + JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(value.jsValueRegs()); + + m_jit.compare8(JITCompiler::AboveOrEqual, + JITCompiler::Address(value.gpr(), JSCell::typeInfoTypeOffset()), + TrustedImm32(ObjectType), + result.gpr()); + m_jit.or32(TrustedImm32(ValueFalse), result.gpr()); + JITCompiler::Jump done = m_jit.jump(); + + isNotCell.link(&m_jit); + m_jit.move(TrustedImm32(ValueFalse), result.gpr()); + + done.link(&m_jit); jsValueResult(result.gpr(), node, DataFormatJSBoolean); break; } + case IsObjectOrNull: { + compileIsObjectOrNull(node); + break; + } + case IsFunction: { - JSValueOperand value(this, node->child1()); - GPRReg valueGPR = value.gpr(); - GPRResult result(this); - GPRReg resultGPR = result.gpr(); - flushRegisters(); - callOperation(operationIsFunction, resultGPR, valueGPR); - m_jit.or32(TrustedImm32(ValueFalse), resultGPR); - jsValueResult(result.gpr(), node, DataFormatJSBoolean); + compileIsFunction(node); break; } case TypeOf: { - JSValueOperand value(this, node->child1(), ManualOperandSpeculation); - GPRReg valueGPR = value.gpr(); - GPRTemporary temp(this); - GPRReg tempGPR = temp.gpr(); - GPRResult result(this); - GPRReg resultGPR = result.gpr(); - JITCompiler::JumpList doneJumps; - - flushRegisters(); - - ASSERT(node->child1().useKind() == UntypedUse || node->child1().useKind() == CellUse || node->child1().useKind() == StringUse); - - JITCompiler::Jump isNotCell = m_jit.branchTest64(JITCompiler::NonZero, valueGPR, GPRInfo::tagMaskRegister); - if (node->child1().useKind() != UntypedUse) - DFG_TYPE_CHECK(JSValueSource(valueGPR), node->child1(), SpecCell, isNotCell); - - if (!node->child1()->shouldSpeculateObject() || node->child1().useKind() == StringUse) { - m_jit.loadPtr(JITCompiler::Address(valueGPR, JSCell::structureOffset()), tempGPR); - JITCompiler::Jump notString = m_jit.branch8(JITCompiler::NotEqual, JITCompiler::Address(tempGPR, Structure::typeInfoTypeOffset()), TrustedImm32(StringType)); - if (node->child1().useKind() == StringUse) - DFG_TYPE_CHECK(JSValueSource(valueGPR), node->child1(), SpecString, notString); - m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.stringString()), resultGPR); - doneJumps.append(m_jit.jump()); - if (node->child1().useKind() != StringUse) { - notString.link(&m_jit); - callOperation(operationTypeOf, resultGPR, valueGPR); - doneJumps.append(m_jit.jump()); - } - } else { - callOperation(operationTypeOf, resultGPR, valueGPR); - doneJumps.append(m_jit.jump()); - } - - if (node->child1().useKind() == UntypedUse) { - isNotCell.link(&m_jit); - JITCompiler::Jump notNumber = m_jit.branchTest64(JITCompiler::Zero, valueGPR, GPRInfo::tagTypeNumberRegister); - m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.numberString()), resultGPR); - doneJumps.append(m_jit.jump()); - notNumber.link(&m_jit); - - JITCompiler::Jump notUndefined = m_jit.branch64(JITCompiler::NotEqual, valueGPR, JITCompiler::TrustedImm64(ValueUndefined)); - m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.undefinedString()), resultGPR); - doneJumps.append(m_jit.jump()); - notUndefined.link(&m_jit); - - JITCompiler::Jump notNull = m_jit.branch64(JITCompiler::NotEqual, valueGPR, JITCompiler::TrustedImm64(ValueNull)); - m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.objectString()), resultGPR); - doneJumps.append(m_jit.jump()); - notNull.link(&m_jit); - - // Only boolean 
left - m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.booleanString()), resultGPR); - } - doneJumps.link(&m_jit); - cellResult(resultGPR, node); + compileTypeOf(node); break; } @@ -4525,419 +4378,476 @@ void SpeculativeJIT::compile(Node* node) break; case Call: + case TailCall: + case TailCallInlinedCaller: case Construct: + case CallVarargs: + case TailCallVarargs: + case TailCallVarargsInlinedCaller: + case CallForwardVarargs: + case ConstructVarargs: + case ConstructForwardVarargs: + case TailCallForwardVarargs: + case TailCallForwardVarargsInlinedCaller: emitCall(node); break; - case CreateActivation: { - RELEASE_ASSERT(!node->codeOrigin.inlineCallFrame); + case LoadVarargs: { + LoadVarargsData* data = node->loadVarargsData(); - JSValueOperand value(this, node->child1()); - GPRTemporary result(this, Reuse, value); + GPRReg argumentsGPR; + { + JSValueOperand arguments(this, node->child1()); + argumentsGPR = arguments.gpr(); + flushRegisters(); + } - GPRReg valueGPR = value.gpr(); - GPRReg resultGPR = result.gpr(); + callOperation(operationSizeOfVarargs, GPRInfo::returnValueGPR, argumentsGPR, data->offset); + m_jit.exceptionCheck(); - m_jit.move(valueGPR, resultGPR); + lock(GPRInfo::returnValueGPR); + { + JSValueOperand arguments(this, node->child1()); + argumentsGPR = arguments.gpr(); + flushRegisters(); + } + unlock(GPRInfo::returnValueGPR); - JITCompiler::Jump notCreated = m_jit.branchTest64(JITCompiler::Zero, resultGPR); + // FIXME: There is a chance that we will call an effectful length property twice. This is safe + // from the standpoint of the VM's integrity, but it's subtly wrong from a spec compliance + // standpoint. The best solution would be one where we can exit *into* the op_call_varargs right + // past the sizing. + // https://bugs.webkit.org/show_bug.cgi?id=141448 + + GPRReg argCountIncludingThisGPR = + JITCompiler::selectScratchGPR(GPRInfo::returnValueGPR, argumentsGPR); - addSlowPathGenerator( - slowPathCall( - notCreated, this, operationCreateActivation, resultGPR, - framePointerOffsetToGetActivationRegisters())); + m_jit.add32(TrustedImm32(1), GPRInfo::returnValueGPR, argCountIncludingThisGPR); + speculationCheck( + VarargsOverflow, JSValueSource(), Edge(), m_jit.branch32( + MacroAssembler::Above, + argCountIncludingThisGPR, + TrustedImm32(data->limit))); - cellResult(resultGPR, node); + m_jit.store32(argCountIncludingThisGPR, JITCompiler::payloadFor(data->machineCount)); + + callOperation(operationLoadVarargs, data->machineStart.offset(), argumentsGPR, data->offset, GPRInfo::returnValueGPR, data->mandatoryMinimum); + m_jit.exceptionCheck(); + + noResult(node); break; } - case FunctionReentryWatchpoint: { - noResult(node); + case ForwardVarargs: { + compileForwardVarargs(node); break; } - case CreateArguments: { - JSValueOperand value(this, node->child1()); - GPRTemporary result(this, Reuse, value); + case CreateActivation: { + compileCreateActivation(node); + break; + } - GPRReg valueGPR = value.gpr(); - GPRReg resultGPR = result.gpr(); + case CreateDirectArguments: { + compileCreateDirectArguments(node); + break; + } - m_jit.move(valueGPR, resultGPR); + case GetFromArguments: { + compileGetFromArguments(node); + break; + } - JITCompiler::Jump notCreated = m_jit.branchTest64(JITCompiler::Zero, resultGPR); + case PutToArguments: { + compilePutToArguments(node); + break; + } - if (node->codeOrigin.inlineCallFrame) { - addSlowPathGenerator( - slowPathCall( - notCreated, this, operationCreateInlinedArguments, resultGPR, - node->codeOrigin.inlineCallFrame)); - } else 
{ - addSlowPathGenerator( - slowPathCall(notCreated, this, operationCreateArguments, resultGPR)); - } + case CreateScopedArguments: { + compileCreateScopedArguments(node); + break; + } - cellResult(resultGPR, node); + case CreateClonedArguments: { + compileCreateClonedArguments(node); + break; + } + case CopyRest: { + compileCopyRest(node); break; } - case TearOffActivation: { - RELEASE_ASSERT(!node->codeOrigin.inlineCallFrame); - - JSValueOperand activationValue(this, node->child1()); - GPRTemporary scratch(this); - GPRReg activationValueGPR = activationValue.gpr(); - GPRReg scratchGPR = scratch.gpr(); + case NewFunction: + case NewArrowFunction: + case NewGeneratorFunction: + compileNewFunction(node); + break; - JITCompiler::Jump notCreated = m_jit.branchTest64(JITCompiler::Zero, activationValueGPR); - - SymbolTable* symbolTable = m_jit.symbolTableFor(node->codeOrigin); - int registersOffset = JSActivation::registersOffset(symbolTable); - - int bytecodeCaptureStart = symbolTable->captureStart(); - int machineCaptureStart = m_jit.graph().m_machineCaptureStart; - for (int i = symbolTable->captureCount(); i--;) { - m_jit.load64( - JITCompiler::Address( - GPRInfo::callFrameRegister, - (machineCaptureStart - i) * sizeof(Register)), - scratchGPR); - m_jit.store64( - scratchGPR, - JITCompiler::Address( - activationValueGPR, - registersOffset + (bytecodeCaptureStart - i) * sizeof(Register))); - } - m_jit.addPtr(TrustedImm32(registersOffset), activationValueGPR, scratchGPR); - m_jit.storePtr(scratchGPR, JITCompiler::Address(activationValueGPR, JSActivation::offsetOfRegisters())); + case In: + compileIn(node); + break; + + case CountExecution: + m_jit.add64(TrustedImm32(1), MacroAssembler::AbsoluteAddress(node->executionCounter()->address())); + break; - notCreated.link(&m_jit); - noResult(node); + case ForceOSRExit: { + terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); break; } + + case InvalidationPoint: + emitInvalidationPoint(node); + break; - case TearOffArguments: { - JSValueOperand unmodifiedArgumentsValue(this, node->child1()); - JSValueOperand activationValue(this, node->child2()); - GPRReg unmodifiedArgumentsValueGPR = unmodifiedArgumentsValue.gpr(); - GPRReg activationValueGPR = activationValue.gpr(); + case CheckWatchdogTimer: { + ASSERT(m_jit.vm()->watchdog()); + GPRTemporary unused(this); + GPRReg unusedGPR = unused.gpr(); - JITCompiler::Jump created = m_jit.branchTest64(JITCompiler::NonZero, unmodifiedArgumentsValueGPR); + JITCompiler::Jump timerDidFire = m_jit.branchTest8(JITCompiler::NonZero, + JITCompiler::AbsoluteAddress(m_jit.vm()->watchdog()->timerDidFireAddress())); - if (node->codeOrigin.inlineCallFrame) { - addSlowPathGenerator( - slowPathCall( - created, this, operationTearOffInlinedArguments, NoResult, - unmodifiedArgumentsValueGPR, activationValueGPR, node->codeOrigin.inlineCallFrame)); - } else { - addSlowPathGenerator( - slowPathCall( - created, this, operationTearOffArguments, NoResult, unmodifiedArgumentsValueGPR, activationValueGPR)); - } + addSlowPathGenerator(slowPathCall(timerDidFire, this, operationHandleWatchdogTimer, unusedGPR)); + break; + } + + case Phantom: + case Check: + DFG_NODE_DO_TO_CHILDREN(m_jit.graph(), node, speculate); + noResult(node); + break; + case Breakpoint: + case ProfileWillCall: + case ProfileDidCall: + case PhantomLocal: + case LoopHint: + // This is a no-op. 
noResult(node); break; + + case Unreachable: + DFG_CRASH(m_jit.graph(), node, "Unexpected Unreachable node"); + break; + + case StoreBarrier: { + compileStoreBarrier(node); + break; } - - case GetMyArgumentsLength: { - GPRTemporary result(this); + + case GetEnumerableLength: { + SpeculateCellOperand enumerator(this, node->child1()); + GPRFlushedCallResult result(this); GPRReg resultGPR = result.gpr(); - - if (!isEmptySpeculation( - m_state.variables().operand( - m_jit.graph().argumentsRegisterFor(node->codeOrigin)).m_type)) { - speculationCheck( - ArgumentsEscaped, JSValueRegs(), 0, - m_jit.branchTest64( - JITCompiler::NonZero, - JITCompiler::addressFor( - m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin)))); - } - - RELEASE_ASSERT(!node->codeOrigin.inlineCallFrame); - m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), resultGPR); - m_jit.sub32(TrustedImm32(1), resultGPR); + + m_jit.load32(MacroAssembler::Address(enumerator.gpr(), JSPropertyNameEnumerator::indexedLengthOffset()), resultGPR); int32Result(resultGPR, node); break; } - - case GetMyArgumentsLengthSafe: { + case HasGenericProperty: { + JSValueOperand base(this, node->child1()); + SpeculateCellOperand property(this, node->child2()); + GPRFlushedCallResult result(this); + GPRReg resultGPR = result.gpr(); + + flushRegisters(); + callOperation(operationHasGenericProperty, resultGPR, base.gpr(), property.gpr()); + m_jit.exceptionCheck(); + jsValueResult(resultGPR, node, DataFormatJSBoolean); + break; + } + case HasStructureProperty: { + JSValueOperand base(this, node->child1()); + SpeculateCellOperand property(this, node->child2()); + SpeculateCellOperand enumerator(this, node->child3()); GPRTemporary result(this); + + GPRReg baseGPR = base.gpr(); + GPRReg propertyGPR = property.gpr(); GPRReg resultGPR = result.gpr(); - - JITCompiler::Jump created = m_jit.branchTest64( - JITCompiler::NonZero, - JITCompiler::addressFor( - m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin))); - - if (node->codeOrigin.inlineCallFrame) { - m_jit.move( - Imm64(JSValue::encode(jsNumber(node->codeOrigin.inlineCallFrame->arguments.size() - 1))), - resultGPR); - } else { - m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), resultGPR); - m_jit.sub32(TrustedImm32(1), resultGPR); - m_jit.or64(GPRInfo::tagTypeNumberRegister, resultGPR); - } - - // FIXME: the slow path generator should perform a forward speculation that the - // result is an integer. For now we postpone the speculation by having this return - // a JSValue. 
- - addSlowPathGenerator( - slowPathCall( - created, this, operationGetArgumentsLength, resultGPR, - m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin).offset())); - - jsValueResult(resultGPR, node); + + m_jit.load32(MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()), resultGPR); + MacroAssembler::Jump wrongStructure = m_jit.branch32(MacroAssembler::NotEqual, + resultGPR, + MacroAssembler::Address(enumerator.gpr(), JSPropertyNameEnumerator::cachedStructureIDOffset())); + + moveTrueTo(resultGPR); + MacroAssembler::Jump done = m_jit.jump(); + + done.link(&m_jit); + + addSlowPathGenerator(slowPathCall(wrongStructure, this, operationHasGenericProperty, resultGPR, baseGPR, propertyGPR)); + jsValueResult(resultGPR, node, DataFormatJSBoolean); break; } - - case GetMyArgumentByVal: { - SpeculateStrictInt32Operand index(this, node->child1()); + case HasIndexedProperty: { + SpeculateCellOperand base(this, node->child1()); + SpeculateStrictInt32Operand index(this, node->child2()); GPRTemporary result(this); + + GPRReg baseGPR = base.gpr(); GPRReg indexGPR = index.gpr(); GPRReg resultGPR = result.gpr(); - if (!isEmptySpeculation( - m_state.variables().operand( - m_jit.graph().argumentsRegisterFor(node->codeOrigin)).m_type)) { - speculationCheck( - ArgumentsEscaped, JSValueRegs(), 0, - m_jit.branchTest64( - JITCompiler::NonZero, - JITCompiler::addressFor( - m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin)))); - } + MacroAssembler::JumpList slowCases; + ArrayMode mode = node->arrayMode(); + switch (mode.type()) { + case Array::Int32: + case Array::Contiguous: { + ASSERT(!!node->child3()); + StorageOperand storage(this, node->child3()); + GPRTemporary scratch(this); + + GPRReg storageGPR = storage.gpr(); + GPRReg scratchGPR = scratch.gpr(); - m_jit.add32(TrustedImm32(1), indexGPR, resultGPR); - if (node->codeOrigin.inlineCallFrame) { - speculationCheck( - Uncountable, JSValueRegs(), 0, - m_jit.branch32( - JITCompiler::AboveOrEqual, - resultGPR, - Imm32(node->codeOrigin.inlineCallFrame->arguments.size()))); - } else { - speculationCheck( - Uncountable, JSValueRegs(), 0, - m_jit.branch32( - JITCompiler::AboveOrEqual, - resultGPR, - JITCompiler::payloadFor(JSStack::ArgumentCount))); + MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())); + if (mode.isInBounds()) + speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds); + else + slowCases.append(outOfBounds); + + m_jit.load64(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), scratchGPR); + slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, scratchGPR)); + moveTrueTo(resultGPR); + break; } + case Array::Double: { + ASSERT(!!node->child3()); + StorageOperand storage(this, node->child3()); + FPRTemporary scratch(this); + FPRReg scratchFPR = scratch.fpr(); + GPRReg storageGPR = storage.gpr(); - JITCompiler::JumpList slowArgument; - JITCompiler::JumpList slowArgumentOutOfBounds; - if (m_jit.symbolTableFor(node->codeOrigin)->slowArguments()) { - RELEASE_ASSERT(!node->codeOrigin.inlineCallFrame); - const SlowArgument* slowArguments = m_jit.graph().m_slowArguments.get(); - - slowArgumentOutOfBounds.append( - m_jit.branch32( - JITCompiler::AboveOrEqual, indexGPR, - Imm32(m_jit.symbolTableFor(node->codeOrigin)->parameterCount()))); + MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, 
Butterfly::offsetOfPublicLength())); + if (mode.isInBounds()) + speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds); + else + slowCases.append(outOfBounds); - COMPILE_ASSERT(sizeof(SlowArgument) == 8, SlowArgument_size_is_eight_bytes); - m_jit.move(ImmPtr(slowArguments), resultGPR); - m_jit.load32( - JITCompiler::BaseIndex( - resultGPR, indexGPR, JITCompiler::TimesEight, - OBJECT_OFFSETOF(SlowArgument, index)), - resultGPR); - m_jit.signExtend32ToPtr(resultGPR, resultGPR); - m_jit.load64( - JITCompiler::BaseIndex( - GPRInfo::callFrameRegister, resultGPR, JITCompiler::TimesEight), - resultGPR); - slowArgument.append(m_jit.jump()); + m_jit.loadDouble(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), scratchFPR); + slowCases.append(m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, scratchFPR, scratchFPR)); + moveTrueTo(resultGPR); + break; } - slowArgumentOutOfBounds.link(&m_jit); + case Array::ArrayStorage: { + ASSERT(!!node->child3()); + StorageOperand storage(this, node->child3()); + GPRTemporary scratch(this); - m_jit.signExtend32ToPtr(resultGPR, resultGPR); - - m_jit.load64( - JITCompiler::BaseIndex( - GPRInfo::callFrameRegister, resultGPR, JITCompiler::TimesEight, m_jit.offsetOfArgumentsIncludingThis(node->codeOrigin)), - resultGPR); + GPRReg storageGPR = storage.gpr(); + GPRReg scratchGPR = scratch.gpr(); - slowArgument.link(&m_jit); - jsValueResult(resultGPR, node); + MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset())); + if (mode.isInBounds()) + speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds); + else + slowCases.append(outOfBounds); + + m_jit.load64(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, ArrayStorage::vectorOffset()), scratchGPR); + slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, scratchGPR)); + moveTrueTo(resultGPR); + break; + } + default: { + slowCases.append(m_jit.jump()); + break; + } + } + + addSlowPathGenerator(slowPathCall(slowCases, this, operationHasIndexedProperty, resultGPR, baseGPR, indexGPR)); + + jsValueResult(resultGPR, node, DataFormatJSBoolean); break; } - - case GetMyArgumentByValSafe: { - SpeculateStrictInt32Operand index(this, node->child1()); + case GetDirectPname: { + Edge& baseEdge = m_jit.graph().varArgChild(node, 0); + Edge& propertyEdge = m_jit.graph().varArgChild(node, 1); + Edge& indexEdge = m_jit.graph().varArgChild(node, 2); + Edge& enumeratorEdge = m_jit.graph().varArgChild(node, 3); + + SpeculateCellOperand base(this, baseEdge); + SpeculateCellOperand property(this, propertyEdge); + SpeculateStrictInt32Operand index(this, indexEdge); + SpeculateCellOperand enumerator(this, enumeratorEdge); GPRTemporary result(this); + GPRTemporary scratch1(this); + GPRTemporary scratch2(this); + + GPRReg baseGPR = base.gpr(); + GPRReg propertyGPR = property.gpr(); GPRReg indexGPR = index.gpr(); + GPRReg enumeratorGPR = enumerator.gpr(); GPRReg resultGPR = result.gpr(); - - JITCompiler::JumpList slowPath; + GPRReg scratch1GPR = scratch1.gpr(); + GPRReg scratch2GPR = scratch2.gpr(); + + MacroAssembler::JumpList slowPath; + + // Check the structure + m_jit.load32(MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()), scratch1GPR); slowPath.append( - m_jit.branchTest64( - JITCompiler::NonZero, - JITCompiler::addressFor( - m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin)))); - - m_jit.add32(TrustedImm32(1), indexGPR, resultGPR); - if 
(node->codeOrigin.inlineCallFrame) { - slowPath.append( - m_jit.branch32( - JITCompiler::AboveOrEqual, - resultGPR, - Imm32(node->codeOrigin.inlineCallFrame->arguments.size()))); - } else { - slowPath.append( - m_jit.branch32( - JITCompiler::AboveOrEqual, - resultGPR, - JITCompiler::payloadFor(JSStack::ArgumentCount))); - } + m_jit.branch32( + MacroAssembler::NotEqual, + scratch1GPR, + MacroAssembler::Address( + enumeratorGPR, JSPropertyNameEnumerator::cachedStructureIDOffset()))); - JITCompiler::JumpList slowArgument; - JITCompiler::JumpList slowArgumentOutOfBounds; - if (m_jit.symbolTableFor(node->codeOrigin)->slowArguments()) { - RELEASE_ASSERT(!node->codeOrigin.inlineCallFrame); - const SlowArgument* slowArguments = m_jit.graph().m_slowArguments.get(); + // Compute the offset + // If index is less than the enumerator's cached inline storage, then it's an inline access + MacroAssembler::Jump outOfLineAccess = m_jit.branch32(MacroAssembler::AboveOrEqual, + indexGPR, MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset())); - slowArgumentOutOfBounds.append( - m_jit.branch32( - JITCompiler::AboveOrEqual, indexGPR, - Imm32(m_jit.symbolTableFor(node->codeOrigin)->parameterCount()))); - - COMPILE_ASSERT(sizeof(SlowArgument) == 8, SlowArgument_size_is_eight_bytes); - m_jit.move(ImmPtr(slowArguments), resultGPR); - m_jit.load32( - JITCompiler::BaseIndex( - resultGPR, indexGPR, JITCompiler::TimesEight, - OBJECT_OFFSETOF(SlowArgument, index)), - resultGPR); - m_jit.signExtend32ToPtr(resultGPR, resultGPR); - m_jit.load64( - JITCompiler::BaseIndex( - GPRInfo::callFrameRegister, resultGPR, JITCompiler::TimesEight), - resultGPR); - slowArgument.append(m_jit.jump()); - } - slowArgumentOutOfBounds.link(&m_jit); + m_jit.load64(MacroAssembler::BaseIndex(baseGPR, indexGPR, MacroAssembler::TimesEight, JSObject::offsetOfInlineStorage()), resultGPR); - m_jit.signExtend32ToPtr(resultGPR, resultGPR); - - m_jit.load64( - JITCompiler::BaseIndex( - GPRInfo::callFrameRegister, resultGPR, JITCompiler::TimesEight, m_jit.offsetOfArgumentsIncludingThis(node->codeOrigin)), - resultGPR); - - if (node->codeOrigin.inlineCallFrame) { - addSlowPathGenerator( - slowPathCall( - slowPath, this, operationGetInlinedArgumentByVal, resultGPR, - m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin).offset(), - node->codeOrigin.inlineCallFrame, - indexGPR)); - } else { - addSlowPathGenerator( - slowPathCall( - slowPath, this, operationGetArgumentByVal, resultGPR, - m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin).offset(), - indexGPR)); - } + MacroAssembler::Jump done = m_jit.jump(); - slowArgument.link(&m_jit); + // Otherwise it's out of line + outOfLineAccess.link(&m_jit); + m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratch2GPR); + slowPath.append(m_jit.branchIfNotToSpace(scratch2GPR)); + m_jit.move(indexGPR, scratch1GPR); + m_jit.sub32(MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), scratch1GPR); + m_jit.neg32(scratch1GPR); + m_jit.signExtend32ToPtr(scratch1GPR, scratch1GPR); + int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue); + m_jit.load64(MacroAssembler::BaseIndex(scratch2GPR, scratch1GPR, MacroAssembler::TimesEight, offsetOfFirstProperty), resultGPR); + + done.link(&m_jit); + + addSlowPathGenerator(slowPathCall(slowPath, this, operationGetByVal, resultGPR, baseGPR, propertyGPR)); + jsValueResult(resultGPR, node); break; 
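The GetDirectPname fast path above splits property reads into two cases, as its comments note: an index below the enumerator's cached inline capacity is read straight out of the object's inline storage, while anything larger is converted to a negative index off the butterfly pointer. A rough sketch of that addressing scheme, with a toy layout that only loosely mirrors JSC's real object model:

    #include <cstdint>

    // Toy layout: a few slots live inside the object itself; the rest live
    // "out of line", at negative indices relative to the butterfly pointer.
    struct ToyObject {
        static constexpr unsigned inlineCapacity = 4;
        uint64_t inlineStorage[inlineCapacity];
        uint64_t* butterfly; // out-of-line slot i is butterfly[-1 - i] in this sketch
    };

    inline uint64_t getDirectPname(const ToyObject& object, unsigned index)
    {
        if (index < ToyObject::inlineCapacity)
            return object.inlineStorage[index];       // inline access
        int outOfLine = static_cast<int>(index - ToyObject::inlineCapacity);
        return object.butterfly[-1 - outOfLine];      // out-of-line access
    }
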
} - - case CheckArgumentsNotCreated: { - ASSERT(!isEmptySpeculation( - m_state.variables().operand( - m_jit.graph().argumentsRegisterFor(node->codeOrigin)).m_type)); - speculationCheck( - ArgumentsEscaped, JSValueRegs(), 0, - m_jit.branchTest64( - JITCompiler::NonZero, - JITCompiler::addressFor( - m_jit.graph().machineArgumentsRegisterFor(node->codeOrigin)))); - noResult(node); + case GetPropertyEnumerator: { + SpeculateCellOperand base(this, node->child1()); + GPRFlushedCallResult result(this); + GPRReg resultGPR = result.gpr(); + + flushRegisters(); + callOperation(operationGetPropertyEnumerator, resultGPR, base.gpr()); + m_jit.exceptionCheck(); + cellResult(resultGPR, node); break; } - - case NewFunctionNoCheck: - compileNewFunctionNoCheck(node); - break; - - case NewFunction: { - JSValueOperand value(this, node->child1()); - GPRTemporary result(this, Reuse, value); - - GPRReg valueGPR = value.gpr(); + case GetEnumeratorStructurePname: + case GetEnumeratorGenericPname: { + SpeculateCellOperand enumerator(this, node->child1()); + SpeculateStrictInt32Operand index(this, node->child2()); + GPRTemporary scratch1(this); + GPRTemporary result(this); + + GPRReg enumeratorGPR = enumerator.gpr(); + GPRReg indexGPR = index.gpr(); + GPRReg scratch1GPR = scratch1.gpr(); GPRReg resultGPR = result.gpr(); - - m_jit.move(valueGPR, resultGPR); - - JITCompiler::Jump notCreated = m_jit.branchTest64(JITCompiler::Zero, resultGPR); - - addSlowPathGenerator( - slowPathCall( - notCreated, this, operationNewFunction, - resultGPR, m_jit.codeBlock()->functionDecl(node->functionDeclIndex()))); - + + MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, indexGPR, + MacroAssembler::Address(enumeratorGPR, (op == GetEnumeratorStructurePname) + ? JSPropertyNameEnumerator::endStructurePropertyIndexOffset() + : JSPropertyNameEnumerator::endGenericPropertyIndexOffset())); + + m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsNull())), resultGPR); + + MacroAssembler::Jump done = m_jit.jump(); + inBounds.link(&m_jit); + + m_jit.loadPtr(MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), scratch1GPR); + m_jit.load64(MacroAssembler::BaseIndex(scratch1GPR, indexGPR, MacroAssembler::TimesEight), resultGPR); + + done.link(&m_jit); jsValueResult(resultGPR, node); break; } - - case NewFunctionExpression: - compileNewFunctionExpression(node); - break; - - case In: - compileIn(node); - break; - - case CountExecution: - m_jit.add64(TrustedImm32(1), MacroAssembler::AbsoluteAddress(node->executionCounter()->address())); - break; + case ToIndexString: { + SpeculateInt32Operand index(this, node->child1()); + GPRFlushedCallResult result(this); + GPRReg resultGPR = result.gpr(); - case ForceOSRExit: { - terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); + flushRegisters(); + callOperation(operationToIndexString, resultGPR, index.gpr()); + m_jit.exceptionCheck(); + cellResult(resultGPR, node); break; } - - case InvalidationPoint: - emitInvalidationPoint(node); - break; + case ProfileType: { + JSValueOperand value(this, node->child1()); + GPRTemporary scratch1(this); + GPRTemporary scratch2(this); + GPRTemporary scratch3(this); - case CheckWatchdogTimer: - speculationCheck( - WatchdogTimerFired, JSValueRegs(), 0, - m_jit.branchTest8( - JITCompiler::NonZero, - JITCompiler::AbsoluteAddress(m_jit.vm()->watchdog.timerDidFireAddress()))); - break; + GPRReg scratch1GPR = scratch1.gpr(); + GPRReg scratch2GPR = scratch2.gpr(); + GPRReg scratch3GPR = 
scratch3.gpr(); + GPRReg valueGPR = value.gpr(); + + MacroAssembler::JumpList jumpToEnd; + + jumpToEnd.append(m_jit.branchTest64(JITCompiler::Zero, valueGPR)); + + TypeLocation* cachedTypeLocation = node->typeLocation(); + // Compile in a predictive type check, if possible, to see if we can skip writing to the log. + // These typechecks are inlined to match those of the 64-bit JSValue type checks. + if (cachedTypeLocation->m_lastSeenType == TypeUndefined) + jumpToEnd.append(m_jit.branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())))); + else if (cachedTypeLocation->m_lastSeenType == TypeNull) + jumpToEnd.append(m_jit.branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsNull())))); + else if (cachedTypeLocation->m_lastSeenType == TypeBoolean) { + m_jit.move(valueGPR, scratch2GPR); + m_jit.and64(TrustedImm32(~1), scratch2GPR); + jumpToEnd.append(m_jit.branch64(MacroAssembler::Equal, scratch2GPR, MacroAssembler::TrustedImm64(ValueFalse))); + } else if (cachedTypeLocation->m_lastSeenType == TypeMachineInt) + jumpToEnd.append(m_jit.branch64(MacroAssembler::AboveOrEqual, valueGPR, GPRInfo::tagTypeNumberRegister)); + else if (cachedTypeLocation->m_lastSeenType == TypeNumber) + jumpToEnd.append(m_jit.branchTest64(MacroAssembler::NonZero, valueGPR, GPRInfo::tagTypeNumberRegister)); + else if (cachedTypeLocation->m_lastSeenType == TypeString) { + MacroAssembler::Jump isNotCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR)); + jumpToEnd.append(m_jit.branchIfString(valueGPR)); + isNotCell.link(&m_jit); + } + + // Load the TypeProfilerLog into Scratch2. + TypeProfilerLog* cachedTypeProfilerLog = m_jit.vm()->typeProfilerLog(); + m_jit.move(TrustedImmPtr(cachedTypeProfilerLog), scratch2GPR); + + // Load the next LogEntry into Scratch1. + m_jit.loadPtr(MacroAssembler::Address(scratch2GPR, TypeProfilerLog::currentLogEntryOffset()), scratch1GPR); + + // Store the JSValue onto the log entry. + m_jit.store64(valueGPR, MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::valueOffset())); + + // Store the structureID of the cell if valueGPR is a cell, otherwise, store 0 on the log entry. + MacroAssembler::Jump isNotCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR)); + m_jit.load32(MacroAssembler::Address(valueGPR, JSCell::structureIDOffset()), scratch3GPR); + m_jit.store32(scratch3GPR, MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::structureIDOffset())); + MacroAssembler::Jump skipIsCell = m_jit.jump(); + isNotCell.link(&m_jit); + m_jit.store32(TrustedImm32(0), MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::structureIDOffset())); + skipIsCell.link(&m_jit); + + // Store the typeLocation on the log entry. + m_jit.move(TrustedImmPtr(cachedTypeLocation), scratch3GPR); + m_jit.storePtr(scratch3GPR, MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::locationOffset())); + + // Increment the current log entry. 
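The increment just below also has to handle the log filling up: the entry pointer is bumped, and when it reaches the end of the buffer a slow-path call flushes the log. Roughly, the pattern being emitted is the following (a plain C++ sketch with made-up names, not the actual TypeProfilerLog API):

    struct LogEntry { void* value; unsigned structureID; void* location; };

    struct ToyTypeProfilerLog {
        LogEntry* begin;
        LogEntry* current;
        LogEntry* end;
        void processAndReset()
        {
            // Drain entries into the profiler's database (elided), then reset.
            current = begin;
        }
    };

    inline void append(ToyTypeProfilerLog& log, const LogEntry& entry)
    {
        *log.current = entry;        // store value / structureID / location
        ++log.current;               // increment the current log entry
        if (log.current == log.end)  // buffer is full:
            log.processAndReset();   // flush, analogous to operationProcessTypeProfilerLogDFG
    }
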
+ m_jit.addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), scratch1GPR); + m_jit.storePtr(scratch1GPR, MacroAssembler::Address(scratch2GPR, TypeProfilerLog::currentLogEntryOffset())); + MacroAssembler::Jump clearLog = m_jit.branchPtr(MacroAssembler::Equal, scratch1GPR, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr())); + addSlowPathGenerator( + slowPathCall(clearLog, this, operationProcessTypeProfilerLogDFG, NoResult)); + + jumpToEnd.link(&m_jit); - case Phantom: - DFG_NODE_DO_TO_CHILDREN(m_jit.graph(), node, speculate); noResult(node); break; - - case Breakpoint: - case ProfileWillCall: - case ProfileDidCall: - case PhantomLocal: - case LoopHint: - // This is a no-op. + } + case ProfileControlFlow: { + BasicBlockLocation* basicBlockLocation = node->basicBlockLocation(); + basicBlockLocation->emitExecuteCode(m_jit); noResult(node); break; - - case Unreachable: - RELEASE_ASSERT_NOT_REACHED(); - break; - - case StoreBarrier: - case ConditionalStoreBarrier: - case StoreBarrierWithNullCheck: { - compileStoreBarrier(node); - break; } #if ENABLE(FTL_JIT) @@ -4949,7 +4859,7 @@ void SpeculativeJIT::compile(Node* node) silentSpillAllRegisters(InvalidGPRReg); m_jit.setupArgumentsExecState(); - appendCall(triggerTierUpNow); + appendCall(triggerTierUpNowInLoop); silentFillAllRegisters(InvalidGPRReg); done.link(&m_jit); @@ -4971,23 +4881,31 @@ void SpeculativeJIT::compile(Node* node) break; } - case CheckTierUpAndOSREnter: { - ASSERT(!node->codeOrigin.inlineCallFrame); + case CheckTierUpAndOSREnter: + case CheckTierUpWithNestedTriggerAndOSREnter: { + ASSERT(!node->origin.semantic.inlineCallFrame); GPRTemporary temp(this); GPRReg tempGPR = temp.gpr(); + + MacroAssembler::Jump forceOSREntry; + if (op == CheckTierUpWithNestedTriggerAndOSREnter) + forceOSREntry = m_jit.branchTest8(MacroAssembler::NonZero, MacroAssembler::AbsoluteAddress(&m_jit.jitCode()->nestedTriggerIsSet)); MacroAssembler::Jump done = m_jit.branchAdd32( MacroAssembler::Signed, TrustedImm32(Options::ftlTierUpCounterIncrementForLoop()), MacroAssembler::AbsoluteAddress(&m_jit.jitCode()->tierUpCounter.m_counter)); - + + if (forceOSREntry.isSet()) + forceOSREntry.link(&m_jit); silentSpillAllRegisters(tempGPR); m_jit.setupArgumentsWithExecState( - TrustedImm32(node->codeOrigin.bytecodeIndex), + TrustedImm32(node->origin.semantic.bytecodeIndex), TrustedImm32(m_stream->size())); appendCallSetResult(triggerOSREntryNow, tempGPR); MacroAssembler::Jump dontEnter = m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR); + m_jit.emitRestoreCalleeSaves(); m_jit.jump(tempGPR); dontEnter.link(&m_jit); silentFillAllRegisters(tempGPR); @@ -4999,18 +4917,36 @@ void SpeculativeJIT::compile(Node* node) case CheckTierUpInLoop: case CheckTierUpAtReturn: case CheckTierUpAndOSREnter: - RELEASE_ASSERT_NOT_REACHED(); + case CheckTierUpWithNestedTriggerAndOSREnter: + DFG_CRASH(m_jit.graph(), node, "Unexpected tier-up node"); break; #endif // ENABLE(FTL_JIT) - + case LastNodeType: case Phi: case Upsilon: - case GetArgument: case ExtractOSREntryLocal: case CheckInBounds: case ArithIMul: - RELEASE_ASSERT_NOT_REACHED(); + case MultiGetByOffset: + case MultiPutByOffset: + case FiatInt52: + case CheckBadCell: + case BottomValue: + case PhantomNewObject: + case PhantomNewFunction: + case PhantomNewGeneratorFunction: + case PhantomCreateActivation: + case GetMyArgumentByVal: + case PutHint: + case CheckStructureImmediate: + case MaterializeNewObject: + case MaterializeCreateActivation: + case PutStack: + case KillStack: + case GetStack: + case StringReplace: + 
DFG_CRASH(m_jit.graph(), node, "Unexpected node"); break; } @@ -5021,35 +4957,100 @@ void SpeculativeJIT::compile(Node* node) use(node); } -#if ENABLE(GGC) void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, GPRReg scratch1, GPRReg scratch2) { JITCompiler::Jump isNotCell; if (!isKnownCell(valueUse.node())) - isNotCell = m_jit.branchTest64(JITCompiler::NonZero, valueGPR, GPRInfo::tagMaskRegister); - - JITCompiler::Jump definitelyNotMarked = genericWriteBarrier(m_jit, ownerGPR, scratch1, scratch2); + isNotCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR)); + + JITCompiler::Jump ownerIsRememberedOrInEden = m_jit.jumpIfIsRememberedOrInEden(ownerGPR); storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2); - definitelyNotMarked.link(&m_jit); + ownerIsRememberedOrInEden.link(&m_jit); if (!isKnownCell(valueUse.node())) isNotCell.link(&m_jit); } -void SpeculativeJIT::writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, GPRReg scratch1, GPRReg scratch2) +void SpeculativeJIT::moveTrueTo(GPRReg gpr) { - JITCompiler::Jump isNotCell; - if (!isKnownCell(valueUse.node())) - isNotCell = m_jit.branchTest64(JITCompiler::NonZero, valueGPR, GPRInfo::tagMaskRegister); + m_jit.move(TrustedImm32(ValueTrue), gpr); +} - JITCompiler::Jump definitelyNotMarked = genericWriteBarrier(m_jit, owner); - storeToWriteBarrierBuffer(owner, scratch1, scratch2); - definitelyNotMarked.link(&m_jit); +void SpeculativeJIT::moveFalseTo(GPRReg gpr) +{ + m_jit.move(TrustedImm32(ValueFalse), gpr); +} - if (!isKnownCell(valueUse.node())) - isNotCell.link(&m_jit); +void SpeculativeJIT::blessBoolean(GPRReg gpr) +{ + m_jit.or32(TrustedImm32(ValueFalse), gpr); +} + +void SpeculativeJIT::convertMachineInt(Edge valueEdge, GPRReg resultGPR) +{ + JSValueOperand value(this, valueEdge, ManualOperandSpeculation); + GPRReg valueGPR = value.gpr(); + + JITCompiler::Jump notInt32 = + m_jit.branch64(JITCompiler::Below, valueGPR, GPRInfo::tagTypeNumberRegister); + + m_jit.signExtend32ToPtr(valueGPR, resultGPR); + JITCompiler::Jump done = m_jit.jump(); + + notInt32.link(&m_jit); + silentSpillAllRegisters(resultGPR); + callOperation(operationConvertBoxedDoubleToInt52, resultGPR, valueGPR); + silentFillAllRegisters(resultGPR); + + DFG_TYPE_CHECK( + JSValueRegs(valueGPR), valueEdge, SpecInt32 | SpecInt52AsDouble, + m_jit.branch64( + JITCompiler::Equal, resultGPR, + JITCompiler::TrustedImm64(JSValue::notInt52))); + done.link(&m_jit); +} + +void SpeculativeJIT::speculateMachineInt(Edge edge) +{ + if (!needsTypeCheck(edge, SpecInt32 | SpecInt52AsDouble)) + return; + + GPRTemporary temp(this); + convertMachineInt(edge, temp.gpr()); +} + +void SpeculativeJIT::speculateDoubleRepMachineInt(Edge edge) +{ + if (!needsTypeCheck(edge, SpecInt52AsDouble)) + return; + + SpeculateDoubleOperand value(this, edge); + FPRReg valueFPR = value.fpr(); + + GPRFlushedCallResult result(this); + GPRReg resultGPR = result.gpr(); + + flushRegisters(); + + callOperation(operationConvertDoubleToInt52, resultGPR, valueFPR); + + DFG_TYPE_CHECK( + JSValueRegs(), edge, SpecInt52AsDouble, + m_jit.branch64( + JITCompiler::Equal, resultGPR, + JITCompiler::TrustedImm64(JSValue::notInt52))); +} + +void SpeculativeJIT::compileArithRandom(Node* node) +{ + JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); + GPRTemporary temp1(this); + GPRTemporary temp2(this); + GPRTemporary temp3(this); + FPRTemporary result(this); + m_jit.emitRandomThunk(globalObject, temp1.gpr(), temp2.gpr(), temp3.gpr(), result.fpr()); + 
doubleResult(result.fpr(), node); } -#endif // ENABLE(GGC) #endif diff --git a/Source/JavaScriptCore/dfg/DFGStackLayoutPhase.cpp b/Source/JavaScriptCore/dfg/DFGStackLayoutPhase.cpp index cf1017624..09fe995e3 100644 --- a/Source/JavaScriptCore/dfg/DFGStackLayoutPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGStackLayoutPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,7 +31,7 @@ #include "DFGGraph.h" #include "DFGPhase.h" #include "DFGValueSource.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -46,8 +46,6 @@ public: bool run() { - SymbolTable* symbolTable = codeBlock()->symbolTable(); - // This enumerates the locals that we actually care about and packs them. So for example // if we use local 1, 3, 4, 5, 7, then we remap them: 1->0, 3->1, 4->2, 5->3, 7->4. We // treat a variable as being "used" if there exists an access to it (SetLocal, GetLocal, @@ -56,7 +54,7 @@ public: BitVector usedLocals; // Collect those variables that are used from IR. - bool hasGetLocalUnlinked = false; + bool hasNodesThatNeedFixup = false; for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { BasicBlock* block = m_graph.block(blockIndex); if (!block) @@ -80,7 +78,32 @@ public: if (operand.isArgument()) break; usedLocals.set(operand.toLocal()); - hasGetLocalUnlinked = true; + hasNodesThatNeedFixup = true; + break; + } + + case LoadVarargs: + case ForwardVarargs: { + LoadVarargsData* data = node->loadVarargsData(); + if (data->count.isLocal()) + usedLocals.set(data->count.toLocal()); + if (data->start.isLocal()) { + // This part really relies on the contiguity of stack layout + // assignments. + ASSERT(VirtualRegister(data->start.offset() + data->limit - 1).isLocal()); + for (unsigned i = data->limit; i--;) + usedLocals.set(VirtualRegister(data->start.offset() + i).toLocal()); + } // the else case shouldn't happen. + hasNodesThatNeedFixup = true; + break; + } + + case PutStack: + case GetStack: { + StackAccessData* stack = node->stackAccessData(); + if (stack->local.isArgument()) + break; + usedLocals.set(stack->local.toLocal()); break; } @@ -90,27 +113,13 @@ public: } } - // Ensure that captured variables and captured inline arguments are pinned down. - // They should have been because of flushes, except that the flushes can be optimized - // away. 
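As the comment at the top of run() above illustrates (locals 1, 3, 4, 5, 7 remap to 0..4), the phase packs only the locals that are actually used into a dense range of machine locals, now starting after the callee-save slots. A compact sketch of that packing step, independent of the DFG data structures:

    #include <vector>
    #include <climits>

    // usedLocals[i] is true if bytecode local i is referenced anywhere.
    // Returns allocation[i]: the packed machine-local index, or UINT_MAX if unused.
    std::vector<unsigned> packLocals(const std::vector<bool>& usedLocals,
                                     unsigned firstFreeMachineLocal) // e.g. callee-save slots
    {
        std::vector<unsigned> allocation(usedLocals.size(), UINT_MAX);
        unsigned next = firstFreeMachineLocal;
        for (size_t i = 0; i < usedLocals.size(); ++i) {
            if (usedLocals[i])
                allocation[i] = next++; // 1->0, 3->1, 4->2, ... when firstFreeMachineLocal == 0
        }
        return allocation;
    }
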
- if (symbolTable) { - for (int i = symbolTable->captureStart(); i > symbolTable->captureEnd(); i--) - usedLocals.set(VirtualRegister(i).toLocal()); - } - if (codeBlock()->usesArguments()) { - usedLocals.set(codeBlock()->argumentsRegister().toLocal()); - usedLocals.set(unmodifiedArgumentsRegister(codeBlock()->argumentsRegister()).toLocal()); - } - if (codeBlock()->uncheckedActivationRegister().isValid()) - usedLocals.set(codeBlock()->activationRegister().toLocal()); - for (InlineCallFrameSet::iterator iter = m_graph.m_inlineCallFrames->begin(); !!iter; ++iter) { + for (InlineCallFrameSet::iterator iter = m_graph.m_plan.inlineCallFrames->begin(); !!iter; ++iter) { InlineCallFrame* inlineCallFrame = *iter; - if (!inlineCallFrame->executable->usesArguments()) - continue; - VirtualRegister argumentsRegister = m_graph.argumentsRegisterFor(inlineCallFrame); - usedLocals.set(argumentsRegister.toLocal()); - usedLocals.set(unmodifiedArgumentsRegister(argumentsRegister).toLocal()); + if (inlineCallFrame->isVarargs()) { + usedLocals.set(VirtualRegister( + JSStack::ArgumentCount + inlineCallFrame->stackOffset).toLocal()); + } for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) { usedLocals.set(VirtualRegister( @@ -120,7 +129,7 @@ public: } Vector<unsigned> allocation(usedLocals.size()); - m_graph.m_nextMachineLocal = 0; + m_graph.m_nextMachineLocal = codeBlock()->calleeSaveSpaceAsVirtualRegisters(); for (unsigned i = 0; i < usedLocals.size(); ++i) { if (!usedLocals.get(i)) { allocation[i] = UINT_MAX; @@ -147,38 +156,35 @@ public: if (allocation[local] == UINT_MAX) continue; - variable->machineLocal() = virtualRegisterForLocal( - allocation[variable->local().toLocal()]); - } - - if (codeBlock()->usesArguments()) { - VirtualRegister argumentsRegister = virtualRegisterForLocal( - allocation[codeBlock()->argumentsRegister().toLocal()]); - RELEASE_ASSERT( - virtualRegisterForLocal(allocation[ - unmodifiedArgumentsRegister( - codeBlock()->argumentsRegister()).toLocal()]) - == unmodifiedArgumentsRegister(argumentsRegister)); - codeBlock()->setArgumentsRegister(argumentsRegister); + variable->machineLocal() = assign(allocation, variable->local()); } - if (codeBlock()->uncheckedActivationRegister().isValid()) { - codeBlock()->setActivationRegister( - virtualRegisterForLocal(allocation[codeBlock()->activationRegister().toLocal()])); + for (StackAccessData* data : m_graph.m_stackAccessData) { + if (!data->local.isLocal()) { + data->machineLocal = data->local; + continue; + } + + if (static_cast<size_t>(data->local.toLocal()) >= allocation.size()) + continue; + if (allocation[data->local.toLocal()] == UINT_MAX) + continue; + + data->machineLocal = assign(allocation, data->local); } + if (LIKELY(!m_graph.hasDebuggerEnabled())) + codeBlock()->setScopeRegister(VirtualRegister()); + else + codeBlock()->setScopeRegister(assign(allocation, codeBlock()->scopeRegister())); + for (unsigned i = m_graph.m_inlineVariableData.size(); i--;) { InlineVariableData data = m_graph.m_inlineVariableData[i]; InlineCallFrame* inlineCallFrame = data.inlineCallFrame; - if (inlineCallFrame->executable->usesArguments()) { - inlineCallFrame->argumentsRegister = virtualRegisterForLocal( - allocation[m_graph.argumentsRegisterFor(inlineCallFrame).toLocal()]); - - RELEASE_ASSERT( - virtualRegisterForLocal(allocation[unmodifiedArgumentsRegister( - m_graph.argumentsRegisterFor(inlineCallFrame)).toLocal()]) - == unmodifiedArgumentsRegister(inlineCallFrame->argumentsRegister)); + if (inlineCallFrame->isVarargs()) { + 
inlineCallFrame->argumentCountRegister = assign( + allocation, VirtualRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount)); } for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) { @@ -197,42 +203,17 @@ public: RELEASE_ASSERT(inlineCallFrame->isClosureCall == !!data.calleeVariable); if (inlineCallFrame->isClosureCall) { + VariableAccessData* variable = data.calleeVariable->find(); ValueSource source = ValueSource::forFlushFormat( - data.calleeVariable->machineLocal(), - data.calleeVariable->flushFormat()); + variable->machineLocal(), + variable->flushFormat()); inlineCallFrame->calleeRecovery = source.valueRecovery(); } else RELEASE_ASSERT(inlineCallFrame->calleeRecovery.isConstant()); } - if (symbolTable) { - if (symbolTable->captureCount()) { - unsigned captureStartLocal = allocation[ - VirtualRegister(codeBlock()->symbolTable()->captureStart()).toLocal()]; - ASSERT(captureStartLocal != UINT_MAX); - m_graph.m_machineCaptureStart = virtualRegisterForLocal(captureStartLocal).offset(); - } else - m_graph.m_machineCaptureStart = virtualRegisterForLocal(0).offset(); - - // This is an abomination. If we had captured an argument then the argument ends - // up being "slow", meaning that loads of the argument go through an extra lookup - // table. - if (const SlowArgument* slowArguments = symbolTable->slowArguments()) { - auto newSlowArguments = std::make_unique<SlowArgument[]>( - symbolTable->parameterCount()); - for (size_t i = symbolTable->parameterCount(); i--;) { - newSlowArguments[i] = slowArguments[i]; - VirtualRegister reg = VirtualRegister(slowArguments[i].index); - if (reg.isLocal()) - newSlowArguments[i].index = virtualRegisterForLocal(allocation[reg.toLocal()]).offset(); - } - - m_graph.m_slowArguments = std::move(newSlowArguments); - } - } - // Fix GetLocalUnlinked's variable references. 
- if (hasGetLocalUnlinked) { + if (hasNodesThatNeedFixup) { for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { BasicBlock* block = m_graph.block(blockIndex); if (!block) @@ -241,10 +222,15 @@ public: Node* node = block->at(nodeIndex); switch (node->op()) { case GetLocalUnlinked: { - VirtualRegister operand = node->unlinkedLocal(); - if (operand.isLocal()) - operand = virtualRegisterForLocal(allocation[operand.toLocal()]); - node->setUnlinkedMachineLocal(operand); + node->setUnlinkedMachineLocal(assign(allocation, node->unlinkedLocal())); + break; + } + + case LoadVarargs: + case ForwardVarargs: { + LoadVarargsData* data = node->loadVarargsData(); + data->machineCount = assign(allocation, data->count); + data->machineStart = assign(allocation, data->start); break; } @@ -257,6 +243,20 @@ public: return true; } + +private: + VirtualRegister assign(const Vector<unsigned>& allocation, VirtualRegister src) + { + VirtualRegister result = src; + if (result.isLocal()) { + unsigned myAllocation = allocation[result.toLocal()]; + if (myAllocation == UINT_MAX) + result = VirtualRegister(); + else + result = virtualRegisterForLocal(myAllocation); + } + return result; + } }; bool performStackLayout(Graph& graph) diff --git a/Source/JavaScriptCore/dfg/DFGStackLayoutPhase.h b/Source/JavaScriptCore/dfg/DFGStackLayoutPhase.h index b18ff9505..ccb0cea88 100644 --- a/Source/JavaScriptCore/dfg/DFGStackLayoutPhase.h +++ b/Source/JavaScriptCore/dfg/DFGStackLayoutPhase.h @@ -26,8 +26,6 @@ #ifndef DFGStackLayoutPhase_h #define DFGStackLayoutPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) namespace JSC { namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGStaticExecutionCountEstimationPhase.cpp b/Source/JavaScriptCore/dfg/DFGStaticExecutionCountEstimationPhase.cpp new file mode 100644 index 000000000..bb23a7b9f --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGStaticExecutionCountEstimationPhase.cpp @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "DFGStaticExecutionCountEstimationPhase.h" + +#if ENABLE(DFG_JIT) + +#include "DFGBasicBlockInlines.h" +#include "DFGGraph.h" +#include "DFGNaturalLoops.h" +#include "DFGPhase.h" +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +class StaticExecutionCountEstimationPhase : public Phase { +public: + StaticExecutionCountEstimationPhase(Graph& graph) + : Phase(graph, "static execution count estimation") + { + } + + bool run() + { + m_graph.ensureNaturalLoops(); + + // Estimate basic block execution counts based on loop depth. + for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { + BasicBlock* block = m_graph.block(blockIndex); + if (!block) + continue; + + block->executionCount = pow(10, m_graph.m_naturalLoops->loopDepth(block)); + } + + // Estimate branch weights based on execution counts. This isn't quite correct. It'll + // assume that each block's conditional successor only has that block as its + // predecessor. + for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { + BasicBlock* block = m_graph.block(blockIndex); + if (!block) + continue; + + Node* terminal = block->terminal(); + switch (terminal->op()) { + case Branch: { + BranchData* data = terminal->branchData(); + applyCounts(data->taken); + applyCounts(data->notTaken); + break; + } + + case Switch: { + SwitchData* data = terminal->switchData(); + for (unsigned i = data->cases.size(); i--;) + applyCounts(data->cases[i].target); + applyCounts(data->fallThrough); + break; + } + + default: + break; + } + } + + return true; + } + +private: + void applyCounts(BranchTarget& target) + { + target.count = target.block->executionCount; + } +}; + +bool performStaticExecutionCountEstimation(Graph& graph) +{ + SamplingRegion samplingRegion("DFG Static Execution Count Estimation"); + return runPhase<StaticExecutionCountEstimationPhase>(graph); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + + diff --git a/Source/JavaScriptCore/dfg/DFGResurrectionForValidationPhase.h b/Source/JavaScriptCore/dfg/DFGStaticExecutionCountEstimationPhase.h index 98378ec52..f2289611e 100644 --- a/Source/JavaScriptCore/dfg/DFGResurrectionForValidationPhase.h +++ b/Source/JavaScriptCore/dfg/DFGStaticExecutionCountEstimationPhase.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,30 +23,27 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef DFGResurrectionForValidationPhase_h -#define DFGResurrectionForValidationPhase_h - -#include <wtf/Platform.h> +#ifndef DFGStaticExecutionCountEstimationPhase_h +#define DFGStaticExecutionCountEstimationPhase_h #if ENABLE(DFG_JIT) -#include "DFGCommon.h" - namespace JSC { namespace DFG { class Graph; -// Places a Phantom after every value-producing node, thereby disabling DCE from killing it. -// This is useful for validating our OSR exit machinery by instituting the requirement that -// any live-in-bytecode variable should be OSR-available. Without this phase, it's impossible -// to make such an assertion because our DCE is more aggressive than the bytecode liveness -// analysis. +// Estimate execution counts (branch execution counts, in particular) based on +// presently available static information. 
This phase is important because +// subsequent CFG transformations, such as OSR entrypoint creation, perturb our +// ability to do accurate static estimations. Hence we lock in the estimates early. +// Ideally, we would have dynamic information, but we don't right now, so this is as +// good as it gets. -bool performResurrectionForValidation(Graph&); +bool performStaticExecutionCountEstimation(Graph&); } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) -#endif // DFGResurrectionForValidationPhase_h +#endif // DFGStaticExecutionCountEstimationPhase_h diff --git a/Source/JavaScriptCore/dfg/DFGStoreBarrierElisionPhase.cpp b/Source/JavaScriptCore/dfg/DFGStoreBarrierElisionPhase.cpp deleted file mode 100644 index d73c5201e..000000000 --- a/Source/JavaScriptCore/dfg/DFGStoreBarrierElisionPhase.cpp +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Copyright (C) 2013 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, - * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "config.h" -#include "DFGStoreBarrierElisionPhase.h" - -#if ENABLE(DFG_JIT) - -#include "DFGBasicBlock.h" -#include "DFGClobberSet.h" -#include "DFGGraph.h" -#include "DFGPhase.h" -#include <wtf/HashSet.h> - -namespace JSC { namespace DFG { - -class StoreBarrierElisionPhase : public Phase { -public: - StoreBarrierElisionPhase(Graph& graph) - : Phase(graph, "store barrier elision") - , m_currentBlock(0) - , m_currentIndex(0) - { - m_gcClobberSet.add(GCState); - } - - bool run() - { - for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { - m_currentBlock = m_graph.block(blockIndex); - if (!m_currentBlock) - continue; - handleBlock(m_currentBlock); - } - return true; - } - -private: - bool couldCauseGC(Node* node) - { - return writesOverlap(m_graph, node, m_gcClobberSet); - } - - bool allocatesFreshObject(Node* node) - { - switch (node->op()) { - case NewObject: - case NewArray: - case NewArrayWithSize: - case NewArrayBuffer: - case NewTypedArray: - case NewRegexp: - return true; - default: - return false; - } - } - - void noticeFreshObject(HashSet<Node*>& dontNeedBarriers, Node* node) - { - ASSERT(allocatesFreshObject(node)); - dontNeedBarriers.add(node); - } - - Node* getBaseOfStore(Node* barrierNode) - { - ASSERT(barrierNode->isStoreBarrier()); - return barrierNode->child1().node(); - } - - bool shouldBeElided(HashSet<Node*>& dontNeedBarriers, Node* node) - { - ASSERT(node->isStoreBarrier()); - return dontNeedBarriers.contains(node->child1().node()); - } - - void elideBarrier(Node* node) - { - ASSERT(node->isStoreBarrier()); - node->convertToPhantom(); - } - - void handleNode(HashSet<Node*>& dontNeedBarriers, Node* node) - { - if (couldCauseGC(node)) - dontNeedBarriers.clear(); - - if (allocatesFreshObject(node)) - noticeFreshObject(dontNeedBarriers, node); - - if (!node->isStoreBarrier()) - return; - - if (shouldBeElided(dontNeedBarriers, node)) { - elideBarrier(node); - return; - } - - Node* base = getBaseOfStore(node); - if (!base) - return; - - if (dontNeedBarriers.contains(base)) - return; - dontNeedBarriers.add(base); - } - - bool handleBlock(BasicBlock* block) - { - HashSet<Node*> dontNeedBarriers; - for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) { - m_currentIndex = indexInBlock; - Node* node = block->at(indexInBlock); - handleNode(dontNeedBarriers, node); - } - return true; - } - - ClobberSet m_gcClobberSet; - BasicBlock* m_currentBlock; - unsigned m_currentIndex; -}; - -bool performStoreBarrierElision(Graph& graph) -{ - SamplingRegion samplingRegion("DFG Store Barrier Elision Phase"); - return runPhase<StoreBarrierElisionPhase>(graph); -} - - -} } // namespace JSC::DFG - -#endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGStoreBarrierInsertionPhase.cpp b/Source/JavaScriptCore/dfg/DFGStoreBarrierInsertionPhase.cpp new file mode 100644 index 000000000..72f2200ce --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGStoreBarrierInsertionPhase.cpp @@ -0,0 +1,544 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGStoreBarrierInsertionPhase.h" + +#if ENABLE(DFG_JIT) + +#include "DFGAbstractInterpreterInlines.h" +#include "DFGBlockMapInlines.h" +#include "DFGDoesGC.h" +#include "DFGGraph.h" +#include "DFGInPlaceAbstractState.h" +#include "DFGInsertionSet.h" +#include "DFGPhase.h" +#include "JSCInlines.h" +#include <wtf/CommaPrinter.h> +#include <wtf/HashSet.h> + +namespace JSC { namespace DFG { + +namespace { + +bool verbose = false; + +enum class PhaseMode { + // Does only a local analysis for store barrier insertion and assumes that pointers live + // from predecessor blocks may need barriers. Assumes CPS conventions. Does not use AI for + // eliminating store barriers, but does a best effort to eliminate barriers when you're + // storing a non-cell value by using Node::result() and by looking at constants. The local + // analysis is based on GC epochs, so it will eliminate a lot of locally redundant barriers. + Fast, + + // Does a global analysis for store barrier insertion. Reuses the GC-epoch-based analysis + // used by Fast, but adds a conservative merge rule for propagating information from one + // block to the next. This will ensure for example that if a value V coming from multiple + // predecessors in B didn't need any more barriers at the end of each predecessor (either + // because it was the last allocated object in that predecessor or because it just had a + // barrier executed), then until we hit another GC point in B, we won't need another barrier + // on V. Uses AI for eliminating barriers when we know that the value being stored is not a + // cell. Assumes SSA conventions. + Global +}; + +template<PhaseMode mode> +class StoreBarrierInsertionPhase : public Phase { +public: + StoreBarrierInsertionPhase(Graph& graph) + : Phase(graph, mode == PhaseMode::Fast ? 
"fast store barrier insertion" : "global store barrier insertion") + , m_insertionSet(graph) + { + } + + bool run() + { + if (verbose) { + dataLog("Starting store barrier insertion:\n"); + m_graph.dump(); + } + + switch (mode) { + case PhaseMode::Fast: { + DFG_ASSERT(m_graph, nullptr, m_graph.m_form != SSA); + + m_graph.clearEpochs(); + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) + handleBlock(block); + return true; + } + + case PhaseMode::Global: { + DFG_ASSERT(m_graph, nullptr, m_graph.m_form == SSA); + + m_state = std::make_unique<InPlaceAbstractState>(m_graph); + m_interpreter = std::make_unique<AbstractInterpreter<InPlaceAbstractState>>(m_graph, *m_state); + + m_isConverged = false; + + // First run the analysis. Inside basic blocks we use an epoch-based analysis that + // is very precise. At block boundaries, we just propagate which nodes may need a + // barrier. This gives us a very nice bottom->top fixpoint: we start out assuming + // that no node needs any barriers at block boundaries, and then we converge + // towards believing that all nodes need barriers. "Needing a barrier" is like + // saying that the node is in a past epoch. "Not needing a barrier" is like saying + // that the node is in the current epoch. + m_stateAtHead = std::make_unique<BlockMap<HashSet<Node*>>>(m_graph); + m_stateAtTail = std::make_unique<BlockMap<HashSet<Node*>>>(m_graph); + + BlockList postOrder = m_graph.blocksInPostOrder(); + + bool changed = true; + while (changed) { + changed = false; + + // Intentional backwards loop because we are using RPO. + for (unsigned blockIndex = postOrder.size(); blockIndex--;) { + BasicBlock* block = postOrder[blockIndex]; + + if (!handleBlock(block)) { + // If the block didn't finish, then it cannot affect the fixpoint. + continue; + } + + // Construct the state-at-tail based on the epochs of live nodes and the + // current epoch. We grow state-at-tail monotonically to ensure convergence. + bool thisBlockChanged = false; + for (Node* node : block->ssa->liveAtTail) { + if (node->epoch() != m_currentEpoch) { + // If the node is older than the current epoch, then we may need to + // run a barrier on it in the future. So, add it to the state. + thisBlockChanged |= m_stateAtTail->at(block).add(node).isNewEntry; + } + } + + if (!thisBlockChanged) { + // This iteration didn't learn anything new about this block. + continue; + } + + // Changed things. Make sure that we loop one more time. + changed = true; + + for (BasicBlock* successor : block->successors()) { + for (Node* node : m_stateAtTail->at(block)) + m_stateAtHead->at(successor).add(node); + } + } + } + + // Tell handleBlock() that it's time to actually insert barriers for real. + m_isConverged = true; + + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) + handleBlock(block); + + return true; + } } + + RELEASE_ASSERT_NOT_REACHED(); + return false; + } + +private: + bool handleBlock(BasicBlock* block) + { + if (verbose) { + dataLog("Dealing with block ", pointerDump(block), "\n"); + if (reallyInsertBarriers()) + dataLog(" Really inserting barriers.\n"); + } + + m_currentEpoch = Epoch::first(); + + if (mode == PhaseMode::Global) { + if (!block->cfaHasVisited) + return false; + m_state->beginBasicBlock(block); + + for (Node* node : block->ssa->liveAtHead) { + if (m_stateAtHead->at(block).contains(node)) { + // If previous blocks tell us that this node may need a barrier in the + // future, then put it in the ancient primordial epoch. 
This forces us to + // emit a barrier on any possibly-cell store, regardless of the epoch of the + // stored value. + node->setEpoch(Epoch()); + } else { + // If previous blocks aren't requiring us to run a barrier on this node, + // then put it in the current epoch. This means that we will skip barriers + // on this node so long as we don't allocate. It also means that we won't + // run barriers on stores to on one such node into another such node. That's + // fine, because nodes would be excluded from the state set if at the tails + // of all predecessors they always had the current epoch. + node->setEpoch(m_currentEpoch); + } + } + } + + bool result = true; + + for (m_nodeIndex = 0; m_nodeIndex < block->size(); ++m_nodeIndex) { + m_node = block->at(m_nodeIndex); + + if (verbose) { + dataLog( + " ", m_currentEpoch, ": Looking at node ", m_node, " with children: "); + CommaPrinter comma; + m_graph.doToChildren( + m_node, + [&] (Edge edge) { + dataLog(comma, edge, " (", edge->epoch(), ")"); + }); + dataLog("\n"); + } + + if (mode == PhaseMode::Global) { + // Execute edges separately because we don't want to insert barriers if the + // operation doing the store does a check that ensures that the child is not + // a cell. + m_interpreter->startExecuting(); + m_interpreter->executeEdges(m_node); + } + + switch (m_node->op()) { + case PutByValDirect: + case PutByVal: + case PutByValAlias: { + switch (m_node->arrayMode().modeForPut().type()) { + case Array::Contiguous: + case Array::ArrayStorage: + case Array::SlowPutArrayStorage: { + Edge child1 = m_graph.varArgChild(m_node, 0); + Edge child3 = m_graph.varArgChild(m_node, 2); + considerBarrier(child1, child3); + break; + } + default: + break; + } + break; + } + + case ArrayPush: { + switch (m_node->arrayMode().type()) { + case Array::Contiguous: + case Array::ArrayStorage: + considerBarrier(m_node->child1(), m_node->child2()); + break; + default: + break; + } + break; + } + + case PutStructure: { + considerBarrier(m_node->child1()); + break; + } + + case PutClosureVar: + case PutToArguments: + case MultiPutByOffset: { + considerBarrier(m_node->child1(), m_node->child2()); + break; + } + + case PutById: + case PutByIdFlush: + case PutByIdDirect: { + considerBarrier(m_node->child1()); + break; + } + + case PutByOffset: { + considerBarrier(m_node->child2(), m_node->child3()); + break; + } + + case PutGlobalVariable: { + considerBarrier(m_node->child1(), m_node->child2()); + break; + } + + default: + break; + } + + if (doesGC(m_graph, m_node)) + m_currentEpoch.bump(); + + switch (m_node->op()) { + case NewObject: + case NewArray: + case NewArrayWithSize: + case NewArrayBuffer: + case NewTypedArray: + case NewRegexp: + case MaterializeNewObject: + case MaterializeCreateActivation: + case NewStringObject: + case MakeRope: + case CreateActivation: + case CreateDirectArguments: + case CreateScopedArguments: + case CreateClonedArguments: + case NewArrowFunction: + case NewFunction: + case NewGeneratorFunction: + // Nodes that allocate get to set their epoch because for those nodes we know + // that they will be the newest object in the heap. + m_node->setEpoch(m_currentEpoch); + break; + + case AllocatePropertyStorage: + case ReallocatePropertyStorage: + // These allocate but then run their own barrier. 
+ insertBarrier(m_nodeIndex + 1, Edge(m_node->child1().node(), KnownCellUse)); + m_node->setEpoch(Epoch()); + break; + + case Upsilon: + m_node->phi()->setEpoch(m_node->epoch()); + m_node->setEpoch(Epoch()); + break; + + default: + // For nodes that aren't guaranteed to allocate, we say that their return value + // (if there is one) could be arbitrarily old. + m_node->setEpoch(Epoch()); + break; + } + + if (verbose) { + dataLog( + " ", m_currentEpoch, ": Done with node ", m_node, " (", m_node->epoch(), + ") with children: "); + CommaPrinter comma; + m_graph.doToChildren( + m_node, + [&] (Edge edge) { + dataLog(comma, edge, " (", edge->epoch(), ")"); + }); + dataLog("\n"); + } + + if (mode == PhaseMode::Global) { + if (!m_interpreter->executeEffects(m_nodeIndex, m_node)) { + result = false; + break; + } + } + } + + if (mode == PhaseMode::Global) + m_state->reset(); + + if (reallyInsertBarriers()) + m_insertionSet.execute(block); + + return result; + } + + void considerBarrier(Edge base, Edge child) + { + if (verbose) + dataLog(" Considering adding barrier ", base, " => ", child, "\n"); + + // We don't need a store barrier if the child is guaranteed to not be a cell. + switch (mode) { + case PhaseMode::Fast: { + // Don't try too hard because it's too expensive to run AI. + if (child->hasConstant()) { + if (!child->asJSValue().isCell()) { + if (verbose) + dataLog(" Rejecting because of constant type.\n"); + return; + } + } else { + switch (child->result()) { + case NodeResultNumber: + case NodeResultDouble: + case NodeResultInt32: + case NodeResultInt52: + case NodeResultBoolean: + if (verbose) + dataLog(" Rejecting because of result type.\n"); + return; + default: + break; + } + } + break; + } + + case PhaseMode::Global: { + // Go into rage mode to eliminate any chance of a barrier with a non-cell child. We + // can afford to keep around AI in Global mode. + if (!m_interpreter->needsTypeCheck(child, ~SpecCell)) { + if (verbose) + dataLog(" Rejecting because of AI type.\n"); + return; + } + break; + } } + + // We don't need a store barrier if the base is at least as new as the child. For + // example this won't need a barrier: + // + // var o = {} + // var p = {} + // p.f = o + // + // This is stronger than the currentEpoch rule in considerBarrier(Edge), because it will + // also eliminate barriers in cases like this: + // + // var o = {} // o.epoch = 1, currentEpoch = 1 + // var p = {} // o.epoch = 1, p.epoch = 2, currentEpoch = 2 + // var q = {} // o.epoch = 1, p.epoch = 2, q.epoch = 3, currentEpoch = 3 + // p.f = o // p.epoch >= o.epoch + // + // This relationship works because if it holds then we are in one of the following + // scenarios. Note that we don't know *which* of these scenarios we are in, but it's + // one of them (though without loss of generality, you can replace "a GC happened" with + // "many GCs happened"). + // + // 1) There is no GC between the allocation/last-barrier of base, child and now. Then + // we definitely don't need a barrier. + // + // 2) There was a GC after child was allocated but before base was allocated. Then we + // don't need a barrier, because base is still a new object. + // + // 3) There was a GC after both child and base were allocated. Then they are both old. + // We don't need barriers on stores of old into old. Note that in this case it + // doesn't matter if there was also a GC between the allocation of child and base. + // + // Note that barriers will lift an object into the current epoch. This is sort of weird. 
+ // It means that later if you store that object into some other object, and that other + // object was previously newer object, you'll think that you need a barrier. We could + // avoid this by tracking allocation epoch and barrier epoch separately. For now I think + // that this would be overkill. But this does mean that there are the following + // possibilities when this relationship holds: + // + // 4) Base is allocated first. A GC happens and base becomes old. Then we allocate + // child. (Note that alternatively the GC could happen during the allocation of + // child.) Then we run a barrier on base. Base will appear to be as new as child + // (same epoch). At this point, we don't need another barrier on base. + // + // 5) Base is allocated first. Then we allocate child. Then we run a GC. Then we run a + // barrier on base. Base will appear newer than child. We don't need a barrier + // because both objects are old. + // + // Something we watch out for here is that the null epoch is a catch-all for objects + // allocated before we did any epoch tracking. Two objects being in the null epoch + // means that we don't know their epoch relationship. + if (!!base->epoch() && base->epoch() >= child->epoch()) { + if (verbose) + dataLog(" Rejecting because of epoch ordering.\n"); + return; + } + + considerBarrier(base); + } + + void considerBarrier(Edge base) + { + if (verbose) + dataLog(" Considering adding barrier on ", base, "\n"); + + // We don't need a store barrier if the epoch of the base is identical to the current + // epoch. That means that we either just allocated the object and so it's guaranteed to + // be in newgen, or we just ran a barrier on it so it's guaranteed to be remembered + // already. + if (base->epoch() == m_currentEpoch) { + if (verbose) + dataLog(" Rejecting because it's in the current epoch.\n"); + return; + } + + if (verbose) + dataLog(" Inserting barrier.\n"); + insertBarrier(m_nodeIndex, base); + } + + void insertBarrier(unsigned nodeIndex, Edge base, bool exitOK = true) + { + // If we're in global mode, we should only insert the barriers once we have converged. + if (!reallyInsertBarriers()) + return; + + // FIXME: We could support StoreBarrier(UntypedUse:). That would be sort of cool. + // But right now we don't need it. + + // If the original edge was unchecked, we should also not have a check. We may be in a context + // where checks are not allowed. If we ever did have to insert a barrier at an ExitInvalid + // context and that barrier needed a check, then we could make that work by hoisting the check. + // That doesn't happen right now. + if (base.useKind() != KnownCellUse) { + DFG_ASSERT(m_graph, m_node, m_node->origin.exitOK); + base.setUseKind(CellUse); + } + + m_insertionSet.insertNode( + nodeIndex, SpecNone, StoreBarrier, m_node->origin.takeValidExit(exitOK), base); + + base->setEpoch(m_currentEpoch); + } + + bool reallyInsertBarriers() + { + return mode == PhaseMode::Fast || m_isConverged; + } + + InsertionSet m_insertionSet; + Epoch m_currentEpoch; + unsigned m_nodeIndex; + Node* m_node; + + // Things we only use in Global mode. 
+ std::unique_ptr<InPlaceAbstractState> m_state; + std::unique_ptr<AbstractInterpreter<InPlaceAbstractState>> m_interpreter; + std::unique_ptr<BlockMap<HashSet<Node*>>> m_stateAtHead; + std::unique_ptr<BlockMap<HashSet<Node*>>> m_stateAtTail; + bool m_isConverged; +}; + +} // anonymous namespace + +bool performFastStoreBarrierInsertion(Graph& graph) +{ + SamplingRegion samplingRegion("DFG Fast Store Barrier Insertion Phase"); + return runPhase<StoreBarrierInsertionPhase<PhaseMode::Fast>>(graph); +} + +bool performGlobalStoreBarrierInsertion(Graph& graph) +{ + SamplingRegion samplingRegion("DFG Global Store Barrier Insertion Phase"); + return runPhase<StoreBarrierInsertionPhase<PhaseMode::Global>>(graph); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGDesiredWriteBarriers.cpp b/Source/JavaScriptCore/dfg/DFGStoreBarrierInsertionPhase.h index bf1a24375..352759e95 100644 --- a/Source/JavaScriptCore/dfg/DFGDesiredWriteBarriers.cpp +++ b/Source/JavaScriptCore/dfg/DFGStoreBarrierInsertionPhase.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,65 +23,29 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#include "config.h" +#ifndef DFGStoreBarrierInsertionPhase_h +#define DFGStoreBarrierInsertionPhase_h #if ENABLE(DFG_JIT) -#include "DFGDesiredWriteBarriers.h" - -#include "CodeBlock.h" -#include "JSCJSValueInlines.h" - namespace JSC { namespace DFG { -DesiredWriteBarrier::DesiredWriteBarrier(Type type, CodeBlock* codeBlock, unsigned index, JSCell* owner) - : m_owner(owner) - , m_type(type) - , m_codeBlock(codeBlock) -{ - m_which.index = index; -} - -DesiredWriteBarrier::DesiredWriteBarrier(Type type, CodeBlock* codeBlock, InlineCallFrame* inlineCallFrame, JSCell* owner) - : m_owner(owner) - , m_type(type) - , m_codeBlock(codeBlock) -{ - m_which.inlineCallFrame = inlineCallFrame; -} - -void DesiredWriteBarrier::trigger(VM& vm) -{ - switch (m_type) { - case ConstantType: { - WriteBarrier<Unknown>& barrier = m_codeBlock->constants()[m_which.index]; - barrier.set(vm, m_owner, barrier.get()); - return; - } +class Graph; - case InlineCallFrameExecutableType: { - InlineCallFrame* inlineCallFrame = m_which.inlineCallFrame; - WriteBarrier<ScriptExecutable>& executable = inlineCallFrame->executable; - executable.set(vm, m_owner, executable.get()); - return; - } } - RELEASE_ASSERT_NOT_REACHED(); -} +// Inserts store barriers in a block-local manner without consulting the abstract interpreter. +// Uses a simple epoch-based analysis to avoid inserting redundant barriers. This phase requires +// that we are not in SSA. +bool performFastStoreBarrierInsertion(Graph&); -DesiredWriteBarriers::DesiredWriteBarriers() -{ -} - -DesiredWriteBarriers::~DesiredWriteBarriers() -{ -} - -void DesiredWriteBarriers::trigger(VM& vm) -{ - for (unsigned i = 0; i < m_barriers.size(); i++) - m_barriers[i].trigger(vm); -} +// Inserts store barriers using a global analysis and consults the abstract interpreter. Uses a +// simple epoch-based analysis to avoid inserting redundant barriers, but only propagates "same +// epoch as current" property from one block to the next. This phase requires SSA. This phase +// also requires having valid AI and liveness. 
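The epoch rule that both modes rely on can be pictured with a small standalone model; the ToyHeap and ToyObject names are invented for this sketch and are not WebKit types. Allocation stamps an object with the current epoch, every GC point bumps the epoch, and a store skips its barrier when the base is still in the current epoch or is at least as new as the value being stored, mirroring the scenarios spelled out in the phase above:

    #include <cstdio>

    struct ToyObject { unsigned epoch; };

    struct ToyHeap {
        unsigned currentEpoch = 0;   // 0 plays the role of the "null" epoch: unknown age

        ToyObject allocate()
        {
            ++currentEpoch;          // allocation counts as a GC point here, as in the comments above
            return { currentEpoch }; // the new object is the newest thing in the heap
        }

        void collectGarbage() { ++currentEpoch; }   // any other GC point also bumps the epoch

        bool needsBarrier(const ToyObject& base, const ToyObject& value) const
        {
            if (base.epoch == currentEpoch)
                return false;        // base was just allocated (or just barriered): still remembered
            if (base.epoch && base.epoch >= value.epoch)
                return false;        // base is at least as new as the value being stored into it
            return true;
        }
    };

    int main()
    {
        ToyHeap heap;
        ToyObject o = heap.allocate();
        ToyObject p = heap.allocate();
        std::printf("p.f = o: %d\n", heap.needsBarrier(p, o));   // 0: p is in the current epoch
        heap.collectGarbage();
        std::printf("p.f = o: %d\n", heap.needsBarrier(p, o));   // 0: p is at least as new as o
        ToyObject q = heap.allocate();
        std::printf("p.f = q: %d\n", heap.needsBarrier(p, q));   // 1: q is newer and p is not current
        return 0;
    }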
+bool performGlobalStoreBarrierInsertion(Graph&); } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) + +#endif // DFGStoreBarrierInsertionPhase_h + diff --git a/Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp b/Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp index 3aa991c48..acfad6521 100644 --- a/Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,12 +28,15 @@ #if ENABLE(DFG_JIT) +#include "DFGAbstractHeap.h" +#include "DFGClobberize.h" #include "DFGGraph.h" #include "DFGInsertionSet.h" #include "DFGPhase.h" #include "DFGPredictionPropagationPhase.h" #include "DFGVariableAccessDataDump.h" -#include "Operations.h" +#include "JSCInlines.h" +#include <cstdlib> namespace JSC { namespace DFG { @@ -70,73 +73,224 @@ private: { switch (m_node->op()) { case BitOr: - if (m_node->child1()->isConstant()) { - JSValue op1 = m_graph.valueOfJSConstant(m_node->child1().node()); - if (op1.isInt32() && !op1.asInt32()) { - convertToIdentityOverChild2(); + handleCommutativity(); + + if (m_node->child1().useKind() != UntypedUse && m_node->child2()->isInt32Constant() && !m_node->child2()->asInt32()) { + convertToIdentityOverChild1(); + break; + } + break; + + case BitXor: + case BitAnd: + handleCommutativity(); + break; + + case BitLShift: + case BitRShift: + case BitURShift: + if (m_node->child1().useKind() != UntypedUse && m_node->child2()->isInt32Constant() && !(m_node->child2()->asInt32() & 0x1f)) { + convertToIdentityOverChild1(); + break; + } + break; + + case UInt32ToNumber: + if (m_node->child1()->op() == BitURShift + && m_node->child1()->child2()->isInt32Constant() + && (m_node->child1()->child2()->asInt32() & 0x1f) + && m_node->arithMode() != Arith::DoOverflow) { + m_node->convertToIdentity(); + m_changed = true; + break; + } + break; + + case ArithAdd: + handleCommutativity(); + + if (m_node->child2()->isInt32Constant() && !m_node->child2()->asInt32()) { + convertToIdentityOverChild1(); + break; + } + break; + + case ArithMul: { + handleCommutativity(); + Edge& child2 = m_node->child2(); + if (child2->isNumberConstant() && child2->asNumber() == 2) { + switch (m_node->binaryUseKind()) { + case DoubleRepUse: + // It is always valuable to get rid of a double multiplication by 2. + // We won't have half-register dependencies issues on x86 and we won't have to load the constants. + m_node->setOp(ArithAdd); + child2.setNode(m_node->child1().node()); + m_changed = true; + break; +#if USE(JSVALUE64) + case Int52RepUse: +#endif + case Int32Use: + // For integers, we can only convert compatible modes. + // ArithAdd does handle do negative zero check for example. 
+ if (m_node->arithMode() == Arith::CheckOverflow || m_node->arithMode() == Arith::Unchecked) { + m_node->setOp(ArithAdd); + child2.setNode(m_node->child1().node()); + m_changed = true; + } + break; + default: break; } } - if (m_node->child2()->isConstant()) { - JSValue op2 = m_graph.valueOfJSConstant(m_node->child2().node()); - if (op2.isInt32() && !op2.asInt32()) { - convertToIdentityOverChild1(); + break; + } + case ArithSub: + if (m_node->child2()->isInt32Constant() + && m_node->isBinaryUseKind(Int32Use)) { + int32_t value = m_node->child2()->asInt32(); + if (-value != value) { + m_node->setOp(ArithAdd); + m_node->child2().setNode( + m_insertionSet.insertConstant( + m_nodeIndex, m_node->origin, jsNumber(-value))); + m_changed = true; break; } } break; - - case BitLShift: - case BitRShift: - case BitURShift: - if (m_node->child2()->isConstant()) { - JSValue op2 = m_graph.valueOfJSConstant(m_node->child2().node()); - if (op2.isInt32() && !(op2.asInt32() & 0x1f)) { + + case ArithPow: + if (m_node->child2()->isNumberConstant()) { + double yOperandValue = m_node->child2()->asNumber(); + if (yOperandValue == 1) { convertToIdentityOverChild1(); - break; + } else if (yOperandValue == 0.5) { + m_insertionSet.insertCheck(m_nodeIndex, m_node); + m_node->convertToArithSqrt(); + m_changed = true; } } break; + + case ArithMod: + // On Integers + // In: ArithMod(ArithMod(x, const1), const2) + // Out: Identity(ArithMod(x, const1)) + // if const1 <= const2. + if (m_node->binaryUseKind() == Int32Use + && m_node->child2()->isInt32Constant() + && m_node->child1()->op() == ArithMod + && m_node->child1()->binaryUseKind() == Int32Use + && m_node->child1()->child2()->isInt32Constant() + && std::abs(m_node->child1()->child2()->asInt32()) <= std::abs(m_node->child2()->asInt32())) { + convertToIdentityOverChild1(); + } + break; + + case ValueRep: + case Int52Rep: + case DoubleRep: { + // This short-circuits circuitous conversions, like ValueRep(DoubleRep(value)) or + // even more complicated things. Like, it can handle a beast like + // ValueRep(DoubleRep(Int52Rep(value))). - case UInt32ToNumber: - if (m_node->child1()->op() == BitURShift - && m_node->child1()->child2()->isConstant()) { - JSValue shiftAmount = m_graph.valueOfJSConstant( - m_node->child1()->child2().node()); - if (shiftAmount.isInt32() && (shiftAmount.asInt32() & 0x1f)) { + // The only speculation that we would do beyond validating that we have a type that + // can be represented a certain way is an Int32 check that would appear on Int52Rep + // nodes. For now, if we see this and the final type we want is an Int52, we use it + // as an excuse not to fold. The only thing we would need is a Int52RepInt32Use kind. + bool hadInt32Check = false; + if (m_node->op() == Int52Rep) { + if (m_node->child1().useKind() != Int32Use) + break; + hadInt32Check = true; + } + for (Node* node = m_node->child1().node(); ; node = node->child1().node()) { + if (canonicalResultRepresentation(node->result()) == + canonicalResultRepresentation(m_node->result())) { + m_insertionSet.insertCheck(m_nodeIndex, m_node); + if (hadInt32Check) { + // FIXME: Consider adding Int52RepInt32Use or even DoubleRepInt32Use, + // which would be super weird. The latter would only arise in some + // seriously circuitous conversions. 
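The integer rewrites earlier in this switch rest on plain source-level identities; the quick standalone check below covers the ones added here (multiply by two, subtract a constant, shift by an amount whose low five bits are zero, or with zero, nested modulo). The assertions hold for this particular x; the DFG itself only applies the rewrites under the use-kind and overflow-mode checks shown above.

    #include <cassert>

    int main()
    {
        int x = 1234;
        assert(x * 2 == x + x);          // ArithMul by 2 -> ArithAdd of the operand with itself
        assert(x - 5 == x + (-5));       // ArithSub of a constant -> ArithAdd of the negated constant
        assert((x >> 0) == x);           // a shift whose amount masks to zero is the identity
        assert((x | 0) == x);            // BitOr with zero is the identity
        assert((x % 7) % 10 == x % 7);   // ArithMod(ArithMod(x, 7), 10) -> ArithMod(x, 7)
        return 0;
    }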
+ if (canonicalResultRepresentation(node->result()) != NodeResultJS) + break; + + m_insertionSet.insertCheck( + m_nodeIndex, m_node->origin, Edge(node, Int32Use)); + } + m_node->child1() = node->defaultEdge(); m_node->convertToIdentity(); m_changed = true; break; } + + switch (node->op()) { + case Int52Rep: + if (node->child1().useKind() != Int32Use) + break; + hadInt32Check = true; + continue; + + case DoubleRep: + case ValueRep: + continue; + + default: + break; + } + break; } break; + } - case GetArrayLength: - if (JSArrayBufferView* view = m_graph.tryGetFoldableViewForChild1(m_node)) - foldTypedArrayPropertyToConstant(view, jsNumber(view->length())); - break; + case Flush: { + ASSERT(m_graph.m_form != SSA); - case GetTypedArrayByteOffset: - if (JSArrayBufferView* view = m_graph.tryGetFoldableView(m_node->child1().node())) - foldTypedArrayPropertyToConstant(view, jsNumber(view->byteOffset())); - break; + Node* setLocal = nullptr; + VirtualRegister local = m_node->local(); - case GetIndexedPropertyStorage: - if (JSArrayBufferView* view = m_graph.tryGetFoldableViewForChild1(m_node)) { - if (view->mode() != FastTypedArray) { - prepareToFoldTypedArray(view); - m_node->convertToConstantStoragePointer(view->vector()); - m_changed = true; + for (unsigned i = m_nodeIndex; i--;) { + Node* node = m_block->at(i); + if (node->op() == SetLocal && node->local() == local) { + setLocal = node; break; - } else { - // FIXME: It would be awesome to be able to fold the property storage for - // these GC-allocated typed arrays. For now it doesn't matter because the - // most common use-cases for constant typed arrays involve large arrays with - // aliased buffer views. - // https://bugs.webkit.org/show_bug.cgi?id=125425 } + if (accessesOverlap(m_graph, node, AbstractHeap(Stack, local))) + break; + } + + if (!setLocal) + break; + + // The Flush should become a PhantomLocal at this point. This means that we want the + // local's value during OSR, but we don't care if the value is stored to the stack. CPS + // rethreading can canonicalize PhantomLocals for us. + m_node->convertFlushToPhantomLocal(); + m_graph.dethread(); + m_changed = true; + break; + } + + // FIXME: we should probably do this in constant folding but this currently relies on an OSR exit rule. + // https://bugs.webkit.org/show_bug.cgi?id=154832 + case OverridesHasInstance: { + if (!m_node->child2().node()->isCellConstant()) + break; + + if (m_node->child2().node()->asCell() != m_graph.globalObjectFor(m_node->origin.semantic)->functionProtoHasInstanceSymbolFunction()) { + m_graph.convertToConstant(m_node, jsBoolean(true)); + m_changed = true; + + } else if (!m_graph.hasExitSite(m_node->origin.semantic, BadTypeInfoFlags)) { + // We optimistically assume that we will not see a function that has a custom instanceof operation as they should be rare. 
+ m_insertionSet.insertNode(m_nodeIndex, SpecNone, CheckTypeInfoFlags, m_node->origin, OpInfo(ImplementsDefaultHasInstance), Edge(m_node->child1().node(), CellUse)); + m_graph.convertToConstant(m_node, jsBoolean(false)); + m_changed = true; } + break; + } default: break; @@ -145,8 +299,7 @@ private: void convertToIdentityOverChild(unsigned childIndex) { - m_insertionSet.insertNode( - m_nodeIndex, SpecNone, Phantom, m_node->codeOrigin, m_node->children); + m_insertionSet.insertCheck(m_nodeIndex, m_node); m_node->children.removeEdge(childIndex ^ 1); m_node->convertToIdentity(); m_changed = true; @@ -162,20 +315,26 @@ private: convertToIdentityOverChild(1); } - void foldTypedArrayPropertyToConstant(JSArrayBufferView* view, JSValue constant) + void handleCommutativity() { - prepareToFoldTypedArray(view); - m_graph.convertToConstant(m_node, constant); - m_changed = true; - } - - void prepareToFoldTypedArray(JSArrayBufferView* view) - { - m_insertionSet.insertNode( - m_nodeIndex, SpecNone, TypedArrayWatchpoint, m_node->codeOrigin, - OpInfo(view)); - m_insertionSet.insertNode( - m_nodeIndex, SpecNone, Phantom, m_node->codeOrigin, m_node->children); + // If the right side is a constant then there is nothing left to do. + if (m_node->child2()->hasConstant()) + return; + + // This case ensures that optimizations that look for x + const don't also have + // to look for const + x. + if (m_node->child1()->hasConstant()) { + std::swap(m_node->child1(), m_node->child2()); + m_changed = true; + return; + } + + // This case ensures that CSE is commutativity-aware. + if (m_node->child1().node() > m_node->child2().node()) { + std::swap(m_node->child1(), m_node->child2()); + m_changed = true; + return; + } } InsertionSet m_insertionSet; diff --git a/Source/JavaScriptCore/dfg/DFGStructureAbstractValue.cpp b/Source/JavaScriptCore/dfg/DFGStructureAbstractValue.cpp new file mode 100644 index 000000000..f77f06a74 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGStructureAbstractValue.cpp @@ -0,0 +1,399 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "DFGStructureAbstractValue.h" + +#if ENABLE(DFG_JIT) + +#include "DFGGraph.h" + +namespace JSC { namespace DFG { + +// Comment out the empty SAMPLE() definition, and uncomment the one that uses SamplingRegion, if +// you want extremely fine-grained profiling in this code. +#define SAMPLE(name) +//#define SAMPLE(name) SamplingRegion samplingRegion(name) + +#if !ASSERT_DISABLED +void StructureAbstractValue::assertIsRegistered(Graph& graph) const +{ + SAMPLE("StructureAbstractValue assertIsRegistered"); + + if (isTop()) + return; + + for (unsigned i = size(); i--;) + graph.assertIsRegistered(at(i)); +} +#endif // !ASSERT_DISABLED + +void StructureAbstractValue::clobber() +{ + SAMPLE("StructureAbstractValue clobber"); + + // The premise of this approach to clobbering is that anytime we introduce + // a watchable structure into an abstract value, we watchpoint it. You can assert + // that this holds by calling assertIsWatched(). + + if (isTop()) + return; + + setClobbered(true); + + if (m_set.isThin()) { + if (!m_set.singleEntry()) + return; + if (!m_set.singleEntry()->dfgShouldWatch()) + makeTopWhenThin(); + return; + } + + StructureSet::OutOfLineList* list = m_set.list(); + for (unsigned i = list->m_length; i--;) { + if (!list->list()[i]->dfgShouldWatch()) { + makeTop(); + return; + } + } +} + +void StructureAbstractValue::observeTransition(Structure* from, Structure* to) +{ + SAMPLE("StructureAbstractValue observeTransition"); + + ASSERT(!from->dfgShouldWatch()); + + if (isTop()) + return; + + if (!m_set.contains(from)) + return; + + if (!m_set.add(to)) + return; + + if (m_set.size() > polymorphismLimit) + makeTop(); +} + +void StructureAbstractValue::observeTransitions(const TransitionVector& vector) +{ + SAMPLE("StructureAbstractValue observeTransitions"); + + if (isTop()) + return; + + StructureSet newStructures; + for (unsigned i = vector.size(); i--;) { + ASSERT(!vector[i].previous->dfgShouldWatch()); + + if (!m_set.contains(vector[i].previous)) + continue; + + newStructures.add(vector[i].next); + } + + if (!m_set.merge(newStructures)) + return; + + if (m_set.size() > polymorphismLimit) + makeTop(); +} + +bool StructureAbstractValue::add(Structure* structure) +{ + SAMPLE("StructureAbstractValue add"); + + if (isTop()) + return false; + + if (!m_set.add(structure)) + return false; + + if (m_set.size() > polymorphismLimit) + makeTop(); + + return true; +} + +bool StructureAbstractValue::merge(const StructureSet& other) +{ + SAMPLE("StructureAbstractValue merge set"); + + if (isTop()) + return false; + + return mergeNotTop(other); +} + +bool StructureAbstractValue::mergeSlow(const StructureAbstractValue& other) +{ + SAMPLE("StructureAbstractValue merge value slow"); + + // It isn't immediately obvious that the code below is doing the right thing, so let's go + // through it. + // + // This not clobbered, other not clobbered: Clearly, we don't want to make anything clobbered + // since we just have two sets and we are merging them. mergeNotTop() can handle this just + // fine. + // + // This clobbered, other clobbered: Clobbered means that we have a set of things, plus we + // temporarily have the set of all things but the latter will go away once we hit the next + // invalidation point. This allows us to merge two clobbered sets the natural way. For now + // the set will still be TOP (and so we keep the clobbered bit set), but we know that after + // invalidation, we will have the union of the this and other. 
+ // + // This clobbered, other not clobbered: It's safe to merge in other for both before and after + // invalidation, so long as we leave the clobbered bit set. Before invalidation this has no + // effect since the set will still appear to have all things in it. The way to think about + // what invalidation would do is imagine if we had a set A that was clobbered and a set B + // that wasn't and we considered the following two cases. Note that we expect A to be the + // same at the end in both cases: + // + // A.merge(B) InvalidationPoint + // InvalidationPoint A.merge(B) + // + // The fact that we expect A to be the same in both cases means that we want to merge other + // into this but keep the clobbered bit. + // + // This not clobbered, other clobbered: This is just the converse of the previous case. We + // want to merge other into this and set the clobbered bit. + + bool changed = false; + + if (!isClobbered() && other.isClobbered()) { + setClobbered(true); + changed = true; + } + + changed |= mergeNotTop(other.m_set); + + return changed; +} + +bool StructureAbstractValue::mergeNotTop(const StructureSet& other) +{ + SAMPLE("StructureAbstractValue merge not top"); + + if (!m_set.merge(other)) + return false; + + if (m_set.size() > polymorphismLimit) + makeTop(); + + return true; +} + +void StructureAbstractValue::filter(const StructureSet& other) +{ + SAMPLE("StructureAbstractValue filter set"); + + if (isTop()) { + m_set = other; + return; + } + + if (isClobbered()) { + // We have two choices here: + // + // Do nothing: It's legal to keep our set intact, which would essentially mean that for + // now, our set would behave like TOP but after the next invalidation point it wold be + // a finite set again. This may be a good choice if 'other' is much bigger than our + // m_set. + // + // Replace m_set with other and clear the clobber bit: This is also legal, and means that + // we're no longer clobbered. This is usually better because it immediately gives us a + // smaller set. + // + // This scenario should come up rarely. We usually don't do anything to an abstract value + // after it is clobbered. But we apply some heuristics. + + if (other.size() > m_set.size() + clobberedSupremacyThreshold) + return; // Keep the clobbered set. + + m_set = other; + setClobbered(false); + return; + } + + m_set.filter(other); +} + +void StructureAbstractValue::filter(const StructureAbstractValue& other) +{ + SAMPLE("StructureAbstractValue filter value"); + + if (other.isTop()) + return; + + if (other.isClobbered()) { + if (isTop()) + return; + + if (!isClobbered()) { + // See justification in filter(const StructureSet&), above. An unclobbered set is + // almost always better. + if (m_set.size() > other.m_set.size() + clobberedSupremacyThreshold) + *this = other; // Keep the clobbered set. 
+ return; + } + + m_set.filter(other.m_set); + return; + } + + filter(other.m_set); +} + +void StructureAbstractValue::filterSlow(SpeculatedType type) +{ + SAMPLE("StructureAbstractValue filter type slow"); + + if (!(type & SpecCell)) { + clear(); + return; + } + + ASSERT(!isTop()); + + m_set.genericFilter( + [&] (Structure* structure) { + return !!(speculationFromStructure(structure) & type); + }); +} + +bool StructureAbstractValue::contains(Structure* structure) const +{ + SAMPLE("StructureAbstractValue contains"); + + if (isInfinite()) + return true; + + return m_set.contains(structure); +} + +bool StructureAbstractValue::isSubsetOf(const StructureSet& other) const +{ + SAMPLE("StructureAbstractValue isSubsetOf set"); + + if (isInfinite()) + return false; + + return m_set.isSubsetOf(other); +} + +bool StructureAbstractValue::isSubsetOf(const StructureAbstractValue& other) const +{ + SAMPLE("StructureAbstractValue isSubsetOf value"); + + if (isTop()) + return false; + + if (other.isTop()) + return true; + + if (isClobbered() == other.isClobbered()) + return m_set.isSubsetOf(other.m_set); + + // Here it gets tricky. If in doubt, return false! + + if (isClobbered()) + return false; // A clobbered set is never a subset of an unclobbered set. + + // An unclobbered set is currently a subset of a clobbered set, but it may not be so after + // invalidation. + return m_set.isSubsetOf(other.m_set); +} + +bool StructureAbstractValue::isSupersetOf(const StructureSet& other) const +{ + SAMPLE("StructureAbstractValue isSupersetOf set"); + + if (isInfinite()) + return true; + + return m_set.isSupersetOf(other); +} + +bool StructureAbstractValue::overlaps(const StructureSet& other) const +{ + SAMPLE("StructureAbstractValue overlaps set"); + + if (isInfinite()) + return true; + + return m_set.overlaps(other); +} + +bool StructureAbstractValue::overlaps(const StructureAbstractValue& other) const +{ + SAMPLE("StructureAbstractValue overlaps value"); + + if (other.isInfinite()) + return true; + + return overlaps(other.m_set); +} + +bool StructureAbstractValue::equalsSlow(const StructureAbstractValue& other) const +{ + SAMPLE("StructureAbstractValue equalsSlow"); + + ASSERT(m_set.m_pointer != other.m_set.m_pointer); + ASSERT(!isTop()); + ASSERT(!other.isTop()); + + return m_set == other.m_set + && isClobbered() == other.isClobbered(); +} + +void StructureAbstractValue::dumpInContext(PrintStream& out, DumpContext* context) const +{ + if (isClobbered()) + out.print("Clobbered:"); + + if (isTop()) + out.print("TOP"); + else + out.print(inContext(m_set, context)); +} + +void StructureAbstractValue::dump(PrintStream& out) const +{ + dumpInContext(out, 0); +} + +void StructureAbstractValue::validateReferences(const TrackedReferences& trackedReferences) const +{ + if (isTop()) + return; + m_set.validateReferences(trackedReferences); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGStructureAbstractValue.h b/Source/JavaScriptCore/dfg/DFGStructureAbstractValue.h index 54d3bd29b..a1a5f2982 100644 --- a/Source/JavaScriptCore/dfg/DFGStructureAbstractValue.h +++ b/Source/JavaScriptCore/dfg/DFGStructureAbstractValue.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,313 +26,241 @@ #ifndef DFGStructureAbstractValue_h #define DFGStructureAbstractValue_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) +#include "DFGTransition.h" #include "JSCell.h" #include "SpeculatedType.h" #include "DumpContext.h" #include "StructureSet.h" -namespace JSC { namespace DFG { +namespace JSC { + +class TrackedReferences; + +namespace DFG { class StructureAbstractValue { public: - StructureAbstractValue() - : m_structure(0) - { - } - + StructureAbstractValue() { } StructureAbstractValue(Structure* structure) - : m_structure(structure) + : m_set(StructureSet(structure)) { + setClobbered(false); } - - StructureAbstractValue(const StructureSet& set) + StructureAbstractValue(const StructureSet& other) + : m_set(other) { - switch (set.size()) { - case 0: - m_structure = 0; - break; - - case 1: - m_structure = set[0]; - break; - - default: - m_structure = topValue(); - break; - } + setClobbered(false); } - - void clear() + ALWAYS_INLINE StructureAbstractValue(const StructureAbstractValue& other) + : m_set(other.m_set) { - m_structure = 0; + setClobbered(other.isClobbered()); } - void makeTop() + ALWAYS_INLINE StructureAbstractValue& operator=(Structure* structure) { - m_structure = topValue(); + m_set = StructureSet(structure); + setClobbered(false); + return *this; } - - static StructureAbstractValue top() + ALWAYS_INLINE StructureAbstractValue& operator=(const StructureSet& other) { - StructureAbstractValue value; - value.makeTop(); - return value; + m_set = other; + setClobbered(false); + return *this; } - - void add(Structure* structure) + ALWAYS_INLINE StructureAbstractValue& operator=(const StructureAbstractValue& other) { - ASSERT(!contains(structure) && !isTop()); - if (m_structure) - makeTop(); - else - m_structure = structure; + m_set = other.m_set; + setClobbered(other.isClobbered()); + return *this; } - bool addAll(const StructureSet& other) + void clear() { - if (isTop() || !other.size()) - return false; - if (other.size() > 1) { - makeTop(); - return true; - } - if (!m_structure) { - m_structure = other[0]; - return true; - } - if (m_structure == other[0]) - return false; - makeTop(); - return true; + m_set.clear(); + setClobbered(false); } - bool addAll(const StructureAbstractValue& other) + void makeTop() { - if (!other.m_structure) - return false; - if (isTop()) - return false; - if (other.isTop()) { - makeTop(); - return true; - } - if (m_structure) { - if (m_structure == other.m_structure) - return false; - makeTop(); - return true; - } - m_structure = other.m_structure; - return true; + m_set.deleteListIfNecessary(); + m_set.m_pointer = topValue; } - bool contains(Structure* structure) const - { - if (isTop()) - return true; - if (m_structure == structure) - return true; - return false; - } +#if ASSERT_DISABLED + void assertIsRegistered(Graph&) const { } +#else + void assertIsRegistered(Graph&) const; +#endif - bool isSubsetOf(const StructureSet& other) const - { - if (isTop()) - return false; - if (!m_structure) - return true; - return other.contains(m_structure); - } + void clobber(); + void observeInvalidationPoint() { setClobbered(false); } - bool doesNotContainAnyOtherThan(Structure* structure) const - { - if (isTop()) - return false; - if (!m_structure) - return true; - return m_structure == structure; - } + void observeTransition(Structure* from, Structure* to); + void observeTransitions(const 
TransitionVector&); - bool isSupersetOf(const StructureSet& other) const + static StructureAbstractValue top() { - if (isTop()) - return true; - if (!other.size()) - return true; - if (other.size() > 1) - return false; - return m_structure == other[0]; + StructureAbstractValue result; + result.m_set.m_pointer = topValue; + return result; } - bool isSubsetOf(const StructureAbstractValue& other) const - { - if (other.isTop()) - return true; - if (isTop()) - return false; - if (m_structure) { - if (other.m_structure) - return m_structure == other.m_structure; - return false; - } - return true; - } + bool isClear() const { return m_set.isEmpty(); } + bool isTop() const { return m_set.m_pointer == topValue; } + bool isNeitherClearNorTop() const { return !isClear() && !isTop(); } + + // A clobbered abstract value means that the set currently contains the m_set set of + // structures plus TOP, except that the "plus TOP" will go away at the next invalidation + // point. Note that it's tempting to think of this as "the set of structures in m_set plus + // the set of structures transition-reachable from m_set" - but this isn't really correct, + // since if we add an unwatchable structure after clobbering, the two definitions are not + // equivalent. If we do this, the new unwatchable structure will be added to m_set. + // Invalidation points do not try to "clip" the set of transition-reachable structures from + // m_set by looking at reachability as this would mean that the new set is TOP. Instead they + // literally assume that the set is just m_set rather than m_set plus TOP. + bool isClobbered() const { return m_set.getReservedFlag(); } + + // A finite structure abstract value is one where enumerating over it will yield all + // of the structures that the value may have right now. This is true so long as we're + // neither top nor clobbered. + bool isFinite() const { return !isTop() && !isClobbered(); } + + // An infinite structure abstract value may currently have any structure. 
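A toy model may help picture these three states and the conservative queries below; the set of ints stands in for Structure*, and the explicit booleans stand in for the clobbered flag and TOP encoding that the real class packs into the StructureSet pointer.

    #include <cstdio>
    #include <set>

    struct ToyStructureValue {
        std::set<int> structures;   // stand-in for the finite set of Structure*
        bool top = false;
        bool clobbered = false;     // "this set, plus TOP until the next invalidation point"

        bool isFinite() const { return !top && !clobbered; }
        bool isInfinite() const { return !isFinite(); }

        bool mayContain(int structure) const
        {
            if (isInfinite())
                return true;        // conservative: right now the value could have any structure
            return structures.count(structure) != 0;
        }

        void observeInvalidationPoint() { clobbered = false; }  // the "plus TOP" part expires
    };

    int main()
    {
        ToyStructureValue value;
        value.structures = { 1, 2 };
        value.clobbered = true;
        std::printf("clobbered, may contain 3: %d\n", value.mayContain(3));          // 1
        value.observeInvalidationPoint();
        std::printf("after invalidation, may contain 3: %d\n", value.mayContain(3)); // 0
        return 0;
    }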
+ bool isInfinite() const { return !isFinite(); } - bool isSupersetOf(const StructureAbstractValue& other) const - { - return other.isSubsetOf(*this); - } + bool add(Structure* structure); + + bool merge(const StructureSet& other); - void filter(const StructureSet& other) + ALWAYS_INLINE bool merge(const StructureAbstractValue& other) { - if (!m_structure) - return; + if (other.isClear()) + return false; - if (isTop()) { - switch (other.size()) { - case 0: - m_structure = 0; - return; - - case 1: - m_structure = other[0]; - return; - - default: - return; - } - } + if (isTop()) + return false; - if (other.contains(m_structure)) - return; + if (other.isTop()) { + makeTop(); + return true; + } - m_structure = 0; + return mergeSlow(other); } - void filter(const StructureAbstractValue& other) + void filter(const StructureSet& other); + void filter(const StructureAbstractValue& other); + + ALWAYS_INLINE void filter(SpeculatedType type) { - if (isTop()) { - m_structure = other.m_structure; + if (!(type & SpecCell)) { + clear(); return; } - if (m_structure == other.m_structure) - return; - if (other.isTop()) - return; - m_structure = 0; + if (isNeitherClearNorTop()) + filterSlow(type); } - void filter(SpeculatedType other) + ALWAYS_INLINE bool operator==(const StructureAbstractValue& other) const { - if (!(other & SpecCell)) { - clear(); - return; - } + if ((m_set.isThin() && other.m_set.isThin()) || isTop() || other.isTop()) + return m_set.m_pointer == other.m_set.m_pointer; - if (isClearOrTop()) - return; - - if (!(speculationFromStructure(m_structure) & other)) - m_structure = 0; + return equalsSlow(other); } - bool isClear() const + const StructureSet& set() const { - return !m_structure; + ASSERT(!isTop()); + return m_set; } - bool isTop() const { return m_structure == topValue(); } - - bool isClearOrTop() const { return m_structure <= topValue(); } - bool isNeitherClearNorTop() const { return !isClearOrTop(); } - size_t size() const { ASSERT(!isTop()); - return !!m_structure; + return m_set.size(); } Structure* at(size_t i) const { ASSERT(!isTop()); - ASSERT(m_structure); - ASSERT_UNUSED(i, !i); - return m_structure; + return m_set.at(i); } - Structure* operator[](size_t i) const + Structure* operator[](size_t i) const { return at(i); } + + // In most cases, what you really want to do is verify whether the set is top or clobbered, and + // if not, enumerate the set of structures. Use this only in cases where the singleton case is + // meaningfully special, like for transitions. + Structure* onlyStructure() const { - return at(i); + if (isInfinite()) + return nullptr; + return m_set.onlyStructure(); } - - Structure* last() const + + template<typename Functor> + void forEach(const Functor& functor) const { - return at(0); + ASSERT(!isTop()); + m_set.forEach(functor); } - SpeculatedType speculationFromStructures() const - { - if (isTop()) - return SpecCell; - if (isClear()) - return SpecNone; - return speculationFromStructure(m_structure); - } + void dumpInContext(PrintStream&, DumpContext*) const; + void dump(PrintStream&) const; - bool isValidOffset(PropertyOffset offset) - { - if (isTop()) - return false; - if (isClear()) - return true; - return m_structure->isValidOffset(offset); - } + // The methods below are all conservative and err on the side of making 'this' appear bigger + // than it is. For example, contains() may return true if the set is clobbered or TOP. + // isSubsetOf() may return false in case of ambiguities. 
Therefore you should only perform + // optimizations as a consequence of the "this is smaller" return value - so false for + // contains(), true for isSubsetOf(), false for isSupersetOf(), and false for overlaps(). + + bool contains(Structure* structure) const; - bool hasSingleton() const - { - return isNeitherClearNorTop(); - } + bool isSubsetOf(const StructureSet& other) const; + bool isSubsetOf(const StructureAbstractValue& other) const; - Structure* singleton() const + bool isSupersetOf(const StructureSet& other) const; + bool isSupersetOf(const StructureAbstractValue& other) const { - ASSERT(isNeitherClearNorTop()); - return m_structure; + return other.isSubsetOf(*this); } - bool operator==(const StructureAbstractValue& other) const - { - return m_structure == other.m_structure; - } + bool overlaps(const StructureSet& other) const; + bool overlaps(const StructureAbstractValue& other) const; + + void validateReferences(const TrackedReferences&) const; + +private: + static const uintptr_t clobberedFlag = StructureSet::reservedFlag; + static const uintptr_t topValue = StructureSet::reservedValue; + static const unsigned polymorphismLimit = 10; + static const unsigned clobberedSupremacyThreshold = 2; - void dumpInContext(PrintStream& out, DumpContext* context) const + void filterSlow(SpeculatedType type); + bool mergeSlow(const StructureAbstractValue& other); + + bool equalsSlow(const StructureAbstractValue& other) const; + + void makeTopWhenThin() { - if (isTop()) { - out.print("TOP"); - return; - } - - out.print("["); - if (m_structure) - out.print(inContext(*m_structure, context)); - out.print("]"); + ASSERT(m_set.isThin()); + m_set.m_pointer = topValue; } - - void dump(PrintStream& out) const + + bool mergeNotTop(const StructureSet& other); + + void setClobbered(bool clobbered) { - dumpInContext(out, 0); + ASSERT(!isTop() || !clobbered); + m_set.setReservedFlag(clobbered); } - -private: - static Structure* topValue() { return reinterpret_cast<Structure*>(1); } - - // NB. This must have a trivial destructor. - // This can only remember one structure at a time. - Structure* m_structure; + StructureSet m_set; }; } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGStructureClobberState.h b/Source/JavaScriptCore/dfg/DFGStructureClobberState.h new file mode 100644 index 000000000..ac4275af3 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGStructureClobberState.h @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGStructureClobberState_h +#define DFGStructureClobberState_h + +#if ENABLE(DFG_JIT) + +#include <wtf/PrintStream.h> + +namespace JSC { namespace DFG { + +enum StructureClobberState { + StructuresAreWatched, // Constants with watchable structures must have those structures. + StructuresAreClobbered // Constants with watchable structures could have any structure. +}; + +inline StructureClobberState merge(StructureClobberState a, StructureClobberState b) +{ + switch (a) { + case StructuresAreWatched: + return b; + case StructuresAreClobbered: + return StructuresAreClobbered; + } + RELEASE_ASSERT_NOT_REACHED(); + return StructuresAreClobbered; +} + +} } // namespace JSC::DFG + +namespace WTF { + +inline void printInternal(PrintStream& out, JSC::DFG::StructureClobberState state) +{ + switch (state) { + case JSC::DFG::StructuresAreWatched: + out.print("StructuresAreWatched"); + return; + case JSC::DFG::StructuresAreClobbered: + out.print("StructuresAreClobbered"); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF + +#endif // ENABLE(DFG_JIT) + +#endif // DFGStructureClobberState_h diff --git a/Source/JavaScriptCore/dfg/DFGStructureRegistrationPhase.cpp b/Source/JavaScriptCore/dfg/DFGStructureRegistrationPhase.cpp new file mode 100644 index 000000000..2c0559849 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGStructureRegistrationPhase.cpp @@ -0,0 +1,197 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "DFGStructureRegistrationPhase.h" + +#if ENABLE(DFG_JIT) + +#include "DFGBasicBlockInlines.h" +#include "DFGGraph.h" +#include "DFGPhase.h" +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +class StructureRegistrationPhase : public Phase { +public: + StructureRegistrationPhase(Graph& graph) + : Phase(graph, "structure registration") + { + } + + bool run() + { + // FIXME: This phase shouldn't exist. We should have registered all structures by now, since + // we may already have done optimizations that rely on structures having been registered. + // Currently, we still have places where we don't register structures prior to this phase, + // but structures don't end up being used for optimization prior to this phase. That's a + // pretty fragile situation and we should fix it eventually. + // https://bugs.webkit.org/show_bug.cgi?id=147889 + + // We need to set this before this phase finishes. This phase doesn't do anything + // conditioned on this field, except for assertIsRegistered() below. We intend for that + // method to behave as if the phase was already finished. So, we set this up here. + m_graph.m_structureRegistrationState = AllStructuresAreRegistered; + + // These are pretty dumb, but needed to placate subsequent assertions. We don't actually + // have to watch these because there is no way to transition away from it, but they are + // watchable and so we will assert if they aren't watched. + registerStructure(m_graph.m_vm.structureStructure.get()); + registerStructure(m_graph.m_vm.stringStructure.get()); + registerStructure(m_graph.m_vm.symbolStructure.get()); + registerStructure(m_graph.m_vm.getterSetterStructure.get()); + + for (FrozenValue* value : m_graph.m_frozenValues) + assertIsRegistered(value->structure()); + + for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { + BasicBlock* block = m_graph.block(blockIndex); + if (!block) + continue; + + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + Node* node = block->at(nodeIndex); + + switch (node->op()) { + case CheckStructure: + assertAreRegistered(node->structureSet()); + break; + + case NewObject: + case ArrayifyToStructure: + case NewStringObject: + registerStructure(node->structure()); + break; + + case PutStructure: + case AllocatePropertyStorage: + case ReallocatePropertyStorage: + registerStructure(node->transition()->previous); + registerStructure(node->transition()->next); + break; + + case MultiGetByOffset: + for (const MultiGetByOffsetCase& getCase : node->multiGetByOffsetData().cases) + registerStructures(getCase.set()); + break; + + case MultiPutByOffset: + for (unsigned i = node->multiPutByOffsetData().variants.size(); i--;) { + PutByIdVariant& variant = node->multiPutByOffsetData().variants[i]; + registerStructures(variant.oldStructure()); + if (variant.kind() == PutByIdVariant::Transition) + registerStructure(variant.newStructure()); + } + break; + + case NewArray: + case NewArrayBuffer: + case NewArrayWithSize: { + JSGlobalObject* globalObject = m_graph.globalObjectFor(node->origin.semantic); + registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())); + registerStructure(globalObject->originalArrayStructureForIndexingType(ArrayWithSlowPutArrayStorage)); + break; + } + + case NewTypedArray: + registerStructure(m_graph.globalObjectFor(node->origin.semantic)->typedArrayStructure(node->typedArrayType())); + break; + + case ToString: + case CallStringConstructor: + 
registerStructure(m_graph.globalObjectFor(node->origin.semantic)->stringObjectStructure()); + break; + + case CreateActivation: + registerStructure(m_graph.globalObjectFor(node->origin.semantic)->activationStructure()); + break; + + case CreateDirectArguments: + registerStructure(m_graph.globalObjectFor(node->origin.semantic)->directArgumentsStructure()); + break; + + case CreateScopedArguments: + registerStructure(m_graph.globalObjectFor(node->origin.semantic)->scopedArgumentsStructure()); + break; + + case NewRegexp: + registerStructure(m_graph.globalObjectFor(node->origin.semantic)->regExpStructure()); + break; + case NewArrowFunction: + registerStructure(m_graph.globalObjectFor(node->origin.semantic)->functionStructure()); + break; + case NewFunction: + registerStructure(m_graph.globalObjectFor(node->origin.semantic)->functionStructure()); + break; + case NewGeneratorFunction: + registerStructure(m_graph.globalObjectFor(node->origin.semantic)->generatorFunctionStructure()); + break; + + default: + break; + } + } + } + + return true; + } + +private: + void registerStructures(const StructureSet& set) + { + for (Structure* structure : set) + registerStructure(structure); + } + + void registerStructure(Structure* structure) + { + if (structure) + m_graph.registerStructure(structure); + } + + void assertAreRegistered(const StructureSet& set) + { + for (Structure* structure : set) + assertIsRegistered(structure); + } + + void assertIsRegistered(Structure* structure) + { + if (structure) + m_graph.assertIsRegistered(structure); + } +}; + +bool performStructureRegistration(Graph& graph) +{ + SamplingRegion samplingRegion("DFG Structure Registration Phase"); + return runPhase<StructureRegistrationPhase>(graph); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGStructureRegistrationPhase.h b/Source/JavaScriptCore/dfg/DFGStructureRegistrationPhase.h new file mode 100644 index 000000000..bba789164 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGStructureRegistrationPhase.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef DFGStructureRegistrationPhase_h +#define DFGStructureRegistrationPhase_h + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +class Graph; + +// Registers any structures we know about as weak references, and sets watchpoints on any +// such structures that we know of that are currently watchable. It's somewhat +// counterintuitive, but this ends up being the cleanest and most effective way of reducing +// structure checks on terminal structures: +// +// - We used to only set watchpoints on watchable structures if we knew that this would +// remove a structure check. Experiments show that switching from that, to blindly +// setting watchpoints on all watchable structures, was not a regression. +// +// - It makes abstract interpretation a whole lot easier. We just assume that watchable +// structures are unclobberable without having to do any other logic. + +bool performStructureRegistration(Graph&); + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGStructureRegistrationPhase_h + diff --git a/Source/JavaScriptCore/dfg/DFGThreadData.cpp b/Source/JavaScriptCore/dfg/DFGThreadData.cpp new file mode 100644 index 000000000..d605b0092 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGThreadData.cpp @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGThreadData.h" + +#if ENABLE(DFG_JIT) + +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +ThreadData::ThreadData(Worklist* worklist) + : m_worklist(worklist) + , m_identifier(0) + , m_safepoint(nullptr) +{ +} + +ThreadData::~ThreadData() +{ +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGValueRecoveryOverride.h b/Source/JavaScriptCore/dfg/DFGThreadData.h index 89de9fd08..d86cf9078 100644 --- a/Source/JavaScriptCore/dfg/DFGValueRecoveryOverride.h +++ b/Source/JavaScriptCore/dfg/DFGThreadData.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,35 +23,38 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef DFGValueRecoveryOverride_h -#define DFGValueRecoveryOverride_h - -#include <wtf/Platform.h> +#ifndef DFGThreadData_h +#define DFGThreadData_h #if ENABLE(DFG_JIT) -#include "ValueRecovery.h" -#include <wtf/RefCounted.h> +#include <wtf/Lock.h> +#include <wtf/Threading.h> namespace JSC { namespace DFG { -class ValueRecoveryOverride : public RefCounted<ValueRecoveryOverride> { +class Safepoint; +class Worklist; + +class ThreadData { + WTF_MAKE_FAST_ALLOCATED; public: - ValueRecoveryOverride() { } + ThreadData(Worklist*); + ~ThreadData(); - ValueRecoveryOverride(VirtualRegister operand, const ValueRecovery& recovery) - : operand(operand) - , recovery(recovery) - { - } +private: + friend class Safepoint; + friend class Worklist; - VirtualRegister operand; - ValueRecovery recovery; + Worklist* m_worklist; + ThreadIdentifier m_identifier; + Lock m_rightToRun; + Safepoint* m_safepoint; }; } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) -#endif // DFGValueRecoveryOverride_h +#endif // DFGThreadData_h diff --git a/Source/JavaScriptCore/dfg/DFGThunks.cpp b/Source/JavaScriptCore/dfg/DFGThunks.cpp index c0935b95a..0c6194613 100644 --- a/Source/JavaScriptCore/dfg/DFGThunks.cpp +++ b/Source/JavaScriptCore/dfg/DFGThunks.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,15 +30,22 @@ #include "CCallHelpers.h" #include "DFGOSRExitCompiler.h" +#include "DFGJITCode.h" #include "FPRInfo.h" #include "GPRInfo.h" +#include "LinkBuffer.h" #include "MacroAssembler.h" +#include "JSCInlines.h" +#include "DFGOSRExitCompilerCommon.h" namespace JSC { namespace DFG { MacroAssemblerCodeRef osrExitGenerationThunkGenerator(VM* vm) { MacroAssembler jit; + + // This needs to happen before we use the scratch buffer because this function also uses the scratch buffer. + adjustFrameAndStackInOSRExitCompilerThunk<DFG::JITCode>(jit, vm, JITCode::DFGJIT); size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters); ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(scratchSize); @@ -86,13 +93,57 @@ MacroAssemblerCodeRef osrExitGenerationThunkGenerator(VM* vm) jit.jump(MacroAssembler::AbsoluteAddress(&vm->osrExitJumpDestination)); - LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID); + LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); patchBuffer.link(functionCall, compileOSRExit); return FINALIZE_CODE(patchBuffer, ("DFG OSR exit generation thunk")); } +MacroAssemblerCodeRef osrEntryThunkGenerator(VM* vm) +{ + AssemblyHelpers jit(vm, nullptr); + + // We get passed the address of a scratch buffer. The first 8-byte slot of the buffer + // is the frame size. The second 8-byte slot is the pointer to where we are supposed to + // jump. The remaining bytes are the new call frame header followed by the locals. + + ptrdiff_t offsetOfFrameSize = 0; // This is the DFG frame count. 
+ ptrdiff_t offsetOfTargetPC = offsetOfFrameSize + sizeof(EncodedJSValue); + ptrdiff_t offsetOfPayload = offsetOfTargetPC + sizeof(EncodedJSValue); + ptrdiff_t offsetOfLocals = offsetOfPayload + sizeof(Register) * JSStack::CallFrameHeaderSize; + + jit.move(GPRInfo::returnValueGPR2, GPRInfo::regT0); + jit.loadPtr(MacroAssembler::Address(GPRInfo::regT0, offsetOfFrameSize), GPRInfo::regT1); // Load the frame size. + jit.move(GPRInfo::regT1, GPRInfo::regT2); + jit.lshiftPtr(MacroAssembler::Imm32(3), GPRInfo::regT2); + jit.move(GPRInfo::callFrameRegister, MacroAssembler::stackPointerRegister); + jit.subPtr(GPRInfo::regT2, MacroAssembler::stackPointerRegister); + + MacroAssembler::Label loop = jit.label(); + jit.subPtr(MacroAssembler::TrustedImm32(1), GPRInfo::regT1); + jit.move(GPRInfo::regT1, GPRInfo::regT4); + jit.negPtr(GPRInfo::regT4); + jit.load32(MacroAssembler::BaseIndex(GPRInfo::regT0, GPRInfo::regT1, MacroAssembler::TimesEight, offsetOfLocals), GPRInfo::regT2); + jit.load32(MacroAssembler::BaseIndex(GPRInfo::regT0, GPRInfo::regT1, MacroAssembler::TimesEight, offsetOfLocals + sizeof(int32_t)), GPRInfo::regT3); + jit.store32(GPRInfo::regT2, MacroAssembler::BaseIndex(GPRInfo::callFrameRegister, GPRInfo::regT4, MacroAssembler::TimesEight, -static_cast<intptr_t>(sizeof(Register)))); + jit.store32(GPRInfo::regT3, MacroAssembler::BaseIndex(GPRInfo::callFrameRegister, GPRInfo::regT4, MacroAssembler::TimesEight, -static_cast<intptr_t>(sizeof(Register)) + static_cast<intptr_t>(sizeof(int32_t)))); + jit.branchPtr(MacroAssembler::NotEqual, GPRInfo::regT1, MacroAssembler::TrustedImmPtr(bitwise_cast<void*>(-static_cast<intptr_t>(JSStack::CallFrameHeaderSize)))).linkTo(loop, &jit); + + jit.loadPtr(MacroAssembler::Address(GPRInfo::regT0, offsetOfTargetPC), GPRInfo::regT1); + MacroAssembler::Jump ok = jit.branchPtr(MacroAssembler::Above, GPRInfo::regT1, MacroAssembler::TrustedImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(1000)))); + jit.abortWithReason(DFGUnreasonableOSREntryJumpDestination); + + ok.link(&jit); + jit.restoreCalleeSavesFromVMCalleeSavesBuffer(); + jit.emitMaterializeTagCheckRegisters(); + + jit.jump(GPRInfo::regT1); + + LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); + return FINALIZE_CODE(patchBuffer, ("DFG OSR entry thunk")); +} + } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGThunks.h b/Source/JavaScriptCore/dfg/DFGThunks.h index 60bfea634..6ef0c50cc 100644 --- a/Source/JavaScriptCore/dfg/DFGThunks.h +++ b/Source/JavaScriptCore/dfg/DFGThunks.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGThunks_h #define DFGThunks_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "MacroAssemblerCodeRef.h" @@ -39,6 +37,7 @@ class VM; namespace DFG { MacroAssemblerCodeRef osrExitGenerationThunkGenerator(VM*); +MacroAssemblerCodeRef osrEntryThunkGenerator(VM*); } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.cpp b/Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.cpp index d51a1f0d8..9b14a9820 100644 --- a/Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. 
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,9 +30,10 @@ #include "DFGGraph.h" #include "DFGInsertionSet.h" +#include "DFGNaturalLoops.h" #include "DFGPhase.h" #include "FTLCapabilities.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -47,7 +48,13 @@ public: { RELEASE_ASSERT(m_graph.m_plan.mode == DFGMode); - if (!Options::useExperimentalFTL()) + if (!Options::useFTLJIT()) + return false; + + if (m_graph.m_profiledBlock->m_didFailFTLCompilation) + return false; + + if (!Options::bytecodeRangeToFTLCompile().isInRange(m_graph.m_profiledBlock->instructionCount())) return false; #if ENABLE(FTL_JIT) @@ -55,37 +62,102 @@ public: if (level == FTL::CannotCompile) return false; + if (!Options::useOSREntryToFTL()) + level = FTL::CanCompile; + + // First we find all the loops that contain a LoopHint for which we cannot OSR enter. + // We use that information to decide if we need CheckTierUpAndOSREnter or CheckTierUpWithNestedTriggerAndOSREnter. + m_graph.ensureNaturalLoops(); + NaturalLoops& naturalLoops = *m_graph.m_naturalLoops; + + HashSet<const NaturalLoop*> loopsContainingLoopHintWithoutOSREnter = findLoopsContainingLoopHintWithoutOSREnter(naturalLoops, level); + + bool canTierUpAndOSREnter = false; + InsertionSet insertionSet(m_graph); for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { BasicBlock* block = m_graph.block(blockIndex); if (!block) continue; - if (block->at(0)->op() == LoopHint) { - CodeOrigin codeOrigin = block->at(0)->codeOrigin; - NodeType nodeType; - if (level == FTL::CanCompileAndOSREnter && !codeOrigin.inlineCallFrame) { - nodeType = CheckTierUpAndOSREnter; - RELEASE_ASSERT(block->bytecodeBegin == codeOrigin.bytecodeIndex); + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + Node* node = block->at(nodeIndex); + if (node->op() != LoopHint) + continue; + + NodeOrigin origin = node->origin; + if (canOSREnterAtLoopHint(level, block, nodeIndex)) { + canTierUpAndOSREnter = true; + const NaturalLoop* loop = naturalLoops.innerMostLoopOf(block); + if (loop && loopsContainingLoopHintWithoutOSREnter.contains(loop)) + insertionSet.insertNode(nodeIndex + 1, SpecNone, CheckTierUpWithNestedTriggerAndOSREnter, origin); + else + insertionSet.insertNode(nodeIndex + 1, SpecNone, CheckTierUpAndOSREnter, origin); } else - nodeType = CheckTierUpInLoop; - insertionSet.insertNode(1, SpecNone, nodeType, codeOrigin); + insertionSet.insertNode(nodeIndex + 1, SpecNone, CheckTierUpInLoop, origin); + break; } - if (block->last()->op() == Return) { + NodeAndIndex terminal = block->findTerminal(); + if (terminal.node->isFunctionTerminal()) { insertionSet.insertNode( - block->size() - 1, SpecNone, CheckTierUpAtReturn, block->last()->codeOrigin); + terminal.index, SpecNone, CheckTierUpAtReturn, terminal.node->origin); } insertionSet.execute(block); } - + + m_graph.m_plan.canTierUpAndOSREnter = canTierUpAndOSREnter; + m_graph.m_plan.willTryToTierUp = true; return true; #else // ENABLE(FTL_JIT) RELEASE_ASSERT_NOT_REACHED(); return false; #endif // ENABLE(FTL_JIT) } + +private: +#if ENABLE(FTL_JIT) + bool canOSREnterAtLoopHint(FTL::CapabilityLevel level, const BasicBlock* block, unsigned nodeIndex) + { + Node* node = block->at(nodeIndex); + ASSERT(node->op() == LoopHint); + + NodeOrigin origin = node->origin; + if (level != FTL::CanCompileAndOSREnter || 
origin.semantic.inlineCallFrame) + return false; + + // We only put OSR checks for the first LoopHint in the block. Note that + // more than one LoopHint could happen in cases where we did a lot of CFG + // simplification in the bytecode parser, but it should be very rare. + for (unsigned subNodeIndex = nodeIndex; subNodeIndex--;) { + if (!block->at(subNodeIndex)->isSemanticallySkippable()) + return false; + } + return true; + } + + HashSet<const NaturalLoop*> findLoopsContainingLoopHintWithoutOSREnter(const NaturalLoops& naturalLoops, FTL::CapabilityLevel level) + { + HashSet<const NaturalLoop*> loopsContainingLoopHintWithoutOSREnter; + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) { + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + Node* node = block->at(nodeIndex); + if (node->op() != LoopHint) + continue; + + if (!canOSREnterAtLoopHint(level, block, nodeIndex)) { + const NaturalLoop* loop = naturalLoops.innerMostLoopOf(block); + while (loop) { + loopsContainingLoopHintWithoutOSREnter.add(loop); + loop = naturalLoops.innerMostOuterLoop(*loop); + } + } + } + } + return loopsContainingLoopHintWithoutOSREnter; + } +#endif }; bool performTierUpCheckInjection(Graph& graph) diff --git a/Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.h b/Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.h index f6e799ad8..25e935589 100644 --- a/Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.h +++ b/Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.h @@ -26,8 +26,6 @@ #ifndef DFGTierUpCheckInjectionPhase_h #define DFGTierUpCheckInjectionPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) namespace JSC { namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.cpp b/Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.cpp index 7770fd627..a18664173 100644 --- a/Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.cpp +++ b/Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.cpp @@ -31,59 +31,59 @@ #include "CodeBlock.h" #include "DFGJITCode.h" #include "Executable.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { -ToFTLDeferredCompilationCallback::ToFTLDeferredCompilationCallback( - PassRefPtr<CodeBlock> dfgCodeBlock) - : m_dfgCodeBlock(dfgCodeBlock) +ToFTLDeferredCompilationCallback::ToFTLDeferredCompilationCallback() { } ToFTLDeferredCompilationCallback::~ToFTLDeferredCompilationCallback() { } -PassRefPtr<ToFTLDeferredCompilationCallback> ToFTLDeferredCompilationCallback::create( - PassRefPtr<CodeBlock> dfgCodeBlock) +Ref<ToFTLDeferredCompilationCallback> ToFTLDeferredCompilationCallback::create() { - return adoptRef(new ToFTLDeferredCompilationCallback(dfgCodeBlock)); + return adoptRef(*new ToFTLDeferredCompilationCallback()); } void ToFTLDeferredCompilationCallback::compilationDidBecomeReadyAsynchronously( - CodeBlock* codeBlock) + CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock) { if (Options::verboseOSR()) { dataLog( - "Optimizing compilation of ", *codeBlock, " (for ", *m_dfgCodeBlock, + "Optimizing compilation of ", *codeBlock, " (for ", *profiledDFGCodeBlock, ") did become ready.\n"); } - m_dfgCodeBlock->jitCode()->dfg()->forceOptimizationSlowPathConcurrently( - m_dfgCodeBlock.get()); + profiledDFGCodeBlock->jitCode()->dfg()->forceOptimizationSlowPathConcurrently( + profiledDFGCodeBlock); } void ToFTLDeferredCompilationCallback::compilationDidComplete( - CodeBlock* codeBlock, CompilationResult result) + CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock, 
CompilationResult result) { if (Options::verboseOSR()) { dataLog( - "Optimizing compilation of ", *codeBlock, " (for ", *m_dfgCodeBlock, + "Optimizing compilation of ", *codeBlock, " (for ", *profiledDFGCodeBlock, ") result: ", result, "\n"); } - if (m_dfgCodeBlock->replacement() != m_dfgCodeBlock) { + if (profiledDFGCodeBlock->replacement() != profiledDFGCodeBlock) { if (Options::verboseOSR()) { dataLog( "Dropping FTL code block ", *codeBlock, " on the floor because the " - "DFG code block ", *m_dfgCodeBlock, " was jettisoned.\n"); + "DFG code block ", *profiledDFGCodeBlock, " was jettisoned.\n"); } return; } if (result == CompilationSuccessful) - codeBlock->install(); + codeBlock->ownerScriptExecutable()->installCode(codeBlock); - m_dfgCodeBlock->jitCode()->dfg()->setOptimizationThresholdBasedOnCompilationResult( - m_dfgCodeBlock.get(), result); + profiledDFGCodeBlock->jitCode()->dfg()->setOptimizationThresholdBasedOnCompilationResult( + profiledDFGCodeBlock, result); + + DeferredCompilationCallback::compilationDidComplete(codeBlock, profiledDFGCodeBlock, result); } } } // JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.h b/Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.h index a4d840b20..2a7931d2c 100644 --- a/Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.h +++ b/Source/JavaScriptCore/dfg/DFGToFTLDeferredCompilationCallback.h @@ -26,8 +26,6 @@ #ifndef DFGToFTLDeferredCompilationCallback_h #define DFGToFTLDeferredCompilationCallback_h -#include <wtf/Platform.h> - #if ENABLE(FTL_JIT) #include "DeferredCompilationCallback.h" @@ -42,19 +40,15 @@ namespace DFG { class ToFTLDeferredCompilationCallback : public DeferredCompilationCallback { protected: - ToFTLDeferredCompilationCallback(PassRefPtr<CodeBlock> dfgCodeBlock); + ToFTLDeferredCompilationCallback(); public: virtual ~ToFTLDeferredCompilationCallback(); - static PassRefPtr<ToFTLDeferredCompilationCallback> create( - PassRefPtr<CodeBlock> dfgCodeBlock); + static Ref<ToFTLDeferredCompilationCallback> create(); - virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*); - virtual void compilationDidComplete(CodeBlock*, CompilationResult); - -private: - RefPtr<CodeBlock> m_dfgCodeBlock; + virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*, CodeBlock* profiledDFGCodeBlock); + virtual void compilationDidComplete(CodeBlock*, CodeBlock* profiledDFGCodeBlock, CompilationResult); }; } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp b/Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp index 17b45a328..c9e5c79fd 100644 --- a/Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp +++ b/Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp @@ -31,12 +31,11 @@ #include "CodeBlock.h" #include "DFGJITCode.h" #include "Executable.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { -ToFTLForOSREntryDeferredCompilationCallback::ToFTLForOSREntryDeferredCompilationCallback( - PassRefPtr<CodeBlock> dfgCodeBlock) - : m_dfgCodeBlock(dfgCodeBlock) +ToFTLForOSREntryDeferredCompilationCallback::ToFTLForOSREntryDeferredCompilationCallback() { } @@ -44,42 +43,51 @@ ToFTLForOSREntryDeferredCompilationCallback::~ToFTLForOSREntryDeferredCompilatio { } -PassRefPtr<ToFTLForOSREntryDeferredCompilationCallback> -ToFTLForOSREntryDeferredCompilationCallback::create( - PassRefPtr<CodeBlock> dfgCodeBlock) 
+Ref<ToFTLForOSREntryDeferredCompilationCallback> ToFTLForOSREntryDeferredCompilationCallback::create()
 {
-    return adoptRef(new ToFTLForOSREntryDeferredCompilationCallback(dfgCodeBlock));
+    return adoptRef(*new ToFTLForOSREntryDeferredCompilationCallback());
 }
 
 void ToFTLForOSREntryDeferredCompilationCallback::compilationDidBecomeReadyAsynchronously(
-    CodeBlock* codeBlock)
+    CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock)
 {
     if (Options::verboseOSR()) {
         dataLog(
-            "Optimizing compilation of ", *codeBlock, " (for ", *m_dfgCodeBlock,
+            "Optimizing compilation of ", *codeBlock, " (for ", *profiledDFGCodeBlock,
             ") did become ready.\n");
     }
 
-    m_dfgCodeBlock->jitCode()->dfg()->forceOptimizationSlowPathConcurrently(
-        m_dfgCodeBlock.get());
+    profiledDFGCodeBlock->jitCode()->dfg()->forceOptimizationSlowPathConcurrently(
+        profiledDFGCodeBlock);
 }
 
 void ToFTLForOSREntryDeferredCompilationCallback::compilationDidComplete(
-    CodeBlock* codeBlock, CompilationResult result)
+    CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock, CompilationResult result)
 {
     if (Options::verboseOSR()) {
         dataLog(
-            "Optimizing compilation of ", *codeBlock, " (for ", *m_dfgCodeBlock,
+            "Optimizing compilation of ", *codeBlock, " (for ", *profiledDFGCodeBlock,
             ") result: ", result, "\n");
     }
 
-    if (result == CompilationSuccessful)
-        m_dfgCodeBlock->jitCode()->dfg()->osrEntryBlock = codeBlock;
+    JITCode* jitCode = profiledDFGCodeBlock->jitCode()->dfg();
+
+    switch (result) {
+    case CompilationSuccessful:
+        jitCode->setOSREntryBlock(*codeBlock->vm(), profiledDFGCodeBlock, codeBlock);
+        break;
+    case CompilationFailed:
+        jitCode->osrEntryRetry = 0;
+        jitCode->abandonOSREntry = true;
+        break;
+    case CompilationDeferred:
+        RELEASE_ASSERT_NOT_REACHED();
+    case CompilationInvalidated:
+        jitCode->osrEntryRetry = 0;
+        break;
+    }
-    // FIXME: if we failed, we might want to just turn off OSR entry rather than
-    // totally turning off tier-up.
- m_dfgCodeBlock->jitCode()->dfg()->setOptimizationThresholdBasedOnCompilationResult( - m_dfgCodeBlock.get(), result); + DeferredCompilationCallback::compilationDidComplete(codeBlock, profiledDFGCodeBlock, result); } } } // JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.h b/Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.h index af6b97b16..580f7d2f4 100644 --- a/Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.h +++ b/Source/JavaScriptCore/dfg/DFGToFTLForOSREntryDeferredCompilationCallback.h @@ -26,8 +26,6 @@ #ifndef DFGToFTLForOSREntryDeferredCompilationCallback_h #define DFGToFTLForOSREntryDeferredCompilationCallback_h -#include <wtf/Platform.h> - #if ENABLE(FTL_JIT) #include "DeferredCompilationCallback.h" @@ -42,19 +40,15 @@ namespace DFG { class ToFTLForOSREntryDeferredCompilationCallback : public DeferredCompilationCallback { protected: - ToFTLForOSREntryDeferredCompilationCallback(PassRefPtr<CodeBlock> dfgCodeBlock); + ToFTLForOSREntryDeferredCompilationCallback(); public: virtual ~ToFTLForOSREntryDeferredCompilationCallback(); - static PassRefPtr<ToFTLForOSREntryDeferredCompilationCallback> create( - PassRefPtr<CodeBlock> dfgCodeBlock); + static Ref<ToFTLForOSREntryDeferredCompilationCallback> create(); - virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*); - virtual void compilationDidComplete(CodeBlock*, CompilationResult); - -private: - RefPtr<CodeBlock> m_dfgCodeBlock; + virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*, CodeBlock* profiledDFGCodeBlock); + virtual void compilationDidComplete(CodeBlock*, CodeBlock* profiledDFGCodeBlock, CompilationResult); }; } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGTransition.cpp b/Source/JavaScriptCore/dfg/DFGTransition.cpp new file mode 100644 index 000000000..80d9b994b --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGTransition.cpp @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "DFGTransition.h" + +#if ENABLE(DFG_JIT) + +#include "JSCInlines.h" + +namespace JSC { namespace DFG { + +void Transition::dumpInContext(PrintStream& out, DumpContext* context) const +{ + out.print(pointerDumpInContext(previous, context), " -> ", pointerDumpInContext(next, context)); +} + +void Transition::dump(PrintStream& out) const +{ + dumpInContext(out, 0); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGTransition.h b/Source/JavaScriptCore/dfg/DFGTransition.h new file mode 100644 index 000000000..49a654436 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGTransition.h @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGTransition_h +#define DFGTransition_h + +#if ENABLE(DFG_JIT) + +#include <wtf/PrintStream.h> +#include <wtf/Vector.h> + +namespace JSC { + +class Structure; +struct DumpContext; + +namespace DFG { + +struct Transition { + Structure* previous; + Structure* next; + + Transition() + : previous(nullptr) + , next(nullptr) + { + } + + Transition(Structure* previous, Structure* next) + : previous(previous) + , next(next) + { + } + + void dumpInContext(PrintStream&, DumpContext*) const; + void dump(PrintStream&) const; +}; + +typedef Vector<Transition, 3> TransitionVector; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGTransition_h + diff --git a/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.cpp b/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.cpp index 5625ef4f0..052f4b9d9 100644 --- a/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2013, 2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -33,7 +33,7 @@ #include "DFGInsertionSet.h" #include "DFGPhase.h" #include "DFGVariableAccessDataDump.h" -#include "Operations.h" +#include "JSCInlines.h" #include <wtf/HashMap.h> namespace JSC { namespace DFG { @@ -108,14 +108,21 @@ public: BasicBlock* block = m_graph.block(blockIndex); if (!block) continue; + unsigned indexForChecks = UINT_MAX; + NodeOrigin originForChecks; for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) { Node* node = block->at(indexInBlock); + + if (node->origin.exitOK) { + indexForChecks = indexInBlock; + originForChecks = node->origin; + } + // Be careful not to use 'node' after appending to the graph. In those switch // cases where we need to append, we first carefully extract everything we need // from the node, before doing any appending. switch (node->op()) { case SetArgument: { - ASSERT(!blockIndex); // Insert a GetLocal and a CheckStructure immediately following this // SetArgument, if the variable was a candidate for structure hoisting. // If the basic block previously only had the SetArgument as its @@ -127,20 +134,23 @@ public: if (!iter->value.m_structure && !iter->value.m_arrayModeIsValid) break; - CodeOrigin codeOrigin = node->codeOrigin; + // Currently we should only be doing this hoisting for SetArguments at the prologue. + ASSERT(!blockIndex); + + NodeOrigin origin = node->origin; Node* getLocal = insertionSet.insertNode( - indexInBlock + 1, variable->prediction(), GetLocal, codeOrigin, + indexInBlock + 1, variable->prediction(), GetLocal, origin, OpInfo(variable), Edge(node)); if (iter->value.m_structure) { insertionSet.insertNode( - indexInBlock + 1, SpecNone, CheckStructure, codeOrigin, + indexInBlock + 1, SpecNone, CheckStructure, origin, OpInfo(m_graph.addStructureSet(iter->value.m_structure)), Edge(getLocal, CellUse)); } else if (iter->value.m_arrayModeIsValid) { ASSERT(iter->value.m_arrayModeHoistingOkay); insertionSet.insertNode( - indexInBlock + 1, SpecNone, CheckArray, codeOrigin, + indexInBlock + 1, SpecNone, CheckArray, origin, OpInfo(iter->value.m_arrayMode.asWord()), Edge(getLocal, CellUse)); } else @@ -163,18 +173,20 @@ public: if (!iter->value.m_structure && !iter->value.m_arrayModeIsValid) break; - CodeOrigin codeOrigin = node->codeOrigin; + NodeOrigin origin = node->origin; Edge child1 = node->child1(); if (iter->value.m_structure) { insertionSet.insertNode( - indexInBlock, SpecNone, CheckStructure, codeOrigin, + indexForChecks, SpecNone, CheckStructure, + originForChecks.withSemantic(origin.semantic), OpInfo(m_graph.addStructureSet(iter->value.m_structure)), Edge(child1.node(), CellUse)); } else if (iter->value.m_arrayModeIsValid) { ASSERT(iter->value.m_arrayModeHoistingOkay); insertionSet.insertNode( - indexInBlock, SpecNone, CheckArray, codeOrigin, + indexForChecks, SpecNone, CheckArray, + originForChecks.withSemantic(origin.semantic), OpInfo(iter->value.m_arrayMode.asWord()), Edge(child1.node(), CellUse)); } else @@ -215,8 +227,7 @@ private: for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) { Node* node = block->at(indexInBlock); switch (node->op()) { - case CheckStructure: - case StructureTransitionWatchpoint: { + case CheckStructure: { Node* child = node->child1().node(); if (child->op() != GetLocal) break; @@ -227,13 +238,16 @@ private: noticeStructureCheck(variable, node->structureSet()); break; } - + + case ArrayifyToStructure: 
+ case Arrayify: case GetByOffset: case PutByOffset: case PutStructure: case AllocatePropertyStorage: case ReallocatePropertyStorage: case GetButterfly: + case GetButterflyReadOnly: case GetByVal: case PutByValDirect: case PutByVal: @@ -244,25 +258,11 @@ private: case GetTypedArrayByteOffset: case Phantom: case MovHint: + case MultiGetByOffset: + case MultiPutByOffset: // Don't count these uses. break; - case ArrayifyToStructure: - case Arrayify: - if (node->arrayMode().conversion() == Array::RageConvert) { - // Rage conversion changes structures. We should avoid tying to do - // any kind of hoisting when rage conversion is in play. - Node* child = node->child1().node(); - if (child->op() != GetLocal) - break; - VariableAccessData* variable = child->variableAccessData(); - variable->vote(VoteOther); - if (!shouldConsiderForHoisting<StructureTypeCheck>(variable)) - break; - noticeStructureCheck(variable, 0); - } - break; - case SetLocal: { // Find all uses of the source of the SetLocal. If any of them are a // kind of CheckStructure, then we should notice them to ensure that @@ -282,13 +282,6 @@ private: noticeStructureCheck(variable, subNode->structureSet()); break; } - case StructureTransitionWatchpoint: { - if (subNode->child1() != source) - break; - - noticeStructureCheck(variable, subNode->structure()); - break; - } default: break; } @@ -328,12 +321,12 @@ private: } case CheckStructure: - case StructureTransitionWatchpoint: case GetByOffset: case PutByOffset: case PutStructure: case ReallocatePropertyStorage: case GetButterfly: + case GetButterflyReadOnly: case GetByVal: case PutByValDirect: case PutByVal: @@ -342,6 +335,8 @@ private: case GetIndexedPropertyStorage: case Phantom: case MovHint: + case MultiGetByOffset: + case MultiPutByOffset: // Don't count these uses. break; @@ -380,13 +375,6 @@ private: noticeStructureCheckAccountingForArrayMode(variable, subNode->structureSet()); break; } - case StructureTransitionWatchpoint: { - if (subNode->child1() != source) - break; - - noticeStructureCheckAccountingForArrayMode(variable, subNode->structure()); - break; - } case CheckArray: { if (subNode->child1() != source) break; @@ -497,7 +485,7 @@ private: noticeStructureCheck(variable, 0); return; } - noticeStructureCheck(variable, set.singletonStructure()); + noticeStructureCheck(variable, set.onlyStructure()); } void noticeCheckArray(VariableAccessData* variable, ArrayMode arrayMode) diff --git a/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.h b/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.h index 97adaf2e7..756dfb1f1 100644 --- a/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.h +++ b/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.h @@ -26,8 +26,6 @@ #ifndef DFGTypeCheckHoistingPhase_h #define DFGTypeCheckHoistingPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) namespace JSC { namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGUnificationPhase.cpp b/Source/JavaScriptCore/dfg/DFGUnificationPhase.cpp index 8f2929d5c..30003f315 100644 --- a/Source/JavaScriptCore/dfg/DFGUnificationPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGUnificationPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,7 +31,7 @@ #include "DFGBasicBlockInlines.h" #include "DFGGraph.h" #include "DFGPhase.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -59,9 +59,10 @@ public: for (unsigned childIdx = 0; childIdx < AdjacencyList::Size; ++childIdx) { if (!phi->children.child(childIdx)) break; - - phi->variableAccessData()->unify( - phi->children.child(childIdx)->variableAccessData()); + + // FIXME: Consider reversing the order of this unification, since the other + // order will reveal more bugs. https://bugs.webkit.org/show_bug.cgi?id=154368 + phi->variableAccessData()->unify(phi->children.child(childIdx)->variableAccessData()); } } } @@ -70,11 +71,11 @@ public: for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i) { VariableAccessData* data = &m_graph.m_variableAccessData[i]; data->find()->predict(data->nonUnifiedPrediction()); - data->find()->mergeIsCaptured(data->isCaptured()); data->find()->mergeStructureCheckHoistingFailed(data->structureCheckHoistingFailed()); data->find()->mergeCheckArrayHoistingFailed(data->checkArrayHoistingFailed()); data->find()->mergeShouldNeverUnbox(data->shouldNeverUnbox()); data->find()->mergeIsLoadedFrom(data->isLoadedFrom()); + data->find()->mergeIsProfitableToUnbox(data->isProfitableToUnbox()); } m_graph.m_unificationState = GloballyUnified; diff --git a/Source/JavaScriptCore/dfg/DFGUnificationPhase.h b/Source/JavaScriptCore/dfg/DFGUnificationPhase.h index 6713de78e..cb93b70f8 100644 --- a/Source/JavaScriptCore/dfg/DFGUnificationPhase.h +++ b/Source/JavaScriptCore/dfg/DFGUnificationPhase.h @@ -26,8 +26,6 @@ #ifndef DFGUnificationPhase_h #define DFGUnificationPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) namespace JSC { namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGUseKind.cpp b/Source/JavaScriptCore/dfg/DFGUseKind.cpp index 073eb0e66..502a1472e 100644 --- a/Source/JavaScriptCore/dfg/DFGUseKind.cpp +++ b/Source/JavaScriptCore/dfg/DFGUseKind.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2016 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,6 +28,8 @@ #if ENABLE(DFG_JIT) +#include "JSCInlines.h" + namespace WTF { using namespace JSC::DFG; @@ -37,68 +39,105 @@ void printInternal(PrintStream& out, UseKind useKind) switch (useKind) { case UntypedUse: out.print("Untyped"); - break; + return; case Int32Use: out.print("Int32"); - break; + return; case KnownInt32Use: out.print("KnownInt32"); - break; + return; + case Int52RepUse: + out.print("Int52Rep"); + return; case MachineIntUse: out.print("MachineInt"); - break; - case RealNumberUse: - out.print("RealNumber"); - break; + return; case NumberUse: out.print("Number"); - break; - case KnownNumberUse: - out.print("KnownNumber"); - break; + return; + case RealNumberUse: + out.print("RealNumber"); + return; + case DoubleRepUse: + out.print("DoubleRep"); + return; + case DoubleRepRealUse: + out.print("DoubleRepReal"); + return; + case DoubleRepMachineIntUse: + out.print("DoubleRepMachineInt"); + return; case BooleanUse: out.print("Boolean"); - break; + return; + case KnownBooleanUse: + out.print("KnownBoolean"); + return; case CellUse: out.print("Cell"); - break; + return; case KnownCellUse: out.print("KnownCell"); - break; + return; + case CellOrOtherUse: + out.print("CellOrOther"); + return; case ObjectUse: out.print("Object"); - break; + return; + case FunctionUse: + out.print("Function"); + return; case FinalObjectUse: out.print("FinalObject"); - break; + return; + case RegExpObjectUse: + out.print("RegExpObject"); + return; case ObjectOrOtherUse: out.print("ObjectOrOther"); - break; + return; case StringIdentUse: out.print("StringIdent"); - break; + return; case StringUse: out.print("String"); - break; + return; + case StringOrOtherUse: + out.print("StringOrOther"); + return; case KnownStringUse: out.print("KnownString"); - break; + return; + case KnownPrimitiveUse: + out.print("KnownPrimitive"); + return; + case SymbolUse: + out.print("Symbol"); + return; case StringObjectUse: out.print("StringObject"); - break; + return; case StringOrStringObjectUse: out.print("StringOrStringObject"); - break; + return; + case NotStringVarUse: + out.print("NotStringVar"); + return; case NotCellUse: out.print("NotCell"); - break; + return; case OtherUse: out.print("Other"); - break; - default: + return; + case MiscUse: + out.print("Misc"); + return; + case LastUseKind: RELEASE_ASSERT_NOT_REACHED(); - break; + return; } + RELEASE_ASSERT_NOT_REACHED(); } } // namespace WTF diff --git a/Source/JavaScriptCore/dfg/DFGUseKind.h b/Source/JavaScriptCore/dfg/DFGUseKind.h index 7ad390524..41527f647 100644 --- a/Source/JavaScriptCore/dfg/DFGUseKind.h +++ b/Source/JavaScriptCore/dfg/DFGUseKind.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,40 +26,64 @@ #ifndef DFGUseKind_h #define DFGUseKind_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) +#include "DFGNodeFlags.h" #include "SpeculatedType.h" #include <wtf/PrintStream.h> namespace JSC { namespace DFG { enum UseKind { - UntypedUse, + // The DFG has 3 representations of values used: + + // 1. 
The JSValue representation for a JSValue that must be stored in a GP + // register (or a GP register pair), and follows rules for boxing and unboxing + // that allow the JSValue to be stored as either fully boxed JSValues, or + // unboxed Int32, Booleans, Cells, etc. in 32-bit as appropriate. + UntypedUse, // UntypedUse must come first (value 0). Int32Use, KnownInt32Use, MachineIntUse, - RealNumberUse, NumberUse, - KnownNumberUse, + RealNumberUse, BooleanUse, + KnownBooleanUse, CellUse, KnownCellUse, + CellOrOtherUse, ObjectUse, + FunctionUse, FinalObjectUse, + RegExpObjectUse, ObjectOrOtherUse, StringIdentUse, StringUse, + StringOrOtherUse, KnownStringUse, + KnownPrimitiveUse, // This bizarre type arises for op_strcat, which has a bytecode guarantee that it will only see primitives (i.e. not objects). + SymbolUse, StringObjectUse, StringOrStringObjectUse, + NotStringVarUse, NotCellUse, OtherUse, + MiscUse, + + // 2. The Double representation for an unboxed double value that must be stored + // in an FP register. + DoubleRepUse, + DoubleRepRealUse, + DoubleRepMachineIntUse, + + // 3. The Int52 representation for an unboxed integer value that must be stored + // in a GP register. + Int52RepUse, + LastUseKind // Must always be the last entry in the enum, as it is used to denote the number of enum elements. }; -ALWAYS_INLINE SpeculatedType typeFilterFor(UseKind useKind) +inline SpeculatedType typeFilterFor(UseKind useKind) { switch (useKind) { case UntypedUse: @@ -67,22 +91,36 @@ ALWAYS_INLINE SpeculatedType typeFilterFor(UseKind useKind) case Int32Use: case KnownInt32Use: return SpecInt32; - case MachineIntUse: + case Int52RepUse: return SpecMachineInt; - case RealNumberUse: - return SpecFullRealNumber; + case MachineIntUse: + return SpecInt32 | SpecInt52AsDouble; case NumberUse: - case KnownNumberUse: - return SpecFullNumber; + return SpecBytecodeNumber; + case RealNumberUse: + return SpecBytecodeRealNumber; + case DoubleRepUse: + return SpecFullDouble; + case DoubleRepRealUse: + return SpecDoubleReal; + case DoubleRepMachineIntUse: + return SpecInt52AsDouble; case BooleanUse: + case KnownBooleanUse: return SpecBoolean; case CellUse: case KnownCellUse: return SpecCell; + case CellOrOtherUse: + return SpecCell | SpecOther; case ObjectUse: return SpecObject; + case FunctionUse: + return SpecFunction; case FinalObjectUse: return SpecFinalObject; + case RegExpObjectUse: + return SpecRegExpObject; case ObjectOrOtherUse: return SpecObject | SpecOther; case StringIdentUse: @@ -90,76 +128,97 @@ ALWAYS_INLINE SpeculatedType typeFilterFor(UseKind useKind) case StringUse: case KnownStringUse: return SpecString; + case StringOrOtherUse: + return SpecString | SpecOther; + case KnownPrimitiveUse: + return SpecHeapTop & ~SpecObject; + case SymbolUse: + return SpecSymbol; case StringObjectUse: return SpecStringObject; case StringOrStringObjectUse: return SpecString | SpecStringObject; + case NotStringVarUse: + return ~SpecStringVar; case NotCellUse: return ~SpecCell; case OtherUse: return SpecOther; + case MiscUse: + return SpecMisc; default: RELEASE_ASSERT_NOT_REACHED(); return SpecFullTop; } } -ALWAYS_INLINE bool shouldNotHaveTypeCheck(UseKind kind) +inline bool shouldNotHaveTypeCheck(UseKind kind) { switch (kind) { case UntypedUse: case KnownInt32Use: - case KnownNumberUse: case KnownCellUse: case KnownStringUse: + case KnownPrimitiveUse: + case KnownBooleanUse: + case Int52RepUse: + case DoubleRepUse: return true; default: return false; } } -ALWAYS_INLINE bool mayHaveTypeCheck(UseKind kind) +inline 
bool mayHaveTypeCheck(UseKind kind) { return !shouldNotHaveTypeCheck(kind); } -ALWAYS_INLINE bool isNumerical(UseKind kind) +inline bool isNumerical(UseKind kind) { switch (kind) { case Int32Use: case KnownInt32Use: - case MachineIntUse: - case RealNumberUse: case NumberUse: - case KnownNumberUse: + case RealNumberUse: + case Int52RepUse: + case DoubleRepUse: + case DoubleRepRealUse: + case MachineIntUse: + case DoubleRepMachineIntUse: return true; default: return false; } } -ALWAYS_INLINE bool isDouble(UseKind kind) +inline bool isDouble(UseKind kind) { switch (kind) { - case RealNumberUse: - case NumberUse: - case KnownNumberUse: + case DoubleRepUse: + case DoubleRepRealUse: + case DoubleRepMachineIntUse: return true; default: return false; } } -ALWAYS_INLINE bool isCell(UseKind kind) +// Returns true if the use kind only admits cells, and is therefore appropriate for +// SpeculateCellOperand in the DFG or lowCell() in the FTL. +inline bool isCell(UseKind kind) { switch (kind) { case CellUse: case KnownCellUse: case ObjectUse: + case FunctionUse: case FinalObjectUse: + case RegExpObjectUse: case StringIdentUse: case StringUse: case KnownStringUse: + case SymbolUse: case StringObjectUse: case StringOrStringObjectUse: return true; @@ -170,7 +229,7 @@ ALWAYS_INLINE bool isCell(UseKind kind) // Returns true if it uses structure in a way that could be clobbered by // things that change the structure. -ALWAYS_INLINE bool usesStructure(UseKind kind) +inline bool usesStructure(UseKind kind) { switch (kind) { case StringObjectUse: @@ -181,6 +240,30 @@ ALWAYS_INLINE bool usesStructure(UseKind kind) } } +// Returns true if we've already guaranteed the type +inline bool alreadyChecked(UseKind kind, SpeculatedType type) +{ + // If the check involves the structure then we need to know more than just the type to be sure + // that the check is done. + if (usesStructure(kind)) + return false; + + return !(type & ~typeFilterFor(kind)); +} + +inline UseKind useKindForResult(NodeFlags result) +{ + ASSERT(!(result & ~NodeResultMask)); + switch (result) { + case NodeResultInt52: + return Int52RepUse; + case NodeResultDouble: + return DoubleRepUse; + default: + return UntypedUse; + } +} + } } // namespace JSC::DFG namespace WTF { diff --git a/Source/JavaScriptCore/dfg/DFGValidate.cpp b/Source/JavaScriptCore/dfg/DFGValidate.cpp index 09bea406f..3fe1da28f 100644 --- a/Source/JavaScriptCore/dfg/DFGValidate.cpp +++ b/Source/JavaScriptCore/dfg/DFGValidate.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,6 +29,9 @@ #if ENABLE(DFG_JIT) #include "CodeBlockWithJITType.h" +#include "DFGClobbersExitState.h" +#include "DFGMayExit.h" +#include "JSCInlines.h" #include <wtf/Assertions.h> #include <wtf/BitVector.h> @@ -36,17 +39,19 @@ namespace JSC { namespace DFG { class Validate { public: - Validate(Graph& graph, GraphDumpMode graphDumpMode) + Validate(Graph& graph, GraphDumpMode graphDumpMode, CString graphDumpBeforePhase) : m_graph(graph) , m_graphDumpMode(graphDumpMode) + , m_graphDumpBeforePhase(graphDumpBeforePhase) { } #define VALIDATE(context, assertion) do { \ if (!(assertion)) { \ + startCrashing(); \ dataLogF("\n\n\nAt "); \ reportValidationContext context; \ - dataLogF(": validation %s (%s:%d) failed.\n", #assertion, __FILE__, __LINE__); \ + dataLogF(": validation failed: %s (%s:%d).\n", #assertion, __FILE__, __LINE__); \ dumpGraphIfAppropriate(); \ WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \ CRASH(); \ @@ -55,13 +60,14 @@ public: #define V_EQUAL(context, left, right) do { \ if (left != right) { \ + startCrashing(); \ dataLogF("\n\n\nAt "); \ reportValidationContext context; \ - dataLogF(": validation (%s = ", #left); \ + dataLogF(": validation failed: (%s = ", #left); \ dataLog(left); \ dataLogF(") == (%s = ", #right); \ dataLog(right); \ - dataLogF(") (%s:%d) failed.\n", __FILE__, __LINE__); \ + dataLogF(") (%s:%d).\n", __FILE__, __LINE__); \ dumpGraphIfAppropriate(); \ WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #left " == " #right); \ CRASH(); \ @@ -69,7 +75,7 @@ public: } while (0) #define notSet (static_cast<size_t>(-1)) - + void validate() { // NB. This code is not written for performance, since it is not intended to run @@ -113,6 +119,9 @@ public: continue; m_myRefCounts.find(edge.node())->value++; + + validateEdgeWithDoubleResultIfNecessary(node, edge); + VALIDATE((node, edge), edge->hasInt52Result() == (edge.useKind() == Int52RepUse)); if (m_graph.m_form == SSA) { // In SSA, all edges must hasResult(). 
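To make the updated VALIDATE macro concrete, here is roughly what one of the edge checks above, VALIDATE((node, edge), edge->hasInt52Result() == (edge.useKind() == Int52RepUse)), expands to; this is just the macro body written out, not new behavior:

    do {
        if (!(edge->hasInt52Result() == (edge.useKind() == Int52RepUse))) {
            startCrashing();
            dataLogF("\n\n\nAt ");
            reportValidationContext(node, edge);
            dataLogF(": validation failed: %s (%s:%d).\n",
                "edge->hasInt52Result() == (edge.useKind() == Int52RepUse)", __FILE__, __LINE__);
            dumpGraphIfAppropriate();
            WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
                "edge->hasInt52Result() == (edge.useKind() == Int52RepUse)");
            CRASH();
        }
    } while (0);

The new startCrashing() call and the reworded failure message are the only changes relative to the old macro.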
@@ -138,31 +147,6 @@ public: break; VALIDATE((node, edge), edge->variableAccessData() == node->variableAccessData()); break; - case Phantom: - switch (m_graph.m_form) { - case LoadStore: - if (j) { - VALIDATE((node, edge), edge->hasResult()); - break; - } - switch (edge->op()) { - case Phi: - case SetArgument: - case SetLocal: - break; - default: - VALIDATE((node, edge), edge->hasResult()); - break; - } - break; - case ThreadedCPS: - VALIDATE((node, edge), edge->hasResult()); - break; - case SSA: - RELEASE_ASSERT_NOT_REACHED(); - break; - } - break; default: VALIDATE((node, edge), edge->hasResult()); break; @@ -179,8 +163,128 @@ public: Node* node = block->node(i); if (m_graph.m_refCountState == ExactRefCount) V_EQUAL((node), m_myRefCounts.get(node), node->adjustedRefCount()); - else - V_EQUAL((node), node->refCount(), 1); + } + + bool foundTerminal = false; + for (size_t i = 0 ; i < block->size(); ++i) { + Node* node = block->at(i); + if (node->isTerminal()) { + foundTerminal = true; + for (size_t j = i + 1; j < block->size(); ++j) { + node = block->at(j); + VALIDATE((node), node->op() == Phantom || node->op() == PhantomLocal || node->op() == Flush || node->op() == Check); + m_graph.doToChildren( + node, + [&] (Edge edge) { + VALIDATE((node, edge), shouldNotHaveTypeCheck(edge.useKind())); + }); + } + break; + } + } + VALIDATE((block), foundTerminal); + + for (size_t i = 0; i < block->size(); ++i) { + Node* node = block->at(i); + + VALIDATE((node), node->origin.isSet()); + VALIDATE((node), node->origin.semantic.isSet() == node->origin.forExit.isSet()); + VALIDATE((node), !(!node->origin.forExit.isSet() && node->origin.exitOK)); + VALIDATE((node), !(mayExit(m_graph, node) == Exits && !node->origin.exitOK)); + + if (i) { + Node* previousNode = block->at(i - 1); + VALIDATE( + (node), + !clobbersExitState(m_graph, previousNode) + || !node->origin.exitOK + || node->op() == ExitOK + || node->origin.forExit != previousNode->origin.forExit); + VALIDATE( + (node), + !(!previousNode->origin.exitOK && node->origin.exitOK) + || node->op() == ExitOK + || node->origin.forExit != previousNode->origin.forExit); + } + + VALIDATE((node), !node->hasStructure() || !!node->structure()); + VALIDATE((node), !node->hasCellOperand() || node->cellOperand()->value().isCell()); + VALIDATE((node), !node->hasCellOperand() || !!node->cellOperand()->value()); + + if (!(node->flags() & NodeHasVarArgs)) { + if (!node->child2()) + VALIDATE((node), !node->child3()); + if (!node->child1()) + VALIDATE((node), !node->child2()); + } + + switch (node->op()) { + case Identity: + VALIDATE((node), canonicalResultRepresentation(node->result()) == canonicalResultRepresentation(node->child1()->result())); + break; + case SetLocal: + case PutStack: + case Upsilon: + VALIDATE((node), !!node->child1()); + switch (node->child1().useKind()) { + case UntypedUse: + case CellUse: + case KnownCellUse: + case Int32Use: + case KnownInt32Use: + case Int52RepUse: + case DoubleRepUse: + case BooleanUse: + case KnownBooleanUse: + break; + default: + VALIDATE((node), !"Bad use kind"); + break; + } + break; + case MakeRope: + case ValueAdd: + case ArithAdd: + case ArithSub: + case ArithMul: + case ArithIMul: + case ArithDiv: + case ArithMod: + case ArithMin: + case ArithMax: + case ArithPow: + case CompareLess: + case CompareLessEq: + case CompareGreater: + case CompareGreaterEq: + case CompareEq: + case CompareStrictEq: + case StrCat: + VALIDATE((node), !!node->child1()); + VALIDATE((node), !!node->child2()); + break; + case CheckStructure: + case 
StringFromCharCode: + VALIDATE((node), !!node->child1()); + break; + case PutStructure: + VALIDATE((node), !node->transition()->previous->dfgShouldWatch()); + break; + case MultiPutByOffset: + for (unsigned i = node->multiPutByOffsetData().variants.size(); i--;) { + const PutByIdVariant& variant = node->multiPutByOffsetData().variants[i]; + if (variant.kind() != PutByIdVariant::Transition) + continue; + VALIDATE((node), !variant.oldStructureForTransition()->dfgShouldWatch()); + } + break; + case DoubleConstant: + case Int52Constant: + VALIDATE((node), node->isNumberConstant()); + break; + default: + break; + } } } @@ -199,6 +303,7 @@ public: private: Graph& m_graph; GraphDumpMode m_graphDumpMode; + CString m_graphDumpBeforePhase; HashMap<Node*, unsigned> m_myRefCounts; HashSet<Node*> m_acceptableNodes; @@ -328,6 +433,7 @@ private: Node* node = block->at(i); ASSERT(nodesInThisBlock.contains(node)); VALIDATE((node), node->op() != Phi); + VALIDATE((node), node->origin.forExit.isSet()); for (unsigned j = 0; j < m_graph.numChildren(node); ++j) { Edge edge = m_graph.child(node, j); if (!edge) @@ -338,39 +444,64 @@ private: case GetLocal: case Flush: break; - case Phantom: - if (m_graph.m_form == LoadStore && !j) - break; - FALLTHROUGH; default: VALIDATE((node, edge), !phisInThisBlock.contains(edge.node())); break; } } + switch (node->op()) { + case Phi: + case Upsilon: + case CheckInBounds: + case PhantomNewObject: + case PhantomNewFunction: + case PhantomNewGeneratorFunction: + case PhantomCreateActivation: + case GetMyArgumentByVal: + case PutHint: + case CheckStructureImmediate: + case MaterializeNewObject: + case MaterializeCreateActivation: + case PutStack: + case KillStack: + case GetStack: + VALIDATE((node), !"unexpected node type in CPS"); + break; + case Phantom: + VALIDATE((node), m_graph.m_fixpointState != FixpointNotConverged); + break; + default: + break; + } + if (!node->shouldGenerate()) continue; switch (node->op()) { case GetLocal: - if (node->variableAccessData()->isCaptured()) - break; // Ignore GetLocal's that we know to be dead, but that the graph // doesn't yet know to be dead. if (!m_myRefCounts.get(node)) break; - if (m_graph.m_form == ThreadedCPS) + if (m_graph.m_form == ThreadedCPS) { VALIDATE((node, block), getLocalPositions.operand(node->local()) == notSet); + VALIDATE((node, block), !!node->child1()); + } getLocalPositions.operand(node->local()) = i; break; case SetLocal: - if (node->variableAccessData()->isCaptured()) - break; // Only record the first SetLocal. There may be multiple SetLocals // because of flushing. if (setLocalPositions.operand(node->local()) != notSet) break; setLocalPositions.operand(node->local()) = i; break; + case SetArgument: + // This acts like a reset. It's ok to have a second GetLocal for a local in the same + // block if we had a SetArgument for that local. 
+ getLocalPositions.operand(node->local()) = notSet; + setLocalPositions.operand(node->local()) = notSet; + break; default: break; } @@ -400,30 +531,85 @@ private: if (!block) continue; - unsigned nodeIndex = 0; - for (; nodeIndex < block->size() && !block->at(nodeIndex)->codeOrigin.isSet(); nodeIndex++) { } - - VALIDATE((block), nodeIndex < block->size()); - - for (; nodeIndex < block->size(); nodeIndex++) - VALIDATE((block->at(nodeIndex)), block->at(nodeIndex)->codeOrigin.isSet()); + VALIDATE((block), block->phis.isEmpty()); + + bool didSeeExitOK = false; for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { Node* node = block->at(nodeIndex); + didSeeExitOK |= node->origin.exitOK; switch (node->op()) { case Phi: - VALIDATE((node), !node->codeOrigin.isSet()); + // Phi cannot exit, and it would be wrong to hoist anything to the Phi that could + // exit. + VALIDATE((node), !node->origin.exitOK); + + // It never makes sense to have exitOK anywhere in the block before a Phi. It's only + // OK to exit after all Phis are done. + VALIDATE((node), !didSeeExitOK); break; + case GetLocal: + case SetLocal: + case GetLocalUnlinked: + case SetArgument: + case Phantom: + VALIDATE((node), !"bad node type for SSA"); + break; + default: // FIXME: Add more things here. // https://bugs.webkit.org/show_bug.cgi?id=123471 break; } + switch (node->op()) { + case PhantomNewObject: + case PhantomNewFunction: + case PhantomNewGeneratorFunction: + case PhantomCreateActivation: + case PhantomDirectArguments: + case PhantomClonedArguments: + case MovHint: + case Upsilon: + case ForwardVarargs: + case CallForwardVarargs: + case TailCallForwardVarargs: + case TailCallForwardVarargsInlinedCaller: + case ConstructForwardVarargs: + case GetMyArgumentByVal: + break; + + case Check: + // FIXME: This is probably not correct. 
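The origin/exitOK validation above boils down to one rule: once a node clobbers exit state, no later node at the same exit origin may claim exitOK unless an explicit ExitOK node re-establishes it. Restated as a hypothetical standalone predicate (the name is illustrative; the real checks are the VALIDATE calls above):

    static bool exitOKTransitionIsValid(JSC::DFG::Graph& graph,
        JSC::DFG::Node* previousNode, JSC::DFG::Node* node)
    {
        using namespace JSC::DFG;
        if (!clobbersExitState(graph, previousNode))
            return true; // Nothing was clobbered, so the next node may still exit.
        return !node->origin.exitOK
            || node->op() == ExitOK
            || node->origin.forExit != previousNode->origin.forExit;
    }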
+ break; + + case PutHint: + VALIDATE((node), node->child1()->isPhantomAllocation()); + break; + + default: + m_graph.doToChildren( + node, + [&] (const Edge& edge) { + VALIDATE((node), !edge->isPhantomAllocation()); + }); + break; + } } } } - + + void validateEdgeWithDoubleResultIfNecessary(Node* node, Edge edge) + { + if (!edge->hasDoubleResult()) + return; + + if (m_graph.m_planStage < PlanStage::AfterFixup) + return; + + VALIDATE((node, edge), edge.useKind() == DoubleRepUse || edge.useKind() == DoubleRepRealUse || edge.useKind() == DoubleRepMachineIntUse); + } + void checkOperand( BasicBlock* block, Operands<size_t>& getLocalPositions, Operands<size_t>& setLocalPositions, VirtualRegister operand) @@ -458,23 +644,23 @@ private: void reportValidationContext(VirtualRegister local, BasicBlock* block) { if (!block) { - dataLog("r", local, " in null Block "); + dataLog(local, " in null Block "); return; } - dataLog("r", local, " in Block ", *block); + dataLog(local, " in Block ", *block); } void reportValidationContext( VirtualRegister local, BasicBlock* sourceBlock, BasicBlock* destinationBlock) { - dataLog("r", local, " in Block ", *sourceBlock, " -> ", *destinationBlock); + dataLog(local, " in Block ", *sourceBlock, " -> ", *destinationBlock); } void reportValidationContext( VirtualRegister local, BasicBlock* sourceBlock, Node* prevNode) { - dataLog(prevNode, " for r", local, " in Block ", *sourceBlock); + dataLog(prevNode, " for ", local, " in Block ", *sourceBlock); } void reportValidationContext(Node* node, BasicBlock* block) @@ -497,14 +683,19 @@ private: { if (m_graphDumpMode == DontDumpGraph) return; + dataLog("\n"); + if (!m_graphDumpBeforePhase.isNull()) { + dataLog("Before phase:\n"); + dataLog(m_graphDumpBeforePhase); + } dataLog("At time of failure:\n"); m_graph.dump(); } }; -void validate(Graph& graph, GraphDumpMode graphDumpMode) +void validate(Graph& graph, GraphDumpMode graphDumpMode, CString graphDumpBeforePhase) { - Validate validationObject(graph, graphDumpMode); + Validate validationObject(graph, graphDumpMode, graphDumpBeforePhase); validationObject.validate(); } diff --git a/Source/JavaScriptCore/dfg/DFGValidate.h b/Source/JavaScriptCore/dfg/DFGValidate.h index 92aa293e3..ff4d06bdd 100644 --- a/Source/JavaScriptCore/dfg/DFGValidate.h +++ b/Source/JavaScriptCore/dfg/DFGValidate.h @@ -26,8 +26,6 @@ #ifndef DFGValidate_h #define DFGValidate_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGCommon.h" @@ -37,7 +35,7 @@ namespace JSC { namespace DFG { enum GraphDumpMode { DontDumpGraph, DumpGraph }; -void validate(Graph&, GraphDumpMode = DumpGraph); +void validate(Graph&, GraphDumpMode = DumpGraph, CString graphDumpBeforePhase = CString()); } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGValueSource.cpp b/Source/JavaScriptCore/dfg/DFGValueSource.cpp index 51cf78847..41d8b475a 100644 --- a/Source/JavaScriptCore/dfg/DFGValueSource.cpp +++ b/Source/JavaScriptCore/dfg/DFGValueSource.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2014, 2015 Apple Inc. All rights reserved. 
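The DFGValidate.h change above threads an optional pre-phase graph dump through to the failure path. A hypothetical call site (how the string is produced is an assumption, not shown in this patch):

    CString graphDumpBeforePhase = ...; // e.g. the graph printed to a string before the phase ran
    validate(graph, DumpGraph, graphDumpBeforePhase);
    // On failure, dumpGraphIfAppropriate() now prints "Before phase:" followed by that
    // saved dump, and then the usual "At time of failure:" dump of the current graph.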
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,6 +28,8 @@ #if ENABLE(DFG_JIT) +#include "JSCInlines.h" + namespace JSC { namespace DFG { void ValueSource::dump(PrintStream& out) const @@ -40,25 +42,22 @@ void ValueSource::dump(PrintStream& out) const out.print("IsDead"); break; case ValueInJSStack: - out.print("JS:r", virtualRegister()); + out.print("JS:", virtualRegister()); break; case Int32InJSStack: - out.print("Int32:r", virtualRegister()); + out.print("Int32:", virtualRegister()); break; case Int52InJSStack: - out.print("Int52:r", virtualRegister()); + out.print("Int52:", virtualRegister()); break; case CellInJSStack: - out.print("Cell:r", virtualRegister()); + out.print("Cell:", virtualRegister()); break; case BooleanInJSStack: - out.print("Bool:r", virtualRegister()); + out.print("Bool:", virtualRegister()); break; case DoubleInJSStack: - out.print("Double:r", virtualRegister()); - break; - case ArgumentsSource: - out.print("Arguments"); + out.print("Double:", virtualRegister()); break; case HaveNode: out.print("Node(", m_value, ")"); @@ -69,6 +68,11 @@ void ValueSource::dump(PrintStream& out) const } } +void ValueSource::dumpInContext(PrintStream& out, DumpContext*) const +{ + dump(out); +} + } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGValueSource.h b/Source/JavaScriptCore/dfg/DFGValueSource.h index 1e56f654f..1b55797d5 100644 --- a/Source/JavaScriptCore/dfg/DFGValueSource.h +++ b/Source/JavaScriptCore/dfg/DFGValueSource.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGValueSource_h #define DFGValueSource_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGCommon.h" @@ -47,7 +45,6 @@ enum ValueSourceKind { CellInJSStack, BooleanInJSStack, DoubleInJSStack, - ArgumentsSource, SourceIsDead, HaveNode }; @@ -67,8 +64,6 @@ static inline ValueSourceKind dataFormatToValueSourceKind(DataFormat dataFormat) return CellInJSStack; case DataFormatDead: return SourceIsDead; - case DataFormatArguments: - return ArgumentsSource; default: RELEASE_ASSERT(dataFormat & DataFormatJS); return ValueInJSStack; @@ -90,8 +85,6 @@ static inline DataFormat valueSourceKindToDataFormat(ValueSourceKind kind) return DataFormatBoolean; case DoubleInJSStack: return DataFormatDouble; - case ArgumentsSource: - return DataFormatArguments; case SourceIsDead: return DataFormatDead; default: @@ -122,7 +115,7 @@ public: explicit ValueSource(ValueSourceKind valueSourceKind) : m_kind(valueSourceKind) { - ASSERT(kind() == ArgumentsSource || kind() == SourceIsDead || kind() == ArgumentsSource); + ASSERT(kind() == SourceIsDead); } explicit ValueSource(MinifiedID id) @@ -159,8 +152,6 @@ public: return ValueSource(CellInJSStack, where); case FlushedBoolean: return ValueSource(BooleanInJSStack, where); - case FlushedArguments: - return ValueSource(ArgumentsSource); } RELEASE_ASSERT_NOT_REACHED(); return ValueSource(); @@ -176,6 +167,8 @@ public: return kind() != SourceNotSet; } + bool operator!() const { return !isSet(); } + ValueSourceKind kind() const { return m_kind; @@ -196,9 +189,6 @@ public: case SourceIsDead: return ValueRecovery::constant(jsUndefined()); - case ArgumentsSource: - return 
ValueRecovery::argumentsThatWereNotCreated(); - default: return ValueRecovery::displacedInJSStack(virtualRegister(), dataFormat()); } @@ -217,6 +207,7 @@ public: } void dump(PrintStream&) const; + void dumpInContext(PrintStream&, DumpContext*) const; private: ValueSourceKind m_kind; diff --git a/Source/JavaScriptCore/dfg/DFGValueStrength.cpp b/Source/JavaScriptCore/dfg/DFGValueStrength.cpp new file mode 100644 index 000000000..2c6e612b4 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGValueStrength.cpp @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGValueStrength.h" + +#if ENABLE(DFG_JIT) + +namespace WTF { + +using namespace JSC::DFG; + +void printInternal(PrintStream& out, ValueStrength strength) +{ + switch (strength) { + case WeakValue: + out.print("Weak"); + return; + case StrongValue: + out.print("Strong"); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGValueStrength.h b/Source/JavaScriptCore/dfg/DFGValueStrength.h new file mode 100644 index 000000000..72cd71c29 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGValueStrength.h @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGValueStrength_h +#define DFGValueStrength_h + +#if ENABLE(DFG_JIT) + +#include <wtf/PrintStream.h> + +namespace JSC { namespace DFG { + +enum ValueStrength { + // The value has been used for optimization and it arose through inference. We don't want the + // fact that we optimized the code to result in the GC keeping this value alive unnecessarily, + // so we'd rather kill the code and recompile than keep the object alive longer. + WeakValue, + + // The code will keep this value alive. This is true of constants that were present in the + // source. String constants tend to be strong. + StrongValue +}; + +inline ValueStrength merge(ValueStrength a, ValueStrength b) +{ + switch (a) { + case WeakValue: + return b; + case StrongValue: + return StrongValue; + } + RELEASE_ASSERT_NOT_REACHED(); + + return WeakValue; +} + +} } // namespace JSC::DFG + +namespace WTF { + +void printInternal(PrintStream&, JSC::DFG::ValueStrength); + +} // namespace WTF + +#endif // ENABLE(DFG_JIT) + +#endif // DFGValueStrength_h + diff --git a/Source/JavaScriptCore/dfg/DFGVarargsForwardingPhase.cpp b/Source/JavaScriptCore/dfg/DFGVarargsForwardingPhase.cpp new file mode 100644 index 000000000..d04677093 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGVarargsForwardingPhase.cpp @@ -0,0 +1,335 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
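A quick sanity check of the merge() rule above: the result is weak only when both inputs are weak, so a constant that appears in the source always keeps the strong strength it needs.

    ASSERT(merge(WeakValue, WeakValue) == WeakValue);     // both inferred; still safe to drop
    ASSERT(merge(WeakValue, StrongValue) == StrongValue); // the source-level constant wins
    ASSERT(merge(StrongValue, WeakValue) == StrongValue);
    ASSERT(merge(StrongValue, StrongValue) == StrongValue);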
+ */ + +#include "config.h" +#include "DFGVarargsForwardingPhase.h" + +#if ENABLE(DFG_JIT) + +#include "DFGArgumentsUtilities.h" +#include "DFGClobberize.h" +#include "DFGForAllKills.h" +#include "DFGGraph.h" +#include "DFGPhase.h" +#include "JSCInlines.h" +#include <wtf/ListDump.h> + +namespace JSC { namespace DFG { + +namespace { + +bool verbose = false; + +class VarargsForwardingPhase : public Phase { +public: + VarargsForwardingPhase(Graph& graph) + : Phase(graph, "varargs forwarding") + { + } + + bool run() + { + DFG_ASSERT(m_graph, nullptr, m_graph.m_form != SSA); + + if (verbose) { + dataLog("Graph before varargs forwarding:\n"); + m_graph.dump(); + } + + m_changed = false; + for (BasicBlock* block : m_graph.blocksInNaturalOrder()) + handleBlock(block); + return m_changed; + } + +private: + void handleBlock(BasicBlock* block) + { + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + Node* node = block->at(nodeIndex); + switch (node->op()) { + case CreateDirectArguments: + case CreateClonedArguments: + handleCandidate(block, nodeIndex); + break; + default: + break; + } + } + } + + void handleCandidate(BasicBlock* block, unsigned candidateNodeIndex) + { + // We expect calls into this function to be rare. So, this is written in a simple O(n) manner. + + Node* candidate = block->at(candidateNodeIndex); + if (verbose) + dataLog("Handling candidate ", candidate, "\n"); + + // Find the index of the last node in this block to use the candidate, and look for escaping + // sites. + unsigned lastUserIndex = candidateNodeIndex; + Vector<VirtualRegister, 2> relevantLocals; // This is a set. We expect it to be a small set. + for (unsigned nodeIndex = candidateNodeIndex + 1; nodeIndex < block->size(); ++nodeIndex) { + Node* node = block->at(nodeIndex); + + switch (node->op()) { + case MovHint: + if (node->child1() != candidate) + break; + lastUserIndex = nodeIndex; + if (!relevantLocals.contains(node->unlinkedLocal())) + relevantLocals.append(node->unlinkedLocal()); + break; + + case Check: { + bool sawEscape = false; + m_graph.doToChildren( + node, + [&] (Edge edge) { + if (edge == candidate) + lastUserIndex = nodeIndex; + + if (edge.willNotHaveCheck()) + return; + + if (alreadyChecked(edge.useKind(), SpecObject)) + return; + + sawEscape = true; + }); + if (sawEscape) { + if (verbose) + dataLog(" Escape at ", node, "\n"); + return; + } + break; + } + + case LoadVarargs: + if (m_graph.uses(node, candidate)) + lastUserIndex = nodeIndex; + break; + + case CallVarargs: + case ConstructVarargs: + case TailCallVarargs: + case TailCallVarargsInlinedCaller: + if (node->child1() == candidate || node->child3() == candidate) { + if (verbose) + dataLog(" Escape at ", node, "\n"); + return; + } + if (node->child2() == candidate) + lastUserIndex = nodeIndex; + break; + + case SetLocal: + if (node->child1() == candidate && node->variableAccessData()->isLoadedFrom()) { + if (verbose) + dataLog(" Escape at ", node, "\n"); + return; + } + break; + + default: + if (m_graph.uses(node, candidate)) { + if (verbose) + dataLog(" Escape at ", node, "\n"); + return; + } + } + + forAllKilledOperands( + m_graph, node, block->tryAt(nodeIndex + 1), + [&] (VirtualRegister reg) { + if (verbose) + dataLog(" Killing ", reg, " while we are interested in ", listDump(relevantLocals), "\n"); + for (unsigned i = 0; i < relevantLocals.size(); ++i) { + if (relevantLocals[i] == reg) { + relevantLocals[i--] = relevantLocals.last(); + relevantLocals.removeLast(); + lastUserIndex = nodeIndex; + } + } + }); + } + if 
(verbose) + dataLog("Selected lastUserIndex = ", lastUserIndex, ", ", block->at(lastUserIndex), "\n"); + + // We're still in business. Determine if between the candidate and the last user there is any + // effect that could interfere with sinking. + for (unsigned nodeIndex = candidateNodeIndex + 1; nodeIndex <= lastUserIndex; ++nodeIndex) { + Node* node = block->at(nodeIndex); + + // We have our own custom switch to detect some interferences that clobberize() wouldn't know + // about, and also some of the common ones, too. In particular, clobberize() doesn't know + // that Flush, MovHint, ZombieHint, and KillStack are bad because it's not worried about + // what gets read on OSR exit. + switch (node->op()) { + case MovHint: + case ZombieHint: + case KillStack: + if (argumentsInvolveStackSlot(candidate, node->unlinkedLocal())) { + if (verbose) + dataLog(" Interference at ", node, "\n"); + return; + } + break; + + case PutStack: + if (argumentsInvolveStackSlot(candidate, node->stackAccessData()->local)) { + if (verbose) + dataLog(" Interference at ", node, "\n"); + return; + } + break; + + case SetLocal: + case Flush: + if (argumentsInvolveStackSlot(candidate, node->local())) { + if (verbose) + dataLog(" Interference at ", node, "\n"); + return; + } + break; + + default: { + bool doesInterfere = false; + clobberize( + m_graph, node, NoOpClobberize(), + [&] (AbstractHeap heap) { + if (heap.kind() != Stack) { + ASSERT(!heap.overlaps(Stack)); + return; + } + ASSERT(!heap.payload().isTop()); + VirtualRegister reg(heap.payload().value32()); + if (argumentsInvolveStackSlot(candidate, reg)) + doesInterfere = true; + }, + NoOpClobberize()); + if (doesInterfere) { + if (verbose) + dataLog(" Interference at ", node, "\n"); + return; + } + } } + } + + // We can make this work. + if (verbose) + dataLog(" Will do forwarding!\n"); + m_changed = true; + + // Transform the program. + switch (candidate->op()) { + case CreateDirectArguments: + candidate->setOpAndDefaultFlags(PhantomDirectArguments); + break; + + case CreateClonedArguments: + candidate->setOpAndDefaultFlags(PhantomClonedArguments); + break; + + default: + DFG_CRASH(m_graph, candidate, "bad node type"); + break; + } + for (unsigned nodeIndex = candidateNodeIndex + 1; nodeIndex <= lastUserIndex; ++nodeIndex) { + Node* node = block->at(nodeIndex); + switch (node->op()) { + case Check: + case MovHint: + case PutHint: + // We don't need to change anything with these. + break; + + case LoadVarargs: + if (node->child1() != candidate) + break; + node->setOpAndDefaultFlags(ForwardVarargs); + break; + + case CallVarargs: + if (node->child2() != candidate) + break; + node->setOpAndDefaultFlags(CallForwardVarargs); + break; + + case ConstructVarargs: + if (node->child2() != candidate) + break; + node->setOpAndDefaultFlags(ConstructForwardVarargs); + break; + + case TailCallVarargs: + if (node->child2() != candidate) + break; + node->setOpAndDefaultFlags(TailCallForwardVarargs); + break; + + case TailCallVarargsInlinedCaller: + if (node->child2() != candidate) + break; + node->setOpAndDefaultFlags(TailCallForwardVarargsInlinedCaller); + break; + + case SetLocal: + // This is super odd. We don't have to do anything here, since in DFG IR, the phantom + // arguments nodes do produce a JSValue. Also, we know that if this SetLocal referenecs a + // candidate then the SetLocal - along with all of its references - will die off pretty + // soon, since it has no real users. DCE will surely kill it. If we make it to SSA, then + // SSA conversion will kill it. 
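The rewrite this phase performs, once the candidate has been shown not to escape and not to be interfered with, can be summarized with a small before/after sketch (pseudo-IR, not the graph printer's exact syntax):

    // before:
    //   a: CreateDirectArguments
    //   ...
    //   LoadVarargs(a)              // would force 'a' to be materialized
    //
    // after:
    //   a: PhantomDirectArguments   // no allocation; reconstructed only on OSR exit
    //   ...
    //   ForwardVarargs()            // forwards the caller's arguments directly
    //
    // CallVarargs, ConstructVarargs, TailCallVarargs and TailCallVarargsInlinedCaller
    // whose arguments child is 'a' become their *ForwardVarargs counterparts the same way.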
+ break; + + default: + if (ASSERT_DISABLED) + break; + m_graph.doToChildren( + node, + [&] (Edge edge) { + DFG_ASSERT(m_graph, node, edge != candidate); + }); + break; + } + } + } + + bool m_changed; +}; + +} // anonymous namespace + +bool performVarargsForwarding(Graph& graph) +{ + SamplingRegion samplingRegion("DFG Varargs Forwarding Phase"); + return runPhase<VarargsForwardingPhase>(graph); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGVarargsForwardingPhase.h b/Source/JavaScriptCore/dfg/DFGVarargsForwardingPhase.h new file mode 100644 index 000000000..ece3747ee --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGVarargsForwardingPhase.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGVarargsForwardingPhase_h +#define DFGVarargsForwardingPhase_h + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +class Graph; + +// Eliminates allocations of Arguments-class objects when they flow into CallVarargs, ConstructVarargs, +// or LoadVarargs. + +bool performVarargsForwarding(Graph&); + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGVarargsForwardingPhase_h + diff --git a/Source/JavaScriptCore/dfg/DFGVariableAccessData.cpp b/Source/JavaScriptCore/dfg/DFGVariableAccessData.cpp new file mode 100644 index 000000000..bd1ba87ee --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGVariableAccessData.cpp @@ -0,0 +1,222 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGVariableAccessData.h" + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +VariableAccessData::VariableAccessData() + : m_local(static_cast<VirtualRegister>(std::numeric_limits<int>::min())) + , m_prediction(SpecNone) + , m_argumentAwarePrediction(SpecNone) + , m_flags(0) + , m_shouldNeverUnbox(false) + , m_structureCheckHoistingFailed(false) + , m_checkArrayHoistingFailed(false) + , m_isProfitableToUnbox(false) + , m_isLoadedFrom(false) + , m_doubleFormatState(EmptyDoubleFormatState) +{ + clearVotes(); +} + +VariableAccessData::VariableAccessData(VirtualRegister local) + : m_local(local) + , m_prediction(SpecNone) + , m_argumentAwarePrediction(SpecNone) + , m_flags(0) + , m_shouldNeverUnbox(false) + , m_structureCheckHoistingFailed(false) + , m_checkArrayHoistingFailed(false) + , m_isProfitableToUnbox(false) + , m_isLoadedFrom(false) + , m_doubleFormatState(EmptyDoubleFormatState) +{ + clearVotes(); +} + +bool VariableAccessData::mergeShouldNeverUnbox(bool shouldNeverUnbox) +{ + bool newShouldNeverUnbox = m_shouldNeverUnbox | shouldNeverUnbox; + if (newShouldNeverUnbox == m_shouldNeverUnbox) + return false; + m_shouldNeverUnbox = newShouldNeverUnbox; + return true; +} + +bool VariableAccessData::predict(SpeculatedType prediction) +{ + VariableAccessData* self = find(); + bool result = mergeSpeculation(self->m_prediction, prediction); + if (result) + mergeSpeculation(m_argumentAwarePrediction, m_prediction); + return result; +} + +bool VariableAccessData::mergeArgumentAwarePrediction(SpeculatedType prediction) +{ + return mergeSpeculation(find()->m_argumentAwarePrediction, prediction); +} + +bool VariableAccessData::shouldUseDoubleFormatAccordingToVote() +{ + // We don't support this facility for arguments, yet. + // FIXME: make this work for arguments. + if (local().isArgument()) + return false; + + // If the variable is not a number prediction, then this doesn't + // make any sense. + if (!isFullNumberSpeculation(prediction())) { + // FIXME: we may end up forcing a local in inlined argument position to be a double even + // if it is sometimes not even numeric, since this never signals the fact that it doesn't + // want doubles. https://bugs.webkit.org/show_bug.cgi?id=109511 + return false; + } + + // If the variable is predicted to hold only doubles, then it's a + // no-brainer: it should be formatted as a double. + if (isDoubleSpeculation(prediction())) + return true; + + // If the variable is known to be used as an integer, then be safe - + // don't force it to be a double. + if (flags() & NodeBytecodeUsesAsInt) + return false; + + // If the variable has been voted to become a double, then make it a + // double. 
+ if (voteRatio() >= Options::doubleVoteRatioForDoubleFormat()) + return true; + + return false; +} + +bool VariableAccessData::tallyVotesForShouldUseDoubleFormat() +{ + ASSERT(isRoot()); + + if (local().isArgument() || shouldNeverUnbox() + || (flags() & NodeBytecodeUsesAsArrayIndex)) + return DFG::mergeDoubleFormatState(m_doubleFormatState, NotUsingDoubleFormat); + + if (m_doubleFormatState == CantUseDoubleFormat) + return false; + + bool newValueOfShouldUseDoubleFormat = shouldUseDoubleFormatAccordingToVote(); + if (!newValueOfShouldUseDoubleFormat) { + // We monotonically convert to double. Hence, if the fixpoint leads us to conclude that we should + // switch back to int, we instead ignore this and stick with double. + return false; + } + + if (m_doubleFormatState == UsingDoubleFormat) + return false; + + return DFG::mergeDoubleFormatState(m_doubleFormatState, UsingDoubleFormat); +} + +bool VariableAccessData::mergeDoubleFormatState(DoubleFormatState doubleFormatState) +{ + return DFG::mergeDoubleFormatState(find()->m_doubleFormatState, doubleFormatState); +} + +bool VariableAccessData::makePredictionForDoubleFormat() +{ + ASSERT(isRoot()); + + if (m_doubleFormatState != UsingDoubleFormat) + return false; + + SpeculatedType type = m_prediction; + if (type & ~SpecBytecodeNumber) + type |= SpecDoublePureNaN; + if (type & SpecMachineInt) + type |= SpecInt52AsDouble; + return checkAndSet(m_prediction, type); +} + +bool VariableAccessData::couldRepresentInt52() +{ + if (shouldNeverUnbox()) + return false; + + return couldRepresentInt52Impl(); +} + +bool VariableAccessData::couldRepresentInt52Impl() +{ + // The hardware has to support it. + if (!enableInt52()) + return false; + + // We punt for machine arguments. + if (m_local.isArgument()) + return false; + + // The argument-aware prediction -- which merges all of an (inlined or machine) + // argument's variable access datas' predictions -- must possibly be MachineInt. + return !(argumentAwarePrediction() & ~SpecMachineInt); +} + +FlushFormat VariableAccessData::flushFormat() +{ + ASSERT(find() == this); + + if (!shouldUnboxIfPossible()) + return FlushedJSValue; + + if (shouldUseDoubleFormat()) + return FlushedDouble; + + SpeculatedType prediction = argumentAwarePrediction(); + + // This guard is here to protect the call to couldRepresentInt52(), which will return + // true for !prediction. + if (!prediction) + return FlushedJSValue; + + if (isInt32Speculation(prediction)) + return FlushedInt32; + + if (couldRepresentInt52Impl()) + return FlushedInt52; + + if (isCellSpeculation(prediction)) + return FlushedCell; + + if (isBooleanSpeculation(prediction)) + return FlushedBoolean; + + return FlushedJSValue; +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGVariableAccessData.h b/Source/JavaScriptCore/dfg/DFGVariableAccessData.h index 5f83aeaf5..0f817561c 100644 --- a/Source/JavaScriptCore/dfg/DFGVariableAccessData.h +++ b/Source/JavaScriptCore/dfg/DFGVariableAccessData.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011-2015 Apple Inc. All rights reserved. 
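The voting machinery above is easiest to follow with concrete numbers (the counts below are made up; the threshold is whatever Options::doubleVoteRatioForDoubleFormat() is configured to):

    VariableAccessData* data = ...; // the unified root for some local
    data->clearVotes();
    data->vote(VoteValue);          // one use wants the boxed/int form
    data->vote(VoteDouble);         // three uses want an unboxed double
    data->vote(VoteDouble);
    data->vote(VoteDouble);
    // voteRatio() == 3.0 / 1.0. shouldUseDoubleFormatAccordingToVote() then answers
    // "use double" only if that ratio reaches the threshold and the other conditions
    // above hold (numeric prediction, not known to be used as an int).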
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,6 +26,8 @@ #ifndef DFGVariableAccessData_h #define DFGVariableAccessData_h +#if ENABLE(DFG_JIT) + #include "DFGCommon.h" #include "DFGDoubleFormatState.h" #include "DFGFlushFormat.h" @@ -34,7 +36,6 @@ #include "Operands.h" #include "SpeculatedType.h" #include "VirtualRegister.h" -#include <wtf/Platform.h> #include <wtf/UnionFind.h> #include <wtf/Vector.h> @@ -46,39 +47,8 @@ enum DoubleBallot { VoteValue, VoteDouble }; class VariableAccessData : public UnionFind<VariableAccessData> { public: - VariableAccessData() - : m_local(static_cast<VirtualRegister>(std::numeric_limits<int>::min())) - , m_prediction(SpecNone) - , m_argumentAwarePrediction(SpecNone) - , m_flags(0) - , m_isCaptured(false) - , m_shouldNeverUnbox(false) - , m_isArgumentsAlias(false) - , m_structureCheckHoistingFailed(false) - , m_checkArrayHoistingFailed(false) - , m_isProfitableToUnbox(false) - , m_isLoadedFrom(false) - , m_doubleFormatState(EmptyDoubleFormatState) - { - clearVotes(); - } - - VariableAccessData(VirtualRegister local, bool isCaptured) - : m_local(local) - , m_prediction(SpecNone) - , m_argumentAwarePrediction(SpecNone) - , m_flags(0) - , m_isCaptured(isCaptured) - , m_shouldNeverUnbox(isCaptured) - , m_isArgumentsAlias(false) - , m_structureCheckHoistingFailed(false) - , m_checkArrayHoistingFailed(false) - , m_isProfitableToUnbox(false) - , m_isLoadedFrom(false) - , m_doubleFormatState(EmptyDoubleFormatState) - { - clearVotes(); - } + VariableAccessData(); + VariableAccessData(VirtualRegister local); VirtualRegister local() { @@ -92,20 +62,9 @@ public: return m_machineLocal; } - bool mergeIsCaptured(bool isCaptured) - { - return checkAndSet(m_shouldNeverUnbox, m_shouldNeverUnbox | isCaptured) - | checkAndSet(m_isCaptured, m_isCaptured | isCaptured); - } - - bool isCaptured() - { - return m_isCaptured; - } - bool mergeIsProfitableToUnbox(bool isProfitableToUnbox) { - return checkAndSet(m_isProfitableToUnbox, m_isProfitableToUnbox | isProfitableToUnbox); + return checkAndSet(m_isProfitableToUnbox, m_isProfitableToUnbox || isProfitableToUnbox); } bool isProfitableToUnbox() @@ -113,21 +72,13 @@ public: return m_isProfitableToUnbox; } - bool mergeShouldNeverUnbox(bool shouldNeverUnbox) - { - bool newShouldNeverUnbox = m_shouldNeverUnbox | shouldNeverUnbox; - if (newShouldNeverUnbox == m_shouldNeverUnbox) - return false; - m_shouldNeverUnbox = newShouldNeverUnbox; - return true; - } + bool mergeShouldNeverUnbox(bool shouldNeverUnbox); // Returns true if it would be unsound to store the value in an unboxed fashion. // If this returns false, it simply means that it is sound to unbox; it doesn't // mean that we have actually done so. 
bool shouldNeverUnbox() { - ASSERT(!(m_isCaptured && !m_shouldNeverUnbox)); return m_shouldNeverUnbox; } @@ -141,12 +92,12 @@ public: bool mergeStructureCheckHoistingFailed(bool failed) { - return checkAndSet(m_structureCheckHoistingFailed, m_structureCheckHoistingFailed | failed); + return checkAndSet(m_structureCheckHoistingFailed, m_structureCheckHoistingFailed || failed); } bool mergeCheckArrayHoistingFailed(bool failed) { - return checkAndSet(m_checkArrayHoistingFailed, m_checkArrayHoistingFailed | failed); + return checkAndSet(m_checkArrayHoistingFailed, m_checkArrayHoistingFailed || failed); } bool structureCheckHoistingFailed() @@ -159,19 +110,9 @@ public: return m_checkArrayHoistingFailed; } - bool mergeIsArgumentsAlias(bool isArgumentsAlias) - { - return checkAndSet(m_isArgumentsAlias, m_isArgumentsAlias | isArgumentsAlias); - } - - bool isArgumentsAlias() - { - return m_isArgumentsAlias; - } - bool mergeIsLoadedFrom(bool isLoadedFrom) { - return checkAndSet(m_isLoadedFrom, m_isLoadedFrom | isLoadedFrom); + return checkAndSet(m_isLoadedFrom, m_isLoadedFrom || isLoadedFrom); } void setIsLoadedFrom(bool isLoadedFrom) @@ -184,14 +125,7 @@ public: return m_isLoadedFrom; } - bool predict(SpeculatedType prediction) - { - VariableAccessData* self = find(); - bool result = mergeSpeculation(self->m_prediction, prediction); - if (result) - mergeSpeculation(m_argumentAwarePrediction, m_prediction); - return result; - } + bool predict(SpeculatedType prediction); SpeculatedType nonUnifiedPrediction() { @@ -208,10 +142,7 @@ public: return find()->m_argumentAwarePrediction; } - bool mergeArgumentAwarePrediction(SpeculatedType prediction) - { - return mergeSpeculation(find()->m_argumentAwarePrediction, prediction); - } + bool mergeArgumentAwarePrediction(SpeculatedType prediction); void clearVotes() { @@ -220,10 +151,10 @@ public: m_votes[1] = 0; } - void vote(unsigned ballot) + void vote(unsigned ballot, float weight = 1) { ASSERT(ballot < 2); - m_votes[ballot]++; + m_votes[ballot] += weight; } double voteRatio() @@ -232,39 +163,7 @@ public: return static_cast<double>(m_votes[1]) / m_votes[0]; } - bool shouldUseDoubleFormatAccordingToVote() - { - // We don't support this facility for arguments, yet. - // FIXME: make this work for arguments. - if (local().isArgument()) - return false; - - // If the variable is not a number prediction, then this doesn't - // make any sense. - if (!isFullNumberSpeculation(prediction())) { - // FIXME: we may end up forcing a local in inlined argument position to be a double even - // if it is sometimes not even numeric, since this never signals the fact that it doesn't - // want doubles. https://bugs.webkit.org/show_bug.cgi?id=109511 - return false; - } - - // If the variable is predicted to hold only doubles, then it's a - // no-brainer: it should be formatted as a double. - if (isDoubleSpeculation(prediction())) - return true; - - // If the variable is known to be used as an integer, then be safe - - // don't force it to be a double. - if (flags() & NodeBytecodeUsesAsInt) - return false; - - // If the variable has been voted to become a double, then make it a - // double. 
- if (voteRatio() >= Options::doubleVoteRatioForDoubleFormat()) - return true; - - return false; - } + bool shouldUseDoubleFormatAccordingToVote(); DoubleFormatState doubleFormatState() { @@ -276,47 +175,14 @@ public: ASSERT(isRoot()); bool doubleState = m_doubleFormatState == UsingDoubleFormat; ASSERT(!(doubleState && shouldNeverUnbox())); - ASSERT(!(doubleState && isCaptured())); return doubleState && isProfitableToUnbox(); } - bool tallyVotesForShouldUseDoubleFormat() - { - ASSERT(isRoot()); - - if (local().isArgument() || shouldNeverUnbox()) - return DFG::mergeDoubleFormatState(m_doubleFormatState, NotUsingDoubleFormat); - - if (m_doubleFormatState == CantUseDoubleFormat) - return false; - - bool newValueOfShouldUseDoubleFormat = shouldUseDoubleFormatAccordingToVote(); - if (!newValueOfShouldUseDoubleFormat) { - // We monotonically convert to double. Hence, if the fixpoint leads us to conclude that we should - // switch back to int, we instead ignore this and stick with double. - return false; - } - - if (m_doubleFormatState == UsingDoubleFormat) - return false; - - return DFG::mergeDoubleFormatState(m_doubleFormatState, UsingDoubleFormat); - } + bool tallyVotesForShouldUseDoubleFormat(); - bool mergeDoubleFormatState(DoubleFormatState doubleFormatState) - { - return DFG::mergeDoubleFormatState(find()->m_doubleFormatState, doubleFormatState); - } + bool mergeDoubleFormatState(DoubleFormatState); - bool makePredictionForDoubleFormat() - { - ASSERT(isRoot()); - - if (m_doubleFormatState != UsingDoubleFormat) - return false; - - return mergeSpeculation(m_prediction, SpecDouble); - } + bool makePredictionForDoubleFormat(); NodeFlags flags() const { return m_flags; } @@ -325,34 +191,9 @@ public: return checkAndSet(m_flags, m_flags | newFlags); } - FlushFormat flushFormat() - { - ASSERT(find() == this); - - if (isArgumentsAlias()) - return FlushedArguments; - - if (!shouldUnboxIfPossible()) - return FlushedJSValue; - - if (shouldUseDoubleFormat()) - return FlushedDouble; - - SpeculatedType prediction = argumentAwarePrediction(); - if (isInt32Speculation(prediction)) - return FlushedInt32; - - if (enableInt52() && !m_local.isArgument() && isMachineIntSpeculation(prediction)) - return FlushedInt52; - - if (isCellSpeculation(prediction)) - return FlushedCell; - - if (isBooleanSpeculation(prediction)) - return FlushedBoolean; - - return FlushedJSValue; - } + FlushFormat flushFormat(); + + bool couldRepresentInt52(); FlushedAt flushedAt() { @@ -360,6 +201,8 @@ public: } private: + bool couldRepresentInt52Impl(); + // This is slightly space-inefficient, since anything we're unified with // will have the same operand and should have the same prediction. But // putting them here simplifies the code, and we don't expect DFG space @@ -371,9 +214,7 @@ private: SpeculatedType m_argumentAwarePrediction; NodeFlags m_flags; - bool m_isCaptured; bool m_shouldNeverUnbox; - bool m_isArgumentsAlias; bool m_structureCheckHoistingFailed; bool m_checkArrayHoistingFailed; bool m_isProfitableToUnbox; @@ -385,4 +226,6 @@ private: } } // namespace JSC::DFG +#endif // ENABLE(DFG_JIT) + #endif // DFGVariableAccessData_h diff --git a/Source/JavaScriptCore/dfg/DFGVariableAccessDataDump.cpp b/Source/JavaScriptCore/dfg/DFGVariableAccessDataDump.cpp index 92050006f..00621737e 100644 --- a/Source/JavaScriptCore/dfg/DFGVariableAccessDataDump.cpp +++ b/Source/JavaScriptCore/dfg/DFGVariableAccessDataDump.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2015 Apple Inc. 
All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,6 +30,7 @@ #include "DFGGraph.h" #include "DFGVariableAccessData.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -61,9 +62,7 @@ void VariableAccessDataDump::dump(PrintStream& out) const index /= 26; } - if (m_data->isCaptured()) - out.print("*"); - else if (m_data->shouldNeverUnbox()) + if (m_data->shouldNeverUnbox()) out.print("!"); else if (!m_data->shouldUnboxIfPossible()) out.print("~"); diff --git a/Source/JavaScriptCore/dfg/DFGVariableAccessDataDump.h b/Source/JavaScriptCore/dfg/DFGVariableAccessDataDump.h index 1422d7fac..fd53fcd2c 100644 --- a/Source/JavaScriptCore/dfg/DFGVariableAccessDataDump.h +++ b/Source/JavaScriptCore/dfg/DFGVariableAccessDataDump.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef DFGVariableAccessDataDump_h #define DFGVariableAccessDataDump_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include <wtf/PrintStream.h> diff --git a/Source/JavaScriptCore/dfg/DFGVariableEvent.cpp b/Source/JavaScriptCore/dfg/DFGVariableEvent.cpp index bb104ab54..28e437fd5 100644 --- a/Source/JavaScriptCore/dfg/DFGVariableEvent.cpp +++ b/Source/JavaScriptCore/dfg/DFGVariableEvent.cpp @@ -30,6 +30,7 @@ #include "FPRInfo.h" #include "GPRInfo.h" +#include "JSCInlines.h" namespace JSC { namespace DFG { @@ -45,6 +46,9 @@ void VariableEvent::dump(PrintStream& out) const case BirthToSpill: dumpSpillInfo("BirthToSpill", out); break; + case Birth: + out.print("Birth(", id(), ")"); + break; case Fill: dumpFillInfo("Fill", out); break; @@ -55,11 +59,11 @@ void VariableEvent::dump(PrintStream& out) const out.print("Death(", id(), ")"); break; case MovHintEvent: - out.print("MovHint(", id(), ", r", bytecodeRegister(), ")"); + out.print("MovHint(", id(), ", ", bytecodeRegister(), ")"); break; case SetLocalEvent: out.print( - "SetLocal(machine:r", machineRegister(), " -> bytecode:r", bytecodeRegister(), + "SetLocal(machine:", machineRegister(), " -> bytecode:", bytecodeRegister(), ", ", dataFormatToString(dataFormat()), ")"); break; default: @@ -84,7 +88,7 @@ void VariableEvent::dumpFillInfo(const char* name, PrintStream& out) const void VariableEvent::dumpSpillInfo(const char* name, PrintStream& out) const { - out.print(name, "(", id(), ", r", spillRegister(), ", ", dataFormatToString(dataFormat()), ")"); + out.print(name, "(", id(), ", ", spillRegister(), ", ", dataFormatToString(dataFormat()), ")"); } } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGVariableEvent.h b/Source/JavaScriptCore/dfg/DFGVariableEvent.h index 24423ed2d..5fa4bb686 100644 --- a/Source/JavaScriptCore/dfg/DFGVariableEvent.h +++ b/Source/JavaScriptCore/dfg/DFGVariableEvent.h @@ -26,8 +26,6 @@ #ifndef DFGVariableEvent_h #define DFGVariableEvent_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGCommon.h" @@ -49,6 +47,7 @@ enum VariableEventKind { // that we start to care about this node. BirthToFill, BirthToSpill, + Birth, // Events related to how a node is represented. 
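The new Birth event kind records that a node's value has come into existence without committing to a storage location, unlike BirthToFill and BirthToSpill which do both at once. A hypothetical event sequence for one node, and how the stream-reconstruction code further below tracks it:

    //   Birth(@5)            -> alive = true, format still DataFormatNone
    //   Spill(@5, loc7, JS)  -> alive = true, filled = false (value lives in the stack slot)
    //   Fill(@5)             -> alive = true, filled = true  (value is back in a register)
    //   Death(@5)            -> alive = false; a later use is reconstructed as jsUndefined()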
Fill, @@ -135,6 +134,14 @@ public: return event; } + static VariableEvent birth(MinifiedID id) + { + VariableEvent event; + event.m_which.id = id.bits(); + event.m_kind = Birth; + return event; + } + static VariableEvent spill(VariableEventKind kind, MinifiedID id, VirtualRegister virtualRegister, DataFormat format) { ASSERT(kind == BirthToSpill || kind == Spill); @@ -181,17 +188,17 @@ public: MinifiedID id() const { - ASSERT(m_kind == BirthToFill || m_kind == Fill - || m_kind == BirthToSpill || m_kind == Spill - || m_kind == Death || m_kind == MovHintEvent); + ASSERT( + m_kind == BirthToFill || m_kind == Fill || m_kind == BirthToSpill || m_kind == Spill + || m_kind == Death || m_kind == MovHintEvent || m_kind == Birth); return MinifiedID::fromBits(m_which.id); } DataFormat dataFormat() const { - ASSERT(m_kind == BirthToFill || m_kind == Fill - || m_kind == BirthToSpill || m_kind == Spill - || m_kind == SetLocalEvent); + ASSERT( + m_kind == BirthToFill || m_kind == Fill || m_kind == BirthToSpill || m_kind == Spill + || m_kind == SetLocalEvent); return static_cast<DataFormat>(m_dataFormat); } diff --git a/Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp b/Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp index 98e08f6ff..6392f14a9 100644 --- a/Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp +++ b/Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,7 +31,8 @@ #include "CodeBlock.h" #include "DFGJITCode.h" #include "DFGValueSource.h" -#include "Operations.h" +#include "InlineCallFrame.h" +#include "JSCInlines.h" #include <wtf/DataLog.h> #include <wtf/HashMap.h> @@ -48,11 +49,14 @@ namespace { struct MinifiedGenerationInfo { bool filled; // true -> in gpr/fpr/pair, false -> spilled + bool alive; VariableRepresentation u; DataFormat format; MinifiedGenerationInfo() - : format(DataFormatNone) + : filled(false) + , alive(false) + , format(DataFormatNone) { } @@ -62,13 +66,19 @@ struct MinifiedGenerationInfo { case BirthToFill: case Fill: filled = true; + alive = true; break; case BirthToSpill: case Spill: filled = false; + alive = true; break; + case Birth: + alive = true; + return; case Death: format = DataFormatNone; + alive = false; return; default: return; @@ -81,25 +91,23 @@ struct MinifiedGenerationInfo { } // namespace -bool VariableEventStream::tryToSetConstantRecovery(ValueRecovery& recovery, CodeBlock* codeBlock, MinifiedNode* node) const +bool VariableEventStream::tryToSetConstantRecovery(ValueRecovery& recovery, MinifiedNode* node) const { if (!node) return false; - if (node->hasConstantNumber()) { - recovery = ValueRecovery::constant( - codeBlock->constantRegister( - FirstConstantRegisterIndex + node->constantNumber()).get()); + if (node->hasConstant()) { + recovery = ValueRecovery::constant(node->constant()); return true; } - if (node->hasWeakConstant()) { - recovery = ValueRecovery::constant(node->weakConstant()); + if (node->op() == PhantomDirectArguments) { + recovery = ValueRecovery::directArgumentsThatWereNotCreated(node->id()); return true; } - if (node->op() == PhantomArguments) { - recovery = ValueRecovery::argumentsThatWereNotCreated(); + if (node->op() == PhantomClonedArguments) { + recovery = ValueRecovery::outOfBandArgumentsThatWereNotCreated(node->id()); return true; } @@ 
-115,9 +123,9 @@ void VariableEventStream::reconstruct( unsigned numVariables; if (codeOrigin.inlineCallFrame) - numVariables = baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame)->m_numCalleeRegisters + VirtualRegister(codeOrigin.inlineCallFrame->stackOffset).toLocal() + 1; + numVariables = baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame)->m_numCalleeLocals + VirtualRegister(codeOrigin.inlineCallFrame->stackOffset).toLocal() + 1; else - numVariables = baselineCodeBlock->m_numCalleeRegisters; + numVariables = baselineCodeBlock->m_numCalleeLocals; // Crazy special case: if we're at index == 0 then this must be an argument check // failure, in which case all variables are already set up. The recoveries should @@ -148,7 +156,8 @@ void VariableEventStream::reconstruct( // nothing to do. break; case BirthToFill: - case BirthToSpill: { + case BirthToSpill: + case Birth: { MinifiedGenerationInfo info; info.update(event); generationInfos.add(event.id(), info); @@ -187,20 +196,20 @@ void VariableEventStream::reconstruct( ASSERT(source.kind() == HaveNode); MinifiedNode* node = graph.at(source.id()); - if (tryToSetConstantRecovery(valueRecoveries[i], codeBlock, node)) - continue; - MinifiedGenerationInfo info = generationInfos.get(source.id()); - if (info.format == DataFormatNone) { + if (!info.alive) { valueRecoveries[i] = ValueRecovery::constant(jsUndefined()); continue; } + + if (tryToSetConstantRecovery(valueRecoveries[i], node)) + continue; ASSERT(info.format != DataFormatNone); if (info.filled) { if (info.format == DataFormatDouble) { - valueRecoveries[i] = ValueRecovery::inFPR(info.u.fpr); + valueRecoveries[i] = ValueRecovery::inFPR(info.u.fpr, DataFormatDouble); continue; } #if USE(JSVALUE32_64) diff --git a/Source/JavaScriptCore/dfg/DFGVariableEventStream.h b/Source/JavaScriptCore/dfg/DFGVariableEventStream.h index 130fd6a99..b0e4afac5 100644 --- a/Source/JavaScriptCore/dfg/DFGVariableEventStream.h +++ b/Source/JavaScriptCore/dfg/DFGVariableEventStream.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,14 +26,13 @@ #ifndef DFGVariableEventStream_h #define DFGVariableEventStream_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGCommon.h" #include "DFGMinifiedGraph.h" #include "DFGVariableEvent.h" #include "Operands.h" +#include "ValueRecovery.h" #include <wtf/Vector.h> namespace JSC { namespace DFG { @@ -50,7 +49,7 @@ public: unsigned index, Operands<ValueRecovery>&) const; private: - bool tryToSetConstantRecovery(ValueRecovery&, CodeBlock*, MinifiedNode*) const; + bool tryToSetConstantRecovery(ValueRecovery&, MinifiedNode*) const; void logEvent(const VariableEvent&); }; diff --git a/Source/JavaScriptCore/dfg/DFGVariadicFunction.h b/Source/JavaScriptCore/dfg/DFGVariadicFunction.h deleted file mode 100644 index f5523af77..000000000 --- a/Source/JavaScriptCore/dfg/DFGVariadicFunction.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (C) 2013 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef DFGVariadicFunction_h -#define DFGVariadicFunction_h - -#define DFG_COMMA , - -// The signature of v is (templatePre, templatePost, typeParams, valueParams, valueArgs) -// -// You would use it like: -// #define DEFINE_FUNCTION(templatePre, templatePost, typeParams, valueParamsComma, valueParams, valueArgs) -// templatePre typeParams templatePost void f(valueParams) { g(valueArgs); } -// DFG_VARIADIC_TEMPLATE_FUNCTION(DEFINE_FUNCTION) -// #undef DEFINE_FUNCTION -// -// Or if you wanted the defined function to take an additional template arg, you would do: -// #define DEFINE_FUNCTION(templatePre, templatePost, typeParams, valueParamsComma, valueParams, valueArgs) -// template<typename T valueParamsComma typeParams> void f(T value valueParamsComma valueParams) { g(value, valueArgs); } -// DFG_VARIADIC_TEMPLATE_FUNCTION(DEFINE_FUNCTION) -// #undef DEFINE_FUNCTION - -#define DFG_VARIADIC_TEMPLATE_FUNCTION(v) \ - v(, , , , , ) \ - v(template<, >, typename _DFG_T1, DFG_COMMA, const _DFG_T1& _DFG_value1, _DFG_value1) \ - v(template<, >, typename _DFG_T1 DFG_COMMA typename _DFG_T2, DFG_COMMA, const _DFG_T1& _DFG_value1 DFG_COMMA const _DFG_T2& _DFG_value2, _DFG_value1 DFG_COMMA _DFG_value2) \ - v(template<, >, typename _DFG_T1 DFG_COMMA typename _DFG_T2 DFG_COMMA typename _DFG_T3, DFG_COMMA, const _DFG_T1& _DFG_value1 DFG_COMMA const _DFG_T2& _DFG_value2 DFG_COMMA const _DFG_T3& _DFG_value3, _DFG_value1 DFG_COMMA _DFG_value2 DFG_COMMA _DFG_value3) \ - v(template<, >, typename _DFG_T1 DFG_COMMA typename _DFG_T2 DFG_COMMA typename _DFG_T3 DFG_COMMA typename _DFG_T4, DFG_COMMA, const _DFG_T1& _DFG_value1 DFG_COMMA const _DFG_T2& _DFG_value2 DFG_COMMA const _DFG_T3& _DFG_value3 DFG_COMMA const _DFG_T4& _DFG_value4, _DFG_value1 DFG_COMMA _DFG_value2 DFG_COMMA _DFG_value3 DFG_COMMA _DFG_value4) \ - v(template<, >, typename _DFG_T1 DFG_COMMA typename _DFG_T2 DFG_COMMA typename _DFG_T3 DFG_COMMA typename _DFG_T4 DFG_COMMA typename _DFG_T5, DFG_COMMA, const _DFG_T1& _DFG_value1 DFG_COMMA const _DFG_T2& _DFG_value2 DFG_COMMA const _DFG_T3& _DFG_value3 DFG_COMMA const _DFG_T4& _DFG_value4 DFG_COMMA const _DFG_T5& _DFG_value5, _DFG_value1 DFG_COMMA _DFG_value2 DFG_COMMA _DFG_value3 DFG_COMMA _DFG_value4 DFG_COMMA _DFG_value5) \ - v(template<, >, typename _DFG_T1 DFG_COMMA typename _DFG_T2 DFG_COMMA typename _DFG_T3 DFG_COMMA typename _DFG_T4 DFG_COMMA typename _DFG_T5 DFG_COMMA typename _DFG_T6, DFG_COMMA, const _DFG_T1& _DFG_value1 DFG_COMMA const _DFG_T2& _DFG_value2 DFG_COMMA const _DFG_T3& _DFG_value3 DFG_COMMA const _DFG_T4& 
_DFG_value4 DFG_COMMA const _DFG_T5& _DFG_value5 DFG_COMMA const _DFG_T6& _DFG_value6, _DFG_value1 DFG_COMMA _DFG_value2 DFG_COMMA _DFG_value3 DFG_COMMA _DFG_value4 DFG_COMMA _DFG_value5 DFG_COMMA _DFG_value6) \ - v(template<, >, typename _DFG_T1 DFG_COMMA typename _DFG_T2 DFG_COMMA typename _DFG_T3 DFG_COMMA typename _DFG_T4 DFG_COMMA typename _DFG_T5 DFG_COMMA typename _DFG_T6 DFG_COMMA typename _DFG_T7, DFG_COMMA, const _DFG_T1& _DFG_value1 DFG_COMMA const _DFG_T2& _DFG_value2 DFG_COMMA const _DFG_T3& _DFG_value3 DFG_COMMA const _DFG_T4& _DFG_value4 DFG_COMMA const _DFG_T5& _DFG_value5 DFG_COMMA const _DFG_T6& _DFG_value6 DFG_COMMA const _DFG_T7& _DFG_value7, _DFG_value1 DFG_COMMA _DFG_value2 DFG_COMMA _DFG_value3 DFG_COMMA _DFG_value4 DFG_COMMA _DFG_value5 DFG_COMMA _DFG_value6 DFG_COMMA _DFG_value7) \ - v(template<, >, typename _DFG_T1 DFG_COMMA typename _DFG_T2 DFG_COMMA typename _DFG_T3 DFG_COMMA typename _DFG_T4 DFG_COMMA typename _DFG_T5 DFG_COMMA typename _DFG_T6 DFG_COMMA typename _DFG_T7 DFG_COMMA typename _DFG_T8, DFG_COMMA, const _DFG_T1& _DFG_value1 DFG_COMMA const _DFG_T2& _DFG_value2 DFG_COMMA const _DFG_T3& _DFG_value3 DFG_COMMA const _DFG_T4& _DFG_value4 DFG_COMMA const _DFG_T5& _DFG_value5 DFG_COMMA const _DFG_T6& _DFG_value6 DFG_COMMA const _DFG_T7& _DFG_value7 DFG_COMMA _DFG_T8& _DFG_value8, _DFG_value1 DFG_COMMA _DFG_value2 DFG_COMMA _DFG_value3 DFG_COMMA _DFG_value4 DFG_COMMA _DFG_value5 DFG_COMMA _DFG_value6 DFG_COMMA _DFG_value7 DFG_COMMA _DFG_value8) - -#endif // DFGVariadicFunction_h - diff --git a/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp index 71d526159..e5e133d43 100644 --- a/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp @@ -30,7 +30,7 @@ #include "DFGGraph.h" #include "DFGScoreBoard.h" -#include "JSCellInlines.h" +#include "JSCInlines.h" #include "StackAlignment.h" #include <wtf/StdLibExtras.h> @@ -45,6 +45,8 @@ public: bool run() { + DFG_ASSERT(m_graph, nullptr, m_graph.m_form == ThreadedCPS); + ScoreBoard scoreBoard(m_graph.m_nextMachineLocal); scoreBoard.assertClear(); for (size_t blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { @@ -53,6 +55,10 @@ public: continue; if (!block->isReachable) continue; + if (!ASSERT_DISABLED) { + // Force usage of highest-numbered virtual registers. + scoreBoard.sortFree(); + } for (size_t indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) { Node* node = block->at(indexInBlock); diff --git a/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.h b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.h index 5878ed13f..42128a0e8 100644 --- a/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.h +++ b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.h @@ -26,8 +26,6 @@ #ifndef DFGVirtualRegisterAllocationPhase_h #define DFGVirtualRegisterAllocationPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGPhase.h" diff --git a/Source/JavaScriptCore/dfg/DFGWatchpointCollectionPhase.cpp b/Source/JavaScriptCore/dfg/DFGWatchpointCollectionPhase.cpp index 78df55009..c967f622e 100644 --- a/Source/JavaScriptCore/dfg/DFGWatchpointCollectionPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGWatchpointCollectionPhase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -32,7 +32,15 @@ #include "DFGClobberize.h" #include "DFGGraph.h" #include "DFGPhase.h" -#include "Operations.h" +#include "JSCInlines.h" + +// FIXME: Remove this phase entirely by moving the addLazily() calls into either the backend or +// into the phase that performs the optimization. Moving the calls into the backend makes the most +// sense when the intermediate phases don't need to know that the watchpoint was set. Moving the +// calls earlier usually only makes sense if the node's only purpose was to convey the need for +// the watchpoint (like VarInjectionWatchpoint). But, it can also make sense if the fact that the +// watchpoint was set enables other optimizations. +// https://bugs.webkit.org/show_bug.cgi?id=144669 namespace JSC { namespace DFG { @@ -64,10 +72,7 @@ public: private: void handle() { - DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, handleEdge); - switch (m_node->op()) { - case CompareEqConstant: case IsUndefined: handleMasqueradesAsUndefined(); break; @@ -75,93 +80,27 @@ private: case CompareEq: if (m_node->isBinaryUseKind(ObjectUse) || (m_node->child1().useKind() == ObjectUse && m_node->child2().useKind() == ObjectOrOtherUse) - || (m_node->child1().useKind() == ObjectOrOtherUse && m_node->child2().useKind() == ObjectUse)) + || (m_node->child1().useKind() == ObjectOrOtherUse && m_node->child2().useKind() == ObjectUse) + || (m_node->child1().useKind() == OtherUse || m_node->child2().useKind() == OtherUse)) handleMasqueradesAsUndefined(); break; case LogicalNot: case Branch: - if (m_node->child1().useKind() == ObjectOrOtherUse) + switch (m_node->child1().useKind()) { + case ObjectOrOtherUse: + case UntypedUse: handleMasqueradesAsUndefined(); - break; - - case GetByVal: - if (m_node->arrayMode().type() == Array::Double - && m_node->arrayMode().isSaneChain()) { - addLazily(globalObject()->arrayPrototype()->structure()->transitionWatchpointSet()); - addLazily(globalObject()->objectPrototype()->structure()->transitionWatchpointSet()); + break; + default: + break; } - - if (m_node->arrayMode().type() == Array::String) - handleStringGetByVal(); - - if (JSArrayBufferView* view = m_graph.tryGetFoldableViewForChild1(m_node)) - addLazily(view); - break; - - case PutByVal: - if (JSArrayBufferView* view = m_graph.tryGetFoldableViewForChild1(m_node)) - addLazily(view); - break; - - case StringCharAt: - handleStringGetByVal(); - break; - - case NewArray: - case NewArrayWithSize: - case NewArrayBuffer: - if (!globalObject()->isHavingABadTime() && !hasArrayStorage(m_node->indexingType())) - addLazily(globalObject()->havingABadTimeWatchpoint()); - break; - - case AllocationProfileWatchpoint: - addLazily(jsCast<JSFunction*>(m_node->function())->allocationProfileWatchpointSet()); - break; - - case StructureTransitionWatchpoint: - m_graph.watchpoints().addLazily( - m_node->codeOrigin, - m_node->child1()->op() == WeakJSConstant ? 
BadWeakConstantCacheWatchpoint : BadCacheWatchpoint, - m_node->structure()->transitionWatchpointSet()); - break; - - case VariableWatchpoint: - addLazily(m_node->variableWatchpointSet()); break; case VarInjectionWatchpoint: addLazily(globalObject()->varInjectionWatchpoint()); break; - case FunctionReentryWatchpoint: - addLazily(m_node->symbolTable()->m_functionEnteredOnce); - break; - - case TypedArrayWatchpoint: - addLazily(m_node->typedArray()); - break; - - default: - break; - } - } - - void handleEdge(Node*, Edge edge) - { - switch (edge.useKind()) { - case StringObjectUse: - case StringOrStringObjectUse: { - Structure* stringObjectStructure = globalObject()->stringObjectStructure(); - Structure* stringPrototypeStructure = stringObjectStructure->storedPrototype().asCell()->structure(); - ASSERT(m_graph.watchpoints().isValidOrMixed(stringPrototypeStructure->transitionWatchpointSet())); - - m_graph.watchpoints().addLazily( - m_node->codeOrigin, NotStringObject, - stringPrototypeStructure->transitionWatchpointSet()); - break; - } - default: break; } @@ -169,20 +108,10 @@ private: void handleMasqueradesAsUndefined() { - if (m_graph.masqueradesAsUndefinedWatchpointIsStillValid(m_node->codeOrigin)) + if (m_graph.masqueradesAsUndefinedWatchpointIsStillValid(m_node->origin.semantic)) addLazily(globalObject()->masqueradesAsUndefinedWatchpoint()); } - void handleStringGetByVal() - { - if (!m_node->arrayMode().isOutOfBounds()) - return; - if (!globalObject()->stringPrototypeChainIsSane()) - return; - addLazily(globalObject()->stringPrototype()->structure()->transitionWatchpointSet()); - addLazily(globalObject()->objectPrototype()->structure()->transitionWatchpointSet()); - } - void addLazily(WatchpointSet* set) { m_graph.watchpoints().addLazily(set); @@ -191,14 +120,10 @@ private: { m_graph.watchpoints().addLazily(set); } - void addLazily(JSArrayBufferView* view) - { - m_graph.watchpoints().addLazily(view); - } JSGlobalObject* globalObject() { - return m_graph.globalObjectFor(m_node->codeOrigin); + return m_graph.globalObjectFor(m_node->origin.semantic); } Node* m_node; diff --git a/Source/JavaScriptCore/dfg/DFGWatchpointCollectionPhase.h b/Source/JavaScriptCore/dfg/DFGWatchpointCollectionPhase.h index eb41522b1..fe8fef5d3 100644 --- a/Source/JavaScriptCore/dfg/DFGWatchpointCollectionPhase.h +++ b/Source/JavaScriptCore/dfg/DFGWatchpointCollectionPhase.h @@ -26,8 +26,6 @@ #ifndef DFGWatchpointCollectionPhase_h #define DFGWatchpointCollectionPhase_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) namespace JSC { namespace DFG { diff --git a/Source/JavaScriptCore/dfg/DFGWorklist.cpp b/Source/JavaScriptCore/dfg/DFGWorklist.cpp index 3df19ac11..9d7e68a0c 100644 --- a/Source/JavaScriptCore/dfg/DFGWorklist.cpp +++ b/Source/JavaScriptCore/dfg/DFGWorklist.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,46 +31,65 @@ #include "CodeBlock.h" #include "DeferGC.h" #include "DFGLongLivedState.h" +#include "DFGSafepoint.h" +#include "JSCInlines.h" #include <mutex> namespace JSC { namespace DFG { -Worklist::Worklist() - : m_numberOfActiveThreads(0) +Worklist::Worklist(CString worklistName) + : m_threadName(toCString(worklistName, " Worker Thread")) + , m_numberOfActiveThreads(0) { } Worklist::~Worklist() { { - MutexLocker locker(m_lock); + LockHolder locker(m_lock); for (unsigned i = m_threads.size(); i--;) m_queue.append(nullptr); // Use null plan to indicate that we want the thread to terminate. - m_planEnqueued.broadcast(); + m_planEnqueued.notifyAll(); } for (unsigned i = m_threads.size(); i--;) - waitForThreadCompletion(m_threads[i]); + waitForThreadCompletion(m_threads[i]->m_identifier); ASSERT(!m_numberOfActiveThreads); } -void Worklist::finishCreation(unsigned numberOfThreads) +void Worklist::finishCreation(unsigned numberOfThreads, int relativePriority) { RELEASE_ASSERT(numberOfThreads); - for (unsigned i = numberOfThreads; i--;) - m_threads.append(createThread(threadFunction, this, "JSC Compilation Thread")); + for (unsigned i = numberOfThreads; i--;) { + std::unique_ptr<ThreadData> data = std::make_unique<ThreadData>(this); + data->m_identifier = createThread(threadFunction, data.get(), m_threadName.data()); + if (relativePriority) + changeThreadPriority(data->m_identifier, relativePriority); + m_threads.append(WTFMove(data)); + } } -PassRefPtr<Worklist> Worklist::create(unsigned numberOfThreads) +Ref<Worklist> Worklist::create(CString worklistName, unsigned numberOfThreads, int relativePriority) { - RefPtr<Worklist> result = adoptRef(new Worklist()); - result->finishCreation(numberOfThreads); + Ref<Worklist> result = adoptRef(*new Worklist(worklistName)); + result->finishCreation(numberOfThreads, relativePriority); return result; } +bool Worklist::isActiveForVM(VM& vm) const +{ + LockHolder locker(m_lock); + PlanMap::const_iterator end = m_plans.end(); + for (PlanMap::const_iterator iter = m_plans.begin(); iter != end; ++iter) { + if (&iter->value->vm == &vm) + return true; + } + return false; +} + void Worklist::enqueue(PassRefPtr<Plan> passedPlan) { RefPtr<Plan> plan = passedPlan; - MutexLocker locker(m_lock); + LockHolder locker(m_lock); if (Options::verboseCompilationQueue()) { dump(locker, WTF::dataFile()); dataLog(": Enqueueing plan to optimize ", plan->key(), "\n"); @@ -78,16 +97,16 @@ void Worklist::enqueue(PassRefPtr<Plan> passedPlan) ASSERT(m_plans.find(plan->key()) == m_plans.end()); m_plans.add(plan->key(), plan); m_queue.append(plan); - m_planEnqueued.signal(); + m_planEnqueued.notifyOne(); } Worklist::State Worklist::compilationState(CompilationKey key) { - MutexLocker locker(m_lock); + LockHolder locker(m_lock); PlanMap::iterator iter = m_plans.find(key); if (iter == m_plans.end()) return NotKnown; - return iter->value->isCompiled ? Compiled : Compiling; + return iter->value->stage == Plan::Ready ? Compiled : Compiling; } void Worklist::waitUntilAllPlansForVMAreReady(VM& vm) @@ -99,7 +118,7 @@ void Worklist::waitUntilAllPlansForVMAreReady(VM& vm) // After we release this lock, we know that although other VMs may still // be adding plans, our VM will not be. 
- MutexLocker locker(m_lock); + LockHolder locker(m_lock); if (Options::verboseCompilationQueue()) { dump(locker, WTF::dataFile()); @@ -112,7 +131,7 @@ void Worklist::waitUntilAllPlansForVMAreReady(VM& vm) for (PlanMap::iterator iter = m_plans.begin(); iter != end; ++iter) { if (&iter->value->vm != &vm) continue; - if (!iter->value->isCompiled) { + if (iter->value->stage != Plan::Ready) { allAreCompiled = false; break; } @@ -128,12 +147,12 @@ void Worklist::waitUntilAllPlansForVMAreReady(VM& vm) void Worklist::removeAllReadyPlansForVM(VM& vm, Vector<RefPtr<Plan>, 8>& myReadyPlans) { DeferGC deferGC(vm.heap); - MutexLocker locker(m_lock); + LockHolder locker(m_lock); for (size_t i = 0; i < m_readyPlans.size(); ++i) { RefPtr<Plan> plan = m_readyPlans[i]; if (&plan->vm != &vm) continue; - if (!plan->isCompiled) + if (plan->stage != Plan::Ready) continue; myReadyPlans.append(plan); m_readyPlans[i--] = m_readyPlans.last(); @@ -164,7 +183,7 @@ Worklist::State Worklist::completeAllReadyPlansForVM(VM& vm, CompilationKey requ if (Options::verboseCompilationQueue()) dataLog(*this, ": Completing ", currentKey, "\n"); - RELEASE_ASSERT(plan->isCompiled); + RELEASE_ASSERT(plan->stage == Plan::Ready); plan->finalizeAndNotifyCallback(); @@ -173,7 +192,7 @@ Worklist::State Worklist::completeAllReadyPlansForVM(VM& vm, CompilationKey requ } if (!!requestedKey && resultingState == NotKnown) { - MutexLocker locker(m_lock); + LockHolder locker(m_lock); if (m_plans.contains(requestedKey)) resultingState = Compiling; } @@ -188,19 +207,116 @@ void Worklist::completeAllPlansForVM(VM& vm) completeAllReadyPlansForVM(vm); } +void Worklist::rememberCodeBlocks(VM& vm) +{ + LockHolder locker(m_lock); + for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) { + Plan* plan = iter->value.get(); + if (&plan->vm != &vm) + continue; + plan->rememberCodeBlocks(); + } +} + +void Worklist::suspendAllThreads() +{ + m_suspensionLock.lock(); + for (unsigned i = m_threads.size(); i--;) + m_threads[i]->m_rightToRun.lock(); +} + +void Worklist::resumeAllThreads() +{ + for (unsigned i = m_threads.size(); i--;) + m_threads[i]->m_rightToRun.unlock(); + m_suspensionLock.unlock(); +} + +void Worklist::visitWeakReferences(SlotVisitor& visitor) +{ + VM* vm = visitor.heap()->vm(); + { + LockHolder locker(m_lock); + for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) { + Plan* plan = iter->value.get(); + if (&plan->vm != vm) + continue; + plan->checkLivenessAndVisitChildren(visitor); + } + } + // This loop doesn't need locking because: + // (1) no new threads can be added to m_threads. Hence, it is immutable and needs no locks. + // (2) ThreadData::m_safepoint is protected by that thread's m_rightToRun which we must be + // holding here because of a prior call to suspendAllThreads(). + for (unsigned i = m_threads.size(); i--;) { + ThreadData* data = m_threads[i].get(); + Safepoint* safepoint = data->m_safepoint; + if (safepoint && &safepoint->vm() == vm) + safepoint->checkLivenessAndVisitChildren(visitor); + } +} + +void Worklist::removeDeadPlans(VM& vm) +{ + { + LockHolder locker(m_lock); + HashSet<CompilationKey> deadPlanKeys; + for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) { + Plan* plan = iter->value.get(); + if (&plan->vm != &vm) + continue; + if (plan->isKnownToBeLiveDuringGC()) + continue; + RELEASE_ASSERT(plan->stage != Plan::Cancelled); // Should not be cancelled, yet. 
+ ASSERT(!deadPlanKeys.contains(plan->key())); + deadPlanKeys.add(plan->key()); + } + if (!deadPlanKeys.isEmpty()) { + for (HashSet<CompilationKey>::iterator iter = deadPlanKeys.begin(); iter != deadPlanKeys.end(); ++iter) + m_plans.take(*iter)->cancel(); + Deque<RefPtr<Plan>> newQueue; + while (!m_queue.isEmpty()) { + RefPtr<Plan> plan = m_queue.takeFirst(); + if (plan->stage != Plan::Cancelled) + newQueue.append(plan); + } + m_queue.swap(newQueue); + for (unsigned i = 0; i < m_readyPlans.size(); ++i) { + if (m_readyPlans[i]->stage != Plan::Cancelled) + continue; + m_readyPlans[i] = m_readyPlans.last(); + m_readyPlans.removeLast(); + } + } + } + + // No locking needed for this part, see comment in visitWeakReferences(). + for (unsigned i = m_threads.size(); i--;) { + ThreadData* data = m_threads[i].get(); + Safepoint* safepoint = data->m_safepoint; + if (!safepoint) + continue; + if (&safepoint->vm() != &vm) + continue; + if (safepoint->isKnownToBeLiveDuringGC()) + continue; + safepoint->cancel(); + } +} + size_t Worklist::queueLength() { - MutexLocker locker(m_lock); + LockHolder locker(m_lock); return m_queue.size(); } void Worklist::dump(PrintStream& out) const { - MutexLocker locker(m_lock); + LockHolder locker(m_lock); dump(locker, out); } -void Worklist::dump(const MutexLocker&, PrintStream& out) const +void Worklist::dump(const LockHolder&, PrintStream& out) const { out.print( "Worklist(", RawPointer(this), ")[Queue Length = ", m_queue.size(), @@ -208,7 +324,7 @@ void Worklist::dump(const MutexLocker&, PrintStream& out) const ", Num Active Threads = ", m_numberOfActiveThreads, "/", m_threads.size(), "]"); } -void Worklist::runThread() +void Worklist::runThread(ThreadData* data) { CompilationScope compilationScope; @@ -220,9 +336,10 @@ void Worklist::runThread() for (;;) { RefPtr<Plan> plan; { - MutexLocker locker(m_lock); + LockHolder locker(m_lock); while (m_queue.isEmpty()) m_planEnqueued.wait(m_lock); + plan = m_queue.takeFirst(); if (plan) m_numberOfActiveThreads++; @@ -234,13 +351,45 @@ void Worklist::runThread() return; } - if (Options::verboseCompilationQueue()) - dataLog(*this, ": Compiling ", plan->key(), " asynchronously\n"); + { + LockHolder locker(data->m_rightToRun); + { + LockHolder locker(m_lock); + if (plan->stage == Plan::Cancelled) { + m_numberOfActiveThreads--; + continue; + } + plan->notifyCompiling(); + } - plan->compileInThread(longLivedState); + if (Options::verboseCompilationQueue()) + dataLog(*this, ": Compiling ", plan->key(), " asynchronously\n"); + RELEASE_ASSERT(!plan->vm.heap.isCollecting()); + plan->compileInThread(longLivedState, data); + RELEASE_ASSERT(plan->stage == Plan::Cancelled || !plan->vm.heap.isCollecting()); + + { + LockHolder locker(m_lock); + if (plan->stage == Plan::Cancelled) { + m_numberOfActiveThreads--; + continue; + } + plan->notifyCompiled(); + } + RELEASE_ASSERT(!plan->vm.heap.isCollecting()); + } + { - MutexLocker locker(m_lock); + LockHolder locker(m_lock); + + // We could have been cancelled between releasing rightToRun and acquiring m_lock. + // This would mean that we might be in the middle of GC right now. 
+ if (plan->stage == Plan::Cancelled) { + m_numberOfActiveThreads--; + continue; + } + plan->notifyReady(); if (Options::verboseCompilationQueue()) { @@ -250,7 +399,7 @@ void Worklist::runThread() m_readyPlans.append(plan); - m_planCompiled.broadcast(); + m_planCompiled.notifyAll(); m_numberOfActiveThreads--; } } @@ -258,26 +407,72 @@ void Worklist::runThread() void Worklist::threadFunction(void* argument) { - static_cast<Worklist*>(argument)->runThread(); + ThreadData* data = static_cast<ThreadData*>(argument); + data->m_worklist->runThread(data); } -static Worklist* theGlobalWorklist; +static Worklist* theGlobalDFGWorklist; -Worklist* globalWorklist() +Worklist* ensureGlobalDFGWorklist() { static std::once_flag initializeGlobalWorklistOnceFlag; std::call_once(initializeGlobalWorklistOnceFlag, [] { - unsigned numberOfThreads; + theGlobalDFGWorklist = &Worklist::create("DFG Worklist", Options::numberOfDFGCompilerThreads(), Options::priorityDeltaOfDFGCompilerThreads()).leakRef(); + }); + return theGlobalDFGWorklist; +} - if (Options::useExperimentalFTL()) - numberOfThreads = 1; // We don't yet use LLVM in a thread-safe way. - else - numberOfThreads = Options::numberOfCompilerThreads(); +Worklist* existingGlobalDFGWorklistOrNull() +{ + return theGlobalDFGWorklist; +} + +static Worklist* theGlobalFTLWorklist; - theGlobalWorklist = Worklist::create(numberOfThreads).leakRef(); +Worklist* ensureGlobalFTLWorklist() +{ + static std::once_flag initializeGlobalWorklistOnceFlag; + std::call_once(initializeGlobalWorklistOnceFlag, [] { + theGlobalFTLWorklist = &Worklist::create("FTL Worklist", Options::numberOfFTLCompilerThreads(), Options::priorityDeltaOfFTLCompilerThreads()).leakRef(); }); + return theGlobalFTLWorklist; +} - return theGlobalWorklist; +Worklist* existingGlobalFTLWorklistOrNull() +{ + return theGlobalFTLWorklist; +} + +Worklist* ensureGlobalWorklistFor(CompilationMode mode) +{ + switch (mode) { + case InvalidCompilationMode: + RELEASE_ASSERT_NOT_REACHED(); + return 0; + case DFGMode: + return ensureGlobalDFGWorklist(); + case FTLMode: + case FTLForOSREntryMode: + return ensureGlobalFTLWorklist(); + } + RELEASE_ASSERT_NOT_REACHED(); + return 0; +} + +void completeAllPlansForVM(VM& vm) +{ + for (unsigned i = DFG::numberOfWorklists(); i--;) { + if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) + worklist->completeAllPlansForVM(vm); + } +} + +void rememberCodeBlocks(VM& vm) +{ + for (unsigned i = DFG::numberOfWorklists(); i--;) { + if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) + worklist->rememberCodeBlocks(vm); + } } } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGWorklist.h b/Source/JavaScriptCore/dfg/DFGWorklist.h index d3419f0a9..075f7ccd7 100644 --- a/Source/JavaScriptCore/dfg/DFGWorklist.h +++ b/Source/JavaScriptCore/dfg/DFGWorklist.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,18 +26,21 @@ #ifndef DFGWorklist_h #define DFGWorklist_h -#include <wtf/Platform.h> - #if ENABLE(DFG_JIT) #include "DFGPlan.h" +#include "DFGThreadData.h" +#include <wtf/Condition.h> #include <wtf/Deque.h> #include <wtf/HashMap.h> +#include <wtf/Lock.h> #include <wtf/Noncopyable.h> -#include <wtf/PassOwnPtr.h> -#include <wtf/ThreadingPrimitives.h> -namespace JSC { namespace DFG { +namespace JSC { + +class SlotVisitor; + +namespace DFG { class Worklist : public RefCounted<Worklist> { public: @@ -45,7 +48,7 @@ public: ~Worklist(); - static PassRefPtr<Worklist> create(unsigned numberOfThreads); + static Ref<Worklist> create(CString worklistName, unsigned numberOfThreads, int relativePriority = 0); void enqueue(PassRefPtr<Plan>); @@ -53,7 +56,9 @@ public: // worklist->waitUntilAllPlansForVMAreReady(vm); // worklist->completeAllReadyPlansForVM(vm); void completeAllPlansForVM(VM&); - + + void rememberCodeBlocks(VM&); + void waitUntilAllPlansForVMAreReady(VM&); State completeAllReadyPlansForVM(VM&, CompilationKey = CompilationKey()); void removeAllReadyPlansForVM(VM&); @@ -61,21 +66,33 @@ public: State compilationState(CompilationKey); size_t queueLength(); + + void suspendAllThreads(); + void resumeAllThreads(); + + bool isActiveForVM(VM&) const; + + // Only called on the main thread after suspending all threads. + void visitWeakReferences(SlotVisitor&); + void removeDeadPlans(VM&); + void dump(PrintStream&) const; private: - Worklist(); - void finishCreation(unsigned numberOfThreads); + Worklist(CString worklistName); + void finishCreation(unsigned numberOfThreads, int); - void runThread(); + void runThread(ThreadData*); static void threadFunction(void* argument); void removeAllReadyPlansForVM(VM&, Vector<RefPtr<Plan>, 8>&); - void dump(const MutexLocker&, PrintStream&) const; - + void dump(const LockHolder&, PrintStream&) const; + + CString m_threadName; + // Used to inform the thread about what work there is left to do. - Deque<RefPtr<Plan>, 16> m_queue; + Deque<RefPtr<Plan>> m_queue; // Used to answer questions about the current state of a code block. This // is particularly great for the cti_optimize OSR slow path, which wants @@ -87,18 +104,44 @@ private: // Used to quickly find which plans have been compiled and are ready to // be completed. Vector<RefPtr<Plan>, 16> m_readyPlans; + + Lock m_suspensionLock; + + mutable Lock m_lock; + Condition m_planEnqueued; + Condition m_planCompiled; - mutable Mutex m_lock; - ThreadCondition m_planEnqueued; - ThreadCondition m_planCompiled; - Vector<ThreadIdentifier> m_threads; + Vector<std::unique_ptr<ThreadData>> m_threads; unsigned m_numberOfActiveThreads; }; -// For now we use a single global worklist. It's not clear that this -// is the right thing to do, but it is what we do, for now. This function -// will lazily create one when it's needed. -Worklist* globalWorklist(); +// For DFGMode compilations. +Worklist* ensureGlobalDFGWorklist(); +Worklist* existingGlobalDFGWorklistOrNull(); + +// For FTLMode and FTLForOSREntryMode compilations. +Worklist* ensureGlobalFTLWorklist(); +Worklist* existingGlobalFTLWorklistOrNull(); + +Worklist* ensureGlobalWorklistFor(CompilationMode); + +// Simplify doing things for all worklists. 
+inline unsigned numberOfWorklists() { return 2; } +inline Worklist* worklistForIndexOrNull(unsigned index) +{ + switch (index) { + case 0: + return existingGlobalDFGWorklistOrNull(); + case 1: + return existingGlobalFTLWorklistOrNull(); + default: + RELEASE_ASSERT_NOT_REACHED(); + return 0; + } +} + +void completeAllPlansForVM(VM&); +void rememberCodeBlocks(VM&); } } // namespace JSC::DFG
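The DFGVariableEventStream changes earlier in this diff hinge on the new Birth event kind and the `alive` flag added to MinifiedGenerationInfo: reconstruct() now checks liveness before attempting a constant recovery, so a node that has died recovers as jsUndefined() even when its minified node still carries a constant. The sketch below is a minimal, self-contained model of that bookkeeping; the reduced event set, the GenerationInfo name, and the `hasFormat` flag are illustrative stand-ins, not the actual JSC types.

```cpp
// Hypothetical sketch of the liveness tracking added to MinifiedGenerationInfo.
#include <cstdio>

enum EventKind { BirthToFill, Fill, BirthToSpill, Spill, Birth, Death };

struct GenerationInfo {
    bool filled = false;    // true -> value lives in a register, false -> spilled
    bool alive = false;     // true -> the node exists at this point in the stream
    bool hasFormat = false; // stands in for format != DataFormatNone

    void update(EventKind kind)
    {
        switch (kind) {
        case BirthToFill:
        case Fill:
            filled = true;
            alive = true;
            hasFormat = true;
            break;
        case BirthToSpill:
        case Spill:
            filled = false;
            alive = true;
            hasFormat = true;
            break;
        case Birth:
            // Alive, but with no register or spill slot yet; a later Fill or
            // Spill event supplies the representation.
            alive = true;
            break;
        case Death:
            alive = false;
            hasFormat = false;
            break;
        }
    }
};

int main()
{
    GenerationInfo info;
    info.update(Birth);   // node exists, no representation yet
    std::printf("alive=%d filled=%d\n", info.alive, info.filled);
    info.update(Spill);   // value is now spilled to the stack
    std::printf("alive=%d filled=%d\n", info.alive, info.filled);
    info.update(Death);   // reconstruct() would now recover jsUndefined()
    std::printf("alive=%d hasFormat=%d\n", info.alive, info.hasFormat);
    return 0;
}
```

The separate Birth event records that a node exists before it has any register or spill-slot representation, and the `alive` flag then lets reconstruct() tell that state apart from a node that has already died.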

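The DFGWorklist rewrite above swaps WTF::Mutex/ThreadCondition for WTF::Lock/Condition (signal() becomes notifyOne(), broadcast() becomes notifyAll()) while keeping the shutdown protocol in which the destructor appends one null plan per worker and wakes every thread. Below is a self-contained analogue of that queue discipline using standard-library primitives instead of the WTF ones; MiniWorklist, Plan, and the printf stand-in for compilation are placeholders, not the real JSC classes.

```cpp
// Standard-library analogue of the Worklist queue and its null-plan shutdown
// sentinel; assumes made-up names (MiniWorklist, Plan), not the JSC API.
#include <condition_variable>
#include <cstdio>
#include <deque>
#include <memory>
#include <mutex>
#include <thread>
#include <vector>

struct Plan { int key; };

class MiniWorklist {
public:
    explicit MiniWorklist(unsigned numberOfThreads)
    {
        for (unsigned i = 0; i < numberOfThreads; ++i)
            m_threads.emplace_back([this] { runThread(); });
    }

    ~MiniWorklist()
    {
        {
            std::lock_guard<std::mutex> locker(m_lock);
            // One null plan per worker tells each thread to terminate.
            for (size_t i = m_threads.size(); i--;)
                m_queue.push_back(nullptr);
            m_planEnqueued.notify_all();
        }
        for (auto& thread : m_threads)
            thread.join();
    }

    void enqueue(std::shared_ptr<Plan> plan)
    {
        std::lock_guard<std::mutex> locker(m_lock);
        m_queue.push_back(std::move(plan));
        m_planEnqueued.notify_one();
    }

private:
    void runThread()
    {
        for (;;) {
            std::shared_ptr<Plan> plan;
            {
                std::unique_lock<std::mutex> locker(m_lock);
                m_planEnqueued.wait(locker, [this] { return !m_queue.empty(); });
                plan = std::move(m_queue.front());
                m_queue.pop_front();
            }
            if (!plan)
                return; // saw the termination sentinel
            std::printf("compiling plan %d\n", plan->key); // stands in for compileInThread()
        }
    }

    std::mutex m_lock;
    std::condition_variable m_planEnqueued;
    std::deque<std::shared_ptr<Plan>> m_queue;
    std::vector<std::thread> m_threads;
};

int main()
{
    MiniWorklist worklist(2);
    for (int i = 0; i < 4; ++i)
        worklist.enqueue(std::make_shared<Plan>(Plan{i}));
    // The destructor enqueues the null sentinels and joins the workers.
    return 0;
}
```

Because the FIFO queue delivers real plans before the appended sentinels, termination needs no separate shutdown flag: the worker loop has a single wait/dequeue path and treats "stop" as just another queue entry.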