author     Simon Hausmann <simon.hausmann@digia.com>    2012-10-22 15:40:17 +0200
committer  Simon Hausmann <simon.hausmann@digia.com>    2012-10-22 15:40:17 +0200
commit     43a42f108af6bcbd91f2672731c3047c26213af1 (patch)
tree       7fa092e5f5d873c72f2486a70e26be26f7a38bec /Source/JavaScriptCore/dfg
parent     d9cf437c840c6eb7417bdd97e6c40979255d3158 (diff)
download   qtwebkit-43a42f108af6bcbd91f2672731c3047c26213af1.tar.gz
Imported WebKit commit 302e7806bff028bd1167a1ec7c86a1ee00ecfb49 (http://svn.webkit.org/repository/webkit/trunk@132067)
New snapshot that fixes build without QtWidgets
Diffstat (limited to 'Source/JavaScriptCore/dfg')
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAbstractState.cpp               |  16
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAbstractValue.h                 | 161
-rw-r--r--  Source/JavaScriptCore/dfg/DFGArrayMode.cpp                   |  20
-rw-r--r--  Source/JavaScriptCore/dfg/DFGArrayMode.h                     |  45
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp             |  12
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h               |  37
-rw-r--r--  Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp              | 488
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCCallHelpers.h                  |  14
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCapabilities.h                  |  89
-rw-r--r--  Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp        |   3
-rw-r--r--  Source/JavaScriptCore/dfg/DFGGraph.h                         |  19
-rw-r--r--  Source/JavaScriptCore/dfg/DFGNode.h                          |   9
-rw-r--r--  Source/JavaScriptCore/dfg/DFGNodeType.h                      |   2
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExit.cpp                     |   2
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExit.h                       |   4
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp             |   9
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp        | 105
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp           | 153
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOperations.cpp                  |  21
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOperations.h                    |  14
-rw-r--r--  Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp  |  31
-rw-r--r--  Source/JavaScriptCore/dfg/DFGRepatch.cpp                     |  24
-rw-r--r--  Source/JavaScriptCore/dfg/DFGScratchRegisterAllocator.h      |  22
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h        |   2
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp              |  95
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h                | 105
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp         |  33
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp            | 439
-rw-r--r--  Source/JavaScriptCore/dfg/DFGStructureCheckHoistingPhase.cpp |   2
-rw-r--r--  Source/JavaScriptCore/dfg/DFGThunks.cpp                      |  30
30 files changed, 1381 insertions, 625 deletions
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
index da5682f55..928788bf3 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
+++ b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
@@ -1420,9 +1420,15 @@ bool AbstractState::execute(unsigned indexInBlock)
ASSERT_NOT_REACHED();
break;
}
+ forNode(node.child1()).filterArrayModes(arrayModesFor(node.arrayMode()));
break;
}
case Arrayify: {
+ if (modeAlreadyChecked(forNode(node.child1()), node.arrayMode())) {
+ m_foundConstants = true;
+ node.setCanExit(false);
+ break;
+ }
switch (node.arrayMode()) {
case ALL_EFFECTFUL_MODES:
node.setCanExit(true);
@@ -1431,9 +1437,10 @@ bool AbstractState::execute(unsigned indexInBlock)
forNode(node.child2()).filter(SpecInt32);
forNode(nodeIndex).clear();
clobberStructures(indexInBlock);
+ forNode(node.child1()).filterArrayModes(arrayModesFor(node.arrayMode()));
break;
default:
- ASSERT_NOT_REACHED();
+ CRASH();
break;
}
break;
@@ -1524,7 +1531,12 @@ bool AbstractState::execute(unsigned indexInBlock)
clobberWorld(node.codeOrigin, indexInBlock);
forNode(nodeIndex).makeTop();
break;
-
+
+ case GarbageValue:
+ clobberWorld(node.codeOrigin, indexInBlock);
+ forNode(nodeIndex).makeTop();
+ break;
+
case ForceOSRExit:
node.setCanExit(true);
m_isValid = false;
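
The filterArrayModes(arrayModesFor(...)) pattern introduced above treats ArrayModes as a bit-set lattice over indexing types. A minimal sketch of the two helpers this hunk leans on, reconstructed from how they are used in this patch (the real definitions live in ArrayProfile.h; asArrayModes(t) is assumed to be 1u << t):

    typedef unsigned ArrayModes;

    // Join, used by AbstractValue::merge(). Reports whether anything
    // changed, so the abstract interpreter can detect its fixpoint.
    inline bool mergeArrayModes(ArrayModes& left, ArrayModes right)
    {
        ArrayModes newModes = left | right;
        if (newModes == left)
            return false;
        left = newModes;
        return true;
    }

    // Subset test: a check is provably redundant when every mode the value
    // might be in is already admitted by the check.
    inline bool arrayModesAlreadyChecked(ArrayModes proven, ArrayModes expected)
    {
        return (expected | proven) == expected;
    }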
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractValue.h b/Source/JavaScriptCore/dfg/DFGAbstractValue.h
index ff1c6d205..5382cd3ad 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractValue.h
+++ b/Source/JavaScriptCore/dfg/DFGAbstractValue.h
@@ -30,6 +30,7 @@
#if ENABLE(DFG_JIT)
+#include "ArrayProfile.h"
#include "DFGStructureAbstractValue.h"
#include "JSCell.h"
#include "SpeculatedType.h"
@@ -40,12 +41,14 @@ namespace JSC { namespace DFG {
struct AbstractValue {
AbstractValue()
: m_type(SpecNone)
+ , m_arrayModes(0)
{
}
void clear()
{
m_type = SpecNone;
+ m_arrayModes = 0;
m_currentKnownStructure.clear();
m_futurePossibleStructure.clear();
m_value = JSValue();
@@ -54,7 +57,7 @@ struct AbstractValue {
bool isClear() const
{
- bool result = m_type == SpecNone && m_currentKnownStructure.isClear() && m_futurePossibleStructure.isClear();
+ bool result = m_type == SpecNone && !m_arrayModes && m_currentKnownStructure.isClear() && m_futurePossibleStructure.isClear();
if (result)
ASSERT(!m_value);
return result;
@@ -63,6 +66,7 @@ struct AbstractValue {
void makeTop()
{
m_type = SpecTop;
+ m_arrayModes = ALL_ARRAY_MODES;
m_currentKnownStructure.makeTop();
m_futurePossibleStructure.makeTop();
m_value = JSValue();
@@ -71,13 +75,16 @@ struct AbstractValue {
void clobberStructures()
{
- if (m_type & SpecCell)
+ if (m_type & SpecCell) {
m_currentKnownStructure.makeTop();
- else
+ clobberArrayModes();
+ } else {
ASSERT(m_currentKnownStructure.isClear());
+ ASSERT(!m_arrayModes);
+ }
checkConsistency();
}
-
+
void clobberValue()
{
m_value = JSValue();
@@ -105,29 +112,17 @@ struct AbstractValue {
return result;
}
- void setFuturePossibleStructure(Structure* structure)
- {
- if (structure->transitionWatchpointSetIsStillValid())
- m_futurePossibleStructure = structure;
- else
- m_futurePossibleStructure.makeTop();
- }
-
- void filterFuturePossibleStructure(Structure* structure)
- {
- if (structure->transitionWatchpointSetIsStillValid())
- m_futurePossibleStructure.filter(StructureAbstractValue(structure));
- }
-
void setMostSpecific(JSValue value)
{
if (!!value && value.isCell()) {
Structure* structure = value.asCell()->structure();
m_currentKnownStructure = structure;
setFuturePossibleStructure(structure);
+ m_arrayModes = asArrayModes(structure->indexingType());
} else {
m_currentKnownStructure.clear();
m_futurePossibleStructure.clear();
+ m_arrayModes = 0;
}
m_type = speculationFromValue(value);
@@ -140,10 +135,14 @@ struct AbstractValue {
{
if (!!value && value.isCell()) {
m_currentKnownStructure.makeTop();
- setFuturePossibleStructure(value.asCell()->structure());
+ Structure* structure = value.asCell()->structure();
+ setFuturePossibleStructure(structure);
+ m_arrayModes = asArrayModes(structure->indexingType());
+ clobberArrayModes();
} else {
m_currentKnownStructure.clear();
m_futurePossibleStructure.clear();
+ m_arrayModes = 0;
}
m_type = speculationFromValue(value);
@@ -156,6 +155,7 @@ struct AbstractValue {
{
m_currentKnownStructure = structure;
setFuturePossibleStructure(structure);
+ m_arrayModes = asArrayModes(structure->indexingType());
m_type = speculationFromStructure(structure);
m_value = JSValue();
@@ -167,9 +167,11 @@ struct AbstractValue {
if (type & SpecCell) {
m_currentKnownStructure.makeTop();
m_futurePossibleStructure.makeTop();
+ m_arrayModes = ALL_ARRAY_MODES;
} else {
m_currentKnownStructure.clear();
m_futurePossibleStructure.clear();
+ m_arrayModes = 0;
}
m_type = type;
m_value = JSValue();
@@ -179,6 +181,7 @@ struct AbstractValue {
bool operator==(const AbstractValue& other) const
{
return m_type == other.m_type
+ && m_arrayModes == other.m_arrayModes
&& m_currentKnownStructure == other.m_currentKnownStructure
&& m_futurePossibleStructure == other.m_futurePossibleStructure
&& m_value == other.m_value;
@@ -199,6 +202,7 @@ struct AbstractValue {
result = !other.isClear();
} else {
result |= mergeSpeculation(m_type, other.m_type);
+ result |= mergeArrayModes(m_arrayModes, other.m_arrayModes);
result |= m_currentKnownStructure.addAll(other.m_currentKnownStructure);
result |= m_futurePossibleStructure.addAll(other.m_futurePossibleStructure);
if (m_value != other.m_value) {
@@ -218,6 +222,7 @@ struct AbstractValue {
if (type & SpecCell) {
m_currentKnownStructure.makeTop();
m_futurePossibleStructure.makeTop();
+ m_arrayModes = ALL_ARRAY_MODES;
}
m_value = JSValue();
@@ -227,6 +232,7 @@ struct AbstractValue {
void filter(const StructureSet& other)
{
m_type &= other.speculationFromStructures();
+ m_arrayModes &= other.arrayModesFromStructures();
m_currentKnownStructure.filter(other);
if (m_currentKnownStructure.isClear())
m_futurePossibleStructure.clear();
@@ -241,11 +247,24 @@ struct AbstractValue {
m_currentKnownStructure.filter(m_type);
m_futurePossibleStructure.filter(m_type);
+ filterArrayModesByType();
filterValueByType();
checkConsistency();
}
+ void filterArrayModes(ArrayModes arrayModes)
+ {
+ ASSERT(arrayModes);
+
+ m_type &= SpecCell;
+ m_arrayModes &= arrayModes;
+
+ // I could do more fancy filtering here. But it probably won't make any difference.
+
+ checkConsistency();
+ }
+
void filter(SpeculatedType type)
{
if (type == SpecTop)
@@ -258,31 +277,13 @@ struct AbstractValue {
// the new type (None) rather than the one passed (Array).
m_currentKnownStructure.filter(m_type);
m_futurePossibleStructure.filter(m_type);
-
+
+ filterArrayModesByType();
filterValueByType();
checkConsistency();
}
- // We could go further, and ensure that if the futurePossibleStructure contravenes
- // the value, then we could clear both of those things. But that's unlikely to help
- // in any realistic scenario, so we don't do it. Simpler is better.
- void filterValueByType()
- {
- if (!!m_type) {
- // The type is still non-empty. This implies that regardless of what filtering
- // was done, we either didn't have a value to begin with, or that value is still
- // valid.
- ASSERT(!m_value || validateType(m_value));
- return;
- }
-
- // The type has been rendered empty. That means that the value must now be invalid,
- // as well.
- ASSERT(!m_value || !validateType(m_value));
- m_value = JSValue();
- }
-
bool validateType(JSValue value) const
{
if (isTop())
@@ -319,7 +320,8 @@ struct AbstractValue {
ASSERT(m_type & SpecCell);
Structure* structure = value.asCell()->structure();
return m_currentKnownStructure.contains(structure)
- && m_futurePossibleStructure.contains(structure);
+ && m_futurePossibleStructure.contains(structure)
+ && (m_arrayModes & asArrayModes(structure->indexingType()));
}
return true;
@@ -330,6 +332,7 @@ struct AbstractValue {
if (!(m_type & SpecCell)) {
ASSERT(m_currentKnownStructure.isClear());
ASSERT(m_futurePossibleStructure.isClear());
+ ASSERT(!m_arrayModes);
}
if (isClear())
@@ -346,7 +349,7 @@ struct AbstractValue {
void dump(FILE* out) const
{
- fprintf(out, "(%s, ", speculationToString(m_type));
+ fprintf(out, "(%s, %s, ", speculationToString(m_type), arrayModesToString(m_arrayModes));
m_currentKnownStructure.dump(out);
dataLog(", ");
m_futurePossibleStructure.dump(out);
@@ -437,6 +440,13 @@ struct AbstractValue {
// unified with the set of all objects with structure 0x12345.
SpeculatedType m_type;
+ // This is a proven constraint on the possible indexing types that this value
+ // can have right now. It also implicitly constrains the set of structures
+ // that the value may have right now, since a structure has an immutable
+ // indexing type. This is subject to change upon reassignment, or any side
+ // effect that makes non-obvious changes to the heap.
+ ArrayModes m_arrayModes;
+
// This is a proven constraint on the possible values that this value can
// have now or any time in the future, unless it is reassigned. Note that this
// implies nothing about the structure. Oddly, JSValue() (i.e. the empty value)
@@ -444,6 +454,75 @@ struct AbstractValue {
// BOTTOM then JSValue() means BOTTOM; if m_type is not BOTTOM then JSValue()
// means TOP.
JSValue m_value;
+
+private:
+ void clobberArrayModes()
+ {
+ if (m_arrayModes == ALL_ARRAY_MODES)
+ return;
+
+ if (LIKELY(m_arrayModes & asArrayModes(NonArray)))
+ m_arrayModes = ALL_ARRAY_MODES;
+ else
+ clobberArrayModesSlow();
+ }
+
+ void clobberArrayModesSlow()
+ {
+ if (m_arrayModes & asArrayModes(ArrayClass))
+ m_arrayModes = ALL_ARRAY_MODES;
+ else if (m_arrayModes & asArrayModes(NonArrayWithContiguous))
+ m_arrayModes |= asArrayModes(NonArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage);
+ else if (m_arrayModes & asArrayModes(ArrayWithContiguous))
+ m_arrayModes |= asArrayModes(ArrayWithArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage);
+ else if (m_arrayModes & asArrayModes(NonArrayWithArrayStorage))
+ m_arrayModes |= asArrayModes(NonArrayWithSlowPutArrayStorage);
+ else if (m_arrayModes & asArrayModes(ArrayWithArrayStorage))
+ m_arrayModes |= asArrayModes(ArrayWithSlowPutArrayStorage);
+ }
+
+ void setFuturePossibleStructure(Structure* structure)
+ {
+ if (structure->transitionWatchpointSetIsStillValid())
+ m_futurePossibleStructure = structure;
+ else
+ m_futurePossibleStructure.makeTop();
+ }
+
+ void filterFuturePossibleStructure(Structure* structure)
+ {
+ if (structure->transitionWatchpointSetIsStillValid())
+ m_futurePossibleStructure.filter(StructureAbstractValue(structure));
+ }
+
+ // We could go further, and ensure that if the futurePossibleStructure contravenes
+ // the value, then we could clear both of those things. But that's unlikely to help
+ // in any realistic scenario, so we don't do it. Simpler is better.
+ void filterValueByType()
+ {
+ if (!!m_type) {
+ // The type is still non-empty. This implies that regardless of what filtering
+ // was done, we either didn't have a value to begin with, or that value is still
+ // valid.
+ ASSERT(!m_value || validateType(m_value));
+ return;
+ }
+
+ // The type has been rendered empty. That means that the value must now be invalid,
+ // as well.
+ ASSERT(!m_value || !validateType(m_value));
+ m_value = JSValue();
+ }
+
+ void filterArrayModesByType()
+ {
+ if (!(m_type & SpecCell))
+ m_arrayModes = 0;
+ else if (!(m_type & ~SpecArray))
+ m_arrayModes &= ALL_ARRAY_ARRAY_MODES;
+ else if (!(m_type & SpecArray))
+ m_arrayModes &= ALL_NON_ARRAY_ARRAY_MODES;
+ }
};
} } // namespace JSC::DFG
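
A worked example (hypothetical starting state) of the widening that clobberArrayModesSlow() performs: side effects can only move an object's indexing type forward, never back to a less structured shape, so clobbering adds the reachable modes instead of jumping straight to the top of the lattice.

    // Suppose the only proven mode is NonArrayWithContiguous. A side effect
    // may convert contiguous storage to (slow-put) array storage:
    ArrayModes modes = asArrayModes(NonArrayWithContiguous);
    modes |= asArrayModes(NonArrayWithArrayStorage)
        | asArrayModes(NonArrayWithSlowPutArrayStorage);
    // Only values that might be NonArray or ArrayClass can reach any shape,
    // which is why those bits send the set to ALL_ARRAY_MODES up front.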
diff --git a/Source/JavaScriptCore/dfg/DFGArrayMode.cpp b/Source/JavaScriptCore/dfg/DFGArrayMode.cpp
index 3985d769c..623e9d743 100644
--- a/Source/JavaScriptCore/dfg/DFGArrayMode.cpp
+++ b/Source/JavaScriptCore/dfg/DFGArrayMode.cpp
@@ -167,12 +167,17 @@ bool modeAlreadyChecked(AbstractValue& value, Array::Mode arrayMode)
case Array::PossiblyArrayWithContiguous:
case Array::PossiblyArrayWithContiguousToTail:
case Array::PossiblyArrayWithContiguousOutOfBounds:
+ case Array::ToContiguous:
+ if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(NonArrayWithContiguous) | asArrayModes(ArrayWithContiguous)))
+ return true;
return value.m_currentKnownStructure.hasSingleton()
&& hasContiguous(value.m_currentKnownStructure.singleton()->indexingType());
case Array::ArrayWithContiguous:
case Array::ArrayWithContiguousToTail:
case Array::ArrayWithContiguousOutOfBounds:
+ if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(ArrayWithContiguous)))
+ return true;
return value.m_currentKnownStructure.hasSingleton()
&& hasContiguous(value.m_currentKnownStructure.singleton()->indexingType())
&& (value.m_currentKnownStructure.singleton()->indexingType() & IsArray);
@@ -183,29 +188,38 @@ bool modeAlreadyChecked(AbstractValue& value, Array::Mode arrayMode)
case Array::PossiblyArrayWithArrayStorage:
case Array::PossiblyArrayWithArrayStorageToHole:
case Array::PossiblyArrayWithArrayStorageOutOfBounds:
+ case Array::ToArrayStorage:
+ case Array::PossiblyArrayToArrayStorage:
+ if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithArrayStorage)))
+ return true;
return value.m_currentKnownStructure.hasSingleton()
&& hasFastArrayStorage(value.m_currentKnownStructure.singleton()->indexingType());
case Array::SlowPutArrayStorage:
case Array::PossiblyArrayWithSlowPutArrayStorage:
+ case Array::ToSlowPutArrayStorage:
+ if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage)))
+ return true;
return value.m_currentKnownStructure.hasSingleton()
&& hasArrayStorage(value.m_currentKnownStructure.singleton()->indexingType());
case Array::ArrayWithArrayStorage:
case Array::ArrayWithArrayStorageToHole:
case Array::ArrayWithArrayStorageOutOfBounds:
+ case Array::ArrayToArrayStorage:
+ if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(ArrayWithArrayStorage)))
+ return true;
return value.m_currentKnownStructure.hasSingleton()
&& hasFastArrayStorage(value.m_currentKnownStructure.singleton()->indexingType())
&& (value.m_currentKnownStructure.singleton()->indexingType() & IsArray);
case Array::ArrayWithSlowPutArrayStorage:
+ if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(ArrayWithArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage)))
+ return true;
return value.m_currentKnownStructure.hasSingleton()
&& hasArrayStorage(value.m_currentKnownStructure.singleton()->indexingType())
&& (value.m_currentKnownStructure.singleton()->indexingType() & IsArray);
- case ALL_EFFECTFUL_MODES:
- return false;
-
case Array::Arguments:
return isArgumentsSpeculation(value.m_type);
diff --git a/Source/JavaScriptCore/dfg/DFGArrayMode.h b/Source/JavaScriptCore/dfg/DFGArrayMode.h
index a666bb83f..f7ac92733 100644
--- a/Source/JavaScriptCore/dfg/DFGArrayMode.h
+++ b/Source/JavaScriptCore/dfg/DFGArrayMode.h
@@ -349,6 +349,51 @@ inline bool isEffectful(Array::Mode mode)
}
}
+// This returns the set of array modes that will pass filtering of a CheckArray or
+// Arrayify with the given mode.
+inline ArrayModes arrayModesFor(Array::Mode arrayMode)
+{
+ switch (arrayMode) {
+ case Array::Generic:
+ return ALL_ARRAY_MODES;
+ case Array::Contiguous:
+ case Array::ContiguousToTail:
+ case Array::ContiguousOutOfBounds:
+ case Array::ToContiguous:
+ return asArrayModes(NonArrayWithContiguous);
+ case Array::PossiblyArrayWithContiguous:
+ case Array::PossiblyArrayWithContiguousToTail:
+ case Array::PossiblyArrayWithContiguousOutOfBounds:
+ return asArrayModes(NonArrayWithContiguous) | asArrayModes(ArrayWithContiguous);
+ case ARRAY_WITH_CONTIGUOUS_MODES:
+ return asArrayModes(ArrayWithContiguous);
+ case Array::ArrayStorage:
+ case Array::ArrayStorageToHole:
+ case Array::ArrayStorageOutOfBounds:
+ case Array::ToArrayStorage:
+ return asArrayModes(NonArrayWithArrayStorage);
+ case Array::ToSlowPutArrayStorage:
+ case Array::SlowPutArrayStorage:
+ return asArrayModes(NonArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage);
+ case Array::PossiblyArrayWithArrayStorage:
+ case Array::PossiblyArrayWithArrayStorageToHole:
+ case Array::PossiblyArrayWithArrayStorageOutOfBounds:
+ case Array::PossiblyArrayToArrayStorage:
+ return asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithArrayStorage);
+ case Array::PossiblyArrayWithSlowPutArrayStorage:
+ return asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage);
+ case Array::ArrayWithArrayStorage:
+ case Array::ArrayWithArrayStorageToHole:
+ case Array::ArrayWithArrayStorageOutOfBounds:
+ case Array::ArrayToArrayStorage:
+ return asArrayModes(ArrayWithArrayStorage);
+ case Array::ArrayWithSlowPutArrayStorage:
+ return asArrayModes(ArrayWithArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage);
+ default:
+ return asArrayModes(NonArray);
+ }
+}
+
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
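
Taken together with the DFGAbstractState.cpp and DFGArrayMode.cpp hunks above, arrayModesFor() closes the loop between checking and proving. A comment sketch of the flow, paraphrasing the patch's own code:

    // 1. Executing a CheckArray/Arrayify narrows the operand to the modes
    //    that pass the check:
    //        forNode(node.child1()).filterArrayModes(arrayModesFor(node.arrayMode()));
    // 2. A later check against a superset of the proven modes now succeeds
    //    even without a singleton structure:
    //        if (arrayModesAlreadyChecked(value.m_arrayModes, ...))
    //            return true;
    // 3. Constant folding (see DFGConstantFoldingPhase.cpp below) then
    //    removes the redundant CheckArray/Arrayify node.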
diff --git a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp
index a19b723d8..ca8683ead 100644
--- a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp
+++ b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp
@@ -73,7 +73,7 @@ void AssemblyHelpers::clearSamplingFlag(int32_t flag)
void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr)
{
#if CPU(X86_64)
- Jump checkInt32 = branchPtr(BelowOrEqual, gpr, TrustedImmPtr(reinterpret_cast<void*>(static_cast<uintptr_t>(0xFFFFFFFFu))));
+ Jump checkInt32 = branch64(BelowOrEqual, gpr, TrustedImm64(static_cast<uintptr_t>(0xFFFFFFFFu)));
breakpoint();
checkInt32.link(this);
#else
@@ -83,22 +83,22 @@ void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr)
void AssemblyHelpers::jitAssertIsJSInt32(GPRReg gpr)
{
- Jump checkJSInt32 = branchPtr(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
+ Jump checkJSInt32 = branch64(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
breakpoint();
checkJSInt32.link(this);
}
void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr)
{
- Jump checkJSNumber = branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
+ Jump checkJSNumber = branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
breakpoint();
checkJSNumber.link(this);
}
void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr)
{
- Jump checkJSInt32 = branchPtr(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
- Jump checkJSNumber = branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
+ Jump checkJSInt32 = branch64(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
+ Jump checkJSNumber = branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
checkJSInt32.link(this);
breakpoint();
checkJSNumber.link(this);
@@ -106,7 +106,7 @@ void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr)
void AssemblyHelpers::jitAssertIsCell(GPRReg gpr)
{
- Jump checkCell = branchTestPtr(MacroAssembler::Zero, gpr, GPRInfo::tagMaskRegister);
+ Jump checkCell = branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagMaskRegister);
breakpoint();
checkCell.link(this);
}
diff --git a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
index 5d338fa57..953a743ff 100644
--- a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
+++ b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
@@ -99,7 +99,11 @@ public:
}
void emitPutToCallFrameHeader(GPRReg from, JSStack::CallFrameHeaderEntry entry)
{
- storePtr(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
+#if USE(JSVALUE64)
+ store64(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
+#else
+ store32(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
+#endif
}
void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
@@ -110,7 +114,7 @@ public:
Jump branchIfNotCell(GPRReg reg)
{
#if USE(JSVALUE64)
- return branchTestPtr(MacroAssembler::NonZero, reg, GPRInfo::tagMaskRegister);
+ return branchTest64(MacroAssembler::NonZero, reg, GPRInfo::tagMaskRegister);
#else
return branch32(MacroAssembler::NotEqual, reg, TrustedImm32(JSValue::CellTag));
#endif
@@ -172,8 +176,14 @@ public:
ScratchBuffer* scratchBuffer = m_globalData->scratchBufferForSize(scratchSize);
EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
- for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i)
- storePtr(GPRInfo::toRegister(i), buffer + i);
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+#if USE(JSVALUE64)
+ store64(GPRInfo::toRegister(i), buffer + i);
+#else
+ store32(GPRInfo::toRegister(i), buffer + i);
+#endif
+ }
+
for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
@@ -204,8 +214,13 @@ public:
move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
}
- for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i)
- loadPtr(buffer + i, GPRInfo::toRegister(i));
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+#if USE(JSVALUE64)
+ load64(buffer + i, GPRInfo::toRegister(i));
+#else
+ load32(buffer + i, GPRInfo::toRegister(i));
+#endif
+ }
}
// These methods JIT generate dynamic, debug-only checks - akin to ASSERTs.
@@ -229,16 +244,16 @@ public:
#if USE(JSVALUE64)
GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
{
- moveDoubleToPtr(fpr, gpr);
- subPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ moveDoubleTo64(fpr, gpr);
+ sub64(GPRInfo::tagTypeNumberRegister, gpr);
jitAssertIsJSDouble(gpr);
return gpr;
}
FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
{
jitAssertIsJSDouble(gpr);
- addPtr(GPRInfo::tagTypeNumberRegister, gpr);
- movePtrToDouble(gpr, fpr);
+ add64(GPRInfo::tagTypeNumberRegister, gpr);
+ move64ToDouble(gpr, fpr);
return fpr;
}
#endif
@@ -258,7 +273,7 @@ public:
Jump emitExceptionCheck(ExceptionCheckKind kind = NormalExceptionCheck)
{
#if USE(JSVALUE64)
- return branchTestPtr(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(&globalData()->exception));
+ return branchTest64(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(&globalData()->exception));
#elif USE(JSVALUE32_64)
return branch32(kind == NormalExceptionCheck ? NotEqual : Equal, AbsoluteAddress(reinterpret_cast<char*>(&globalData()->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
#endif
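
The boxDouble()/unboxDouble() rewrite above relies on a property of the JSVALUE64 number encoding: subtracting the tag-type-number constant modulo 2^64 is the same as adding the double-encode offset 2^48. A standalone sketch of that arithmetic (constants as in JSC's 64-bit value encoding; the function names here are illustrative):

    #include <cstdint>

    static const uint64_t TagTypeNumber = 0xFFFF000000000000ull;

    // Box: shift double bit patterns up by 2^48 so they can never collide
    // with cell pointers or the 0xFFFF... integer tag range.
    uint64_t boxDoubleBits(uint64_t bits)
    {
        return bits - TagTypeNumber; // == bits + 0x0001000000000000 (mod 2^64)
    }

    uint64_t unboxDoubleBits(uint64_t boxed)
    {
        return boxed + TagTypeNumber;
    }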
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
index 4869cf8c1..36d18d7b3 100644
--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
@@ -169,12 +169,17 @@ private:
// Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
bool handleConstantInternalFunction(bool usesResult, int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind);
+ NodeIndex handleGetByOffset(SpeculatedType, NodeIndex base, unsigned identifierNumber, PropertyOffset);
void handleGetByOffset(
int destinationOperand, SpeculatedType, NodeIndex base, unsigned identifierNumber,
PropertyOffset);
void handleGetById(
int destinationOperand, SpeculatedType, NodeIndex base, unsigned identifierNumber,
const GetByIdStatus&);
+
+ // Convert a set of ResolveOperations into graph nodes
+ bool parseResolveOperations(SpeculatedType, unsigned identifierNumber, unsigned operations, unsigned putToBaseOperation, NodeIndex* base, NodeIndex* value);
+
// Prepare to parse a block.
void prepareToParseBlock();
// Parse a single basic block of bytecode instructions.
@@ -1143,6 +1148,8 @@ private:
Vector<unsigned> m_identifierRemap;
Vector<unsigned> m_constantRemap;
Vector<unsigned> m_constantBufferRemap;
+ Vector<unsigned> m_resolveOperationRemap;
+ Vector<unsigned> m_putToBaseOperationRemap;
// Blocks introduced by this code block, which need successor linking.
// May include up to one basic block that includes the continuation after
@@ -1779,24 +1786,28 @@ bool ByteCodeParser::handleConstantInternalFunction(
return false;
}
-void ByteCodeParser::handleGetByOffset(
- int destinationOperand, SpeculatedType prediction, NodeIndex base, unsigned identifierNumber,
- PropertyOffset offset)
+NodeIndex ByteCodeParser::handleGetByOffset(SpeculatedType prediction, NodeIndex base, unsigned identifierNumber, PropertyOffset offset)
{
NodeIndex propertyStorage;
if (isInlineOffset(offset))
propertyStorage = base;
else
propertyStorage = addToGraph(GetButterfly, base);
- set(destinationOperand,
- addToGraph(
- GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction),
- propertyStorage));
-
+ NodeIndex getByOffset = addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage);
+
StorageAccessData storageAccessData;
storageAccessData.offset = indexRelativeToBase(offset);
storageAccessData.identifierNumber = identifierNumber;
m_graph.m_storageAccessData.append(storageAccessData);
+
+ return getByOffset;
+}
+
+void ByteCodeParser::handleGetByOffset(
+ int destinationOperand, SpeculatedType prediction, NodeIndex base, unsigned identifierNumber,
+ PropertyOffset offset)
+{
+ set(destinationOperand, handleGetByOffset(prediction, base, identifierNumber, offset));
}
void ByteCodeParser::handleGetById(
@@ -1860,10 +1871,174 @@ void ByteCodeParser::prepareToParseBlock()
m_cellConstantNodes.clear();
}
+bool ByteCodeParser::parseResolveOperations(SpeculatedType prediction, unsigned identifier, unsigned operations, unsigned putToBaseOperation, NodeIndex* base, NodeIndex* value)
+{
+ ResolveOperations* resolveOperations = m_codeBlock->resolveOperations(operations);
+ if (resolveOperations->isEmpty()) {
+ addToGraph(ForceOSRExit);
+ return false;
+ }
+ JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
+ int skipCount = 0;
+ bool skippedScopes = false;
+ bool setBase = false;
+ ResolveOperation* pc = resolveOperations->data();
+ NodeIndex localBase = 0;
+ bool resolvingBase = true;
+ while (resolvingBase) {
+ switch (pc->m_operation) {
+ case ResolveOperation::ReturnGlobalObjectAsBase:
+ *base = get(m_codeBlock->globalObjectConstant());
+ ASSERT(!value);
+ return true;
+
+ case ResolveOperation::SetBaseToGlobal:
+ *base = get(m_codeBlock->globalObjectConstant());
+ setBase = true;
+ resolvingBase = false;
+ ++pc;
+ break;
+
+ case ResolveOperation::SetBaseToUndefined:
+ *base = constantUndefined();
+ setBase = true;
+ resolvingBase = false;
+ ++pc;
+ break;
+
+ case ResolveOperation::SetBaseToScope:
+ localBase = addToGraph(GetScope, OpInfo(skipCount));
+ *base = localBase;
+ setBase = true;
+
+ resolvingBase = false;
+
+ // Reset the scope skipping as we've already loaded it
+ skippedScopes = false;
+ ++pc;
+ break;
+ case ResolveOperation::ReturnScopeAsBase:
+ *base = addToGraph(GetScope, OpInfo(skipCount));
+ ASSERT(!value);
+ return true;
+
+ case ResolveOperation::SkipTopScopeNode:
+ if (m_inlineStackTop->m_inlineCallFrame)
+ return false;
+ skipCount = 1;
+ skippedScopes = true;
+ ++pc;
+ break;
+
+ case ResolveOperation::SkipScopes:
+ if (m_inlineStackTop->m_inlineCallFrame)
+ return false;
+ skipCount += pc->m_scopesToSkip;
+ skippedScopes = true;
+ ++pc;
+ break;
+
+ case ResolveOperation::CheckForDynamicEntriesBeforeGlobalScope:
+ return false;
+
+ case ResolveOperation::Fail:
+ return false;
+
+ default:
+ resolvingBase = false;
+ }
+ }
+ if (skippedScopes)
+ localBase = addToGraph(GetScope, OpInfo(skipCount));
+
+ if (base && !setBase)
+ *base = localBase;
+
+ ASSERT(value);
+ ResolveOperation* resolveValueOperation = pc;
+ switch (resolveValueOperation->m_operation) {
+ case ResolveOperation::GetAndReturnGlobalProperty: {
+ ResolveGlobalStatus status = ResolveGlobalStatus::computeFor(m_inlineStackTop->m_profiledBlock, m_currentIndex, resolveValueOperation, m_codeBlock->identifier(identifier));
+ if (status.isSimple()) {
+ ASSERT(status.structure());
+
+ NodeIndex globalObjectNode = addStructureTransitionCheck(globalObject, status.structure());
+
+ if (status.specificValue()) {
+ ASSERT(status.specificValue().isCell());
+ *value = cellConstant(status.specificValue().asCell());
+ } else
+ *value = handleGetByOffset(prediction, globalObjectNode, identifier, status.offset());
+ return true;
+ }
+
+ NodeIndex resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
+ m_graph.m_resolveGlobalData.append(ResolveGlobalData());
+ ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
+ data.identifierNumber = identifier;
+ data.resolveOperationsIndex = operations;
+ data.putToBaseOperationIndex = putToBaseOperation;
+ data.resolvePropertyIndex = resolveValueOperation - resolveOperations->data();
+ *value = resolve;
+ return true;
+ }
+ case ResolveOperation::GetAndReturnGlobalVar: {
+ *value = addToGraph(GetGlobalVar,
+ OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)),
+ OpInfo(prediction));
+ return true;
+ }
+ case ResolveOperation::GetAndReturnGlobalVarWatchable: {
+ SpeculatedType prediction = getPrediction();
+
+ JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
+
+ Identifier ident = m_codeBlock->identifier(identifier);
+ SymbolTableEntry entry = globalObject->symbolTable()->get(ident.impl());
+ if (!entry.couldBeWatched()) {
+ *value = addToGraph(GetGlobalVar, OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)), OpInfo(prediction));
+ return true;
+ }
+
+ // The watchpoint is still intact! This means that we will get notified if the
+ // current value in the global variable changes. So, we can inline that value.
+ // Moreover, currently we can assume that this value is a JSFunction*, which
+ // implies that it's a cell. This simplifies things, since in general we'd have
+ // to use a JSConstant for non-cells and a WeakJSConstant for cells. So instead
+ // of having both cases we just assert that the value is a cell.
+
+ // NB. If it wasn't for CSE, GlobalVarWatchpoint would have no need for the
+ // register pointer. But CSE tracks effects on global variables by comparing
+ // register pointers. Because CSE executes multiple times while the backend
+ // executes once, we use the following performance trade-off:
+ // - The node refers directly to the register pointer to make CSE super cheap.
+ // - To perform backend code generation, the node only contains the identifier
+ // number, from which it is possible to get (via a few average-time O(1)
+ // lookups) to the WatchpointSet.
+
+ addToGraph(GlobalVarWatchpoint, OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)), OpInfo(identifier));
+
+ JSValue specificValue = globalObject->registerAt(entry.getIndex()).get();
+ ASSERT(specificValue.isCell());
+ *value = cellConstant(specificValue.asCell());
+ return true;
+ }
+ case ResolveOperation::GetAndReturnScopedVar: {
+ NodeIndex getScopeRegisters = addToGraph(GetScopeRegisters, localBase);
+ *value = addToGraph(GetScopedVar, OpInfo(resolveValueOperation->m_offset), OpInfo(prediction), getScopeRegisters);
+ return true;
+ }
+ default:
+ CRASH();
+ return false;
+ }
+
+}
+
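// Sketch of the data parseResolveOperations() walks (the real definitions
// live in bytecode/ResolveOperation.h): the baseline tiers precompile each
// resolve into a short program of ResolveOperations, e.g. for a variable
// two scopes up, something like:
//
//     { SkipScopes, m_scopesToSkip = 2 }
//     { SetBaseToScope }
//     { GetAndReturnScopedVar, m_offset = <slot> }
//
// The function above replays that program at DFG-parse time, emitting
// GetScope/GetScopeRegisters/GetScopedVar nodes instead of interpreting it;
// any step it cannot model makes it return false so the caller can fall
// back to a generic Resolve node or ForceOSRExit.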
bool ByteCodeParser::parseBlock(unsigned limit)
{
bool shouldContinueParsing = true;
-
+
Interpreter* interpreter = m_globalData->interpreter;
Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
unsigned blockBegin = m_currentIndex;
@@ -2364,26 +2539,6 @@ bool ByteCodeParser::parseBlock(unsigned limit)
m_currentIndex += OPCODE_LENGTH(op_method_check) + OPCODE_LENGTH(op_get_by_id);
continue;
}
- case op_get_scoped_var: {
- SpeculatedType prediction = getPrediction();
- int dst = currentInstruction[1].u.operand;
- int slot = currentInstruction[2].u.operand;
- int depth = currentInstruction[3].u.operand;
- NodeIndex getScope = addToGraph(GetScope, OpInfo(depth));
- NodeIndex getScopeRegisters = addToGraph(GetScopeRegisters, getScope);
- NodeIndex getScopedVar = addToGraph(GetScopedVar, OpInfo(slot), OpInfo(prediction), getScopeRegisters);
- set(dst, getScopedVar);
- NEXT_OPCODE(op_get_scoped_var);
- }
- case op_put_scoped_var: {
- int slot = currentInstruction[1].u.operand;
- int depth = currentInstruction[2].u.operand;
- int source = currentInstruction[3].u.operand;
- NodeIndex getScope = addToGraph(GetScope, OpInfo(depth));
- NodeIndex getScopeRegisters = addToGraph(GetScopeRegisters, getScope);
- addToGraph(PutScopedVar, OpInfo(slot), getScope, getScopeRegisters, get(source));
- NEXT_OPCODE(op_put_scoped_var);
- }
case op_get_by_id:
case op_get_by_id_out_of_line:
case op_get_array_length: {
@@ -2510,75 +2665,15 @@ bool ByteCodeParser::parseBlock(unsigned limit)
NEXT_OPCODE(op_put_by_id);
}
- case op_get_global_var: {
- SpeculatedType prediction = getPrediction();
-
- JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
-
- NodeIndex getGlobalVar = addToGraph(
- GetGlobalVar,
- OpInfo(globalObject->assertRegisterIsInThisObject(currentInstruction[2].u.registerPointer)),
- OpInfo(prediction));
- set(currentInstruction[1].u.operand, getGlobalVar);
- NEXT_OPCODE(op_get_global_var);
- }
-
- case op_get_global_var_watchable: {
- SpeculatedType prediction = getPrediction();
-
- JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
-
- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
- Identifier identifier = m_codeBlock->identifier(identifierNumber);
- SymbolTableEntry entry = globalObject->symbolTable()->get(identifier.impl());
- if (!entry.couldBeWatched()) {
- NodeIndex getGlobalVar = addToGraph(
- GetGlobalVar,
- OpInfo(globalObject->assertRegisterIsInThisObject(currentInstruction[2].u.registerPointer)),
- OpInfo(prediction));
- set(currentInstruction[1].u.operand, getGlobalVar);
- NEXT_OPCODE(op_get_global_var_watchable);
- }
-
- // The watchpoint is still intact! This means that we will get notified if the
- // current value in the global variable changes. So, we can inline that value.
- // Moreover, currently we can assume that this value is a JSFunction*, which
- // implies that it's a cell. This simplifies things, since in general we'd have
- // to use a JSConstant for non-cells and a WeakJSConstant for cells. So instead
- // of having both cases we just assert that the value is a cell.
-
- // NB. If it wasn't for CSE, GlobalVarWatchpoint would have no need for the
- // register pointer. But CSE tracks effects on global variables by comparing
- // register pointers. Because CSE executes multiple times while the backend
- // executes once, we use the following performance trade-off:
- // - The node refers directly to the register pointer to make CSE super cheap.
- // - To perform backend code generation, the node only contains the identifier
- // number, from which it is possible to get (via a few average-time O(1)
- // lookups) to the WatchpointSet.
-
- addToGraph(
- GlobalVarWatchpoint,
- OpInfo(globalObject->assertRegisterIsInThisObject(currentInstruction[2].u.registerPointer)),
- OpInfo(identifierNumber));
-
- JSValue specificValue = globalObject->registerAt(entry.getIndex()).get();
- ASSERT(specificValue.isCell());
- set(currentInstruction[1].u.operand, cellConstant(specificValue.asCell()));
-
- NEXT_OPCODE(op_get_global_var_watchable);
- }
-
- case op_put_global_var:
case op_init_global_const: {
NodeIndex value = get(currentInstruction[2].u.operand);
addToGraph(
PutGlobalVar,
OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
value);
- NEXT_OPCODE(op_put_global_var);
+ NEXT_OPCODE(op_init_global_const);
}
- case op_put_global_var_check:
case op_init_global_const_check: {
NodeIndex value = get(currentInstruction[2].u.operand);
CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
@@ -2591,16 +2686,17 @@ bool ByteCodeParser::parseBlock(unsigned limit)
PutGlobalVar,
OpInfo(globalObject->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
value);
- NEXT_OPCODE(op_put_global_var_check);
+ NEXT_OPCODE(op_init_global_const_check);
}
addToGraph(
PutGlobalVarCheck,
OpInfo(codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
OpInfo(identifierNumber),
value);
- NEXT_OPCODE(op_put_global_var_check);
+ NEXT_OPCODE(op_init_global_const_check);
}
+
// === Block terminators. ===
case op_jmp: {
@@ -2869,69 +2965,175 @@ bool ByteCodeParser::parseBlock(unsigned limit)
addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
LAST_OPCODE(op_jneq_ptr);
- case op_resolve: {
+ case op_resolve:
+ case op_resolve_global_property:
+ case op_resolve_global_var:
+ case op_resolve_scoped_var:
+ case op_resolve_scoped_var_on_top_scope:
+ case op_resolve_scoped_var_with_top_scope_check: {
SpeculatedType prediction = getPrediction();
unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
+ unsigned operations = m_inlineStackTop->m_resolveOperationRemap[currentInstruction[3].u.operand];
+ NodeIndex value = 0;
+ if (parseResolveOperations(prediction, identifier, operations, 0, 0, &value)) {
+ set(currentInstruction[1].u.operand, value);
+ NEXT_OPCODE(op_resolve);
+ }
+
+ NodeIndex resolve = addToGraph(Resolve, OpInfo(m_graph.m_resolveOperationsData.size()), OpInfo(prediction));
+ m_graph.m_resolveOperationsData.append(ResolveOperationData());
+ ResolveOperationData& data = m_graph.m_resolveOperationsData.last();
+ data.identifierNumber = identifier;
+ data.resolveOperationsIndex = operations;
- NodeIndex resolve = addToGraph(Resolve, OpInfo(identifier), OpInfo(prediction));
set(currentInstruction[1].u.operand, resolve);
NEXT_OPCODE(op_resolve);
}
+ case op_put_to_base_variable:
+ case op_put_to_base: {
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
+ unsigned value = currentInstruction[3].u.operand;
+ unsigned operation = m_inlineStackTop->m_putToBaseOperationRemap[currentInstruction[4].u.operand];
+ PutToBaseOperation* putToBase = m_codeBlock->putToBaseOperation(operation);
+
+ if (putToBase->m_isDynamic) {
+ addToGraph(Phantom, get(base));
+ addToGraph(PutById, OpInfo(identifier), get(base), get(value));
+ NEXT_OPCODE(op_put_to_base);
+ }
+
+ switch (putToBase->m_kind) {
+ case PutToBaseOperation::Uninitialised:
+ addToGraph(Phantom, get(base));
+ addToGraph(ForceOSRExit);
+ break;
+
+ case PutToBaseOperation::GlobalVariablePutChecked: {
+ CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
+ JSGlobalObject* globalObject = codeBlock->globalObject();
+ SymbolTableEntry entry = globalObject->symbolTable()->get(m_codeBlock->identifier(identifier).impl());
+ if (entry.couldBeWatched()) {
+ addToGraph(PutGlobalVarCheck,
+ OpInfo(codeBlock->globalObject()->assertRegisterIsInThisObject(putToBase->m_registerAddress)),
+ OpInfo(identifier),
+ get(value));
+ break;
+ }
+ }
+ case PutToBaseOperation::GlobalVariablePut:
+ addToGraph(PutGlobalVar,
+ OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(putToBase->m_registerAddress)),
+ get(value));
+ break;
+ case PutToBaseOperation::VariablePut: {
+ addToGraph(Phantom, get(base));
+ NodeIndex getScope = addToGraph(GetScope, OpInfo(putToBase->m_scopeDepth));
+ NodeIndex getScopeRegisters = addToGraph(GetScopeRegisters, getScope);
+ addToGraph(PutScopedVar, OpInfo(putToBase->m_offset), getScope, getScopeRegisters, get(value));
+ break;
+ }
+ case PutToBaseOperation::GlobalPropertyPut: {
+ if (!putToBase->m_structure) {
+ addToGraph(Phantom, get(base));
+ addToGraph(ForceOSRExit);
+ NEXT_OPCODE(op_put_to_base);
+ }
+ NodeIndex baseNode = get(base);
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putToBase->m_structure.get())), baseNode);
+ NodeIndex propertyStorage;
+ if (isInlineOffset(putToBase->m_offset))
+ propertyStorage = baseNode;
+ else
+ propertyStorage = addToGraph(GetButterfly, baseNode);
+ addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, baseNode, get(value));
+
+ StorageAccessData storageAccessData;
+ storageAccessData.offset = indexRelativeToBase(putToBase->m_offset);
+ storageAccessData.identifierNumber = identifier;
+ m_graph.m_storageAccessData.append(storageAccessData);
+ break;
+ }
+ case PutToBaseOperation::Readonly:
+ case PutToBaseOperation::Generic:
+ addToGraph(Phantom, get(base));
+ addToGraph(PutById, OpInfo(identifier), get(base), get(value));
+ }
+ NEXT_OPCODE(op_put_to_base);
+ }
+
+ case op_resolve_base_to_global:
+ case op_resolve_base_to_global_dynamic:
+ case op_resolve_base_to_scope:
+ case op_resolve_base_to_scope_with_top_scope_check:
case op_resolve_base: {
SpeculatedType prediction = getPrediction();
unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
+ unsigned operations = m_inlineStackTop->m_resolveOperationRemap[currentInstruction[4].u.operand];
+ unsigned putToBaseOperation = m_inlineStackTop->m_putToBaseOperationRemap[currentInstruction[5].u.operand];
- NodeIndex resolve = addToGraph(currentInstruction[3].u.operand ? ResolveBaseStrictPut : ResolveBase, OpInfo(identifier), OpInfo(prediction));
+ NodeIndex base = 0;
+ if (parseResolveOperations(prediction, identifier, operations, 0, &base, 0)) {
+ set(currentInstruction[1].u.operand, base);
+ NEXT_OPCODE(op_resolve_base);
+ }
+
+ NodeIndex resolve = addToGraph(currentInstruction[3].u.operand ? ResolveBaseStrictPut : ResolveBase, OpInfo(m_graph.m_resolveOperationsData.size()), OpInfo(prediction));
+ m_graph.m_resolveOperationsData.append(ResolveOperationData());
+ ResolveOperationData& data = m_graph.m_resolveOperationsData.last();
+ data.identifierNumber = identifier;
+ data.resolveOperationsIndex = operations;
+ data.putToBaseOperationIndex = putToBaseOperation;
+
set(currentInstruction[1].u.operand, resolve);
NEXT_OPCODE(op_resolve_base);
}
-
- case op_resolve_global: {
+ case op_resolve_with_base: {
SpeculatedType prediction = getPrediction();
-
- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[
- currentInstruction[2].u.operand];
-
- ResolveGlobalStatus status = ResolveGlobalStatus::computeFor(
- m_inlineStackTop->m_profiledBlock, m_currentIndex,
- m_codeBlock->identifier(identifierNumber));
- if (status.isSimple()) {
- ASSERT(status.structure());
-
- NodeIndex globalObject = addStructureTransitionCheck(
- m_inlineStackTop->m_codeBlock->globalObject(), status.structure());
-
- if (status.specificValue()) {
- ASSERT(status.specificValue().isCell());
-
- set(currentInstruction[1].u.operand,
- cellConstant(status.specificValue().asCell()));
- } else {
- handleGetByOffset(
- currentInstruction[1].u.operand, prediction, globalObject,
- identifierNumber, status.offset());
- }
-
- m_globalResolveNumber++; // Skip over the unused global resolve info.
-
- NEXT_OPCODE(op_resolve_global);
+ unsigned baseDst = currentInstruction[1].u.operand;
+ unsigned valueDst = currentInstruction[2].u.operand;
+ unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
+ unsigned operations = m_inlineStackTop->m_resolveOperationRemap[currentInstruction[4].u.operand];
+ unsigned putToBaseOperation = m_inlineStackTop->m_putToBaseOperationRemap[currentInstruction[5].u.operand];
+
+ NodeIndex base = 0;
+ NodeIndex value = 0;
+ if (parseResolveOperations(prediction, identifier, operations, putToBaseOperation, &base, &value)) {
+ set(baseDst, base);
+ set(valueDst, value);
+ } else {
+ addToGraph(ForceOSRExit);
+ set(baseDst, addToGraph(GarbageValue));
+ set(valueDst, addToGraph(GarbageValue));
}
-
- NodeIndex resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
- m_graph.m_resolveGlobalData.append(ResolveGlobalData());
- ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
- data.identifierNumber = identifierNumber;
- data.resolveInfoIndex = m_globalResolveNumber++;
- set(currentInstruction[1].u.operand, resolve);
- NEXT_OPCODE(op_resolve_global);
+ NEXT_OPCODE(op_resolve_with_base);
}
+ case op_resolve_with_this: {
+ SpeculatedType prediction = getPrediction();
+ unsigned baseDst = currentInstruction[1].u.operand;
+ unsigned valueDst = currentInstruction[2].u.operand;
+ unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
+ unsigned operations = m_inlineStackTop->m_resolveOperationRemap[currentInstruction[4].u.operand];
+
+ NodeIndex base = 0;
+ NodeIndex value = 0;
+ if (parseResolveOperations(prediction, identifier, operations, 0, &base, &value)) {
+ set(baseDst, base);
+ set(valueDst, value);
+ } else {
+ addToGraph(ForceOSRExit);
+ set(baseDst, addToGraph(GarbageValue));
+ set(valueDst, addToGraph(GarbageValue));
+ }
+ NEXT_OPCODE(op_resolve_with_this);
+ }
case op_loop_hint: {
// Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
// OSR can only happen at basic block boundaries. Assert that these two statements
@@ -2943,7 +3145,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
// block. Hence, machine code block = true code block = not inline code block.
if (!m_inlineStackTop->m_caller)
m_currentBlock->isOSRTarget = true;
-
+
// Emit a phantom node to ensure that there is a placeholder node for this bytecode
// op.
addToGraph(Phantom);
@@ -3331,6 +3533,8 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
+ m_resolveOperationRemap.resize(codeBlock->numberOfResolveOperations());
+ m_putToBaseOperationRemap.resize(codeBlock->numberOfPutToBaseOperations());
for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
StringImpl* rep = codeBlock->identifier(i).impl();
@@ -3357,8 +3561,11 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
}
m_constantRemap[i] = result.iterator->value;
}
- for (unsigned i = 0; i < codeBlock->numberOfGlobalResolveInfos(); ++i)
- byteCodeParser->m_codeBlock->addGlobalResolveInfo(std::numeric_limits<unsigned>::max());
+ for (size_t i = 0; i < codeBlock->numberOfResolveOperations(); i++) {
+ uint32_t newResolve = byteCodeParser->m_codeBlock->addResolve();
+ m_resolveOperationRemap[i] = newResolve;
+ byteCodeParser->m_codeBlock->resolveOperations(newResolve)->append(*codeBlock->resolveOperations(i));
+ }
for (unsigned i = 0; i < codeBlock->numberOfConstantBuffers(); ++i) {
// If we inline the same code block multiple times, we don't want to needlessly
// duplicate its constant buffers.
@@ -3373,6 +3580,11 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
m_constantBufferRemap[i] = newIndex;
byteCodeParser->m_constantBufferCache.add(ConstantBufferKey(codeBlock, i), newIndex);
}
+ for (size_t i = 0; i < codeBlock->numberOfPutToBaseOperations(); i++) {
+ uint32_t putToBaseResolve = byteCodeParser->m_codeBlock->addPutToBase();
+ m_putToBaseOperationRemap[i] = putToBaseResolve;
+ *byteCodeParser->m_codeBlock->putToBaseOperation(putToBaseResolve) = *codeBlock->putToBaseOperation(i);
+ }
m_callsiteBlockHeadNeedsLinking = true;
} else {
@@ -3389,6 +3601,8 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
+ m_resolveOperationRemap.resize(codeBlock->numberOfResolveOperations());
+ m_putToBaseOperationRemap.resize(codeBlock->numberOfPutToBaseOperations());
for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
m_identifierRemap[i] = i;
@@ -3396,6 +3610,10 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
m_constantRemap[i] = i + FirstConstantRegisterIndex;
for (size_t i = 0; i < codeBlock->numberOfConstantBuffers(); ++i)
m_constantBufferRemap[i] = i;
+ for (size_t i = 0; i < codeBlock->numberOfResolveOperations(); ++i)
+ m_resolveOperationRemap[i] = i;
+ for (size_t i = 0; i < codeBlock->numberOfPutToBaseOperations(); ++i)
+ m_putToBaseOperationRemap[i] = i;
m_callsiteBlockHeadNeedsLinking = false;
}
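
The two new remap vectors mirror the existing identifier/constant remaps: indices embedded in bytecode are local to their code block, but inlined code is parsed into the root (machine) code block's graph, so every index is translated at its use site. A comment sketch, quoting the parse-time use from earlier in this file:

    // unsigned operations =
    //     m_inlineStackTop->m_resolveOperationRemap[currentInstruction[3].u.operand];
    //
    // For the non-inlined case the remap is the identity (the second branch
    // fills m_resolveOperationRemap[i] = i), so use sites never need to
    // distinguish inlined from root parsing.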
diff --git a/Source/JavaScriptCore/dfg/DFGCCallHelpers.h b/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
index 4a6024305..a2570b7ea 100644
--- a/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
+++ b/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
@@ -551,7 +551,21 @@ public:
move(arg2, GPRInfo::argumentGPR2);
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
+#if CPU(X86_64)
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm64 arg2)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm64 arg1, GPRReg arg2)
+ {
+ move(arg2, GPRInfo::argumentGPR2); // Move this first, so setting arg1 does not trample!
+ move(arg1, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+#endif
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2)
{
move(arg1, GPRInfo::argumentGPR1);
diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.h b/Source/JavaScriptCore/dfg/DFGCapabilities.h
index dc6f7aa1c..e80cc28ae 100644
--- a/Source/JavaScriptCore/dfg/DFGCapabilities.h
+++ b/Source/JavaScriptCore/dfg/DFGCapabilities.h
@@ -68,6 +68,48 @@ inline bool mightInlineFunctionForConstruct(CodeBlock* codeBlock)
}
// Opcode checking.
+inline bool canInlineResolveOperations(OpcodeID opcode, ResolveOperations* operations)
+{
+ // Don't try to inline a resolve for which we have no information
+ if (operations->isEmpty())
+ return false;
+
+ for (unsigned i = 0; i < operations->size(); i++) {
+ switch (operations->data()[i].m_operation) {
+ case ResolveOperation::ReturnGlobalObjectAsBase:
+ case ResolveOperation::SetBaseToGlobal:
+ case ResolveOperation::SetBaseToUndefined:
+ case ResolveOperation::GetAndReturnGlobalProperty:
+ case ResolveOperation::GetAndReturnGlobalVar:
+ case ResolveOperation::GetAndReturnGlobalVarWatchable:
+ continue;
+
+ case ResolveOperation::Fail:
+ // The DFG can handle generic cases of failed resolves
+ ASSERT(opcode != op_resolve_base_to_global_dynamic);
+ ASSERT(opcode != op_resolve_base_to_scope_with_top_scope_check);
+ ASSERT(opcode != op_resolve_base_to_global);
+ ASSERT(opcode != op_resolve_base_to_scope);
+ if (opcode != op_resolve && opcode != op_resolve_base)
+ return false;
+
+ case ResolveOperation::SkipTopScopeNode:
+ case ResolveOperation::SkipScopes:
+ case ResolveOperation::SetBaseToScope:
+ case ResolveOperation::ReturnScopeAsBase:
+ case ResolveOperation::GetAndReturnScopedVar:
+ // These opcodes would be easy to support with inlining, but we currently don't do it.
+ // The issue is that the scope chain will not be set correctly.
+ return false;
+
+ case ResolveOperation::CheckForDynamicEntriesBeforeGlobalScope:
+ // This would be easy to support in all cases.
+ return false;
+ }
+ }
+ return true;
+}
+
inline CapabilityLevel canCompileOpcode(OpcodeID opcodeID, CodeBlock*, Instruction*)
{
switch (opcodeID) {
@@ -116,8 +158,6 @@ inline CapabilityLevel canCompileOpcode(OpcodeID opcodeID, CodeBlock*, Instructi
case op_get_by_val:
case op_put_by_val:
case op_method_check:
- case op_get_scoped_var:
- case op_put_scoped_var:
case op_get_by_id:
case op_get_by_id_out_of_line:
case op_get_array_length:
@@ -127,10 +167,6 @@ inline CapabilityLevel canCompileOpcode(OpcodeID opcodeID, CodeBlock*, Instructi
case op_put_by_id_transition_direct_out_of_line:
case op_put_by_id_transition_normal:
case op_put_by_id_transition_normal_out_of_line:
- case op_get_global_var:
- case op_get_global_var_watchable:
- case op_put_global_var:
- case op_put_global_var_check:
case op_init_global_const:
case op_init_global_const_check:
case op_jmp:
@@ -157,9 +193,6 @@ inline CapabilityLevel canCompileOpcode(OpcodeID opcodeID, CodeBlock*, Instructi
case op_ret:
case op_end:
case op_call_put_result:
- case op_resolve:
- case op_resolve_base:
- case op_resolve_global:
case op_new_object:
case op_new_array:
case op_new_array_with_size:
@@ -181,11 +214,30 @@ inline CapabilityLevel canCompileOpcode(OpcodeID opcodeID, CodeBlock*, Instructi
case op_get_argument_by_val:
case op_get_arguments_length:
case op_jneq_ptr:
+ case op_put_to_base_variable:
+ case op_put_to_base:
return CanCompile;
case op_call_varargs:
return ShouldProfile;
+ case op_resolve:
+ case op_resolve_global_property:
+ case op_resolve_global_var:
+ case op_resolve_scoped_var:
+ case op_resolve_scoped_var_on_top_scope:
+ case op_resolve_scoped_var_with_top_scope_check:
+ return CanCompile;
+
+ case op_resolve_base_to_global:
+ case op_resolve_base_to_global_dynamic:
+ case op_resolve_base_to_scope:
+ case op_resolve_base_to_scope_with_top_scope_check:
+ case op_resolve_base:
+ case op_resolve_with_base:
+ case op_resolve_with_this:
+ return CanCompile;
+
default:
return CannotCompile;
}
@@ -194,13 +246,22 @@ inline CapabilityLevel canCompileOpcode(OpcodeID opcodeID, CodeBlock*, Instructi
inline bool canInlineOpcode(OpcodeID opcodeID, CodeBlock* codeBlock, Instruction* pc)
{
switch (opcodeID) {
-
- // These opcodes would be easy to support with inlining, but we currently don't do it.
- // The issue is that the scope chain will not be set correctly.
- case op_get_scoped_var:
- case op_put_scoped_var:
case op_resolve:
+ case op_resolve_global_property:
+ case op_resolve_global_var:
+ case op_resolve_scoped_var:
+ case op_resolve_scoped_var_on_top_scope:
+ case op_resolve_scoped_var_with_top_scope_check:
+ return canInlineResolveOperations(opcodeID, codeBlock->resolveOperations(pc[3].u.operand));
+
+ case op_resolve_base_to_global:
+ case op_resolve_base_to_global_dynamic:
+ case op_resolve_base_to_scope:
+ case op_resolve_base_to_scope_with_top_scope_check:
case op_resolve_base:
+ case op_resolve_with_base:
+ case op_resolve_with_this:
+ return canInlineResolveOperations(opcodeID, codeBlock->resolveOperations(pc[4].u.operand));
// Inlining doesn't correctly remap regular expression operands.
case op_new_regexp:
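
The predicate used above walks the baseline's profiled ResolveOperations chain and refuses inlining for any step that consults the scope chain, since inlining changes which scope chain is live. A minimal standalone sketch of the same decision shape, using an invented enum rather than JSC's real ResolveOperation type:

    #include <vector>

    // Illustrative stand-in for ResolveOperation::m_operation values.
    enum class ResolveOpKind {
        SetBaseToGlobal,       // scope-independent: safe to inline
        GetAndReturnGlobalVar, // scope-independent: safe to inline
        SetBaseToScope,        // walks the scope chain: unsafe when inlined
        GetAndReturnScopedVar, // walks the scope chain: unsafe when inlined
    };

    static bool canInlineResolveOps(const std::vector<ResolveOpKind>& ops)
    {
        for (ResolveOpKind op : ops) {
            switch (op) {
            case ResolveOpKind::SetBaseToScope:
            case ResolveOpKind::GetAndReturnScopedVar:
                return false; // scope-dependent step: bail out
            default:
                break;
            }
        }
        return true; // every step was scope-independent
    }
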
diff --git a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
index 8a261ad2b..25915cfd4 100644
--- a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
@@ -102,7 +102,8 @@ private:
break;
}
- case CheckArray: {
+ case CheckArray:
+ case Arrayify: {
if (!modeAlreadyChecked(m_state.forNode(node.child1()), node.arrayMode()))
break;
ASSERT(node.refCount() == 1);
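
Extending the fold from CheckArray to Arrayify is plain redundancy elimination: if the abstract interpreter has already proven that child1 satisfies the required array mode, the conversion can never fire and the node is dead. A hedged sketch of the pattern, with invented stand-ins for the DFG types:

    // AbstractValue/Node here are illustrative, not JSC's declarations.
    struct AbstractValue { unsigned provenArrayModes; };
    struct Node { unsigned acceptedArrayModes; bool live; };

    // Folds the check when the proven modes are a subset of the accepted ones.
    static bool tryFoldArrayCheck(Node& node, const AbstractValue& child)
    {
        if (child.provenArrayModes & ~node.acceptedArrayModes)
            return false; // value may still be in an unwanted mode
        node.live = false; // the real phase rewires the node to a no-op
        return true;
    }
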
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.h b/Source/JavaScriptCore/dfg/DFGGraph.h
index 212c8bbd2..b2c754f85 100644
--- a/Source/JavaScriptCore/dfg/DFGGraph.h
+++ b/Source/JavaScriptCore/dfg/DFGGraph.h
@@ -57,10 +57,23 @@ struct StorageAccessData {
struct ResolveGlobalData {
unsigned identifierNumber;
- unsigned resolveInfoIndex;
+ unsigned resolveOperationsIndex;
+ unsigned putToBaseOperationIndex;
+ unsigned resolvePropertyIndex;
};
-//
+struct ResolveOperationData {
+ unsigned identifierNumber;
+ unsigned resolveOperationsIndex;
+ unsigned putToBaseOperationIndex;
+};
+
+struct PutToBaseOperationData {
+ unsigned putToBaseOperationIndex;
+};
+
+
+//
// === Graph ===
//
// The dataflow graph is an ordered vector of nodes.
@@ -669,6 +682,8 @@ public:
Vector<Edge, 16> m_varArgChildren;
Vector<StorageAccessData> m_storageAccessData;
Vector<ResolveGlobalData> m_resolveGlobalData;
+ Vector<ResolveOperationData> m_resolveOperationsData;
+ Vector<PutToBaseOperationData> m_putToBaseOperationData;
Vector<NodeIndex, 8> m_arguments;
SegmentedVector<VariableAccessData, 16> m_variableAccessData;
SegmentedVector<ArgumentPosition, 8> m_argumentPositions;
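
ResolveOperationData and PutToBaseOperationData follow the graph's usual side-table idiom: the parser appends a record to the vector and stashes its index on the node (the accessor added to DFGNode.h below reads it back out of m_opInfo). A sketch of that round trip under invented names:

    #include <vector>

    // MiniGraph stands in for Graph; the index plays the role of Node::m_opInfo.
    struct ResolveData { unsigned identifierNumber; unsigned operationsIndex; };

    struct MiniGraph {
        std::vector<ResolveData> resolveData;

        unsigned addResolveData(unsigned identifier, unsigned operations)
        {
            resolveData.push_back({ identifier, operations });
            return unsigned(resolveData.size() - 1); // stored on the node
        }
    };

    // A later phase recovers the record from the index carried on the node.
    static const ResolveData& dataFor(const MiniGraph& g, unsigned opInfo)
    {
        return g.resolveData[opInfo];
    }
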
diff --git a/Source/JavaScriptCore/dfg/DFGNode.h b/Source/JavaScriptCore/dfg/DFGNode.h
index df6191eab..40b3ed7ec 100644
--- a/Source/JavaScriptCore/dfg/DFGNode.h
+++ b/Source/JavaScriptCore/dfg/DFGNode.h
@@ -352,9 +352,6 @@ struct Node {
case GetByIdFlush:
case PutById:
case PutByIdDirect:
- case Resolve:
- case ResolveBase:
- case ResolveBaseStrictPut:
return true;
default:
return false;
@@ -373,6 +370,12 @@ struct Node {
return m_opInfo;
}
+ unsigned resolveOperationsDataIndex()
+ {
+ ASSERT(op() == Resolve || op() == ResolveBase || op() == ResolveBaseStrictPut);
+ return m_opInfo;
+ }
+
bool hasArithNodeFlags()
{
switch (op()) {
diff --git a/Source/JavaScriptCore/dfg/DFGNodeType.h b/Source/JavaScriptCore/dfg/DFGNodeType.h
index 9c93a8ba3..1d2460659 100644
--- a/Source/JavaScriptCore/dfg/DFGNodeType.h
+++ b/Source/JavaScriptCore/dfg/DFGNodeType.h
@@ -234,6 +234,8 @@ namespace JSC { namespace DFG {
macro(Throw, NodeMustGenerate) \
macro(ThrowReferenceError, NodeMustGenerate) \
\
+ macro(GarbageValue, NodeResultJS | NodeClobbersWorld) \
+ \
/* This is a pseudo-terminal. It means that execution should fall out of DFG at */\
/* this point, but execution does continue in the basic block - just in a */\
/* different compiler. */\
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
index b3701722e..6560088fd 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
@@ -45,9 +45,9 @@ OSRExit::OSRExit(ExitKind kind, JSValueSource jsValueSource, MethodOfGettingAVal
, m_kind(kind)
, m_count(0)
, m_streamIndex(streamIndex)
- , m_lastSetOperand(jit->m_lastSetOperand)
{
ASSERT(m_codeOrigin.isSet());
+ m_setOperands.append(jit->m_lastSetOperand);
}
bool OSRExit::considerAddingAsFrequentExitSiteSlow(CodeBlock* dfgCodeBlock, CodeBlock* profiledCodeBlock)
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.h b/Source/JavaScriptCore/dfg/DFGOSRExit.h
index cd2434c11..0ecefe386 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExit.h
+++ b/Source/JavaScriptCore/dfg/DFGOSRExit.h
@@ -110,9 +110,9 @@ struct OSRExit {
}
unsigned m_streamIndex;
- int m_lastSetOperand;
+ Vector<int, 1> m_setOperands;
- RefPtr<ValueRecoveryOverride> m_valueRecoveryOverride;
+ Vector<RefPtr<ValueRecoveryOverride>, 1> m_valueRecoveryOverrides;
private:
bool considerAddingAsFrequentExitSiteSlow(CodeBlock* dfgCodeBlock, CodeBlock* profiledCodeBlock);
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
index 2ce1c887b..55a903c7a 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
@@ -70,11 +70,10 @@ void compileOSRExit(ExecState* exec)
Operands<ValueRecovery> operands;
codeBlock->variableEventStream().reconstruct(codeBlock, exit.m_codeOrigin, codeBlock->minifiedDFG(), exit.m_streamIndex, operands);
- // There may be an override, for forward speculations.
- if (!!exit.m_valueRecoveryOverride) {
- operands.setOperand(
- exit.m_valueRecoveryOverride->operand, exit.m_valueRecoveryOverride->recovery);
- }
+ // There may be overrides, for forward speculations.
+ for (size_t i = 0; i < exit.m_valueRecoveryOverrides.size(); i++)
+ operands.setOperand(exit.m_valueRecoveryOverrides[i]->operand, exit.m_valueRecoveryOverrides[i]->recovery);
+
SpeculationRecovery* recovery = 0;
if (exit.m_recoveryIndex)
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
index cb13dcc50..b64ce3fa1 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
@@ -83,28 +83,85 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
// 3) Refine some value profile, if appropriate.
- if (!!exit.m_jsValueSource && !!exit.m_valueProfile) {
- EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
+ if (!!exit.m_jsValueSource) {
+ if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
+ // If the instruction that this originated from has an array profile, then
+ // refine it. If it doesn't, then do nothing. The latter could happen for
+ // hoisted checks, or checks emitted for operations that didn't have array
+ // profiling - either ops that aren't array accesses at all, or weren't
+            // known to be array accesses in the bytecode. The latter case is a FIXME
+            // while the former case is an outcome of a CheckStructure not knowing why
+            // it was emitted (could be either due to an inline cache of a property
+            // access, or due to an array profile).
+
+ // Note: We are free to assume that the jsValueSource is already known to
+ // be a cell since both BadCache and BadIndexingType exits occur after
+ // the cell check would have already happened.
+
+ CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
+ if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
+ GPRReg usedRegister1;
+ GPRReg usedRegister2;
+ if (exit.m_jsValueSource.isAddress()) {
+ usedRegister1 = exit.m_jsValueSource.base();
+ usedRegister2 = InvalidGPRReg;
+ } else {
+ usedRegister1 = exit.m_jsValueSource.payloadGPR();
+ if (exit.m_jsValueSource.hasKnownTag())
+ usedRegister2 = InvalidGPRReg;
+ else
+ usedRegister2 = exit.m_jsValueSource.tagGPR();
+ }
+
+ GPRReg scratch1;
+ GPRReg scratch2;
+ scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
+ scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);
+
+ m_jit.push(scratch1);
+ m_jit.push(scratch2);
+
+ GPRReg value;
+ if (exit.m_jsValueSource.isAddress()) {
+ value = scratch1;
+ m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
+ } else
+ value = exit.m_jsValueSource.payloadGPR();
+
+ m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
+ m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
+ m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
+ m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
+ m_jit.lshift32(scratch1, scratch2);
+ m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
+
+ m_jit.pop(scratch2);
+ m_jit.pop(scratch1);
+ }
+ }
- if (exit.m_jsValueSource.isAddress()) {
- // Save a register so we can use it.
- GPRReg scratch = GPRInfo::regT0;
- if (scratch == exit.m_jsValueSource.base())
- scratch = GPRInfo::regT1;
- ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(sizeof(uint32_t));
- EncodedJSValue* scratchDataBuffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
- m_jit.store32(scratch, scratchDataBuffer);
- m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
- m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
- m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
- m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
- m_jit.load32(scratchDataBuffer, scratch);
- } else if (exit.m_jsValueSource.hasKnownTag()) {
- m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
- m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
- } else {
- m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
- m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
+ if (!!exit.m_valueProfile) {
+ EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
+
+ if (exit.m_jsValueSource.isAddress()) {
+ // Save a register so we can use it.
+ GPRReg scratch = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());
+
+ m_jit.push(scratch);
+
+ m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
+ m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
+ m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
+ m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
+
+ m_jit.pop(scratch);
+ } else if (exit.m_jsValueSource.hasKnownTag()) {
+ m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
+ m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
+ } else {
+ m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
+ m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
+ }
}
}
@@ -675,9 +732,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
// 15) Load the result of the last bytecode operation into regT0.
- if (exit.m_lastSetOperand != std::numeric_limits<int>::max()) {
- m_jit.load32(AssemblyHelpers::payloadFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
- m_jit.load32(AssemblyHelpers::tagFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister2);
+ for (size_t i = 0; i < exit.m_setOperands.size(); i++) {
+ m_jit.load32(AssemblyHelpers::payloadFor((VirtualRegister)exit.m_setOperands[i]), GPRInfo::cachedResultRegister);
+ m_jit.load32(AssemblyHelpers::tagFor((VirtualRegister)exit.m_setOperands[i]), GPRInfo::cachedResultRegister2);
}
// 16) Adjust the call frame pointer.
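
Stripped of register shuffling, the profile refinement this block emits is just two stores: remember the structure that was actually seen, and set the bit for its indexing type in the observed-modes mask. A plain-C++ sketch with placeholder types (the real fields live on JSC's Structure and ArrayProfile):

    #include <cstdint>

    struct Structure { uint8_t indexingType; };     // placeholder shape
    struct ArrayProfile {
        Structure* lastSeenStructure;
        uint32_t observedArrayModes;                // one bit per indexing type
    };

    static void refineArrayProfile(ArrayProfile& profile, Structure* seen)
    {
        profile.lastSeenStructure = seen;
        profile.observedArrayModes |= 1u << seen->indexingType;
    }
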
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
index 968e56f1a..65b89a550 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
@@ -73,12 +73,12 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
switch (recovery->type()) {
case SpeculativeAdd:
m_jit.sub32(recovery->src(), recovery->dest());
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery->dest());
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery->dest());
alreadyBoxed = recovery->dest();
break;
case BooleanSpeculationCheck:
- m_jit.xorPtr(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
+ m_jit.xor64(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
break;
default:
@@ -86,23 +86,70 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
}
}
- // 3) Refine some value profile, if appropriate.
-
- if (!!exit.m_jsValueSource && !!exit.m_valueProfile) {
- EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
+ // 3) Refine some array and/or value profile, if appropriate.
+
+ if (!!exit.m_jsValueSource) {
+ if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
+ // If the instruction that this originated from has an array profile, then
+ // refine it. If it doesn't, then do nothing. The latter could happen for
+ // hoisted checks, or checks emitted for operations that didn't have array
+ // profiling - either ops that aren't array accesses at all, or weren't
+            // known to be array accesses in the bytecode. The latter case is a FIXME
+            // while the former case is an outcome of a CheckStructure not knowing why
+            // it was emitted (could be either due to an inline cache of a property
+            // access, or due to an array profile).
+
+ CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
+ if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
+ GPRReg usedRegister;
+ if (exit.m_jsValueSource.isAddress())
+ usedRegister = exit.m_jsValueSource.base();
+ else
+ usedRegister = exit.m_jsValueSource.gpr();
+
+ GPRReg scratch1;
+ GPRReg scratch2;
+ scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister);
+ scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1);
+
+ m_jit.push(scratch1);
+ m_jit.push(scratch2);
+
+ GPRReg value;
+ if (exit.m_jsValueSource.isAddress()) {
+ value = scratch1;
+ m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
+ } else
+ value = exit.m_jsValueSource.gpr();
+
+ m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
+ m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
+ m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
+ m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
+ m_jit.lshift32(scratch1, scratch2);
+ m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
+
+ m_jit.pop(scratch2);
+ m_jit.pop(scratch1);
+ }
+ }
+ if (!!exit.m_valueProfile) {
+ EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
+
#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
- dataLog(" (have exit profile, bucket %p) ", bucket);
+ dataLog(" (have exit profile, bucket %p) ", bucket);
#endif
- if (exit.m_jsValueSource.isAddress()) {
- // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
- // since we know how to restore it.
- m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
- m_jit.storePtr(GPRInfo::tagTypeNumberRegister, bucket);
- m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(TagTypeNumber)), GPRInfo::tagTypeNumberRegister);
- } else
- m_jit.storePtr(exit.m_jsValueSource.gpr(), bucket);
+ if (exit.m_jsValueSource.isAddress()) {
+ // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
+ // since we know how to restore it.
+ m_jit.load64(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
+ m_jit.store64(GPRInfo::tagTypeNumberRegister, bucket);
+ m_jit.move(AssemblyHelpers::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
+ } else
+ m_jit.store64(exit.m_jsValueSource.gpr(), bucket);
+ }
}
// 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
@@ -230,7 +277,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
switch (recovery.technique()) {
case UnboxedInt32InGPR:
if (recovery.gpr() != alreadyBoxed)
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery.gpr());
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery.gpr());
break;
case AlreadyInJSStackAsUnboxedInt32:
@@ -252,7 +299,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
if (addressGPR == recovery.gpr())
addressGPR = GPRInfo::regT1;
- m_jit.storePtr(addressGPR, scratchDataBuffer);
+ m_jit.store64(addressGPR, scratchDataBuffer);
m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchDataBuffer + 1), addressGPR);
m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);
@@ -266,12 +313,12 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
positive.link(&m_jit);
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery.gpr());
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery.gpr());
done.link(&m_jit);
m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0);
- m_jit.loadPtr(scratchDataBuffer, addressGPR);
+ m_jit.load64(scratchDataBuffer, addressGPR);
break;
}
@@ -296,11 +343,11 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
case UnboxedInt32InGPR:
case UInt32InGPR:
if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
- m_jit.storePtr(recovery.gpr(), scratchDataBuffer + currentPoisonIndex);
+ m_jit.store64(recovery.gpr(), scratchDataBuffer + currentPoisonIndex);
m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
currentPoisonIndex++;
} else
- m_jit.storePtr(recovery.gpr(), AssemblyHelpers::addressFor((VirtualRegister)operand));
+ m_jit.store64(recovery.gpr(), AssemblyHelpers::addressFor((VirtualRegister)operand));
break;
default:
break;
@@ -330,11 +377,11 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
continue;
GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(recovery.fpr()));
if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
- m_jit.storePtr(gpr, scratchDataBuffer + currentPoisonIndex);
+ m_jit.store64(gpr, scratchDataBuffer + currentPoisonIndex);
m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
currentPoisonIndex++;
} else
- m_jit.storePtr(gpr, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
+ m_jit.store64(gpr, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
}
}
@@ -348,7 +395,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
continue;
m_jit.loadDouble(AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)), FPRInfo::fpRegT0);
m_jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
- m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
+ m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
}
}
@@ -368,20 +415,20 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
const ValueRecovery& recovery = operands[index];
switch (recovery.technique()) {
case DisplacedInJSStack:
- m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
+ m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
break;
case Int32DisplacedInJSStack: {
GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
break;
}
case DoubleDisplacedInJSStack: {
GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
- m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
- m_jit.subPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
+ m_jit.sub64(GPRInfo::tagTypeNumberRegister, gpr);
break;
}
@@ -397,7 +444,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
case DisplacedInJSStack:
case Int32DisplacedInJSStack:
case DoubleDisplacedInJSStack:
- m_jit.storePtr(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
+ m_jit.store64(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
break;
default:
@@ -428,21 +475,21 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
switch (recovery.technique()) {
case DisplacedInJSStack:
- m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
- m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
+ m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
+ m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
break;
case Int32DisplacedInJSStack: {
m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
- m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
+ m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
break;
}
case DoubleDisplacedInJSStack: {
- m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
- m_jit.subPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
- m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
+ m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
+ m_jit.sub64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
+ m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
break;
}
@@ -458,8 +505,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
case DisplacedInJSStack:
case Int32DisplacedInJSStack:
case DoubleDisplacedInJSStack:
- m_jit.loadPtr(scratchDataBuffer + scratchIndex++, GPRInfo::regT0);
- m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
+ m_jit.load64(scratchDataBuffer + scratchIndex++, GPRInfo::regT0);
+ m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
break;
default:
@@ -484,8 +531,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
case UnboxedInt32InGPR:
case UInt32InGPR:
case InFPR:
- m_jit.loadPtr(scratchDataBuffer + poisonIndex(virtualRegister), GPRInfo::regT0);
- m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)virtualRegister));
+ m_jit.load64(scratchDataBuffer + poisonIndex(virtualRegister), GPRInfo::regT0);
+ m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)virtualRegister));
break;
default:
@@ -499,16 +546,16 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
if (haveConstants) {
if (haveUndefined)
- m_jit.move(AssemblyHelpers::TrustedImmPtr(JSValue::encode(jsUndefined())), GPRInfo::regT0);
+ m_jit.move(AssemblyHelpers::TrustedImm64(JSValue::encode(jsUndefined())), GPRInfo::regT0);
for (size_t index = 0; index < operands.size(); ++index) {
const ValueRecovery& recovery = operands[index];
if (recovery.technique() != Constant)
continue;
if (recovery.constant().isUndefined())
- m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
+ m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
else
- m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(JSValue::encode(recovery.constant())), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
+ m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(recovery.constant())), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
}
}
@@ -576,11 +623,11 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
callerFrameGPR = GPRInfo::callFrameRegister;
m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
- m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
- m_jit.storePtr(callerFrameGPR, AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
+ m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->callee->scope()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
+ m_jit.store64(callerFrameGPR, AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ReturnPC)));
m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
- m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
+ m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->callee.get()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
}
// 15) Create arguments if necessary and place them into the appropriate aliased
@@ -620,23 +667,23 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
bitwise_cast<void*>(operationCreateArguments)),
GPRInfo::nonArgGPR0);
m_jit.call(GPRInfo::nonArgGPR0);
- m_jit.storePtr(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(argumentsRegister));
- m_jit.storePtr(
+ m_jit.store64(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(argumentsRegister));
+ m_jit.store64(
GPRInfo::returnValueGPR,
AssemblyHelpers::addressFor(unmodifiedArgumentsRegister(argumentsRegister)));
m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
}
- m_jit.loadPtr(AssemblyHelpers::addressFor(argumentsRegister), GPRInfo::regT0);
- m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
+ m_jit.load64(AssemblyHelpers::addressFor(argumentsRegister), GPRInfo::regT0);
+ m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
}
}
// 16) Load the result of the last bytecode operation into regT0.
- if (exit.m_lastSetOperand != std::numeric_limits<int>::max())
- m_jit.loadPtr(AssemblyHelpers::addressFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
-
+ for (size_t i = 0; i < exit.m_setOperands.size(); i++)
+ m_jit.load64(AssemblyHelpers::addressFor((VirtualRegister)exit.m_setOperands[i]), GPRInfo::cachedResultRegister);
+
// 17) Adjust the call frame pointer.
if (exit.m_codeOrigin.inlineCallFrame)
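
The storePtr-to-store64 churn in this file is mechanical, but the boxing arithmetic deserves a gloss. Under the 64-bit value encoding, an int32 is boxed by OR-ing in the TagTypeNumber pattern, and a raw double is boxed by adding 2^48 - which, mod 2^64, is the same as subtracting TagTypeNumber, explaining the sub64(tagTypeNumberRegister, ...) sequences above. A runnable sketch with the constants as this editor understands them (verify against JSC's JSValue.h):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static const uint64_t TagTypeNumber      = 0xffff000000000000ull;
    static const uint64_t DoubleEncodeOffset = 0x0001000000000000ull;

    static uint64_t boxInt32(int32_t v)
    {
        return TagTypeNumber | static_cast<uint32_t>(v); // or64 in the patch
    }

    static uint64_t boxDouble(double d)
    {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        return bits + DoubleEncodeOffset; // == bits - TagTypeNumber (mod 2^64)
    }

    int main()
    {
        assert(boxInt32(7) == (TagTypeNumber | 7));
        assert(boxDouble(1.5) == 0x3ff8000000000000ull + DoubleEncodeOffset);
        return 0;
    }
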
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.cpp b/Source/JavaScriptCore/dfg/DFGOperations.cpp
index db736feeb..11c2c1cef 100644
--- a/Source/JavaScriptCore/dfg/DFGOperations.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOperations.cpp
@@ -1059,35 +1059,38 @@ void DFG_OPERATION operationNotifyGlobalVarWrite(WatchpointSet* watchpointSet)
watchpointSet->notifyWrite();
}
-EncodedJSValue DFG_OPERATION operationResolve(ExecState* exec, Identifier* propertyName)
+EncodedJSValue DFG_OPERATION operationResolve(ExecState* exec, Identifier* propertyName, ResolveOperations* operations)
{
JSGlobalData* globalData = &exec->globalData();
NativeCallFrameTracer tracer(globalData, exec);
- return JSValue::encode(JSScope::resolve(exec, *propertyName));
+ return JSValue::encode(JSScope::resolve(exec, *propertyName, operations));
}
-EncodedJSValue DFG_OPERATION operationResolveBase(ExecState* exec, Identifier* propertyName)
+EncodedJSValue DFG_OPERATION operationResolveBase(ExecState* exec, Identifier* propertyName, ResolveOperations* operations, PutToBaseOperation* putToBaseOperations)
{
JSGlobalData* globalData = &exec->globalData();
NativeCallFrameTracer tracer(globalData, exec);
- return JSValue::encode(JSScope::resolveBase(exec, *propertyName, false));
+ return JSValue::encode(JSScope::resolveBase(exec, *propertyName, false, operations, putToBaseOperations));
}
-EncodedJSValue DFG_OPERATION operationResolveBaseStrictPut(ExecState* exec, Identifier* propertyName)
+EncodedJSValue DFG_OPERATION operationResolveBaseStrictPut(ExecState* exec, Identifier* propertyName, ResolveOperations* operations, PutToBaseOperation* putToBaseOperations)
{
JSGlobalData* globalData = &exec->globalData();
NativeCallFrameTracer tracer(globalData, exec);
- return JSValue::encode(JSScope::resolveBase(exec, *propertyName, true));
+ return JSValue::encode(JSScope::resolveBase(exec, *propertyName, true, operations, putToBaseOperations));
}
-EncodedJSValue DFG_OPERATION operationResolveGlobal(ExecState* exec, GlobalResolveInfo* resolveInfo, JSGlobalObject* globalObject, Identifier* propertyName)
+EncodedJSValue DFG_OPERATION operationResolveGlobal(ExecState* exec, ResolveOperation* resolveOperation, JSGlobalObject* globalObject, Identifier* propertyName)
{
JSGlobalData* globalData = &exec->globalData();
NativeCallFrameTracer tracer(globalData, exec);
-
- return JSValue::encode(JSScope::resolveGlobal(exec, *propertyName, globalObject, &resolveInfo->structure, &resolveInfo->offset));
+ ASSERT(globalObject);
+ UNUSED_PARAM(resolveOperation);
+ UNUSED_PARAM(globalObject);
+ ASSERT(resolveOperation->m_operation == ResolveOperation::GetAndReturnGlobalProperty);
+ return JSValue::encode(JSScope::resolveGlobal(exec, *propertyName, globalObject, resolveOperation));
}
EncodedJSValue DFG_OPERATION operationToPrimitive(ExecState* exec, EncodedJSValue value)
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.h b/Source/JavaScriptCore/dfg/DFGOperations.h
index b6530b755..8d2beacec 100644
--- a/Source/JavaScriptCore/dfg/DFGOperations.h
+++ b/Source/JavaScriptCore/dfg/DFGOperations.h
@@ -33,8 +33,6 @@
namespace JSC {
-struct GlobalResolveInfo;
-
namespace DFG {
extern "C" {
@@ -66,8 +64,10 @@ typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EAZ)(ExecState*, JSArray*,
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_ECC)(ExecState*, JSCell*, JSCell*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_ECI)(ExecState*, JSCell*, Identifier*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_ECJ)(ExecState*, JSCell*, EncodedJSValue);
-typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EGriJsgI)(ExecState*, GlobalResolveInfo*, JSGlobalObject*, Identifier*);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EGriJsgI)(ExecState*, ResolveOperation*, JSGlobalObject*, Identifier*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EI)(ExecState*, Identifier*);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EIRo)(ExecState*, Identifier*, ResolveOperations*);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EIRoPtbo)(ExecState*, Identifier*, ResolveOperations*, PutToBaseOperation*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EJ)(ExecState*, EncodedJSValue);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EJA)(ExecState*, EncodedJSValue, JSArray*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EJI)(ExecState*, EncodedJSValue, Identifier*);
@@ -131,10 +131,10 @@ EncodedJSValue DFG_OPERATION operationGetByIdOptimize(ExecState*, EncodedJSValue
EncodedJSValue DFG_OPERATION operationCallCustomGetter(ExecState*, JSCell*, PropertySlot::GetValueFunc, Identifier*) WTF_INTERNAL;
EncodedJSValue DFG_OPERATION operationCallGetter(ExecState*, JSCell*, JSCell*) WTF_INTERNAL;
void DFG_OPERATION operationNotifyGlobalVarWrite(WatchpointSet* watchpointSet) WTF_INTERNAL;
-EncodedJSValue DFG_OPERATION operationResolve(ExecState*, Identifier*) WTF_INTERNAL;
-EncodedJSValue DFG_OPERATION operationResolveBase(ExecState*, Identifier*) WTF_INTERNAL;
-EncodedJSValue DFG_OPERATION operationResolveBaseStrictPut(ExecState*, Identifier*) WTF_INTERNAL;
-EncodedJSValue DFG_OPERATION operationResolveGlobal(ExecState*, GlobalResolveInfo*, JSGlobalObject*, Identifier*) WTF_INTERNAL;
+EncodedJSValue DFG_OPERATION operationResolve(ExecState*, Identifier*, ResolveOperations*) WTF_INTERNAL;
+EncodedJSValue DFG_OPERATION operationResolveBase(ExecState*, Identifier*, ResolveOperations*, PutToBaseOperation*) WTF_INTERNAL;
+EncodedJSValue DFG_OPERATION operationResolveBaseStrictPut(ExecState*, Identifier*, ResolveOperations*, PutToBaseOperation*) WTF_INTERNAL;
+EncodedJSValue DFG_OPERATION operationResolveGlobal(ExecState*, ResolveOperation*, JSGlobalObject*, Identifier*) WTF_INTERNAL;
EncodedJSValue DFG_OPERATION operationToPrimitive(ExecState*, EncodedJSValue) WTF_INTERNAL;
EncodedJSValue DFG_OPERATION operationStrCat(ExecState*, void*, size_t) WTF_INTERNAL;
char* DFG_OPERATION operationNewArray(ExecState*, Structure*, void*, size_t) WTF_INTERNAL;
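
The operation typedef names encode the signature, one group per element: the leading J is the EncodedJSValue return, then E = ExecState*, I = Identifier*, Ro = ResolveOperations*, Ptbo = PutToBaseOperation* (a reading inferred from the surrounding typedefs, not documented anywhere). The two additions therefore pair up with the re-signatured operations like so (variable names here are hypothetical):

    // J_DFGOperation_EIRo:     EncodedJSValue (ExecState*, Identifier*, ResolveOperations*)
    // J_DFGOperation_EIRoPtbo: EncodedJSValue (ExecState*, Identifier*, ResolveOperations*,
    //                                          PutToBaseOperation*)
    J_DFGOperation_EIRo     resolveFn     = operationResolve;
    J_DFGOperation_EIRoPtbo resolveBaseFn = operationResolveBase;
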
diff --git a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
index d76fd8018..fee7a3ca2 100644
--- a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
@@ -116,7 +116,21 @@ private:
return false;
return !!m_graph.valueOfNumberConstant(nodeIndex);
}
-
+
+ SpeculatedType speculatedDoubleTypeForPrediction(SpeculatedType value)
+ {
+ if (!isNumberSpeculation(value))
+ return SpecDouble;
+ if (value & SpecDoubleNaN)
+ return SpecDouble;
+ return SpecDoubleReal;
+ }
+
+ SpeculatedType speculatedDoubleTypeForPredictions(SpeculatedType left, SpeculatedType right)
+ {
+ return speculatedDoubleTypeForPrediction(mergeSpeculations(left, right));
+ }
+
void propagate(Node& node)
{
if (!node.shouldGenerate())
@@ -248,7 +262,7 @@ private:
if (m_graph.addShouldSpeculateInteger(node))
changed |= mergePrediction(SpecInt32);
else
- changed |= mergePrediction(SpecDouble);
+ changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right));
} else if (!(left & SpecNumber) || !(right & SpecNumber)) {
// left or right is definitely something other than a number.
changed |= mergePrediction(SpecString);
@@ -272,7 +286,7 @@ private:
if (m_graph.addShouldSpeculateInteger(node))
changed |= mergePrediction(SpecInt32);
else
- changed |= mergePrediction(SpecDouble);
+ changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right));
}
if (isNotNegZero(node.child1().index()) || isNotNegZero(node.child2().index()))
@@ -291,7 +305,7 @@ private:
if (m_graph.addShouldSpeculateInteger(node))
changed |= mergePrediction(SpecInt32);
else
- changed |= mergePrediction(SpecDouble);
+ changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right));
}
if (isNotZero(node.child1().index()) || isNotZero(node.child2().index()))
@@ -307,7 +321,7 @@ private:
if (m_graph.negateShouldSpeculateInteger(node))
changed |= mergePrediction(SpecInt32);
else
- changed |= mergePrediction(SpecDouble);
+ changed |= mergePrediction(speculatedDoubleTypeForPrediction(m_graph[node.child1()].prediction()));
}
changed |= m_graph[node.child1()].mergeFlags(flags);
@@ -323,7 +337,7 @@ private:
&& nodeCanSpeculateInteger(node.arithNodeFlags()))
changed |= mergePrediction(SpecInt32);
else
- changed |= mergePrediction(SpecDouble);
+ changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right));
}
flags |= NodeUsedAsNumber;
@@ -340,7 +354,7 @@ private:
if (m_graph.mulShouldSpeculateInteger(node))
changed |= mergePrediction(SpecInt32);
else
- changed |= mergePrediction(SpecDouble);
+ changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right));
}
// As soon as a multiply happens, we can easily end up in the part
@@ -388,7 +402,7 @@ private:
if (nodeCanSpeculateInteger(node.arithNodeFlags()))
changed |= mergePrediction(child);
else
- changed |= setPrediction(SpecDouble);
+ changed |= setPrediction(speculatedDoubleTypeForPrediction(child));
flags &= ~NodeNeedsNegZero;
changed |= m_graph[node.child1()].mergeFlags(flags);
@@ -674,6 +688,7 @@ private:
case CheckNumber:
case CheckArgumentsNotCreated:
case GlobalVarWatchpoint:
+ case GarbageValue:
changed |= mergeDefaultFlags(node);
break;
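
The payoff of the new helpers is NaN precision: when neither input prediction includes SpecDoubleNaN, an arithmetic node can be predicted SpecDoubleReal instead of the wider SpecDouble, and later phases may keep assuming a NaN-free double. A self-contained model with made-up bit values (JSC's real SpeculatedType constants differ):

    #include <cassert>
    #include <cstdint>

    typedef uint32_t SpeculatedType;            // invented bit layout
    static const SpeculatedType SpecInt32      = 1u << 0;
    static const SpeculatedType SpecDoubleReal = 1u << 1;
    static const SpeculatedType SpecDoubleNaN  = 1u << 2;
    static const SpeculatedType SpecDouble     = SpecDoubleReal | SpecDoubleNaN;
    static const SpeculatedType SpecNumber     = SpecInt32 | SpecDouble;

    static bool isNumberSpeculation(SpeculatedType t) { return t && !(t & ~SpecNumber); }

    static SpeculatedType doubleTypeForPrediction(SpeculatedType value)
    {
        if (!isNumberSpeculation(value))
            return SpecDouble;                  // might not be a number at all
        if (value & SpecDoubleNaN)
            return SpecDouble;                  // NaN already observed
        return SpecDoubleReal;                  // provably NaN-free
    }

    int main()
    {
        assert(doubleTypeForPrediction(SpecInt32 | SpecDoubleReal) == SpecDoubleReal);
        assert(doubleTypeForPrediction(SpecDouble) == SpecDouble);
        return 0;
    }
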
diff --git a/Source/JavaScriptCore/dfg/DFGRepatch.cpp b/Source/JavaScriptCore/dfg/DFGRepatch.cpp
index 6fb185c12..531a525d5 100644
--- a/Source/JavaScriptCore/dfg/DFGRepatch.cpp
+++ b/Source/JavaScriptCore/dfg/DFGRepatch.cpp
@@ -192,7 +192,7 @@ static void generateProtoChainAccessStub(ExecState* exec, StructureStubInfo& stu
if (isInlineOffset(offset)) {
#if USE(JSVALUE64)
- stubJit.loadPtr(protoObject->locationForOffset(offset), resultGPR);
+ stubJit.load64(protoObject->locationForOffset(offset), resultGPR);
#elif USE(JSVALUE32_64)
stubJit.move(MacroAssembler::TrustedImmPtr(protoObject->locationForOffset(offset)), resultGPR);
stubJit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
@@ -201,7 +201,7 @@ static void generateProtoChainAccessStub(ExecState* exec, StructureStubInfo& stu
} else {
stubJit.loadPtr(protoObject->butterflyAddress(), resultGPR);
#if USE(JSVALUE64)
- stubJit.loadPtr(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>)), resultGPR);
+ stubJit.load64(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>)), resultGPR);
#elif USE(JSVALUE32_64)
stubJit.load32(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
stubJit.load32(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
@@ -263,7 +263,7 @@ static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier
failureCases.append(stubJit.branch32(MacroAssembler::LessThan, scratchGPR, MacroAssembler::TrustedImm32(0)));
#if USE(JSVALUE64)
- stubJit.orPtr(GPRInfo::tagTypeNumberRegister, scratchGPR, resultGPR);
+ stubJit.or64(GPRInfo::tagTypeNumberRegister, scratchGPR, resultGPR);
#elif USE(JSVALUE32_64)
stubJit.move(scratchGPR, resultGPR);
stubJit.move(JITCompiler::TrustedImm32(0xffffffff), resultTagGPR); // JSValue::Int32Tag
@@ -299,7 +299,7 @@ static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier
Structure* structure = baseCell->structure();
if (!slot.isCacheable())
return false;
- if (structure->isUncacheableDictionary() || structure->typeInfo().prohibitsPropertyCaching())
+ if (!structure->propertyAccessesAreCacheable())
return false;
// Optimize self access.
@@ -421,14 +421,14 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
ASSERT(baseGPR != scratchGPR);
if (isInlineOffset(slot.cachedOffset())) {
#if USE(JSVALUE64)
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
+ stubJit.load64(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
#else
stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
#endif
} else {
stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
#if USE(JSVALUE64)
- stubJit.loadPtr(MacroAssembler::Address(scratchGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
+ stubJit.load64(MacroAssembler::Address(scratchGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
#else
stubJit.load32(MacroAssembler::Address(scratchGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
#endif
@@ -465,7 +465,7 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
} else {
if (isInlineOffset(slot.cachedOffset())) {
#if USE(JSVALUE64)
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR);
+ stubJit.load64(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR);
#else
if (baseGPR == resultTagGPR) {
stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
@@ -478,7 +478,7 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
} else {
stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR);
#if USE(JSVALUE64)
- stubJit.loadPtr(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR);
+ stubJit.load64(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR);
#else
stubJit.load32(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
stubJit.load32(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
@@ -682,10 +682,10 @@ static void emitPutReplaceStub(
#if USE(JSVALUE64)
if (isInlineOffset(slot.cachedOffset()))
- stubJit.storePtr(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue)));
+ stubJit.store64(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue)));
else {
stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
- stubJit.storePtr(valueGPR, MacroAssembler::Address(scratchGPR, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue)));
+ stubJit.store64(valueGPR, MacroAssembler::Address(scratchGPR, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue)));
}
#elif USE(JSVALUE32_64)
if (isInlineOffset(slot.cachedOffset())) {
@@ -854,11 +854,11 @@ static void emitPutTransitionStub(
stubJit.storePtr(MacroAssembler::TrustedImmPtr(structure), MacroAssembler::Address(baseGPR, JSCell::structureOffset()));
#if USE(JSVALUE64)
if (isInlineOffset(slot.cachedOffset()))
- stubJit.storePtr(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue)));
+ stubJit.store64(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue)));
else {
if (!scratchGPR1HasStorage)
stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR1);
- stubJit.storePtr(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue)));
+ stubJit.store64(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue)));
}
#elif USE(JSVALUE32_64)
if (isInlineOffset(slot.cachedOffset())) {
diff --git a/Source/JavaScriptCore/dfg/DFGScratchRegisterAllocator.h b/Source/JavaScriptCore/dfg/DFGScratchRegisterAllocator.h
index 9a65e8b7d..706bcd61d 100644
--- a/Source/JavaScriptCore/dfg/DFGScratchRegisterAllocator.h
+++ b/Source/JavaScriptCore/dfg/DFGScratchRegisterAllocator.h
@@ -127,15 +127,20 @@ public:
{
unsigned count = 0;
for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
- if (m_usedRegisters.getGPRByIndex(i))
- jit.storePtr(GPRInfo::toRegister(i), scratchBuffer->m_buffer + (count++));
+ if (m_usedRegisters.getGPRByIndex(i)) {
+#if USE(JSVALUE64)
+ jit.store64(GPRInfo::toRegister(i), static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++));
+#else
+ jit.store32(GPRInfo::toRegister(i), static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++));
+#endif
+ }
if (scratchGPR == InvalidGPRReg && !m_lockedRegisters.getGPRByIndex(i) && !m_scratchRegisters.getGPRByIndex(i))
scratchGPR = GPRInfo::toRegister(i);
}
ASSERT(scratchGPR != InvalidGPRReg);
for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
if (m_usedRegisters.getFPRByIndex(i)) {
- jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->m_buffer + (count++)), scratchGPR);
+ jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)), scratchGPR);
jit.storeDouble(FPRInfo::toRegister(i), scratchGPR);
}
}
@@ -165,15 +170,20 @@ public:
unsigned count = m_usedRegisters.numberOfSetGPRs();
for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
if (m_usedRegisters.getFPRByIndex(i)) {
- jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->m_buffer + (count++)), scratchGPR);
+ jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)), scratchGPR);
jit.loadDouble(scratchGPR, FPRInfo::toRegister(i));
}
}
count = 0;
for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
- if (m_usedRegisters.getGPRByIndex(i))
- jit.loadPtr(scratchBuffer->m_buffer + (count++), GPRInfo::toRegister(i));
+ if (m_usedRegisters.getGPRByIndex(i)) {
+#if USE(JSVALUE64)
+ jit.load64(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++), GPRInfo::toRegister(i));
+#else
+ jit.load32(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++), GPRInfo::toRegister(i));
+#endif
+ }
}
}
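
The switch from m_buffer words to EncodedJSValue-typed slots makes the buffer stride 64 bits on both value representations, so the GPR loop and the FPR loop can share one indexing scheme; only the store width tracks the register width (store64 vs. store32 above). A small model of the layout assumption (little-endian, slots zeroed beforehand):

    #include <cstdint>
    #include <cstring>

    typedef uint64_t EncodedJSValue; // 64-bit slot on 64-bit and 32-bit builds alike

    // Slot i always lives at buffer + i regardless of GPR width; on a 32-bit
    // build only the low word is written, mirroring store32 in the patch.
    static void saveGPR(EncodedJSValue* buffer, unsigned slot, uintptr_t gprValue)
    {
        std::memcpy(&buffer[slot], &gprValue, sizeof gprValue);
    }
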
diff --git a/Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h b/Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h
index ab99b014d..bb04646bf 100644
--- a/Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h
+++ b/Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h
@@ -41,6 +41,7 @@ enum SilentSpillAction {
Store32Tag,
Store32Payload,
StorePtr,
+ Store64,
StoreDouble
};
@@ -61,6 +62,7 @@ enum SilentFillAction {
Load32Payload,
Load32PayloadBoxInt,
LoadPtr,
+ Load64,
LoadDouble,
LoadDoubleBoxDouble,
LoadJSUnboxDouble
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
index 850d5aa74..a9b91d046 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
@@ -163,7 +163,9 @@ void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecov
#endif
unsigned setLocalIndexInBlock = m_indexInBlock + 1;
-
+
+ OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
+
Node* setLocal = &at(m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock));
bool hadInt32ToDouble = false;
@@ -173,7 +175,7 @@ void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecov
}
if (setLocal->op() == Flush || setLocal->op() == Phantom)
setLocal = &at(m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock));
-
+
if (!!valueRecovery) {
if (hadInt32ToDouble)
ASSERT(at(setLocal->child1()).child1() == m_compileIndex);
@@ -188,16 +190,34 @@ void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecov
// We're at an inlined return. Use a backward speculation instead.
return;
}
+
+ exit.m_setOperands[0] = setLocal->local();
+ while (nextNode->codeOrigin == at(m_compileIndex).codeOrigin) {
+ ++setLocalIndexInBlock;
+ Node* nextSetLocal = nextNode;
+ if (nextSetLocal->op() == Int32ToDouble)
+ nextSetLocal = &at(m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock));
+
+ if (nextSetLocal->op() == Flush || nextSetLocal->op() == Phantom)
+ nextSetLocal = &at(m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock));
+
+ nextNode = &at(m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock + 1));
+ ASSERT(nextNode->op() != Jump || nextNode->codeOrigin != at(m_compileIndex).codeOrigin);
+ ASSERT(nextSetLocal->op() == SetLocal);
+ exit.m_setOperands.append(nextSetLocal->local());
+ }
+
ASSERT(nextNode->codeOrigin != at(m_compileIndex).codeOrigin);
-
- OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
+
exit.m_codeOrigin = nextNode->codeOrigin;
if (!valueRecovery)
return;
- exit.m_lastSetOperand = setLocal->local();
- exit.m_valueRecoveryOverride = adoptRef(
- new ValueRecoveryOverride(setLocal->local(), valueRecovery));
+
+ ASSERT(exit.m_setOperands.size() == 1);
+ for (size_t i = 0; i < exit.m_setOperands.size(); i++)
+ exit.m_valueRecoveryOverrides.append(adoptRef(new ValueRecoveryOverride(exit.m_setOperands[i], valueRecovery)));
+
}
JumpReplacementWatchpoint* SpeculativeJIT::forwardSpeculationWatchpoint(ExitKind kind)
@@ -417,7 +437,7 @@ void SpeculativeJIT::checkArray(Node& node)
MacroAssembler::Address(baseReg, JSCell::structureOffset()), tempGPR);
m_jit.load8(MacroAssembler::Address(tempGPR, Structure::indexingTypeOffset()), tempGPR);
speculationCheck(
- Uncountable, JSValueRegs(), NoNode,
+ BadIndexingType, JSValueSource::unboxedCell(baseReg), NoNode,
jumpSlowForUnwantedArrayMode(tempGPR, node.arrayMode()));
noResult(m_compileIndex);
@@ -515,7 +535,7 @@ void SpeculativeJIT::arrayify(Node& node, GPRReg baseReg, GPRReg propertyReg)
// Next check that the object does not intercept indexed accesses. If it does,
// then this mode won't work.
speculationCheck(
- Uncountable, JSValueRegs(), NoNode,
+ BadIndexingType, JSValueSource::unboxedCell(baseReg), NoNode,
m_jit.branchTest8(
MacroAssembler::NonZero,
MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
@@ -549,7 +569,7 @@ void SpeculativeJIT::arrayify(Node& node, GPRReg baseReg, GPRReg propertyReg)
m_jit.load8(
MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), structureGPR);
speculationCheck(
- Uncountable, JSValueRegs(), NoNode,
+ BadIndexingType, JSValueSource::unboxedCell(baseReg), NoNode,
jumpSlowForUnwantedArrayMode(structureGPR, desiredArrayMode));
done.link(&m_jit);
@@ -559,10 +579,17 @@ void SpeculativeJIT::arrayify(Node& node, GPRReg baseReg, GPRReg propertyReg)
void SpeculativeJIT::arrayify(Node& node)
{
ASSERT(modeIsSpecific(node.arrayMode()));
- ASSERT(!modeAlreadyChecked(m_state.forNode(node.child1()), node.arrayMode()));
SpeculateCellOperand base(this, node.child1());
+ if (modeAlreadyChecked(m_state.forNode(node.child1()), node.arrayMode())) {
+ GPRTemporary temp(this);
+ m_jit.loadPtr(
+ MacroAssembler::Address(base.gpr(), JSObject::butterflyOffset()), temp.gpr());
+ storageResult(temp.gpr(), m_compileIndex);
+ return;
+ }
+
if (!node.child2()) {
arrayify(node, base.gpr(), InvalidGPRReg);
return;
@@ -1686,14 +1713,14 @@ void SpeculativeJIT::checkArgumentTypes()
#if USE(JSVALUE64)
if (isInt32Speculation(predictedType))
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
else if (isBooleanSpeculation(predictedType)) {
GPRTemporary temp(this);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
+ m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
} else if (isCellSpeculation(predictedType))
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
#else
if (isInt32Speculation(predictedType))
speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
@@ -1953,10 +1980,10 @@ void SpeculativeJIT::compileValueToInt32(Node& node)
FPRTemporary tempFpr(this);
FPRReg fpr = tempFpr.fpr();
- JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
if (!isNumberSpeculation(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueRegs(gpr), node.child1().index(), m_jit.branchTestPtr(MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
+ speculationCheck(BadType, JSValueRegs(gpr), node.child1().index(), m_jit.branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
// First, if we get here we have a double encoded as a JSValue
m_jit.move(gpr, resultGpr);
@@ -2099,8 +2126,8 @@ void SpeculativeJIT::compileInt32ToDouble(Node& node)
ASSERT(isInt32Constant(node.child1().index()));
FPRTemporary result(this);
GPRTemporary temp(this);
- m_jit.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfNumberConstant(node.child1().index())))), temp.gpr());
- m_jit.movePtrToDouble(temp.gpr(), result.fpr());
+ m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(node.child1().index()))), temp.gpr());
+ m_jit.move64ToDouble(temp.gpr(), result.fpr());
doubleResult(result.fpr(), m_compileIndex);
return;
}
@@ -2124,13 +2151,13 @@ void SpeculativeJIT::compileInt32ToDouble(Node& node)
GPRReg tempGPR = temp.gpr();
FPRReg resultFPR = result.fpr();
- JITCompiler::Jump isInteger = m_jit.branchPtr(
+ JITCompiler::Jump isInteger = m_jit.branch64(
MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
if (!isNumberSpeculation(m_state.forNode(node.child1()).m_type)) {
speculationCheck(
BadType, JSValueRegs(op1GPR), node.child1(),
- m_jit.branchTestPtr(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
+ m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
}
m_jit.move(op1GPR, tempGPR);
@@ -2460,20 +2487,18 @@ void SpeculativeJIT::compileInstanceOfForObject(Node&, GPRReg valueReg, GPRReg p
MacroAssembler::Label loop(&m_jit);
m_jit.loadPtr(MacroAssembler::Address(scratchReg, JSCell::structureOffset()), scratchReg);
#if USE(JSVALUE64)
- m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset()), scratchReg);
+ m_jit.load64(MacroAssembler::Address(scratchReg, Structure::prototypeOffset()), scratchReg);
+ MacroAssembler::Jump isInstance = m_jit.branch64(MacroAssembler::Equal, scratchReg, prototypeReg);
+ m_jit.branchTest64(MacroAssembler::Zero, scratchReg, GPRInfo::tagMaskRegister).linkTo(loop, &m_jit);
#else
m_jit.load32(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), scratchReg);
-#endif
MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
-#if USE(JSVALUE64)
- m_jit.branchTestPtr(MacroAssembler::Zero, scratchReg, GPRInfo::tagMaskRegister).linkTo(loop, &m_jit);
-#else
m_jit.branchTest32(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
#endif
// No match - result is false.
#if USE(JSVALUE64)
- m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(false))), scratchReg);
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
#else
m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
#endif
@@ -2481,7 +2506,7 @@ void SpeculativeJIT::compileInstanceOfForObject(Node&, GPRReg valueReg, GPRReg p
isInstance.link(&m_jit);
#if USE(JSVALUE64)
- m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(true))), scratchReg);
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
#else
m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
#endif
@@ -2507,8 +2532,8 @@ void SpeculativeJIT::compileInstanceOf(Node& node)
#if USE(JSVALUE64)
GPRReg valueReg = value.gpr();
- MacroAssembler::Jump isCell = m_jit.branchTestPtr(MacroAssembler::Zero, valueReg, GPRInfo::tagMaskRegister);
- m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(false))), scratchReg);
+ MacroAssembler::Jump isCell = m_jit.branchTest64(MacroAssembler::Zero, valueReg, GPRInfo::tagMaskRegister);
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
#else
GPRReg valueTagReg = value.tagGPR();
GPRReg valueReg = value.payloadGPR();
@@ -3071,7 +3096,7 @@ bool SpeculativeJIT::compileStrictEqForConstant(Node& node, Edge value, JSValue
}
#if USE(JSVALUE64)
- branchPtr(condition, op1.gpr(), MacroAssembler::TrustedImmPtr(bitwise_cast<void*>(JSValue::encode(constant))), taken);
+ branch64(condition, op1.gpr(), MacroAssembler::TrustedImm64(JSValue::encode(constant)), taken);
#else
GPRReg payloadGPR = op1.payloadGPR();
GPRReg tagGPR = op1.tagGPR();
@@ -3101,8 +3126,8 @@ bool SpeculativeJIT::compileStrictEqForConstant(Node& node, Edge value, JSValue
#if USE(JSVALUE64)
GPRReg op1GPR = op1.gpr();
GPRReg resultGPR = result.gpr();
- m_jit.move(MacroAssembler::TrustedImmPtr(bitwise_cast<void*>(ValueFalse)), resultGPR);
- MacroAssembler::Jump notEqual = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, MacroAssembler::TrustedImmPtr(bitwise_cast<void*>(JSValue::encode(constant))));
+ m_jit.move(MacroAssembler::TrustedImm64(ValueFalse), resultGPR);
+ MacroAssembler::Jump notEqual = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, MacroAssembler::TrustedImm64(JSValue::encode(constant)));
m_jit.or32(MacroAssembler::TrustedImm32(1), resultGPR);
notEqual.link(&m_jit);
jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean);
@@ -3282,7 +3307,7 @@ void SpeculativeJIT::compileGetByValOnArguments(Node& node)
resultReg);
jsValueResult(resultTagReg, resultReg, m_compileIndex);
#else
- m_jit.loadPtr(
+ m_jit.load64(
MacroAssembler::BaseIndex(
scratchReg, resultReg, MacroAssembler::TimesEight,
CallFrame::thisArgumentOffset() * sizeof(Register) - sizeof(Register)),
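
The ImmPtr-to-Imm64 conversions in this file are mechanical, but the point is worth stating: an encoded JSValue and a raw double bit pattern are 64-bit integers, not pointers, so the new Imm64/move64ToDouble pairing says what the code means. In plain C++ the two halves of that pairing amount to bit-preserving copies (a sketch, not JSC's implementation):

```cpp
#include <cstdint>
#include <cstring>

uint64_t reinterpretDoubleToInt64(double d)
{
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof(bits)); // bit-preserving and strict-aliasing safe
    return bits;
}

double move64ToDouble(uint64_t bits)
{
    double d;
    std::memcpy(&d, &bits, sizeof(d));
    return d;
}
```
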
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
index 90b6d483a..3796cc704 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
@@ -71,6 +71,8 @@ private:
typedef JITCompiler::Imm32 Imm32;
typedef JITCompiler::TrustedImmPtr TrustedImmPtr;
typedef JITCompiler::ImmPtr ImmPtr;
+ typedef JITCompiler::TrustedImm64 TrustedImm64;
+ typedef JITCompiler::Imm64 Imm64;
// These constants are used to set priorities for spill order for
// the register allocator.
@@ -347,9 +349,11 @@ public:
ASSERT(info.gpr() == source);
if (registerFormat == DataFormatInteger)
spillAction = Store32Payload;
- else {
- ASSERT(registerFormat & DataFormatJS || registerFormat == DataFormatCell || registerFormat == DataFormatStorage);
+ else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
spillAction = StorePtr;
+ else {
+ ASSERT(registerFormat & DataFormatJS);
+ spillAction = Store64;
}
#elif USE(JSVALUE32_64)
if (registerFormat & DataFormatJS) {
@@ -414,7 +418,7 @@ public:
ASSERT(registerFormat == DataFormatJSDouble);
fillAction = LoadDoubleBoxDouble;
} else
- fillAction = LoadPtr;
+ fillAction = Load64;
#else
ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
if (node.hasConstant())
@@ -501,6 +505,11 @@ public:
case StorePtr:
m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()));
break;
+#if USE(JSVALUE64)
+ case Store64:
+ m_jit.store64(plan.gpr(), JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()));
+ break;
+#endif
case StoreDouble:
m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()));
break;
@@ -528,25 +537,25 @@ public:
break;
#if USE(JSVALUE64)
case SetTrustedJSConstant:
- m_jit.move(valueOfJSConstantAsImmPtr(plan.nodeIndex()).asTrustedImmPtr(), plan.gpr());
+ m_jit.move(valueOfJSConstantAsImm64(plan.nodeIndex()).asTrustedImm64(), plan.gpr());
break;
case SetJSConstant:
- m_jit.move(valueOfJSConstantAsImmPtr(plan.nodeIndex()), plan.gpr());
+ m_jit.move(valueOfJSConstantAsImm64(plan.nodeIndex()), plan.gpr());
break;
case SetDoubleConstant:
- m_jit.move(ImmPtr(bitwise_cast<void*>(valueOfNumberConstant(plan.nodeIndex()))), canTrample);
- m_jit.movePtrToDouble(canTrample, plan.fpr());
+ m_jit.move(Imm64(valueOfNumberConstant(plan.nodeIndex())), canTrample);
+ m_jit.move64ToDouble(canTrample, plan.fpr());
break;
case Load32PayloadBoxInt:
m_jit.load32(JITCompiler::payloadFor(at(plan.nodeIndex()).virtualRegister()), plan.gpr());
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, plan.gpr());
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
break;
case LoadDoubleBoxDouble:
- m_jit.loadPtr(JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()), plan.gpr());
- m_jit.subPtr(GPRInfo::tagTypeNumberRegister, plan.gpr());
+ m_jit.load64(JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()), plan.gpr());
+ m_jit.sub64(GPRInfo::tagTypeNumberRegister, plan.gpr());
break;
case LoadJSUnboxDouble:
- m_jit.loadPtr(JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()), canTrample);
+ m_jit.load64(JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()), canTrample);
unboxDouble(canTrample, plan.fpr());
break;
#else
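
LoadDoubleBoxDouble above is the boxing half of the double encoding: subtracting TagTypeNumber modulo 2^64 is the same as adding the 2^48 double-encode offset, and the matching unbox step adds it back. A sketch with JSC's usual constant:

```cpp
#include <cstdint>

static const uint64_t TagTypeNumber = 0xFFFF000000000000ull;

// sub64(tagTypeNumberRegister, reg) boxes raw double bits: mod 2^64 it equals
// adding the 0x0001000000000000 (2^48) double-encode offset. Adding the
// constant back unboxes.
uint64_t boxDoubleBits(uint64_t rawBits)   { return rawBits - TagTypeNumber; }
uint64_t unboxDoubleBits(uint64_t encoded) { return encoded + TagTypeNumber; }
```
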
@@ -578,6 +587,11 @@ public:
case LoadPtr:
m_jit.loadPtr(JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()), plan.gpr());
break;
+#if USE(JSVALUE64)
+ case Load64:
+ m_jit.load64(JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()), plan.gpr());
+ break;
+#endif
case LoadDouble:
m_jit.loadDouble(JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()), plan.fpr());
break;
@@ -752,10 +766,10 @@ public:
// We need to box int32 and cell values ...
// but on JSVALUE64 boxing a cell is a no-op!
if (spillFormat == DataFormatInteger)
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, reg);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, reg);
// Spill the value, and record it as spilled in its boxed form.
- m_jit.storePtr(reg, JITCompiler::addressFor(spillMe));
+ m_jit.store64(reg, JITCompiler::addressFor(spillMe));
info.spill(*m_stream, spillMe, (DataFormat)(spillFormat | DataFormatJS));
return;
#elif USE(JSVALUE32_64)
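
The comment above ("boxing a cell is a no-op") is the other half of the scheme: int32s are boxed by OR-ing in TagTypeNumber, while cells are encoded as their raw pointer with every tag bit clear, so the spill can store the register unchanged and just flag the format as DataFormatJS. Sketched:

```cpp
#include <cstdint>

static const uint64_t TagTypeNumber = 0xFFFF000000000000ull;

// or64(tagTypeNumberRegister, reg): turn a raw int32 into an encoded JSValue.
uint64_t boxInt32(int32_t value)
{
    return TagTypeNumber | static_cast<uint32_t>(value);
}

// Cells are encoded as the pointer itself (all tag bits zero), so "boxing"
// a cell is the identity.
uint64_t boxCell(void* cell) { return reinterpret_cast<uint64_t>(cell); }
```
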

@@ -830,6 +844,16 @@ public:
return &m_jit.codeBlock()->identifier(index);
}
+ ResolveOperations* resolveOperations(unsigned index)
+ {
+ return m_jit.codeBlock()->resolveOperations(index);
+ }
+
+ PutToBaseOperation* putToBaseOperation(unsigned index)
+ {
+ return m_jit.codeBlock()->putToBaseOperation(index);
+ }
+
// Spill all VirtualRegisters back to the JSStack.
void flushRegisters()
{
@@ -865,9 +889,9 @@ public:
#endif
#if USE(JSVALUE64)
- MacroAssembler::ImmPtr valueOfJSConstantAsImmPtr(NodeIndex nodeIndex)
+ MacroAssembler::Imm64 valueOfJSConstantAsImm64(NodeIndex nodeIndex)
{
- return MacroAssembler::ImmPtr(JSValue::encode(valueOfJSConstant(nodeIndex)));
+ return MacroAssembler::Imm64(JSValue::encode(valueOfJSConstant(nodeIndex)));
}
#endif
@@ -1209,6 +1233,16 @@ public:
m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier));
return appendCallWithExceptionCheckSetResult(operation, result);
}
+ JITCompiler::Call callOperation(J_DFGOperation_EIRo operation, GPRReg result, Identifier* identifier, ResolveOperations* operations)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier), TrustedImmPtr(operations));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EIRoPtbo operation, GPRReg result, Identifier* identifier, ResolveOperations* operations, PutToBaseOperation* putToBaseOperations)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier), TrustedImmPtr(operations), TrustedImmPtr(putToBaseOperations));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
JITCompiler::Call callOperation(J_DFGOperation_EA operation, GPRReg result, GPRReg arg1)
{
m_jit.setupArgumentsWithExecState(arg1);
@@ -1339,6 +1373,11 @@ public:
m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheckSetResult(operation, result);
}
+ JITCompiler::Call callOperation(J_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
+ {
+ m_jit.setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
JITCompiler::Call callOperation(S_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
{
m_jit.setupArgumentsWithExecState(arg1, arg2);
@@ -1354,14 +1393,19 @@ public:
m_jit.setupArgumentsWithExecState(arg1, arg2);
return appendCallWithExceptionCheckSetResult(operation, result);
}
+ JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, MacroAssembler::TrustedImm32 imm)
{
- m_jit.setupArgumentsWithExecState(arg1, MacroAssembler::TrustedImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm.m_value)))));
+ m_jit.setupArgumentsWithExecState(arg1, MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(imm.m_value))));
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, MacroAssembler::TrustedImm32 imm, GPRReg arg2)
{
- m_jit.setupArgumentsWithExecState(MacroAssembler::TrustedImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm.m_value)))), arg2);
+ m_jit.setupArgumentsWithExecState(MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(imm.m_value))), arg2);
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(J_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
@@ -1707,6 +1751,19 @@ public:
m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG imm, TrustedImm32(JSValue::Int32Tag), arg2Payload, arg2Tag);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
+
+ JITCompiler::Call callOperation(J_DFGOperation_EIRo operation, GPRReg resultTag, GPRReg resultPayload, Identifier* identifier, ResolveOperations* operations)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier), TrustedImmPtr(operations));
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+
+ JITCompiler::Call callOperation(J_DFGOperation_EIRoPtbo operation, GPRReg resultTag, GPRReg resultPayload, Identifier* identifier, ResolveOperations* operations, PutToBaseOperation* putToBaseOperations)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier), TrustedImmPtr(operations), TrustedImmPtr(putToBaseOperations));
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+
JITCompiler::Call callOperation(J_DFGOperation_ECJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload)
{
m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag);
@@ -2030,6 +2087,20 @@ public:
notTaken.link(&m_jit);
}
+#if USE(JSVALUE64)
+ template<typename T, typename U>
+ void branch64(JITCompiler::RelationalCondition cond, T left, U right, BlockIndex destination)
+ {
+ if (!haveEdgeCodeToEmit(destination))
+ return addBranch(m_jit.branch64(cond, left, right), destination);
+
+ JITCompiler::Jump notTaken = m_jit.branch64(JITCompiler::invert(cond), left, right);
+ emitEdgeCode(destination);
+ addBranch(m_jit.jump(), destination);
+ notTaken.link(&m_jit);
+ }
+#endif
+
template<typename T, typename U>
void branchPtr(JITCompiler::RelationalCondition cond, T left, U right, BlockIndex destination)
{
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
index 41fe8db0f..453851ba3 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
@@ -3879,7 +3879,7 @@ void SpeculativeJIT::compile(Node& node)
if (node.structureSet().size() == 1) {
speculationCheckWithConditionalDirection(
- BadCache, JSValueRegs(), NoNode,
+ BadCache, JSValueSource::unboxedCell(base.gpr()), NoNode,
m_jit.branchWeakPtr(
JITCompiler::NotEqual,
JITCompiler::Address(base.gpr(), JSCell::structureOffset()),
@@ -3896,7 +3896,7 @@ void SpeculativeJIT::compile(Node& node)
done.append(m_jit.branchWeakPtr(JITCompiler::Equal, structure.gpr(), node.structureSet()[i]));
speculationCheckWithConditionalDirection(
- BadCache, JSValueRegs(), NoNode,
+ BadCache, JSValueSource::unboxedCell(base.gpr()), NoNode,
m_jit.branchWeakPtr(
JITCompiler::NotEqual, structure.gpr(), node.structureSet().last()),
node.op() == ForwardCheckStructure);
@@ -3910,6 +3910,13 @@ void SpeculativeJIT::compile(Node& node)
case StructureTransitionWatchpoint:
case ForwardStructureTransitionWatchpoint: {
+ // There is a fascinating question here of what to do about array profiling.
+ // We *could* try to tell the OSR exit about where the base of the access is.
+ // The DFG will have kept it alive, though it may not be in a register, and
+ // we shouldn't really load it since that could be a waste. For now though,
+ // we'll just rely on the fact that a watchpoint firing is already quite
+ // a strong hint.
+
m_jit.addWeakReference(node.structure());
node.structure()->addTransitionWatchpoint(
speculationWatchpointWithConditionalDirection(
@@ -4288,7 +4295,8 @@ void SpeculativeJIT::compile(Node& node)
flushRegisters();
GPRResult resultPayload(this);
GPRResult2 resultTag(this);
- callOperation(operationResolve, resultTag.gpr(), resultPayload.gpr(), identifier(node.identifierNumber()));
+ ResolveOperationData& data = m_jit.graph().m_resolveOperationsData[node.resolveOperationsDataIndex()];
+ callOperation(operationResolve, resultTag.gpr(), resultPayload.gpr(), identifier(data.identifierNumber), resolveOperations(data.resolveOperationsIndex));
jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
break;
}
@@ -4297,7 +4305,8 @@ void SpeculativeJIT::compile(Node& node)
flushRegisters();
GPRResult resultPayload(this);
GPRResult2 resultTag(this);
- callOperation(operationResolveBase, resultTag.gpr(), resultPayload.gpr(), identifier(node.identifierNumber()));
+ ResolveOperationData& data = m_jit.graph().m_resolveOperationsData[node.resolveOperationsDataIndex()];
+ callOperation(operationResolveBase, resultTag.gpr(), resultPayload.gpr(), identifier(data.identifierNumber), resolveOperations(data.resolveOperationsIndex), putToBaseOperation(data.putToBaseOperationIndex));
jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
break;
}
@@ -4306,7 +4315,8 @@ void SpeculativeJIT::compile(Node& node)
flushRegisters();
GPRResult resultPayload(this);
GPRResult2 resultTag(this);
- callOperation(operationResolveBaseStrictPut, resultTag.gpr(), resultPayload.gpr(), identifier(node.identifierNumber()));
+ ResolveOperationData& data = m_jit.graph().m_resolveOperationsData[node.resolveOperationsDataIndex()];
+ callOperation(operationResolveBaseStrictPut, resultTag.gpr(), resultPayload.gpr(), identifier(data.identifierNumber), resolveOperations(data.resolveOperationsIndex), putToBaseOperation(data.putToBaseOperationIndex));
jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
break;
}
@@ -4323,18 +4333,18 @@ void SpeculativeJIT::compile(Node& node)
GPRReg resultPayloadGPR = resultPayload.gpr();
ResolveGlobalData& data = m_jit.graph().m_resolveGlobalData[node.resolveGlobalDataIndex()];
- GlobalResolveInfo* resolveInfoAddress = &(m_jit.codeBlock()->globalResolveInfo(data.resolveInfoIndex));
+ ResolveOperation* resolveOperationAddress = &(m_jit.codeBlock()->resolveOperations(data.resolveOperationsIndex)->data()[data.resolvePropertyIndex]);
// Check Structure of global object
m_jit.move(JITCompiler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)), globalObjectGPR);
- m_jit.move(JITCompiler::TrustedImmPtr(resolveInfoAddress), resolveInfoGPR);
- m_jit.loadPtr(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, structure)), resultPayloadGPR);
+ m_jit.move(JITCompiler::TrustedImmPtr(resolveOperationAddress), resolveInfoGPR);
+ m_jit.loadPtr(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(ResolveOperation, m_structure)), resultPayloadGPR);
JITCompiler::Jump structuresNotMatch = m_jit.branchPtr(JITCompiler::NotEqual, resultPayloadGPR, JITCompiler::Address(globalObjectGPR, JSCell::structureOffset()));
// Fast case
m_jit.loadPtr(JITCompiler::Address(globalObjectGPR, JSObject::butterflyOffset()), resultPayloadGPR);
- m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), resolveInfoGPR);
+ m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(ResolveOperation, m_offset)), resolveInfoGPR);
#if DFG_ENABLE(JIT_ASSERT)
JITCompiler::Jump isOutOfLine = m_jit.branch32(JITCompiler::GreaterThanOrEqual, resolveInfoGPR, TrustedImm32(firstOutOfLineOffset));
m_jit.breakpoint();
@@ -4739,6 +4749,11 @@ void SpeculativeJIT::compile(Node& node)
compileNewFunctionExpression(node);
break;
+ case GarbageValue:
+ // We should never get to the point of code emission for a GarbageValue.
+ CRASH();
+ break;
+
case ForceOSRExit: {
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
index daca71da7..42ab40341 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
@@ -55,21 +55,21 @@ GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat
}
if (isNumberConstant(nodeIndex)) {
JSValue jsValue = jsNumber(valueOfNumberConstant(nodeIndex));
- m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
+ m_jit.move(MacroAssembler::Imm64(JSValue::encode(jsValue)), gpr);
} else {
ASSERT(isJSConstant(nodeIndex));
JSValue jsValue = valueOfJSConstant(nodeIndex);
- m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsValue)), gpr);
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
}
} else if (info.spillFormat() == DataFormatInteger) {
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
// Tag it, since fillInteger() is used when we want a boxed integer.
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
} else {
ASSERT(info.spillFormat() == DataFormatJS || info.spillFormat() == DataFormatJSInteger);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
}
// Since we statically know that we're filling an integer, and values
@@ -133,8 +133,8 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex)
unlock(gpr);
} else if (isNumberConstant(nodeIndex)) {
FPRReg fpr = fprAllocate();
- m_jit.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfNumberConstant(nodeIndex)))), gpr);
- m_jit.movePtrToDouble(gpr, fpr);
+ m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(nodeIndex))), gpr);
+ m_jit.move64ToDouble(gpr, fpr);
unlock(gpr);
m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
@@ -144,7 +144,7 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex)
// FIXME: should not be reachable?
ASSERT(isJSConstant(nodeIndex));
JSValue jsValue = valueOfJSConstant(nodeIndex);
- m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsValue)), gpr);
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
info.fillJSValue(*m_stream, gpr, DataFormatJS);
unlock(gpr);
@@ -175,7 +175,7 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex)
ASSERT(spillFormat & DataFormatJS);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
info.fillJSValue(*m_stream, gpr, spillFormat);
unlock(gpr);
break;
@@ -200,7 +200,7 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex)
FPRReg fpr = fprAllocate();
GPRReg tempGpr = allocate(); // FIXME: can we skip this allocation on the last use of the virtual register?
- JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);
m_jit.jitAssertIsJSDouble(jsValueGpr);
@@ -279,15 +279,15 @@ GPRReg SpeculativeJIT::fillJSValue(NodeIndex nodeIndex)
if (isInt32Constant(nodeIndex)) {
info.fillJSValue(*m_stream, gpr, DataFormatJSInteger);
JSValue jsValue = jsNumber(valueOfInt32Constant(nodeIndex));
- m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
+ m_jit.move(MacroAssembler::Imm64(JSValue::encode(jsValue)), gpr);
} else if (isNumberConstant(nodeIndex)) {
info.fillJSValue(*m_stream, gpr, DataFormatJSDouble);
JSValue jsValue(JSValue::EncodeAsDouble, valueOfNumberConstant(nodeIndex));
- m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
+ m_jit.move(MacroAssembler::Imm64(JSValue::encode(jsValue)), gpr);
} else {
ASSERT(isJSConstant(nodeIndex));
JSValue jsValue = valueOfJSConstant(nodeIndex);
- m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsValue)), gpr);
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
info.fillJSValue(*m_stream, gpr, DataFormatJS);
}
@@ -297,13 +297,13 @@ GPRReg SpeculativeJIT::fillJSValue(NodeIndex nodeIndex)
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
if (spillFormat == DataFormatInteger) {
m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
spillFormat = DataFormatJSInteger;
} else {
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
if (spillFormat == DataFormatDouble) {
// Need to box the double, since we want a JSValue.
- m_jit.subPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ m_jit.sub64(GPRInfo::tagTypeNumberRegister, gpr);
spillFormat = DataFormatJSDouble;
} else
ASSERT(spillFormat & DataFormatJS);
@@ -319,11 +319,11 @@ GPRReg SpeculativeJIT::fillJSValue(NodeIndex nodeIndex)
// If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInteger, not DataFormatJSInteger.
if (m_gprs.isLocked(gpr)) {
GPRReg result = allocate();
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr, result);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr, result);
return result;
}
m_gprs.lock(gpr);
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
info.fillJSValue(*m_stream, gpr, DataFormatJSInteger);
return gpr;
}
@@ -408,8 +408,8 @@ void SpeculativeJIT::nonSpeculativeValueToNumber(Node& node)
GPRReg gpr = result.gpr();
op1.use();
- JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);
- JITCompiler::Jump nonNumeric = m_jit.branchTestPtr(MacroAssembler::Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump nonNumeric = m_jit.branchTest64(MacroAssembler::Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister);
// First, if we get here we have a double encoded as a JSValue
m_jit.move(jsValueGpr, gpr);
@@ -417,7 +417,7 @@ void SpeculativeJIT::nonSpeculativeValueToNumber(Node& node)
// Finally, handle integers.
isInteger.link(&m_jit);
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, jsValueGpr, gpr);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, jsValueGpr, gpr);
hasUnboxedDouble.link(&m_jit);
addSlowPathGenerator(adoptPtr(new ValueToNumberSlowPathGenerator(nonNumeric, this, gpr, jsValueGpr)));
@@ -459,7 +459,7 @@ void SpeculativeJIT::nonSpeculativeValueToInt32(Node& node)
GPRReg resultGPR = result.gpr();
op1.use();
- JITCompiler::Jump isNotInteger = m_jit.branchPtr(MacroAssembler::Below, jsValueGpr, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump isNotInteger = m_jit.branch64(MacroAssembler::Below, jsValueGpr, GPRInfo::tagTypeNumberRegister);
m_jit.zeroExtend32ToPtr(jsValueGpr, resultGPR);
@@ -486,7 +486,7 @@ void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node& node)
positive.link(&m_jit);
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, op1.gpr(), result.gpr());
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, op1.gpr(), result.gpr());
done.link(&m_jit);
@@ -500,7 +500,7 @@ void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg
JITCompiler::ConvertibleLoadLabel propertyStorageLoad =
m_jit.convertibleLoadPtr(JITCompiler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR);
- JITCompiler::DataLabelCompact loadWithPatch = m_jit.loadPtrWithCompactAddressOffsetPatch(JITCompiler::Address(resultGPR, 0), resultGPR);
+ JITCompiler::DataLabelCompact loadWithPatch = m_jit.load64WithCompactAddressOffsetPatch(JITCompiler::Address(resultGPR, 0), resultGPR);
JITCompiler::Label doneLabel = m_jit.label();
@@ -536,7 +536,7 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg
JITCompiler::ConvertibleLoadLabel propertyStorageLoad =
m_jit.convertibleLoadPtr(JITCompiler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
- JITCompiler::DataLabel32 storeWithPatch = m_jit.storePtrWithAddressOffsetPatch(valueGPR, JITCompiler::Address(scratchGPR, 0));
+ JITCompiler::DataLabel32 storeWithPatch = m_jit.store64WithAddressOffsetPatch(valueGPR, JITCompiler::Address(scratchGPR, 0));
JITCompiler::Label doneLabel = m_jit.label();
@@ -588,7 +588,7 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool inv
JITCompiler::Jump notCell;
if (!isKnownCell(operand.index()))
- notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);
+ notCell = m_jit.branchTest64(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);
JITCompiler::Jump notMasqueradesAsUndefined;
if (m_jit.graph().globalObjectFor(m_jit.graph()[operand].codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
@@ -618,8 +618,8 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool inv
notCell.link(&m_jit);
m_jit.move(argGPR, resultGPR);
- m_jit.andPtr(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
- m_jit.comparePtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(ValueNull), resultGPR);
+ m_jit.and64(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
+ m_jit.compare64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(ValueNull), resultGPR);
done.link(&m_jit);
}
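
The and64/compare64 pair above relies on null and undefined differing only in TagBitUndefined: masking that bit off folds undefined onto null, so a single comparison against ValueNull accepts exactly those two values. With JSC's usual constants:

```cpp
#include <cstdint>

static const uint64_t TagBitTypeOther = 0x02;
static const uint64_t TagBitUndefined = 0x08;
static const uint64_t ValueNull       = TagBitTypeOther;                   // 0x02
static const uint64_t ValueUndefined  = TagBitTypeOther | TagBitUndefined; // 0x0a

// and64(~TagBitUndefined, reg) folds undefined onto null, so one branch64
// against ValueNull covers both.
bool isNullOrUndefined(uint64_t encoded)
{
    return (encoded & ~TagBitUndefined) == ValueNull;
}
```
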
@@ -652,7 +652,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, NodeIndex br
JITCompiler::Jump notCell;
if (!isKnownCell(operand.index()))
- notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);
+ notCell = m_jit.branchTest64(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);
if (m_jit.graph().globalObjectFor(m_jit.graph()[operand].codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
m_jit.graph().globalObjectFor(m_jit.graph()[operand].codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
@@ -676,8 +676,8 @@ void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, NodeIndex br
notCell.link(&m_jit);
m_jit.move(argGPR, resultGPR);
- m_jit.andPtr(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
- branchPtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull)), taken);
+ m_jit.and64(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
+ branch64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm64(ValueNull), taken);
}
jump(notTaken);
@@ -750,9 +750,9 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNo
arg2.use();
if (!isKnownInteger(node.child1().index()))
- slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
+ slowPath.append(m_jit.branch64(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
if (!isKnownInteger(node.child2().index()))
- slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
+ slowPath.append(m_jit.branch64(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
branch32(cond, arg1GPR, arg2GPR, taken);
@@ -833,9 +833,9 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler
arg2.use();
if (!isKnownInteger(node.child1().index()))
- slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
+ slowPath.append(m_jit.branch64(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
if (!isKnownInteger(node.child2().index()))
- slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
+ slowPath.append(m_jit.branch64(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
m_jit.compare32(cond, arg1GPR, arg2GPR, resultGPR);
m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
@@ -879,7 +879,7 @@ void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branch
if (isKnownCell(node.child1().index()) && isKnownCell(node.child2().index())) {
// see if we get lucky: if the arguments are cells and they reference the same
// cell, then they must be strictly equal.
- branchPtr(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);
+ branch64(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);
silentSpillAllRegisters(resultGPR);
callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR);
@@ -887,22 +887,22 @@ void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branch
branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR, taken);
} else {
- m_jit.orPtr(arg1GPR, arg2GPR, resultGPR);
+ m_jit.or64(arg1GPR, arg2GPR, resultGPR);
- JITCompiler::Jump twoCellsCase = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
+ JITCompiler::Jump twoCellsCase = m_jit.branchTest64(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
- JITCompiler::Jump leftOK = m_jit.branchPtr(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
- JITCompiler::Jump leftDouble = m_jit.branchTestPtr(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump leftOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump leftDouble = m_jit.branchTest64(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister);
leftOK.link(&m_jit);
- JITCompiler::Jump rightOK = m_jit.branchPtr(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
- JITCompiler::Jump rightDouble = m_jit.branchTestPtr(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump rightOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump rightDouble = m_jit.branchTest64(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister);
rightOK.link(&m_jit);
- branchPtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, taken);
+ branch64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, taken);
jump(notTaken, ForceJump);
twoCellsCase.link(&m_jit);
- branchPtr(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);
+ branch64(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);
leftDouble.link(&m_jit);
rightDouble.link(&m_jit);
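
The or64 at the top of the rewritten sequence is a classic tag trick: cells have a clear tag field, so OR-ing the two operands yields a value whose tag bits are zero exactly when both operands are cells, and a single branchTest64 dispatches the two-cells case. A sketch, assuming JSC's TagMask constant:

```cpp
#include <cstdint>

static const uint64_t TagMask = 0xFFFF000000000002ull;

// or64(arg1, arg2, result); branchTest64(Zero, result, tagMaskRegister):
// the OR has a clear tag field iff both encoded values are cells.
bool bothCells(uint64_t a, uint64_t b) { return ((a | b) & TagMask) == 0; }
```
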
@@ -934,9 +934,9 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node& node, bool invert)
// see if we get lucky: if the arguments are cells and they reference the same
// cell, then they must be strictly equal.
// FIXME: this should flush registers instead of silent spill/fill.
- JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1GPR, arg2GPR);
+ JITCompiler::Jump notEqualCase = m_jit.branch64(JITCompiler::NotEqual, arg1GPR, arg2GPR);
- m_jit.move(JITCompiler::TrustedImmPtr(JSValue::encode(jsBoolean(!invert))), resultGPR);
+ m_jit.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert))), resultGPR);
JITCompiler::Jump done = m_jit.jump();
@@ -946,33 +946,33 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node& node, bool invert)
callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR);
silentFillAllRegisters(resultGPR);
- m_jit.andPtr(JITCompiler::TrustedImm32(1), resultGPR);
+ m_jit.and64(JITCompiler::TrustedImm32(1), resultGPR);
m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);
done.link(&m_jit);
} else {
- m_jit.orPtr(arg1GPR, arg2GPR, resultGPR);
+ m_jit.or64(arg1GPR, arg2GPR, resultGPR);
JITCompiler::JumpList slowPathCases;
- JITCompiler::Jump twoCellsCase = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
+ JITCompiler::Jump twoCellsCase = m_jit.branchTest64(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
- JITCompiler::Jump leftOK = m_jit.branchPtr(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
- slowPathCases.append(m_jit.branchTestPtr(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister));
+ JITCompiler::Jump leftOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
+ slowPathCases.append(m_jit.branchTest64(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister));
leftOK.link(&m_jit);
- JITCompiler::Jump rightOK = m_jit.branchPtr(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
- slowPathCases.append(m_jit.branchTestPtr(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister));
+ JITCompiler::Jump rightOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
+ slowPathCases.append(m_jit.branchTest64(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister));
rightOK.link(&m_jit);
- m_jit.comparePtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, resultGPR);
+ m_jit.compare64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, resultGPR);
m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);
JITCompiler::Jump done = m_jit.jump();
twoCellsCase.link(&m_jit);
- slowPathCases.append(m_jit.branchPtr(JITCompiler::NotEqual, arg1GPR, arg2GPR));
+ slowPathCases.append(m_jit.branch64(JITCompiler::NotEqual, arg1GPR, arg2GPR));
- m_jit.move(JITCompiler::TrustedImmPtr(JSValue::encode(jsBoolean(!invert))), resultGPR);
+ m_jit.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert))), resultGPR);
addSlowPathGenerator(
adoptPtr(
@@ -1007,8 +1007,8 @@ void SpeculativeJIT::emitCall(Node& node)
int numPassedArgs = node.numChildren() - 1;
m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs + dummyThisArgument), callFramePayloadSlot(JSStack::ArgumentCount));
- m_jit.storePtr(GPRInfo::callFrameRegister, callFrameSlot(JSStack::CallerFrame));
- m_jit.storePtr(calleeGPR, callFrameSlot(JSStack::Callee));
+ m_jit.store64(GPRInfo::callFrameRegister, callFrameSlot(JSStack::CallerFrame));
+ m_jit.store64(calleeGPR, callFrameSlot(JSStack::Callee));
for (int i = 0; i < numPassedArgs; i++) {
Edge argEdge = m_jit.graph().m_varArgChildren[node.firstChild() + 1 + i];
@@ -1016,7 +1016,7 @@ void SpeculativeJIT::emitCall(Node& node)
GPRReg argGPR = arg.gpr();
use(argEdge);
- m_jit.storePtr(argGPR, argumentSlot(i + dummyThisArgument));
+ m_jit.store64(argGPR, argumentSlot(i + dummyThisArgument));
}
flushRegisters();
@@ -1025,16 +1025,17 @@ void SpeculativeJIT::emitCall(Node& node)
GPRReg resultGPR = result.gpr();
JITCompiler::DataLabelPtr targetToCheck;
- JITCompiler::Jump slowPath;
+ JITCompiler::JumpList slowPath;
CallBeginToken token;
m_jit.beginCall(node.codeOrigin, token);
m_jit.addPtr(TrustedImm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
- slowPath = m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(JSValue::encode(JSValue())));
+ slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(0)));
+
m_jit.loadPtr(MacroAssembler::Address(calleeGPR, OBJECT_OFFSETOF(JSFunction, m_scope)), resultGPR);
- m_jit.storePtr(resultGPR, MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain));
+ m_jit.store64(resultGPR, MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain));
CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin;
JITCompiler::Call fastCall = m_jit.nearCall();
@@ -1104,14 +1105,14 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat&
}
if (spillFormat == DataFormatInteger) {
m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
} else
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
info.fillJSValue(*m_stream, gpr, DataFormatJSInteger);
returnFormat = DataFormatJSInteger;
return gpr;
}
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
// Fill as JSValue, and fall through.
info.fillJSValue(*m_stream, gpr, DataFormatJSInteger);
@@ -1123,7 +1124,7 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat&
GPRReg gpr = info.gpr();
m_gprs.lock(gpr);
if (!isInt32Speculation(type))
- speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchPtr(MacroAssembler::Below, gpr, GPRInfo::tagTypeNumberRegister));
+ speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branch64(MacroAssembler::Below, gpr, GPRInfo::tagTypeNumberRegister));
info.fillJSValue(*m_stream, gpr, DataFormatJSInteger);
// If !strict we're done, return.
if (!strict) {
@@ -1223,8 +1224,8 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
if (isInt32Constant(nodeIndex)) {
FPRReg fpr = fprAllocate();
- m_jit.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(static_cast<double>(valueOfInt32Constant(nodeIndex))))), gpr);
- m_jit.movePtrToDouble(gpr, fpr);
+ m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(static_cast<double>(valueOfInt32Constant(nodeIndex)))), gpr);
+ m_jit.move64ToDouble(gpr, fpr);
unlock(gpr);
m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
@@ -1233,8 +1234,8 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
}
if (isNumberConstant(nodeIndex)) {
FPRReg fpr = fprAllocate();
- m_jit.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfNumberConstant(nodeIndex)))), gpr);
- m_jit.movePtrToDouble(gpr, fpr);
+ m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(nodeIndex))), gpr);
+ m_jit.move64ToDouble(gpr, fpr);
unlock(gpr);
m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
@@ -1270,7 +1271,7 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
ASSERT(spillFormat & DataFormatJS);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
info.fillJSValue(*m_stream, gpr, spillFormat);
unlock(gpr);
break;
@@ -1295,10 +1296,10 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
FPRReg fpr = fprAllocate();
GPRReg tempGpr = allocate();
- JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);
if (!isNumberSpeculation(type))
- speculationCheck(BadType, JSValueRegs(jsValueGpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister));
+ speculationCheck(BadType, JSValueRegs(jsValueGpr), nodeIndex, m_jit.branchTest64(MacroAssembler::Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister));
// First, if we get here we have a double encoded as a JSValue
m_jit.move(jsValueGpr, tempGpr);
@@ -1384,7 +1385,7 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex, bool isForwardSpec
JSValue jsValue = valueOfJSConstant(nodeIndex);
if (jsValue.isCell()) {
m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
- m_jit.move(MacroAssembler::TrustedImmPtr(jsValue.asCell()), gpr);
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
return gpr;
}
@@ -1393,11 +1394,11 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex, bool isForwardSpec
}
ASSERT(info.spillFormat() & DataFormatJS);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
info.fillJSValue(*m_stream, gpr, DataFormatJS);
if (!isCellSpeculation(type))
- speculationCheckWithConditionalDirection(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister), isForwardSpeculation);
+ speculationCheckWithConditionalDirection(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister), isForwardSpeculation);
info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
return gpr;
}
@@ -1413,7 +1414,7 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex, bool isForwardSpec
GPRReg gpr = info.gpr();
m_gprs.lock(gpr);
if (!isCellSpeculation(type))
- speculationCheckWithConditionalDirection(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister), isForwardSpeculation);
+ speculationCheckWithConditionalDirection(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister), isForwardSpeculation);
info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
return gpr;
}
@@ -1460,7 +1461,7 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
JSValue jsValue = valueOfJSConstant(nodeIndex);
if (jsValue.isBoolean()) {
m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
- m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsValue)), gpr);
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
return gpr;
}
@@ -1469,13 +1470,13 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
}
ASSERT(info.spillFormat() & DataFormatJS);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
info.fillJSValue(*m_stream, gpr, DataFormatJS);
if (!isBooleanSpeculation(type)) {
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
- speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
+ m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
+ speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTest64(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
+ m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
}
info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
return gpr;
@@ -1492,9 +1493,9 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
GPRReg gpr = info.gpr();
m_gprs.lock(gpr);
if (!isBooleanSpeculation(type)) {
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
- speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
+ m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
+ speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTest64(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
+ m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
}
info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
return gpr;
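
The xor64 sandwich around the speculation check is worth spelling out: XOR-ing with ValueFalse maps false to 0 and true to 1, so testing every bit except the lowest catches any non-boolean, and the second xor64 with ValueFalse restores the original value (compileLogicalNot below re-tags with ValueTrue instead, which also flips the answer). A sketch with JSC's boolean encodings:

```cpp
#include <cstdint>

// JSVALUE64 boolean encodings: TagBitTypeOther | TagBitBool (| 1 for true).
static const uint64_t ValueFalse = 0x06;
static const uint64_t ValueTrue  = 0x07;

// xor64(ValueFalse, reg) maps false -> 0 and true -> 1; any other encoding
// leaves a bit outside the low bit set, so branchTest64(NonZero, reg, ~1)
// fires the speculation check.
bool passesBooleanCheck(uint64_t encoded)
{
    return ((encoded ^ ValueFalse) & ~1ull) == 0;
}
```
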
@@ -1521,9 +1522,9 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
JITCompiler::Jump SpeculativeJIT::convertToDouble(GPRReg value, FPRReg result, GPRReg tmp)
{
- JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, value, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, value, GPRInfo::tagTypeNumberRegister);
- JITCompiler::Jump notNumber = m_jit.branchTestPtr(MacroAssembler::Zero, value, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump notNumber = m_jit.branchTest64(MacroAssembler::Zero, value, GPRInfo::tagTypeNumberRegister);
m_jit.move(value, tmp);
unboxDouble(tmp, result);
@@ -1590,7 +1591,7 @@ void SpeculativeJIT::compileObjectEquality(Node& node)
MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
}
- MacroAssembler::Jump falseCase = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR);
+ MacroAssembler::Jump falseCase = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, op2GPR);
m_jit.move(TrustedImm32(ValueTrue), resultGPR);
MacroAssembler::Jump done = m_jit.jump();
falseCase.link(&m_jit);
@@ -1638,7 +1639,7 @@ void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge r
// It seems that most of the time when programs do a == b where b may be either null/undefined
// or an object, b is usually an object. Balance the branches to make that case fast.
MacroAssembler::Jump rightNotCell =
- m_jit.branchTestPtr(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister);
+ m_jit.branchTest64(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister);
// We know that within this branch, rightChild must be a cell.
if (m_jit.graph().globalObjectFor(leftNode.codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
@@ -1668,7 +1669,7 @@ void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge r
// At this point we know that we can perform a straight-forward equality comparison on pointer
// values because both left and right are pointers to objects that have no special equality
// protocols.
- MacroAssembler::Jump falseCase = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR);
+ MacroAssembler::Jump falseCase = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, op2GPR);
MacroAssembler::Jump trueCase = m_jit.jump();
rightNotCell.link(&m_jit);
@@ -1677,13 +1678,13 @@ void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge r
// prove that it is either null or undefined.
if (!isOtherOrEmptySpeculation(m_state.forNode(rightChild).m_type & ~SpecCell)) {
m_jit.move(op2GPR, resultGPR);
- m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
+ m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
speculationCheck(
BadType, JSValueRegs(op2GPR), rightChild.index(),
- m_jit.branchPtr(
+ m_jit.branch64(
MacroAssembler::NotEqual, resultGPR,
- MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull))));
+ MacroAssembler::TrustedImm64(ValueNull)));
}
falseCase.link(&m_jit);
@@ -1737,7 +1738,7 @@ void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild
// It seems that most of the time when programs do a == b where b may be either null/undefined
// or an object, b is usually an object. Balance the branches to make that case fast.
MacroAssembler::Jump rightNotCell =
- m_jit.branchTestPtr(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister);
+ m_jit.branchTest64(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister);
// We know that within this branch, rightChild must be a cell.
if (m_jit.graph().globalObjectFor(branchNode.codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
@@ -1767,7 +1768,7 @@ void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild
// At this point we know that we can perform a straight-forward equality comparison on pointer
// values because both left and right are pointers to objects that have no special equality
// protocols.
- branchPtr(MacroAssembler::Equal, op1GPR, op2GPR, taken);
+ branch64(MacroAssembler::Equal, op1GPR, op2GPR, taken);
// We know that within this branch, rightChild must not be a cell. Check if that is enough to
// prove that it is either null or undefined.
@@ -1778,13 +1779,13 @@ void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild
rightNotCell.link(&m_jit);
m_jit.move(op2GPR, resultGPR);
- m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
+ m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
speculationCheck(
BadType, JSValueRegs(op2GPR), rightChild.index(),
- m_jit.branchPtr(
+ m_jit.branch64(
MacroAssembler::NotEqual, resultGPR,
- MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull))));
+ MacroAssembler::TrustedImm64(ValueNull)));
}
jump(notTaken);
@@ -1811,7 +1812,7 @@ void SpeculativeJIT::compileDoubleCompare(Node& node, MacroAssembler::DoubleCond
m_jit.move(TrustedImm32(ValueTrue), result.gpr());
MacroAssembler::Jump trueCase = m_jit.branchDouble(condition, op1.fpr(), op2.fpr());
- m_jit.xorPtr(TrustedImm32(true), result.gpr());
+ m_jit.xor64(TrustedImm32(true), result.gpr());
trueCase.link(&m_jit);
jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
@@ -1843,7 +1844,7 @@ void SpeculativeJIT::compileNonStringCellOrOtherLogicalNot(Edge nodeUse, bool ne
GPRReg valueGPR = value.gpr();
GPRReg resultGPR = result.gpr();
- MacroAssembler::Jump notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
+ MacroAssembler::Jump notCell = m_jit.branchTest64(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
if (m_jit.graph().globalObjectFor(m_jit.graph()[nodeUse.index()].codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
m_jit.graph().globalObjectFor(m_jit.graph()[nodeUse.index()].codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
@@ -1889,12 +1890,12 @@ void SpeculativeJIT::compileNonStringCellOrOtherLogicalNot(Edge nodeUse, bool ne
if (needSpeculationCheck) {
m_jit.move(valueGPR, resultGPR);
- m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
+ m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
- m_jit.branchPtr(
+ m_jit.branch64(
MacroAssembler::NotEqual,
resultGPR,
- MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull))));
+ MacroAssembler::TrustedImm64(ValueNull)));
}
m_jit.move(TrustedImm32(ValueTrue), resultGPR);
@@ -1937,7 +1938,7 @@ void SpeculativeJIT::compileLogicalNot(Node& node)
GPRTemporary result(this, value);
m_jit.move(value.gpr(), result.gpr());
- m_jit.xorPtr(TrustedImm32(true), result.gpr());
+ m_jit.xor64(TrustedImm32(true), result.gpr());
jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
return;
@@ -1947,9 +1948,9 @@ void SpeculativeJIT::compileLogicalNot(Node& node)
GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
m_jit.move(value.gpr(), result.gpr());
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr());
- speculationCheck(BadType, JSValueRegs(value.gpr()), node.child1(), m_jit.branchTestPtr(JITCompiler::NonZero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueTrue)), result.gpr());
+ m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr());
+ speculationCheck(BadType, JSValueRegs(value.gpr()), node.child1(), m_jit.branchTest64(JITCompiler::NonZero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
+ m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), result.gpr());
// If we add a DataFormatBool, we should use it here.
jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
@@ -1965,13 +1966,13 @@ void SpeculativeJIT::compileLogicalNot(Node& node)
arg1.use();
m_jit.move(arg1GPR, resultGPR);
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), resultGPR);
- JITCompiler::Jump slowCase = m_jit.branchTestPtr(JITCompiler::NonZero, resultGPR, TrustedImm32(static_cast<int32_t>(~1)));
+ m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), resultGPR);
+ JITCompiler::Jump slowCase = m_jit.branchTest64(JITCompiler::NonZero, resultGPR, TrustedImm32(static_cast<int32_t>(~1)));
addSlowPathGenerator(
slowPathCall(slowCase, this, dfgConvertJSValueToBoolean, resultGPR, arg1GPR));
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueTrue)), resultGPR);
+ m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), resultGPR);
jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly);
}
@@ -1982,7 +1983,7 @@ void SpeculativeJIT::emitNonStringCellOrOtherBranch(Edge nodeUse, BlockIndex tak
GPRReg valueGPR = value.gpr();
GPRReg scratchGPR = scratch.gpr();
- MacroAssembler::Jump notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
+ MacroAssembler::Jump notCell = m_jit.branchTest64(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
if (m_jit.graph().globalObjectFor(m_jit.graph()[nodeUse.index()].codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
m_jit.graph().globalObjectFor(m_jit.graph()[nodeUse.index()].codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
@@ -2020,8 +2021,8 @@ void SpeculativeJIT::emitNonStringCellOrOtherBranch(Edge nodeUse, BlockIndex tak
if (needSpeculationCheck) {
m_jit.move(valueGPR, scratchGPR);
- m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), scratchGPR);
- speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse.index(), m_jit.branchPtr(MacroAssembler::NotEqual, scratchGPR, MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull))));
+ m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), scratchGPR);
+ speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse.index(), m_jit.branch64(MacroAssembler::NotEqual, scratchGPR, MacroAssembler::TrustedImm64(ValueNull)));
}
jump(notTaken);
@@ -2078,8 +2079,8 @@ void SpeculativeJIT::emitBranch(Node& node)
branchTest32(condition, valueGPR, TrustedImm32(true), taken);
jump(notTaken);
} else {
- branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(false))), notTaken);
- branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(true))), taken);
+ branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken);
+ branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken);
speculationCheck(BadType, JSValueRegs(valueGPR), node.child1(), m_jit.jump());
}
@@ -2088,12 +2089,12 @@ void SpeculativeJIT::emitBranch(Node& node)
GPRTemporary result(this);
GPRReg resultGPR = result.gpr();
- branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImmPtr(JSValue::encode(jsNumber(0))), notTaken);
- branchPtr(MacroAssembler::AboveOrEqual, valueGPR, GPRInfo::tagTypeNumberRegister, taken);
+ branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(0))), notTaken);
+ branch64(MacroAssembler::AboveOrEqual, valueGPR, GPRInfo::tagTypeNumberRegister, taken);
if (!predictBoolean) {
- branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(false))), notTaken);
- branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(true))), taken);
+ branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken);
+ branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken);
}
value.use();
@@ -2115,8 +2116,8 @@ MacroAssembler::JumpList SpeculativeJIT::compileContiguousGetByVal(Node&, GPRReg
MacroAssembler::JumpList slowCases;
slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
- m_jit.loadPtr(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr), resultReg);
- slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, resultReg));
+ m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
+ slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, resultReg));
return slowCases;
}
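
Two details in this hunk: the load is now an explicit 8-byte load64 with TimesEight scaling (the JSValue slot size, rather than pointer-width ScalePtr), and the hole check is branchTest64, which works because the empty value encodes as all-zero bits. A plain-C++ sketch of the fast path's logic (structure and names ours, fixed-size storage for the demo):

    #include <cstdint>

    using EncodedJSValue = uint64_t; // 8-byte boxed value; all-zero bits mean "hole"

    struct Butterfly {
        uint32_t publicLength;     // stand-in for Butterfly::offsetOfPublicLength()
        EncodedJSValue vector[4];  // fixed size for the demo; real storage is variable-length
    };

    // Mirrors compileContiguousGetByVal: bounds check, 8-byte load, hole check.
    // Returns false for either slow case (out of bounds, or a zero/hole slot).
    static bool contiguousGetByVal(const Butterfly& storage, uint32_t index, EncodedJSValue& result)
    {
        if (index >= storage.publicLength)
            return false;                 // branch32(AboveOrEqual, ..., publicLength)
        result = storage.vector[index];   // load64(BaseIndex(storage, index, TimesEight))
        return result != 0;               // branchTest64(Zero, result) takes the slow path
    }

    int main()
    {
        Butterfly b = { 2, { 0x06 /* boxed false */, 0 /* hole */, 0, 0 } };
        EncodedJSValue v;
        bool fast = contiguousGetByVal(b, 0, v);   // true: in bounds, not a hole
        bool hole = contiguousGetByVal(b, 1, v);   // false: hole goes to the slow path
        return (fast && !hole) ? 0 : 1;
    }
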
@@ -2125,8 +2126,8 @@ MacroAssembler::JumpList SpeculativeJIT::compileArrayStorageGetByVal(Node&, GPRR
{
MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
- m_jit.loadPtr(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), resultReg);
- MacroAssembler::Jump hole = m_jit.branchTestPtr(MacroAssembler::Zero, resultReg);
+ m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), resultReg);
+ MacroAssembler::Jump hole = m_jit.branchTest64(MacroAssembler::Zero, resultReg);
MacroAssembler::JumpList slowCases;
slowCases.append(outOfBounds);
@@ -2158,7 +2159,7 @@ MacroAssembler::JumpList SpeculativeJIT::compileContiguousPutByVal(Node& node, G
inBounds.link(&m_jit);
}
- m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr));
+ m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
if (isInBoundsAccess(arrayMode))
return MacroAssembler::JumpList();
@@ -2184,9 +2185,9 @@ MacroAssembler::JumpList SpeculativeJIT::compileArrayStoragePutByVal(Node& node,
// profiling.
speculationCheck(
Uncountable, JSValueRegs(), NoNode,
- m_jit.branchTestPtr(MacroAssembler::Zero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))));
+ m_jit.branchTest64(MacroAssembler::Zero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))));
} else {
- MacroAssembler::Jump notHoleValue = m_jit.branchTestPtr(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ MacroAssembler::Jump notHoleValue = m_jit.branchTest64(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
if (isSlowPutAccess(arrayMode)) {
// This is sort of strange. If we wanted to optimize this code path, we would invert
// the above branch. But it's simply not worth it since this only happens if we're
@@ -2206,7 +2207,7 @@ MacroAssembler::JumpList SpeculativeJIT::compileArrayStoragePutByVal(Node& node,
}
// Store the value to the array.
- m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
return slowCases;
}
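
The same TimesEight/store64 pattern applies to ArrayStorage, with the extra wrinkle that m_numValuesInVector has to keep counting non-hole slots; that is why the emitted variants test the old slot with branchTest64 first, either exiting or taking a slow path when a hole is being filled, before the unconditional store64. A deliberately simplified composite of those paths (names ours; the real JIT defers the count update to the slow path):

    #include <cstdint>

    using EncodedJSValue = uint64_t; // 0 encodes a hole

    struct ArrayStorage {
        uint32_t vectorLength;
        uint32_t numValuesInVector;  // stand-in for m_numValuesInVector
        EncodedJSValue vector[4];    // stand-in for m_vector; fixed size for the demo
    };

    // Simplified in-bounds put: the 8-byte store itself is unconditional; the
    // test on the old slot exists only to keep the non-hole count correct.
    static void arrayStoragePutByVal(ArrayStorage& storage, uint32_t index, EncodedJSValue value)
    {
        if (!storage.vector[index])        // branchTest64(Zero/NonZero, m_vector[index])
            ++storage.numValuesInVector;   // a hole is being filled
        storage.vector[index] = value;     // store64(BaseIndex(..., TimesEight, m_vector))
    }

    int main()
    {
        ArrayStorage s = { 4, 0, { 0, 0, 0, 0 } };
        arrayStoragePutByVal(s, 1, 0x07); // boxed true into a hole
        return s.numValuesInVector == 1 ? 0 : 1;
    }
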
@@ -2270,7 +2271,7 @@ void SpeculativeJIT::compile(Node& node)
}
GPRTemporary result(this);
- m_jit.loadPtr(JITCompiler::addressFor(node.local()), result.gpr());
+ m_jit.load64(JITCompiler::addressFor(node.local()), result.gpr());
// Like jsValueResult, but don't useChildren - our children are phi nodes,
// and don't represent values within this dataflow with virtual registers.
@@ -2294,7 +2295,7 @@ void SpeculativeJIT::compile(Node& node)
case GetLocalUnlinked: {
GPRTemporary result(this);
- m_jit.loadPtr(JITCompiler::addressFor(node.unlinkedLocal()), result.gpr());
+ m_jit.load64(JITCompiler::addressFor(node.unlinkedLocal()), result.gpr());
jsValueResult(result.gpr(), m_compileIndex);
break;
@@ -2360,14 +2361,14 @@ void SpeculativeJIT::compile(Node& node)
if (isCellSpeculation(predictedType)) {
SpeculateCellOperand cell(this, node.child1());
GPRReg cellGPR = cell.gpr();
- m_jit.storePtr(cellGPR, JITCompiler::addressFor(node.local()));
+ m_jit.store64(cellGPR, JITCompiler::addressFor(node.local()));
noResult(m_compileIndex);
recordSetLocal(node.local(), ValueSource(CellInJSStack));
break;
}
if (isBooleanSpeculation(predictedType)) {
SpeculateBooleanOperand boolean(this, node.child1());
- m_jit.storePtr(boolean.gpr(), JITCompiler::addressFor(node.local()));
+ m_jit.store64(boolean.gpr(), JITCompiler::addressFor(node.local()));
noResult(m_compileIndex);
recordSetLocal(node.local(), ValueSource(BooleanInJSStack));
break;
@@ -2375,7 +2376,7 @@ void SpeculativeJIT::compile(Node& node)
}
JSValueOperand value(this, node.child1());
- m_jit.storePtr(value.gpr(), JITCompiler::addressFor(node.local()));
+ m_jit.store64(value.gpr(), JITCompiler::addressFor(node.local()));
noResult(m_compileIndex);
recordSetLocal(node.local(), ValueSource(ValueInJSStack));
@@ -2475,10 +2476,10 @@ void SpeculativeJIT::compile(Node& node)
case CheckNumber: {
if (!isNumberSpeculation(m_state.forNode(node.child1()).m_type)) {
JSValueOperand op1(this, node.child1());
- JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, op1.gpr(), GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, op1.gpr(), GPRInfo::tagTypeNumberRegister);
speculationCheck(
BadType, JSValueRegs(op1.gpr()), node.child1().index(),
- m_jit.branchTestPtr(MacroAssembler::Zero, op1.gpr(), GPRInfo::tagTypeNumberRegister));
+ m_jit.branchTest64(MacroAssembler::Zero, op1.gpr(), GPRInfo::tagTypeNumberRegister));
isInteger.link(&m_jit);
}
noResult(m_compileIndex);
@@ -2710,8 +2711,8 @@ void SpeculativeJIT::compile(Node& node)
speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
GPRTemporary result(this);
- m_jit.loadPtr(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr), result.gpr());
- speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::Zero, result.gpr()));
+ m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.gpr());
+ speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branchTest64(MacroAssembler::Zero, result.gpr()));
jsValueResult(result.gpr(), m_compileIndex);
break;
}
@@ -2755,8 +2756,8 @@ void SpeculativeJIT::compile(Node& node)
speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())));
GPRTemporary result(this);
- m_jit.loadPtr(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), result.gpr());
- speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::Zero, result.gpr()));
+ m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), result.gpr());
+ speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branchTest64(MacroAssembler::Zero, result.gpr()));
jsValueResult(result.gpr(), m_compileIndex);
break;
@@ -2900,7 +2901,7 @@ void SpeculativeJIT::compile(Node& node)
// Store the value to the array.
GPRReg propertyReg = property.gpr();
GPRReg valueReg = value.gpr();
- m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr));
+ m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
noResult(m_compileIndex);
break;
@@ -2951,7 +2952,7 @@ void SpeculativeJIT::compile(Node& node)
// Store the value to the array.
GPRReg propertyReg = property.gpr();
GPRReg valueReg = value.gpr();
- m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
noResult(m_compileIndex);
break;
@@ -3013,7 +3014,7 @@ void SpeculativeJIT::compile(Node& node)
MacroAssembler::Address(baseReg, OBJECT_OFFSETOF(Arguments, m_registers)),
scratchReg);
- m_jit.storePtr(
+ m_jit.store64(
valueReg,
MacroAssembler::BaseIndex(
scratchReg, scratch2Reg, MacroAssembler::TimesEight,
@@ -3139,10 +3140,10 @@ void SpeculativeJIT::compile(Node& node)
case Array::ArrayWithContiguousOutOfBounds: {
m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
- m_jit.storePtr(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr));
+ m_jit.store64(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
m_jit.add32(TrustedImm32(1), storageLengthGPR);
m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
addSlowPathGenerator(
slowPathCall(
@@ -3162,12 +3163,12 @@ void SpeculativeJIT::compile(Node& node)
MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
- m_jit.storePtr(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ m_jit.store64(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
m_jit.add32(TrustedImm32(1), storageLengthGPR);
m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
addSlowPathGenerator(
slowPathCall(
@@ -3208,19 +3209,19 @@ void SpeculativeJIT::compile(Node& node)
m_jit.sub32(TrustedImm32(1), storageLengthGPR);
m_jit.store32(
storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
- m_jit.loadPtr(
- MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr),
+ m_jit.load64(
+ MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight),
valueGPR);
// FIXME: This would not have to be here if changing the publicLength also zeroed the values between the old
// length and the new length.
- m_jit.storePtr(
- MacroAssembler::TrustedImmPtr(0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr));
- MacroAssembler::Jump slowCase = m_jit.branchTestPtr(MacroAssembler::Zero, valueGPR);
+ m_jit.store64(
+ MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
+ MacroAssembler::Jump slowCase = m_jit.branchTest64(MacroAssembler::Zero, valueGPR);
addSlowPathGenerator(
slowPathMove(
undefinedCase, this,
- MacroAssembler::TrustedImmPtr(JSValue::encode(jsUndefined())), valueGPR));
+ MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), valueGPR));
addSlowPathGenerator(
slowPathCall(
slowCase, this, operationArrayPopAndRecoverLength, valueGPR, baseGPR));
@@ -3241,18 +3242,18 @@ void SpeculativeJIT::compile(Node& node)
JITCompiler::JumpList slowCases;
slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset())));
- m_jit.loadPtr(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), valueGPR);
- slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, valueGPR));
+ m_jit.load64(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), valueGPR);
+ slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, valueGPR));
m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
- m_jit.storePtr(MacroAssembler::TrustedImmPtr(0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ m_jit.store64(MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
m_jit.sub32(MacroAssembler::TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
addSlowPathGenerator(
slowPathMove(
undefinedCase, this,
- MacroAssembler::TrustedImmPtr(JSValue::encode(jsUndefined())), valueGPR));
+ MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), valueGPR));
addSlowPathGenerator(
slowPathCall(
@@ -3345,7 +3346,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.move(op1.gpr(), result.gpr());
if (op1.format() == DataFormatInteger)
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, result.gpr());
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, result.gpr());
jsValueResult(result.gpr(), m_compileIndex);
break;
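
or64 with tagTypeNumberRegister is how JSVALUE64 boxes a raw int32: that register permanently holds TagTypeNumber (0xffff000000000000), every encoded number has some of those top bits set, and an int32 sits in the low 32 bits beneath a fully-set tag. A self-contained sketch (helper names ours):

    #include <cassert>
    #include <cstdint>

    static const uint64_t TagTypeNumber = 0xffff000000000000ull;

    static uint64_t boxInt32(int32_t i)
    {
        // or64(GPRInfo::tagTypeNumberRegister, reg): zero-extended int32 | tag.
        return TagTypeNumber | static_cast<uint32_t>(i);
    }

    static int32_t unboxInt32(uint64_t encoded)
    {
        return static_cast<int32_t>(encoded); // the low 32 bits
    }

    int main()
    {
        assert(unboxInt32(boxInt32(-1)) == -1);
        assert((boxInt32(42) & TagTypeNumber) == TagTypeNumber);
    }
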
@@ -3364,7 +3365,7 @@ void SpeculativeJIT::compile(Node& node)
if (!(m_state.forNode(node.child1()).m_type & ~(SpecNumber | SpecBoolean)))
m_jit.move(op1GPR, resultGPR);
else {
- MacroAssembler::Jump alreadyPrimitive = m_jit.branchTestPtr(MacroAssembler::NonZero, op1GPR, GPRInfo::tagMaskRegister);
+ MacroAssembler::Jump alreadyPrimitive = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagMaskRegister);
MacroAssembler::Jump notPrimitive = m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op1GPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->stringStructure.get()));
alreadyPrimitive.link(&m_jit);
@@ -3401,7 +3402,7 @@ void SpeculativeJIT::compile(Node& node)
for (unsigned operandIdx = 0; operandIdx < node.numChildren(); ++operandIdx) {
JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node.firstChild() + operandIdx]);
GPRReg opGPR = operand.gpr();
- m_jit.storePtr(opGPR, MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx));
+ m_jit.store64(opGPR, MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx));
}
// Yuck, we should *really* have a way of also returning the storageGPR. But
@@ -3432,7 +3433,7 @@ void SpeculativeJIT::compile(Node& node)
GPRReg opGPR = operand.gpr();
operand.use();
- m_jit.storePtr(opGPR, buffer + operandIdx);
+ m_jit.store64(opGPR, buffer + operandIdx);
}
flushRegisters();
@@ -3532,7 +3533,7 @@ void SpeculativeJIT::compile(Node& node)
GPRReg opGPR = operand.gpr();
operand.use();
- m_jit.storePtr(opGPR, buffer + operandIdx);
+ m_jit.store64(opGPR, buffer + operandIdx);
}
flushRegisters();
@@ -3577,8 +3578,8 @@ void SpeculativeJIT::compile(Node& node)
JSValue* data = m_jit.codeBlock()->constantBuffer(node.startConstant());
for (unsigned index = 0; index < node.numConstants(); ++index) {
- m_jit.storePtr(
- ImmPtr(bitwise_cast<void*>(JSValue::encode(data[index]))),
+ m_jit.store64(
+ Imm64(JSValue::encode(data[index])),
MacroAssembler::Address(storageGPR, sizeof(JSValue) * index));
}
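
Imm64(JSValue::encode(...)) replaces the old ImmPtr plus bitwise_cast dance; a boxed value is just a 64-bit integer, so it can be materialized directly. For context, a sketch of how JSVALUE64 keeps doubles representable alongside pointers by offsetting their bit pattern (DoubleEncodeOffset = 1 << 48, per JSValue.h; helper names ours):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static const uint64_t DoubleEncodeOffset = 1ull << 48;

    static uint64_t encodeDouble(double d)
    {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        return bits + DoubleEncodeOffset; // shifts doubles out of the pointer range
    }

    static double decodeDouble(uint64_t encoded)
    {
        uint64_t bits = encoded - DoubleEncodeOffset;
        double d;
        std::memcpy(&d, &bits, sizeof d);
        return d;
    }

    int main()
    {
        assert(decodeDouble(encodeDouble(3.25)) == 3.25);
    }
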
@@ -3622,8 +3623,8 @@ void SpeculativeJIT::compile(Node& node)
if (!isOtherSpeculation(m_state.forNode(node.child1()).m_type)) {
m_jit.move(thisValueGPR, scratchGPR);
- m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), scratchGPR);
- speculationCheck(BadType, JSValueRegs(thisValueGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, scratchGPR, MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull))));
+ m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), scratchGPR);
+ speculationCheck(BadType, JSValueRegs(thisValueGPR), node.child1(), m_jit.branch64(MacroAssembler::NotEqual, scratchGPR, MacroAssembler::TrustedImm64(ValueNull)));
}
m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.globalThisObjectFor(node.codeOrigin)), scratchGPR);
@@ -3723,7 +3724,7 @@ void SpeculativeJIT::compile(Node& node)
if (checkTopLevel && skip--) {
JITCompiler::Jump activationNotCreated;
if (checkTopLevel)
- activationNotCreated = m_jit.branchTestPtr(JITCompiler::Zero, JITCompiler::addressFor(static_cast<VirtualRegister>(m_jit.codeBlock()->activationRegister())));
+ activationNotCreated = m_jit.branchTest64(JITCompiler::Zero, JITCompiler::addressFor(static_cast<VirtualRegister>(m_jit.codeBlock()->activationRegister())));
m_jit.loadPtr(JITCompiler::Address(resultGPR, JSScope::offsetOfNext()), resultGPR);
activationNotCreated.link(&m_jit);
}
@@ -3749,7 +3750,7 @@ void SpeculativeJIT::compile(Node& node)
GPRReg registersGPR = registers.gpr();
GPRReg resultGPR = result.gpr();
- m_jit.loadPtr(JITCompiler::Address(registersGPR, node.varNumber() * sizeof(Register)), resultGPR);
+ m_jit.load64(JITCompiler::Address(registersGPR, node.varNumber() * sizeof(Register)), resultGPR);
jsValueResult(resultGPR, m_compileIndex);
break;
}
@@ -3764,7 +3765,7 @@ void SpeculativeJIT::compile(Node& node)
GPRReg valueGPR = value.gpr();
GPRReg scratchGPR = scratchRegister.gpr();
- m_jit.storePtr(valueGPR, JITCompiler::Address(registersGPR, node.varNumber() * sizeof(Register)));
+ m_jit.store64(valueGPR, JITCompiler::Address(registersGPR, node.varNumber() * sizeof(Register)));
writeBarrier(scopeGPR, valueGPR, node.child3(), WriteBarrierForVariableAccess, scratchGPR);
noResult(m_compileIndex);
break;
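
GetScopedVar/PutScopedVar address the activation's register file directly: each variable occupies one 8-byte Register, so slot n sits at varNumber * sizeof(Register) and the accesses become plain load64/store64. A sketch (write barrier elided; names ours):

    #include <cstdint>

    using EncodedJSValue = uint64_t;
    using Register = EncodedJSValue; // each scoped variable is one 8-byte Register

    static EncodedJSValue getScopedVar(const Register* registers, int varNumber)
    {
        return registers[varNumber]; // load64(Address(registersGPR, varNumber * sizeof(Register)))
    }

    static void putScopedVar(Register* registers, int varNumber, EncodedJSValue value)
    {
        registers[varNumber] = value; // store64(...); write barrier elided in this sketch
    }

    int main()
    {
        Register regs[3] = { 0, 0, 0 };
        putScopedVar(regs, 2, 0x0a /* boxed undefined */);
        return getScopedVar(regs, 2) == 0x0a ? 0 : 1;
    }
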
@@ -3798,7 +3799,7 @@ void SpeculativeJIT::compile(Node& node)
base.use();
- JITCompiler::Jump notCell = m_jit.branchTestPtr(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
+ JITCompiler::Jump notCell = m_jit.branchTest64(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
cachedGetById(node.codeOrigin, baseGPR, resultGPR, node.identifierNumber(), notCell);
@@ -3840,7 +3841,7 @@ void SpeculativeJIT::compile(Node& node)
base.use();
flushRegisters();
- JITCompiler::Jump notCell = m_jit.branchTestPtr(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
+ JITCompiler::Jump notCell = m_jit.branchTest64(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
cachedGetById(node.codeOrigin, baseGPR, resultGPR, node.identifierNumber(), notCell, DontSpill);
@@ -3874,7 +3875,7 @@ void SpeculativeJIT::compile(Node& node)
if (node.structureSet().size() == 1) {
speculationCheckWithConditionalDirection(
- BadCache, JSValueRegs(), NoNode,
+ BadCache, JSValueRegs(base.gpr()), NoNode,
m_jit.branchWeakPtr(
JITCompiler::NotEqual,
JITCompiler::Address(base.gpr(), JSCell::structureOffset()),
@@ -3891,7 +3892,7 @@ void SpeculativeJIT::compile(Node& node)
done.append(m_jit.branchWeakPtr(JITCompiler::Equal, structure.gpr(), node.structureSet()[i]));
speculationCheckWithConditionalDirection(
- BadCache, JSValueRegs(), NoNode,
+ BadCache, JSValueRegs(base.gpr()), NoNode,
m_jit.branchWeakPtr(
JITCompiler::NotEqual, structure.gpr(), node.structureSet().last()),
node.op() == ForwardCheckStructure);
@@ -3905,6 +3906,13 @@ void SpeculativeJIT::compile(Node& node)
case StructureTransitionWatchpoint:
case ForwardStructureTransitionWatchpoint: {
+ // There is a fascinating question here of what to do about array profiling.
+ // We *could* try to tell the OSR exit about where the base of the access is.
+ // The DFG will have kept it alive, though it may not be in a register, and
+ // we shouldn't really load it since that could be a waste. For now though,
+ // we'll just rely on the fact that when a watchpoint fires then that's
+ // quite a hint already.
+
m_jit.addWeakReference(node.structure());
node.structure()->addTransitionWatchpoint(
speculationWatchpointWithConditionalDirection(
@@ -3988,7 +3996,7 @@ void SpeculativeJIT::compile(Node& node)
StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node.storageAccessDataIndex()];
- m_jit.loadPtr(JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue)), resultGPR);
+ m_jit.load64(JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue)), resultGPR);
jsValueResult(resultGPR, m_compileIndex);
break;
@@ -4010,7 +4018,7 @@ void SpeculativeJIT::compile(Node& node)
StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node.storageAccessDataIndex()];
- m_jit.storePtr(valueGPR, JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue)));
+ m_jit.store64(valueGPR, JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue)));
noResult(m_compileIndex);
break;
@@ -4055,7 +4063,7 @@ void SpeculativeJIT::compile(Node& node)
case GetGlobalVar: {
GPRTemporary result(this);
- m_jit.loadPtr(node.registerPointer(), result.gpr());
+ m_jit.load64(node.registerPointer(), result.gpr());
jsValueResult(result.gpr(), m_compileIndex);
break;
@@ -4071,7 +4079,7 @@ void SpeculativeJIT::compile(Node& node)
writeBarrier(m_jit.globalObjectFor(node.codeOrigin), value.gpr(), node.child1(), WriteBarrierForVariableAccess, scratchReg);
}
- m_jit.storePtr(value.gpr(), node.registerPointer());
+ m_jit.store64(value.gpr(), node.registerPointer());
noResult(m_compileIndex);
break;
@@ -4097,7 +4105,7 @@ void SpeculativeJIT::compile(Node& node)
writeBarrier(m_jit.globalObjectFor(node.codeOrigin), value.gpr(), node.child1(), WriteBarrierForVariableAccess, scratchReg);
}
- m_jit.storePtr(value.gpr(), node.registerPointer());
+ m_jit.store64(value.gpr(), node.registerPointer());
noResult(m_compileIndex);
break;
@@ -4111,10 +4119,10 @@ void SpeculativeJIT::compile(Node& node)
#if DFG_ENABLE(JIT_ASSERT)
GPRTemporary scratch(this);
GPRReg scratchGPR = scratch.gpr();
- m_jit.loadPtr(node.registerPointer(), scratchGPR);
- JITCompiler::Jump ok = m_jit.branchPtr(
+ m_jit.load64(node.registerPointer(), scratchGPR);
+ JITCompiler::Jump ok = m_jit.branch64(
JITCompiler::Equal, scratchGPR,
- TrustedImmPtr(bitwise_cast<void*>(JSValue::encode(node.registerPointer()->get()))));
+ TrustedImm64(JSValue::encode(node.registerPointer()->get())));
m_jit.breakpoint();
ok.link(&m_jit);
#endif
@@ -4144,9 +4152,9 @@ void SpeculativeJIT::compile(Node& node)
JSValueOperand value(this, node.child1());
GPRTemporary result(this);
- JITCompiler::Jump isCell = m_jit.branchTestPtr(JITCompiler::Zero, value.gpr(), GPRInfo::tagMaskRegister);
+ JITCompiler::Jump isCell = m_jit.branchTest64(JITCompiler::Zero, value.gpr(), GPRInfo::tagMaskRegister);
- m_jit.comparePtr(JITCompiler::Equal, value.gpr(), TrustedImm32(ValueUndefined), result.gpr());
+ m_jit.compare64(JITCompiler::Equal, value.gpr(), TrustedImm32(ValueUndefined), result.gpr());
JITCompiler::Jump done = m_jit.jump();
isCell.link(&m_jit);
@@ -4183,8 +4191,8 @@ void SpeculativeJIT::compile(Node& node)
GPRTemporary result(this, value);
m_jit.move(value.gpr(), result.gpr());
- m_jit.xorPtr(JITCompiler::TrustedImm32(ValueFalse), result.gpr());
- m_jit.testPtr(JITCompiler::Zero, result.gpr(), JITCompiler::TrustedImm32(static_cast<int32_t>(~1)), result.gpr());
+ m_jit.xor64(JITCompiler::TrustedImm32(ValueFalse), result.gpr());
+ m_jit.test64(JITCompiler::Zero, result.gpr(), JITCompiler::TrustedImm32(static_cast<int32_t>(~1)), result.gpr());
m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
break;
@@ -4194,7 +4202,7 @@ void SpeculativeJIT::compile(Node& node)
JSValueOperand value(this, node.child1());
GPRTemporary result(this, value);
- m_jit.testPtr(JITCompiler::NonZero, value.gpr(), GPRInfo::tagTypeNumberRegister, result.gpr());
+ m_jit.test64(JITCompiler::NonZero, value.gpr(), GPRInfo::tagTypeNumberRegister, result.gpr());
m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
break;
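
The two hunks above are pure bit tests against the 64-bit encoding. For IsBoolean, value ^ ValueFalse maps ValueFalse (0x06) and ValueTrue (0x07) to 0 and 1, so test64 against ~1 answers "is boolean" in one step; for IsNumber, every encoded number (int32 or offset double) has at least one TagTypeNumber bit set. The resulting 0/1 is then re-boxed with or32(ValueFalse). A sketch of both predicates (constants from JSC's 64-bit encoding):

    #include <cassert>
    #include <cstdint>

    static const uint64_t TagTypeNumber = 0xffff000000000000ull;
    static const uint64_t ValueFalse    = 0x06;
    static const uint64_t ValueTrue     = 0x07;
    static const uint64_t ValueNull     = 0x02;

    static bool isBoolean(uint64_t v)
    {
        // xor64(ValueFalse) then test64(Zero, ..., ~1): true iff v is 0x06 or 0x07.
        return ((v ^ ValueFalse) & ~1ull) == 0;
    }

    static bool isNumber(uint64_t v)
    {
        // test64(NonZero, v, tagTypeNumberRegister)
        return (v & TagTypeNumber) != 0;
    }

    int main()
    {
        assert(isBoolean(ValueFalse) && isBoolean(ValueTrue) && !isBoolean(ValueNull));
        assert(isNumber(TagTypeNumber | 42) && !isNumber(ValueTrue));
    }
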
@@ -4204,7 +4212,7 @@ void SpeculativeJIT::compile(Node& node)
JSValueOperand value(this, node.child1());
GPRTemporary result(this, value);
- JITCompiler::Jump isNotCell = m_jit.branchTestPtr(JITCompiler::NonZero, value.gpr(), GPRInfo::tagMaskRegister);
+ JITCompiler::Jump isNotCell = m_jit.branchTest64(JITCompiler::NonZero, value.gpr(), GPRInfo::tagMaskRegister);
m_jit.loadPtr(JITCompiler::Address(value.gpr(), JSCell::structureOffset()), result.gpr());
m_jit.compare8(JITCompiler::Equal, JITCompiler::Address(result.gpr(), Structure::typeInfoTypeOffset()), TrustedImm32(StringType), result.gpr());
@@ -4263,7 +4271,8 @@ void SpeculativeJIT::compile(Node& node)
case Resolve: {
flushRegisters();
GPRResult result(this);
- callOperation(operationResolve, result.gpr(), identifier(node.identifierNumber()));
+ ResolveOperationData& data = m_jit.graph().m_resolveOperationsData[node.resolveOperationsDataIndex()];
+ callOperation(operationResolve, result.gpr(), identifier(data.identifierNumber), resolveOperations(data.resolveOperationsIndex));
jsValueResult(result.gpr(), m_compileIndex);
break;
}
@@ -4271,7 +4280,8 @@ void SpeculativeJIT::compile(Node& node)
case ResolveBase: {
flushRegisters();
GPRResult result(this);
- callOperation(operationResolveBase, result.gpr(), identifier(node.identifierNumber()));
+ ResolveOperationData& data = m_jit.graph().m_resolveOperationsData[node.resolveOperationsDataIndex()];
+ callOperation(operationResolveBase, result.gpr(), identifier(data.identifierNumber), resolveOperations(data.resolveOperationsIndex), putToBaseOperation(data.putToBaseOperationIndex));
jsValueResult(result.gpr(), m_compileIndex);
break;
}
@@ -4279,7 +4289,8 @@ void SpeculativeJIT::compile(Node& node)
case ResolveBaseStrictPut: {
flushRegisters();
GPRResult result(this);
- callOperation(operationResolveBaseStrictPut, result.gpr(), identifier(node.identifierNumber()));
+ ResolveOperationData& data = m_jit.graph().m_resolveOperationsData[node.resolveOperationsDataIndex()];
+ callOperation(operationResolveBaseStrictPut, result.gpr(), identifier(data.identifierNumber), resolveOperations(data.resolveOperationsIndex), putToBaseOperation(data.putToBaseOperationIndex));
jsValueResult(result.gpr(), m_compileIndex);
break;
}
@@ -4294,16 +4305,16 @@ void SpeculativeJIT::compile(Node& node)
GPRReg resultGPR = result.gpr();
ResolveGlobalData& data = m_jit.graph().m_resolveGlobalData[node.resolveGlobalDataIndex()];
- GlobalResolveInfo* resolveInfoAddress = &(m_jit.codeBlock()->globalResolveInfo(data.resolveInfoIndex));
+ ResolveOperation* resolveOperationAddress = &(m_jit.codeBlock()->resolveOperations(data.resolveOperationsIndex)->data()[data.resolvePropertyIndex]);
// Check Structure of global object
m_jit.move(JITCompiler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)), globalObjectGPR);
- m_jit.move(JITCompiler::TrustedImmPtr(resolveInfoAddress), resolveInfoGPR);
- m_jit.loadPtr(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, structure)), resultGPR);
+ m_jit.move(JITCompiler::TrustedImmPtr(resolveOperationAddress), resolveInfoGPR);
+ m_jit.loadPtr(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(ResolveOperation, m_structure)), resultGPR);
JITCompiler::Jump structuresDontMatch = m_jit.branchPtr(JITCompiler::NotEqual, resultGPR, JITCompiler::Address(globalObjectGPR, JSCell::structureOffset()));
// Fast case
- m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), resolveInfoGPR);
+ m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(ResolveOperation, m_offset)), resolveInfoGPR);
#if DFG_ENABLE(JIT_ASSERT)
JITCompiler::Jump isOutOfLine = m_jit.branch32(JITCompiler::GreaterThanOrEqual, resolveInfoGPR, TrustedImm32(firstOutOfLineOffset));
m_jit.breakpoint();
@@ -4312,7 +4323,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.neg32(resolveInfoGPR);
m_jit.signExtend32ToPtr(resolveInfoGPR, resolveInfoGPR);
m_jit.loadPtr(JITCompiler::Address(globalObjectGPR, JSObject::butterflyOffset()), resultGPR);
- m_jit.loadPtr(JITCompiler::BaseIndex(resultGPR, resolveInfoGPR, JITCompiler::ScalePtr, (firstOutOfLineOffset - 2) * static_cast<ptrdiff_t>(sizeof(JSValue))), resultGPR);
+ m_jit.load64(JITCompiler::BaseIndex(resultGPR, resolveInfoGPR, JITCompiler::TimesEight, (firstOutOfLineOffset - 2) * static_cast<ptrdiff_t>(sizeof(JSValue))), resultGPR);
addSlowPathGenerator(
slowPathCall(
@@ -4335,7 +4346,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.move(valueGPR, resultGPR);
- JITCompiler::Jump notCreated = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR);
+ JITCompiler::Jump notCreated = m_jit.branchTest64(JITCompiler::Zero, resultGPR);
addSlowPathGenerator(
slowPathCall(notCreated, this, operationCreateActivation, resultGPR));
@@ -4353,7 +4364,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.move(valueGPR, resultGPR);
- JITCompiler::Jump notCreated = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR);
+ JITCompiler::Jump notCreated = m_jit.branchTest64(JITCompiler::Zero, resultGPR);
if (node.codeOrigin.inlineCallFrame) {
addSlowPathGenerator(
@@ -4377,17 +4388,17 @@ void SpeculativeJIT::compile(Node& node)
GPRReg activationValueGPR = activationValue.gpr();
GPRReg scratchGPR = scratch.gpr();
- JITCompiler::Jump notCreated = m_jit.branchTestPtr(JITCompiler::Zero, activationValueGPR);
+ JITCompiler::Jump notCreated = m_jit.branchTest64(JITCompiler::Zero, activationValueGPR);
SharedSymbolTable* symbolTable = m_jit.symbolTableFor(node.codeOrigin);
int registersOffset = JSActivation::registersOffset(symbolTable);
int captureEnd = symbolTable->captureEnd();
for (int i = symbolTable->captureStart(); i < captureEnd; ++i) {
- m_jit.loadPtr(
+ m_jit.load64(
JITCompiler::Address(
GPRInfo::callFrameRegister, i * sizeof(Register)), scratchGPR);
- m_jit.storePtr(
+ m_jit.store64(
scratchGPR, JITCompiler::Address(
activationValueGPR, registersOffset + i * sizeof(Register)));
}
@@ -4405,7 +4416,7 @@ void SpeculativeJIT::compile(Node& node)
GPRReg unmodifiedArgumentsValueGPR = unmodifiedArgumentsValue.gpr();
GPRReg activationValueGPR = activationValue.gpr();
- JITCompiler::Jump created = m_jit.branchTestPtr(JITCompiler::NonZero, unmodifiedArgumentsValueGPR);
+ JITCompiler::Jump created = m_jit.branchTest64(JITCompiler::NonZero, unmodifiedArgumentsValueGPR);
if (node.codeOrigin.inlineCallFrame) {
addSlowPathGenerator(
@@ -4431,7 +4442,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.graph().argumentsRegisterFor(node.codeOrigin)).m_type)) {
speculationCheck(
ArgumentsEscaped, JSValueRegs(), NoNode,
- m_jit.branchTestPtr(
+ m_jit.branchTest64(
JITCompiler::NonZero,
JITCompiler::addressFor(
m_jit.argumentsRegisterFor(node.codeOrigin))));
@@ -4448,22 +4459,19 @@ void SpeculativeJIT::compile(Node& node)
GPRTemporary result(this);
GPRReg resultGPR = result.gpr();
- JITCompiler::Jump created = m_jit.branchTestPtr(
+ JITCompiler::Jump created = m_jit.branchTest64(
JITCompiler::NonZero,
JITCompiler::addressFor(
m_jit.argumentsRegisterFor(node.codeOrigin)));
if (node.codeOrigin.inlineCallFrame) {
m_jit.move(
- ImmPtr(
- bitwise_cast<void*>(
- JSValue::encode(
- jsNumber(node.codeOrigin.inlineCallFrame->arguments.size() - 1)))),
+ Imm64(JSValue::encode(jsNumber(node.codeOrigin.inlineCallFrame->arguments.size() - 1))),
resultGPR);
} else {
m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), resultGPR);
m_jit.sub32(TrustedImm32(1), resultGPR);
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, resultGPR);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, resultGPR);
}
// FIXME: the slow path generator should perform a forward speculation that the
@@ -4490,7 +4498,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.graph().argumentsRegisterFor(node.codeOrigin)).m_type)) {
speculationCheck(
ArgumentsEscaped, JSValueRegs(), NoNode,
- m_jit.branchTestPtr(
+ m_jit.branchTest64(
JITCompiler::NonZero,
JITCompiler::addressFor(
m_jit.argumentsRegisterFor(node.codeOrigin))));
@@ -4529,7 +4537,7 @@ void SpeculativeJIT::compile(Node& node)
OBJECT_OFFSETOF(SlowArgument, index)),
resultGPR);
m_jit.signExtend32ToPtr(resultGPR, resultGPR);
- m_jit.loadPtr(
+ m_jit.load64(
JITCompiler::BaseIndex(
GPRInfo::callFrameRegister, resultGPR, JITCompiler::TimesEight, m_jit.offsetOfLocals(node.codeOrigin)),
resultGPR);
@@ -4540,7 +4548,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.neg32(resultGPR);
m_jit.signExtend32ToPtr(resultGPR, resultGPR);
- m_jit.loadPtr(
+ m_jit.load64(
JITCompiler::BaseIndex(
GPRInfo::callFrameRegister, resultGPR, JITCompiler::TimesEight, m_jit.offsetOfArgumentsIncludingThis(node.codeOrigin)),
resultGPR);
@@ -4558,7 +4566,7 @@ void SpeculativeJIT::compile(Node& node)
JITCompiler::JumpList slowPath;
slowPath.append(
- m_jit.branchTestPtr(
+ m_jit.branchTest64(
JITCompiler::NonZero,
JITCompiler::addressFor(
m_jit.argumentsRegisterFor(node.codeOrigin))));
@@ -4594,7 +4602,7 @@ void SpeculativeJIT::compile(Node& node)
OBJECT_OFFSETOF(SlowArgument, index)),
resultGPR);
m_jit.signExtend32ToPtr(resultGPR, resultGPR);
- m_jit.loadPtr(
+ m_jit.load64(
JITCompiler::BaseIndex(
GPRInfo::callFrameRegister, resultGPR, JITCompiler::TimesEight, m_jit.offsetOfLocals(node.codeOrigin)),
resultGPR);
@@ -4605,7 +4613,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.neg32(resultGPR);
m_jit.signExtend32ToPtr(resultGPR, resultGPR);
- m_jit.loadPtr(
+ m_jit.load64(
JITCompiler::BaseIndex(
GPRInfo::callFrameRegister, resultGPR, JITCompiler::TimesEight, m_jit.offsetOfArgumentsIncludingThis(node.codeOrigin)),
resultGPR);
@@ -4636,7 +4644,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.graph().argumentsRegisterFor(node.codeOrigin)).m_type));
speculationCheck(
ArgumentsEscaped, JSValueRegs(), NoNode,
- m_jit.branchTestPtr(
+ m_jit.branchTest64(
JITCompiler::NonZero,
JITCompiler::addressFor(
m_jit.argumentsRegisterFor(node.codeOrigin))));
@@ -4657,7 +4665,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.move(valueGPR, resultGPR);
- JITCompiler::Jump notCreated = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR);
+ JITCompiler::Jump notCreated = m_jit.branchTest64(JITCompiler::Zero, resultGPR);
addSlowPathGenerator(
slowPathCall(
@@ -4672,6 +4680,11 @@ void SpeculativeJIT::compile(Node& node)
compileNewFunctionExpression(node);
break;
+ case GarbageValue:
+ // We should never reach code emission for a GarbageValue node.
+ CRASH();
+ break;
+
case ForceOSRExit: {
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
diff --git a/Source/JavaScriptCore/dfg/DFGStructureCheckHoistingPhase.cpp b/Source/JavaScriptCore/dfg/DFGStructureCheckHoistingPhase.cpp
index 2e44af2d7..22b9395b5 100644
--- a/Source/JavaScriptCore/dfg/DFGStructureCheckHoistingPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGStructureCheckHoistingPhase.cpp
@@ -144,6 +144,8 @@ public:
m_graph.vote(node, VoteOther);
break;
}
+ case GarbageValue:
+ break;
default:
m_graph.vote(node, VoteOther);
diff --git a/Source/JavaScriptCore/dfg/DFGThunks.cpp b/Source/JavaScriptCore/dfg/DFGThunks.cpp
index 25fcad10a..74d1967a8 100644
--- a/Source/JavaScriptCore/dfg/DFGThunks.cpp
+++ b/Source/JavaScriptCore/dfg/DFGThunks.cpp
@@ -44,8 +44,13 @@ MacroAssemblerCodeRef osrExitGenerationThunkGenerator(JSGlobalData* globalData)
ScratchBuffer* scratchBuffer = globalData->scratchBufferForSize(scratchSize);
EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
- for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i)
- jit.storePtr(GPRInfo::toRegister(i), buffer + i);
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+#if USE(JSVALUE64)
+ jit.store64(GPRInfo::toRegister(i), buffer + i);
+#else
+ jit.store32(GPRInfo::toRegister(i), buffer + i);
+#endif
+ }
for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
jit.move(MacroAssembler::TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
jit.storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
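
The save loop above now picks a store width that matches the value representation. The scratch buffer is typed EncodedJSValue*, so each register owns an 8-byte slot on both variants, but under USE(JSVALUE64) a GPR holds a whole boxed value (store64), while under JSVALUE32_64 it holds one 32-bit half, written into the low bytes of the slot (store32). A host-side sketch of the same shape (little-endian assumed, as on the targets this code serves; names ours):

    #include <cstdint>
    #include <cstring>

    using EncodedJSValue = uint64_t;

    // One 8-byte slot per register; fill it with the whole register word
    // (uint64_t under JSVALUE64) or just its low 32 bits (uint32_t otherwise).
    template<typename RegisterWord>
    static void saveRegisters(const RegisterWord* regs, unsigned count, EncodedJSValue* buffer)
    {
        for (unsigned i = 0; i < count; ++i) {
            buffer[i] = 0;
            std::memcpy(&buffer[i], &regs[i], sizeof(RegisterWord)); // store64 / store32
        }
    }

    int main()
    {
        uint64_t regs[2] = { 0xffff000000000001ull, 0x2a };
        EncodedJSValue buffer[2];
        saveRegisters(regs, 2, buffer);
        return buffer[1] == 0x2a ? 0 : 1;
    }
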
@@ -71,8 +76,13 @@ MacroAssemblerCodeRef osrExitGenerationThunkGenerator(JSGlobalData* globalData)
jit.move(MacroAssembler::TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
jit.loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
}
- for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i)
- jit.loadPtr(buffer + i, GPRInfo::toRegister(i));
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+#if USE(JSVALUE64)
+ jit.load64(buffer + i, GPRInfo::toRegister(i));
+#else
+ jit.load32(buffer + i, GPRInfo::toRegister(i));
+#endif
+ }
jit.jump(MacroAssembler::AbsoluteAddress(&globalData->osrExitJumpDestination));
@@ -115,7 +125,11 @@ MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(JSGlobalData* glob
GPRInfo::callFrameRegister,
static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::CallerFrame),
GPRInfo::callFrameRegister);
+#if USE(JSVALUE64)
+ jit.peek64(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
+#else
jit.peek(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
+#endif
jit.setupArgumentsWithExecState(GPRInfo::nonPreservedNonReturnGPR);
jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
emitPointerValidation(jit, GPRInfo::nonArgGPR0);
@@ -138,7 +152,11 @@ static void slowPathFor(
GPRInfo::callFrameRegister,
static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ReturnPC));
jit.storePtr(GPRInfo::callFrameRegister, &globalData->topCallFrame);
+#if USE(JSVALUE64)
+ jit.poke64(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
+#else
jit.poke(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
+#endif
jit.setupArgumentsExecState();
jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
emitPointerValidation(jit, GPRInfo::nonArgGPR0);
@@ -211,7 +229,7 @@ static MacroAssemblerCodeRef virtualForThunkGenerator(
#if USE(JSVALUE64)
slowCase.append(
- jit.branchTestPtr(
+ jit.branchTest64(
CCallHelpers::NonZero, GPRInfo::nonArgGPR0, GPRInfo::tagMaskRegister));
#else
slowCase.append(
@@ -245,7 +263,7 @@ static MacroAssemblerCodeRef virtualForThunkGenerator(
CCallHelpers::Address(GPRInfo::nonArgGPR0, JSFunction::offsetOfScopeChain()),
GPRInfo::nonArgGPR1);
#if USE(JSVALUE64)
- jit.storePtr(
+ jit.store64(
GPRInfo::nonArgGPR1,
CCallHelpers::Address(
GPRInfo::callFrameRegister,