Diffstat (limited to 'Source/JavaScriptCore')
-rw-r--r--  Source/JavaScriptCore/API/JSStringRef.cpp | 6
-rw-r--r--  Source/JavaScriptCore/API/JSStringRefCF.cpp | 2
-rw-r--r--  Source/JavaScriptCore/API/OpaqueJSString.h | 12
-rw-r--r--  Source/JavaScriptCore/CMakeLists.txt | 24
-rw-r--r--  Source/JavaScriptCore/ChangeLog | 859
-rw-r--r--  Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig | 5
-rw-r--r--  Source/JavaScriptCore/GNUmakefile.list.am | 3
-rwxr-xr-x  Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def | 4
-rw-r--r--  Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj | 15
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssembler.h | 26
-rw-r--r--  Source/JavaScriptCore/bytecode/ArrayProfile.cpp | 36
-rw-r--r--  Source/JavaScriptCore/bytecode/ArrayProfile.h | 33
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeBlock.cpp | 248
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeBlock.h | 59
-rw-r--r--  Source/JavaScriptCore/bytecode/DFGExitProfile.h | 1
-rw-r--r--  Source/JavaScriptCore/bytecode/Opcode.h | 36
-rw-r--r--  Source/JavaScriptCore/bytecode/ResolveGlobalStatus.cpp | 44
-rw-r--r--  Source/JavaScriptCore/bytecode/ResolveGlobalStatus.h | 3
-rw-r--r--  Source/JavaScriptCore/bytecode/StructureSet.h | 11
-rw-r--r--  Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp | 358
-rw-r--r--  Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h | 226
-rw-r--r--  Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp | 81
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAbstractState.cpp | 16
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAbstractValue.h | 161
-rw-r--r--  Source/JavaScriptCore/dfg/DFGArrayMode.cpp | 20
-rw-r--r--  Source/JavaScriptCore/dfg/DFGArrayMode.h | 45
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp | 12
-rw-r--r--  Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h | 37
-rw-r--r--  Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp | 488
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCCallHelpers.h | 14
-rw-r--r--  Source/JavaScriptCore/dfg/DFGCapabilities.h | 89
-rw-r--r--  Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp | 3
-rw-r--r--  Source/JavaScriptCore/dfg/DFGGraph.h | 19
-rw-r--r--  Source/JavaScriptCore/dfg/DFGNode.h | 9
-rw-r--r--  Source/JavaScriptCore/dfg/DFGNodeType.h | 2
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExit.cpp | 2
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExit.h | 4
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp | 9
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp | 105
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp | 153
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOperations.cpp | 21
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOperations.h | 14
-rw-r--r--  Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp | 31
-rw-r--r--  Source/JavaScriptCore/dfg/DFGRepatch.cpp | 24
-rw-r--r--  Source/JavaScriptCore/dfg/DFGScratchRegisterAllocator.h | 22
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h | 2
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp | 95
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h | 105
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp | 33
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp | 439
-rw-r--r--  Source/JavaScriptCore/dfg/DFGStructureCheckHoistingPhase.cpp | 2
-rw-r--r--  Source/JavaScriptCore/dfg/DFGThunks.cpp | 30
-rw-r--r--  Source/JavaScriptCore/heap/CopiedSpace.cpp | 49
-rw-r--r--  Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h | 1
-rw-r--r--  Source/JavaScriptCore/heap/CopyVisitorInlineMethods.h | 4
-rw-r--r--  Source/JavaScriptCore/heap/GCThread.cpp | 10
-rw-r--r--  Source/JavaScriptCore/heap/GCThreadSharedData.cpp | 57
-rw-r--r--  Source/JavaScriptCore/heap/GCThreadSharedData.h | 5
-rw-r--r--  Source/JavaScriptCore/heap/Heap.cpp | 7
-rw-r--r--  Source/JavaScriptCore/heap/SlotVisitor.cpp | 10
-rw-r--r--  Source/JavaScriptCore/interpreter/Interpreter.cpp | 26
-rw-r--r--  Source/JavaScriptCore/jit/JIT.cpp | 55
-rw-r--r--  Source/JavaScriptCore/jit/JIT.h | 38
-rw-r--r--  Source/JavaScriptCore/jit/JITArithmetic.cpp | 84
-rw-r--r--  Source/JavaScriptCore/jit/JITCall.cpp | 22
-rw-r--r--  Source/JavaScriptCore/jit/JITInlineMethods.h | 195
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes.cpp | 650
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes32_64.cpp | 143
-rw-r--r--  Source/JavaScriptCore/jit/JITPropertyAccess.cpp | 127
-rw-r--r--  Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp | 75
-rw-r--r--  Source/JavaScriptCore/jit/JITStubCall.h | 32
-rw-r--r--  Source/JavaScriptCore/jit/JITStubs.cpp | 53
-rw-r--r--  Source/JavaScriptCore/jit/JITStubs.h | 9
-rw-r--r--  Source/JavaScriptCore/jit/JSInterfaceJIT.h | 20
-rw-r--r--  Source/JavaScriptCore/jit/SpecializedThunkJIT.h | 8
-rw-r--r--  Source/JavaScriptCore/llint/LLIntSlowPaths.cpp | 118
-rw-r--r--  Source/JavaScriptCore/llint/LLIntSlowPaths.h | 7
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter.asm | 450
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm | 127
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter64.asm | 140
-rw-r--r--  Source/JavaScriptCore/offlineasm/armv7.rb | 535
-rw-r--r--  Source/JavaScriptCore/offlineasm/risc.rb | 555
-rw-r--r--  Source/JavaScriptCore/parser/Lexer.cpp | 4
-rw-r--r--  Source/JavaScriptCore/parser/Lexer.h | 29
-rw-r--r--  Source/JavaScriptCore/parser/Parser.h | 2
-rw-r--r--  Source/JavaScriptCore/runtime/JSScope.cpp | 597
-rw-r--r--  Source/JavaScriptCore/runtime/JSScope.h | 36
-rw-r--r--  Source/JavaScriptCore/runtime/JSValue.cpp | 2
-rw-r--r--  Source/JavaScriptCore/runtime/JSValue.h | 8
-rw-r--r--  Source/JavaScriptCore/runtime/JSValueInlineMethods.h | 33
-rw-r--r--  Source/JavaScriptCore/runtime/JSVariableObject.cpp | 2
-rw-r--r--  Source/JavaScriptCore/runtime/JSVariableObject.h | 2
-rw-r--r--  Source/JavaScriptCore/runtime/RegExpKey.h | 19
-rw-r--r--  Source/JavaScriptCore/runtime/StringRecursionChecker.h | 3
-rw-r--r--  Source/JavaScriptCore/runtime/Structure.h | 2
95 files changed, 5198 insertions, 3251 deletions
diff --git a/Source/JavaScriptCore/API/JSStringRef.cpp b/Source/JavaScriptCore/API/JSStringRef.cpp
index ea31da66b..da1a3057a 100644
--- a/Source/JavaScriptCore/API/JSStringRef.cpp
+++ b/Source/JavaScriptCore/API/JSStringRef.cpp
@@ -46,8 +46,12 @@ JSStringRef JSStringCreateWithUTF8CString(const char* string)
size_t length = strlen(string);
Vector<UChar, 1024> buffer(length);
UChar* p = buffer.data();
- if (conversionOK == convertUTF8ToUTF16(&string, string + length, &p, p + length))
+ bool sourceIsAllASCII;
+ if (conversionOK == convertUTF8ToUTF16(&string, string + length, &p, p + length, &sourceIsAllASCII)) {
+ if (sourceIsAllASCII)
+ return OpaqueJSString::create(reinterpret_cast<const LChar*>(string), length).leakRef();
return OpaqueJSString::create(buffer.data(), p - buffer.data()).leakRef();
+ }
}
// Null string.
diff --git a/Source/JavaScriptCore/API/JSStringRefCF.cpp b/Source/JavaScriptCore/API/JSStringRefCF.cpp
index e87fd838d..69cf3f8c4 100644
--- a/Source/JavaScriptCore/API/JSStringRefCF.cpp
+++ b/Source/JavaScriptCore/API/JSStringRefCF.cpp
@@ -46,7 +46,7 @@ JSStringRef JSStringCreateWithCFString(CFStringRef string)
COMPILE_ASSERT(sizeof(UniChar) == sizeof(UChar), unichar_and_uchar_must_be_same_size);
return OpaqueJSString::create(reinterpret_cast<UChar*>(buffer.get()), length).leakRef();
} else {
- return OpaqueJSString::create(0, 0).leakRef();
+ return OpaqueJSString::create(static_cast<const LChar*>(0), 0).leakRef();
}
}
diff --git a/Source/JavaScriptCore/API/OpaqueJSString.h b/Source/JavaScriptCore/API/OpaqueJSString.h
index 36680388d..0464e8813 100644
--- a/Source/JavaScriptCore/API/OpaqueJSString.h
+++ b/Source/JavaScriptCore/API/OpaqueJSString.h
@@ -41,6 +41,11 @@ struct OpaqueJSString : public ThreadSafeRefCounted<OpaqueJSString> {
return adoptRef(new OpaqueJSString);
}
+ static PassRefPtr<OpaqueJSString> create(const LChar* characters, unsigned length)
+ {
+ return adoptRef(new OpaqueJSString(characters, length));
+ }
+
static PassRefPtr<OpaqueJSString> create(const UChar* characters, unsigned length)
{
return adoptRef(new OpaqueJSString(characters, length));
@@ -51,7 +56,7 @@ struct OpaqueJSString : public ThreadSafeRefCounted<OpaqueJSString> {
const UChar* characters() { return !!this ? m_string.characters() : 0; }
unsigned length() { return !!this ? m_string.length() : 0; }
- String string() const;
+ JS_EXPORT_PRIVATE String string() const;
JSC::Identifier identifier(JSC::JSGlobalData*) const;
private:
@@ -70,6 +75,11 @@ private:
m_string = String(string.characters16(), string.length());
}
+ OpaqueJSString(const LChar* characters, unsigned length)
+ {
+ m_string = String(characters, length);
+ }
+
OpaqueJSString(const UChar* characters, unsigned length)
{
m_string = String(characters, length);
diff --git a/Source/JavaScriptCore/CMakeLists.txt b/Source/JavaScriptCore/CMakeLists.txt
index 4656c5aab..c706f65e9 100644
--- a/Source/JavaScriptCore/CMakeLists.txt
+++ b/Source/JavaScriptCore/CMakeLists.txt
@@ -324,6 +324,7 @@ IF (ENABLE_LLINT)
offlineasm/opt.rb
offlineasm/parser.rb
offlineasm/registers.rb
+ offlineasm/risc.rb
offlineasm/self_hash.rb
offlineasm/settings.rb
offlineasm/transform.rb
@@ -337,8 +338,19 @@ IF (ENABLE_LLINT)
COMMAND ${RUBY_EXECUTABLE} ${JAVASCRIPTCORE_DIR}/offlineasm/generate_offset_extractor.rb ${JAVASCRIPTCORE_DIR}/llint/LowLevelInterpreter.asm ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LLIntDesiredOffsets.h
VERBATIM)
- ADD_SOURCE_DEPENDENCIES(${JAVASCRIPTCORE_DIR}/llint/LLIntOffsetsExtractor.cpp ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LLIntDesiredOffsets.h)
- ADD_EXECUTABLE(LLIntOffsetsExtractor ${JAVASCRIPTCORE_DIR}/llint/LLIntOffsetsExtractor.cpp)
+ # We add the header file directly to the ADD_EXECUTABLE call instead of setting the
+ # OBJECT_DEPENDS property in LLIntOffsetsExtractor.cpp because generate_offset_extractor.rb may
+ # not regenerate it in case the hash it calculates does not change.
+    # In this case, if some of the dependencies specified in the ADD_CUSTOM_COMMAND above have
+    # changed, the command will always be called because the mtime of LLIntDesiredOffsets.h will
+ # always be older than that of its dependencies.
+ # Additionally, setting the OBJECT_DEPENDS property will make LLIntDesiredOffsets.h a Makefile
+ # dependency of both LLIntOffsetsExtractor and LLIntOffsetsExtractor.cpp, so the command will
+ # actually be run twice!
+ ADD_EXECUTABLE(LLIntOffsetsExtractor
+ ${JAVASCRIPTCORE_DIR}/llint/LLIntOffsetsExtractor.cpp
+ ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LLIntDesiredOffsets.h
+ )
TARGET_LINK_LIBRARIES(LLIntOffsetsExtractor ${WTF_LIBRARY_NAME})
ADD_CUSTOM_COMMAND(
@@ -348,7 +360,13 @@ IF (ENABLE_LLINT)
COMMAND ${RUBY_EXECUTABLE} ${JAVASCRIPTCORE_DIR}/offlineasm/asm.rb ${JAVASCRIPTCORE_DIR}/llint/LowLevelInterpreter.asm $<TARGET_FILE:LLIntOffsetsExtractor> ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LLIntAssembly.h
VERBATIM)
- ADD_SOURCE_DEPENDENCIES(${JAVASCRIPTCORE_DIR}/llint/LowLevelInterpreter.cpp ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LLIntAssembly.h)
+ # The explanation for not making LLIntAssembly.h part of the OBJECT_DEPENDS property of some of
+ # the .cpp files below is similar to the one in the previous comment. However, since these .cpp
+    # files are used to build JavaScriptCore itself, we can just add LLIntAssembly.h to
+    # JavaScriptCore_HEADERS since it is used in the ADD_LIBRARY() call at the end of this file.
+ LIST(APPEND JavaScriptCore_HEADERS
+ ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LLIntAssembly.h
+ )
LIST(APPEND JavaScriptCore_SOURCES
llint/LLIntCLoop.cpp
llint/LLIntData.cpp
diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog
index 3574aa0d8..c6b5ce758 100644
--- a/Source/JavaScriptCore/ChangeLog
+++ b/Source/JavaScriptCore/ChangeLog
@@ -1,63 +1,631 @@
-2012-10-17 Zoltan Horvath <zoltan@webkit.org>
+2012-10-20 Martin Robinson <mrobinson@igalia.com>
-        Remove the JSHeap memory measurement of the PageLoad performance tests since it creates bogus JSGlobalDatas
- https://bugs.webkit.org/show_bug.cgi?id=99609
+ Fix 'make dist' for the GTK+ port
- Reviewed by Ryosuke Niwa.
+ * GNUmakefile.list.am: Add missing files to the source list.
- Remove the implementation since it creates bogus JSGlobalDatas in the layout tests.
+2012-10-21 Raphael Kubo da Costa <raphael.kubo.da.costa@intel.com>
- * heap/HeapStatistics.cpp:
+ [CMake][JSC] Depend on risc.rb to decide when to run the LLInt scripts.
+ https://bugs.webkit.org/show_bug.cgi?id=99917
+
+ Reviewed by Geoffrey Garen.
+
+ Depend on the newly-added risc.rb to make sure we always run the
+ LLInt scripts when one of them changes.
+
+ * CMakeLists.txt:
+
+2012-10-20 Filip Pizlo <fpizlo@apple.com>
+
+ LLInt backends of non-ARM RISC platforms should be able to share code with the existing ARMv7 backend
+ https://bugs.webkit.org/show_bug.cgi?id=99745
+
+ Reviewed by Geoffrey Garen.
+
+ This moves all of the things in armv7.rb that I thought are generally useful out
+ into risc.rb. It also separates some phases (branch ops is separated into one
+ phase that does sensible things, and another that does things that are painfully
+ ARM-specific), and removes ARM assumptions from others by using a callback to
+ drive exactly what lowering must happen. The goal here is to minimize the future
+ maintenance burden of LLInt by ensuring that the various platforms share as much
+ lowering code as possible.
+
+ * offlineasm/armv7.rb:
+ * offlineasm/risc.rb: Added.
+
+2012-10-19 Filip Pizlo <fpizlo@apple.com>
+
+ DFG should have some facility for recognizing redundant CheckArrays and Arrayifies
+ https://bugs.webkit.org/show_bug.cgi?id=99287
+
+ Reviewed by Mark Hahnenberg.
+
+ Adds reasoning about indexing type sets (i.e. ArrayModes) to AbstractValue, which
+ then enables us to fold away CheckArray's and Arrayify's that are redundant.
+
+ * bytecode/ArrayProfile.cpp:
+ (JSC::arrayModesToString):
(JSC):
- * heap/HeapStatistics.h:
- (HeapStatistics):
+ * bytecode/ArrayProfile.h:
+ (JSC):
+ (JSC::mergeArrayModes):
+ (JSC::arrayModesAlreadyChecked):
+ * bytecode/StructureSet.h:
+ (JSC::StructureSet::arrayModesFromStructures):
+ (StructureSet):
+ * dfg/DFGAbstractState.cpp:
+ (JSC::DFG::AbstractState::execute):
+ * dfg/DFGAbstractValue.h:
+ (JSC::DFG::AbstractValue::AbstractValue):
+ (JSC::DFG::AbstractValue::clear):
+ (JSC::DFG::AbstractValue::isClear):
+ (JSC::DFG::AbstractValue::makeTop):
+ (JSC::DFG::AbstractValue::clobberStructures):
+ (AbstractValue):
+ (JSC::DFG::AbstractValue::setMostSpecific):
+ (JSC::DFG::AbstractValue::set):
+ (JSC::DFG::AbstractValue::operator==):
+ (JSC::DFG::AbstractValue::merge):
+ (JSC::DFG::AbstractValue::filter):
+ (JSC::DFG::AbstractValue::filterArrayModes):
+ (JSC::DFG::AbstractValue::validate):
+ (JSC::DFG::AbstractValue::checkConsistency):
+ (JSC::DFG::AbstractValue::dump):
+ (JSC::DFG::AbstractValue::clobberArrayModes):
+ (JSC::DFG::AbstractValue::clobberArrayModesSlow):
+ (JSC::DFG::AbstractValue::setFuturePossibleStructure):
+ (JSC::DFG::AbstractValue::filterFuturePossibleStructure):
+ * dfg/DFGArrayMode.cpp:
+ (JSC::DFG::modeAlreadyChecked):
+ * dfg/DFGArrayMode.h:
+ (JSC::DFG::arrayModesFor):
+ (DFG):
+ * dfg/DFGConstantFoldingPhase.cpp:
+ (JSC::DFG::ConstantFoldingPhase::foldConstants):
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::arrayify):
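
A minimal sketch of the ArrayModes idea from the entry above (illustrative types, not WebKit's actual code): each indexing type maps to one bit, so merging two profiles is a bitwise OR, and a CheckArray or Arrayify is provably redundant when the abstract value's proven mode set is a subset of the checked set.

    typedef unsigned ArrayModes; // one bit per indexing type

    inline ArrayModes mergeArrayModes(ArrayModes a, ArrayModes b)
    {
        return a | b; // union of the observed indexing types
    }

    // A check against 'expected' can be folded away when every mode the
    // abstract value may still hold is already accepted by the check.
    inline bool arrayModesAlreadyChecked(ArrayModes proven, ArrayModes expected)
    {
        return (proven & expected) == proven;
    }
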
-2012-10-17 Sam Weinig <sam@webkit.org>
+2012-10-19 Filip Pizlo <fpizlo@apple.com>
- Attempt to fix the build.
+ Baseline JIT should not inline array allocations, to make them easier to instrument
+ https://bugs.webkit.org/show_bug.cgi?id=99905
- * bytecode/GlobalResolveInfo.h: Copied from bytecode/GlobalResolveInfo.h.
+ Reviewed by Mark Hahnenberg.
-2012-10-17 Oliver Hunt <oliver@apple.com>
+ This will make it easier to instrument array allocations for the purposes of profiling.
+ It also allows us to kill off a bunch of code. And, this doesn't appear to hurt
+ performance at all. That's expected because these days any hot allocation will end up
+ in the DFG JIT, which does inline these allocations.
+
+ * jit/JIT.cpp:
+ (JSC::JIT::privateCompileSlowCases):
+ * jit/JIT.h:
+ (JIT):
+ * jit/JITInlineMethods.h:
+ (JSC):
+ * jit/JITOpcodes.cpp:
+ (JSC::JIT::emit_op_new_array):
+
+2012-10-19 Oliver Hunt <oliver@apple.com>
+
+        Fix some of the regressions caused by the non-local variable reworking
+ https://bugs.webkit.org/show_bug.cgi?id=99896
+
+ Reviewed by Filip Pizlo.
+
+        The non-local variable reworking led to some of the optimisations performed by
+        the bytecode generator being dropped. This in turn put more pressure on the DFG
+        optimisations. This exposed a shortcoming in our double speculation propagation.
+        Now we try to distinguish between places where we should use SpecDoubleReal vs generic
+        SpecDouble.
+
+ * dfg/DFGPredictionPropagationPhase.cpp:
+ (PredictionPropagationPhase):
+ (JSC::DFG::PredictionPropagationPhase::speculatedDoubleTypeForPrediction):
+ (JSC::DFG::PredictionPropagationPhase::speculatedDoubleTypeForPredictions):
+ (JSC::DFG::PredictionPropagationPhase::propagate):
+
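A sketch of the SpecDoubleReal/SpecDouble distinction, assuming SpeculatedType is a bitmask in which SpecDoubleReal means "double, provably not NaN" (the bit values here are made up):

    typedef unsigned SpeculatedType;
    static const SpeculatedType SpecDoubleReal = 1u << 0; // doubles excluding NaN
    static const SpeculatedType SpecDoubleNaN  = 1u << 1; // NaN
    static const SpeculatedType SpecDouble     = SpecDoubleReal | SpecDoubleNaN;

    // Prefer the tighter SpecDoubleReal whenever the prediction rules out NaN.
    inline SpeculatedType speculatedDoubleTypeForPrediction(SpeculatedType value)
    {
        return (value & SpecDoubleNaN) ? SpecDouble : SpecDoubleReal;
    }
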
+2012-10-19 Michael Saboff <msaboff@apple.com>
+
+ Lexer should create 8 bit Identifiers for RegularExpressions and ASCII identifiers
+ https://bugs.webkit.org/show_bug.cgi?id=99855
+
+ Reviewed by Filip Pizlo.
+
+        Added makeIdentifier helpers that will always make an 8-bit Identifier or make an
+ Identifier that is the same size as the template parameter. Used the first in the fast
+ path when looking for a JS identifier and the second when scanning regular expressions.
+
+ * parser/Lexer.cpp:
+ (JSC::::scanRegExp):
+ * parser/Lexer.h:
+ (Lexer):
+ (JSC::::makeIdentifierSameType):
+ (JSC::::makeLCharIdentifier):
+ (JSC::::lexExpectIdentifier):
+
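A self-contained sketch of the narrowing idea (toy types, not the Lexer's real signatures): when the scanned characters are known to fit in a byte, the identifier can be stored in 8-bit form instead of being widened to 16-bit.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    typedef uint8_t LChar;   // 8-bit character
    typedef char16_t UChar;  // 16-bit character

    // Narrow a known-ASCII UTF-16 run to 8-bit storage; the caller has
    // already verified that every code unit fits in a byte.
    std::vector<LChar> makeLCharIdentifier(const UChar* chars, std::size_t length)
    {
        std::vector<LChar> narrow;
        narrow.reserve(length);
        for (std::size_t i = 0; i < length; ++i)
            narrow.push_back(static_cast<LChar>(chars[i]));
        return narrow;
    }
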
+2012-10-19 Mark Lam <mark.lam@apple.com>
+
+ Added WTF::StackStats mechanism.
+ https://bugs.webkit.org/show_bug.cgi?id=99805.
+
+ Reviewed by Geoffrey Garen.
+
+ Added StackStats checkpoints and probes.
+
+ * bytecompiler/BytecodeGenerator.h:
+ (JSC::BytecodeGenerator::emitNode):
+ (JSC::BytecodeGenerator::emitNodeInConditionContext):
+ * heap/SlotVisitor.cpp:
+ (JSC::SlotVisitor::append):
+ (JSC::visitChildren):
+ (JSC::SlotVisitor::donateKnownParallel):
+ (JSC::SlotVisitor::drain):
+ (JSC::SlotVisitor::drainFromShared):
+ (JSC::SlotVisitor::mergeOpaqueRoots):
+ (JSC::SlotVisitor::internalAppend):
+ (JSC::SlotVisitor::harvestWeakReferences):
+ (JSC::SlotVisitor::finalizeUnconditionalFinalizers):
+ * interpreter/Interpreter.cpp:
+ (JSC::Interpreter::execute):
+ (JSC::Interpreter::executeCall):
+ (JSC::Interpreter::executeConstruct):
+ (JSC::Interpreter::prepareForRepeatCall):
+ * parser/Parser.h:
+ (JSC::Parser::canRecurse):
+ * runtime/StringRecursionChecker.h:
+ (StringRecursionChecker):
+
+2012-10-19 Oliver Hunt <oliver@apple.com>
+
+ REGRESSION(r131822): It made 500+ tests crash on 32 bit platforms
+ https://bugs.webkit.org/show_bug.cgi?id=99814
+
+ Reviewed by Filip Pizlo.
+
+ Call the correct macro in 32bit.
- Roll out r131645 as it causes random site crashes.
+ * llint/LowLevelInterpreter.asm:
+
+2012-10-19 Dongwoo Joshua Im <dw.im@samsung.com>
+
+ Rename ENABLE_CSS3_TEXT_DECORATION to ENABLE_CSS3_TEXT
+ https://bugs.webkit.org/show_bug.cgi?id=99804
+
+ Reviewed by Julien Chaffraix.
+
+ CSS3 text related properties will be implemented under this flag,
+ including text decoration, text-align-last, and text-justify.
+
+ * Configurations/FeatureDefines.xcconfig:
+
+2012-10-18 Anders Carlsson <andersca@apple.com>
+
+ Clean up RegExpKey
+ https://bugs.webkit.org/show_bug.cgi?id=99798
+
+ Reviewed by Darin Adler.
+
+ RegExpHash doesn't need to be a class template specialization when the class template is specialized
+        for JSC::RegExpKey only. Make it a nested class of RegExpKey instead. Also, make operator== a friend function
+ so Hash::equal can see it.
+
+ * runtime/RegExpKey.h:
+ (JSC::RegExpKey::RegExpKey):
+ (JSC::RegExpKey::operator==):
+ (RegExpKey):
+ (JSC::RegExpKey::Hash::hash):
+ (JSC::RegExpKey::Hash::equal):
+ (Hash):
+
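A sketch of the nested-Hash pattern described above, with std:: stand-ins for the WTF types:

    #include <cstddef>
    #include <functional>
    #include <string>

    struct RegExpKeySketch {
        int flagsValue;
        std::string pattern;

        // Friend function, so Hash::equal below can see it.
        friend bool operator==(const RegExpKeySketch& a, const RegExpKeySketch& b)
        {
            return a.flagsValue == b.flagsValue && a.pattern == b.pattern;
        }

        // Nested class instead of a hash-traits specialization.
        struct Hash {
            static std::size_t hash(const RegExpKeySketch& key)
            {
                return std::hash<std::string>()(key.pattern) ^ static_cast<std::size_t>(key.flagsValue);
            }
            static bool equal(const RegExpKeySketch& a, const RegExpKeySketch& b)
            {
                return a == b;
            }
        };
    };
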
+2012-10-19 Mark Lam <mark.lam@apple.com>
+
+ Bot greening: Follow up to r131877 to fix the Windows build.
+ https://bugs.webkit.org/show_bug.cgi?id=99739.
+
+ Not reviewed.
+
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def:
+
+2012-10-19 Mark Lam <mark.lam@apple.com>
+
+        Bot greening: Attempt to fix the broken Windows build after r131836.
+ https://bugs.webkit.org/show_bug.cgi?id=99739.
+
+ Not reviewed.
+
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def:
+
+2012-10-19 Yuqiang Xian <yuqiang.xian@intel.com>
+
+ Unreviewed fix after r131868.
+
+ On JSVALUE64 platforms, JSValue constants can be Imm64 instead of ImmPtr for JIT compilers.
+
+ * dfg/DFGOSRExitCompiler64.cpp:
+ (JSC::DFG::OSRExitCompiler::compileExit):
+
+2012-10-18 Filip Pizlo <fpizlo@apple.com>
+
+ Baseline array profiling should be less accurate, and DFG OSR exit should update array profiles on CheckArray and CheckStructure failure
+ https://bugs.webkit.org/show_bug.cgi?id=99261
+
+ Reviewed by Oliver Hunt.
+
+ This makes array profiling stochastic, like value profiling. The point is to avoid
+ noticing one-off indexing types that we'll never see again, but instead to:
+
+ Notice the big ones: We want the DFG to compile based on the things that happen with
+ high probability. So, this change makes array profiling do like value profiling and
+ only notice a random subsampling of indexing types that flowed through an array
+ access. Prior to this patch array profiles noticed all indexing types and weighted
+ them identically.
+
+ Bias the recent: Often an array access will see awkward indexing types during the
+ first handful of executions because of artifacts of program startup. So, we want to
+ bias towards the indexing types that we saw most recently. With this change, array
+ profiling does like value profiling and usually tells use a random sampling that
+        profiling does like value profiling and usually tells us a random sampling that
+        is biased toward what happened recently.
+ Have a backup plan: The above two things don't work by themselves because our
+ randomness is not that random (nor do we care enough to make it more random), and
+ because some procedures will have a <1/10 probability event that we must handle
+ without bailing because it dominates a hot loop. So, like value profiling, this
+ patch makes array profiling use OSR exits to tell us why we are bailing out, so
+ that we don't make the same mistake again in the future.
+ This change also makes the way that the 32-bit OSR exit compiler snatches scratch
+ registers more uniform. We don't need a scratch buffer when we can push and pop.
+
+ * bytecode/DFGExitProfile.h:
+ * dfg/DFGOSRExitCompiler32_64.cpp:
+ (JSC::DFG::OSRExitCompiler::compileExit):
+ * dfg/DFGOSRExitCompiler64.cpp:
+ (JSC::DFG::OSRExitCompiler::compileExit):
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::checkArray):
+ (JSC::DFG::SpeculativeJIT::arrayify):
+ * dfg/DFGSpeculativeJIT32_64.cpp:
+ (JSC::DFG::SpeculativeJIT::compile):
+ * dfg/DFGSpeculativeJIT64.cpp:
+ (JSC::DFG::SpeculativeJIT::compile):
+ * jit/JITInlineMethods.h:
+ (JSC::JIT::emitArrayProfilingSite):
+ * llint/LowLevelInterpreter.asm:
+
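A sketch of the stochastic profile described above (assumed mechanics, not the actual ArrayProfile): every access overwrites a single "last seen" slot, and that slot is merged into the profile only when it is polled, so the profile ends up as a recency-biased subsample rather than an exact tally.

    struct ArrayProfileSketch {
        unsigned m_lastSeenModes;  // overwritten on every access: biases toward recency
        unsigned m_observedModes;  // merged only when the profile is polled

        ArrayProfileSketch() : m_lastSeenModes(0), m_observedModes(0) { }

        void recordAccess(unsigned modes) { m_lastSeenModes = modes; }
        void computeUpdatedPrediction() { m_observedModes |= m_lastSeenModes; }
    };
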
+2012-10-18 Yuqiang Xian <yuqiang.xian@intel.com>
+
+ [Qt] REGRESSION(r131858): It broke the ARM build
+ https://bugs.webkit.org/show_bug.cgi?id=99809
+
+ Reviewed by Csaba Osztrogonác.
+
+ * dfg/DFGCCallHelpers.h:
+ (CCallHelpers):
+ (JSC::DFG::CCallHelpers::setupArgumentsWithExecState):
+
+2012-10-18 Yuqiang Xian <yuqiang.xian@intel.com>
+
+ Refactor MacroAssembler interfaces to differentiate the pointer operands from the 64-bit integer operands
+ https://bugs.webkit.org/show_bug.cgi?id=99154
+
+ Reviewed by Gavin Barraclough.
+
+        In the current JavaScriptCore implementation for JSVALUE64 platforms (i.e.,
+        the X64 platform), we assume that the JSValue size is the same as the
+        pointer size, and thus EncodedJSValue is simply typedefed as a
+        "void*". In the JIT compiler, we also take this assumption and invoke
+        the same macro assembler interfaces for both JSValue and pointer
+        operands. We need to differentiate the operations on pointers from the
+        operations on JSValues, and let them invoke different macro assembler
+        interfaces. For example, we now use the "loadPtr" interface to load
+        either a pointer or a JSValue; we need to switch to using "loadPtr" to
+        load a pointer and a new "load64" interface to load a JSValue. This
+        would help us support other JSVALUE64 platforms where the pointer size
+        is not necessarily 64 bits, for example x32 (bug #99153).
+
+ The major modification I made is to introduce the "*64" interfaces in
+ the MacroAssembler for those operations on JSValues, keep the "*Ptr"
+ interfaces for those operations on real pointers, and go through all
+ the JIT compiler code to correct the usage.
+
+        This is the second part of the work, i.e., to correct the usage of the
+ new MacroAssembler interfaces in the JIT compilers, which also means
+ that now EncodedJSValue is defined as a 64-bit integer, and the "*64"
+ interfaces are used for it.
+
+ * assembler/MacroAssembler.h: JSValue immediates should be in Imm64 instead of ImmPtr.
+ (MacroAssembler):
+ (JSC::MacroAssembler::shouldBlind):
+ * dfg/DFGAssemblyHelpers.cpp: Correct the JIT compilers usage of the new interfaces.
+ (JSC::DFG::AssemblyHelpers::jitAssertIsInt32):
+ (JSC::DFG::AssemblyHelpers::jitAssertIsJSInt32):
+ (JSC::DFG::AssemblyHelpers::jitAssertIsJSNumber):
+ (JSC::DFG::AssemblyHelpers::jitAssertIsJSDouble):
+ (JSC::DFG::AssemblyHelpers::jitAssertIsCell):
+ * dfg/DFGAssemblyHelpers.h:
+ (JSC::DFG::AssemblyHelpers::emitPutToCallFrameHeader):
+ (JSC::DFG::AssemblyHelpers::branchIfNotCell):
+ (JSC::DFG::AssemblyHelpers::debugCall):
+ (JSC::DFG::AssemblyHelpers::boxDouble):
+ (JSC::DFG::AssemblyHelpers::unboxDouble):
+ (JSC::DFG::AssemblyHelpers::emitExceptionCheck):
+ * dfg/DFGCCallHelpers.h:
+ (JSC::DFG::CCallHelpers::setupArgumentsWithExecState):
+ (CCallHelpers):
+ * dfg/DFGOSRExitCompiler64.cpp:
+ (JSC::DFG::OSRExitCompiler::compileExit):
+ * dfg/DFGRepatch.cpp:
+ (JSC::DFG::generateProtoChainAccessStub):
+ (JSC::DFG::tryCacheGetByID):
+ (JSC::DFG::tryBuildGetByIDList):
+ (JSC::DFG::emitPutReplaceStub):
+ (JSC::DFG::emitPutTransitionStub):
+ * dfg/DFGScratchRegisterAllocator.h:
+ (JSC::DFG::ScratchRegisterAllocator::preserveUsedRegistersToScratchBuffer):
+ (JSC::DFG::ScratchRegisterAllocator::restoreUsedRegistersFromScratchBuffer):
+ * dfg/DFGSilentRegisterSavePlan.h:
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::checkArgumentTypes):
+ (JSC::DFG::SpeculativeJIT::compileValueToInt32):
+ (JSC::DFG::SpeculativeJIT::compileInt32ToDouble):
+ (JSC::DFG::SpeculativeJIT::compileInstanceOfForObject):
+ (JSC::DFG::SpeculativeJIT::compileInstanceOf):
+ (JSC::DFG::SpeculativeJIT::compileStrictEqForConstant):
+ (JSC::DFG::SpeculativeJIT::compileGetByValOnArguments):
+ * dfg/DFGSpeculativeJIT.h:
+ (SpeculativeJIT):
+ (JSC::DFG::SpeculativeJIT::silentSavePlanForGPR):
+ (JSC::DFG::SpeculativeJIT::silentSpill):
+ (JSC::DFG::SpeculativeJIT::silentFill):
+ (JSC::DFG::SpeculativeJIT::spill):
+ (JSC::DFG::SpeculativeJIT::valueOfJSConstantAsImm64):
+ (JSC::DFG::SpeculativeJIT::callOperation):
+ (JSC::DFG::SpeculativeJIT::branch64):
+ * dfg/DFGSpeculativeJIT64.cpp:
+ (JSC::DFG::SpeculativeJIT::fillInteger):
+ (JSC::DFG::SpeculativeJIT::fillDouble):
+ (JSC::DFG::SpeculativeJIT::fillJSValue):
+ (JSC::DFG::SpeculativeJIT::nonSpeculativeValueToNumber):
+ (JSC::DFG::SpeculativeJIT::nonSpeculativeValueToInt32):
+ (JSC::DFG::SpeculativeJIT::nonSpeculativeUInt32ToNumber):
+ (JSC::DFG::SpeculativeJIT::cachedGetById):
+ (JSC::DFG::SpeculativeJIT::cachedPutById):
+ (JSC::DFG::SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull):
+ (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranchNull):
+ (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranch):
+ (JSC::DFG::SpeculativeJIT::nonSpeculativeNonPeepholeCompare):
+ (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeStrictEq):
+ (JSC::DFG::SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq):
+ (JSC::DFG::SpeculativeJIT::emitCall):
+ (JSC::DFG::SpeculativeJIT::fillSpeculateIntInternal):
+ (JSC::DFG::SpeculativeJIT::fillSpeculateDouble):
+ (JSC::DFG::SpeculativeJIT::fillSpeculateCell):
+ (JSC::DFG::SpeculativeJIT::fillSpeculateBoolean):
+ (JSC::DFG::SpeculativeJIT::convertToDouble):
+ (JSC::DFG::SpeculativeJIT::compileObjectEquality):
+ (JSC::DFG::SpeculativeJIT::compileObjectToObjectOrOtherEquality):
+ (JSC::DFG::SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality):
+ (JSC::DFG::SpeculativeJIT::compileDoubleCompare):
+ (JSC::DFG::SpeculativeJIT::compileNonStringCellOrOtherLogicalNot):
+ (JSC::DFG::SpeculativeJIT::compileLogicalNot):
+ (JSC::DFG::SpeculativeJIT::emitNonStringCellOrOtherBranch):
+ (JSC::DFG::SpeculativeJIT::emitBranch):
+ (JSC::DFG::SpeculativeJIT::compileContiguousGetByVal):
+ (JSC::DFG::SpeculativeJIT::compileArrayStorageGetByVal):
+ (JSC::DFG::SpeculativeJIT::compileContiguousPutByVal):
+ (JSC::DFG::SpeculativeJIT::compileArrayStoragePutByVal):
+ (JSC::DFG::SpeculativeJIT::compile):
+ * dfg/DFGThunks.cpp:
+ (JSC::DFG::osrExitGenerationThunkGenerator):
+ (JSC::DFG::throwExceptionFromCallSlowPathGenerator):
+ (JSC::DFG::slowPathFor):
+ (JSC::DFG::virtualForThunkGenerator):
+ * interpreter/Interpreter.cpp:
+ (JSC::Interpreter::dumpRegisters):
+ * jit/JIT.cpp:
+ (JSC::JIT::privateCompile):
+ * jit/JIT.h:
+ (JIT):
+ * jit/JITArithmetic.cpp:
+ (JSC::JIT::emit_op_negate):
+ (JSC::JIT::emitSlow_op_negate):
+ (JSC::JIT::emit_op_rshift):
+ (JSC::JIT::emitSlow_op_urshift):
+ (JSC::JIT::emit_compareAndJumpSlow):
+ (JSC::JIT::emit_op_bitand):
+ (JSC::JIT::compileBinaryArithOpSlowCase):
+ (JSC::JIT::emit_op_div):
+ * jit/JITCall.cpp:
+ (JSC::JIT::compileLoadVarargs):
+ (JSC::JIT::compileCallEval):
+ (JSC::JIT::compileCallEvalSlowCase):
+ (JSC::JIT::compileOpCall):
+ * jit/JITInlineMethods.h: Have some clean-up work as well.
+ (JSC):
+ (JSC::JIT::emitPutCellToCallFrameHeader):
+ (JSC::JIT::emitPutIntToCallFrameHeader):
+ (JSC::JIT::emitPutToCallFrameHeader):
+ (JSC::JIT::emitGetFromCallFrameHeader32):
+ (JSC::JIT::emitGetFromCallFrameHeader64):
+ (JSC::JIT::emitAllocateJSArray):
+ (JSC::JIT::emitValueProfilingSite):
+ (JSC::JIT::emitGetJITStubArg):
+ (JSC::JIT::emitGetVirtualRegister):
+ (JSC::JIT::emitPutVirtualRegister):
+ (JSC::JIT::emitInitRegister):
+ (JSC::JIT::emitJumpIfJSCell):
+ (JSC::JIT::emitJumpIfBothJSCells):
+ (JSC::JIT::emitJumpIfNotJSCell):
+ (JSC::JIT::emitLoadInt32ToDouble):
+ (JSC::JIT::emitJumpIfImmediateInteger):
+ (JSC::JIT::emitJumpIfNotImmediateInteger):
+ (JSC::JIT::emitJumpIfNotImmediateIntegers):
+ (JSC::JIT::emitFastArithReTagImmediate):
+ (JSC::JIT::emitFastArithIntToImmNoCheck):
+ * jit/JITOpcodes.cpp:
+ (JSC::JIT::privateCompileCTINativeCall):
+ (JSC::JIT::emit_op_mov):
+ (JSC::JIT::emit_op_instanceof):
+ (JSC::JIT::emit_op_is_undefined):
+ (JSC::JIT::emit_op_is_boolean):
+ (JSC::JIT::emit_op_is_number):
+ (JSC::JIT::emit_op_tear_off_activation):
+ (JSC::JIT::emit_op_not):
+ (JSC::JIT::emit_op_jfalse):
+ (JSC::JIT::emit_op_jeq_null):
+ (JSC::JIT::emit_op_jneq_null):
+ (JSC::JIT::emit_op_jtrue):
+ (JSC::JIT::emit_op_bitxor):
+ (JSC::JIT::emit_op_bitor):
+ (JSC::JIT::emit_op_get_pnames):
+ (JSC::JIT::emit_op_next_pname):
+ (JSC::JIT::compileOpStrictEq):
+ (JSC::JIT::emit_op_catch):
+ (JSC::JIT::emit_op_throw_reference_error):
+ (JSC::JIT::emit_op_eq_null):
+ (JSC::JIT::emit_op_neq_null):
+ (JSC::JIT::emit_op_create_activation):
+ (JSC::JIT::emit_op_create_arguments):
+ (JSC::JIT::emit_op_init_lazy_reg):
+ (JSC::JIT::emitSlow_op_convert_this):
+ (JSC::JIT::emitSlow_op_not):
+ (JSC::JIT::emit_op_get_argument_by_val):
+ (JSC::JIT::emit_op_put_to_base):
+ (JSC::JIT::emit_resolve_operations):
+ * jit/JITPropertyAccess.cpp:
+ (JSC::JIT::emit_op_get_by_val):
+ (JSC::JIT::emitContiguousGetByVal):
+ (JSC::JIT::emitArrayStorageGetByVal):
+ (JSC::JIT::emitSlow_op_get_by_val):
+ (JSC::JIT::compileGetDirectOffset):
+ (JSC::JIT::emit_op_get_by_pname):
+ (JSC::JIT::emitContiguousPutByVal):
+ (JSC::JIT::emitArrayStoragePutByVal):
+ (JSC::JIT::compileGetByIdHotPath):
+ (JSC::JIT::emit_op_put_by_id):
+ (JSC::JIT::compilePutDirectOffset):
+ (JSC::JIT::emit_op_init_global_const):
+ (JSC::JIT::emit_op_init_global_const_check):
+ (JSC::JIT::emitIntTypedArrayGetByVal):
+ (JSC::JIT::emitFloatTypedArrayGetByVal):
+ (JSC::JIT::emitFloatTypedArrayPutByVal):
+ * jit/JITStubCall.h:
+ (JITStubCall):
+ (JSC::JITStubCall::JITStubCall):
+ (JSC::JITStubCall::addArgument):
+ (JSC::JITStubCall::call):
+ (JSC::JITStubCall::callWithValueProfiling):
+ * jit/JSInterfaceJIT.h:
+ (JSC::JSInterfaceJIT::emitJumpIfImmediateNumber):
+ (JSC::JSInterfaceJIT::emitJumpIfNotImmediateNumber):
+ (JSC::JSInterfaceJIT::emitLoadJSCell):
+ (JSC::JSInterfaceJIT::emitLoadInt32):
+ (JSC::JSInterfaceJIT::emitLoadDouble):
+ * jit/SpecializedThunkJIT.h:
+ (JSC::SpecializedThunkJIT::returnDouble):
+ (JSC::SpecializedThunkJIT::tagReturnAsInt32):
+ * runtime/JSValue.cpp:
+ (JSC::JSValue::description):
+ * runtime/JSValue.h: Define JSVALUE64 EncodedJSValue as int64_t, which is also unified with JSVALUE32_64.
+ (JSC):
+ * runtime/JSValueInlineMethods.h: New implementation of some JSValue methods to make them more conformant
+ with the new rule that "JSValue is a 64-bit integer rather than a pointer" for JSVALUE64 platforms.
+ (JSC):
+ (JSC::JSValue::JSValue):
+ (JSC::JSValue::operator bool):
+ (JSC::JSValue::operator==):
+ (JSC::JSValue::operator!=):
+ (JSC::reinterpretDoubleToInt64):
+ (JSC::reinterpretInt64ToDouble):
+ (JSC::JSValue::asDouble):
+
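An illustrative sketch of why the two interface families must diverge (toy free functions, not the real MacroAssembler methods): on a hypothetical JSVALUE64 target with 32-bit pointers, such as x32, a JSValue payload is still 64 bits wide, so pointer-sized operations would truncate it.

    #include <cstdint>

    typedef int64_t EncodedJSValue; // after this patch: a 64-bit integer, not void*

    struct Slots {
        EncodedJSValue value; // always 64 bits
        void* pointer;        // pointer-sized: 32 or 64 bits depending on the target
    };

    EncodedJSValue load64(const Slots& s) { return s.value; }  // "*64" interface
    void* loadPtr(const Slots& s) { return s.pointer; }        // "*Ptr" interface
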
+2012-10-18 Michael Saboff <msaboff@apple.com>
+
+ convertUTF8ToUTF16() Should Check for ASCII Input
+        https://bugs.webkit.org/show_bug.cgi?id=99739
+
+ Reviewed by Geoffrey Garen.
+
+        Using the updated convertUTF8ToUTF16(), we can determine if it makes more sense to
+        create a string using the 8-bit source. Added a new OpaqueJSString::create(LChar*, unsigned).
+        Had to add a cast in JSStringCreateWithCFString to differentiate which create() to call.
+
+ * API/JSStringRef.cpp:
+ (JSStringCreateWithUTF8CString):
+ * API/JSStringRefCF.cpp:
+ (JSStringCreateWithCFString):
+ * API/OpaqueJSString.h:
+ (OpaqueJSString::create):
+ (OpaqueJSString):
+ (OpaqueJSString::OpaqueJSString):
+
+2012-10-18 Oliver Hunt <oliver@apple.com>
+
+ Unbreak jsc tests. Last minute "clever"-ness is clearly just not
+ a good plan.
+
+ * dfg/DFGByteCodeParser.cpp:
+ (JSC::DFG::ByteCodeParser::parseBlock):
+
+2012-10-18 Oliver Hunt <oliver@apple.com>
+
+ Bytecode should not have responsibility for determining how to perform non-local resolves
+ https://bugs.webkit.org/show_bug.cgi?id=99349
+
+ Reviewed by Gavin Barraclough.
+
+        This patch removes lexical analysis from the bytecode generation. This allows
+        us to delay the lookup of a non-local variable until the lookup is actually
+        necessary, and simplifies a lot of the resolve logic in BytecodeGenerator.
+
+        Once a lookup is performed, we cache the lookup information in a set of out-of-line
+        buffers in CodeBlock. This allows subsequent lookups to avoid unnecessary hashing,
+        etc., and allows the respective JITs to recreate optimal lookup code.
+
+        This is currently still a performance regression in LLInt, but most of the remaining
+        regression is caused by a lot of indirection that I'll remove in future work, as well
+        as some work necessary to allow LLInt to perform inline instruction repatching.
+        We will also want to improve the behaviour of the baseline JIT for some of the lookup
+        operations; however, this patch was getting quite large already, so I'm landing it now
+        that we've reached the bar of "performance-neutral".
+
+ Basic browsing seems to work.
+
* GNUmakefile.list.am:
* JavaScriptCore.xcodeproj/project.pbxproj:
* bytecode/CodeBlock.cpp:
- (JSC):
- (JSC::isGlobalResolve):
- (JSC::instructionOffsetForNth):
- (JSC::printGlobalResolveInfo):
(JSC::CodeBlock::printStructures):
(JSC::CodeBlock::dump):
(JSC::CodeBlock::CodeBlock):
(JSC::CodeBlock::visitStructures):
+ (JSC):
(JSC::CodeBlock::finalizeUnconditionally):
- (JSC::CodeBlock::hasGlobalResolveInfoAtBytecodeOffset):
- (JSC::CodeBlock::globalResolveInfoForBytecodeOffset):
(JSC::CodeBlock::shrinkToFit):
* bytecode/CodeBlock.h:
+ (JSC::CodeBlock::addResolve):
+ (JSC::CodeBlock::addPutToBase):
(CodeBlock):
- (JSC::CodeBlock::addGlobalResolveInstruction):
- (JSC::CodeBlock::addGlobalResolveInfo):
- (JSC::CodeBlock::globalResolveInfo):
- (JSC::CodeBlock::numberOfGlobalResolveInfos):
- (JSC::CodeBlock::globalResolveInfoCount):
+ (JSC::CodeBlock::resolveOperations):
+ (JSC::CodeBlock::putToBaseOperation):
+ (JSC::CodeBlock::numberOfResolveOperations):
+ (JSC::CodeBlock::numberOfPutToBaseOperations):
+ (JSC::CodeBlock::addPropertyAccessInstruction):
+ (JSC::CodeBlock::globalObjectConstant):
+ (JSC::CodeBlock::setGlobalObjectConstant):
* bytecode/Opcode.h:
(JSC):
(JSC::padOpcodeName):
* bytecode/ResolveGlobalStatus.cpp:
- (JSC):
(JSC::computeForStructure):
- (JSC::computeForLLInt):
(JSC::ResolveGlobalStatus::computeFor):
* bytecode/ResolveGlobalStatus.h:
(JSC):
(ResolveGlobalStatus):
* bytecompiler/BytecodeGenerator.cpp:
(JSC::ResolveResult::checkValidity):
- (JSC::ResolveResult::registerPointer):
(JSC):
(JSC::BytecodeGenerator::BytecodeGenerator):
(JSC::BytecodeGenerator::resolve):
@@ -66,30 +634,29 @@
(JSC::BytecodeGenerator::emitResolve):
(JSC::BytecodeGenerator::emitResolveBase):
(JSC::BytecodeGenerator::emitResolveBaseForPut):
- (JSC::BytecodeGenerator::emitResolveWithBase):
+ (JSC::BytecodeGenerator::emitResolveWithBaseForPut):
(JSC::BytecodeGenerator::emitResolveWithThis):
- (JSC::BytecodeGenerator::emitGetStaticVar):
+ (JSC::BytecodeGenerator::emitGetLocalVar):
(JSC::BytecodeGenerator::emitInitGlobalConst):
- (JSC::BytecodeGenerator::emitPutStaticVar):
+ (JSC::BytecodeGenerator::emitPutToBase):
* bytecompiler/BytecodeGenerator.h:
(JSC::ResolveResult::registerResolve):
(JSC::ResolveResult::dynamicResolve):
- (JSC::ResolveResult::lexicalResolve):
- (JSC::ResolveResult::indexedGlobalResolve):
- (JSC::ResolveResult::dynamicIndexedGlobalResolve):
- (JSC::ResolveResult::globalResolve):
- (JSC::ResolveResult::dynamicGlobalResolve):
- (JSC::ResolveResult::type):
- (JSC::ResolveResult::index):
- (JSC::ResolveResult::depth):
- (JSC::ResolveResult::globalObject):
(ResolveResult):
- (JSC::ResolveResult::isStatic):
- (JSC::ResolveResult::isIndexed):
- (JSC::ResolveResult::isScoped):
- (JSC::ResolveResult::isGlobal):
(JSC::ResolveResult::ResolveResult):
+ (JSC):
+ (NonlocalResolveInfo):
+ (JSC::NonlocalResolveInfo::NonlocalResolveInfo):
+ (JSC::NonlocalResolveInfo::~NonlocalResolveInfo):
+ (JSC::NonlocalResolveInfo::resolved):
+ (JSC::NonlocalResolveInfo::put):
(BytecodeGenerator):
+ (JSC::BytecodeGenerator::getResolveOperations):
+ (JSC::BytecodeGenerator::getResolveWithThisOperations):
+ (JSC::BytecodeGenerator::getResolveBaseOperations):
+ (JSC::BytecodeGenerator::getResolveBaseForPutOperations):
+ (JSC::BytecodeGenerator::getResolveWithBaseForPutOperations):
+ (JSC::BytecodeGenerator::getPutToBaseOperation):
* bytecompiler/NodesCodegen.cpp:
(JSC::ResolveNode::isPure):
(JSC::FunctionCallResolveNode::emitBytecode):
@@ -105,18 +672,25 @@
(ByteCodeParser):
(InlineStackEntry):
(JSC::DFG::ByteCodeParser::handleGetByOffset):
+ (DFG):
+ (JSC::DFG::ByteCodeParser::parseResolveOperations):
(JSC::DFG::ByteCodeParser::parseBlock):
(JSC::DFG::ByteCodeParser::InlineStackEntry::InlineStackEntry):
* dfg/DFGCapabilities.h:
+ (JSC::DFG::canInlineResolveOperations):
(DFG):
(JSC::DFG::canCompileOpcode):
(JSC::DFG::canInlineOpcode):
* dfg/DFGGraph.h:
(ResolveGlobalData):
+ (ResolveOperationData):
(DFG):
+ (PutToBaseOperationData):
(Graph):
* dfg/DFGNode.h:
(JSC::DFG::Node::hasIdentifier):
+ (JSC::DFG::Node::resolveOperationsDataIndex):
+ (Node):
* dfg/DFGNodeType.h:
(DFG):
* dfg/DFGOSRExit.cpp:
@@ -130,7 +704,6 @@
(JSC::DFG::OSRExitCompiler::compileExit):
* dfg/DFGOperations.cpp:
* dfg/DFGOperations.h:
- (JSC):
* dfg/DFGPredictionPropagationPhase.cpp:
(JSC::DFG::PredictionPropagationPhase::propagate):
* dfg/DFGRepatch.cpp:
@@ -138,6 +711,9 @@
* dfg/DFGSpeculativeJIT.cpp:
(JSC::DFG::SpeculativeJIT::convertLastOSRExitToForward):
* dfg/DFGSpeculativeJIT.h:
+ (JSC::DFG::SpeculativeJIT::resolveOperations):
+ (SpeculativeJIT):
+ (JSC::DFG::SpeculativeJIT::putToBaseOperation):
(JSC::DFG::SpeculativeJIT::callOperation):
* dfg/DFGSpeculativeJIT32_64.cpp:
(JSC::DFG::SpeculativeJIT::compile):
@@ -150,43 +726,31 @@
(JSC::JIT::privateCompileSlowCases):
* jit/JIT.h:
(JIT):
- (JSC::JIT::emit_op_get_global_var_watchable):
* jit/JITOpcodes.cpp:
- (JSC::JIT::emit_op_resolve):
+ (JSC::JIT::emit_op_put_to_base):
(JSC):
+ (JSC::JIT::emit_resolve_operations):
+ (JSC::JIT::emitSlow_link_resolve_operations):
+ (JSC::JIT::emit_op_resolve):
+ (JSC::JIT::emitSlow_op_resolve):
(JSC::JIT::emit_op_resolve_base):
- (JSC::JIT::emit_op_resolve_skip):
- (JSC::JIT::emit_op_resolve_global):
- (JSC::JIT::emitSlow_op_resolve_global):
+ (JSC::JIT::emitSlow_op_resolve_base):
(JSC::JIT::emit_op_resolve_with_base):
+ (JSC::JIT::emitSlow_op_resolve_with_base):
(JSC::JIT::emit_op_resolve_with_this):
- (JSC::JIT::emit_op_resolve_global_dynamic):
- (JSC::JIT::emitSlow_op_resolve_global_dynamic):
+ (JSC::JIT::emitSlow_op_resolve_with_this):
+ (JSC::JIT::emitSlow_op_put_to_base):
* jit/JITOpcodes32_64.cpp:
- (JSC::JIT::emit_op_resolve):
+ (JSC::JIT::emit_op_put_to_base):
(JSC):
- (JSC::JIT::emit_op_resolve_base):
- (JSC::JIT::emit_op_resolve_skip):
- (JSC::JIT::emit_op_resolve_global):
- (JSC::JIT::emitSlow_op_resolve_global):
- (JSC::JIT::emit_op_resolve_with_base):
- (JSC::JIT::emit_op_resolve_with_this):
* jit/JITPropertyAccess.cpp:
- (JSC::JIT::emit_op_get_scoped_var):
- (JSC):
- (JSC::JIT::emit_op_put_scoped_var):
- (JSC::JIT::emit_op_get_global_var):
- (JSC::JIT::emit_op_put_global_var):
- (JSC::JIT::emit_op_put_global_var_check):
- (JSC::JIT::emitSlow_op_put_global_var_check):
+ (JSC::JIT::emit_op_init_global_const):
+ (JSC::JIT::emit_op_init_global_const_check):
+ (JSC::JIT::emitSlow_op_init_global_const_check):
* jit/JITPropertyAccess32_64.cpp:
- (JSC::JIT::emit_op_get_scoped_var):
- (JSC):
- (JSC::JIT::emit_op_put_scoped_var):
- (JSC::JIT::emit_op_get_global_var):
- (JSC::JIT::emit_op_put_global_var):
- (JSC::JIT::emit_op_put_global_var_check):
- (JSC::JIT::emitSlow_op_put_global_var_check):
+ (JSC::JIT::emit_op_init_global_const):
+ (JSC::JIT::emit_op_init_global_const_check):
+ (JSC::JIT::emitSlow_op_init_global_const_check):
* jit/JITStubs.cpp:
(JSC::DEFINE_STUB_FUNCTION):
(JSC):
@@ -200,18 +764,163 @@
* llint/LowLevelInterpreter32_64.asm:
* llint/LowLevelInterpreter64.asm:
* runtime/JSScope.cpp:
+ (JSC::LookupResult::base):
+ (JSC::LookupResult::value):
+ (JSC::LookupResult::setBase):
+ (JSC::LookupResult::setValue):
+ (LookupResult):
+ (JSC):
+ (JSC::setPutPropertyAccessOffset):
+ (JSC::executeResolveOperations):
+ (JSC::JSScope::resolveContainingScopeInternal):
+ (JSC::JSScope::resolveContainingScope):
(JSC::JSScope::resolve):
- (JSC::JSScope::resolveSkip):
- (JSC::JSScope::resolveGlobal):
- (JSC::JSScope::resolveGlobalDynamic):
(JSC::JSScope::resolveBase):
(JSC::JSScope::resolveWithBase):
(JSC::JSScope::resolveWithThis):
+ (JSC::JSScope::resolvePut):
+ (JSC::JSScope::resolveGlobal):
* runtime/JSScope.h:
(JSScope):
* runtime/JSVariableObject.cpp:
+ (JSC):
* runtime/JSVariableObject.h:
+ (JSVariableObject):
* runtime/Structure.h:
+ (JSC::Structure::propertyAccessesAreCacheable):
+ (Structure):
+
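A self-contained sketch of the out-of-line caching idea from the entry above (names and structure assumed): the first execution walks the scope chain and records how the variable was found; later executions replay the record without re-hashing the identifier. A real cache must also validate that the scope chain's shape has not changed.

    #include <cstddef>
    #include <map>
    #include <string>
    #include <vector>

    typedef std::map<std::string, int> Scope; // toy scope: name -> register index

    struct CachedResolve {
        bool resolved;
        int scopesToSkip;   // recorded on the first lookup
        int registerIndex;
        CachedResolve() : resolved(false), scopesToSkip(0), registerIndex(0) { }
    };

    int resolve(const std::vector<Scope>& chain, const std::string& name, CachedResolve& cache)
    {
        if (cache.resolved)             // replay: no hashing, no scope walk
            return cache.registerIndex;
        for (std::size_t i = 0; i < chain.size(); ++i) {
            Scope::const_iterator it = chain[i].find(name);
            if (it == chain[i].end())
                continue;               // this scope gets skipped next time too
            cache.resolved = true;
            cache.scopesToSkip = static_cast<int>(i);
            cache.registerIndex = it->second;
            return it->second;
        }
        return -1; // not found: fall back to a fully dynamic lookup
    }
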
+2012-10-18 Mark Hahnenberg <mhahnenberg@apple.com>
+
+ Live oversize copied blocks should count toward overall heap fragmentation
+ https://bugs.webkit.org/show_bug.cgi?id=99548
+
+ Reviewed by Filip Pizlo.
+
+ The CopiedSpace uses overall heap fragmentation to determine whether or not it should do any copying.
+ Currently it doesn't include live oversize CopiedBlocks in the calculation, but it should. We should
+ treat them as 100% utilized, since running a copying phase won't be able to free/compact any of their
+ memory. We can also free any dead oversize CopiedBlocks while we're iterating over them, rather than
+ iterating over them again at the end of the copying phase.
+
+ * heap/CopiedSpace.cpp:
+ (JSC::CopiedSpace::doneFillingBlock):
+ (JSC::CopiedSpace::startedCopying):
+ (JSC::CopiedSpace::doneCopying): Also removed a branch when iterating over from-space at the end of
+ copying. Since we eagerly recycle blocks as soon as they're fully evacuated, we should see no
+ unpinned blocks in from-space at the end of copying.
+ * heap/CopiedSpaceInlineMethods.h:
+ (JSC::CopiedSpace::recycleBorrowedBlock):
+ * heap/CopyVisitorInlineMethods.h:
+ (JSC::CopyVisitor::checkIfShouldCopy):
+
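A sketch of the utilization test implied above (threshold and names assumed): live oversize blocks cannot be compacted, so they count as fully utilized on both sides of the ratio when deciding whether a copying phase is worthwhile.

    #include <cstddef>

    bool shouldDoCopyingPhase(std::size_t blockBytesInUse, std::size_t blockCapacity,
                              std::size_t liveOversizeBytes)
    {
        std::size_t utilized = blockBytesInUse + liveOversizeBytes; // oversize: 100% utilized
        std::size_t capacity = blockCapacity + liveOversizeBytes;
        // Copy only when utilization is low (fragmentation is high);
        // 50% is an assumed threshold.
        return utilized * 2 < capacity;
    }
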
+2012-10-18 Roger Fong <roger_fong@apple.com>
+
+ Unreviewed. Build fix after r131701 and r131777.
+
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def:
+
+2012-10-18 Mark Hahnenberg <mhahnenberg@apple.com>
+
+ Race condition between GCThread and main thread during copying phase
+ https://bugs.webkit.org/show_bug.cgi?id=99641
+
+ Reviewed by Filip Pizlo.
+
+ When a GCThread returns from copyFromShared(), it then calls doneCopying(), which returns
+ its borrowed CopiedBlock to the CopiedSpace. This final block allows the CopiedSpace to
+ continue and finish the cleanup of the copying phase. However, the GCThread can loop back
+ around, see that m_currentPhase is still "Copy", and try to go through the copying phase again.
+ This can cause all sorts of issues. To fix this, we should add a cyclic barrier to GCThread::waitForNextPhase().
+
+ * heap/GCThread.cpp:
+ (JSC::GCThread::waitForNextPhase): All GCThreads will wait when they finish one iteration until the main thread
+ notifies them to move down to the second while loop, where they wait for the next GCPhase to start. They also
+ decrement the m_numberOfActiveGCThreads counter as they begin to wait for the next phase and increment it as
+ they enter the next phase. This allows the main thread to wait in endCurrentPhase() until all the threads have
+ finished the current phase and are waiting on the next phase to begin. Without the counter, there would be
+ no way to ensure that every thread was available for each GCPhase.
+ (JSC::GCThread::gcThreadMain): We now use the m_phaseLock to synchronize with the main thread when we're being created.
+ * heap/GCThreadSharedData.cpp:
+ (JSC::GCThreadSharedData::GCThreadSharedData): As we create each GCThread, we increment the m_numberOfActiveGCThreads
+ counter. When we are done creating the threads, we wait until they're all waiting for the next GCPhase. This prevents
+ us from leaving some GCThreads behind during the first GCPhase, which could hurt us on our very short-running
+ benchmarks (e.g. SunSpider).
+ (JSC::GCThreadSharedData::~GCThreadSharedData):
+ (JSC::GCThreadSharedData::startNextPhase): We atomically swap the two flags, m_gcThreadsShouldWait and m_currentPhase,
+ so that if the threads finish very quickly, they will wait until the main thread is ready to end the current phase.
+ (JSC::GCThreadSharedData::endCurrentPhase): Here atomically we swap the two flags again to allow the threads to
+ advance to waiting on the next GCPhase. We wait until all of the GCThreads have settled into the second wait loop
+ before allowing the main thread to continue. This prevents us from leaving one of the GCThreads stuck in the first
+ wait loop if we were to call startNextPhase() before it had time to wake up and move on to the second wait loop.
+ (JSC):
+ (JSC::GCThreadSharedData::didStartMarking): We now use startNextPhase() to properly swap the flags.
+ (JSC::GCThreadSharedData::didFinishMarking): Ditto for endCurrentPhase().
+ (JSC::GCThreadSharedData::didStartCopying): Ditto.
+ (JSC::GCThreadSharedData::didFinishCopying): Ditto.
+ * heap/GCThreadSharedData.h:
+ (GCThreadSharedData):
+ * heap/Heap.cpp:
+ (JSC::Heap::copyBackingStores): No reason to use the extra reference.
+
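A minimal sketch of the two-stage wait described above, using std:: primitives instead of WTF's (simplified; the real code also swaps the flags atomically from the main thread): a worker first waits for the main thread to end the current phase, then waits for the next phase to start, so it can never loop back into the phase it just finished.

    #include <condition_variable>
    #include <mutex>

    struct PhaseBarrierSketch {
        std::mutex lock;
        std::condition_variable cond;
        bool gcThreadsShouldWait = true;
        int currentPhase = 0;            // 0 == NoPhase
        int numberOfActiveGCThreads = 0;

        // Called by a GC thread when it finishes a phase; the main thread's
        // endCurrentPhase() can wait until numberOfActiveGCThreads drops to 0.
        int waitForNextPhase()
        {
            std::unique_lock<std::mutex> guard(lock);
            --numberOfActiveGCThreads;
            cond.notify_all();
            // Stage 1: wait until the main thread ends the current phase...
            cond.wait(guard, [this] { return !gcThreadsShouldWait; });
            // Stage 2: ...then wait for the next phase to begin.
            cond.wait(guard, [this] { return currentPhase != 0; });
            ++numberOfActiveGCThreads;
            return currentPhase;
        }
    };
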
+2012-10-18 Pablo Flouret <pablof@motorola.com>
+
+ Implement css3-conditional's @supports rule
+ https://bugs.webkit.org/show_bug.cgi?id=86146
+
+ Reviewed by Antti Koivisto.
+
+ * Configurations/FeatureDefines.xcconfig:
+ Add an ENABLE_CSS3_CONDITIONAL_RULES flag.
+
+2012-10-18 Michael Saboff <msaboff@apple.com>
+
+ Make conversion between JSStringRef and WKStringRef work without character size conversions
+ https://bugs.webkit.org/show_bug.cgi?id=99727
+
+ Reviewed by Anders Carlsson.
+
+ Export the string() method for use in WebKit.
+
+ * API/OpaqueJSString.h:
+ (OpaqueJSString::string):
+
+2012-10-18 Raphael Kubo da Costa <raphael.kubo.da.costa@intel.com>
+
+ [CMake] Avoid unnecessarily running the LLInt generation commands.
+ https://bugs.webkit.org/show_bug.cgi?id=99708
+
+ Reviewed by Rob Buis.
+
+ As described in the comments in the change itself, in some cases
+ the Ruby generation scripts used when LLInt is on would each be
+ run twice in every build even if nothing had changed.
+
+ Fix that by not setting the OBJECT_DEPENDS property of some source
+ files to depend on the generated headers; instead, they are now
+ just part of the final binaries/libraries which use them.
+
+ * CMakeLists.txt:
+
+2012-10-17 Zoltan Horvath <zoltan@webkit.org>
+
+        Remove the JSHeap memory measurement of the PageLoad performance tests since it creates bogus JSGlobalDatas
+ https://bugs.webkit.org/show_bug.cgi?id=99609
+
+ Reviewed by Ryosuke Niwa.
+
+ Remove the implementation since it creates bogus JSGlobalDatas in the layout tests.
+
+ * heap/HeapStatistics.cpp:
+ (JSC):
+ * heap/HeapStatistics.h:
+ (HeapStatistics):
+
+2012-10-17 Sam Weinig <sam@webkit.org>
+
+ Attempt to fix the build.
+
+ * bytecode/GlobalResolveInfo.h: Copied from bytecode/GlobalResolveInfo.h.
2012-10-17 Filip Pizlo <fpizlo@apple.com>
diff --git a/Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig b/Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig
index 19d4b4037..79a458eca 100644
--- a/Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig
+++ b/Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig
@@ -47,7 +47,8 @@ ENABLE_CSS_SHADERS = ENABLE_CSS_SHADERS;
ENABLE_CSS_COMPOSITING = ENABLE_CSS_COMPOSITING;
ENABLE_CSS_STICKY_POSITION = ENABLE_CSS_STICKY_POSITION;
ENABLE_CSS_VARIABLES = ;
-ENABLE_CSS3_TEXT_DECORATION = ;
+ENABLE_CSS3_CONDITIONAL_RULES = ;
+ENABLE_CSS3_TEXT = ;
ENABLE_CUSTOM_SCHEME_HANDLER = ;
ENABLE_DASHBOARD_SUPPORT = $(ENABLE_DASHBOARD_SUPPORT_$(REAL_PLATFORM_NAME));
ENABLE_DASHBOARD_SUPPORT_macosx = ENABLE_DASHBOARD_SUPPORT;
@@ -147,4 +148,4 @@ ENABLE_WEB_TIMING = ;
ENABLE_WORKERS = ENABLE_WORKERS;
ENABLE_XSLT = ENABLE_XSLT;
-FEATURE_DEFINES = $(ENABLE_3D_RENDERING) $(ENABLE_ACCELERATED_2D_CANVAS) $(ENABLE_ANIMATION_API) $(ENABLE_BLOB) $(ENABLE_CHANNEL_MESSAGING) $(ENABLE_CSP_NEXT) $(ENABLE_CSS_BOX_DECORATION_BREAK) $(ENABLE_CSS_EXCLUSIONS) $(ENABLE_CSS_FILTERS) $(ENABLE_CSS_HIERARCHIES) $(ENABLE_CSS_IMAGE_ORIENTATION) $(ENABLE_CSS_IMAGE_RESOLUTION) $(ENABLE_CSS_REGIONS) $(ENABLE_CSS_SHADERS) $(ENABLE_CSS_COMPOSITING) $(ENABLE_CSS_STICKY_POSITION) $(ENABLE_CSS_VARIABLES) $(ENABLE_CSS3_TEXT_DECORATION) $(ENABLE_CUSTOM_SCHEME_HANDLER) $(ENABLE_DASHBOARD_SUPPORT) $(ENABLE_DATALIST_ELEMENT) $(ENABLE_DATA_TRANSFER_ITEMS) $(ENABLE_DETAILS_ELEMENT) $(ENABLE_DEVICE_ORIENTATION) $(ENABLE_DIALOG_ELEMENT) $(ENABLE_DIRECTORY_UPLOAD) $(ENABLE_DRAGGABLE_REGION) $(ENABLE_ENCRYPTED_MEDIA) $(ENABLE_FILE_SYSTEM) $(ENABLE_FILTERS) $(ENABLE_FULLSCREEN_API) $(ENABLE_GAMEPAD) $(ENABLE_GEOLOCATION) $(ENABLE_HIDDEN_PAGE_DOM_TIMER_THROTTLING) $(ENABLE_HIGH_DPI_CANVAS) $(ENABLE_ICONDATABASE) $(ENABLE_IFRAME_SEAMLESS) $(ENABLE_INDEXED_DATABASE) $(ENABLE_INPUT_SPEECH) $(ENABLE_INPUT_TYPE_COLOR) $(ENABLE_INPUT_TYPE_DATE) $(ENABLE_INPUT_TYPE_DATETIME) $(ENABLE_INPUT_TYPE_DATETIMELOCAL) $(ENABLE_INPUT_TYPE_MONTH) $(ENABLE_INPUT_TYPE_TIME) $(ENABLE_INPUT_TYPE_WEEK) $(ENABLE_JAVASCRIPT_DEBUGGER) $(ENABLE_LEGACY_CSS_VENDOR_PREFIXES) $(ENABLE_LEGACY_NOTIFICATIONS) $(ENABLE_LEGACY_VENDOR_PREFIXES) $(ENABLE_LEGACY_WEB_AUDIO) $(ENABLE_LINK_PREFETCH) $(ENABLE_LINK_PRERENDER) $(ENABLE_MATHML) $(ENABLE_MEDIA_SOURCE) $(ENABLE_MEDIA_STATISTICS) $(ENABLE_METER_ELEMENT) $(ENABLE_MHTML) $(ENABLE_MICRODATA) $(ENABLE_MUTATION_OBSERVERS) $(ENABLE_NAVIGATOR_CONTENT_UTILS) $(ENABLE_NOTIFICATIONS) $(ENABLE_PAGE_VISIBILITY_API) $(ENABLE_PROGRESS_ELEMENT) $(ENABLE_QUOTA) $(ENABLE_REQUEST_ANIMATION_FRAME) $(ENABLE_SCRIPTED_SPEECH) $(ENABLE_SHADOW_DOM) $(ENABLE_SHARED_WORKERS) $(ENABLE_SQL_DATABASE) $(ENABLE_STYLE_SCOPED) $(ENABLE_SVG) $(ENABLE_SVG_DOM_OBJC_BINDINGS) $(ENABLE_SVG_FONTS) $(ENABLE_TEXT_AUTOSIZING) $(ENABLE_TEXT_NOTIFICATIONS_ONLY) $(ENABLE_TOUCH_ICON_LOADING) $(ENABLE_UNDO_MANAGER) $(ENABLE_VIDEO) $(ENABLE_VIDEO_TRACK) $(ENABLE_WEBGL) $(ENABLE_WEB_AUDIO) $(ENABLE_WEB_SOCKETS) $(ENABLE_WEB_TIMING) $(ENABLE_WORKERS) $(ENABLE_XSLT);
+FEATURE_DEFINES = $(ENABLE_3D_RENDERING) $(ENABLE_ACCELERATED_2D_CANVAS) $(ENABLE_ANIMATION_API) $(ENABLE_BLOB) $(ENABLE_CHANNEL_MESSAGING) $(ENABLE_CSP_NEXT) $(ENABLE_CSS_BOX_DECORATION_BREAK) $(ENABLE_CSS_EXCLUSIONS) $(ENABLE_CSS_FILTERS) $(ENABLE_CSS_HIERARCHIES) $(ENABLE_CSS_IMAGE_ORIENTATION) $(ENABLE_CSS_IMAGE_RESOLUTION) $(ENABLE_CSS_REGIONS) $(ENABLE_CSS_SHADERS) $(ENABLE_CSS_COMPOSITING) $(ENABLE_CSS_STICKY_POSITION) $(ENABLE_CSS_VARIABLES) $(ENABLE_CSS3_CONDITIONAL_RULES) $(ENABLE_CSS3_TEXT) $(ENABLE_CUSTOM_SCHEME_HANDLER) $(ENABLE_DASHBOARD_SUPPORT) $(ENABLE_DATALIST_ELEMENT) $(ENABLE_DATA_TRANSFER_ITEMS) $(ENABLE_DETAILS_ELEMENT) $(ENABLE_DEVICE_ORIENTATION) $(ENABLE_DIALOG_ELEMENT) $(ENABLE_DIRECTORY_UPLOAD) $(ENABLE_DRAGGABLE_REGION) $(ENABLE_ENCRYPTED_MEDIA) $(ENABLE_FILE_SYSTEM) $(ENABLE_FILTERS) $(ENABLE_FULLSCREEN_API) $(ENABLE_GAMEPAD) $(ENABLE_GEOLOCATION) $(ENABLE_HIDDEN_PAGE_DOM_TIMER_THROTTLING) $(ENABLE_HIGH_DPI_CANVAS) $(ENABLE_ICONDATABASE) $(ENABLE_IFRAME_SEAMLESS) $(ENABLE_INDEXED_DATABASE) $(ENABLE_INPUT_SPEECH) $(ENABLE_INPUT_TYPE_COLOR) $(ENABLE_INPUT_TYPE_DATE) $(ENABLE_INPUT_TYPE_DATETIME) $(ENABLE_INPUT_TYPE_DATETIMELOCAL) $(ENABLE_INPUT_TYPE_MONTH) $(ENABLE_INPUT_TYPE_TIME) $(ENABLE_INPUT_TYPE_WEEK) $(ENABLE_JAVASCRIPT_DEBUGGER) $(ENABLE_LEGACY_CSS_VENDOR_PREFIXES) $(ENABLE_LEGACY_NOTIFICATIONS) $(ENABLE_LEGACY_VENDOR_PREFIXES) $(ENABLE_LEGACY_WEB_AUDIO) $(ENABLE_LINK_PREFETCH) $(ENABLE_LINK_PRERENDER) $(ENABLE_MATHML) $(ENABLE_MEDIA_SOURCE) $(ENABLE_MEDIA_STATISTICS) $(ENABLE_METER_ELEMENT) $(ENABLE_MHTML) $(ENABLE_MICRODATA) $(ENABLE_MUTATION_OBSERVERS) $(ENABLE_NAVIGATOR_CONTENT_UTILS) $(ENABLE_NOTIFICATIONS) $(ENABLE_PAGE_VISIBILITY_API) $(ENABLE_PROGRESS_ELEMENT) $(ENABLE_QUOTA) $(ENABLE_REQUEST_ANIMATION_FRAME) $(ENABLE_SCRIPTED_SPEECH) $(ENABLE_SHADOW_DOM) $(ENABLE_SHARED_WORKERS) $(ENABLE_SQL_DATABASE) $(ENABLE_STYLE_SCOPED) $(ENABLE_SVG) $(ENABLE_SVG_DOM_OBJC_BINDINGS) $(ENABLE_SVG_FONTS) $(ENABLE_TEXT_AUTOSIZING) $(ENABLE_TEXT_NOTIFICATIONS_ONLY) $(ENABLE_TOUCH_ICON_LOADING) $(ENABLE_UNDO_MANAGER) $(ENABLE_VIDEO) $(ENABLE_VIDEO_TRACK) $(ENABLE_WEBGL) $(ENABLE_WEB_AUDIO) $(ENABLE_WEB_SOCKETS) $(ENABLE_WEB_TIMING) $(ENABLE_WORKERS) $(ENABLE_XSLT);
diff --git a/Source/JavaScriptCore/GNUmakefile.list.am b/Source/JavaScriptCore/GNUmakefile.list.am
index ae5854b91..243894d39 100644
--- a/Source/JavaScriptCore/GNUmakefile.list.am
+++ b/Source/JavaScriptCore/GNUmakefile.list.am
@@ -106,7 +106,6 @@ javascriptcore_sources += \
Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h \
Source/JavaScriptCore/bytecode/GetByIdStatus.cpp \
Source/JavaScriptCore/bytecode/GetByIdStatus.h \
- Source/JavaScriptCore/bytecode/GlobalResolveInfo.h \
Source/JavaScriptCore/bytecode/HandlerInfo.h \
Source/JavaScriptCore/bytecode/Instruction.h \
Source/JavaScriptCore/bytecode/JumpTable.cpp \
@@ -133,6 +132,7 @@ javascriptcore_sources += \
Source/JavaScriptCore/bytecode/PutKind.h \
Source/JavaScriptCore/bytecode/ResolveGlobalStatus.cpp \
Source/JavaScriptCore/bytecode/ResolveGlobalStatus.h \
+ Source/JavaScriptCore/bytecode/ResolveOperation.h \
Source/JavaScriptCore/bytecode/SamplingTool.cpp \
Source/JavaScriptCore/bytecode/SamplingTool.h \
Source/JavaScriptCore/bytecode/SpecialPointer.cpp \
@@ -732,6 +732,7 @@ offlineasm_nosources += \
Source/JavaScriptCore/offlineasm/opt.rb \
Source/JavaScriptCore/offlineasm/parser.rb \
Source/JavaScriptCore/offlineasm/registers.rb \
+ Source/JavaScriptCore/offlineasm/risc.rb \
Source/JavaScriptCore/offlineasm/self_hash.rb \
Source/JavaScriptCore/offlineasm/settings.rb \
Source/JavaScriptCore/offlineasm/transform.rb \
diff --git a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def
index 0724ca1ca..a386b4c2b 100755
--- a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def
+++ b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def
@@ -115,10 +115,11 @@ EXPORTS
?constructString@JSC@@YAPAVStringObject@1@PAVExecState@1@PAVJSGlobalObject@1@VJSValue@1@@Z
?convertLatin1ToUTF8@Unicode@WTF@@YA?AW4ConversionResult@12@PAPBEPBEPAPADPAD@Z
?convertUTF16ToUTF8@Unicode@WTF@@YA?AW4ConversionResult@12@PAPB_WPB_WPAPADPAD_N@Z
- ?convertUTF8ToUTF16@Unicode@WTF@@YA?AW4ConversionResult@12@PAPBDPBDPAPA_WPA_W_N@Z
+ ?convertUTF8ToUTF16@Unicode@WTF@@YA?AW4ConversionResult@12@PAPBDPBDPAPA_WPA_WPA_N_N@Z
?copyBackingStore@JSObject@JSC@@SAXPAVJSCell@2@AAVCopyVisitor@2@@Z
?create@JSFunction@JSC@@SAPAV12@PAVExecState@2@PAVJSGlobalObject@2@HABVString@WTF@@P6I_J0@ZW4Intrinsic@2@3@Z
?create@JSGlobalData@JSC@@SA?AV?$PassRefPtr@VJSGlobalData@JSC@@@WTF@@W4ThreadStackType@2@W4HeapType@2@@Z
+ ?create@OpaqueJSString@@SA?AV?$PassRefPtr@UOpaqueJSString@@@WTF@@ABVString@3@@Z
?create@RegExp@JSC@@SAPAV12@AAVJSGlobalData@2@ABVString@WTF@@W4RegExpFlags@2@@Z
?createEmptyString@SmallStrings@JSC@@AAEXPAVJSGlobalData@2@@Z
?createError@JSC@@YAPAVJSObject@1@PAVExecState@1@ABVString@WTF@@@Z
@@ -354,6 +355,7 @@ EXPORTS
?startSampling@JSGlobalData@JSC@@QAEXXZ
?stopProfiling@Profiler@JSC@@QAE?AV?$PassRefPtr@VProfile@JSC@@@WTF@@PAVExecState@2@ABVString@4@@Z
?stopSampling@JSGlobalData@JSC@@QAEXXZ
+ ?string@OpaqueJSString@@QBE?AVString@WTF@@XZ
?StringToDouble@StringToDoubleConverter@double_conversion@WTF@@SANPBDIPAI@Z
?suggestedNewOutOfLineStorageCapacity@Structure@JSC@@QAEIXZ
?sweeper@Heap@JSC@@QAEPAVIncrementalSweeper@2@XZ
diff --git a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj b/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
index 28c2746dd..1cf109a0b 100644
--- a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
+++ b/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
@@ -71,7 +71,6 @@
0F0B83B114BCF71800885B4F /* CallLinkInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B83AF14BCF71400885B4F /* CallLinkInfo.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F0B83B414BCF86000885B4F /* MethodCallLinkInfo.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F0B83B214BCF85E00885B4F /* MethodCallLinkInfo.cpp */; };
0F0B83B514BCF86200885B4F /* MethodCallLinkInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B83B314BCF85E00885B4F /* MethodCallLinkInfo.h */; settings = {ATTRIBUTES = (Private, ); }; };
- 0F0B83B714BCF8E100885B4F /* GlobalResolveInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B83B614BCF8DF00885B4F /* GlobalResolveInfo.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F0B83B914BCF95F00885B4F /* CallReturnOffsetToBytecodeOffset.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B83B814BCF95B00885B4F /* CallReturnOffsetToBytecodeOffset.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F0CD4C215F1A6070032F1C0 /* PutDirectIndexMode.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0CD4C015F1A6040032F1C0 /* PutDirectIndexMode.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F0CD4C415F6B6BB0032F1C0 /* SparseArrayValueMap.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F0CD4C315F6B6B50032F1C0 /* SparseArrayValueMap.cpp */; };
@@ -588,6 +587,7 @@
A76F54A313B28AAB00EF2BCE /* JITWriteBarrier.h in Headers */ = {isa = PBXBuildFile; fileRef = A76F54A213B28AAB00EF2BCE /* JITWriteBarrier.h */; };
A784A26111D16622005776AC /* ASTBuilder.h in Headers */ = {isa = PBXBuildFile; fileRef = A7A7EE7411B98B8D0065A14F /* ASTBuilder.h */; };
A784A26411D16622005776AC /* SyntaxChecker.h in Headers */ = {isa = PBXBuildFile; fileRef = A7A7EE7711B98B8D0065A14F /* SyntaxChecker.h */; };
+ A7AFC17915F7EFE30048F57B /* ResolveOperation.h in Headers */ = {isa = PBXBuildFile; fileRef = A7AFC17715F7EFE30048F57B /* ResolveOperation.h */; settings = {ATTRIBUTES = (Private, ); }; };
A7B48F490EE8936F00DCBDB6 /* ExecutableAllocator.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A7B48DB60EE74CFC00DCBDB6 /* ExecutableAllocator.cpp */; };
A7B4ACAF1484C9CE00B38A36 /* JSExportMacros.h in Headers */ = {isa = PBXBuildFile; fileRef = A7B4ACAE1484C9CE00B38A36 /* JSExportMacros.h */; settings = {ATTRIBUTES = (Private, ); }; };
A7C1E8E4112E72EF00A37F98 /* JITPropertyAccess32_64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A7C1E8C8112E701C00A37F98 /* JITPropertyAccess32_64.cpp */; };
@@ -855,7 +855,6 @@
0F0B83AF14BCF71400885B4F /* CallLinkInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CallLinkInfo.h; sourceTree = "<group>"; };
0F0B83B214BCF85E00885B4F /* MethodCallLinkInfo.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MethodCallLinkInfo.cpp; sourceTree = "<group>"; };
0F0B83B314BCF85E00885B4F /* MethodCallLinkInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MethodCallLinkInfo.h; sourceTree = "<group>"; };
- 0F0B83B614BCF8DF00885B4F /* GlobalResolveInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GlobalResolveInfo.h; sourceTree = "<group>"; };
0F0B83B814BCF95B00885B4F /* CallReturnOffsetToBytecodeOffset.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CallReturnOffsetToBytecodeOffset.h; sourceTree = "<group>"; };
0F0CD4C015F1A6040032F1C0 /* PutDirectIndexMode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PutDirectIndexMode.h; sourceTree = "<group>"; };
0F0CD4C315F6B6B50032F1C0 /* SparseArrayValueMap.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = SparseArrayValueMap.cpp; sourceTree = "<group>"; };
@@ -1390,6 +1389,7 @@
A79EDB0811531CD60019E912 /* JSObjectRefPrivate.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSObjectRefPrivate.h; sourceTree = "<group>"; };
A7A7EE7411B98B8D0065A14F /* ASTBuilder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ASTBuilder.h; sourceTree = "<group>"; };
A7A7EE7711B98B8D0065A14F /* SyntaxChecker.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SyntaxChecker.h; sourceTree = "<group>"; };
+ A7AFC17715F7EFE30048F57B /* ResolveOperation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ResolveOperation.h; sourceTree = "<group>"; };
A7B48DB50EE74CFC00DCBDB6 /* ExecutableAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ExecutableAllocator.h; sourceTree = "<group>"; };
A7B48DB60EE74CFC00DCBDB6 /* ExecutableAllocator.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ExecutableAllocator.cpp; sourceTree = "<group>"; };
A7B4ACAE1484C9CE00B38A36 /* JSExportMacros.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSExportMacros.h; sourceTree = "<group>"; };
@@ -2520,6 +2520,14 @@
0F0B83AF14BCF71400885B4F /* CallLinkInfo.h */,
0F93329314CA7DC10085F3C6 /* CallLinkStatus.cpp */,
0F93329414CA7DC10085F3C6 /* CallLinkStatus.h */,
+ 0F93329514CA7DC10085F3C6 /* GetByIdStatus.cpp */,
+ 0F93329614CA7DC10085F3C6 /* GetByIdStatus.h */,
+ 0F93329714CA7DC10085F3C6 /* MethodCallLinkStatus.cpp */,
+ 0F93329814CA7DC10085F3C6 /* MethodCallLinkStatus.h */,
+ 0F93329914CA7DC10085F3C6 /* PutByIdStatus.cpp */,
+ 0F93329A14CA7DC10085F3C6 /* PutByIdStatus.h */,
+ A7AFC17715F7EFE30048F57B /* ResolveOperation.h */,
+ 0F93329B14CA7DC10085F3C6 /* StructureSet.h */,
0F0B83B814BCF95B00885B4F /* CallReturnOffsetToBytecodeOffset.h */,
969A07900ED1D3AE00F1F681 /* CodeBlock.cpp */,
969A07910ED1D3AE00F1F681 /* CodeBlock.h */,
@@ -2535,7 +2543,6 @@
0F0B83AA14BCF5B900885B4F /* ExpressionRangeInfo.h */,
0F93329514CA7DC10085F3C6 /* GetByIdStatus.cpp */,
0F93329614CA7DC10085F3C6 /* GetByIdStatus.h */,
- 0F0B83B614BCF8DF00885B4F /* GlobalResolveInfo.h */,
0F0B83A814BCF55E00885B4F /* HandlerInfo.h */,
969A07930ED1D3AE00F1F681 /* Instruction.h */,
BCFD8C900EEB2EE700283848 /* JumpTable.cpp */,
@@ -2894,7 +2901,6 @@
0F0B83AD14BCF60400885B4F /* LineInfo.h in Headers */,
0F0B83B114BCF71800885B4F /* CallLinkInfo.h in Headers */,
0F0B83B514BCF86200885B4F /* MethodCallLinkInfo.h in Headers */,
- 0F0B83B714BCF8E100885B4F /* GlobalResolveInfo.h in Headers */,
0F0B83B914BCF95F00885B4F /* CallReturnOffsetToBytecodeOffset.h in Headers */,
0F0FC45A14BD15F500B81154 /* LLIntCallLinkInfo.h in Headers */,
0F21C26814BE5F6800ADC64B /* JITDriver.h in Headers */,
@@ -2987,6 +2993,7 @@
14874AE615EBDE4A002E3587 /* JSScope.h in Headers */,
FED287B215EC9A5700DA8161 /* LLIntOpcode.h in Headers */,
1442566215EDE98D0066A49B /* JSWithScope.h in Headers */,
+ A7AFC17915F7EFE30048F57B /* ResolveOperation.h in Headers */,
0FB7F39515ED8E4600F167B2 /* ArrayConventions.h in Headers */,
0FB7F39615ED8E4600F167B2 /* ArrayStorage.h in Headers */,
0FB7F39715ED8E4600F167B2 /* Butterfly.h in Headers */,
diff --git a/Source/JavaScriptCore/assembler/MacroAssembler.h b/Source/JavaScriptCore/assembler/MacroAssembler.h
index 4d4a960d3..642b5ca6b 100644
--- a/Source/JavaScriptCore/assembler/MacroAssembler.h
+++ b/Source/JavaScriptCore/assembler/MacroAssembler.h
@@ -728,16 +728,6 @@ public:
return store64WithAddressOffsetPatch(src, address);
}
- void movePtrToDouble(RegisterID src, FPRegisterID dest)
- {
- move64ToDouble(src, dest);
- }
-
- void moveDoubleToPtr(FPRegisterID src, RegisterID dest)
- {
- moveDoubleTo64(src, dest);
- }
-
void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
{
compare64(cond, left, right, dest);
@@ -895,14 +885,6 @@ public:
default: {
if (value <= 0xff)
return false;
- JSValue jsValue = JSValue::decode(reinterpret_cast<void*>(value));
- if (jsValue.isInt32())
- return shouldBlind(Imm32(jsValue.asInt32()));
- if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
- return false;
-
- if (!shouldBlindDouble(bitwise_cast<double>(value)))
- return false;
}
}
return shouldBlindForSpecificArch(value);
@@ -956,6 +938,14 @@ public:
default: {
if (value <= 0xff)
return false;
+ JSValue jsValue = JSValue::decode(value);
+ if (jsValue.isInt32())
+ return shouldBlind(Imm32(jsValue.asInt32()));
+ if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
+ return false;
+
+ if (!shouldBlindDouble(bitwise_cast<double>(value)))
+ return false;
}
}
return shouldBlindForSpecificArch(value);
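The two hunks above relocate the JSValue-aware heuristics from the pointer-immediate overload of shouldBlind to the 64-bit-immediate overload. Both overloads gate constant blinding, which keeps attacker-chosen immediates from appearing verbatim in executable memory (a JIT-spraying defense). A minimal standalone sketch of the blinding idea, with illustrative names rather than the MacroAssembler's API:

    #include <cstdint>
    #include <random>

    // Only blindedValue and key are ever baked into the code page.
    struct BlindedImm64 {
        uint64_t blindedValue;
        uint64_t key;
    };

    BlindedImm64 blind(uint64_t imm, std::mt19937_64& rng)
    {
        uint64_t key = rng();
        BlindedImm64 result = { imm ^ key, key };
        return result;
    }

    // The emitted code XORs the halves back together at run time, so the raw
    // immediate never exists as contiguous bytes in executable memory.
    uint64_t materialize(const BlindedImm64& blinded)
    {
        return blinded.blindedValue ^ blinded.key;
    }

    int main()
    {
        std::mt19937_64 rng(0x1234);
        BlindedImm64 blinded = blind(0x0041414141414141ull, rng);
        return materialize(blinded) == 0x0041414141414141ull ? 0 : 1;
    }

shouldBlind exists because blinding costs extra instructions and a scratch register, so values that cannot encode a useful payload (small integers, benign doubles) skip it.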
diff --git a/Source/JavaScriptCore/bytecode/ArrayProfile.cpp b/Source/JavaScriptCore/bytecode/ArrayProfile.cpp
index 3ba974d74..de7f67887 100644
--- a/Source/JavaScriptCore/bytecode/ArrayProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/ArrayProfile.cpp
@@ -26,8 +26,44 @@
#include "config.h"
#include "ArrayProfile.h"
+#include <wtf/StringExtras.h>
+
namespace JSC {
+const char* arrayModesToString(ArrayModes arrayModes)
+{
+ if (!arrayModes)
+ return "0:<empty>";
+
+ if (arrayModes == ALL_ARRAY_MODES)
+ return "TOP";
+
+ bool isNonArray = !!(arrayModes & asArrayModes(NonArray));
+ bool isNonArrayWithContiguous = !!(arrayModes & asArrayModes(NonArrayWithContiguous));
+ bool isNonArrayWithArrayStorage = !!(arrayModes & asArrayModes(NonArrayWithArrayStorage));
+ bool isNonArrayWithSlowPutArrayStorage = !!(arrayModes & asArrayModes(NonArrayWithSlowPutArrayStorage));
+ bool isArrayClass = !!(arrayModes & asArrayModes(ArrayClass));
+ bool isArrayWithContiguous = !!(arrayModes & asArrayModes(ArrayWithContiguous));
+ bool isArrayWithArrayStorage = !!(arrayModes & asArrayModes(ArrayWithArrayStorage));
+ bool isArrayWithSlowPutArrayStorage = !!(arrayModes & asArrayModes(ArrayWithSlowPutArrayStorage));
+
+ // The result lives in a static buffer and is only valid until the next call.
+ static char result[256];
+ snprintf(
+ result, sizeof(result),
+ "%u:%s%s%s%s%s%s%s%s",
+ arrayModes,
+ isNonArray ? " NonArray" : "",
+ isNonArrayWithContiguous ? " NonArrayWithContiguous" : "",
+ isNonArrayWithArrayStorage ? " NonArrayWithArrayStorage" : "",
+ isNonArrayWithSlowPutArrayStorage ? " NonArrayWithSlowPutArrayStorage" : "",
+ isArrayClass ? " ArrayClass" : "",
+ isArrayWithContiguous ? " ArrayWithContiguous" : "",
+ isArrayWithArrayStorage ? " ArrayWithArrayStorage" : "",
+ isArrayWithSlowPutArrayStorage ? " ArrayWithSlowPutArrayStorage" : "");
+
+ return result;
+}
+
void ArrayProfile::computeUpdatedPrediction(OperationInProgress operation)
{
if (m_lastSeenStructure) {
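A standalone sketch of the formatting scheme arrayModesToString implements, assuming the same one-bit-per-indexing-type layout; the enum and its values are stand-ins, not JSC's IndexingType:

    #include <cstdio>
    #include <string>

    enum ExampleIndexingType { ExNonArray, ExArrayWithContiguous, ExArrayWithArrayStorage, ExCount };
    static const char* exampleNames[ExCount] = { "NonArray", "ArrayWithContiguous", "ArrayWithArrayStorage" };

    static std::string modesToString(unsigned modes)
    {
        if (!modes)
            return "0:<empty>";
        std::string result = std::to_string(modes) + ":";
        for (unsigned i = 0; i < ExCount; ++i) {
            if (modes & (1u << i)) {
                result += ' ';
                result += exampleNames[i];
            }
        }
        return result;
    }

    int main()
    {
        unsigned modes = (1u << ExArrayWithContiguous) | (1u << ExArrayWithArrayStorage);
        std::printf("%s\n", modesToString(modes).c_str()); // "6: ArrayWithContiguous ArrayWithArrayStorage"
        return 0;
    }

Unlike this sketch, the helper above returns a pointer into a static buffer, so its result is only valid until the next call and is not safe to share across threads.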
diff --git a/Source/JavaScriptCore/bytecode/ArrayProfile.h b/Source/JavaScriptCore/bytecode/ArrayProfile.h
index 3b462eaba..ffc136258 100644
--- a/Source/JavaScriptCore/bytecode/ArrayProfile.h
+++ b/Source/JavaScriptCore/bytecode/ArrayProfile.h
@@ -40,13 +40,44 @@ class LLIntOffsetsExtractor;
typedef unsigned ArrayModes;
#define asArrayModes(type) \
- (1 << static_cast<unsigned>(type))
+ (static_cast<unsigned>(1) << static_cast<unsigned>(type))
+
+#define ALL_NON_ARRAY_ARRAY_MODES \
+ (asArrayModes(NonArray) \
+ | asArrayModes(NonArrayWithContiguous) \
+ | asArrayModes(NonArrayWithArrayStorage) \
+ | asArrayModes(NonArrayWithSlowPutArrayStorage))
+
+#define ALL_ARRAY_ARRAY_MODES \
+ (asArrayModes(ArrayClass) \
+ | asArrayModes(ArrayWithContiguous) \
+ | asArrayModes(ArrayWithArrayStorage) \
+ | asArrayModes(ArrayWithSlowPutArrayStorage))
+
+#define ALL_ARRAY_MODES (ALL_NON_ARRAY_ARRAY_MODES | ALL_ARRAY_ARRAY_MODES)
inline ArrayModes arrayModeFromStructure(Structure* structure)
{
return asArrayModes(structure->indexingType());
}
+const char* arrayModesToString(ArrayModes);
+
+inline bool mergeArrayModes(ArrayModes& left, ArrayModes right)
+{
+ ArrayModes newModes = left | right;
+ if (newModes == left)
+ return false;
+ left = newModes;
+ return true;
+}
+
+// Checks if proven is a subset of expected.
+inline bool arrayModesAlreadyChecked(ArrayModes proven, ArrayModes expected)
+{
+ return (expected | proven) == expected;
+}
+
class ArrayProfile {
public:
ArrayProfile()
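A minimal standalone illustration of the two inline helpers above: merging newly observed modes into a profile, and testing whether a proven set of modes is covered by an expected set. The bit positions here are arbitrary:

    #include <cassert>

    typedef unsigned ArrayModes;

    static bool mergeArrayModes(ArrayModes& left, ArrayModes right)
    {
        ArrayModes newModes = left | right;
        if (newModes == left)
            return false; // nothing new was observed
        left = newModes;
        return true;
    }

    static bool arrayModesAlreadyChecked(ArrayModes proven, ArrayModes expected)
    {
        return (expected | proven) == expected; // proven is a subset of expected
    }

    int main()
    {
        ArrayModes profile = 0;
        assert(mergeArrayModes(profile, 1u << 3));  // first observation changes the set
        assert(!mergeArrayModes(profile, 1u << 3)); // repeat observation does not
        assert(arrayModesAlreadyChecked(1u << 3, (1u << 3) | (1u << 5)));
        assert(!arrayModesAlreadyChecked(1u << 5, 1u << 3));
        return 0;
    }

The subset test is what lets a compiled check be skipped: if every mode that run-time evidence could prove is already inside the expected mask, re-checking is redundant.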
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
index d1151482e..7f86186a0 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
@@ -439,34 +439,6 @@ void CodeBlock::printPutByIdOp(ExecState* exec, int location, Vector<Instruction
it += 5;
}
-#if ENABLE(JIT)
-static bool isGlobalResolve(OpcodeID opcodeID)
-{
- return opcodeID == op_resolve_global || opcodeID == op_resolve_global_dynamic;
-}
-
-static unsigned instructionOffsetForNth(ExecState* exec, const RefCountedArray<Instruction>& instructions, int nth, bool (*predicate)(OpcodeID))
-{
- size_t i = 0;
- while (i < instructions.size()) {
- OpcodeID currentOpcode = exec->interpreter()->getOpcodeID(instructions[i].u.opcode);
- if (predicate(currentOpcode)) {
- if (!--nth)
- return i;
- }
- i += opcodeLengths[currentOpcode];
- }
-
- ASSERT_NOT_REACHED();
- return 0;
-}
-
-static void printGlobalResolveInfo(const GlobalResolveInfo& resolveInfo, unsigned instructionOffset)
-{
- dataLog(" [%4d] %s: %s\n", instructionOffset, "resolve_global", pointerToSourceString(resolveInfo.structure).utf8().data());
-}
-#endif
-
void CodeBlock::printStructure(const char* name, const Instruction* vPC, int operand)
{
unsigned instructionOffset = vPC - instructions().begin();
@@ -506,14 +478,6 @@ void CodeBlock::printStructures(const Instruction* vPC)
printStructure("put_by_id_replace", vPC, 4);
return;
}
- if (vPC[0].u.opcode == interpreter->getOpcode(op_resolve_global)) {
- printStructure("resolve_global", vPC, 4);
- return;
- }
- if (vPC[0].u.opcode == interpreter->getOpcode(op_resolve_global_dynamic)) {
- printStructure("resolve_global_dynamic", vPC, 4);
- return;
- }
// These m_instructions don't ref Structures.
ASSERT(vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_generic) || vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_generic) || vPC[0].u.opcode == interpreter->getOpcode(op_call) || vPC[0].u.opcode == interpreter->getOpcode(op_call_eval) || vPC[0].u.opcode == interpreter->getOpcode(op_construct));
@@ -577,16 +541,8 @@ void CodeBlock::dump(ExecState* exec)
}
#if ENABLE(JIT)
- if (!m_globalResolveInfos.isEmpty() || !m_structureStubInfos.isEmpty())
+ if (!m_structureStubInfos.isEmpty())
dataLog("\nStructures:\n");
-
- if (!m_globalResolveInfos.isEmpty()) {
- size_t i = 0;
- do {
- printGlobalResolveInfo(m_globalResolveInfos[i], instructionOffsetForNth(exec, instructions(), i + 1, isGlobalResolve));
- ++i;
- } while (i < m_globalResolveInfos.size());
- }
#endif
if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
@@ -909,92 +865,30 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
printBinaryOp(exec, location, it, "in");
break;
}
- case op_resolve: {
- int r0 = (++it)->u.operand;
- int id0 = (++it)->u.operand;
- dataLog("[%4d] resolve\t\t %s, %s", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data());
- dumpBytecodeCommentAndNewLine(location);
- it++;
- break;
- }
- case op_resolve_skip: {
- int r0 = (++it)->u.operand;
- int id0 = (++it)->u.operand;
- int skipLevels = (++it)->u.operand;
- dataLog("[%4d] resolve_skip\t %s, %s, %d", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), skipLevels);
- dumpBytecodeCommentAndNewLine(location);
- it++;
- break;
- }
- case op_resolve_global: {
- int r0 = (++it)->u.operand;
+ case op_put_to_base_variable:
+ case op_put_to_base: {
+ int base = (++it)->u.operand;
int id0 = (++it)->u.operand;
- dataLog("[%4d] resolve_global\t %s, %s", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data());
+ int value = (++it)->u.operand;
+ int resolveInfo = (++it)->u.operand;
+ dataLog("[%4d] put_to_base\t %s, %s, %s, %d", location, registerName(exec, base).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, value).data(), resolveInfo);
dumpBytecodeCommentAndNewLine(location);
- it += 3;
break;
}
- case op_resolve_global_dynamic: {
+ case op_resolve:
+ case op_resolve_global_property:
+ case op_resolve_global_var:
+ case op_resolve_scoped_var:
+ case op_resolve_scoped_var_on_top_scope:
+ case op_resolve_scoped_var_with_top_scope_check: {
int r0 = (++it)->u.operand;
int id0 = (++it)->u.operand;
- JSValue scope = JSValue((++it)->u.jsCell.get());
- ++it;
- int depth = (++it)->u.operand;
- dataLog("[%4d] resolve_global_dynamic\t %s, %s, %s, %d", location, registerName(exec, r0).data(), valueToSourceString(exec, scope).utf8().data(), idName(id0, m_identifiers[id0]).data(), depth);
- dumpBytecodeCommentAndNewLine(location);
- ++it;
- break;
- }
- case op_get_scoped_var: {
- int r0 = (++it)->u.operand;
- int index = (++it)->u.operand;
- int skipLevels = (++it)->u.operand;
- dataLog("[%4d] get_scoped_var\t %s, %d, %d", location, registerName(exec, r0).data(), index, skipLevels);
- dumpBytecodeCommentAndNewLine(location);
- it++;
- break;
- }
- case op_put_scoped_var: {
- int index = (++it)->u.operand;
- int skipLevels = (++it)->u.operand;
- int r0 = (++it)->u.operand;
- dataLog("[%4d] put_scoped_var\t %d, %d, %s", location, index, skipLevels, registerName(exec, r0).data());
- dumpBytecodeCommentAndNewLine(location);
- break;
- }
- case op_get_global_var: {
- int r0 = (++it)->u.operand;
- WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer;
- dataLog("[%4d] get_global_var\t %s, g%d(%p)", location, registerName(exec, r0).data(), m_globalObject->findRegisterIndex(registerPointer), registerPointer);
+ int resolveInfo = (++it)->u.operand;
+ dataLog("[%4d] resolve\t\t %s, %s, %d", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), resolveInfo);
dumpBytecodeCommentAndNewLine(location);
it++;
break;
}
- case op_get_global_var_watchable: {
- int r0 = (++it)->u.operand;
- WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer;
- dataLog("[%4d] get_global_var_watchable\t %s, g%d(%p)", location, registerName(exec, r0).data(), m_globalObject->findRegisterIndex(registerPointer), registerPointer);
- dumpBytecodeCommentAndNewLine(location);
- it++;
- it++;
- break;
- }
- case op_put_global_var: {
- WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer;
- int r0 = (++it)->u.operand;
- dataLog("[%4d] put_global_var\t g%d(%p), %s", location, m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(exec, r0).data());
- dumpBytecodeCommentAndNewLine(location);
- break;
- }
- case op_put_global_var_check: {
- WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer;
- int r0 = (++it)->u.operand;
- dataLog("[%4d] put_global_var_check\t g%d(%p), %s", location, m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(exec, r0).data());
- dumpBytecodeCommentAndNewLine(location);
- it++;
- it++;
- break;
- }
case op_init_global_const: {
WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer;
int r0 = (++it)->u.operand;
@@ -1011,11 +905,17 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
it++;
break;
}
+ case op_resolve_base_to_global:
+ case op_resolve_base_to_global_dynamic:
+ case op_resolve_base_to_scope:
+ case op_resolve_base_to_scope_with_top_scope_check:
case op_resolve_base: {
int r0 = (++it)->u.operand;
int id0 = (++it)->u.operand;
int isStrict = (++it)->u.operand;
- dataLog("[%4d] resolve_base%s\t %s, %s", location, isStrict ? "_strict" : "", registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data());
+ int resolveInfo = (++it)->u.operand;
+ int putToBaseInfo = (++it)->u.operand;
+ dataLog("[%4d] resolve_base%s\t %s, %s, %d, %d", location, isStrict ? "_strict" : "", registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), resolveInfo, putToBaseInfo);
dumpBytecodeCommentAndNewLine(location);
it++;
break;
@@ -1031,7 +931,9 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int id0 = (++it)->u.operand;
- dataLog("[%4d] resolve_with_base %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data());
+ int resolveInfo = (++it)->u.operand;
+ int putToBaseInfo = (++it)->u.operand;
+ dataLog("[%4d] resolve_with_base %s, %s, %s, %d, %d", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data(), resolveInfo, putToBaseInfo);
dumpBytecodeCommentAndNewLine(location);
it++;
break;
@@ -1040,7 +942,8 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int id0 = (++it)->u.operand;
- dataLog("[%4d] resolve_with_this %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data());
+ int resolveInfo = (++it)->u.operand;
+ dataLog("[%4d] resolve_with_this %s, %s, %s, %d", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data(), resolveInfo);
dumpBytecodeCommentAndNewLine(location);
it++;
break;
@@ -1704,6 +1607,7 @@ CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
, m_thisRegister(other.m_thisRegister)
, m_argumentsRegister(other.m_argumentsRegister)
, m_activationRegister(other.m_activationRegister)
+ , m_globalObjectConstant(other.m_globalObjectConstant)
, m_needsFullScopeChain(other.m_needsFullScopeChain)
, m_usesEval(other.m_usesEval)
, m_isNumericCompareFunction(other.m_isNumericCompareFunction)
@@ -1711,9 +1615,6 @@ CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
, m_codeType(other.m_codeType)
, m_source(other.m_source)
, m_sourceOffset(other.m_sourceOffset)
-#if ENABLE(JIT)
- , m_globalResolveInfos(other.m_globalResolveInfos.size())
-#endif
#if ENABLE(VALUE_PROFILER)
, m_executionEntryCount(0)
#endif
@@ -1728,6 +1629,8 @@ CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
, m_optimizationDelayCounter(0)
, m_reoptimizationRetryCounter(0)
, m_lineInfo(other.m_lineInfo)
+ , m_resolveOperations(other.m_resolveOperations)
+ , m_putToBaseOperations(other.m_putToBaseOperations)
#if ENABLE(BYTECODE_COMMENTS)
, m_bytecodeCommentIterator(0)
#endif
@@ -1739,11 +1642,6 @@ CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
optimizeAfterWarmUp();
jitAfterWarmUp();
-#if ENABLE(JIT)
- for (unsigned i = m_globalResolveInfos.size(); i--;)
- m_globalResolveInfos[i] = GlobalResolveInfo(other.m_globalResolveInfos[i].bytecodeOffset);
-#endif
-
if (other.m_rareData) {
createRareDataIfNecessary();
@@ -1787,13 +1685,16 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlo
#endif
{
ASSERT(m_source);
-
+
optimizeAfterWarmUp();
jitAfterWarmUp();
#if DUMP_CODE_BLOCK_STATISTICS
liveCodeBlockSet.add(this);
#endif
+ // We have a stub putToBase operation to allow resolve_base to
+ // remain branchless.
+ m_putToBaseOperations.append(PutToBaseOperation(isStrictMode()));
}
CodeBlock::~CodeBlock()
@@ -1892,11 +1793,6 @@ void CodeBlock::visitStructures(SlotVisitor& visitor, Instruction* vPC)
visitor.append(&vPC[4].u.structure);
return;
}
- if (vPC[0].u.opcode == interpreter->getOpcode(op_resolve_global) || vPC[0].u.opcode == interpreter->getOpcode(op_resolve_global_dynamic)) {
- if (vPC[3].u.structure)
- visitor.append(&vPC[3].u.structure);
- return;
- }
// These instructions don't ref their Structures.
ASSERT(vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id) || vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_generic) || vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_generic) || vPC[0].u.opcode == interpreter->getOpcode(op_get_array_length) || vPC[0].u.opcode == interpreter->getOpcode(op_get_string_length));
@@ -2048,7 +1944,7 @@ static const bool verboseUnlinking = true;
#else
static const bool verboseUnlinking = false;
#endif
-
+
void CodeBlock::finalizeUnconditionally()
{
#if ENABLE(LLINT)
@@ -2093,17 +1989,7 @@ void CodeBlock::finalizeUnconditionally()
ASSERT_NOT_REACHED();
}
}
- for (size_t size = m_globalResolveInstructions.size(), i = 0; i < size; ++i) {
- Instruction* curInstruction = &instructions()[m_globalResolveInstructions[i]];
- ASSERT(interpreter->getOpcodeID(curInstruction[0].u.opcode) == op_resolve_global
- || interpreter->getOpcodeID(curInstruction[0].u.opcode) == op_resolve_global_dynamic);
- if (!curInstruction[3].u.structure || Heap::isMarked(curInstruction[3].u.structure.get()))
- continue;
- if (verboseUnlinking)
- dataLog("Clearing LLInt global resolve cache with structure %p.\n", curInstruction[3].u.structure.get());
- curInstruction[3].u.structure.clear();
- curInstruction[4].u.operand = 0;
- }
+
for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
if (verboseUnlinking)
@@ -2130,7 +2016,29 @@ void CodeBlock::finalizeUnconditionally()
return;
}
#endif // ENABLE(DFG_JIT)
-
+
+ for (size_t size = m_putToBaseOperations.size(), i = 0; i < size; ++i) {
+ if (m_putToBaseOperations[i].m_structure && !Heap::isMarked(m_putToBaseOperations[i].m_structure.get())) {
+ if (verboseUnlinking)
+ dataLog("Clearing putToBase info in %p.\n", this);
+ m_putToBaseOperations[i].m_structure.clear();
+ }
+ }
+ for (size_t size = m_resolveOperations.size(), i = 0; i < size; ++i) {
+ if (m_resolveOperations[i].isEmpty())
+ continue;
+#ifndef NDEBUG
+ for (size_t insnSize = m_resolveOperations[i].size() - 1, k = 0; k < insnSize; ++k)
+ ASSERT(!m_resolveOperations[i][k].m_structure);
+#endif
+ if (m_resolveOperations[i].last().m_structure && !Heap::isMarked(m_resolveOperations[i].last().m_structure.get())) {
+ if (verboseUnlinking)
+ dataLog("Clearing resolve info in %p.\n", this);
+ m_resolveOperations[i].last().m_structure.clear();
+ }
+ }
+
#if ENABLE(JIT)
// Handle inline caches.
if (!!getJITCode()) {
@@ -2145,14 +2053,6 @@ void CodeBlock::finalizeUnconditionally()
&& !Heap::isMarked(callLinkInfo(i).lastSeenCallee.get()))
callLinkInfo(i).lastSeenCallee.clear();
}
- for (size_t size = m_globalResolveInfos.size(), i = 0; i < size; ++i) {
- if (m_globalResolveInfos[i].structure && !Heap::isMarked(m_globalResolveInfos[i].structure.get())) {
- if (verboseUnlinking)
- dataLog("Clearing resolve info in %p.\n", this);
- m_globalResolveInfos[i].structure.clear();
- }
- }
-
for (size_t size = m_structureStubInfos.size(), i = 0; i < size; ++i) {
StructureStubInfo& stubInfo = m_structureStubInfos[i];
@@ -2422,43 +2322,14 @@ void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& d
return;
}
-#if ENABLE(JIT)
-bool CodeBlock::hasGlobalResolveInfoAtBytecodeOffset(unsigned bytecodeOffset)
-{
- if (m_globalResolveInfos.isEmpty())
- return false;
-
- int low = 0;
- int high = m_globalResolveInfos.size();
- while (low < high) {
- int mid = low + (high - low) / 2;
- if (m_globalResolveInfos[mid].bytecodeOffset <= bytecodeOffset)
- low = mid + 1;
- else
- high = mid;
- }
-
- if (!low || m_globalResolveInfos[low - 1].bytecodeOffset != bytecodeOffset)
- return false;
- return true;
-}
-
-GlobalResolveInfo& CodeBlock::globalResolveInfoForBytecodeOffset(unsigned bytecodeOffset)
-{
- return *(binarySearch<GlobalResolveInfo, unsigned, getGlobalResolveInfoBytecodeOffset>(m_globalResolveInfos.begin(), m_globalResolveInfos.size(), bytecodeOffset));
-}
-#endif
-
void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
{
m_propertyAccessInstructions.shrinkToFit();
- m_globalResolveInstructions.shrinkToFit();
#if ENABLE(LLINT)
m_llintCallLinkInfos.shrinkToFit();
#endif
#if ENABLE(JIT)
m_structureStubInfos.shrinkToFit();
- if (shrinkMode == EarlyShrink)
- m_globalResolveInfos.shrinkToFit();
m_callLinkInfos.shrinkToFit();
m_methodCallLinkInfos.shrinkToFit();
#endif
@@ -2477,6 +2348,7 @@ void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
m_constantRegisters.shrinkToFit();
} // else don't shrink these, because we would have already pointed pointers into these tables.
+ m_resolveOperations.shrinkToFit();
m_lineInfo.shrinkToFit();
if (m_rareData) {
m_rareData->m_exceptionHandlers.shrinkToFit();
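The new finalizeUnconditionally loops above treat the Structures cached in resolve and put-to-base operations as weak: an entry whose Structure the collector did not mark is cleared rather than allowed to keep the Structure alive. A reduced sketch of that pattern, with stand-ins for WriteBarrier<Structure> and Heap::isMarked:

    #include <vector>

    struct Structure { bool marked; };

    // Stand-in for Heap::isMarked (an assumption for this sketch).
    static bool isMarked(const Structure* structure) { return structure->marked; }

    struct CacheEntry {
        Structure* structure; // weak: must not keep the Structure alive
    };

    static void finalizeCaches(std::vector<CacheEntry>& entries)
    {
        for (size_t i = 0; i < entries.size(); ++i) {
            if (entries[i].structure && !isMarked(entries[i].structure))
                entries[i].structure = 0; // stale cache; the next execution refills it
        }
    }

    int main()
    {
        Structure live = { true };
        Structure dead = { false };
        std::vector<CacheEntry> cache;
        CacheEntry a = { &live };
        CacheEntry b = { &dead };
        cache.push_back(a);
        cache.push_back(b);
        finalizeCaches(cache); // cache[0] keeps &live; cache[1] is cleared
        return cache[1].structure == 0 ? 0 : 1;
    }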
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h
index 01a8ef4a1..fe588c787 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.h
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.h
@@ -49,7 +49,6 @@
#include "EvalCodeCache.h"
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
-#include "GlobalResolveInfo.h"
#include "HandlerInfo.h"
#include "MethodCallLinkInfo.h"
#include "Options.h"
@@ -64,6 +63,7 @@
#include "LineInfo.h"
#include "Nodes.h"
#include "RegExpObject.h"
+#include "ResolveOperation.h"
#include "StructureStubInfo.h"
#include "UnconditionalFinalizer.h"
#include "ValueProfile.h"
@@ -197,6 +197,30 @@ namespace JSC {
int lineNumberForBytecodeOffset(unsigned bytecodeOffset);
void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset);
+ uint32_t addResolve()
+ {
+ m_resolveOperations.grow(m_resolveOperations.size() + 1);
+ return m_resolveOperations.size() - 1;
+ }
+ uint32_t addPutToBase()
+ {
+ m_putToBaseOperations.append(PutToBaseOperation(isStrictMode()));
+ return m_putToBaseOperations.size() - 1;
+ }
+
+ ResolveOperations* resolveOperations(uint32_t i)
+ {
+ return &m_resolveOperations[i];
+ }
+
+ PutToBaseOperation* putToBaseOperation(uint32_t i)
+ {
+ return &m_putToBaseOperations[i];
+ }
+
+ size_t numberOfResolveOperations() const { return m_resolveOperations.size(); }
+ size_t numberOfPutToBaseOperations() const { return m_putToBaseOperations.size(); }
+
#if ENABLE(JIT)
StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress)
@@ -600,11 +624,6 @@ namespace JSC {
{
m_propertyAccessInstructions.append(propertyAccessInstruction);
}
- void addGlobalResolveInstruction(unsigned globalResolveInstruction)
- {
- m_globalResolveInstructions.append(globalResolveInstruction);
- }
- bool hasGlobalResolveInstructionAtBytecodeOffset(unsigned bytecodeOffset);
#if ENABLE(LLINT)
LLIntCallLinkInfo* addLLIntCallLinkInfo()
{
@@ -621,15 +640,6 @@ namespace JSC {
size_t numberOfByValInfos() const { return m_byValInfos.size(); }
ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }
- void addGlobalResolveInfo(unsigned globalResolveInstruction)
- {
- m_globalResolveInfos.append(GlobalResolveInfo(globalResolveInstruction));
- }
- GlobalResolveInfo& globalResolveInfo(int index) { return m_globalResolveInfos[index]; }
- bool hasGlobalResolveInfoAtBytecodeOffset(unsigned bytecodeOffset);
- GlobalResolveInfo& globalResolveInfoForBytecodeOffset(unsigned bytecodeOffset);
- unsigned numberOfGlobalResolveInfos() { return m_globalResolveInfos.size(); }
-
void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); }
size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
@@ -781,15 +791,6 @@ namespace JSC {
ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);
#endif
-
- unsigned globalResolveInfoCount() const
- {
-#if ENABLE(JIT)
- if (m_globalData->canUseJIT())
- return m_globalResolveInfos.size();
-#endif
- return 0;
- }
// Exception handling support
@@ -1215,13 +1216,16 @@ namespace JSC {
int m_numVars;
bool m_isConstructor;
+ int globalObjectConstant() const { return m_globalObjectConstant; }
+ void setGlobalObjectConstant(int globalRegister) { m_globalObjectConstant = globalRegister; }
+
protected:
#if ENABLE(JIT)
virtual bool jitCompileImpl(ExecState*) = 0;
#endif
virtual void visitWeakReferences(SlotVisitor&);
virtual void finalizeUnconditionally();
-
+
private:
friend class DFGCodeBlocks;
@@ -1294,6 +1298,7 @@ namespace JSC {
int m_thisRegister;
int m_argumentsRegister;
int m_activationRegister;
+ int m_globalObjectConstant;
bool m_needsFullScopeChain;
bool m_usesEval;
@@ -1306,7 +1311,6 @@ namespace JSC {
unsigned m_sourceOffset;
Vector<unsigned> m_propertyAccessInstructions;
- Vector<unsigned> m_globalResolveInstructions;
#if ENABLE(LLINT)
SegmentedVector<LLIntCallLinkInfo, 8> m_llintCallLinkInfos;
SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo> > m_incomingLLIntCalls;
@@ -1314,7 +1318,6 @@ namespace JSC {
#if ENABLE(JIT)
Vector<StructureStubInfo> m_structureStubInfos;
Vector<ByValInfo> m_byValInfos;
- Vector<GlobalResolveInfo> m_globalResolveInfos;
Vector<CallLinkInfo> m_callLinkInfos;
Vector<MethodCallLinkInfo> m_methodCallLinkInfos;
JITCode m_jitCode;
@@ -1406,6 +1409,8 @@ namespace JSC {
Vector<Comment> m_bytecodeComments;
size_t m_bytecodeCommentIterator;
#endif
+ Vector<ResolveOperations> m_resolveOperations;
+ Vector<PutToBaseOperation> m_putToBaseOperations;
struct RareData {
WTF_MAKE_FAST_ALLOCATED;
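addResolve and addPutToBase above implement a side-table scheme: the bytecode stores only a small integer operand, and the CodeBlock owns a parallel vector of mutable cache entries that the integer indexes. A simplified standalone version of the shape of that API (names are illustrative):

    #include <cstdint>
    #include <vector>

    struct ResolveCache { /* structure, offset, ... filled in lazily at run time */ };

    class ExampleCodeBlock {
    public:
        // Returns the operand to embed in the bytecode stream.
        uint32_t addResolve()
        {
            m_resolveCaches.push_back(ResolveCache());
            return static_cast<uint32_t>(m_resolveCaches.size() - 1);
        }

        ResolveCache* resolveCache(uint32_t index) { return &m_resolveCaches[index]; }

    private:
        std::vector<ResolveCache> m_resolveCaches;
    };

    int main()
    {
        ExampleCodeBlock block;
        uint32_t index = block.addResolve(); // operand recorded in the bytecode
        return block.resolveCache(index) ? 0 : 1;
    }

Storing an index rather than a raw pointer in the bytecode is what makes this safe: the vector may reallocate while code is still being generated, and the pointer is looked up by index only when the operation actually executes.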
diff --git a/Source/JavaScriptCore/bytecode/DFGExitProfile.h b/Source/JavaScriptCore/bytecode/DFGExitProfile.h
index 45947c8af..57fb06bda 100644
--- a/Source/JavaScriptCore/bytecode/DFGExitProfile.h
+++ b/Source/JavaScriptCore/bytecode/DFGExitProfile.h
@@ -36,6 +36,7 @@ enum ExitKind {
ExitKindUnset,
BadType, // We exited because a type prediction was wrong.
BadCache, // We exited because an inline cache was wrong.
+ BadIndexingType, // We exited because an indexing type was wrong.
Overflow, // We exited because of overflow.
NegativeZero, // We exited because we encountered negative zero.
OutOfBounds, // We had an out-of-bounds access to an array.
diff --git a/Source/JavaScriptCore/bytecode/Opcode.h b/Source/JavaScriptCore/bytecode/Opcode.h
index dd62df700..3ce56c80e 100644
--- a/Source/JavaScriptCore/bytecode/Opcode.h
+++ b/Source/JavaScriptCore/bytecode/Opcode.h
@@ -96,22 +96,30 @@ namespace JSC {
macro(op_is_function, 3) \
macro(op_in, 4) \
\
- macro(op_resolve, 4) /* has value profiling */ \
- macro(op_resolve_skip, 5) /* has value profiling */ \
- macro(op_resolve_global, 6) /* has value profiling */ \
- macro(op_resolve_global_dynamic, 7) /* has value profiling */ \
- macro(op_get_scoped_var, 5) /* has value profiling */ \
- macro(op_put_scoped_var, 4) \
- macro(op_get_global_var, 4) /* has value profiling */ \
- macro(op_get_global_var_watchable, 5) /* has value profiling */ \
- macro(op_put_global_var, 3) \
- macro(op_put_global_var_check, 5) \
+ macro(op_resolve, 5) /* has value profiling */ \
+ macro(op_resolve_global_property, 5) /* has value profiling */ \
+ macro(op_resolve_global_var, 5) /* has value profiling */ \
+ macro(op_resolve_scoped_var, 5) /* has value profiling */ \
+ macro(op_resolve_scoped_var_on_top_scope, 5) /* has value profiling */ \
+ macro(op_resolve_scoped_var_with_top_scope_check, 5) /* has value profiling */ \
+ \
+ macro(op_resolve_base_to_global, 7) /* has value profiling */ \
+ macro(op_resolve_base_to_global_dynamic, 7) /* has value profiling */ \
+ macro(op_resolve_base_to_scope, 7) /* has value profiling */ \
+ macro(op_resolve_base_to_scope_with_top_scope_check, 7) /* has value profiling */ \
+ macro(op_resolve_base, 7) /* has value profiling */ \
+ \
+ macro(op_ensure_property_exists, 3) \
+ \
+ macro(op_resolve_with_base, 7) /* has value profiling */ \
+ \
+ macro(op_resolve_with_this, 6) /* has value profiling */ \
+ \
+ macro(op_put_to_base, 5) \
+ macro(op_put_to_base_variable, 5) \
+ \
macro(op_init_global_const, 3) \
macro(op_init_global_const_check, 5) \
- macro(op_resolve_base, 5) /* has value profiling */ \
- macro(op_ensure_property_exists, 3) \
- macro(op_resolve_with_base, 5) /* has value profiling */ \
- macro(op_resolve_with_this, 5) /* has value profiling */ \
macro(op_get_by_id, 9) /* has value profiling */ \
macro(op_get_by_id_out_of_line, 9) /* has value profiling */ \
macro(op_get_by_id_self, 9) /* has value profiling */ \
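Each macro entry above pairs an opcode with its total length in instruction slots (opcode plus operands), which is what lets the interpreter and tools walk the instruction stream without decoding every operand. A minimal sketch of that walk, with illustrative opcodes and lengths:

    #include <cstdio>

    enum ExampleOpcode { ex_resolve, ex_put_to_base, ex_end };
    static const unsigned exampleOpcodeLengths[] = { 5, 5, 1 }; // same style as the table above

    static void walk(const unsigned* stream, unsigned size)
    {
        for (unsigned i = 0; i < size; ) {
            unsigned opcode = stream[i];
            std::printf("[%4u] opcode %u\n", i, opcode);
            i += exampleOpcodeLengths[opcode]; // skip this instruction's operands
        }
    }

    int main()
    {
        // ex_resolve + 4 operands, ex_put_to_base + 4 operands, then ex_end.
        const unsigned stream[] = { ex_resolve, 0, 1, 2, 3, ex_put_to_base, 0, 1, 2, 3, ex_end };
        walk(stream, sizeof(stream) / sizeof(stream[0]));
        return 0;
    }

This is also why the lengths change in this patch: the resolve family gained a resolve-operations operand, so op_resolve grows from 4 to 5 slots and op_resolve_base from 5 to 7.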
diff --git a/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.cpp b/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.cpp
index c9fd7dca2..7814f8c99 100644
--- a/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.cpp
@@ -32,13 +32,11 @@
namespace JSC {
-#if ENABLE(LLINT) || (ENABLE(JIT) && ENABLE(VALUE_PROFILER))
static ResolveGlobalStatus computeForStructure(CodeBlock* codeBlock, Structure* structure, Identifier& identifier)
{
unsigned attributesIgnored;
JSCell* specificValue;
- PropertyOffset offset = structure->get(
- *codeBlock->globalData(), identifier, attributesIgnored, specificValue);
+ PropertyOffset offset = structure->get(*codeBlock->globalData(), identifier, attributesIgnored, specificValue);
if (structure->isDictionary())
specificValue = 0;
if (!isValidOffset(offset))
@@ -46,46 +44,14 @@ static ResolveGlobalStatus computeForStructure(CodeBlock* codeBlock, Structure*
return ResolveGlobalStatus(ResolveGlobalStatus::Simple, structure, offset, specificValue);
}
-#endif // ENABLE(LLINT) || ENABLE(JIT)
-
-static ResolveGlobalStatus computeForLLInt(CodeBlock* codeBlock, unsigned bytecodeIndex, Identifier& identifier)
+ResolveGlobalStatus ResolveGlobalStatus::computeFor(CodeBlock* codeBlock, int, ResolveOperation* operation, Identifier& identifier)
{
-#if ENABLE(LLINT)
- Instruction* instruction = codeBlock->instructions().begin() + bytecodeIndex;
-
- ASSERT(instruction[0].u.opcode == LLInt::getOpcode(op_resolve_global));
-
- Structure* structure = instruction[3].u.structure.get();
- if (!structure)
+ ASSERT(operation->m_operation == ResolveOperation::GetAndReturnGlobalProperty);
+ if (!operation->m_structure)
return ResolveGlobalStatus();
- return computeForStructure(codeBlock, structure, identifier);
-#else
- UNUSED_PARAM(codeBlock);
- UNUSED_PARAM(bytecodeIndex);
- UNUSED_PARAM(identifier);
- return ResolveGlobalStatus();
-#endif
-}
-
-ResolveGlobalStatus ResolveGlobalStatus::computeFor(CodeBlock* codeBlock, unsigned bytecodeIndex, Identifier& identifier)
-{
-#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
- if (!codeBlock->numberOfGlobalResolveInfos())
- return computeForLLInt(codeBlock, bytecodeIndex, identifier);
-
- if (codeBlock->likelyToTakeSlowCase(bytecodeIndex))
- return ResolveGlobalStatus(TakesSlowPath);
-
- GlobalResolveInfo& globalResolveInfo = codeBlock->globalResolveInfoForBytecodeOffset(bytecodeIndex);
-
- if (!globalResolveInfo.structure)
- return computeForLLInt(codeBlock, bytecodeIndex, identifier);
-
- return computeForStructure(codeBlock, globalResolveInfo.structure.get(), identifier);
-#else
- return computeForLLInt(codeBlock, bytecodeIndex, identifier);
-#endif
+ return computeForStructure(codeBlock, operation->m_structure.get(), identifier);
}
} // namespace JSC
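After this change, computeFor no longer probes LLInt instructions or a GlobalResolveInfo table; it asks one question of the cached ResolveOperation: does its Structure yield a valid property offset? A reduced sketch of that decision, with all types replaced by stand-ins:

    #include <map>
    #include <string>

    typedef int PropertyOffset;
    static const PropertyOffset invalidOffset = -1;

    struct ExampleStructure {
        std::map<std::string, PropertyOffset> properties;
        PropertyOffset get(const std::string& name) const
        {
            std::map<std::string, PropertyOffset>::const_iterator it = properties.find(name);
            return it == properties.end() ? invalidOffset : it->second;
        }
    };

    enum ResolveState { NoInformation, Simple };

    static ResolveState computeForStructure(const ExampleStructure& structure, const std::string& identifier)
    {
        PropertyOffset offset = structure.get(identifier);
        if (offset == invalidOffset)
            return NoInformation; // fall back to the generic resolve path
        return Simple;            // the DFG can load straight from the known offset
    }

    int main()
    {
        ExampleStructure structure;
        structure.properties["x"] = 2;
        return computeForStructure(structure, "x") == Simple ? 0 : 1;
    }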
diff --git a/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.h b/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.h
index cbe4d3b5f..46a9254e7 100644
--- a/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.h
+++ b/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.h
@@ -34,6 +34,7 @@ namespace JSC {
class CodeBlock;
class Identifier;
+struct ResolveOperation;
class Structure;
class ResolveGlobalStatus {
@@ -61,7 +62,7 @@ public:
{
}
- static ResolveGlobalStatus computeFor(CodeBlock*, unsigned bytecodeIndex, Identifier&);
+ static ResolveGlobalStatus computeFor(CodeBlock*, int bytecodeIndex, ResolveOperation*, Identifier&);
State state() const { return m_state; }
diff --git a/Source/JavaScriptCore/bytecode/StructureSet.h b/Source/JavaScriptCore/bytecode/StructureSet.h
index ebde9779f..c95d3047b 100644
--- a/Source/JavaScriptCore/bytecode/StructureSet.h
+++ b/Source/JavaScriptCore/bytecode/StructureSet.h
@@ -26,6 +26,7 @@
#ifndef StructureSet_h
#define StructureSet_h
+#include "ArrayProfile.h"
#include "SpeculatedType.h"
#include "Structure.h"
#include <stdio.h>
@@ -137,6 +138,16 @@ public:
return result;
}
+ ArrayModes arrayModesFromStructures() const
+ {
+ ArrayModes result = 0;
+
+ for (size_t i = 0; i < m_structures.size(); ++i)
+ mergeArrayModes(result, asArrayModes(m_structures[i]->indexingType()));
+
+ return result;
+ }
+
bool operator==(const StructureSet& other) const
{
if (m_structures.size() != other.m_structures.size())
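arrayModesFromStructures above folds a whole StructureSet into one ArrayModes mask, so a single bit-mask check can stand in for checking each Structure individually. A reduced sketch of the fold:

    #include <vector>

    typedef unsigned ArrayModes;

    struct ExampleStructure { unsigned indexingType; };

    static ArrayModes arrayModesFromStructures(const std::vector<ExampleStructure>& structures)
    {
        ArrayModes result = 0;
        for (size_t i = 0; i < structures.size(); ++i)
            result |= 1u << structures[i].indexingType; // i.e. asArrayModes(indexingType)
        return result;
    }

    int main()
    {
        std::vector<ExampleStructure> set;
        ExampleStructure a = { 3 };
        ExampleStructure b = { 5 };
        set.push_back(a);
        set.push_back(b);
        // One mask check against (1u << 3) | (1u << 5) now covers the whole set.
        return arrayModesFromStructures(set) == ((1u << 3) | (1u << 5)) ? 0 : 1;
    }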
diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
index 1160a1888..228277328 100644
--- a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
+++ b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
@@ -137,25 +137,8 @@ void ResolveResult::checkValidity()
case ReadOnlyRegister:
ASSERT(m_local);
return;
- case Lexical:
- case ReadOnlyLexical:
- case DynamicLexical:
- case DynamicReadOnlyLexical:
- ASSERT(m_index != missingSymbolMarker());
- return;
- case Global:
- case DynamicGlobal:
- ASSERT(m_globalObject);
- return;
- case IndexedGlobal:
- case ReadOnlyIndexedGlobal:
- case WatchedIndexedGlobal:
- case DynamicIndexedGlobal:
- case DynamicReadOnlyIndexedGlobal:
- ASSERT(m_index != missingSymbolMarker());
- ASSERT(m_globalObject);
- return;
case Dynamic:
+ ASSERT(!m_local);
return;
default:
ASSERT_NOT_REACHED();
@@ -163,11 +146,6 @@ void ResolveResult::checkValidity()
}
#endif
-WriteBarrier<Unknown>* ResolveResult::registerPointer() const
-{
- return &jsCast<JSGlobalObject*>(globalObject())->registerAt(index());
-}
-
static bool s_dumpsGeneratedCode = false;
void BytecodeGenerator::setDumpsGeneratedCode(bool dumpsGeneratedCode)
@@ -292,6 +270,8 @@ BytecodeGenerator::BytecodeGenerator(ProgramNode* programNode, JSScope* scope, S
, m_expressionTooDeep(false)
{
m_globalData->startedCompiling(m_codeBlock);
+ m_codeBlock->setGlobalObjectConstant(emitLoad(0, JSValue(m_codeBlock->globalObject()))->index());
+
if (m_shouldEmitDebugHooks)
m_codeBlock->setNeedsFullScopeChain(true);
@@ -324,7 +304,7 @@ BytecodeGenerator::BytecodeGenerator(ProgramNode* programNode, JSScope* scope, S
FunctionBodyNode* function = functionStack[i];
bool propertyDidExist =
globalObject->removeDirect(*m_globalData, function->ident()); // Newly declared functions overwrite existing properties.
-
+
JSValue value = JSFunction::create(exec, FunctionExecutable::create(*m_globalData, function), scope);
int index = addGlobalVar(
function->ident(), IsVariable,
@@ -374,6 +354,8 @@ BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, JSScope* sc
, m_expressionTooDeep(false)
{
m_globalData->startedCompiling(m_codeBlock);
+ m_codeBlock->setGlobalObjectConstant(emitLoad(0, JSValue(m_codeBlock->globalObject()))->index());
+
if (m_shouldEmitDebugHooks)
m_codeBlock->setNeedsFullScopeChain(true);
@@ -588,6 +570,8 @@ BytecodeGenerator::BytecodeGenerator(EvalNode* evalNode, JSScope* scope, SharedS
, m_expressionTooDeep(false)
{
m_globalData->startedCompiling(m_codeBlock);
+ m_codeBlock->setGlobalObjectConstant(emitLoad(0, JSValue(m_codeBlock->globalObject()))->index());
+
if (m_shouldEmitDebugHooks || m_baseScopeDepth)
m_codeBlock->setNeedsFullScopeChain(true);
@@ -1365,67 +1349,7 @@ ResolveResult BytecodeGenerator::resolve(const Identifier& property)
return ResolveResult::registerResolve(local, flags);
}
}
-
- // Cases where we cannot statically optimize the lookup.
- if (property == propertyNames().arguments || !canOptimizeNonLocals())
- return ResolveResult::dynamicResolve(0);
-
- ScopeChainIterator iter = m_scope->begin();
- ScopeChainIterator end = m_scope->end();
- size_t depth = 0;
- size_t depthOfFirstScopeWithDynamicChecks = 0;
- unsigned flags = 0;
- for (; iter != end; ++iter, ++depth) {
- JSObject* currentScope = iter.get();
- if (!currentScope->isVariableObject()) {
- flags |= ResolveResult::DynamicFlag;
- break;
- }
- JSSymbolTableObject* currentVariableObject = jsCast<JSSymbolTableObject*>(currentScope);
- SymbolTableEntry entry = currentVariableObject->symbolTable()->get(property.impl());
-
- // Found the property
- if (!entry.isNull()) {
- if (entry.isReadOnly())
- flags |= ResolveResult::ReadOnlyFlag;
- depth += m_codeBlock->needsFullScopeChain();
- if (++iter == end) {
- if (flags & ResolveResult::DynamicFlag)
- return ResolveResult::dynamicIndexedGlobalResolve(entry.getIndex(), depth, currentScope, flags);
- return ResolveResult::indexedGlobalResolve(
- entry.getIndex(), currentScope,
- flags | (entry.couldBeWatched() ? ResolveResult::WatchedFlag : 0));
- }
-#if !ASSERT_DISABLED
- if (JSActivation* activation = jsDynamicCast<JSActivation*>(currentVariableObject))
- ASSERT(activation->isValid(entry));
-#endif
- return ResolveResult::lexicalResolve(entry.getIndex(), depth, flags);
- }
- bool scopeRequiresDynamicChecks = false;
- if (currentVariableObject->isDynamicScope(scopeRequiresDynamicChecks))
- break;
- if (!(flags & ResolveResult::DynamicFlag)) {
- if (scopeRequiresDynamicChecks)
- flags |= ResolveResult::DynamicFlag;
- else
- ++depthOfFirstScopeWithDynamicChecks;
- }
- }
-
- // Can't locate the property but we're able to avoid a few lookups.
- JSObject* scope = iter.get();
- // Step over the function's activation, if it needs one. At this point we
- // know there is no dynamic scope in the function itself, so this is safe to
- // do.
- depth += m_codeBlock->needsFullScopeChain();
- depthOfFirstScopeWithDynamicChecks += m_codeBlock->needsFullScopeChain();
- if (++iter == end) {
- if ((flags & ResolveResult::DynamicFlag) && depth)
- return ResolveResult::dynamicGlobalResolve(depth, scope);
- return ResolveResult::globalResolve(scope);
- }
- return ResolveResult::dynamicResolve(depthOfFirstScopeWithDynamicChecks);
+ return ResolveResult::dynamicResolve();
}
ResolveResult BytecodeGenerator::resolveConstDecl(const Identifier& property)
@@ -1440,26 +1364,7 @@ ResolveResult BytecodeGenerator::resolveConstDecl(const Identifier& property)
}
}
- // Const declarations in eval code or global code.
- ScopeChainIterator iter = scope()->begin();
- ScopeChainIterator end = scope()->end();
- size_t depth = 0;
- for (; iter != end; ++iter, ++depth) {
- JSObject* currentScope = iter.get();
- if (!currentScope->isVariableObject())
- continue;
- JSSymbolTableObject* currentVariableObject = jsCast<JSSymbolTableObject*>(currentScope);
- SymbolTableEntry entry = currentVariableObject->symbolTable()->get(property.impl());
- if (entry.isNull())
- continue;
- if (++iter == end)
- return ResolveResult::indexedGlobalResolve(entry.getIndex(), currentVariableObject, 0);
- return ResolveResult::lexicalResolve(entry.getIndex(), depth + scopeDepth(), 0);
- }
-
- // FIXME: While this code should only be hit in an eval block, it will assign
- // to the wrong base if property exists in an intervening with scope.
- return ResolveResult::dynamicResolve(scopeDepth());
+ return ResolveResult::dynamicResolve();
}
void BytecodeGenerator::emitCheckHasInstance(RegisterID* dst, RegisterID* value, RegisterID* base, Label* target)
@@ -1481,158 +1386,89 @@ RegisterID* BytecodeGenerator::emitInstanceOf(RegisterID* dst, RegisterID* value
return dst;
}
-static const unsigned maxGlobalResolves = 128;
-
bool BytecodeGenerator::shouldAvoidResolveGlobal()
{
- return m_codeBlock->globalResolveInfoCount() > maxGlobalResolves && !m_labelScopes.size();
+ return !m_labelScopes.size();
}
RegisterID* BytecodeGenerator::emitResolve(RegisterID* dst, const ResolveResult& resolveResult, const Identifier& property)
{
- if (resolveResult.isStatic())
- return emitGetStaticVar(dst, resolveResult, property);
-
- if (resolveResult.isGlobal() && !shouldAvoidResolveGlobal()) {
-#if ENABLE(JIT)
- m_codeBlock->addGlobalResolveInfo(instructions().size());
-#endif
- m_codeBlock->addGlobalResolveInstruction(instructions().size());
- bool dynamic = resolveResult.isDynamic() && resolveResult.depth();
- ValueProfile* profile = emitProfiledOpcode(dynamic ? op_resolve_global_dynamic : op_resolve_global);
- instructions().append(dst->index());
- instructions().append(addConstant(property));
- instructions().append(0);
- instructions().append(0);
- if (dynamic)
- instructions().append(resolveResult.depth());
- instructions().append(profile);
- return dst;
- }
-
- if (resolveResult.type() == ResolveResult::Dynamic && resolveResult.depth()) {
- // In this case we are at least able to drop a few scope chains from the
- // lookup chain, although we still need to hash from then on.
- ValueProfile* profile = emitProfiledOpcode(op_resolve_skip);
- instructions().append(dst->index());
- instructions().append(addConstant(property));
- instructions().append(resolveResult.depth());
- instructions().append(profile);
- return dst;
- }
+
+ if (resolveResult.isRegister())
+ return emitGetLocalVar(dst, resolveResult, property);
ValueProfile* profile = emitProfiledOpcode(op_resolve);
instructions().append(dst->index());
instructions().append(addConstant(property));
+ instructions().append(getResolveOperations(property));
instructions().append(profile);
return dst;
}
RegisterID* BytecodeGenerator::emitResolveBase(RegisterID* dst, const ResolveResult& resolveResult, const Identifier& property)
{
- if (resolveResult.isGlobal() && !resolveResult.isDynamic())
- // Global object is the base
- return emitLoad(dst, JSValue(resolveResult.globalObject()));
-
+ ASSERT_UNUSED(resolveResult, !resolveResult.isRegister());
// We can't optimise at all :-(
ValueProfile* profile = emitProfiledOpcode(op_resolve_base);
instructions().append(dst->index());
instructions().append(addConstant(property));
instructions().append(false);
+ instructions().append(getResolveBaseOperations(property));
+ instructions().append(0);
instructions().append(profile);
return dst;
}
-RegisterID* BytecodeGenerator::emitResolveBaseForPut(RegisterID* dst, const ResolveResult& resolveResult, const Identifier& property)
+RegisterID* BytecodeGenerator::emitResolveBaseForPut(RegisterID* dst, const ResolveResult& resolveResult, const Identifier& property, NonlocalResolveInfo& verifier)
{
- if (!m_codeBlock->isStrictMode())
- return emitResolveBase(dst, resolveResult, property);
-
- if (resolveResult.isGlobal() && !resolveResult.isDynamic()) {
- // Global object is the base
- RefPtr<RegisterID> result = emitLoad(dst, JSValue(resolveResult.globalObject()));
- emitOpcode(op_ensure_property_exists);
- instructions().append(dst->index());
- instructions().append(addConstant(property));
- return result.get();
- }
-
+ ASSERT_UNUSED(resolveResult, !resolveResult.isRegister());
// We can't optimise at all :-(
ValueProfile* profile = emitProfiledOpcode(op_resolve_base);
instructions().append(dst->index());
instructions().append(addConstant(property));
- instructions().append(true);
+ instructions().append(m_codeBlock->isStrictMode());
+ uint32_t putToBaseIndex = 0;
+ instructions().append(getResolveBaseForPutOperations(property, putToBaseIndex));
+ verifier.resolved(putToBaseIndex);
+ instructions().append(putToBaseIndex);
instructions().append(profile);
return dst;
}
-RegisterID* BytecodeGenerator::emitResolveWithBase(RegisterID* baseDst, RegisterID* propDst, const ResolveResult& resolveResult, const Identifier& property)
+RegisterID* BytecodeGenerator::emitResolveWithBaseForPut(RegisterID* baseDst, RegisterID* propDst, const ResolveResult& resolveResult, const Identifier& property, NonlocalResolveInfo& verifier)
{
- if (resolveResult.isGlobal() && !resolveResult.isDynamic()) {
- // Global object is the base
- emitLoad(baseDst, JSValue(resolveResult.globalObject()));
-
- if (resolveResult.isStatic()) {
- // Directly index the property lookup across multiple scopes.
- emitGetStaticVar(propDst, resolveResult, property);
- return baseDst;
- }
-
- if (shouldAvoidResolveGlobal()) {
- ValueProfile* profile = emitProfiledOpcode(op_resolve);
- instructions().append(propDst->index());
- instructions().append(addConstant(property));
- instructions().append(profile);
- return baseDst;
- }
-
-#if ENABLE(JIT)
- m_codeBlock->addGlobalResolveInfo(instructions().size());
-#endif
- m_codeBlock->addGlobalResolveInstruction(instructions().size());
- ValueProfile* profile = emitProfiledOpcode(op_resolve_global);
- instructions().append(propDst->index());
- instructions().append(addConstant(property));
- instructions().append(0);
- instructions().append(0);
- instructions().append(profile);
- return baseDst;
- }
-
+ ASSERT_UNUSED(resolveResult, !resolveResult.isRegister());
ValueProfile* profile = emitProfiledOpcode(op_resolve_with_base);
instructions().append(baseDst->index());
instructions().append(propDst->index());
instructions().append(addConstant(property));
+ uint32_t putToBaseIndex = 0;
+ instructions().append(getResolveWithBaseForPutOperations(property, putToBaseIndex));
+ verifier.resolved(putToBaseIndex);
+ instructions().append(putToBaseIndex);
instructions().append(profile);
return baseDst;
}
RegisterID* BytecodeGenerator::emitResolveWithThis(RegisterID* baseDst, RegisterID* propDst, const ResolveResult& resolveResult, const Identifier& property)
{
- if (resolveResult.isStatic()) {
+ if (resolveResult.isRegister()) {
emitLoad(baseDst, jsUndefined());
- emitGetStaticVar(propDst, resolveResult, property);
+ emitGetLocalVar(propDst, resolveResult, property);
return baseDst;
}
- if (resolveResult.type() == ResolveResult::Dynamic) {
- // We can't optimise at all :-(
- ValueProfile* profile = emitProfiledOpcode(op_resolve_with_this);
- instructions().append(baseDst->index());
- instructions().append(propDst->index());
- instructions().append(addConstant(property));
- instructions().append(profile);
- return baseDst;
- }
-
- emitLoad(baseDst, jsUndefined());
- return emitResolve(propDst, resolveResult, property);
+ ValueProfile* profile = emitProfiledOpcode(op_resolve_with_this);
+ instructions().append(baseDst->index());
+ instructions().append(propDst->index());
+ instructions().append(addConstant(property));
+ instructions().append(getResolveWithThisOperations(property));
+ instructions().append(profile);
+ return baseDst;
}
-RegisterID* BytecodeGenerator::emitGetStaticVar(RegisterID* dst, const ResolveResult& resolveResult, const Identifier& identifier)
+RegisterID* BytecodeGenerator::emitGetLocalVar(RegisterID* dst, const ResolveResult& resolveResult, const Identifier&)
{
- ValueProfile* profile = 0;
-
switch (resolveResult.type()) {
case ResolveResult::Register:
case ResolveResult::ReadOnlyRegister:
@@ -1640,107 +1476,33 @@ RegisterID* BytecodeGenerator::emitGetStaticVar(RegisterID* dst, const ResolveRe
return 0;
return moveToDestinationIfNeeded(dst, resolveResult.local());
- case ResolveResult::Lexical:
- case ResolveResult::ReadOnlyLexical:
- profile = emitProfiledOpcode(op_get_scoped_var);
- instructions().append(dst->index());
- instructions().append(resolveResult.index());
- instructions().append(resolveResult.depth());
- instructions().append(profile);
- return dst;
-
- case ResolveResult::IndexedGlobal:
- case ResolveResult::ReadOnlyIndexedGlobal:
- if (m_lastOpcodeID == op_put_global_var) {
- WriteBarrier<Unknown>* dstPointer;
- int srcIndex;
- retrieveLastUnaryOp(dstPointer, srcIndex);
- if (dstPointer == resolveResult.registerPointer() && srcIndex == dst->index())
- return dst;
- }
-
- profile = emitProfiledOpcode(op_get_global_var);
- instructions().append(dst->index());
- instructions().append(resolveResult.registerPointer());
- instructions().append(profile);
- return dst;
-
- case ResolveResult::WatchedIndexedGlobal:
- // Skip the peephole for now. It's not clear that it's profitable given
- // the DFG's capabilities, and the fact that if it's watchable then we
- // don't expect to see any put_global_var's anyway.
- profile = emitProfiledOpcode(op_get_global_var_watchable);
- instructions().append(dst->index());
- instructions().append(resolveResult.registerPointer());
- instructions().append(addConstant(identifier)); // For the benefit of the DFG.
- instructions().append(profile);
- return dst;
-
default:
ASSERT_NOT_REACHED();
return 0;
}
}
-RegisterID* BytecodeGenerator::emitInitGlobalConst(const ResolveResult& resolveResult, const Identifier& identifier, RegisterID* value)
+RegisterID* BytecodeGenerator::emitInitGlobalConst(const Identifier& identifier, RegisterID* value)
{
ASSERT(m_codeType == GlobalCode);
- switch (resolveResult.type()) {
- case ResolveResult::IndexedGlobal:
- case ResolveResult::ReadOnlyIndexedGlobal:
- emitOpcode(op_init_global_const);
- instructions().append(resolveResult.registerPointer());
- instructions().append(value->index());
- return value;
-
- case ResolveResult::WatchedIndexedGlobal:
- emitOpcode(op_init_global_const_check);
- instructions().append(resolveResult.registerPointer());
- instructions().append(value->index());
- instructions().append(jsCast<JSGlobalObject*>(resolveResult.globalObject())->symbolTable()->get(identifier.impl()).addressOfIsWatched());
- instructions().append(addConstant(identifier));
- return value;
-
- default:
- ASSERT_NOT_REACHED();
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
+ SymbolTableEntry entry = globalObject->symbolTable()->get(identifier.impl());
+ if (entry.isNull())
return 0;
- }
-}
-
-RegisterID* BytecodeGenerator::emitPutStaticVar(const ResolveResult& resolveResult, const Identifier& identifier, RegisterID* value)
-{
- switch (resolveResult.type()) {
- case ResolveResult::Register:
- case ResolveResult::ReadOnlyRegister:
- return moveToDestinationIfNeeded(resolveResult.local(), value);
-
- case ResolveResult::Lexical:
- case ResolveResult::ReadOnlyLexical:
- emitOpcode(op_put_scoped_var);
- instructions().append(resolveResult.index());
- instructions().append(resolveResult.depth());
- instructions().append(value->index());
- return value;
-
- case ResolveResult::IndexedGlobal:
- case ResolveResult::ReadOnlyIndexedGlobal:
- emitOpcode(op_put_global_var);
- instructions().append(resolveResult.registerPointer());
- instructions().append(value->index());
- return value;
-
- case ResolveResult::WatchedIndexedGlobal:
- emitOpcode(op_put_global_var_check);
- instructions().append(resolveResult.registerPointer());
+
+ if (entry.couldBeWatched()) {
+ emitOpcode(op_init_global_const_check);
+ instructions().append(&globalObject->registerAt(entry.getIndex()));
instructions().append(value->index());
- instructions().append(jsCast<JSGlobalObject*>(resolveResult.globalObject())->symbolTable()->get(identifier.impl()).addressOfIsWatched());
+ instructions().append(entry.addressOfIsWatched());
instructions().append(addConstant(identifier));
return value;
-
- default:
- ASSERT_NOT_REACHED();
- return 0;
}
+
+ emitOpcode(op_init_global_const);
+ instructions().append(&globalObject->registerAt(entry.getIndex()));
+ instructions().append(value->index());
+ return value;
}
void BytecodeGenerator::emitMethodCheck()
@@ -1790,6 +1552,16 @@ RegisterID* BytecodeGenerator::emitPutById(RegisterID* base, const Identifier& p
return value;
}
+RegisterID* BytecodeGenerator::emitPutToBase(RegisterID* base, const Identifier& property, RegisterID* value, NonlocalResolveInfo& resolveInfo)
+{
+ emitOpcode(op_put_to_base);
+ instructions().append(base->index());
+ instructions().append(addConstant(property));
+ instructions().append(value->index());
+ instructions().append(resolveInfo.put());
+ return value;
+}
+
RegisterID* BytecodeGenerator::emitDirectPutById(RegisterID* base, const Identifier& property, RegisterID* value)
{
m_codeBlock->addPropertyAccessInstruction(instructions().size());
diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h
index ae79a13ae..246530ab2 100644
--- a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h
+++ b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h
@@ -123,124 +123,42 @@ namespace JSC {
// We need to traverse the scope chain at runtime, checking for
// non-strict eval and/or `with' nodes.
DynamicFlag = 0x2,
- // The property was resolved to a definite location, and the
- // identifier is not needed any more.
- StaticFlag = 0x4,
- // Once we have the base object, the property will be located at a
- // known index.
- IndexedFlag = 0x8,
- // Skip some number of objects in the scope chain, given by "depth".
- ScopedFlag = 0x10,
// The resolved binding is immutable.
- ReadOnlyFlag = 0x20,
- // The base object is the global object.
- GlobalFlag = 0x40,
- // The property is being watched, so writes should be special.
- WatchedFlag = 0x80
+ ReadOnlyFlag = 0x4
};
+
enum Type {
// The property is local, and stored in a register.
- Register = RegisterFlag | StaticFlag,
+ Register = RegisterFlag,
// A read-only local, created by "const".
- ReadOnlyRegister = RegisterFlag | ReadOnlyFlag | StaticFlag,
- // The property is statically scoped free variable. Its coordinates
- // are in "index" and "depth".
- Lexical = IndexedFlag | ScopedFlag | StaticFlag,
- // A read-only Lexical, created by "const".
- ReadOnlyLexical = IndexedFlag | ScopedFlag | ReadOnlyFlag | StaticFlag,
- // The property was not bound lexically, so at runtime we should
- // look directly in the global object.
- Global = GlobalFlag,
- // Like Global, but we could actually resolve the property to a
- // DontDelete property in the global object, for instance, any
- // binding created with "var" at the top level. At runtime we'll
- // just index into the global object.
- IndexedGlobal = IndexedFlag | GlobalFlag | StaticFlag,
- // Like IndexedGlobal, but the property is being watched.
- WatchedIndexedGlobal = IndexedFlag | GlobalFlag | StaticFlag | WatchedFlag,
- // Like IndexedGlobal, but the property is also read-only, like NaN,
- // Infinity, or undefined.
- ReadOnlyIndexedGlobal = IndexedFlag | ReadOnlyFlag | GlobalFlag | StaticFlag,
- // The property could not be resolved statically, due to the
- // presence of `with' blocks. At runtime we'll have to walk the
- // scope chain. ScopedFlag is set to indicate that "depth" will
- // hold some number of nodes to skip in the scope chain, before
- // beginning the search.
- Dynamic = DynamicFlag | ScopedFlag,
- // The property was located as a statically scoped free variable,
- // but while traversing the scope chain, there was an intermediate
- // activation that used non-strict `eval'. At runtime we'll have to
- // check for the absence of this property in those intervening
- // scopes.
- DynamicLexical = DynamicFlag | IndexedFlag | ScopedFlag,
- // Like ReadOnlyLexical, but with intervening non-strict `eval'.
- DynamicReadOnlyLexical = DynamicFlag | IndexedFlag | ScopedFlag | ReadOnlyFlag,
- // Like Global, but with intervening non-strict `eval'. As with
- // Dynamic, ScopeFlag is set to indicate that "depth" does indeed
- // store a number of frames to skip before doing the dynamic checks.
- DynamicGlobal = DynamicFlag | GlobalFlag | ScopedFlag,
- // Like IndexedGlobal, but with intervening non-strict `eval'.
- DynamicIndexedGlobal = DynamicFlag | IndexedFlag | GlobalFlag | ScopedFlag,
- // Like ReadOnlyIndexedGlobal, but with intervening non-strict
- // `eval'.
- DynamicReadOnlyIndexedGlobal = DynamicFlag | IndexedFlag | ReadOnlyFlag | GlobalFlag | ScopedFlag,
+ ReadOnlyRegister = RegisterFlag | ReadOnlyFlag,
+ // Any form of non-local lookup.
+ Dynamic = DynamicFlag
};
static ResolveResult registerResolve(RegisterID *local, unsigned flags)
{
- return ResolveResult(Register | flags, local, missingSymbolMarker(), 0, 0);
- }
- static ResolveResult dynamicResolve(size_t depth)
- {
- return ResolveResult(Dynamic, 0, missingSymbolMarker(), depth, 0);
- }
- static ResolveResult lexicalResolve(int index, size_t depth, unsigned flags)
- {
- unsigned type = (flags & DynamicFlag) ? DynamicLexical : Lexical;
- return ResolveResult(type | flags, 0, index, depth, 0);
- }
- static ResolveResult indexedGlobalResolve(int index, JSObject *globalObject, unsigned flags)
- {
- return ResolveResult(IndexedGlobal | flags, 0, index, 0, globalObject);
+ return ResolveResult(Register | flags, local);
}
- static ResolveResult dynamicIndexedGlobalResolve(int index, size_t depth, JSObject *globalObject, unsigned flags)
+ static ResolveResult dynamicResolve()
{
- return ResolveResult(DynamicIndexedGlobal | flags, 0, index, depth, globalObject);
+ return ResolveResult(Dynamic, 0);
}
- static ResolveResult globalResolve(JSObject *globalObject)
- {
- return ResolveResult(Global, 0, missingSymbolMarker(), 0, globalObject);
- }
- static ResolveResult dynamicGlobalResolve(size_t dynamicDepth, JSObject *globalObject)
- {
- return ResolveResult(DynamicGlobal, 0, missingSymbolMarker(), dynamicDepth, globalObject);
- }
-
unsigned type() const { return m_type; }
+
// Returns the register corresponding to a local variable, or 0 if no
// such register exists. Registers returned by ResolveResult::local() do
// not require explicit reference counting.
RegisterID* local() const { return m_local; }
- int index() const { ASSERT (isIndexed() || isRegister()); return m_index; }
- size_t depth() const { ASSERT(isScoped()); return m_depth; }
- JSObject* globalObject() const { ASSERT(isGlobal()); ASSERT(m_globalObject); return m_globalObject; }
- WriteBarrier<Unknown>* registerPointer() const;
bool isRegister() const { return m_type & RegisterFlag; }
bool isDynamic() const { return m_type & DynamicFlag; }
- bool isStatic() const { return m_type & StaticFlag; }
- bool isIndexed() const { return m_type & IndexedFlag; }
- bool isScoped() const { return m_type & ScopedFlag; }
bool isReadOnly() const { return (m_type & ReadOnlyFlag) && !isDynamic(); }
- bool isGlobal() const { return m_type & GlobalFlag; }
private:
- ResolveResult(unsigned type, RegisterID* local, int index, size_t depth, JSObject* globalObject)
+ ResolveResult(unsigned type, RegisterID* local)
: m_type(type)
- , m_index(index)
, m_local(local)
- , m_depth(depth)
- , m_globalObject(globalObject)
{
#ifndef NDEBUG
checkValidity();
@@ -252,10 +170,36 @@ namespace JSC {
#endif
unsigned m_type;
- int m_index; // Index in scope, if IndexedFlag is set
RegisterID* m_local; // Local register, if RegisterFlag is set
- size_t m_depth; // Depth in scope chain, if ScopedFlag is set
- JSObject* m_globalObject; // If GlobalFlag is set.
+ };
+
+ struct NonlocalResolveInfo {
+ friend class BytecodeGenerator;
+ NonlocalResolveInfo()
+ : m_state(Unused)
+ {
+ }
+ ~NonlocalResolveInfo()
+ {
+ ASSERT(m_state == Put);
+ }
+ private:
+ void resolved(uint32_t putToBaseIndex)
+ {
+ ASSERT(putToBaseIndex);
+ ASSERT(m_state == Unused);
+ m_state = Resolved;
+ m_putToBaseIndex = putToBaseIndex;
+ }
+ uint32_t put()
+ {
+ ASSERT(m_state == Resolved);
+ m_state = Put;
+ return m_putToBaseIndex;
+ }
+ enum State { Unused, Resolved, Put };
+ State m_state;
+ uint32_t m_putToBaseIndex;
};
class BytecodeGenerator {
@@ -367,7 +311,7 @@ namespace JSC {
// Node::emitCode assumes that dst, if provided, is either a local or a referenced temporary.
ASSERT(!dst || dst == ignoredResult() || !dst->isTemporary() || dst->refCount());
addLineInfo(n->lineNo());
- return m_stack.recursionCheck()
+ return m_stack.isSafeToRecurse()
? n->emitBytecode(*this, dst)
: emitThrowExpressionTooDeepException();
}
@@ -380,7 +324,7 @@ namespace JSC {
void emitNodeInConditionContext(ExpressionNode* n, Label* trueTarget, Label* falseTarget, bool fallThroughMeansTrue)
{
addLineInfo(n->lineNo());
- if (m_stack.recursionCheck())
+ if (m_stack.isSafeToRecurse())
n->emitBytecodeInConditionContext(*this, trueTarget, falseTarget, fallThroughMeansTrue);
else
emitThrowExpressionTooDeepException();
@@ -466,16 +410,17 @@ namespace JSC {
RegisterID* emitTypeOf(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_typeof, dst, src); }
RegisterID* emitIn(RegisterID* dst, RegisterID* property, RegisterID* base) { return emitBinaryOp(op_in, dst, property, base, OperandTypes()); }
- RegisterID* emitGetStaticVar(RegisterID* dst, const ResolveResult&, const Identifier&);
- RegisterID* emitPutStaticVar(const ResolveResult&, const Identifier&, RegisterID* value);
- RegisterID* emitInitGlobalConst(const ResolveResult&, const Identifier&, RegisterID* value);
+ RegisterID* emitGetLocalVar(RegisterID* dst, const ResolveResult&, const Identifier&);
+ RegisterID* emitInitGlobalConst(const Identifier&, RegisterID* value);
RegisterID* emitResolve(RegisterID* dst, const ResolveResult&, const Identifier& property);
RegisterID* emitResolveBase(RegisterID* dst, const ResolveResult&, const Identifier& property);
- RegisterID* emitResolveBaseForPut(RegisterID* dst, const ResolveResult&, const Identifier& property);
- RegisterID* emitResolveWithBase(RegisterID* baseDst, RegisterID* propDst, const ResolveResult&, const Identifier& property);
+ RegisterID* emitResolveBaseForPut(RegisterID* dst, const ResolveResult&, const Identifier& property, NonlocalResolveInfo&);
+ RegisterID* emitResolveWithBaseForPut(RegisterID* baseDst, RegisterID* propDst, const ResolveResult&, const Identifier& property, NonlocalResolveInfo&);
RegisterID* emitResolveWithThis(RegisterID* baseDst, RegisterID* propDst, const ResolveResult&, const Identifier& property);
+ RegisterID* emitPutToBase(RegisterID* base, const Identifier&, RegisterID* value, NonlocalResolveInfo&);
+
void emitMethodCheck();
RegisterID* emitGetById(RegisterID* dst, RegisterID* base, const Identifier& property);
@@ -596,6 +541,12 @@ namespace JSC {
typedef HashMap<double, JSValue> NumberMap;
typedef HashMap<StringImpl*, JSString*, IdentifierRepHash> IdentifierStringMap;
+ struct ResolveCacheEntry {
+ int resolveOperations;
+ int putOperations;
+ };
+ typedef HashMap<StringImpl*, ResolveCacheEntry, IdentifierRepHash> IdentifierResolvePutMap;
+ typedef HashMap<StringImpl*, uint32_t, IdentifierRepHash> IdentifierResolveMap;
// Helper for emitCall() and emitConstruct(). This works because the set of
// expected functions have identical behavior for both call and construct
@@ -766,6 +717,75 @@ namespace JSC {
NumberMap m_numberMap;
IdentifierStringMap m_stringMap;
+ uint32_t getResolveOperations(const Identifier& property)
+ {
+ if (m_dynamicScopeDepth)
+ return m_codeBlock->addResolve();
+ IdentifierResolveMap::AddResult result = m_resolveCacheMap.add(property.impl(), 0);
+ if (result.isNewEntry)
+ result.iterator->value = m_codeBlock->addResolve();
+ return result.iterator->value;
+ }
+
+ uint32_t getResolveWithThisOperations(const Identifier& property)
+ {
+ if (m_dynamicScopeDepth)
+ return m_codeBlock->addResolve();
+ IdentifierResolveMap::AddResult result = m_resolveWithThisCacheMap.add(property.impl(), 0);
+ if (result.isNewEntry)
+ result.iterator->value = m_codeBlock->addResolve();
+ return result.iterator->value;
+ }
+
+ uint32_t getResolveBaseOperations(IdentifierResolvePutMap& map, const Identifier& property, uint32_t& putToBaseOperation)
+ {
+ if (m_dynamicScopeDepth) {
+ putToBaseOperation = m_codeBlock->addPutToBase();
+ return m_codeBlock->addResolve();
+ }
+ ResolveCacheEntry entry = {-1, -1};
+ IdentifierResolvePutMap::AddResult result = map.add(property.impl(), entry);
+ if (result.isNewEntry)
+ result.iterator->value.resolveOperations = m_codeBlock->addResolve();
+ if (result.iterator->value.putOperations == -1)
+ result.iterator->value.putOperations = getPutToBaseOperation(property);
+ putToBaseOperation = result.iterator->value.putOperations;
+ return result.iterator->value.resolveOperations;
+ }
+
+ uint32_t getResolveBaseOperations(const Identifier& property)
+ {
+ uint32_t scratch;
+ return getResolveBaseOperations(m_resolveBaseMap, property, scratch);
+ }
+
+ uint32_t getResolveBaseForPutOperations(const Identifier& property, uint32_t& putToBaseOperation)
+ {
+ return getResolveBaseOperations(m_resolveBaseForPutMap, property, putToBaseOperation);
+ }
+
+ uint32_t getResolveWithBaseForPutOperations(const Identifier& property, uint32_t& putToBaseOperation)
+ {
+ return getResolveBaseOperations(m_resolveWithBaseForPutMap, property, putToBaseOperation);
+ }
+
+ uint32_t getPutToBaseOperation(const Identifier& property)
+ {
+ if (m_dynamicScopeDepth)
+ return m_codeBlock->addPutToBase();
+ IdentifierResolveMap::AddResult result = m_putToBaseMap.add(property.impl(), 0);
+ if (result.isNewEntry)
+ result.iterator->value = m_codeBlock->addPutToBase();
+ return result.iterator->value;
+ }
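
A minimal model of the caching helpers above, with std::unordered_map standing in for WTF's HashMap and a plain counter standing in for CodeBlock::addResolve(). The key point is the m_dynamicScopeDepth bypass: inside a with/eval scope every resolve gets a fresh operation list, since the scope shape can differ per execution; otherwise all resolves of one identifier share a list:

    #include <cstdint>
    #include <string>
    #include <unordered_map>

    struct ResolveCache {
        unsigned dynamicScopeDepth = 0;
        uint32_t nextIndex = 0; // stands in for CodeBlock::addResolve()
        std::unordered_map<std::string, uint32_t> map;

        uint32_t get(const std::string& identifier)
        {
            if (dynamicScopeDepth)
                return nextIndex++; // never shared: scope shape may differ
            auto result = map.emplace(identifier, 0);
            if (result.second)
                result.first->second = nextIndex++;
            return result.first->second;
        }
    };
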
+
+ IdentifierResolveMap m_putToBaseMap;
+ IdentifierResolveMap m_resolveCacheMap;
+ IdentifierResolveMap m_resolveWithThisCacheMap;
+ IdentifierResolvePutMap m_resolveBaseMap;
+ IdentifierResolvePutMap m_resolveBaseForPutMap;
+ IdentifierResolvePutMap m_resolveWithBaseForPutMap;
+
JSGlobalData* m_globalData;
OpcodeID m_lastOpcodeID;
diff --git a/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp b/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp
index 10a873d1c..68811955f 100644
--- a/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp
+++ b/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp
@@ -141,7 +141,7 @@ RegisterID* ThisNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst
bool ResolveNode::isPure(BytecodeGenerator& generator) const
{
- return generator.resolve(m_ident).isStatic();
+ return generator.resolve(m_ident).isRegister();
}
RegisterID* ResolveNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst)
@@ -439,14 +439,6 @@ RegisterID* FunctionCallResolveNode::emitBytecode(BytecodeGenerator& generator,
return generator.emitCall(generator.finalDestinationOrIgnored(dst, callArguments.thisRegister()), func.get(), NoExpectedFunction, callArguments, divot(), startOffset(), endOffset());
}
- if (resolveResult.isStatic()) {
- RefPtr<RegisterID> func = generator.newTemporary();
- CallArguments callArguments(generator, m_args);
- generator.emitGetStaticVar(func.get(), resolveResult, m_ident);
- generator.emitLoad(callArguments.thisRegister(), jsUndefined());
- return generator.emitCall(generator.finalDestinationOrIgnored(dst, func.get()), func.get(), expectedFunction, callArguments, divot(), startOffset(), endOffset());
- }
-
RefPtr<RegisterID> func = generator.newTemporary();
CallArguments callArguments(generator, m_args);
int identifierStart = divot() - startOffset();
@@ -631,29 +623,18 @@ RegisterID* PostfixNode::emitResolve(BytecodeGenerator& generator, RegisterID* d
return emitPreIncOrDec(generator, local, m_operator);
return emitPostIncOrDec(generator, generator.finalDestination(dst), local, m_operator);
}
-
- if (resolveResult.isStatic() && !resolveResult.isReadOnly()) {
- RefPtr<RegisterID> value = generator.emitGetStaticVar(generator.newTemporary(), resolveResult, ident);
- RegisterID* oldValue;
- if (dst == generator.ignoredResult()) {
- oldValue = 0;
- emitPreIncOrDec(generator, value.get(), m_operator);
- } else
- oldValue = emitPostIncOrDec(generator, generator.finalDestination(dst), value.get(), m_operator);
- generator.emitPutStaticVar(resolveResult, ident, value.get());
- return oldValue;
- }
generator.emitExpressionInfo(divot(), startOffset(), endOffset());
RefPtr<RegisterID> value = generator.newTemporary();
- RefPtr<RegisterID> base = generator.emitResolveWithBase(generator.newTemporary(), value.get(), resolveResult, ident);
+ NonlocalResolveInfo resolveInfo;
+ RefPtr<RegisterID> base = generator.emitResolveWithBaseForPut(generator.newTemporary(), value.get(), resolveResult, ident, resolveInfo);
RegisterID* oldValue;
if (dst == generator.ignoredResult()) {
oldValue = 0;
emitPreIncOrDec(generator, value.get(), m_operator);
} else
oldValue = emitPostIncOrDec(generator, generator.finalDestination(dst), value.get(), m_operator);
- generator.emitPutById(base.get(), ident, value.get());
+ generator.emitPutToBase(base.get(), ident, value.get(), resolveInfo);
return oldValue;
}
@@ -828,18 +809,12 @@ RegisterID* PrefixNode::emitResolve(BytecodeGenerator& generator, RegisterID* ds
return generator.moveToDestinationIfNeeded(dst, local);
}
- if (resolveResult.isStatic() && !resolveResult.isReadOnly()) {
- RefPtr<RegisterID> propDst = generator.emitGetStaticVar(generator.tempDestination(dst), resolveResult, ident);
- emitPreIncOrDec(generator, propDst.get(), m_operator);
- generator.emitPutStaticVar(resolveResult, ident, propDst.get());
- return generator.moveToDestinationIfNeeded(dst, propDst.get());
- }
-
generator.emitExpressionInfo(divot(), startOffset(), endOffset());
RefPtr<RegisterID> propDst = generator.tempDestination(dst);
- RefPtr<RegisterID> base = generator.emitResolveWithBase(generator.newTemporary(), propDst.get(), resolveResult, ident);
+ NonlocalResolveInfo resolveVerifier;
+ RefPtr<RegisterID> base = generator.emitResolveWithBaseForPut(generator.newTemporary(), propDst.get(), resolveResult, ident, resolveVerifier);
emitPreIncOrDec(generator, propDst.get(), m_operator);
- generator.emitPutById(base.get(), ident, propDst.get());
+ generator.emitPutToBase(base.get(), ident, propDst.get(), resolveVerifier);
return generator.moveToDestinationIfNeeded(dst, propDst.get());
}
@@ -1265,18 +1240,12 @@ RegisterID* ReadModifyResolveNode::emitBytecode(BytecodeGenerator& generator, Re
return generator.moveToDestinationIfNeeded(dst, result);
}
- if (resolveResult.isStatic() && !resolveResult.isReadOnly()) {
- RefPtr<RegisterID> src1 = generator.emitGetStaticVar(generator.tempDestination(dst), resolveResult, m_ident);
- RegisterID* result = emitReadModifyAssignment(generator, generator.finalDestination(dst, src1.get()), src1.get(), m_right, m_operator, OperandTypes(ResultType::unknownType(), m_right->resultDescriptor()));
- generator.emitPutStaticVar(resolveResult, m_ident, result);
- return result;
- }
-
RefPtr<RegisterID> src1 = generator.tempDestination(dst);
generator.emitExpressionInfo(divot() - startOffset() + m_ident.length(), m_ident.length(), 0);
- RefPtr<RegisterID> base = generator.emitResolveWithBase(generator.newTemporary(), src1.get(), resolveResult, m_ident);
+ NonlocalResolveInfo resolveVerifier;
+ RefPtr<RegisterID> base = generator.emitResolveWithBaseForPut(generator.newTemporary(), src1.get(), resolveResult, m_ident, resolveVerifier);
RegisterID* result = emitReadModifyAssignment(generator, generator.finalDestination(dst, src1.get()), src1.get(), m_right, m_operator, OperandTypes(ResultType::unknownType(), m_right->resultDescriptor()), this);
- return generator.emitPutById(base.get(), m_ident, result);
+ return generator.emitPutToBase(base.get(), m_ident, result, resolveVerifier);
}
// ------------------------------ AssignResolveNode -----------------------------------
@@ -1285,7 +1254,7 @@ RegisterID* AssignResolveNode::emitBytecode(BytecodeGenerator& generator, Regist
{
ResolveResult resolveResult = generator.resolve(m_ident);
- if (RegisterID *local = resolveResult.local()) {
+ if (RegisterID* local = resolveResult.local()) {
if (resolveResult.isReadOnly()) {
generator.emitReadOnlyExceptionIfNeeded();
return generator.emitNode(dst, m_right);
@@ -1294,20 +1263,13 @@ RegisterID* AssignResolveNode::emitBytecode(BytecodeGenerator& generator, Regist
return generator.moveToDestinationIfNeeded(dst, result);
}
- if (resolveResult.isStatic() && !resolveResult.isReadOnly()) {
- if (dst == generator.ignoredResult())
- dst = 0;
- RegisterID* value = generator.emitNode(dst, m_right);
- generator.emitPutStaticVar(resolveResult, m_ident, value);
- return value;
- }
-
- RefPtr<RegisterID> base = generator.emitResolveBaseForPut(generator.newTemporary(), resolveResult, m_ident);
+ NonlocalResolveInfo resolveVerifier;
+ RefPtr<RegisterID> base = generator.emitResolveBaseForPut(generator.newTemporary(), resolveResult, m_ident, resolveVerifier);
if (dst == generator.ignoredResult())
dst = 0;
RegisterID* value = generator.emitNode(dst, m_right);
generator.emitExpressionInfo(divot(), startOffset(), endOffset());
- return generator.emitPutById(base.get(), m_ident, value);
+ return generator.emitPutToBase(base.get(), m_ident, value, resolveVerifier);
}
// ------------------------------ AssignDotNode -----------------------------------
@@ -1402,16 +1364,14 @@ RegisterID* ConstDeclNode::emitCodeSingle(BytecodeGenerator& generator)
RefPtr<RegisterID> value = m_init ? generator.emitNode(m_init) : generator.emitLoad(0, jsUndefined());
- if (resolveResult.isStatic()) {
- if (generator.codeType() == GlobalCode)
- return generator.emitInitGlobalConst(resolveResult, m_ident, value.get());
- return generator.emitPutStaticVar(resolveResult, m_ident, value.get());
+ if (generator.codeType() == GlobalCode) {
+ if (RegisterID* result = generator.emitInitGlobalConst(m_ident, value.get()))
+ return result;
}
if (generator.codeType() != EvalCode)
return value.get();
- // FIXME: While this code should only be hit in an eval block, it will assign
- // to the wrong base if m_ident exists in an intervening with scope.
+ // FIXME: This will result in incorrect assignment if m_ident exists in an intervening with scope.
RefPtr<RegisterID> base = generator.emitResolveBase(generator.newTemporary(), resolveResult, m_ident);
return generator.emitPutById(base.get(), m_ident, value.get());
}
@@ -1699,10 +1659,11 @@ RegisterID* ForInNode::emitBytecode(BytecodeGenerator& generator, RegisterID* ds
if (!propertyName) {
propertyName = generator.newTemporary();
RefPtr<RegisterID> protect = propertyName;
- RegisterID* base = generator.emitResolveBaseForPut(generator.newTemporary(), resolveResult, ident);
+ NonlocalResolveInfo resolveVerifier;
+ RegisterID* base = generator.emitResolveBaseForPut(generator.newTemporary(), resolveResult, ident, resolveVerifier);
generator.emitExpressionInfo(divot(), startOffset(), endOffset());
- generator.emitPutById(base, ident, propertyName);
+ generator.emitPutToBase(base, ident, propertyName, resolveVerifier);
} else {
expectedSubscript = generator.emitMove(generator.newTemporary(), propertyName);
generator.pushOptimisedForIn(expectedSubscript.get(), iter.get(), i.get(), propertyName);
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
index da5682f55..928788bf3 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
+++ b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
@@ -1420,9 +1420,15 @@ bool AbstractState::execute(unsigned indexInBlock)
ASSERT_NOT_REACHED();
break;
}
+ forNode(node.child1()).filterArrayModes(arrayModesFor(node.arrayMode()));
break;
}
case Arrayify: {
+ if (modeAlreadyChecked(forNode(node.child1()), node.arrayMode())) {
+ m_foundConstants = true;
+ node.setCanExit(false);
+ break;
+ }
switch (node.arrayMode()) {
case ALL_EFFECTFUL_MODES:
node.setCanExit(true);
@@ -1431,9 +1437,10 @@ bool AbstractState::execute(unsigned indexInBlock)
forNode(node.child2()).filter(SpecInt32);
forNode(nodeIndex).clear();
clobberStructures(indexInBlock);
+ forNode(node.child1()).filterArrayModes(arrayModesFor(node.arrayMode()));
break;
default:
- ASSERT_NOT_REACHED();
+ CRASH();
break;
}
break;
@@ -1524,7 +1531,12 @@ bool AbstractState::execute(unsigned indexInBlock)
clobberWorld(node.codeOrigin, indexInBlock);
forNode(nodeIndex).makeTop();
break;
-
+
+ case GarbageValue:
+ clobberWorld(node.codeOrigin, indexInBlock);
+ forNode(nodeIndex).makeTop();
+ break;
+
case ForceOSRExit:
node.setCanExit(true);
m_isValid = false;
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractValue.h b/Source/JavaScriptCore/dfg/DFGAbstractValue.h
index ff1c6d205..5382cd3ad 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractValue.h
+++ b/Source/JavaScriptCore/dfg/DFGAbstractValue.h
@@ -30,6 +30,7 @@
#if ENABLE(DFG_JIT)
+#include "ArrayProfile.h"
#include "DFGStructureAbstractValue.h"
#include "JSCell.h"
#include "SpeculatedType.h"
@@ -40,12 +41,14 @@ namespace JSC { namespace DFG {
struct AbstractValue {
AbstractValue()
: m_type(SpecNone)
+ , m_arrayModes(0)
{
}
void clear()
{
m_type = SpecNone;
+ m_arrayModes = 0;
m_currentKnownStructure.clear();
m_futurePossibleStructure.clear();
m_value = JSValue();
@@ -54,7 +57,7 @@ struct AbstractValue {
bool isClear() const
{
- bool result = m_type == SpecNone && m_currentKnownStructure.isClear() && m_futurePossibleStructure.isClear();
+ bool result = m_type == SpecNone && !m_arrayModes && m_currentKnownStructure.isClear() && m_futurePossibleStructure.isClear();
if (result)
ASSERT(!m_value);
return result;
@@ -63,6 +66,7 @@ struct AbstractValue {
void makeTop()
{
m_type = SpecTop;
+ m_arrayModes = ALL_ARRAY_MODES;
m_currentKnownStructure.makeTop();
m_futurePossibleStructure.makeTop();
m_value = JSValue();
@@ -71,13 +75,16 @@ struct AbstractValue {
void clobberStructures()
{
- if (m_type & SpecCell)
+ if (m_type & SpecCell) {
m_currentKnownStructure.makeTop();
- else
+ clobberArrayModes();
+ } else {
ASSERT(m_currentKnownStructure.isClear());
+ ASSERT(!m_arrayModes);
+ }
checkConsistency();
}
-
+
void clobberValue()
{
m_value = JSValue();
@@ -105,29 +112,17 @@ struct AbstractValue {
return result;
}
- void setFuturePossibleStructure(Structure* structure)
- {
- if (structure->transitionWatchpointSetIsStillValid())
- m_futurePossibleStructure = structure;
- else
- m_futurePossibleStructure.makeTop();
- }
-
- void filterFuturePossibleStructure(Structure* structure)
- {
- if (structure->transitionWatchpointSetIsStillValid())
- m_futurePossibleStructure.filter(StructureAbstractValue(structure));
- }
-
void setMostSpecific(JSValue value)
{
if (!!value && value.isCell()) {
Structure* structure = value.asCell()->structure();
m_currentKnownStructure = structure;
setFuturePossibleStructure(structure);
+ m_arrayModes = asArrayModes(structure->indexingType());
} else {
m_currentKnownStructure.clear();
m_futurePossibleStructure.clear();
+ m_arrayModes = 0;
}
m_type = speculationFromValue(value);
@@ -140,10 +135,14 @@ struct AbstractValue {
{
if (!!value && value.isCell()) {
m_currentKnownStructure.makeTop();
- setFuturePossibleStructure(value.asCell()->structure());
+ Structure* structure = value.asCell()->structure();
+ setFuturePossibleStructure(structure);
+ m_arrayModes = asArrayModes(structure->indexingType());
+ clobberArrayModes();
} else {
m_currentKnownStructure.clear();
m_futurePossibleStructure.clear();
+ m_arrayModes = 0;
}
m_type = speculationFromValue(value);
@@ -156,6 +155,7 @@ struct AbstractValue {
{
m_currentKnownStructure = structure;
setFuturePossibleStructure(structure);
+ m_arrayModes = asArrayModes(structure->indexingType());
m_type = speculationFromStructure(structure);
m_value = JSValue();
@@ -167,9 +167,11 @@ struct AbstractValue {
if (type & SpecCell) {
m_currentKnownStructure.makeTop();
m_futurePossibleStructure.makeTop();
+ m_arrayModes = ALL_ARRAY_MODES;
} else {
m_currentKnownStructure.clear();
m_futurePossibleStructure.clear();
+ m_arrayModes = 0;
}
m_type = type;
m_value = JSValue();
@@ -179,6 +181,7 @@ struct AbstractValue {
bool operator==(const AbstractValue& other) const
{
return m_type == other.m_type
+ && m_arrayModes == other.m_arrayModes
&& m_currentKnownStructure == other.m_currentKnownStructure
&& m_futurePossibleStructure == other.m_futurePossibleStructure
&& m_value == other.m_value;
@@ -199,6 +202,7 @@ struct AbstractValue {
result = !other.isClear();
} else {
result |= mergeSpeculation(m_type, other.m_type);
+ result |= mergeArrayModes(m_arrayModes, other.m_arrayModes);
result |= m_currentKnownStructure.addAll(other.m_currentKnownStructure);
result |= m_futurePossibleStructure.addAll(other.m_futurePossibleStructure);
if (m_value != other.m_value) {
@@ -218,6 +222,7 @@ struct AbstractValue {
if (type & SpecCell) {
m_currentKnownStructure.makeTop();
m_futurePossibleStructure.makeTop();
+ m_arrayModes = ALL_ARRAY_MODES;
}
m_value = JSValue();
@@ -227,6 +232,7 @@ struct AbstractValue {
void filter(const StructureSet& other)
{
m_type &= other.speculationFromStructures();
+ m_arrayModes &= other.arrayModesFromStructures();
m_currentKnownStructure.filter(other);
if (m_currentKnownStructure.isClear())
m_futurePossibleStructure.clear();
@@ -241,11 +247,24 @@ struct AbstractValue {
m_currentKnownStructure.filter(m_type);
m_futurePossibleStructure.filter(m_type);
+ filterArrayModesByType();
filterValueByType();
checkConsistency();
}
+ void filterArrayModes(ArrayModes arrayModes)
+ {
+ ASSERT(arrayModes);
+
+ m_type &= SpecCell;
+ m_arrayModes &= arrayModes;
+
+ // More precise filtering is possible here, but it is unlikely to make a measurable difference.
+
+ checkConsistency();
+ }
+
void filter(SpeculatedType type)
{
if (type == SpecTop)
@@ -258,31 +277,13 @@ struct AbstractValue {
// the new type (None) rather than the one passed (Array).
m_currentKnownStructure.filter(m_type);
m_futurePossibleStructure.filter(m_type);
-
+
+ filterArrayModesByType();
filterValueByType();
checkConsistency();
}
- // We could go further, and ensure that if the futurePossibleStructure contravenes
- // the value, then we could clear both of those things. But that's unlikely to help
- // in any realistic scenario, so we don't do it. Simpler is better.
- void filterValueByType()
- {
- if (!!m_type) {
- // The type is still non-empty. This implies that regardless of what filtering
- // was done, we either didn't have a value to begin with, or that value is still
- // valid.
- ASSERT(!m_value || validateType(m_value));
- return;
- }
-
- // The type has been rendered empty. That means that the value must now be invalid,
- // as well.
- ASSERT(!m_value || !validateType(m_value));
- m_value = JSValue();
- }
-
bool validateType(JSValue value) const
{
if (isTop())
@@ -319,7 +320,8 @@ struct AbstractValue {
ASSERT(m_type & SpecCell);
Structure* structure = value.asCell()->structure();
return m_currentKnownStructure.contains(structure)
- && m_futurePossibleStructure.contains(structure);
+ && m_futurePossibleStructure.contains(structure)
+ && (m_arrayModes & asArrayModes(structure->indexingType()));
}
return true;
@@ -330,6 +332,7 @@ struct AbstractValue {
if (!(m_type & SpecCell)) {
ASSERT(m_currentKnownStructure.isClear());
ASSERT(m_futurePossibleStructure.isClear());
+ ASSERT(!m_arrayModes);
}
if (isClear())
@@ -346,7 +349,7 @@ struct AbstractValue {
void dump(FILE* out) const
{
- fprintf(out, "(%s, ", speculationToString(m_type));
+ fprintf(out, "(%s, %s, ", speculationToString(m_type), arrayModesToString(m_arrayModes));
m_currentKnownStructure.dump(out);
dataLog(", ");
m_futurePossibleStructure.dump(out);
@@ -437,6 +440,13 @@ struct AbstractValue {
// unified with the set of all objects with structure 0x12345.
SpeculatedType m_type;
+ // This is a proven constraint on the possible indexing types that this value
+ // can have right now. It also implicitly constrains the set of structures
+ // that the value may have right now, since a structure has an immutable
+ // indexing type. This is subject to change upon reassignment, or any side
+ // effect that makes non-obvious changes to the heap.
+ ArrayModes m_arrayModes;
+
// This is a proven constraint on the possible values that this value can
// have now or any time in the future, unless it is reassigned. Note that this
// implies nothing about the structure. Oddly, JSValue() (i.e. the empty value)
@@ -444,6 +454,75 @@ struct AbstractValue {
// BOTTOM then JSValue() means BOTTOM; if m_type is not BOTTOM then JSValue()
// means TOP.
JSValue m_value;
+
+private:
+ void clobberArrayModes()
+ {
+ if (m_arrayModes == ALL_ARRAY_MODES)
+ return;
+
+ if (LIKELY(m_arrayModes & asArrayModes(NonArray)))
+ m_arrayModes = ALL_ARRAY_MODES;
+ else
+ clobberArrayModesSlow();
+ }
+
+ void clobberArrayModesSlow()
+ {
+ if (m_arrayModes & asArrayModes(ArrayClass))
+ m_arrayModes = ALL_ARRAY_MODES;
+ else if (m_arrayModes & asArrayModes(NonArrayWithContiguous))
+ m_arrayModes |= asArrayModes(NonArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage);
+ else if (m_arrayModes & asArrayModes(ArrayWithContiguous))
+ m_arrayModes |= asArrayModes(ArrayWithArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage);
+ else if (m_arrayModes & asArrayModes(NonArrayWithArrayStorage))
+ m_arrayModes |= asArrayModes(NonArrayWithSlowPutArrayStorage);
+ else if (m_arrayModes & asArrayModes(ArrayWithArrayStorage))
+ m_arrayModes |= asArrayModes(ArrayWithSlowPutArrayStorage);
+ }
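
Why clobberArrayModesSlow widens rather than resetting to ALL_ARRAY_MODES: a side effect can migrate a structure only toward more general storage (contiguous -> array storage -> slow-put), never backward. A stand-alone sketch under the assumption that asArrayModes(t) is a one-bit-per-indexing-type encoding; the enum values here are illustrative, not JSC's, and independent ifs replace the else-if chain for clarity:

    #include <cstdint>

    typedef uint32_t ArrayModes;

    // Illustrative indexing kinds; JSC's real values differ.
    enum IndexingKind {
        kNonArrayWithContiguous,
        kArrayWithContiguous,
        kNonArrayWithArrayStorage,
        kArrayWithArrayStorage,
        kNonArrayWithSlowPutArrayStorage,
        kArrayWithSlowPutArrayStorage
    };

    inline ArrayModes modeBit(IndexingKind k) { return 1u << k; }

    // Widen a proven mode set to everything a side effect could turn it
    // into: each storage kind can only degrade toward slow-put storage.
    inline ArrayModes clobber(ArrayModes modes)
    {
        if (modes & modeBit(kNonArrayWithContiguous))
            modes |= modeBit(kNonArrayWithArrayStorage) | modeBit(kNonArrayWithSlowPutArrayStorage);
        if (modes & modeBit(kArrayWithContiguous))
            modes |= modeBit(kArrayWithArrayStorage) | modeBit(kArrayWithSlowPutArrayStorage);
        if (modes & modeBit(kNonArrayWithArrayStorage))
            modes |= modeBit(kNonArrayWithSlowPutArrayStorage);
        if (modes & modeBit(kArrayWithArrayStorage))
            modes |= modeBit(kArrayWithSlowPutArrayStorage);
        return modes;
    }
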
+
+ void setFuturePossibleStructure(Structure* structure)
+ {
+ if (structure->transitionWatchpointSetIsStillValid())
+ m_futurePossibleStructure = structure;
+ else
+ m_futurePossibleStructure.makeTop();
+ }
+
+ void filterFuturePossibleStructure(Structure* structure)
+ {
+ if (structure->transitionWatchpointSetIsStillValid())
+ m_futurePossibleStructure.filter(StructureAbstractValue(structure));
+ }
+
+ // We could go further, and ensure that if the futurePossibleStructure contravenes
+ // the value, then we could clear both of those things. But that's unlikely to help
+ // in any realistic scenario, so we don't do it. Simpler is better.
+ void filterValueByType()
+ {
+ if (!!m_type) {
+ // The type is still non-empty. This implies that regardless of what filtering
+ // was done, we either didn't have a value to begin with, or that value is still
+ // valid.
+ ASSERT(!m_value || validateType(m_value));
+ return;
+ }
+
+ // The type has been rendered empty. That means that the value must now be invalid,
+ // as well.
+ ASSERT(!m_value || !validateType(m_value));
+ m_value = JSValue();
+ }
+
+ void filterArrayModesByType()
+ {
+ if (!(m_type & SpecCell))
+ m_arrayModes = 0;
+ else if (!(m_type & ~SpecArray))
+ m_arrayModes &= ALL_ARRAY_ARRAY_MODES;
+ else if (!(m_type & SpecArray))
+ m_arrayModes &= ALL_NON_ARRAY_ARRAY_MODES;
+ }
};
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGArrayMode.cpp b/Source/JavaScriptCore/dfg/DFGArrayMode.cpp
index 3985d769c..623e9d743 100644
--- a/Source/JavaScriptCore/dfg/DFGArrayMode.cpp
+++ b/Source/JavaScriptCore/dfg/DFGArrayMode.cpp
@@ -167,12 +167,17 @@ bool modeAlreadyChecked(AbstractValue& value, Array::Mode arrayMode)
case Array::PossiblyArrayWithContiguous:
case Array::PossiblyArrayWithContiguousToTail:
case Array::PossiblyArrayWithContiguousOutOfBounds:
+ case Array::ToContiguous:
+ if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(NonArrayWithContiguous) | asArrayModes(ArrayWithContiguous)))
+ return true;
return value.m_currentKnownStructure.hasSingleton()
&& hasContiguous(value.m_currentKnownStructure.singleton()->indexingType());
case Array::ArrayWithContiguous:
case Array::ArrayWithContiguousToTail:
case Array::ArrayWithContiguousOutOfBounds:
+ if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(ArrayWithContiguous)))
+ return true;
return value.m_currentKnownStructure.hasSingleton()
&& hasContiguous(value.m_currentKnownStructure.singleton()->indexingType())
&& (value.m_currentKnownStructure.singleton()->indexingType() & IsArray);
@@ -183,29 +188,38 @@ bool modeAlreadyChecked(AbstractValue& value, Array::Mode arrayMode)
case Array::PossiblyArrayWithArrayStorage:
case Array::PossiblyArrayWithArrayStorageToHole:
case Array::PossiblyArrayWithArrayStorageOutOfBounds:
+ case Array::ToArrayStorage:
+ case Array::PossiblyArrayToArrayStorage:
+ if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithArrayStorage)))
+ return true;
return value.m_currentKnownStructure.hasSingleton()
&& hasFastArrayStorage(value.m_currentKnownStructure.singleton()->indexingType());
case Array::SlowPutArrayStorage:
case Array::PossiblyArrayWithSlowPutArrayStorage:
+ case Array::ToSlowPutArrayStorage:
+ if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage)))
+ return true;
return value.m_currentKnownStructure.hasSingleton()
&& hasArrayStorage(value.m_currentKnownStructure.singleton()->indexingType());
case Array::ArrayWithArrayStorage:
case Array::ArrayWithArrayStorageToHole:
case Array::ArrayWithArrayStorageOutOfBounds:
+ case Array::ArrayToArrayStorage:
+ if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(ArrayWithArrayStorage)))
+ return true;
return value.m_currentKnownStructure.hasSingleton()
&& hasFastArrayStorage(value.m_currentKnownStructure.singleton()->indexingType())
&& (value.m_currentKnownStructure.singleton()->indexingType() & IsArray);
case Array::ArrayWithSlowPutArrayStorage:
+ if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(ArrayWithArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage)))
+ return true;
return value.m_currentKnownStructure.hasSingleton()
&& hasArrayStorage(value.m_currentKnownStructure.singleton()->indexingType())
&& (value.m_currentKnownStructure.singleton()->indexingType() & IsArray);
- case ALL_EFFECTFUL_MODES:
- return false;
-
case Array::Arguments:
return isArgumentsSpeculation(value.m_type);
diff --git a/Source/JavaScriptCore/dfg/DFGArrayMode.h b/Source/JavaScriptCore/dfg/DFGArrayMode.h
index a666bb83f..f7ac92733 100644
--- a/Source/JavaScriptCore/dfg/DFGArrayMode.h
+++ b/Source/JavaScriptCore/dfg/DFGArrayMode.h
@@ -349,6 +349,51 @@ inline bool isEffectful(Array::Mode mode)
}
}
+// This returns the set of array modes that will pass filtering of a CheckArray or
+// Arrayify with the given mode.
+inline ArrayModes arrayModesFor(Array::Mode arrayMode)
+{
+ switch (arrayMode) {
+ case Array::Generic:
+ return ALL_ARRAY_MODES;
+ case Array::Contiguous:
+ case Array::ContiguousToTail:
+ case Array::ContiguousOutOfBounds:
+ case Array::ToContiguous:
+ return asArrayModes(NonArrayWithContiguous);
+ case Array::PossiblyArrayWithContiguous:
+ case Array::PossiblyArrayWithContiguousToTail:
+ case Array::PossiblyArrayWithContiguousOutOfBounds:
+ return asArrayModes(NonArrayWithContiguous) | asArrayModes(ArrayWithContiguous);
+ case ARRAY_WITH_CONTIGUOUS_MODES:
+ return asArrayModes(ArrayWithContiguous);
+ case Array::ArrayStorage:
+ case Array::ArrayStorageToHole:
+ case Array::ArrayStorageOutOfBounds:
+ case Array::ToArrayStorage:
+ return asArrayModes(NonArrayWithArrayStorage);
+ case Array::ToSlowPutArrayStorage:
+ case Array::SlowPutArrayStorage:
+ return asArrayModes(NonArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage);
+ case Array::PossiblyArrayWithArrayStorage:
+ case Array::PossiblyArrayWithArrayStorageToHole:
+ case Array::PossiblyArrayWithArrayStorageOutOfBounds:
+ case Array::PossiblyArrayToArrayStorage:
+ return asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithArrayStorage);
+ case Array::PossiblyArrayWithSlowPutArrayStorage:
+ return asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage);
+ case Array::ArrayWithArrayStorage:
+ case Array::ArrayWithArrayStorageToHole:
+ case Array::ArrayWithArrayStorageOutOfBounds:
+ case Array::ArrayToArrayStorage:
+ return asArrayModes(ArrayWithArrayStorage);
+ case Array::ArrayWithSlowPutArrayStorage:
+ return asArrayModes(ArrayWithArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage);
+ default:
+ return asArrayModes(NonArray);
+ }
+}
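
The early returns added to modeAlreadyChecked above rely on arrayModesAlreadyChecked being, in effect, a bitset-subset test: every mode the abstract value might still have must be one the check accepts. A sketch of that relation under the same one-bit-per-mode assumption:

    #include <cstdint>

    typedef uint32_t ArrayModes;

    // proven is a subset of expected, expressed without iterating over
    // bits: ORing the proven set into the expected set adds nothing
    // exactly when every proven mode was already acceptable.
    inline bool subsetOf(ArrayModes proven, ArrayModes expected)
    {
        return (proven | expected) == expected;
    }

So the new early returns fire exactly when profiling plus abstract interpretation have already narrowed value.m_arrayModes inside the set that the given Array::Mode accepts, letting the check be skipped even when no singleton structure is known.
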
+
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp
index a19b723d8..ca8683ead 100644
--- a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp
+++ b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp
@@ -73,7 +73,7 @@ void AssemblyHelpers::clearSamplingFlag(int32_t flag)
void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr)
{
#if CPU(X86_64)
- Jump checkInt32 = branchPtr(BelowOrEqual, gpr, TrustedImmPtr(reinterpret_cast<void*>(static_cast<uintptr_t>(0xFFFFFFFFu))));
+ Jump checkInt32 = branch64(BelowOrEqual, gpr, TrustedImm64(static_cast<uintptr_t>(0xFFFFFFFFu)));
breakpoint();
checkInt32.link(this);
#else
@@ -83,22 +83,22 @@ void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr)
void AssemblyHelpers::jitAssertIsJSInt32(GPRReg gpr)
{
- Jump checkJSInt32 = branchPtr(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
+ Jump checkJSInt32 = branch64(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
breakpoint();
checkJSInt32.link(this);
}
void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr)
{
- Jump checkJSNumber = branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
+ Jump checkJSNumber = branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
breakpoint();
checkJSNumber.link(this);
}
void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr)
{
- Jump checkJSInt32 = branchPtr(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
- Jump checkJSNumber = branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
+ Jump checkJSInt32 = branch64(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
+ Jump checkJSNumber = branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
checkJSInt32.link(this);
breakpoint();
checkJSNumber.link(this);
@@ -106,7 +106,7 @@ void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr)
void AssemblyHelpers::jitAssertIsCell(GPRReg gpr)
{
- Jump checkCell = branchTestPtr(MacroAssembler::Zero, gpr, GPRInfo::tagMaskRegister);
+ Jump checkCell = branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagMaskRegister);
breakpoint();
checkCell.link(this);
}
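
What these assert helpers verify, restated in plain C++ under the usual JSVALUE64 constants (assuming TagTypeNumber = 0xffff000000000000 and TagMask = TagTypeNumber | 0x2, the values held in tagTypeNumberRegister and tagMaskRegister):

    #include <cstdint>

    static const uint64_t TagTypeNumber = 0xffff000000000000ull;
    static const uint64_t TagMask = TagTypeNumber | 0x2;

    // An int32 JSValue has all sixteen high tag bits set, so it compares
    // AboveOrEqual to TagTypeNumber -- exactly the branch64 test above.
    inline bool isInt32(uint64_t bits) { return bits >= TagTypeNumber; }

    // Any number (int32 or offset double) has at least one tag bit set.
    inline bool isNumber(uint64_t bits) { return bits & TagTypeNumber; }

    // Cells are raw pointers and carry no tag bits at all.
    inline bool isCell(uint64_t bits) { return !(bits & TagMask); }
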
diff --git a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
index 5d338fa57..953a743ff 100644
--- a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
+++ b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
@@ -99,7 +99,11 @@ public:
}
void emitPutToCallFrameHeader(GPRReg from, JSStack::CallFrameHeaderEntry entry)
{
- storePtr(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
+#if USE(JSVALUE64)
+ store64(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
+#else
+ store32(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
+#endif
}
void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
@@ -110,7 +114,7 @@ public:
Jump branchIfNotCell(GPRReg reg)
{
#if USE(JSVALUE64)
- return branchTestPtr(MacroAssembler::NonZero, reg, GPRInfo::tagMaskRegister);
+ return branchTest64(MacroAssembler::NonZero, reg, GPRInfo::tagMaskRegister);
#else
return branch32(MacroAssembler::NotEqual, reg, TrustedImm32(JSValue::CellTag));
#endif
@@ -172,8 +176,14 @@ public:
ScratchBuffer* scratchBuffer = m_globalData->scratchBufferForSize(scratchSize);
EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
- for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i)
- storePtr(GPRInfo::toRegister(i), buffer + i);
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+#if USE(JSVALUE64)
+ store64(GPRInfo::toRegister(i), buffer + i);
+#else
+ store32(GPRInfo::toRegister(i), buffer + i);
+#endif
+ }
+
for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
@@ -204,8 +214,13 @@ public:
move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
}
- for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i)
- loadPtr(buffer + i, GPRInfo::toRegister(i));
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+#if USE(JSVALUE64)
+ load64(buffer + i, GPRInfo::toRegister(i));
+#else
+ load32(buffer + i, GPRInfo::toRegister(i));
+#endif
+ }
}
// These methods JIT generate dynamic, debug-only checks - akin to ASSERTs.
@@ -229,16 +244,16 @@ public:
#if USE(JSVALUE64)
GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
{
- moveDoubleToPtr(fpr, gpr);
- subPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ moveDoubleTo64(fpr, gpr);
+ sub64(GPRInfo::tagTypeNumberRegister, gpr);
jitAssertIsJSDouble(gpr);
return gpr;
}
FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
{
jitAssertIsJSDouble(gpr);
- addPtr(GPRInfo::tagTypeNumberRegister, gpr);
- movePtrToDouble(gpr, fpr);
+ add64(GPRInfo::tagTypeNumberRegister, gpr);
+ move64ToDouble(gpr, fpr);
return fpr;
}
#endif
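
A sketch of the number encoding the two helpers above implement. Subtracting TagTypeNumber modulo 2^64 is the same as adding 2^48, which lifts every double bit pattern out of the range used for pointers; unboxing adds the constant back:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static const uint64_t TagTypeNumber = 0xffff000000000000ull;

    uint64_t boxDouble(double d)
    {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits); // moveDoubleTo64
        return bits - TagTypeNumber;         // sub64: wraps to bits + 2^48
    }

    double unboxDouble(uint64_t boxed)
    {
        uint64_t bits = boxed + TagTypeNumber; // add64: undoes the offset
        double d;
        std::memcpy(&d, &bits, sizeof d);      // move64ToDouble
        return d;
    }

    int main()
    {
        assert(unboxDouble(boxDouble(3.14)) == 3.14);
        return 0;
    }
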
@@ -258,7 +273,7 @@ public:
Jump emitExceptionCheck(ExceptionCheckKind kind = NormalExceptionCheck)
{
#if USE(JSVALUE64)
- return branchTestPtr(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(&globalData()->exception));
+ return branchTest64(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(&globalData()->exception));
#elif USE(JSVALUE32_64)
return branch32(kind == NormalExceptionCheck ? NotEqual : Equal, AbsoluteAddress(reinterpret_cast<char*>(&globalData()->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
#endif
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
index 4869cf8c1..36d18d7b3 100644
--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
@@ -169,12 +169,17 @@ private:
// Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
bool handleConstantInternalFunction(bool usesResult, int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind);
+ NodeIndex handleGetByOffset(SpeculatedType, NodeIndex base, unsigned identifierNumber, PropertyOffset);
void handleGetByOffset(
int destinationOperand, SpeculatedType, NodeIndex base, unsigned identifierNumber,
PropertyOffset);
void handleGetById(
int destinationOperand, SpeculatedType, NodeIndex base, unsigned identifierNumber,
const GetByIdStatus&);
+
+ // Convert a set of ResolveOperations into graph nodes
+ bool parseResolveOperations(SpeculatedType, unsigned identifierNumber, unsigned operations, unsigned putToBaseOperation, NodeIndex* base, NodeIndex* value);
+
// Prepare to parse a block.
void prepareToParseBlock();
// Parse a single basic block of bytecode instructions.
@@ -1143,6 +1148,8 @@ private:
Vector<unsigned> m_identifierRemap;
Vector<unsigned> m_constantRemap;
Vector<unsigned> m_constantBufferRemap;
+ Vector<unsigned> m_resolveOperationRemap;
+ Vector<unsigned> m_putToBaseOperationRemap;
// Blocks introduced by this code block, which need successor linking.
// May include up to one basic block that includes the continuation after
@@ -1779,24 +1786,28 @@ bool ByteCodeParser::handleConstantInternalFunction(
return false;
}
-void ByteCodeParser::handleGetByOffset(
- int destinationOperand, SpeculatedType prediction, NodeIndex base, unsigned identifierNumber,
- PropertyOffset offset)
+NodeIndex ByteCodeParser::handleGetByOffset(SpeculatedType prediction, NodeIndex base, unsigned identifierNumber, PropertyOffset offset)
{
NodeIndex propertyStorage;
if (isInlineOffset(offset))
propertyStorage = base;
else
propertyStorage = addToGraph(GetButterfly, base);
- set(destinationOperand,
- addToGraph(
- GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction),
- propertyStorage));
-
+ NodeIndex getByOffset = addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage);
+
StorageAccessData storageAccessData;
storageAccessData.offset = indexRelativeToBase(offset);
storageAccessData.identifierNumber = identifierNumber;
m_graph.m_storageAccessData.append(storageAccessData);
+
+ return getByOffset;
+}
+
+void ByteCodeParser::handleGetByOffset(
+ int destinationOperand, SpeculatedType prediction, NodeIndex base, unsigned identifierNumber,
+ PropertyOffset offset)
+{
+ set(destinationOperand, handleGetByOffset(prediction, base, identifierNumber, offset));
}
void ByteCodeParser::handleGetById(
@@ -1860,10 +1871,174 @@ void ByteCodeParser::prepareToParseBlock()
m_cellConstantNodes.clear();
}
+bool ByteCodeParser::parseResolveOperations(SpeculatedType prediction, unsigned identifier, unsigned operations, unsigned putToBaseOperation, NodeIndex* base, NodeIndex* value)
+{
+ ResolveOperations* resolveOperations = m_codeBlock->resolveOperations(operations);
+ if (resolveOperations->isEmpty()) {
+ addToGraph(ForceOSRExit);
+ return false;
+ }
+ JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
+ int skipCount = 0;
+ bool skippedScopes = false;
+ bool setBase = false;
+ ResolveOperation* pc = resolveOperations->data();
+ NodeIndex localBase = 0;
+ bool resolvingBase = true;
+ while (resolvingBase) {
+ switch (pc->m_operation) {
+ case ResolveOperation::ReturnGlobalObjectAsBase:
+ *base = get(m_codeBlock->globalObjectConstant());
+ ASSERT(!value);
+ return true;
+
+ case ResolveOperation::SetBaseToGlobal:
+ *base = get(m_codeBlock->globalObjectConstant());
+ setBase = true;
+ resolvingBase = false;
+ ++pc;
+ break;
+
+ case ResolveOperation::SetBaseToUndefined:
+ *base = constantUndefined();
+ setBase = true;
+ resolvingBase = false;
+ ++pc;
+ break;
+
+ case ResolveOperation::SetBaseToScope:
+ localBase = addToGraph(GetScope, OpInfo(skipCount));
+ *base = localBase;
+ setBase = true;
+
+ resolvingBase = false;
+
+ // Reset the scope skipping as we've already loaded it
+ skippedScopes = false;
+ ++pc;
+ break;
+ case ResolveOperation::ReturnScopeAsBase:
+ *base = addToGraph(GetScope, OpInfo(skipCount));
+ ASSERT(!value);
+ return true;
+
+ case ResolveOperation::SkipTopScopeNode:
+ if (m_inlineStackTop->m_inlineCallFrame)
+ return false;
+ skipCount = 1;
+ skippedScopes = true;
+ ++pc;
+ break;
+
+ case ResolveOperation::SkipScopes:
+ if (m_inlineStackTop->m_inlineCallFrame)
+ return false;
+ skipCount += pc->m_scopesToSkip;
+ skippedScopes = true;
+ ++pc;
+ break;
+
+ case ResolveOperation::CheckForDynamicEntriesBeforeGlobalScope:
+ return false;
+
+ case ResolveOperation::Fail:
+ return false;
+
+ default:
+ resolvingBase = false;
+ }
+ }
+ if (skippedScopes)
+ localBase = addToGraph(GetScope, OpInfo(skipCount));
+
+ if (base && !setBase)
+ *base = localBase;
+
+ ASSERT(value);
+ ResolveOperation* resolveValueOperation = pc;
+ switch (resolveValueOperation->m_operation) {
+ case ResolveOperation::GetAndReturnGlobalProperty: {
+ ResolveGlobalStatus status = ResolveGlobalStatus::computeFor(m_inlineStackTop->m_profiledBlock, m_currentIndex, resolveValueOperation, m_codeBlock->identifier(identifier));
+ if (status.isSimple()) {
+ ASSERT(status.structure());
+
+ NodeIndex globalObjectNode = addStructureTransitionCheck(globalObject, status.structure());
+
+ if (status.specificValue()) {
+ ASSERT(status.specificValue().isCell());
+ *value = cellConstant(status.specificValue().asCell());
+ } else
+ *value = handleGetByOffset(prediction, globalObjectNode, identifier, status.offset());
+ return true;
+ }
+
+ NodeIndex resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
+ m_graph.m_resolveGlobalData.append(ResolveGlobalData());
+ ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
+ data.identifierNumber = identifier;
+ data.resolveOperationsIndex = operations;
+ data.putToBaseOperationIndex = putToBaseOperation;
+ data.resolvePropertyIndex = resolveValueOperation - resolveOperations->data();
+ *value = resolve;
+ return true;
+ }
+ case ResolveOperation::GetAndReturnGlobalVar: {
+ *value = addToGraph(GetGlobalVar,
+ OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)),
+ OpInfo(prediction));
+ return true;
+ }
+ case ResolveOperation::GetAndReturnGlobalVarWatchable: {
+ Identifier ident = m_codeBlock->identifier(identifier);
+ SymbolTableEntry entry = globalObject->symbolTable()->get(ident.impl());
+ if (!entry.couldBeWatched()) {
+ *value = addToGraph(GetGlobalVar, OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)), OpInfo(prediction));
+ return true;
+ }
+
+ // The watchpoint is still intact! This means that we will get notified if the
+ // current value in the global variable changes. So, we can inline that value.
+ // Moreover, currently we can assume that this value is a JSFunction*, which
+ // implies that it's a cell. This simplifies things, since in general we'd have
+ // to use a JSConstant for non-cells and a WeakJSConstant for cells. So instead
+ // of having both cases we just assert that the value is a cell.
+
+ // NB. If it wasn't for CSE, GlobalVarWatchpoint would have no need for the
+ // register pointer. But CSE tracks effects on global variables by comparing
+ // register pointers. Because CSE executes multiple times while the backend
+ // executes once, we use the following performance trade-off:
+ // - The node refers directly to the register pointer to make CSE super cheap.
+ // - To perform backend code generation, the node only contains the identifier
+ // number, from which it is possible to get (via a few average-time O(1)
+ // lookups) to the WatchpointSet.
+
+ addToGraph(GlobalVarWatchpoint, OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)), OpInfo(identifier));
+
+ JSValue specificValue = globalObject->registerAt(entry.getIndex()).get();
+ ASSERT(specificValue.isCell());
+ *value = cellConstant(specificValue.asCell());
+ return true;
+ }
+ case ResolveOperation::GetAndReturnScopedVar: {
+ NodeIndex getScopeRegisters = addToGraph(GetScopeRegisters, localBase);
+ *value = addToGraph(GetScopedVar, OpInfo(resolveValueOperation->m_offset), OpInfo(prediction), getScopeRegisters);
+ return true;
+ }
+ default:
+ CRASH();
+ return false;
+ }
+
+}
+
bool ByteCodeParser::parseBlock(unsigned limit)
{
bool shouldContinueParsing = true;
-
+
Interpreter* interpreter = m_globalData->interpreter;
Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
unsigned blockBegin = m_currentIndex;
@@ -2364,26 +2539,6 @@ bool ByteCodeParser::parseBlock(unsigned limit)
m_currentIndex += OPCODE_LENGTH(op_method_check) + OPCODE_LENGTH(op_get_by_id);
continue;
}
- case op_get_scoped_var: {
- SpeculatedType prediction = getPrediction();
- int dst = currentInstruction[1].u.operand;
- int slot = currentInstruction[2].u.operand;
- int depth = currentInstruction[3].u.operand;
- NodeIndex getScope = addToGraph(GetScope, OpInfo(depth));
- NodeIndex getScopeRegisters = addToGraph(GetScopeRegisters, getScope);
- NodeIndex getScopedVar = addToGraph(GetScopedVar, OpInfo(slot), OpInfo(prediction), getScopeRegisters);
- set(dst, getScopedVar);
- NEXT_OPCODE(op_get_scoped_var);
- }
- case op_put_scoped_var: {
- int slot = currentInstruction[1].u.operand;
- int depth = currentInstruction[2].u.operand;
- int source = currentInstruction[3].u.operand;
- NodeIndex getScope = addToGraph(GetScope, OpInfo(depth));
- NodeIndex getScopeRegisters = addToGraph(GetScopeRegisters, getScope);
- addToGraph(PutScopedVar, OpInfo(slot), getScope, getScopeRegisters, get(source));
- NEXT_OPCODE(op_put_scoped_var);
- }
case op_get_by_id:
case op_get_by_id_out_of_line:
case op_get_array_length: {
@@ -2510,75 +2665,15 @@ bool ByteCodeParser::parseBlock(unsigned limit)
NEXT_OPCODE(op_put_by_id);
}
- case op_get_global_var: {
- SpeculatedType prediction = getPrediction();
-
- JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
-
- NodeIndex getGlobalVar = addToGraph(
- GetGlobalVar,
- OpInfo(globalObject->assertRegisterIsInThisObject(currentInstruction[2].u.registerPointer)),
- OpInfo(prediction));
- set(currentInstruction[1].u.operand, getGlobalVar);
- NEXT_OPCODE(op_get_global_var);
- }
-
- case op_get_global_var_watchable: {
- SpeculatedType prediction = getPrediction();
-
- JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
-
- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
- Identifier identifier = m_codeBlock->identifier(identifierNumber);
- SymbolTableEntry entry = globalObject->symbolTable()->get(identifier.impl());
- if (!entry.couldBeWatched()) {
- NodeIndex getGlobalVar = addToGraph(
- GetGlobalVar,
- OpInfo(globalObject->assertRegisterIsInThisObject(currentInstruction[2].u.registerPointer)),
- OpInfo(prediction));
- set(currentInstruction[1].u.operand, getGlobalVar);
- NEXT_OPCODE(op_get_global_var_watchable);
- }
-
- // The watchpoint is still intact! This means that we will get notified if the
- // current value in the global variable changes. So, we can inline that value.
- // Moreover, currently we can assume that this value is a JSFunction*, which
- // implies that it's a cell. This simplifies things, since in general we'd have
- // to use a JSConstant for non-cells and a WeakJSConstant for cells. So instead
- // of having both cases we just assert that the value is a cell.
-
- // NB. If it wasn't for CSE, GlobalVarWatchpoint would have no need for the
- // register pointer. But CSE tracks effects on global variables by comparing
- // register pointers. Because CSE executes multiple times while the backend
- // executes once, we use the following performance trade-off:
- // - The node refers directly to the register pointer to make CSE super cheap.
- // - To perform backend code generation, the node only contains the identifier
- // number, from which it is possible to get (via a few average-time O(1)
- // lookups) to the WatchpointSet.
-
- addToGraph(
- GlobalVarWatchpoint,
- OpInfo(globalObject->assertRegisterIsInThisObject(currentInstruction[2].u.registerPointer)),
- OpInfo(identifierNumber));
-
- JSValue specificValue = globalObject->registerAt(entry.getIndex()).get();
- ASSERT(specificValue.isCell());
- set(currentInstruction[1].u.operand, cellConstant(specificValue.asCell()));
-
- NEXT_OPCODE(op_get_global_var_watchable);
- }
-
- case op_put_global_var:
case op_init_global_const: {
NodeIndex value = get(currentInstruction[2].u.operand);
addToGraph(
PutGlobalVar,
OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
value);
- NEXT_OPCODE(op_put_global_var);
+ NEXT_OPCODE(op_init_global_const);
}
- case op_put_global_var_check:
case op_init_global_const_check: {
NodeIndex value = get(currentInstruction[2].u.operand);
CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
@@ -2591,16 +2686,17 @@ bool ByteCodeParser::parseBlock(unsigned limit)
PutGlobalVar,
OpInfo(globalObject->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
value);
- NEXT_OPCODE(op_put_global_var_check);
+ NEXT_OPCODE(op_init_global_const_check);
}
addToGraph(
PutGlobalVarCheck,
OpInfo(codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
OpInfo(identifierNumber),
value);
- NEXT_OPCODE(op_put_global_var_check);
+ NEXT_OPCODE(op_init_global_const_check);
}
+
// === Block terminators. ===
case op_jmp: {
@@ -2869,69 +2965,175 @@ bool ByteCodeParser::parseBlock(unsigned limit)
addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
LAST_OPCODE(op_jneq_ptr);
- case op_resolve: {
+ case op_resolve:
+ case op_resolve_global_property:
+ case op_resolve_global_var:
+ case op_resolve_scoped_var:
+ case op_resolve_scoped_var_on_top_scope:
+ case op_resolve_scoped_var_with_top_scope_check: {
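+ // Try to lower the profiled ResolveOperations chain to explicit graph nodes
+ // (structure checks, GetGlobalVar, GetScopedVar, and so on); if that is not
+ // possible, fall back to an opaque Resolve node described by ResolveOperationData.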
SpeculatedType prediction = getPrediction();
unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
+ unsigned operations = m_inlineStackTop->m_resolveOperationRemap[currentInstruction[3].u.operand];
+ NodeIndex value = 0;
+ if (parseResolveOperations(prediction, identifier, operations, 0, 0, &value)) {
+ set(currentInstruction[1].u.operand, value);
+ NEXT_OPCODE(op_resolve);
+ }
+
+ NodeIndex resolve = addToGraph(Resolve, OpInfo(m_graph.m_resolveOperationsData.size()), OpInfo(prediction));
+ m_graph.m_resolveOperationsData.append(ResolveOperationData());
+ ResolveOperationData& data = m_graph.m_resolveOperationsData.last();
+ data.identifierNumber = identifier;
+ data.resolveOperationsIndex = operations;
- NodeIndex resolve = addToGraph(Resolve, OpInfo(identifier), OpInfo(prediction));
set(currentInstruction[1].u.operand, resolve);
NEXT_OPCODE(op_resolve);
}
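+ // put_to_base is lowered according to the profiled PutToBaseOperation kind:
+ // dynamic and generic stores become PutById, global variable stores become
+ // PutGlobalVar or PutGlobalVarCheck, scoped variable stores become PutScopedVar,
+ // and global property stores become CheckStructure plus PutByOffset; cases with
+ // no usable profile conservatively emit ForceOSRExit.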
+ case op_put_to_base_variable:
+ case op_put_to_base: {
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
+ unsigned value = currentInstruction[3].u.operand;
+ unsigned operation = m_inlineStackTop->m_putToBaseOperationRemap[currentInstruction[4].u.operand];
+ PutToBaseOperation* putToBase = m_codeBlock->putToBaseOperation(operation);
+
+ if (putToBase->m_isDynamic) {
+ addToGraph(Phantom, get(base));
+ addToGraph(PutById, OpInfo(identifier), get(base), get(value));
+ NEXT_OPCODE(op_put_to_base);
+ }
+
+ switch (putToBase->m_kind) {
+ case PutToBaseOperation::Uninitialised:
+ addToGraph(Phantom, get(base));
+ addToGraph(ForceOSRExit);
+ break;
+
+ case PutToBaseOperation::GlobalVariablePutChecked: {
+ CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
+ JSGlobalObject* globalObject = codeBlock->globalObject();
+ SymbolTableEntry entry = globalObject->symbolTable()->get(m_codeBlock->identifier(identifier).impl());
+ if (entry.couldBeWatched()) {
+ addToGraph(PutGlobalVarCheck,
+ OpInfo(codeBlock->globalObject()->assertRegisterIsInThisObject(putToBase->m_registerAddress)),
+ OpInfo(identifier),
+ get(value));
+ break;
+ }
+ }
+ case PutToBaseOperation::GlobalVariablePut:
+ addToGraph(PutGlobalVar,
+ OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(putToBase->m_registerAddress)),
+ get(value));
+ break;
+ case PutToBaseOperation::VariablePut: {
+ addToGraph(Phantom, get(base));
+ NodeIndex getScope = addToGraph(GetScope, OpInfo(putToBase->m_scopeDepth));
+ NodeIndex getScopeRegisters = addToGraph(GetScopeRegisters, getScope);
+ addToGraph(PutScopedVar, OpInfo(putToBase->m_offset), getScope, getScopeRegisters, get(value));
+ break;
+ }
+ case PutToBaseOperation::GlobalPropertyPut: {
+ if (!putToBase->m_structure) {
+ addToGraph(Phantom, get(base));
+ addToGraph(ForceOSRExit);
+ NEXT_OPCODE(op_put_to_base);
+ }
+ NodeIndex baseNode = get(base);
+ addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putToBase->m_structure.get())), baseNode);
+ NodeIndex propertyStorage;
+ if (isInlineOffset(putToBase->m_offset))
+ propertyStorage = baseNode;
+ else
+ propertyStorage = addToGraph(GetButterfly, baseNode);
+ addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, baseNode, get(value));
+
+ StorageAccessData storageAccessData;
+ storageAccessData.offset = indexRelativeToBase(putToBase->m_offset);
+ storageAccessData.identifierNumber = identifier;
+ m_graph.m_storageAccessData.append(storageAccessData);
+ break;
+ }
+ case PutToBaseOperation::Readonly:
+ case PutToBaseOperation::Generic:
+ addToGraph(Phantom, get(base));
+ addToGraph(PutById, OpInfo(identifier), get(base), get(value));
+ }
+ NEXT_OPCODE(op_put_to_base);
+ }
+
+ case op_resolve_base_to_global:
+ case op_resolve_base_to_global_dynamic:
+ case op_resolve_base_to_scope:
+ case op_resolve_base_to_scope_with_top_scope_check:
case op_resolve_base: {
SpeculatedType prediction = getPrediction();
unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
+ unsigned operations = m_inlineStackTop->m_resolveOperationRemap[currentInstruction[4].u.operand];
+ unsigned putToBaseOperation = m_inlineStackTop->m_putToBaseOperationRemap[currentInstruction[5].u.operand];
- NodeIndex resolve = addToGraph(currentInstruction[3].u.operand ? ResolveBaseStrictPut : ResolveBase, OpInfo(identifier), OpInfo(prediction));
+ NodeIndex base = 0;
+ if (parseResolveOperations(prediction, identifier, operations, 0, &base, 0)) {
+ set(currentInstruction[1].u.operand, base);
+ NEXT_OPCODE(op_resolve_base);
+ }
+
+ NodeIndex resolve = addToGraph(currentInstruction[3].u.operand ? ResolveBaseStrictPut : ResolveBase, OpInfo(m_graph.m_resolveOperationsData.size()), OpInfo(prediction));
+ m_graph.m_resolveOperationsData.append(ResolveOperationData());
+ ResolveOperationData& data = m_graph.m_resolveOperationsData.last();
+ data.identifierNumber = identifier;
+ data.resolveOperationsIndex = operations;
+ data.putToBaseOperationIndex = putToBaseOperation;
+
set(currentInstruction[1].u.operand, resolve);
NEXT_OPCODE(op_resolve_base);
}
-
- case op_resolve_global: {
+ case op_resolve_with_base: {
SpeculatedType prediction = getPrediction();
-
- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[
- currentInstruction[2].u.operand];
-
- ResolveGlobalStatus status = ResolveGlobalStatus::computeFor(
- m_inlineStackTop->m_profiledBlock, m_currentIndex,
- m_codeBlock->identifier(identifierNumber));
- if (status.isSimple()) {
- ASSERT(status.structure());
-
- NodeIndex globalObject = addStructureTransitionCheck(
- m_inlineStackTop->m_codeBlock->globalObject(), status.structure());
-
- if (status.specificValue()) {
- ASSERT(status.specificValue().isCell());
-
- set(currentInstruction[1].u.operand,
- cellConstant(status.specificValue().asCell()));
- } else {
- handleGetByOffset(
- currentInstruction[1].u.operand, prediction, globalObject,
- identifierNumber, status.offset());
- }
-
- m_globalResolveNumber++; // Skip over the unused global resolve info.
-
- NEXT_OPCODE(op_resolve_global);
+ unsigned baseDst = currentInstruction[1].u.operand;
+ unsigned valueDst = currentInstruction[2].u.operand;
+ unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
+ unsigned operations = m_inlineStackTop->m_resolveOperationRemap[currentInstruction[4].u.operand];
+ unsigned putToBaseOperation = m_inlineStackTop->m_putToBaseOperationRemap[currentInstruction[5].u.operand];
+
+ NodeIndex base = 0;
+ NodeIndex value = 0;
+ if (parseResolveOperations(prediction, identifier, operations, putToBaseOperation, &base, &value)) {
+ set(baseDst, base);
+ set(valueDst, value);
+ } else {
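+ // The chain could not be inlined: bail out to the baseline JIT. The GarbageValue
+ // results are placeholders that are never observed, since ForceOSRExit ends
+ // execution in the DFG along this path.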
+ addToGraph(ForceOSRExit);
+ set(baseDst, addToGraph(GarbageValue));
+ set(valueDst, addToGraph(GarbageValue));
}
-
- NodeIndex resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
- m_graph.m_resolveGlobalData.append(ResolveGlobalData());
- ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
- data.identifierNumber = identifierNumber;
- data.resolveInfoIndex = m_globalResolveNumber++;
- set(currentInstruction[1].u.operand, resolve);
- NEXT_OPCODE(op_resolve_global);
+ NEXT_OPCODE(op_resolve_with_base);
}
+ case op_resolve_with_this: {
+ SpeculatedType prediction = getPrediction();
+ unsigned baseDst = currentInstruction[1].u.operand;
+ unsigned valueDst = currentInstruction[2].u.operand;
+ unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
+ unsigned operations = m_inlineStackTop->m_resolveOperationRemap[currentInstruction[4].u.operand];
+
+ NodeIndex base = 0;
+ NodeIndex value = 0;
+ if (parseResolveOperations(prediction, identifier, operations, 0, &base, &value)) {
+ set(baseDst, base);
+ set(valueDst, value);
+ } else {
+ addToGraph(ForceOSRExit);
+ set(baseDst, addToGraph(GarbageValue));
+ set(valueDst, addToGraph(GarbageValue));
+ }
+ NEXT_OPCODE(op_resolve_with_this);
+ }
case op_loop_hint: {
// Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
// OSR can only happen at basic block boundaries. Assert that these two statements
@@ -2943,7 +3145,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
// block. Hence, machine code block = true code block = not inline code block.
if (!m_inlineStackTop->m_caller)
m_currentBlock->isOSRTarget = true;
-
+
// Emit a phantom node to ensure that there is a placeholder node for this bytecode
// op.
addToGraph(Phantom);
@@ -3331,6 +3533,8 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
+ m_resolveOperationRemap.resize(codeBlock->numberOfResolveOperations());
+ m_putToBaseOperationRemap.resize(codeBlock->numberOfPutToBaseOperations());
for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
StringImpl* rep = codeBlock->identifier(i).impl();
@@ -3357,8 +3561,11 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
}
m_constantRemap[i] = result.iterator->value;
}
- for (unsigned i = 0; i < codeBlock->numberOfGlobalResolveInfos(); ++i)
- byteCodeParser->m_codeBlock->addGlobalResolveInfo(std::numeric_limits<unsigned>::max());
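+ // When inlining, clone the callee's resolve operations into the machine code
+ // block and remember how their indices were remapped.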
+ for (size_t i = 0; i < codeBlock->numberOfResolveOperations(); i++) {
+ uint32_t newResolve = byteCodeParser->m_codeBlock->addResolve();
+ m_resolveOperationRemap[i] = newResolve;
+ byteCodeParser->m_codeBlock->resolveOperations(newResolve)->append(*codeBlock->resolveOperations(i));
+ }
for (unsigned i = 0; i < codeBlock->numberOfConstantBuffers(); ++i) {
// If we inline the same code block multiple times, we don't want to needlessly
// duplicate its constant buffers.
@@ -3373,6 +3580,11 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
m_constantBufferRemap[i] = newIndex;
byteCodeParser->m_constantBufferCache.add(ConstantBufferKey(codeBlock, i), newIndex);
}
+ for (size_t i = 0; i < codeBlock->numberOfPutToBaseOperations(); i++) {
+ uint32_t putToBaseResolve = byteCodeParser->m_codeBlock->addPutToBase();
+ m_putToBaseOperationRemap[i] = putToBaseResolve;
+ *byteCodeParser->m_codeBlock->putToBaseOperation(putToBaseResolve) = *codeBlock->putToBaseOperation(i);
+ }
m_callsiteBlockHeadNeedsLinking = true;
} else {
@@ -3389,6 +3601,8 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
+ m_resolveOperationRemap.resize(codeBlock->numberOfResolveOperations());
+ m_putToBaseOperationRemap.resize(codeBlock->numberOfPutToBaseOperations());
for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
m_identifierRemap[i] = i;
@@ -3396,6 +3610,10 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
m_constantRemap[i] = i + FirstConstantRegisterIndex;
for (size_t i = 0; i < codeBlock->numberOfConstantBuffers(); ++i)
m_constantBufferRemap[i] = i;
+ for (size_t i = 0; i < codeBlock->numberOfResolveOperations(); ++i)
+ m_resolveOperationRemap[i] = i;
+ for (size_t i = 0; i < codeBlock->numberOfPutToBaseOperations(); ++i)
+ m_putToBaseOperationRemap[i] = i;
m_callsiteBlockHeadNeedsLinking = false;
}
diff --git a/Source/JavaScriptCore/dfg/DFGCCallHelpers.h b/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
index 4a6024305..a2570b7ea 100644
--- a/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
+++ b/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
@@ -551,7 +551,21 @@ public:
move(arg2, GPRInfo::argumentGPR2);
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
+#if CPU(X86_64)
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm64 arg2)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
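+ // Argument-ordering note: when arg1 is an immediate, arg2 may already live in
+ // argumentGPR1, so it must be moved to argumentGPR2 before arg1 is materialized.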
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm64 arg1, GPRReg arg2)
+ {
+ move(arg2, GPRInfo::argumentGPR2); // Move this first, so setting arg1 does not trample!
+ move(arg1, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+#endif
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2)
{
move(arg1, GPRInfo::argumentGPR1);
diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.h b/Source/JavaScriptCore/dfg/DFGCapabilities.h
index dc6f7aa1c..e80cc28ae 100644
--- a/Source/JavaScriptCore/dfg/DFGCapabilities.h
+++ b/Source/JavaScriptCore/dfg/DFGCapabilities.h
@@ -68,6 +68,48 @@ inline bool mightInlineFunctionForConstruct(CodeBlock* codeBlock)
}
// Opcode checking.
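+// Decide whether every step in a profiled ResolveOperations chain is one the
+// ByteCodeParser knows how to lower when inlining (see parseResolveOperations()).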
+inline bool canInlineResolveOperations(OpcodeID opcode, ResolveOperations* operations)
+{
+ // Don't try to inline a resolve for which we have no information
+ if (operations->isEmpty())
+ return false;
+
+ for (unsigned i = 0; i < operations->size(); i++) {
+ switch (operations->data()[i].m_operation) {
+ case ResolveOperation::ReturnGlobalObjectAsBase:
+ case ResolveOperation::SetBaseToGlobal:
+ case ResolveOperation::SetBaseToUndefined:
+ case ResolveOperation::GetAndReturnGlobalProperty:
+ case ResolveOperation::GetAndReturnGlobalVar:
+ case ResolveOperation::GetAndReturnGlobalVarWatchable:
+ continue;
+
+ case ResolveOperation::Fail:
+ // The DFG can handle generic cases of failed resolves
+ ASSERT(opcode != op_resolve_base_to_global_dynamic);
+ ASSERT(opcode != op_resolve_base_to_scope_with_top_scope_check);
+ ASSERT(opcode != op_resolve_base_to_global);
+ ASSERT(opcode != op_resolve_base_to_scope);
+ if (opcode != op_resolve && opcode != op_resolve_base)
+ return false;
+ continue;
+
+ case ResolveOperation::SkipTopScopeNode:
+ case ResolveOperation::SkipScopes:
+ case ResolveOperation::SetBaseToScope:
+ case ResolveOperation::ReturnScopeAsBase:
+ case ResolveOperation::GetAndReturnScopedVar:
+ // These operations would be easy to support with inlining, but we currently don't do it.
+ // The issue is that the scope chain will not be set correctly.
+ return false;
+
+ case ResolveOperation::CheckForDynamicEntriesBeforeGlobalScope:
+ // This would be easy to support in all cases.
+ return false;
+ }
+ }
+ return true;
+}
+
inline CapabilityLevel canCompileOpcode(OpcodeID opcodeID, CodeBlock*, Instruction*)
{
switch (opcodeID) {
@@ -116,8 +158,6 @@ inline CapabilityLevel canCompileOpcode(OpcodeID opcodeID, CodeBlock*, Instructi
case op_get_by_val:
case op_put_by_val:
case op_method_check:
- case op_get_scoped_var:
- case op_put_scoped_var:
case op_get_by_id:
case op_get_by_id_out_of_line:
case op_get_array_length:
@@ -127,10 +167,6 @@ inline CapabilityLevel canCompileOpcode(OpcodeID opcodeID, CodeBlock*, Instructi
case op_put_by_id_transition_direct_out_of_line:
case op_put_by_id_transition_normal:
case op_put_by_id_transition_normal_out_of_line:
- case op_get_global_var:
- case op_get_global_var_watchable:
- case op_put_global_var:
- case op_put_global_var_check:
case op_init_global_const:
case op_init_global_const_check:
case op_jmp:
@@ -157,9 +193,6 @@ inline CapabilityLevel canCompileOpcode(OpcodeID opcodeID, CodeBlock*, Instructi
case op_ret:
case op_end:
case op_call_put_result:
- case op_resolve:
- case op_resolve_base:
- case op_resolve_global:
case op_new_object:
case op_new_array:
case op_new_array_with_size:
@@ -181,11 +214,30 @@ inline CapabilityLevel canCompileOpcode(OpcodeID opcodeID, CodeBlock*, Instructi
case op_get_argument_by_val:
case op_get_arguments_length:
case op_jneq_ptr:
+ case op_put_to_base_variable:
+ case op_put_to_base:
return CanCompile;
case op_call_varargs:
return ShouldProfile;
+ case op_resolve:
+ case op_resolve_global_property:
+ case op_resolve_global_var:
+ case op_resolve_scoped_var:
+ case op_resolve_scoped_var_on_top_scope:
+ case op_resolve_scoped_var_with_top_scope_check:
+ case op_resolve_base_to_global:
+ case op_resolve_base_to_global_dynamic:
+ case op_resolve_base_to_scope:
+ case op_resolve_base_to_scope_with_top_scope_check:
+ case op_resolve_base:
+ case op_resolve_with_base:
+ case op_resolve_with_this:
+ return CanCompile;
+
default:
return CannotCompile;
}
@@ -194,13 +246,22 @@ inline CapabilityLevel canCompileOpcode(OpcodeID opcodeID, CodeBlock*, Instructi
inline bool canInlineOpcode(OpcodeID opcodeID, CodeBlock* codeBlock, Instruction* pc)
{
switch (opcodeID) {
-
- // These opcodes would be easy to support with inlining, but we currently don't do it.
- // The issue is that the scope chain will not be set correctly.
- case op_get_scoped_var:
- case op_put_scoped_var:
case op_resolve:
+ case op_resolve_global_property:
+ case op_resolve_global_var:
+ case op_resolve_scoped_var:
+ case op_resolve_scoped_var_on_top_scope:
+ case op_resolve_scoped_var_with_top_scope_check:
+ return canInlineResolveOperations(opcodeID, codeBlock->resolveOperations(pc[3].u.operand));
+
+ case op_resolve_base_to_global:
+ case op_resolve_base_to_global_dynamic:
+ case op_resolve_base_to_scope:
+ case op_resolve_base_to_scope_with_top_scope_check:
case op_resolve_base:
+ case op_resolve_with_base:
+ case op_resolve_with_this:
+ return canInlineResolveOperations(opcodeID, codeBlock->resolveOperations(pc[4].u.operand));
// Inlining doesn't correctly remap regular expression operands.
case op_new_regexp:
diff --git a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
index 8a261ad2b..25915cfd4 100644
--- a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
@@ -102,7 +102,8 @@ private:
break;
}
- case CheckArray: {
+ case CheckArray:
+ case Arrayify: {
if (!modeAlreadyChecked(m_state.forNode(node.child1()), node.arrayMode()))
break;
ASSERT(node.refCount() == 1);
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.h b/Source/JavaScriptCore/dfg/DFGGraph.h
index 212c8bbd2..b2c754f85 100644
--- a/Source/JavaScriptCore/dfg/DFGGraph.h
+++ b/Source/JavaScriptCore/dfg/DFGGraph.h
@@ -57,10 +57,23 @@ struct StorageAccessData {
struct ResolveGlobalData {
unsigned identifierNumber;
- unsigned resolveInfoIndex;
+ unsigned resolveOperationsIndex;
+ unsigned putToBaseOperationIndex;
+ unsigned resolvePropertyIndex;
};
-//
+struct ResolveOperationData {
+ unsigned identifierNumber;
+ unsigned resolveOperationsIndex;
+ unsigned putToBaseOperationIndex;
+};
+
+struct PutToBaseOperationData {
+ unsigned putToBaseOperationIndex;
+};
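+// The *Index fields are offsets into the CodeBlock's resolve/putToBase operation
+// tables; a node's OpInfo holds the index of its record in the Graph's vectors
+// below (see Node::resolveOperationsDataIndex()).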
+
+
+//
// === Graph ===
//
// The dataflow graph is an ordered vector of nodes.
@@ -669,6 +682,8 @@ public:
Vector<Edge, 16> m_varArgChildren;
Vector<StorageAccessData> m_storageAccessData;
Vector<ResolveGlobalData> m_resolveGlobalData;
+ Vector<ResolveOperationData> m_resolveOperationsData;
+ Vector<PutToBaseOperationData> m_putToBaseOperationData;
Vector<NodeIndex, 8> m_arguments;
SegmentedVector<VariableAccessData, 16> m_variableAccessData;
SegmentedVector<ArgumentPosition, 8> m_argumentPositions;
diff --git a/Source/JavaScriptCore/dfg/DFGNode.h b/Source/JavaScriptCore/dfg/DFGNode.h
index df6191eab..40b3ed7ec 100644
--- a/Source/JavaScriptCore/dfg/DFGNode.h
+++ b/Source/JavaScriptCore/dfg/DFGNode.h
@@ -352,9 +352,6 @@ struct Node {
case GetByIdFlush:
case PutById:
case PutByIdDirect:
- case Resolve:
- case ResolveBase:
- case ResolveBaseStrictPut:
return true;
default:
return false;
@@ -373,6 +370,12 @@ struct Node {
return m_opInfo;
}
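+ // For Resolve/ResolveBase/ResolveBaseStrictPut, m_opInfo is an index into
+ // Graph::m_resolveOperationsData.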
+ unsigned resolveOperationsDataIndex()
+ {
+ ASSERT(op() == Resolve || op() == ResolveBase || op() == ResolveBaseStrictPut);
+ return m_opInfo;
+ }
+
bool hasArithNodeFlags()
{
switch (op()) {
diff --git a/Source/JavaScriptCore/dfg/DFGNodeType.h b/Source/JavaScriptCore/dfg/DFGNodeType.h
index 9c93a8ba3..1d2460659 100644
--- a/Source/JavaScriptCore/dfg/DFGNodeType.h
+++ b/Source/JavaScriptCore/dfg/DFGNodeType.h
@@ -234,6 +234,8 @@ namespace JSC { namespace DFG {
macro(Throw, NodeMustGenerate) \
macro(ThrowReferenceError, NodeMustGenerate) \
\
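+ /* A placeholder result for resolves that could not be inlined; it is always */\
+ /* preceded by ForceOSRExit, so the garbage value is never actually observed. */\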
+ macro(GarbageValue, NodeResultJS | NodeClobbersWorld) \
+ \
/* This is a pseudo-terminal. It means that execution should fall out of DFG at */\
/* this point, but execution does continue in the basic block - just in a */\
/* different compiler. */\
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
index b3701722e..6560088fd 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
@@ -45,9 +45,9 @@ OSRExit::OSRExit(ExitKind kind, JSValueSource jsValueSource, MethodOfGettingAVal
, m_kind(kind)
, m_count(0)
, m_streamIndex(streamIndex)
- , m_lastSetOperand(jit->m_lastSetOperand)
{
ASSERT(m_codeOrigin.isSet());
+ m_setOperands.append(jit->m_lastSetOperand);
}
bool OSRExit::considerAddingAsFrequentExitSiteSlow(CodeBlock* dfgCodeBlock, CodeBlock* profiledCodeBlock)
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.h b/Source/JavaScriptCore/dfg/DFGOSRExit.h
index cd2434c11..0ecefe386 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExit.h
+++ b/Source/JavaScriptCore/dfg/DFGOSRExit.h
@@ -110,9 +110,9 @@ struct OSRExit {
}
unsigned m_streamIndex;
- int m_lastSetOperand;
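+ // An exit may now have to restore more than one operand: resolve_with_base and
+ // resolve_with_this each write two result registers (base and value).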
+ Vector<int, 1> m_setOperands;
- RefPtr<ValueRecoveryOverride> m_valueRecoveryOverride;
+ Vector<RefPtr<ValueRecoveryOverride>, 1> m_valueRecoveryOverrides;
private:
bool considerAddingAsFrequentExitSiteSlow(CodeBlock* dfgCodeBlock, CodeBlock* profiledCodeBlock);
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
index 2ce1c887b..55a903c7a 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
@@ -70,11 +70,10 @@ void compileOSRExit(ExecState* exec)
Operands<ValueRecovery> operands;
codeBlock->variableEventStream().reconstruct(codeBlock, exit.m_codeOrigin, codeBlock->minifiedDFG(), exit.m_streamIndex, operands);
- // There may be an override, for forward speculations.
- if (!!exit.m_valueRecoveryOverride) {
- operands.setOperand(
- exit.m_valueRecoveryOverride->operand, exit.m_valueRecoveryOverride->recovery);
- }
+ // There may be overrides, for forward speculations.
+ for (size_t i = 0; i < exit.m_valueRecoveryOverrides.size(); i++)
+ operands.setOperand(exit.m_valueRecoveryOverrides[i]->operand, exit.m_valueRecoveryOverrides[i]->recovery);
+
SpeculationRecovery* recovery = 0;
if (exit.m_recoveryIndex)
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
index cb13dcc50..b64ce3fa1 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
@@ -83,28 +83,85 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
// 3) Refine some value profile, if appropriate.
- if (!!exit.m_jsValueSource && !!exit.m_valueProfile) {
- EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
+ if (!!exit.m_jsValueSource) {
+ if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
+ // If the instruction that this originated from has an array profile, then
+ // refine it. If it doesn't, then do nothing. The latter could happen for
+ // hoisted checks, or checks emitted for operations that didn't have array
+ // profiling - either ops that aren't array accesses at all, or weren't
+ known to be array accesses in the bytecode. The latter case is a FIXME
+ while the former case is an outcome of a CheckStructure not knowing why
+ it was emitted (could be either due to an inline cache of a property
+ access, or due to an array profile).
+
+ // Note: We are free to assume that the jsValueSource is already known to
+ // be a cell since both BadCache and BadIndexingType exits occur after
+ // the cell check would have already happened.
+
+ CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
+ if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
+ GPRReg usedRegister1;
+ GPRReg usedRegister2;
+ if (exit.m_jsValueSource.isAddress()) {
+ usedRegister1 = exit.m_jsValueSource.base();
+ usedRegister2 = InvalidGPRReg;
+ } else {
+ usedRegister1 = exit.m_jsValueSource.payloadGPR();
+ if (exit.m_jsValueSource.hasKnownTag())
+ usedRegister2 = InvalidGPRReg;
+ else
+ usedRegister2 = exit.m_jsValueSource.tagGPR();
+ }
+
+ GPRReg scratch1;
+ GPRReg scratch2;
+ scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
+ scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);
+
+ m_jit.push(scratch1);
+ m_jit.push(scratch2);
+
+ GPRReg value;
+ if (exit.m_jsValueSource.isAddress()) {
+ value = scratch1;
+ m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
+ } else
+ value = exit.m_jsValueSource.payloadGPR();
+
+ m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
+ m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
+ m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
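+ // Merge (1 << indexingType) into the profile's ArrayModes bitfield.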
+ m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
+ m_jit.lshift32(scratch1, scratch2);
+ m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
+
+ m_jit.pop(scratch2);
+ m_jit.pop(scratch1);
+ }
+ }
- if (exit.m_jsValueSource.isAddress()) {
- // Save a register so we can use it.
- GPRReg scratch = GPRInfo::regT0;
- if (scratch == exit.m_jsValueSource.base())
- scratch = GPRInfo::regT1;
- ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(sizeof(uint32_t));
- EncodedJSValue* scratchDataBuffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
- m_jit.store32(scratch, scratchDataBuffer);
- m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
- m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
- m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
- m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
- m_jit.load32(scratchDataBuffer, scratch);
- } else if (exit.m_jsValueSource.hasKnownTag()) {
- m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
- m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
- } else {
- m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
- m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
+ if (!!exit.m_valueProfile) {
+ EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
+
+ if (exit.m_jsValueSource.isAddress()) {
+ // Save a register so we can use it.
+ GPRReg scratch = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());
+
+ m_jit.push(scratch);
+
+ m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
+ m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
+ m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
+ m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
+
+ m_jit.pop(scratch);
+ } else if (exit.m_jsValueSource.hasKnownTag()) {
+ m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
+ m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
+ } else {
+ m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
+ m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
+ }
}
}
@@ -675,9 +732,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
// 15) Load the result of the last bytecode operation into regT0.
- if (exit.m_lastSetOperand != std::numeric_limits<int>::max()) {
- m_jit.load32(AssemblyHelpers::payloadFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
- m_jit.load32(AssemblyHelpers::tagFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister2);
+ for (size_t i = 0; i < exit.m_setOperands.size(); i++) {
+ m_jit.load32(AssemblyHelpers::payloadFor((VirtualRegister)exit.m_setOperands[i]), GPRInfo::cachedResultRegister);
+ m_jit.load32(AssemblyHelpers::tagFor((VirtualRegister)exit.m_setOperands[i]), GPRInfo::cachedResultRegister2);
}
// 16) Adjust the call frame pointer.
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
index 968e56f1a..65b89a550 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
@@ -73,12 +73,12 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
switch (recovery->type()) {
case SpeculativeAdd:
m_jit.sub32(recovery->src(), recovery->dest());
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery->dest());
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery->dest());
alreadyBoxed = recovery->dest();
break;
case BooleanSpeculationCheck:
- m_jit.xorPtr(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
+ m_jit.xor64(AssemblyHelpers::TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
break;
default:
@@ -86,23 +86,70 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
}
}
- // 3) Refine some value profile, if appropriate.
-
- if (!!exit.m_jsValueSource && !!exit.m_valueProfile) {
- EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
+ // 3) Refine some array and/or value profile, if appropriate.
+
+ if (!!exit.m_jsValueSource) {
+ if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
+ // If the instruction that this originated from has an array profile, then
+ // refine it. If it doesn't, then do nothing. The latter could happen for
+ // hoisted checks, or checks emitted for operations that didn't have array
+ // profiling - either ops that aren't array accesses at all, or weren't
+ known to be array accesses in the bytecode. The latter case is a FIXME
+ while the former case is an outcome of a CheckStructure not knowing why
+ it was emitted (could be either due to an inline cache of a property
+ access, or due to an array profile).
+
+ CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
+ if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
+ GPRReg usedRegister;
+ if (exit.m_jsValueSource.isAddress())
+ usedRegister = exit.m_jsValueSource.base();
+ else
+ usedRegister = exit.m_jsValueSource.gpr();
+
+ GPRReg scratch1;
+ GPRReg scratch2;
+ scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister);
+ scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1);
+
+ m_jit.push(scratch1);
+ m_jit.push(scratch2);
+
+ GPRReg value;
+ if (exit.m_jsValueSource.isAddress()) {
+ value = scratch1;
+ m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
+ } else
+ value = exit.m_jsValueSource.gpr();
+
+ m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
+ m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
+ m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
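+ // Merge (1 << indexingType) into the profile's ArrayModes bitfield.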
+ m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
+ m_jit.lshift32(scratch1, scratch2);
+ m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
+
+ m_jit.pop(scratch2);
+ m_jit.pop(scratch1);
+ }
+ }
+ if (!!exit.m_valueProfile) {
+ EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
+
#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
- dataLog(" (have exit profile, bucket %p) ", bucket);
+ dataLog(" (have exit profile, bucket %p) ", bucket);
#endif
- if (exit.m_jsValueSource.isAddress()) {
- // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
- // since we know how to restore it.
- m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
- m_jit.storePtr(GPRInfo::tagTypeNumberRegister, bucket);
- m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(TagTypeNumber)), GPRInfo::tagTypeNumberRegister);
- } else
- m_jit.storePtr(exit.m_jsValueSource.gpr(), bucket);
+ if (exit.m_jsValueSource.isAddress()) {
+ // We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
+ // since we know how to restore it.
+ m_jit.load64(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
+ m_jit.store64(GPRInfo::tagTypeNumberRegister, bucket);
+ m_jit.move(AssemblyHelpers::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
+ } else
+ m_jit.store64(exit.m_jsValueSource.gpr(), bucket);
+ }
}
// 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
@@ -230,7 +277,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
switch (recovery.technique()) {
case UnboxedInt32InGPR:
if (recovery.gpr() != alreadyBoxed)
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery.gpr());
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery.gpr());
break;
case AlreadyInJSStackAsUnboxedInt32:
@@ -252,7 +299,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
if (addressGPR == recovery.gpr())
addressGPR = GPRInfo::regT1;
- m_jit.storePtr(addressGPR, scratchDataBuffer);
+ m_jit.store64(addressGPR, scratchDataBuffer);
m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchDataBuffer + 1), addressGPR);
m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);
@@ -266,12 +313,12 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
positive.link(&m_jit);
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery.gpr());
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, recovery.gpr());
done.link(&m_jit);
m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0);
- m_jit.loadPtr(scratchDataBuffer, addressGPR);
+ m_jit.load64(scratchDataBuffer, addressGPR);
break;
}
@@ -296,11 +343,11 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
case UnboxedInt32InGPR:
case UInt32InGPR:
if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
- m_jit.storePtr(recovery.gpr(), scratchDataBuffer + currentPoisonIndex);
+ m_jit.store64(recovery.gpr(), scratchDataBuffer + currentPoisonIndex);
m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
currentPoisonIndex++;
} else
- m_jit.storePtr(recovery.gpr(), AssemblyHelpers::addressFor((VirtualRegister)operand));
+ m_jit.store64(recovery.gpr(), AssemblyHelpers::addressFor((VirtualRegister)operand));
break;
default:
break;
@@ -330,11 +377,11 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
continue;
GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(recovery.fpr()));
if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
- m_jit.storePtr(gpr, scratchDataBuffer + currentPoisonIndex);
+ m_jit.store64(gpr, scratchDataBuffer + currentPoisonIndex);
m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
currentPoisonIndex++;
} else
- m_jit.storePtr(gpr, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
+ m_jit.store64(gpr, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
}
}
@@ -348,7 +395,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
continue;
m_jit.loadDouble(AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)), FPRInfo::fpRegT0);
m_jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
- m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
+ m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
}
}
@@ -368,20 +415,20 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
const ValueRecovery& recovery = operands[index];
switch (recovery.technique()) {
case DisplacedInJSStack:
- m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
+ m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
break;
case Int32DisplacedInJSStack: {
GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
break;
}
case DoubleDisplacedInJSStack: {
GPRReg gpr = GPRInfo::toRegister(displacementIndex++);
- m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
- m_jit.subPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr);
+ m_jit.sub64(GPRInfo::tagTypeNumberRegister, gpr);
break;
}
@@ -397,7 +444,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
case DisplacedInJSStack:
case Int32DisplacedInJSStack:
case DoubleDisplacedInJSStack:
- m_jit.storePtr(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
+ m_jit.store64(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
break;
default:
@@ -428,21 +475,21 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
switch (recovery.technique()) {
case DisplacedInJSStack:
- m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
- m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
+ m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
+ m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
break;
case Int32DisplacedInJSStack: {
m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
- m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
+ m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
break;
}
case DoubleDisplacedInJSStack: {
- m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
- m_jit.subPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
- m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
+ m_jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
+ m_jit.sub64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
+ m_jit.store64(GPRInfo::regT0, scratchDataBuffer + scratchIndex++);
break;
}
@@ -458,8 +505,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
case DisplacedInJSStack:
case Int32DisplacedInJSStack:
case DoubleDisplacedInJSStack:
- m_jit.loadPtr(scratchDataBuffer + scratchIndex++, GPRInfo::regT0);
- m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
+ m_jit.load64(scratchDataBuffer + scratchIndex++, GPRInfo::regT0);
+ m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
break;
default:
@@ -484,8 +531,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
case UnboxedInt32InGPR:
case UInt32InGPR:
case InFPR:
- m_jit.loadPtr(scratchDataBuffer + poisonIndex(virtualRegister), GPRInfo::regT0);
- m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)virtualRegister));
+ m_jit.load64(scratchDataBuffer + poisonIndex(virtualRegister), GPRInfo::regT0);
+ m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)virtualRegister));
break;
default:
@@ -499,16 +546,16 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
if (haveConstants) {
if (haveUndefined)
- m_jit.move(AssemblyHelpers::TrustedImmPtr(JSValue::encode(jsUndefined())), GPRInfo::regT0);
+ m_jit.move(AssemblyHelpers::TrustedImm64(JSValue::encode(jsUndefined())), GPRInfo::regT0);
for (size_t index = 0; index < operands.size(); ++index) {
const ValueRecovery& recovery = operands[index];
if (recovery.technique() != Constant)
continue;
if (recovery.constant().isUndefined())
- m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
+ m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
else
- m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(JSValue::encode(recovery.constant())), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
+ m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(recovery.constant())), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
}
}
@@ -576,11 +623,11 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
callerFrameGPR = GPRInfo::callFrameRegister;
m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
- m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
- m_jit.storePtr(callerFrameGPR, AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
+ m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->callee->scope()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
+ m_jit.store64(callerFrameGPR, AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ReturnPC)));
m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
- m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
+ m_jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->callee.get()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
}
// 15) Create arguments if necessary and place them into the appropriate aliased
@@ -620,23 +667,23 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
bitwise_cast<void*>(operationCreateArguments)),
GPRInfo::nonArgGPR0);
m_jit.call(GPRInfo::nonArgGPR0);
- m_jit.storePtr(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(argumentsRegister));
- m_jit.storePtr(
+ m_jit.store64(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(argumentsRegister));
+ m_jit.store64(
GPRInfo::returnValueGPR,
AssemblyHelpers::addressFor(unmodifiedArgumentsRegister(argumentsRegister)));
m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
}
- m_jit.loadPtr(AssemblyHelpers::addressFor(argumentsRegister), GPRInfo::regT0);
- m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
+ m_jit.load64(AssemblyHelpers::addressFor(argumentsRegister), GPRInfo::regT0);
+ m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
}
}
// 16) Load the result of the last bytecode operation into regT0.
- if (exit.m_lastSetOperand != std::numeric_limits<int>::max())
- m_jit.loadPtr(AssemblyHelpers::addressFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
-
+ for (size_t i = 0; i < exit.m_setOperands.size(); i++)
+ m_jit.load64(AssemblyHelpers::addressFor((VirtualRegister)exit.m_setOperands[i]), GPRInfo::cachedResultRegister);
+
// 17) Adjust the call frame pointer.
if (exit.m_codeOrigin.inlineCallFrame)
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.cpp b/Source/JavaScriptCore/dfg/DFGOperations.cpp
index db736feeb..11c2c1cef 100644
--- a/Source/JavaScriptCore/dfg/DFGOperations.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOperations.cpp
@@ -1059,35 +1059,38 @@ void DFG_OPERATION operationNotifyGlobalVarWrite(WatchpointSet* watchpointSet)
watchpointSet->notifyWrite();
}
-EncodedJSValue DFG_OPERATION operationResolve(ExecState* exec, Identifier* propertyName)
+EncodedJSValue DFG_OPERATION operationResolve(ExecState* exec, Identifier* propertyName, ResolveOperations* operations)
{
JSGlobalData* globalData = &exec->globalData();
NativeCallFrameTracer tracer(globalData, exec);
- return JSValue::encode(JSScope::resolve(exec, *propertyName));
+ return JSValue::encode(JSScope::resolve(exec, *propertyName, operations));
}
-EncodedJSValue DFG_OPERATION operationResolveBase(ExecState* exec, Identifier* propertyName)
+EncodedJSValue DFG_OPERATION operationResolveBase(ExecState* exec, Identifier* propertyName, ResolveOperations* operations, PutToBaseOperation* putToBaseOperations)
{
JSGlobalData* globalData = &exec->globalData();
NativeCallFrameTracer tracer(globalData, exec);
- return JSValue::encode(JSScope::resolveBase(exec, *propertyName, false));
+ return JSValue::encode(JSScope::resolveBase(exec, *propertyName, false, operations, putToBaseOperations));
}
-EncodedJSValue DFG_OPERATION operationResolveBaseStrictPut(ExecState* exec, Identifier* propertyName)
+EncodedJSValue DFG_OPERATION operationResolveBaseStrictPut(ExecState* exec, Identifier* propertyName, ResolveOperations* operations, PutToBaseOperation* putToBaseOperations)
{
JSGlobalData* globalData = &exec->globalData();
NativeCallFrameTracer tracer(globalData, exec);
- return JSValue::encode(JSScope::resolveBase(exec, *propertyName, true));
+ return JSValue::encode(JSScope::resolveBase(exec, *propertyName, true, operations, putToBaseOperations));
}
-EncodedJSValue DFG_OPERATION operationResolveGlobal(ExecState* exec, GlobalResolveInfo* resolveInfo, JSGlobalObject* globalObject, Identifier* propertyName)
+EncodedJSValue DFG_OPERATION operationResolveGlobal(ExecState* exec, ResolveOperation* resolveOperation, JSGlobalObject* globalObject, Identifier* propertyName)
{
JSGlobalData* globalData = &exec->globalData();
NativeCallFrameTracer tracer(globalData, exec);
-
- return JSValue::encode(JSScope::resolveGlobal(exec, *propertyName, globalObject, &resolveInfo->structure, &resolveInfo->offset));
+ ASSERT(globalObject);
+ ASSERT(resolveOperation->m_operation == ResolveOperation::GetAndReturnGlobalProperty);
+ return JSValue::encode(JSScope::resolveGlobal(exec, *propertyName, globalObject, resolveOperation));
}
EncodedJSValue DFG_OPERATION operationToPrimitive(ExecState* exec, EncodedJSValue value)
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.h b/Source/JavaScriptCore/dfg/DFGOperations.h
index b6530b755..8d2beacec 100644
--- a/Source/JavaScriptCore/dfg/DFGOperations.h
+++ b/Source/JavaScriptCore/dfg/DFGOperations.h
@@ -33,8 +33,6 @@
namespace JSC {
-struct GlobalResolveInfo;
-
namespace DFG {
extern "C" {
@@ -66,8 +64,10 @@ typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EAZ)(ExecState*, JSArray*,
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_ECC)(ExecState*, JSCell*, JSCell*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_ECI)(ExecState*, JSCell*, Identifier*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_ECJ)(ExecState*, JSCell*, EncodedJSValue);
-typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EGriJsgI)(ExecState*, GlobalResolveInfo*, JSGlobalObject*, Identifier*);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EGriJsgI)(ExecState*, ResolveOperation*, JSGlobalObject*, Identifier*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EI)(ExecState*, Identifier*);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EIRo)(ExecState*, Identifier*, ResolveOperations*);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EIRoPtbo)(ExecState*, Identifier*, ResolveOperations*, PutToBaseOperation*);
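+// (E = ExecState*, I = Identifier*, Ro = ResolveOperations*, Ptbo = PutToBaseOperation*)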
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EJ)(ExecState*, EncodedJSValue);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EJA)(ExecState*, EncodedJSValue, JSArray*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EJI)(ExecState*, EncodedJSValue, Identifier*);
@@ -131,10 +131,10 @@ EncodedJSValue DFG_OPERATION operationGetByIdOptimize(ExecState*, EncodedJSValue
EncodedJSValue DFG_OPERATION operationCallCustomGetter(ExecState*, JSCell*, PropertySlot::GetValueFunc, Identifier*) WTF_INTERNAL;
EncodedJSValue DFG_OPERATION operationCallGetter(ExecState*, JSCell*, JSCell*) WTF_INTERNAL;
void DFG_OPERATION operationNotifyGlobalVarWrite(WatchpointSet* watchpointSet) WTF_INTERNAL;
-EncodedJSValue DFG_OPERATION operationResolve(ExecState*, Identifier*) WTF_INTERNAL;
-EncodedJSValue DFG_OPERATION operationResolveBase(ExecState*, Identifier*) WTF_INTERNAL;
-EncodedJSValue DFG_OPERATION operationResolveBaseStrictPut(ExecState*, Identifier*) WTF_INTERNAL;
-EncodedJSValue DFG_OPERATION operationResolveGlobal(ExecState*, GlobalResolveInfo*, JSGlobalObject*, Identifier*) WTF_INTERNAL;
+EncodedJSValue DFG_OPERATION operationResolve(ExecState*, Identifier*, ResolveOperations*) WTF_INTERNAL;
+EncodedJSValue DFG_OPERATION operationResolveBase(ExecState*, Identifier*, ResolveOperations*, PutToBaseOperation*) WTF_INTERNAL;
+EncodedJSValue DFG_OPERATION operationResolveBaseStrictPut(ExecState*, Identifier*, ResolveOperations*, PutToBaseOperation*) WTF_INTERNAL;
+EncodedJSValue DFG_OPERATION operationResolveGlobal(ExecState*, ResolveOperation*, JSGlobalObject*, Identifier*) WTF_INTERNAL;
EncodedJSValue DFG_OPERATION operationToPrimitive(ExecState*, EncodedJSValue) WTF_INTERNAL;
EncodedJSValue DFG_OPERATION operationStrCat(ExecState*, void*, size_t) WTF_INTERNAL;
char* DFG_OPERATION operationNewArray(ExecState*, Structure*, void*, size_t) WTF_INTERNAL;
diff --git a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
index d76fd8018..fee7a3ca2 100644
--- a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
@@ -116,7 +116,21 @@ private:
return false;
return !!m_graph.valueOfNumberConstant(nodeIndex);
}
-
+
+ SpeculatedType speculatedDoubleTypeForPrediction(SpeculatedType value)
+ {
+ if (!isNumberSpeculation(value))
+ return SpecDouble;
+ if (value & SpecDoubleNaN)
+ return SpecDouble;
+ return SpecDoubleReal;
+ }
+
+ SpeculatedType speculatedDoubleTypeForPredictions(SpeculatedType left, SpeculatedType right)
+ {
+ return speculatedDoubleTypeForPrediction(mergeSpeculations(left, right));
+ }
+
void propagate(Node& node)
{
if (!node.shouldGenerate())
@@ -248,7 +262,7 @@ private:
if (m_graph.addShouldSpeculateInteger(node))
changed |= mergePrediction(SpecInt32);
else
- changed |= mergePrediction(SpecDouble);
+ changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right));
} else if (!(left & SpecNumber) || !(right & SpecNumber)) {
// left or right is definitely something other than a number.
changed |= mergePrediction(SpecString);
@@ -272,7 +286,7 @@ private:
if (m_graph.addShouldSpeculateInteger(node))
changed |= mergePrediction(SpecInt32);
else
- changed |= mergePrediction(SpecDouble);
+ changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right));
}
if (isNotNegZero(node.child1().index()) || isNotNegZero(node.child2().index()))
@@ -291,7 +305,7 @@ private:
if (m_graph.addShouldSpeculateInteger(node))
changed |= mergePrediction(SpecInt32);
else
- changed |= mergePrediction(SpecDouble);
+ changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right));
}
if (isNotZero(node.child1().index()) || isNotZero(node.child2().index()))
@@ -307,7 +321,7 @@ private:
if (m_graph.negateShouldSpeculateInteger(node))
changed |= mergePrediction(SpecInt32);
else
- changed |= mergePrediction(SpecDouble);
+ changed |= mergePrediction(speculatedDoubleTypeForPrediction(m_graph[node.child1()].prediction()));
}
changed |= m_graph[node.child1()].mergeFlags(flags);
@@ -323,7 +337,7 @@ private:
&& nodeCanSpeculateInteger(node.arithNodeFlags()))
changed |= mergePrediction(SpecInt32);
else
- changed |= mergePrediction(SpecDouble);
+ changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right));
}
flags |= NodeUsedAsNumber;
@@ -340,7 +354,7 @@ private:
if (m_graph.mulShouldSpeculateInteger(node))
changed |= mergePrediction(SpecInt32);
else
- changed |= mergePrediction(SpecDouble);
+ changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right));
}
// As soon as a multiply happens, we can easily end up in the part
@@ -388,7 +402,7 @@ private:
if (nodeCanSpeculateInteger(node.arithNodeFlags()))
changed |= mergePrediction(child);
else
- changed |= setPrediction(SpecDouble);
+ changed |= setPrediction(speculatedDoubleTypeForPrediction(child));
flags &= ~NodeNeedsNegZero;
changed |= m_graph[node.child1()].mergeFlags(flags);
@@ -674,6 +688,7 @@ private:
case CheckNumber:
case CheckArgumentsNotCreated:
case GlobalVarWatchpoint:
+ case GarbageValue:
changed |= mergeDefaultFlags(node);
break;
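
The two new helpers let arithmetic keep a NaN-free double prediction (SpecDoubleReal) when every input is a number speculation that cannot produce NaN, instead of always widening to SpecDouble. A standalone model of the refinement; the bit values are illustrative, not JSC's actual encodings:

    #include <cstdint>
    typedef uint32_t SpeculatedType;
    static const SpeculatedType SpecInt32      = 1u << 0;
    static const SpeculatedType SpecDoubleReal = 1u << 1; // doubles excluding NaN
    static const SpeculatedType SpecDoubleNaN  = 1u << 2;
    static const SpeculatedType SpecDouble     = SpecDoubleReal | SpecDoubleNaN;
    static const SpeculatedType SpecNumber     = SpecInt32 | SpecDouble;

    static bool isNumberSpeculation(SpeculatedType t) { return t && !(t & ~SpecNumber); }
    static SpeculatedType mergeSpeculations(SpeculatedType a, SpeculatedType b) { return a | b; }

    // Mirrors speculatedDoubleTypeForPrediction(): only claim a NaN-free double
    // when the input is known numeric and known not to contain NaN.
    static SpeculatedType speculatedDoubleTypeForPrediction(SpeculatedType value)
    {
        if (!isNumberSpeculation(value))
            return SpecDouble;
        if (value & SpecDoubleNaN)
            return SpecDouble;
        return SpecDoubleReal;
    }

    static SpeculatedType speculatedDoubleTypeForPredictions(SpeculatedType left, SpeculatedType right)
    {
        return speculatedDoubleTypeForPrediction(mergeSpeculations(left, right));
    }

For instance, an ArithAdd of two SpecInt32 inputs that overflows to double can now be predicted SpecDoubleReal, since int32 + int32 never yields NaN.
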
diff --git a/Source/JavaScriptCore/dfg/DFGRepatch.cpp b/Source/JavaScriptCore/dfg/DFGRepatch.cpp
index 6fb185c12..531a525d5 100644
--- a/Source/JavaScriptCore/dfg/DFGRepatch.cpp
+++ b/Source/JavaScriptCore/dfg/DFGRepatch.cpp
@@ -192,7 +192,7 @@ static void generateProtoChainAccessStub(ExecState* exec, StructureStubInfo& stu
if (isInlineOffset(offset)) {
#if USE(JSVALUE64)
- stubJit.loadPtr(protoObject->locationForOffset(offset), resultGPR);
+ stubJit.load64(protoObject->locationForOffset(offset), resultGPR);
#elif USE(JSVALUE32_64)
stubJit.move(MacroAssembler::TrustedImmPtr(protoObject->locationForOffset(offset)), resultGPR);
stubJit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
@@ -201,7 +201,7 @@ static void generateProtoChainAccessStub(ExecState* exec, StructureStubInfo& stu
} else {
stubJit.loadPtr(protoObject->butterflyAddress(), resultGPR);
#if USE(JSVALUE64)
- stubJit.loadPtr(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>)), resultGPR);
+ stubJit.load64(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>)), resultGPR);
#elif USE(JSVALUE32_64)
stubJit.load32(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
stubJit.load32(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
@@ -263,7 +263,7 @@ static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier
failureCases.append(stubJit.branch32(MacroAssembler::LessThan, scratchGPR, MacroAssembler::TrustedImm32(0)));
#if USE(JSVALUE64)
- stubJit.orPtr(GPRInfo::tagTypeNumberRegister, scratchGPR, resultGPR);
+ stubJit.or64(GPRInfo::tagTypeNumberRegister, scratchGPR, resultGPR);
#elif USE(JSVALUE32_64)
stubJit.move(scratchGPR, resultGPR);
stubJit.move(JITCompiler::TrustedImm32(0xffffffff), resultTagGPR); // JSValue::Int32Tag
@@ -299,7 +299,7 @@ static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier
Structure* structure = baseCell->structure();
if (!slot.isCacheable())
return false;
- if (structure->isUncacheableDictionary() || structure->typeInfo().prohibitsPropertyCaching())
+ if (!structure->propertyAccessesAreCacheable())
return false;
// Optimize self access.
@@ -421,14 +421,14 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
ASSERT(baseGPR != scratchGPR);
if (isInlineOffset(slot.cachedOffset())) {
#if USE(JSVALUE64)
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
+ stubJit.load64(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
#else
stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
#endif
} else {
stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
#if USE(JSVALUE64)
- stubJit.loadPtr(MacroAssembler::Address(scratchGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
+ stubJit.load64(MacroAssembler::Address(scratchGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
#else
stubJit.load32(MacroAssembler::Address(scratchGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
#endif
@@ -465,7 +465,7 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
} else {
if (isInlineOffset(slot.cachedOffset())) {
#if USE(JSVALUE64)
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR);
+ stubJit.load64(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR);
#else
if (baseGPR == resultTagGPR) {
stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
@@ -478,7 +478,7 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
} else {
stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR);
#if USE(JSVALUE64)
- stubJit.loadPtr(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR);
+ stubJit.load64(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR);
#else
stubJit.load32(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
stubJit.load32(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
@@ -682,10 +682,10 @@ static void emitPutReplaceStub(
#if USE(JSVALUE64)
if (isInlineOffset(slot.cachedOffset()))
- stubJit.storePtr(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue)));
+ stubJit.store64(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue)));
else {
stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
- stubJit.storePtr(valueGPR, MacroAssembler::Address(scratchGPR, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue)));
+ stubJit.store64(valueGPR, MacroAssembler::Address(scratchGPR, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue)));
}
#elif USE(JSVALUE32_64)
if (isInlineOffset(slot.cachedOffset())) {
@@ -854,11 +854,11 @@ static void emitPutTransitionStub(
stubJit.storePtr(MacroAssembler::TrustedImmPtr(structure), MacroAssembler::Address(baseGPR, JSCell::structureOffset()));
#if USE(JSVALUE64)
if (isInlineOffset(slot.cachedOffset()))
- stubJit.storePtr(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue)));
+ stubJit.store64(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue)));
else {
if (!scratchGPR1HasStorage)
stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR1);
- stubJit.storePtr(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue)));
+ stubJit.store64(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue)));
}
#elif USE(JSVALUE32_64)
if (isInlineOffset(slot.cachedOffset())) {
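
On JSVALUE64 these stubs now fetch and store JSValues with load64/store64 rather than loadPtr/storePtr, and the two storage shapes differ only in addressing: inline properties live in the cell itself, out-of-line properties sit behind one butterfly indirection. The get-side pattern, assembled from the hunks above:

    // Inline:      value = *(base + offsetOfInlineStorage + inlineIndex * 8)
    // Out-of-line: storage = *(base + butterflyOffset)
    //              value   = *(storage + butterflyIndex * 8)
    if (isInlineOffset(offset)) {
        stubJit.load64(MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage()
            + offsetInInlineStorage(offset) * sizeof(JSValue)), resultGPR);
    } else {
        stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
        stubJit.load64(MacroAssembler::Address(scratchGPR,
            offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>)), resultGPR);
    }
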
diff --git a/Source/JavaScriptCore/dfg/DFGScratchRegisterAllocator.h b/Source/JavaScriptCore/dfg/DFGScratchRegisterAllocator.h
index 9a65e8b7d..706bcd61d 100644
--- a/Source/JavaScriptCore/dfg/DFGScratchRegisterAllocator.h
+++ b/Source/JavaScriptCore/dfg/DFGScratchRegisterAllocator.h
@@ -127,15 +127,20 @@ public:
{
unsigned count = 0;
for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
- if (m_usedRegisters.getGPRByIndex(i))
- jit.storePtr(GPRInfo::toRegister(i), scratchBuffer->m_buffer + (count++));
+ if (m_usedRegisters.getGPRByIndex(i)) {
+#if USE(JSVALUE64)
+ jit.store64(GPRInfo::toRegister(i), static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++));
+#else
+ jit.store32(GPRInfo::toRegister(i), static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++));
+#endif
+ }
if (scratchGPR == InvalidGPRReg && !m_lockedRegisters.getGPRByIndex(i) && !m_scratchRegisters.getGPRByIndex(i))
scratchGPR = GPRInfo::toRegister(i);
}
ASSERT(scratchGPR != InvalidGPRReg);
for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
if (m_usedRegisters.getFPRByIndex(i)) {
- jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->m_buffer + (count++)), scratchGPR);
+ jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)), scratchGPR);
jit.storeDouble(FPRInfo::toRegister(i), scratchGPR);
}
}
@@ -165,15 +170,20 @@ public:
unsigned count = m_usedRegisters.numberOfSetGPRs();
for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
if (m_usedRegisters.getFPRByIndex(i)) {
- jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->m_buffer + (count++)), scratchGPR);
+ jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)), scratchGPR);
jit.loadDouble(scratchGPR, FPRInfo::toRegister(i));
}
}
count = 0;
for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
- if (m_usedRegisters.getGPRByIndex(i))
- jit.loadPtr(scratchBuffer->m_buffer + (count++), GPRInfo::toRegister(i));
+ if (m_usedRegisters.getGPRByIndex(i)) {
+#if USE(JSVALUE64)
+ jit.load64(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++), GPRInfo::toRegister(i));
+#else
+ jit.load32(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++), GPRInfo::toRegister(i));
+#endif
+ }
}
}
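
The save/restore loops now index the scratch buffer in EncodedJSValue-sized (8-byte) slots on both value representations, so slot k names the same storage in both loops; JSVALUE32_64 builds simply store and reload the low 32 bits. The core of the scheme:

    // Slot k is dataBuffer() + k on either build, so save and restore agree on
    // layout even though a GPR is 8 bytes under JSVALUE64 and 4 under JSVALUE32_64.
    EncodedJSValue* slots = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
    #if USE(JSVALUE64)
    jit.store64(GPRInfo::toRegister(i), slots + count++);
    #else
    jit.store32(GPRInfo::toRegister(i), slots + count++); // low word only
    #endif
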
diff --git a/Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h b/Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h
index ab99b014d..bb04646bf 100644
--- a/Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h
+++ b/Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h
@@ -41,6 +41,7 @@ enum SilentSpillAction {
Store32Tag,
Store32Payload,
StorePtr,
+ Store64,
StoreDouble
};
@@ -61,6 +62,7 @@ enum SilentFillAction {
Load32Payload,
Load32PayloadBoxInt,
LoadPtr,
+ Load64,
LoadDouble,
LoadDoubleBoxDouble,
LoadJSUnboxDouble
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
index 850d5aa74..a9b91d046 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
@@ -163,7 +163,9 @@ void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecov
#endif
unsigned setLocalIndexInBlock = m_indexInBlock + 1;
-
+
+ OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
+
Node* setLocal = &at(m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock));
bool hadInt32ToDouble = false;
@@ -173,7 +175,7 @@ void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecov
}
if (setLocal->op() == Flush || setLocal->op() == Phantom)
setLocal = &at(m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock));
-
+
if (!!valueRecovery) {
if (hadInt32ToDouble)
ASSERT(at(setLocal->child1()).child1() == m_compileIndex);
@@ -188,16 +190,34 @@ void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecov
// We're at an inlined return. Use a backward speculation instead.
return;
}
+
+ exit.m_setOperands[0] = setLocal->local();
+ while (nextNode->codeOrigin == at(m_compileIndex).codeOrigin) {
+ ++setLocalIndexInBlock;
+ Node* nextSetLocal = nextNode;
+ if (nextSetLocal->op() == Int32ToDouble)
+ nextSetLocal = &at(m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock));
+
+ if (nextSetLocal->op() == Flush || nextSetLocal->op() == Phantom)
+ nextSetLocal = &at(m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock));
+
+ nextNode = &at(m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock + 1));
+ ASSERT(nextNode->op() != Jump || nextNode->codeOrigin != at(m_compileIndex).codeOrigin);
+ ASSERT(nextSetLocal->op() == SetLocal);
+ exit.m_setOperands.append(nextSetLocal->local());
+ }
+
ASSERT(nextNode->codeOrigin != at(m_compileIndex).codeOrigin);
-
- OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
+
exit.m_codeOrigin = nextNode->codeOrigin;
if (!valueRecovery)
return;
- exit.m_lastSetOperand = setLocal->local();
- exit.m_valueRecoveryOverride = adoptRef(
- new ValueRecoveryOverride(setLocal->local(), valueRecovery));
+
+ ASSERT(exit.m_setOperands.size() == 1);
+ for (size_t i = 0; i < exit.m_setOperands.size(); i++)
+ exit.m_valueRecoveryOverrides.append(adoptRef(new ValueRecoveryOverride(exit.m_setOperands[i], valueRecovery)));
+
}
JumpReplacementWatchpoint* SpeculativeJIT::forwardSpeculationWatchpoint(ExitKind kind)
@@ -417,7 +437,7 @@ void SpeculativeJIT::checkArray(Node& node)
MacroAssembler::Address(baseReg, JSCell::structureOffset()), tempGPR);
m_jit.load8(MacroAssembler::Address(tempGPR, Structure::indexingTypeOffset()), tempGPR);
speculationCheck(
- Uncountable, JSValueRegs(), NoNode,
+ BadIndexingType, JSValueSource::unboxedCell(baseReg), NoNode,
jumpSlowForUnwantedArrayMode(tempGPR, node.arrayMode()));
noResult(m_compileIndex);
@@ -515,7 +535,7 @@ void SpeculativeJIT::arrayify(Node& node, GPRReg baseReg, GPRReg propertyReg)
// Next check that the object does not intercept indexed accesses. If it does,
// then this mode won't work.
speculationCheck(
- Uncountable, JSValueRegs(), NoNode,
+ BadIndexingType, JSValueSource::unboxedCell(baseReg), NoNode,
m_jit.branchTest8(
MacroAssembler::NonZero,
MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
@@ -549,7 +569,7 @@ void SpeculativeJIT::arrayify(Node& node, GPRReg baseReg, GPRReg propertyReg)
m_jit.load8(
MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), structureGPR);
speculationCheck(
- Uncountable, JSValueRegs(), NoNode,
+ BadIndexingType, JSValueSource::unboxedCell(baseReg), NoNode,
jumpSlowForUnwantedArrayMode(structureGPR, desiredArrayMode));
done.link(&m_jit);
@@ -559,10 +579,17 @@ void SpeculativeJIT::arrayify(Node& node, GPRReg baseReg, GPRReg propertyReg)
void SpeculativeJIT::arrayify(Node& node)
{
ASSERT(modeIsSpecific(node.arrayMode()));
- ASSERT(!modeAlreadyChecked(m_state.forNode(node.child1()), node.arrayMode()));
SpeculateCellOperand base(this, node.child1());
+ if (modeAlreadyChecked(m_state.forNode(node.child1()), node.arrayMode())) {
+ GPRTemporary temp(this);
+ m_jit.loadPtr(
+ MacroAssembler::Address(base.gpr(), JSObject::butterflyOffset()), temp.gpr());
+ storageResult(temp.gpr(), m_compileIndex);
+ return;
+ }
+
if (!node.child2()) {
arrayify(node, base.gpr(), InvalidGPRReg);
return;
@@ -1686,14 +1713,14 @@ void SpeculativeJIT::checkArgumentTypes()
#if USE(JSVALUE64)
if (isInt32Speculation(predictedType))
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
else if (isBooleanSpeculation(predictedType)) {
GPRTemporary temp(this);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
+ m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
} else if (isCellSpeculation(predictedType))
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
#else
if (isInt32Speculation(predictedType))
speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
@@ -1953,10 +1980,10 @@ void SpeculativeJIT::compileValueToInt32(Node& node)
FPRTemporary tempFpr(this);
FPRReg fpr = tempFpr.fpr();
- JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
if (!isNumberSpeculation(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueRegs(gpr), node.child1().index(), m_jit.branchTestPtr(MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
+ speculationCheck(BadType, JSValueRegs(gpr), node.child1().index(), m_jit.branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
// First, if we get here we have a double encoded as a JSValue
m_jit.move(gpr, resultGpr);
@@ -2099,8 +2126,8 @@ void SpeculativeJIT::compileInt32ToDouble(Node& node)
ASSERT(isInt32Constant(node.child1().index()));
FPRTemporary result(this);
GPRTemporary temp(this);
- m_jit.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfNumberConstant(node.child1().index())))), temp.gpr());
- m_jit.movePtrToDouble(temp.gpr(), result.fpr());
+ m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(node.child1().index()))), temp.gpr());
+ m_jit.move64ToDouble(temp.gpr(), result.fpr());
doubleResult(result.fpr(), m_compileIndex);
return;
}
@@ -2124,13 +2151,13 @@ void SpeculativeJIT::compileInt32ToDouble(Node& node)
GPRReg tempGPR = temp.gpr();
FPRReg resultFPR = result.fpr();
- JITCompiler::Jump isInteger = m_jit.branchPtr(
+ JITCompiler::Jump isInteger = m_jit.branch64(
MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
if (!isNumberSpeculation(m_state.forNode(node.child1()).m_type)) {
speculationCheck(
BadType, JSValueRegs(op1GPR), node.child1(),
- m_jit.branchTestPtr(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
+ m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
}
m_jit.move(op1GPR, tempGPR);
@@ -2460,20 +2487,18 @@ void SpeculativeJIT::compileInstanceOfForObject(Node&, GPRReg valueReg, GPRReg p
MacroAssembler::Label loop(&m_jit);
m_jit.loadPtr(MacroAssembler::Address(scratchReg, JSCell::structureOffset()), scratchReg);
#if USE(JSVALUE64)
- m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset()), scratchReg);
+ m_jit.load64(MacroAssembler::Address(scratchReg, Structure::prototypeOffset()), scratchReg);
+ MacroAssembler::Jump isInstance = m_jit.branch64(MacroAssembler::Equal, scratchReg, prototypeReg);
+ m_jit.branchTest64(MacroAssembler::Zero, scratchReg, GPRInfo::tagMaskRegister).linkTo(loop, &m_jit);
#else
m_jit.load32(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), scratchReg);
-#endif
MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
-#if USE(JSVALUE64)
- m_jit.branchTestPtr(MacroAssembler::Zero, scratchReg, GPRInfo::tagMaskRegister).linkTo(loop, &m_jit);
-#else
m_jit.branchTest32(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
#endif
// No match - result is false.
#if USE(JSVALUE64)
- m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(false))), scratchReg);
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
#else
m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
#endif
@@ -2481,7 +2506,7 @@ void SpeculativeJIT::compileInstanceOfForObject(Node&, GPRReg valueReg, GPRReg p
isInstance.link(&m_jit);
#if USE(JSVALUE64)
- m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(true))), scratchReg);
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
#else
m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
#endif
@@ -2507,8 +2532,8 @@ void SpeculativeJIT::compileInstanceOf(Node& node)
#if USE(JSVALUE64)
GPRReg valueReg = value.gpr();
- MacroAssembler::Jump isCell = m_jit.branchTestPtr(MacroAssembler::Zero, valueReg, GPRInfo::tagMaskRegister);
- m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(false))), scratchReg);
+ MacroAssembler::Jump isCell = m_jit.branchTest64(MacroAssembler::Zero, valueReg, GPRInfo::tagMaskRegister);
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
#else
GPRReg valueTagReg = value.tagGPR();
GPRReg valueReg = value.payloadGPR();
@@ -3071,7 +3096,7 @@ bool SpeculativeJIT::compileStrictEqForConstant(Node& node, Edge value, JSValue
}
#if USE(JSVALUE64)
- branchPtr(condition, op1.gpr(), MacroAssembler::TrustedImmPtr(bitwise_cast<void*>(JSValue::encode(constant))), taken);
+ branch64(condition, op1.gpr(), MacroAssembler::TrustedImm64(JSValue::encode(constant)), taken);
#else
GPRReg payloadGPR = op1.payloadGPR();
GPRReg tagGPR = op1.tagGPR();
@@ -3101,8 +3126,8 @@ bool SpeculativeJIT::compileStrictEqForConstant(Node& node, Edge value, JSValue
#if USE(JSVALUE64)
GPRReg op1GPR = op1.gpr();
GPRReg resultGPR = result.gpr();
- m_jit.move(MacroAssembler::TrustedImmPtr(bitwise_cast<void*>(ValueFalse)), resultGPR);
- MacroAssembler::Jump notEqual = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, MacroAssembler::TrustedImmPtr(bitwise_cast<void*>(JSValue::encode(constant))));
+ m_jit.move(MacroAssembler::TrustedImm64(ValueFalse), resultGPR);
+ MacroAssembler::Jump notEqual = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, MacroAssembler::TrustedImm64(JSValue::encode(constant)));
m_jit.or32(MacroAssembler::TrustedImm32(1), resultGPR);
notEqual.link(&m_jit);
jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean);
@@ -3282,7 +3307,7 @@ void SpeculativeJIT::compileGetByValOnArguments(Node& node)
resultReg);
jsValueResult(resultTagReg, resultReg, m_compileIndex);
#else
- m_jit.loadPtr(
+ m_jit.load64(
MacroAssembler::BaseIndex(
scratchReg, resultReg, MacroAssembler::TimesEight,
CallFrame::thisArgumentOffset() * sizeof(Register) - sizeof(Register)),
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
index 90b6d483a..3796cc704 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
@@ -71,6 +71,8 @@ private:
typedef JITCompiler::Imm32 Imm32;
typedef JITCompiler::TrustedImmPtr TrustedImmPtr;
typedef JITCompiler::ImmPtr ImmPtr;
+ typedef JITCompiler::TrustedImm64 TrustedImm64;
+ typedef JITCompiler::Imm64 Imm64;
// These constants are used to set priorities for spill order for
// the register allocator.
@@ -347,9 +349,11 @@ public:
ASSERT(info.gpr() == source);
if (registerFormat == DataFormatInteger)
spillAction = Store32Payload;
- else {
- ASSERT(registerFormat & DataFormatJS || registerFormat == DataFormatCell || registerFormat == DataFormatStorage);
+ else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
spillAction = StorePtr;
+ else {
+ ASSERT(registerFormat & DataFormatJS);
+ spillAction = Store64;
}
#elif USE(JSVALUE32_64)
if (registerFormat & DataFormatJS) {
@@ -414,7 +418,7 @@ public:
ASSERT(registerFormat == DataFormatJSDouble);
fillAction = LoadDoubleBoxDouble;
} else
- fillAction = LoadPtr;
+ fillAction = Load64;
#else
ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
if (node.hasConstant())
@@ -501,6 +505,11 @@ public:
case StorePtr:
m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()));
break;
+#if USE(JSVALUE64)
+ case Store64:
+ m_jit.store64(plan.gpr(), JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()));
+ break;
+#endif
case StoreDouble:
m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()));
break;
@@ -528,25 +537,25 @@ public:
break;
#if USE(JSVALUE64)
case SetTrustedJSConstant:
- m_jit.move(valueOfJSConstantAsImmPtr(plan.nodeIndex()).asTrustedImmPtr(), plan.gpr());
+ m_jit.move(valueOfJSConstantAsImm64(plan.nodeIndex()).asTrustedImm64(), plan.gpr());
break;
case SetJSConstant:
- m_jit.move(valueOfJSConstantAsImmPtr(plan.nodeIndex()), plan.gpr());
+ m_jit.move(valueOfJSConstantAsImm64(plan.nodeIndex()), plan.gpr());
break;
case SetDoubleConstant:
- m_jit.move(ImmPtr(bitwise_cast<void*>(valueOfNumberConstant(plan.nodeIndex()))), canTrample);
- m_jit.movePtrToDouble(canTrample, plan.fpr());
+ m_jit.move(Imm64(valueOfNumberConstant(plan.nodeIndex())), canTrample);
+ m_jit.move64ToDouble(canTrample, plan.fpr());
break;
case Load32PayloadBoxInt:
m_jit.load32(JITCompiler::payloadFor(at(plan.nodeIndex()).virtualRegister()), plan.gpr());
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, plan.gpr());
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
break;
case LoadDoubleBoxDouble:
- m_jit.loadPtr(JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()), plan.gpr());
- m_jit.subPtr(GPRInfo::tagTypeNumberRegister, plan.gpr());
+ m_jit.load64(JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()), plan.gpr());
+ m_jit.sub64(GPRInfo::tagTypeNumberRegister, plan.gpr());
break;
case LoadJSUnboxDouble:
- m_jit.loadPtr(JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()), canTrample);
+ m_jit.load64(JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()), canTrample);
unboxDouble(canTrample, plan.fpr());
break;
#else
@@ -578,6 +587,11 @@ public:
case LoadPtr:
m_jit.loadPtr(JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()), plan.gpr());
break;
+#if USE(JSVALUE64)
+ case Load64:
+ m_jit.load64(JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()), plan.gpr());
+ break;
+#endif
case LoadDouble:
m_jit.loadDouble(JITCompiler::addressFor(at(plan.nodeIndex()).virtualRegister()), plan.fpr());
break;
@@ -752,10 +766,10 @@ public:
// We need to box int32 and cell values ...
// but on JSVALUE64 boxing a cell is a no-op!
if (spillFormat == DataFormatInteger)
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, reg);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, reg);
// Spill the value, and record it as spilled in its boxed form.
- m_jit.storePtr(reg, JITCompiler::addressFor(spillMe));
+ m_jit.store64(reg, JITCompiler::addressFor(spillMe));
info.spill(*m_stream, spillMe, (DataFormat)(spillFormat | DataFormatJS));
return;
#elif USE(JSVALUE32_64)
@@ -830,6 +844,16 @@ public:
return &m_jit.codeBlock()->identifier(index);
}
+ ResolveOperations* resolveOperations(unsigned index)
+ {
+ return m_jit.codeBlock()->resolveOperations(index);
+ }
+
+ PutToBaseOperation* putToBaseOperation(unsigned index)
+ {
+ return m_jit.codeBlock()->putToBaseOperation(index);
+ }
+
// Spill all VirtualRegisters back to the JSStack.
void flushRegisters()
{
@@ -865,9 +889,9 @@ public:
#endif
#if USE(JSVALUE64)
- MacroAssembler::ImmPtr valueOfJSConstantAsImmPtr(NodeIndex nodeIndex)
+ MacroAssembler::Imm64 valueOfJSConstantAsImm64(NodeIndex nodeIndex)
{
- return MacroAssembler::ImmPtr(JSValue::encode(valueOfJSConstant(nodeIndex)));
+ return MacroAssembler::Imm64(JSValue::encode(valueOfJSConstant(nodeIndex)));
}
#endif
@@ -1209,6 +1233,16 @@ public:
m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier));
return appendCallWithExceptionCheckSetResult(operation, result);
}
+ JITCompiler::Call callOperation(J_DFGOperation_EIRo operation, GPRReg result, Identifier* identifier, ResolveOperations* operations)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier), TrustedImmPtr(operations));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_DFGOperation_EIRoPtbo operation, GPRReg result, Identifier* identifier, ResolveOperations* operations, PutToBaseOperation* putToBaseOperations)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier), TrustedImmPtr(operations), TrustedImmPtr(putToBaseOperations));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
JITCompiler::Call callOperation(J_DFGOperation_EA operation, GPRReg result, GPRReg arg1)
{
m_jit.setupArgumentsWithExecState(arg1);
@@ -1339,6 +1373,11 @@ public:
m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheckSetResult(operation, result);
}
+ JITCompiler::Call callOperation(J_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
+ {
+ m_jit.setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
JITCompiler::Call callOperation(S_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
{
m_jit.setupArgumentsWithExecState(arg1, arg2);
@@ -1354,14 +1393,19 @@ public:
m_jit.setupArgumentsWithExecState(arg1, arg2);
return appendCallWithExceptionCheckSetResult(operation, result);
}
+ JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, MacroAssembler::TrustedImm32 imm)
{
- m_jit.setupArgumentsWithExecState(arg1, MacroAssembler::TrustedImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm.m_value)))));
+ m_jit.setupArgumentsWithExecState(arg1, MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(imm.m_value))));
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, MacroAssembler::TrustedImm32 imm, GPRReg arg2)
{
- m_jit.setupArgumentsWithExecState(MacroAssembler::TrustedImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm.m_value)))), arg2);
+ m_jit.setupArgumentsWithExecState(MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(imm.m_value))), arg2);
return appendCallWithExceptionCheckSetResult(operation, result);
}
JITCompiler::Call callOperation(J_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
@@ -1707,6 +1751,19 @@ public:
m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG imm, TrustedImm32(JSValue::Int32Tag), arg2Payload, arg2Tag);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
+
+ JITCompiler::Call callOperation(J_DFGOperation_EIRo operation, GPRReg resultTag, GPRReg resultPayload, Identifier* identifier, ResolveOperations* operations)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier), TrustedImmPtr(operations));
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+
+ JITCompiler::Call callOperation(J_DFGOperation_EIRoPtbo operation, GPRReg resultTag, GPRReg resultPayload, Identifier* identifier, ResolveOperations* operations, PutToBaseOperation* putToBaseOperations)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier), TrustedImmPtr(operations), TrustedImmPtr(putToBaseOperations));
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+
JITCompiler::Call callOperation(J_DFGOperation_ECJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload)
{
m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag);
@@ -2030,6 +2087,20 @@ public:
notTaken.link(&m_jit);
}
+#if USE(JSVALUE64)
+ template<typename T, typename U>
+ void branch64(JITCompiler::RelationalCondition cond, T left, U right, BlockIndex destination)
+ {
+ if (!haveEdgeCodeToEmit(destination))
+ return addBranch(m_jit.branch64(cond, left, right), destination);
+
+ JITCompiler::Jump notTaken = m_jit.branch64(JITCompiler::invert(cond), left, right);
+ emitEdgeCode(destination);
+ addBranch(m_jit.jump(), destination);
+ notTaken.link(&m_jit);
+ }
+#endif
+
template<typename T, typename U>
void branchPtr(JITCompiler::RelationalCondition cond, T left, U right, BlockIndex destination)
{
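
The new branch64 wrapper reuses the block-edge pattern of the existing branchPtr helper: when the destination block has edge code to emit, branch on the inverted condition past that code, emit it, then jump unconditionally, so the not-taken path falls through untouched. In schematic form:

    //   branch64(cond, L, R, dest) with edge code becomes:
    //     notTaken = branch64(!cond, L, R)   // skip over the edge code
    //     emitEdgeCode(dest)
    //     jump dest
    //   notTaken: ...fall through...
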
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
index 41fe8db0f..453851ba3 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
@@ -3879,7 +3879,7 @@ void SpeculativeJIT::compile(Node& node)
if (node.structureSet().size() == 1) {
speculationCheckWithConditionalDirection(
- BadCache, JSValueRegs(), NoNode,
+ BadCache, JSValueSource::unboxedCell(base.gpr()), NoNode,
m_jit.branchWeakPtr(
JITCompiler::NotEqual,
JITCompiler::Address(base.gpr(), JSCell::structureOffset()),
@@ -3896,7 +3896,7 @@ void SpeculativeJIT::compile(Node& node)
done.append(m_jit.branchWeakPtr(JITCompiler::Equal, structure.gpr(), node.structureSet()[i]));
speculationCheckWithConditionalDirection(
- BadCache, JSValueRegs(), NoNode,
+ BadCache, JSValueSource::unboxedCell(base.gpr()), NoNode,
m_jit.branchWeakPtr(
JITCompiler::NotEqual, structure.gpr(), node.structureSet().last()),
node.op() == ForwardCheckStructure);
@@ -3910,6 +3910,13 @@ void SpeculativeJIT::compile(Node& node)
case StructureTransitionWatchpoint:
case ForwardStructureTransitionWatchpoint: {
+ // There is a fascinating question here of what to do about array profiling.
+ // We *could* try to tell the OSR exit about where the base of the access is.
+ // The DFG will have kept it alive, though it may not be in a register, and
+ // we shouldn't really load it since that could be a waste. For now though,
+ // we'll just rely on the fact that when a watchpoint fires then that's
+ // quite a hint already.
+
m_jit.addWeakReference(node.structure());
node.structure()->addTransitionWatchpoint(
speculationWatchpointWithConditionalDirection(
@@ -4288,7 +4295,8 @@ void SpeculativeJIT::compile(Node& node)
flushRegisters();
GPRResult resultPayload(this);
GPRResult2 resultTag(this);
- callOperation(operationResolve, resultTag.gpr(), resultPayload.gpr(), identifier(node.identifierNumber()));
+ ResolveOperationData& data = m_jit.graph().m_resolveOperationsData[node.resolveOperationsDataIndex()];
+ callOperation(operationResolve, resultTag.gpr(), resultPayload.gpr(), identifier(data.identifierNumber), resolveOperations(data.resolveOperationsIndex));
jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
break;
}
@@ -4297,7 +4305,8 @@ void SpeculativeJIT::compile(Node& node)
flushRegisters();
GPRResult resultPayload(this);
GPRResult2 resultTag(this);
- callOperation(operationResolveBase, resultTag.gpr(), resultPayload.gpr(), identifier(node.identifierNumber()));
+ ResolveOperationData& data = m_jit.graph().m_resolveOperationsData[node.resolveOperationsDataIndex()];
+ callOperation(operationResolveBase, resultTag.gpr(), resultPayload.gpr(), identifier(data.identifierNumber), resolveOperations(data.resolveOperationsIndex), putToBaseOperation(data.putToBaseOperationIndex));
jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
break;
}
@@ -4306,7 +4315,8 @@ void SpeculativeJIT::compile(Node& node)
flushRegisters();
GPRResult resultPayload(this);
GPRResult2 resultTag(this);
- callOperation(operationResolveBaseStrictPut, resultTag.gpr(), resultPayload.gpr(), identifier(node.identifierNumber()));
+ ResolveOperationData& data = m_jit.graph().m_resolveOperationsData[node.resolveOperationsDataIndex()];
+ callOperation(operationResolveBaseStrictPut, resultTag.gpr(), resultPayload.gpr(), identifier(data.identifierNumber), resolveOperations(data.resolveOperationsIndex), putToBaseOperation(data.putToBaseOperationIndex));
jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
break;
}
@@ -4323,18 +4333,18 @@ void SpeculativeJIT::compile(Node& node)
GPRReg resultPayloadGPR = resultPayload.gpr();
ResolveGlobalData& data = m_jit.graph().m_resolveGlobalData[node.resolveGlobalDataIndex()];
- GlobalResolveInfo* resolveInfoAddress = &(m_jit.codeBlock()->globalResolveInfo(data.resolveInfoIndex));
+ ResolveOperation* resolveOperationAddress = &(m_jit.codeBlock()->resolveOperations(data.resolveOperationsIndex)->data()[data.resolvePropertyIndex]);
// Check Structure of global object
m_jit.move(JITCompiler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)), globalObjectGPR);
- m_jit.move(JITCompiler::TrustedImmPtr(resolveInfoAddress), resolveInfoGPR);
- m_jit.loadPtr(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, structure)), resultPayloadGPR);
+ m_jit.move(JITCompiler::TrustedImmPtr(resolveOperationAddress), resolveInfoGPR);
+ m_jit.loadPtr(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(ResolveOperation, m_structure)), resultPayloadGPR);
JITCompiler::Jump structuresNotMatch = m_jit.branchPtr(JITCompiler::NotEqual, resultPayloadGPR, JITCompiler::Address(globalObjectGPR, JSCell::structureOffset()));
// Fast case
m_jit.loadPtr(JITCompiler::Address(globalObjectGPR, JSObject::butterflyOffset()), resultPayloadGPR);
- m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), resolveInfoGPR);
+ m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(ResolveOperation, m_offset)), resolveInfoGPR);
#if DFG_ENABLE(JIT_ASSERT)
JITCompiler::Jump isOutOfLine = m_jit.branch32(JITCompiler::GreaterThanOrEqual, resolveInfoGPR, TrustedImm32(firstOutOfLineOffset));
m_jit.breakpoint();
@@ -4739,6 +4749,11 @@ void SpeculativeJIT::compile(Node& node)
compileNewFunctionExpression(node);
break;
+ case GarbageValue:
+ // We should never get to the point of code emission for a GarbageValue.
+ CRASH();
+ break;
+
case ForceOSRExit: {
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
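
The Resolve/ResolveBase slow paths no longer take just an identifier: each node now carries an index into the graph's m_resolveOperationsData, which supplies the identifier plus the per-site ResolveOperations (and, for base resolution, the PutToBaseOperation) that the runtime will interpret and patch. The call-site shape, as in the hunks above:

    ResolveOperationData& data = m_jit.graph().m_resolveOperationsData[node.resolveOperationsDataIndex()];
    callOperation(operationResolve, resultTag.gpr(), resultPayload.gpr(),
        identifier(data.identifierNumber), resolveOperations(data.resolveOperationsIndex));
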
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
index daca71da7..42ab40341 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
@@ -55,21 +55,21 @@ GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat
}
if (isNumberConstant(nodeIndex)) {
JSValue jsValue = jsNumber(valueOfNumberConstant(nodeIndex));
- m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
+ m_jit.move(MacroAssembler::Imm64(JSValue::encode(jsValue)), gpr);
} else {
ASSERT(isJSConstant(nodeIndex));
JSValue jsValue = valueOfJSConstant(nodeIndex);
- m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsValue)), gpr);
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
}
} else if (info.spillFormat() == DataFormatInteger) {
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
// Tag it, since fillInteger() is used when we want a boxed integer.
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
} else {
ASSERT(info.spillFormat() == DataFormatJS || info.spillFormat() == DataFormatJSInteger);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
}
// Since we statically know that we're filling an integer, and values
@@ -133,8 +133,8 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex)
unlock(gpr);
} else if (isNumberConstant(nodeIndex)) {
FPRReg fpr = fprAllocate();
- m_jit.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfNumberConstant(nodeIndex)))), gpr);
- m_jit.movePtrToDouble(gpr, fpr);
+ m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(nodeIndex))), gpr);
+ m_jit.move64ToDouble(gpr, fpr);
unlock(gpr);
m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
@@ -144,7 +144,7 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex)
// FIXME: should not be reachable?
ASSERT(isJSConstant(nodeIndex));
JSValue jsValue = valueOfJSConstant(nodeIndex);
- m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsValue)), gpr);
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
info.fillJSValue(*m_stream, gpr, DataFormatJS);
unlock(gpr);
@@ -175,7 +175,7 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex)
ASSERT(spillFormat & DataFormatJS);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
info.fillJSValue(*m_stream, gpr, spillFormat);
unlock(gpr);
break;
@@ -200,7 +200,7 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex)
FPRReg fpr = fprAllocate();
GPRReg tempGpr = allocate(); // FIXME: can we skip this allocation on the last use of the virtual register?
- JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);
m_jit.jitAssertIsJSDouble(jsValueGpr);
@@ -279,15 +279,15 @@ GPRReg SpeculativeJIT::fillJSValue(NodeIndex nodeIndex)
if (isInt32Constant(nodeIndex)) {
info.fillJSValue(*m_stream, gpr, DataFormatJSInteger);
JSValue jsValue = jsNumber(valueOfInt32Constant(nodeIndex));
- m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
+ m_jit.move(MacroAssembler::Imm64(JSValue::encode(jsValue)), gpr);
} else if (isNumberConstant(nodeIndex)) {
info.fillJSValue(*m_stream, gpr, DataFormatJSDouble);
JSValue jsValue(JSValue::EncodeAsDouble, valueOfNumberConstant(nodeIndex));
- m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
+ m_jit.move(MacroAssembler::Imm64(JSValue::encode(jsValue)), gpr);
} else {
ASSERT(isJSConstant(nodeIndex));
JSValue jsValue = valueOfJSConstant(nodeIndex);
- m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsValue)), gpr);
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
info.fillJSValue(*m_stream, gpr, DataFormatJS);
}
@@ -297,13 +297,13 @@ GPRReg SpeculativeJIT::fillJSValue(NodeIndex nodeIndex)
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
if (spillFormat == DataFormatInteger) {
m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
spillFormat = DataFormatJSInteger;
} else {
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
if (spillFormat == DataFormatDouble) {
// Need to box the double, since we want a JSValue.
- m_jit.subPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ m_jit.sub64(GPRInfo::tagTypeNumberRegister, gpr);
spillFormat = DataFormatJSDouble;
} else
ASSERT(spillFormat & DataFormatJS);
@@ -319,11 +319,11 @@ GPRReg SpeculativeJIT::fillJSValue(NodeIndex nodeIndex)
// If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInteger, not DataFormatJSInteger.
if (m_gprs.isLocked(gpr)) {
GPRReg result = allocate();
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr, result);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr, result);
return result;
}
m_gprs.lock(gpr);
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
info.fillJSValue(*m_stream, gpr, DataFormatJSInteger);
return gpr;
}
@@ -408,8 +408,8 @@ void SpeculativeJIT::nonSpeculativeValueToNumber(Node& node)
GPRReg gpr = result.gpr();
op1.use();
- JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);
- JITCompiler::Jump nonNumeric = m_jit.branchTestPtr(MacroAssembler::Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump nonNumeric = m_jit.branchTest64(MacroAssembler::Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister);
// First, if we get here we have a double encoded as a JSValue
m_jit.move(jsValueGpr, gpr);
@@ -417,7 +417,7 @@ void SpeculativeJIT::nonSpeculativeValueToNumber(Node& node)
// Finally, handle integers.
isInteger.link(&m_jit);
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, jsValueGpr, gpr);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, jsValueGpr, gpr);
hasUnboxedDouble.link(&m_jit);
addSlowPathGenerator(adoptPtr(new ValueToNumberSlowPathGenerator(nonNumeric, this, gpr, jsValueGpr)));
@@ -459,7 +459,7 @@ void SpeculativeJIT::nonSpeculativeValueToInt32(Node& node)
GPRReg resultGPR = result.gpr();
op1.use();
- JITCompiler::Jump isNotInteger = m_jit.branchPtr(MacroAssembler::Below, jsValueGpr, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump isNotInteger = m_jit.branch64(MacroAssembler::Below, jsValueGpr, GPRInfo::tagTypeNumberRegister);
m_jit.zeroExtend32ToPtr(jsValueGpr, resultGPR);
@@ -486,7 +486,7 @@ void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node& node)
positive.link(&m_jit);
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, op1.gpr(), result.gpr());
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, op1.gpr(), result.gpr());
done.link(&m_jit);
@@ -500,7 +500,7 @@ void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg
JITCompiler::ConvertibleLoadLabel propertyStorageLoad =
m_jit.convertibleLoadPtr(JITCompiler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR);
- JITCompiler::DataLabelCompact loadWithPatch = m_jit.loadPtrWithCompactAddressOffsetPatch(JITCompiler::Address(resultGPR, 0), resultGPR);
+ JITCompiler::DataLabelCompact loadWithPatch = m_jit.load64WithCompactAddressOffsetPatch(JITCompiler::Address(resultGPR, 0), resultGPR);
JITCompiler::Label doneLabel = m_jit.label();
@@ -536,7 +536,7 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg
JITCompiler::ConvertibleLoadLabel propertyStorageLoad =
m_jit.convertibleLoadPtr(JITCompiler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
- JITCompiler::DataLabel32 storeWithPatch = m_jit.storePtrWithAddressOffsetPatch(valueGPR, JITCompiler::Address(scratchGPR, 0));
+ JITCompiler::DataLabel32 storeWithPatch = m_jit.store64WithAddressOffsetPatch(valueGPR, JITCompiler::Address(scratchGPR, 0));
JITCompiler::Label doneLabel = m_jit.label();
@@ -588,7 +588,7 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool inv
JITCompiler::Jump notCell;
if (!isKnownCell(operand.index()))
- notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);
+ notCell = m_jit.branchTest64(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);
JITCompiler::Jump notMasqueradesAsUndefined;
if (m_jit.graph().globalObjectFor(m_jit.graph()[operand].codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
@@ -618,8 +618,8 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool inv
notCell.link(&m_jit);
m_jit.move(argGPR, resultGPR);
- m_jit.andPtr(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
- m_jit.comparePtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(ValueNull), resultGPR);
+ m_jit.and64(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
+ m_jit.compare64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(ValueNull), resultGPR);
done.link(&m_jit);
}
@@ -652,7 +652,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, NodeIndex br
JITCompiler::Jump notCell;
if (!isKnownCell(operand.index()))
- notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);
+ notCell = m_jit.branchTest64(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);
if (m_jit.graph().globalObjectFor(m_jit.graph()[operand].codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
m_jit.graph().globalObjectFor(m_jit.graph()[operand].codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
@@ -676,8 +676,8 @@ void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, NodeIndex br
notCell.link(&m_jit);
m_jit.move(argGPR, resultGPR);
- m_jit.andPtr(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
- branchPtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull)), taken);
+ m_jit.and64(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
+ branch64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm64(ValueNull), taken);
}
jump(notTaken);
@@ -750,9 +750,9 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNo
arg2.use();
if (!isKnownInteger(node.child1().index()))
- slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
+ slowPath.append(m_jit.branch64(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
if (!isKnownInteger(node.child2().index()))
- slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
+ slowPath.append(m_jit.branch64(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
branch32(cond, arg1GPR, arg2GPR, taken);
@@ -833,9 +833,9 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler
arg2.use();
if (!isKnownInteger(node.child1().index()))
- slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
+ slowPath.append(m_jit.branch64(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
if (!isKnownInteger(node.child2().index()))
- slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
+ slowPath.append(m_jit.branch64(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
m_jit.compare32(cond, arg1GPR, arg2GPR, resultGPR);
m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
@@ -879,7 +879,7 @@ void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branch
if (isKnownCell(node.child1().index()) && isKnownCell(node.child2().index())) {
// see if we get lucky: if the arguments are cells and they reference the same
// cell, then they must be strictly equal.
- branchPtr(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);
+ branch64(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);
silentSpillAllRegisters(resultGPR);
callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR);
@@ -887,22 +887,22 @@ void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branch
branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR, taken);
} else {
- m_jit.orPtr(arg1GPR, arg2GPR, resultGPR);
+ m_jit.or64(arg1GPR, arg2GPR, resultGPR);
- JITCompiler::Jump twoCellsCase = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
+ JITCompiler::Jump twoCellsCase = m_jit.branchTest64(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
- JITCompiler::Jump leftOK = m_jit.branchPtr(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
- JITCompiler::Jump leftDouble = m_jit.branchTestPtr(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump leftOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump leftDouble = m_jit.branchTest64(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister);
leftOK.link(&m_jit);
- JITCompiler::Jump rightOK = m_jit.branchPtr(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
- JITCompiler::Jump rightDouble = m_jit.branchTestPtr(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump rightOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump rightDouble = m_jit.branchTest64(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister);
rightOK.link(&m_jit);
- branchPtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, taken);
+ branch64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, taken);
jump(notTaken, ForceJump);
twoCellsCase.link(&m_jit);
- branchPtr(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);
+ branch64(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);
leftDouble.link(&m_jit);
rightDouble.link(&m_jit);
@@ -934,9 +934,9 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node& node, bool invert)
// see if we get lucky: if the arguments are cells and they reference the same
// cell, then they must be strictly equal.
// FIXME: this should flush registers instead of silent spill/fill.
- JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1GPR, arg2GPR);
+ JITCompiler::Jump notEqualCase = m_jit.branch64(JITCompiler::NotEqual, arg1GPR, arg2GPR);
- m_jit.move(JITCompiler::TrustedImmPtr(JSValue::encode(jsBoolean(!invert))), resultGPR);
+ m_jit.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert))), resultGPR);
JITCompiler::Jump done = m_jit.jump();
@@ -946,33 +946,33 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node& node, bool invert)
callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR);
silentFillAllRegisters(resultGPR);
- m_jit.andPtr(JITCompiler::TrustedImm32(1), resultGPR);
+ m_jit.and64(JITCompiler::TrustedImm32(1), resultGPR);
m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);
done.link(&m_jit);
} else {
- m_jit.orPtr(arg1GPR, arg2GPR, resultGPR);
+ m_jit.or64(arg1GPR, arg2GPR, resultGPR);
JITCompiler::JumpList slowPathCases;
- JITCompiler::Jump twoCellsCase = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
+ JITCompiler::Jump twoCellsCase = m_jit.branchTest64(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
- JITCompiler::Jump leftOK = m_jit.branchPtr(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
- slowPathCases.append(m_jit.branchTestPtr(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister));
+ JITCompiler::Jump leftOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
+ slowPathCases.append(m_jit.branchTest64(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister));
leftOK.link(&m_jit);
- JITCompiler::Jump rightOK = m_jit.branchPtr(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
- slowPathCases.append(m_jit.branchTestPtr(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister));
+ JITCompiler::Jump rightOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
+ slowPathCases.append(m_jit.branchTest64(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister));
rightOK.link(&m_jit);
- m_jit.comparePtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, resultGPR);
+ m_jit.compare64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, resultGPR);
m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);
JITCompiler::Jump done = m_jit.jump();
twoCellsCase.link(&m_jit);
- slowPathCases.append(m_jit.branchPtr(JITCompiler::NotEqual, arg1GPR, arg2GPR));
+ slowPathCases.append(m_jit.branch64(JITCompiler::NotEqual, arg1GPR, arg2GPR));
- m_jit.move(JITCompiler::TrustedImmPtr(JSValue::encode(jsBoolean(!invert))), resultGPR);
+ m_jit.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert))), resultGPR);
addSlowPathGenerator(
adoptPtr(
@@ -1007,8 +1007,8 @@ void SpeculativeJIT::emitCall(Node& node)
int numPassedArgs = node.numChildren() - 1;
m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs + dummyThisArgument), callFramePayloadSlot(JSStack::ArgumentCount));
- m_jit.storePtr(GPRInfo::callFrameRegister, callFrameSlot(JSStack::CallerFrame));
- m_jit.storePtr(calleeGPR, callFrameSlot(JSStack::Callee));
+ m_jit.store64(GPRInfo::callFrameRegister, callFrameSlot(JSStack::CallerFrame));
+ m_jit.store64(calleeGPR, callFrameSlot(JSStack::Callee));
for (int i = 0; i < numPassedArgs; i++) {
Edge argEdge = m_jit.graph().m_varArgChildren[node.firstChild() + 1 + i];
@@ -1016,7 +1016,7 @@ void SpeculativeJIT::emitCall(Node& node)
GPRReg argGPR = arg.gpr();
use(argEdge);
- m_jit.storePtr(argGPR, argumentSlot(i + dummyThisArgument));
+ m_jit.store64(argGPR, argumentSlot(i + dummyThisArgument));
}
flushRegisters();
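
On JSVALUE64 every JSStack slot holds one EncodedJSValue, a single 64-bit word, so each of these storePtr calls becomes a store64 with unchanged addressing; only the ArgumentCount slot keeps its store32, since just the low 32 bits of that slot carry the count. A rough model of what emitCall writes, with slot indices standing in for the real callFrameSlot()/argumentSlot() offsets:

    #include <cstdint>

    typedef int64_t EncodedJSValue; // one 64-bit word per stack slot

    static void layOutCall(EncodedJSValue* frame, int callerFrameSlot, int calleeSlot,
                           int firstArgSlot, EncodedJSValue callerFrame,
                           EncodedJSValue callee, const EncodedJSValue* args, int numArgs)
    {
        frame[callerFrameSlot] = callerFrame;  // store64(GPRInfo::callFrameRegister, ...)
        frame[calleeSlot]      = callee;       // store64(calleeGPR, ...)
        for (int i = 0; i < numArgs; i++)
            frame[firstArgSlot + i] = args[i]; // store64(argGPR, argumentSlot(...))
    }
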
@@ -1025,16 +1025,17 @@ void SpeculativeJIT::emitCall(Node& node)
GPRReg resultGPR = result.gpr();
JITCompiler::DataLabelPtr targetToCheck;
- JITCompiler::Jump slowPath;
+ JITCompiler::JumpList slowPath;
CallBeginToken token;
m_jit.beginCall(node.codeOrigin, token);
m_jit.addPtr(TrustedImm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
- slowPath = m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(JSValue::encode(JSValue())));
+ slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(0)));
+
m_jit.loadPtr(MacroAssembler::Address(calleeGPR, OBJECT_OFFSETOF(JSFunction, m_scope)), resultGPR);
- m_jit.storePtr(resultGPR, MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain));
+ m_jit.store64(resultGPR, MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain));
CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin;
JITCompiler::Call fastCall = m_jit.nearCall();
@@ -1104,14 +1105,14 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat&
}
if (spillFormat == DataFormatInteger) {
m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
} else
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
info.fillJSValue(*m_stream, gpr, DataFormatJSInteger);
returnFormat = DataFormatJSInteger;
return gpr;
}
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
// Fill as JSValue, and fall through.
info.fillJSValue(*m_stream, gpr, DataFormatJSInteger);
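
The or64 with tagTypeNumberRegister here is int32 boxing: a slot spilled as DataFormatInteger holds only the raw 32 bits, so the fill zero-extends with load32 and then tags, while an already-boxed slot is reloaded whole with load64. A sketch, under the same TagTypeNumber assumption as above:

    #include <cstdint>

    static const uint64_t TagTypeNumber = 0xffff000000000000ull;

    static uint64_t boxInt32(int32_t raw)             // load32 + or64 path
    {
        return TagTypeNumber | static_cast<uint32_t>(raw);
    }

    static uint64_t reloadBoxed(const uint64_t* slot) // load64 path
    {
        return *slot;
    }

For example boxInt32(-1) is 0xffff0000ffffffff, which still satisfies the unsigned v >= TagTypeNumber int32 test.
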
@@ -1123,7 +1124,7 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat&
GPRReg gpr = info.gpr();
m_gprs.lock(gpr);
if (!isInt32Speculation(type))
- speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchPtr(MacroAssembler::Below, gpr, GPRInfo::tagTypeNumberRegister));
+ speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branch64(MacroAssembler::Below, gpr, GPRInfo::tagTypeNumberRegister));
info.fillJSValue(*m_stream, gpr, DataFormatJSInteger);
// If !strict we're done, return.
if (!strict) {
@@ -1223,8 +1224,8 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
if (isInt32Constant(nodeIndex)) {
FPRReg fpr = fprAllocate();
- m_jit.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(static_cast<double>(valueOfInt32Constant(nodeIndex))))), gpr);
- m_jit.movePtrToDouble(gpr, fpr);
+ m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(static_cast<double>(valueOfInt32Constant(nodeIndex)))), gpr);
+ m_jit.move64ToDouble(gpr, fpr);
unlock(gpr);
m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
@@ -1233,8 +1234,8 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
}
if (isNumberConstant(nodeIndex)) {
FPRReg fpr = fprAllocate();
- m_jit.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfNumberConstant(nodeIndex)))), gpr);
- m_jit.movePtrToDouble(gpr, fpr);
+ m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(nodeIndex))), gpr);
+ m_jit.move64ToDouble(gpr, fpr);
unlock(gpr);
m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
@@ -1270,7 +1271,7 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
ASSERT(spillFormat & DataFormatJS);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
info.fillJSValue(*m_stream, gpr, spillFormat);
unlock(gpr);
break;
@@ -1295,10 +1296,10 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
FPRReg fpr = fprAllocate();
GPRReg tempGpr = allocate();
- JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);
if (!isNumberSpeculation(type))
- speculationCheck(BadType, JSValueRegs(jsValueGpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister));
+ speculationCheck(BadType, JSValueRegs(jsValueGpr), nodeIndex, m_jit.branchTest64(MacroAssembler::Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister));
// First, if we get here we have a double encoded as a JSValue
m_jit.move(jsValueGpr, tempGpr);
@@ -1384,7 +1385,7 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex, bool isForwardSpec
JSValue jsValue = valueOfJSConstant(nodeIndex);
if (jsValue.isCell()) {
m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
- m_jit.move(MacroAssembler::TrustedImmPtr(jsValue.asCell()), gpr);
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
return gpr;
}
@@ -1393,11 +1394,11 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex, bool isForwardSpec
}
ASSERT(info.spillFormat() & DataFormatJS);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
info.fillJSValue(*m_stream, gpr, DataFormatJS);
if (!isCellSpeculation(type))
- speculationCheckWithConditionalDirection(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister), isForwardSpeculation);
+ speculationCheckWithConditionalDirection(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister), isForwardSpeculation);
info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
return gpr;
}
@@ -1413,7 +1414,7 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex, bool isForwardSpec
GPRReg gpr = info.gpr();
m_gprs.lock(gpr);
if (!isCellSpeculation(type))
- speculationCheckWithConditionalDirection(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister), isForwardSpeculation);
+ speculationCheckWithConditionalDirection(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister), isForwardSpeculation);
info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
return gpr;
}
@@ -1460,7 +1461,7 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
JSValue jsValue = valueOfJSConstant(nodeIndex);
if (jsValue.isBoolean()) {
m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
- m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsValue)), gpr);
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
return gpr;
}
@@ -1469,13 +1470,13 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
}
ASSERT(info.spillFormat() & DataFormatJS);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
info.fillJSValue(*m_stream, gpr, DataFormatJS);
if (!isBooleanSpeculation(type)) {
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
- speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
+ m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
+ speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTest64(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
+ m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
}
info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
return gpr;
@@ -1492,9 +1493,9 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
GPRReg gpr = info.gpr();
m_gprs.lock(gpr);
if (!isBooleanSpeculation(type)) {
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
- speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
+ m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
+ speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTest64(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
+ m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
}
info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
return gpr;
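
The xor64/branchTest64/xor64 triple is the boolean speculation check. With ValueFalse = 0x6 and ValueTrue = 0x7, xoring with ValueFalse maps a genuine boolean to 0 or 1; any set bit outside the low bit proves the value was not a boolean, and the second xor restores the register on the non-exit path. A sketch (constants as in JSValue.h; the function name is illustrative):

    #include <cstdint>

    static const uint64_t ValueFalse = 0x6; // TagBitTypeOther | TagBitBool
    static const uint64_t ValueTrue  = 0x7; // ValueFalse | 1

    static bool speculateBoolean(uint64_t& v) // false here means "would OSR exit"
    {
        v ^= ValueFalse;   // false -> 0, true -> 1
        if (v & ~1ull)     // branchTest64 NonZero against ~1
            return false;
        v ^= ValueFalse;   // undo, leaving the boxed boolean intact
        return true;
    }

compileLogicalNot below plays the same trick but xors ValueTrue back instead of ValueFalse, which restores and negates in one instruction (0 ^ 0x7 = true, 1 ^ 0x7 = false).
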
@@ -1521,9 +1522,9 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
JITCompiler::Jump SpeculativeJIT::convertToDouble(GPRReg value, FPRReg result, GPRReg tmp)
{
- JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, value, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, value, GPRInfo::tagTypeNumberRegister);
- JITCompiler::Jump notNumber = m_jit.branchTestPtr(MacroAssembler::Zero, value, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump notNumber = m_jit.branchTest64(MacroAssembler::Zero, value, GPRInfo::tagTypeNumberRegister);
m_jit.move(value, tmp);
unboxDouble(tmp, result);
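
convertToDouble first rules out int32 (AboveOrEqual) and non-numbers (Zero test), so unboxDouble only ever sees boxed doubles. On JSVALUE64 a double is stored as its raw bits plus 2^48, which shifts all doubles out of the cell-pointer range; tagTypeNumberRegister (0xffff000000000000, numerically -2^48 mod 2^64) lets the JIT apply that offset with a single add64 or sub64. A sketch of the encoding (names illustrative):

    #include <cstdint>
    #include <cstring>

    static const uint64_t DoubleEncodeOffset = 1ull << 48;

    static uint64_t boxDouble(double d)
    {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits); // reinterpretDoubleToInt64
        return bits + DoubleEncodeOffset;
    }

    static double unboxDouble(uint64_t v)
    {
        uint64_t bits = v - DoubleEncodeOffset;
        double d;
        std::memcpy(&d, &bits, sizeof d);    // move64ToDouble
        return d;
    }

This is also why the constant-double materialization above becomes Imm64 + move64ToDouble: the payload is a 64-bit integer, not a pointer, so ImmPtr/movePtrToDouble said the wrong thing even when it happened to work.
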
@@ -1590,7 +1591,7 @@ void SpeculativeJIT::compileObjectEquality(Node& node)
MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
}
- MacroAssembler::Jump falseCase = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR);
+ MacroAssembler::Jump falseCase = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, op2GPR);
m_jit.move(TrustedImm32(ValueTrue), resultGPR);
MacroAssembler::Jump done = m_jit.jump();
falseCase.link(&m_jit);
@@ -1638,7 +1639,7 @@ void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge r
// It seems that most of the time when programs do a == b where b may be either null/undefined
// or an object, b is usually an object. Balance the branches to make that case fast.
MacroAssembler::Jump rightNotCell =
- m_jit.branchTestPtr(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister);
+ m_jit.branchTest64(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister);
// We know that within this branch, rightChild must be a cell.
if (m_jit.graph().globalObjectFor(leftNode.codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
@@ -1668,7 +1669,7 @@ void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge r
// At this point we know that we can perform a straight-forward equality comparison on pointer
// values because both left and right are pointers to objects that have no special equality
// protocols.
- MacroAssembler::Jump falseCase = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR);
+ MacroAssembler::Jump falseCase = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, op2GPR);
MacroAssembler::Jump trueCase = m_jit.jump();
rightNotCell.link(&m_jit);
@@ -1677,13 +1678,13 @@ void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge r
// prove that it is either null or undefined.
if (!isOtherOrEmptySpeculation(m_state.forNode(rightChild).m_type & ~SpecCell)) {
m_jit.move(op2GPR, resultGPR);
- m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
+ m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
speculationCheck(
BadType, JSValueRegs(op2GPR), rightChild.index(),
- m_jit.branchPtr(
+ m_jit.branch64(
MacroAssembler::NotEqual, resultGPR,
- MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull))));
+ MacroAssembler::TrustedImm64(ValueNull)));
}
falseCase.link(&m_jit);
@@ -1737,7 +1738,7 @@ void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild
// It seems that most of the time when programs do a == b where b may be either null/undefined
// or an object, b is usually an object. Balance the branches to make that case fast.
MacroAssembler::Jump rightNotCell =
- m_jit.branchTestPtr(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister);
+ m_jit.branchTest64(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister);
// We know that within this branch, rightChild must be a cell.
if (m_jit.graph().globalObjectFor(branchNode.codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
@@ -1767,7 +1768,7 @@ void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild
// At this point we know that we can perform a straight-forward equality comparison on pointer
// values because both left and right are pointers to objects that have no special equality
// protocols.
- branchPtr(MacroAssembler::Equal, op1GPR, op2GPR, taken);
+ branch64(MacroAssembler::Equal, op1GPR, op2GPR, taken);
// We know that within this branch, rightChild must not be a cell. Check if that is enough to
// prove that it is either null or undefined.
@@ -1778,13 +1779,13 @@ void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild
rightNotCell.link(&m_jit);
m_jit.move(op2GPR, resultGPR);
- m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
+ m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
speculationCheck(
BadType, JSValueRegs(op2GPR), rightChild.index(),
- m_jit.branchPtr(
+ m_jit.branch64(
MacroAssembler::NotEqual, resultGPR,
- MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull))));
+ MacroAssembler::TrustedImm64(ValueNull)));
}
jump(notTaken);
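
The and64/branch64 pair above is the combined null-or-undefined test. ValueNull (0x2) and ValueUndefined (0xa) differ only in TagBitUndefined (0x8), so masking that bit folds both into one compare, and the immediate can now be spelled TrustedImm64(ValueNull) instead of a reinterpret_cast'd pointer. In plain C++:

    #include <cstdint>

    static const uint64_t TagBitUndefined = 0x8;
    static const uint64_t ValueNull       = 0x2;
    static const uint64_t ValueUndefined  = ValueNull | TagBitUndefined; // 0xa

    static bool isNullOrUndefined(uint64_t v)
    {
        return (v & ~TagBitUndefined) == ValueNull; // and64 + branch64 NotEqual
    }

Any other non-cell value fails the compare and trips the speculation check.
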
@@ -1811,7 +1812,7 @@ void SpeculativeJIT::compileDoubleCompare(Node& node, MacroAssembler::DoubleCond
m_jit.move(TrustedImm32(ValueTrue), result.gpr());
MacroAssembler::Jump trueCase = m_jit.branchDouble(condition, op1.fpr(), op2.fpr());
- m_jit.xorPtr(TrustedImm32(true), result.gpr());
+ m_jit.xor64(TrustedImm32(true), result.gpr());
trueCase.link(&m_jit);
jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
@@ -1843,7 +1844,7 @@ void SpeculativeJIT::compileNonStringCellOrOtherLogicalNot(Edge nodeUse, bool ne
GPRReg valueGPR = value.gpr();
GPRReg resultGPR = result.gpr();
- MacroAssembler::Jump notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
+ MacroAssembler::Jump notCell = m_jit.branchTest64(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
if (m_jit.graph().globalObjectFor(m_jit.graph()[nodeUse.index()].codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
m_jit.graph().globalObjectFor(m_jit.graph()[nodeUse.index()].codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
@@ -1889,12 +1890,12 @@ void SpeculativeJIT::compileNonStringCellOrOtherLogicalNot(Edge nodeUse, bool ne
if (needSpeculationCheck) {
m_jit.move(valueGPR, resultGPR);
- m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
+ m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
- m_jit.branchPtr(
+ m_jit.branch64(
MacroAssembler::NotEqual,
resultGPR,
- MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull))));
+ MacroAssembler::TrustedImm64(ValueNull)));
}
m_jit.move(TrustedImm32(ValueTrue), resultGPR);
@@ -1937,7 +1938,7 @@ void SpeculativeJIT::compileLogicalNot(Node& node)
GPRTemporary result(this, value);
m_jit.move(value.gpr(), result.gpr());
- m_jit.xorPtr(TrustedImm32(true), result.gpr());
+ m_jit.xor64(TrustedImm32(true), result.gpr());
jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
return;
@@ -1947,9 +1948,9 @@ void SpeculativeJIT::compileLogicalNot(Node& node)
GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
m_jit.move(value.gpr(), result.gpr());
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr());
- speculationCheck(BadType, JSValueRegs(value.gpr()), node.child1(), m_jit.branchTestPtr(JITCompiler::NonZero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueTrue)), result.gpr());
+ m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr());
+ speculationCheck(BadType, JSValueRegs(value.gpr()), node.child1(), m_jit.branchTest64(JITCompiler::NonZero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
+ m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), result.gpr());
// If we add a DataFormatBool, we should use it here.
jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
@@ -1965,13 +1966,13 @@ void SpeculativeJIT::compileLogicalNot(Node& node)
arg1.use();
m_jit.move(arg1GPR, resultGPR);
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), resultGPR);
- JITCompiler::Jump slowCase = m_jit.branchTestPtr(JITCompiler::NonZero, resultGPR, TrustedImm32(static_cast<int32_t>(~1)));
+ m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), resultGPR);
+ JITCompiler::Jump slowCase = m_jit.branchTest64(JITCompiler::NonZero, resultGPR, TrustedImm32(static_cast<int32_t>(~1)));
addSlowPathGenerator(
slowPathCall(slowCase, this, dfgConvertJSValueToBoolean, resultGPR, arg1GPR));
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueTrue)), resultGPR);
+ m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), resultGPR);
jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly);
}
@@ -1982,7 +1983,7 @@ void SpeculativeJIT::emitNonStringCellOrOtherBranch(Edge nodeUse, BlockIndex tak
GPRReg valueGPR = value.gpr();
GPRReg scratchGPR = scratch.gpr();
- MacroAssembler::Jump notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
+ MacroAssembler::Jump notCell = m_jit.branchTest64(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
if (m_jit.graph().globalObjectFor(m_jit.graph()[nodeUse.index()].codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
m_jit.graph().globalObjectFor(m_jit.graph()[nodeUse.index()].codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
@@ -2020,8 +2021,8 @@ void SpeculativeJIT::emitNonStringCellOrOtherBranch(Edge nodeUse, BlockIndex tak
if (needSpeculationCheck) {
m_jit.move(valueGPR, scratchGPR);
- m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), scratchGPR);
- speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse.index(), m_jit.branchPtr(MacroAssembler::NotEqual, scratchGPR, MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull))));
+ m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), scratchGPR);
+ speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse.index(), m_jit.branch64(MacroAssembler::NotEqual, scratchGPR, MacroAssembler::TrustedImm64(ValueNull)));
}
jump(notTaken);
@@ -2078,8 +2079,8 @@ void SpeculativeJIT::emitBranch(Node& node)
branchTest32(condition, valueGPR, TrustedImm32(true), taken);
jump(notTaken);
} else {
- branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(false))), notTaken);
- branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(true))), taken);
+ branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken);
+ branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken);
speculationCheck(BadType, JSValueRegs(valueGPR), node.child1(), m_jit.jump());
}
@@ -2088,12 +2089,12 @@ void SpeculativeJIT::emitBranch(Node& node)
GPRTemporary result(this);
GPRReg resultGPR = result.gpr();
- branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImmPtr(JSValue::encode(jsNumber(0))), notTaken);
- branchPtr(MacroAssembler::AboveOrEqual, valueGPR, GPRInfo::tagTypeNumberRegister, taken);
+ branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(0))), notTaken);
+ branch64(MacroAssembler::AboveOrEqual, valueGPR, GPRInfo::tagTypeNumberRegister, taken);
if (!predictBoolean) {
- branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(false))), notTaken);
- branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(true))), taken);
+ branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken);
+ branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken);
}
value.use();
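
For an untyped branch operand the sequence above peels off the cheap encodings in order: the boxed zero, then any other int32 (unsigned-above the number tag, hence truthy), then the two boolean constants, with everything else left to the speculation check. Roughly, assuming the constants used earlier:

    #include <cstdint>

    static const uint64_t TagTypeNumber = 0xffff000000000000ull;
    static const uint64_t ValueFalse    = 0x6;
    static const uint64_t ValueTrue     = 0x7;
    static const uint64_t EncodedZero   = TagTypeNumber; // JSValue::encode(jsNumber(0))

    enum Outcome { Taken, NotTaken, SlowOrExit };

    static Outcome branchOnValue(uint64_t v, bool predictBoolean)
    {
        if (v == EncodedZero)   return NotTaken; // branch64 Equal, jsNumber(0)
        if (v >= TagTypeNumber) return Taken;    // any nonzero int32 is truthy
        if (!predictBoolean) {
            if (v == ValueFalse) return NotTaken;
            if (v == ValueTrue)  return Taken;
        }
        return SlowOrExit;
    }
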
@@ -2115,8 +2116,8 @@ MacroAssembler::JumpList SpeculativeJIT::compileContiguousGetByVal(Node&, GPRReg
MacroAssembler::JumpList slowCases;
slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
- m_jit.loadPtr(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr), resultReg);
- slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, resultReg));
+ m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
+ slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, resultReg));
return slowCases;
}
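
Contiguous storage keeps one EncodedJSValue per element, so indexing moves from ScalePtr to an explicit TimesEight, and a hole reads back as all-zero bits because the empty JSValue encodes to 0, which is what branchTest64 Zero catches. A sketch with the butterfly modeled as a bare array plus its public length (struct layout illustrative):

    #include <cstdint>

    typedef uint64_t EncodedJSValue;

    struct ContiguousStorage {
        uint32_t publicLength;
        EncodedJSValue* slots; // 8 bytes each -> TimesEight
    };

    // Fast path of compileContiguousGetByVal; false means "take a slow case".
    static bool contiguousGet(const ContiguousStorage& s, uint32_t index, EncodedJSValue& out)
    {
        if (index >= s.publicLength) // branch32 AboveOrEqual on offsetOfPublicLength
            return false;
        out = s.slots[index];        // load64(BaseIndex(..., TimesEight))
        return out != 0;             // zero bits mean a hole
    }
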
@@ -2125,8 +2126,8 @@ MacroAssembler::JumpList SpeculativeJIT::compileArrayStorageGetByVal(Node&, GPRR
{
MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
- m_jit.loadPtr(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), resultReg);
- MacroAssembler::Jump hole = m_jit.branchTestPtr(MacroAssembler::Zero, resultReg);
+ m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), resultReg);
+ MacroAssembler::Jump hole = m_jit.branchTest64(MacroAssembler::Zero, resultReg);
MacroAssembler::JumpList slowCases;
slowCases.append(outOfBounds);
@@ -2158,7 +2159,7 @@ MacroAssembler::JumpList SpeculativeJIT::compileContiguousPutByVal(Node& node, G
inBounds.link(&m_jit);
}
- m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr));
+ m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
if (isInBoundsAccess(arrayMode))
return MacroAssembler::JumpList();
@@ -2184,9 +2185,9 @@ MacroAssembler::JumpList SpeculativeJIT::compileArrayStoragePutByVal(Node& node,
// profiling.
speculationCheck(
Uncountable, JSValueRegs(), NoNode,
- m_jit.branchTestPtr(MacroAssembler::Zero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))));
+ m_jit.branchTest64(MacroAssembler::Zero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))));
} else {
- MacroAssembler::Jump notHoleValue = m_jit.branchTestPtr(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ MacroAssembler::Jump notHoleValue = m_jit.branchTest64(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
if (isSlowPutAccess(arrayMode)) {
// This is sort of strange. If we wanted to optimize this code path, we would invert
// the above branch. But it's simply not worth it since this only happens if we're
@@ -2206,7 +2207,7 @@ MacroAssembler::JumpList SpeculativeJIT::compileArrayStoragePutByVal(Node& node,
}
// Store the value to the array.
- m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
return slowCases;
}
@@ -2270,7 +2271,7 @@ void SpeculativeJIT::compile(Node& node)
}
GPRTemporary result(this);
- m_jit.loadPtr(JITCompiler::addressFor(node.local()), result.gpr());
+ m_jit.load64(JITCompiler::addressFor(node.local()), result.gpr());
// Like jsValueResult, but don't useChildren - our children are phi nodes,
// and don't represent values within this dataflow with virtual registers.
@@ -2294,7 +2295,7 @@ void SpeculativeJIT::compile(Node& node)
case GetLocalUnlinked: {
GPRTemporary result(this);
- m_jit.loadPtr(JITCompiler::addressFor(node.unlinkedLocal()), result.gpr());
+ m_jit.load64(JITCompiler::addressFor(node.unlinkedLocal()), result.gpr());
jsValueResult(result.gpr(), m_compileIndex);
break;
@@ -2360,14 +2361,14 @@ void SpeculativeJIT::compile(Node& node)
if (isCellSpeculation(predictedType)) {
SpeculateCellOperand cell(this, node.child1());
GPRReg cellGPR = cell.gpr();
- m_jit.storePtr(cellGPR, JITCompiler::addressFor(node.local()));
+ m_jit.store64(cellGPR, JITCompiler::addressFor(node.local()));
noResult(m_compileIndex);
recordSetLocal(node.local(), ValueSource(CellInJSStack));
break;
}
if (isBooleanSpeculation(predictedType)) {
SpeculateBooleanOperand boolean(this, node.child1());
- m_jit.storePtr(boolean.gpr(), JITCompiler::addressFor(node.local()));
+ m_jit.store64(boolean.gpr(), JITCompiler::addressFor(node.local()));
noResult(m_compileIndex);
recordSetLocal(node.local(), ValueSource(BooleanInJSStack));
break;
@@ -2375,7 +2376,7 @@ void SpeculativeJIT::compile(Node& node)
}
JSValueOperand value(this, node.child1());
- m_jit.storePtr(value.gpr(), JITCompiler::addressFor(node.local()));
+ m_jit.store64(value.gpr(), JITCompiler::addressFor(node.local()));
noResult(m_compileIndex);
recordSetLocal(node.local(), ValueSource(ValueInJSStack));
@@ -2475,10 +2476,10 @@ void SpeculativeJIT::compile(Node& node)
case CheckNumber: {
if (!isNumberSpeculation(m_state.forNode(node.child1()).m_type)) {
JSValueOperand op1(this, node.child1());
- JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, op1.gpr(), GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, op1.gpr(), GPRInfo::tagTypeNumberRegister);
speculationCheck(
BadType, JSValueRegs(op1.gpr()), node.child1().index(),
- m_jit.branchTestPtr(MacroAssembler::Zero, op1.gpr(), GPRInfo::tagTypeNumberRegister));
+ m_jit.branchTest64(MacroAssembler::Zero, op1.gpr(), GPRInfo::tagTypeNumberRegister));
isInteger.link(&m_jit);
}
noResult(m_compileIndex);
@@ -2710,8 +2711,8 @@ void SpeculativeJIT::compile(Node& node)
speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
GPRTemporary result(this);
- m_jit.loadPtr(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr), result.gpr());
- speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::Zero, result.gpr()));
+ m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.gpr());
+ speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branchTest64(MacroAssembler::Zero, result.gpr()));
jsValueResult(result.gpr(), m_compileIndex);
break;
}
@@ -2755,8 +2756,8 @@ void SpeculativeJIT::compile(Node& node)
speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())));
GPRTemporary result(this);
- m_jit.loadPtr(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), result.gpr());
- speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::Zero, result.gpr()));
+ m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), result.gpr());
+ speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branchTest64(MacroAssembler::Zero, result.gpr()));
jsValueResult(result.gpr(), m_compileIndex);
break;
@@ -2900,7 +2901,7 @@ void SpeculativeJIT::compile(Node& node)
// Store the value to the array.
GPRReg propertyReg = property.gpr();
GPRReg valueReg = value.gpr();
- m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr));
+ m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
noResult(m_compileIndex);
break;
@@ -2951,7 +2952,7 @@ void SpeculativeJIT::compile(Node& node)
// Store the value to the array.
GPRReg propertyReg = property.gpr();
GPRReg valueReg = value.gpr();
- m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
noResult(m_compileIndex);
break;
@@ -3013,7 +3014,7 @@ void SpeculativeJIT::compile(Node& node)
MacroAssembler::Address(baseReg, OBJECT_OFFSETOF(Arguments, m_registers)),
scratchReg);
- m_jit.storePtr(
+ m_jit.store64(
valueReg,
MacroAssembler::BaseIndex(
scratchReg, scratch2Reg, MacroAssembler::TimesEight,
@@ -3139,10 +3140,10 @@ void SpeculativeJIT::compile(Node& node)
case Array::ArrayWithContiguousOutOfBounds: {
m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
- m_jit.storePtr(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr));
+ m_jit.store64(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
m_jit.add32(TrustedImm32(1), storageLengthGPR);
m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
addSlowPathGenerator(
slowPathCall(
@@ -3162,12 +3163,12 @@ void SpeculativeJIT::compile(Node& node)
MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
- m_jit.storePtr(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ m_jit.store64(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
m_jit.add32(TrustedImm32(1), storageLengthGPR);
m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
addSlowPathGenerator(
slowPathCall(
@@ -3208,19 +3209,19 @@ void SpeculativeJIT::compile(Node& node)
m_jit.sub32(TrustedImm32(1), storageLengthGPR);
m_jit.store32(
storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
- m_jit.loadPtr(
- MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr),
+ m_jit.load64(
+ MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight),
valueGPR);
// FIXME: This would not have to be here if changing the publicLength also zeroed the values between the old
// length and the new length.
- m_jit.storePtr(
- MacroAssembler::TrustedImmPtr(0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr));
- MacroAssembler::Jump slowCase = m_jit.branchTestPtr(MacroAssembler::Zero, valueGPR);
+ m_jit.store64(
+ MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
+ MacroAssembler::Jump slowCase = m_jit.branchTest64(MacroAssembler::Zero, valueGPR);
addSlowPathGenerator(
slowPathMove(
undefinedCase, this,
- MacroAssembler::TrustedImmPtr(JSValue::encode(jsUndefined())), valueGPR));
+ MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), valueGPR));
addSlowPathGenerator(
slowPathCall(
slowCase, this, operationArrayPopAndRecoverLength, valueGPR, baseGPR));
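
The contiguous ArrayPop fast path decrements the public length, loads the last slot as one 64-bit word, and writes the zero (empty) encoding back so the slot reads as a hole afterwards; loading zero means the slot already was a hole and falls through to operationArrayPopAndRecoverLength, while popping an empty array produces jsUndefined via slowPathMove. A sketch under the same zero-means-hole assumption:

    #include <cstdint>

    typedef uint64_t EncodedJSValue;
    static const EncodedJSValue EncodedEmpty = 0; // the TrustedImm64((int64_t)0) above

    struct ContiguousStorage {
        uint32_t publicLength;
        EncodedJSValue* slots;
    };

    // True on the fast path; false where the JIT would call out to C++.
    static bool contiguousPop(ContiguousStorage& s, EncodedJSValue& out)
    {
        if (!s.publicLength)           // undefinedCase: slowPathMove gives jsUndefined
            return false;
        --s.publicLength;              // sub32 + store32 of the new length
        out = s.slots[s.publicLength]; // load64 of the last element
        s.slots[s.publicLength] = EncodedEmpty; // re-hole the slot (see the FIXME)
        return out != EncodedEmpty;    // zero -> operationArrayPopAndRecoverLength
    }
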
@@ -3241,18 +3242,18 @@ void SpeculativeJIT::compile(Node& node)
JITCompiler::JumpList slowCases;
slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset())));
- m_jit.loadPtr(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), valueGPR);
- slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, valueGPR));
+ m_jit.load64(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), valueGPR);
+ slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, valueGPR));
m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
- m_jit.storePtr(MacroAssembler::TrustedImmPtr(0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ m_jit.store64(MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
m_jit.sub32(MacroAssembler::TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
addSlowPathGenerator(
slowPathMove(
undefinedCase, this,
- MacroAssembler::TrustedImmPtr(JSValue::encode(jsUndefined())), valueGPR));
+ MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), valueGPR));
addSlowPathGenerator(
slowPathCall(
@@ -3345,7 +3346,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.move(op1.gpr(), result.gpr());
if (op1.format() == DataFormatInteger)
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, result.gpr());
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, result.gpr());
jsValueResult(result.gpr(), m_compileIndex);
break;
@@ -3364,7 +3365,7 @@ void SpeculativeJIT::compile(Node& node)
if (!(m_state.forNode(node.child1()).m_type & ~(SpecNumber | SpecBoolean)))
m_jit.move(op1GPR, resultGPR);
else {
- MacroAssembler::Jump alreadyPrimitive = m_jit.branchTestPtr(MacroAssembler::NonZero, op1GPR, GPRInfo::tagMaskRegister);
+ MacroAssembler::Jump alreadyPrimitive = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagMaskRegister);
MacroAssembler::Jump notPrimitive = m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op1GPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->stringStructure.get()));
alreadyPrimitive.link(&m_jit);
@@ -3401,7 +3402,7 @@ void SpeculativeJIT::compile(Node& node)
for (unsigned operandIdx = 0; operandIdx < node.numChildren(); ++operandIdx) {
JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node.firstChild() + operandIdx]);
GPRReg opGPR = operand.gpr();
- m_jit.storePtr(opGPR, MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx));
+ m_jit.store64(opGPR, MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx));
}
// Yuck, we should *really* have a way of also returning the storageGPR. But
@@ -3432,7 +3433,7 @@ void SpeculativeJIT::compile(Node& node)
GPRReg opGPR = operand.gpr();
operand.use();
- m_jit.storePtr(opGPR, buffer + operandIdx);
+ m_jit.store64(opGPR, buffer + operandIdx);
}
flushRegisters();
@@ -3532,7 +3533,7 @@ void SpeculativeJIT::compile(Node& node)
GPRReg opGPR = operand.gpr();
operand.use();
- m_jit.storePtr(opGPR, buffer + operandIdx);
+ m_jit.store64(opGPR, buffer + operandIdx);
}
flushRegisters();
@@ -3577,8 +3578,8 @@ void SpeculativeJIT::compile(Node& node)
JSValue* data = m_jit.codeBlock()->constantBuffer(node.startConstant());
for (unsigned index = 0; index < node.numConstants(); ++index) {
- m_jit.storePtr(
- ImmPtr(bitwise_cast<void*>(JSValue::encode(data[index]))),
+ m_jit.store64(
+ Imm64(JSValue::encode(data[index])),
MacroAssembler::Address(storageGPR, sizeof(JSValue) * index));
}
@@ -3622,8 +3623,8 @@ void SpeculativeJIT::compile(Node& node)
if (!isOtherSpeculation(m_state.forNode(node.child1()).m_type)) {
m_jit.move(thisValueGPR, scratchGPR);
- m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), scratchGPR);
- speculationCheck(BadType, JSValueRegs(thisValueGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, scratchGPR, MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull))));
+ m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), scratchGPR);
+ speculationCheck(BadType, JSValueRegs(thisValueGPR), node.child1(), m_jit.branch64(MacroAssembler::NotEqual, scratchGPR, MacroAssembler::TrustedImm64(ValueNull)));
}
m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.globalThisObjectFor(node.codeOrigin)), scratchGPR);
@@ -3723,7 +3724,7 @@ void SpeculativeJIT::compile(Node& node)
if (checkTopLevel && skip--) {
JITCompiler::Jump activationNotCreated;
if (checkTopLevel)
- activationNotCreated = m_jit.branchTestPtr(JITCompiler::Zero, JITCompiler::addressFor(static_cast<VirtualRegister>(m_jit.codeBlock()->activationRegister())));
+ activationNotCreated = m_jit.branchTest64(JITCompiler::Zero, JITCompiler::addressFor(static_cast<VirtualRegister>(m_jit.codeBlock()->activationRegister())));
m_jit.loadPtr(JITCompiler::Address(resultGPR, JSScope::offsetOfNext()), resultGPR);
activationNotCreated.link(&m_jit);
}
@@ -3749,7 +3750,7 @@ void SpeculativeJIT::compile(Node& node)
GPRReg registersGPR = registers.gpr();
GPRReg resultGPR = result.gpr();
- m_jit.loadPtr(JITCompiler::Address(registersGPR, node.varNumber() * sizeof(Register)), resultGPR);
+ m_jit.load64(JITCompiler::Address(registersGPR, node.varNumber() * sizeof(Register)), resultGPR);
jsValueResult(resultGPR, m_compileIndex);
break;
}
@@ -3764,7 +3765,7 @@ void SpeculativeJIT::compile(Node& node)
GPRReg valueGPR = value.gpr();
GPRReg scratchGPR = scratchRegister.gpr();
- m_jit.storePtr(valueGPR, JITCompiler::Address(registersGPR, node.varNumber() * sizeof(Register)));
+ m_jit.store64(valueGPR, JITCompiler::Address(registersGPR, node.varNumber() * sizeof(Register)));
writeBarrier(scopeGPR, valueGPR, node.child3(), WriteBarrierForVariableAccess, scratchGPR);
noResult(m_compileIndex);
break;
@@ -3798,7 +3799,7 @@ void SpeculativeJIT::compile(Node& node)
base.use();
- JITCompiler::Jump notCell = m_jit.branchTestPtr(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
+ JITCompiler::Jump notCell = m_jit.branchTest64(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
cachedGetById(node.codeOrigin, baseGPR, resultGPR, node.identifierNumber(), notCell);
@@ -3840,7 +3841,7 @@ void SpeculativeJIT::compile(Node& node)
base.use();
flushRegisters();
- JITCompiler::Jump notCell = m_jit.branchTestPtr(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
+ JITCompiler::Jump notCell = m_jit.branchTest64(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
cachedGetById(node.codeOrigin, baseGPR, resultGPR, node.identifierNumber(), notCell, DontSpill);
@@ -3874,7 +3875,7 @@ void SpeculativeJIT::compile(Node& node)
if (node.structureSet().size() == 1) {
speculationCheckWithConditionalDirection(
- BadCache, JSValueRegs(), NoNode,
+ BadCache, JSValueRegs(base.gpr()), NoNode,
m_jit.branchWeakPtr(
JITCompiler::NotEqual,
JITCompiler::Address(base.gpr(), JSCell::structureOffset()),
@@ -3891,7 +3892,7 @@ void SpeculativeJIT::compile(Node& node)
done.append(m_jit.branchWeakPtr(JITCompiler::Equal, structure.gpr(), node.structureSet()[i]));
speculationCheckWithConditionalDirection(
- BadCache, JSValueRegs(), NoNode,
+ BadCache, JSValueRegs(base.gpr()), NoNode,
m_jit.branchWeakPtr(
JITCompiler::NotEqual, structure.gpr(), node.structureSet().last()),
node.op() == ForwardCheckStructure);
@@ -3905,6 +3906,13 @@ void SpeculativeJIT::compile(Node& node)
case StructureTransitionWatchpoint:
case ForwardStructureTransitionWatchpoint: {
+ // There is a fascinating question here of what to do about array profiling.
+ // We *could* try to tell the OSR exit about where the base of the access is.
+ // The DFG will have kept it alive, though it may not be in a register, and
+ // we shouldn't really load it since that could be a waste. For now though,
+ // we'll just rely on the fact that when a watchpoint fires then that's
+ // quite a hint already.
+
m_jit.addWeakReference(node.structure());
node.structure()->addTransitionWatchpoint(
speculationWatchpointWithConditionalDirection(
@@ -3988,7 +3996,7 @@ void SpeculativeJIT::compile(Node& node)
StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node.storageAccessDataIndex()];
- m_jit.loadPtr(JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue)), resultGPR);
+ m_jit.load64(JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue)), resultGPR);
jsValueResult(resultGPR, m_compileIndex);
break;
@@ -4010,7 +4018,7 @@ void SpeculativeJIT::compile(Node& node)
StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node.storageAccessDataIndex()];
- m_jit.storePtr(valueGPR, JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue)));
+ m_jit.store64(valueGPR, JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue)));
noResult(m_compileIndex);
break;
@@ -4055,7 +4063,7 @@ void SpeculativeJIT::compile(Node& node)
case GetGlobalVar: {
GPRTemporary result(this);
- m_jit.loadPtr(node.registerPointer(), result.gpr());
+ m_jit.load64(node.registerPointer(), result.gpr());
jsValueResult(result.gpr(), m_compileIndex);
break;
@@ -4071,7 +4079,7 @@ void SpeculativeJIT::compile(Node& node)
writeBarrier(m_jit.globalObjectFor(node.codeOrigin), value.gpr(), node.child1(), WriteBarrierForVariableAccess, scratchReg);
}
- m_jit.storePtr(value.gpr(), node.registerPointer());
+ m_jit.store64(value.gpr(), node.registerPointer());
noResult(m_compileIndex);
break;
@@ -4097,7 +4105,7 @@ void SpeculativeJIT::compile(Node& node)
writeBarrier(m_jit.globalObjectFor(node.codeOrigin), value.gpr(), node.child1(), WriteBarrierForVariableAccess, scratchReg);
}
- m_jit.storePtr(value.gpr(), node.registerPointer());
+ m_jit.store64(value.gpr(), node.registerPointer());
noResult(m_compileIndex);
break;
@@ -4111,10 +4119,10 @@ void SpeculativeJIT::compile(Node& node)
#if DFG_ENABLE(JIT_ASSERT)
GPRTemporary scratch(this);
GPRReg scratchGPR = scratch.gpr();
- m_jit.loadPtr(node.registerPointer(), scratchGPR);
- JITCompiler::Jump ok = m_jit.branchPtr(
+ m_jit.load64(node.registerPointer(), scratchGPR);
+ JITCompiler::Jump ok = m_jit.branch64(
JITCompiler::Equal, scratchGPR,
- TrustedImmPtr(bitwise_cast<void*>(JSValue::encode(node.registerPointer()->get()))));
+ TrustedImm64(JSValue::encode(node.registerPointer()->get())));
m_jit.breakpoint();
ok.link(&m_jit);
#endif
@@ -4144,9 +4152,9 @@ void SpeculativeJIT::compile(Node& node)
JSValueOperand value(this, node.child1());
GPRTemporary result(this);
- JITCompiler::Jump isCell = m_jit.branchTestPtr(JITCompiler::Zero, value.gpr(), GPRInfo::tagMaskRegister);
+ JITCompiler::Jump isCell = m_jit.branchTest64(JITCompiler::Zero, value.gpr(), GPRInfo::tagMaskRegister);
- m_jit.comparePtr(JITCompiler::Equal, value.gpr(), TrustedImm32(ValueUndefined), result.gpr());
+ m_jit.compare64(JITCompiler::Equal, value.gpr(), TrustedImm32(ValueUndefined), result.gpr());
JITCompiler::Jump done = m_jit.jump();
isCell.link(&m_jit);
@@ -4183,8 +4191,8 @@ void SpeculativeJIT::compile(Node& node)
GPRTemporary result(this, value);
m_jit.move(value.gpr(), result.gpr());
- m_jit.xorPtr(JITCompiler::TrustedImm32(ValueFalse), result.gpr());
- m_jit.testPtr(JITCompiler::Zero, result.gpr(), JITCompiler::TrustedImm32(static_cast<int32_t>(~1)), result.gpr());
+ m_jit.xor64(JITCompiler::TrustedImm32(ValueFalse), result.gpr());
+ m_jit.test64(JITCompiler::Zero, result.gpr(), JITCompiler::TrustedImm32(static_cast<int32_t>(~1)), result.gpr());
m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
break;
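
test64 and compare64 leave 0 or 1 in the result register, and the or32 with ValueFalse that follows turns that directly into a boxed boolean (0x6 or 0x7). In effect:

    #include <cstdint>

    static const uint64_t ValueFalse = 0x6;

    static uint64_t boxConditionResult(bool condition) // test64/compare64 + or32
    {
        return static_cast<uint64_t>(condition) | ValueFalse; // 0 -> 0x6, 1 -> 0x7
    }
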
@@ -4194,7 +4202,7 @@ void SpeculativeJIT::compile(Node& node)
JSValueOperand value(this, node.child1());
GPRTemporary result(this, value);
- m_jit.testPtr(JITCompiler::NonZero, value.gpr(), GPRInfo::tagTypeNumberRegister, result.gpr());
+ m_jit.test64(JITCompiler::NonZero, value.gpr(), GPRInfo::tagTypeNumberRegister, result.gpr());
m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
break;
@@ -4204,7 +4212,7 @@ void SpeculativeJIT::compile(Node& node)
JSValueOperand value(this, node.child1());
GPRTemporary result(this, value);
- JITCompiler::Jump isNotCell = m_jit.branchTestPtr(JITCompiler::NonZero, value.gpr(), GPRInfo::tagMaskRegister);
+ JITCompiler::Jump isNotCell = m_jit.branchTest64(JITCompiler::NonZero, value.gpr(), GPRInfo::tagMaskRegister);
m_jit.loadPtr(JITCompiler::Address(value.gpr(), JSCell::structureOffset()), result.gpr());
m_jit.compare8(JITCompiler::Equal, JITCompiler::Address(result.gpr(), Structure::typeInfoTypeOffset()), TrustedImm32(StringType), result.gpr());
@@ -4263,7 +4271,8 @@ void SpeculativeJIT::compile(Node& node)
case Resolve: {
flushRegisters();
GPRResult result(this);
- callOperation(operationResolve, result.gpr(), identifier(node.identifierNumber()));
+ ResolveOperationData& data = m_jit.graph().m_resolveOperationsData[node.resolveOperationsDataIndex()];
+ callOperation(operationResolve, result.gpr(), identifier(data.identifierNumber), resolveOperations(data.resolveOperationsIndex));
jsValueResult(result.gpr(), m_compileIndex);
break;
}
@@ -4271,7 +4280,8 @@ void SpeculativeJIT::compile(Node& node)
case ResolveBase: {
flushRegisters();
GPRResult result(this);
- callOperation(operationResolveBase, result.gpr(), identifier(node.identifierNumber()));
+ ResolveOperationData& data = m_jit.graph().m_resolveOperationsData[node.resolveOperationsDataIndex()];
+ callOperation(operationResolveBase, result.gpr(), identifier(data.identifierNumber), resolveOperations(data.resolveOperationsIndex), putToBaseOperation(data.putToBaseOperationIndex));
jsValueResult(result.gpr(), m_compileIndex);
break;
}
@@ -4279,7 +4289,8 @@ void SpeculativeJIT::compile(Node& node)
case ResolveBaseStrictPut: {
flushRegisters();
GPRResult result(this);
- callOperation(operationResolveBaseStrictPut, result.gpr(), identifier(node.identifierNumber()));
+ ResolveOperationData& data = m_jit.graph().m_resolveOperationsData[node.resolveOperationsDataIndex()];
+ callOperation(operationResolveBaseStrictPut, result.gpr(), identifier(data.identifierNumber), resolveOperations(data.resolveOperationsIndex), putToBaseOperation(data.putToBaseOperationIndex));
jsValueResult(result.gpr(), m_compileIndex);
break;
}
@@ -4294,16 +4305,16 @@ void SpeculativeJIT::compile(Node& node)
GPRReg resultGPR = result.gpr();
ResolveGlobalData& data = m_jit.graph().m_resolveGlobalData[node.resolveGlobalDataIndex()];
- GlobalResolveInfo* resolveInfoAddress = &(m_jit.codeBlock()->globalResolveInfo(data.resolveInfoIndex));
+ ResolveOperation* resolveOperationAddress = &(m_jit.codeBlock()->resolveOperations(data.resolveOperationsIndex)->data()[data.resolvePropertyIndex]);
// Check Structure of global object
m_jit.move(JITCompiler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)), globalObjectGPR);
- m_jit.move(JITCompiler::TrustedImmPtr(resolveInfoAddress), resolveInfoGPR);
- m_jit.loadPtr(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, structure)), resultGPR);
+ m_jit.move(JITCompiler::TrustedImmPtr(resolveOperationAddress), resolveInfoGPR);
+ m_jit.loadPtr(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(ResolveOperation, m_structure)), resultGPR);
JITCompiler::Jump structuresDontMatch = m_jit.branchPtr(JITCompiler::NotEqual, resultGPR, JITCompiler::Address(globalObjectGPR, JSCell::structureOffset()));
// Fast case
- m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), resolveInfoGPR);
+ m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(ResolveOperation, m_offset)), resolveInfoGPR);
#if DFG_ENABLE(JIT_ASSERT)
JITCompiler::Jump isOutOfLine = m_jit.branch32(JITCompiler::GreaterThanOrEqual, resolveInfoGPR, TrustedImm32(firstOutOfLineOffset));
m_jit.breakpoint();
@@ -4312,7 +4323,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.neg32(resolveInfoGPR);
m_jit.signExtend32ToPtr(resolveInfoGPR, resolveInfoGPR);
m_jit.loadPtr(JITCompiler::Address(globalObjectGPR, JSObject::butterflyOffset()), resultGPR);
- m_jit.loadPtr(JITCompiler::BaseIndex(resultGPR, resolveInfoGPR, JITCompiler::ScalePtr, (firstOutOfLineOffset - 2) * static_cast<ptrdiff_t>(sizeof(JSValue))), resultGPR);
+ m_jit.load64(JITCompiler::BaseIndex(resultGPR, resolveInfoGPR, JITCompiler::TimesEight, (firstOutOfLineOffset - 2) * static_cast<ptrdiff_t>(sizeof(JSValue))), resultGPR);
addSlowPathGenerator(
slowPathCall(
@@ -4335,7 +4346,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.move(valueGPR, resultGPR);
- JITCompiler::Jump notCreated = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR);
+ JITCompiler::Jump notCreated = m_jit.branchTest64(JITCompiler::Zero, resultGPR);
addSlowPathGenerator(
slowPathCall(notCreated, this, operationCreateActivation, resultGPR));
@@ -4353,7 +4364,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.move(valueGPR, resultGPR);
- JITCompiler::Jump notCreated = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR);
+ JITCompiler::Jump notCreated = m_jit.branchTest64(JITCompiler::Zero, resultGPR);
if (node.codeOrigin.inlineCallFrame) {
addSlowPathGenerator(
@@ -4377,17 +4388,17 @@ void SpeculativeJIT::compile(Node& node)
GPRReg activationValueGPR = activationValue.gpr();
GPRReg scratchGPR = scratch.gpr();
- JITCompiler::Jump notCreated = m_jit.branchTestPtr(JITCompiler::Zero, activationValueGPR);
+ JITCompiler::Jump notCreated = m_jit.branchTest64(JITCompiler::Zero, activationValueGPR);
SharedSymbolTable* symbolTable = m_jit.symbolTableFor(node.codeOrigin);
int registersOffset = JSActivation::registersOffset(symbolTable);
int captureEnd = symbolTable->captureEnd();
for (int i = symbolTable->captureStart(); i < captureEnd; ++i) {
- m_jit.loadPtr(
+ m_jit.load64(
JITCompiler::Address(
GPRInfo::callFrameRegister, i * sizeof(Register)), scratchGPR);
- m_jit.storePtr(
+ m_jit.store64(
scratchGPR, JITCompiler::Address(
activationValueGPR, registersOffset + i * sizeof(Register)));
}
@@ -4405,7 +4416,7 @@ void SpeculativeJIT::compile(Node& node)
GPRReg unmodifiedArgumentsValueGPR = unmodifiedArgumentsValue.gpr();
GPRReg activationValueGPR = activationValue.gpr();
- JITCompiler::Jump created = m_jit.branchTestPtr(JITCompiler::NonZero, unmodifiedArgumentsValueGPR);
+ JITCompiler::Jump created = m_jit.branchTest64(JITCompiler::NonZero, unmodifiedArgumentsValueGPR);
if (node.codeOrigin.inlineCallFrame) {
addSlowPathGenerator(
@@ -4431,7 +4442,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.graph().argumentsRegisterFor(node.codeOrigin)).m_type)) {
speculationCheck(
ArgumentsEscaped, JSValueRegs(), NoNode,
- m_jit.branchTestPtr(
+ m_jit.branchTest64(
JITCompiler::NonZero,
JITCompiler::addressFor(
m_jit.argumentsRegisterFor(node.codeOrigin))));
@@ -4448,22 +4459,19 @@ void SpeculativeJIT::compile(Node& node)
GPRTemporary result(this);
GPRReg resultGPR = result.gpr();
- JITCompiler::Jump created = m_jit.branchTestPtr(
+ JITCompiler::Jump created = m_jit.branchTest64(
JITCompiler::NonZero,
JITCompiler::addressFor(
m_jit.argumentsRegisterFor(node.codeOrigin)));
if (node.codeOrigin.inlineCallFrame) {
m_jit.move(
- ImmPtr(
- bitwise_cast<void*>(
- JSValue::encode(
- jsNumber(node.codeOrigin.inlineCallFrame->arguments.size() - 1)))),
+ Imm64(JSValue::encode(jsNumber(node.codeOrigin.inlineCallFrame->arguments.size() - 1))),
resultGPR);
} else {
m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), resultGPR);
m_jit.sub32(TrustedImm32(1), resultGPR);
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, resultGPR);
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, resultGPR);
}
// FIXME: the slow path generator should perform a forward speculation that the
@@ -4490,7 +4498,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.graph().argumentsRegisterFor(node.codeOrigin)).m_type)) {
speculationCheck(
ArgumentsEscaped, JSValueRegs(), NoNode,
- m_jit.branchTestPtr(
+ m_jit.branchTest64(
JITCompiler::NonZero,
JITCompiler::addressFor(
m_jit.argumentsRegisterFor(node.codeOrigin))));
@@ -4529,7 +4537,7 @@ void SpeculativeJIT::compile(Node& node)
OBJECT_OFFSETOF(SlowArgument, index)),
resultGPR);
m_jit.signExtend32ToPtr(resultGPR, resultGPR);
- m_jit.loadPtr(
+ m_jit.load64(
JITCompiler::BaseIndex(
GPRInfo::callFrameRegister, resultGPR, JITCompiler::TimesEight, m_jit.offsetOfLocals(node.codeOrigin)),
resultGPR);
@@ -4540,7 +4548,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.neg32(resultGPR);
m_jit.signExtend32ToPtr(resultGPR, resultGPR);
- m_jit.loadPtr(
+ m_jit.load64(
JITCompiler::BaseIndex(
GPRInfo::callFrameRegister, resultGPR, JITCompiler::TimesEight, m_jit.offsetOfArgumentsIncludingThis(node.codeOrigin)),
resultGPR);
@@ -4558,7 +4566,7 @@ void SpeculativeJIT::compile(Node& node)
JITCompiler::JumpList slowPath;
slowPath.append(
- m_jit.branchTestPtr(
+ m_jit.branchTest64(
JITCompiler::NonZero,
JITCompiler::addressFor(
m_jit.argumentsRegisterFor(node.codeOrigin))));
@@ -4594,7 +4602,7 @@ void SpeculativeJIT::compile(Node& node)
OBJECT_OFFSETOF(SlowArgument, index)),
resultGPR);
m_jit.signExtend32ToPtr(resultGPR, resultGPR);
- m_jit.loadPtr(
+ m_jit.load64(
JITCompiler::BaseIndex(
GPRInfo::callFrameRegister, resultGPR, JITCompiler::TimesEight, m_jit.offsetOfLocals(node.codeOrigin)),
resultGPR);
@@ -4605,7 +4613,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.neg32(resultGPR);
m_jit.signExtend32ToPtr(resultGPR, resultGPR);
- m_jit.loadPtr(
+ m_jit.load64(
JITCompiler::BaseIndex(
GPRInfo::callFrameRegister, resultGPR, JITCompiler::TimesEight, m_jit.offsetOfArgumentsIncludingThis(node.codeOrigin)),
resultGPR);
@@ -4636,7 +4644,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.graph().argumentsRegisterFor(node.codeOrigin)).m_type));
speculationCheck(
ArgumentsEscaped, JSValueRegs(), NoNode,
- m_jit.branchTestPtr(
+ m_jit.branchTest64(
JITCompiler::NonZero,
JITCompiler::addressFor(
m_jit.argumentsRegisterFor(node.codeOrigin))));
@@ -4657,7 +4665,7 @@ void SpeculativeJIT::compile(Node& node)
m_jit.move(valueGPR, resultGPR);
- JITCompiler::Jump notCreated = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR);
+ JITCompiler::Jump notCreated = m_jit.branchTest64(JITCompiler::Zero, resultGPR);
addSlowPathGenerator(
slowPathCall(
@@ -4672,6 +4680,11 @@ void SpeculativeJIT::compile(Node& node)
compileNewFunctionExpression(node);
break;
+ case GarbageValue:
+ // We should never get to the point of code emission for a GarbageValue.
+ CRASH();
+ break;
+
case ForceOSRExit: {
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
diff --git a/Source/JavaScriptCore/dfg/DFGStructureCheckHoistingPhase.cpp b/Source/JavaScriptCore/dfg/DFGStructureCheckHoistingPhase.cpp
index 2e44af2d7..22b9395b5 100644
--- a/Source/JavaScriptCore/dfg/DFGStructureCheckHoistingPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGStructureCheckHoistingPhase.cpp
@@ -144,6 +144,8 @@ public:
m_graph.vote(node, VoteOther);
break;
}
+ case GarbageValue:
+ break;
default:
m_graph.vote(node, VoteOther);
diff --git a/Source/JavaScriptCore/dfg/DFGThunks.cpp b/Source/JavaScriptCore/dfg/DFGThunks.cpp
index 25fcad10a..74d1967a8 100644
--- a/Source/JavaScriptCore/dfg/DFGThunks.cpp
+++ b/Source/JavaScriptCore/dfg/DFGThunks.cpp
@@ -44,8 +44,13 @@ MacroAssemblerCodeRef osrExitGenerationThunkGenerator(JSGlobalData* globalData)
ScratchBuffer* scratchBuffer = globalData->scratchBufferForSize(scratchSize);
EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
- for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i)
- jit.storePtr(GPRInfo::toRegister(i), buffer + i);
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+#if USE(JSVALUE64)
+ jit.store64(GPRInfo::toRegister(i), buffer + i);
+#else
+ jit.store32(GPRInfo::toRegister(i), buffer + i);
+#endif
+ }
for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
jit.move(MacroAssembler::TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
jit.storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
@@ -71,8 +76,13 @@ MacroAssemblerCodeRef osrExitGenerationThunkGenerator(JSGlobalData* globalData)
jit.move(MacroAssembler::TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
jit.loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
}
- for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i)
- jit.loadPtr(buffer + i, GPRInfo::toRegister(i));
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+#if USE(JSVALUE64)
+ jit.load64(buffer + i, GPRInfo::toRegister(i));
+#else
+ jit.load32(buffer + i, GPRInfo::toRegister(i));
+#endif
+ }
jit.jump(MacroAssembler::AbsoluteAddress(&globalData->osrExitJumpDestination));
@@ -115,7 +125,11 @@ MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(JSGlobalData* glob
GPRInfo::callFrameRegister,
static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::CallerFrame),
GPRInfo::callFrameRegister);
+#if USE(JSVALUE64)
+ jit.peek64(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
+#else
jit.peek(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
+#endif
jit.setupArgumentsWithExecState(GPRInfo::nonPreservedNonReturnGPR);
jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
emitPointerValidation(jit, GPRInfo::nonArgGPR0);
@@ -138,7 +152,11 @@ static void slowPathFor(
GPRInfo::callFrameRegister,
static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ReturnPC));
jit.storePtr(GPRInfo::callFrameRegister, &globalData->topCallFrame);
+#if USE(JSVALUE64)
+ jit.poke64(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
+#else
jit.poke(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
+#endif
jit.setupArgumentsExecState();
jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
emitPointerValidation(jit, GPRInfo::nonArgGPR0);
@@ -211,7 +229,7 @@ static MacroAssemblerCodeRef virtualForThunkGenerator(
#if USE(JSVALUE64)
slowCase.append(
- jit.branchTestPtr(
+ jit.branchTest64(
CCallHelpers::NonZero, GPRInfo::nonArgGPR0, GPRInfo::tagMaskRegister));
#else
slowCase.append(
@@ -245,7 +263,7 @@ static MacroAssemblerCodeRef virtualForThunkGenerator(
CCallHelpers::Address(GPRInfo::nonArgGPR0, JSFunction::offsetOfScopeChain()),
GPRInfo::nonArgGPR1);
#if USE(JSVALUE64)
- jit.storePtr(
+ jit.store64(
GPRInfo::nonArgGPR1,
CCallHelpers::Address(
GPRInfo::callFrameRegister,
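
The thunk hunks above show the pattern this patch applies throughout: pointer-width storePtr/loadPtr/branchTestPtr become explicitly sized store64/load64/branchTest64 under USE(JSVALUE64), with 32-bit forms kept for JSVALUE32_64. The motivation is that an EncodedJSValue is 64 bits on every target, so operand width must track the value representation rather than the pointer size. A minimal C++ sketch of the distinction (illustrative names, not WebKit API):

    #include <cstdint>
    #include <cstring>

    // One 64-bit scratch slot per register, as in the OSR exit thunk's buffer.
    void saveRegister(uint64_t* slot, uint64_t gprValue, bool use64BitValues)
    {
        if (use64BitValues) {
            // store64: the whole JSValue encoding lives in one GPR.
            std::memcpy(slot, &gprValue, sizeof(uint64_t));
        } else {
            // store32: on JSVALUE32_64 a GPR holds only a 32-bit half
            // (payload or tag), so only 32 bits are meaningful.
            uint32_t half = static_cast<uint32_t>(gprValue);
            std::memcpy(slot, &half, sizeof(uint32_t));
        }
    }
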
diff --git a/Source/JavaScriptCore/heap/CopiedSpace.cpp b/Source/JavaScriptCore/heap/CopiedSpace.cpp
index cedafee3a..c228f9460 100644
--- a/Source/JavaScriptCore/heap/CopiedSpace.cpp
+++ b/Source/JavaScriptCore/heap/CopiedSpace.cpp
@@ -173,6 +173,7 @@ void CopiedSpace::doneFillingBlock(CopiedBlock* block, CopiedBlock** exchange)
{
MutexLocker locker(m_loanedBlocksLock);
ASSERT(m_numberOfLoanedBlocks > 0);
+ ASSERT(m_inCopyingPhase);
m_numberOfLoanedBlocks--;
if (!m_numberOfLoanedBlocks)
m_loanedBlocksCondition.signal();
@@ -199,6 +200,22 @@ void CopiedSpace::startedCopying()
totalUsableBytes += block->payloadCapacity();
}
+ CopiedBlock* block = m_oversizeBlocks.head();
+ while (block) {
+ CopiedBlock* next = block->next();
+ if (block->isPinned()) {
+ m_blockFilter.add(reinterpret_cast<Bits>(block));
+ totalLiveBytes += block->payloadCapacity();
+ totalUsableBytes += block->payloadCapacity();
+ block->didSurviveGC();
+ } else {
+ m_oversizeBlocks.remove(block);
+ m_blockSet.remove(block);
+ m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(block));
+ }
+ block = next;
+ }
+
double markedSpaceBytes = m_heap->objectSpace().capacity();
double totalFragmentation = ((double)totalLiveBytes + markedSpaceBytes) / ((double)totalUsableBytes + markedSpaceBytes);
m_shouldDoCopyPhase = totalFragmentation <= Options::minHeapUtilization();
@@ -224,31 +241,13 @@ void CopiedSpace::doneCopying()
while (!m_fromSpace->isEmpty()) {
CopiedBlock* block = m_fromSpace->removeHead();
- if (block->isPinned() || !m_shouldDoCopyPhase) {
- block->didSurviveGC();
- // We don't add the block to the blockSet because it was never removed.
- ASSERT(m_blockSet.contains(block));
- m_blockFilter.add(reinterpret_cast<Bits>(block));
- m_toSpace->push(block);
- continue;
- }
-
- m_blockSet.remove(block);
- m_heap->blockAllocator().deallocate(CopiedBlock::destroy(block));
- }
-
- CopiedBlock* curr = m_oversizeBlocks.head();
- while (curr) {
- CopiedBlock* next = curr->next();
- if (!curr->isPinned()) {
- m_oversizeBlocks.remove(curr);
- m_blockSet.remove(curr);
- m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(curr));
- } else {
- m_blockFilter.add(reinterpret_cast<Bits>(curr));
- curr->didSurviveGC();
- }
- curr = next;
+ // All non-pinned blocks in from-space should have been reclaimed as they were evacuated.
+ ASSERT(block->isPinned() || !m_shouldDoCopyPhase);
+ block->didSurviveGC();
+ // We don't add the block to the blockSet because it was never removed.
+ ASSERT(m_blockSet.contains(block));
+ m_blockFilter.add(reinterpret_cast<Bits>(block));
+ m_toSpace->push(block);
}
if (!m_toSpace->head())
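
With this change, unpinned oversize blocks are reclaimed in startedCopying() rather than doneCopying(), so by the time doneCopying() runs every surviving from-space block must be pinned (or the copy phase was skipped) and the old pruning loop reduces to an assertion. The pruning loop itself uses the capture-next-before-unlink idiom for a linked list whose nodes may be freed mid-walk; a self-contained sketch, with Block standing in for CopiedBlock:

    struct Block {
        Block* next;
        bool pinned;
    };

    // Walk a singly linked list, freeing unpinned blocks. 'next' is read
    // before any unlink/delete, since the node is invalid afterwards.
    Block* pruneUnpinned(Block* head)
    {
        Block** link = &head;
        for (Block* block = head; block; ) {
            Block* next = block->next;
            if (block->pinned)
                link = &block->next;
            else {
                *link = next;  // unlink (the real code also removes it from m_blockSet)
                delete block;  // deallocateCustomSize() in CopiedSpace
            }
            block = next;
        }
        return head;
    }
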
diff --git a/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h b/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h
index 01e816793..c244015e7 100644
--- a/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h
+++ b/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h
@@ -113,6 +113,7 @@ inline void CopiedSpace::recycleBorrowedBlock(CopiedBlock* block)
{
MutexLocker locker(m_loanedBlocksLock);
ASSERT(m_numberOfLoanedBlocks > 0);
+ ASSERT(m_inCopyingPhase);
m_numberOfLoanedBlocks--;
if (!m_numberOfLoanedBlocks)
m_loanedBlocksCondition.signal();
diff --git a/Source/JavaScriptCore/heap/CopyVisitorInlineMethods.h b/Source/JavaScriptCore/heap/CopyVisitorInlineMethods.h
index 73400750f..eb7bd2e82 100644
--- a/Source/JavaScriptCore/heap/CopyVisitorInlineMethods.h
+++ b/Source/JavaScriptCore/heap/CopyVisitorInlineMethods.h
@@ -56,10 +56,8 @@ private:
inline bool CopyVisitor::checkIfShouldCopy(void* oldPtr, size_t bytes)
{
- if (CopiedSpace::isOversize(bytes)) {
- ASSERT(CopiedSpace::oversizeBlockFor(oldPtr)->isPinned());
+ if (CopiedSpace::isOversize(bytes))
return false;
- }
if (CopiedSpace::blockFor(oldPtr)->isPinned())
return false;
diff --git a/Source/JavaScriptCore/heap/GCThread.cpp b/Source/JavaScriptCore/heap/GCThread.cpp
index ea43456bd..ce3bbedc9 100644
--- a/Source/JavaScriptCore/heap/GCThread.cpp
+++ b/Source/JavaScriptCore/heap/GCThread.cpp
@@ -70,8 +70,16 @@ CopyVisitor* GCThread::copyVisitor()
GCPhase GCThread::waitForNextPhase()
{
MutexLocker locker(m_shared.m_phaseLock);
+ while (m_shared.m_gcThreadsShouldWait)
+ m_shared.m_phaseCondition.wait(m_shared.m_phaseLock);
+
+ m_shared.m_numberOfActiveGCThreads--;
+ if (!m_shared.m_numberOfActiveGCThreads)
+ m_shared.m_activityCondition.signal();
+
while (m_shared.m_currentPhase == NoPhase)
m_shared.m_phaseCondition.wait(m_shared.m_phaseLock);
+ m_shared.m_numberOfActiveGCThreads++;
return m_shared.m_currentPhase;
}
@@ -84,7 +92,7 @@ void GCThread::gcThreadMain()
// Wait for the main thread to finish creating and initializing us. The main thread grabs this lock before
// creating this thread. We aren't guaranteed to have a valid threadID until the main thread releases this lock.
{
- MutexLocker locker(m_shared.m_markingLock);
+ MutexLocker locker(m_shared.m_phaseLock);
}
{
ParallelModeEnabler enabler(*m_slotVisitor);
diff --git a/Source/JavaScriptCore/heap/GCThreadSharedData.cpp b/Source/JavaScriptCore/heap/GCThreadSharedData.cpp
index d9946d589..446b41c2f 100644
--- a/Source/JavaScriptCore/heap/GCThreadSharedData.cpp
+++ b/Source/JavaScriptCore/heap/GCThreadSharedData.cpp
@@ -61,13 +61,16 @@ GCThreadSharedData::GCThreadSharedData(JSGlobalData* globalData)
, m_parallelMarkersShouldExit(false)
, m_blocksToCopy(globalData->heap.m_blockSnapshot)
, m_copyIndex(0)
+ , m_numberOfActiveGCThreads(0)
+ , m_gcThreadsShouldWait(false)
, m_currentPhase(NoPhase)
{
m_copyLock.Init();
#if ENABLE(PARALLEL_GC)
// Grab the lock so the new GC threads can be properly initialized before they start running.
- MutexLocker locker(m_markingLock);
+ MutexLocker locker(m_phaseLock);
for (unsigned i = 1; i < Options::numberOfGCMarkers(); ++i) {
+ m_numberOfActiveGCThreads++;
SlotVisitor* slotVisitor = new SlotVisitor(*this);
CopyVisitor* copyVisitor = new CopyVisitor(*this);
GCThread* newThread = new GCThread(*this, slotVisitor, copyVisitor);
@@ -75,6 +78,10 @@ GCThreadSharedData::GCThreadSharedData(JSGlobalData* globalData)
newThread->initializeThreadID(threadID);
m_gcThreads.append(newThread);
}
+
+ // Wait for all the GCThreads to park in waitForNextPhase() before continuing.
+ while (m_numberOfActiveGCThreads)
+ m_activityCondition.wait(m_phaseLock);
#endif
}
@@ -87,6 +94,7 @@ GCThreadSharedData::~GCThreadSharedData()
MutexLocker phaseLocker(m_phaseLock);
ASSERT(m_currentPhase == NoPhase);
m_parallelMarkersShouldExit = true;
+ m_gcThreadsShouldWait = false;
m_currentPhase = Exit;
m_phaseCondition.broadcast();
}
@@ -115,24 +123,44 @@ void GCThreadSharedData::reset()
}
}
-void GCThreadSharedData::didStartMarking()
+void GCThreadSharedData::startNextPhase(GCPhase phase)
{
- MutexLocker markingLocker(m_markingLock);
MutexLocker phaseLocker(m_phaseLock);
+ ASSERT(!m_gcThreadsShouldWait);
ASSERT(m_currentPhase == NoPhase);
- m_currentPhase = Mark;
- m_parallelMarkersShouldExit = false;
+ m_gcThreadsShouldWait = true;
+ m_currentPhase = phase;
m_phaseCondition.broadcast();
}
-void GCThreadSharedData::didFinishMarking()
+void GCThreadSharedData::endCurrentPhase()
+{
+ ASSERT(m_gcThreadsShouldWait);
+ MutexLocker locker(m_phaseLock);
+ m_currentPhase = NoPhase;
+ m_gcThreadsShouldWait = false;
+ m_phaseCondition.broadcast();
+ while (m_numberOfActiveGCThreads)
+ m_activityCondition.wait(m_phaseLock);
+}
+
+void GCThreadSharedData::didStartMarking()
{
MutexLocker markingLocker(m_markingLock);
- MutexLocker phaseLocker(m_phaseLock);
+ m_parallelMarkersShouldExit = false;
+ startNextPhase(Mark);
+}
+
+void GCThreadSharedData::didFinishMarking()
+{
+ {
+ MutexLocker markingLocker(m_markingLock);
+ m_parallelMarkersShouldExit = true;
+ m_markingCondition.broadcast();
+ }
+
ASSERT(m_currentPhase == Mark);
- m_currentPhase = NoPhase;
- m_parallelMarkersShouldExit = true;
- m_markingCondition.broadcast();
+ endCurrentPhase();
}
void GCThreadSharedData::didStartCopying()
@@ -150,18 +178,13 @@ void GCThreadSharedData::didStartCopying()
for (size_t i = 0; i < m_gcThreads.size(); i++)
m_gcThreads[i]->copyVisitor()->startCopying();
- MutexLocker locker(m_phaseLock);
- ASSERT(m_currentPhase == NoPhase);
- m_currentPhase = Copy;
- m_phaseCondition.broadcast();
+ startNextPhase(Copy);
}
void GCThreadSharedData::didFinishCopying()
{
- MutexLocker locker(m_phaseLock);
ASSERT(m_currentPhase == Copy);
- m_currentPhase = NoPhase;
- m_phaseCondition.broadcast();
+ endCurrentPhase();
}
} // namespace JSC
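
Taken together, startNextPhase()/endCurrentPhase() and the reworked waitForNextPhase() form a rendezvous: the main thread opens a phase, workers run it, and endCurrentPhase() does not return until every worker has parked again, so phases can never overlap and a worker can never sleep through a phase notification. A compilable sketch of the same protocol using std::mutex/std::condition_variable in place of WTF's Mutex/ThreadCondition (the class is illustrative; field names mirror the patch):

    #include <condition_variable>
    #include <mutex>

    struct PhaseGate {
        std::mutex lock;                           // m_phaseLock
        std::condition_variable phaseCondition;    // m_phaseCondition
        std::condition_variable activityCondition; // m_activityCondition
        int currentPhase = 0;                      // 0 plays the role of NoPhase
        unsigned activeThreads = 0;                // pre-incremented once per worker at startup
        bool threadsShouldWait = false;            // m_gcThreadsShouldWait

        void startNextPhase(int phase)             // main thread
        {
            std::unique_lock<std::mutex> locker(lock);
            threadsShouldWait = true;              // workers must park after this phase
            currentPhase = phase;
            phaseCondition.notify_all();
        }

        void endCurrentPhase()                     // main thread
        {
            std::unique_lock<std::mutex> locker(lock);
            currentPhase = 0;
            threadsShouldWait = false;
            phaseCondition.notify_all();
            while (activeThreads)                  // don't return until all workers park
                activityCondition.wait(locker);
        }

        int waitForNextPhase()                     // each worker thread
        {
            std::unique_lock<std::mutex> locker(lock);
            while (threadsShouldWait)              // drain the previous phase first
                phaseCondition.wait(locker);
            if (!--activeThreads)                  // announce this worker is parked
                activityCondition.notify_one();
            while (!currentPhase)                  // sleep until the next phase opens
                phaseCondition.wait(locker);
            ++activeThreads;
            return currentPhase;
        }
    };
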
diff --git a/Source/JavaScriptCore/heap/GCThreadSharedData.h b/Source/JavaScriptCore/heap/GCThreadSharedData.h
index bd48d9263..f341afc04 100644
--- a/Source/JavaScriptCore/heap/GCThreadSharedData.h
+++ b/Source/JavaScriptCore/heap/GCThreadSharedData.h
@@ -74,6 +74,8 @@ private:
friend class CopyVisitor;
void getNextBlocksToCopy(size_t&, size_t&);
+ void startNextPhase(GCPhase);
+ void endCurrentPhase();
JSGlobalData* m_globalData;
CopiedSpace* m_copiedSpace;
@@ -100,6 +102,9 @@ private:
Mutex m_phaseLock;
ThreadCondition m_phaseCondition;
+ ThreadCondition m_activityCondition;
+ unsigned m_numberOfActiveGCThreads;
+ bool m_gcThreadsShouldWait;
GCPhase m_currentPhase;
ListableHandler<WeakReferenceHarvester>::List m_weakReferenceHarvesters;
diff --git a/Source/JavaScriptCore/heap/Heap.cpp b/Source/JavaScriptCore/heap/Heap.cpp
index 772d85144..cd3393aa2 100644
--- a/Source/JavaScriptCore/heap/Heap.cpp
+++ b/Source/JavaScriptCore/heap/Heap.cpp
@@ -612,10 +612,9 @@ void Heap::copyBackingStores()
m_storageSpace.startedCopying();
if (m_storageSpace.shouldDoCopyPhase()) {
m_sharedData.didStartCopying();
- CopyVisitor& visitor = m_copyVisitor;
- visitor.startCopying();
- visitor.copyFromShared();
- visitor.doneCopying();
+ m_copyVisitor.startCopying();
+ m_copyVisitor.copyFromShared();
+ m_copyVisitor.doneCopying();
// We need to wait for everybody to finish and return their CopiedBlocks
// before signaling that the phase is complete.
m_storageSpace.doneCopying();
diff --git a/Source/JavaScriptCore/heap/SlotVisitor.cpp b/Source/JavaScriptCore/heap/SlotVisitor.cpp
index 26d056feb..7a30debda 100644
--- a/Source/JavaScriptCore/heap/SlotVisitor.cpp
+++ b/Source/JavaScriptCore/heap/SlotVisitor.cpp
@@ -10,6 +10,7 @@
#include "JSGlobalData.h"
#include "JSObject.h"
#include "JSString.h"
+#include <wtf/StackStats.h>
namespace JSC {
@@ -58,6 +59,7 @@ void SlotVisitor::reset()
void SlotVisitor::append(ConservativeRoots& conservativeRoots)
{
+ StackStats::probe();
JSCell** roots = conservativeRoots.roots();
size_t size = conservativeRoots.size();
for (size_t i = 0; i < size; ++i)
@@ -66,6 +68,7 @@ void SlotVisitor::append(ConservativeRoots& conservativeRoots)
ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell)
{
+ StackStats::probe();
#if ENABLE(SIMPLE_HEAP_PROFILING)
m_visitedTypeCounts.count(cell);
#endif
@@ -92,6 +95,7 @@ ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell
void SlotVisitor::donateKnownParallel()
{
+ StackStats::probe();
// NOTE: Because we re-try often, we can afford to be conservative, and
// assume that donating is not profitable.
@@ -119,6 +123,7 @@ void SlotVisitor::donateKnownParallel()
void SlotVisitor::drain()
{
+ StackStats::probe();
ASSERT(m_isInParallelMode);
#if ENABLE(PARALLEL_GC)
@@ -144,6 +149,7 @@ void SlotVisitor::drain()
void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
{
+ StackStats::probe();
ASSERT(m_isInParallelMode);
ASSERT(Options::numberOfGCMarkers());
@@ -221,6 +227,7 @@ void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
void SlotVisitor::mergeOpaqueRoots()
{
+ StackStats::probe();
ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
{
MutexLocker locker(m_shared.m_opaqueRootsLock);
@@ -276,6 +283,7 @@ ALWAYS_INLINE void SlotVisitor::internalAppend(JSValue* slot)
// as it can change the JSValue pointed to by the argument when the original JSValue
// is a string that contains the same contents as another string.
+ StackStats::probe();
ASSERT(slot);
JSValue value = *slot;
ASSERT(value);
@@ -309,12 +317,14 @@ ALWAYS_INLINE void SlotVisitor::internalAppend(JSValue* slot)
void SlotVisitor::harvestWeakReferences()
{
+ StackStats::probe();
for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
current->visitWeakReferences(*this);
}
void SlotVisitor::finalizeUnconditionalFinalizers()
{
+ StackStats::probe();
while (m_shared.m_unconditionalFinalizers.hasNext())
m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
}
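
The probes added above are zero-cost-when-disabled markers at each recursion point in the marking code. WTF's StackStats tracks native stack usage; as a rough sketch of the shape of such instrumentation, assuming a downward-growing stack (the real implementation is more involved and gated behind its own ENABLE flag):

    #include <cstdint>

    namespace StackProbeSketch {
        // Lowest stack address observed on this thread (stacks grow down here).
        static thread_local uintptr_t s_lowWater = ~static_cast<uintptr_t>(0);

        // StackStats::probe() analogue: sample the current stack extent.
        inline void probe()
        {
            char marker;
            uintptr_t here = reinterpret_cast<uintptr_t>(&marker);
            if (here < s_lowWater)
                s_lowWater = here;
        }

        // StackStats::CheckPoint analogue: an RAII scope marker placed at
        // VM entry points so usage can be attributed per entry.
        struct CheckPoint {
            uintptr_t saved;
            CheckPoint() : saved(s_lowWater) { probe(); }
            ~CheckPoint() { s_lowWater = saved; }
        };
    }
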
diff --git a/Source/JavaScriptCore/interpreter/Interpreter.cpp b/Source/JavaScriptCore/interpreter/Interpreter.cpp
index 00b283393..8c09019da 100644
--- a/Source/JavaScriptCore/interpreter/Interpreter.cpp
+++ b/Source/JavaScriptCore/interpreter/Interpreter.cpp
@@ -65,6 +65,7 @@
#include "StrongInlines.h"
#include <limits.h>
#include <stdio.h>
+#include <wtf/StackStats.h>
#include <wtf/Threading.h>
#include <wtf/text/StringBuilder.h>
@@ -307,11 +308,7 @@ void Interpreter::dumpRegisters(CallFrame* callFrame)
JSValue v = it->jsValue();
int registerNumber = it - callFrame->registers();
String name = codeBlock->nameForRegister(registerNumber);
-#if USE(JSVALUE32_64)
- dataLog("[r% 3d %14s] | %10p | %-16s 0x%llx \n", registerNumber, name.ascii().data(), it, v.description(), JSValue::encode(v));
-#else
- dataLog("[r% 3d %14s] | %10p | %-16s %p \n", registerNumber, name.ascii().data(), it, v.description(), JSValue::encode(v));
-#endif
+ dataLog("[r% 3d %14s] | %10p | %-16s 0x%lld \n", registerNumber, name.ascii().data(), it, v.description(), (long long)JSValue::encode(v));
it++;
}
@@ -346,11 +343,7 @@ void Interpreter::dumpRegisters(CallFrame* callFrame)
JSValue v = it->jsValue();
int registerNumber = it - callFrame->registers();
String name = codeBlock->nameForRegister(registerNumber);
-#if USE(JSVALUE32_64)
- dataLog("[r% 3d %14s] | %10p | %-16s 0x%llx \n", registerNumber, name.ascii().data(), it, v.description(), JSValue::encode(v));
-#else
- dataLog("[r% 3d %14s] | %10p | %-16s %p \n", registerNumber, name.ascii().data(), it, v.description(), JSValue::encode(v));
-#endif
+ dataLog("[r% 3d %14s] | %10p | %-16s 0x%lld \n", registerNumber, name.ascii().data(), it, v.description(), (long long)JSValue::encode(v));
++it;
++registerCount;
} while (it != end);
@@ -361,11 +354,7 @@ void Interpreter::dumpRegisters(CallFrame* callFrame)
if (it != end) {
do {
JSValue v = (*it).jsValue();
-#if USE(JSVALUE32_64)
- dataLog("[r% 3d] | %10p | %-16s 0x%llx \n", registerCount, it, v.description(), JSValue::encode(v));
-#else
- dataLog("[r% 3d] | %10p | %-16s %p \n", registerCount, it, v.description(), JSValue::encode(v));
-#endif
+ dataLog("[r% 3d] | %10p | %-16s 0x%lld \n", registerCount, it, v.description(), (long long)JSValue::encode(v));
++it;
++registerCount;
} while (it != end);
@@ -764,6 +753,7 @@ JSValue Interpreter::execute(ProgramExecutable* program, CallFrame* callFrame, J
if (callFrame->globalData().isCollectorBusy())
CRASH();
+ StackStats::CheckPoint stackCheckPoint;
if (m_reentryDepth >= MaxSmallThreadReentryDepth && m_reentryDepth >= callFrame->globalData().maxReentryDepth)
return checkedReturn(throwStackOverflowError(callFrame));
@@ -926,6 +916,7 @@ JSValue Interpreter::executeCall(CallFrame* callFrame, JSObject* function, CallT
if (callFrame->globalData().isCollectorBusy())
return jsNull();
+ StackStats::CheckPoint stackCheckPoint;
if (m_reentryDepth >= MaxSmallThreadReentryDepth && m_reentryDepth >= callFrame->globalData().maxReentryDepth)
return checkedReturn(throwStackOverflowError(callFrame));
@@ -1021,6 +1012,7 @@ JSObject* Interpreter::executeConstruct(CallFrame* callFrame, JSObject* construc
if (callFrame->globalData().isCollectorBusy())
return checkedReturn(throwStackOverflowError(callFrame));
+ StackStats::CheckPoint stackCheckPoint;
if (m_reentryDepth >= MaxSmallThreadReentryDepth && m_reentryDepth >= callFrame->globalData().maxReentryDepth)
return checkedReturn(throwStackOverflowError(callFrame));
@@ -1118,6 +1110,7 @@ CallFrameClosure Interpreter::prepareForRepeatCall(FunctionExecutable* functionE
if (callFrame->globalData().isCollectorBusy())
return CallFrameClosure();
+ StackStats::CheckPoint stackCheckPoint;
if (m_reentryDepth >= MaxSmallThreadReentryDepth && m_reentryDepth >= callFrame->globalData().maxReentryDepth) {
throwStackOverflowError(callFrame);
return CallFrameClosure();
@@ -1159,6 +1152,8 @@ JSValue Interpreter::execute(CallFrameClosure& closure)
ASSERT(!closure.oldCallFrame->globalData().isCollectorBusy());
if (closure.oldCallFrame->globalData().isCollectorBusy())
return jsNull();
+
+ StackStats::CheckPoint stackCheckPoint;
closure.resetCallFrame();
if (Profiler* profiler = closure.oldCallFrame->globalData().enabledProfiler())
profiler->willExecute(closure.oldCallFrame, closure.function);
@@ -1201,6 +1196,7 @@ JSValue Interpreter::execute(EvalExecutable* eval, CallFrame* callFrame, JSValue
DynamicGlobalObjectScope globalObjectScope(*scope->globalData(), scope->globalObject());
+ StackStats::CheckPoint stackCheckPoint;
if (m_reentryDepth >= MaxSmallThreadReentryDepth && m_reentryDepth >= callFrame->globalData().maxReentryDepth)
return checkedReturn(throwStackOverflowError(callFrame));
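
The dumpRegisters() hunks collapse the two #if'd format strings into one by casting the 64-bit encoding to long long. One caveat: "0x%lld" prints the digits in decimal after a literal "0x"; a true hex dump wants "%llx", as in this sketch (a plain function, not the Interpreter's API):

    #include <cstdint>
    #include <cstdio>

    // Portable dump of a 64-bit JSValue encoding without #if'd format strings.
    void dumpEncoded(uint64_t encoded)
    {
        // %llx prints hex; pairing "0x" with %lld would print decimal digits
        // behind a misleading "0x" prefix.
        std::printf("0x%llx\n", static_cast<unsigned long long>(encoded));
    }
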
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index 4eab17661..2d2991b5f 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -270,10 +270,7 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_get_by_val)
DEFINE_OP(op_get_argument_by_val)
DEFINE_OP(op_get_by_pname)
- DEFINE_OP(op_get_global_var_watchable)
- DEFINE_OP(op_get_global_var)
DEFINE_OP(op_get_pnames)
- DEFINE_OP(op_get_scoped_var)
DEFINE_OP(op_check_has_instance)
DEFINE_OP(op_instanceof)
DEFINE_OP(op_is_undefined)
@@ -339,17 +336,26 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_put_by_index)
DEFINE_OP(op_put_by_val)
DEFINE_OP(op_put_getter_setter)
- case op_init_global_const:
- DEFINE_OP(op_put_global_var)
- case op_init_global_const_check:
- DEFINE_OP(op_put_global_var_check)
- DEFINE_OP(op_put_scoped_var)
+ DEFINE_OP(op_init_global_const)
+ DEFINE_OP(op_init_global_const_check)
+
+ case op_resolve_global_property:
+ case op_resolve_global_var:
+ case op_resolve_scoped_var:
+ case op_resolve_scoped_var_on_top_scope:
+ case op_resolve_scoped_var_with_top_scope_check:
DEFINE_OP(op_resolve)
+
+ case op_resolve_base_to_global:
+ case op_resolve_base_to_global_dynamic:
+ case op_resolve_base_to_scope:
+ case op_resolve_base_to_scope_with_top_scope_check:
DEFINE_OP(op_resolve_base)
+
+ case op_put_to_base_variable:
+ DEFINE_OP(op_put_to_base)
+
DEFINE_OP(op_ensure_property_exists)
- DEFINE_OP(op_resolve_global)
- DEFINE_OP(op_resolve_global_dynamic)
- DEFINE_OP(op_resolve_skip)
DEFINE_OP(op_resolve_with_base)
DEFINE_OP(op_resolve_with_this)
DEFINE_OP(op_ret)
@@ -488,7 +494,6 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_mul)
DEFINE_SLOWCASE_OP(op_negate)
DEFINE_SLOWCASE_OP(op_neq)
- DEFINE_SLOWCASE_OP(op_new_array)
DEFINE_SLOWCASE_OP(op_new_object)
DEFINE_SLOWCASE_OP(op_not)
DEFINE_SLOWCASE_OP(op_nstricteq)
@@ -503,16 +508,32 @@ void JIT::privateCompileSlowCases()
case op_put_by_id_transition_normal_out_of_line:
DEFINE_SLOWCASE_OP(op_put_by_id)
DEFINE_SLOWCASE_OP(op_put_by_val)
- case op_init_global_const_check:
- DEFINE_SLOWCASE_OP(op_put_global_var_check);
- DEFINE_SLOWCASE_OP(op_resolve_global)
- DEFINE_SLOWCASE_OP(op_resolve_global_dynamic)
+ DEFINE_SLOWCASE_OP(op_init_global_const_check);
DEFINE_SLOWCASE_OP(op_rshift)
DEFINE_SLOWCASE_OP(op_urshift)
DEFINE_SLOWCASE_OP(op_stricteq)
DEFINE_SLOWCASE_OP(op_sub)
DEFINE_SLOWCASE_OP(op_to_jsnumber)
DEFINE_SLOWCASE_OP(op_to_primitive)
+
+ case op_resolve_global_property:
+ case op_resolve_global_var:
+ case op_resolve_scoped_var:
+ case op_resolve_scoped_var_on_top_scope:
+ case op_resolve_scoped_var_with_top_scope_check:
+ DEFINE_SLOWCASE_OP(op_resolve)
+
+ case op_resolve_base_to_global:
+ case op_resolve_base_to_global_dynamic:
+ case op_resolve_base_to_scope:
+ case op_resolve_base_to_scope_with_top_scope_check:
+ DEFINE_SLOWCASE_OP(op_resolve_base)
+ DEFINE_SLOWCASE_OP(op_resolve_with_base)
+ DEFINE_SLOWCASE_OP(op_resolve_with_this)
+
+ case op_put_to_base_variable:
+ DEFINE_SLOWCASE_OP(op_put_to_base)
+
default:
ASSERT_NOT_REACHED();
}
@@ -645,7 +666,7 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
continue;
int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
- loadPtr(Address(callFrameRegister, offset), regT0);
+ load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
index 054718573..cd832c21f 100644
--- a/Source/JavaScriptCore/jit/JIT.h
+++ b/Source/JavaScriptCore/jit/JIT.h
@@ -475,9 +475,7 @@ namespace JSC {
void emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode, WriteBarrierUseKind);
template<typename ClassType, MarkedBlock::DestructorType, typename StructureType> void emitAllocateBasicJSObject(StructureType, RegisterID result, RegisterID storagePtr);
- void emitAllocateBasicStorage(size_t, ptrdiff_t offsetFromBase, RegisterID result);
template<typename T> void emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID storagePtr);
- void emitAllocateJSArray(unsigned valuesRegister, unsigned length, RegisterID cellResult, RegisterID storageResult, RegisterID storagePtr, RegisterID scratch);
#if ENABLE(VALUE_PROFILER)
// This assumes that the value to profile is in regT0 and that regT3 is available for
@@ -592,6 +590,9 @@ namespace JSC {
#endif
#else // USE(JSVALUE32_64)
+ /* This function is deprecated. */
+ void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst);
+
void emitGetVirtualRegister(int src, RegisterID dst);
void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2);
void emitPutVirtualRegister(unsigned dst, RegisterID from = regT0);
@@ -676,9 +677,6 @@ namespace JSC {
void emit_op_get_by_val(Instruction*);
void emit_op_get_argument_by_val(Instruction*);
void emit_op_get_by_pname(Instruction*);
- void emit_op_get_global_var(Instruction*);
- void emit_op_get_global_var_watchable(Instruction* instruction) { emit_op_get_global_var(instruction); }
- void emit_op_get_scoped_var(Instruction*);
void emit_op_init_lazy_reg(Instruction*);
void emit_op_check_has_instance(Instruction*);
void emit_op_instanceof(Instruction*);
@@ -741,17 +739,16 @@ namespace JSC {
void emit_op_put_by_index(Instruction*);
void emit_op_put_by_val(Instruction*);
void emit_op_put_getter_setter(Instruction*);
- void emit_op_put_global_var(Instruction*);
- void emit_op_put_global_var_check(Instruction*);
- void emit_op_put_scoped_var(Instruction*);
+ void emit_op_init_global_const(Instruction*);
+ void emit_op_init_global_const_check(Instruction*);
+ void emit_resolve_operations(ResolveOperations*, const int* base, const int* value);
+ void emitSlow_link_resolve_operations(ResolveOperations*, Vector<SlowCaseEntry>::iterator&);
void emit_op_resolve(Instruction*);
void emit_op_resolve_base(Instruction*);
void emit_op_ensure_property_exists(Instruction*);
- void emit_op_resolve_global(Instruction*, bool dynamic = false);
- void emit_op_resolve_global_dynamic(Instruction*);
- void emit_op_resolve_skip(Instruction*);
void emit_op_resolve_with_base(Instruction*);
void emit_op_resolve_with_this(Instruction*);
+ void emit_op_put_to_base(Instruction*);
void emit_op_ret(Instruction*);
void emit_op_ret_object_or_this(Instruction*);
void emit_op_rshift(Instruction*);
@@ -820,23 +817,23 @@ namespace JSC {
void emitSlow_op_pre_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_put_global_var_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_resolve_global(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_resolve_global_dynamic(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_init_global_const_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_jsnumber(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_urshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_new_array(Instruction*, Vector<SlowCaseEntry>::iterator&);
-
+
+ void emitSlow_op_resolve(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_resolve_base(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_resolve_with_base(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_resolve_with_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_put_to_base(Instruction*, Vector<SlowCaseEntry>::iterator&);
+
void emitRightShift(Instruction*, bool isUnsigned);
void emitRightShiftSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned);
- /* This function is deprecated. */
- void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst);
-
void emitInitRegister(unsigned dst);
void emitPutToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
@@ -845,6 +842,9 @@ namespace JSC {
void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry);
void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
void emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
+#if USE(JSVALUE64)
+ void emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
+#endif
JSValue getConstantOperand(unsigned src);
bool isOperandConstantImmediateInt(unsigned src);
diff --git a/Source/JavaScriptCore/jit/JITArithmetic.cpp b/Source/JavaScriptCore/jit/JITArithmetic.cpp
index b66e2cd07..21d59bc33 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic.cpp
@@ -209,8 +209,8 @@ void JIT::emit_op_negate(Instruction* currentInstruction)
srcNotInt.link(this);
emitJumpSlowCaseIfNotImmediateNumber(regT0);
- move(TrustedImmPtr(reinterpret_cast<void*>(0x8000000000000000ull)), regT1);
- xorPtr(regT1, regT0);
+ move(TrustedImm64((int64_t)0x8000000000000000ull), regT1);
+ xor64(regT1, regT0);
end.link(this);
emitPutVirtualRegister(dst);
@@ -224,7 +224,7 @@ void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEnt
linkSlowCase(iter); // double check
JITStubCall stubCall(this, cti_op_negate);
- stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(regT0);
stubCall.call(dst);
}
@@ -279,8 +279,8 @@ void JIT::emit_op_rshift(Instruction* currentInstruction)
Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
// supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
addSlowCase(emitJumpIfNotImmediateNumber(regT0));
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT0);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT0);
addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
lhsIsInt.link(this);
emitJumpSlowCaseIfNotImmediateInteger(regT2);
@@ -377,8 +377,8 @@ void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEn
if (supportsFloatingPointTruncate()) {
JumpList failures;
failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT0);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT0);
failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
if (shift)
urshift32(Imm32(shift & 0x1f), regT0);
@@ -399,8 +399,8 @@ void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEn
if (supportsFloatingPointTruncate()) {
JumpList failures;
failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT0);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT0);
failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
emitFastArithImmToInt(regT1);
@@ -499,8 +499,8 @@ void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, D
if (supportsFloatingPoint()) {
Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT0);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT0);
int32_t op2imm = getConstantOperand(op2).asInt32();
@@ -525,8 +525,8 @@ void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, D
if (supportsFloatingPoint()) {
Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
- addPtr(tagTypeNumberRegister, regT1);
- movePtrToDouble(regT1, fpRegT1);
+ add64(tagTypeNumberRegister, regT1);
+ move64ToDouble(regT1, fpRegT1);
int32_t op1imm = getConstantOperand(op1).asInt32();
@@ -552,10 +552,10 @@ void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, D
Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
Jump fail3 = emitJumpIfImmediateInteger(regT1);
- addPtr(tagTypeNumberRegister, regT0);
- addPtr(tagTypeNumberRegister, regT1);
- movePtrToDouble(regT0, fpRegT0);
- movePtrToDouble(regT1, fpRegT1);
+ add64(tagTypeNumberRegister, regT0);
+ add64(tagTypeNumberRegister, regT1);
+ move64ToDouble(regT0, fpRegT0);
+ move64ToDouble(regT1, fpRegT1);
emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);
@@ -585,19 +585,19 @@ void JIT::emit_op_bitand(Instruction* currentInstruction)
emitGetVirtualRegister(op2, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
int32_t imm = getConstantOperandImmediateInt(op1);
- andPtr(Imm32(imm), regT0);
+ and64(Imm32(imm), regT0);
if (imm >= 0)
emitFastArithIntToImmNoCheck(regT0, regT0);
} else if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
int32_t imm = getConstantOperandImmediateInt(op2);
- andPtr(Imm32(imm), regT0);
+ and64(Imm32(imm), regT0);
if (imm >= 0)
emitFastArithIntToImmNoCheck(regT0, regT0);
} else {
emitGetVirtualRegisters(op1, regT0, op2, regT1);
- andPtr(regT1, regT0);
+ and64(regT1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
}
emitPutVirtualRegister(result);
@@ -887,16 +887,16 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
emitGetVirtualRegister(op1, regT1);
convertInt32ToDouble(regT1, fpRegT1);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT2);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT2);
} else if (op2HasImmediateIntFastCase) {
notImm1.link(this);
if (!types.first().definitelyIsNumber())
emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
emitGetVirtualRegister(op2, regT1);
convertInt32ToDouble(regT1, fpRegT1);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT2);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT2);
} else {
// if we get here, eax is not an int32, edx not yet checked.
notImm1.link(this);
@@ -904,8 +904,8 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
if (!types.second().definitelyIsNumber())
emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT1);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT1);
Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
convertInt32ToDouble(regT1, fpRegT2);
Jump op2wasInteger = jump();
@@ -916,8 +916,8 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
convertInt32ToDouble(regT0, fpRegT1);
op2isDouble.link(this);
- addPtr(tagTypeNumberRegister, regT1);
- movePtrToDouble(regT1, fpRegT2);
+ add64(tagTypeNumberRegister, regT1);
+ move64ToDouble(regT1, fpRegT2);
op2wasInteger.link(this);
}
@@ -931,8 +931,8 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
ASSERT(opcodeID == op_div);
divDouble(fpRegT2, fpRegT1);
}
- moveDoubleToPtr(fpRegT1, regT0);
- subPtr(tagTypeNumberRegister, regT0);
+ moveDoubleTo64(fpRegT1, regT0);
+ sub64(tagTypeNumberRegister, regT0);
emitPutVirtualRegister(result, regT0);
end.link(this);
@@ -1041,8 +1041,8 @@ void JIT::emit_op_div(Instruction* currentInstruction)
if (isOperandConstantImmediateDouble(op1)) {
emitGetVirtualRegister(op1, regT0);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT0);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT0);
} else if (isOperandConstantImmediateInt(op1)) {
emitLoadInt32ToDouble(op1, fpRegT0);
} else {
@@ -1053,15 +1053,15 @@ void JIT::emit_op_div(Instruction* currentInstruction)
convertInt32ToDouble(regT0, fpRegT0);
Jump skipDoubleLoad = jump();
notInt.link(this);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT0);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT0);
skipDoubleLoad.link(this);
}
if (isOperandConstantImmediateDouble(op2)) {
emitGetVirtualRegister(op2, regT1);
- addPtr(tagTypeNumberRegister, regT1);
- movePtrToDouble(regT1, fpRegT1);
+ add64(tagTypeNumberRegister, regT1);
+ move64ToDouble(regT1, fpRegT1);
} else if (isOperandConstantImmediateInt(op2)) {
emitLoadInt32ToDouble(op2, fpRegT1);
} else {
@@ -1072,8 +1072,8 @@ void JIT::emit_op_div(Instruction* currentInstruction)
convertInt32ToDouble(regT1, fpRegT1);
Jump skipDoubleLoad = jump();
notInt.link(this);
- addPtr(tagTypeNumberRegister, regT1);
- movePtrToDouble(regT1, fpRegT1);
+ add64(tagTypeNumberRegister, regT1);
+ move64ToDouble(regT1, fpRegT1);
skipDoubleLoad.link(this);
}
divDouble(fpRegT1, fpRegT0);
@@ -1100,13 +1100,13 @@ void JIT::emit_op_div(Instruction* currentInstruction)
Jump isInteger = jump();
notInteger.link(this);
add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset)->m_counter));
- moveDoubleToPtr(fpRegT0, regT0);
- subPtr(tagTypeNumberRegister, regT0);
+ moveDoubleTo64(fpRegT0, regT0);
+ sub64(tagTypeNumberRegister, regT0);
isInteger.link(this);
#else
// Double result.
- moveDoubleToPtr(fpRegT0, regT0);
- subPtr(tagTypeNumberRegister, regT0);
+ moveDoubleTo64(fpRegT0, regT0);
+ sub64(tagTypeNumberRegister, regT0);
#endif
emitPutVirtualRegister(dst, regT0);
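
The add64/sub64 pairs with tagTypeNumberRegister above implement JSVALUE64 double boxing: an encoded double is the raw IEEE-754 bit pattern offset by 2^48, and tagTypeNumberRegister holds 0xffff000000000000, which is -2^48 modulo 2^64, so add64 unboxes and sub64 reboxes. A sketch of the arithmetic in plain C++ (constants per JSC's JSVALUE64 scheme of this era; offered as illustration, not the header's definitions):

    #include <cstdint>
    #include <cstring>

    static const uint64_t kDoubleEncodeOffset = 1ull << 48;

    // moveDoubleTo64 + sub64(tagTypeNumberRegister): bits - (-2^48) = bits + 2^48.
    uint64_t boxDouble(double d)
    {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof(bits));
        return bits + kDoubleEncodeOffset;
    }

    // add64(tagTypeNumberRegister) + move64ToDouble: bits + (-2^48) = bits - 2^48.
    double unboxDouble(uint64_t encoded)
    {
        uint64_t bits = encoded - kDoubleEncodeOffset;
        double d;
        std::memcpy(&d, &bits, sizeof(d));
        return d;
    }
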
diff --git a/Source/JavaScriptCore/jit/JITCall.cpp b/Source/JavaScriptCore/jit/JITCall.cpp
index 1de877daa..074bf7f97 100644
--- a/Source/JavaScriptCore/jit/JITCall.cpp
+++ b/Source/JavaScriptCore/jit/JITCall.cpp
@@ -72,7 +72,7 @@ void JIT::compileLoadVarargs(Instruction* instruction)
if (canOptimize) {
emitGetVirtualRegister(arguments, regT0);
- slowCase.append(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue()))));
+ slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));
emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
@@ -91,18 +91,18 @@ void JIT::compileLoadVarargs(Instruction* instruction)
// Initialize 'this'.
emitGetVirtualRegister(thisValue, regT2);
- storePtr(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
+ store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
// Copy arguments.
neg32(regT0);
signExtend32ToPtr(regT0, regT0);
- end.append(branchAddPtr(Zero, TrustedImm32(1), regT0));
+ end.append(branchAdd64(Zero, TrustedImm32(1), regT0));
// regT0: -argumentCount
Label copyLoop = label();
- loadPtr(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
- storePtr(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
- branchAddPtr(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);
+ load64(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
+ store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
+ branchAdd64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);
end.append(jump());
}
@@ -124,7 +124,7 @@ void JIT::compileCallEval()
{
JITStubCall stubCall(this, cti_op_call_eval); // Initializes ScopeChain; ReturnPC; CodeBlock.
stubCall.call();
- addSlowCase(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(JSValue()))));
+ addSlowCase(branch64(Equal, regT0, TrustedImm64(JSValue::encode(JSValue()))));
emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
sampleCodeBlock(m_codeBlock);
@@ -134,7 +134,7 @@ void JIT::compileCallEvalSlowCase(Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
+ emitGetFromCallFrameHeader64(JSStack::Callee, regT0);
emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());
sampleCodeBlock(m_codeBlock);
@@ -179,8 +179,8 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
store32(TrustedImm32(instruction - m_codeBlock->instructions().begin()), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
- storePtr(callFrameRegister, Address(regT1, JSStack::CallerFrame * static_cast<int>(sizeof(Register))));
- storePtr(regT0, Address(regT1, JSStack::Callee * static_cast<int>(sizeof(Register))));
+ store64(callFrameRegister, Address(regT1, JSStack::CallerFrame * static_cast<int>(sizeof(Register))));
+ store64(regT0, Address(regT1, JSStack::Callee * static_cast<int>(sizeof(Register))));
move(regT1, callFrameRegister);
if (opcodeID == op_call_eval) {
@@ -190,7 +190,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
DataLabelPtr addressOfLinkedFunctionCheck;
BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
- Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(JSValue::encode(JSValue())));
+ Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
addSlowCase(slowCase);
diff --git a/Source/JavaScriptCore/jit/JITInlineMethods.h b/Source/JavaScriptCore/jit/JITInlineMethods.h
index a7aecd3e8..410bdf710 100644
--- a/Source/JavaScriptCore/jit/JITInlineMethods.h
+++ b/Source/JavaScriptCore/jit/JITInlineMethods.h
@@ -31,14 +31,6 @@
namespace JSC {
-/* Deprecated: Please use JITStubCall instead. */
-
-ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
-{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
- peek(dst, argumentStackOffset);
-}
-
ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
{
return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
@@ -50,23 +42,33 @@ ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
return m_codeBlock->getConstant(src);
}
-ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
-{
- storePtr(from, payloadFor(entry, callFrameRegister));
-}
-
ALWAYS_INLINE void JIT::emitPutCellToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
{
#if USE(JSVALUE32_64)
store32(TrustedImm32(JSValue::CellTag), tagFor(entry, callFrameRegister));
+ store32(from, payloadFor(entry, callFrameRegister));
+#else
+ store64(from, addressFor(entry, callFrameRegister));
#endif
- storePtr(from, payloadFor(entry, callFrameRegister));
}
ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
{
+#if USE(JSVALUE32_64)
store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister));
store32(from, intPayloadFor(entry, callFrameRegister));
+#else
+ store64(from, addressFor(entry, callFrameRegister));
+#endif
+}
+
+ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
+{
+#if USE(JSVALUE32_64)
+ storePtr(from, payloadFor(entry, callFrameRegister));
+#else
+ store64(from, addressFor(entry, callFrameRegister));
+#endif
}
ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
@@ -82,6 +84,22 @@ ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEn
#endif
}
+ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
+{
+ load32(Address(from, entry * sizeof(Register)), to);
+#if USE(JSVALUE64)
+ killLastResultRegister();
+#endif
+}
+
+#if USE(JSVALUE64)
+ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
+{
+ load64(Address(from, entry * sizeof(Register)), to);
+ killLastResultRegister();
+}
+#endif
+
ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
failures.append(branchPtr(NotEqual, Address(src, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get())));
@@ -101,14 +119,6 @@ ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst,
cont8Bit.link(this);
}
-ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
-{
- load32(Address(from, entry * sizeof(Register)), to);
-#if USE(JSVALUE64)
- killLastResultRegister();
-#endif
-}
-
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
@@ -434,56 +444,6 @@ template <typename T> inline void JIT::emitAllocateJSFinalObject(T structure, Re
emitAllocateBasicJSObject<JSFinalObject, MarkedBlock::None, T>(structure, result, scratch);
}
-inline void JIT::emitAllocateBasicStorage(size_t size, ptrdiff_t offsetFromBase, RegisterID result)
-{
- CopiedAllocator* allocator = &m_globalData->heap.storageAllocator();
-
- loadPtr(&allocator->m_currentRemaining, result);
- addSlowCase(branchSubPtr(Signed, TrustedImm32(size), result));
- storePtr(result, &allocator->m_currentRemaining);
- negPtr(result);
- addPtr(AbsoluteAddress(&allocator->m_currentPayloadEnd), result);
- subPtr(TrustedImm32(size - offsetFromBase), result);
-}
-
-inline void JIT::emitAllocateJSArray(unsigned valuesRegister, unsigned length, RegisterID cellResult, RegisterID storageResult, RegisterID storagePtr, RegisterID scratch)
-{
- unsigned initialLength = std::max(length, BASE_VECTOR_LEN);
- size_t initialStorage = Butterfly::totalSize(0, 0, true, initialLength * sizeof(EncodedJSValue));
-
- loadPtr(m_codeBlock->globalObject()->addressOfArrayStructure(), scratch);
- load8(Address(scratch, Structure::indexingTypeOffset()), storagePtr);
- and32(TrustedImm32(IndexingShapeMask), storagePtr);
- addSlowCase(branch32(NotEqual, storagePtr, TrustedImm32(ContiguousShape)));
-
- // We allocate the backing store first to ensure that garbage collection
- // doesn't happen during JSArray initialization.
- emitAllocateBasicStorage(initialStorage, sizeof(IndexingHeader), storageResult);
-
- // Allocate the cell for the array.
- emitAllocateBasicJSObject<JSArray, MarkedBlock::None>(scratch, cellResult, storagePtr);
-
- // Store all the necessary info in the indexing header.
- store32(Imm32(length), Address(storageResult, Butterfly::offsetOfPublicLength()));
- store32(Imm32(initialLength), Address(storageResult, Butterfly::offsetOfVectorLength()));
-
- // Store the newly allocated ArrayStorage.
- storePtr(storageResult, Address(cellResult, JSObject::butterflyOffset()));
-
- // Store the values we have.
- for (unsigned i = 0; i < length; i++) {
-#if USE(JSVALUE64)
- loadPtr(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register)), storagePtr);
- storePtr(storagePtr, Address(storageResult, sizeof(WriteBarrier<Unknown>) * i));
-#else
- load32(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register)), storagePtr);
- store32(storagePtr, Address(storageResult, sizeof(WriteBarrier<Unknown>) * i));
- load32(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register) + sizeof(uint32_t)), storagePtr);
- store32(storagePtr, Address(storageResult, sizeof(WriteBarrier<Unknown>) * i + sizeof(uint32_t)));
-#endif
- }
-}
-
#if ENABLE(VALUE_PROFILER)
inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
{
@@ -500,7 +460,7 @@ inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
// We're in a simple configuration: only one bucket, so we can just do a direct
// store.
#if USE(JSVALUE64)
- storePtr(value, valueProfile->m_buckets);
+ store64(value, valueProfile->m_buckets);
#else
EncodedValueDescriptor* descriptor = bitwise_cast<EncodedValueDescriptor*>(valueProfile->m_buckets);
store32(value, &descriptor->asBits.payload);
@@ -516,7 +476,7 @@ inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
and32(TrustedImm32(ValueProfile::bucketIndexMask), bucketCounterRegister);
move(TrustedImmPtr(valueProfile->m_buckets), scratch);
#if USE(JSVALUE64)
- storePtr(value, BaseIndex(scratch, bucketCounterRegister, TimesEight));
+ store64(value, BaseIndex(scratch, bucketCounterRegister, TimesEight));
#elif USE(JSVALUE32_64)
store32(value, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
store32(valueTag, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
@@ -538,17 +498,15 @@ inline void JIT::emitValueProfilingSite()
inline void JIT::emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile* arrayProfile)
{
+ UNUSED_PARAM(scratch); // This scratch register has been useful here before, so keep it for now.
+
RegisterID structure = structureAndIndexingType;
RegisterID indexingType = structureAndIndexingType;
- if (canBeOptimized()) {
+ if (canBeOptimized())
storePtr(structure, arrayProfile->addressOfLastSeenStructure());
- load8(Address(structure, Structure::indexingTypeOffset()), indexingType);
- move(TrustedImm32(1), scratch);
- lshift32(indexingType, scratch);
- or32(scratch, AbsoluteAddress(arrayProfile->addressOfArrayModes()));
- } else
- load8(Address(structure, Structure::indexingTypeOffset()), indexingType);
+
+ load8(Address(structure, Structure::indexingTypeOffset()), indexingType);
}
inline void JIT::emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex)
@@ -860,6 +818,14 @@ ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op
#else // USE(JSVALUE32_64)
+/* Deprecated: Please use JITStubCall instead. */
+
+ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
+{
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
+ peek64(dst, argumentStackOffset);
+}
+
ALWAYS_INLINE void JIT::killLastResultRegister()
{
m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
@@ -874,9 +840,9 @@ ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
if (m_codeBlock->isConstantRegisterIndex(src)) {
JSValue value = m_codeBlock->getConstant(src);
if (!value.isNumber())
- move(TrustedImmPtr(JSValue::encode(value)), dst);
+ move(TrustedImm64(JSValue::encode(value)), dst);
else
- move(ImmPtr(JSValue::encode(value)), dst);
+ move(Imm64(JSValue::encode(value)), dst);
killLastResultRegister();
return;
}
@@ -889,7 +855,7 @@ ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
return;
}
- loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
+ load64(Address(callFrameRegister, src * sizeof(Register)), dst);
killLastResultRegister();
}
@@ -916,28 +882,24 @@ ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
- storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
+ store64(from, Address(callFrameRegister, dst * sizeof(Register)));
m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast<int>(dst) : std::numeric_limits<int>::max();
}
ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
- storePtr(TrustedImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
+ store64(TrustedImm64(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
-#if USE(JSVALUE64)
- return branchTestPtr(Zero, reg, tagMaskRegister);
-#else
- return branchTest32(Zero, reg, TrustedImm32(TagMask));
-#endif
+ return branchTest64(Zero, reg, tagMaskRegister);
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
move(reg1, scratch);
- orPtr(reg2, scratch);
+ or64(reg2, scratch);
return emitJumpIfJSCell(scratch);
}
@@ -948,11 +910,7 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
{
-#if USE(JSVALUE64)
- return branchTestPtr(NonZero, reg, tagMaskRegister);
-#else
- return branchTest32(NonZero, reg, TrustedImm32(TagMask));
-#endif
+ return branchTest64(NonZero, reg, tagMaskRegister);
}
ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
@@ -966,8 +924,6 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
emitJumpSlowCaseIfNotJSCell(reg);
}
-#if USE(JSVALUE64)
-
inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
if (m_codeBlock->isConstantRegisterIndex(index)) {
@@ -985,30 +941,21 @@ inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
} else
convertInt32ToDouble(addressFor(index), value);
}
-#endif
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
-#if USE(JSVALUE64)
- return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
-#else
- return branchTest32(NonZero, reg, TrustedImm32(TagTypeNumber));
-#endif
+ return branch64(AboveOrEqual, reg, tagTypeNumberRegister);
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
-#if USE(JSVALUE64)
- return branchPtr(Below, reg, tagTypeNumberRegister);
-#else
- return branchTest32(Zero, reg, TrustedImm32(TagTypeNumber));
-#endif
+ return branch64(Below, reg, tagTypeNumberRegister);
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
move(reg1, scratch);
- andPtr(reg2, scratch);
+ and64(reg2, scratch);
return emitJumpIfNotImmediateInteger(scratch);
}
@@ -1027,41 +974,17 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
addSlowCase(emitJumpIfNotImmediateNumber(reg));
}
-#if USE(JSVALUE32_64)
-ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
-{
- subPtr(TrustedImm32(TagTypeNumber), reg);
-}
-
-ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
-{
- return branchSubPtr(Zero, TrustedImm32(TagTypeNumber), reg);
-}
-#endif
-
ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
-#if USE(JSVALUE64)
emitFastArithIntToImmNoCheck(src, dest);
-#else
- if (src != dest)
- move(src, dest);
- addPtr(TrustedImm32(TagTypeNumber), dest);
-#endif
}
// operand is int32_t, must have been zero-extended if register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
-#if USE(JSVALUE64)
if (src != dest)
move(src, dest);
- orPtr(tagTypeNumberRegister, dest);
-#else
- signExtend32ToPtr(src, dest);
- addPtr(dest, dest);
- emitFastArithReTagImmediate(dest, dest);
-#endif
+ or64(tagTypeNumberRegister, dest);
}
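
Re-tagging an int32 is now a single OR against the pinned tag register. A sketch of the boxing step, assuming the operand was zero-extended as the comment above requires:

    // or64(tagTypeNumberRegister, dest): box a zero-extended int32.
    static int64_t boxInt32(uint32_t value) { return TagTypeNumber | int64_t(value); }

    // Zero-extension matters: a sign-extended negative int32 would set
    // bits 32-47, which must stay clear in the int32 encoding.
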
ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp
index c187e4725..07c8ace2a 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -248,14 +248,14 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
// Host function signature: f(ExecState*);
move(callFrameRegister, X86Registers::edi);
- subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
+ subPtr(TrustedImm32(16 - sizeof(int64_t)), stackPointerRegister); // Align stack after call.
emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
loadPtr(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_executable)), X86Registers::r9);
move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
call(Address(X86Registers::r9, executableOffsetToFunction));
- addPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister);
+ addPtr(TrustedImm32(16 - sizeof(int64_t)), stackPointerRegister);
#elif CPU(ARM)
// Load caller frame's scope chain into this callframe so that whatever we call can
@@ -316,8 +316,8 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
#endif
// Check for an exception
- loadPtr(&(globalData->exception), regT2);
- Jump exceptionHandler = branchTestPtr(NonZero, regT2);
+ load64(&(globalData->exception), regT2);
+ Jump exceptionHandler = branchTest64(NonZero, regT2);
// Return.
ret();
@@ -360,9 +360,9 @@ void JIT::emit_op_mov(Instruction* currentInstruction)
} else {
if (m_codeBlock->isConstantRegisterIndex(src)) {
if (!getConstantOperand(src).isNumber())
- storePtr(TrustedImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
+ store64(TrustedImm64(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
else
- storePtr(ImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
+ store64(Imm64(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
if (dst == m_lastResultBytecodeRegister)
killLastResultRegister();
} else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
@@ -372,8 +372,8 @@ void JIT::emit_op_mov(Instruction* currentInstruction)
emitPutVirtualRegister(dst);
} else {
// Perform the copy via regT1; do not disturb any mapping in regT0.
- loadPtr(Address(callFrameRegister, src * sizeof(Register)), regT1);
- storePtr(regT1, Address(callFrameRegister, dst * sizeof(Register)));
+ load64(Address(callFrameRegister, src * sizeof(Register)), regT1);
+ store64(regT1, Address(callFrameRegister, dst * sizeof(Register)));
}
}
}
@@ -441,18 +441,18 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
// Optimistically load the result true, and start looping.
// Initially, regT1 still contains proto and regT2 still contains value.
// As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
- move(TrustedImmPtr(JSValue::encode(jsBoolean(true))), regT0);
+ move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
Label loop(this);
// Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
// Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
- loadPtr(Address(regT2, Structure::prototypeOffset()), regT2);
+ load64(Address(regT2, Structure::prototypeOffset()), regT2);
Jump isInstance = branchPtr(Equal, regT2, regT1);
emitJumpIfJSCell(regT2).linkTo(loop, this);
// We get here either by dropping out of the loop, or if value was not an Object. Result is false.
- move(TrustedImmPtr(JSValue::encode(jsBoolean(false))), regT0);
+ move(TrustedImm64(JSValue::encode(jsBoolean(false))), regT0);
// isInstance jumps right down to here, to skip setting the result to false (it has already set true).
isInstance.link(this);
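
The emitted loop is easier to follow as straight-line logic; a sketch of what it computes, using hypothetical isCell/prototype helpers in place of the inline loads:

    // for (;;) {
    //     value = value->structure()->prototype();  // loadPtr + load64 above
    //     if (value == proto) return true;          // isInstance
    //     if (!isCell(value)) return false;         // reached null: not an instance
    // }
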
@@ -467,7 +467,7 @@ void JIT::emit_op_is_undefined(Instruction* currentInstruction)
emitGetVirtualRegister(value, regT0);
Jump isCell = emitJumpIfJSCell(regT0);
- comparePtr(Equal, regT0, TrustedImm32(ValueUndefined), regT0);
+ compare64(Equal, regT0, TrustedImm32(ValueUndefined), regT0);
Jump done = jump();
isCell.link(this);
@@ -493,8 +493,8 @@ void JIT::emit_op_is_boolean(Instruction* currentInstruction)
unsigned value = currentInstruction[2].u.operand;
emitGetVirtualRegister(value, regT0);
- xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
- testPtr(Zero, regT0, TrustedImm32(static_cast<int32_t>(~1)), regT0);
+ xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
+ test64(Zero, regT0, TrustedImm32(static_cast<int32_t>(~1)), regT0);
emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(dst);
}
@@ -505,7 +505,7 @@ void JIT::emit_op_is_number(Instruction* currentInstruction)
unsigned value = currentInstruction[2].u.operand;
emitGetVirtualRegister(value, regT0);
- testPtr(NonZero, regT0, tagTypeNumberRegister, regT0);
+ test64(NonZero, regT0, tagTypeNumberRegister, regT0);
emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(dst);
}
@@ -553,7 +553,7 @@ void JIT::emit_op_construct(Instruction* currentInstruction)
void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
{
int activation = currentInstruction[1].u.operand;
- Jump activationNotCreated = branchTestPtr(Zero, addressFor(activation));
+ Jump activationNotCreated = branchTest64(Zero, addressFor(activation));
JITStubCall stubCall(this, cti_op_tear_off_activation);
stubCall.addArgument(activation, regT2);
stubCall.call();
@@ -565,7 +565,7 @@ void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
int arguments = currentInstruction[1].u.operand;
int activation = currentInstruction[2].u.operand;
- Jump argsNotCreated = branchTestPtr(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(arguments))));
+ Jump argsNotCreated = branchTest64(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(arguments))));
JITStubCall stubCall(this, cti_op_tear_off_arguments);
stubCall.addArgument(unmodifiedArgumentsRegister(arguments), regT2);
stubCall.addArgument(activation, regT2);
@@ -631,13 +631,6 @@ void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
ret();
}
-void JIT::emit_op_resolve(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
-}
-
void JIT::emit_op_to_primitive(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
@@ -662,13 +655,6 @@ void JIT::emit_op_strcat(Instruction* currentInstruction)
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_resolve_base(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
-}
-
void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_ensure_property_exists);
@@ -677,50 +663,6 @@ void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_skip);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool)
-{
- // Fast case
- void* globalObject = m_codeBlock->globalObject();
- unsigned currentIndex = m_globalResolveInfoIndex++;
- GlobalResolveInfo* resolveInfoAddress = &(m_codeBlock->globalResolveInfo(currentIndex));
-
- // Check Structure of global object
- move(TrustedImmPtr(globalObject), regT0);
- move(TrustedImmPtr(resolveInfoAddress), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(GlobalResolveInfo, structure)), regT1);
- addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, JSCell::structureOffset()))); // Structures don't match
-
- // Load cached property
- // Assume that the global object always uses external storage.
- load32(Address(regT2, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), regT1);
- compileGetDirectOffset(regT0, regT0, regT1, regT0, KnownNotFinal);
- emitValueProfilingSite();
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
-
- unsigned currentIndex = m_globalResolveInfoIndex++;
-
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_resolve_global);
- stubCall.addArgument(TrustedImmPtr(ident));
- stubCall.addArgument(TrustedImm32(currentIndex));
- stubCall.addArgument(regT0);
- stubCall.callWithValueProfiling(dst);
-}
-
void JIT::emit_op_not(Instruction* currentInstruction)
{
emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
@@ -728,9 +670,9 @@ void JIT::emit_op_not(Instruction* currentInstruction)
// Invert against JSValue(false); if the value was tagged as a boolean, then all bits will be
// clear other than the low bit (which will be 0 or 1 for false or true inputs respectively).
// Then invert against JSValue(true), which will add the tag back in, and flip the low bit.
- xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
+ xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
addSlowCase(branchTestPtr(NonZero, regT0, TrustedImm32(static_cast<int32_t>(~1))));
- xorPtr(TrustedImm32(static_cast<int32_t>(ValueTrue)), regT0);
+ xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
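
The double XOR deserves a worked trace. Under the encoding sketched earlier, ValueFalse is 0x6 and ValueTrue is 0x7, so:

    // xor64(ValueFalse):  false (0x6) -> 0x0,  true (0x7) -> 0x1
    // branchTestPtr(NonZero, reg, ~1): any bit above bit 0 means the
    //   input was not a tagged boolean, so take the slow case.
    // xor64(ValueTrue):   0x0 -> 0x7 (true),  0x1 -> 0x6 (false)
    // One XOR strips the tag and exposes the payload bit; the other
    // restores the tag with the payload bit flipped.
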
@@ -740,11 +682,11 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
unsigned target = currentInstruction[2].u.operand;
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsNumber(0)))), target);
+ addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNumber(0)))), target);
Jump isNonZero = emitJumpIfImmediateInteger(regT0);
- addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(false)))), target);
- addSlowCase(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(true)))));
+ addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsBoolean(false)))), target);
+ addSlowCase(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsBoolean(true)))));
isNonZero.link(this);
}
@@ -766,8 +708,8 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
// Now handle the immediate cases - undefined & null
isImmediate.link(this);
- andPtr(TrustedImm32(~TagBitUndefined), regT0);
- addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsNull()))), target);
+ and64(TrustedImm32(~TagBitUndefined), regT0);
+ addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNull()))), target);
isNotMasqueradesAsUndefined.link(this);
masqueradesGlobalObjectIsForeign.link(this);
@@ -789,8 +731,8 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
// Now handle the immediate cases - undefined & null
isImmediate.link(this);
- andPtr(TrustedImm32(~TagBitUndefined), regT0);
- addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(jsNull()))), target);
+ and64(TrustedImm32(~TagBitUndefined), regT0);
+ addJump(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsNull()))), target);
wasNotImmediate.link(this);
}
@@ -814,32 +756,16 @@ void JIT::emit_op_eq(Instruction* currentInstruction)
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
-void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_with_base);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
- stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
-}
-
-void JIT::emit_op_resolve_with_this(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_with_this);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
- stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
-}
-
void JIT::emit_op_jtrue(Instruction* currentInstruction)
{
unsigned target = currentInstruction[2].u.operand;
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- Jump isZero = branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsNumber(0))));
+ Jump isZero = branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNumber(0))));
addJump(emitJumpIfImmediateInteger(regT0), target);
- addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(true)))), target);
- addSlowCase(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(false)))));
+ addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsBoolean(true)))), target);
+ addSlowCase(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsBoolean(false)))));
isZero.link(this);
}
@@ -859,7 +785,7 @@ void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
- xorPtr(regT1, regT0);
+ xor64(regT1, regT0);
emitFastArithReTagImmediate(regT0, regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
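
bitxor re-tags its result while bitor, just below, does not: XOR cancels the tag prefix the two boxed int32s share, whereas OR preserves it. In terms of the constants above:

    // (TagTypeNumber | a) ^ (TagTypeNumber | b) == a ^ b                    -- tag cancels
    // (TagTypeNumber | a) | (TagTypeNumber | b) == TagTypeNumber | (a | b)  -- tag survives
    static int64_t jsBitXor(int64_t x, int64_t y) { return TagTypeNumber | (x ^ y); } // xor64 + retag
    static int64_t jsBitOr(int64_t x, int64_t y)  { return x | y; }                   // or64 alone
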
@@ -868,7 +794,7 @@ void JIT::emit_op_bitor(Instruction* currentInstruction)
{
emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
- orPtr(regT1, regT0);
+ or64(regT1, regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
@@ -910,7 +836,7 @@ void JIT::emit_op_get_pnames(Instruction* currentInstruction)
getPnamesStubCall.addArgument(regT0);
getPnamesStubCall.call(dst);
load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
- storePtr(tagTypeNumberRegister, payloadFor(i));
+ store64(tagTypeNumberRegister, addressFor(i));
store32(TrustedImm32(Int32Tag), intTagFor(size));
store32(regT3, intPayloadFor(size));
Jump end = jump();
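
Storing tagTypeNumberRegister into the iteration register is a small trick: TagTypeNumber with a zero payload is exactly the boxed int32 zero, so the pinned register doubles as a ready-made jsNumber(0).

    // TagTypeNumber | 0 == TagTypeNumber, the encoding of jsNumber(0),
    // so one store64 of the pinned register initialises i to boxed zero.
    static const int64_t boxedZero = TagTypeNumber; // == boxInt32(0)
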
@@ -947,7 +873,7 @@ void JIT::emit_op_next_pname(Instruction* currentInstruction)
loadPtr(addressFor(it), regT1);
loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
- loadPtr(BaseIndex(regT2, regT0, TimesEight), regT2);
+ load64(BaseIndex(regT2, regT0, TimesEight), regT2);
emitPutVirtualRegister(dst, regT2);
@@ -968,7 +894,7 @@ void JIT::emit_op_next_pname(Instruction* currentInstruction)
addJump(branchTestPtr(Zero, Address(regT3)), target);
Label checkPrototype(this);
- loadPtr(Address(regT2, Structure::prototypeOffset()), regT2);
+ load64(Address(regT2, Structure::prototypeOffset()), regT2);
callHasProperty.append(emitJumpIfNotJSCell(regT2));
loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
@@ -1016,7 +942,7 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy
// Jump slow if both are cells (to cover strings).
move(regT0, regT2);
- orPtr(regT1, regT2);
+ or64(regT1, regT2);
addSlowCase(emitJumpIfJSCell(regT2));
// Jump slow if either is a double. First test if it's an integer, which is fine, and then test
@@ -1029,9 +955,9 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy
rightOK.link(this);
if (type == OpStrictEq)
- comparePtr(Equal, regT1, regT0, regT0);
+ compare64(Equal, regT1, regT0, regT0);
else
- comparePtr(NotEqual, regT1, regT0, regT0);
+ compare64(NotEqual, regT1, regT0, regT0);
emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(dst);
@@ -1077,8 +1003,8 @@ void JIT::emit_op_catch(Instruction* currentInstruction)
killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code.
move(regT0, callFrameRegister);
peek(regT3, OBJECT_OFFSETOF(struct JITStackFrame, globalData) / sizeof(void*));
- loadPtr(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)), regT0);
- storePtr(TrustedImmPtr(JSValue::encode(JSValue())), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)));
+ load64(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)), regT0);
+ store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)));
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
@@ -1147,9 +1073,9 @@ void JIT::emit_op_throw_reference_error(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_throw_reference_error);
if (!m_codeBlock->getConstant(currentInstruction[1].u.operand).isNumber())
- stubCall.addArgument(TrustedImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
+ stubCall.addArgument(TrustedImm64(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
else
- stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
+ stubCall.addArgument(Imm64(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
stubCall.call();
}
@@ -1189,8 +1115,8 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction)
isImmediate.link(this);
- andPtr(TrustedImm32(~TagBitUndefined), regT0);
- comparePtr(Equal, regT0, TrustedImm32(ValueNull), regT0);
+ and64(TrustedImm32(~TagBitUndefined), regT0);
+ compare64(Equal, regT0, TrustedImm32(ValueNull), regT0);
wasNotImmediate.link(this);
wasNotMasqueradesAsUndefined.link(this);
@@ -1221,8 +1147,8 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
isImmediate.link(this);
- andPtr(TrustedImm32(~TagBitUndefined), regT0);
- comparePtr(NotEqual, regT0, TrustedImm32(ValueNull), regT0);
+ and64(TrustedImm32(~TagBitUndefined), regT0);
+ compare64(NotEqual, regT0, TrustedImm32(ValueNull), regT0);
wasNotImmediate.link(this);
wasNotMasqueradesAsUndefined.link(this);
@@ -1247,7 +1173,7 @@ void JIT::emit_op_create_activation(Instruction* currentInstruction)
{
unsigned dst = currentInstruction[1].u.operand;
- Jump activationCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
+ Jump activationCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
emitPutVirtualRegister(dst);
activationCreated.link(this);
@@ -1257,7 +1183,7 @@ void JIT::emit_op_create_arguments(Instruction* currentInstruction)
{
unsigned dst = currentInstruction[1].u.operand;
- Jump argsCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
+ Jump argsCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
JITStubCall(this, cti_op_create_arguments).call();
emitPutVirtualRegister(dst);
emitPutVirtualRegister(unmodifiedArgumentsRegister(dst));
@@ -1268,7 +1194,7 @@ void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
{
unsigned dst = currentInstruction[1].u.operand;
- storePtr(TrustedImmPtr(0), Address(callFrameRegister, sizeof(Register) * dst));
+ store64(TrustedImm64((int64_t)0), Address(callFrameRegister, sizeof(Register) * dst));
}
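
init_lazy_reg depends on the empty JSValue encoding to all-zero bits under JSVALUE64: a plain zero store marks the register as not yet created, and the branchTest64(Zero, ...) and branchTest64(NonZero, ...) guards in op_create_activation, op_create_arguments, op_new_func and the native-call exception check all test the same sentinel.

    // JSValue() encodes to 0, so:
    //   store64(TrustedImm64((int64_t)0), slot)  -- mark the slot empty
    //   branchTest64(Zero, slot)                 -- "still empty" check
    static bool isEmptyValue(int64_t bits) { return !bits; }
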
void JIT::emit_op_convert_this(Instruction* currentInstruction)
@@ -1327,16 +1253,16 @@ void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowC
linkSlowCase(iter);
if (shouldEmitProfiling())
- move(TrustedImmPtr(bitwise_cast<void*>(JSValue::encode(jsUndefined()))), regT0);
- Jump isNotUndefined = branchPtr(NotEqual, regT1, TrustedImmPtr(JSValue::encode(jsUndefined())));
+ move(TrustedImm64((JSValue::encode(jsUndefined()))), regT0);
+ Jump isNotUndefined = branch64(NotEqual, regT1, TrustedImm64(JSValue::encode(jsUndefined())));
emitValueProfilingSite();
- move(TrustedImmPtr(globalThis), regT0);
+ move(TrustedImm64(JSValue::encode(JSValue(static_cast<JSCell*>(globalThis)))), regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_convert_this));
linkSlowCase(iter);
if (shouldEmitProfiling())
- move(TrustedImmPtr(bitwise_cast<void*>(JSValue::encode(m_globalData->stringStructure.get()))), regT0);
+ move(TrustedImm64(JSValue::encode(m_globalData->stringStructure.get())), regT0);
isNotUndefined.link(this);
emitValueProfilingSite();
JITStubCall stubCall(this, cti_op_convert_this);
@@ -1356,7 +1282,7 @@ void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowC
void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
+ xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
JITStubCall stubCall(this, cti_op_not);
stubCall.addArgument(regT0);
stubCall.call(currentInstruction[1].u.operand);
@@ -1508,7 +1434,7 @@ void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
int argumentsRegister = currentInstruction[2].u.operand;
- addSlowCase(branchTestPtr(NonZero, addressFor(argumentsRegister)));
+ addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister)));
emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
sub32(TrustedImm32(1), regT0);
emitFastArithReTagImmediate(regT0, regT0);
@@ -1534,7 +1460,7 @@ void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
int dst = currentInstruction[1].u.operand;
int argumentsRegister = currentInstruction[2].u.operand;
int property = currentInstruction[3].u.operand;
- addSlowCase(branchTestPtr(NonZero, addressFor(argumentsRegister)));
+ addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister)));
emitGetVirtualRegister(property, regT1);
addSlowCase(emitJumpIfNotImmediateInteger(regT1));
add32(TrustedImm32(1), regT1);
@@ -1544,7 +1470,7 @@ void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
neg32(regT1);
signExtend32ToPtr(regT1, regT1);
- loadPtr(BaseIndex(callFrameRegister, regT1, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
+ load64(BaseIndex(callFrameRegister, regT1, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
emitValueProfilingSite();
emitPutVirtualRegister(dst, regT0);
}
@@ -1571,51 +1497,413 @@ void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vecto
stubCall.callWithValueProfiling(dst);
}
+void JIT::emit_op_put_to_base(Instruction* currentInstruction)
+{
+ int base = currentInstruction[1].u.operand;
+ int id = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ PutToBaseOperation* operation = m_codeBlock->putToBaseOperation(currentInstruction[4].u.operand);
+ switch (operation->m_kind) {
+ case PutToBaseOperation::GlobalVariablePutChecked:
+ addSlowCase(branchTest8(NonZero, AbsoluteAddress(operation->m_predicatePointer)));
+ case PutToBaseOperation::GlobalVariablePut: {
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
+ if (operation->m_isDynamic) {
+ emitGetVirtualRegister(base, regT0);
+ addSlowCase(branchPtr(NotEqual, regT0, TrustedImmPtr(globalObject)));
+ }
+ emitGetVirtualRegister(value, regT0);
+ store64(regT0, operation->m_registerAddress);
+ if (Heap::isWriteBarrierEnabled())
+ emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+ return;
+ }
+ case PutToBaseOperation::VariablePut: {
+ emitGetVirtualRegisters(base, regT0, value, regT1);
+ loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT2);
+ store64(regT1, Address(regT2, operation->m_offset * sizeof(Register)));
+ if (Heap::isWriteBarrierEnabled())
+ emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+ return;
+ }
+
+ case PutToBaseOperation::GlobalPropertyPut: {
+ emitGetVirtualRegisters(base, regT0, value, regT1);
+ loadPtr(&operation->m_structure, regT2);
+ addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), regT2));
+ ASSERT(!operation->m_structure || !operation->m_structure->inlineCapacity());
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+ load32(&operation->m_offsetInButterfly, regT3);
+ signExtend32ToPtr(regT3, regT3);
+ store64(regT1, BaseIndex(regT2, regT3, TimesEight));
+ if (Heap::isWriteBarrierEnabled())
+ emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+ return;
+ }
+
+ case PutToBaseOperation::Uninitialised:
+ case PutToBaseOperation::Readonly:
+ case PutToBaseOperation::Generic:
+ JITStubCall stubCall(this, cti_op_put_to_base);
+
+ stubCall.addArgument(TrustedImm32(base));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(id)));
+ stubCall.addArgument(TrustedImm32(value));
+ stubCall.addArgument(TrustedImmPtr(operation));
+ stubCall.call();
+ return;
+ }
+}
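+
+A compact map of the fast paths this switch emits; note the deliberate fall-through from GlobalVariablePutChecked into GlobalVariablePut, which shares the store after the extra watchpoint byte test. The summary is illustrative, with field names taken from the uses above:
+
+    // GlobalVariablePutChecked: slow case if *m_predicatePointer is set
+    //                           (watchpoint fired), else fall through...
+    // GlobalVariablePut:        *m_registerAddress = value        (one store64)
+    // VariablePut:              base->registers()[m_offset] = value
+    // GlobalPropertyPut:        structure check, then
+    //                           butterfly[m_offsetInButterfly] = value
+    // Uninitialised/Readonly/Generic: no fast path; cti_op_put_to_base stub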
+
#endif // USE(JSVALUE64)
-void JIT::emit_op_resolve_global_dynamic(Instruction* currentInstruction)
+void JIT::emit_resolve_operations(ResolveOperations* resolveOperations, const int* baseVR, const int* valueVR)
{
- int skip = currentInstruction[5].u.operand;
-
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT0);
-
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
- addSlowCase(checkStructure(regT0, m_codeBlock->globalObject()->activationStructure()));
- loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
- activationNotCreated.link(this);
+
+#if USE(JSVALUE32_64)
+ unmap();
+#else
+ killLastResultRegister();
+#endif
+
+ if (resolveOperations->isEmpty()) {
+ addSlowCase(jump());
+ return;
}
- while (skip--) {
- addSlowCase(checkStructure(regT0, m_codeBlock->globalObject()->activationStructure()));
- loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
+
+ const RegisterID value = regT0;
+#if USE(JSVALUE32_64)
+ const RegisterID valueTag = regT1;
+#endif
+ const RegisterID scope = regT2;
+ const RegisterID scratch = regT3;
+
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
+ ResolveOperation* pc = resolveOperations->data();
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, scope);
+ bool setBase = false;
+ bool resolvingBase = true;
+ while (resolvingBase) {
+ switch (pc->m_operation) {
+ case ResolveOperation::ReturnGlobalObjectAsBase:
+ move(TrustedImmPtr(globalObject), value);
+#if USE(JSVALUE32_64)
+ move(TrustedImm32(JSValue::CellTag), valueTag);
+#endif
+ emitValueProfilingSite();
+ emitStoreCell(*baseVR, value);
+ return;
+ case ResolveOperation::SetBaseToGlobal:
+ ASSERT(baseVR);
+ setBase = true;
+ move(TrustedImmPtr(globalObject), scratch);
+ emitStoreCell(*baseVR, scratch);
+ resolvingBase = false;
+ ++pc;
+ break;
+ case ResolveOperation::SetBaseToUndefined: {
+ ASSERT(baseVR);
+ setBase = true;
+#if USE(JSVALUE64)
+ move(TrustedImm64(JSValue::encode(jsUndefined())), scratch);
+ emitPutVirtualRegister(*baseVR, scratch);
+#else
+ emitStore(*baseVR, jsUndefined());
+#endif
+ resolvingBase = false;
+ ++pc;
+ break;
+ }
+ case ResolveOperation::SetBaseToScope:
+ ASSERT(baseVR);
+ setBase = true;
+ emitStoreCell(*baseVR, scope);
+ resolvingBase = false;
+ ++pc;
+ break;
+ case ResolveOperation::ReturnScopeAsBase:
+ emitStoreCell(*baseVR, scope);
+ ASSERT(!value);
+ move(scope, value);
+#if USE(JSVALUE32_64)
+ move(TrustedImm32(JSValue::CellTag), valueTag);
+#endif
+ emitValueProfilingSite();
+ return;
+ case ResolveOperation::SkipTopScopeNode: {
+#if USE(JSVALUE32_64)
+ Jump activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
+#else
+ Jump activationNotCreated = branchTest64(Zero, addressFor(m_codeBlock->activationRegister()));
+#endif
+ loadPtr(Address(scope, JSScope::offsetOfNext()), scope);
+ activationNotCreated.link(this);
+ ++pc;
+ break;
+ }
+ case ResolveOperation::CheckForDynamicEntriesBeforeGlobalScope: {
+ move(scope, regT3);
+ loadPtr(Address(regT3, JSScope::offsetOfNext()), regT1);
+ Jump atTopOfScope = branchTestPtr(Zero, regT1);
+ Label loopStart = label();
+ loadPtr(Address(regT3, JSCell::structureOffset()), regT2);
+ Jump isActivation = branchPtr(Equal, regT2, TrustedImmPtr(globalObject->activationStructure()));
+ addSlowCase(branchPtr(NotEqual, regT2, TrustedImmPtr(globalObject->nameScopeStructure())));
+ isActivation.link(this);
+ move(regT1, regT3);
+ loadPtr(Address(regT3, JSScope::offsetOfNext()), regT1);
+ branchTestPtr(NonZero, regT1, loopStart);
+ atTopOfScope.link(this);
+ ++pc;
+ break;
+ }
+ case ResolveOperation::SkipScopes: {
+ for (int i = 0; i < pc->m_scopesToSkip; i++)
+ loadPtr(Address(scope, JSScope::offsetOfNext()), scope);
+ ++pc;
+ break;
+ }
+ case ResolveOperation::Fail:
+ addSlowCase(jump());
+ return;
+ default:
+ resolvingBase = false;
+ }
}
- emit_op_resolve_global(currentInstruction, true);
+ if (baseVR && !setBase)
+ emitStoreCell(*baseVR, scope);
+
+ ASSERT(valueVR);
+ ResolveOperation* resolveValueOperation = pc;
+ switch (resolveValueOperation->m_operation) {
+ case ResolveOperation::GetAndReturnGlobalProperty: {
+ // Verify structure.
+ move(TrustedImmPtr(globalObject), regT2);
+ move(TrustedImmPtr(resolveValueOperation), regT3);
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(ResolveOperation, m_structure)), regT1);
+ addSlowCase(branchPtr(NotEqual, regT1, Address(regT2, JSCell::structureOffset())));
+
+ // Load property.
+ load32(Address(regT3, OBJECT_OFFSETOF(ResolveOperation, m_offset)), regT3);
+
+ // regT2: GlobalObject
+ // regT3: offset
+#if USE(JSVALUE32_64)
+ compileGetDirectOffset(regT2, valueTag, value, regT3, KnownNotFinal);
+#else
+ compileGetDirectOffset(regT2, value, regT3, regT1, KnownNotFinal);
+#endif
+ break;
+ }
+ case ResolveOperation::GetAndReturnGlobalVarWatchable:
+ case ResolveOperation::GetAndReturnGlobalVar: {
+#if USE(JSVALUE32_64)
+ load32(reinterpret_cast<char*>(pc->m_registerAddress) + OBJECT_OFFSETOF(JSValue, u.asBits.tag), valueTag);
+ load32(reinterpret_cast<char*>(pc->m_registerAddress) + OBJECT_OFFSETOF(JSValue, u.asBits.payload), value);
+#else
+ load64(reinterpret_cast<char*>(pc->m_registerAddress), value);
+#endif
+ break;
+ }
+ case ResolveOperation::GetAndReturnScopedVar: {
+ loadPtr(Address(scope, JSVariableObject::offsetOfRegisters()), scope);
+#if USE(JSVALUE32_64)
+ load32(Address(scope, pc->m_offset * sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), valueTag);
+ load32(Address(scope, pc->m_offset * sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
+#else
+ load64(Address(scope, pc->m_offset * sizeof(Register)), value);
+#endif
+ break;
+ }
+ default:
+ CRASH();
+ return;
+ }
+
+#if USE(JSVALUE32_64)
+ emitStore(*valueVR, valueTag, value);
+#else
+ emitPutVirtualRegister(*valueVR, value);
+#endif
+ emitValueProfilingSite();
}
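
emit_resolve_operations compiles a pre-computed ResolveOperations program in two phases: a loop that settles the base (optionally storing it) followed by exactly one value fetch. A direct-execution sketch of the same structure, with simplified hypothetical types standing in for the real ones:

    #include <cstdint>
    #include <cstdlib>

    struct Scope { Scope* next; int64_t* registers; };
    enum class Op { SetBaseToGlobal, SetBaseToScope, SkipScopes,
                    GetAndReturnGlobalVar, GetAndReturnScopedVar };
    struct SimpleOp { Op op; int scopesToSkip; int64_t* registerAddress; int offset; };

    static int64_t resolveValue(const SimpleOp* pc, Scope* scope, Scope* global)
    {
        // Phase 1: walk ops until the base is settled.
        for (bool resolvingBase = true; resolvingBase; ) {
            switch (pc->op) {
            case Op::SetBaseToGlobal: scope = global; ++pc; resolvingBase = false; break;
            case Op::SetBaseToScope:  ++pc; resolvingBase = false; break;
            case Op::SkipScopes:
                for (int i = 0; i < pc->scopesToSkip; ++i)
                    scope = scope->next;
                ++pc;
                break;
            default: resolvingBase = false; break; // fall into phase 2
            }
        }
        // Phase 2: exactly one value fetch.
        switch (pc->op) {
        case Op::GetAndReturnGlobalVar: return *pc->registerAddress;
        case Op::GetAndReturnScopedVar: return scope->registers[pc->offset];
        default: abort(); // mirrors the CRASH() in the emitter
        }
    }
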
-void JIT::emitSlow_op_resolve_global_dynamic(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_link_resolve_operations(ResolveOperations* resolveOperations, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
- int skip = currentInstruction[5].u.operand;
- while (skip--)
+ if (resolveOperations->isEmpty()) {
linkSlowCase(iter);
- JITStubCall resolveStubCall(this, cti_op_resolve);
- resolveStubCall.addArgument(TrustedImmPtr(ident));
- resolveStubCall.call(dst);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_resolve_global_dynamic));
-
- unsigned currentIndex = m_globalResolveInfoIndex++;
-
- linkSlowCase(iter); // We managed to skip all the nodes in the scope chain, but the cache missed.
- JITStubCall stubCall(this, cti_op_resolve_global);
- stubCall.addArgument(TrustedImmPtr(ident));
- stubCall.addArgument(TrustedImm32(currentIndex));
- stubCall.addArgument(regT0);
- stubCall.callWithValueProfiling(dst);
+ return;
+ }
+
+ ResolveOperation* pc = resolveOperations->data();
+ bool resolvingBase = true;
+ while (resolvingBase) {
+ switch (pc->m_operation) {
+ case ResolveOperation::ReturnGlobalObjectAsBase:
+ return;
+ case ResolveOperation::SetBaseToGlobal:
+ resolvingBase = false;
+ ++pc;
+ break;
+ case ResolveOperation::SetBaseToUndefined: {
+ resolvingBase = false;
+ ++pc;
+ break;
+ }
+ case ResolveOperation::SetBaseToScope:
+ resolvingBase = false;
+ ++pc;
+ break;
+ case ResolveOperation::ReturnScopeAsBase:
+ return;
+ case ResolveOperation::SkipTopScopeNode: {
+ ++pc;
+ break;
+ }
+ case ResolveOperation::SkipScopes:
+ ++pc;
+ break;
+ case ResolveOperation::Fail:
+ linkSlowCase(iter);
+ return;
+ case ResolveOperation::CheckForDynamicEntriesBeforeGlobalScope: {
+ linkSlowCase(iter);
+ ++pc;
+ break;
+ }
+ default:
+ resolvingBase = false;
+ }
+ }
+ ResolveOperation* resolveValueOperation = pc;
+ switch (resolveValueOperation->m_operation) {
+ case ResolveOperation::GetAndReturnGlobalProperty: {
+ linkSlowCase(iter);
+ break;
+ }
+ case ResolveOperation::GetAndReturnGlobalVarWatchable:
+ case ResolveOperation::GetAndReturnGlobalVar:
+ break;
+ case ResolveOperation::GetAndReturnScopedVar:
+ break;
+ default:
+ CRASH();
+ return;
+ }
+}
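+
+The slow-path walker replays the op list purely to keep the SlowCaseEntry iterator in lock step: each addSlowCase emitted on the fast path must be consumed by exactly one linkSlowCase here, in emission order, and ops that emitted none are skipped on both sides.
+
+    // Pairing discipline (sketch):
+    //   fast path                                     slow path
+    //   empty ops: addSlowCase(jump())                linkSlowCase(iter)
+    //   CheckForDynamicEntries...: addSlowCase(...)   linkSlowCase(iter)
+    //   GetAndReturnGlobalProperty: addSlowCase(...)  linkSlowCase(iter)
+    //   GetAndReturnGlobalVar / ScopedVar             nothing on either side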
+
+void JIT::emit_op_resolve(Instruction* currentInstruction)
+{
+ ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[3].u.operand);
+ int dst = currentInstruction[1].u.operand;
+ emit_resolve_operations(operations, 0, &dst);
+}
+
+void JIT::emitSlow_op_resolve(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[3].u.operand);
+ emitSlow_link_resolve_operations(operations, iter);
+ JITStubCall stubCall(this, cti_op_resolve);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->resolveOperations(currentInstruction[3].u.operand)));
+ stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_base(Instruction* currentInstruction)
+{
+ ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[4].u.operand);
+ int dst = currentInstruction[1].u.operand;
+ emit_resolve_operations(operations, &dst, 0);
+}
+
+void JIT::emitSlow_op_resolve_base(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[4].u.operand);
+ emitSlow_link_resolve_operations(operations, iter);
+ JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->resolveOperations(currentInstruction[4].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->putToBaseOperation(currentInstruction[5].u.operand)));
+ stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
+{
+ ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[4].u.operand);
+ int base = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+ emit_resolve_operations(operations, &base, &value);
+}
+
+void JIT::emitSlow_op_resolve_with_base(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[4].u.operand);
+ emitSlow_link_resolve_operations(operations, iter);
+ JITStubCall stubCall(this, cti_op_resolve_with_base);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->resolveOperations(currentInstruction[4].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->putToBaseOperation(currentInstruction[5].u.operand)));
+ stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_resolve_with_this(Instruction* currentInstruction)
+{
+ ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[4].u.operand);
+ int base = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+ emit_resolve_operations(operations, &base, &value);
+}
+
+void JIT::emitSlow_op_resolve_with_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[4].u.operand);
+ emitSlow_link_resolve_operations(operations, iter);
+ JITStubCall stubCall(this, cti_op_resolve_with_this);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->resolveOperations(currentInstruction[4].u.operand)));
+ stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
+}
+
+void JIT::emitSlow_op_put_to_base(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int base = currentInstruction[1].u.operand;
+ int id = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+ int operation = currentInstruction[4].u.operand;
+
+ PutToBaseOperation* putToBaseOperation = m_codeBlock->putToBaseOperation(currentInstruction[4].u.operand);
+ switch (putToBaseOperation->m_kind) {
+ case PutToBaseOperation::VariablePut:
+ return;
+
+ case PutToBaseOperation::GlobalVariablePut:
+ if (!putToBaseOperation->m_isDynamic)
+ return;
+ linkSlowCase(iter);
+ break;
+
+ case PutToBaseOperation::Uninitialised:
+ case PutToBaseOperation::Readonly:
+ case PutToBaseOperation::Generic:
+ return;
+
+ case PutToBaseOperation::GlobalVariablePutChecked:
+ case PutToBaseOperation::GlobalPropertyPut:
+ linkSlowCase(iter);
+ break;
+
+ }
+
+ JITStubCall stubCall(this, cti_op_put_to_base);
+
+ stubCall.addArgument(TrustedImm32(base));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(id)));
+ stubCall.addArgument(TrustedImm32(value));
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->putToBaseOperation(operation)));
+ stubCall.call();
}
void JIT::emit_op_new_regexp(Instruction* currentInstruction)
@@ -1633,7 +1921,7 @@ void JIT::emit_op_new_func(Instruction* currentInstruction)
#if USE(JSVALUE32_64)
lazyJump = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
#else
- lazyJump = branchTestPtr(NonZero, addressFor(dst));
+ lazyJump = branchTest64(NonZero, addressFor(dst));
#endif
}
@@ -1660,34 +1948,6 @@ void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
void JIT::emit_op_new_array(Instruction* currentInstruction)
{
- int length = currentInstruction[3].u.operand;
- if (m_codeBlock->globalObject()->isHavingABadTime()
- || CopiedSpace::isOversize(Butterfly::totalSize(0, 0, true, ArrayStorage::sizeFor(length)))) {
- JITStubCall stubCall(this, cti_op_new_array);
- stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
- return;
- }
- int dst = currentInstruction[1].u.operand;
- int values = currentInstruction[2].u.operand;
-
- emitAllocateJSArray(values, length, regT0, regT1, regT2, regT3);
- emitStoreCell(dst, regT0);
-}
-
-void JIT::emitSlow_op_new_array(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- // If the allocation would be oversize, we will already make the proper stub call above in
- // emit_op_new_array.
- int length = currentInstruction[3].u.operand;
- if (m_codeBlock->globalObject()->isHavingABadTime()
- || CopiedSpace::isOversize(Butterfly::totalSize(0, 0, true, ArrayStorage::sizeFor(length))))
- return;
- linkSlowCase(iter); // We're having a bad time.
- linkSlowCase(iter); // Not enough space in CopiedSpace for storage.
- linkSlowCase(iter); // Not enough space in MarkedSpace for cell.
-
JITStubCall stubCall(this, cti_op_new_array);
stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
index db5365535..44123be19 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -36,6 +36,7 @@
#include "JSCell.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
+#include "JSVariableObject.h"
#include "LinkBuffer.h"
namespace JSC {
@@ -718,13 +719,6 @@ void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
argsNotCreated.link(this);
}
-void JIT::emit_op_resolve(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
-}
-
void JIT::emit_op_to_primitive(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
@@ -760,13 +754,6 @@ void JIT::emit_op_strcat(Instruction* currentInstruction)
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_resolve_base(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
-}
-
void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_ensure_property_exists);
@@ -775,53 +762,6 @@ void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_skip);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool dynamic)
-{
- // FIXME: Optimize to use patching instead of so many memory accesses.
-
- unsigned dst = currentInstruction[1].u.operand;
- void* globalObject = m_codeBlock->globalObject();
-
- unsigned currentIndex = m_globalResolveInfoIndex++;
- GlobalResolveInfo* resolveInfoAddress = &m_codeBlock->globalResolveInfo(currentIndex);
-
-
- // Verify structure.
- move(TrustedImmPtr(globalObject), regT2);
- move(TrustedImmPtr(resolveInfoAddress), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(GlobalResolveInfo, structure)), regT1);
- addSlowCase(branchPtr(NotEqual, regT1, Address(regT2, JSCell::structureOffset())));
-
- // Load property.
- load32(Address(regT3, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), regT3);
- compileGetDirectOffset(regT2, regT1, regT0, regT3, KnownNotFinal);
- emitValueProfilingSite();
- emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + (dynamic ? OPCODE_LENGTH(op_resolve_global_dynamic) : OPCODE_LENGTH(op_resolve_global)), dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
-
- unsigned currentIndex = m_globalResolveInfoIndex++;
-
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_resolve_global);
- stubCall.addArgument(TrustedImmPtr(ident));
- stubCall.addArgument(TrustedImm32(currentIndex));
- stubCall.callWithValueProfiling(dst);
-}
-
void JIT::emit_op_not(Instruction* currentInstruction)
{
unsigned dst = currentInstruction[1].u.operand;
@@ -1214,22 +1154,6 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
emitStoreBool(dst, regT1);
}
-void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_with_base);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
- stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
-}
-
-void JIT::emit_op_resolve_with_this(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_with_this);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
- stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
-}
-
void JIT::emit_op_throw(Instruction* currentInstruction)
{
unsigned exception = currentInstruction[1].u.operand;
@@ -1686,6 +1610,71 @@ void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vecto
stubCall.callWithValueProfiling(dst);
}
+void JIT::emit_op_put_to_base(Instruction* currentInstruction)
+{
+ int base = currentInstruction[1].u.operand;
+ int id = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ PutToBaseOperation* operation = m_codeBlock->putToBaseOperation(currentInstruction[4].u.operand);
+
+
+ switch (operation->m_kind) {
+ case PutToBaseOperation::GlobalVariablePutChecked:
+ addSlowCase(branchTest8(NonZero, AbsoluteAddress(operation->m_predicatePointer)));
+ case PutToBaseOperation::GlobalVariablePut: {
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
+ if (operation->m_isDynamic)
+ addSlowCase(branchPtr(NotEqual, payloadFor(base), TrustedImmPtr(globalObject)));
+
+ emitLoad(value, regT1, regT0);
+ storePtr(regT0, reinterpret_cast<char*>(operation->m_registerAddress) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
+ storePtr(regT1, reinterpret_cast<char*>(operation->m_registerAddress) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
+ if (Heap::isWriteBarrierEnabled())
+ emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+ break;
+ }
+ case PutToBaseOperation::VariablePut: {
+ loadPtr(payloadFor(base), regT3);
+ emitLoad(value, regT1, regT0);
+ loadPtr(Address(regT3, JSVariableObject::offsetOfRegisters()), regT2);
+ store32(regT0, Address(regT2, operation->m_offset * sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ store32(regT1, Address(regT2, operation->m_offset * sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ if (Heap::isWriteBarrierEnabled())
+ emitWriteBarrier(regT3, regT1, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+ break;
+ }
+
+ case PutToBaseOperation::GlobalPropertyPut: {
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
+ loadPtr(payloadFor(base), regT3);
+ emitLoad(value, regT1, regT0);
+ loadPtr(&operation->m_structure, regT2);
+ addSlowCase(branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), regT2));
+ ASSERT(!operation->m_structure || !operation->m_structure->inlineCapacity());
+ loadPtr(Address(regT3, JSObject::butterflyOffset()), regT2);
+ load32(&operation->m_offsetInButterfly, regT3);
+ storePtr(regT0, BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ storePtr(regT1, BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ if (Heap::isWriteBarrierEnabled())
+ emitWriteBarrier(globalObject, regT1, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+ break;
+ }
+
+ case PutToBaseOperation::Uninitialised:
+ case PutToBaseOperation::Readonly:
+ case PutToBaseOperation::Generic:
+ JITStubCall stubCall(this, cti_op_put_to_base);
+
+ stubCall.addArgument(TrustedImm32(base));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(id)));
+ stubCall.addArgument(TrustedImm32(value));
+ stubCall.addArgument(TrustedImmPtr(operation));
+ stubCall.call();
+ break;
+ }
+}
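+
+The 32-bit path stores each JSValue as a separate tag word and payload word. A sketch of the layout the paired store32/storePtr calls assume, for a little-endian target (a big-endian variant would swap the two fields):
+
+    #include <cstdint>
+
+    // JSVALUE32_64 layout: one JSValue is two 32-bit words, addressed
+    // via OBJECT_OFFSETOF(JSValue, u.asBits.payload / u.asBits.tag).
+    union EncodedValueDescriptor {
+        int64_t asInt64;
+        struct { int32_t payload; int32_t tag; } asBits;
+    };
+
+    // A put is therefore two 32-bit stores rather than one store64.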
+
} // namespace JSC
#endif // USE(JSVALUE32_64)
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
index 8a4017f1d..b7be821f6 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -137,7 +137,7 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
Label done = label();
#if !ASSERT_DISABLED
- Jump resultOK = branchTestPtr(NonZero, regT0);
+ Jump resultOK = branchTest64(NonZero, regT0);
breakpoint();
resultOK.link(this);
#endif
@@ -155,8 +155,8 @@ JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType)
badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ContiguousShape));
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
- loadPtr(BaseIndex(regT2, regT1, ScalePtr), regT0);
- slowCases.append(branchTestPtr(Zero, regT0));
+ load64(BaseIndex(regT2, regT1, TimesEight), regT0);
+ slowCases.append(branchTest64(Zero, regT0));
return slowCases;
}
@@ -171,8 +171,8 @@ JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));
- loadPtr(BaseIndex(regT2, regT1, ScalePtr, ArrayStorage::vectorOffset()), regT0);
- slowCases.append(branchTestPtr(Zero, regT0));
+ load64(BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset()), regT0);
+ slowCases.append(branchTest64(Zero, regT0));
return slowCases;
}
@@ -189,7 +189,7 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
linkSlowCase(iter); // base array check
Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get()));
emitNakedCall(CodeLocationLabel(m_globalData->getCTIStub(stringGetByValStubGenerator).code()));
- Jump failed = branchTestPtr(Zero, regT0);
+ Jump failed = branchTest64(Zero, regT0);
emitPutVirtualRegister(dst, regT0);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
failed.link(this);
@@ -235,7 +235,7 @@ void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID
neg32(offset);
}
signExtend32ToPtr(offset, offset);
- loadPtr(BaseIndex(scratch, offset, ScalePtr, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
+ load64(BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
}
void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
@@ -248,7 +248,7 @@ void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
unsigned i = currentInstruction[6].u.operand;
emitGetVirtualRegister(property, regT0);
- addSlowCase(branchPtr(NotEqual, regT0, addressFor(expected)));
+ addSlowCase(branch64(NotEqual, regT0, addressFor(expected)));
emitGetVirtualRegisters(base, regT0, iter, regT1);
emitJumpSlowCaseIfNotJSCell(regT0, base);
@@ -337,7 +337,7 @@ JIT::JumpList JIT::emitContiguousPutByVal(Instruction* currentInstruction, Patch
Label storeResult = label();
emitGetVirtualRegister(value, regT3);
- storePtr(regT3, BaseIndex(regT2, regT1, ScalePtr));
+ store64(regT3, BaseIndex(regT2, regT1, TimesEight));
Jump done = jump();
outOfBounds.link(this);
@@ -367,11 +367,11 @@ JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, Pat
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));
- Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ Jump empty = branchTest64(Zero, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
Label storeResult(this);
emitGetVirtualRegister(value, regT3);
- storePtr(regT3, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ store64(regT3, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
Jump end = jump();
empty.link(this);
@@ -535,7 +535,7 @@ void JIT::compileGetByIdHotPath(int baseVReg, Identifier*)
addSlowCase(structureCheck);
ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
- DataLabelCompact displacementLabel = loadPtrWithCompactAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
+ DataLabelCompact displacementLabel = load64WithCompactAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
Label putResult(this);
@@ -602,7 +602,7 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
- DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT2, patchPutByIdDefaultOffset));
+ DataLabel32 displacementLabel = store64WithAddressOffsetPatch(regT1, Address(regT2, patchPutByIdDefaultOffset));
END_UNINTERRUPTED_SEQUENCE(sequencePutById);
@@ -636,35 +636,35 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase
void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset)
{
if (isInlineOffset(cachedOffset)) {
- storePtr(value, Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)));
+ store64(value, Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)));
return;
}
loadPtr(Address(base, JSObject::butterflyOffset()), base);
- storePtr(value, Address(base, sizeof(JSValue) * offsetInButterfly(cachedOffset)));
+ store64(value, Address(base, sizeof(JSValue) * offsetInButterfly(cachedOffset)));
}
// Compile a load from an object's property storage. May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset)
{
if (isInlineOffset(cachedOffset)) {
- loadPtr(Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)), result);
+ load64(Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)), result);
return;
}
loadPtr(Address(base, JSObject::butterflyOffset()), result);
- loadPtr(Address(result, sizeof(JSValue) * offsetInButterfly(cachedOffset)), result);
+ load64(Address(result, sizeof(JSValue) * offsetInButterfly(cachedOffset)), result);
}
void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset)
{
if (isInlineOffset(cachedOffset)) {
- loadPtr(base->locationForOffset(cachedOffset), result);
+ load64(base->locationForOffset(cachedOffset), result);
return;
}
loadPtr(base->butterflyAddress(), result);
- loadPtr(Address(result, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>)), result);
+ load64(Address(result, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>)), result);
}
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
@@ -1190,90 +1190,35 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
-void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
-{
- int skip = currentInstruction[3].u.operand;
-
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT0);
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
- loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
- activationNotCreated.link(this);
- }
- while (skip--)
- loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
-
- loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
- loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
- emitValueProfilingSite();
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
-{
- int skip = currentInstruction[2].u.operand;
-
- emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
-
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1);
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
- loadPtr(Address(regT1, JSScope::offsetOfNext()), regT1);
- activationNotCreated.link(this);
- }
- while (skip--)
- loadPtr(Address(regT1, JSScope::offsetOfNext()), regT1);
-
- emitWriteBarrier(regT1, regT0, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
-
- loadPtr(Address(regT1, JSVariableObject::offsetOfRegisters()), regT1);
- storePtr(regT0, Address(regT1, currentInstruction[1].u.operand * sizeof(Register)));
-}
-
-void JIT::emit_op_get_global_var(Instruction* currentInstruction)
-{
- loadPtr(currentInstruction[2].u.registerPointer, regT0);
- emitValueProfilingSite();
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_put_global_var(Instruction* currentInstruction)
+void JIT::emit_op_init_global_const(Instruction* currentInstruction)
{
JSGlobalObject* globalObject = m_codeBlock->globalObject();
emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
-
- storePtr(regT0, currentInstruction[1].u.registerPointer);
+
+ store64(regT0, currentInstruction[1].u.registerPointer);
if (Heap::isWriteBarrierEnabled())
emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
}
-void JIT::emit_op_put_global_var_check(Instruction* currentInstruction)
+void JIT::emit_op_init_global_const_check(Instruction* currentInstruction)
{
emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
-
+
addSlowCase(branchTest8(NonZero, AbsoluteAddress(currentInstruction[3].u.predicatePointer)));
JSGlobalObject* globalObject = m_codeBlock->globalObject();
-
- storePtr(regT0, currentInstruction[1].u.registerPointer);
+
+ store64(regT0, currentInstruction[1].u.registerPointer);
if (Heap::isWriteBarrierEnabled())
emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
}
-void JIT::emitSlow_op_put_global_var_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_init_global_const_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_put_global_var_check);
+
+ JITStubCall stubCall(this, cti_op_init_global_const_check);
stubCall.addArgument(regT0);
stubCall.addArgument(TrustedImm32(currentInstruction[4].u.operand));
stubCall.call();
@@ -1585,8 +1530,8 @@ JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badTyp
convertInt32ToDouble(resultPayload, fpRegT0);
addDouble(AbsoluteAddress(&twoToThe32), fpRegT0);
#if USE(JSVALUE64)
- moveDoubleToPtr(fpRegT0, resultPayload);
- subPtr(tagTypeNumberRegister, resultPayload);
+ moveDoubleTo64(fpRegT0, resultPayload);
+ sub64(tagTypeNumberRegister, resultPayload);
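+ // Box the double: subtracting tagTypeNumber moves the raw IEEE 754 bits into the double range of the 64-bit value encoding.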
#else
moveDoubleToInts(fpRegT0, resultPayload, resultTag);
#endif
@@ -1596,7 +1541,7 @@ JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badTyp
}
#if USE(JSVALUE64)
- orPtr(tagTypeNumberRegister, resultPayload);
+ or64(tagTypeNumberRegister, resultPayload);
#else
move(TrustedImm32(JSValue::Int32Tag), resultTag);
#endif
@@ -1645,8 +1590,8 @@ JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badT
}
#if USE(JSVALUE64)
- moveDoubleToPtr(fpRegT0, resultPayload);
- subPtr(tagTypeNumberRegister, resultPayload);
+ moveDoubleTo64(fpRegT0, resultPayload);
+ sub64(tagTypeNumberRegister, resultPayload);
#else
moveDoubleToInts(fpRegT0, resultPayload, resultTag);
#endif
@@ -1746,8 +1691,8 @@ JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction,
Jump ready = jump();
doubleCase.link(this);
slowCases.append(emitJumpIfNotImmediateNumber(earlyScratch));
- addPtr(tagTypeNumberRegister, earlyScratch);
- movePtrToDouble(earlyScratch, fpRegT0);
+ add64(tagTypeNumberRegister, earlyScratch);
+ move64ToDouble(earlyScratch, fpRegT0);
ready.link(this);
#else
emitLoad(value, lateScratch, earlyScratch);
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
index a4a547889..5d619b94b 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -1232,72 +1232,7 @@ void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowC
stubCall.call(dst);
}
-void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int index = currentInstruction[2].u.operand;
- int skip = currentInstruction[3].u.operand;
-
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2);
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
- loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
- activationNotCreated.link(this);
- }
- while (skip--)
- loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
-
- loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT2);
-
- emitLoad(index, regT1, regT0, regT2);
- emitValueProfilingSite();
- emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
-}
-
-void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
-{
- int index = currentInstruction[1].u.operand;
- int skip = currentInstruction[2].u.operand;
- int value = currentInstruction[3].u.operand;
-
- emitLoad(value, regT1, regT0);
-
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2);
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
- loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
- activationNotCreated.link(this);
- }
- while (skip--)
- loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
-
- loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT3);
- emitStore(index, regT1, regT0, regT3);
- emitWriteBarrier(regT2, regT1, regT0, regT1, ShouldFilterImmediates, WriteBarrierForVariableAccess);
-}
-
-void JIT::emit_op_get_global_var(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- WriteBarrier<Unknown>* registerPointer = currentInstruction[2].u.registerPointer;
-
- load32(registerPointer->tagPointer(), regT1);
- load32(registerPointer->payloadPointer(), regT0);
- emitValueProfilingSite();
- emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
-}
-
-void JIT::emit_op_put_global_var(Instruction* currentInstruction)
+void JIT::emit_op_init_global_const(Instruction* currentInstruction)
{
WriteBarrier<Unknown>* registerPointer = currentInstruction[1].u.registerPointer;
int value = currentInstruction[2].u.operand;
@@ -1314,10 +1249,10 @@ void JIT::emit_op_put_global_var(Instruction* currentInstruction)
store32(regT1, registerPointer->tagPointer());
store32(regT0, registerPointer->payloadPointer());
- map(m_bytecodeOffset + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_init_global_const), value, regT1, regT0);
}
-void JIT::emit_op_put_global_var_check(Instruction* currentInstruction)
+void JIT::emit_op_init_global_const_check(Instruction* currentInstruction)
{
WriteBarrier<Unknown>* registerPointer = currentInstruction[1].u.registerPointer;
int value = currentInstruction[2].u.operand;
@@ -1338,11 +1273,11 @@ void JIT::emit_op_put_global_var_check(Instruction* currentInstruction)
unmap();
}
-void JIT::emitSlow_op_put_global_var_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_init_global_const_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_put_global_var_check);
+ JITStubCall stubCall(this, cti_op_init_global_const_check);
stubCall.addArgument(regT1, regT0);
stubCall.addArgument(TrustedImm32(currentInstruction[4].u.operand));
stubCall.call();
diff --git a/Source/JavaScriptCore/jit/JITStubCall.h b/Source/JavaScriptCore/jit/JITStubCall.h
index 352956559..25755886a 100644
--- a/Source/JavaScriptCore/jit/JITStubCall.h
+++ b/Source/JavaScriptCore/jit/JITStubCall.h
@@ -94,15 +94,15 @@ namespace JSC {
{
}
-#if USE(JSVALUE32_64)
JITStubCall(JIT* jit, EncodedJSValue (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
: m_jit(jit)
, m_stub(stub)
+#if USE(JSVALUE32_64) || !ASSERT_DISABLED
, m_returnType(Value)
+#endif
, m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
{
}
-#endif
// Arguments are added first to last.
@@ -137,7 +137,11 @@ namespace JSC {
void addArgument(JIT::RegisterID argument)
{
+#if USE(JSVALUE32_64)
m_jit->poke(argument, m_stackIndex);
+#else
+ m_jit->poke64(argument, m_stackIndex);
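+ // On 64-bit, a register argument carries a full EncodedJSValue, so the store is spelled as an explicit 64-bit poke.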
+#endif
m_stackIndex += stackIndexStep;
}
@@ -148,6 +152,18 @@ namespace JSC {
m_jit->poke(JIT::Imm32(value.tag()), m_stackIndex + 1);
m_stackIndex += stackIndexStep;
}
+#else
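+ // On 64-bit, an immediate EncodedJSValue fits in a single argument slot; no tag/payload split is needed.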
+ void addArgument(JIT::TrustedImm64 argument)
+ {
+ m_jit->poke(argument, m_stackIndex);
+ m_stackIndex += stackIndexStep;
+ }
+
+ void addArgument(JIT::Imm64 argument)
+ {
+ m_jit->poke(argument, m_stackIndex);
+ m_stackIndex += stackIndexStep;
+ }
#endif
void addArgument(JIT::RegisterID tag, JIT::RegisterID payload)
@@ -179,9 +195,9 @@ namespace JSC {
void addArgument(unsigned src, JIT::RegisterID scratchRegister) // src is a virtual register.
{
if (m_jit->m_codeBlock->isConstantRegisterIndex(src))
- addArgument(JIT::ImmPtr(JSValue::encode(m_jit->m_codeBlock->getConstant(src))));
+ addArgument(JIT::Imm64(JSValue::encode(m_jit->m_codeBlock->getConstant(src))));
else {
- m_jit->loadPtr(JIT::Address(JIT::callFrameRegister, src * sizeof(Register)), scratchRegister);
+ m_jit->load64(JIT::Address(JIT::callFrameRegister, src * sizeof(Register)), scratchRegister);
addArgument(scratchRegister);
}
m_jit->killLastResultRegister();
@@ -242,7 +258,7 @@ namespace JSC {
#else
JIT::Call call(unsigned dst) // dst is a virtual register.
{
- ASSERT(m_returnType == VoidPtr || m_returnType == Cell);
+ ASSERT(m_returnType == Value || m_returnType == Cell);
JIT::Call call = this->call();
m_jit->emitPutVirtualRegister(dst);
return call;
@@ -250,7 +266,7 @@ namespace JSC {
JIT::Call callWithValueProfiling(unsigned dst)
{
- ASSERT(m_returnType == VoidPtr || m_returnType == Cell);
+ ASSERT(m_returnType == Value || m_returnType == Cell);
JIT::Call call = this->call();
ASSERT(JIT::returnValueRegister == JIT::regT0);
m_jit->emitValueProfilingSite();
@@ -261,10 +277,8 @@ namespace JSC {
JIT::Call call(JIT::RegisterID dst) // dst is a machine register.
{
-#if USE(JSVALUE32_64)
+#if USE(JSVALUE32_64) || !ASSERT_DISABLED
ASSERT(m_returnType == Value || m_returnType == VoidPtr || m_returnType == Int || m_returnType == Cell);
-#else
- ASSERT(m_returnType == VoidPtr || m_returnType == Int || m_returnType == Cell);
#endif
JIT::Call call = this->call();
if (dst != JIT::returnValueRegister)
diff --git a/Source/JavaScriptCore/jit/JITStubs.cpp b/Source/JavaScriptCore/jit/JITStubs.cpp
index f1f2f4c9d..a16b328ad 100644
--- a/Source/JavaScriptCore/jit/JITStubs.cpp
+++ b/Source/JavaScriptCore/jit/JITStubs.cpp
@@ -2375,7 +2375,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_new_array_buffer)
return constructArray(stackFrame.callFrame, stackFrame.callFrame->codeBlock()->constantBuffer(stackFrame.args[0].int32()), stackFrame.args[1].int32());
}
-DEFINE_STUB_FUNCTION(void, op_put_global_var_check)
+DEFINE_STUB_FUNCTION(void, op_init_global_const_check)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2390,11 +2390,22 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve)
CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = JSScope::resolve(callFrame, stackFrame.args[0].identifier());
+ JSValue result = JSScope::resolve(callFrame, stackFrame.args[0].identifier(), stackFrame.args[1].resolveOperations());
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
+DEFINE_STUB_FUNCTION(void, op_put_to_base)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue base = callFrame->r(stackFrame.args[0].int32()).jsValue();
+ JSValue value = callFrame->r(stackFrame.args[2].int32()).jsValue();
+ JSScope::resolvePut(callFrame, base, stackFrame.args[1].identifier(), value, stackFrame.args[3].putToBaseOperation());
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
DEFINE_STUB_FUNCTION(EncodedJSValue, op_construct_NotJSConstruct)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2719,14 +2730,14 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_base)
{
STUB_INIT_STACK_FRAME(stackFrame);
- return JSValue::encode(JSScope::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), false));
+ return JSValue::encode(JSScope::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), false, stackFrame.args[1].resolveOperations(), stackFrame.args[2].putToBaseOperation()));
}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_base_strict_put)
{
STUB_INIT_STACK_FRAME(stackFrame);
- if (JSValue result = JSScope::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), true))
+ if (JSValue result = JSScope::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), true, stackFrame.args[1].resolveOperations(), stackFrame.args[2].putToBaseOperation()))
return JSValue::encode(result);
VM_THROW_EXCEPTION();
}
@@ -2745,36 +2756,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_ensure_property_exists)
return JSValue::encode(base);
}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_skip)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue result = JSScope::resolveSkip(stackFrame.callFrame, stackFrame.args[0].identifier(), stackFrame.args[1].int32());
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_global)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[0].identifier();
- CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned globalResolveInfoIndex = stackFrame.args[1].int32();
- GlobalResolveInfo& globalResolveInfo = codeBlock->globalResolveInfo(globalResolveInfoIndex);
-
- JSValue result = JSScope::resolveGlobal(
- callFrame,
- ident,
- callFrame->lexicalGlobalObject(),
- &globalResolveInfo.structure,
- &globalResolveInfo.offset
- );
- CHECK_FOR_EXCEPTION();
- return JSValue::encode(result);
-}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_div)
{
@@ -3055,7 +3036,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_with_base)
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = JSScope::resolveWithBase(callFrame, stackFrame.args[0].identifier(), &callFrame->registers()[stackFrame.args[1].int32()]);
+ JSValue result = JSScope::resolveWithBase(callFrame, stackFrame.args[0].identifier(), &callFrame->registers()[stackFrame.args[1].int32()], stackFrame.args[2].resolveOperations(), stackFrame.args[3].putToBaseOperation());
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
@@ -3065,7 +3046,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_with_this)
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = JSScope::resolveWithThis(callFrame, stackFrame.args[0].identifier(), &callFrame->registers()[stackFrame.args[1].int32()]);
+ JSValue result = JSScope::resolveWithThis(callFrame, stackFrame.args[0].identifier(), &callFrame->registers()[stackFrame.args[1].int32()], stackFrame.args[2].resolveOperations());
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
diff --git a/Source/JavaScriptCore/jit/JITStubs.h b/Source/JavaScriptCore/jit/JITStubs.h
index 4a3b252d6..6e3141e22 100644
--- a/Source/JavaScriptCore/jit/JITStubs.h
+++ b/Source/JavaScriptCore/jit/JITStubs.h
@@ -35,6 +35,7 @@
#include "LowLevelInterpreter.h"
#include "MacroAssemblerCodeRef.h"
#include "Register.h"
+#include "ResolveOperation.h"
#include "ThunkGenerators.h"
#include <wtf/HashMap.h>
@@ -82,6 +83,8 @@ namespace JSC {
JSString* jsString() { return static_cast<JSString*>(asPointer); }
Structure* structure() { return static_cast<Structure*>(asPointer); }
ReturnAddressPtr returnAddress() { return ReturnAddressPtr(asPointer); }
+ ResolveOperations* resolveOperations() { return static_cast<ResolveOperations*>(asPointer); }
+ PutToBaseOperation* putToBaseOperation() { return static_cast<PutToBaseOperation*>(asPointer); }
};
struct TrampolineStructure {
@@ -398,11 +401,9 @@ extern "C" {
EncodedJSValue JIT_STUB cti_op_resolve_base(STUB_ARGS_DECLARATION) WTF_INTERNAL;
EncodedJSValue JIT_STUB cti_op_resolve_base_strict_put(STUB_ARGS_DECLARATION) WTF_INTERNAL;
EncodedJSValue JIT_STUB cti_op_ensure_property_exists(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_resolve_global(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_resolve_global_dynamic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_resolve_skip(STUB_ARGS_DECLARATION) WTF_INTERNAL;
EncodedJSValue JIT_STUB cti_op_resolve_with_base(STUB_ARGS_DECLARATION) WTF_INTERNAL;
EncodedJSValue JIT_STUB cti_op_resolve_with_this(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+ void JIT_STUB cti_op_put_to_base(STUB_ARGS_DECLARATION) WTF_INTERNAL;
EncodedJSValue JIT_STUB cti_op_rshift(STUB_ARGS_DECLARATION) WTF_INTERNAL;
EncodedJSValue JIT_STUB cti_op_strcat(STUB_ARGS_DECLARATION) WTF_INTERNAL;
EncodedJSValue JIT_STUB cti_op_stricteq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
@@ -450,7 +451,7 @@ extern "C" {
void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION) WTF_INTERNAL;
void JIT_STUB cti_op_put_by_val_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
void JIT_STUB cti_op_put_getter_setter(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_put_global_var_check(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+ void JIT_STUB cti_op_init_global_const_check(STUB_ARGS_DECLARATION) WTF_INTERNAL;
void JIT_STUB cti_op_tear_off_activation(STUB_ARGS_DECLARATION) WTF_INTERNAL;
void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS_DECLARATION) WTF_INTERNAL;
void JIT_STUB cti_op_throw_reference_error(STUB_ARGS_DECLARATION) WTF_INTERNAL;
diff --git a/Source/JavaScriptCore/jit/JSInterfaceJIT.h b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
index 8d9a0c800..d2a91ba0a 100644
--- a/Source/JavaScriptCore/jit/JSInterfaceJIT.h
+++ b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
@@ -270,36 +270,36 @@ namespace JSC {
#if USE(JSVALUE64)
ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfImmediateNumber(RegisterID reg)
{
- return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
+ return branchTest64(NonZero, reg, tagTypeNumberRegister);
}
ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotImmediateNumber(RegisterID reg)
{
- return branchTestPtr(Zero, reg, tagTypeNumberRegister);
+ return branchTest64(Zero, reg, tagTypeNumberRegister);
}
inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadJSCell(unsigned virtualRegisterIndex, RegisterID dst)
{
- loadPtr(addressFor(virtualRegisterIndex), dst);
- return branchTestPtr(NonZero, dst, tagMaskRegister);
+ load64(addressFor(virtualRegisterIndex), dst);
+ return branchTest64(NonZero, dst, tagMaskRegister);
}
inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst)
{
- loadPtr(addressFor(virtualRegisterIndex), dst);
- Jump result = branchPtr(Below, dst, tagTypeNumberRegister);
+ load64(addressFor(virtualRegisterIndex), dst);
+ Jump result = branch64(Below, dst, tagTypeNumberRegister);
zeroExtend32ToPtr(dst, dst);
return result;
}
inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch)
{
- loadPtr(addressFor(virtualRegisterIndex), scratch);
+ load64(addressFor(virtualRegisterIndex), scratch);
Jump notNumber = emitJumpIfNotImmediateNumber(scratch);
- Jump notInt = branchPtr(Below, scratch, tagTypeNumberRegister);
+ Jump notInt = branch64(Below, scratch, tagTypeNumberRegister);
convertInt32ToDouble(scratch, dst);
Jump done = jump();
notInt.link(this);
- addPtr(tagTypeNumberRegister, scratch);
- movePtrToDouble(scratch, dst);
+ add64(tagTypeNumberRegister, scratch);
+ move64ToDouble(scratch, dst);
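+ // Unbox the double: adding tagTypeNumber back recovers the raw IEEE 754 bit pattern before moving it to an FP register.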
done.link(this);
return notNumber;
}
diff --git a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
index 560f7c833..9c7fbce81 100644
--- a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
+++ b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
@@ -90,9 +90,9 @@ namespace JSC {
void returnDouble(FPRegisterID src)
{
#if USE(JSVALUE64)
- moveDoubleToPtr(src, regT0);
- Jump zero = branchTestPtr(Zero, regT0);
- subPtr(tagTypeNumberRegister, regT0);
+ moveDoubleTo64(src, regT0);
+ Jump zero = branchTest64(Zero, regT0);
+ sub64(tagTypeNumberRegister, regT0);
Jump done = jump();
zero.link(this);
move(tagTypeNumberRegister, regT0);
@@ -151,7 +151,7 @@ namespace JSC {
void tagReturnAsInt32()
{
#if USE(JSVALUE64)
- orPtr(tagTypeNumberRegister, regT0);
+ or64(tagTypeNumberRegister, regT0);
#else
move(TrustedImm32(JSValue::Int32Tag), regT1);
#endif
diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
index fbf5b8598..74beae98a 100644
--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
@@ -119,12 +119,20 @@ namespace JSC { namespace LLInt {
JSValue __rp_returnValue = (value); \
LLINT_CHECK_EXCEPTION(); \
LLINT_OP(1) = __rp_returnValue; \
- pc[OPCODE_LENGTH(opcode) - 1].u.profile->m_buckets[0] = \
- JSValue::encode(__rp_returnValue); \
+ LLINT_PROFILE_VALUE(opcode, __rp_returnValue); \
LLINT_END_IMPL(); \
} while (false)
+
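+// Record a value in the opcode's ValueProfile bucket so the optimizing JIT can consume the observed type later.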
+#define LLINT_PROFILE_VALUE(opcode, value) do { \
+ pc[OPCODE_LENGTH(opcode) - 1].u.profile->m_buckets[0] = \
+ JSValue::encode(value); \
+ } while (false)
+
#else // ENABLE(VALUE_PROFILER)
#define LLINT_RETURN_PROFILED(opcode, value) LLINT_RETURN(value)
+
+#define LLINT_PROFILE_VALUE(opcode, value) do { } while (false)
+
#endif // ENABLE(VALUE_PROFILER)
#define LLINT_CALL_END_IMPL(exec, callTarget) LLINT_RETURN_TWO((callTarget), (exec))
@@ -777,52 +785,84 @@ LLINT_SLOW_PATH_DECL(slow_path_in)
LLINT_SLOW_PATH_DECL(slow_path_resolve)
{
LLINT_BEGIN();
- LLINT_RETURN_PROFILED(op_resolve, JSScope::resolve(exec, exec->codeBlock()->identifier(pc[2].u.operand)));
-}
+ Identifier ident = exec->codeBlock()->identifier(pc[2].u.operand);
+ ResolveOperations* operations = exec->codeBlock()->resolveOperations(pc[3].u.operand);
+ JSValue result = JSScope::resolve(exec, ident, operations);
+ ASSERT(operations->size());
+ ASSERT(operations == exec->codeBlock()->resolveOperations(pc[3].u.operand));
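+ // Rewrite the opcode in place so future executions dispatch to an LLInt fast path specialized for the observed resolve shape.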
+ switch (operations->data()[0].m_operation) {
+ case ResolveOperation::GetAndReturnGlobalProperty:
+ pc[0].u.opcode = LLInt::getOpcode(llint_op_resolve_global_property);
+ break;
-LLINT_SLOW_PATH_DECL(slow_path_resolve_skip)
-{
- LLINT_BEGIN();
- LLINT_RETURN_PROFILED(
- op_resolve_skip,
- JSScope::resolveSkip(
- exec,
- exec->codeBlock()->identifier(pc[2].u.operand),
- pc[3].u.operand));
-}
+ case ResolveOperation::GetAndReturnGlobalVar:
+ pc[0].u.opcode = LLInt::getOpcode(llint_op_resolve_global_var);
+ break;
-LLINT_SLOW_PATH_DECL(slow_path_resolve_global)
-{
- LLINT_BEGIN();
- Identifier& ident = exec->codeBlock()->identifier(pc[2].u.operand);
- LLINT_RETURN_PROFILED(op_resolve_global, JSScope::resolveGlobal(exec, ident, exec->lexicalGlobalObject(), &pc[3].u.structure, &pc[4].u.operand));
-}
+ case ResolveOperation::SkipTopScopeNode:
+ pc[0].u.opcode = LLInt::getOpcode(llint_op_resolve_scoped_var_with_top_scope_check);
+ break;
-LLINT_SLOW_PATH_DECL(slow_path_resolve_global_dynamic)
-{
- // FIXME: <rdar://problem/12185487> LLInt resolve_global_dynamic doesn't check intervening scopes for modification
- LLINT_BEGIN();
- Identifier& ident = exec->codeBlock()->identifier(pc[2].u.operand);
- LLINT_RETURN_PROFILED(op_resolve_global_dynamic, JSScope::resolveGlobal(exec, ident, exec->lexicalGlobalObject(), &pc[3].u.structure, &pc[4].u.operand));
+ case ResolveOperation::SkipScopes:
+ if (operations->data()[0].m_scopesToSkip)
+ pc[0].u.opcode = LLInt::getOpcode(llint_op_resolve_scoped_var);
+ else
+ pc[0].u.opcode = LLInt::getOpcode(llint_op_resolve_scoped_var_on_top_scope);
+ break;
+
+ default:
+ break;
+ }
+ LLINT_RETURN_PROFILED(op_resolve, result);
}
-LLINT_SLOW_PATH_DECL(slow_path_resolve_for_resolve_global_dynamic)
+LLINT_SLOW_PATH_DECL(slow_path_put_to_base)
{
LLINT_BEGIN();
- LLINT_RETURN_PROFILED(op_resolve_global_dynamic, JSScope::resolve(exec, exec->codeBlock()->identifier(pc[2].u.operand)));
+ PutToBaseOperation* operation = exec->codeBlock()->putToBaseOperation(pc[4].u.operand);
+ JSScope::resolvePut(exec, LLINT_OP_C(1).jsValue(), exec->codeBlock()->identifier(pc[2].u.operand), LLINT_OP_C(3).jsValue(), operation);
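+ // As with resolve, patch the opcode so the common variable-put shape takes the LLInt fast path on later executions.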
+ switch (operation->m_kind) {
+ case PutToBaseOperation::VariablePut:
+ pc[0].u.opcode = LLInt::getOpcode(llint_op_put_to_base_variable);
+ break;
+
+ default:
+ break;
+ }
+ LLINT_END();
}
LLINT_SLOW_PATH_DECL(slow_path_resolve_base)
{
LLINT_BEGIN();
Identifier& ident = exec->codeBlock()->identifier(pc[2].u.operand);
+ ResolveOperations* operations = exec->codeBlock()->resolveOperations(pc[4].u.operand);
+ JSValue result;
if (pc[3].u.operand) {
- if (JSValue result = JSScope::resolveBase(exec, ident, true))
- LLINT_RETURN(result);
- LLINT_THROW(globalData.exception);
+ result = JSScope::resolveBase(exec, ident, true, operations, exec->codeBlock()->putToBaseOperation(pc[5].u.operand));
+ if (!result)
+ LLINT_THROW(globalData.exception);
+ } else
+ result = JSScope::resolveBase(exec, ident, false, operations, exec->codeBlock()->putToBaseOperation(pc[5].u.operand));
+ ASSERT(operations->size());
+ switch (operations->data()[0].m_operation) {
+ case ResolveOperation::ReturnGlobalObjectAsBase:
+ pc[0].u.opcode = LLInt::getOpcode(llint_op_resolve_base_to_global);
+ break;
+
+ case ResolveOperation::SkipTopScopeNode:
+ pc[0].u.opcode = LLInt::getOpcode(llint_op_resolve_base_to_scope_with_top_scope_check);
+ break;
+
+ case ResolveOperation::SkipScopes:
+ pc[0].u.opcode = LLInt::getOpcode(llint_op_resolve_base_to_scope);
+ break;
+
+ default:
+ break;
}
-
- LLINT_RETURN_PROFILED(op_resolve_base, JSScope::resolveBase(exec, ident, false));
+ LLINT_PROFILE_VALUE(op_resolve_base, result);
+ LLINT_RETURN(result);
}
LLINT_SLOW_PATH_DECL(slow_path_ensure_property_exists)
@@ -839,24 +879,26 @@ LLINT_SLOW_PATH_DECL(slow_path_ensure_property_exists)
LLINT_SLOW_PATH_DECL(slow_path_resolve_with_base)
{
LLINT_BEGIN();
- JSValue result = JSScope::resolveWithBase(exec, exec->codeBlock()->identifier(pc[3].u.operand), &LLINT_OP(1));
+ ResolveOperations* operations = exec->codeBlock()->resolveOperations(pc[4].u.operand);
+ JSValue result = JSScope::resolveWithBase(exec, exec->codeBlock()->identifier(pc[3].u.operand), &LLINT_OP(1), operations, exec->codeBlock()->putToBaseOperation(pc[5].u.operand));
LLINT_CHECK_EXCEPTION();
LLINT_OP(2) = result;
- // FIXME: technically should have profiling, but we don't do it because the DFG won't use it.
+ LLINT_PROFILE_VALUE(op_resolve_with_base, result);
LLINT_END();
}
LLINT_SLOW_PATH_DECL(slow_path_resolve_with_this)
{
LLINT_BEGIN();
- JSValue result = JSScope::resolveWithThis(exec, exec->codeBlock()->identifier(pc[3].u.operand), &LLINT_OP(1));
+ ResolveOperations* operations = exec->codeBlock()->resolveOperations(pc[4].u.operand);
+ JSValue result = JSScope::resolveWithThis(exec, exec->codeBlock()->identifier(pc[3].u.operand), &LLINT_OP(1), operations);
LLINT_CHECK_EXCEPTION();
LLINT_OP(2) = result;
- // FIXME: technically should have profiling, but we don't do it because the DFG won't use it.
+ LLINT_PROFILE_VALUE(op_resolve_with_this, result);
LLINT_END();
}
-LLINT_SLOW_PATH_DECL(slow_path_put_global_var_check)
+LLINT_SLOW_PATH_DECL(slow_path_init_global_const_check)
{
LLINT_BEGIN();
CodeBlock* codeBlock = exec->codeBlock();
diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.h b/Source/JavaScriptCore/llint/LLIntSlowPaths.h
index 3d770f3c5..f78476841 100644
--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.h
+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.h
@@ -157,15 +157,12 @@ LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_is_object);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_is_function);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_in);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_resolve);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_resolve_skip);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_resolve_global);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_resolve_global_dynamic);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_resolve_for_resolve_global_dynamic);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_to_base);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_resolve_base);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_ensure_property_exists);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_resolve_with_base);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_resolve_with_this);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_global_var_check);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_init_global_const_check);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_by_id);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_arguments_length);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_id);
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
index ad509e05d..022637dbe 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
@@ -54,8 +54,28 @@ if JSVALUE64
const PB = t6
const tagTypeNumber = csr1
const tagMask = csr2
+
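+ # On 64-bit, instruction operands are 8 bytes wide and addressed as offset * 8 relative to PB (the instruction base), indexed by PC.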
+ macro loadisFromInstruction(offset, dest)
+ loadis offset * 8[PB, PC, 8], dest
+ end
+
+ macro loadpFromInstruction(offset, dest)
+ loadp offset * 8[PB, PC, 8], dest
+ end
+
+ macro storepToInstruction(value, offset)
+ storep value, offset * 8[PB, PC, 8]
+ end
+
else
const PC = t4
+ macro loadisFromInstruction(offset, dest)
+ loadis offset * 4[PC], dest
+ end
+
+ macro loadpFromInstruction(offset, dest)
+ loadp offset * 4[PC], dest
+ end
end
# Constants for reasoning about value representation.
@@ -100,6 +120,29 @@ const HashFlags8BitBuffer = 64
# Copied from PropertyOffset.h
const firstOutOfLineOffset = 100
+# From ResolveOperation.h
+const ResolveOperationFail = 0
+const ResolveOperationSetBaseToUndefined = 1
+const ResolveOperationReturnScopeAsBase = 2
+const ResolveOperationSetBaseToScope = 3
+const ResolveOperationSetBaseToGlobal = 4
+const ResolveOperationGetAndReturnScopedVar = 5
+const ResolveOperationGetAndReturnGlobalVar = 6
+const ResolveOperationGetAndReturnGlobalVarWatchable = 7
+const ResolveOperationSkipTopScopeNode = 8
+const ResolveOperationSkipScopes = 9
+const ResolveOperationReturnGlobalObjectAsBase = 10
+const ResolveOperationGetAndReturnGlobalProperty = 11
+const ResolveOperationCheckForDynamicEntriesBeforeGlobalScope = 12
+
+const PutToBaseOperationKindUninitialised = 0
+const PutToBaseOperationKindGeneric = 1
+const PutToBaseOperationKindReadonly = 2
+const PutToBaseOperationKindGlobalVariablePut = 3
+const PutToBaseOperationKindGlobalVariablePutChecked = 4
+const PutToBaseOperationKindGlobalPropertyPut = 5
+const PutToBaseOperationKindVariablePut = 6
+
# Allocation constants
if JSVALUE64
const JSFinalObjectSizeClassIndex = 1
@@ -196,13 +239,8 @@ macro arrayProfile(structureAndIndexingType, profile, scratch)
const indexingType = structureAndIndexingType
if VALUE_PROFILER
storep structure, ArrayProfile::m_lastSeenStructure[profile]
- loadb Structure::m_indexingType[structure], indexingType
- move 1, scratch
- lshifti indexingType, scratch
- ori scratch, ArrayProfile::m_observedArrayModes[profile]
- else
- loadb Structure::m_indexingType[structure], indexingType
end
+ loadb Structure::m_indexingType[structure], indexingType
end
macro checkSwitchToJIT(increment, action)
@@ -499,41 +537,417 @@ _llint_op_in:
callSlowPath(_llint_slow_path_in)
dispatch(4)
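+# Fetch a field of this instruction's PutToBaseOperation: operand 4 indexes the CodeBlock's m_putToBaseOperations vector, and fieldGetter receives the field's address.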
+macro getPutToBaseOperationField(scratch, scratch1, fieldOffset, fieldGetter)
+ loadisFromInstruction(4, scratch)
+ mulp sizeof PutToBaseOperation, scratch, scratch
+ loadp CodeBlock[cfr], scratch1
+ loadp VectorBufferOffset + CodeBlock::m_putToBaseOperations[scratch1], scratch1
+ fieldGetter(fieldOffset[scratch1, scratch, 1])
+end
+
+macro moveJSValueFromRegisterWithoutProfiling(value, destBuffer, destOffsetReg)
+ storep value, [destBuffer, destOffsetReg, 8]
+end
+
+
+macro moveJSValueFromRegistersWithoutProfiling(tag, payload, destBuffer, destOffsetReg)
+ storep tag, TagOffset[destBuffer, destOffsetReg, 8]
+ storep payload, PayloadOffset[destBuffer, destOffsetReg, 8]
+end
+
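+# Store operand 3's value into the register array of the JSVariableObject named by operand 1, at index variableOffset.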
+macro putToBaseVariableBody(variableOffset, scratch1, scratch2, scratch3)
+ loadisFromInstruction(1, scratch1)
+ loadp PayloadOffset[cfr, scratch1, 8], scratch1
+ loadp JSVariableObject::m_registers[scratch1], scratch1
+ loadisFromInstruction(3, scratch2)
+ if JSVALUE64
+ loadConstantOrVariable(scratch2, scratch3)
+ moveJSValueFromRegisterWithoutProfiling(scratch3, scratch1, variableOffset)
+ else
+ loadConstantOrVariable2Reg(scratch2, scratch3, scratch2) # scratch3=tag, scratch2=payload
+ moveJSValueFromRegistersWithoutProfiling(scratch3, scratch2, scratch1, variableOffset)
+ end
+end
+
+_llint_op_put_to_base_variable:
+ traceExecution()
+ getPutToBaseOperationField(t0, t1, PutToBaseOperation::m_offset, macro(addr)
+ loadis addr, t0
+ end)
+ putToBaseVariableBody(t0, t1, t2, t3)
+ dispatch(5)
+
+_llint_op_put_to_base:
+ traceExecution()
+ getPutToBaseOperationField(t0, t1, 0, macro(addr)
+ leap addr, t0
+ bbneq PutToBaseOperation::m_kindAsUint8[t0], PutToBaseOperationKindVariablePut, .notPutToBaseVariable
+ loadis PutToBaseOperation::m_offset[t0], t0
+ putToBaseVariableBody(t0, t1, t2, t3)
+ dispatch(5)
+ .notPutToBaseVariable:
+ end)
+ callSlowPath(_llint_slow_path_put_to_base)
+ dispatch(5)
+
+macro getResolveOperation(resolveOperationIndex, dest, scratch)
+ loadisFromInstruction(resolveOperationIndex, dest)
+ mulp sizeof ResolveOperations, dest, dest
+ loadp CodeBlock[cfr], scratch
+ loadp VectorBufferOffset + CodeBlock::m_resolveOperations[scratch], scratch
+ loadp VectorBufferOffset[scratch, dest, 1], dest
+end
+
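+# Starting from the scope produced by loadInitialScope, follow scopeCount m_next links; the resulting scope ends up in dest.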
+macro getScope(loadInitialScope, scopeCount, dest, scratch)
+ loadInitialScope(dest)
+ loadi scopeCount, scratch
+
+ btiz scratch, .done
+.loop:
+ loadp JSScope::m_next[dest], dest
+ subi 1, scratch
+ btinz scratch, .loop
+
+.done:
+end
+
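+# Copy a JSValue between indexed register slots and record the copied value in this instruction's ValueProfile (operand profileOffset).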
+macro moveJSValue(sourceBuffer, sourceOffsetReg, destBuffer, destOffsetReg, profileOffset, scratchRegister)
+ if JSVALUE64
+ loadp [sourceBuffer, sourceOffsetReg, 8], scratchRegister
+ storep scratchRegister, [destBuffer, destOffsetReg, 8]
+ loadpFromInstruction(profileOffset, destOffsetReg)
+ valueProfile(scratchRegister, destOffsetReg)
+ else
+ loadp PayloadOffset[sourceBuffer, sourceOffsetReg, 8], scratchRegister
+ storep scratchRegister, PayloadOffset[destBuffer, destOffsetReg, 8]
+ loadp TagOffset[sourceBuffer, sourceOffsetReg, 8], sourceOffsetReg
+ storep sourceOffsetReg, TagOffset[destBuffer, destOffsetReg, 8]
+ loadpFromInstruction(profileOffset, destOffsetReg)
+ valueProfile(sourceOffsetReg, scratchRegister, destOffsetReg)
+ end
+end
+
+macro moveJSValueFromSlot(slot, destBuffer, destOffsetReg, profileOffset, scratchRegister)
+ if JSVALUE64
+ loadp [slot], scratchRegister
+ storep scratchRegister, [destBuffer, destOffsetReg, 8]
+ loadpFromInstruction(profileOffset, destOffsetReg)
+ valueProfile(scratchRegister, destOffsetReg)
+ else
+ loadp PayloadOffset[slot], scratchRegister
+ storep scratchRegister, PayloadOffset[destBuffer, destOffsetReg, 8]
+ loadp TagOffset[slot], slot
+ storep slot, TagOffset[destBuffer, destOffsetReg, 8]
+ loadpFromInstruction(profileOffset, destOffsetReg)
+ valueProfile(slot, scratchRegister, destOffsetReg)
+ end
+end
+
+macro moveJSValueFromRegister(value, destBuffer, destOffsetReg, profileOffset)
+ storep value, [destBuffer, destOffsetReg, 8]
+ loadpFromInstruction(profileOffset, destOffsetReg)
+ valueProfile(value, destOffsetReg)
+end
+
+macro moveJSValueFromRegisters(tag, payload, destBuffer, destOffsetReg, profileOffset)
+ storep tag, TagOffset[destBuffer, destOffsetReg, 8]
+ storep payload, PayloadOffset[destBuffer, destOffsetReg, 8]
+ loadpFromInstruction(profileOffset, destOffsetReg)
+ valueProfile(tag, payload, destOffsetReg)
+end
+
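+# Fast path for a resolve known to hit a property of the global object: guard the cached structure and load at the cached offset; on a structure mismatch, branch back to the generic _llint_op_resolve, which can repatch.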
+_llint_op_resolve_global_property:
+ traceExecution()
+ getResolveOperation(3, t0, t1)
+ loadp CodeBlock[cfr], t1
+ loadp CodeBlock::m_globalObject[t1], t1
+ loadp ResolveOperation::m_structure[t0], t2
+ bpneq JSCell::m_structure[t1], t2, _llint_op_resolve
+ loadis ResolveOperation::m_offset[t0], t0
+ if JSVALUE64
+ loadPropertyAtVariableOffsetKnownNotInline(t0, t1, t2)
+ loadisFromInstruction(1, t0)
+ moveJSValueFromRegister(t2, cfr, t0, 4)
+ else
+ loadPropertyAtVariableOffsetKnownNotInline(t0, t1, t2, t3)
+ loadisFromInstruction(1, t0)
+ moveJSValueFromRegisters(t2, t3, cfr, t0, 4)
+ end
+ dispatch(5)
+
+_llint_op_resolve_global_var:
+ traceExecution()
+ getResolveOperation(3, t0, t1)
+ loadp ResolveOperation::m_registerAddress[t0], t0
+ loadisFromInstruction(1, t1)
+ moveJSValueFromSlot(t0, cfr, t1, 4, t3)
+ dispatch(5)
+
+macro resolveScopedVarBody(resolveOperations)
+ # The first ResolveOperation tells us how many scope chain nodes to skip
+ getScope(macro(dest)
+ loadp ScopeChain + PayloadOffset[cfr], dest
+ end,
+ ResolveOperation::m_scopesToSkip[resolveOperations], t1, t2)
+ loadp JSVariableObject::m_registers[t1], t1 # t1 now contains the activation registers
+
+ # Second ResolveOperation tells us what offset to use
+ loadis ResolveOperation::m_offset + sizeof ResolveOperation[resolveOperations], t2
+ loadisFromInstruction(1, t3)
+ moveJSValue(t1, t2, cfr, t3, 4, t0)
+end
+
+_llint_op_resolve_scoped_var:
+ traceExecution()
+ getResolveOperation(3, t0, t1)
+ resolveScopedVarBody(t0)
+ dispatch(5)
+
+_llint_op_resolve_scoped_var_on_top_scope:
+ traceExecution()
+ getResolveOperation(3, t0, t1)
+
+ # Load destination index
+ loadisFromInstruction(1, t3)
+
+ # We know we want the top scope chain entry
+ loadp ScopeChain + PayloadOffset[cfr], t1
+ loadp JSVariableObject::m_registers[t1], t1 # t1 now contains the activation registers
+
+ # Second ResolveOperation tells us what offset to use
+ loadis ResolveOperation::m_offset + sizeof ResolveOperation[t0], t2
+
+ moveJSValue(t1, t2, cfr, t3, 4, t0)
+ dispatch(5)
+
+_llint_op_resolve_scoped_var_with_top_scope_check:
+ traceExecution()
+ getResolveOperation(3, t0, t1)
+ # First ResolveOperation tells us what register to check
+ loadis ResolveOperation::m_activationRegister[t0], t1
+
+ loadp PayloadOffset[cfr, t1, 8], t1
+
+ getScope(macro(dest)
+ btpz t1, .scopeChainNotCreated
+ loadp JSScope::m_next[t1], dest
+ jmp .done
+ .scopeChainNotCreated:
+ loadp ScopeChain + PayloadOffset[cfr], dest
+ .done:
+ end,
+ # Second ResolveOperation tells us how many more nodes to skip
+ ResolveOperation::m_scopesToSkip + sizeof ResolveOperation[t0], t1, t2)
+ loadp JSVariableObject::m_registers[t1], t1 # t1 now contains the activation registers
+
+ # Third operation tells us what offset to use
+ loadis ResolveOperation::m_offset + 2 * sizeof ResolveOperation[t0], t2
+ loadisFromInstruction(1, t3)
+ moveJSValue(t1, t2, cfr, t3, 4, t0)
+ dispatch(5)
_llint_op_resolve:
traceExecution()
+ getResolveOperation(3, t0, t1)
+ btpz t0, .noInstructions
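+ # If resolve operations have already been computed, handle the two most common shapes inline; anything else goes to the slow path, which computes them and may repatch this opcode.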
+ loadis ResolveOperation::m_operation[t0], t1
+ bineq t1, ResolveOperationSkipScopes, .notSkipScopes
+ resolveScopedVarBody(t0)
+ dispatch(5)
+.notSkipScopes:
+ bineq t1, ResolveOperationGetAndReturnGlobalVar, .notGetAndReturnGlobalVar
+ loadp ResolveOperation::m_registerAddress[t0], t0
+ loadisFromInstruction(1, t1)
+ moveJSValueFromSlot(t0, cfr, t1, 4, t3)
+ dispatch(5)
+.notGetAndReturnGlobalVar:
+
+.noInstructions:
callSlowPath(_llint_slow_path_resolve)
- dispatch(4)
+ dispatch(5)
+
+_llint_op_resolve_base_to_global:
+ traceExecution()
+ loadp CodeBlock[cfr], t1
+ loadp CodeBlock::m_globalObject[t1], t1
+ loadisFromInstruction(1, t3)
+ if JSVALUE64
+ moveJSValueFromRegister(t1, cfr, t3, 6)
+ else
+ move CellTag, t2
+ moveJSValueFromRegisters(t2, t1, cfr, t3, 6)
+ end
+ dispatch(7)
+_llint_op_resolve_base_to_global_dynamic:
+ jmp _llint_op_resolve_base
-_llint_op_resolve_skip:
+_llint_op_resolve_base_to_scope:
traceExecution()
- callSlowPath(_llint_slow_path_resolve_skip)
- dispatch(5)
+ getResolveOperation(4, t0, t1)
+ # The first ResolveOperation tells us how many scope chain nodes to skip
+ getScope(macro(dest)
+ loadp ScopeChain + PayloadOffset[cfr], dest
+ end,
+ ResolveOperation::m_scopesToSkip[t0], t1, t2)
+ loadisFromInstruction(1, t3)
+ if JSVALUE64
+ moveJSValueFromRegister(t1, cfr, t3, 6)
+ else
+ move CellTag, t2
+ moveJSValueFromRegisters(t2, t1, cfr, t3, 6)
+ end
+ dispatch(7)
+_llint_op_resolve_base_to_scope_with_top_scope_check:
+ traceExecution()
+ getResolveOperation(4, t0, t1)
+ # First ResolveOperation tells us what register to check
+ loadis ResolveOperation::m_activationRegister[t0], t1
+
+ loadp PayloadOffset[cfr, t1, 8], t1
+
+ getScope(macro(dest)
+ btpz t1, .scopeChainNotCreated
+ loadp JSScope::m_next[t1], dest
+ jmp .done
+ .scopeChainNotCreated:
+ loadp ScopeChain + PayloadOffset[cfr], dest
+ .done:
+ end,
+ # Second ResolveOperation tells us how many more nodes to skip
+ ResolveOperation::m_scopesToSkip + sizeof ResolveOperation[t0], t1, t2)
+
+ loadisFromInstruction(1, t3)
+ if JSVALUE64
+ moveJSValueFromRegister(t1, cfr, t3, 6)
+ else
+ move CellTag, t2
+ moveJSValueFromRegisters(t2, t1, cfr, t3, 6)
+ end
+ dispatch(7)
_llint_op_resolve_base:
traceExecution()
callSlowPath(_llint_slow_path_resolve_base)
- dispatch(5)
-
+ dispatch(7)
_llint_op_ensure_property_exists:
traceExecution()
callSlowPath(_llint_slow_path_ensure_property_exists)
dispatch(3)
+macro interpretResolveWithBase(opcodeLength, slowPath)
+ traceExecution()
+ getResolveOperation(4, t0, t1)
+ btpz t0, .slowPath
+
+ loadp ScopeChain[cfr], t3
+ # Get the base
+ loadis ResolveOperation::m_operation[t0], t2
+
+ bineq t2, ResolveOperationSkipScopes, .notSkipScopes
+ getScope(macro(dest) move t3, dest end,
+ ResolveOperation::m_scopesToSkip[t0], t1, t2)
+ move t1, t3
+ addp sizeof ResolveOperation, t0, t0
+ jmp .haveCorrectScope
+
+ .notSkipScopes:
+
+ bineq t2, ResolveOperationSkipTopScopeNode, .notSkipTopScopeNode
+ loadis ResolveOperation::m_activationRegister[t0], t1
+ loadp PayloadOffset[cfr, t1, 8], t1
+
+ getScope(macro(dest)
+ btpz t1, .scopeChainNotCreated
+ loadp JSScope::m_next[t1], dest
+ jmp .done
+ .scopeChainNotCreated:
+ loadp ScopeChain + PayloadOffset[cfr], dest
+ .done:
+ end,
+ sizeof ResolveOperation + ResolveOperation::m_scopesToSkip[t0], t1, t2)
+ move t1, t3
+ # We've consumed two ResolveOperations here
+ addp 2 * sizeof ResolveOperation, t0, t0
+
+ .notSkipTopScopeNode:
+
+ .haveCorrectScope:
+
+ # t3 now contains the correct Scope
+ # t0 contains a pointer to the current ResolveOperation
+
+ loadis ResolveOperation::m_operation[t0], t2
+ # t2 now contains the kind of the next ResolveOperation
+
+ loadisFromInstruction(1, t1)
+ # t1 now contains the index for the base register
+
+ bineq t2, ResolveOperationSetBaseToScope, .notSetBaseToScope
+ storep t3, PayloadOffset[cfr, t1, 8]
+ if JSVALUE64
+ else
+ storep CellTag, TagOffset[cfr, t1, 8]
+ end
+ jmp .haveSetBase
+
+ .notSetBaseToScope:
+
+ bineq t2, ResolveOperationSetBaseToUndefined, .notSetBaseToUndefined
+ if JSVALUE64
+ storep ValueUndefined, PayloadOffset[cfr, t1, 8]
+ else
+ storep 0, PayloadOffset[cfr, t1, 8]
+ storep UndefinedTag, TagOffset[cfr, t1, 8]
+ end
+ jmp .haveSetBase
+
+ .notSetBaseToUndefined:
+ bineq t2, ResolveOperationSetBaseToGlobal, .slowPath
+ loadp JSCell::m_structure[t3], t2
+ loadp Structure::m_globalObject[t2], t2
+ storep t2, PayloadOffset[cfr, t1, 8]
+ if JSVALUE64
+ else
+ storep CellTag, TagOffset[cfr, t1, 8]
+ end
+
+ .haveSetBase:
+
+ # Get the value
+
+ # Load the operation into t2
+ loadis ResolveOperation::m_operation + sizeof ResolveOperation[t0], t2
+
+ # Load the index for the value register into t1
+ loadisFromInstruction(2, t1)
+
+ bineq t2, ResolveOperationGetAndReturnScopedVar, .notGetAndReturnScopedVar
+ loadp JSVariableObject::m_registers[t3], t3 # t3 now contains the activation registers
+
+ # Second ResolveOperation tells us what offset to use
+ loadis ResolveOperation::m_offset + sizeof ResolveOperation[t0], t2
+ moveJSValue(t3, t2, cfr, t1, opcodeLength - 1, t0)
+ dispatch(opcodeLength)
+
+ .notGetAndReturnScopedVar:
+ bineq t2, ResolveOperationGetAndReturnGlobalProperty, .slowPath
+ callSlowPath(slowPath)
+ dispatch(opcodeLength)
+
+.slowPath:
+ callSlowPath(slowPath)
+ dispatch(opcodeLength)
+end
_llint_op_resolve_with_base:
- traceExecution()
- callSlowPath(_llint_slow_path_resolve_with_base)
- dispatch(5)
+ interpretResolveWithBase(7, _llint_slow_path_resolve_with_base)
_llint_op_resolve_with_this:
- traceExecution()
- callSlowPath(_llint_slow_path_resolve_with_this)
- dispatch(5)
+ interpretResolveWithBase(6, _llint_slow_path_resolve_with_this)
macro withInlineStorage(object, propertyStorage, continuation)
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
index f0d45eb0e..d0072d714 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
@@ -976,126 +976,7 @@ macro resolveGlobal(size, slow)
valueProfile(t2, t3, t0)
end
-_llint_op_resolve_global:
- traceExecution()
- resolveGlobal(6, .opResolveGlobalSlow)
- dispatch(6)
-
-.opResolveGlobalSlow:
- callSlowPath(_llint_slow_path_resolve_global)
- dispatch(6)
-
-
-# Gives you the scope in t0, while allowing you to optionally perform additional checks on the
-# scopes as they are traversed. scopeCheck() is called with two arguments: the register
-# holding the scope, and a register that can be used for scratch. Note that this does not
-# use t3, so you can hold stuff in t3 if need be.
-macro getScope(deBruijinIndexOperand, scopeCheck)
- loadp ScopeChain + PayloadOffset[cfr], t0
- loadi deBruijinIndexOperand, t2
-
- btiz t2, .done
-
- loadp CodeBlock[cfr], t1
- bineq CodeBlock::m_codeType[t1], FunctionCode, .loop
- btbz CodeBlock::m_needsFullScopeChain[t1], .loop
-
- loadi CodeBlock::m_activationRegister[t1], t1
-
- # Need to conditionally skip over one scope.
- bieq TagOffset[cfr, t1, 8], EmptyValueTag, .noActivation
- scopeCheck(t0, t1)
- loadp JSScope::m_next[t0], t0
-.noActivation:
- subi 1, t2
-
- btiz t2, .done
-.loop:
- scopeCheck(t0, t1)
- loadp JSScope::m_next[t0], t0
- subi 1, t2
- btinz t2, .loop
-
-.done:
-end
-
-_llint_op_resolve_global_dynamic:
- traceExecution()
- loadp CodeBlock[cfr], t3
- loadp CodeBlock::m_globalObject[t3], t3
- loadp JSGlobalObject::m_activationStructure[t3], t3
- getScope(
- 20[PC],
- macro (scope, scratch)
- bpneq JSCell::m_structure[scope], t3, .opResolveGlobalDynamicSuperSlow
- end)
- resolveGlobal(7, .opResolveGlobalDynamicSlow)
- dispatch(7)
-
-.opResolveGlobalDynamicSuperSlow:
- callSlowPath(_llint_slow_path_resolve_for_resolve_global_dynamic)
- dispatch(7)
-
-.opResolveGlobalDynamicSlow:
- callSlowPath(_llint_slow_path_resolve_global_dynamic)
- dispatch(7)
-
-
-_llint_op_get_scoped_var:
- traceExecution()
- # Operands are as follows:
- # 4[PC] Destination for the load.
- # 8[PC] Index of register in the scope.
- # 12[PC] De Bruijin index.
- getScope(12[PC], macro (scope, scratch) end)
- loadi 4[PC], t1
- loadi 8[PC], t2
- loadp JSVariableObject::m_registers[t0], t0
- loadi TagOffset[t0, t2, 8], t3
- loadi PayloadOffset[t0, t2, 8], t0
- storei t3, TagOffset[cfr, t1, 8]
- storei t0, PayloadOffset[cfr, t1, 8]
- loadi 16[PC], t1
- valueProfile(t3, t0, t1)
- dispatch(5)
-
-
-_llint_op_put_scoped_var:
- traceExecution()
- getScope(8[PC], macro (scope, scratch) end)
- loadi 12[PC], t1
- loadConstantOrVariable(t1, t3, t2)
- loadi 4[PC], t1
- writeBarrier(t3, t2)
- loadp JSVariableObject::m_registers[t0], t0
- storei t3, TagOffset[t0, t1, 8]
- storei t2, PayloadOffset[t0, t1, 8]
- dispatch(4)
-
-
-macro getGlobalVar(size)
- traceExecution()
- loadp 8[PC], t0
- loadi 4[PC], t3
- loadi TagOffset[t0], t2
- loadi PayloadOffset[t0], t1
- storei t2, TagOffset[cfr, t3, 8]
- storei t1, PayloadOffset[cfr, t3, 8]
- loadi (size - 1) * 4[PC], t3
- valueProfile(t2, t1, t3)
- dispatch(size)
-end
-
-_llint_op_get_global_var:
- getGlobalVar(4)
-
-
-_llint_op_get_global_var_watchable:
- getGlobalVar(5)
-
-
_llint_op_init_global_const:
-_llint_op_put_global_var:
traceExecution()
loadi 8[PC], t1
loadi 4[PC], t0
@@ -1107,22 +988,20 @@ _llint_op_put_global_var:
_llint_op_init_global_const_check:
-_llint_op_put_global_var_check:
traceExecution()
loadp 12[PC], t2
loadi 8[PC], t1
loadi 4[PC], t0
- btbnz [t2], .opPutGlobalVarCheckSlow
+ btbnz [t2], .opInitGlobalConstCheckSlow
loadConstantOrVariable(t1, t2, t3)
writeBarrier(t2, t3)
storei t2, TagOffset[t0]
storei t3, PayloadOffset[t0]
dispatch(5)
-.opPutGlobalVarCheckSlow:
- callSlowPath(_llint_slow_path_put_global_var_check)
+.opInitGlobalConstCheckSlow:
+ callSlowPath(_llint_slow_path_init_global_const_check)
dispatch(5)
-
# We only do monomorphic get_by_id caching for now, and we do not modify the
# opcode. We do, however, allow for the cache to change anytime if fails, since
# ping-ponging is free. At best we get lucky and the get_by_id will continue
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
index 8b72674ab..59fa18ccf 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
@@ -815,139 +815,7 @@ macro loadPropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value)
loadp (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8], value
end
-macro resolveGlobal(size, slow)
- # Operands are as follows:
- # 8[PB, PC, 8] Destination for the load.
- # 16[PB, PC, 8] Property identifier index in the code block.
- # 24[PB, PC, 8] Structure pointer, initialized to 0 by bytecode generator.
- # 32[PB, PC, 8] Offset in global object, initialized to 0 by bytecode generator.
- loadp CodeBlock[cfr], t0
- loadp CodeBlock::m_globalObject[t0], t0
- loadp JSCell::m_structure[t0], t1
- bpneq t1, 24[PB, PC, 8], slow
- loadis 32[PB, PC, 8], t1
- loadPropertyAtVariableOffsetKnownNotInline(t1, t0, t2)
- loadis 8[PB, PC, 8], t0
- storep t2, [cfr, t0, 8]
- loadp (size - 1) * 8[PB, PC, 8], t0
- valueProfile(t2, t0)
-end
-
-_llint_op_resolve_global:
- traceExecution()
- resolveGlobal(6, .opResolveGlobalSlow)
- dispatch(6)
-
-.opResolveGlobalSlow:
- callSlowPath(_llint_slow_path_resolve_global)
- dispatch(6)
-
-
-# Gives you the scope in t0, while allowing you to optionally perform additional checks on the
-# scopes as they are traversed. scopeCheck() is called with two arguments: the register
-# holding the scope, and a register that can be used for scratch. Note that this does not
-# use t3, so you can hold stuff in t3 if need be.
-macro getScope(deBruijinIndexOperand, scopeCheck)
- loadp ScopeChain[cfr], t0
- loadis deBruijinIndexOperand, t2
-
- btiz t2, .done
-
- loadp CodeBlock[cfr], t1
- bineq CodeBlock::m_codeType[t1], FunctionCode, .loop
- btbz CodeBlock::m_needsFullScopeChain[t1], .loop
-
- loadis CodeBlock::m_activationRegister[t1], t1
-
- # Need to conditionally skip over one scope.
- btpz [cfr, t1, 8], .noActivation
- scopeCheck(t0, t1)
- loadp JSScope::m_next[t0], t0
-.noActivation:
- subi 1, t2
-
- btiz t2, .done
-.loop:
- scopeCheck(t0, t1)
- loadp JSScope::m_next[t0], t0
- subi 1, t2
- btinz t2, .loop
-
-.done:
-end
-
-_llint_op_resolve_global_dynamic:
- traceExecution()
- loadp CodeBlock[cfr], t3
- loadp CodeBlock::m_globalObject[t3], t3
- loadp JSGlobalObject::m_activationStructure[t3], t3
- getScope(
- 40[PB, PC, 8],
- macro (scope, scratch)
- bpneq JSCell::m_structure[scope], t3, .opResolveGlobalDynamicSuperSlow
- end)
- resolveGlobal(7, .opResolveGlobalDynamicSlow)
- dispatch(7)
-
-.opResolveGlobalDynamicSuperSlow:
- callSlowPath(_llint_slow_path_resolve_for_resolve_global_dynamic)
- dispatch(7)
-
-.opResolveGlobalDynamicSlow:
- callSlowPath(_llint_slow_path_resolve_global_dynamic)
- dispatch(7)
-
-
-_llint_op_get_scoped_var:
- traceExecution()
- # Operands are as follows:
- # 8[PB, PC, 8] Destination for the load
- # 16[PB, PC, 8] Index of register in the scope
- # 24[PB, PC, 8] De Bruijin index.
- getScope(24[PB, PC, 8], macro (scope, scratch) end)
- loadis 8[PB, PC, 8], t1
- loadis 16[PB, PC, 8], t2
- loadp JSVariableObject::m_registers[t0], t0
- loadp [t0, t2, 8], t3
- storep t3, [cfr, t1, 8]
- loadp 32[PB, PC, 8], t1
- valueProfile(t3, t1)
- dispatch(5)
-
-
-_llint_op_put_scoped_var:
- traceExecution()
- getScope(16[PB, PC, 8], macro (scope, scratch) end)
- loadis 24[PB, PC, 8], t1
- loadConstantOrVariable(t1, t3)
- loadis 8[PB, PC, 8], t1
- writeBarrier(t3)
- loadp JSVariableObject::m_registers[t0], t0
- storep t3, [t0, t1, 8]
- dispatch(4)
-
-
-macro getGlobalVar(size)
- traceExecution()
- loadp 16[PB, PC, 8], t0
- loadis 8[PB, PC, 8], t3
- loadp [t0], t1
- storep t1, [cfr, t3, 8]
- loadp (size - 1) * 8[PB, PC, 8], t0
- valueProfile(t1, t0)
- dispatch(size)
-end
-
-_llint_op_get_global_var:
- getGlobalVar(4)
-
-
-_llint_op_get_global_var_watchable:
- getGlobalVar(5)
-
-
_llint_op_init_global_const:
-_llint_op_put_global_var:
traceExecution()
loadis 16[PB, PC, 8], t1
loadp 8[PB, PC, 8], t0
@@ -958,21 +826,19 @@ _llint_op_put_global_var:
_llint_op_init_global_const_check:
-_llint_op_put_global_var_check:
traceExecution()
loadp 24[PB, PC, 8], t2
loadis 16[PB, PC, 8], t1
loadp 8[PB, PC, 8], t0
- btbnz [t2], .opPutGlobalVarCheckSlow
+ btbnz [t2], .opInitGlobalConstCheckSlow
loadConstantOrVariable(t1, t2)
writeBarrier(t2)
storep t2, [t0]
dispatch(5)
-.opPutGlobalVarCheckSlow:
- callSlowPath(_llint_slow_path_put_global_var_check)
+.opInitGlobalConstCheckSlow:
+ callSlowPath(_llint_slow_path_init_global_const_check)
dispatch(5)
-
macro getById(getPropertyStorage)
traceExecution()
# We only do monomorphic get_by_id caching for now, and we do not modify the
diff --git a/Source/JavaScriptCore/offlineasm/armv7.rb b/Source/JavaScriptCore/offlineasm/armv7.rb
index 07543574e..ab0496f71 100644
--- a/Source/JavaScriptCore/offlineasm/armv7.rb
+++ b/Source/JavaScriptCore/offlineasm/armv7.rb
@@ -1,4 +1,4 @@
-# Copyright (C) 2011 Apple Inc. All rights reserved.
+# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -24,6 +24,7 @@
require "config"
require "ast"
require "opt"
+require "risc"
class Node
def armV7Single
@@ -33,7 +34,7 @@ class Node
end
end
-class SpecialRegister < NoChildren
+class SpecialRegister
def armV7Operand
@name
end
@@ -133,487 +134,6 @@ class AbsoluteAddress
end
#
-# Lowering of branch ops. For example:
-#
-# baddiz foo, bar, baz
-#
-# will become:
-#
-# addi foo, bar
-# bz baz
-#
-
-def armV7LowerBranchOps(list)
- newList = []
- list.each {
- | node |
- if node.is_a? Instruction
- annotation = node.annotation
- case node.opcode
- when /^b(addi|subi|ori|addp)/
- op = $1
- branch = "b" + $~.post_match
-
- case op
- when "addi", "addp"
- op = "addis"
- when "subi"
- op = "subis"
- when "ori"
- op = "oris"
- end
-
- newList << Instruction.new(node.codeOrigin, op, node.operands[0..-2], annotation)
- newList << Instruction.new(node.codeOrigin, branch, [node.operands[-1]])
- when "bmulio"
- tmp1 = Tmp.new(node.codeOrigin, :gpr)
- tmp2 = Tmp.new(node.codeOrigin, :gpr)
- newList << Instruction.new(node.codeOrigin, "smulli", [node.operands[0], node.operands[1], node.operands[1], tmp1], annotation)
- newList << Instruction.new(node.codeOrigin, "rshifti", [node.operands[-2], Immediate.new(node.codeOrigin, 31), tmp2])
- newList << Instruction.new(node.codeOrigin, "bineq", [tmp1, tmp2, node.operands[-1]])
- when /^bmuli/
- condition = $~.post_match
- newList << Instruction.new(node.codeOrigin, "muli", node.operands[0..-2], annotation)
- newList << Instruction.new(node.codeOrigin, "bti" + condition, [node.operands[-2], node.operands[-1]])
- else
- newList << node
- end
- else
- newList << node
- end
- }
- newList
-end
-
-#
-# Lowering of shift ops. For example:
-#
-# lshifti foo, bar
-#
-# will become:
-#
-# andi foo, 31, tmp
-# lshifti tmp, bar
-#
-
-def armV7SanitizeShift(operand, list)
- return operand if operand.immediate?
-
- tmp = Tmp.new(operand.codeOrigin, :gpr)
- list << Instruction.new(operand.codeOrigin, "andi", [operand, Immediate.new(operand.codeOrigin, 31), tmp])
- tmp
-end
-
-def armV7LowerShiftOps(list)
- newList = []
- list.each {
- | node |
- if node.is_a? Instruction
- case node.opcode
- when "lshifti", "rshifti", "urshifti", "lshiftp", "rshiftp", "urshiftp"
- if node.operands.size == 2
- newList << Instruction.new(node.codeOrigin, node.opcode, [armV7SanitizeShift(node.operands[0], newList), node.operands[1]], node.annotation)
- else
- newList << Instruction.new(node.codeOrigin, node.opcode, [node.operands[0], armV7SanitizeShift(node.operands[1], newList), node.operands[2]], node.annotation)
- raise "Wrong number of operands for shift at #{node.codeOriginString}" unless node.operands.size == 3
- end
- else
- newList << node
- end
- else
- newList << node
- end
- }
- newList
-end
-
-#
-# Lowering of malformed addresses. For example:
-#
-# loadp 10000[foo], bar
-#
-# will become:
-#
-# move 10000, tmp
-# addp foo, tmp
-# loadp 0[tmp], bar
-#
-
-class Node
- def armV7LowerMalformedAddressesRecurse(list)
- mapChildren {
- | node |
- node.armV7LowerMalformedAddressesRecurse(list)
- }
- end
-end
-
-class Address
- def armV7LowerMalformedAddressesRecurse(list)
- if offset.value < -0xff or offset.value > 0xfff
- tmp = Tmp.new(codeOrigin, :gpr)
- list << Instruction.new(codeOrigin, "move", [offset, tmp])
- list << Instruction.new(codeOrigin, "addp", [base, tmp])
- Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, 0))
- else
- self
- end
- end
-end
-
-class BaseIndex
- def armV7LowerMalformedAddressesRecurse(list)
- if offset.value != 0
- tmp = Tmp.new(codeOrigin, :gpr)
- list << Instruction.new(codeOrigin, "move", [offset, tmp])
- list << Instruction.new(codeOrigin, "addp", [base, tmp])
- BaseIndex.new(codeOrigin, tmp, index, scale, Immediate.new(codeOrigin, 0))
- else
- self
- end
- end
-end
-
-class AbsoluteAddress
- def armV7LowerMalformedAddressesRecurse(list)
- tmp = Tmp.new(codeOrigin, :gpr)
- list << Instruction.new(codeOrigin, "move", [address, tmp])
- Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, 0))
- end
-end
-
-def armV7LowerMalformedAddresses(list)
- newList = []
- list.each {
- | node |
- newList << node.armV7LowerMalformedAddressesRecurse(newList)
- }
- newList
-end
-
-#
-# Lowering of malformed addresses in double loads and stores. For example:
-#
-# loadd [foo, bar, 8], baz
-#
-# becomes:
-#
-# leap [foo, bar, 8], tmp
-# loadd [tmp], baz
-#
-
-class Node
- def armV7DoubleAddress(list)
- self
- end
-end
-
-class BaseIndex
- def armV7DoubleAddress(list)
- tmp = Tmp.new(codeOrigin, :gpr)
- list << Instruction.new(codeOrigin, "leap", [self, tmp])
- Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, 0))
- end
-end
-
-def armV7LowerMalformedAddressesDouble(list)
- newList = []
- list.each {
- | node |
- if node.is_a? Instruction
- case node.opcode
- when "loadd"
- newList << Instruction.new(node.codeOrigin, "loadd", [node.operands[0].armV7DoubleAddress(newList), node.operands[1]], node.annotation)
- when "stored"
- newList << Instruction.new(node.codeOrigin, "stored", [node.operands[0], node.operands[1].armV7DoubleAddress(newList)], node.annotation)
- else
- newList << node
- end
- else
- newList << node
- end
- }
- newList
-end
-
-#
-# Lowering of misplaced immediates. For example:
-#
-# storei 0, [foo]
-#
-# will become:
-#
-# move 0, tmp
-# storei tmp, [foo]
-#
-
-def armV7LowerMisplacedImmediates(list)
- newList = []
- list.each {
- | node |
- if node.is_a? Instruction
- case node.opcode
- when "storeb", "storei", "storep"
- operands = node.operands
- newOperands = []
- operands.each {
- | operand |
- if operand.is_a? Immediate
- tmp = Tmp.new(operand.codeOrigin, :gpr)
- newList << Instruction.new(operand.codeOrigin, "move", [operand, tmp])
- newOperands << tmp
- else
- newOperands << operand
- end
- }
- newList << Instruction.new(node.codeOrigin, node.opcode, newOperands, node.annotation)
- else
- newList << node
- end
- else
- newList << node
- end
- }
- newList
-end
-
-#
-# Lowering of malformed immediates except when used in a "move" instruction.
-# For example:
-#
-# addp 642641, foo
-#
-# will become:
-#
-# move 642641, tmp
-# addp tmp, foo
-#
-
-class Node
- def armV7LowerMalformedImmediatesRecurse(list)
- mapChildren {
- | node |
- node.armV7LowerMalformedImmediatesRecurse(list)
- }
- end
-end
-
-class Address
- def armV7LowerMalformedImmediatesRecurse(list)
- self
- end
-end
-
-class BaseIndex
- def armV7LowerMalformedImmediatesRecurse(list)
- self
- end
-end
-
-class AbsoluteAddress
- def armV7LowerMalformedImmediatesRecurse(list)
- self
- end
-end
-
-class Immediate
- def armV7LowerMalformedImmediatesRecurse(list)
- if value < 0 or value > 255
- tmp = Tmp.new(codeOrigin, :gpr)
- list << Instruction.new(codeOrigin, "move", [self, tmp])
- tmp
- else
- self
- end
- end
-end
-
-def armV7LowerMalformedImmediates(list)
- newList = []
- list.each {
- | node |
- if node.is_a? Instruction
- annotation = node.annotation
- case node.opcode
- when "move"
- newList << node
- when "addi", "addp", "addis", "subi", "subp", "subis"
- if node.operands[0].is_a? Immediate and
- node.operands[0].value < 0 and
- node.operands[0].value >= -255 and
- node.operands.size == 2
- if node.opcode =~ /add/
- newOpcode = "sub" + node.opcode[-1..-1]
- else
- newOpcode = "add" + node.opcode[-1..-1]
- end
- newList << Instruction.new(node.codeOrigin, newOpcode,
- [Immediate.new(-node.operands[0].value)] + node.operands[1..-1],
- annotation)
- else
- newList << node.armV7LowerMalformedImmediatesRecurse(newList)
- end
- when "muli", "mulp"
- if node.operands[0].is_a? Immediate
- tmp = Tmp.new(codeOrigin, :gpr)
- newList << Instruction.new(node.codeOrigin, "move", [node.operands[0], tmp], annotation)
- newList << Instruction.new(node.codeOrigin, "muli", [tmp] + node.operands[1..-1])
- else
- newList << node.armV7LowerMalformedImmediatesRecurse(newList)
- end
- else
- newList << node.armV7LowerMalformedImmediatesRecurse(newList)
- end
- else
- newList << node
- end
- }
- newList
-end
-
-#
-# Lowering of misplaced addresses. For example:
-#
-# addi foo, [bar]
-#
-# will become:
-#
-# loadi [bar], tmp
-# addi foo, tmp
-# storei tmp, [bar]
-#
-# Another example:
-#
-# addi [foo], bar
-#
-# will become:
-#
-# loadi [foo], tmp
-# addi tmp, bar
-#
-
-def armV7AsRegister(preList, postList, operand, suffix, needStore)
- return operand unless operand.address?
-
- tmp = Tmp.new(operand.codeOrigin, if suffix == "d" then :fpr else :gpr end)
- preList << Instruction.new(operand.codeOrigin, "load" + suffix, [operand, tmp])
- if needStore
- postList << Instruction.new(operand.codeOrigin, "store" + suffix, [tmp, operand])
- end
- tmp
-end
-
-def armV7AsRegisters(preList, postList, operands, suffix)
- newOperands = []
- operands.each_with_index {
- | operand, index |
- newOperands << armV7AsRegister(preList, postList, operand, suffix, index == operands.size - 1)
- }
- newOperands
-end
-
-def armV7LowerMisplacedAddresses(list)
- newList = []
- list.each {
- | node |
- if node.is_a? Instruction
- postInstructions = []
- annotation = node.annotation
- case node.opcode
- when "addi", "addp", "addis", "andi", "andp", "lshifti", "lshiftp", "muli", "mulp", "negi",
- "negp", "noti", "ori", "oris", "orp", "rshifti", "urshifti", "rshiftp", "urshiftp", "subi",
- "subp", "subis", "xori", "xorp", /^bi/, /^bp/, /^bti/, /^btp/, /^ci/, /^cp/, /^ti/
- newList << Instruction.new(node.codeOrigin,
- node.opcode,
- armV7AsRegisters(newList, postInstructions, node.operands, "i"),
- annotation)
- when "bbeq", "bbneq", "bba", "bbaeq", "bbb", "bbbeq", "btbz", "btbnz", "tbz", "tbnz",
- "cbeq", "cbneq", "cba", "cbaeq", "cbb", "cbbeq"
- newList << Instruction.new(node.codeOrigin,
- node.opcode,
- armV7AsRegisters(newList, postInstructions, node.operands, "b"),
- annotation)
- when "bbgt", "bbgteq", "bblt", "bblteq", "btbs", "tbs", "cbgt", "cbgteq", "cblt", "cblteq"
- newList << Instruction.new(node.codeOrigin,
- node.opcode,
- armV7AsRegisters(newList, postInstructions, node.operands, "bs"),
- annotation)
- when "addd", "divd", "subd", "muld", "sqrtd", /^bd/
- newList << Instruction.new(node.codeOrigin,
- node.opcode,
- armV7AsRegisters(newList, postInstructions, node.operands, "d"),
- annotation)
- when "jmp", "call"
- newList << Instruction.new(node.codeOrigin,
- node.opcode,
- [armV7AsRegister(newList, postInstructions, node.operands[0], "p", false)],
- annotation)
- else
- newList << node
- end
- newList += postInstructions
- else
- newList << node
- end
- }
- newList
-end
-
-#
-# Lowering of register reuse in compare instructions. For example:
-#
-# cieq t0, t1, t0
-#
-# will become:
-#
-# mov tmp, t0
-# cieq tmp, t1, t0
-#
-
-def armV7LowerRegisterReuse(list)
- newList = []
- list.each {
- | node |
- if node.is_a? Instruction
- annotation = node.annotation
- case node.opcode
- when "cieq", "cineq", "cia", "ciaeq", "cib", "cibeq", "cigt", "cigteq", "cilt", "cilteq",
- "cpeq", "cpneq", "cpa", "cpaeq", "cpb", "cpbeq", "cpgt", "cpgteq", "cplt", "cplteq",
- "tis", "tiz", "tinz", "tbs", "tbz", "tbnz", "tps", "tpz", "tpnz", "cbeq", "cbneq",
- "cba", "cbaeq", "cbb", "cbbeq", "cbgt", "cbgteq", "cblt", "cblteq"
- if node.operands.size == 2
- if node.operands[0] == node.operands[1]
- tmp = Tmp.new(node.codeOrigin, :gpr)
- newList << Instruction.new(node.codeOrigin, "move", [node.operands[0], tmp], annotation)
- newList << Instruction.new(node.codeOrigin, node.opcode, [tmp, node.operands[1]])
- else
- newList << node
- end
- else
- raise "Wrong number of arguments at #{node.codeOriginString}" unless node.operands.size == 3
- if node.operands[0] == node.operands[2]
- tmp = Tmp.new(node.codeOrigin, :gpr)
- newList << Instruction.new(node.codeOrigin, "move", [node.operands[0], tmp], annotation)
- newList << Instruction.new(node.codeOrigin, node.opcode, [tmp, node.operands[1], node.operands[2]])
- elsif node.operands[1] == node.operands[2]
- tmp = Tmp.new(node.codeOrigin, :gpr)
- newList << Instruction.new(node.codeOrigin, "move", [node.operands[1], tmp], annotation)
- newList << Instruction.new(node.codeOrigin, node.opcode, [node.operands[0], tmp, node.operands[2]])
- else
- newList << node
- end
- end
- else
- newList << node
- end
- else
- newList << node
- end
- }
- newList
-end
-
-#
# Lea support.
#
@@ -642,31 +162,28 @@ end
class Sequence
def getModifiedListARMv7
- myList = @list
-
- # Verify that we will only see instructions and labels.
- myList.each {
- | node |
- unless node.is_a? Instruction or
- node.is_a? Label or
- node.is_a? LocalLabel or
- node.is_a? Skip
- raise "Unexpected #{node.inspect} at #{node.codeOrigin}"
+ result = @list
+ result = riscLowerSimpleBranchOps(result)
+ result = riscLowerHardBranchOps(result)
+ result = riscLowerShiftOps(result)
+ result = riscLowerMalformedAddresses(result) {
+ | node, address |
+ if address.is_a? BaseIndex
+ address.offset.value == 0
+ elsif address.is_a? Address
+ (-0xff..0xfff).include? address.offset.value
+ else
+ false
end
}
-
- myList = armV7LowerBranchOps(myList)
- myList = armV7LowerShiftOps(myList)
- myList = armV7LowerMalformedAddresses(myList)
- myList = armV7LowerMalformedAddressesDouble(myList)
- myList = armV7LowerMisplacedImmediates(myList)
- myList = armV7LowerMalformedImmediates(myList)
- myList = armV7LowerMisplacedAddresses(myList)
- myList = armV7LowerRegisterReuse(myList)
- myList = assignRegistersToTemporaries(myList, :gpr, ARMv7_EXTRA_GPRS)
- myList = assignRegistersToTemporaries(myList, :fpr, ARMv7_EXTRA_FPRS)
-
- return myList
+ result = riscLowerMalformedAddressesDouble(result)
+ result = riscLowerMisplacedImmediates(result)
+ result = riscLowerMalformedImmediates(result, 0..0xff)
+ result = riscLowerMisplacedAddresses(result)
+ result = riscLowerRegisterReuse(result)
+ result = assignRegistersToTemporaries(result, :gpr, ARMv7_EXTRA_GPRS)
+ result = assignRegistersToTemporaries(result, :fpr, ARMv7_EXTRA_FPRS)
+ return result
end
end
@@ -747,8 +264,8 @@ class Instruction
$asm.annotation annotation if $enableInstrAnnotations
case opcode
- when "addi", "addp", "addis"
- if opcode == "addis"
+ when "addi", "addp", "addis", "addps"
+ if opcode == "addis" or opcode == "addps"
suffix = "s"
else
suffix = ""
@@ -837,7 +354,7 @@ class Instruction
$asm.puts "vcmpe.f64 #{armV7Operands(operands[0..1])}"
$asm.puts "vmrs apsr_nzcv, fpscr"
isUnordered = LocalLabel.unique("bdneq")
- $asm.puts "bvs #{LabelReference.new(codeOrigin, isUnordered).asmLabel}"
+ $asm.puts "bvs #{LocalLabelReference.new(codeOrigin, isUnordered).asmLabel}"
$asm.puts "bne #{operands[2].asmLabel}"
isUnordered.lower("ARMv7")
when "bdgt"
diff --git a/Source/JavaScriptCore/offlineasm/risc.rb b/Source/JavaScriptCore/offlineasm/risc.rb
new file mode 100644
index 000000000..44b4dbd71
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/risc.rb
@@ -0,0 +1,555 @@
+# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require 'config'
+require 'ast'
+require 'opt'
+
+# This file contains utilities that are useful for implementing a backend
+# for RISC-like processors (ARM, MIPS, etc.).
+
+#
+# Lowering of simple branch ops. For example:
+#
+# baddiz foo, bar, baz
+#
+# will become:
+#
+# addi foo, bar
+# bz baz
+#
+
+def riscLowerSimpleBranchOps(list)
+ newList = []
+ list.each {
+ | node |
+ if node.is_a? Instruction
+ annotation = node.annotation
+ case node.opcode
+ when /^b(addi|subi|ori|addp)/
+ op = $1
+ branch = "b" + $~.post_match
+
+ case op
+ when "addi"
+ op = "addis"
+ when "addp"
+ op = "addps"
+ when "subi"
+ op = "subis"
+ when "ori"
+ op = "oris"
+ end
+
+ newList << Instruction.new(node.codeOrigin, op, node.operands[0..-2], annotation)
+ newList << Instruction.new(node.codeOrigin, branch, [node.operands[-1]])
+ when /^bmuli?(s|z|nz)$/
+ condition = $1
+ newList << Instruction.new(node.codeOrigin, "muli", node.operands[0..-2], annotation)
+ newList << Instruction.new(node.codeOrigin, "bti" + condition, [node.operands[-2], node.operands[-1]])
+ else
+ newList << node
+ end
+ else
+ newList << node
+ end
+ }
+ newList
+end
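+
+# An illustrative sketch of the multiply case above: a branching multiply like
+#
+# bmulz foo, bar, baz
+#
+# is split into the arithmetic op and a separate test-and-branch:
+#
+# muli foo, bar
+# btiz bar, baz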
+
+#
+# Lowering of complex branch ops. For example:
+#
+# bmulio foo, bar, baz
+#
+# becomes:
+#
+# smulli foo, bar, bar, tmp1
+# rshifti bar, 31, tmp2
+# bineq tmp1, tmp2, baz
+#
+
+def riscLowerHardBranchOps(list)
+ newList = []
+ list.each {
+ | node |
+ if node.is_a? Instruction and node.opcode == "bmulio"
+ tmp1 = Tmp.new(node.codeOrigin, :gpr)
+ tmp2 = Tmp.new(node.codeOrigin, :gpr)
+ newList << Instruction.new(node.codeOrigin, "smulli", [node.operands[0], node.operands[1], node.operands[1], tmp1], node.annotation)
+ newList << Instruction.new(node.codeOrigin, "rshifti", [node.operands[-2], Immediate.new(node.codeOrigin, 31), tmp2])
+ newList << Instruction.new(node.codeOrigin, "bineq", [tmp1, tmp2, node.operands[-1]])
+ else
+ newList << node
+ end
+ }
+ newList
+end
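+
+# Why the rshifti-by-31 check works: when a 32-bit signed multiply does not
+# overflow, the high word produced by smulli equals the sign extension of the
+# low word, so bineq on the two words branches exactly when the product
+# overflowed.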
+
+#
+# Lowering of shift ops. For example:
+#
+# lshifti foo, bar
+#
+# will become:
+#
+# andi foo, 31, tmp
+# lshifti tmp, bar
+#
+
+def riscSanitizeShift(operand, list)
+ return operand if operand.immediate?
+
+ tmp = Tmp.new(operand.codeOrigin, :gpr)
+ list << Instruction.new(operand.codeOrigin, "andi", [operand, Immediate.new(operand.codeOrigin, 31), tmp])
+ tmp
+end
+
+def riscLowerShiftOps(list)
+ newList = []
+ list.each {
+ | node |
+ if node.is_a? Instruction
+ case node.opcode
+ when "lshifti", "rshifti", "urshifti", "lshiftp", "rshiftp", "urshiftp"
+ if node.operands.size == 2
+ newList << Instruction.new(node.codeOrigin, node.opcode, [riscSanitizeShift(node.operands[0], newList), node.operands[1]], node.annotation)
+ else
+ newList << Instruction.new(node.codeOrigin, node.opcode, [node.operands[0], riscSanitizeShift(node.operands[1], newList), node.operands[2]], node.annotation)
+ raise "Wrong number of operands for shift at #{node.codeOriginString}" unless node.operands.size == 3
+ end
+ else
+ newList << node
+ end
+ else
+ newList << node
+ end
+ }
+ newList
+end
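+
+# The andi-with-31 above gives shift amounts modulo-32 semantics (as on x86,
+# and as JavaScript requires); a bare ARM register shift consumes the low byte
+# of the register, so an amount of 32 or more would otherwise clear the value.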
+
+#
+# Lowering of malformed addresses. For example:
+#
+# loadp 10000[foo], bar
+#
+# will become:
+#
+# move 10000, tmp
+# addp foo, tmp
+# loadp 0[tmp], bar
+#
+# Note that you use this lowering phase by passing it a block: it is called
+# with each instruction and address, and should return true if the address is
+# already well-formed (that is, you don't want it lowered), or false if it
+# must be lowered. For example, to get the effect of the example above, the
+# block would have to say something like:
+#
+# riscLowerMalformedAddresses(thingy) {
+# | node, address |
+# if address.is_a? Address
+# (-1000..1000).include? address.offset.value
+# else
+# true # don't lower anything else, in this example
+# end
+# }
+#
+# See armv7.rb for a different example, in which we lower all BaseIndex addresses
+# that have non-zero offset, all Address addresses that have large offsets, and
+# all other addresses (like AbsoluteAddress).
+#
+
+class Node
+ def riscLowerMalformedAddressesRecurse(list, topLevelNode, &block)
+ mapChildren {
+ | subNode |
+ subNode.riscLowerMalformedAddressesRecurse(list, topLevelNode, &block)
+ }
+ end
+end
+
+class Address
+ def riscLowerMalformedAddressesRecurse(list, node)
+ return self if yield node, self
+
+ tmp = Tmp.new(codeOrigin, :gpr)
+ list << Instruction.new(codeOrigin, "move", [offset, tmp])
+ list << Instruction.new(codeOrigin, "addp", [base, tmp])
+ Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, 0))
+ end
+end
+
+class BaseIndex
+ def riscLowerMalformedAddressesRecurse(list, node, &block)
+ return self if yield node, self
+
+ tmp = Tmp.new(codeOrigin, :gpr)
+ list << Instruction.new(codeOrigin, "leap", [BaseIndex.new(codeOrigin, base, index, scale, Immediate.new(codeOrigin, 0)), tmp])
+ Address.new(codeOrigin, tmp, offset).riscLowerMalformedAddressesRecurse(list, node, &block)
+ end
+end
+
+class AbsoluteAddress
+ def riscLowerMalformedAddressesRecurse(list, node)
+ return self if yield node, self
+
+ tmp = Tmp.new(codeOrigin, :gpr)
+ list << Instruction.new(codeOrigin, "move", [address, tmp])
+ Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, 0))
+ end
+end
+
+def riscLowerMalformedAddresses(list, &block)
+ newList = []
+ list.each {
+ | node |
+ newList << node.riscLowerMalformedAddressesRecurse(newList, node, &block)
+ }
+ newList
+end
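+
+# A sketch of how the BaseIndex case composes with the Address case, assuming
+# a block that accepts only small Address offsets:
+#
+# loadp 4[foo, bar, 8], baz
+#
+# first has its base-index form stripped,
+#
+# leap [foo, bar, 8], tmp
+# loadp 4[tmp], baz
+#
+# and the resulting 4[tmp] operand is re-checked against the same block.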
+
+#
+# Lowering of malformed addresses in double loads and stores. For example:
+#
+# loadd [foo, bar, 8], baz
+#
+# becomes:
+#
+# leap [foo, bar, 8], tmp
+# loadd [tmp], baz
+#
+
+class Node
+ def riscDoubleAddress(list)
+ self
+ end
+end
+
+class BaseIndex
+ def riscDoubleAddress(list)
+ tmp = Tmp.new(codeOrigin, :gpr)
+ list << Instruction.new(codeOrigin, "leap", [self, tmp])
+ Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, 0))
+ end
+end
+
+def riscLowerMalformedAddressesDouble(list)
+ newList = []
+ list.each {
+ | node |
+ if node.is_a? Instruction
+ case node.opcode
+ when "loadd"
+ newList << Instruction.new(node.codeOrigin, "loadd", [node.operands[0].riscDoubleAddress(newList), node.operands[1]], node.annotation)
+ when "stored"
+ newList << Instruction.new(node.codeOrigin, "stored", [node.operands[0], node.operands[1].riscDoubleAddress(newList)], node.annotation)
+ else
+ newList << node
+ end
+ else
+ newList << node
+ end
+ }
+ newList
+end
+
+#
+# Lowering of misplaced immediates. For example:
+#
+# storei 0, [foo]
+#
+# will become:
+#
+# move 0, tmp
+# storei tmp, [foo]
+#
+
+def riscLowerMisplacedImmediates(list)
+ newList = []
+ list.each {
+ | node |
+ if node.is_a? Instruction
+ case node.opcode
+ when "storeb", "storei", "storep"
+ operands = node.operands
+ newOperands = []
+ operands.each {
+ | operand |
+ if operand.is_a? Immediate
+ tmp = Tmp.new(operand.codeOrigin, :gpr)
+ newList << Instruction.new(operand.codeOrigin, "move", [operand, tmp])
+ newOperands << tmp
+ else
+ newOperands << operand
+ end
+ }
+ newList << Instruction.new(node.codeOrigin, node.opcode, newOperands, node.annotation)
+ else
+ newList << node
+ end
+ else
+ newList << node
+ end
+ }
+ newList
+end
+
+#
+# Lowering of malformed immediates except when used in a "move" instruction.
+# For example:
+#
+# addp 642641, foo
+#
+# will become:
+#
+# move 642641, tmp
+# addp tmp, foo
+#
+
+class Node
+ def riscLowerMalformedImmediatesRecurse(list, validImmediates)
+ mapChildren {
+ | node |
+ node.riscLowerMalformedImmediatesRecurse(list, validImmediates)
+ }
+ end
+end
+
+class Address
+ def riscLowerMalformedImmediatesRecurse(list, validImmediates)
+ self
+ end
+end
+
+class BaseIndex
+ def riscLowerMalformedImmediatesRecurse(list, validImmediates)
+ self
+ end
+end
+
+class AbsoluteAddress
+ def riscLowerMalformedImmediatesRecurse(list, validImmediates)
+ self
+ end
+end
+
+class Immediate
+ def riscLowerMalformedImmediatesRecurse(list, validImmediates)
+ unless validImmediates.include? value
+ tmp = Tmp.new(codeOrigin, :gpr)
+ list << Instruction.new(codeOrigin, "move", [self, tmp])
+ tmp
+ else
+ self
+ end
+ end
+end
+
+def riscLowerMalformedImmediates(list, validImmediates)
+ newList = []
+ list.each {
+ | node |
+ if node.is_a? Instruction
+ annotation = node.annotation
+ case node.opcode
+ when "move"
+ newList << node
+ when "addi", "addp", "addis", "subi", "subp", "subis"
+ if node.operands[0].is_a? Immediate and
+ (not validImmediates.include? node.operands[0].value) and
+ validImmediates.include? -node.operands[0].value and
+ node.operands.size == 2
+ if node.opcode =~ /^add/
+ newOpcode = "sub" + $~.post_match
+ else
+ node.opcode =~ /^sub/
+ newOpcode = "add" + $~.post_match
+ end
+ newList << Instruction.new(node.codeOrigin, newOpcode,
+ [Immediate.new(node.codeOrigin, -node.operands[0].value)] + node.operands[1..-1],
+ annotation)
+ else
+ newList << node.riscLowerMalformedImmediatesRecurse(newList, validImmediates)
+ end
+ when "muli", "mulp"
+ if node.operands[0].is_a? Immediate
+ tmp = Tmp.new(node.codeOrigin, :gpr)
+ newList << Instruction.new(node.codeOrigin, "move", [node.operands[0], tmp], annotation)
+ newList << Instruction.new(node.codeOrigin, node.opcode, [tmp] + node.operands[1..-1])
+ else
+ newList << node.riscLowerMalformedImmediatesRecurse(newList, validImmediates)
+ end
+ else
+ newList << node.riscLowerMalformedImmediatesRecurse(newList, validImmediates)
+ end
+ else
+ newList << node
+ end
+ }
+ newList
+end
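+
+# A sketch of the two paths above, assuming validImmediates = 0..0xff:
+#
+# addi -1, foo
+#
+# has an out-of-range immediate whose negation is in range, so it becomes
+#
+# subi 1, foo
+#
+# whereas 1000, whose negation is also out of range, takes the generic rewrite:
+#
+# move 1000, tmp
+# addi tmp, foo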
+
+#
+# Lowering of misplaced addresses. For example:
+#
+# addi foo, [bar]
+#
+# will become:
+#
+# loadi [bar], tmp
+# addi foo, tmp
+# storei tmp, [bar]
+#
+# Another example:
+#
+# addi [foo], bar
+#
+# will become:
+#
+# loadi [foo], tmp
+# addi tmp, bar
+#
+
+def riscAsRegister(preList, postList, operand, suffix, needStore)
+ return operand unless operand.address?
+
+ tmp = Tmp.new(operand.codeOrigin, if suffix == "d" then :fpr else :gpr end)
+ preList << Instruction.new(operand.codeOrigin, "load" + suffix, [operand, tmp])
+ if needStore
+ postList << Instruction.new(operand.codeOrigin, "store" + suffix, [tmp, operand])
+ end
+ tmp
+end
+
+def riscAsRegisters(preList, postList, operands, suffix)
+ newOperands = []
+ operands.each_with_index {
+ | operand, index |
+ newOperands << riscAsRegister(preList, postList, operand, suffix, index == operands.size - 1)
+ }
+ newOperands
+end
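+
+# Note that riscAsRegisters treats only an instruction's final operand as a
+# potential destination, so only that operand is stored back afterwards.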
+
+def riscLowerMisplacedAddresses(list)
+ newList = []
+ list.each {
+ | node |
+ if node.is_a? Instruction
+ postInstructions = []
+ annotation = node.annotation
+ case node.opcode
+ when "addi", "addis", "andi", "lshifti", "muli", "negi", "noti", "ori", "oris",
+ "rshifti", "urshifti", "subi", "subis", "xori", /^bi/, /^bti/, /^ci/, /^ti/
+ newList << Instruction.new(node.codeOrigin,
+ node.opcode,
+ riscAsRegisters(newList, postInstructions, node.operands, "i"),
+ annotation)
+ when "addp", "andp", "lshiftp", "mulp", "negp", "orp", "rshiftp", "urshiftp",
+ "subp", "xorp", /^bp/, /^btp/, /^cp/
+ newList << Instruction.new(node.codeOrigin,
+ node.opcode,
+ riscAsRegisters(newList, postInstructions, node.operands, "p"),
+ annotation)
+ when "bbeq", "bbneq", "bba", "bbaeq", "bbb", "bbbeq", "btbz", "btbnz", "tbz", "tbnz",
+ "cbeq", "cbneq", "cba", "cbaeq", "cbb", "cbbeq"
+ newList << Instruction.new(node.codeOrigin,
+ node.opcode,
+ riscAsRegisters(newList, postInstructions, node.operands, "b"),
+ annotation)
+ when "bbgt", "bbgteq", "bblt", "bblteq", "btbs", "tbs", "cbgt", "cbgteq", "cblt", "cblteq"
+ newList << Instruction.new(node.codeOrigin,
+ node.opcode,
+ riscAsRegisters(newList, postInstructions, node.operands, "bs"),
+ annotation)
+ when "addd", "divd", "subd", "muld", "sqrtd", /^bd/
+ newList << Instruction.new(node.codeOrigin,
+ node.opcode,
+ riscAsRegisters(newList, postInstructions, node.operands, "d"),
+ annotation)
+ when "jmp", "call"
+ newList << Instruction.new(node.codeOrigin,
+ node.opcode,
+ [riscAsRegister(newList, postInstructions, node.operands[0], "p", false)],
+ annotation)
+ else
+ newList << node
+ end
+ newList += postInstructions
+ else
+ newList << node
+ end
+ }
+ newList
+end
+
+#
+# Lowering of register reuse in compare instructions. For example:
+#
+# cieq t0, t1, t0
+#
+# will become:
+#
+# move t0, tmp
+# cieq tmp, t1, t0
+#
+
+def riscLowerRegisterReuse(list)
+ newList = []
+ list.each {
+ | node |
+ if node.is_a? Instruction
+ annotation = node.annotation
+ case node.opcode
+ when "cieq", "cineq", "cia", "ciaeq", "cib", "cibeq", "cigt", "cigteq", "cilt", "cilteq",
+ "cpeq", "cpneq", "cpa", "cpaeq", "cpb", "cpbeq", "cpgt", "cpgteq", "cplt", "cplteq",
+ "tis", "tiz", "tinz", "tbs", "tbz", "tbnz", "tps", "tpz", "tpnz", "cbeq", "cbneq",
+ "cba", "cbaeq", "cbb", "cbbeq", "cbgt", "cbgteq", "cblt", "cblteq"
+ if node.operands.size == 2
+ if node.operands[0] == node.operands[1]
+ tmp = Tmp.new(node.codeOrigin, :gpr)
+ newList << Instruction.new(node.codeOrigin, "move", [node.operands[0], tmp], annotation)
+ newList << Instruction.new(node.codeOrigin, node.opcode, [tmp, node.operands[1]])
+ else
+ newList << node
+ end
+ else
+ raise "Wrong number of arguments at #{node.codeOriginString}" unless node.operands.size == 3
+ if node.operands[0] == node.operands[2]
+ tmp = Tmp.new(node.codeOrigin, :gpr)
+ newList << Instruction.new(node.codeOrigin, "move", [node.operands[0], tmp], annotation)
+ newList << Instruction.new(node.codeOrigin, node.opcode, [tmp, node.operands[1], node.operands[2]])
+ elsif node.operands[1] == node.operands[2]
+ tmp = Tmp.new(node.codeOrigin, :gpr)
+ newList << Instruction.new(node.codeOrigin, "move", [node.operands[1], tmp], annotation)
+ newList << Instruction.new(node.codeOrigin, node.opcode, [node.operands[0], tmp, node.operands[2]])
+ else
+ newList << node
+ end
+ end
+ else
+ newList << node
+ end
+ else
+ newList << node
+ end
+ }
+ newList
+end
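+
+# This phase exists because the backends materialize a compare's boolean
+# result into the destination register before the source operands have been
+# fully consumed; copying the aliased source into a fresh temporary first
+# keeps the inputs intact.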
+
diff --git a/Source/JavaScriptCore/parser/Lexer.cpp b/Source/JavaScriptCore/parser/Lexer.cpp
index 8c9eaa12c..8b2020987 100644
--- a/Source/JavaScriptCore/parser/Lexer.cpp
+++ b/Source/JavaScriptCore/parser/Lexer.cpp
@@ -1617,7 +1617,7 @@ bool Lexer<T>::scanRegExp(const Identifier*& pattern, const Identifier*& flags,
}
}
- pattern = makeIdentifier(m_buffer16.data(), m_buffer16.size());
+ pattern = makeIdentifierSameType(m_buffer16.data(), m_buffer16.size());
m_buffer16.resize(0);
while (isIdentPart(m_current)) {
@@ -1625,7 +1625,7 @@ bool Lexer<T>::scanRegExp(const Identifier*& pattern, const Identifier*& flags,
shift();
}
- flags = makeIdentifier(m_buffer16.data(), m_buffer16.size());
+ flags = makeIdentifierSameType(m_buffer16.data(), m_buffer16.size());
m_buffer16.resize(0);
return true;
diff --git a/Source/JavaScriptCore/parser/Lexer.h b/Source/JavaScriptCore/parser/Lexer.h
index d75f2020d..aa1599b96 100644
--- a/Source/JavaScriptCore/parser/Lexer.h
+++ b/Source/JavaScriptCore/parser/Lexer.h
@@ -146,6 +146,9 @@ private:
ALWAYS_INLINE const Identifier* makeIdentifier(const LChar* characters, size_t length);
ALWAYS_INLINE const Identifier* makeIdentifier(const UChar* characters, size_t length);
+ ALWAYS_INLINE const Identifier* makeLCharIdentifier(const LChar* characters, size_t length);
+ ALWAYS_INLINE const Identifier* makeLCharIdentifier(const UChar* characters, size_t length);
+ ALWAYS_INLINE const Identifier* makeIdentifierSameType(const UChar* characters, size_t length);
ALWAYS_INLINE const Identifier* makeIdentifierLCharFromUChar(const UChar* characters, size_t length);
ALWAYS_INLINE bool lastTokenWasRestrKeyword() const;
@@ -239,6 +242,18 @@ ALWAYS_INLINE const Identifier* Lexer<T>::makeIdentifier(const UChar* characters
}
template <>
+ALWAYS_INLINE const Identifier* Lexer<LChar>::makeIdentifierSameType(const UChar* characters, size_t length)
+{
+ return &m_arena->makeIdentifierLCharFromUChar(m_globalData, characters, length);
+}
+
+template <>
+ALWAYS_INLINE const Identifier* Lexer<UChar>::makeIdentifierSameType(const UChar* characters, size_t length)
+{
+ return &m_arena->makeIdentifier(m_globalData, characters, length);
+}
+
+template <>
ALWAYS_INLINE void Lexer<LChar>::setCodeStart(const StringImpl* sourceString)
{
ASSERT(sourceString->is8Bit());
@@ -259,6 +274,18 @@ ALWAYS_INLINE const Identifier* Lexer<T>::makeIdentifierLCharFromUChar(const UCh
}
template <typename T>
+ALWAYS_INLINE const Identifier* Lexer<T>::makeLCharIdentifier(const LChar* characters, size_t length)
+{
+ return &m_arena->makeIdentifier(m_globalData, characters, length);
+}
+
+template <typename T>
+ALWAYS_INLINE const Identifier* Lexer<T>::makeLCharIdentifier(const UChar* characters, size_t length)
+{
+ return &m_arena->makeIdentifierLCharFromUChar(m_globalData, characters, length);
+}
+
+template <typename T>
ALWAYS_INLINE JSTokenType Lexer<T>::lexExpectIdentifier(JSTokenData* tokenData, JSTokenLocation* tokenLocation, unsigned lexerFlags, bool strictMode)
{
ASSERT((lexerFlags & LexerFlagsIgnoreReservedWords));
@@ -293,7 +320,7 @@ ALWAYS_INLINE JSTokenType Lexer<T>::lexExpectIdentifier(JSTokenData* tokenData,
if (lexerFlags & LexexFlagsDontBuildKeywords)
tokenData->ident = 0;
else
- tokenData->ident = makeIdentifier(start, ptr - start);
+ tokenData->ident = makeLCharIdentifier(start, ptr - start);
tokenLocation->line = m_lineNumber;
tokenLocation->startOffset = start - m_codeStart;
tokenLocation->endOffset = currentOffset();
diff --git a/Source/JavaScriptCore/parser/Parser.h b/Source/JavaScriptCore/parser/Parser.h
index 4c005fa5e..dc42d36ba 100644
--- a/Source/JavaScriptCore/parser/Parser.h
+++ b/Source/JavaScriptCore/parser/Parser.h
@@ -882,7 +882,7 @@ private:
bool canRecurse()
{
- return m_stack.recursionCheck();
+ return m_stack.isSafeToRecurse();
}
int lastTokenEnd() const
diff --git a/Source/JavaScriptCore/runtime/JSScope.cpp b/Source/JavaScriptCore/runtime/JSScope.cpp
index 8fd49b861..508a90540 100644
--- a/Source/JavaScriptCore/runtime/JSScope.cpp
+++ b/Source/JavaScriptCore/runtime/JSScope.cpp
@@ -86,126 +86,388 @@ int JSScope::localDepth()
return scopeDepth;
}
-JSValue JSScope::resolve(CallFrame* callFrame, const Identifier& identifier)
-{
- JSScope* scope = callFrame->scope();
- ASSERT(scope);
+struct LookupResult {
+ JSValue base() const { return m_base; }
+ JSValue value() const { return m_value; }
+ void setBase(JSValue base) { ASSERT(base); m_base = base; }
+ void setValue(JSValue value) { ASSERT(value); m_value = value; }
- do {
- JSObject* object = JSScope::objectAtScope(scope);
- PropertySlot slot(object);
- if (object->getPropertySlot(callFrame, identifier, slot))
- return slot.getValue(callFrame, identifier);
- } while ((scope = scope->next()));
+private:
+ JSValue m_base;
+ JSValue m_value;
+};
- return throwError(callFrame, createUndefinedVariableError(callFrame, identifier));
+
+static void setPutPropertyAccessOffset(PutToBaseOperation* operation, PropertyOffset offset)
+{
+ ASSERT(isOutOfLineOffset(offset));
+ operation->m_offset = offset;
+ operation->m_offsetInButterfly = offsetInButterfly(offset);
+}
+
+static bool executeResolveOperations(CallFrame* callFrame, JSScope* scope, const Identifier& propertyName, ResolveOperation* pc, LookupResult& result)
+{
+ while (true) {
+ switch (pc->m_operation) {
+ case ResolveOperation::Fail:
+ return false;
+ case ResolveOperation::CheckForDynamicEntriesBeforeGlobalScope: {
+ while (JSScope* nextScope = scope->next()) {
+ if (scope->isActivationObject() && scope->structure() != scope->globalObject()->activationStructure())
+ return false;
+ ASSERT(scope->isNameScopeObject() || scope->isVariableObject() || scope->isGlobalObject());
+ scope = nextScope;
+ }
+ pc++;
+ break;
+ }
+ case ResolveOperation::SetBaseToUndefined:
+ result.setBase(jsUndefined());
+ pc++;
+ continue;
+ case ResolveOperation::SetBaseToScope:
+ result.setBase(scope);
+ pc++;
+ continue;
+ case ResolveOperation::ReturnScopeAsBase:
+ result.setBase(scope);
+ return true;
+ case ResolveOperation::SetBaseToGlobal:
+ result.setBase(scope->globalObject());
+ pc++;
+ continue;
+ case ResolveOperation::SkipScopes: {
+ int count = pc->m_scopesToSkip;
+ while (count--)
+ scope = scope->next();
+ ASSERT(scope);
+ pc++;
+ continue;
+ }
+ case ResolveOperation::SkipTopScopeNode:
+ if (callFrame->r(pc->m_activationRegister).jsValue())
+ scope = scope->next();
+ ASSERT(scope);
+ pc++;
+ continue;
+ case ResolveOperation::GetAndReturnScopedVar:
+ ASSERT(jsCast<JSVariableObject*>(scope)->registerAt(pc->m_offset).get());
+ result.setValue(jsCast<JSVariableObject*>(scope)->registerAt(pc->m_offset).get());
+ return true;
+ case ResolveOperation::GetAndReturnGlobalVar:
+ result.setValue(pc->m_registerAddress->get());
+ return true;
+ case ResolveOperation::GetAndReturnGlobalVarWatchable:
+ result.setValue(pc->m_registerAddress->get());
+ return true;
+ case ResolveOperation::ReturnGlobalObjectAsBase:
+ result.setBase(callFrame->lexicalGlobalObject());
+ return true;
+ case ResolveOperation::GetAndReturnGlobalProperty: {
+ JSGlobalObject* globalObject = scope->globalObject();
+ if (globalObject->structure() == pc->m_structure.get()) {
+ result.setValue(globalObject->getDirectOffset(pc->m_offset));
+ return true;
+ }
+
+ PropertySlot slot(globalObject);
+ if (!globalObject->getPropertySlot(callFrame, propertyName, slot))
+ return false;
+
+ JSValue value = slot.getValue(callFrame, propertyName);
+ if (callFrame->hadException())
+ return false;
+
+ Structure* structure = globalObject->structure();
+
+ // Don't try to cache prototype lookups
+ if (globalObject != slot.slotBase() || !slot.isCacheableValue() || !structure->propertyAccessesAreCacheable()) {
+ result.setValue(value);
+ return true;
+ }
+
+ pc->m_structure.set(callFrame->globalData(), callFrame->codeBlock()->ownerExecutable(), structure);
+ pc->m_offset = slot.cachedOffset();
+ result.setValue(value);
+ return true;
+ }
+ }
+ }
}
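+
+// A minimal sketch of a compiled operation stream (offsets hypothetical): for
+// a variable found two scopes up at register offset 4, a value resolve would
+// record roughly
+//
+//     SkipScopes(2), GetAndReturnScopedVar(4)
+//
+// and executeResolveOperations() above walks such a stream until an operation
+// returns a result or fails, at which point the caller falls back to the full
+// scope walk below.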
-JSValue JSScope::resolveSkip(CallFrame* callFrame, const Identifier& identifier, int skip)
+template <JSScope::LookupMode mode, JSScope::ReturnValues returnValues> JSObject* JSScope::resolveContainingScopeInternal(CallFrame* callFrame, const Identifier& identifier, PropertySlot& slot, Vector<ResolveOperation>* operations, PutToBaseOperation* putToBaseOperation, bool )
{
JSScope* scope = callFrame->scope();
ASSERT(scope);
-
+ int scopeCount = 0;
+ bool seenGenericObjectScope = false;
+ bool requiresDynamicChecks = false;
+ bool skipTopScopeNode = false;
+ int activationRegister = 0;
CodeBlock* codeBlock = callFrame->codeBlock();
-
- bool checkTopLevel = codeBlock->codeType() == FunctionCode && codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- if (callFrame->uncheckedR(codeBlock->activationRegister()).jsValue())
- scope = scope->next();
- }
- while (skip--) {
- scope = scope->next();
- ASSERT(scope);
- }
+ if (mode == UnknownResolve) {
+ ASSERT(operations->isEmpty());
+ if (codeBlock->codeType() == FunctionCode && codeBlock->needsActivation()) {
+ activationRegister = codeBlock->activationRegister();
+ JSValue activation = callFrame->r(activationRegister).jsValue();
+
+ // If the activation register doesn't match our actual scope, a dynamic
+ // scope has been inserted so we shouldn't skip the top scope node.
+ if (activation == scope) {
+ jsCast<JSActivation*>(activation.asCell())->isDynamicScope(requiresDynamicChecks);
+ if (!requiresDynamicChecks) {
+ ASSERT(jsCast<JSActivation*>(activation.asCell())->symbolTable()->get(identifier.impl()).isNull());
+ scope = scope->next();
+ ASSERT(scope);
+ skipTopScopeNode = true;
+ }
+ } else if (!activation)
+ skipTopScopeNode = true;
+ }
+ } else
+ ASSERT(operations->size());
+
+ if (codeBlock->codeType() == EvalCode && scope->next())
+ requiresDynamicChecks = true;
+
+ if (mode == UnknownResolve && putToBaseOperation)
+ putToBaseOperation->m_kind = PutToBaseOperation::Generic;
do {
JSObject* object = JSScope::objectAtScope(scope);
- PropertySlot slot(object);
- if (object->getPropertySlot(callFrame, identifier, slot))
- return slot.getValue(callFrame, identifier);
+ slot = PropertySlot(object);
+
+ bool currentScopeNeedsDynamicChecks = false;
+ if (!(scope->isVariableObject() || scope->isNameScopeObject()) || (scope->next() && scope->isDynamicScope(currentScopeNeedsDynamicChecks)))
+ seenGenericObjectScope = true;
+
+ requiresDynamicChecks = requiresDynamicChecks || currentScopeNeedsDynamicChecks;
+
+ if (object->getPropertySlot(callFrame, identifier, slot)) {
+ if (mode == UnknownResolve) {
+ if (seenGenericObjectScope)
+ goto fail;
+ if (putToBaseOperation)
+ putToBaseOperation->m_isDynamic = requiresDynamicChecks;
+ if (!scope->next()) {
+ // Global lookup of some kind
+ JSGlobalObject* globalObject = jsCast<JSGlobalObject*>(scope);
+ SymbolTableEntry entry = globalObject->symbolTable()->get(identifier.impl());
+ if (!entry.isNull()) {
+ if (requiresDynamicChecks)
+ operations->append(ResolveOperation::checkForDynamicEntriesBeforeGlobalScope());
+
+ if (putToBaseOperation) {
+ putToBaseOperation->m_isDynamic = requiresDynamicChecks;
+ if (entry.isReadOnly())
+ putToBaseOperation->m_kind = PutToBaseOperation::Readonly;
+ else if (entry.couldBeWatched()) {
+ putToBaseOperation->m_kind = PutToBaseOperation::GlobalVariablePutChecked;
+ putToBaseOperation->m_predicatePointer = entry.addressOfIsWatched();
+ } else
+ putToBaseOperation->m_kind = PutToBaseOperation::GlobalVariablePut;
+ putToBaseOperation->m_registerAddress = &globalObject->registerAt(entry.getIndex());
+ }
+ // Override custom accessor behaviour that the DOM introduces for some
+ // event handlers declared on function declarations.
+ if (!requiresDynamicChecks)
+ slot.setValue(globalObject, globalObject->registerAt(entry.getIndex()).get());
+ switch (returnValues) {
+ case ReturnValue:
+ ASSERT(!putToBaseOperation);
+ operations->append(ResolveOperation::getAndReturnGlobalVar(&globalObject->registerAt(entry.getIndex()), entry.couldBeWatched()));
+ break;
+ case ReturnBase:
+ ASSERT(putToBaseOperation);
+ operations->append(ResolveOperation::returnGlobalObjectAsBase());
+ break;
+ case ReturnBaseAndValue:
+ ASSERT(putToBaseOperation);
+ operations->append(ResolveOperation::setBaseToGlobal());
+ operations->append(ResolveOperation::getAndReturnGlobalVar(&globalObject->registerAt(entry.getIndex()), entry.couldBeWatched()));
+ break;
+ case ReturnThisAndValue:
+ ASSERT(!putToBaseOperation);
+ operations->append(ResolveOperation::setBaseToUndefined());
+ operations->append(ResolveOperation::getAndReturnGlobalVar(&globalObject->registerAt(entry.getIndex()), entry.couldBeWatched()));
+ break;
+ }
+ } else {
+ if (!slot.isCacheableValue() || slot.slotBase() != globalObject)
+ goto fail;
+
+ if (requiresDynamicChecks)
+ operations->append(ResolveOperation::checkForDynamicEntriesBeforeGlobalScope());
+
+ if (putToBaseOperation) {
+ putToBaseOperation->m_isDynamic = requiresDynamicChecks;
+ putToBaseOperation->m_kind = PutToBaseOperation::GlobalPropertyPut;
+ putToBaseOperation->m_structure.set(callFrame->globalData(), callFrame->codeBlock()->ownerExecutable(), globalObject->structure());
+ setPutPropertyAccessOffset(putToBaseOperation, slot.cachedOffset());
+ }
+ switch (returnValues) {
+ case ReturnValue:
+ ASSERT(!putToBaseOperation);
+ operations->append(ResolveOperation::getAndReturnGlobalProperty());
+ break;
+ case ReturnBase:
+ ASSERT(putToBaseOperation);
+ operations->append(ResolveOperation::returnGlobalObjectAsBase());
+ break;
+ case ReturnBaseAndValue:
+ ASSERT(putToBaseOperation);
+ operations->append(ResolveOperation::setBaseToGlobal());
+ operations->append(ResolveOperation::getAndReturnGlobalProperty());
+ break;
+ case ReturnThisAndValue:
+ ASSERT(!putToBaseOperation);
+ operations->append(ResolveOperation::setBaseToUndefined());
+ operations->append(ResolveOperation::getAndReturnGlobalProperty());
+ break;
+ }
+ }
+ return object;
+ }
+ if (!requiresDynamicChecks) {
+ // Normal lexical lookup
+ JSVariableObject* variableObject = jsCast<JSVariableObject*>(scope);
+ ASSERT(variableObject);
+ ASSERT(variableObject->symbolTable());
+ SymbolTableEntry entry = variableObject->symbolTable()->get(identifier.impl());
+ // Variable was actually inserted by eval
+ if (entry.isNull()) {
+ ASSERT(!jsDynamicCast<JSNameScope*>(variableObject));
+ goto fail;
+ }
+
+ if (putToBaseOperation) {
+ putToBaseOperation->m_kind = entry.isReadOnly() ? PutToBaseOperation::Readonly : PutToBaseOperation::VariablePut;
+ putToBaseOperation->m_structure.set(callFrame->globalData(), callFrame->codeBlock()->ownerExecutable(), callFrame->lexicalGlobalObject()->activationStructure());
+ putToBaseOperation->m_offset = entry.getIndex();
+ putToBaseOperation->m_scopeDepth = (skipTopScopeNode ? 1 : 0) + scopeCount;
+ }
+
+ if (skipTopScopeNode)
+ operations->append(ResolveOperation::skipTopScopeNode(activationRegister));
+
+ operations->append(ResolveOperation::skipScopes(scopeCount));
+ switch (returnValues) {
+ case ReturnBaseAndValue:
+ operations->append(ResolveOperation::setBaseToScope());
+ operations->append(ResolveOperation::getAndReturnScopedVar(entry.getIndex()));
+ break;
+
+ case ReturnBase:
+ operations->append(ResolveOperation::returnScopeAsBase());
+ break;
+
+ case ReturnThisAndValue:
+ operations->append(ResolveOperation::setBaseToUndefined());
+ // fallthrough
+ case ReturnValue:
+ operations->append(ResolveOperation::getAndReturnScopedVar(entry.getIndex()));
+ break;
+ }
+ return object;
+ }
+ fail:
+ if (!operations->size())
+ operations->append(ResolveOperation::resolveFail());
+ }
+ return object;
+ }
+ scopeCount++;
} while ((scope = scope->next()));
-
- return throwError(callFrame, createUndefinedVariableError(callFrame, identifier));
+
+ if (mode == UnknownResolve) {
+ ASSERT(operations->isEmpty());
+ if (seenGenericObjectScope) {
+ operations->append(ResolveOperation::resolveFail());
+ return 0;
+ }
+ if (putToBaseOperation) {
+ putToBaseOperation->m_isDynamic = requiresDynamicChecks;
+ putToBaseOperation->m_kind = PutToBaseOperation::GlobalPropertyPut;
+ putToBaseOperation->m_structure.clear();
+ putToBaseOperation->m_offset = -1;
+ }
+ if (requiresDynamicChecks)
+ operations->append(ResolveOperation::checkForDynamicEntriesBeforeGlobalScope());
+ switch (returnValues) {
+ case ReturnValue:
+ ASSERT(!putToBaseOperation);
+ operations->append(ResolveOperation::getAndReturnGlobalProperty());
+ break;
+ case ReturnBase:
+ ASSERT(putToBaseOperation);
+ operations->append(ResolveOperation::returnGlobalObjectAsBase());
+ break;
+ case ReturnBaseAndValue:
+ ASSERT(putToBaseOperation);
+ operations->append(ResolveOperation::setBaseToGlobal());
+ operations->append(ResolveOperation::getAndReturnGlobalProperty());
+ break;
+ case ReturnThisAndValue:
+ ASSERT(!putToBaseOperation);
+ operations->append(ResolveOperation::setBaseToUndefined());
+ operations->append(ResolveOperation::getAndReturnGlobalProperty());
+ break;
+ }
+ }
+ return 0;
}
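+
+// Note on the template above: UnknownResolve both performs the lookup and
+// records a ResolveOperations stream for future fast-path execution, while
+// KnownResolve redoes the slow walk without appending to the already
+// populated stream.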
-JSValue JSScope::resolveGlobal(
- CallFrame* callFrame,
- const Identifier& identifier,
- JSGlobalObject* globalObject,
- WriteBarrierBase<Structure>* cachedStructure,
- PropertyOffset* cachedOffset
-)
+template <JSScope::ReturnValues returnValues> JSObject* JSScope::resolveContainingScope(CallFrame* callFrame, const Identifier& identifier, PropertySlot& slot, Vector<ResolveOperation>* operations, PutToBaseOperation* putToBaseOperation, bool isStrict)
{
- if (globalObject->structure() == cachedStructure->get())
- return globalObject->getDirectOffset(*cachedOffset);
-
- PropertySlot slot(globalObject);
- if (!globalObject->getPropertySlot(callFrame, identifier, slot))
- return throwError(callFrame, createUndefinedVariableError(callFrame, identifier));
-
- JSValue result = slot.getValue(callFrame, identifier);
- if (callFrame->globalData().exception)
- return JSValue();
-
- if (slot.isCacheableValue() && !globalObject->structure()->isUncacheableDictionary() && slot.slotBase() == globalObject) {
- cachedStructure->set(callFrame->globalData(), callFrame->codeBlock()->ownerExecutable(), globalObject->structure());
- *cachedOffset = slot.cachedOffset();
- }
+ if (operations->size())
+ return resolveContainingScopeInternal<KnownResolve, returnValues>(callFrame, identifier, slot, operations, putToBaseOperation, isStrict);
+ JSObject* result = resolveContainingScopeInternal<UnknownResolve, returnValues>(callFrame, identifier, slot, operations, putToBaseOperation, isStrict);
+ operations->shrinkToFit();
return result;
}
-JSValue JSScope::resolveGlobalDynamic(
- CallFrame* callFrame,
- const Identifier& identifier,
- int skip,
- WriteBarrierBase<Structure>* cachedStructure,
- PropertyOffset* cachedOffset
-)
+JSValue JSScope::resolve(CallFrame* callFrame, const Identifier& identifier, ResolveOperations* operations)
{
- JSScope* scope = callFrame->scope();
- ASSERT(scope);
-
- CodeBlock* codeBlock = callFrame->codeBlock();
-
- bool checkTopLevel = codeBlock->codeType() == FunctionCode && codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- if (callFrame->uncheckedR(codeBlock->activationRegister()).jsValue())
- scope = scope->next();
+ ASSERT(operations);
+ LookupResult fastResult;
+ if (operations->size() && executeResolveOperations(callFrame, callFrame->scope(), identifier, operations->data(), fastResult)) {
+ ASSERT(fastResult.value());
+ ASSERT(!callFrame->hadException());
+ return fastResult.value();
}
- while (skip--) {
- JSObject* object = JSScope::objectAtScope(scope);
- if (!object->hasCustomProperties())
- continue;
- PropertySlot slot(object);
- if (!object->getPropertySlot(callFrame, identifier, slot))
- continue;
+ if (callFrame->hadException())
+ return JSValue();
- JSValue result = slot.getValue(callFrame, identifier);
- if (callFrame->globalData().exception)
- return JSValue();
- return result;
+ PropertySlot slot;
+ if (JSScope::resolveContainingScope<ReturnValue>(callFrame, identifier, slot, operations, 0, false)) {
+ ASSERT(operations->size());
+ return slot.getValue(callFrame, identifier);
}
+ ASSERT(operations->size());
- return resolveGlobal(callFrame, identifier, callFrame->lexicalGlobalObject(), cachedStructure, cachedOffset);
+ return throwError(callFrame, createUndefinedVariableError(callFrame, identifier));
}
-JSValue JSScope::resolveBase(CallFrame* callFrame, const Identifier& identifier, bool isStrict)
+JSValue JSScope::resolveBase(CallFrame* callFrame, const Identifier& identifier, bool isStrict, ResolveOperations* operations, PutToBaseOperation* putToBaseOperations)
{
- JSScope* scope = callFrame->scope();
- ASSERT(scope);
-
- do {
- JSObject* object = JSScope::objectAtScope(scope);
+ ASSERT(operations);
+ ASSERT_UNUSED(putToBaseOperations, putToBaseOperations);
+ LookupResult fastResult;
+ if (operations->size() && executeResolveOperations(callFrame, callFrame->scope(), identifier, operations->data(), fastResult)) {
+ ASSERT(fastResult.base());
+ ASSERT(!callFrame->hadException());
+ return fastResult.base();
+ }
- PropertySlot slot(object);
- if (!object->getPropertySlot(callFrame, identifier, slot))
- continue;
+ if (callFrame->hadException())
+ return JSValue();
- return JSValue(object);
- } while ((scope = scope->next()));
+ PropertySlot slot;
+ if (JSObject* base = JSScope::resolveContainingScope<ReturnBase>(callFrame, identifier, slot, operations, putToBaseOperations, isStrict)) {
+ ASSERT(operations->size());
+ return base;
+ }
if (!isStrict)
return callFrame->lexicalGlobalObject();
@@ -213,50 +475,157 @@ JSValue JSScope::resolveBase(CallFrame* callFrame, const Identifier& identifier,
return throwError(callFrame, createErrorForInvalidGlobalAssignment(callFrame, identifier.string()));
}
-JSValue JSScope::resolveWithBase(CallFrame* callFrame, const Identifier& identifier, Register* base)
+JSValue JSScope::resolveWithBase(CallFrame* callFrame, const Identifier& identifier, Register* base, ResolveOperations* operations, PutToBaseOperation* putToBaseOperations)
{
- JSScope* scope = callFrame->scope();
- ASSERT(scope);
-
- do {
- JSObject* object = JSScope::objectAtScope(scope);
+ ASSERT(operations);
+ ASSERT_UNUSED(putToBaseOperations, putToBaseOperations);
+ LookupResult fastResult;
+ if (operations->size() && executeResolveOperations(callFrame, callFrame->scope(), identifier, operations->data(), fastResult)) {
+ ASSERT(fastResult.base());
+ ASSERT(fastResult.value());
+ ASSERT(!callFrame->hadException());
+ *base = fastResult.base();
+ return fastResult.value();
+ }
- PropertySlot slot(object);
- if (!object->getPropertySlot(callFrame, identifier, slot))
- continue;
+ if (callFrame->hadException())
+ return JSValue();
+ PropertySlot slot;
+ if (JSObject* propertyBase = JSScope::resolveContainingScope<ReturnBaseAndValue>(callFrame, identifier, slot, operations, putToBaseOperations, false)) {
+ ASSERT(operations->size());
JSValue value = slot.getValue(callFrame, identifier);
if (callFrame->globalData().exception)
return JSValue();
- *base = JSValue(object);
+ *base = propertyBase;
return value;
- } while ((scope = scope->next()));
+ }
+ ASSERT(operations->size());
return throwError(callFrame, createUndefinedVariableError(callFrame, identifier));
}
-JSValue JSScope::resolveWithThis(CallFrame* callFrame, const Identifier& identifier, Register* base)
+JSValue JSScope::resolveWithThis(CallFrame* callFrame, const Identifier& identifier, Register* base, ResolveOperations* operations)
{
- JSScope* scope = callFrame->scope();
- ASSERT(scope);
-
- do {
- JSObject* object = JSScope::objectAtScope(scope);
+ ASSERT(operations);
+ LookupResult fastResult;
+ if (operations->size() && executeResolveOperations(callFrame, callFrame->scope(), identifier, operations->data(), fastResult)) {
+ ASSERT(fastResult.base());
+ ASSERT(fastResult.value());
+ ASSERT(!callFrame->hadException());
+ *base = fastResult.base();
+ return fastResult.value();
+ }
- PropertySlot slot(object);
- if (!object->getPropertySlot(callFrame, identifier, slot))
- continue;
+ if (callFrame->hadException())
+ return JSValue();
+ PropertySlot slot;
+ if (JSObject* propertyBase = JSScope::resolveContainingScope<ReturnThisAndValue>(callFrame, identifier, slot, operations, 0, false)) {
+ ASSERT(operations->size());
JSValue value = slot.getValue(callFrame, identifier);
if (callFrame->globalData().exception)
return JSValue();
-
- *base = object->structure()->typeInfo().isEnvironmentRecord() ? jsUndefined() : JSValue(object);
+ ASSERT(value);
+ *base = propertyBase->structure()->typeInfo().isEnvironmentRecord() ? jsUndefined() : JSValue(propertyBase);
return value;
- } while ((scope = scope->next()));
+ }
+ ASSERT(operations->size());
+
+ return throwError(callFrame, createUndefinedVariableError(callFrame, identifier));
+}
+
+void JSScope::resolvePut(CallFrame* callFrame, JSValue base, const Identifier& property, JSValue value, PutToBaseOperation* operation)
+{
+ ASSERT_UNUSED(operation, operation);
+ ASSERT(base);
+ ASSERT(value);
+ switch (operation->m_kind) {
+ case PutToBaseOperation::Uninitialised:
+ CRASH();
+
+ case PutToBaseOperation::Readonly:
+ return;
+
+ case PutToBaseOperation::GlobalVariablePutChecked:
+ if (*operation->m_predicatePointer)
+ goto genericHandler;
+ case PutToBaseOperation::GlobalVariablePut:
+ if (operation->m_isDynamic) {
+ JSObject* baseObject = jsCast<JSObject*>(base);
+ if (baseObject != callFrame->lexicalGlobalObject()) {
+ if (baseObject->isGlobalObject())
+ ASSERT(!jsCast<JSGlobalObject*>(baseObject)->assertRegisterIsInThisObject(operation->m_registerAddress));
+ goto genericHandler;
+ }
+ }
+ operation->m_registerAddress->set(callFrame->globalData(), base.asCell(), value);
+ return;
+
+ case PutToBaseOperation::VariablePut: {
+ if (operation->m_isDynamic) {
+ JSObject* baseObject = jsCast<JSObject*>(base);
+ if (baseObject->structure() != operation->m_structure.get())
+ goto genericHandler;
+ }
+ JSVariableObject* variableObject = jsCast<JSVariableObject*>(base);
+ variableObject->registerAt(operation->m_offset).set(callFrame->globalData(), variableObject, value);
+ return;
+ }
+
+ case PutToBaseOperation::GlobalPropertyPut: {
+ JSObject* object = jsCast<JSObject*>(base);
+ if (operation->m_structure.get() != object->structure())
+ break;
+ object->putDirectOffset(callFrame->globalData(), operation->m_offset, value);
+ return;
+ }
+
+ genericHandler:
+ case PutToBaseOperation::Generic:
+ PutPropertySlot slot(operation->m_isStrict);
+ base.put(callFrame, property, value, slot);
+ return;
+ }
+ ASSERT(operation->m_kind == PutToBaseOperation::GlobalPropertyPut);
+ PutPropertySlot slot(operation->m_isStrict);
+ base.put(callFrame, property, value, slot);
+ if (!slot.isCacheable())
+ return;
+ if (callFrame->hadException())
+ return;
+ JSObject* baseObject = jsCast<JSObject*>(base);
+ if (!baseObject->structure()->propertyAccessesAreCacheable())
+ return;
+ if (slot.base() != callFrame->lexicalGlobalObject())
+ return;
+ if (slot.base() != baseObject)
+ return;
+ ASSERT(!baseObject->hasInlineStorage());
+ operation->m_structure.set(callFrame->globalData(), callFrame->codeBlock()->ownerExecutable(), baseObject->structure());
+ setPutPropertyAccessOffset(operation, slot.cachedOffset());
+ return;
+}
+
+JSValue JSScope::resolveGlobal(CallFrame* callFrame, const Identifier& identifier, JSGlobalObject* globalObject, ResolveOperation* resolveOperation)
+{
+ ASSERT(resolveOperation);
+ ASSERT(resolveOperation->m_operation == ResolveOperation::GetAndReturnGlobalProperty);
+ ASSERT_UNUSED(globalObject, callFrame->lexicalGlobalObject() == globalObject);
+
+ LookupResult fastResult;
+ if (executeResolveOperations(callFrame, callFrame->scope(), identifier, resolveOperation, fastResult)) {
+ ASSERT(fastResult.value());
+ ASSERT(!callFrame->hadException());
+ return fastResult.value();
+ }
+
+ if (callFrame->hadException())
+ return JSValue();
return throwError(callFrame, createUndefinedVariableError(callFrame, identifier));
}
+
} // namespace JSC
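
The rewritten resolve entry points above all share one cache-then-fallback shape: try the precomputed ResolveOperations list first, bail out to a full scope walk on a miss, and let the slow path refresh the cache for next time. A minimal standalone sketch of that shape, using hypothetical stand-in types (Result, runCachedOps, and slowResolve are illustrative, not JSC classes or functions):

    #include <optional>
    #include <string>
    #include <vector>

    struct Result { int base; int value; };

    // Stand-in for executeResolveOperations(): succeeds only while the
    // cached plan still matches the current scope chain.
    static std::optional<Result> runCachedOps(const std::vector<int>& ops, const std::string& ident)
    {
        if (ops.empty())
            return std::nullopt; // Nothing cached yet: take the slow path.
        return Result { ops.front(), static_cast<int>(ident.size()) };
    }

    // Stand-in for the full scope walk, which also records a new plan.
    static Result slowResolve(std::vector<int>& ops, const std::string& ident)
    {
        ops.assign(1, static_cast<int>(ident.size())); // Re-prime the cache.
        return Result { 0, ops.front() };
    }

    Result resolve(std::vector<int>& ops, const std::string& ident)
    {
        if (auto fast = runCachedOps(ops, ident))
            return *fast;               // Fast path: no scope walk needed.
        return slowResolve(ops, ident); // Slow path walks, then re-caches.
    }

    int main()
    {
        std::vector<int> cachedOps;         // Starts cold.
        resolve(cachedOps, "x");            // Slow path, primes the cache.
        Result r = resolve(cachedOps, "x"); // Fast path this time.
        return r.value == 1 ? 0 : 1;
    }
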
diff --git a/Source/JavaScriptCore/runtime/JSScope.h b/Source/JavaScriptCore/runtime/JSScope.h
index 011aff57e..a9a9dd8d8 100644
--- a/Source/JavaScriptCore/runtime/JSScope.h
+++ b/Source/JavaScriptCore/runtime/JSScope.h
@@ -27,6 +27,7 @@
#define JSScope_h
#include "JSObject.h"
+#include "ResolveOperation.h"
namespace JSC {
@@ -41,25 +42,12 @@ public:
JS_EXPORT_PRIVATE static JSObject* objectAtScope(JSScope*);
- static JSValue resolve(CallFrame*, const Identifier&);
- static JSValue resolveSkip(CallFrame*, const Identifier&, int skip);
- static JSValue resolveGlobal(
- CallFrame*,
- const Identifier&,
- JSGlobalObject* globalObject,
- WriteBarrierBase<Structure>* cachedStructure,
- PropertyOffset* cachedOffset
- );
- static JSValue resolveGlobalDynamic(
- CallFrame*,
- const Identifier&,
- int skip,
- WriteBarrierBase<Structure>* cachedStructure,
- PropertyOffset* cachedOffset
- );
- static JSValue resolveBase(CallFrame*, const Identifier&, bool isStrict);
- static JSValue resolveWithBase(CallFrame*, const Identifier&, Register* base);
- static JSValue resolveWithThis(CallFrame*, const Identifier&, Register* base);
+ static JSValue resolve(CallFrame*, const Identifier&, ResolveOperations*);
+ static JSValue resolveBase(CallFrame*, const Identifier&, bool isStrict, ResolveOperations*, PutToBaseOperation*);
+ static JSValue resolveWithBase(CallFrame*, const Identifier&, Register* base, ResolveOperations*, PutToBaseOperation*);
+ static JSValue resolveWithThis(CallFrame*, const Identifier&, Register* base, ResolveOperations*);
+ static JSValue resolveGlobal(CallFrame*, const Identifier&, JSGlobalObject*, ResolveOperation*);
+ static void resolvePut(CallFrame*, JSValue base, const Identifier&, JSValue, PutToBaseOperation*);
static void visitChildren(JSCell*, SlotVisitor&);
@@ -80,6 +68,16 @@ protected:
private:
WriteBarrier<JSScope> m_next;
+ enum ReturnValues {
+ ReturnValue = 1,
+ ReturnBase = 2,
+ ReturnThis = 4,
+ ReturnBaseAndValue = ReturnValue | ReturnBase,
+ ReturnThisAndValue = ReturnValue | ReturnThis,
+ };
+ enum LookupMode { UnknownResolve, KnownResolve };
+ template <LookupMode, ReturnValues> static JSObject* resolveContainingScopeInternal(CallFrame*, const Identifier&, PropertySlot&, ResolveOperations*, PutToBaseOperation*, bool isStrict);
+ template <ReturnValues> static JSObject* resolveContainingScope(CallFrame*, const Identifier&, PropertySlot&, ResolveOperations*, PutToBaseOperation*, bool isStrict);
};
inline JSScope::JSScope(JSGlobalData& globalData, Structure* structure, JSScope* next)
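
The new private ReturnValues enum is a bitmask, so one templated resolveContainingScopeInternal can back resolve, resolveBase, resolveWithBase, and resolveWithThis: each caller instantiates the template with the combination of results it needs, and unneeded branches fold away at compile time. A small sketch of the pattern with illustrative names (Wants and lookup are not the JSC identifiers):

    #include <cstdio>

    enum Wants { WantValue = 1, WantBase = 2, WantThis = 4 };

    template <unsigned wants>
    static void lookup(int scopeDepth)
    {
        // Each test is against a compile-time constant, so dead branches
        // vanish in every instantiation.
        if (wants & WantValue)
            std::printf("value at depth %d\n", scopeDepth);
        if (wants & WantBase)
            std::printf("base at depth %d\n", scopeDepth);
        if (wants & WantThis)
            std::printf("this at depth %d\n", scopeDepth);
    }

    int main()
    {
        lookup<WantValue | WantBase>(0); // Mirrors ReturnBaseAndValue.
        lookup<WantValue | WantThis>(1); // Mirrors ReturnThisAndValue.
        return 0;
    }
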
diff --git a/Source/JavaScriptCore/runtime/JSValue.cpp b/Source/JavaScriptCore/runtime/JSValue.cpp
index 651e50cec..a5cdf700b 100644
--- a/Source/JavaScriptCore/runtime/JSValue.cpp
+++ b/Source/JavaScriptCore/runtime/JSValue.cpp
@@ -204,7 +204,7 @@ char* JSValue::description() const
snprintf(description, size, "Int32: %d", asInt32());
else if (isDouble()) {
#if USE(JSVALUE64)
- snprintf(description, size, "Double: %lx, %lf", reinterpretDoubleToIntptr(asDouble()), asDouble());
+ snprintf(description, size, "Double: %lld, %lf", (long long)reinterpretDoubleToInt64(asDouble()), asDouble());
#else
union {
double asDouble;
diff --git a/Source/JavaScriptCore/runtime/JSValue.h b/Source/JavaScriptCore/runtime/JSValue.h
index 6e01d8d2d..7b5c81aa9 100644
--- a/Source/JavaScriptCore/runtime/JSValue.h
+++ b/Source/JavaScriptCore/runtime/JSValue.h
@@ -69,11 +69,7 @@ namespace JSC {
enum PreferredPrimitiveType { NoPreference, PreferNumber, PreferString };
-#if USE(JSVALUE32_64)
typedef int64_t EncodedJSValue;
-#else
- typedef void* EncodedJSValue;
-#endif
union EncodedValueDescriptor {
int64_t asInt64;
@@ -390,9 +386,9 @@ namespace JSC {
EncodedValueDescriptor u;
};
-#if USE(JSVALUE32_64)
typedef IntHash<EncodedJSValue> EncodedJSValueHash;
+#if USE(JSVALUE32_64)
struct EncodedJSValueHashTraits : HashTraits<EncodedJSValue> {
static const bool emptyValueIsZero = false;
static EncodedJSValue emptyValue() { return JSValue::encode(JSValue()); }
@@ -400,8 +396,6 @@ namespace JSC {
static bool isDeletedValue(EncodedJSValue value) { return value == JSValue::encode(JSValue(JSValue::HashTableDeletedValue)); }
};
#else
- typedef PtrHash<EncodedJSValue> EncodedJSValueHash;
-
struct EncodedJSValueHashTraits : HashTraits<EncodedJSValue> {
static void constructDeletedValue(EncodedJSValue& slot) { slot = JSValue::encode(JSValue(JSValue::HashTableDeletedValue)); }
static bool isDeletedValue(EncodedJSValue value) { return value == JSValue::encode(JSValue(JSValue::HashTableDeletedValue)); }
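
With EncodedJSValue now int64_t under both value representations, encode/decode and the hash typedef no longer need a pointer-typed variant; only the hash traits still differ between JSVALUE32_64 and JSVALUE64. The underlying trick is that a union overlays every payload on a single 64-bit word. A sketch of that overlay (Encoded is an illustrative stand-in, not JSC's EncodedValueDescriptor):

    #include <cassert>
    #include <cstdint>

    union Encoded {
        int64_t asInt64;
        double  asDouble;
        void*   asPointer; // On 64-bit targets the pointer occupies the low bits.
    };

    static int64_t encodePointer(void* p)
    {
        Encoded e;
        e.asInt64 = 0;   // Clear all 64 bits first, in case pointers are narrower.
        e.asPointer = p;
        return e.asInt64;
    }

    int main()
    {
        int x = 42;
        Encoded e;
        e.asInt64 = encodePointer(&x);
        assert(e.asPointer == &x); // The pointer round-trips through the integer form.
        return 0;
    }
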
diff --git a/Source/JavaScriptCore/runtime/JSValueInlineMethods.h b/Source/JavaScriptCore/runtime/JSValueInlineMethods.h
index 4c582ab2a..52b747890 100644
--- a/Source/JavaScriptCore/runtime/JSValueInlineMethods.h
+++ b/Source/JavaScriptCore/runtime/JSValueInlineMethods.h
@@ -140,7 +140,6 @@ namespace JSC {
*this = JSValue(static_cast<int32_t>(d));
}
-#if USE(JSVALUE32_64)
inline EncodedJSValue JSValue::encode(JSValue value)
{
return value.u.asInt64;
@@ -153,6 +152,7 @@ namespace JSC {
return v;
}
+#if USE(JSVALUE32_64)
inline JSValue::JSValue()
{
u.asBits.tag = EmptyValueTag;
@@ -333,17 +333,6 @@ namespace JSC {
#else // !USE(JSVALUE32_64) i.e. USE(JSVALUE64)
- // JSValue member functions.
- inline EncodedJSValue JSValue::encode(JSValue value)
- {
- return value.u.ptr;
- }
-
- inline JSValue JSValue::decode(EncodedJSValue ptr)
- {
- return JSValue(reinterpret_cast<JSCell*>(ptr));
- }
-
// 0x0 can never occur naturally because it has a tag of 00, indicating a pointer value, but a payload of 0x0, which is in the (invalid) zero page.
inline JSValue::JSValue()
{
@@ -358,27 +347,27 @@ namespace JSC {
inline JSValue::JSValue(JSCell* ptr)
{
- u.ptr = ptr;
+ u.asInt64 = reinterpret_cast<uintptr_t>(ptr);
}
inline JSValue::JSValue(const JSCell* ptr)
{
- u.ptr = const_cast<JSCell*>(ptr);
+ u.asInt64 = reinterpret_cast<uintptr_t>(const_cast<JSCell*>(ptr));
}
inline JSValue::operator bool() const
{
- return u.ptr;
+ return u.asInt64;
}
inline bool JSValue::operator==(const JSValue& other) const
{
- return u.ptr == other.u.ptr;
+ return u.asInt64 == other.u.asInt64;
}
inline bool JSValue::operator!=(const JSValue& other) const
{
- return u.ptr != other.u.ptr;
+ return u.asInt64 != other.u.asInt64;
}
inline bool JSValue::isEmpty() const
@@ -464,18 +453,18 @@ namespace JSC {
return (u.asInt64 & TagTypeNumber) == TagTypeNumber;
}
- inline intptr_t reinterpretDoubleToIntptr(double value)
+ inline int64_t reinterpretDoubleToInt64(double value)
{
- return bitwise_cast<intptr_t>(value);
+ return bitwise_cast<int64_t>(value);
}
- inline double reinterpretIntptrToDouble(intptr_t value)
+ inline double reinterpretInt64ToDouble(int64_t value)
{
return bitwise_cast<double>(value);
}
ALWAYS_INLINE JSValue::JSValue(EncodeAsDoubleTag, double d)
{
- u.asInt64 = reinterpretDoubleToIntptr(d) + DoubleEncodeOffset;
+ u.asInt64 = reinterpretDoubleToInt64(d) + DoubleEncodeOffset;
}
inline JSValue::JSValue(int i)
@@ -486,7 +475,7 @@ namespace JSC {
inline double JSValue::asDouble() const
{
ASSERT(isDouble());
- return reinterpretIntptrToDouble(u.asInt64 - DoubleEncodeOffset);
+ return reinterpretInt64ToDouble(u.asInt64 - DoubleEncodeOffset);
}
inline bool JSValue::isNumber() const
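
The renamed helpers make the 64-bit arithmetic explicit: under JSVALUE64 a double is stored by adding DoubleEncodeOffset to its bit pattern, shifting all doubles out of the range used by tagged pointers and integers, and decoding simply subtracts the offset back. A self-contained sketch of that add-an-offset encoding (the OFFSET constant here is illustrative, not necessarily JSC's exact value):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static const int64_t OFFSET = 1LL << 48; // Illustrative DoubleEncodeOffset.

    static int64_t encodeDouble(double d)
    {
        int64_t bits;
        std::memcpy(&bits, &d, sizeof bits); // bitwise_cast<int64_t>(d)
        return bits + OFFSET;                // Move doubles out of pointer space.
    }

    static double decodeDouble(int64_t encoded)
    {
        int64_t bits = encoded - OFFSET;
        double d;
        std::memcpy(&d, &bits, sizeof d);
        return d;
    }

    int main()
    {
        assert(decodeDouble(encodeDouble(3.25)) == 3.25);
        assert(decodeDouble(encodeDouble(-0.5)) == -0.5);
        return 0;
    }
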
diff --git a/Source/JavaScriptCore/runtime/JSVariableObject.cpp b/Source/JavaScriptCore/runtime/JSVariableObject.cpp
index c815dcd11..9b03a2c64 100644
--- a/Source/JavaScriptCore/runtime/JSVariableObject.cpp
+++ b/Source/JavaScriptCore/runtime/JSVariableObject.cpp
@@ -31,4 +31,6 @@
namespace JSC {
+const ClassInfo JSVariableObject::s_info = { "VariableObject", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(JSVariableObject) };
+
} // namespace JSC
diff --git a/Source/JavaScriptCore/runtime/JSVariableObject.h b/Source/JavaScriptCore/runtime/JSVariableObject.h
index 25961dc09..3ff7aa841 100644
--- a/Source/JavaScriptCore/runtime/JSVariableObject.h
+++ b/Source/JavaScriptCore/runtime/JSVariableObject.h
@@ -53,6 +53,8 @@ namespace JSC {
WriteBarrierBase<Unknown>* const * addressOfRegisters() const { return &m_registers; }
static size_t offsetOfRegisters() { return OBJECT_OFFSETOF(JSVariableObject, m_registers); }
+ static const ClassInfo s_info;
+
protected:
static const unsigned StructureFlags = Base::StructureFlags;
diff --git a/Source/JavaScriptCore/runtime/RegExpKey.h b/Source/JavaScriptCore/runtime/RegExpKey.h
index f93fbbc1d..58fa38725 100644
--- a/Source/JavaScriptCore/runtime/RegExpKey.h
+++ b/Source/JavaScriptCore/runtime/RegExpKey.h
@@ -73,9 +73,17 @@ struct RegExpKey {
, pattern(pattern)
{
}
+
+ friend inline bool operator==(const RegExpKey& a, const RegExpKey& b);
+
+ struct Hash {
+ static unsigned hash(const RegExpKey& key) { return key.pattern->hash(); }
+ static bool equal(const RegExpKey& a, const RegExpKey& b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = false;
+ };
};
-inline bool operator==(const RegExpKey& a, const RegExpKey& b)
+inline bool operator==(const RegExpKey& a, const RegExpKey& b)
{
if (a.flagsValue != b.flagsValue)
return false;
@@ -90,16 +98,9 @@ inline bool operator==(const RegExpKey& a, const RegExpKey& b)
namespace WTF {
template<typename T> struct DefaultHash;
-template<typename T> struct RegExpHash;
-
-template<> struct RegExpHash<JSC::RegExpKey> {
- static unsigned hash(const JSC::RegExpKey& key) { return key.pattern->hash(); }
- static bool equal(const JSC::RegExpKey& a, const JSC::RegExpKey& b) { return a == b; }
- static const bool safeToCompareToEmptyOrDeleted = false;
-};
template<> struct DefaultHash<JSC::RegExpKey> {
- typedef RegExpHash<JSC::RegExpKey> Hash;
+ typedef JSC::RegExpKey::Hash Hash;
};
template<> struct HashTraits<JSC::RegExpKey> : GenericHashTraits<JSC::RegExpKey> {
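
Moving the hash functor inside RegExpKey keeps the key and its hashing contract in one place; the WTF::DefaultHash specialization then just forwards to the nested type. The same pattern in standard C++, with an illustrative Key in place of RegExpKey:

    #include <cstddef>
    #include <string>
    #include <unordered_set>

    struct Key {
        std::string pattern;
        int flags;

        friend bool operator==(const Key& a, const Key& b)
        {
            return a.flags == b.flags && a.pattern == b.pattern;
        }

        // The key owns its hashing contract, as RegExpKey::Hash does above.
        struct Hash {
            std::size_t operator()(const Key& k) const
            {
                return std::hash<std::string>()(k.pattern); // Pattern dominates; flags rarely collide.
            }
        };
    };

    int main()
    {
        std::unordered_set<Key, Key::Hash> keys;
        keys.insert(Key { "a+b", 0 });
        return keys.count(Key { "a+b", 0 }) == 1 ? 0 : 1;
    }
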
diff --git a/Source/JavaScriptCore/runtime/StringRecursionChecker.h b/Source/JavaScriptCore/runtime/StringRecursionChecker.h
index 127d028e0..831e25b46 100644
--- a/Source/JavaScriptCore/runtime/StringRecursionChecker.h
+++ b/Source/JavaScriptCore/runtime/StringRecursionChecker.h
@@ -21,6 +21,7 @@
#define StringRecursionChecker_h
#include "Interpreter.h"
+#include <wtf/StackStats.h>
namespace JSC {
@@ -41,6 +42,8 @@ private:
ExecState* m_exec;
JSObject* m_thisObject;
JSValue m_earlyReturnValue;
+
+ StackStats::CheckPoint stackCheckpoint;
};
inline JSValue StringRecursionChecker::performCheck()
diff --git a/Source/JavaScriptCore/runtime/Structure.h b/Source/JavaScriptCore/runtime/Structure.h
index f45e9f1d9..5f1299766 100644
--- a/Source/JavaScriptCore/runtime/Structure.h
+++ b/Source/JavaScriptCore/runtime/Structure.h
@@ -140,6 +140,8 @@ namespace JSC {
bool isDictionary() const { return m_dictionaryKind != NoneDictionaryKind; }
bool isUncacheableDictionary() const { return m_dictionaryKind == UncachedDictionaryKind; }
+    bool propertyAccessesAreCacheable() const { return m_dictionaryKind != UncachedDictionaryKind && !typeInfo().prohibitsPropertyCaching(); }
+
// Type accessors.
const TypeInfo& typeInfo() const { ASSERT(structure()->classInfo() == &s_info); return m_typeInfo; }
bool isObject() const { return typeInfo().isObject(); }
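
propertyAccessesAreCacheable() gives resolvePut's caching tail a single guard: memoizing a structure and offset is only sound while the structure is stable, so uncacheable dictionaries and types that prohibit property caching must never be recorded. A sketch of that guard-before-cache discipline with illustrative stand-in types (Shape and PutCache are not the JSC classes):

    #include <cstdio>

    struct Shape {
        bool isUncacheableDictionary;
        bool prohibitsPropertyCaching;
        bool cacheable() const { return !isUncacheableDictionary && !prohibitsPropertyCaching; }
    };

    struct PutCache { const Shape* shape = nullptr; int offset = -1; };

    static void recordPut(PutCache& cache, const Shape& shape, int offset)
    {
        if (!shape.cacheable())
            return; // Never memoize against a shape that can mutate underneath us.
        cache.shape = &shape;
        cache.offset = offset;
    }

    int main()
    {
        Shape stable { false, false };
        PutCache cache;
        recordPut(cache, stable, 3);
        std::printf("cached offset: %d\n", cache.offset); // Prints 3.
        return 0;
    }
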