summaryrefslogtreecommitdiff
path: root/Source/JavaScriptCore/llint
diff options
context:
space:
mode:
authorLorry Tar Creator <lorry-tar-importer@lorry>2015-10-15 09:45:50 +0000
committerLorry Tar Creator <lorry-tar-importer@lorry>2015-10-15 09:45:50 +0000
commite15dd966d523731101f70ccf768bba12435a0208 (patch)
treeae9cb828a24ded2585a41af3f21411523b47897d /Source/JavaScriptCore/llint
downloadWebKitGtk-tarball-e15dd966d523731101f70ccf768bba12435a0208.tar.gz
webkitgtk-2.10.2webkitgtk-2.10.2
Diffstat (limited to 'Source/JavaScriptCore/llint')
-rw-r--r--Source/JavaScriptCore/llint/LLIntCLoop.cpp44
-rw-r--r--Source/JavaScriptCore/llint/LLIntCLoop.h51
-rw-r--r--Source/JavaScriptCore/llint/LLIntCommon.h47
-rw-r--r--Source/JavaScriptCore/llint/LLIntData.cpp172
-rw-r--r--Source/JavaScriptCore/llint/LLIntData.h103
-rw-r--r--Source/JavaScriptCore/llint/LLIntEntrypoint.cpp124
-rw-r--r--Source/JavaScriptCore/llint/LLIntEntrypoint.h44
-rw-r--r--Source/JavaScriptCore/llint/LLIntExceptions.cpp63
-rw-r--r--Source/JavaScriptCore/llint/LLIntExceptions.h53
-rw-r--r--Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h170
-rw-r--r--Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp93
-rw-r--r--Source/JavaScriptCore/llint/LLIntOpcode.h51
-rw-r--r--Source/JavaScriptCore/llint/LLIntSlowPaths.cpp1426
-rw-r--r--Source/JavaScriptCore/llint/LLIntSlowPaths.h128
-rw-r--r--Source/JavaScriptCore/llint/LLIntThunks.cpp120
-rw-r--r--Source/JavaScriptCore/llint/LLIntThunks.h52
-rw-r--r--Source/JavaScriptCore/llint/LowLevelInterpreter.asm1414
-rw-r--r--Source/JavaScriptCore/llint/LowLevelInterpreter.cpp529
-rw-r--r--Source/JavaScriptCore/llint/LowLevelInterpreter.h50
-rw-r--r--Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm2375
-rw-r--r--Source/JavaScriptCore/llint/LowLevelInterpreter64.asm2228
21 files changed, 9337 insertions, 0 deletions
diff --git a/Source/JavaScriptCore/llint/LLIntCLoop.cpp b/Source/JavaScriptCore/llint/LLIntCLoop.cpp
new file mode 100644
index 000000000..e3c6c6ce9
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntCLoop.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LLIntCLoop.h"
+
+#if !ENABLE(JIT)
+
+#include "LLIntData.h"
+
+namespace JSC {
+namespace LLInt {
+
+// One-time setup for the C-loop interpreter. Runs execute() in its special
+// initialization pass (final 'true' argument; see the default parameter in
+// LLIntCLoop.h) with null VM/ProtoCallFrame. NOTE(review): presumably this
+// pass only sets up interpreter dispatch state without executing JS —
+// confirm against LowLevelInterpreter.cpp.
+void CLoop::initialize()
+{
+    execute(llint_entry, 0, 0, 0, true);
+}
+
+} // namespace LLInt
+} // namespace JSC
+
+#endif // !ENABLE(JIT)
diff --git a/Source/JavaScriptCore/llint/LLIntCLoop.h b/Source/JavaScriptCore/llint/LLIntCLoop.h
new file mode 100644
index 000000000..8782b369c
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntCLoop.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LLIntCLoop_h
+#define LLIntCLoop_h
+
+#if !ENABLE(JIT)
+
+#include "CallFrame.h"
+#include "JSCJSValue.h"
+#include "Opcode.h"
+#include "ProtoCallFrame.h"
+
+namespace JSC {
+namespace LLInt {
+
+// Entry points for the C-loop (non-JIT) build of the low-level interpreter.
+class CLoop {
+public:
+    // One-time setup; implemented in LLIntCLoop.cpp by calling execute()
+    // with isInitializationPass = true.
+    static void initialize();
+    // Main interpreter entry. When isInitializationPass is true the VM and
+    // ProtoCallFrame arguments may be null (see CLoop::initialize()).
+    static JSValue execute(OpcodeID entryOpcodeID, void* executableAddress, VM*, ProtoCallFrame*, bool isInitializationPass = false);
+};
+
+} } // namespace JSC::LLInt
+
+using JSC::LLInt::CLoop;
+
+#endif // !ENABLE(JIT)
+
+#endif // LLIntCLoop_h
diff --git a/Source/JavaScriptCore/llint/LLIntCommon.h b/Source/JavaScriptCore/llint/LLIntCommon.h
new file mode 100644
index 000000000..d32a264e5
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntCommon.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LLIntCommon_h
+#define LLIntCommon_h
+
+// Print every instruction executed.
+#define LLINT_EXECUTION_TRACING 0
+
+// Print some information for some of the more subtle slow paths.
+#define LLINT_SLOW_PATH_TRACING 0
+
+// Disable inline allocation in the interpreter. This is great if you're changing
+// how the GC allocates.
+#if ENABLE(ALLOCATION_LOGGING)
+#define LLINT_ALWAYS_ALLOCATE_SLOW 1
+#else
+#define LLINT_ALWAYS_ALLOCATE_SLOW 0
+#endif
+
+// Disable inline caching of get_by_id and put_by_id.
+#define LLINT_ALWAYS_ACCESS_SLOW 0
+
+#endif // LLIntCommon_h
+
diff --git a/Source/JavaScriptCore/llint/LLIntData.cpp b/Source/JavaScriptCore/llint/LLIntData.cpp
new file mode 100644
index 000000000..e926f52dc
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntData.cpp
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LLIntData.h"
+#include "BytecodeConventions.h"
+#include "CodeType.h"
+#include "Instruction.h"
+#include "JSScope.h"
+#include "LLIntCLoop.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "Opcode.h"
+#include "PropertyOffset.h"
+
+namespace JSC { namespace LLInt {
+
+Instruction* Data::s_exceptionInstructions = 0;
+Opcode Data::s_opcodeMap[numOpcodeIDs] = { };
+
+#if ENABLE(JIT)
+extern "C" void llint_entry(void*);
+#endif
+
+// One-time process-wide LLInt setup: allocates the shared exception
+// "instruction" buffer and populates Data's static tables. The allocation is
+// stored in a static and never deleted here — it appears to live for the
+// lifetime of the process.
+void initialize()
+{
+    Data::s_exceptionInstructions = new Instruction[maxOpcodeLength + 1];
+
+#if !ENABLE(JIT)
+    CLoop::initialize();
+
+#else // ENABLE(JIT)
+    // llint_entry (declared above, produced by the offline assembler) fills
+    // in the opcode dispatch map.
+    llint_entry(&Data::s_opcodeMap);
+
+    // Point every slot of the exception instruction buffer at the throw
+    // trampoline, so returning to any offset within it throws (see the
+    // comment on returnToThrow() in LLIntExceptions.h).
+    for (int i = 0; i < maxOpcodeLength + 1; ++i)
+        Data::s_exceptionInstructions[i].u.pointer =
+            LLInt::getCodePtr(llint_throw_from_slow_path_trampoline);
+#endif // ENABLE(JIT)
+}
+
+#if COMPILER(CLANG)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wmissing-noreturn"
+#endif
+// Debug-build verification that C++-side layouts, tags, and enum values
+// match the constants hard-coded in the offline-assembled interpreter.
+// All checks are ASSERTs/static_asserts, so this compiles to (nearly)
+// nothing in release builds.
+void Data::performAssertions(VM& vm)
+{
+    UNUSED_PARAM(vm);
+
+    // Assertions to match LowLevelInterpreter.asm. If you change any of this code, be
+    // prepared to change LowLevelInterpreter.asm as well!!
+
+    // These constants feed only the ASSERTs below; guard their definitions so
+    // release builds don't see unused variables.
+#ifndef NDEBUG
+#if USE(JSVALUE64)
+    const ptrdiff_t PtrSize = 8;
+    const ptrdiff_t CallFrameHeaderSlots = 5;
+#else // USE(JSVALUE64) // i.e. 32-bit version
+    const ptrdiff_t PtrSize = 4;
+    const ptrdiff_t CallFrameHeaderSlots = 4;
+#endif
+    const ptrdiff_t SlotSize = 8;
+#endif
+
+    ASSERT(sizeof(void*) == PtrSize);
+    ASSERT(sizeof(Register) == SlotSize);
+    ASSERT(JSStack::CallFrameHeaderSize == CallFrameHeaderSlots);
+
+    // Call frame header layout: callerFrame, returnPC, CodeBlock, Callee,
+    // ArgumentCount, then arguments beginning with 'this'.
+    ASSERT(!CallFrame::callerFrameOffset());
+    ASSERT(JSStack::CallerFrameAndPCSize == (PtrSize * 2) / SlotSize);
+    ASSERT(CallFrame::returnPCOffset() == CallFrame::callerFrameOffset() + PtrSize);
+    ASSERT(JSStack::CodeBlock * sizeof(Register) == CallFrame::returnPCOffset() + PtrSize);
+    ASSERT(JSStack::Callee * sizeof(Register) == JSStack::CodeBlock * sizeof(Register) + SlotSize);
+    ASSERT(JSStack::ArgumentCount * sizeof(Register) == JSStack::Callee * sizeof(Register) + SlotSize);
+    ASSERT(JSStack::ThisArgument * sizeof(Register) == JSStack::ArgumentCount * sizeof(Register) + SlotSize);
+    ASSERT(JSStack::CallFrameHeaderSize == JSStack::ThisArgument);
+
+    ASSERT(CallFrame::argumentOffsetIncludingThis(0) == JSStack::ThisArgument);
+
+    // The asm addresses the tag/payload halves of a JSValue by byte offset,
+    // which depends on endianness.
+#if CPU(BIG_ENDIAN)
+    ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag) == 0);
+    ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload) == 4);
+#else
+    ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag) == 4);
+    ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload) == 0);
+#endif
+    // JSValue encoding constants: 32-bit builds use tag values, 64-bit builds
+    // use NaN-boxing-style tag bits.
+#if USE(JSVALUE32_64)
+    ASSERT(JSValue::Int32Tag == static_cast<unsigned>(-1));
+    ASSERT(JSValue::BooleanTag == static_cast<unsigned>(-2));
+    ASSERT(JSValue::NullTag == static_cast<unsigned>(-3));
+    ASSERT(JSValue::UndefinedTag == static_cast<unsigned>(-4));
+    ASSERT(JSValue::CellTag == static_cast<unsigned>(-5));
+    ASSERT(JSValue::EmptyValueTag == static_cast<unsigned>(-6));
+    ASSERT(JSValue::DeletedValueTag == static_cast<unsigned>(-7));
+    ASSERT(JSValue::LowestTag == static_cast<unsigned>(-7));
+#else
+    ASSERT(TagBitTypeOther == 0x2);
+    ASSERT(TagBitBool == 0x4);
+    ASSERT(TagBitUndefined == 0x8);
+    ASSERT(ValueEmpty == 0x0);
+    ASSERT(ValueFalse == (TagBitTypeOther | TagBitBool));
+    ASSERT(ValueTrue == (TagBitTypeOther | TagBitBool | 1));
+    ASSERT(ValueUndefined == (TagBitTypeOther | TagBitUndefined));
+    ASSERT(ValueNull == TagBitTypeOther);
+#endif
+    // Per-platform slow-path frame extent; must agree with
+    // MaxFrameExtentForSlowPathCall.h.
+#if (CPU(X86_64) && !OS(WINDOWS)) || CPU(ARM64) || !ENABLE(JIT)
+    ASSERT(!maxFrameExtentForSlowPathCall);
+#elif CPU(ARM) || CPU(SH4)
+    ASSERT(maxFrameExtentForSlowPathCall == 24);
+#elif CPU(X86) || CPU(MIPS)
+    ASSERT(maxFrameExtentForSlowPathCall == 40);
+#elif CPU(X86_64) && OS(WINDOWS)
+    ASSERT(maxFrameExtentForSlowPathCall == 64);
+#endif
+    // JSType / type-flag / code-type values the asm hard-codes.
+    ASSERT(StringType == 6);
+    ASSERT(ObjectType == 18);
+    ASSERT(FinalObjectType == 19);
+    ASSERT(MasqueradesAsUndefined == 1);
+    ASSERT(ImplementsHasInstance == 2);
+    ASSERT(ImplementsDefaultHasInstance == 8);
+    ASSERT(FirstConstantRegisterIndex == 0x40000000);
+    ASSERT(GlobalCode == 0);
+    ASSERT(EvalCode == 1);
+    ASSERT(FunctionCode == 2);
+
+    static_assert(GlobalProperty == 0, "LLInt assumes GlobalProperty ResultType is == 0");
+    static_assert(GlobalVar == 1, "LLInt assumes GlobalVar ResultType is == 1");
+    static_assert(ClosureVar == 2, "LLInt assumes ClosureVar ResultType is == 2");
+    static_assert(LocalClosureVar == 3, "LLInt assumes LocalClosureVar ResultType is == 3");
+    static_assert(GlobalPropertyWithVarInjectionChecks == 4, "LLInt assumes GlobalPropertyWithVarInjectionChecks ResultType is == 4");
+    static_assert(GlobalVarWithVarInjectionChecks == 5, "LLInt assumes GlobalVarWithVarInjectionChecks ResultType is == 5");
+    static_assert(ClosureVarWithVarInjectionChecks == 6, "LLInt assumes ClosureVarWithVarInjectionChecks ResultType is == 6");
+    static_assert(Dynamic == 7, "LLInt assumes Dynamic ResultType is == 7");
+
+    ASSERT(ResolveModeAndType::mask == 0xffff);
+
+    ASSERT(MarkedBlock::blockMask == ~static_cast<decltype(MarkedBlock::blockMask)>(0x3fff));
+
+    // FIXME: make these assertions less horrible.
+    // NOTE(review): these poke at WTF::Vector's internal layout (size field
+    // position and buffer pointer at offset 0) — they break if Vector's
+    // representation changes.
+#if !ASSERT_DISABLED
+    Vector<int> testVector;
+    testVector.resize(42);
+    ASSERT(bitwise_cast<uint32_t*>(&testVector)[sizeof(void*)/sizeof(uint32_t) + 1] == 42);
+    ASSERT(bitwise_cast<int**>(&testVector)[0] == testVector.begin());
+#endif
+
+    ASSERT(StringImpl::s_hashFlag8BitBuffer == 8);
+}
+#if COMPILER(CLANG)
+#pragma clang diagnostic pop
+#endif
+
+} } // namespace JSC::LLInt
diff --git a/Source/JavaScriptCore/llint/LLIntData.h b/Source/JavaScriptCore/llint/LLIntData.h
new file mode 100644
index 000000000..7e7794b14
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntData.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LLIntData_h
+#define LLIntData_h
+
+#include "JSCJSValue.h"
+#include "Opcode.h"
+
+namespace JSC {
+
+class VM;
+struct Instruction;
+
+#if !ENABLE(JIT)
+typedef OpcodeID LLIntCode;
+#else
+typedef void (*LLIntCode)();
+#endif
+
+namespace LLInt {
+
+// Holder for the LLInt's process-global tables, populated by
+// LLInt::initialize() and read through the friend accessors declared below.
+class Data {
+public:
+    // Debug-checks that C++ layouts match the constants hard-coded in
+    // LowLevelInterpreter.asm (implemented in LLIntData.cpp).
+    static void performAssertions(VM&);
+
+private:
+    // Synthetic instruction stream whose slots all lead to the throw
+    // trampoline; used as a return target that throws (see LLIntExceptions.h).
+    static Instruction* s_exceptionInstructions;
+    // Per-opcode dispatch values; filled in by llint_entry() in the JIT
+    // build (see initialize() in LLIntData.cpp).
+    static Opcode s_opcodeMap[numOpcodeIDs];
+
+    friend void initialize();
+
+    friend Instruction* exceptionInstructions();
+    friend Opcode* opcodeMap();
+    friend Opcode getOpcode(OpcodeID);
+    friend void* getCodePtr(OpcodeID);
+};
+
+// One-time LLInt setup; fills the tables above.
+void initialize();
+// Accessor for the shared exception instruction buffer (friend of Data).
+inline Instruction* exceptionInstructions()
+{
+    return Data::s_exceptionInstructions;
+}
+
+// Accessor for the opcode dispatch table (friend of Data).
+inline Opcode* opcodeMap()
+{
+    return Data::s_opcodeMap;
+}
+
+// Maps an OpcodeID to the value the interpreter dispatches on: the entry
+// llint_entry() stored in s_opcodeMap when computed-goto dispatch is
+// enabled, otherwise just the id itself.
+inline Opcode getOpcode(OpcodeID id)
+{
+#if ENABLE(COMPUTED_GOTO_OPCODES)
+    return Data::s_opcodeMap[id];
+#else
+    return static_cast<Opcode>(id);
+#endif
+}
+
+// Untyped view of getOpcode()'s result, for storing into Instruction slots.
+ALWAYS_INLINE void* getCodePtr(OpcodeID id)
+{
+    return reinterpret_cast<void*>(getOpcode(id));
+}
+
+#if ENABLE(JIT)
+// JIT-build variant: the dispatch value is real machine code, so expose it
+// as a callable function pointer (LLIntCode).
+ALWAYS_INLINE LLIntCode getCodeFunctionPtr(OpcodeID codeId)
+{
+    return reinterpret_cast<LLIntCode>(getCodePtr(codeId));
+}
+#endif
+
+// Overload for slow-path glue helpers: converts a C function with the
+// EncodedJSValue() signature to an untyped code pointer.
+ALWAYS_INLINE void* getCodePtr(JSC::EncodedJSValue glueHelper())
+{
+    return bitwise_cast<void*>(glueHelper);
+}
+
+
+} } // namespace JSC::LLInt
+
+#endif // LLIntData_h
+
diff --git a/Source/JavaScriptCore/llint/LLIntEntrypoint.cpp b/Source/JavaScriptCore/llint/LLIntEntrypoint.cpp
new file mode 100644
index 000000000..9d00106b9
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntEntrypoint.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LLIntEntrypoint.h"
+#include "CodeBlock.h"
+#include "HeapInlines.h"
+#include "JITCode.h"
+#include "JSCellInlines.h"
+#include "JSObject.h"
+#include "LLIntThunks.h"
+#include "LowLevelInterpreter.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "StackAlignment.h"
+#include "VM.h"
+
+namespace JSC { namespace LLInt {
+
+// Installs the interpreter entry code for a function CodeBlock, choosing the
+// call vs. construct variant from the block's specialization kind. With a
+// usable JIT it installs CTI thunks (entry + arity-check); otherwise it
+// points directly at the LLInt prologue labels.
+static void setFunctionEntrypoint(VM& vm, CodeBlock* codeBlock)
+{
+    CodeSpecializationKind kind = codeBlock->specializationKind();
+
+#if ENABLE(JIT)
+    if (vm.canUseJIT()) {
+        if (kind == CodeForCall) {
+            codeBlock->setJITCode(
+                adoptRef(new DirectJITCode(vm.getCTIStub(functionForCallEntryThunkGenerator), vm.getCTIStub(functionForCallArityCheckThunkGenerator).code(), JITCode::InterpreterThunk)));
+            return;
+        }
+        ASSERT(kind == CodeForConstruct);
+        codeBlock->setJITCode(
+            adoptRef(new DirectJITCode(vm.getCTIStub(functionForConstructEntryThunkGenerator), vm.getCTIStub(functionForConstructArityCheckThunkGenerator).code(), JITCode::InterpreterThunk)));
+        return;
+    }
+#endif // ENABLE(JIT)
+
+    // JIT disabled (at compile time or runtime): direct LLInt entry; the
+    // second code pointer is the arity-check entry point.
+    UNUSED_PARAM(vm);
+    if (kind == CodeForCall) {
+        codeBlock->setJITCode(
+            adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_call_prologue), MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_call_arity_check), JITCode::InterpreterThunk)));
+        return;
+    }
+    ASSERT(kind == CodeForConstruct);
+    codeBlock->setJITCode(
+        adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_construct_prologue), MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_construct_arity_check), JITCode::InterpreterThunk)));
+}
+
+// Installs the interpreter entry code for an eval CodeBlock. Evals have no
+// arity check, hence the empty second MacroAssemblerCodePtr.
+static void setEvalEntrypoint(VM& vm, CodeBlock* codeBlock)
+{
+#if ENABLE(JIT)
+    if (vm.canUseJIT()) {
+        codeBlock->setJITCode(
+            adoptRef(new DirectJITCode(vm.getCTIStub(evalEntryThunkGenerator), MacroAssemblerCodePtr(), JITCode::InterpreterThunk)));
+        return;
+    }
+#endif // ENABLE(JIT)
+
+    UNUSED_PARAM(vm);
+    codeBlock->setJITCode(
+        adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_eval_prologue), MacroAssemblerCodePtr(), JITCode::InterpreterThunk)));
+}
+
+// Installs the interpreter entry code for a global (program) CodeBlock.
+// Programs have no arity check, hence the empty second MacroAssemblerCodePtr.
+static void setProgramEntrypoint(VM& vm, CodeBlock* codeBlock)
+{
+#if ENABLE(JIT)
+    if (vm.canUseJIT()) {
+        codeBlock->setJITCode(
+            adoptRef(new DirectJITCode(vm.getCTIStub(programEntryThunkGenerator), MacroAssemblerCodePtr(), JITCode::InterpreterThunk)));
+        return;
+    }
+#endif // ENABLE(JIT)
+
+    UNUSED_PARAM(vm);
+    codeBlock->setJITCode(
+        adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_program_prologue), MacroAssemblerCodePtr(), JITCode::InterpreterThunk)));
+}
+
+// Public entry: dispatches to the per-code-type installer above. The switch
+// covers every CodeType (GlobalCode/EvalCode/FunctionCode, asserted to be
+// 0/1/2 in LLIntData.cpp); falling out of it is a hard failure.
+void setEntrypoint(VM& vm, CodeBlock* codeBlock)
+{
+    switch (codeBlock->codeType()) {
+    case GlobalCode:
+        setProgramEntrypoint(vm, codeBlock);
+        return;
+    case EvalCode:
+        setEvalEntrypoint(vm, codeBlock);
+        return;
+    case FunctionCode:
+        setFunctionEntrypoint(vm, codeBlock);
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+// Number of frame registers the LLInt needs for this code block: the callee
+// registers (asserted to be already stack-alignment-rounded) plus room for
+// the largest slow-path call, rounded for the frame-pointer offset.
+unsigned frameRegisterCountFor(CodeBlock* codeBlock)
+{
+    ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeRegisters) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeRegisters)));
+
+    return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeRegisters + maxFrameExtentForSlowPathCallInRegisters);
+}
+
+} } // namespace JSC::LLInt
diff --git a/Source/JavaScriptCore/llint/LLIntEntrypoint.h b/Source/JavaScriptCore/llint/LLIntEntrypoint.h
new file mode 100644
index 000000000..5b8fd51cd
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntEntrypoint.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LLIntEntrypoint_h
+#define LLIntEntrypoint_h
+
+#include "CodeSpecializationKind.h"
+
+namespace JSC {
+
+class CodeBlock;
+class VM;
+
+namespace LLInt {
+
+// Installs LLInt entry code (or a JIT thunk entering the LLInt) on the block.
+void setEntrypoint(VM&, CodeBlock*);
+
+// Frame register count the LLInt requires for this code block.
+unsigned frameRegisterCountFor(CodeBlock*);
+
+} } // namespace JSC::LLInt
+
+#endif // LLIntEntrypoint_h
diff --git a/Source/JavaScriptCore/llint/LLIntExceptions.cpp b/Source/JavaScriptCore/llint/LLIntExceptions.cpp
new file mode 100644
index 000000000..039936e73
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntExceptions.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LLIntExceptions.h"
+#include "CallFrame.h"
+#include "CodeBlock.h"
+#include "Instruction.h"
+#include "LLIntCommon.h"
+#include "LowLevelInterpreter.h"
+#include "JSCInlines.h"
+
+namespace JSC { namespace LLInt {
+
+// Return target for code that has already fully set up the exception: just
+// hand back the shared exception instruction stream.
+Instruction* returnToThrowForThrownException(ExecState* exec)
+{
+    UNUSED_PARAM(exec);
+    return LLInt::exceptionInstructions();
+}
+
+// Return target that threads into the interpreter's exception handler;
+// identical to the above except for optional slow-path trace logging.
+Instruction* returnToThrow(ExecState* exec)
+{
+    UNUSED_PARAM(exec);
+#if LLINT_SLOW_PATH_TRACING
+    VM* vm = &exec->vm();
+    dataLog("Throwing exception ", vm->exception(), " (returnToThrow).\n");
+#endif
+    return LLInt::exceptionInstructions();
+}
+
+// Jump target used when throwing out of a call thunk: the code pointer of
+// the llint_throw_during_call_trampoline opcode, not an instruction stream.
+void* callToThrow(ExecState* exec)
+{
+    UNUSED_PARAM(exec);
+#if LLINT_SLOW_PATH_TRACING
+    VM* vm = &exec->vm();
+    dataLog("Throwing exception ", vm->exception(), " (callToThrow).\n");
+#endif
+    return LLInt::getCodePtr(llint_throw_during_call_trampoline);
+}
+
+} } // namespace JSC::LLInt
diff --git a/Source/JavaScriptCore/llint/LLIntExceptions.h b/Source/JavaScriptCore/llint/LLIntExceptions.h
new file mode 100644
index 000000000..bdeb5e4a7
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntExceptions.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LLIntExceptions_h
+#define LLIntExceptions_h
+
+#include <wtf/StdLibExtras.h>
+#include "MacroAssemblerCodeRef.h"
+
+namespace JSC {
+
+class ExecState;
+struct Instruction;
+
+namespace LLInt {
+
+// Tells you where to jump to if you want to return-to-throw, after you've already
+// set up all information needed to throw the exception.
+Instruction* returnToThrowForThrownException(ExecState*);
+
+// Gives you a PC that you can tell the interpreter to go to, which when advanced
+// between 1 and 9 slots will give you an "instruction" that threads to the
+// interpreter's exception handler.
+Instruction* returnToThrow(ExecState*);
+
+// Use this when you're throwing to a call thunk.
+void* callToThrow(ExecState*);
+
+} } // namespace JSC::LLInt
+
+#endif // LLIntExceptions_h
diff --git a/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h b/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
new file mode 100644
index 000000000..5ab9ced09
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#ifndef LLIntOfflineAsmConfig_h
#define LLIntOfflineAsmConfig_h

#include "LLIntCommon.h"
#include <wtf/Assertions.h>
#include <wtf/InlineASM.h>

// Exactly one OFFLINE_ASM_<backend> flag below ends up set to 1; the
// offlineasm translator reads these flags to pick the backend it targets.

#if !ENABLE(JIT)
// Without a JIT the LLInt is built as the portable C loop, so no machine
// backend is selected.
#define OFFLINE_ASM_C_LOOP 1
#define OFFLINE_ASM_X86 0
#define OFFLINE_ASM_X86_WIN 0
#define OFFLINE_ASM_ARM 0
#define OFFLINE_ASM_ARMv7 0
#define OFFLINE_ASM_ARMv7_TRADITIONAL 0
#define OFFLINE_ASM_ARM64 0
#define OFFLINE_ASM_X86_64 0
#define OFFLINE_ASM_X86_64_WIN 0
#define OFFLINE_ASM_ARMv7k 0
#define OFFLINE_ASM_ARMv7s 0
#define OFFLINE_ASM_MIPS 0
#define OFFLINE_ASM_SH4 0

#else // ENABLE(JIT)

#define OFFLINE_ASM_C_LOOP 0

#if CPU(X86) && !PLATFORM(WIN)
#define OFFLINE_ASM_X86 1
#else
#define OFFLINE_ASM_X86 0
#endif

#if CPU(X86) && PLATFORM(WIN)
#define OFFLINE_ASM_X86_WIN 1
#else
#define OFFLINE_ASM_X86_WIN 0
#endif

// The ARMv7k/ARMv7s sub-architectures are only identifiable via
// compiler-predefined macros, not via WTF's CPU() checks.
#ifdef __ARM_ARCH_7K__
#define OFFLINE_ASM_ARMv7k 1
#else
#define OFFLINE_ASM_ARMv7k 0
#endif

#ifdef __ARM_ARCH_7S__
#define OFFLINE_ASM_ARMv7s 1
#else
#define OFFLINE_ASM_ARMv7s 0
#endif

#if CPU(ARM_THUMB2)
#define OFFLINE_ASM_ARMv7 1
#else
#define OFFLINE_ASM_ARMv7 0
#endif

#if CPU(ARM_TRADITIONAL)
#if WTF_ARM_ARCH_AT_LEAST(7)
#define OFFLINE_ASM_ARMv7_TRADITIONAL 1
#define OFFLINE_ASM_ARM 0
#else
#define OFFLINE_ASM_ARM 1
#define OFFLINE_ASM_ARMv7_TRADITIONAL 0
#endif
#else
#define OFFLINE_ASM_ARMv7_TRADITIONAL 0
#define OFFLINE_ASM_ARM 0
#endif

#if CPU(X86_64) && !PLATFORM(WIN)
#define OFFLINE_ASM_X86_64 1
#else
#define OFFLINE_ASM_X86_64 0
#endif

#if CPU(X86_64) && PLATFORM(WIN)
#define OFFLINE_ASM_X86_64_WIN 1
#else
#define OFFLINE_ASM_X86_64_WIN 0
#endif

#if CPU(MIPS)
#define OFFLINE_ASM_MIPS 1
#else
#define OFFLINE_ASM_MIPS 0
#endif

#if CPU(SH4)
#define OFFLINE_ASM_SH4 1
#else
#define OFFLINE_ASM_SH4 0
#endif

#if CPU(ARM64)
#define OFFLINE_ASM_ARM64 1
#else
#define OFFLINE_ASM_ARM64 0
#endif

// For PIC builds on MIPS, OFFLINE_ASM_CPLOAD expands to the assembler's
// .cpload sequence (with instruction reordering suppressed around it);
// otherwise it expands to nothing.
#if CPU(MIPS)
#ifdef WTF_MIPS_PIC
#define S(x) #x
#define SX(x) S(x)
#define OFFLINE_ASM_CPLOAD(reg) \
    ".set noreorder\n" \
    ".cpload " SX(reg) "\n" \
    ".set reorder\n"
#else
#define OFFLINE_ASM_CPLOAD(reg)
#endif
#endif

#endif // ENABLE(JIT)

// Feature flags forwarded to the offline assembler / C loop.
#if USE(JSVALUE64)
#define OFFLINE_ASM_JSVALUE64 1
#else
#define OFFLINE_ASM_JSVALUE64 0
#endif

#if !ASSERT_DISABLED
#define OFFLINE_ASM_ASSERT_ENABLED 1
#else
#define OFFLINE_ASM_ASSERT_ENABLED 0
#endif

#if CPU(BIG_ENDIAN)
#define OFFLINE_ASM_BIG_ENDIAN 1
#else
#define OFFLINE_ASM_BIG_ENDIAN 0
#endif

#if LLINT_EXECUTION_TRACING
#define OFFLINE_ASM_EXECUTION_TRACING 1
#else
#define OFFLINE_ASM_EXECUTION_TRACING 0
#endif

#if ENABLE(GGC)
#define OFFLINE_ASM_GGC 1
#else
#define OFFLINE_ASM_GGC 0
#endif

#endif // LLIntOfflineAsmConfig_h
diff --git a/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
new file mode 100644
index 000000000..2d4677c66
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "ArrayProfile.h"
+#include "CodeBlock.h"
+#include "CommonSlowPaths.h"
+#include "Debugger.h"
+#include "DirectArguments.h"
+#include "Exception.h"
+#include "Executable.h"
+#include "Heap.h"
+#include "Interpreter.h"
+#include "JITStubs.h"
+#include "JSArray.h"
+#include "JSCell.h"
+#include "JSFunction.h"
+#include "VM.h"
+#include "JSEnvironmentRecord.h"
+#include "JSGlobalObject.h"
+#include "JSObject.h"
+#include "JSStack.h"
+#include "JSString.h"
+#include "JSTypeInfo.h"
+#include "JumpTable.h"
+#include "LLIntOfflineAsmConfig.h"
+#include "MarkedSpace.h"
+#include "ProtoCallFrame.h"
+#include "Structure.h"
+#include "StructureChain.h"
+#include "TypeProfiler.h"
+#include "TypeProfilerLog.h"
+#include "VMEntryRecord.h"
+#include "ValueProfile.h"
+#include "Watchdog.h"
+#include <wtf/text/StringImpl.h>
+
+
+namespace JSC {
+
// Wraps OBJECT_OFFSETOF so the generated extractor table stores plain unsigneds.
#define OFFLINE_ASM_OFFSETOF(clazz, field) (static_cast<unsigned>(OBJECT_OFFSETOF(clazz, field)))

// Host class for the generated offset-extraction code. Classes whose private
// field offsets the LLInt needs declare this class a friend.
class LLIntOffsetsExtractor {
public:
    static const unsigned* dummy();
};

const unsigned* LLIntOffsetsExtractor::dummy()
{
// This is a file generated by offlineasm/generate_offsets_extractor.rb, and contains code
// to create a table of offsets, sizes, and a header identifying what combination of
// Platform.h macros we have set. We include it inside of a method on LLIntOffsetsExtractor
// because the fields whose offsets we're extracting are mostly private. So we make their
// classes friends with LLIntOffsetsExtractor, and include the header here, to get the C++
// compiler to kindly step aside and yield to our best intentions.
#include "LLIntDesiredOffsets.h"
    return extractorTable;
}
+
+} // namespace JSC
+
// Entry point of the offsets-extractor helper binary. The printed address is
// irrelevant; the call exists only to keep dummy() (and hence extractorTable)
// reachable.
// NOTE(review): printf is assumed to be declared transitively via "config.h"
// or the WTF headers above — confirm before pruning any includes.
int main(int, char**)
{
    // Out of an abundance of caution, make sure that LLIntOffsetsExtractor::dummy() is live,
    // and the extractorTable is live, too.
    printf("%p\n", JSC::LLIntOffsetsExtractor::dummy());
    return 0;
}
+
+
diff --git a/Source/JavaScriptCore/llint/LLIntOpcode.h b/Source/JavaScriptCore/llint/LLIntOpcode.h
new file mode 100644
index 000000000..9b26676c4
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntOpcode.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LLIntOpcode_h
+#define LLIntOpcode_h
+
+#if !ENABLE(JIT)
+
+#define FOR_EACH_LLINT_NOJIT_NATIVE_HELPER(macro) \
+ FOR_EACH_CLOOP_BYTECODE_HELPER_ID(macro)
+
+#else // ENABLE(JIT)
+
+#define FOR_EACH_LLINT_NOJIT_NATIVE_HELPER(macro) \
+ // Nothing to do here. Use the JIT impl instead.
+
+#endif // !ENABLE(JIT)
+
+
+#define FOR_EACH_LLINT_NATIVE_HELPER(macro) \
+ FOR_EACH_LLINT_NOJIT_NATIVE_HELPER(macro) \
+ \
+ FOR_EACH_BYTECODE_HELPER_ID(macro)
+
+
+
+#define FOR_EACH_LLINT_OPCODE_EXTENSION(macro) FOR_EACH_LLINT_NATIVE_HELPER(macro)
+
+#endif // LLIntOpcode_h
diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
new file mode 100644
index 000000000..56db62f38
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
@@ -0,0 +1,1426 @@
+/*
+ * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LLIntSlowPaths.h"
+
+#include "ArrayConstructor.h"
+#include "CallFrame.h"
+#include "CommonSlowPaths.h"
+#include "CommonSlowPathsExceptions.h"
+#include "Error.h"
+#include "ErrorHandlingScope.h"
+#include "Exception.h"
+#include "ExceptionFuzz.h"
+#include "GetterSetter.h"
+#include "HostCallReturnValue.h"
+#include "Interpreter.h"
+#include "JIT.h"
+#include "JITExceptions.h"
+#include "JSLexicalEnvironment.h"
+#include "JSCInlines.h"
+#include "JSCJSValue.h"
+#include "JSGlobalObjectFunctions.h"
+#include "JSStackInlines.h"
+#include "JSString.h"
+#include "JSWithScope.h"
+#include "LLIntCommon.h"
+#include "LLIntExceptions.h"
+#include "LegacyProfiler.h"
+#include "LowLevelInterpreter.h"
+#include "ObjectConstructor.h"
+#include "ProtoCallFrame.h"
+#include "StructureRareDataInlines.h"
+#include "VMInlines.h"
+#include <wtf/StringPrintStream.h>
+
+namespace JSC { namespace LLInt {
+
// Standard slow-path prologue: caches the VM and registers this call frame
// with the VM via NativeCallFrameTracer.
#define LLINT_BEGIN_NO_SET_PC() \
    VM& vm = exec->vm(); \
    NativeCallFrameTracer tracer(&vm, exec)

// Records the current virtual PC on the call frame so unwinding/introspection
// sees an accurate location. The debug flavor also runs bytecodeOffset(pc),
// presumably to validate that pc belongs to this CodeBlock — TODO confirm.
#ifndef NDEBUG
#define LLINT_SET_PC_FOR_STUBS() do { \
        exec->codeBlock()->bytecodeOffset(pc); \
        exec->setCurrentVPC(pc + 1); \
    } while (false)
#else
#define LLINT_SET_PC_FOR_STUBS() do { \
        exec->setCurrentVPC(pc + 1); \
    } while (false)
#endif

#define LLINT_BEGIN() \
    LLINT_BEGIN_NO_SET_PC(); \
    LLINT_SET_PC_FOR_STUBS()

// Operand accessors: LLINT_OP reads a virtual register directly (unchecked);
// LLINT_OP_C goes through ExecState::r(), which also accepts constants.
#define LLINT_OP(index) (exec->uncheckedR(pc[index].u.operand))
#define LLINT_OP_C(index) (exec->r(pc[index].u.operand))

// Slow paths return two machine words; the asm glue unpacks them.
#define LLINT_RETURN_TWO(first, second) do { \
        return encodeResult(first, second); \
    } while (false)

#define LLINT_END_IMPL() LLINT_RETURN_TWO(pc, 0)

// Throw the given exception and redirect pc at the interpreter's throw handler.
#define LLINT_THROW(exceptionToThrow) do { \
        vm.throwException(exec, exceptionToThrow); \
        pc = returnToThrow(exec); \
        LLINT_END_IMPL(); \
    } while (false)

// If a pending exception exists (or exception fuzzing injects one), divert to
// the throw handler instead of continuing.
#define LLINT_CHECK_EXCEPTION() do { \
        doExceptionFuzzingIfEnabled(exec, "LLIntSlowPaths", pc); \
        if (UNLIKELY(vm.exception())) { \
            pc = returnToThrow(exec); \
            LLINT_END_IMPL(); \
        } \
    } while (false)

#define LLINT_END() do { \
        LLINT_CHECK_EXCEPTION(); \
        LLINT_END_IMPL(); \
    } while (false)

// Take either the branch target (last operand of the opcode) or fall through
// to the next instruction, after checking for exceptions.
#define LLINT_BRANCH(opcode, condition) do { \
        bool __b_condition = (condition); \
        LLINT_CHECK_EXCEPTION(); \
        if (__b_condition) \
            pc += pc[OPCODE_LENGTH(opcode) - 1].u.operand; \
        else \
            pc += OPCODE_LENGTH(opcode); \
        LLINT_END_IMPL(); \
    } while (false)

// Store a result into the destination register (operand 1) and finish.
#define LLINT_RETURN(value) do { \
        JSValue __r_returnValue = (value); \
        LLINT_CHECK_EXCEPTION(); \
        LLINT_OP(1) = __r_returnValue; \
        LLINT_END_IMPL(); \
    } while (false)

#define LLINT_RETURN_WITH_PC_ADJUSTMENT(value, pcAdjustment) do { \
        JSValue __r_returnValue = (value); \
        LLINT_CHECK_EXCEPTION(); \
        LLINT_OP(1) = __r_returnValue; \
        pc += (pcAdjustment); \
        LLINT_END_IMPL(); \
    } while (false)

// Like LLINT_RETURN, but also records the result in the opcode's value profile.
#define LLINT_RETURN_PROFILED(opcode, value) do { \
        JSValue __rp_returnValue = (value); \
        LLINT_CHECK_EXCEPTION(); \
        LLINT_OP(1) = __rp_returnValue; \
        LLINT_PROFILE_VALUE(opcode, __rp_returnValue); \
        LLINT_END_IMPL(); \
    } while (false)

#define LLINT_PROFILE_VALUE(opcode, value) do { \
        pc[OPCODE_LENGTH(opcode) - 1].u.profile->m_buckets[0] = \
        JSValue::encode(value); \
    } while (false)

#define LLINT_CALL_END_IMPL(exec, callTarget) LLINT_RETURN_TWO((callTarget), (exec))

// Variants of the above for call slow paths, where the second returned word is
// the callee frame rather than 0.
#define LLINT_CALL_THROW(exec, exceptionToThrow) do { \
        ExecState* __ct_exec = (exec); \
        vm.throwException(__ct_exec, exceptionToThrow); \
        LLINT_CALL_END_IMPL(0, callToThrow(__ct_exec)); \
    } while (false)

#define LLINT_CALL_CHECK_EXCEPTION(exec, execCallee) do { \
        ExecState* __cce_exec = (exec); \
        ExecState* __cce_execCallee = (execCallee); \
        doExceptionFuzzingIfEnabled(__cce_exec, "LLIntSlowPaths/call", nullptr); \
        if (UNLIKELY(vm.exception())) \
            LLINT_CALL_END_IMPL(0, callToThrow(__cce_execCallee)); \
    } while (false)

#define LLINT_CALL_RETURN(exec, execCallee, callTarget) do { \
        ExecState* __cr_exec = (exec); \
        ExecState* __cr_execCallee = (execCallee); \
        void* __cr_callTarget = (callTarget); \
        LLINT_CALL_CHECK_EXCEPTION(__cr_exec, __cr_execCallee); \
        LLINT_CALL_END_IMPL(__cr_execCallee, __cr_callTarget); \
    } while (false)

#define LLINT_RETURN_CALLEE_FRAME(execCallee) do { \
        ExecState* __rcf_exec = (execCallee); \
        LLINT_RETURN_TWO(pc, __rcf_exec); \
    } while (false)
+
+extern "C" SlowPathReturnType llint_trace_operand(ExecState* exec, Instruction* pc, int fromWhere, int operand)
+{
+ LLINT_BEGIN();
+ dataLogF("%p / %p: executing bc#%zu, op#%u: Trace(%d): %d: %d\n",
+ exec->codeBlock(),
+ exec,
+ static_cast<intptr_t>(pc - exec->codeBlock()->instructions().begin()),
+ exec->vm().interpreter->getOpcodeID(pc[0].u.opcode),
+ fromWhere,
+ operand,
+ pc[operand].u.operand);
+ LLINT_END();
+}
+
+extern "C" SlowPathReturnType llint_trace_value(ExecState* exec, Instruction* pc, int fromWhere, int operand)
+{
+ JSValue value = LLINT_OP_C(operand).jsValue();
+ union {
+ struct {
+ uint32_t tag;
+ uint32_t payload;
+ } bits;
+ EncodedJSValue asValue;
+ } u;
+ u.asValue = JSValue::encode(value);
+ dataLogF(
+ "%p / %p: executing bc#%zu, op#%u: Trace(%d): %d: %d: %08x:%08x: %s\n",
+ exec->codeBlock(),
+ exec,
+ static_cast<intptr_t>(pc - exec->codeBlock()->instructions().begin()),
+ exec->vm().interpreter->getOpcodeID(pc[0].u.opcode),
+ fromWhere,
+ operand,
+ pc[operand].u.operand,
+ u.bits.tag,
+ u.bits.payload,
+ toCString(value).data());
+ LLINT_END_IMPL();
+}
+
+LLINT_SLOW_PATH_DECL(trace_prologue)
+{
+ dataLogF("%p / %p: in prologue.\n", exec->codeBlock(), exec);
+ LLINT_END_IMPL();
+}
+
+static void traceFunctionPrologue(ExecState* exec, const char* comment, CodeSpecializationKind kind)
+{
+ JSFunction* callee = jsCast<JSFunction*>(exec->callee());
+ FunctionExecutable* executable = callee->jsExecutable();
+ CodeBlock* codeBlock = executable->codeBlockFor(kind);
+ dataLogF("%p / %p: in %s of function %p, executable %p; numVars = %u, numParameters = %u, numCalleeRegisters = %u, caller = %p.\n",
+ codeBlock, exec, comment, callee, executable,
+ codeBlock->m_numVars, codeBlock->numParameters(), codeBlock->m_numCalleeRegisters,
+ exec->callerFrame());
+}
+
+LLINT_SLOW_PATH_DECL(trace_prologue_function_for_call)
+{
+ traceFunctionPrologue(exec, "call prologue", CodeForCall);
+ LLINT_END_IMPL();
+}
+
+LLINT_SLOW_PATH_DECL(trace_prologue_function_for_construct)
+{
+ traceFunctionPrologue(exec, "construct prologue", CodeForConstruct);
+ LLINT_END_IMPL();
+}
+
+LLINT_SLOW_PATH_DECL(trace_arityCheck_for_call)
+{
+ traceFunctionPrologue(exec, "call arity check", CodeForCall);
+ LLINT_END_IMPL();
+}
+
+LLINT_SLOW_PATH_DECL(trace_arityCheck_for_construct)
+{
+ traceFunctionPrologue(exec, "construct arity check", CodeForConstruct);
+ LLINT_END_IMPL();
+}
+
+LLINT_SLOW_PATH_DECL(trace)
+{
+ dataLogF("%p / %p: executing bc#%zu, %s, pc = %p\n",
+ exec->codeBlock(),
+ exec,
+ static_cast<intptr_t>(pc - exec->codeBlock()->instructions().begin()),
+ opcodeNames[exec->vm().interpreter->getOpcodeID(pc[0].u.opcode)], pc);
+ if (exec->vm().interpreter->getOpcodeID(pc[0].u.opcode) == op_enter) {
+ dataLogF("Frame will eventually return to %p\n", exec->returnPC().value());
+ *bitwise_cast<volatile char*>(exec->returnPC().value());
+ }
+ if (exec->vm().interpreter->getOpcodeID(pc[0].u.opcode) == op_ret) {
+ dataLogF("Will be returning to %p\n", exec->returnPC().value());
+ dataLogF("The new cfr will be %p\n", exec->callerFrame());
+ }
+ LLINT_END_IMPL();
+}
+
+LLINT_SLOW_PATH_DECL(special_trace)
+{
+ dataLogF("%p / %p: executing special case bc#%zu, op#%u, return PC is %p\n",
+ exec->codeBlock(),
+ exec,
+ static_cast<intptr_t>(pc - exec->codeBlock()->instructions().begin()),
+ exec->vm().interpreter->getOpcodeID(pc[0].u.opcode),
+ exec->returnPC().value());
+ LLINT_END_IMPL();
+}
+
// Distinguishes OSR entry at the top of a function (Prologue) from entry via
// the arity-check path (ArityCheck).
enum EntryKind { Prologue, ArityCheck };

#if ENABLE(JIT)
inline bool shouldJIT(ExecState* exec)
{
    // You can modify this to turn off JITting without rebuilding the world.
    return exec->vm().canUseJIT();
}

// Returns true if we should try to OSR. May install freshly baseline-compiled
// code as a side effect.
inline bool jitCompileAndSetHeuristics(CodeBlock* codeBlock, ExecState* exec)
{
    VM& vm = exec->vm();
    DeferGCForAWhile deferGC(vm.heap); // My callers don't set top callframe, so we don't want to GC here at all.

    codeBlock->updateAllValueProfilePredictions();

    // Not hot enough yet: stay in the LLInt.
    if (!codeBlock->checkIfJITThresholdReached()) {
        if (Options::verboseOSR())
            dataLogF(" JIT threshold should be lifted.\n");
        return false;
    }

    switch (codeBlock->jitType()) {
    case JITCode::BaselineJIT: {
        // Already compiled by a previous trip through here.
        if (Options::verboseOSR())
            dataLogF(" Code was already compiled.\n");
        codeBlock->jitSoon();
        return true;
    }
    case JITCode::InterpreterThunk: {
        // Still LLInt-only: compile with the baseline JIT now.
        CompilationResult result = JIT::compile(&vm, codeBlock, JITCompilationCanFail);
        switch (result) {
        case CompilationFailed:
            if (Options::verboseOSR())
                dataLogF(" JIT compilation failed.\n");
            codeBlock->dontJITAnytimeSoon();
            return false;
        case CompilationSuccessful:
            if (Options::verboseOSR())
                dataLogF(" JIT compilation successful.\n");
            codeBlock->install();
            codeBlock->jitSoon();
            return true;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            return false;
        }
    }
    default:
        dataLog("Unexpected code block in LLInt: ", *codeBlock, "\n");
        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }
}

// Common OSR-entry logic: returns the machine-code address to jump to, or 0
// as the first word when the caller should keep interpreting.
static SlowPathReturnType entryOSR(ExecState* exec, Instruction*, CodeBlock* codeBlock, const char *name, EntryKind kind)
{
    if (Options::verboseOSR()) {
        dataLog(
            *codeBlock, ": Entered ", name, " with executeCounter = ",
            codeBlock->llintExecuteCounter(), "\n");
    }

    if (!shouldJIT(exec)) {
        codeBlock->dontJITAnytimeSoon();
        LLINT_RETURN_TWO(0, 0);
    }
    if (!jitCompileAndSetHeuristics(codeBlock, exec))
        LLINT_RETURN_TWO(0, 0);

    if (kind == Prologue)
        LLINT_RETURN_TWO(codeBlock->jitCode()->executableAddress(), 0);
    ASSERT(kind == ArityCheck);
    LLINT_RETURN_TWO(codeBlock->jitCode()->addressForCall(
        *codeBlock->vm(), codeBlock->ownerExecutable(), MustCheckArity,
        RegisterPreservationNotRequired).executableAddress(), 0);
}
#else // ENABLE(JIT)
// Without a JIT there is nowhere to OSR to; back off permanently.
static SlowPathReturnType entryOSR(ExecState* exec, Instruction*, CodeBlock* codeBlock, const char*, EntryKind)
{
    codeBlock->dontJITAnytimeSoon();
    LLINT_RETURN_TWO(0, exec);
}
#endif // ENABLE(JIT)
+
// OSR-entry slow paths invoked from the LLInt prologues. Each forwards to
// entryOSR with the appropriate CodeBlock and entry kind.
LLINT_SLOW_PATH_DECL(entry_osr)
{
    return entryOSR(exec, pc, exec->codeBlock(), "entry_osr", Prologue);
}

LLINT_SLOW_PATH_DECL(entry_osr_function_for_call)
{
    return entryOSR(exec, pc, jsCast<JSFunction*>(exec->callee())->jsExecutable()->codeBlockForCall(), "entry_osr_function_for_call", Prologue);
}

LLINT_SLOW_PATH_DECL(entry_osr_function_for_construct)
{
    return entryOSR(exec, pc, jsCast<JSFunction*>(exec->callee())->jsExecutable()->codeBlockForConstruct(), "entry_osr_function_for_construct", Prologue);
}

LLINT_SLOW_PATH_DECL(entry_osr_function_for_call_arityCheck)
{
    return entryOSR(exec, pc, jsCast<JSFunction*>(exec->callee())->jsExecutable()->codeBlockForCall(), "entry_osr_function_for_call_arityCheck", ArityCheck);
}

LLINT_SLOW_PATH_DECL(entry_osr_function_for_construct_arityCheck)
{
    return entryOSR(exec, pc, jsCast<JSFunction*>(exec->callee())->jsExecutable()->codeBlockForConstruct(), "entry_osr_function_for_construct_arityCheck", ArityCheck);
}
+
// Triggered from a hot loop back-edge. Tries to baseline-compile and, on
// success, maps the current bytecode index to its machine-code offset so the
// LLInt can jump into JIT code mid-loop. Returns {0, 0} to keep interpreting.
LLINT_SLOW_PATH_DECL(loop_osr)
{
    CodeBlock* codeBlock = exec->codeBlock();

#if ENABLE(JIT)
    if (Options::verboseOSR()) {
        dataLog(
            *codeBlock, ": Entered loop_osr with executeCounter = ",
            codeBlock->llintExecuteCounter(), "\n");
    }

    if (!shouldJIT(exec)) {
        codeBlock->dontJITAnytimeSoon();
        LLINT_RETURN_TWO(0, 0);
    }

    if (!jitCompileAndSetHeuristics(codeBlock, exec))
        LLINT_RETURN_TWO(0, 0);

    ASSERT(codeBlock->jitType() == JITCode::BaselineJIT);

    // Locate the machine-code offset for the current bytecode index in the
    // baseline JIT's code map.
    Vector<BytecodeAndMachineOffset> map;
    codeBlock->jitCodeMap()->decode(map);
    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(map, map.size(), pc - codeBlock->instructions().begin(), BytecodeAndMachineOffset::getBytecodeIndex);
    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == static_cast<unsigned>(pc - codeBlock->instructions().begin()));

    void* jumpTarget = codeBlock->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);
    ASSERT(jumpTarget);

    LLINT_RETURN_TWO(jumpTarget, exec->topOfFrame());
#else // ENABLE(JIT)
    UNUSED_PARAM(pc);
    codeBlock->dontJITAnytimeSoon();
    LLINT_RETURN_TWO(0, 0);
#endif // ENABLE(JIT)
}

// Triggered when a function has executed enough times that compiling it (to
// be used on its next invocation) looks profitable. Never jumps anywhere;
// only updates compilation heuristics.
LLINT_SLOW_PATH_DECL(replace)
{
    CodeBlock* codeBlock = exec->codeBlock();

#if ENABLE(JIT)
    if (Options::verboseOSR()) {
        dataLog(
            *codeBlock, ": Entered replace with executeCounter = ",
            codeBlock->llintExecuteCounter(), "\n");
    }

    if (shouldJIT(exec))
        jitCompileAndSetHeuristics(codeBlock, exec);
    else
        codeBlock->dontJITAnytimeSoon();
    LLINT_END_IMPL();
#else // ENABLE(JIT)
    codeBlock->dontJITAnytimeSoon();
    LLINT_END_IMPL();
#endif // ENABLE(JIT)
}

// Slow path for the prologue's stack-height check: grows the JSStack when
// possible (C-loop builds only); otherwise throws StackOverflowError.
LLINT_SLOW_PATH_DECL(stack_check)
{
    LLINT_BEGIN();
#if LLINT_SLOW_PATH_TRACING
    dataLogF("Checking stack height with exec = %p.\n", exec);
    dataLogF("CodeBlock = %p.\n", exec->codeBlock());
    dataLogF("Num callee registers = %u.\n", exec->codeBlock()->m_numCalleeRegisters);
    dataLogF("Num vars = %u.\n", exec->codeBlock()->m_numVars);

#if ENABLE(JIT)
    dataLogF("Current end is at %p.\n", exec->vm().stackLimit());
#else
    dataLogF("Current end is at %p.\n", exec->vm().jsStackLimit());
#endif

#endif
    // If the stack check succeeds and we don't need to throw the error, then
    // we'll return 0 instead. The prologue will check for a non-zero value
    // when determining whether to set the callFrame or not.

    // For JIT enabled builds which uses the C stack, the stack is not growable.
    // Hence, if we get here, then we know a stack overflow is imminent. So, just
    // throw the StackOverflowError unconditionally.
#if !ENABLE(JIT)
    ASSERT(!vm.interpreter->stack().containsAddress(exec->topOfFrame()));
    if (LIKELY(vm.interpreter->stack().ensureCapacityFor(exec->topOfFrame())))
        LLINT_RETURN_TWO(pc, 0);
#endif

    vm.topCallFrame = exec;
    ErrorHandlingScope errorScope(vm);
    CommonSlowPaths::interpreterThrowInCaller(exec, createStackOverflowError(exec));
    pc = returnToThrowForThrownException(exec);
    LLINT_RETURN_TWO(pc, exec);
}
+
// Allocates an empty object using the structure recorded in the opcode's
// object allocation profile (operand 3).
LLINT_SLOW_PATH_DECL(slow_path_new_object)
{
    LLINT_BEGIN();
    LLINT_RETURN(constructEmptyObject(exec, pc[3].u.objectAllocationProfile->structure()));
}

// Allocates an array from a contiguous run of registers: first element at
// operand 2, element count in operand 3, shape hint in the allocation profile.
LLINT_SLOW_PATH_DECL(slow_path_new_array)
{
    LLINT_BEGIN();
    LLINT_RETURN(constructArrayNegativeIndexed(exec, pc[4].u.arrayAllocationProfile, bitwise_cast<JSValue*>(&LLINT_OP(2)), pc[3].u.operand));
}

// Implements `new Array(size)` semantics, including the single-number-argument
// quirk; the size value is in operand 2.
LLINT_SLOW_PATH_DECL(slow_path_new_array_with_size)
{
    LLINT_BEGIN();
    LLINT_RETURN(constructArrayWithSizeQuirk(exec, pc[3].u.arrayAllocationProfile, exec->lexicalGlobalObject(), LLINT_OP_C(2).jsValue()));
}

// Allocates an array from a constant buffer stored on the CodeBlock
// (buffer index in operand 2, length in operand 3).
LLINT_SLOW_PATH_DECL(slow_path_new_array_buffer)
{
    LLINT_BEGIN();
    LLINT_RETURN(constructArray(exec, pc[4].u.arrayAllocationProfile, exec->codeBlock()->constantBuffer(pc[2].u.operand), pc[3].u.operand));
}

// Materializes a RegExp object from the CodeBlock's regexp pool; throws a
// SyntaxError if the stored pattern/flags are invalid.
LLINT_SLOW_PATH_DECL(slow_path_new_regexp)
{
    LLINT_BEGIN();
    RegExp* regExp = exec->codeBlock()->regexp(pc[2].u.operand);
    if (!regExp->isValid())
        LLINT_THROW(createSyntaxError(exec, "Invalid flag supplied to RegExp constructor."));
    LLINT_RETURN(RegExpObject::create(vm, exec->lexicalGlobalObject()->regExpStructure(), regExp));
}
+
// First half of instanceof: if the base implements a custom hasInstance hook,
// run it and skip ahead by the jump offset in operand 4; otherwise throw if
// the base is not an object. The default case falls through to
// slow_path_instanceof in the bytecode stream.
LLINT_SLOW_PATH_DECL(slow_path_check_has_instance)
{
    LLINT_BEGIN();
    
    JSValue value = LLINT_OP_C(2).jsValue();
    JSValue baseVal = LLINT_OP_C(3).jsValue();
    if (baseVal.isObject()) {
        JSObject* baseObject = asObject(baseVal);
        ASSERT(!baseObject->structure()->typeInfo().implementsDefaultHasInstance());
        if (baseObject->structure()->typeInfo().implementsHasInstance()) {
            JSValue result = jsBoolean(baseObject->methodTable()->customHasInstance(baseObject, exec, value));
            LLINT_RETURN_WITH_PC_ADJUSTMENT(result, pc[4].u.operand);
        }
    }
    LLINT_THROW(createInvalidInstanceofParameterError(exec, baseVal));
}

// Default instanceof: walks the prototype chain of |value| looking for |proto|.
LLINT_SLOW_PATH_DECL(slow_path_instanceof)
{
    LLINT_BEGIN();
    JSValue value = LLINT_OP_C(2).jsValue();
    JSValue proto = LLINT_OP_C(3).jsValue();
    ASSERT(!value.isObject() || !proto.isObject());
    LLINT_RETURN(jsBoolean(JSObject::defaultHasInstance(exec, value, proto)));
}
+
// Generic get_by_id: performs the property lookup, then — when the access was
// a cacheable own-property value load — patches this instruction in place to
// a specialized fast opcode (inline load, out-of-line load, or array-length)
// so later executions skip this slow path.
LLINT_SLOW_PATH_DECL(slow_path_get_by_id)
{
    LLINT_BEGIN();
    CodeBlock* codeBlock = exec->codeBlock();
    const Identifier& ident = codeBlock->identifier(pc[3].u.operand);
    JSValue baseValue = LLINT_OP_C(2).jsValue();
    PropertySlot slot(baseValue);

    JSValue result = baseValue.get(exec, ident, slot);
    LLINT_CHECK_EXCEPTION();
    LLINT_OP(1) = result;

    // Cache a simple own-property value load: remember the structure in
    // operand 4 and the storage offset in operand 5, and rewrite the opcode.
    if (!LLINT_ALWAYS_ACCESS_SLOW
        && baseValue.isCell()
        && slot.isCacheable()
        && slot.slotBase() == baseValue
        && slot.isCacheableValue()) {

        JSCell* baseCell = baseValue.asCell();
        Structure* structure = baseCell->structure();

        if (!structure->isUncacheableDictionary()
            && !structure->typeInfo().prohibitsPropertyCaching()
            && !structure->typeInfo().newImpurePropertyFiresWatchpoints()) {
            ConcurrentJITLocker locker(codeBlock->m_lock);

            pc[4].u.structure.set(
                vm, codeBlock->ownerExecutable(), structure);
            if (isInlineOffset(slot.cachedOffset())) {
                pc[0].u.opcode = LLInt::getOpcode(op_get_by_id);
                pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage();
            } else {
                pc[0].u.opcode = LLInt::getOpcode(op_get_by_id_out_of_line);
                pc[5].u.operand = offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue);
            }
        }
    }

    // Special-case 'length' on a JSArray by patching to op_get_array_length
    // with an array profile in operand 4.
    if (!LLINT_ALWAYS_ACCESS_SLOW
        && isJSArray(baseValue)
        && ident == exec->propertyNames().length) {
        pc[0].u.opcode = LLInt::getOpcode(op_get_array_length);
        ArrayProfile* arrayProfile = codeBlock->getOrAddArrayProfile(pc - codeBlock->instructions().begin());
        arrayProfile->observeStructure(baseValue.asCell()->structure());
        pc[4].u.arrayProfile = arrayProfile;
    }

    // Feed the result into the opcode's value profile.
    pc[OPCODE_LENGTH(op_get_by_id) - 1].u.profile->m_buckets[0] = JSValue::encode(result);
    LLINT_END();
}
+
+LLINT_SLOW_PATH_DECL(slow_path_get_arguments_length)
+{
+ LLINT_BEGIN();
+ CodeBlock* codeBlock = exec->codeBlock();
+ const Identifier& ident = codeBlock->identifier(pc[3].u.operand);
+ JSValue baseValue = LLINT_OP(2).jsValue();
+ PropertySlot slot(baseValue);
+ LLINT_RETURN(baseValue.get(exec, ident, slot));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_put_by_id)
+{
+ LLINT_BEGIN();
+ CodeBlock* codeBlock = exec->codeBlock();
+ const Identifier& ident = codeBlock->identifier(pc[2].u.operand);
+
+ JSValue baseValue = LLINT_OP_C(1).jsValue();
+ PutPropertySlot slot(baseValue, codeBlock->isStrictMode(), codeBlock->putByIdContext());
+ if (pc[8].u.operand)
+ asObject(baseValue)->putDirect(vm, ident, LLINT_OP_C(3).jsValue(), slot);
+ else
+ baseValue.put(exec, ident, LLINT_OP_C(3).jsValue(), slot);
+ LLINT_CHECK_EXCEPTION();
+
+ if (!LLINT_ALWAYS_ACCESS_SLOW
+ && baseValue.isCell()
+ && slot.isCacheablePut()) {
+
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure();
+
+ if (!structure->isUncacheableDictionary()
+ && !structure->typeInfo().prohibitsPropertyCaching()
+ && baseCell == slot.base()) {
+
+ if (slot.type() == PutPropertySlot::NewProperty) {
+ GCSafeConcurrentJITLocker locker(codeBlock->m_lock, vm.heap);
+
+ if (!structure->isDictionary() && structure->previousID()->outOfLineCapacity() == structure->outOfLineCapacity()) {
+ ASSERT(structure->previousID()->transitionWatchpointSetHasBeenInvalidated());
+
+ // This is needed because some of the methods we call
+ // below may GC.
+ pc[0].u.opcode = LLInt::getOpcode(op_put_by_id);
+
+ if (normalizePrototypeChain(exec, structure) != InvalidPrototypeChain) {
+ ASSERT(structure->previousID()->isObject());
+ pc[4].u.structure.set(
+ vm, codeBlock->ownerExecutable(), structure->previousID());
+ if (isInlineOffset(slot.cachedOffset()))
+ pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage();
+ else
+ pc[5].u.operand = offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue);
+ pc[6].u.structure.set(
+ vm, codeBlock->ownerExecutable(), structure);
+ StructureChain* chain = structure->prototypeChain(exec);
+ ASSERT(chain);
+ pc[7].u.structureChain.set(
+ vm, codeBlock->ownerExecutable(), chain);
+
+ if (pc[8].u.operand) {
+ if (isInlineOffset(slot.cachedOffset()))
+ pc[0].u.opcode = LLInt::getOpcode(op_put_by_id_transition_direct);
+ else
+ pc[0].u.opcode = LLInt::getOpcode(op_put_by_id_transition_direct_out_of_line);
+ } else {
+ if (isInlineOffset(slot.cachedOffset()))
+ pc[0].u.opcode = LLInt::getOpcode(op_put_by_id_transition_normal);
+ else
+ pc[0].u.opcode = LLInt::getOpcode(op_put_by_id_transition_normal_out_of_line);
+ }
+ }
+ }
+ } else {
+ structure->didCachePropertyReplacement(vm, slot.cachedOffset());
+ pc[4].u.structure.set(
+ vm, codeBlock->ownerExecutable(), structure);
+ if (isInlineOffset(slot.cachedOffset())) {
+ pc[0].u.opcode = LLInt::getOpcode(op_put_by_id);
+ pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage();
+ } else {
+ pc[0].u.opcode = LLInt::getOpcode(op_put_by_id_out_of_line);
+ pc[5].u.operand = offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue);
+ }
+ }
+ }
+ }
+
+ LLINT_END();
+}
+
+// Slow path for op_del_by_id: delete a named property from the base value
+// (coerced to an object). A failed delete throws a TypeError in strict mode;
+// otherwise the boolean result is written back to the destination register.
+LLINT_SLOW_PATH_DECL(slow_path_del_by_id)
+{
+    LLINT_BEGIN();
+    CodeBlock* codeBlock = exec->codeBlock();
+    JSObject* baseObject = LLINT_OP_C(2).jsValue().toObject(exec);
+    bool couldDelete = baseObject->methodTable()->deleteProperty(baseObject, exec, codeBlock->identifier(pc[3].u.operand));
+    LLINT_CHECK_EXCEPTION();
+    if (!couldDelete && codeBlock->isStrictMode())
+        LLINT_THROW(createTypeError(exec, "Unable to delete property."));
+    LLINT_RETURN(jsBoolean(couldDelete));
+}
+
+// Shared implementation of baseValue[subscript] for the get_by_val slow paths.
+// Tries three paths in order of decreasing speed:
+//   1. cell base + string subscript: fast own-property lookup keyed by an
+//      already-existing atomic string (only if the structure permits it);
+//   2. uint32 subscript: single-character string access or indexed get;
+//   3. generic: ToPropertyKey on the subscript (which may run arbitrary JS),
+//      then an ordinary property get.
+// Returns jsUndefined() if an exception was thrown along the way.
+inline JSValue getByVal(ExecState* exec, JSValue baseValue, JSValue subscript)
+{
+    if (LIKELY(baseValue.isCell() && subscript.isString())) {
+        VM& vm = exec->vm();
+        Structure& structure = *baseValue.asCell()->structure(vm);
+        if (JSCell::canUseFastGetOwnProperty(structure)) {
+            // Only an atomic string that already exists can hit the fast path;
+            // if the subscript's string isn't atomized yet, fall through.
+            if (RefPtr<AtomicStringImpl> existingAtomicString = asString(subscript)->toExistingAtomicString(exec)) {
+                if (JSValue result = baseValue.asCell()->fastGetOwnProperty(vm, structure, existingAtomicString.get()))
+                    return result;
+            }
+        }
+    }
+
+    if (subscript.isUInt32()) {
+        uint32_t i = subscript.asUInt32();
+        if (isJSString(baseValue) && asString(baseValue)->canGetIndex(i))
+            return asString(baseValue)->getIndex(exec, i);
+
+        return baseValue.get(exec, i);
+    }
+
+    // ES6 12.3.2.1: the base must be object-coercible before the key
+    // conversion runs, since toPropertyKey can execute user code.
+    baseValue.requireObjectCoercible(exec);
+    if (exec->hadException())
+        return jsUndefined();
+    auto property = subscript.toPropertyKey(exec);
+    if (exec->hadException())
+        return jsUndefined();
+    return baseValue.get(exec, property);
+}
+
+// Slow path for op_get_by_val: delegates to getByVal() above and records the
+// result in the opcode's value profile.
+LLINT_SLOW_PATH_DECL(slow_path_get_by_val)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN_PROFILED(op_get_by_val, getByVal(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue()));
+}
+
+// Slow path for op_put_by_val: baseValue[subscript] = value.
+LLINT_SLOW_PATH_DECL(slow_path_put_by_val)
+{
+    LLINT_BEGIN();
+
+    JSValue baseValue = LLINT_OP_C(1).jsValue();
+    JSValue subscript = LLINT_OP_C(2).jsValue();
+    JSValue value = LLINT_OP_C(3).jsValue();
+
+    if (LIKELY(subscript.isUInt32())) {
+        uint32_t i = subscript.asUInt32();
+        if (baseValue.isObject()) {
+            JSObject* object = asObject(baseValue);
+            // Fast indexed store when the object's storage allows it; otherwise
+            // go through the method table's putByIndex.
+            if (object->canSetIndexQuickly(i))
+                object->setIndexQuickly(vm, i, value);
+            else
+                object->methodTable()->putByIndex(object, exec, i, value, exec->codeBlock()->isStrictMode());
+            LLINT_END();
+        }
+        // Non-object base with an integer subscript.
+        baseValue.putByIndex(exec, i, value, exec->codeBlock()->isStrictMode());
+        LLINT_END();
+    }
+
+    // Generic path: ToPropertyKey may run arbitrary JS, so check for an
+    // exception before performing the put.
+    auto property = subscript.toPropertyKey(exec);
+    LLINT_CHECK_EXCEPTION();
+    PutPropertySlot slot(baseValue, exec->codeBlock()->isStrictMode());
+    baseValue.put(exec, property, value, slot);
+    LLINT_END();
+}
+
+// Slow path for op_put_by_val_direct: like put_by_val, but stores directly on
+// the base object itself (a "direct" put — note the putDirect* calls below),
+// rather than a generic put. The bytecode guarantees an object base.
+LLINT_SLOW_PATH_DECL(slow_path_put_by_val_direct)
+{
+    LLINT_BEGIN();
+
+    JSValue baseValue = LLINT_OP_C(1).jsValue();
+    JSValue subscript = LLINT_OP_C(2).jsValue();
+    JSValue value = LLINT_OP_C(3).jsValue();
+    RELEASE_ASSERT(baseValue.isObject());
+    JSObject* baseObject = asObject(baseValue);
+    bool isStrictMode = exec->codeBlock()->isStrictMode();
+    if (LIKELY(subscript.isUInt32())) {
+        // Despite its name, JSValue::isUInt32 will return true only for positive boxed int32_t; all those values are valid array indices.
+        ASSERT(isIndex(subscript.asUInt32()));
+        baseObject->putDirectIndex(exec, subscript.asUInt32(), value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow);
+        LLINT_END();
+    }
+
+    // A double subscript that round-trips exactly through uint32 is treated
+    // as an array index too.
+    if (subscript.isDouble()) {
+        double subscriptAsDouble = subscript.asDouble();
+        uint32_t subscriptAsUInt32 = static_cast<uint32_t>(subscriptAsDouble);
+        if (subscriptAsDouble == subscriptAsUInt32 && isIndex(subscriptAsUInt32)) {
+            baseObject->putDirectIndex(exec, subscriptAsUInt32, value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow);
+            LLINT_END();
+        }
+    }
+
+    // Don't put to an object if toString threw an exception.
+    auto property = subscript.toPropertyKey(exec);
+    if (exec->vm().exception())
+        LLINT_END();
+
+    // A string key that parses as an index still has to take the indexed path.
+    if (Optional<uint32_t> index = parseIndex(property))
+        baseObject->putDirectIndex(exec, index.value(), value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow);
+    else {
+        PutPropertySlot slot(baseObject, isStrictMode);
+        baseObject->putDirect(exec->vm(), property, value, slot);
+    }
+    LLINT_END();
+}
+
+// Slow path for op_del_by_val: delete baseValue[subscript]. Uses the indexed
+// delete when the subscript is a uint32, otherwise converts the subscript to
+// a property key first. Strict mode turns a failed delete into a TypeError.
+LLINT_SLOW_PATH_DECL(slow_path_del_by_val)
+{
+    LLINT_BEGIN();
+    JSValue baseValue = LLINT_OP_C(2).jsValue();
+    JSObject* baseObject = baseValue.toObject(exec);
+
+    JSValue subscript = LLINT_OP_C(3).jsValue();
+
+    bool couldDelete;
+
+    uint32_t i;
+    if (subscript.getUInt32(i))
+        couldDelete = baseObject->methodTable()->deletePropertyByIndex(baseObject, exec, i);
+    else {
+        // toObject above may have thrown; bail before running ToPropertyKey.
+        LLINT_CHECK_EXCEPTION();
+        auto property = subscript.toPropertyKey(exec);
+        LLINT_CHECK_EXCEPTION();
+        couldDelete = baseObject->methodTable()->deleteProperty(baseObject, exec, property);
+    }
+
+    if (!couldDelete && exec->codeBlock()->isStrictMode())
+        LLINT_THROW(createTypeError(exec, "Unable to delete property."));
+
+    LLINT_RETURN(jsBoolean(couldDelete));
+}
+
+// Slow path for op_put_by_index: direct indexed store into what the bytecode
+// guarantees is a JSArray (asserted below), with a constant index operand.
+LLINT_SLOW_PATH_DECL(slow_path_put_by_index)
+{
+    LLINT_BEGIN();
+    JSValue arrayValue = LLINT_OP_C(1).jsValue();
+    ASSERT(isJSArray(arrayValue));
+    asArray(arrayValue)->putDirectIndex(exec, pc[2].u.operand, LLINT_OP_C(3).jsValue());
+    LLINT_END();
+}
+
+// Slow path for op_put_getter_by_id: install a getter function for a named
+// property on the base object. The bytecode guarantees both the base and the
+// getter are objects (asserted).
+LLINT_SLOW_PATH_DECL(slow_path_put_getter_by_id)
+{
+    LLINT_BEGIN();
+    ASSERT(LLINT_OP(1).jsValue().isObject());
+    JSObject* baseObj = asObject(LLINT_OP(1).jsValue());
+
+    JSValue getter = LLINT_OP(3).jsValue();
+    ASSERT(getter.isObject());
+
+    baseObj->putGetter(exec, exec->codeBlock()->identifier(pc[2].u.operand), asObject(getter));
+    LLINT_END();
+}
+
+// Slow path for op_put_setter_by_id: install a setter function for a named
+// property on the base object. Mirrors slow_path_put_getter_by_id.
+LLINT_SLOW_PATH_DECL(slow_path_put_setter_by_id)
+{
+    LLINT_BEGIN();
+    ASSERT(LLINT_OP(1).jsValue().isObject());
+    JSObject* baseObj = asObject(LLINT_OP(1).jsValue());
+
+    JSValue setter = LLINT_OP(3).jsValue();
+    ASSERT(setter.isObject());
+
+    baseObj->putSetter(exec, exec->codeBlock()->identifier(pc[2].u.operand), asObject(setter));
+    LLINT_END();
+}
+
+// Slow path for op_put_getter_setter: build a GetterSetter cell from the
+// getter/setter operands (either may be undefined, but not both — asserted)
+// and define it as an accessor property on the base object.
+LLINT_SLOW_PATH_DECL(slow_path_put_getter_setter)
+{
+    LLINT_BEGIN();
+    ASSERT(LLINT_OP(1).jsValue().isObject());
+    JSObject* baseObj = asObject(LLINT_OP(1).jsValue());
+
+    GetterSetter* accessor = GetterSetter::create(vm, exec->lexicalGlobalObject());
+    LLINT_CHECK_EXCEPTION();
+
+    JSValue getter = LLINT_OP(3).jsValue();
+    JSValue setter = LLINT_OP(4).jsValue();
+    ASSERT(getter.isObject() || getter.isUndefined());
+    ASSERT(setter.isObject() || setter.isUndefined());
+    ASSERT(getter.isObject() || setter.isObject());
+
+    if (!getter.isUndefined())
+        accessor->setGetter(vm, exec->lexicalGlobalObject(), asObject(getter));
+    if (!setter.isUndefined())
+        accessor->setSetter(vm, exec->lexicalGlobalObject(), asObject(setter));
+    baseObj->putDirectAccessor(
+        exec,
+        exec->codeBlock()->identifier(pc[2].u.operand),
+        accessor, Accessor);
+    LLINT_END();
+}
+
+// Slow paths for the conditional-jump opcodes. Each evaluates its operand(s)
+// with full JS semantics (toBoolean / the jsLess helpers may call back into
+// user code) and then LLINT_BRANCH takes or skips the jump based on the
+// condition. The "greater" variants reuse jsLess/jsLessEq with the operands
+// swapped and the template flag flipped — presumably the operand-evaluation-
+// order (LeftFirst) flag of the abstract relational comparison; confirm
+// against the jsLess definition.
+LLINT_SLOW_PATH_DECL(slow_path_jtrue)
+{
+    LLINT_BEGIN();
+    LLINT_BRANCH(op_jtrue, LLINT_OP_C(1).jsValue().toBoolean(exec));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_jfalse)
+{
+    LLINT_BEGIN();
+    LLINT_BRANCH(op_jfalse, !LLINT_OP_C(1).jsValue().toBoolean(exec));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_jless)
+{
+    LLINT_BEGIN();
+    LLINT_BRANCH(op_jless, jsLess<true>(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue()));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_jnless)
+{
+    LLINT_BEGIN();
+    LLINT_BRANCH(op_jnless, !jsLess<true>(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue()));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_jgreater)
+{
+    LLINT_BEGIN();
+    LLINT_BRANCH(op_jgreater, jsLess<false>(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(1).jsValue()));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_jngreater)
+{
+    LLINT_BEGIN();
+    LLINT_BRANCH(op_jngreater, !jsLess<false>(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(1).jsValue()));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_jlesseq)
+{
+    LLINT_BEGIN();
+    LLINT_BRANCH(op_jlesseq, jsLessEq<true>(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue()));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_jnlesseq)
+{
+    LLINT_BEGIN();
+    LLINT_BRANCH(op_jnlesseq, !jsLessEq<true>(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue()));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_jgreatereq)
+{
+    LLINT_BEGIN();
+    LLINT_BRANCH(op_jgreatereq, jsLessEq<false>(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(1).jsValue()));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_jngreatereq)
+{
+    LLINT_BEGIN();
+    LLINT_BRANCH(op_jngreatereq, !jsLessEq<false>(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(1).jsValue()));
+}
+
+// Slow path for op_switch_imm. The scrutinee is asserted to be a double here
+// (presumably int32 scrutinees are handled inline in the interpreter —
+// confirm against LowLevelInterpreter.asm). If the double is exactly an
+// int32, look up the jump offset in the switch table; otherwise fall through
+// to the default offset. Note: advances pc directly instead of returning a
+// value.
+LLINT_SLOW_PATH_DECL(slow_path_switch_imm)
+{
+    LLINT_BEGIN();
+    JSValue scrutinee = LLINT_OP_C(3).jsValue();
+    ASSERT(scrutinee.isDouble());
+    double value = scrutinee.asDouble();
+    int32_t intValue = static_cast<int32_t>(value);
+    int defaultOffset = pc[2].u.operand;
+    if (value == intValue) {
+        CodeBlock* codeBlock = exec->codeBlock();
+        pc += codeBlock->switchJumpTable(pc[1].u.operand).offsetForValue(intValue, defaultOffset);
+    } else
+        pc += defaultOffset;
+    LLINT_END();
+}
+
+// Slow path for op_switch_char: the scrutinee is a single-character string
+// (asserted); its one character indexes the switch jump table, with the
+// default offset used for characters not in the table.
+LLINT_SLOW_PATH_DECL(slow_path_switch_char)
+{
+    LLINT_BEGIN();
+    JSValue scrutinee = LLINT_OP_C(3).jsValue();
+    ASSERT(scrutinee.isString());
+    JSString* string = asString(scrutinee);
+    ASSERT(string->length() == 1);
+    int defaultOffset = pc[2].u.operand;
+    StringImpl* impl = string->value(exec).impl();
+    CodeBlock* codeBlock = exec->codeBlock();
+    pc += codeBlock->switchJumpTable(pc[1].u.operand).offsetForValue((*impl)[0], defaultOffset);
+    LLINT_END();
+}
+
+// Slow path for op_switch_string: non-string scrutinees take the default
+// branch; strings are looked up in the string switch jump table.
+LLINT_SLOW_PATH_DECL(slow_path_switch_string)
+{
+    LLINT_BEGIN();
+    JSValue scrutinee = LLINT_OP_C(3).jsValue();
+    int defaultOffset = pc[2].u.operand;
+    if (!scrutinee.isString())
+        pc += defaultOffset;
+    else {
+        CodeBlock* codeBlock = exec->codeBlock();
+        pc += codeBlock->stringSwitchJumpTable(pc[1].u.operand).offsetForValue(asString(scrutinee)->value(exec).impl(), defaultOffset);
+    }
+    LLINT_END();
+}
+
+// Slow path for op_new_func: materialize a JSFunction for a function
+// declaration, capturing the scope held in the register named by operand 2.
+LLINT_SLOW_PATH_DECL(slow_path_new_func)
+{
+    LLINT_BEGIN();
+    CodeBlock* codeBlock = exec->codeBlock();
+    ASSERT(codeBlock->codeType() != FunctionCode || !codeBlock->needsActivation() || exec->hasActivation());
+    JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope();
+#if LLINT_SLOW_PATH_TRACING
+    dataLogF("Creating function!\n");
+#endif
+    LLINT_RETURN(JSFunction::create(vm, codeBlock->functionDecl(pc[3].u.operand), scope));
+}
+
+// Slow path for op_new_func_exp: materialize a JSFunction for a function
+// expression, capturing the scope held in the register named by operand 2.
+LLINT_SLOW_PATH_DECL(slow_path_new_func_exp)
+{
+    LLINT_BEGIN();
+    CodeBlock* codeBlock = exec->codeBlock();
+    JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope();
+    FunctionExecutable* function = codeBlock->functionExpr(pc[3].u.operand);
+    JSFunction* func = JSFunction::create(vm, function, scope);
+
+    LLINT_RETURN(func);
+}
+
+// Invoked from setUpCall when the callee is not a JSFunction. If the callee
+// is callable (or constructible) as a host/native function, run it right here
+// and hand control back to the interpreter through getHostCallReturnValue;
+// otherwise throw the appropriate "not a function/constructor" error into the
+// caller's frame.
+static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc, JSValue callee, CodeSpecializationKind kind)
+{
+    UNUSED_PARAM(pc);
+
+#if LLINT_SLOW_PATH_TRACING
+    dataLog("Performing host call.\n");
+#endif
+
+    ExecState* exec = execCallee->callerFrame();
+    VM& vm = exec->vm();
+
+    // The callee frame has no CodeBlock and no return PC — it belongs to a
+    // native function, not interpreted bytecode.
+    execCallee->setCodeBlock(0);
+    execCallee->clearReturnPC();
+
+    if (kind == CodeForCall) {
+        CallData callData;
+        CallType callType = getCallData(callee, callData);
+
+        // JS callees were already filtered out by getJSFunction in setUpCall.
+        ASSERT(callType != CallTypeJS);
+
+        if (callType == CallTypeHost) {
+            NativeCallFrameTracer tracer(&vm, execCallee);
+            execCallee->setCallee(asObject(callee));
+            vm.hostCallReturnValue = JSValue::decode(callData.native.function(execCallee));
+
+            LLINT_CALL_RETURN(execCallee, execCallee, LLInt::getCodePtr(getHostCallReturnValue));
+        }
+
+#if LLINT_SLOW_PATH_TRACING
+        dataLog("Call callee is not a function: ", callee, "\n");
+#endif
+
+        ASSERT(callType == CallTypeNone);
+        LLINT_CALL_THROW(exec, createNotAFunctionError(exec, callee));
+    }
+
+    ASSERT(kind == CodeForConstruct);
+
+    // Construct case: same structure as above, using the construct tables.
+    ConstructData constructData;
+    ConstructType constructType = getConstructData(callee, constructData);
+
+    ASSERT(constructType != ConstructTypeJS);
+
+    if (constructType == ConstructTypeHost) {
+        NativeCallFrameTracer tracer(&vm, execCallee);
+        execCallee->setCallee(asObject(callee));
+        vm.hostCallReturnValue = JSValue::decode(constructData.native.function(execCallee));
+
+        LLINT_CALL_RETURN(execCallee, execCallee, LLInt::getCodePtr(getHostCallReturnValue));
+    }
+
+#if LLINT_SLOW_PATH_TRACING
+    dataLog("Constructor callee is not a function: ", callee, "\n");
+#endif
+
+    ASSERT(constructType == ConstructTypeNone);
+    LLINT_CALL_THROW(exec, createNotAConstructorError(exec, callee));
+}
+
+// Core call setup shared by all call/construct slow paths. Resolves the
+// callee to machine code: host functions and non-function callees are routed
+// to handleHostCall; JS functions are prepared for execution (which may
+// compile/link the executable) and their entrypoint is selected based on
+// whether an arity check is needed. If a callLinkInfo is provided, the result
+// is cached in the caller's inline call cache under the CodeBlock lock.
+// Returns the (entrypoint, callee frame) pair for the interpreter to jump to.
+inline SlowPathReturnType setUpCall(ExecState* execCallee, Instruction* pc, CodeSpecializationKind kind, JSValue calleeAsValue, LLIntCallLinkInfo* callLinkInfo = 0)
+{
+    ExecState* exec = execCallee->callerFrame();
+
+#if LLINT_SLOW_PATH_TRACING
+    dataLogF("Performing call with recorded PC = %p\n", exec->currentVPC());
+#endif
+
+    JSCell* calleeAsFunctionCell = getJSFunction(calleeAsValue);
+    if (!calleeAsFunctionCell)
+        return handleHostCall(execCallee, pc, calleeAsValue, kind);
+
+    JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell);
+    JSScope* scope = callee->scopeUnchecked();
+    VM& vm = *scope->vm();
+    ExecutableBase* executable = callee->executable();
+
+    MacroAssemblerCodePtr codePtr;
+    CodeBlock* codeBlock = 0;
+    if (executable->isHostFunction())
+        codePtr = executable->entrypointFor(vm, kind, MustCheckArity, RegisterPreservationNotRequired);
+    else {
+        FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
+
+        // e.g. arrow functions / methods cannot be used with `new`.
+        if (!isCall(kind) && functionExecutable->constructAbility() == ConstructAbility::CannotConstruct)
+            LLINT_CALL_THROW(exec, createNotAConstructorError(exec, callee));
+
+        // May compile the function's bytecode; reports failure as a JS error.
+        JSObject* error = functionExecutable->prepareForExecution(execCallee, callee, scope, kind);
+        if (error)
+            LLINT_CALL_THROW(exec, error);
+        codeBlock = functionExecutable->codeBlockFor(kind);
+        ASSERT(codeBlock);
+        ArityCheckMode arity;
+        // Only take the arity-fixup entrypoint when too few arguments were
+        // actually passed.
+        if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()))
+            arity = MustCheckArity;
+        else
+            arity = ArityCheckNotRequired;
+        codePtr = functionExecutable->entrypointFor(vm, kind, arity, RegisterPreservationNotRequired);
+    }
+
+    ASSERT(!!codePtr);
+
+    // Populate the caller's inline call cache so future calls skip this path.
+    if (!LLINT_ALWAYS_ACCESS_SLOW && callLinkInfo) {
+        CodeBlock* callerCodeBlock = exec->codeBlock();
+
+        ConcurrentJITLocker locker(callerCodeBlock->m_lock);
+
+        if (callLinkInfo->isOnList())
+            callLinkInfo->remove();
+        callLinkInfo->callee.set(vm, callerCodeBlock->ownerExecutable(), callee);
+        callLinkInfo->lastSeenCallee.set(vm, callerCodeBlock->ownerExecutable(), callee);
+        callLinkInfo->machineCodeTarget = codePtr;
+        if (codeBlock)
+            codeBlock->linkIncomingCall(exec, callLinkInfo);
+    }
+
+    LLINT_CALL_RETURN(exec, execCallee, codePtr.executableAddress());
+}
+
+// Shared body of slow_path_call and slow_path_construct. Bytecode operands:
+// [2] callee, [3] argument count including |this|, [4] offset from the caller
+// frame to the callee frame, [5] the inline call-link cache to populate.
+inline SlowPathReturnType genericCall(ExecState* exec, Instruction* pc, CodeSpecializationKind kind)
+{
+    // This needs to:
+    // - Set up a call frame.
+    // - Figure out what to call and compile it if necessary.
+    // - If possible, link the call's inline cache.
+    // - Return a tuple of machine code address to call and the new call frame.
+
+    JSValue calleeAsValue = LLINT_OP_C(2).jsValue();
+
+    ExecState* execCallee = exec - pc[4].u.operand;
+
+    execCallee->setArgumentCountIncludingThis(pc[3].u.operand);
+    execCallee->uncheckedR(JSStack::Callee) = calleeAsValue;
+    execCallee->setCallerFrame(exec);
+
+    ASSERT(pc[5].u.callLinkInfo);
+    return setUpCall(execCallee, pc, kind, calleeAsValue, pc[5].u.callLinkInfo);
+}
+
+// Slow paths for op_call and op_construct: thin wrappers that dispatch to
+// genericCall with the appropriate specialization kind. NO_SET_PC because
+// the call machinery records the VPC itself where needed.
+LLINT_SLOW_PATH_DECL(slow_path_call)
+{
+    LLINT_BEGIN_NO_SET_PC();
+    return genericCall(exec, pc, CodeForCall);
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_construct)
+{
+    LLINT_BEGIN_NO_SET_PC();
+    return genericCall(exec, pc, CodeForConstruct);
+}
+
+// First half of a varargs call: compute how many arguments the spread
+// produces, size and allocate the callee frame accordingly, and stash the
+// results in the VM for the second half (slow_path_call/construct_varargs).
+LLINT_SLOW_PATH_DECL(slow_path_size_frame_for_varargs)
+{
+    LLINT_BEGIN();
+    // This needs to:
+    // - Set up a call frame while respecting the variable arguments.
+
+    // Operand 5 is negative (stack grows down); negate to get a slot count.
+    unsigned numUsedStackSlots = -pc[5].u.operand;
+    unsigned length = sizeFrameForVarargs(exec, &vm.interpreter->stack(),
+        LLINT_OP_C(4).jsValue(), numUsedStackSlots, pc[6].u.operand);
+    LLINT_CALL_CHECK_EXCEPTION(exec, exec);
+
+    ExecState* execCallee = calleeFrameForVarargs(exec, numUsedStackSlots, length + 1);
+    vm.varargsLength = length;
+    vm.newCallFrameReturnValue = execCallee;
+
+    LLINT_RETURN_CALLEE_FRAME(execCallee);
+}
+
+// Second half of a varargs call: copy the spread arguments and |this| into
+// the callee frame sized by slow_path_size_frame_for_varargs (retrieved from
+// the VM), then resolve the callee via setUpCall.
+LLINT_SLOW_PATH_DECL(slow_path_call_varargs)
+{
+    LLINT_BEGIN_NO_SET_PC();
+    // This needs to:
+    // - Figure out what to call and compile it if necessary.
+    // - Return a tuple of machine code address to call and the new call frame.
+
+    JSValue calleeAsValue = LLINT_OP_C(2).jsValue();
+
+    ExecState* execCallee = vm.newCallFrameReturnValue;
+
+    setupVarargsFrameAndSetThis(exec, execCallee, LLINT_OP_C(3).jsValue(), LLINT_OP_C(4).jsValue(), pc[6].u.operand, vm.varargsLength);
+    LLINT_CALL_CHECK_EXCEPTION(exec, exec);
+
+    execCallee->uncheckedR(JSStack::Callee) = calleeAsValue;
+    execCallee->setCallerFrame(exec);
+    exec->setCurrentVPC(pc);
+
+    return setUpCall(execCallee, pc, CodeForCall, calleeAsValue);
+}
+
+// Construct analogue of slow_path_call_varargs: identical frame setup, but
+// the callee is resolved for construction (CodeForConstruct).
+LLINT_SLOW_PATH_DECL(slow_path_construct_varargs)
+{
+    LLINT_BEGIN_NO_SET_PC();
+    // This needs to:
+    // - Figure out what to call and compile it if necessary.
+    // - Return a tuple of machine code address to call and the new call frame.
+
+    JSValue calleeAsValue = LLINT_OP_C(2).jsValue();
+
+    ExecState* execCallee = vm.newCallFrameReturnValue;
+
+    setupVarargsFrameAndSetThis(exec, execCallee, LLINT_OP_C(3).jsValue(), LLINT_OP_C(4).jsValue(), pc[6].u.operand, vm.varargsLength);
+    LLINT_CALL_CHECK_EXCEPTION(exec, exec);
+
+    execCallee->uncheckedR(JSStack::Callee) = calleeAsValue;
+    execCallee->setCallerFrame(exec);
+    exec->setCurrentVPC(pc);
+
+    return setUpCall(execCallee, pc, CodeForConstruct, calleeAsValue);
+}
+
+// Slow path for op_call_eval. Builds the callee frame, then: if the callee is
+// exactly the global eval function, perform a direct eval here and return
+// through getHostCallReturnValue; any other callee is an ordinary (indirect)
+// call and goes through setUpCall.
+LLINT_SLOW_PATH_DECL(slow_path_call_eval)
+{
+    LLINT_BEGIN_NO_SET_PC();
+    JSValue calleeAsValue = LLINT_OP(2).jsValue();
+
+    ExecState* execCallee = exec - pc[4].u.operand;
+
+    execCallee->setArgumentCountIncludingThis(pc[3].u.operand);
+    execCallee->setCallerFrame(exec);
+    execCallee->uncheckedR(JSStack::Callee) = calleeAsValue;
+    execCallee->setReturnPC(LLInt::getCodePtr(llint_generic_return_point));
+    execCallee->setCodeBlock(0);
+    exec->setCurrentVPC(pc);
+
+    if (!isHostFunction(calleeAsValue, globalFuncEval))
+        return setUpCall(execCallee, pc, CodeForCall, calleeAsValue);
+
+    vm.hostCallReturnValue = eval(execCallee);
+    LLINT_CALL_RETURN(exec, execCallee, LLInt::getCodePtr(getHostCallReturnValue));
+}
+
+// op_strcat: concatenate pc[3] consecutive registers starting at operand 2
+// into a single string.
+LLINT_SLOW_PATH_DECL(slow_path_strcat)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsStringFromRegisterArray(exec, &LLINT_OP(2), pc[3].u.operand));
+}
+
+// op_to_primitive slow path: full ToPrimitive conversion (may call valueOf/
+// toString on the operand).
+LLINT_SLOW_PATH_DECL(slow_path_to_primitive)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(LLINT_OP_C(2).jsValue().toPrimitive(exec));
+}
+
+// op_throw: raise the operand as a JS exception.
+LLINT_SLOW_PATH_DECL(slow_path_throw)
+{
+    LLINT_BEGIN();
+    LLINT_THROW(LLINT_OP_C(1).jsValue());
+}
+
+// op_throw_static_error: throw a compile-time-known error whose message is a
+// string constant. Operand 2 selects ReferenceError (non-zero) vs TypeError.
+LLINT_SLOW_PATH_DECL(slow_path_throw_static_error)
+{
+    LLINT_BEGIN();
+    JSValue errorMessageValue = LLINT_OP_C(1).jsValue();
+    RELEASE_ASSERT(errorMessageValue.isString());
+    String errorMessage = asString(errorMessageValue)->value(exec);
+    if (pc[2].u.operand)
+        LLINT_THROW(createReferenceError(exec, errorMessage));
+    else
+        LLINT_THROW(createTypeError(exec, errorMessage));
+}
+
+// Fired when the script watchdog timer trips. If the VM decides the script
+// must be terminated, throw the terminated-execution exception; otherwise
+// return (0, exec) so the interpreter simply resumes.
+LLINT_SLOW_PATH_DECL(slow_path_handle_watchdog_timer)
+{
+    LLINT_BEGIN_NO_SET_PC();
+    ASSERT(vm.watchdog);
+    if (UNLIKELY(vm.shouldTriggerTermination(exec)))
+        LLINT_THROW(createTerminatedExecutionException(&vm));
+    LLINT_RETURN_TWO(0, exec);
+}
+
+// op_debug: forward the debug hook ID (operand 1) to the interpreter's
+// debugger machinery.
+LLINT_SLOW_PATH_DECL(slow_path_debug)
+{
+    LLINT_BEGIN();
+    int debugHookID = pc[1].u.operand;
+    vm.interpreter->debug(exec, static_cast<DebugHookID>(debugHookID));
+
+    LLINT_END();
+}
+
+// Legacy profiler hooks: notify the enabled profiler (if any) before and
+// after executing the callee in register 1. No-ops when profiling is off.
+LLINT_SLOW_PATH_DECL(slow_path_profile_will_call)
+{
+    LLINT_BEGIN();
+    if (LegacyProfiler* profiler = vm.enabledProfiler())
+        profiler->willExecute(exec, LLINT_OP(1).jsValue());
+    LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_profile_did_call)
+{
+    LLINT_BEGIN();
+    if (LegacyProfiler* profiler = vm.enabledProfiler())
+        profiler->didExecute(exec, LLINT_OP(1).jsValue());
+    LLINT_END();
+}
+
+// Entered when an exception is pending: run the generic unwinder to find the
+// handler frame, then return to the interpreter's exception machinery.
+LLINT_SLOW_PATH_DECL(slow_path_handle_exception)
+{
+    LLINT_BEGIN_NO_SET_PC();
+    genericUnwind(&vm, exec);
+    LLINT_END_IMPL();
+}
+
+// op_resolve_scope slow path: walk the scope chain (starting from the scope
+// in operand 2) to find the scope that binds the identifier in operand 3.
+LLINT_SLOW_PATH_DECL(slow_path_resolve_scope)
+{
+    LLINT_BEGIN();
+    const Identifier& ident = exec->codeBlock()->identifier(pc[3].u.operand);
+    JSScope* scope = LLINT_OP(2).Register::scope();
+    LLINT_RETURN(JSScope::resolve(exec, scope, ident));
+}
+
+// op_get_from_scope slow path: read a variable from a resolved scope object.
+// A miss throws a ReferenceError only under ThrowIfNotFound mode; otherwise
+// it yields undefined. Cacheable global-property hits write the structure and
+// offset back into the instruction stream so subsequent executions stay on
+// the fast path.
+LLINT_SLOW_PATH_DECL(slow_path_get_from_scope)
+{
+    LLINT_BEGIN();
+
+    const Identifier& ident = exec->codeBlock()->identifier(pc[3].u.operand);
+    JSObject* scope = jsCast<JSObject*>(LLINT_OP(2).jsValue());
+    ResolveModeAndType modeAndType(pc[4].u.operand);
+
+    PropertySlot slot(scope);
+    if (!scope->getPropertySlot(exec, ident, slot)) {
+        if (modeAndType.mode() == ThrowIfNotFound)
+            LLINT_RETURN(exec->vm().throwException(exec, createUndefinedVariableError(exec, ident)));
+        LLINT_RETURN(jsUndefined());
+    }
+
+    // Covers implicit globals. Since they don't exist until they first execute, we didn't know how to cache them at compile time.
+    if (slot.isCacheableValue() && slot.slotBase() == scope && scope->structure()->propertyAccessesAreCacheable()) {
+        if (modeAndType.type() == GlobalProperty || modeAndType.type() == GlobalPropertyWithVarInjectionChecks) {
+            CodeBlock* codeBlock = exec->codeBlock();
+            Structure* structure = scope->structure(vm);
+            {
+                // Lock against the concurrent compiler while patching metadata.
+                ConcurrentJITLocker locker(codeBlock->m_lock);
+                pc[5].u.structure.set(exec->vm(), codeBlock->ownerExecutable(), structure);
+                pc[6].u.operand = slot.cachedOffset();
+            }
+            structure->startWatchingPropertyForReplacements(vm, slot.cachedOffset());
+        }
+    }
+
+    LLINT_RETURN(slot.getValue(exec, ident));
+}
+
+// op_put_to_scope slow path: write a variable into a resolved scope. The
+// LocalClosureVar case stores straight into the lexical environment slot and
+// touches the variable's watchpoint set; the generic case does an ordinary
+// scoped put (with a ReferenceError under ThrowIfNotFound when the binding is
+// missing) and then tries to cache the global store.
+LLINT_SLOW_PATH_DECL(slow_path_put_to_scope)
+{
+    LLINT_BEGIN();
+
+    CodeBlock* codeBlock = exec->codeBlock();
+    const Identifier& ident = codeBlock->identifier(pc[2].u.operand);
+    JSObject* scope = jsCast<JSObject*>(LLINT_OP(1).jsValue());
+    JSValue value = LLINT_OP_C(3).jsValue();
+    ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
+    if (modeAndType.type() == LocalClosureVar) {
+        JSLexicalEnvironment* environment = jsCast<JSLexicalEnvironment*>(scope);
+        environment->variableAt(ScopeOffset(pc[6].u.operand)).set(vm, environment, value);
+
+        // Have to do this *after* the write, because if this puts the set into IsWatched, then we need
+        // to have already changed the value of the variable. Otherwise we might watch and constant-fold
+        // to the Undefined value from before the assignment.
+        if (WatchpointSet* set = pc[5].u.watchpointSet)
+            set->touch("Executed op_put_scope<LocalClosureVar>");
+        LLINT_END();
+    }
+
+    if (modeAndType.mode() == ThrowIfNotFound && !scope->hasProperty(exec, ident))
+        LLINT_THROW(createUndefinedVariableError(exec, ident));
+
+    PutPropertySlot slot(scope, codeBlock->isStrictMode());
+    scope->methodTable()->put(scope, exec, ident, value, slot);
+
+    CommonSlowPaths::tryCachePutToScopeGlobal(exec, codeBlock, pc, scope, modeAndType, slot);
+
+    LLINT_END();
+}
+
+// Called from the interpreter prologue when a frame cannot fit on the stack.
+// If no JS frame has been established yet, fall back to the callee's global
+// exec so the error has somewhere to land. Returns a null code-pointer pair.
+extern "C" SlowPathReturnType llint_throw_stack_overflow_error(VM* vm, ProtoCallFrame* protoFrame)
+{
+    ExecState* exec = vm->topCallFrame;
+    if (!exec)
+        exec = protoFrame->callee()->globalObject()->globalExec();
+    throwStackOverflowError(exec);
+    return encodeResult(0, 0);
+}
+
+#if !ENABLE(JIT)
+// CLoop-only VM-entry stack check: asks the interpreter stack whether it can
+// grow to the requested top; the boolean result is packed into the first slot
+// of the return pair.
+extern "C" SlowPathReturnType llint_stack_check_at_vm_entry(VM* vm, Register* newTopOfStack)
+{
+    bool success = vm->interpreter->stack().ensureCapacityFor(newTopOfStack);
+    return encodeResult(reinterpret_cast<void*>(success), 0);
+}
+#endif
+
+// Out-of-line GC write barrier invoked from the interpreter assembly when a
+// store into |cell| needs the heap to be notified.
+extern "C" void llint_write_barrier_slow(ExecState* exec, JSCell* cell)
+{
+    exec->vm().heap.writeBarrier(cell);
+}
+
+// Deliberate hard crash entry point for unreachable interpreter states.
+extern "C" NO_RETURN_DUE_TO_CRASH void llint_crash()
+{
+    CRASH();
+}
+
+} } // namespace JSC::LLInt
diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.h b/Source/JavaScriptCore/llint/LLIntSlowPaths.h
new file mode 100644
index 000000000..8cc69a960
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2011, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LLIntSlowPaths_h
+#define LLIntSlowPaths_h
+
+#include "CommonSlowPaths.h"
+#include <wtf/StdLibExtras.h>
+
+namespace JSC {
+
+class ExecState;
+struct Instruction;
+struct ProtoCallFrame;
+
+namespace LLInt {
+
+// Tracing hooks and the out-of-line write barrier, called directly from the
+// interpreter assembly.
+extern "C" SlowPathReturnType llint_trace_operand(ExecState*, Instruction*, int fromWhere, int operand);
+extern "C" SlowPathReturnType llint_trace_value(ExecState*, Instruction*, int fromWhere, int operand);
+extern "C" void llint_write_barrier_slow(ExecState*, JSCell*) WTF_INTERNAL;
+
+// Declares the C signature shared by every LLInt slow path: the current call
+// frame plus the bytecode PC in, a SlowPathReturnType pair out, which the
+// interpreter assembly consumes.
+#define LLINT_SLOW_PATH_DECL(name) \
+    extern "C" SlowPathReturnType llint_##name(ExecState* exec, Instruction* pc)
+
+// Same signature, with hidden (non-exported) symbol visibility.
+#define LLINT_SLOW_PATH_HIDDEN_DECL(name) \
+    LLINT_SLOW_PATH_DECL(name) WTF_INTERNAL
+
+// One declaration per slow path defined in LLIntSlowPaths.cpp.
+LLINT_SLOW_PATH_HIDDEN_DECL(trace_prologue);
+LLINT_SLOW_PATH_HIDDEN_DECL(trace_prologue_function_for_call);
+LLINT_SLOW_PATH_HIDDEN_DECL(trace_prologue_function_for_construct);
+LLINT_SLOW_PATH_HIDDEN_DECL(trace_arityCheck_for_call);
+LLINT_SLOW_PATH_HIDDEN_DECL(trace_arityCheck_for_construct);
+LLINT_SLOW_PATH_HIDDEN_DECL(trace);
+LLINT_SLOW_PATH_HIDDEN_DECL(special_trace);
+LLINT_SLOW_PATH_HIDDEN_DECL(entry_osr);
+LLINT_SLOW_PATH_HIDDEN_DECL(entry_osr_function_for_call);
+LLINT_SLOW_PATH_HIDDEN_DECL(entry_osr_function_for_construct);
+LLINT_SLOW_PATH_HIDDEN_DECL(entry_osr_function_for_call_arityCheck);
+LLINT_SLOW_PATH_HIDDEN_DECL(entry_osr_function_for_construct_arityCheck);
+LLINT_SLOW_PATH_HIDDEN_DECL(loop_osr);
+LLINT_SLOW_PATH_HIDDEN_DECL(replace);
+LLINT_SLOW_PATH_HIDDEN_DECL(stack_check);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_object);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_array);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_array_with_size);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_array_buffer);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_regexp);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_check_has_instance);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_instanceof);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_by_id);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_arguments_length);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_id);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_del_by_id);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_by_val);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_argument_by_val);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_val);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_val_direct);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_del_by_val);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_index);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_getter_by_id);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_setter_by_id);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_getter_setter);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jtrue);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jfalse);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jless);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jnless);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jgreater);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jngreater);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jlesseq);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jnlesseq);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jgreatereq);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jngreatereq);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_switch_imm);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_switch_char);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_switch_string);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_func);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_func_exp);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_construct);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_size_frame_for_varargs);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call_varargs);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_construct_varargs);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call_eval);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_tear_off_arguments);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_strcat);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_to_primitive);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_throw);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_throw_static_error);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_handle_watchdog_timer);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_debug);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_profile_will_call);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_profile_did_call);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_handle_exception);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_resolve_scope);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_from_scope);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_to_scope);
+// Non-opcode entry points with bespoke signatures (see LLIntSlowPaths.cpp).
+extern "C" SlowPathReturnType llint_throw_stack_overflow_error(VM*, ProtoCallFrame*) WTF_INTERNAL;
+#if !ENABLE(JIT)
+extern "C" SlowPathReturnType llint_stack_check_at_vm_entry(VM*, Register*) WTF_INTERNAL;
+#endif
+extern "C" NO_RETURN_DUE_TO_CRASH void llint_crash() WTF_INTERNAL;
+
+} } // namespace JSC::LLInt
+
+#endif // LLIntSlowPaths_h
+
diff --git a/Source/JavaScriptCore/llint/LLIntThunks.cpp b/Source/JavaScriptCore/llint/LLIntThunks.cpp
new file mode 100644
index 000000000..8ab96b304
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntThunks.cpp
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LLIntThunks.h"
+
+#include "CallData.h"
+#include "ExceptionHelpers.h"
+#include "Interpreter.h"
+#include "JSCJSValueInlines.h"
+#include "JSInterfaceJIT.h"
+#include "JSObject.h"
+#include "JSStackInlines.h"
+#include "LLIntCLoop.h"
+#include "LinkBuffer.h"
+#include "LowLevelInterpreter.h"
+#include "ProtoCallFrame.h"
+#include "StackAlignment.h"
+#include "VM.h"
+
+namespace JSC {
+
+#if ENABLE(JIT)
+
+namespace LLInt {
+
+static MacroAssemblerCodeRef generateThunkWithJumpTo(VM* vm, void (*target)(), const char *thunkKind)
+{
+ JSInterfaceJIT jit(vm);
+
+ // FIXME: there's probably a better way to do it on X86, but I'm not sure I care.
+ jit.move(JSInterfaceJIT::TrustedImmPtr(bitwise_cast<void*>(target)), JSInterfaceJIT::regT0);
+ jit.jump(JSInterfaceJIT::regT0);
+
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("LLInt %s prologue thunk", thunkKind));
+}
+
+MacroAssemblerCodeRef functionForCallEntryThunkGenerator(VM* vm)
+{
+ return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_function_for_call_prologue), "function for call");
+}
+
+MacroAssemblerCodeRef functionForConstructEntryThunkGenerator(VM* vm)
+{
+ return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_function_for_construct_prologue), "function for construct");
+}
+
+MacroAssemblerCodeRef functionForCallArityCheckThunkGenerator(VM* vm)
+{
+ return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_function_for_call_arity_check), "function for call with arity check");
+}
+
+MacroAssemblerCodeRef functionForConstructArityCheckThunkGenerator(VM* vm)
+{
+ return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_function_for_construct_arity_check), "function for construct with arity check");
+}
+
+MacroAssemblerCodeRef evalEntryThunkGenerator(VM* vm)
+{
+ return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_eval_prologue), "eval");
+}
+
+MacroAssemblerCodeRef programEntryThunkGenerator(VM* vm)
+{
+ return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_program_prologue), "program");
+}
+
+} // namespace LLInt
+
+#else // ENABLE(JIT)
+
+// Non-JIT (i.e. C Loop LLINT) case:
+
+EncodedJSValue vmEntryToJavaScript(void* executableAddress, VM* vm, ProtoCallFrame* protoCallFrame)
+{
+ JSValue result = CLoop::execute(llint_vm_entry_to_javascript, executableAddress, vm, protoCallFrame);
+ return JSValue::encode(result);
+}
+
+EncodedJSValue vmEntryToNative(void* executableAddress, VM* vm, ProtoCallFrame* protoCallFrame)
+{
+ JSValue result = CLoop::execute(llint_vm_entry_to_native, executableAddress, vm, protoCallFrame);
+ return JSValue::encode(result);
+}
+
+extern "C" VMEntryRecord* vmEntryRecord(VMEntryFrame* entryFrame)
+{
+ // The C Loop doesn't have any callee save registers, so the VMEntryRecord is allocated at the base of the frame.
+ intptr_t stackAlignment = stackAlignmentBytes();
+ intptr_t VMEntryTotalFrameSize = (sizeof(VMEntryRecord) + (stackAlignment - 1)) & ~(stackAlignment - 1);
+ return reinterpret_cast<VMEntryRecord*>(static_cast<char*>(entryFrame) - VMEntryTotalFrameSize);
+}
+
+
+#endif // ENABLE(JIT)
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/llint/LLIntThunks.h b/Source/JavaScriptCore/llint/LLIntThunks.h
new file mode 100644
index 000000000..0d1be6bda
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntThunks.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LLIntThunks_h
+#define LLIntThunks_h
+
+#include "MacroAssemblerCodeRef.h"
+
+namespace JSC {
+
+class VM;
+struct ProtoCallFrame;
+
+extern "C" {
+ EncodedJSValue vmEntryToJavaScript(void*, VM*, ProtoCallFrame*);
+ EncodedJSValue vmEntryToNative(void*, VM*, ProtoCallFrame*);
+}
+
+namespace LLInt {
+
+MacroAssemblerCodeRef functionForCallEntryThunkGenerator(VM*);
+MacroAssemblerCodeRef functionForConstructEntryThunkGenerator(VM*);
+MacroAssemblerCodeRef functionForCallArityCheckThunkGenerator(VM*);
+MacroAssemblerCodeRef functionForConstructArityCheckThunkGenerator(VM*);
+MacroAssemblerCodeRef evalEntryThunkGenerator(VM*);
+MacroAssemblerCodeRef programEntryThunkGenerator(VM*);
+
+} } // namespace JSC::LLInt
+
+#endif // LLIntThunks_h
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
new file mode 100644
index 000000000..c910617ed
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
@@ -0,0 +1,1414 @@
+# Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+# First come the common protocols that both interpreters use. Note that each
+# of these must have an ASSERT() in LLIntData.cpp
+
+# Work-around for the fact that the toolchain's awareness of armv7k / armv7s
+# results in a separate slab in the fat binary, yet the offlineasm doesn't know
+# to expect it.
+if ARMv7k
+end
+if ARMv7s
+end
+
+# These declarations must match interpreter/JSStack.h.
+
+if JSVALUE64
+ const PtrSize = 8
+ const CallFrameHeaderSlots = 5
+else
+ const PtrSize = 4
+ const CallFrameHeaderSlots = 4
+ const CallFrameAlignSlots = 1
+end
+const SlotSize = 8
+
+const JSEnvironmentRecord_variables = (sizeof JSEnvironmentRecord + SlotSize - 1) & ~(SlotSize - 1)
+const DirectArguments_storage = (sizeof DirectArguments + SlotSize - 1) & ~(SlotSize - 1)
+
+const StackAlignment = 16
+const StackAlignmentMask = StackAlignment - 1
+
+const CallerFrameAndPCSize = 2 * PtrSize
+
+const CallerFrame = 0
+const ReturnPC = CallerFrame + PtrSize
+const CodeBlock = ReturnPC + PtrSize
+const Callee = CodeBlock + SlotSize
+const ArgumentCount = Callee + SlotSize
+const ThisArgumentOffset = ArgumentCount + SlotSize
+const FirstArgumentOffset = ThisArgumentOffset + SlotSize
+const CallFrameHeaderSize = ThisArgumentOffset
+
+# Some value representation constants.
+if JSVALUE64
+ const TagBitTypeOther = 0x2
+ const TagBitBool = 0x4
+ const TagBitUndefined = 0x8
+ const ValueEmpty = 0x0
+ const ValueFalse = TagBitTypeOther | TagBitBool
+ const ValueTrue = TagBitTypeOther | TagBitBool | 1
+ const ValueUndefined = TagBitTypeOther | TagBitUndefined
+ const ValueNull = TagBitTypeOther
+ const TagTypeNumber = 0xffff000000000000
+ const TagMask = TagTypeNumber | TagBitTypeOther
+else
+ const Int32Tag = -1
+ const BooleanTag = -2
+ const NullTag = -3
+ const UndefinedTag = -4
+ const CellTag = -5
+ const EmptyValueTag = -6
+ const DeletedValueTag = -7
+ const LowestTag = DeletedValueTag
+end
+
+const CallOpCodeSize = 9
+
+if X86_64 or ARM64 or C_LOOP
+ const maxFrameExtentForSlowPathCall = 0
+elsif ARM or ARMv7_TRADITIONAL or ARMv7 or SH4
+ const maxFrameExtentForSlowPathCall = 24
+elsif X86 or X86_WIN
+ const maxFrameExtentForSlowPathCall = 40
+elsif MIPS
+ const maxFrameExtentForSlowPathCall = 40
+elsif X86_64_WIN
+ const maxFrameExtentForSlowPathCall = 64
+end
+
+# Watchpoint states
+const ClearWatchpoint = 0
+const IsWatched = 1
+const IsInvalidated = 2
+
+# Some register conventions.
+if JSVALUE64
+ # - Use a pair of registers to represent the PC: one register for the
+ # base of the bytecodes, and one register for the index.
+ # - The PC base (or PB for short) should be stored in the csr. It will
+ # get clobbered on calls to other JS code, but will get saved on calls
+ # to C functions.
+ # - C calls are still given the Instruction* rather than the PC index.
+ # This requires an add before the call, and a sub after.
+ const PC = t5
+ const PB = t6
+ const tagTypeNumber = csr1
+ const tagMask = csr2
+
+ macro loadisFromInstruction(offset, dest)
+ loadis offset * 8[PB, PC, 8], dest
+ end
+
+ macro loadpFromInstruction(offset, dest)
+ loadp offset * 8[PB, PC, 8], dest
+ end
+
+ macro storepToInstruction(value, offset)
+ storep value, offset * 8[PB, PC, 8]
+ end
+
+else
+ const PC = t5
+ macro loadisFromInstruction(offset, dest)
+ loadis offset * 4[PC], dest
+ end
+
+ macro loadpFromInstruction(offset, dest)
+ loadp offset * 4[PC], dest
+ end
+end
+
+# Constants for reasoning about value representation.
+if BIG_ENDIAN
+ const TagOffset = 0
+ const PayloadOffset = 4
+else
+ const TagOffset = 4
+ const PayloadOffset = 0
+end
+
+# Constant for reasoning about butterflies.
+const IsArray = 1
+const IndexingShapeMask = 30
+const NoIndexingShape = 0
+const Int32Shape = 20
+const DoubleShape = 22
+const ContiguousShape = 26
+const ArrayStorageShape = 28
+const SlowPutArrayStorageShape = 30
+
+# Type constants.
+const StringType = 6
+const ObjectType = 18
+const FinalObjectType = 19
+
+# Type flags constants.
+const MasqueradesAsUndefined = 1
+const ImplementsHasInstance = 2
+const ImplementsDefaultHasInstance = 8
+
+# Bytecode operand constants.
+const FirstConstantRegisterIndex = 0x40000000
+
+# Code type constants.
+const GlobalCode = 0
+const EvalCode = 1
+const FunctionCode = 2
+
+# The interpreter steals the tag word of the argument count.
+const LLIntReturnPC = ArgumentCount + TagOffset
+
+# String flags.
+const HashFlags8BitBuffer = 8
+
+# Copied from PropertyOffset.h
+const firstOutOfLineOffset = 100
+
+# ResolveType
+const GlobalProperty = 0
+const GlobalVar = 1
+const ClosureVar = 2
+const LocalClosureVar = 3
+const GlobalPropertyWithVarInjectionChecks = 4
+const GlobalVarWithVarInjectionChecks = 5
+const ClosureVarWithVarInjectionChecks = 6
+const Dynamic = 7
+
+const ResolveModeMask = 0xffff
+
+const MarkedBlockSize = 16 * 1024
+const MarkedBlockMask = ~(MarkedBlockSize - 1)
+# Constants for checking mark bits.
+const AtomNumberShift = 3
+const BitMapWordShift = 4
+
+# Allocation constants
+if JSVALUE64
+ const JSFinalObjectSizeClassIndex = 1
+else
+ const JSFinalObjectSizeClassIndex = 3
+end
+
+# This must match wtf/Vector.h
+const VectorBufferOffset = 0
+if JSVALUE64
+ const VectorSizeOffset = 12
+else
+ const VectorSizeOffset = 8
+end
+
+# Some common utilities.
+macro crash()
+ if C_LOOP
+ cloopCrash
+ else
+ call _llint_crash
+ end
+end
+
+macro assert(assertion)
+ if ASSERT_ENABLED
+ assertion(.ok)
+ crash()
+ .ok:
+ end
+end
+
+macro checkStackPointerAlignment(tempReg, location)
+ if ARM64 or C_LOOP or SH4
+ # ARM64 will check for us!
+ # C_LOOP does not need the alignment, and can use a little perf
+ # improvement from avoiding useless work.
+ # SH4 does not need specific alignment (4 bytes).
+ else
+ if ARM or ARMv7 or ARMv7_TRADITIONAL
+ # ARM can't do logical ops with the sp as a source
+ move sp, tempReg
+ andp StackAlignmentMask, tempReg
+ else
+ andp sp, StackAlignmentMask, tempReg
+ end
+ btpz tempReg, .stackPointerOkay
+ move location, tempReg
+ break
+ .stackPointerOkay:
+ end
+end
+
+if C_LOOP
+ const CalleeSaveRegisterCount = 0
+elsif ARM or ARMv7_TRADITIONAL or ARMv7
+ const CalleeSaveRegisterCount = 7
+elsif ARM64
+ const CalleeSaveRegisterCount = 10
+elsif SH4 or X86_64 or MIPS
+ const CalleeSaveRegisterCount = 5
+elsif X86 or X86_WIN
+ const CalleeSaveRegisterCount = 3
+elsif X86_64_WIN
+ const CalleeSaveRegisterCount = 7
+end
+
+const CalleeRegisterSaveSize = CalleeSaveRegisterCount * PtrSize
+
+# VMEntryTotalFrameSize includes the space for struct VMEntryRecord and the
+# callee save registers rounded up to keep the stack aligned
+const VMEntryTotalFrameSize = (CalleeRegisterSaveSize + sizeof VMEntryRecord + StackAlignment - 1) & ~StackAlignmentMask
+
+macro pushCalleeSaves()
+ if C_LOOP
+ elsif ARM or ARMv7_TRADITIONAL
+ emit "push {r4-r10}"
+ elsif ARMv7
+ emit "push {r4-r6, r8-r11}"
+ elsif ARM64
+ emit "stp x20, x19, [sp, #-16]!"
+ emit "stp x22, x21, [sp, #-16]!"
+ emit "stp x24, x23, [sp, #-16]!"
+ emit "stp x26, x25, [sp, #-16]!"
+ emit "stp x28, x27, [sp, #-16]!"
+ elsif MIPS
+ emit "addiu $sp, $sp, -20"
+ emit "sw $20, 16($sp)"
+ emit "sw $19, 12($sp)"
+ emit "sw $18, 8($sp)"
+ emit "sw $17, 4($sp)"
+ emit "sw $16, 0($sp)"
+ elsif SH4
+ emit "mov.l r13, @-r15"
+ emit "mov.l r11, @-r15"
+ emit "mov.l r10, @-r15"
+ emit "mov.l r9, @-r15"
+ emit "mov.l r8, @-r15"
+ elsif X86
+ emit "push %esi"
+ emit "push %edi"
+ emit "push %ebx"
+ elsif X86_WIN
+ emit "push esi"
+ emit "push edi"
+ emit "push ebx"
+ elsif X86_64
+ emit "push %r12"
+ emit "push %r13"
+ emit "push %r14"
+ emit "push %r15"
+ emit "push %rbx"
+ elsif X86_64_WIN
+ emit "push r12"
+ emit "push r13"
+ emit "push r14"
+ emit "push r15"
+ emit "push rbx"
+ emit "push rdi"
+ emit "push rsi"
+ end
+end
+
+macro popCalleeSaves()
+ if C_LOOP
+ elsif ARM or ARMv7_TRADITIONAL
+ emit "pop {r4-r10}"
+ elsif ARMv7
+ emit "pop {r4-r6, r8-r11}"
+ elsif ARM64
+ emit "ldp x28, x27, [sp], #16"
+ emit "ldp x26, x25, [sp], #16"
+ emit "ldp x24, x23, [sp], #16"
+ emit "ldp x22, x21, [sp], #16"
+ emit "ldp x20, x19, [sp], #16"
+ elsif MIPS
+ emit "lw $16, 0($sp)"
+ emit "lw $17, 4($sp)"
+ emit "lw $18, 8($sp)"
+ emit "lw $19, 12($sp)"
+ emit "lw $20, 16($sp)"
+ emit "addiu $sp, $sp, 20"
+ elsif SH4
+ emit "mov.l @r15+, r8"
+ emit "mov.l @r15+, r9"
+ emit "mov.l @r15+, r10"
+ emit "mov.l @r15+, r11"
+ emit "mov.l @r15+, r13"
+ elsif X86
+ emit "pop %ebx"
+ emit "pop %edi"
+ emit "pop %esi"
+ elsif X86_WIN
+ emit "pop ebx"
+ emit "pop edi"
+ emit "pop esi"
+ elsif X86_64
+ emit "pop %rbx"
+ emit "pop %r15"
+ emit "pop %r14"
+ emit "pop %r13"
+ emit "pop %r12"
+ elsif X86_64_WIN
+ emit "pop rsi"
+ emit "pop rdi"
+ emit "pop rbx"
+ emit "pop r15"
+ emit "pop r14"
+ emit "pop r13"
+ emit "pop r12"
+ end
+end
+
+macro preserveCallerPCAndCFR()
+ if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+ push lr
+ push cfr
+ elsif X86 or X86_WIN or X86_64 or X86_64_WIN
+ push cfr
+ elsif ARM64
+ push cfr, lr
+ else
+ error
+ end
+ move sp, cfr
+end
+
+macro restoreCallerPCAndCFR()
+ move cfr, sp
+ if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+ pop cfr
+ pop lr
+ elsif X86 or X86_WIN or X86_64 or X86_64_WIN
+ pop cfr
+ elsif ARM64
+ pop lr, cfr
+ end
+end
+
+macro preserveReturnAddressAfterCall(destinationRegister)
+ if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
+ # In C_LOOP case, we're only preserving the bytecode vPC.
+ move lr, destinationRegister
+ elsif X86 or X86_WIN or X86_64 or X86_64_WIN
+ pop destinationRegister
+ else
+ error
+ end
+end
+
+macro restoreReturnAddressBeforeReturn(sourceRegister)
+ if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
+ # In C_LOOP case, we're only restoring the bytecode vPC.
+ move sourceRegister, lr
+ elsif X86 or X86_WIN or X86_64 or X86_64_WIN
+ push sourceRegister
+ else
+ error
+ end
+end
+
+macro functionPrologue()
+ if X86 or X86_WIN or X86_64 or X86_64_WIN
+ push cfr
+ elsif ARM64
+ push cfr, lr
+ elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+ push lr
+ push cfr
+ end
+ move sp, cfr
+end
+
+macro functionEpilogue()
+ if X86 or X86_WIN or X86_64 or X86_64_WIN
+ pop cfr
+ elsif ARM64
+ pop lr, cfr
+ elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+ pop cfr
+ pop lr
+ end
+end
+
+macro vmEntryRecord(entryFramePointer, resultReg)
+ subp entryFramePointer, VMEntryTotalFrameSize, resultReg
+end
+
+macro getFrameRegisterSizeForCodeBlock(codeBlock, size)
+ loadi CodeBlock::m_numCalleeRegisters[codeBlock], size
+ lshiftp 3, size
+ addp maxFrameExtentForSlowPathCall, size
+end
+
+macro restoreStackPointerAfterCall()
+ loadp CodeBlock[cfr], t2
+ getFrameRegisterSizeForCodeBlock(t2, t4)
+ if ARMv7
+ subp cfr, t4, t4
+ move t4, sp
+ else
+ subp cfr, t4, sp
+ end
+end
+
+macro traceExecution()
+ if EXECUTION_TRACING
+ callSlowPath(_llint_trace)
+ end
+end
+
+macro callTargetFunction(callLinkInfo, calleeFramePtr)
+ move calleeFramePtr, sp
+ if C_LOOP
+ cloopCallJSFunction LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
+ else
+ call LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
+ end
+ restoreStackPointerAfterCall()
+ dispatchAfterCall()
+end
+
+macro slowPathForCall(slowPath)
+ callCallSlowPath(
+ slowPath,
+ macro (callee)
+ btpz t1, .dontUpdateSP
+ if ARMv7
+ addp CallerFrameAndPCSize, t1, t1
+ move t1, sp
+ else
+ addp CallerFrameAndPCSize, t1, sp
+ end
+ .dontUpdateSP:
+ if C_LOOP
+ cloopCallJSFunction callee
+ else
+ call callee
+ end
+ restoreStackPointerAfterCall()
+ dispatchAfterCall()
+ end)
+end
+
+macro arrayProfile(cellAndIndexingType, profile, scratch)
+ const cell = cellAndIndexingType
+ const indexingType = cellAndIndexingType
+ loadi JSCell::m_structureID[cell], scratch
+ storei scratch, ArrayProfile::m_lastSeenStructureID[profile]
+ loadb JSCell::m_indexingType[cell], indexingType
+end
+
+macro skipIfIsRememberedOrInEden(cell, scratch1, scratch2, continuation)
+ loadb JSCell::m_gcData[cell], scratch1
+ continuation(scratch1)
+end
+
+macro notifyWrite(set, slow)
+ bbneq WatchpointSet::m_state[set], IsInvalidated, slow
+end
+
+macro checkSwitchToJIT(increment, action)
+ loadp CodeBlock[cfr], t0
+ baddis increment, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t0], .continue
+ action()
+ .continue:
+end
+
+macro checkSwitchToJITForEpilogue()
+ checkSwitchToJIT(
+ 10,
+ macro ()
+ callSlowPath(_llint_replace)
+ end)
+end
+
+macro assertNotConstant(index)
+ assert(macro (ok) bilt index, FirstConstantRegisterIndex, ok end)
+end
+
+macro functionForCallCodeBlockGetter(targetRegister)
+ if JSVALUE64
+ loadp Callee[cfr], targetRegister
+ else
+ loadp Callee + PayloadOffset[cfr], targetRegister
+ end
+ loadp JSFunction::m_executable[targetRegister], targetRegister
+ loadp FunctionExecutable::m_codeBlockForCall[targetRegister], targetRegister
+end
+
+macro functionForConstructCodeBlockGetter(targetRegister)
+ if JSVALUE64
+ loadp Callee[cfr], targetRegister
+ else
+ loadp Callee + PayloadOffset[cfr], targetRegister
+ end
+ loadp JSFunction::m_executable[targetRegister], targetRegister
+ loadp FunctionExecutable::m_codeBlockForConstruct[targetRegister], targetRegister
+end
+
+macro notFunctionCodeBlockGetter(targetRegister)
+ loadp CodeBlock[cfr], targetRegister
+end
+
+macro functionCodeBlockSetter(sourceRegister)
+ storep sourceRegister, CodeBlock[cfr]
+end
+
+macro notFunctionCodeBlockSetter(sourceRegister)
+ # Nothing to do!
+end
+
+# Do the bare minimum required to execute code. Sets up the PC, leave the CodeBlock*
+# in t1. May also trigger prologue entry OSR.
+macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
+ # Set up the call frame and check if we should OSR.
+ preserveCallerPCAndCFR()
+
+ if EXECUTION_TRACING
+ subp maxFrameExtentForSlowPathCall, sp
+ callSlowPath(traceSlowPath)
+ addp maxFrameExtentForSlowPathCall, sp
+ end
+ codeBlockGetter(t1)
+ if not C_LOOP
+ baddis 5, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t1], .continue
+ if JSVALUE64
+ cCall2(osrSlowPath, cfr, PC)
+ else
+ # We are after the function prologue, but before we have set up sp from the CodeBlock.
+ # Temporarily align stack pointer for this call.
+ subp 8, sp
+ cCall2(osrSlowPath, cfr, PC)
+ addp 8, sp
+ end
+ btpz t0, .recover
+ move cfr, sp # restore the previous sp
+ # pop the callerFrame since we will jump to a function that wants to save it
+ if ARM64
+ pop lr, cfr
+ elsif ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+ pop cfr
+ pop lr
+ else
+ pop cfr
+ end
+ jmp t0
+ .recover:
+ codeBlockGetter(t1)
+ .continue:
+ end
+
+ codeBlockSetter(t1)
+
+ # Set up the PC.
+ if JSVALUE64
+ loadp CodeBlock::m_instructions[t1], PB
+ move 0, PC
+ else
+ loadp CodeBlock::m_instructions[t1], PC
+ end
+
+ # Get new sp in t0 and check stack height.
+ getFrameRegisterSizeForCodeBlock(t1, t0)
+ subp cfr, t0, t0
+ loadp CodeBlock::m_vm[t1], t2
+ bpbeq VM::m_jsStackLimit[t2], t0, .stackHeightOK
+
+ # Stack height check failed - need to call a slow_path.
+ subp maxFrameExtentForSlowPathCall, sp # Set up temporary stack pointer for call
+ callSlowPath(_llint_stack_check)
+ bpeq t1, 0, .stackHeightOKGetCodeBlock
+ move t1, cfr
+ dispatch(0) # Go to exception handler in PC
+
+.stackHeightOKGetCodeBlock:
+ # Stack check slow path returned that the stack was ok.
+ # Since they were clobbered, need to get CodeBlock and new sp
+ codeBlockGetter(t1)
+ getFrameRegisterSizeForCodeBlock(t1, t0)
+ subp cfr, t0, t0
+
+.stackHeightOK:
+ move t0, sp
+end
+
+# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
+# Must call dispatch(0) after calling this.
+macro functionInitialization(profileArgSkip)
+ # Profile the arguments. Unfortunately, we have no choice but to do this. This
+ # code is pretty horrendous because of the difference in ordering between
+ # arguments and value profiles, the desire to have a simple loop-down-to-zero
+ # loop, and the desire to use only three registers so as to preserve the PC and
+ # the code block. It is likely that this code should be rewritten in a more
+ # optimal way for architectures that have more than five registers available
+ # for arbitrary use in the interpreter.
+ loadi CodeBlock::m_numParameters[t1], t0
+ addp -profileArgSkip, t0 # Use addi because that's what has the peephole
+ assert(macro (ok) bpgteq t0, 0, ok end)
+ btpz t0, .argumentProfileDone
+ loadp CodeBlock::m_argumentValueProfiles + VectorBufferOffset[t1], t3
+ mulp sizeof ValueProfile, t0, t2 # Aaaaahhhh! Need strength reduction!
+ lshiftp 3, t0
+ addp t2, t3
+.argumentProfileLoop:
+ if JSVALUE64
+ loadq ThisArgumentOffset - 8 + profileArgSkip * 8[cfr, t0], t2
+ subp sizeof ValueProfile, t3
+ storeq t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets[t3]
+ else
+ loadi ThisArgumentOffset + TagOffset - 8 + profileArgSkip * 8[cfr, t0], t2
+ subp sizeof ValueProfile, t3
+ storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + TagOffset[t3]
+ loadi ThisArgumentOffset + PayloadOffset - 8 + profileArgSkip * 8[cfr, t0], t2
+ storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + PayloadOffset[t3]
+ end
+ baddpnz -8, t0, .argumentProfileLoop
+.argumentProfileDone:
+end
+
+macro allocateJSObject(allocator, structure, result, scratch1, slowCase)
+ const offsetOfFirstFreeCell =
+ MarkedAllocator::m_freeList +
+ MarkedBlock::FreeList::head
+
+ # Get the object from the free list.
+ loadp offsetOfFirstFreeCell[allocator], result
+ btpz result, slowCase
+
+ # Remove the object from the free list.
+ loadp [result], scratch1
+ storep scratch1, offsetOfFirstFreeCell[allocator]
+
+ # Initialize the object.
+ storep 0, JSObject::m_butterfly[result]
+ storeStructureWithTypeInfo(result, structure, scratch1)
+end
+
+macro doReturn()
+ restoreCallerPCAndCFR()
+ ret
+end
+
+# stub to call into JavaScript or Native functions
+# EncodedJSValue vmEntryToJavaScript(void* code, VM* vm, ProtoCallFrame* protoFrame)
+# EncodedJSValue vmEntryToNativeFunction(void* code, VM* vm, ProtoCallFrame* protoFrame)
+
+if C_LOOP
+ _llint_vm_entry_to_javascript:
+else
+ global _vmEntryToJavaScript
+ _vmEntryToJavaScript:
+end
+ doVMEntry(makeJavaScriptCall)
+
+
+if C_LOOP
+ _llint_vm_entry_to_native:
+else
+ global _vmEntryToNative
+ _vmEntryToNative:
+end
+ doVMEntry(makeHostFunctionCall)
+
+
+if not C_LOOP
+ # void sanitizeStackForVMImpl(VM* vm)
+ global _sanitizeStackForVMImpl
+ _sanitizeStackForVMImpl:
+ if X86_64
+ const vm = t4
+ const address = t1
+ const zeroValue = t0
+ elsif X86_64_WIN
+ const vm = t2
+ const address = t1
+ const zeroValue = t0
+ elsif X86 or X86_WIN
+ const vm = t2
+ const address = t1
+ const zeroValue = t0
+ else
+ const vm = a0
+ const address = t1
+ const zeroValue = t2
+ end
+
+ if X86 or X86_WIN
+ loadp 4[sp], vm
+ end
+
+ loadp VM::m_lastStackTop[vm], address
+ bpbeq sp, address, .zeroFillDone
+
+ move 0, zeroValue
+ .zeroFillLoop:
+ storep zeroValue, [address]
+ addp PtrSize, address
+ bpa sp, address, .zeroFillLoop
+
+ .zeroFillDone:
+ move sp, address
+ storep address, VM::m_lastStackTop[vm]
+ ret
+
+ # VMEntryRecord* vmEntryRecord(const VMEntryFrame* entryFrame)
+ global _vmEntryRecord
+ _vmEntryRecord:
+ if X86_64
+ const entryFrame = t4
+ const result = t0
+ elsif X86 or X86_WIN or X86_64_WIN
+ const entryFrame = t2
+ const result = t0
+ else
+ const entryFrame = a0
+ const result = t0
+ end
+
+ if X86 or X86_WIN
+ loadp 4[sp], entryFrame
+ end
+
+ vmEntryRecord(entryFrame, result)
+ ret
+end
+
+if C_LOOP
+ # Dummy entry point the C Loop uses to initialize.
+ _llint_entry:
+ crash()
+ else
+ macro initPCRelative(pcBase)
+ if X86_64 or X86_64_WIN
+ call _relativePCBase
+ _relativePCBase:
+ pop pcBase
+ elsif X86 or X86_WIN
+ call _relativePCBase
+ _relativePCBase:
+ pop pcBase
+ loadp 20[sp], t4
+ elsif ARM64
+ elsif ARMv7
+ _relativePCBase:
+ move pc, pcBase
+ subp 3, pcBase # Need to back up the PC and set the Thumb2 bit
+ elsif ARM or ARMv7_TRADITIONAL
+ _relativePCBase:
+ move pc, pcBase
+ subp 8, pcBase
+ elsif MIPS
+ la _relativePCBase, pcBase
+ _relativePCBase:
+ elsif SH4
+ mova _relativePCBase, t0
+ move t0, pcBase
+ alignformova
+ _relativePCBase:
+ end
+end
+
+macro setEntryAddress(index, label)
+ if X86_64
+ leap (label - _relativePCBase)[t1], t0
+ move index, t2
+ storep t0, [t4, t2, 8]
+ elsif X86_64_WIN
+ leap (label - _relativePCBase)[t1], t0
+ move index, t4
+ storep t0, [t2, t4, 8]
+ elsif X86 or X86_WIN
+ leap (label - _relativePCBase)[t1], t0
+ move index, t2
+ storep t0, [t4, t2, 4]
+ elsif ARM64
+ pcrtoaddr label, t1
+ move index, t2
+ storep t1, [a0, t2, 8]
+ elsif ARM or ARMv7 or ARMv7_TRADITIONAL
+ mvlbl (label - _relativePCBase), t2
+ addp t2, t1, t2
+ move index, t3
+ storep t2, [a0, t3, 4]
+ elsif SH4
+ move (label - _relativePCBase), t2
+ addp t2, t1, t2
+ move index, t3
+ storep t2, [a0, t3, 4]
+ flushcp # Force constant pool flush to avoid "pcrel too far" link error.
+ elsif MIPS
+ la label, t2
+ la _relativePCBase, t3
+ subp t3, t2
+ addp t2, t1, t2
+ move index, t3
+ storep t2, [a0, t3, 4]
+ end
+end
+
+global _llint_entry
+# Entry point for the llint to initialize.
+_llint_entry:
+ functionPrologue()
+ pushCalleeSaves()
+ initPCRelative(t1)
+
+ # Include generated bytecode initialization file.
+ include InitBytecodes
+
+ popCalleeSaves()
+ functionEpilogue()
+ ret
+end
+
+_llint_program_prologue:
+ prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
+ dispatch(0)
+
+
+_llint_eval_prologue:
+ prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
+ dispatch(0)
+
+
+_llint_function_for_call_prologue:
+ prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call)
+ functionInitialization(0)
+ dispatch(0)
+
+
+_llint_function_for_construct_prologue:
+ prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct)
+ functionInitialization(1)
+ dispatch(0)
+
+
+_llint_function_for_call_arity_check:
+ prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call)
+ functionArityCheck(.functionForCallBegin, _slow_path_call_arityCheck)
+.functionForCallBegin:
+ functionInitialization(0)
+ dispatch(0)
+
+
+_llint_function_for_construct_arity_check:
+ prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct)
+ functionArityCheck(.functionForConstructBegin, _slow_path_construct_arityCheck)
+.functionForConstructBegin:
+ functionInitialization(1)
+ dispatch(0)
+
+
+# Value-representation-specific code.
+if JSVALUE64
+ include LowLevelInterpreter64
+else
+ include LowLevelInterpreter32_64
+end
+
+
+# Value-representation-agnostic code.
+_llint_op_create_direct_arguments:
+ traceExecution()
+ callSlowPath(_slow_path_create_direct_arguments)
+ dispatch(2)
+
+
+_llint_op_create_scoped_arguments:
+ traceExecution()
+ callSlowPath(_slow_path_create_scoped_arguments)
+ dispatch(3)
+
+
+_llint_op_create_out_of_band_arguments:
+ traceExecution()
+ callSlowPath(_slow_path_create_out_of_band_arguments)
+ dispatch(2)
+
+
+_llint_op_new_func:
+ traceExecution()
+ callSlowPath(_llint_slow_path_new_func)
+ dispatch(4)
+
+
+_llint_op_new_array:
+ traceExecution()
+ callSlowPath(_llint_slow_path_new_array)
+ dispatch(5)
+
+
+_llint_op_new_array_with_size:
+ traceExecution()
+ callSlowPath(_llint_slow_path_new_array_with_size)
+ dispatch(4)
+
+
+_llint_op_new_array_buffer:
+ traceExecution()
+ callSlowPath(_llint_slow_path_new_array_buffer)
+ dispatch(5)
+
+
+_llint_op_new_regexp:
+ traceExecution()
+ callSlowPath(_llint_slow_path_new_regexp)
+ dispatch(3)
+
+
+_llint_op_less:
+ traceExecution()
+ callSlowPath(_slow_path_less)
+ dispatch(4)
+
+
+_llint_op_lesseq:
+ traceExecution()
+ callSlowPath(_slow_path_lesseq)
+ dispatch(4)
+
+
+_llint_op_greater:
+ traceExecution()
+ callSlowPath(_slow_path_greater)
+ dispatch(4)
+
+
+_llint_op_greatereq:
+ traceExecution()
+ callSlowPath(_slow_path_greatereq)
+ dispatch(4)
+
+
+_llint_op_mod:
+ traceExecution()
+ callSlowPath(_slow_path_mod)
+ dispatch(4)
+
+
+_llint_op_typeof:
+ traceExecution()
+ callSlowPath(_slow_path_typeof)
+ dispatch(3)
+
+
+_llint_op_is_object_or_null:
+ traceExecution()
+ callSlowPath(_slow_path_is_object_or_null)
+ dispatch(3)
+
+_llint_op_is_function:
+ traceExecution()
+ callSlowPath(_slow_path_is_function)
+ dispatch(3)
+
+
+_llint_op_in:
+ traceExecution()
+ callSlowPath(_slow_path_in)
+ dispatch(4)
+
+macro withInlineStorage(object, propertyStorage, continuation)
+ # Indicate that the object is the property storage, and that the
+ # property storage register is unused.
+ continuation(object, propertyStorage)
+end
+
+macro withOutOfLineStorage(object, propertyStorage, continuation)
+ loadp JSObject::m_butterfly[object], propertyStorage
+ # Indicate that the propertyStorage register now points to the
+ # property storage, and that the object register may be reused
+ # if the object pointer is not needed anymore.
+ continuation(propertyStorage, object)
+end
+
+
+_llint_op_del_by_id:
+ traceExecution()
+ callSlowPath(_llint_slow_path_del_by_id)
+ dispatch(4)
+
+
+_llint_op_del_by_val:
+ traceExecution()
+ callSlowPath(_llint_slow_path_del_by_val)
+ dispatch(4)
+
+
+_llint_op_put_by_index:
+ traceExecution()
+ callSlowPath(_llint_slow_path_put_by_index)
+ dispatch(4)
+
+
+_llint_op_put_getter_by_id:
+ traceExecution()
+ callSlowPath(_llint_slow_path_put_getter_by_id)
+ dispatch(4)
+
+
+_llint_op_put_setter_by_id:
+ traceExecution()
+ callSlowPath(_llint_slow_path_put_setter_by_id)
+ dispatch(4)
+
+
+_llint_op_put_getter_setter:
+ traceExecution()
+ callSlowPath(_llint_slow_path_put_getter_setter)
+ dispatch(5)
+
+
+_llint_op_jtrue:
+ traceExecution()
+ jumpTrueOrFalse(
+ macro (value, target) btinz value, target end,
+ _llint_slow_path_jtrue)
+
+
+_llint_op_jfalse:
+ traceExecution()
+ jumpTrueOrFalse(
+ macro (value, target) btiz value, target end,
+ _llint_slow_path_jfalse)
+
+
+_llint_op_jless:
+ traceExecution()
+ compare(
+ macro (left, right, target) bilt left, right, target end,
+ macro (left, right, target) bdlt left, right, target end,
+ _llint_slow_path_jless)
+
+
+_llint_op_jnless:
+ traceExecution()
+ compare(
+ macro (left, right, target) bigteq left, right, target end,
+ macro (left, right, target) bdgtequn left, right, target end,
+ _llint_slow_path_jnless)
+
+
+_llint_op_jgreater:
+ traceExecution()
+ compare(
+ macro (left, right, target) bigt left, right, target end,
+ macro (left, right, target) bdgt left, right, target end,
+ _llint_slow_path_jgreater)
+
+
+_llint_op_jngreater:
+ traceExecution()
+ compare(
+ macro (left, right, target) bilteq left, right, target end,
+ macro (left, right, target) bdltequn left, right, target end,
+ _llint_slow_path_jngreater)
+
+
+_llint_op_jlesseq:
+ traceExecution()
+ compare(
+ macro (left, right, target) bilteq left, right, target end,
+ macro (left, right, target) bdlteq left, right, target end,
+ _llint_slow_path_jlesseq)
+
+
+_llint_op_jnlesseq:
+ traceExecution()
+ compare(
+ macro (left, right, target) bigt left, right, target end,
+ macro (left, right, target) bdgtun left, right, target end,
+ _llint_slow_path_jnlesseq)
+
+
+_llint_op_jgreatereq:
+ traceExecution()
+ compare(
+ macro (left, right, target) bigteq left, right, target end,
+ macro (left, right, target) bdgteq left, right, target end,
+ _llint_slow_path_jgreatereq)
+
+
+_llint_op_jngreatereq:
+ traceExecution()
+ compare(
+ macro (left, right, target) bilt left, right, target end,
+ macro (left, right, target) bdltun left, right, target end,
+ _llint_slow_path_jngreatereq)
+
+
+_llint_op_loop_hint:
+ traceExecution()
+ loadp CodeBlock[cfr], t1
+ loadp CodeBlock::m_vm[t1], t1
+ loadp VM::watchdog[t1], t0
+ btpnz t0, .handleWatchdogTimer
+.afterWatchdogTimerCheck:
+ checkSwitchToJITForLoop()
+ dispatch(1)
+.handleWatchdogTimer:
+ loadb Watchdog::m_timerDidFire[t0], t0
+ btbz t0, .afterWatchdogTimerCheck
+ callWatchdogTimerHandler(.throwHandler)
+ jmp .afterWatchdogTimerCheck
+.throwHandler:
+ jmp _llint_throw_from_slow_path_trampoline
+
+_llint_op_switch_string:
+ traceExecution()
+ callSlowPath(_llint_slow_path_switch_string)
+ dispatch(0)
+
+
+_llint_op_new_func_exp:
+ traceExecution()
+ callSlowPath(_llint_slow_path_new_func_exp)
+ dispatch(4)
+
+
+_llint_op_call:
+ traceExecution()
+ arrayProfileForCall()
+ doCall(_llint_slow_path_call)
+
+
+_llint_op_construct:
+ traceExecution()
+ doCall(_llint_slow_path_construct)
+
+
+_llint_op_call_varargs:
+ traceExecution()
+ callSlowPath(_llint_slow_path_size_frame_for_varargs)
+ branchIfException(_llint_throw_from_slow_path_trampoline)
+ # calleeFrame in t1
+ if JSVALUE64
+ move t1, sp
+ else
+ # The calleeFrame is not stack aligned, move down by CallerFrameAndPCSize to align
+ if ARMv7
+ subp t1, CallerFrameAndPCSize, t2
+ move t2, sp
+ else
+ subp t1, CallerFrameAndPCSize, sp
+ end
+ end
+ slowPathForCall(_llint_slow_path_call_varargs)
+
+_llint_op_construct_varargs:
+ traceExecution()
+ callSlowPath(_llint_slow_path_size_frame_for_varargs)
+ branchIfException(_llint_throw_from_slow_path_trampoline)
+ # calleeFrame in t1
+ if JSVALUE64
+ move t1, sp
+ else
+ # The calleeFrame is not stack aligned, move down by CallerFrameAndPCSize to align
+ if ARMv7
+ subp t1, CallerFrameAndPCSize, t2
+ move t2, sp
+ else
+ subp t1, CallerFrameAndPCSize, sp
+ end
+ end
+ slowPathForCall(_llint_slow_path_construct_varargs)
+
+
+_llint_op_call_eval:
+ traceExecution()
+
+ # Eval is executed in one of two modes:
+ #
+ # 1) We find that we're really invoking eval() in which case the
+ # execution is perfomed entirely inside the slow_path, and it
+ # returns the PC of a function that just returns the return value
+ # that the eval returned.
+ #
+ # 2) We find that we're invoking something called eval() that is not
+ # the real eval. Then the slow_path returns the PC of the thing to
+ # call, and we call it.
+ #
+ # This allows us to handle two cases, which would require a total of
+ # up to four pieces of state that cannot be easily packed into two
+ # registers (C functions can return up to two registers, easily):
+ #
+ # - The call frame register. This may or may not have been modified
+ # by the slow_path, but the convention is that it returns it. It's not
+ # totally clear if that's necessary, since the cfr is callee save.
+ # But that's our style in this here interpreter so we stick with it.
+ #
+ # - A bit to say if the slow_path successfully executed the eval and has
+ # the return value, or did not execute the eval but has a PC for us
+ # to call.
+ #
+ # - Either:
+ # - The JS return value (two registers), or
+ #
+ # - The PC to call.
+ #
+ # It turns out to be easier to just always have this return the cfr
+ # and a PC to call, and that PC may be a dummy thunk that just
+ # returns the JS value that the eval returned.
+
+ slowPathForCall(_llint_slow_path_call_eval)
+
+
+_llint_generic_return_point:
+ dispatchAfterCall()
+
+
+_llint_op_strcat:
+ traceExecution()
+ callSlowPath(_slow_path_strcat)
+ dispatch(4)
+
+
+_llint_op_push_with_scope:
+ traceExecution()
+ callSlowPath(_slow_path_push_with_scope)
+ dispatch(4)
+
+
+_llint_op_create_lexical_environment:
+ traceExecution()
+ callSlowPath(_slow_path_create_lexical_environment)
+ dispatch(5)
+
+
+_llint_op_throw:
+ traceExecution()
+ callSlowPath(_llint_slow_path_throw)
+ dispatch(2)
+
+
+_llint_op_throw_static_error:
+ traceExecution()
+ callSlowPath(_llint_slow_path_throw_static_error)
+ dispatch(3)
+
+
+_llint_op_profile_will_call:
+ traceExecution()
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_vm[t0], t0
+ loadi VM::m_enabledProfiler[t0], t0
+ btpz t0, .opProfilerWillCallDone
+ callSlowPath(_llint_slow_path_profile_will_call)
+.opProfilerWillCallDone:
+ dispatch(2)
+
+
+_llint_op_profile_did_call:
+ traceExecution()
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_vm[t0], t0
+ loadi VM::m_enabledProfiler[t0], t0
+ btpz t0, .opProfilerDidCallDone
+ callSlowPath(_llint_slow_path_profile_did_call)
+.opProfilerDidCallDone:
+ dispatch(2)
+
+
+_llint_op_debug:
+ traceExecution()
+ loadp CodeBlock[cfr], t0
+ loadi CodeBlock::m_debuggerRequests[t0], t0
+ btiz t0, .opDebugDone
+ callSlowPath(_llint_slow_path_debug)
+.opDebugDone:
+ dispatch(3)
+
+
+_llint_native_call_trampoline:
+ nativeCallTrampoline(NativeExecutable::m_function)
+
+
+_llint_native_construct_trampoline:
+ nativeCallTrampoline(NativeExecutable::m_constructor)
+
+_llint_op_get_enumerable_length:
+ traceExecution()
+ callSlowPath(_slow_path_get_enumerable_length)
+ dispatch(3)
+
+_llint_op_has_indexed_property:
+ traceExecution()
+ callSlowPath(_slow_path_has_indexed_property)
+ dispatch(5)
+
+_llint_op_has_structure_property:
+ traceExecution()
+ callSlowPath(_slow_path_has_structure_property)
+ dispatch(5)
+
+_llint_op_has_generic_property:
+ traceExecution()
+ callSlowPath(_slow_path_has_generic_property)
+ dispatch(4)
+
+_llint_op_get_direct_pname:
+ traceExecution()
+ callSlowPath(_slow_path_get_direct_pname)
+ dispatch(7)
+
+_llint_op_get_property_enumerator:
+ traceExecution()
+ callSlowPath(_slow_path_get_property_enumerator)
+ dispatch(3)
+
+_llint_op_enumerator_structure_pname:
+ traceExecution()
+ callSlowPath(_slow_path_next_structure_enumerator_pname)
+ dispatch(4)
+
+_llint_op_enumerator_generic_pname:
+ traceExecution()
+ callSlowPath(_slow_path_next_generic_enumerator_pname)
+ dispatch(4)
+
+_llint_op_to_index_string:
+ traceExecution()
+ callSlowPath(_slow_path_to_index_string)
+ dispatch(3)
+
+_llint_op_profile_control_flow:
+ traceExecution()
+ loadpFromInstruction(1, t0)
+ storeb 1, BasicBlockLocation::m_hasExecuted[t0]
+ dispatch(2)
+
+# Lastly, make sure that we can link even though we don't support all opcodes.
+# These opcodes should never arise when using LLInt or either JIT. We assert
+# as much.
+
+macro notSupported()
+ if ASSERT_ENABLED
+ crash()
+ else
+ # We should use whatever the smallest possible instruction is, just to
+ # ensure that there is a gap between instruction labels. If multiple
+ # smallest instructions exist, we should pick the one that is most
+ # likely result in execution being halted. Currently that is the break
+ # instruction on all architectures we're interested in. (Break is int3
+ # on Intel, which is 1 byte, and bkpt on ARMv7, which is 2 bytes.)
+ break
+ end
+end
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
new file mode 100644
index 000000000..72bcddf57
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
@@ -0,0 +1,529 @@
+/*
+ * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LowLevelInterpreter.h"
+#include "LLIntOfflineAsmConfig.h"
+#include <wtf/InlineASM.h>
+
+#if !ENABLE(JIT)
+#include "CodeBlock.h"
+#include "CommonSlowPaths.h"
+#include "LLIntCLoop.h"
+#include "LLIntSlowPaths.h"
+#include "JSCInlines.h"
+#include <wtf/Assertions.h>
+#include <wtf/MathExtras.h>
+
+using namespace JSC::LLInt;
+
+// LLInt C Loop opcodes
+// ====================
+// In the implementation of the C loop, the LLint trampoline glue functions
+// (e.g. llint_program_prologue, llint_eval_prologue, etc) are addressed as
+// if they are bytecode handlers. That means the names of the trampoline
+// functions will be added to the OpcodeID list via the
+// FOR_EACH_LLINT_OPCODE_EXTENSION() macro that FOR_EACH_OPCODE_ID()
+// includes.
+//
+// In addition, some JIT trampoline functions which are needed by LLInt
+// (e.g. getHostCallReturnValue, ctiOpThrowNotCaught) are also added as
+// bytecodes, and the CLoop will provide bytecode handlers for them.
+//
+// In the CLoop, we can only dispatch indirectly to these bytecodes
+// (including the LLInt and JIT extensions). All other dispatches
+// (i.e. goto's) must be to a known label (i.e. local / global labels).
+
+
+// How are the opcodes named?
+// ==========================
+// Here is a table to show examples of how each of the manifestation of the
+// opcodes are named:
+//
+// Type: Opcode Trampoline Glue
+// ====== ===============
+// [In the llint .asm files]
+// llint labels: llint_op_enter llint_program_prologue
+//
+// OpcodeID: op_enter llint_program
+// [in Opcode.h] [in LLIntOpcode.h]
+//
+// When using a switch statement dispatch in the CLoop, each "opcode" is
+// a case statement:
+// Opcode: case op_enter: case llint_program_prologue:
+//
+// When using a computed goto dispatch in the CLoop, each opcode is a label:
+// Opcode: op_enter: llint_program_prologue:
+
+
+//============================================================================
+// Define the opcode dispatch mechanism when using the C loop:
+//
+
+// These are for building a C Loop interpreter:
+#define OFFLINE_ASM_BEGIN
+#define OFFLINE_ASM_END
+
+#if ENABLE(OPCODE_TRACING)
+#define TRACE_OPCODE(opcode) dataLogF(" op %s\n", #opcode)
+#else
+#define TRACE_OPCODE(opcode)
+#endif
+
+// To keep compilers happy in case of unused labels, force usage of the label:
+#define USE_LABEL(label) \
+ do { \
+ if (false) \
+ goto label; \
+ } while (false)
+
+#define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode) USE_LABEL(opcode); TRACE_OPCODE(opcode);
+
+#define OFFLINE_ASM_GLOBAL_LABEL(label) OFFLINE_ASM_GLUE_LABEL(label)
+
+#if ENABLE(COMPUTED_GOTO_OPCODES)
+#define OFFLINE_ASM_GLUE_LABEL(label) label: USE_LABEL(label);
+#else
+#define OFFLINE_ASM_GLUE_LABEL(label) case label: label: USE_LABEL(label);
+#endif
+
+#define OFFLINE_ASM_LOCAL_LABEL(label) label: USE_LABEL(label);
+
+
+//============================================================================
+// Some utilities:
+//
+
+namespace JSC {
+namespace LLInt {
+
+#if USE(JSVALUE32_64)
+static double Ints2Double(uint32_t lo, uint32_t hi)
+{
+ union {
+ double dval;
+ uint64_t ival64;
+ } u;
+ u.ival64 = (static_cast<uint64_t>(hi) << 32) | lo;
+ return u.dval;
+}
+
+static void Double2Ints(double val, uint32_t& lo, uint32_t& hi)
+{
+ union {
+ double dval;
+ uint64_t ival64;
+ } u;
+ u.dval = val;
+ hi = static_cast<uint32_t>(u.ival64 >> 32);
+ lo = static_cast<uint32_t>(u.ival64);
+}
+#endif // USE(JSVALUE32_64)
+
+} // namespace LLint
+
+
+//============================================================================
+// CLoopRegister is the storage for an emulated CPU register.
+// It defines the policy of how ints smaller than intptr_t are packed into the
+// pseudo register, as well as hides endianness differences.
+
+struct CLoopRegister {
+ CLoopRegister() { i = static_cast<intptr_t>(0xbadbeef0baddbeef); }
+ union {
+ intptr_t i;
+ uintptr_t u;
+#if USE(JSVALUE64)
+#if CPU(BIG_ENDIAN)
+ struct {
+ int32_t i32padding;
+ int32_t i32;
+ };
+ struct {
+ uint32_t u32padding;
+ uint32_t u32;
+ };
+ struct {
+ int8_t i8padding[7];
+ int8_t i8;
+ };
+ struct {
+ uint8_t u8padding[7];
+ uint8_t u8;
+ };
+#else // !CPU(BIG_ENDIAN)
+ struct {
+ int32_t i32;
+ int32_t i32padding;
+ };
+ struct {
+ uint32_t u32;
+ uint32_t u32padding;
+ };
+ struct {
+ int8_t i8;
+ int8_t i8padding[7];
+ };
+ struct {
+ uint8_t u8;
+ uint8_t u8padding[7];
+ };
+#endif // !CPU(BIG_ENDIAN)
+#else // !USE(JSVALUE64)
+ int32_t i32;
+ uint32_t u32;
+
+#if CPU(BIG_ENDIAN)
+ struct {
+ int8_t i8padding[3];
+ int8_t i8;
+ };
+ struct {
+ uint8_t u8padding[3];
+ uint8_t u8;
+ };
+
+#else // !CPU(BIG_ENDIAN)
+ struct {
+ int8_t i8;
+ int8_t i8padding[3];
+ };
+ struct {
+ uint8_t u8;
+ uint8_t u8padding[3];
+ };
+#endif // !CPU(BIG_ENDIAN)
+#endif // !USE(JSVALUE64)
+
+ intptr_t* ip;
+ int8_t* i8p;
+ void* vp;
+ CallFrame* callFrame;
+ ExecState* execState;
+ void* instruction;
+ VM* vm;
+ JSCell* cell;
+ ProtoCallFrame* protoCallFrame;
+ NativeFunction nativeFunc;
+#if USE(JSVALUE64)
+ int64_t i64;
+ uint64_t u64;
+ EncodedJSValue encodedJSValue;
+ double castToDouble;
+#endif
+ Opcode opcode;
+ };
+
+ operator ExecState*() { return execState; }
+ operator Instruction*() { return reinterpret_cast<Instruction*>(instruction); }
+ operator VM*() { return vm; }
+ operator ProtoCallFrame*() { return protoCallFrame; }
+ operator Register*() { return reinterpret_cast<Register*>(vp); }
+ operator JSCell*() { return cell; }
+
+#if USE(JSVALUE64)
+ inline void clearHighWord() { i32padding = 0; }
+#else
+ inline void clearHighWord() { }
+#endif
+};
+
+//============================================================================
+// The llint C++ interpreter loop:
+//
+
+JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, ProtoCallFrame* protoCallFrame, bool isInitializationPass)
+{
+ #define CAST reinterpret_cast
+ #define SIGN_BIT32(x) ((x) & 0x80000000)
+
+ // One-time initialization of our address tables. We have to put this code
+ // here because our labels are only in scope inside this function. The
+ // caller (or one of its ancestors) is responsible for ensuring that this
+ // is only called once during the initialization of the VM before threads
+ // are at play.
+ if (UNLIKELY(isInitializationPass)) {
+#if ENABLE(COMPUTED_GOTO_OPCODES)
+ Opcode* opcodeMap = LLInt::opcodeMap();
+ #define OPCODE_ENTRY(__opcode, length) \
+ opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);
+ FOR_EACH_OPCODE_ID(OPCODE_ENTRY)
+ #undef OPCODE_ENTRY
+
+ #define LLINT_OPCODE_ENTRY(__opcode, length) \
+ opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);
+
+ FOR_EACH_LLINT_NATIVE_HELPER(LLINT_OPCODE_ENTRY)
+ #undef LLINT_OPCODE_ENTRY
+#endif
+ // Note: we can only set the exceptionInstructions after we have
+ // initialized the opcodeMap above. This is because getCodePtr()
+ // can depend on the opcodeMap.
+ Instruction* exceptionInstructions = LLInt::exceptionInstructions();
+ for (int i = 0; i < maxOpcodeLength + 1; ++i)
+ exceptionInstructions[i].u.pointer =
+ LLInt::getCodePtr(llint_throw_from_slow_path_trampoline);
+
+ return JSValue();
+ }
+
+ // Define the pseudo registers used by the LLINT C Loop backend:
+ ASSERT(sizeof(CLoopRegister) == sizeof(intptr_t));
+
+ union CLoopDoubleRegister {
+ double d;
+#if USE(JSVALUE64)
+ int64_t castToInt64;
+#endif
+ };
+
+ // The CLoop llint backend is initially based on the ARMv7 backend, and
+ // then further enhanced with a few instructions from the x86 backend to
+ // support building for X64 targets. Hence, the shape of the generated
+ // code and the usage convention of registers will look a lot like the
+ // ARMv7 backend's.
+ //
+ // For example, on a 32-bit build:
+ // 1. Outgoing args will be set up as follows:
+ // arg1 in t0 (r0 on ARM)
+ // arg2 in t1 (r1 on ARM)
+ // 2. 32 bit return values will be in t0 (r0 on ARM).
+ // 3. 64 bit return values (e.g. doubles) will be in t0,t1 (r0,r1 on ARM).
+ //
+ // But instead of naming these simulator registers based on their ARM
+ // counterparts, we'll name them based on their original llint asm names.
+ // This will make it easier to correlate the generated code with the
+ // original llint asm code.
+ //
+ // On a 64-bit build, it more like x64 in that the registers are 64 bit.
+ // Hence:
+ // 1. Outgoing args are still the same: arg1 in t0, arg2 in t1, etc.
+ // 2. 32 bit result values will be in the low 32-bit of t0.
+ // 3. 64 bit result values will be in t0.
+
+ CLoopRegister t0, t1, t2, t3, t5, t7, sp, cfr, lr, pc;
+#if USE(JSVALUE64)
+ CLoopRegister pcBase, tagTypeNumber, tagMask;
+#endif
+ CLoopDoubleRegister d0, d1;
+
+ lr.opcode = getOpcode(llint_return_to_host);
+ sp.vp = vm->interpreter->stack().topOfStack() + 1;
+ cfr.callFrame = vm->topCallFrame;
+#ifndef NDEBUG
+ void* startSP = sp.vp;
+ CallFrame* startCFR = cfr.callFrame;
+#endif
+
+ // Initialize the incoming args for doVMEntryToJavaScript:
+ t0.vp = executableAddress;
+ t1.vm = vm;
+ t2.protoCallFrame = protoCallFrame;
+
+#if USE(JSVALUE64)
+ // For the ASM llint, JITStubs takes care of this initialization. We do
+ // it explicitly here for the C loop:
+ tagTypeNumber.i = 0xFFFF000000000000;
+ tagMask.i = 0xFFFF000000000002;
+#endif // USE(JSVALUE64)
+
+ // Interpreter variables for value passing between opcodes and/or helpers:
+ NativeFunction nativeFunc = 0;
+ JSValue functionReturnValue;
+ Opcode opcode = getOpcode(entryOpcodeID);
+
+#define PUSH(cloopReg) \
+ do { \
+ sp.ip--; \
+ *sp.ip = cloopReg.i; \
+ } while (false)
+
+#define POP(cloopReg) \
+ do { \
+ cloopReg.i = *sp.ip; \
+ sp.ip++; \
+ } while (false)
+
+#if ENABLE(OPCODE_STATS)
+#define RECORD_OPCODE_STATS(__opcode) OpcodeStats::recordInstruction(__opcode)
+#else
+#define RECORD_OPCODE_STATS(__opcode)
+#endif
+
+#if USE(JSVALUE32_64)
+#define FETCH_OPCODE() pc.opcode
+#else // USE(JSVALUE64)
+#define FETCH_OPCODE() *bitwise_cast<Opcode*>(pcBase.i8p + pc.i * 8)
+#endif // USE(JSVALUE64)
+
+#define NEXT_INSTRUCTION() \
+ do { \
+ opcode = FETCH_OPCODE(); \
+ DISPATCH_OPCODE(); \
+ } while (false)
+
+#if ENABLE(COMPUTED_GOTO_OPCODES)
+
+ //========================================================================
+ // Loop dispatch mechanism using computed goto statements:
+
+ #define DISPATCH_OPCODE() goto *opcode
+
+ #define DEFINE_OPCODE(__opcode) \
+ __opcode: \
+ RECORD_OPCODE_STATS(__opcode);
+
+ // Dispatch to the current PC's bytecode:
+ DISPATCH_OPCODE();
+
+#else // !ENABLE(COMPUTED_GOTO_OPCODES)
+ //========================================================================
+ // Loop dispatch mechanism using a C switch statement:
+
+ #define DISPATCH_OPCODE() goto dispatchOpcode
+
+ #define DEFINE_OPCODE(__opcode) \
+ case __opcode: \
+ __opcode: \
+ RECORD_OPCODE_STATS(__opcode);
+
+ // Dispatch to the current PC's bytecode:
+ dispatchOpcode:
+ switch (opcode)
+
+#endif // !ENABLE(COMPUTED_GOTO_OPCODES)
+
+ //========================================================================
+ // Bytecode handlers:
+ {
+ // This is the file generated by offlineasm, which contains all of the
+ // bytecode handlers for the interpreter, as compiled from
+ // LowLevelInterpreter.asm and its peers.
+
+ #include "LLIntAssembly.h"
+
+ OFFLINE_ASM_GLUE_LABEL(llint_return_to_host)
+ {
+ ASSERT(startSP == sp.vp);
+ ASSERT(startCFR == cfr.callFrame);
+#if USE(JSVALUE32_64)
+ return JSValue(t1.i, t0.i); // returning JSValue(tag, payload);
+#else
+ return JSValue::decode(t0.encodedJSValue);
+#endif
+ }
+
+ // In the ASM llint, getHostCallReturnValue() is a piece of glue
+ // function provided by the JIT (see jit/JITOperations.cpp).
+ // We simulate it here with a pseduo-opcode handler.
+ OFFLINE_ASM_GLUE_LABEL(getHostCallReturnValue)
+ {
+ // The part in getHostCallReturnValueWithExecState():
+ JSValue result = vm->hostCallReturnValue;
+#if USE(JSVALUE32_64)
+ t1.i = result.tag();
+ t0.i = result.payload();
+#else
+ t0.encodedJSValue = JSValue::encode(result);
+#endif
+ opcode = lr.opcode;
+ DISPATCH_OPCODE();
+ }
+
+#if !ENABLE(COMPUTED_GOTO_OPCODES)
+ default:
+ ASSERT(false);
+#endif
+
+ } // END bytecode handler cases.
+
+#if ENABLE(COMPUTED_GOTO_OPCODES)
+ // Keep the compiler happy so that it doesn't complain about unused
+ // labels for the LLInt trampoline glue. The labels are automatically
+ // emitted by label macros above, and some of them are referenced by
+ // the llint generated code. Since we can't tell ahead of time which
+ // will be referenced and which will be not, we'll just passify the
+ // compiler on all such labels:
+ #define LLINT_OPCODE_ENTRY(__opcode, length) \
+ UNUSED_LABEL(__opcode);
+ FOR_EACH_OPCODE_ID(LLINT_OPCODE_ENTRY);
+ #undef LLINT_OPCODE_ENTRY
+#endif
+
+ #undef NEXT_INSTRUCTION
+ #undef DEFINE_OPCODE
+ #undef CHECK_FOR_TIMEOUT
+ #undef CAST
+ #undef SIGN_BIT32
+
+ return JSValue(); // to suppress a compiler warning.
+} // Interpreter::llintCLoopExecute()
+
+} // namespace JSC
+
+#elif !OS(WINDOWS)
+
+//============================================================================
+// Define the opcode dispatch mechanism when using an ASM loop:
+//
+
+// These are for building an interpreter from generated assembly code:
+#define OFFLINE_ASM_BEGIN asm (
+#define OFFLINE_ASM_END );
+
+#define OFFLINE_ASM_OPCODE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(llint_##__opcode)
+#define OFFLINE_ASM_GLUE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(__opcode)
+
+#if CPU(ARM_THUMB2)
+#define OFFLINE_ASM_GLOBAL_LABEL(label) \
+ ".text\n" \
+ ".align 4\n" \
+ ".globl " SYMBOL_STRING(label) "\n" \
+ HIDE_SYMBOL(label) "\n" \
+ ".thumb\n" \
+ ".thumb_func " THUMB_FUNC_PARAM(label) "\n" \
+ SYMBOL_STRING(label) ":\n"
+#elif CPU(ARM64)
+#define OFFLINE_ASM_GLOBAL_LABEL(label) \
+ ".text\n" \
+ ".align 4\n" \
+ ".globl " SYMBOL_STRING(label) "\n" \
+ HIDE_SYMBOL(label) "\n" \
+ SYMBOL_STRING(label) ":\n"
+#else
+#define OFFLINE_ASM_GLOBAL_LABEL(label) \
+ ".text\n" \
+ ".globl " SYMBOL_STRING(label) "\n" \
+ HIDE_SYMBOL(label) "\n" \
+ SYMBOL_STRING(label) ":\n"
+#endif
+
+#define OFFLINE_ASM_LOCAL_LABEL(label) LOCAL_LABEL_STRING(label) ":\n"
+
+// This is a file generated by offlineasm, which contains all of the assembly code
+// for the interpreter, as compiled from LowLevelInterpreter.asm.
+#include "LLIntAssembly.h"
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.h b/Source/JavaScriptCore/llint/LowLevelInterpreter.h
new file mode 100644
index 000000000..8621dbd5a
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LowLevelInterpreter_h
+#define LowLevelInterpreter_h
+
+#include "Opcode.h"
+
+#if !ENABLE(JIT)
+
+namespace JSC {
+
+// The following is a set of alias for the opcode names. This is needed
+// because there is code (e.g. in GetByIdStatus.cpp and PutByIdStatus.cpp)
+// which refers to the opcodes expecting them to be prefixed with "llint_".
+// In the CLoop implementation, the 2 are equivalent. Hence, we set up this
+// alias here.
+
+#define LLINT_OPCODE_ALIAS(opcode, length) \
+ const OpcodeID llint_##opcode = opcode;
+FOR_EACH_CORE_OPCODE_ID(LLINT_OPCODE_ALIAS)
+#undef LLINT_OPCODE_ALIAS
+
+} // namespace JSC
+
+#endif // !ENABLE(JIT)
+
+#endif // LowLevelInterpreter_h
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
new file mode 100644
index 000000000..e90ecedfa
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
@@ -0,0 +1,2375 @@
+# Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+
+# Crash course on the language that this is written in (which I just call
+# "assembly" even though it's more than that):
+#
+# - Mostly gas-style operand ordering. The last operand tends to be the
+# destination. So "a := b" is written as "mov b, a". But unlike gas,
+# comparisons are in-order, so "if (a < b)" is written as
+# "bilt a, b, ...".
+#
+# - "b" = byte, "h" = 16-bit word, "i" = 32-bit word, "p" = pointer.
+# Currently this is just 32-bit so "i" and "p" are interchangeable
+# except when an op supports one but not the other.
+#
+# - In general, valid operands for macro invocations and instructions are
+# registers (eg "t0"), addresses (eg "4[t0]"), base-index addresses
+# (eg "7[t0, t1, 2]"), absolute addresses (eg "0xa0000000[]"), or labels
+# (eg "_foo" or ".foo"). Macro invocations can also take anonymous
+# macros as operands. Instructions cannot take anonymous macros.
+#
+# - Labels must have names that begin with either "_" or ".". A "." label
+# is local and gets renamed before code gen to minimize namespace
+# pollution. A "_" label is an extern symbol (i.e. ".globl"). The "_"
+# may or may not be removed during code gen depending on whether the asm
+# conventions for C name mangling on the target platform mandate a "_"
+# prefix.
+#
+# - A "macro" is a lambda expression, which may be either anonymous or
+# named. But this has caveats. "macro" can take zero or more arguments,
+# which may be macros or any valid operands, but it can only return
+# code. But you can do Turing-complete things via continuation passing
+# style: "macro foo (a, b) b(a) end foo(foo, foo)". Actually, don't do
+# that, since you'll just crash the assembler.
+#
+# - An "if" is a conditional on settings. Any identifier supplied in the
+# predicate of an "if" is assumed to be a #define that is available
+# during code gen. So you can't use "if" for computation in a macro, but
+# you can use it to select different pieces of code for different
+# platforms.
+#
+# - Arguments to macros follow lexical scoping rather than dynamic scoping.
+# Const's also follow lexical scoping and may override (hide) arguments
+# or other consts. All variables (arguments and constants) can be bound
+# to operands. Additionally, arguments (but not constants) can be bound
+# to macros.
+
+
+# Below we have a bunch of constant declarations. Each constant must have
+# a corresponding ASSERT() in LLIntData.cpp.
+
+# Utilities
+# Advance PC by "advance" instruction-stream slots (4 bytes each on this
+# 32-bit port) and jump to the opcode pointer stored at the new PC
+# (direct-threaded dispatch).
+macro dispatch(advance)
+    addp advance * 4, PC
+    jmp [PC]
+end
+
+# Branch dispatch: pcOffset is a signed offset in instruction slots.
+# Clobbers pcOffset (shifted left by 2 to convert slots -> bytes).
+macro dispatchBranchWithOffset(pcOffset)
+    lshifti 2, pcOffset
+    addp pcOffset, PC
+    jmp [PC]
+end
+
+# Load a branch-target operand from the instruction stream (pcOffset is an
+# address expression) into t0 and dispatch to it. Clobbers t0.
+macro dispatchBranch(pcOffset)
+    loadi pcOffset, t0
+    dispatchBranchWithOffset(t0)
+end
+
+# Resume the interpreter after a JS call returned. PC was stashed in the
+# tag half of the ArgumentCount header slot before the call; the callee's
+# return value is in t1 (tag) / t0 (payload). Stores it into the call
+# opcode's dst operand (slot 1), profiles it, and dispatches past the call.
+macro dispatchAfterCall()
+    loadi ArgumentCount + TagOffset[cfr], PC
+    loadi 4[PC], t2
+    storei t1, TagOffset[cfr, t2, 8]
+    storei t0, PayloadOffset[cfr, t2, 8]
+    valueProfile(t1, t0, 4 * (CallOpCodeSize - 1), t3)
+    dispatch(CallOpCodeSize)
+end
+
+# Call a two-argument C function per the platform ABI. Results come back
+# in t0 (and t1 where the callee returns a pair).
+macro cCall2(function, arg1, arg2)
+    if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
+        # Register-argument ABIs: args go in a0/a1.
+        move arg1, a0
+        move arg2, a1
+        call function
+    elsif X86 or X86_WIN
+        # cdecl: args on the stack. subp 8 + two 4-byte pushes keeps the
+        # stack 16-byte aligned at the call; addp 16 pops everything.
+        subp 8, sp
+        push arg2
+        push arg1
+        call function
+        addp 16, sp
+    elsif SH4
+        setargs arg1, arg2
+        call function
+    elsif C_LOOP
+        cloopCallSlowPath function, arg1, arg2
+    else
+        error
+    end
+end
+
+# Like cCall2 but for void-returning functions. Only the C loop needs a
+# distinct primitive (no return value to marshal); native ports just reuse
+# cCall2.
+macro cCall2Void(function, arg1, arg2)
+    if C_LOOP
+        cloopCallSlowPathVoid function, arg1, arg2
+    else
+        cCall2(function, arg1, arg2)
+    end
+end
+
+# This barely works. arg3 and arg4 should probably be immediates.
+# Four-argument C call; used only by the debug tracing helpers below.
+# Not implemented for the C loop.
+macro cCall4(function, arg1, arg2, arg3, arg4)
+    if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
+        move arg1, a0
+        move arg2, a1
+        move arg3, a2
+        move arg4, a3
+        call function
+    elsif X86 or X86_WIN
+        # cdecl: push right-to-left, caller pops (addp 16 = 4 args * 4 bytes).
+        push arg4
+        push arg3
+        push arg2
+        push arg1
+        call function
+        addp 16, sp
+    elsif SH4
+        setargs arg1, arg2, arg3, arg4
+        call function
+    elsif C_LOOP
+        error
+    else
+        error
+    end
+end
+
+# Invoke a slow-path C function with (cfr, PC). Slow paths return the
+# (possibly updated) PC in t0; adopt it so the next dispatch is correct.
+macro callSlowPath(slowPath)
+    cCall2(slowPath, cfr, PC)
+    move t0, PC
+end
+
+# VM entry trampoline body, shared by the JS-call and host-call entrypoints
+# (makeCall supplies the actual call). Builds a VMEntryRecord below the
+# current frame, checks stack capacity, copies the ProtoCallFrame header and
+# arguments into the new frame, performs the call, then unwinds the record
+# and restores VM::topCallFrame / VM::topVMEntryFrame on the way out.
+macro doVMEntry(makeCall)
+    # Per-platform bindings for the incoming (entry, vm, protoCallFrame)
+    # arguments and four scratch temps. Note the aliasing caveats below:
+    # on some ports temp4 shares a register with vm or temp2.
+    if X86 or X86_WIN
+        const entry = t4
+        const vm = t3
+        const protoCallFrame = t5
+
+        const temp1 = t0
+        const temp2 = t1
+        const temp3 = t2
+        const temp4 = t3 # same as vm
+    elsif ARM or ARMv7 or ARMv7_TRADITIONAL or C_LOOP
+        const entry = a0
+        const vm = a1
+        const protoCallFrame = a2
+
+        const temp1 = t3
+        const temp2 = t4
+        const temp3 = t5
+        const temp4 = t4 # Same as temp2
+    elsif MIPS
+        const entry = a0
+        const vm = a1
+        const protoCallFrame = a2
+
+        const temp1 = t3
+        const temp2 = t5
+        const temp3 = t4
+        const temp4 = t6
+    elsif SH4
+        const entry = a0
+        const vm = a1
+        const protoCallFrame = a2
+
+        const temp1 = t3
+        const temp2 = a3
+        const temp3 = t8
+        const temp4 = t9
+    end
+
+    functionPrologue()
+    pushCalleeSaves()
+
+    # On x86 the arguments arrived on the stack, not in registers.
+    if X86 or X86_WIN
+        loadp 12[cfr], vm
+        loadp 8[cfr], entry
+    end
+
+    # Carve out the VMEntryRecord below cfr. ARMv7 cannot address sp as a
+    # general destination, hence the temp1 detour.
+    if ARMv7
+        vmEntryRecord(cfr, temp1)
+        move temp1, sp
+    else
+        vmEntryRecord(cfr, sp)
+    end
+
+    # Record the VM and the previous top frames so they can be restored on
+    # exit (or on uncaught exception).
+    storep vm, VMEntryRecord::m_vm[sp]
+    loadp VM::topCallFrame[vm], temp2
+    storep temp2, VMEntryRecord::m_prevTopCallFrame[sp]
+    loadp VM::topVMEntryFrame[vm], temp2
+    storep temp2, VMEntryRecord::m_prevTopVMEntryFrame[sp]
+
+    # Align stack pointer
+    if X86_WIN
+        addp CallFrameAlignSlots * SlotSize, sp, temp1
+        andp ~StackAlignmentMask, temp1
+        subp temp1, CallFrameAlignSlots * SlotSize, sp
+    elsif ARM or ARMv7 or ARMv7_TRADITIONAL
+        addp CallFrameAlignSlots * SlotSize, sp, temp1
+        clrbp temp1, StackAlignmentMask, temp1
+        if ARMv7
+            subp temp1, CallFrameAlignSlots * SlotSize, temp1
+            move temp1, sp
+        else
+            subp temp1, CallFrameAlignSlots * SlotSize, sp
+        end
+    end
+
+    if X86 or X86_WIN
+        loadp 16[cfr], protoCallFrame
+    end
+
+    # temp1 = prospective new sp = sp - (header + padded args) * 8 bytes.
+    loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp2
+    addp CallFrameHeaderSlots, temp2, temp2
+    lshiftp 3, temp2
+    subp sp, temp2, temp1
+
+    # Ensure that we have enough additional stack capacity for the incoming args,
+    # and the frame for the JS code we're executing. We need to do this check
+    # before we start copying the args from the protoCallFrame below.
+    bpaeq temp1, VM::m_jsStackLimit[vm], .stackHeightOK
+
+    if C_LOOP
+        # The C loop can grow its stack; ask it to. entry/vm are saved and
+        # restored around the call because the slow path clobbers them.
+        move entry, temp2
+        move vm, temp3
+        cloopCallSlowPath _llint_stack_check_at_vm_entry, vm, temp1
+        bpeq t0, 0, .stackCheckFailed
+        move temp2, entry
+        move temp3, vm
+        jmp .stackHeightOK
+
+.stackCheckFailed:
+        move temp2, entry
+        move temp3, vm
+    end
+
+    # Stack overflow: throw, then unwind the VMEntryRecord and return.
+    subp 8, sp # Align stack for cCall2() to make a call.
+    cCall2(_llint_throw_stack_overflow_error, vm, protoCallFrame)
+
+    if ARMv7
+        vmEntryRecord(cfr, temp1)
+        move temp1, sp
+    else
+        vmEntryRecord(cfr, sp)
+    end
+
+    loadp VMEntryRecord::m_vm[sp], temp3
+    loadp VMEntryRecord::m_prevTopCallFrame[sp], temp4
+    storep temp4, VM::topCallFrame[temp3]
+    loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], temp4
+    storep temp4, VM::topVMEntryFrame[temp3]
+
+    if ARMv7
+        subp cfr, CalleeRegisterSaveSize, temp3
+        move temp3, sp
+    else
+        subp cfr, CalleeRegisterSaveSize, sp
+    end
+
+    popCalleeSaves()
+    functionEpilogue()
+    ret
+
+.stackHeightOK:
+    move temp1, sp
+    move 4, temp1
+
+    # Copy the 4 header slots (tag+payload each) from the protoCallFrame
+    # into the new frame, starting at CodeBlock.
+.copyHeaderLoop:
+    subi 1, temp1
+    loadi TagOffset[protoCallFrame, temp1, 8], temp3
+    storei temp3, TagOffset + CodeBlock[sp, temp1, 8]
+    loadi PayloadOffset[protoCallFrame, temp1, 8], temp3
+    storei temp3, PayloadOffset + CodeBlock[sp, temp1, 8]
+    btinz temp1, .copyHeaderLoop
+
+    # temp2 = actual arg count - 1; temp3 = padded arg count - 1. Any
+    # padding slots beyond the actual args are filled with undefined.
+    loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], temp2
+    subi 1, temp2
+    loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp3
+    subi 1, temp3
+
+    bieq temp2, temp3, .copyArgs
+.fillExtraArgsLoop:
+    subi 1, temp3
+    storei UndefinedTag, ThisArgumentOffset + 8 + TagOffset[sp, temp3, 8]
+    storei 0, ThisArgumentOffset + 8 + PayloadOffset[sp, temp3, 8]
+    bineq temp2, temp3, .fillExtraArgsLoop
+
+.copyArgs:
+    loadp ProtoCallFrame::args[protoCallFrame], temp1
+
+.copyArgsLoop:
+    btiz temp2, .copyArgsDone
+    subi 1, temp2
+    loadi TagOffset[temp1, temp2, 8], temp3
+    storei temp3, ThisArgumentOffset + 8 + TagOffset[sp, temp2, 8]
+    loadi PayloadOffset[temp1, temp2, 8], temp3
+    storei temp3, ThisArgumentOffset + 8 + PayloadOffset[sp, temp2, 8]
+    jmp .copyArgsLoop
+
+.copyArgsDone:
+    # Publish the new frame to the VM, then make the actual call.
+    storep sp, VM::topCallFrame[vm]
+    storep cfr, VM::topVMEntryFrame[vm]
+
+    makeCall(entry, temp1, temp2)
+
+    # Normal return: unwind the VMEntryRecord and restore the previous
+    # top frames (mirrors the overflow path above).
+    if ARMv7
+        vmEntryRecord(cfr, temp1)
+        move temp1, sp
+    else
+        vmEntryRecord(cfr, sp)
+    end
+
+    loadp VMEntryRecord::m_vm[sp], temp3
+    loadp VMEntryRecord::m_prevTopCallFrame[sp], temp4
+    storep temp4, VM::topCallFrame[temp3]
+    loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], temp4
+    storep temp4, VM::topVMEntryFrame[temp3]
+
+    if ARMv7
+        subp cfr, CalleeRegisterSaveSize, temp3
+        move temp3, sp
+    else
+        subp cfr, CalleeRegisterSaveSize, sp
+    end
+
+    popCalleeSaves()
+    functionEpilogue()
+    ret
+end
+
+# makeCall implementation for JS code: bump sp past the CallerFrameAndPC
+# area, call the entry point (stack alignment asserted around the call),
+# then restore sp. "unused" keeps the signature uniform with
+# makeHostFunctionCall.
+macro makeJavaScriptCall(entry, temp, unused)
+    addp CallerFrameAndPCSize, sp
+    checkStackPointerAlignment(t2, 0xbad0dc02)
+    if C_LOOP
+        cloopCallJSFunction entry
+    else
+        call entry
+    end
+    checkStackPointerAlignment(t2, 0xbad0dc03)
+    subp CallerFrameAndPCSize, sp
+end
+
+# makeCall implementation for native (host) functions. The callee frame
+# pointer is stored at [sp] and passed as the single ExecState* argument.
+macro makeHostFunctionCall(entry, temp1, temp2)
+    move entry, temp1
+    storep cfr, [sp]
+    if C_LOOP
+        move sp, a0
+        storep lr, PtrSize[sp]
+        cloopCallNative temp1
+    elsif X86 or X86_WIN
+        # Put callee frame pointer on stack as arg0, also put it in ecx for "fastcall" targets
+        move 0, temp2
+        move temp2, 4[sp] # put 0 in ReturnPC
+        move sp, t2 # t2 is ecx
+        push temp2 # Push dummy arg1
+        push t2
+        call temp1
+        addp 8, sp
+    else
+        move sp, a0
+        call temp1
+    end
+end
+
+# Landing pad for an exception that escapes the VM entry frame. Recovers
+# the VM from the callee cell (cells live in MarkedBlocks, so masking the
+# callee pointer finds the block header and thence the VM), rewinds cfr to
+# the frame recorded at throw time, then unwinds the VMEntryRecord exactly
+# like a normal doVMEntry exit.
+_handleUncaughtException:
+    loadp Callee + PayloadOffset[cfr], t3
+    andp MarkedBlockMask, t3
+    loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+    loadp VM::callFrameForThrow[t3], cfr
+
+    loadp CallerFrame[cfr], cfr
+
+    if ARMv7
+        vmEntryRecord(cfr, t3)
+        move t3, sp
+    else
+        vmEntryRecord(cfr, sp)
+    end
+
+    # Restore the previous top frames saved in the VMEntryRecord.
+    loadp VMEntryRecord::m_vm[sp], t3
+    loadp VMEntryRecord::m_prevTopCallFrame[sp], t5
+    storep t5, VM::topCallFrame[t3]
+    loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], t5
+    storep t5, VM::topVMEntryFrame[t3]
+
+    if ARMv7
+        subp cfr, CalleeRegisterSaveSize, t3
+        move t3, sp
+    else
+        subp cfr, CalleeRegisterSaveSize, sp
+    end
+
+    popCalleeSaves()
+    functionEpilogue()
+    ret
+
+# Return path for host-function thunks: pop extraStackSpace via the
+# epilogue and return to the caller.
+macro doReturnFromHostFunction(extraStackSpace)
+    functionEpilogue(extraStackSpace)
+    ret
+end
+
+# Debugging operation if you'd like to print an operand in the instruction stream. fromWhere
+# should be an immediate integer - any integer you like; use it to identify the place you're
+# debugging from. operand should likewise be an immediate, and should identify the operand
+# in the instruction stream you'd like to print out.
+macro traceOperand(fromWhere, operand)
+    cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand)
+    # The trace helper returns the (possibly relocated) PC and cfr.
+    move t0, PC
+    move t1, cfr
+end
+
+# Debugging operation if you'd like to print the value of an operand in the instruction
+# stream. Same as traceOperand(), but assumes that the operand is a register, and prints its
+# value.
+macro traceValue(fromWhere, operand)
+    cCall4(_llint_trace_value, cfr, PC, fromWhere, operand)
+    # The trace helper returns the (possibly relocated) PC and cfr.
+    move t0, PC
+    move t1, cfr
+end
+
+# Call a slowPath for call opcodes.
+# Slow path for call opcodes. PC is stashed in the ArgumentCount tag slot
+# (so dispatchAfterCall can recover it); "action" receives the slow path's
+# return value in t0.
+macro callCallSlowPath(slowPath, action)
+    storep PC, ArgumentCount + TagOffset[cfr]
+    cCall2(slowPath, cfr, PC)
+    action(t0)
+end
+
+# Service the watchdog timer. A nonzero t0 from the slow path means a
+# termination exception was raised: jump to throwHandler. Otherwise
+# restore PC from the ArgumentCount tag slot and continue.
+macro callWatchdogTimerHandler(throwHandler)
+    storei PC, ArgumentCount + TagOffset[cfr]
+    cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC)
+    btpnz t0, throwHandler
+    loadi ArgumentCount + TagOffset[cfr], PC
+end
+
+# Loop OSR hook: when the execution counter trips, ask the JIT for an
+# on-stack-replacement entry. On success t0 = machine code target and
+# t1 = new sp; otherwise fall back to interpreting (reload PC).
+macro checkSwitchToJITForLoop()
+    checkSwitchToJIT(
+        1,
+        macro ()
+            storei PC, ArgumentCount + TagOffset[cfr]
+            cCall2(_llint_loop_osr, cfr, PC)
+            btpz t0, .recover
+            move t1, sp
+            jmp t0
+        .recover:
+            loadi ArgumentCount + TagOffset[cfr], PC
+        end)
+end
+
+# Load a virtual-register operand (never a constant): reads the operand
+# index from the instruction stream into "index", then its tag/payload
+# from the frame.
+macro loadVariable(operand, index, tag, payload)
+    loadisFromInstruction(operand, index)
+    loadi TagOffset[cfr, index, 8], tag
+    loadi PayloadOffset[cfr, index, 8], payload
+end
+
+# Index, tag, and payload must be different registers. Index is not
+# changed.
+# Index, tag, and payload must be different registers. Index is not
+# changed. Loads either a frame variable or, for indices >=
+# FirstConstantRegisterIndex, an entry of the CodeBlock's constant pool.
+macro loadConstantOrVariable(index, tag, payload)
+    bigteq index, FirstConstantRegisterIndex, .constant
+    loadi TagOffset[cfr, index, 8], tag
+    loadi PayloadOffset[cfr, index, 8], payload
+    jmp .done
+.constant:
+    loadp CodeBlock[cfr], payload
+    loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload
+    # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
+    # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
+    loadp TagOffset[payload, index, 8], tag
+    loadp PayloadOffset[payload, index, 8], payload
+.done:
+end
+
+# Tag-only variant of loadConstantOrVariable: loads just the tag word of
+# operand "index" (frame variable or constant-pool entry) into "tag".
+macro loadConstantOrVariableTag(index, tag)
+    bigteq index, FirstConstantRegisterIndex, .constant
+    loadi TagOffset[cfr, index, 8], tag
+    jmp .done
+.constant:
+    loadp CodeBlock[cfr], tag
+    loadp CodeBlock::m_constantRegisters + VectorBufferOffset[tag], tag
+    # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
+    # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
+    loadp TagOffset[tag, index, 8], tag
+.done:
+end
+
+# Index and payload may be the same register. Index may be clobbered.
+# Index and payload may be the same register. Index may be clobbered.
+# Two-register variant: in the constant case, index is turned into a byte
+# offset in place, which is why it may be destroyed.
+macro loadConstantOrVariable2Reg(index, tag, payload)
+    bigteq index, FirstConstantRegisterIndex, .constant
+    loadi TagOffset[cfr, index, 8], tag
+    loadi PayloadOffset[cfr, index, 8], payload
+    jmp .done
+.constant:
+    loadp CodeBlock[cfr], tag
+    loadp CodeBlock::m_constantRegisters + VectorBufferOffset[tag], tag
+    # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
+    # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
+    lshifti 3, index
+    addp index, tag
+    loadp PayloadOffset[tag], payload
+    loadp TagOffset[tag], tag
+.done:
+end
+
+# Payload load with a caller-supplied check on the tag word: "tagCheck"
+# is a macro invoked with the tag's address expression; it may branch
+# away (e.g. to a slow path) before the payload is loaded.
+macro loadConstantOrVariablePayloadTagCustom(index, tagCheck, payload)
+    bigteq index, FirstConstantRegisterIndex, .constant
+    tagCheck(TagOffset[cfr, index, 8])
+    loadi PayloadOffset[cfr, index, 8], payload
+    jmp .done
+.constant:
+    loadp CodeBlock[cfr], payload
+    loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload
+    # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
+    # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
+    tagCheck(TagOffset[payload, index, 8])
+    loadp PayloadOffset[payload, index, 8], payload
+.done:
+end
+
+# Index and payload must be different registers. Index is not mutated. Use
+# this if you know what the tag of the variable should be. Doing the tag
+# test as part of loading the variable reduces register use, but may not
+# be faster than doing loadConstantOrVariable followed by a branch on the
+# tag.
+# Load the payload of operand "index", branching to "slow" unless its tag
+# equals expectedTag. (See the comment above for register constraints.)
+macro loadConstantOrVariablePayload(index, expectedTag, payload, slow)
+    loadConstantOrVariablePayloadTagCustom(
+        index,
+        macro (actualTag) bineq actualTag, expectedTag, slow end,
+        payload)
+end
+
+# Load the payload of operand "index" without any tag check (the empty
+# tagCheck macro ignores the tag entirely).
+macro loadConstantOrVariablePayloadUnchecked(index, payload)
+    loadConstantOrVariablePayloadTagCustom(
+        index,
+        macro (actualTag) end,
+        payload)
+end
+
+# Install a structure on a cell: writes the StructureID, then copies word2
+# of the structure's StructureIDBlob (the indexing-type/type-info word)
+# into the cell header starting at m_indexingType.
+macro storeStructureWithTypeInfo(cell, structure, scratch)
+    storep structure, JSCell::m_structureID[cell]
+
+    loadi Structure::m_blob + StructureIDBlob::u.words.word2[structure], scratch
+    storei scratch, JSCell::m_indexingType[cell]
+end
+
+# GGC write barrier on the cell held in instruction operand "cellOperand".
+# No-op unless generational GC is enabled; skips the slow call when the
+# operand isn't a cell or the cell is already remembered / in Eden.
+macro writeBarrierOnOperand(cellOperand)
+    if GGC
+        loadisFromInstruction(cellOperand, t1)
+        loadConstantOrVariablePayload(t1, CellTag, t2, .writeBarrierDone)
+        skipIfIsRememberedOrInEden(t2, t1, t3,
+            macro(gcData)
+                btbnz gcData, .writeBarrierDone
+                # PC and cfr are caller-saved across the C call.
+                push cfr, PC
+                # We make two extra slots because cCall2 will poke.
+                subp 8, sp
+                cCall2Void(_llint_write_barrier_slow, cfr, t2)
+                addp 8, sp
+                pop PC, cfr
+            end
+        )
+    .writeBarrierDone:
+    end
+end
+
+# Barrier a (cell, value) store: only needed when the stored value is
+# itself a cell; otherwise skip straight to done.
+macro writeBarrierOnOperands(cellOperand, valueOperand)
+    if GGC
+        loadisFromInstruction(valueOperand, t1)
+        loadConstantOrVariableTag(t1, t0)
+        bineq t0, CellTag, .writeBarrierDone
+
+        writeBarrierOnOperand(cellOperand)
+    .writeBarrierDone:
+    end
+end
+
+# Barrier a store into the global object (the "cell" is the CodeBlock's
+# global object rather than an instruction operand). Skipped when the
+# stored value isn't a cell.
+macro writeBarrierOnGlobalObject(valueOperand)
+    if GGC
+        loadisFromInstruction(valueOperand, t1)
+        loadConstantOrVariableTag(t1, t0)
+        bineq t0, CellTag, .writeBarrierDone
+
+        loadp CodeBlock[cfr], t3
+        loadp CodeBlock::m_globalObject[t3], t3
+        skipIfIsRememberedOrInEden(t3, t1, t2,
+            macro(gcData)
+                btbnz gcData, .writeBarrierDone
+                push cfr, PC
+                # We make two extra slots because cCall2 will poke.
+                subp 8, sp
+                cCall2Void(_llint_write_barrier_slow, cfr, t3)
+                addp 8, sp
+                pop PC, cfr
+            end
+        )
+    .writeBarrierDone:
+    end
+end
+
+# Record a value in the ValueProfile whose pointer lives at instruction
+# offset "operand" (a byte offset from PC). Overwrites the profile bucket
+# with the given tag/payload.
+macro valueProfile(tag, payload, operand, scratch)
+    loadp operand[PC], scratch
+    storei tag, ValueProfile::m_buckets + TagOffset[scratch]
+    storei payload, ValueProfile::m_buckets + PayloadOffset[scratch]
+end
+
+
+# Entrypoints into the interpreter
+
+# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
+# Arity check for function entry (CodeBlock expected in t1, per prologue).
+# Fast path: enough arguments were passed, jump to doneLabel. Otherwise a
+# slow path decides between throwing, calling an arity-fixup thunk, or
+# moving the frame inline to make room for missing (undefined) arguments.
+macro functionArityCheck(doneLabel, slowPath)
+    loadi PayloadOffset + ArgumentCount[cfr], t0
+    biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
+    cCall2(slowPath, cfr, PC)   # This slowPath has a simple protocol: t0 = 0 => no error, t0 != 0 => error
+    btiz t0, .noError
+    move t1, cfr   # t1 contains caller frame
+    jmp _llint_throw_from_slow_path_trampoline
+
+.noError:
+    # t1 points to ArityCheckData.
+    loadp CommonSlowPaths::ArityCheckData::thunkToCall[t1], t2
+    btpz t2, .proceedInline
+    
+    # A fixup thunk was provided: call it with the return PC and padded
+    # stack space it recorded.
+    loadp CommonSlowPaths::ArityCheckData::returnPC[t1], t5
+    loadp CommonSlowPaths::ArityCheckData::paddedStackSpace[t1], t0
+    call t2
+    if ASSERT_ENABLED
+        loadp ReturnPC[cfr], t0
+        loadp [t0], t0
+    end
+    jmp .continue
+
+.proceedInline:
+    loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[t1], t1
+    btiz t1, .continue
+
+    // Move frame up "t1 * 2" slots
+    lshiftp 1, t1
+    negi t1
+    move cfr, t3
+    loadi PayloadOffset + ArgumentCount[cfr], t2
+    addi CallFrameHeaderSlots, t2
+.copyLoop:
+    loadi PayloadOffset[t3], t0
+    storei t0, PayloadOffset[t3, t1, 8]
+    loadi TagOffset[t3], t0
+    storei t0, TagOffset[t3, t1, 8]
+    addp 8, t3
+    bsubinz 1, t2, .copyLoop
+
+    // Fill new slots with JSUndefined
+    move t1, t2
+.fillLoop:
+    move 0, t0
+    storei t0, PayloadOffset[t3, t1, 8]
+    move UndefinedTag, t0
+    storei t0, TagOffset[t3, t1, 8]
+    addp 8, t3
+    baddinz 1, t2, .fillLoop
+
+    # Rebase cfr and sp onto the moved frame (t1 is a negative slot count,
+    # converted to bytes here).
+    lshiftp 3, t1
+    addp t1, cfr
+    addp t1, sp
+.continue:
+    # Reload CodeBlock and PC, since the slow_path clobbered it.
+    loadp CodeBlock[cfr], t1
+    loadp CodeBlock::m_instructions[t1], PC
+    jmp doneLabel
+end
+
+# Jump to "label" if the VM has a pending exception. The VM is located
+# from the callee cell via its MarkedBlock header (same trick as
+# _handleUncaughtException). Clobbers t3.
+macro branchIfException(label)
+    loadp Callee + PayloadOffset[cfr], t3
+    andp MarkedBlockMask, t3
+    loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+    btiz VM::m_exception[t3], .noException
+    jmp label
+.noException:
+end
+
+
+# Instruction implementations
+
+# op_enter: clear all m_numVars local slots to undefined (locals sit at
+# negative offsets from cfr, hence the negated counter), then run the
+# slow path (profiling etc.) and fall into the first real opcode.
+_llint_op_enter:
+    traceExecution()
+    checkStackPointerAlignment(t2, 0xdead00e1)
+    loadp CodeBlock[cfr], t2                // t2<CodeBlock> = cfr.CodeBlock
+    loadi CodeBlock::m_numVars[t2], t2      // t2<size_t> = t2<CodeBlock>.m_numVars
+    btiz t2, .opEnterDone
+    move UndefinedTag, t0
+    move 0, t1
+    negi t2
+.opEnterLoop:
+    storei t0, TagOffset[cfr, t2, 8]
+    storei t1, PayloadOffset[cfr, t2, 8]
+    addi 1, t2
+    btinz t2, .opEnterLoop
+.opEnterDone:
+    callSlowPath(_slow_path_enter)
+    dispatch(1)
+
+
+# op_get_scope: dst(1) = callee->m_scope (always a cell).
+_llint_op_get_scope:
+    traceExecution()
+    loadi Callee + PayloadOffset[cfr], t0
+    loadi JSCallee::m_scope[t0], t0
+    loadisFromInstruction(1, t1)
+    storei CellTag, TagOffset[cfr, t1, 8]
+    storei t0, PayloadOffset[cfr, t1, 8]
+    dispatch(2)
+
+
+# op_create_this: fast-allocate the |this| object from the callee's cached
+# allocation profile. Goes slow when the callee has no rare data, no
+# allocator, or the cached-callee check (operand 4: either the expected
+# callee or the sentinel 1 = "seen multiple callees") fails.
+_llint_op_create_this:
+    traceExecution()
+    loadi 8[PC], t0
+    loadp PayloadOffset[cfr, t0, 8], t0
+    loadp JSFunction::m_rareData[t0], t4
+    btpz t4, .opCreateThisSlow
+    loadp FunctionRareData::m_allocationProfile + ObjectAllocationProfile::m_allocator[t4], t1
+    loadp FunctionRareData::m_allocationProfile + ObjectAllocationProfile::m_structure[t4], t2
+    btpz t1, .opCreateThisSlow
+    loadpFromInstruction(4, t4)
+    bpeq t4, 1, .hasSeenMultipleCallee
+    bpneq t4, t0, .opCreateThisSlow
+.hasSeenMultipleCallee:
+    allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow)
+    loadi 4[PC], t1
+    storei CellTag, TagOffset[cfr, t1, 8]
+    storei t0, PayloadOffset[cfr, t1, 8]
+    dispatch(5)
+
+.opCreateThisSlow:
+    callSlowPath(_slow_path_create_this)
+    dispatch(5)
+
+
+# op_to_this: fast path when |this| is already a final object with the
+# structure cached in operand 2; anything else (non-cell, wrong type,
+# structure mismatch) goes slow.
+_llint_op_to_this:
+    traceExecution()
+    loadi 4[PC], t0
+    bineq TagOffset[cfr, t0, 8], CellTag, .opToThisSlow
+    loadi PayloadOffset[cfr, t0, 8], t0
+    bbneq JSCell::m_type[t0], FinalObjectType, .opToThisSlow
+    loadpFromInstruction(2, t2)
+    bpneq JSCell::m_structureID[t0], t2, .opToThisSlow
+    dispatch(4)
+
+.opToThisSlow:
+    callSlowPath(_slow_path_to_this)
+    dispatch(4)
+
+
+# op_new_object: inline-allocate an empty object from the allocation
+# profile stored in operand 3; falls back to the slow path if the
+# allocator's free list is exhausted.
+_llint_op_new_object:
+    traceExecution()
+    loadpFromInstruction(3, t0)
+    loadp ObjectAllocationProfile::m_allocator[t0], t1
+    loadp ObjectAllocationProfile::m_structure[t0], t2
+    allocateJSObject(t1, t2, t0, t3, .opNewObjectSlow)
+    loadi 4[PC], t1
+    storei CellTag, TagOffset[cfr, t1, 8]
+    storei t0, PayloadOffset[cfr, t1, 8]
+    dispatch(4)
+
+.opNewObjectSlow:
+    callSlowPath(_llint_slow_path_new_object)
+    dispatch(4)
+
+
+# op_check_tdz: throw a TDZ (temporal-dead-zone) error if operand 1 holds
+# the empty value (tag == EmptyValueTag); otherwise fall through.
+_llint_op_check_tdz:
+    traceExecution()
+    loadisFromInstruction(1, t0)
+    loadConstantOrVariableTag(t0, t1)
+    bineq t1, EmptyValueTag, .opNotTDZ
+    callSlowPath(_slow_path_throw_tdz_error)
+
+.opNotTDZ:
+    dispatch(2)
+
+
+# op_mov: dst(1) = src(2). Copies tag and payload.
+_llint_op_mov:
+    traceExecution()
+    loadi 8[PC], t1
+    loadi 4[PC], t0
+    loadConstantOrVariable(t1, t2, t3)
+    storei t2, TagOffset[cfr, t0, 8]
+    storei t3, PayloadOffset[cfr, t0, 8]
+    dispatch(3)
+
+
+# op_not: dst(1) = !src(2). Fast path only for booleans (flip the payload
+# bit); everything else goes slow.
+_llint_op_not:
+    traceExecution()
+    loadi 8[PC], t0
+    loadi 4[PC], t1
+    loadConstantOrVariable(t0, t2, t3)
+    bineq t2, BooleanTag, .opNotSlow
+    xori 1, t3
+    storei t2, TagOffset[cfr, t1, 8]
+    storei t3, PayloadOffset[cfr, t1, 8]
+    dispatch(3)
+
+.opNotSlow:
+    callSlowPath(_slow_path_not)
+    dispatch(3)
+
+
+# op_eq: dst(1) = src1(2) == src2(3). Fast path requires both tags equal
+# and the tag to be neither CellTag nor a double tag (tags below LowestTag
+# encode doubles); then equality is a plain payload compare.
+_llint_op_eq:
+    traceExecution()
+    loadi 12[PC], t2
+    loadi 8[PC], t0
+    loadConstantOrVariable(t2, t3, t1)
+    loadConstantOrVariable2Reg(t0, t2, t0)
+    bineq t2, t3, .opEqSlow
+    bieq t2, CellTag, .opEqSlow
+    bib t2, LowestTag, .opEqSlow
+    loadi 4[PC], t2
+    cieq t0, t1, t0
+    storei BooleanTag, TagOffset[cfr, t2, 8]
+    storei t0, PayloadOffset[cfr, t2, 8]
+    dispatch(4)
+
+.opEqSlow:
+    callSlowPath(_slow_path_eq)
+    dispatch(4)
+
+
+# op_eq_null: dst(1) = (src(2) == null). Non-cells compare true iff tag is
+# Null or Undefined. Cells are false unless MasqueradesAsUndefined, in
+# which case the result depends on the structure's global object matching
+# this CodeBlock's.
+_llint_op_eq_null:
+    traceExecution()
+    loadi 8[PC], t0
+    loadi 4[PC], t3
+    assertNotConstant(t0)
+    loadi TagOffset[cfr, t0, 8], t1
+    loadi PayloadOffset[cfr, t0, 8], t0
+    bineq t1, CellTag, .opEqNullImmediate
+    btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .opEqNullMasqueradesAsUndefined
+    move 0, t1
+    jmp .opEqNullNotImmediate
+.opEqNullMasqueradesAsUndefined:
+    loadp JSCell::m_structureID[t0], t1
+    loadp CodeBlock[cfr], t0
+    loadp CodeBlock::m_globalObject[t0], t0
+    cpeq Structure::m_globalObject[t1], t0, t1
+    jmp .opEqNullNotImmediate
+.opEqNullImmediate:
+    # true iff tag is NullTag or UndefinedTag.
+    cieq t1, NullTag, t2
+    cieq t1, UndefinedTag, t1
+    ori t2, t1
+.opEqNullNotImmediate:
+    storei BooleanTag, TagOffset[cfr, t3, 8]
+    storei t1, PayloadOffset[cfr, t3, 8]
+    dispatch(3)
+
+
+# op_neq: dst(1) = src1(2) != src2(3). Mirror of op_eq with the inverted
+# payload compare; same fast-path tag constraints.
+_llint_op_neq:
+    traceExecution()
+    loadi 12[PC], t2
+    loadi 8[PC], t0
+    loadConstantOrVariable(t2, t3, t1)
+    loadConstantOrVariable2Reg(t0, t2, t0)
+    bineq t2, t3, .opNeqSlow
+    bieq t2, CellTag, .opNeqSlow
+    bib t2, LowestTag, .opNeqSlow
+    loadi 4[PC], t2
+    cineq t0, t1, t0
+    storei BooleanTag, TagOffset[cfr, t2, 8]
+    storei t0, PayloadOffset[cfr, t2, 8]
+    dispatch(4)
+
+.opNeqSlow:
+    callSlowPath(_slow_path_neq)
+    dispatch(4)
+
+
+# op_neq_null: dst(1) = (src(2) != null). Logical negation of op_eq_null:
+# note the AND of the two inverted tag compares in the immediate case.
+_llint_op_neq_null:
+    traceExecution()
+    loadi 8[PC], t0
+    loadi 4[PC], t3
+    assertNotConstant(t0)
+    loadi TagOffset[cfr, t0, 8], t1
+    loadi PayloadOffset[cfr, t0, 8], t0
+    bineq t1, CellTag, .opNeqNullImmediate
+    btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .opNeqNullMasqueradesAsUndefined
+    move 1, t1
+    jmp .opNeqNullNotImmediate
+.opNeqNullMasqueradesAsUndefined:
+    loadp JSCell::m_structureID[t0], t1
+    loadp CodeBlock[cfr], t0
+    loadp CodeBlock::m_globalObject[t0], t0
+    cpneq Structure::m_globalObject[t1], t0, t1
+    jmp .opNeqNullNotImmediate
+.opNeqNullImmediate:
+    # true iff tag is neither NullTag nor UndefinedTag.
+    cineq t1, NullTag, t2
+    cineq t1, UndefinedTag, t1
+    andi t2, t1
+.opNeqNullNotImmediate:
+    storei BooleanTag, TagOffset[cfr, t3, 8]
+    storei t1, PayloadOffset[cfr, t3, 8]
+    dispatch(3)
+
+
+# Shared body of op_stricteq / op_nstricteq. Fast path: tags must match
+# and not be doubles; for cells, both sides must be objects (strings and
+# symbols need content comparison, so they go slow). equalityOperation
+# compares the payloads.
+macro strictEq(equalityOperation, slowPath)
+    loadi 12[PC], t2
+    loadi 8[PC], t0
+    loadConstantOrVariable(t2, t3, t1)
+    loadConstantOrVariable2Reg(t0, t2, t0)
+    bineq t2, t3, .slow
+    bib t2, LowestTag, .slow
+    bineq t2, CellTag, .notStringOrSymbol
+    bbaeq JSCell::m_type[t0], ObjectType, .notStringOrSymbol
+    bbb JSCell::m_type[t1], ObjectType, .slow
+.notStringOrSymbol:
+    loadi 4[PC], t2
+    equalityOperation(t0, t1, t0)
+    storei BooleanTag, TagOffset[cfr, t2, 8]
+    storei t0, PayloadOffset[cfr, t2, 8]
+    dispatch(4)
+
+.slow:
+    callSlowPath(slowPath)
+    dispatch(4)
+end
+
+# op_stricteq: strictEq specialized with payload-equality.
+_llint_op_stricteq:
+    traceExecution()
+    strictEq(macro (left, right, result) cieq left, right, result end, _slow_path_stricteq)
+
+
+# op_nstricteq: strictEq specialized with payload-inequality.
+_llint_op_nstricteq:
+    traceExecution()
+    strictEq(macro (left, right, result) cineq left, right, result end, _slow_path_nstricteq)
+
+
+# op_inc: srcDst(1) += 1 for int32 values; overflow (baddio) or a non-int
+# tag goes slow.
+_llint_op_inc:
+    traceExecution()
+    loadi 4[PC], t0
+    bineq TagOffset[cfr, t0, 8], Int32Tag, .opIncSlow
+    loadi PayloadOffset[cfr, t0, 8], t1
+    baddio 1, t1, .opIncSlow
+    storei t1, PayloadOffset[cfr, t0, 8]
+    dispatch(2)
+
+.opIncSlow:
+    callSlowPath(_slow_path_inc)
+    dispatch(2)
+
+
+# op_dec: srcDst(1) -= 1 for int32 values; overflow (bsubio) or a non-int
+# tag goes slow.
+_llint_op_dec:
+    traceExecution()
+    loadi 4[PC], t0
+    bineq TagOffset[cfr, t0, 8], Int32Tag, .opDecSlow
+    loadi PayloadOffset[cfr, t0, 8], t1
+    bsubio 1, t1, .opDecSlow
+    storei t1, PayloadOffset[cfr, t0, 8]
+    dispatch(2)
+
+.opDecSlow:
+    callSlowPath(_slow_path_dec)
+    dispatch(2)
+
+
+# op_to_number: dst(1) = ToNumber(src(2)). Ints and doubles (tag <
+# LowestTag) are already numbers and are copied through; anything else
+# goes slow.
+_llint_op_to_number:
+    traceExecution()
+    loadi 8[PC], t0
+    loadi 4[PC], t1
+    loadConstantOrVariable(t0, t2, t3)
+    bieq t2, Int32Tag, .opToNumberIsInt
+    biaeq t2, LowestTag, .opToNumberSlow
+.opToNumberIsInt:
+    storei t2, TagOffset[cfr, t1, 8]
+    storei t3, PayloadOffset[cfr, t1, 8]
+    dispatch(3)
+
+.opToNumberSlow:
+    callSlowPath(_slow_path_to_number)
+    dispatch(3)
+
+
+# op_to_string: dst(1) = ToString(src(2)). Fast path only when src is
+# already a string cell; otherwise slow.
+_llint_op_to_string:
+    traceExecution()
+    loadi 8[PC], t0
+    loadi 4[PC], t1
+    loadConstantOrVariable(t0, t2, t3)
+    bineq t2, CellTag, .opToStringSlow
+    bbneq JSCell::m_type[t3], StringType, .opToStringSlow
+.opToStringIsString:
+    storei t2, TagOffset[cfr, t1, 8]
+    storei t3, PayloadOffset[cfr, t1, 8]
+    dispatch(3)
+
+.opToStringSlow:
+    callSlowPath(_slow_path_to_string)
+    dispatch(3)
+
+
+# op_negate: dst(1) = -src(2). Int fast path excludes payloads whose low
+# 31 bits are zero (0 would produce -0, and INT32_MIN would overflow).
+# Double fast path flips the sign bit in the tag (high) word.
+_llint_op_negate:
+    traceExecution()
+    loadi 8[PC], t0
+    loadi 4[PC], t3
+    loadConstantOrVariable(t0, t1, t2)
+    bineq t1, Int32Tag, .opNegateSrcNotInt
+    btiz t2, 0x7fffffff, .opNegateSlow
+    negi t2
+    storei Int32Tag, TagOffset[cfr, t3, 8]
+    storei t2, PayloadOffset[cfr, t3, 8]
+    dispatch(3)
+.opNegateSrcNotInt:
+    bia t1, LowestTag, .opNegateSlow
+    xori 0x80000000, t1
+    storei t1, TagOffset[cfr, t3, 8]
+    storei t2, PayloadOffset[cfr, t3, 8]
+    dispatch(3)
+
+.opNegateSlow:
+    callSlowPath(_slow_path_negate)
+    dispatch(3)
+
+
+# Generic arithmetic binary op with a caller-supplied int store.
+# Operands: dst(1), src1(2), src2(3). int/int uses
+# integerOperationAndStore (which handles overflow bail-out); any
+# int/double or double/double mix is converted to doubles (fii2d builds a
+# double from a payload/tag pair) and handled by doubleOperation, storing
+# the raw 8-byte double into the dst slot.
+macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
+    loadi 12[PC], t2
+    loadi 8[PC], t0
+    loadConstantOrVariable(t2, t3, t1)
+    loadConstantOrVariable2Reg(t0, t2, t0)
+    bineq t2, Int32Tag, .op1NotInt
+    bineq t3, Int32Tag, .op2NotInt
+    loadi 4[PC], t2
+    integerOperationAndStore(t3, t1, t0, .slow, t2)
+    dispatch(5)
+
+.op1NotInt:
+    # First operand is definitely not an int, the second operand could be anything.
+    bia t2, LowestTag, .slow
+    bib t3, LowestTag, .op1NotIntOp2Double
+    bineq t3, Int32Tag, .slow
+    ci2d t1, ft1
+    jmp .op1NotIntReady
+.op1NotIntOp2Double:
+    fii2d t1, t3, ft1
+.op1NotIntReady:
+    loadi 4[PC], t1
+    fii2d t0, t2, ft0
+    doubleOperation(ft1, ft0)
+    stored ft0, [cfr, t1, 8]
+    dispatch(5)
+
+.op2NotInt:
+    # First operand is definitely an int, the second operand is definitely not.
+    loadi 4[PC], t2
+    bia t3, LowestTag, .slow
+    ci2d t0, ft0
+    fii2d t1, t3, ft1
+    doubleOperation(ft1, ft0)
+    stored ft0, [cfr, t2, 8]
+    dispatch(5)
+
+.slow:
+    callSlowPath(slowPath)
+    dispatch(5)
+end
+
+# Convenience wrapper over binaryOpCustomStore for ops whose int result is
+# stored as a plain Int32Tag/payload pair (add, sub).
+macro binaryOp(integerOperation, doubleOperation, slowPath)
+    binaryOpCustomStore(
+        macro (int32Tag, left, right, slow, index)
+            integerOperation(left, right, slow)
+            storei int32Tag, TagOffset[cfr, index, 8]
+            storei right, PayloadOffset[cfr, index, 8]
+        end,
+        doubleOperation, slowPath)
+end
+
+# op_add: overflow-checked int add / double add.
+_llint_op_add:
+    traceExecution()
+    binaryOp(
+        macro (left, right, slow) baddio left, right, slow end,
+        macro (left, right) addd left, right end,
+        _slow_path_add)
+
+
+# op_mul: overflow-checked int multiply. A zero result with a negative
+# operand must go slow to produce -0 correctly.
+_llint_op_mul:
+    traceExecution()
+    binaryOpCustomStore(
+        macro (int32Tag, left, right, slow, index)
+            const scratch = int32Tag   # We know that we can reuse the int32Tag register since it has a constant.
+            move right, scratch
+            bmulio left, scratch, slow
+            btinz scratch, .done
+            bilt left, 0, slow
+            bilt right, 0, slow
+        .done:
+            storei Int32Tag, TagOffset[cfr, index, 8]
+            storei scratch, PayloadOffset[cfr, index, 8]
+        end,
+        macro (left, right) muld left, right end,
+        _slow_path_mul)
+
+
+# op_sub: overflow-checked int subtract / double subtract.
+_llint_op_sub:
+    traceExecution()
+    binaryOp(
+        macro (left, right, slow) bsubio left, right, slow end,
+        macro (left, right) subd left, right end,
+        _slow_path_sub)
+
+
+# op_div: always divides as doubles; bcd2i converts the quotient back to
+# int32 when it is exactly representable, otherwise the raw double is
+# stored.
+_llint_op_div:
+    traceExecution()
+    binaryOpCustomStore(
+        macro (int32Tag, left, right, slow, index)
+            ci2d left, ft0
+            ci2d right, ft1
+            divd ft0, ft1
+            bcd2i ft1, right, .notInt
+            storei int32Tag, TagOffset[cfr, index, 8]
+            storei right, PayloadOffset[cfr, index, 8]
+            jmp .done
+        .notInt:
+            stored ft1, [cfr, index, 8]
+        .done:
+        end,
+        macro (left, right) divd left, right end,
+        _slow_path_div)
+
+
+# Generic bitwise/shift binary op: dst(1), src1(2), src2(3). Both operands
+# must be int32; "operation" combines them into t0. "advance" varies
+# because shift opcodes are 4 slots and and/or/xor are 5.
+macro bitOp(operation, slowPath, advance)
+    loadi 12[PC], t2
+    loadi 8[PC], t0
+    loadConstantOrVariable(t2, t3, t1)
+    loadConstantOrVariable2Reg(t0, t2, t0)
+    bineq t3, Int32Tag, .slow
+    bineq t2, Int32Tag, .slow
+    loadi 4[PC], t2
+    operation(t1, t0)
+    storei t3, TagOffset[cfr, t2, 8]
+    storei t0, PayloadOffset[cfr, t2, 8]
+    dispatch(advance)
+
+.slow:
+    callSlowPath(slowPath)
+    dispatch(advance)
+end
+
+# op_lshift: int32 left shift via bitOp (4-slot opcode).
+_llint_op_lshift:
+    traceExecution()
+    bitOp(
+        macro (left, right) lshifti left, right end,
+        _slow_path_lshift,
+        4)
+
+
+# op_rshift: int32 arithmetic right shift via bitOp (4-slot opcode).
+_llint_op_rshift:
+    traceExecution()
+    bitOp(
+        macro (left, right) rshifti left, right end,
+        _slow_path_rshift,
+        4)
+
+
+# op_urshift: int32 logical right shift via bitOp (4-slot opcode). A
+# negative-looking result is handled by op_unsigned below.
+_llint_op_urshift:
+    traceExecution()
+    bitOp(
+        macro (left, right) urshifti left, right end,
+        _slow_path_urshift,
+        4)
+
+
+# op_unsigned: dst(1) = src(2) reinterpreted as unsigned. Fast path only
+# when the int32 payload is non-negative (fits in int32 unchanged); a
+# negative payload needs a double result, so it goes slow.
+_llint_op_unsigned:
+    traceExecution()
+    loadi 4[PC], t0
+    loadi 8[PC], t1
+    loadConstantOrVariablePayload(t1, Int32Tag, t2, .opUnsignedSlow)
+    bilt t2, 0, .opUnsignedSlow
+    storei t2, PayloadOffset[cfr, t0, 8]
+    storei Int32Tag, TagOffset[cfr, t0, 8]
+    dispatch(3)
+.opUnsignedSlow:
+    callSlowPath(_slow_path_unsigned)
+    dispatch(3)
+
+
+# op_bitand: int32 AND via bitOp (5-slot opcode).
+_llint_op_bitand:
+    traceExecution()
+    bitOp(
+        macro (left, right) andi left, right end,
+        _slow_path_bitand,
+        5)
+
+
+_llint_op_bitxor:
+ traceExecution()
+ bitOp(
+ macro (left, right) xori left, right end,
+ _slow_path_bitxor,
+ 5)
+
+
+_llint_op_bitor:
+ traceExecution()
+ bitOp(
+ macro (left, right) ori left, right end,
+ _slow_path_bitor,
+ 5)
+
+
+# op_check_has_instance: fast path requires a cell whose flags byte has
+# ImplementsDefaultHasInstance set; otherwise the slow path runs and
+# re-dispatches from the PC it set up (dispatch(0)).
+_llint_op_check_has_instance:
+    traceExecution()
+    loadi 12[PC], t1
+    loadConstantOrVariablePayload(t1, CellTag, t0, .opCheckHasInstanceSlow)
+    btbz JSCell::m_flags[t0], ImplementsDefaultHasInstance, .opCheckHasInstanceSlow
+    dispatch(5)
+
+.opCheckHasInstanceSlow:
+    callSlowPath(_llint_slow_path_check_has_instance)
+    dispatch(0)
+
+
+# op_instanceof: walks the value's prototype chain (via structure) looking
+# for the prototype cell; stores a boolean result. Non-cell operands or a
+# non-object prototype go to the slow path.
+_llint_op_instanceof:
+    traceExecution()
+    # Actually do the work.
+    loadi 12[PC], t0
+    loadi 4[PC], t3
+    loadConstantOrVariablePayload(t0, CellTag, t1, .opInstanceofSlow)
+    bbb JSCell::m_type[t1], ObjectType, .opInstanceofSlow
+    loadi 8[PC], t0
+    loadConstantOrVariablePayload(t0, CellTag, t2, .opInstanceofSlow)
+
+    # Register state: t1 = prototype, t2 = value
+    move 1, t0
+.opInstanceofLoop:
+    loadp JSCell::m_structureID[t2], t2
+    loadi Structure::m_prototype + PayloadOffset[t2], t2
+    bpeq t2, t1, .opInstanceofDone
+    # A zero prototype payload terminates the chain (result = false).
+    btinz t2, .opInstanceofLoop
+
+    move 0, t0
+.opInstanceofDone:
+    storei BooleanTag, TagOffset[cfr, t3, 8]
+    storei t0, PayloadOffset[cfr, t3, 8]
+    dispatch(4)
+
+.opInstanceofSlow:
+    callSlowPath(_llint_slow_path_instanceof)
+    dispatch(4)
+
+
+# op_is_undefined: non-cells compare their tag against UndefinedTag. Cells
+# are only "undefined" if they masquerade as undefined AND belong to this
+# code block's global object (the document.all quirk check).
+_llint_op_is_undefined:
+    traceExecution()
+    loadi 8[PC], t1
+    loadi 4[PC], t0
+    loadConstantOrVariable(t1, t2, t3)
+    storei BooleanTag, TagOffset[cfr, t0, 8]
+    bieq t2, CellTag, .opIsUndefinedCell
+    cieq t2, UndefinedTag, t3
+    storei t3, PayloadOffset[cfr, t0, 8]
+    dispatch(3)
+.opIsUndefinedCell:
+    btbnz JSCell::m_flags[t3], MasqueradesAsUndefined, .opIsUndefinedMasqueradesAsUndefined
+    move 0, t1
+    storei t1, PayloadOffset[cfr, t0, 8]
+    dispatch(3)
+.opIsUndefinedMasqueradesAsUndefined:
+    loadp JSCell::m_structureID[t3], t1
+    loadp CodeBlock[cfr], t3
+    loadp CodeBlock::m_globalObject[t3], t3
+    cpeq Structure::m_globalObject[t1], t3, t1
+    storei t1, PayloadOffset[cfr, t0, 8]
+    dispatch(3)
+
+
+# op_is_boolean: a simple tag comparison against BooleanTag.
+_llint_op_is_boolean:
+    traceExecution()
+    loadi 8[PC], t1
+    loadi 4[PC], t2
+    loadConstantOrVariableTag(t1, t0)
+    cieq t0, BooleanTag, t0
+    storei BooleanTag, TagOffset[cfr, t2, 8]
+    storei t0, PayloadOffset[cfr, t2, 8]
+    dispatch(3)
+
+
+# op_is_number: in the 32_64 value encoding, number tags are Int32Tag and
+# everything at or below LowestTag (doubles). Adding 1 and doing an unsigned
+# below-compare against LowestTag + 1 tests that whole range in one branch.
+_llint_op_is_number:
+    traceExecution()
+    loadi 8[PC], t1
+    loadi 4[PC], t2
+    loadConstantOrVariableTag(t1, t0)
+    storei BooleanTag, TagOffset[cfr, t2, 8]
+    addi 1, t0
+    cib t0, LowestTag + 1, t1
+    storei t1, PayloadOffset[cfr, t2, 8]
+    dispatch(3)
+
+
+# op_is_string: true iff the value is a cell whose JSCell type is StringType.
+_llint_op_is_string:
+    traceExecution()
+    loadi 8[PC], t1
+    loadi 4[PC], t2
+    loadConstantOrVariable(t1, t0, t3)
+    storei BooleanTag, TagOffset[cfr, t2, 8]
+    bineq t0, CellTag, .opIsStringNotCell
+    cbeq JSCell::m_type[t3], StringType, t1
+    storei t1, PayloadOffset[cfr, t2, 8]
+    dispatch(3)
+.opIsStringNotCell:
+    storep 0, PayloadOffset[cfr, t2, 8]
+    dispatch(3)
+
+
+# op_is_object: true iff the value is a cell whose type is >= ObjectType.
+_llint_op_is_object:
+    traceExecution()
+    loadi 8[PC], t1
+    loadi 4[PC], t2
+    loadConstantOrVariable(t1, t0, t3)
+    storei BooleanTag, TagOffset[cfr, t2, 8]
+    bineq t0, CellTag, .opIsObjectNotCell
+    cbaeq JSCell::m_type[t3], ObjectType, t1
+    storei t1, PayloadOffset[cfr, t2, 8]
+    dispatch(3)
+.opIsObjectNotCell:
+    storep 0, PayloadOffset[cfr, t2, 8]
+    dispatch(3)
+
+
+# Loads a property known to live out-of-line (offset >= firstOutOfLineOffset).
+# Out-of-line slots are addressed backwards from the butterfly, hence the
+# negated offset and the (firstOutOfLineOffset - 2) * 8 bias.
+macro loadPropertyAtVariableOffsetKnownNotInline(propertyOffset, objectAndStorage, tag, payload)
+    assert(macro (ok) bigteq propertyOffset, firstOutOfLineOffset, ok end)
+    negi propertyOffset
+    loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
+    loadi TagOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], tag
+    loadi PayloadOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], payload
+end
+
+# General property load: inline storage (directly after the JSObject header)
+# when the offset is below firstOutOfLineOffset, butterfly storage otherwise.
+# Both paths funnel into the same biased addressing at .ready.
+macro loadPropertyAtVariableOffset(propertyOffset, objectAndStorage, tag, payload)
+    bilt propertyOffset, firstOutOfLineOffset, .isInline
+    loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
+    negi propertyOffset
+    jmp .ready
+.isInline:
+    addp sizeof JSObject - (firstOutOfLineOffset - 2) * 8, objectAndStorage
+.ready:
+    loadi TagOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], tag
+    loadi PayloadOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], payload
+end
+
+# Store counterpart of loadPropertyAtVariableOffset; identical addressing.
+macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, tag, payload)
+    bilt propertyOffsetAsInt, firstOutOfLineOffset, .isInline
+    loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
+    negi propertyOffsetAsInt
+    jmp .ready
+.isInline:
+    addp sizeof JSObject - (firstOutOfLineOffset - 2) * 8, objectAndStorage
+.ready:
+    storei tag, TagOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8]
+    storei payload, PayloadOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8]
+end
+
+
+# We only do monomorphic get_by_id caching for now, and we do not modify the
+# opcode. We do, however, allow for the cache to change anytime if fails, since
+# ping-ponging is free. At best we get lucky and the get_by_id will continue
+# to take fast path on the new cache. At worst we take slow path, which is what
+# we would have been doing anyway.
+
+# getById: monomorphic inline cache. The instruction stream carries the
+# cached structure (offset 16) and property offset (offset 20); a structure
+# mismatch bails to the generic slow path, which may repatch the cache.
+macro getById(getPropertyStorage)
+    traceExecution()
+    loadi 8[PC], t0
+    loadi 16[PC], t1
+    loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdSlow)
+    loadi 20[PC], t2
+    getPropertyStorage(
+        t3,
+        t0,
+        macro (propertyStorage, scratch)
+            bpneq JSCell::m_structureID[t3], t1, .opGetByIdSlow
+            loadi 4[PC], t1
+            loadi TagOffset[propertyStorage, t2], scratch
+            loadi PayloadOffset[propertyStorage, t2], t2
+            storei scratch, TagOffset[cfr, t1, 8]
+            storei t2, PayloadOffset[cfr, t1, 8]
+            valueProfile(scratch, t2, 32, t1)
+            dispatch(9)
+        end)
+
+    .opGetByIdSlow:
+        callSlowPath(_llint_slow_path_get_by_id)
+        dispatch(9)
+end
+
+# Inline-storage and out-of-line-storage flavors of the cached get_by_id.
+_llint_op_get_by_id:
+    getById(withInlineStorage)
+
+
+_llint_op_get_by_id_out_of_line:
+    getById(withOutOfLineStorage)
+
+
+# op_get_array_length: specialized get_by_id for "length" on arrays. Requires
+# the array profile to say IsArray with a real indexing shape, and rejects a
+# negative publicLength (would need a double box) via the slow path.
+_llint_op_get_array_length:
+    traceExecution()
+    loadi 8[PC], t0
+    loadp 16[PC], t1
+    loadConstantOrVariablePayload(t0, CellTag, t3, .opGetArrayLengthSlow)
+    move t3, t2
+    arrayProfile(t2, t1, t0)
+    btiz t2, IsArray, .opGetArrayLengthSlow
+    btiz t2, IndexingShapeMask, .opGetArrayLengthSlow
+    loadi 4[PC], t1
+    loadp JSObject::m_butterfly[t3], t0
+    loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0
+    bilt t0, 0, .opGetArrayLengthSlow
+    valueProfile(Int32Tag, t0, 32, t2)
+    storep t0, PayloadOffset[cfr, t1, 8]
+    storep Int32Tag, TagOffset[cfr, t1, 8]
+    dispatch(9)
+
+.opGetArrayLengthSlow:
+    callSlowPath(_llint_slow_path_get_by_id)
+    dispatch(9)
+
+
+# putById: monomorphic put cache (no structure transition — the structure is
+# checked but not changed). Write barrier runs before the store.
+macro putById(getPropertyStorage)
+    traceExecution()
+    writeBarrierOnOperands(1, 3)
+    loadi 4[PC], t3
+    loadi 16[PC], t1
+    loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
+    loadi 12[PC], t2
+    getPropertyStorage(
+        t0,
+        t3,
+        macro (propertyStorage, scratch)
+            bpneq JSCell::m_structureID[t0], t1, .opPutByIdSlow
+            loadi 20[PC], t1
+            loadConstantOrVariable2Reg(t2, scratch, t2)
+            storei scratch, TagOffset[propertyStorage, t1]
+            storei t2, PayloadOffset[propertyStorage, t1]
+            dispatch(9)
+        end)
+
+    .opPutByIdSlow:
+        callSlowPath(_llint_slow_path_put_by_id)
+        dispatch(9)
+end
+
+# Inline-storage and out-of-line-storage flavors of the cached put_by_id.
+_llint_op_put_by_id:
+    putById(withInlineStorage)
+
+
+_llint_op_put_by_id_out_of_line:
+    putById(withOutOfLineStorage)
+
+
+# putByIdTransition: cached put that ALSO transitions the object's structure
+# (offset 24 holds the new structure). The value is stored first, then the
+# structure pointer is swapped — order matters for concurrent GC scans.
+macro putByIdTransition(additionalChecks, getPropertyStorage)
+    traceExecution()
+    writeBarrierOnOperand(1)
+    loadi 4[PC], t3
+    loadi 16[PC], t1
+    loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
+    loadi 12[PC], t2
+    bpneq JSCell::m_structureID[t0], t1, .opPutByIdSlow
+    additionalChecks(t1, t3, .opPutByIdSlow)
+    loadi 20[PC], t1
+    getPropertyStorage(
+        t0,
+        t3,
+        macro (propertyStorage, scratch)
+            addp t1, propertyStorage, t3
+            loadConstantOrVariable2Reg(t2, t1, t2)
+            storei t1, TagOffset[t3]
+            loadi 24[PC], t1
+            storei t2, PayloadOffset[t3]
+            storep t1, JSCell::m_structureID[t0]
+            dispatch(9)
+        end)
+
+    .opPutByIdSlow:
+        callSlowPath(_llint_slow_path_put_by_id)
+        dispatch(9)
+end
+
+# "Direct" transitions need no prototype-chain validation.
+macro noAdditionalChecks(oldStructure, scratch, slowPath)
+end
+
+# "Normal" transitions verify that every structure on the prototype chain
+# still matches the cached StructureChain (offset 28); any mismatch means the
+# cached transition is stale and the slow path must redo the put.
+macro structureChainChecks(oldStructure, scratch, slowPath)
+    const protoCell = oldStructure # Reusing the oldStructure register for the proto
+
+    loadp 28[PC], scratch
+    assert(macro (ok) btpnz scratch, ok end)
+    loadp StructureChain::m_vector[scratch], scratch
+    assert(macro (ok) btpnz scratch, ok end)
+    bieq Structure::m_prototype + TagOffset[oldStructure], NullTag, .done
+.loop:
+    loadi Structure::m_prototype + PayloadOffset[oldStructure], protoCell
+    loadp JSCell::m_structureID[protoCell], oldStructure
+    bpneq oldStructure, [scratch], slowPath
+    addp 4, scratch
+    bineq Structure::m_prototype + TagOffset[oldStructure], NullTag, .loop
+.done:
+end
+
+# The four transition entry points: direct vs. normal, inline vs. out-of-line.
+_llint_op_put_by_id_transition_direct:
+    putByIdTransition(noAdditionalChecks, withInlineStorage)
+
+
+_llint_op_put_by_id_transition_direct_out_of_line:
+    putByIdTransition(noAdditionalChecks, withOutOfLineStorage)
+
+
+_llint_op_put_by_id_transition_normal:
+    putByIdTransition(structureChainChecks, withInlineStorage)
+
+
+_llint_op_put_by_id_transition_normal_out_of_line:
+    putByIdTransition(structureChainChecks, withOutOfLineStorage)
+
+
+# op_get_by_val: indexed load dispatched on the butterfly's indexing shape
+# (Int32/Contiguous, Double, then ArrayStorage..SlowPutArrayStorage). Holes
+# (EmptyValueTag) and out-of-bounds indices record m_outOfBounds in the array
+# profile and fall through to the slow path. NaN in a Double array means a
+# hole and also goes slow.
+_llint_op_get_by_val:
+    traceExecution()
+    loadi 8[PC], t2
+    loadConstantOrVariablePayload(t2, CellTag, t0, .opGetByValSlow)
+    move t0, t2
+    loadp 16[PC], t3
+    arrayProfile(t2, t3, t1)
+    loadi 12[PC], t3
+    loadConstantOrVariablePayload(t3, Int32Tag, t1, .opGetByValSlow)
+    loadp JSObject::m_butterfly[t0], t3
+    andi IndexingShapeMask, t2
+    bieq t2, Int32Shape, .opGetByValIsContiguous
+    bineq t2, ContiguousShape, .opGetByValNotContiguous
+.opGetByValIsContiguous:
+
+    biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValOutOfBounds
+    loadi TagOffset[t3, t1, 8], t2
+    loadi PayloadOffset[t3, t1, 8], t1
+    jmp .opGetByValDone
+
+.opGetByValNotContiguous:
+    bineq t2, DoubleShape, .opGetByValNotDouble
+    biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValOutOfBounds
+    loadd [t3, t1, 8], ft0
+    bdnequn ft0, ft0, .opGetByValSlow
+    # FIXME: This could be massively optimized.
+    fd2ii ft0, t1, t2
+    loadi 4[PC], t0
+    jmp .opGetByValNotEmpty
+
+.opGetByValNotDouble:
+    subi ArrayStorageShape, t2
+    bia t2, SlowPutArrayStorageShape - ArrayStorageShape, .opGetByValSlow
+    biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t3], .opGetByValOutOfBounds
+    loadi ArrayStorage::m_vector + TagOffset[t3, t1, 8], t2
+    loadi ArrayStorage::m_vector + PayloadOffset[t3, t1, 8], t1
+
+.opGetByValDone:
+    loadi 4[PC], t0
+    bieq t2, EmptyValueTag, .opGetByValOutOfBounds
+.opGetByValNotEmpty:
+    storei t2, TagOffset[cfr, t0, 8]
+    storei t1, PayloadOffset[cfr, t0, 8]
+    valueProfile(t2, t1, 20, t0)
+    dispatch(6)
+
+.opGetByValOutOfBounds:
+    loadpFromInstruction(4, t0)
+    storeb 1, ArrayProfile::m_outOfBounds[t0]
+.opGetByValSlow:
+    callSlowPath(_llint_slow_path_get_by_val)
+    dispatch(6)
+
+
+# contiguousPutByVal: bounds-checked store into contiguous storage. Writes
+# inside publicLength store directly; writes within vectorLength (but past
+# publicLength) record m_mayStoreToHole, grow publicLength to index + 1, and
+# then store. Past vectorLength jumps to .opPutByValOutOfBounds in the caller.
+macro contiguousPutByVal(storeCallback)
+    biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds
+.storeResult:
+    loadi 12[PC], t2
+    storeCallback(t2, t1, t0, t3)
+    dispatch(5)
+
+.outOfBounds:
+    biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
+    loadp 16[PC], t2
+    storeb 1, ArrayProfile::m_mayStoreToHole[t2]
+    addi 1, t3, t2
+    storei t2, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
+    jmp .storeResult
+end
+
+# putByVal: indexed store dispatched on indexing shape. Int32 arrays only
+# accept int32 values; Double arrays convert int32 or reuse the double bits
+# (NaN payloads go slow); Contiguous stores the boxed value verbatim;
+# ArrayStorage handles holes by bumping m_numValuesInVector / publicLength.
+macro putByVal(slowPath)
+    traceExecution()
+    writeBarrierOnOperands(1, 3)
+    loadi 4[PC], t0
+    loadConstantOrVariablePayload(t0, CellTag, t1, .opPutByValSlow)
+    move t1, t2
+    loadp 16[PC], t3
+    arrayProfile(t2, t3, t0)
+    loadi 8[PC], t0
+    loadConstantOrVariablePayload(t0, Int32Tag, t3, .opPutByValSlow)
+    loadp JSObject::m_butterfly[t1], t0
+    andi IndexingShapeMask, t2
+    bineq t2, Int32Shape, .opPutByValNotInt32
+    contiguousPutByVal(
+        macro (operand, scratch, base, index)
+            loadConstantOrVariablePayload(operand, Int32Tag, scratch, .opPutByValSlow)
+            storei Int32Tag, TagOffset[base, index, 8]
+            storei scratch, PayloadOffset[base, index, 8]
+        end)
+
+.opPutByValNotInt32:
+    bineq t2, DoubleShape, .opPutByValNotDouble
+    contiguousPutByVal(
+        macro (operand, scratch, base, index)
+            const tag = scratch
+            const payload = operand
+            loadConstantOrVariable2Reg(operand, tag, payload)
+            bineq tag, Int32Tag, .notInt
+            ci2d payload, ft0
+            jmp .ready
+        .notInt:
+            fii2d payload, tag, ft0
+            bdnequn ft0, ft0, .opPutByValSlow
+        .ready:
+            stored ft0, [base, index, 8]
+        end)
+
+.opPutByValNotDouble:
+    bineq t2, ContiguousShape, .opPutByValNotContiguous
+    contiguousPutByVal(
+        macro (operand, scratch, base, index)
+            const tag = scratch
+            const payload = operand
+            loadConstantOrVariable2Reg(operand, tag, payload)
+            storei tag, TagOffset[base, index, 8]
+            storei payload, PayloadOffset[base, index, 8]
+        end)
+
+.opPutByValNotContiguous:
+    bineq t2, ArrayStorageShape, .opPutByValSlow
+    biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
+    bieq ArrayStorage::m_vector + TagOffset[t0, t3, 8], EmptyValueTag, .opPutByValArrayStorageEmpty
+.opPutByValArrayStorageStoreResult:
+    loadi 12[PC], t2
+    loadConstantOrVariable2Reg(t2, t1, t2)
+    storei t1, ArrayStorage::m_vector + TagOffset[t0, t3, 8]
+    storei t2, ArrayStorage::m_vector + PayloadOffset[t0, t3, 8]
+    dispatch(5)
+
+.opPutByValArrayStorageEmpty:
+    loadp 16[PC], t1
+    storeb 1, ArrayProfile::m_mayStoreToHole[t1]
+    addi 1, ArrayStorage::m_numValuesInVector[t0]
+    bib t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .opPutByValArrayStorageStoreResult
+    addi 1, t3, t1
+    storei t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
+    jmp .opPutByValArrayStorageStoreResult
+
+.opPutByValOutOfBounds:
+    loadpFromInstruction(4, t0)
+    storeb 1, ArrayProfile::m_outOfBounds[t0]
+.opPutByValSlow:
+    callSlowPath(slowPath)
+    dispatch(5)
+end
+
+# Normal and "direct" (defineProperty-style) put_by_val share the fast path
+# and differ only in which slow path they invoke.
+_llint_op_put_by_val:
+    putByVal(_llint_slow_path_put_by_val)
+
+_llint_op_put_by_val_direct:
+    putByVal(_llint_slow_path_put_by_val_direct)
+
+# op_jmp: unconditional branch; target offset is the operand at byte offset 4.
+_llint_op_jmp:
+    traceExecution()
+    dispatchBranch(4[PC])
+
+
+# Shared skeleton for the boolean conditional jumps: fast path requires a
+# BooleanTag operand; conditionOp decides whether to take the branch. The
+# slow path re-dispatches from whatever PC it computed (dispatch(0)).
+macro jumpTrueOrFalse(conditionOp, slow)
+    loadi 4[PC], t1
+    loadConstantOrVariablePayload(t1, BooleanTag, t0, .slow)
+    conditionOp(t0, .target)
+    dispatch(3)
+
+.target:
+    dispatchBranch(8[PC])
+
+.slow:
+    callSlowPath(slow)
+    dispatch(0)
+end
+
+
+# Shared skeleton for jeq_null / jneq_null. Cells go through cellHandler
+# (which deals with MasqueradesAsUndefined); immediates have their tag OR'ed
+# with 1 so NullTag and UndefinedTag collapse to one comparison.
+macro equalNull(cellHandler, immediateHandler)
+    loadi 4[PC], t0
+    assertNotConstant(t0)
+    loadi TagOffset[cfr, t0, 8], t1
+    loadi PayloadOffset[cfr, t0, 8], t0
+    bineq t1, CellTag, .immediate
+    loadp JSCell::m_structureID[t0], t2
+    cellHandler(t2, JSCell::m_flags[t0], .target)
+    dispatch(3)
+
+.target:
+    dispatchBranch(8[PC])
+
+.immediate:
+    ori 1, t1
+    immediateHandler(t1, .target)
+    dispatch(3)
+end
+
+# jeq_null: a cell equals null only if it masquerades as undefined within
+# this code block's global object.
+_llint_op_jeq_null:
+    traceExecution()
+    equalNull(
+        macro (structure, value, target)
+            btbz value, MasqueradesAsUndefined, .opJeqNullNotMasqueradesAsUndefined
+            loadp CodeBlock[cfr], t0
+            loadp CodeBlock::m_globalObject[t0], t0
+            bpeq Structure::m_globalObject[structure], t0, target
+.opJeqNullNotMasqueradesAsUndefined:
+        end,
+        macro (value, target) bieq value, NullTag, target end)
+
+
+# jneq_null: exact inverse of jeq_null.
+_llint_op_jneq_null:
+    traceExecution()
+    equalNull(
+        macro (structure, value, target)
+            btbz value, MasqueradesAsUndefined, target
+            loadp CodeBlock[cfr], t0
+            loadp CodeBlock::m_globalObject[t0], t0
+            bpneq Structure::m_globalObject[structure], t0, target
+        end,
+        macro (value, target) bineq value, NullTag, target end)
+
+
+# op_jneq_ptr: branch unless the operand is the cell stored in the global
+# object's special-pointer table at the given index (operand at offset 8).
+_llint_op_jneq_ptr:
+    traceExecution()
+    loadi 4[PC], t0
+    loadi 8[PC], t1
+    loadp CodeBlock[cfr], t2
+    loadp CodeBlock::m_globalObject[t2], t2
+    bineq TagOffset[cfr, t0, 8], CellTag, .opJneqPtrBranch
+    loadp JSGlobalObject::m_specialPointers[t2, t1, 4], t1
+    bpeq PayloadOffset[cfr, t0, 8], t1, .opJneqPtrFallThrough
+.opJneqPtrBranch:
+    dispatchBranch(12[PC])
+.opJneqPtrFallThrough:
+    dispatch(4)
+
+
+# Shared skeleton for the relational compare-and-branch opcodes. Handles the
+# four int/double operand combinations inline (tags at or below LowestTag are
+# doubles; fii2d reassembles them from tag:payload); anything else — e.g. a
+# string operand — goes to the slow path.
+macro compare(integerCompare, doubleCompare, slowPath)
+    loadi 4[PC], t2
+    loadi 8[PC], t3
+    loadConstantOrVariable(t2, t0, t1)
+    loadConstantOrVariable2Reg(t3, t2, t3)
+    bineq t0, Int32Tag, .op1NotInt
+    bineq t2, Int32Tag, .op2NotInt
+    integerCompare(t1, t3, .jumpTarget)
+    dispatch(4)
+
+.op1NotInt:
+    bia t0, LowestTag, .slow
+    bib t2, LowestTag, .op1NotIntOp2Double
+    bineq t2, Int32Tag, .slow
+    ci2d t3, ft1
+    jmp .op1NotIntReady
+.op1NotIntOp2Double:
+    fii2d t3, t2, ft1
+.op1NotIntReady:
+    fii2d t1, t0, ft0
+    doubleCompare(ft0, ft1, .jumpTarget)
+    dispatch(4)
+
+.op2NotInt:
+    ci2d t1, ft0
+    bia t2, LowestTag, .slow
+    fii2d t3, t2, ft1
+    doubleCompare(ft0, ft1, .jumpTarget)
+    dispatch(4)
+
+.jumpTarget:
+    dispatchBranch(12[PC])
+
+.slow:
+    callSlowPath(slowPath)
+    dispatch(0)
+end
+
+
+# op_switch_imm: table switch on an int32 scrutinee. Index into the code
+# block's SimpleJumpTable (rebased by its min value); a zero branch offset or
+# out-of-range index falls through to the default target. Doubles go to the
+# slow path; all other tags take the default.
+_llint_op_switch_imm:
+    traceExecution()
+    loadi 12[PC], t2
+    loadi 4[PC], t3
+    loadConstantOrVariable(t2, t1, t0)
+    loadp CodeBlock[cfr], t2
+    loadp CodeBlock::m_rareData[t2], t2
+    muli sizeof SimpleJumpTable, t3    # FIXME: would be nice to peephole this!
+    loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
+    addp t3, t2
+    bineq t1, Int32Tag, .opSwitchImmNotInt
+    subi SimpleJumpTable::min[t2], t0
+    biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchImmFallThrough
+    loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t3
+    loadi [t3, t0, 4], t1
+    btiz t1, .opSwitchImmFallThrough
+    dispatchBranchWithOffset(t1)
+
+.opSwitchImmNotInt:
+    bib t1, LowestTag, .opSwitchImmSlow    # Go to slow path if it's a double.
+.opSwitchImmFallThrough:
+    dispatchBranch(8[PC])
+
+.opSwitchImmSlow:
+    callSlowPath(_llint_slow_path_switch_imm)
+    dispatch(0)
+
+
+# op_switch_char: table switch on a single-character string. Fast path needs
+# a resolved (non-rope) string of length 1; reads the character from the
+# 8-bit or 16-bit buffer per the StringImpl flags, then indexes the jump
+# table like switch_imm. Ropes are resolved by the slow path.
+_llint_op_switch_char:
+    traceExecution()
+    loadi 12[PC], t2
+    loadi 4[PC], t3
+    loadConstantOrVariable(t2, t1, t0)
+    loadp CodeBlock[cfr], t2
+    loadp CodeBlock::m_rareData[t2], t2
+    muli sizeof SimpleJumpTable, t3
+    loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
+    addp t3, t2
+    bineq t1, CellTag, .opSwitchCharFallThrough
+    bbneq JSCell::m_type[t0], StringType, .opSwitchCharFallThrough
+    bineq JSString::m_length[t0], 1, .opSwitchCharFallThrough
+    loadp JSString::m_value[t0], t0
+    btpz  t0, .opSwitchOnRope
+    loadp StringImpl::m_data8[t0], t1
+    btinz StringImpl::m_hashAndFlags[t0], HashFlags8BitBuffer, .opSwitchChar8Bit
+    loadh [t1], t0
+    jmp .opSwitchCharReady
+.opSwitchChar8Bit:
+    loadb [t1], t0
+.opSwitchCharReady:
+    subi SimpleJumpTable::min[t2], t0
+    biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchCharFallThrough
+    loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t2
+    loadi [t2, t0, 4], t1
+    btiz t1, .opSwitchCharFallThrough
+    dispatchBranchWithOffset(t1)
+
+.opSwitchCharFallThrough:
+    dispatchBranch(8[PC])
+
+.opSwitchOnRope:
+    callSlowPath(_llint_slow_path_switch_char)
+    dispatch(0)
+
+
+# Records the structure of the 'this' argument into the call's ArrayProfile
+# (only when 'this' is a cell).
+macro arrayProfileForCall()
+    loadi 16[PC], t3
+    negi t3
+    bineq ThisArgumentOffset + TagOffset[cfr, t3, 8], CellTag, .done
+    loadi ThisArgumentOffset + PayloadOffset[cfr, t3, 8], t0
+    loadp JSCell::m_structureID[t0], t0
+    loadpFromInstruction(CallOpCodeSize - 2, t1)
+    storep t0, ArrayProfile::m_lastSeenStructureID[t1]
+.done:
+end
+
+# doCall: fast path requires the callee cell to match the one cached in the
+# LLIntCallLinkInfo. Builds the callee frame below the current one (negated
+# slot count), stores Callee/ArgumentCount, saves PC into the caller frame's
+# ArgumentCount tag slot, then tail-calls the linked target.
+macro doCall(slowPath)
+    loadi 8[PC], t0
+    loadi 20[PC], t1
+    loadp LLIntCallLinkInfo::callee[t1], t2
+    loadConstantOrVariablePayload(t0, CellTag, t3, .opCallSlow)
+    bineq t3, t2, .opCallSlow
+    loadi 16[PC], t3
+    lshifti 3, t3
+    negi t3
+    addp cfr, t3  # t3 contains the new value of cfr
+    storei t2, Callee + PayloadOffset[t3]
+    loadi 12[PC], t2
+    storei PC, ArgumentCount + TagOffset[cfr]
+    storei t2, ArgumentCount + PayloadOffset[t3]
+    storei CellTag, Callee + TagOffset[t3]
+    addp CallerFrameAndPCSize, t3
+    callTargetFunction(t1, t3)
+
+.opCallSlow:
+    slowPathForCall(slowPath)
+end
+
+
+# op_ret: loads the return value into t1:t0 (tag:payload) and returns.
+_llint_op_ret:
+    traceExecution()
+    checkSwitchToJITForEpilogue()
+    loadi 4[PC], t2
+    loadConstantOrVariable(t2, t1, t0)
+    doReturn()
+
+
+# op_to_primitive: values that are not objects (including all immediates)
+# pass through unchanged; objects need their toPrimitive run on the slow path.
+_llint_op_to_primitive:
+    traceExecution()
+    loadi 8[PC], t2
+    loadi 4[PC], t3
+    loadConstantOrVariable(t2, t1, t0)
+    bineq t1, CellTag, .opToPrimitiveIsImm
+    bbaeq JSCell::m_type[t0], ObjectType, .opToPrimitiveSlowCase
+.opToPrimitiveIsImm:
+    storei t1, TagOffset[cfr, t3, 8]
+    storei t0, PayloadOffset[cfr, t3, 8]
+    dispatch(3)
+
+.opToPrimitiveSlowCase:
+    callSlowPath(_slow_path_to_primitive)
+    dispatch(3)
+
+
+# op_catch: landing pad for thrown exceptions. Recovers the VM pointer by
+# masking the Callee cell down to its MarkedBlock, restores cfr/PC from the
+# VM's *ForThrow fields, clears VM::m_exception, and stores both the
+# Exception object and its wrapped value into the catch operands.
+_llint_op_catch:
+    # This is where we end up from the JIT's throw trampoline (because the
+    # machine code return address will be set to _llint_op_catch), and from
+    # the interpreter's throw trampoline (see _llint_throw_trampoline).
+    # The throwing code must have known that we were throwing to the interpreter,
+    # and have set VM::targetInterpreterPCForThrow.
+    loadp Callee + PayloadOffset[cfr], t3
+    andp MarkedBlockMask, t3
+    loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+    loadp VM::callFrameForThrow[t3], cfr
+    loadp VM::vmEntryFrameForThrow[t3], t0
+    storep t0, VM::topVMEntryFrame[t3]
+    restoreStackPointerAfterCall()
+
+    loadi VM::targetInterpreterPCForThrow[t3], PC
+    loadi VM::m_exception[t3], t0
+    storei 0, VM::m_exception[t3]
+    loadi 4[PC], t2
+    storei t0, PayloadOffset[cfr, t2, 8]
+    storei CellTag, TagOffset[cfr, t2, 8]
+
+    loadi Exception::m_value + TagOffset[t0], t1
+    loadi Exception::m_value + PayloadOffset[t0], t0
+    loadi 8[PC], t2
+    storei t0, PayloadOffset[cfr, t2, 8]
+    storei t1, TagOffset[cfr, t2, 8]
+
+    traceExecution()  # This needs to be here because we don't want to clobber t0, t1, t2, t3 above.
+    dispatch(3)
+
+# op_end: terminates top-level code; returns the operand (must be a real
+# register, not a constant) in t1:t0.
+_llint_op_end:
+    traceExecution()
+    checkSwitchToJITForEpilogue()
+    loadi 4[PC], t0
+    assertNotConstant(t0)
+    loadi TagOffset[cfr, t0, 8], t1
+    loadi PayloadOffset[cfr, t0, 8], t0
+    doReturn()
+
+
+_llint_throw_from_slow_path_trampoline:
+    callSlowPath(_llint_slow_path_handle_exception)
+
+    # When throwing from the interpreter (i.e. throwing from LLIntSlowPaths), so
+    # the throw target is not necessarily interpreted code, we come to here.
+    # This essentially emulates the JIT's throwing protocol.
+    loadp Callee[cfr], t1
+    andp MarkedBlockMask, t1
+    loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1
+    jmp VM::targetMachinePCForThrow[t1]
+
+
+# Variant used while a call is being set up: the return address must first be
+# preserved so the frame walker sees a consistent frame.
+_llint_throw_during_call_trampoline:
+    preserveReturnAddressAfterCall(t2)
+    jmp _llint_throw_from_slow_path_trampoline
+
+
+# nativeCallTrampoline: calls a host (C) function for a JS call. Per-arch
+# blocks differ only in calling convention and stack alignment; all recover
+# the VM from the Callee cell, publish cfr as VM::topCallFrame, invoke the
+# native function from the executable, then check VM::m_exception on return.
+macro nativeCallTrampoline(executableOffsetToFunction)
+
+    functionPrologue()
+    storep 0, CodeBlock[cfr]
+    loadi Callee + PayloadOffset[cfr], t1
+    // Callee is still in t1 for code below
+    if X86 or X86_WIN
+        subp 8, sp # align stack pointer
+        andp MarkedBlockMask, t1
+        loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t3
+        storep cfr, VM::topCallFrame[t3]
+        move cfr, t2  # t2 = ecx
+        storep t2, [sp]
+        loadi Callee + PayloadOffset[cfr], t1
+        loadp JSFunction::m_executable[t1], t1
+        checkStackPointerAlignment(t3, 0xdead0001)
+        call executableOffsetToFunction[t1]
+        loadp Callee + PayloadOffset[cfr], t3
+        andp MarkedBlockMask, t3
+        loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+        addp 8, sp
+    elsif ARM or ARMv7 or ARMv7_TRADITIONAL or C_LOOP or MIPS or SH4
+        subp 8, sp # align stack pointer
+        # t1 already contains the Callee.
+        andp MarkedBlockMask, t1
+        loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1
+        storep cfr, VM::topCallFrame[t1]
+        if MIPS or SH4
+            move cfr, a0
+        else
+            move cfr, t0
+        end
+        loadi Callee + PayloadOffset[cfr], t1
+        loadp JSFunction::m_executable[t1], t1
+        checkStackPointerAlignment(t3, 0xdead0001)
+        if C_LOOP
+            cloopCallNative executableOffsetToFunction[t1]
+        else
+            call executableOffsetToFunction[t1]
+        end
+        loadp Callee + PayloadOffset[cfr], t3
+        andp MarkedBlockMask, t3
+        loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+        addp 8, sp
+    else
+        error
+    end
+
+    functionEpilogue()
+    # t3 holds the VM in all arch paths at this point.
+    btinz VM::m_exception[t3], .handleException
+    ret
+
+.handleException:
+    storep cfr, VM::topCallFrame[t3]
+    restoreStackPointerAfterCall()
+    jmp _llint_throw_from_slow_path_trampoline
+end
+
+
+# Stores the code block's global object (as a cell) into operand `dst`.
+macro getGlobalObject(dst)
+    loadp CodeBlock[cfr], t0
+    loadp CodeBlock::m_globalObject[t0], t0
+    loadisFromInstruction(dst, t1)
+    storei CellTag, TagOffset[cfr, t1, 8]
+    storei t0, PayloadOffset[cfr, t1, 8]
+end
+
+# Bails to slowPath if the global object's var-injection watchpoint has fired
+# (i.e. eval/with may have injected variables, invalidating cached scopes).
+macro varInjectionCheck(slowPath)
+    loadp CodeBlock[cfr], t0
+    loadp CodeBlock::m_globalObject[t0], t0
+    loadp JSGlobalObject::m_varInjectionWatchpoint[t0], t0
+    bbeq WatchpointSet::m_state[t0], IsInvalidated, slowPath
+end
+
+# Walks JSScope::m_next from the scope operand (operand 2) the number of hops
+# given by operand 5, storing the resulting scope cell into operand 1.
+macro resolveScope()
+    loadp CodeBlock[cfr], t0
+    loadisFromInstruction(5, t2)
+
+    loadisFromInstruction(2, t0)
+    loadp PayloadOffset[cfr, t0, 8], t0
+    btiz t2, .resolveScopeLoopEnd
+
+.resolveScopeLoop:
+    loadp JSScope::m_next[t0], t0
+    subi 1, t2
+    btinz t2, .resolveScopeLoop
+
+.resolveScopeLoopEnd:
+    loadisFromInstruction(1, t1)
+    storei CellTag, TagOffset[cfr, t1, 8]
+    storei t0, PayloadOffset[cfr, t1, 8]
+end
+
+
+# op_resolve_scope: dispatches on the cached ResolveType (operand 4). Global
+# cases resolve to the global object; closure cases walk the scope chain;
+# the *WithVarInjectionChecks variants first validate the watchpoint. Unknown
+# or invalidated cases go dynamic (slow path).
+_llint_op_resolve_scope:
+    traceExecution()
+    loadisFromInstruction(4, t0)
+
+#rGlobalProperty:
+    bineq t0, GlobalProperty, .rGlobalVar
+    getGlobalObject(1)
+    dispatch(7)
+
+.rGlobalVar:
+    bineq t0, GlobalVar, .rClosureVar
+    getGlobalObject(1)
+    dispatch(7)
+
+.rClosureVar:
+    bineq t0, ClosureVar, .rGlobalPropertyWithVarInjectionChecks
+    resolveScope()
+    dispatch(7)
+
+.rGlobalPropertyWithVarInjectionChecks:
+    bineq t0, GlobalPropertyWithVarInjectionChecks, .rGlobalVarWithVarInjectionChecks
+    varInjectionCheck(.rDynamic)
+    getGlobalObject(1)
+    dispatch(7)
+
+.rGlobalVarWithVarInjectionChecks:
+    bineq t0, GlobalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks
+    varInjectionCheck(.rDynamic)
+    getGlobalObject(1)
+    dispatch(7)
+
+.rClosureVarWithVarInjectionChecks:
+    bineq t0, ClosureVarWithVarInjectionChecks, .rDynamic
+    varInjectionCheck(.rDynamic)
+    resolveScope()
+    dispatch(7)
+
+.rDynamic:
+    callSlowPath(_llint_slow_path_resolve_scope)
+    dispatch(7)
+
+
+# Loads the object in `operand` into t0 and bails if its structure does not
+# match the one cached at instruction slot 5.
+macro loadWithStructureCheck(operand, slowPath)
+    loadisFromInstruction(operand, t0)
+    loadp PayloadOffset[cfr, t0, 8], t0
+    loadpFromInstruction(5, t1)
+    bpneq JSCell::m_structureID[t0], t1, slowPath
+end
+
+# Reads a structure property of the object in t0 at the cached offset
+# (slot 6), profiles it, and stores it into the destination (slot 1).
+macro getProperty()
+    loadisFromInstruction(6, t3)
+    loadPropertyAtVariableOffset(t3, t0, t1, t2)
+    valueProfile(t1, t2, 28, t0)
+    loadisFromInstruction(1, t0)
+    storei t1, TagOffset[cfr, t0, 8]
+    storei t2, PayloadOffset[cfr, t0, 8]
+end
+
+# Reads a global variable through the cached direct pointer at slot 6.
+macro getGlobalVar()
+    loadpFromInstruction(6, t0)
+    loadp TagOffset[t0], t1
+    loadp PayloadOffset[t0], t2
+    valueProfile(t1, t2, 28, t0)
+    loadisFromInstruction(1, t0)
+    storei t1, TagOffset[cfr, t0, 8]
+    storei t2, PayloadOffset[cfr, t0, 8]
+end
+
+# Reads a closure variable from the environment record in t0 at the cached
+# slot index (slot 6).
+macro getClosureVar()
+    loadisFromInstruction(6, t3)
+    loadp JSEnvironmentRecord_variables + TagOffset[t0, t3, 8], t1
+    loadp JSEnvironmentRecord_variables + PayloadOffset[t0, t3, 8], t2
+    valueProfile(t1, t2, 28, t0)
+    loadisFromInstruction(1, t0)
+    storei t1, TagOffset[cfr, t0, 8]
+    storei t2, PayloadOffset[cfr, t0, 8]
+end
+
+# op_get_from_scope: dispatches on the cached resolve mode (operand 4,
+# masked). GlobalProperty needs a structure check; GlobalVar reads through a
+# direct pointer; ClosureVar reads from the scope's environment record; the
+# *WithVarInjectionChecks variants validate the watchpoint first.
+_llint_op_get_from_scope:
+    traceExecution()
+    loadisFromInstruction(4, t0)
+    andi ResolveModeMask, t0
+
+#gGlobalProperty:
+    bineq t0, GlobalProperty, .gGlobalVar
+    loadWithStructureCheck(2, .gDynamic)
+    getProperty()
+    dispatch(8)
+
+.gGlobalVar:
+    bineq t0, GlobalVar, .gClosureVar
+    getGlobalVar()
+    dispatch(8)
+
+.gClosureVar:
+    bineq t0, ClosureVar, .gGlobalPropertyWithVarInjectionChecks
+    loadVariable(2, t2, t1, t0)
+    getClosureVar()
+    dispatch(8)
+
+.gGlobalPropertyWithVarInjectionChecks:
+    bineq t0, GlobalPropertyWithVarInjectionChecks, .gGlobalVarWithVarInjectionChecks
+    loadWithStructureCheck(2, .gDynamic)
+    getProperty()
+    dispatch(8)
+
+.gGlobalVarWithVarInjectionChecks:
+    bineq t0, GlobalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks
+    varInjectionCheck(.gDynamic)
+    getGlobalVar()
+    dispatch(8)
+
+.gClosureVarWithVarInjectionChecks:
+    bineq t0, ClosureVarWithVarInjectionChecks, .gDynamic
+    varInjectionCheck(.gDynamic)
+    loadVariable(2, t2, t1, t0)
+    getClosureVar()
+    dispatch(8)
+
+.gDynamic:
+    callSlowPath(_llint_slow_path_get_from_scope)
+    dispatch(8)
+
+
+# Stores operand 3's value into the object in t0 at the cached property
+# offset (slot 6). Caller has already done the structure check.
+macro putProperty()
+    loadisFromInstruction(3, t1)
+    loadConstantOrVariable(t1, t2, t3)
+    loadisFromInstruction(6, t1)
+    storePropertyAtVariableOffset(t1, t0, t2, t3)
+end
+
+# Stores operand 3's value through the cached global-variable pointer
+# (slot 6) after notifying the variable's WatchpointSet (slot 5); a needed
+# invalidation defers to the .pDynamic slow path.
+macro putGlobalVar()
+    loadisFromInstruction(3, t0)
+    loadConstantOrVariable(t0, t1, t2)
+    loadpFromInstruction(5, t3)
+    notifyWrite(t3, .pDynamic)
+    loadpFromInstruction(6, t0)
+    storei t1, TagOffset[t0]
+    storei t2, PayloadOffset[t0]
+end
+
+# Stores operand 3's value into the environment record in t0 at the cached
+# slot index (slot 6).
+macro putClosureVar()
+    loadisFromInstruction(3, t1)
+    loadConstantOrVariable(t1, t2, t3)
+    loadisFromInstruction(6, t1)
+    storei t2, JSEnvironmentRecord_variables + TagOffset[t0, t1, 8]
+    storei t3, JSEnvironmentRecord_variables + PayloadOffset[t0, t1, 8]
+end
+
+# Like putClosureVar but the WatchpointSet pointer (slot 5) may be null, in
+# which case no write notification is needed.
+macro putLocalClosureVar()
+    loadisFromInstruction(3, t1)
+    loadConstantOrVariable(t1, t2, t3)
+    loadpFromInstruction(5, t4)
+    btpz t4, .noVariableWatchpointSet
+    notifyWrite(t4, .pDynamic)
+.noVariableWatchpointSet:
+    loadisFromInstruction(6, t1)
+    storei t2, JSEnvironmentRecord_variables + TagOffset[t0, t1, 8]
+    storei t3, JSEnvironmentRecord_variables + PayloadOffset[t0, t1, 8]
+end
+
+
+# op_put_to_scope: mirror of get_from_scope for stores. Every path runs its
+# write barrier before storing; mode dispatch and var-injection handling
+# follow the same pattern as _llint_op_get_from_scope above.
+_llint_op_put_to_scope:
+    traceExecution()
+    loadisFromInstruction(4, t0)
+    andi ResolveModeMask, t0
+
+#pLocalClosureVar:
+    bineq t0, LocalClosureVar, .pGlobalProperty
+    writeBarrierOnOperands(1, 3)
+    loadVariable(1, t2, t1, t0)
+    putLocalClosureVar()
+    dispatch(7)
+
+.pGlobalProperty:
+    bineq t0, GlobalProperty, .pGlobalVar
+    writeBarrierOnOperands(1, 3)
+    loadWithStructureCheck(1, .pDynamic)
+    putProperty()
+    dispatch(7)
+
+.pGlobalVar:
+    bineq t0, GlobalVar, .pClosureVar
+    writeBarrierOnGlobalObject(3)
+    putGlobalVar()
+    dispatch(7)
+
+.pClosureVar:
+    bineq t0, ClosureVar, .pGlobalPropertyWithVarInjectionChecks
+    writeBarrierOnOperands(1, 3)
+    loadVariable(1, t2, t1, t0)
+    putClosureVar()
+    dispatch(7)
+
+.pGlobalPropertyWithVarInjectionChecks:
+    bineq t0, GlobalPropertyWithVarInjectionChecks, .pGlobalVarWithVarInjectionChecks
+    writeBarrierOnOperands(1, 3)
+    loadWithStructureCheck(1, .pDynamic)
+    putProperty()
+    dispatch(7)
+
+.pGlobalVarWithVarInjectionChecks:
+    bineq t0, GlobalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks
+    writeBarrierOnGlobalObject(3)
+    varInjectionCheck(.pDynamic)
+    putGlobalVar()
+    dispatch(7)
+
+.pClosureVarWithVarInjectionChecks:
+    bineq t0, ClosureVarWithVarInjectionChecks, .pDynamic
+    writeBarrierOnOperands(1, 3)
+    varInjectionCheck(.pDynamic)
+    loadVariable(1, t2, t1, t0)
+    putClosureVar()
+    dispatch(7)
+
+.pDynamic:
+    callSlowPath(_llint_slow_path_put_to_scope)
+    dispatch(7)
+
+
+# op_get_from_arguments: indexed load from a DirectArguments object's
+# storage area, with value profiling.
+_llint_op_get_from_arguments:
+    traceExecution()
+    loadisFromInstruction(2, t0)
+    loadi PayloadOffset[cfr, t0, 8], t0
+    loadi 12[PC], t1
+    loadi DirectArguments_storage + TagOffset[t0, t1, 8], t2
+    loadi DirectArguments_storage + PayloadOffset[t0, t1, 8], t3
+    loadisFromInstruction(1, t1)
+    valueProfile(t2, t3, 16, t0)
+    storei t2, TagOffset[cfr, t1, 8]
+    storei t3, PayloadOffset[cfr, t1, 8]
+    dispatch(5)
+
+
+# op_put_to_arguments: indexed store into a DirectArguments object's storage
+# area, with a preceding write barrier.
+_llint_op_put_to_arguments:
+    traceExecution()
+    writeBarrierOnOperands(1, 3)
+    loadisFromInstruction(1, t0)
+    loadi PayloadOffset[cfr, t0, 8], t0
+    loadisFromInstruction(3, t1)
+    loadConstantOrVariable(t1, t2, t3)
+    loadi 8[PC], t1
+    storei t2, DirectArguments_storage + TagOffset[t0, t1, 8]
+    storei t3, DirectArguments_storage + PayloadOffset[t0, t1, 8]
+    dispatch(4)
+
+
+# op_get_parent_scope: stores the scope operand's JSScope::m_next as a cell.
+_llint_op_get_parent_scope:
+    traceExecution()
+    loadisFromInstruction(2, t0)
+    loadp PayloadOffset[cfr, t0, 8], t0
+    loadp JSScope::m_next[t0], t0
+    loadisFromInstruction(1, t1)
+    storei CellTag, TagOffset[cfr, t1, 8]
+    storei t0, PayloadOffset[cfr, t1, 8]
+    dispatch(3)
+
+
+# op_profile_type: appends (value, TypeLocation, optional structureID) to the
+# VM's TypeProfilerLog ring buffer; empty values are skipped, and hitting the
+# log's end pointer triggers a slow-path flush of the log.
+_llint_op_profile_type:
+    traceExecution()
+    loadp CodeBlock[cfr], t1
+    loadp CodeBlock::m_vm[t1], t1
+    # t1 is holding the pointer to the typeProfilerLog.
+    loadp VM::m_typeProfilerLog[t1], t1
+
+    # t0 is holding the payload, t4 is holding the tag.
+    loadisFromInstruction(1, t2)
+    loadConstantOrVariable(t2, t4, t0)
+
+    bieq t4, EmptyValueTag, .opProfileTypeDone
+
+    # t2 is holding the pointer to the current log entry.
+    loadp TypeProfilerLog::m_currentLogEntryPtr[t1], t2
+
+    # Store the JSValue onto the log entry.
+    storei t4, TypeProfilerLog::LogEntry::value + TagOffset[t2]
+    storei t0, TypeProfilerLog::LogEntry::value + PayloadOffset[t2]
+
+    # Store the TypeLocation onto the log entry.
+    loadpFromInstruction(2, t3)
+    storep t3, TypeProfilerLog::LogEntry::location[t2]
+
+    # Only cells carry a structureID; others record 0.
+    bieq t4, CellTag, .opProfileTypeIsCell
+    storei 0, TypeProfilerLog::LogEntry::structureID[t2]
+    jmp .opProfileTypeSkipIsCell
+.opProfileTypeIsCell:
+    loadi JSCell::m_structureID[t0], t3
+    storei t3, TypeProfilerLog::LogEntry::structureID[t2]
+.opProfileTypeSkipIsCell:
+
+    # Increment the current log entry.
+    addp sizeof TypeProfilerLog::LogEntry, t2
+    storep t2, TypeProfilerLog::m_currentLogEntryPtr[t1]
+
+    loadp TypeProfilerLog::m_logEndPtr[t1], t1
+    bpneq t2, t1, .opProfileTypeDone
+    callSlowPath(_slow_path_profile_type_clear_log)
+
+.opProfileTypeDone:
+    dispatch(6)
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
new file mode 100644
index 000000000..d9455f75c
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
@@ -0,0 +1,2228 @@
+# Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+
+# Utilities.
+macro jumpToInstruction()
+ jmp [PB, PC, 8]
+end
+
+macro dispatch(advance)
+ addp advance, PC
+ jumpToInstruction()
+end
+
+macro dispatchInt(advance)
+ addi advance, PC
+ jumpToInstruction()
+end
+
+macro dispatchIntIndirect(offset)
+ dispatchInt(offset * 8[PB, PC, 8])
+end
+
+macro dispatchAfterCall()
+ loadi ArgumentCount + TagOffset[cfr], PC
+ loadp CodeBlock[cfr], PB
+ loadp CodeBlock::m_instructions[PB], PB
+ loadisFromInstruction(1, t1)
+ storeq t0, [cfr, t1, 8]
+ valueProfile(t0, (CallOpCodeSize - 1), t2)
+ dispatch(CallOpCodeSize)
+end
+
+macro cCall2(function, arg1, arg2)
+ checkStackPointerAlignment(t4, 0xbad0c002)
+ if X86_64
+ move arg1, t4
+ move arg2, t5
+ call function
+ elsif X86_64_WIN
+ # Note: this implementation is only correct if the return type size is > 8 bytes.
+ # See macro cCall2Void for an implementation when the return type <= 8 bytes.
+ # On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value.
+ # On entry rcx (t2), should contain a pointer to this stack space. The other parameters are shifted to the right,
+ # rdx (t1) should contain the first argument, and r8 (t6) should contain the second argument.
+ # On return, rax contains a pointer to this stack value, and we then need to copy the 16 byte return value into rax (t0) and rdx (t1)
+ # since the return value is expected to be split between the two.
+ # See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx
+ move arg1, t1
+ move arg2, t6
+ subp 48, sp
+ move sp, t2
+ addp 32, t2
+ call function
+ addp 48, sp
+ move 8[t0], t1
+ move [t0], t0
+ elsif ARM64
+ move arg1, t0
+ move arg2, t1
+ call function
+ elsif C_LOOP
+ cloopCallSlowPath function, arg1, arg2
+ else
+ error
+ end
+end
+
+macro cCall2Void(function, arg1, arg2)
+ if C_LOOP
+ cloopCallSlowPathVoid function, arg1, arg2
+ elsif X86_64_WIN
+ # Note: we cannot use the cCall2 macro for Win64 in this case,
+ # as the Win64 cCall2 implemenation is only correct when the return type size is > 8 bytes.
+ # On Win64, rcx and rdx are used for passing the first two parameters.
+ # We also need to make room on the stack for all four parameter registers.
+ # See http://msdn.microsoft.com/en-us/library/ms235286.aspx
+ move arg2, t1
+ move arg1, t2
+ subp 32, sp
+ call function
+ addp 32, sp
+ else
+ cCall2(function, arg1, arg2)
+ end
+end
+
+# This barely works. arg3 and arg4 should probably be immediates.
+macro cCall4(function, arg1, arg2, arg3, arg4)
+ checkStackPointerAlignment(t4, 0xbad0c004)
+ if X86_64
+ move arg1, t4
+ move arg2, t5
+ move arg3, t1
+ move arg4, t2
+ call function
+ elsif X86_64_WIN
+ # On Win64, rcx, rdx, r8, and r9 are used for passing the first four parameters.
+ # We also need to make room on the stack for all four parameter registers.
+ # See http://msdn.microsoft.com/en-us/library/ms235286.aspx
+ move arg1, t2
+ move arg2, t1
+ move arg3, t6
+ move arg4, t7
+ subp 32, sp
+ call function
+ addp 32, sp
+ elsif ARM64
+ move arg1, t0
+ move arg2, t1
+ move arg3, t2
+ move arg4, t3
+ call function
+ elsif C_LOOP
+ error
+ else
+ error
+ end
+end
+
+# doVMEntry: the C++ -> JS entry trampoline. Sets up a VMEntryRecord on the
+# stack, checks stack capacity, copies the call-frame header and arguments
+# out of the ProtoCallFrame, invokes `makeCall` (either makeJavaScriptCall or
+# makeHostFunctionCall), and restores the previous topCallFrame /
+# topVMEntryFrame on the way out.
+#
+# Incoming C arguments: entry point, VM*, ProtoCallFrame* — in per-platform
+# registers chosen below.
+macro doVMEntry(makeCall)
+    if X86_64
+        const entry = t4
+        const vm = t5
+        const protoCallFrame = t1
+
+        const previousCFR = t0
+        const previousPC = t6
+        const temp1 = t0
+        const temp2 = t3
+        const temp3 = t6
+    elsif X86_64_WIN
+        const entry = t2
+        const vm = t1
+        const protoCallFrame = t6
+
+        const previousCFR = t0
+        const previousPC = t4
+        const temp1 = t0
+        const temp2 = t3
+        const temp3 = t7
+    elsif ARM64 or C_LOOP
+        const entry = a0
+        const vm = a1
+        const protoCallFrame = a2
+
+        const previousCFR = t5
+        const previousPC = lr
+        const temp1 = t3
+        const temp2 = t4
+        const temp3 = t6
+    end
+
+    functionPrologue()
+    pushCalleeSaves()
+
+    vmEntryRecord(cfr, sp)
+
+    checkStackPointerAlignment(temp2, 0xbad0dc01)
+
+    # Record the VM and the previous topCallFrame/topVMEntryFrame so they can
+    # be restored on exit (and by exception unwinding).
+    storep vm, VMEntryRecord::m_vm[sp]
+    loadp VM::topCallFrame[vm], temp2
+    storep temp2, VMEntryRecord::m_prevTopCallFrame[sp]
+    loadp VM::topVMEntryFrame[vm], temp2
+    storep temp2, VMEntryRecord::m_prevTopVMEntryFrame[sp]
+
+    # temp1 = prospective new sp = sp - (paddedArgCount + header) * 8.
+    loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp2
+    addp CallFrameHeaderSlots, temp2, temp2
+    lshiftp 3, temp2
+    subp sp, temp2, temp1
+
+    # Ensure that we have enough additional stack capacity for the incoming args,
+    # and the frame for the JS code we're executing. We need to do this check
+    # before we start copying the args from the protoCallFrame below.
+    bpaeq temp1, VM::m_jsStackLimit[vm], .stackHeightOK
+
+    if C_LOOP
+        # The C loop can grow its stack; entry/vm are saved around the call
+        # because the slow path clobbers them.
+        move entry, temp2
+        move vm, temp3
+        cloopCallSlowPath _llint_stack_check_at_vm_entry, vm, temp1
+        bpeq t0, 0, .stackCheckFailed
+        move temp2, entry
+        move temp3, vm
+        jmp .stackHeightOK
+
+.stackCheckFailed:
+        move temp2, entry
+        move temp3, vm
+    end
+
+    # Stack overflow: throw, unwind the entry record, and return to C++.
+    cCall2(_llint_throw_stack_overflow_error, vm, protoCallFrame)
+
+    vmEntryRecord(cfr, temp2)
+
+    loadp VMEntryRecord::m_vm[temp2], vm
+    loadp VMEntryRecord::m_prevTopCallFrame[temp2], temp3
+    storep temp3, VM::topCallFrame[vm]
+    loadp VMEntryRecord::m_prevTopVMEntryFrame[temp2], temp3
+    storep temp3, VM::topVMEntryFrame[vm]
+
+    subp cfr, CalleeRegisterSaveSize, sp
+
+    popCalleeSaves()
+    functionEpilogue()
+    ret
+
+.stackHeightOK:
+    move temp1, sp
+    # Copy the 4 header slots (starting at CodeBlock) from the ProtoCallFrame.
+    move 4, temp1
+
+.copyHeaderLoop:
+    subi 1, temp1
+    loadq [protoCallFrame, temp1, 8], temp3
+    storeq temp3, CodeBlock[sp, temp1, 8]
+    btinz temp1, .copyHeaderLoop
+
+    # temp2 = actual argc - 1, temp3 = padded argc - 1; pad the gap (if any)
+    # with undefined before copying the real arguments.
+    loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], temp2
+    subi 1, temp2
+    loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp3
+    subi 1, temp3
+
+    bieq temp2, temp3, .copyArgs
+    move ValueUndefined, temp1
+.fillExtraArgsLoop:
+    subi 1, temp3
+    storeq temp1, ThisArgumentOffset + 8[sp, temp3, 8]
+    bineq temp2, temp3, .fillExtraArgsLoop
+
+.copyArgs:
+    loadp ProtoCallFrame::args[protoCallFrame], temp1
+
+.copyArgsLoop:
+    btiz temp2, .copyArgsDone
+    subi 1, temp2
+    loadq [temp1, temp2, 8], temp3
+    storeq temp3, ThisArgumentOffset + 8[sp, temp2, 8]
+    jmp .copyArgsLoop
+
+.copyArgsDone:
+    if ARM64
+        # ARM64 cannot store sp directly; stage it through a temp.
+        move sp, temp2
+        storep temp2, VM::topCallFrame[vm]
+    else
+        storep sp, VM::topCallFrame[vm]
+    end
+    storep cfr, VM::topVMEntryFrame[vm]
+
+    # Materialize the JSVALUE64 tag constants into callee-saved registers.
+    # NOTE(review): presumably csr1 = tagTypeNumber (0xffff000000000000) and
+    # csr2 = tagMask (tagTypeNumber + 2) — confirm against LowLevelInterpreter.asm.
+    move 0xffff000000000000, csr1
+    addp 2, csr1, csr2
+
+    checkStackPointerAlignment(temp3, 0xbad0dc02)
+
+    makeCall(entry, temp1)
+
+    checkStackPointerAlignment(temp3, 0xbad0dc03)
+
+    # Unwind: restore the previous topCallFrame/topVMEntryFrame and return.
+    vmEntryRecord(cfr, temp2)
+
+    loadp VMEntryRecord::m_vm[temp2], vm
+    loadp VMEntryRecord::m_prevTopCallFrame[temp2], temp3
+    storep temp3, VM::topCallFrame[vm]
+    loadp VMEntryRecord::m_prevTopVMEntryFrame[temp2], temp3
+    storep temp3, VM::topVMEntryFrame[vm]
+
+    subp cfr, CalleeRegisterSaveSize, sp
+
+    popCalleeSaves()
+    functionEpilogue()
+
+    ret
+end
+
+
+# Invoke a JS code entry point from doVMEntry. The sp adjustment brackets the
+# call; NOTE(review): presumably it pops/repushes the two-slot area doVMEntry
+# reserved below the frame — confirm against the matching 16-byte layout.
+macro makeJavaScriptCall(entry, temp)
+    addp 16, sp
+    if C_LOOP
+        cloopCallJSFunction entry
+    else
+        call entry
+    end
+    subp 16, sp
+end
+
+
+# Invoke a host (native C) function from doVMEntry. Stores cfr at [sp] and
+# passes sp as the first C argument, honoring each platform's convention.
+macro makeHostFunctionCall(entry, temp)
+    move entry, temp
+    storep cfr, [sp]
+    if X86_64
+        move sp, t4
+    elsif X86_64_WIN
+        move sp, t2
+    elsif ARM64 or C_LOOP
+        move sp, a0
+    end
+    if C_LOOP
+        storep lr, 8[sp]
+        cloopCallNative temp
+    elsif X86_64_WIN
+        # We need to allocate 32 bytes on the stack for the shadow space.
+        subp 32, sp
+        call temp
+        addp 32, sp
+    else
+        call temp
+    end
+end
+
+
+# Landing pad when an exception escapes all JS frames: recover the VM from
+# the callee cell, reset cfr to the frame that threw, then unwind the
+# VMEntryRecord exactly like the normal doVMEntry exit path.
+_handleUncaughtException:
+    loadp Callee[cfr], t3
+    andp MarkedBlockMask, t3
+    loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+    loadp VM::callFrameForThrow[t3], cfr
+
+    loadp CallerFrame[cfr], cfr
+    vmEntryRecord(cfr, t2)
+
+    loadp VMEntryRecord::m_vm[t2], t3
+    loadp VMEntryRecord::m_prevTopCallFrame[t2], t5
+    storep t5, VM::topCallFrame[t3]
+    loadp VMEntryRecord::m_prevTopVMEntryFrame[t2], t5
+    storep t5, VM::topVMEntryFrame[t3]
+
+    subp cfr, CalleeRegisterSaveSize, sp
+
+    popCalleeSaves()
+    functionEpilogue()
+    ret
+
+
+# Convert PC from an index into an absolute instruction pointer (slow paths
+# take the raw pointer), saving PB in t3 so it can be restored afterwards.
+macro prepareStateForCCall()
+    leap [PB, PC, 8], PC
+    move PB, t3
+end
+
+# Inverse of prepareStateForCCall: the slow path returns the new absolute PC
+# in t0; convert it back to a word index relative to the restored PB.
+macro restoreStateAfterCCall()
+    move t0, PC
+    move t3, PB
+    subp PB, PC
+    rshiftp 3, PC
+end
+
+# Standard slow-path call: (cfr, PC) in, new PC out.
+macro callSlowPath(slowPath)
+    prepareStateForCCall()
+    cCall2(slowPath, cfr, PC)
+    restoreStateAfterCCall()
+end
+
+# Debug tracing hook for an operand index.
+macro traceOperand(fromWhere, operand)
+    prepareStateForCCall()
+    cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand)
+    restoreStateAfterCCall()
+end
+
+# Debug tracing hook for an operand's value.
+macro traceValue(fromWhere, operand)
+    prepareStateForCCall()
+    cCall4(_llint_trace_value, cfr, PC, fromWhere, operand)
+    restoreStateAfterCCall()
+end
+
+# Call a slow path for call call opcodes.
+# Saves PC into the frame first (calls can re-enter), then lets `action`
+# decide what to do with the returned machine-code target in t0.
+macro callCallSlowPath(slowPath, action)
+    storei PC, ArgumentCount + TagOffset[cfr]
+    prepareStateForCCall()
+    cCall2(slowPath, cfr, PC)
+    action(t0)
+end
+
+# Watchdog check: a non-zero return means a termination exception was
+# requested, so jump to throwHandler; otherwise restore PB/PC and continue.
+macro callWatchdogTimerHandler(throwHandler)
+    storei PC, ArgumentCount + TagOffset[cfr]
+    prepareStateForCCall()
+    cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC)
+    btpnz t0, throwHandler
+    move t3, PB
+    loadi ArgumentCount + TagOffset[cfr], PC
+end
+
+# Loop OSR: when the loop-hotness counter trips, ask the JIT for an OSR
+# entry (t0 = target, t1 = new sp); on failure fall back to the interpreter.
+macro checkSwitchToJITForLoop()
+    checkSwitchToJIT(
+        1,
+        macro()
+            storei PC, ArgumentCount + TagOffset[cfr]
+            prepareStateForCCall()
+            cCall2(_llint_loop_osr, cfr, PC)
+            btpz t0, .recover
+            move t1, sp
+            jmp t0
+        .recover:
+            move t3, PB
+            loadi ArgumentCount + TagOffset[cfr], PC
+        end)
+end
+
+# Load the JSValue stored in virtual register `operand` into `value`.
+macro loadVariable(operand, value)
+    loadisFromInstruction(operand, value)
+    loadq [cfr, value, 8], value
+end
+
+# Index and value must be different registers. Index may be clobbered.
+# Resolves an operand index to a JSValue: indices >= FirstConstantRegisterIndex
+# come from the CodeBlock's constant pool, others from the call frame.
+macro loadConstantOrVariable(index, value)
+    bpgteq index, FirstConstantRegisterIndex, .constant
+    loadq [cfr, index, 8], value
+    jmp .done
+.constant:
+    loadp CodeBlock[cfr], value
+    loadp CodeBlock::m_constantRegisters + VectorBufferOffset[value], value
+    subp FirstConstantRegisterIndex, index
+    loadq [value, index, 8], value
+.done:
+end
+
+# As loadConstantOrVariable, but branches to `slow` unless the value is a
+# boxed int32 (i.e. >= tagTypeNumber under the JSVALUE64 encoding).
+macro loadConstantOrVariableInt32(index, value, slow)
+    loadConstantOrVariable(index, value)
+    bqb value, tagTypeNumber, slow
+end
+
+# As loadConstantOrVariable, but branches to `slow` unless the value is a
+# cell (no tag bits set).
+macro loadConstantOrVariableCell(index, value, slow)
+    loadConstantOrVariable(index, value)
+    btqnz value, tagMask, slow
+end
+
+# GGC write barrier for the cell in instruction operand `cellOperand`.
+# Skips the barrier when the cell is already remembered or in Eden; otherwise
+# calls the slow path (PB/PC saved around the call since cCall2Void clobbers
+# caller-saved registers).
+macro writeBarrierOnOperand(cellOperand)
+    if GGC
+        loadisFromInstruction(cellOperand, t1)
+        loadConstantOrVariableCell(t1, t2, .writeBarrierDone)
+        skipIfIsRememberedOrInEden(t2, t1, t3, 
+            macro(gcData)
+                btbnz gcData, .writeBarrierDone
+                push PB, PC
+                cCall2Void(_llint_write_barrier_slow, cfr, t2)
+                pop PC, PB
+            end
+        )
+    .writeBarrierDone:
+    end
+end
+
+# Barrier the cell operand only when the value being stored is itself a cell
+# (storing a non-cell can never create an old->new pointer).
+macro writeBarrierOnOperands(cellOperand, valueOperand)
+    if GGC
+        loadisFromInstruction(valueOperand, t1)
+        loadConstantOrVariableCell(t1, t0, .writeBarrierDone)
+        btpz t0, .writeBarrierDone
+    
+        writeBarrierOnOperand(cellOperand)
+    .writeBarrierDone:
+    end
+end
+
+# Same as writeBarrierOnOperands, but the barriered cell is the CodeBlock's
+# global object rather than an instruction operand.
+macro writeBarrierOnGlobalObject(valueOperand)
+    if GGC
+        loadisFromInstruction(valueOperand, t1)
+        loadConstantOrVariableCell(t1, t0, .writeBarrierDone)
+        btpz t0, .writeBarrierDone
+    
+        loadp CodeBlock[cfr], t3
+        loadp CodeBlock::m_globalObject[t3], t3
+        skipIfIsRememberedOrInEden(t3, t1, t2,
+            macro(gcData)
+                btbnz gcData, .writeBarrierDone
+                push PB, PC
+                cCall2Void(_llint_write_barrier_slow, cfr, t3)
+                pop PC, PB
+            end
+        )
+    .writeBarrierDone:
+    end
+end
+
+# Record `value` into the ValueProfile pointed to by instruction slot `operand`.
+macro valueProfile(value, operand, scratch)
+    loadpFromInstruction(operand, scratch)
+    storeq value, ValueProfile::m_buckets[scratch]
+end
+
+# Intentionally empty on 64-bit. NOTE(review): callers needing the Structure*
+# use loadStructureWithScratch / loadStructureAndClobberFirstArg instead,
+# since resolving a StructureID requires the VM's structure table — confirm
+# this stub is unused rather than a stray deletion.
+macro loadStructure(cell, structure)
+end
+
+# structure = StructureIDTable lookup of cell's m_structureID.
+# Uses `scratch` to reach the table via CodeBlock -> VM -> Heap.
+macro loadStructureWithScratch(cell, structure, scratch)
+    loadp CodeBlock[cfr], scratch
+    loadp CodeBlock::m_vm[scratch], scratch
+    loadp VM::heap + Heap::m_structureIDTable + StructureIDTable::m_table[scratch], scratch
+    loadi JSCell::m_structureID[cell], structure
+    loadp [scratch, structure, 8], structure
+end
+
+# Variant that needs no scratch register by reusing (and destroying) `cell`.
+macro loadStructureAndClobberFirstArg(cell, structure)
+    loadi JSCell::m_structureID[cell], structure
+    loadp CodeBlock[cfr], cell
+    loadp CodeBlock::m_vm[cell], cell
+    loadp VM::heap + Heap::m_structureIDTable + StructureIDTable::m_table[cell], cell
+    loadp [cell, structure, 8], structure
+end
+
+# Store the structure's ID + indexing type + type info (the packed
+# StructureIDBlob double-word) into the cell's header in one 64-bit store.
+macro storeStructureWithTypeInfo(cell, structure, scratch)
+    loadq Structure::m_blob + StructureIDBlob::u.doubleWord[structure], scratch
+    storeq scratch, JSCell::m_structureID[cell]
+end
+
+# Entrypoints into the interpreter.
+
+# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
+# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
+# Checks argc against the function's declared parameter count; when too few
+# arguments were passed, either calls an arity-fixup thunk (if the slow path
+# provided one) or slides the frame down inline and fills the missing
+# argument slots with undefined. Slow-path protocol: t0 == 0 means OK and t1
+# points at ArityCheckData; t0 != 0 means an error was thrown and t1 is the
+# caller frame to unwind to.
+macro functionArityCheck(doneLabel, slowPath)
+    loadi PayloadOffset + ArgumentCount[cfr], t0
+    biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
+    prepareStateForCCall()
+    cCall2(slowPath, cfr, PC)   # This slowPath has the protocol: t0 = 0 => no error, t0 != 0 => error
+    btiz t0, .noError
+    move t1, cfr   # t1 contains caller frame
+    jmp _llint_throw_from_slow_path_trampoline
+
+.noError:
+    # t1 points to ArityCheckData.
+    loadp CommonSlowPaths::ArityCheckData::thunkToCall[t1], t2
+    btpz t2, .proceedInline
+    
+    loadp CommonSlowPaths::ArityCheckData::returnPC[t1], t7
+    loadp CommonSlowPaths::ArityCheckData::paddedStackSpace[t1], t0
+    call t2
+    if ASSERT_ENABLED
+        loadp ReturnPC[cfr], t0
+        loadp [t0], t0
+    end
+    jmp .continue
+
+.proceedInline:
+    loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[t1], t1
+    btiz t1, .continue
+
+    // Move frame up "t1 * 2" slots
+    lshiftp 1, t1
+    negq t1
+    move cfr, t3
+    loadi PayloadOffset + ArgumentCount[cfr], t2
+    addi CallFrameHeaderSlots, t2
+.copyLoop:
+    loadq [t3], t0
+    storeq t0, [t3, t1, 8]
+    addp 8, t3
+    bsubinz 1, t2, .copyLoop
+
+    // Fill new slots with JSUndefined
+    move t1, t2
+    move ValueUndefined, t0
+.fillLoop:
+    storeq t0, [t3, t1, 8]
+    addp 8, t3
+    baddinz 1, t2, .fillLoop
+
+    # Rebase cfr and sp onto the slid-down frame.
+    lshiftp 3, t1
+    addp t1, cfr
+    addp t1, sp
+
+.continue:
+    # Reload CodeBlock and reset PC, since the slow_path clobbered them.
+    loadp CodeBlock[cfr], t1
+    loadp CodeBlock::m_instructions[t1], PB
+    move 0, PC
+    jmp doneLabel
+end
+
+# Jump to `label` if the VM has a pending exception. The VM is recovered from
+# the callee cell's MarkedBlock header (native callees have no CodeBlock).
+macro branchIfException(label)
+    loadp Callee[cfr], t3
+    andp MarkedBlockMask, t3
+    loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+    btqz VM::m_exception[t3], .noException
+    jmp label
+.noException:
+end
+
+
+# Instruction implementations
+
+# Instruction implementations
+
+# op_enter: zero-fill (with undefined) all of the frame's local variables,
+# walking backwards from -numVars up to the frame pointer, then run the slow
+# path (profiling / debug hooks).
+_llint_op_enter:
+    traceExecution()
+    checkStackPointerAlignment(t2, 0xdead00e1)
+    loadp CodeBlock[cfr], t2                // t2<CodeBlock> = cfr.CodeBlock
+    loadi CodeBlock::m_numVars[t2], t2      // t2<size_t> = t2<CodeBlock>.m_numVars
+    btiz t2, .opEnterDone
+    move ValueUndefined, t0
+    negi t2
+    sxi2q t2, t2
+.opEnterLoop:
+    storeq t0, [cfr, t2, 8]
+    addq 1, t2
+    btqnz t2, .opEnterLoop
+.opEnterDone:
+    callSlowPath(_slow_path_enter)
+    dispatch(1)
+
+
+# op_get_scope: store the callee's scope chain head into operand 1.
+_llint_op_get_scope:
+    traceExecution()
+    loadp Callee[cfr], t0
+    loadp JSCallee::m_scope[t0], t0
+    loadisFromInstruction(1, t1)
+    storeq t0, [cfr, t1, 8]
+    dispatch(2)
+
+
+# op_create_this: fast-path `this` allocation using the callee's cached
+# ObjectAllocationProfile. Falls back to the slow path when the callee has no
+# rare data, no cached allocator, or the cached callee check (operand 4 is
+# either a specific callee or the "seen multiple callees" sentinel 1) fails.
+_llint_op_create_this:
+    traceExecution()
+    loadisFromInstruction(2, t0)
+    loadp [cfr, t0, 8], t0
+    loadp JSFunction::m_rareData[t0], t4
+    btpz t4, .opCreateThisSlow
+    loadp FunctionRareData::m_allocationProfile + ObjectAllocationProfile::m_allocator[t4], t1
+    loadp FunctionRareData::m_allocationProfile + ObjectAllocationProfile::m_structure[t4], t2
+    btpz t1, .opCreateThisSlow
+    loadpFromInstruction(4, t4)
+    bpeq t4, 1, .hasSeenMultipleCallee
+    bpneq t4, t0, .opCreateThisSlow
+.hasSeenMultipleCallee:
+    allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow)
+    loadisFromInstruction(1, t1)
+    storeq t0, [cfr, t1, 8]
+    dispatch(5)
+
+.opCreateThisSlow:
+    callSlowPath(_slow_path_create_this)
+    dispatch(5)
+
+
+# op_to_this: fast path only when `this` is already a final object whose
+# structure matches the cached structure in operand 2; everything else
+# (primitives, structure mismatch) goes to the slow path.
+_llint_op_to_this:
+    traceExecution()
+    loadisFromInstruction(1, t0)
+    loadq [cfr, t0, 8], t0
+    btqnz t0, tagMask, .opToThisSlow
+    bbneq JSCell::m_type[t0], FinalObjectType, .opToThisSlow
+    loadStructureWithScratch(t0, t1, t2)
+    loadpFromInstruction(2, t2)
+    bpneq t1, t2, .opToThisSlow
+    dispatch(4)
+
+.opToThisSlow:
+    callSlowPath(_slow_path_to_this)
+    dispatch(4)
+
+
+# op_new_object: inline-allocate an empty object from the opcode's cached
+# ObjectAllocationProfile; slow path on allocator failure.
+_llint_op_new_object:
+    traceExecution()
+    loadpFromInstruction(3, t0)
+    loadp ObjectAllocationProfile::m_allocator[t0], t1
+    loadp ObjectAllocationProfile::m_structure[t0], t2
+    allocateJSObject(t1, t2, t0, t3, .opNewObjectSlow)
+    loadisFromInstruction(1, t1)
+    storeq t0, [cfr, t1, 8]
+    dispatch(4)
+
+.opNewObjectSlow:
+    callSlowPath(_llint_slow_path_new_object)
+    dispatch(4)
+
+
+# op_check_tdz: throw a TDZ (temporal dead zone) error when the operand is
+# still the empty value, i.e. a `let`/`const` read before initialization.
+_llint_op_check_tdz:
+    traceExecution()
+    loadisFromInstruction(1, t0)
+    loadConstantOrVariable(t0, t1)
+    bqneq t1, ValueEmpty, .opNotTDZ
+    callSlowPath(_slow_path_throw_tdz_error)
+
+.opNotTDZ:
+    dispatch(2)
+
+
+# op_mov: copy a constant or variable into the destination register.
+_llint_op_mov:
+    traceExecution()
+    loadisFromInstruction(2, t1)
+    loadisFromInstruction(1, t0)
+    loadConstantOrVariable(t1, t2)
+    storeq t2, [cfr, t0, 8]
+    dispatch(3)
+
+
+# op_not: boolean negation. xor with ValueFalse leaves 0 or 1 only if the
+# input was a boolean (any other bits set => slow path); xor with ValueTrue
+# then flips it and restores the boolean tag.
+_llint_op_not:
+    traceExecution()
+    loadisFromInstruction(2, t0)
+    loadisFromInstruction(1, t1)
+    loadConstantOrVariable(t0, t2)
+    xorq ValueFalse, t2
+    btqnz t2, ~1, .opNotSlow
+    xorq ValueTrue, t2
+    storeq t2, [cfr, t1, 8]
+    dispatch(3)
+
+.opNotSlow:
+    callSlowPath(_slow_path_not)
+    dispatch(3)
+
+
+# Shared body for op_eq / op_neq: fast path only when both operands are
+# boxed int32s; the comparison result (0/1) is turned into a boolean JSValue
+# by or'ing in ValueFalse.
+macro equalityComparison(integerComparison, slowPath)
+    traceExecution()
+    loadisFromInstruction(3, t0)
+    loadisFromInstruction(2, t2)
+    loadisFromInstruction(1, t3)
+    loadConstantOrVariableInt32(t0, t1, .slow)
+    loadConstantOrVariableInt32(t2, t0, .slow)
+    integerComparison(t0, t1, t0)
+    orq ValueFalse, t0
+    storeq t0, [cfr, t3, 8]
+    dispatch(4)
+
+.slow:
+    callSlowPath(slowPath)
+    dispatch(4)
+end
+
+_llint_op_eq:
+    equalityComparison(
+        macro (left, right, result) cieq left, right, result end,
+        _slow_path_eq)
+
+
+_llint_op_neq:
+    equalityComparison(
+        macro (left, right, result) cineq left, right, result end,
+        _slow_path_neq)
+
+
+# Shared body for op_eq_null / op_neq_null: leaves 0/1 in t0. Cells compare
+# equal to null only when they masquerade as undefined AND belong to this
+# code block's global object; immediates compare equal when they are
+# null/undefined (TagBitUndefined masked off, then compared to ValueNull).
+macro equalNullComparison()
+    loadisFromInstruction(2, t0)
+    loadq [cfr, t0, 8], t0
+    btqnz t0, tagMask, .immediate
+    btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined
+    move 0, t0
+    jmp .done
+.masqueradesAsUndefined:
+    loadStructureWithScratch(t0, t2, t1)
+    loadp CodeBlock[cfr], t0
+    loadp CodeBlock::m_globalObject[t0], t0
+    cpeq Structure::m_globalObject[t2], t0, t0
+    jmp .done
+.immediate:
+    andq ~TagBitUndefined, t0
+    cqeq t0, ValueNull, t0
+.done:
+end
+
+_llint_op_eq_null:
+    traceExecution()
+    equalNullComparison()
+    loadisFromInstruction(1, t1)
+    orq ValueFalse, t0
+    storeq t0, [cfr, t1, 8]
+    dispatch(3)
+
+
+_llint_op_neq_null:
+    traceExecution()
+    equalNullComparison()
+    loadisFromInstruction(1, t1)
+    # xor with ValueTrue both inverts the 0/1 result and applies the
+    # boolean tag in one step.
+    xorq ValueTrue, t0
+    storeq t0, [cfr, t1, 8]
+    dispatch(3)
+
+
+# Shared body for op_stricteq / op_nstricteq. Fast path covers: two numbers
+# (any mix of int/double encodings handled by the tagTypeNumber range
+# checks), or two non-cell immediates; a cell on either side (tag bits clear
+# in the OR of both values) goes to the slow path for string/object rules.
+macro strictEq(equalityOperation, slowPath)
+    traceExecution()
+    loadisFromInstruction(3, t0)
+    loadisFromInstruction(2, t2)
+    loadConstantOrVariable(t0, t1)
+    loadConstantOrVariable(t2, t0)
+    move t0, t2
+    orq t1, t2
+    btqz t2, tagMask, .slow
+    bqaeq t0, tagTypeNumber, .leftOK
+    btqnz t0, tagTypeNumber, .slow
+.leftOK:
+    bqaeq t1, tagTypeNumber, .rightOK
+    btqnz t1, tagTypeNumber, .slow
+.rightOK:
+    equalityOperation(t0, t1, t0)
+    loadisFromInstruction(1, t1)
+    orq ValueFalse, t0
+    storeq t0, [cfr, t1, 8]
+    dispatch(4)
+
+.slow:
+    callSlowPath(slowPath)
+    dispatch(4)
+end
+
+_llint_op_stricteq:
+    strictEq(
+        macro (left, right, result) cqeq left, right, result end,
+        _slow_path_stricteq)
+
+
+_llint_op_nstricteq:
+    strictEq(
+        macro (left, right, result) cqneq left, right, result end,
+        _slow_path_nstricteq)
+
+
+# Shared body for op_inc / op_dec: in-place int32 arithmetic on a frame
+# register. Non-int32 values and int32 overflow take the slow path.
+macro preOp(arithmeticOperation, slowPath)
+    traceExecution()
+    loadisFromInstruction(1, t0)
+    loadq [cfr, t0, 8], t1
+    bqb t1, tagTypeNumber, .slow
+    arithmeticOperation(t1, .slow)
+    orq tagTypeNumber, t1
+    storeq t1, [cfr, t0, 8]
+    dispatch(2)
+
+.slow:
+    callSlowPath(slowPath)
+    dispatch(2)
+end
+
+_llint_op_inc:
+    preOp(
+        macro (value, slow) baddio 1, value, slow end,
+        _slow_path_inc)
+
+
+_llint_op_dec:
+    preOp(
+        macro (value, slow) bsubio 1, value, slow end,
+        _slow_path_dec)
+
+
+# op_to_number: numbers pass through unchanged (the two branches accept both
+# boxed int32s and boxed doubles); everything else converts via slow path.
+_llint_op_to_number:
+    traceExecution()
+    loadisFromInstruction(2, t0)
+    loadisFromInstruction(1, t1)
+    loadConstantOrVariable(t0, t2)
+    bqaeq t2, tagTypeNumber, .opToNumberIsImmediate
+    btqz t2, tagTypeNumber, .opToNumberSlow
+.opToNumberIsImmediate:
+    storeq t2, [cfr, t1, 8]
+    dispatch(3)
+
+.opToNumberSlow:
+    callSlowPath(_slow_path_to_number)
+    dispatch(3)
+
+
+# op_to_string: string cells pass through unchanged; everything else
+# converts via slow path.
+_llint_op_to_string:
+    traceExecution()
+    loadisFromInstruction(2, t1)
+    loadisFromInstruction(1, t2)
+    loadConstantOrVariable(t1, t0)
+    btqnz t0, tagMask, .opToStringSlow
+    bbneq JSCell::m_type[t0], StringType, .opToStringSlow
+.opToStringIsString:
+    storeq t0, [cfr, t2, 8]
+    dispatch(3)
+
+.opToStringSlow:
+    callSlowPath(_slow_path_to_string)
+    dispatch(3)
+
+
+# op_negate: int32 fast path (0 and INT32_MIN go slow — negating either
+# cannot be represented as an int32 result without changing type or
+# overflowing); double fast path flips the IEEE sign bit directly.
+_llint_op_negate:
+    traceExecution()
+    loadisFromInstruction(2, t0)
+    loadisFromInstruction(1, t1)
+    loadConstantOrVariable(t0, t2)
+    bqb t2, tagTypeNumber, .opNegateNotInt
+    btiz t2, 0x7fffffff, .opNegateSlow
+    negi t2
+    orq tagTypeNumber, t2
+    storeq t2, [cfr, t1, 8]
+    dispatch(3)
+.opNegateNotInt:
+    btqz t2, tagTypeNumber, .opNegateSlow
+    xorq 0x8000000000000000, t2
+    storeq t2, [cfr, t1, 8]
+    dispatch(3)
+
+.opNegateSlow:
+    callSlowPath(_slow_path_negate)
+    dispatch(3)
+
+
+# Shared body for binary arithmetic. Tries int32 x int32 first (caller's
+# integerOperationAndStore stores the boxed result itself, so e.g. op_mul
+# can detect -0); otherwise unboxes to doubles (ints converted via ci2d,
+# doubles rebiased by +/- tagTypeNumber) and reboxes the double result.
+macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
+    loadisFromInstruction(3, t0)
+    loadisFromInstruction(2, t2)
+    loadConstantOrVariable(t0, t1)
+    loadConstantOrVariable(t2, t0)
+    bqb t0, tagTypeNumber, .op1NotInt
+    bqb t1, tagTypeNumber, .op2NotInt
+    loadisFromInstruction(1, t2)
+    integerOperationAndStore(t1, t0, .slow, t2)
+    dispatch(5)
+
+.op1NotInt:
+    # First operand is definitely not an int, the second operand could be anything.
+    btqz t0, tagTypeNumber, .slow
+    bqaeq t1, tagTypeNumber, .op1NotIntOp2Int
+    btqz t1, tagTypeNumber, .slow
+    addq tagTypeNumber, t1
+    fq2d t1, ft1
+    jmp .op1NotIntReady
+.op1NotIntOp2Int:
+    ci2d t1, ft1
+.op1NotIntReady:
+    loadisFromInstruction(1, t2)
+    addq tagTypeNumber, t0
+    fq2d t0, ft0
+    doubleOperation(ft1, ft0)
+    fd2q ft0, t0
+    subq tagTypeNumber, t0
+    storeq t0, [cfr, t2, 8]
+    dispatch(5)
+
+.op2NotInt:
+    # First operand is definitely an int, the second is definitely not.
+    loadisFromInstruction(1, t2)
+    btqz t1, tagTypeNumber, .slow
+    ci2d t0, ft0
+    addq tagTypeNumber, t1
+    fq2d t1, ft1
+    doubleOperation(ft1, ft0)
+    fd2q ft0, t0
+    subq tagTypeNumber, t0
+    storeq t0, [cfr, t2, 8]
+    dispatch(5)
+
+.slow:
+    callSlowPath(slowPath)
+    dispatch(5)
+end
+
+# Convenience wrapper for ops whose int path is just "operate, rebox, store".
+macro binaryOp(integerOperation, doubleOperation, slowPath)
+    binaryOpCustomStore(
+        macro (left, right, slow, index)
+            integerOperation(left, right, slow)
+            orq tagTypeNumber, right
+            storeq right, [cfr, index, 8]
+        end,
+        doubleOperation, slowPath)
+end
+
+_llint_op_add:
+    traceExecution()
+    binaryOp(
+        macro (left, right, slow) baddio left, right, slow end,
+        macro (left, right) addd left, right end,
+        _slow_path_add)
+
+
+# op_mul: custom store so a zero int result with a negative operand (i.e.
+# -0, unrepresentable as int32) falls back to the slow path.
+_llint_op_mul:
+    traceExecution()
+    binaryOpCustomStore(
+        macro (left, right, slow, index)
+            # Assume t3 is scratchable.
+            move right, t3
+            bmulio left, t3, slow
+            btinz t3, .done
+            bilt left, 0, slow
+            bilt right, 0, slow
+        .done:
+            orq tagTypeNumber, t3
+            storeq t3, [cfr, index, 8]
+        end,
+        macro (left, right) muld left, right end,
+        _slow_path_mul)
+
+
+_llint_op_sub:
+    traceExecution()
+    binaryOp(
+        macro (left, right, slow) bsubio left, right, slow end,
+        macro (left, right) subd left, right end,
+        _slow_path_sub)
+
+
+# op_div: x86-only int fast path guarding against divide-by-zero,
+# INT_MIN / -1 overflow, negative-zero results, and non-zero remainders
+# (all of which must produce a double or throw). Other platforms always
+# take the slow path for the int case.
+_llint_op_div:
+    traceExecution()
+    if X86_64 or X86_64_WIN
+        binaryOpCustomStore(
+            macro (left, right, slow, index)
+                # Assume t3 is scratchable.
+                btiz left, slow
+                bineq left, -1, .notNeg2TwoThe31DivByNeg1
+                bieq right, -2147483648, .slow
+            .notNeg2TwoThe31DivByNeg1:
+                btinz right, .intOK
+                bilt left, 0, slow
+            .intOK:
+                move left, t3
+                move right, t0
+                cdqi
+                idivi t3
+                btinz t1, slow
+                orq tagTypeNumber, t0
+                storeq t0, [cfr, index, 8]
+            end,
+            macro (left, right) divd left, right end,
+            _slow_path_div)
+    else
+        callSlowPath(_slow_path_div)
+        dispatch(5)
+    end
+
+
+# Shared body for the bitwise/shift ops: both operands must be boxed int32s;
+# the int32 result is reboxed with tagTypeNumber. `advance` differs because
+# shift opcodes are 4 slots and and/or/xor are 5.
+macro bitOp(operation, slowPath, advance)
+    loadisFromInstruction(3, t0)
+    loadisFromInstruction(2, t2)
+    loadisFromInstruction(1, t3)
+    loadConstantOrVariable(t0, t1)
+    loadConstantOrVariable(t2, t0)
+    bqb t0, tagTypeNumber, .slow
+    bqb t1, tagTypeNumber, .slow
+    operation(t1, t0)
+    orq tagTypeNumber, t0
+    storeq t0, [cfr, t3, 8]
+    dispatch(advance)
+
+.slow:
+    callSlowPath(slowPath)
+    dispatch(advance)
+end
+
+_llint_op_lshift:
+    traceExecution()
+    bitOp(
+        macro (left, right) lshifti left, right end,
+        _slow_path_lshift,
+        4)
+
+
+_llint_op_rshift:
+    traceExecution()
+    bitOp(
+        macro (left, right) rshifti left, right end,
+        _slow_path_rshift,
+        4)
+
+
+_llint_op_urshift:
+    traceExecution()
+    bitOp(
+        macro (left, right) urshifti left, right end,
+        _slow_path_urshift,
+        4)
+
+
+# op_unsigned: reinterpret an int32 as unsigned; values >= 2^31 cannot be
+# boxed as int32, so negative inputs take the slow path (which makes a double).
+_llint_op_unsigned:
+    traceExecution()
+    loadisFromInstruction(1, t0)
+    loadisFromInstruction(2, t1)
+    loadConstantOrVariable(t1, t2)
+    bilt t2, 0, .opUnsignedSlow
+    storeq t2, [cfr, t0, 8]
+    dispatch(3)
+.opUnsignedSlow:
+    callSlowPath(_slow_path_unsigned)
+    dispatch(3)
+
+
+_llint_op_bitand:
+    traceExecution()
+    bitOp(
+        macro (left, right) andi left, right end,
+        _slow_path_bitand,
+        5)
+
+
+_llint_op_bitxor:
+    traceExecution()
+    bitOp(
+        macro (left, right) xori left, right end,
+        _slow_path_bitxor,
+        5)
+
+
+_llint_op_bitor:
+    traceExecution()
+    bitOp(
+        macro (left, right) ori left, right end,
+        _slow_path_bitor,
+        5)
+
+
+# op_check_has_instance: fast path only when the constructor uses the
+# default [Symbol.hasInstance] behavior; otherwise the slow path handles
+# custom hasInstance and re-dispatches itself (hence dispatch(0)).
+_llint_op_check_has_instance:
+    traceExecution()
+    loadisFromInstruction(3, t1)
+    loadConstantOrVariableCell(t1, t0, .opCheckHasInstanceSlow)
+    btbz JSCell::m_flags[t0], ImplementsDefaultHasInstance, .opCheckHasInstanceSlow
+    dispatch(5)
+
+.opCheckHasInstanceSlow:
+    callSlowPath(_llint_slow_path_check_has_instance)
+    dispatch(0)
+
+
+# op_instanceof: walk the value's prototype chain looking for the prototype
+# operand; result is true if found, false once a non-cell prototype (null)
+# is reached.
+_llint_op_instanceof:
+    traceExecution()
+    # Actually do the work.
+    loadisFromInstruction(3, t0)
+    loadConstantOrVariableCell(t0, t1, .opInstanceofSlow)
+    bbb JSCell::m_type[t1], ObjectType, .opInstanceofSlow
+    loadisFromInstruction(2, t0)
+    loadConstantOrVariableCell(t0, t2, .opInstanceofSlow)
+    
+    # Register state: t1 = prototype, t2 = value
+    move 1, t0
+.opInstanceofLoop:
+    loadStructureAndClobberFirstArg(t2, t3)
+    loadq Structure::m_prototype[t3], t2
+    bqeq t2, t1, .opInstanceofDone
+    btqz t2, tagMask, .opInstanceofLoop
+
+    move 0, t0
+.opInstanceofDone:
+    orq ValueFalse, t0
+    loadisFromInstruction(1, t3)
+    storeq t0, [cfr, t3, 8]
+    dispatch(4)
+
+.opInstanceofSlow:
+    callSlowPath(_llint_slow_path_instanceof)
+    dispatch(4)
+
+
+# op_is_undefined: immediates compare against ValueUndefined directly; cells
+# are only undefined when they masquerade as undefined within this code
+# block's global object.
+_llint_op_is_undefined:
+    traceExecution()
+    loadisFromInstruction(2, t1)
+    loadisFromInstruction(1, t2)
+    loadConstantOrVariable(t1, t0)
+    btqz t0, tagMask, .opIsUndefinedCell
+    cqeq t0, ValueUndefined, t3
+    orq ValueFalse, t3
+    storeq t3, [cfr, t2, 8]
+    dispatch(3)
+.opIsUndefinedCell:
+    btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined
+    move ValueFalse, t1
+    storeq t1, [cfr, t2, 8]
+    dispatch(3)
+.masqueradesAsUndefined:
+    loadStructureWithScratch(t0, t3, t1)
+    loadp CodeBlock[cfr], t1
+    loadp CodeBlock::m_globalObject[t1], t1
+    cpeq Structure::m_globalObject[t3], t1, t0
+    orq ValueFalse, t0
+    storeq t0, [cfr, t2, 8]
+    dispatch(3)
+
+
+# op_is_boolean: after xor'ing with ValueFalse, a boolean leaves only bit 0
+# possibly set; tqz with ~1 yields the 0/1 answer, then rebox as boolean.
+_llint_op_is_boolean:
+    traceExecution()
+    loadisFromInstruction(2, t1)
+    loadisFromInstruction(1, t2)
+    loadConstantOrVariable(t1, t0)
+    xorq ValueFalse, t0
+    tqz t0, ~1, t0
+    orq ValueFalse, t0
+    storeq t0, [cfr, t2, 8]
+    dispatch(3)
+
+
+# op_is_number: any value with tagTypeNumber bits set is a number under the
+# JSVALUE64 encoding (covers both int32 and double boxings).
+_llint_op_is_number:
+    traceExecution()
+    loadisFromInstruction(2, t1)
+    loadisFromInstruction(1, t2)
+    loadConstantOrVariable(t1, t0)
+    tqnz t0, tagTypeNumber, t1
+    orq ValueFalse, t1
+    storeq t1, [cfr, t2, 8]
+    dispatch(3)
+
+
+# op_is_string: true iff the value is a cell of StringType.
+_llint_op_is_string:
+    traceExecution()
+    loadisFromInstruction(2, t1)
+    loadisFromInstruction(1, t2)
+    loadConstantOrVariable(t1, t0)
+    btqnz t0, tagMask, .opIsStringNotCell
+    cbeq JSCell::m_type[t0], StringType, t1
+    orq ValueFalse, t1
+    storeq t1, [cfr, t2, 8]
+    dispatch(3)
+.opIsStringNotCell:
+    storeq ValueFalse, [cfr, t2, 8]
+    dispatch(3)
+
+
+# op_is_object: true iff the value is a cell whose type is >= ObjectType.
+_llint_op_is_object:
+    traceExecution()
+    loadisFromInstruction(2, t1)
+    loadisFromInstruction(1, t2)
+    loadConstantOrVariable(t1, t0)
+    btqnz t0, tagMask, .opIsObjectNotCell
+    cbaeq JSCell::m_type[t0], ObjectType, t1
+    orq ValueFalse, t1
+    storeq t1, [cfr, t2, 8]
+    dispatch(3)
+.opIsObjectNotCell:
+    storeq ValueFalse, [cfr, t2, 8]
+    dispatch(3)
+
+
+# Load a property given its PropertyOffset: inline offsets (< firstOutOfLine-
+# Offset) read from the object's inline storage; out-of-line offsets index
+# the butterfly with a negated offset. objectAndStorage is clobbered.
+macro loadPropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value)
+    bilt propertyOffsetAsInt, firstOutOfLineOffset, .isInline
+    loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
+    negi propertyOffsetAsInt
+    sxi2q propertyOffsetAsInt, propertyOffsetAsInt
+    jmp .ready
+.isInline:
+    addp sizeof JSObject - (firstOutOfLineOffset - 2) * 8, objectAndStorage
+.ready:
+    loadq (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8], value
+end
+
+
+# Store counterpart of loadPropertyAtVariableOffset; same addressing scheme.
+macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value)
+    bilt propertyOffsetAsInt, firstOutOfLineOffset, .isInline
+    loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
+    negi propertyOffsetAsInt
+    sxi2q propertyOffsetAsInt, propertyOffsetAsInt
+    jmp .ready
+.isInline:
+    addp sizeof JSObject - (firstOutOfLineOffset - 2) * 8, objectAndStorage
+.ready:
+    storeq value, (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8]
+end
+
+macro getById(getPropertyStorage)
+    traceExecution()
+    # We only do monomorphic get_by_id caching for now, and we do not modify the
+    # opcode. We do, however, allow for the cache to change anytime if fails, since
+    # ping-ponging is free. At best we get lucky and the get_by_id will continue
+    # to take fast path on the new cache. At worst we take slow path, which is what
+    # we would have been doing anyway.
+    loadisFromInstruction(2, t0)
+    loadConstantOrVariableCell(t0, t3, .opGetByIdSlow)
+    loadStructureWithScratch(t3, t2, t1)
+    loadpFromInstruction(4, t1)
+    # Cache hit requires the base's structure to match the cached one.
+    bpneq t2, t1, .opGetByIdSlow
+    getPropertyStorage(
+        t3,
+        t0,
+        macro (propertyStorage, scratch)
+            loadisFromInstruction(5, t2)
+            loadisFromInstruction(1, t1)
+            loadq [propertyStorage, t2], scratch
+            storeq scratch, [cfr, t1, 8]
+            valueProfile(scratch, 8, t1)
+            dispatch(9)
+        end)
+
+    .opGetByIdSlow:
+        callSlowPath(_llint_slow_path_get_by_id)
+        dispatch(9)
+end
+
+# get_by_id specialized for inline property storage.
+_llint_op_get_by_id:
+    getById(withInlineStorage)
+
+
+# get_by_id specialized for out-of-line (butterfly) property storage.
+_llint_op_get_by_id_out_of_line:
+    getById(withOutOfLineStorage)
+
+
+_llint_op_get_array_length:
+ traceExecution()
+ loadisFromInstruction(2, t0)
+ loadpFromInstruction(4, t1)
+ loadConstantOrVariableCell(t0, t3, .opGetArrayLengthSlow)
+ move t3, t2
+ arrayProfile(t2, t1, t0)
+ btiz t2, IsArray, .opGetArrayLengthSlow
+ btiz t2, IndexingShapeMask, .opGetArrayLengthSlow
+ loadisFromInstruction(1, t1)
+ loadp JSObject::m_butterfly[t3], t0
+ loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0
+ bilt t0, 0, .opGetArrayLengthSlow
+ orq tagTypeNumber, t0
+ valueProfile(t0, 8, t2)
+ storeq t0, [cfr, t1, 8]
+ dispatch(9)
+
+.opGetArrayLengthSlow:
+ callSlowPath(_llint_slow_path_get_by_id)
+ dispatch(9)
+
+
# Shared monomorphic fast path for non-transition put_by_id. Operands:
# 1 = base, 3 = value, 4 = cached Structure, 5 = cached offset; length 9.
# On cache miss this jumps to .opPutByIdSlow, which is defined after
# _llint_op_put_by_id below (labels are file-scoped in offlineasm).
macro putById(getPropertyStorage)
    traceExecution()
    writeBarrierOnOperands(1, 3)
    loadisFromInstruction(1, t3)
    loadConstantOrVariableCell(t3, t0, .opPutByIdSlow)
    loadStructureWithScratch(t0, t2, t1)
    loadpFromInstruction(4, t1)
    bpneq t2, t1, .opPutByIdSlow  # structure check against the cache
    getPropertyStorage(
        t0,
        t3,
        macro (propertyStorage, scratch)
            loadisFromInstruction(5, t1)
            loadisFromInstruction(3, t2)
            loadConstantOrVariable(t2, scratch)
            storeq scratch, [propertyStorage, t1]
            dispatch(9)
        end)
end
+
# put_by_id into inline storage. The .opPutByIdSlow label below is shared by
# all putById/putByIdTransition expansions in this file.
_llint_op_put_by_id:
    putById(withInlineStorage)

.opPutByIdSlow:
    callSlowPath(_llint_slow_path_put_by_id)
    dispatch(9)
+
+
# put_by_id into out-of-line (butterfly) storage.
_llint_op_put_by_id_out_of_line:
    putById(withOutOfLineStorage)
+
+
# Fast path for put_by_id that transitions the object's structure. Operands:
# 1 = base, 3 = value, 4 = old Structure, 5 = offset, 6 = new Structure,
# (7 = StructureChain for the "normal" variants). additionalChecks runs the
# prototype-chain validation; getPropertyStorage picks inline vs out-of-line
# storage. Falls back to .opPutByIdSlow (defined under _llint_op_put_by_id).
macro putByIdTransition(additionalChecks, getPropertyStorage)
    traceExecution()
    writeBarrierOnOperand(1)
    loadisFromInstruction(1, t3)
    loadpFromInstruction(4, t1)
    loadConstantOrVariableCell(t3, t0, .opPutByIdSlow)
    loadStructureWithScratch(t0, t2, t3)
    bpneq t2, t1, .opPutByIdSlow  # must still have the old structure
    additionalChecks(t1, t3, t2)
    loadisFromInstruction(3, t2)
    loadisFromInstruction(5, t1)
    getPropertyStorage(
        t0,
        t3,
        macro (propertyStorage, scratch)
            addp t1, propertyStorage, t3
            loadConstantOrVariable(t2, t1)
            storeq t1, [t3]  # store the value first...
            loadpFromInstruction(6, t1)
            # ...then commit the transition by installing the new StructureID.
            loadi Structure::m_blob + StructureIDBlob::u.words.word1[t1], t1
            storei t1, JSCell::m_structureID[t0]
            dispatch(9)
        end)
end
+
# No-op check for the "direct" transition variants (no prototype chain walk).
macro noAdditionalChecks(oldStructure, scratch, scratch2)
end
+
# Validates that every structure along the base's prototype chain matches the
# StructureChain cached in operand 7; any mismatch bails to .opPutByIdSlow.
macro structureChainChecks(oldStructure, scratch, scratch2)
    const protoCell = oldStructure # Reusing the oldStructure register for the proto
    loadpFromInstruction(7, scratch)
    assert(macro (ok) btpnz scratch, ok end)
    loadp StructureChain::m_vector[scratch], scratch
    assert(macro (ok) btpnz scratch, ok end)
    bqeq Structure::m_prototype[oldStructure], ValueNull, .done
.loop:
    loadq Structure::m_prototype[oldStructure], protoCell
    loadStructureAndClobberFirstArg(protoCell, scratch2)
    move scratch2, oldStructure
    # Each prototype's structure must equal the cached chain entry.
    bpneq oldStructure, [scratch], .opPutByIdSlow
    addp 8, scratch
    bqneq Structure::m_prototype[oldStructure], ValueNull, .loop
.done:
end
+
# The four structure-transition flavors of put_by_id:
# direct = no prototype-chain check; normal = chain check via operand 7;
# each comes in inline-storage and out-of-line-storage forms.
_llint_op_put_by_id_transition_direct:
    putByIdTransition(noAdditionalChecks, withInlineStorage)


_llint_op_put_by_id_transition_direct_out_of_line:
    putByIdTransition(noAdditionalChecks, withOutOfLineStorage)


_llint_op_put_by_id_transition_normal:
    putByIdTransition(structureChainChecks, withInlineStorage)


_llint_op_put_by_id_transition_normal_out_of_line:
    putByIdTransition(structureChainChecks, withOutOfLineStorage)
+
+
# Indexed load. Operands: 1 = dst, 2 = base, 3 = subscript, 4 = ArrayProfile;
# slot 5 is the value profile; opcode length 6. Fast paths cover Int32,
# Contiguous, Double, and ArrayStorage indexing shapes; holes and
# out-of-range indices record out-of-bounds in the profile, then go slow.
_llint_op_get_by_val:
    traceExecution()
    loadisFromInstruction(2, t2)
    loadConstantOrVariableCell(t2, t0, .opGetByValSlow)
    loadpFromInstruction(4, t3)
    move t0, t2
    arrayProfile(t2, t3, t1)
    loadisFromInstruction(3, t3)
    loadConstantOrVariableInt32(t3, t1, .opGetByValSlow)
    sxi2q t1, t1  # sign-extend the int32 index for 64-bit addressing
    loadp JSObject::m_butterfly[t0], t3
    andi IndexingShapeMask, t2
    bieq t2, Int32Shape, .opGetByValIsContiguous
    bineq t2, ContiguousShape, .opGetByValNotContiguous
.opGetByValIsContiguous:

    biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValOutOfBounds
    loadisFromInstruction(1, t0)
    loadq [t3, t1, 8], t2
    btqz t2, .opGetByValOutOfBounds  # empty value == hole
    jmp .opGetByValDone

.opGetByValNotContiguous:
    bineq t2, DoubleShape, .opGetByValNotDouble
    biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValOutOfBounds
    loadis 8[PB, PC, 8], t0  # operand 1 (dst), loaded via raw offset
    loadd [t3, t1, 8], ft0
    bdnequn ft0, ft0, .opGetByValOutOfBounds  # NaN means hole in DoubleShape
    fd2q ft0, t2
    subq tagTypeNumber, t2  # box the double
    jmp .opGetByValDone

.opGetByValNotDouble:
    subi ArrayStorageShape, t2
    bia t2, SlowPutArrayStorageShape - ArrayStorageShape, .opGetByValSlow
    biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t3], .opGetByValOutOfBounds
    loadisFromInstruction(1, t0)
    loadq ArrayStorage::m_vector[t3, t1, 8], t2
    btqz t2, .opGetByValOutOfBounds

.opGetByValDone:
    storeq t2, [cfr, t0, 8]
    valueProfile(t2, 5, t0)
    dispatch(6)

.opGetByValOutOfBounds:
    loadpFromInstruction(4, t0)
    storeb 1, ArrayProfile::m_outOfBounds[t0]
.opGetByValSlow:
    callSlowPath(_llint_slow_path_get_by_val)
    dispatch(6)
+
+
# Store helper for putByVal on Int32/Double/Contiguous shapes. Expects
# t0 = butterfly, t3 = index. In-bounds stores go through storeCallback;
# a store one-past publicLength (but within vectorLength) grows publicLength
# and records a may-store-to-hole in the ArrayProfile (raw operand 4 at
# byte offset 32). Anything beyond vectorLength is out of bounds.
macro contiguousPutByVal(storeCallback)
    biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds
.storeResult:
    loadisFromInstruction(3, t2)
    storeCallback(t2, t1, [t0, t3, 8])
    dispatch(5)

.outOfBounds:
    biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
    loadp 32[PB, PC, 8], t2  # operand 4: ArrayProfile
    storeb 1, ArrayProfile::m_mayStoreToHole[t2]
    addi 1, t3, t2
    storei t2, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
    jmp .storeResult
end
+
# Indexed store shared by put_by_val and put_by_val_direct. Operands:
# 1 = base, 2 = subscript, 3 = value, 4 = ArrayProfile; opcode length 5.
# Dispatches on the base's indexing shape; shape-mismatching values
# (e.g. a non-int into Int32Shape) and exotic cases take slowPath.
macro putByVal(slowPath)
    traceExecution()
    writeBarrierOnOperands(1, 3)
    loadisFromInstruction(1, t0)
    loadConstantOrVariableCell(t0, t1, .opPutByValSlow)
    loadpFromInstruction(4, t3)
    move t1, t2
    arrayProfile(t2, t3, t0)
    loadisFromInstruction(2, t0)
    loadConstantOrVariableInt32(t0, t3, .opPutByValSlow)
    sxi2q t3, t3  # sign-extend index
    loadp JSObject::m_butterfly[t1], t0
    andi IndexingShapeMask, t2
    bineq t2, Int32Shape, .opPutByValNotInt32
    contiguousPutByVal(
        macro (operand, scratch, address)
            loadConstantOrVariable(operand, scratch)
            # Int32Shape only accepts boxed int32s (>= tagTypeNumber).
            bpb scratch, tagTypeNumber, .opPutByValSlow
            storep scratch, address
        end)

.opPutByValNotInt32:
    bineq t2, DoubleShape, .opPutByValNotDouble
    contiguousPutByVal(
        macro (operand, scratch, address)
            loadConstantOrVariable(operand, scratch)
            bqb scratch, tagTypeNumber, .notInt
            ci2d scratch, ft0  # int -> double
            jmp .ready
        .notInt:
            addp tagTypeNumber, scratch  # unbox the double
            fq2d scratch, ft0
            # NaN is the hole sentinel in DoubleShape, so it cannot be stored.
            bdnequn ft0, ft0, .opPutByValSlow
        .ready:
            stored ft0, address
        end)

.opPutByValNotDouble:
    bineq t2, ContiguousShape, .opPutByValNotContiguous
    contiguousPutByVal(
        macro (operand, scratch, address)
            loadConstantOrVariable(operand, scratch)
            storep scratch, address
        end)

.opPutByValNotContiguous:
    bineq t2, ArrayStorageShape, .opPutByValSlow
    biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
    btqz ArrayStorage::m_vector[t0, t3, 8], .opPutByValArrayStorageEmpty
.opPutByValArrayStorageStoreResult:
    loadisFromInstruction(3, t2)
    loadConstantOrVariable(t2, t1)
    storeq t1, ArrayStorage::m_vector[t0, t3, 8]
    dispatch(5)

.opPutByValArrayStorageEmpty:
    # Filling a hole: bump m_numValuesInVector and maybe grow publicLength.
    loadpFromInstruction(4, t1)
    storeb 1, ArrayProfile::m_mayStoreToHole[t1]
    addi 1, ArrayStorage::m_numValuesInVector[t0]
    bib t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .opPutByValArrayStorageStoreResult
    addi 1, t3, t1
    storei t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
    jmp .opPutByValArrayStorageStoreResult

.opPutByValOutOfBounds:
    loadpFromInstruction(4, t0)
    storeb 1, ArrayProfile::m_outOfBounds[t0]
.opPutByValSlow:
    callSlowPath(slowPath)
    dispatch(5)
end
+
# put_by_val and its "direct" (defineOwnProperty-style) variant differ only
# in which slow path they call.
_llint_op_put_by_val:
    putByVal(_llint_slow_path_put_by_val)

_llint_op_put_by_val_direct:
    putByVal(_llint_slow_path_put_by_val_direct)
+
+
# Unconditional jump: the relative bytecode target lives in operand 1.
_llint_op_jmp:
    traceExecution()
    dispatchIntIndirect(1)
+
+
# Shared body for jtrue/jfalse-style opcodes. Operand 1 = condition value,
# operand 2 = jump target. XORing with ValueFalse maps false->0 and true->1;
# any other bits set means the value is not a boolean, so take the slow path.
macro jumpTrueOrFalse(conditionOp, slow)
    loadisFromInstruction(1, t1)
    loadConstantOrVariable(t1, t0)
    xorq ValueFalse, t0
    btqnz t0, -1, .slow  # not a boolean primitive
    conditionOp(t0, .target)
    dispatch(3)

.target:
    dispatchIntIndirect(2)

.slow:
    callSlowPath(slow)
    dispatch(0)  # slow path set PC itself
end
+
+
# Shared body for jeq_null/jneq_null. Operand 1 = value (never a constant),
# operand 2 = jump target. Cells go through cellHandler (which can consult
# MasqueradesAsUndefined); immediates are normalized so null and undefined
# compare equal, then go through immediateHandler.
macro equalNull(cellHandler, immediateHandler)
    loadisFromInstruction(1, t0)
    assertNotConstant(t0)
    loadq [cfr, t0, 8], t0
    btqnz t0, tagMask, .immediate
    loadStructureWithScratch(t0, t2, t1)
    cellHandler(t2, JSCell::m_flags[t0], .target)
    dispatch(3)

.target:
    dispatchIntIndirect(2)

.immediate:
    # Clear the undefined bit so both null and undefined become ValueNull.
    andq ~TagBitUndefined, t0
    immediateHandler(t0, .target)
    dispatch(3)
end
+
# Jump if operand 1 is null/undefined. A cell only equals null when it
# masquerades as undefined in this frame's global object.
_llint_op_jeq_null:
    traceExecution()
    equalNull(
        macro (structure, value, target)
            btbz value, MasqueradesAsUndefined, .notMasqueradesAsUndefined
            loadp CodeBlock[cfr], t0
            loadp CodeBlock::m_globalObject[t0], t0
            bpeq Structure::m_globalObject[structure], t0, target
.notMasqueradesAsUndefined:
        end,
        macro (value, target) bqeq value, ValueNull, target end)
+
+
# Jump if operand 1 is NOT null/undefined (inverse of jeq_null).
_llint_op_jneq_null:
    traceExecution()
    equalNull(
        macro (structure, value, target)
            btbz value, MasqueradesAsUndefined, target
            loadp CodeBlock[cfr], t0
            loadp CodeBlock::m_globalObject[t0], t0
            bpneq Structure::m_globalObject[structure], t0, target
        end,
        macro (value, target) bqneq value, ValueNull, target end)
+
+
# Jump if operand 1 differs from a well-known "special pointer" of the
# global object (index in operand 2); target offset in operand 3; length 4.
_llint_op_jneq_ptr:
    traceExecution()
    loadisFromInstruction(1, t0)
    loadisFromInstruction(2, t1)
    loadp CodeBlock[cfr], t2
    loadp CodeBlock::m_globalObject[t2], t2
    loadp JSGlobalObject::m_specialPointers[t2, t1, 8], t1
    bpneq t1, [cfr, t0, 8], .opJneqPtrTarget
    dispatch(4)

.opJneqPtrTarget:
    dispatchIntIndirect(3)
+
+
# Shared body for the fused compare-and-jump opcodes. Operands: 1 and 2 are
# the values, 3 is the jump target; length 4. Uses int32 comparison when both
# are boxed ints, otherwise unboxes/converts to double; non-numbers go slow.
macro compare(integerCompare, doubleCompare, slowPath)
    loadisFromInstruction(1, t2)
    loadisFromInstruction(2, t3)
    loadConstantOrVariable(t2, t0)
    loadConstantOrVariable(t3, t1)
    bqb t0, tagTypeNumber, .op1NotInt
    bqb t1, tagTypeNumber, .op2NotInt
    integerCompare(t0, t1, .jumpTarget)
    dispatch(4)

.op1NotInt:
    btqz t0, tagTypeNumber, .slow  # op1 not a number at all
    bqb t1, tagTypeNumber, .op1NotIntOp2NotInt
    ci2d t1, ft1  # op2 is int: convert to double
    jmp .op1NotIntReady
.op1NotIntOp2NotInt:
    btqz t1, tagTypeNumber, .slow
    addq tagTypeNumber, t1  # unbox op2's double
    fq2d t1, ft1
.op1NotIntReady:
    addq tagTypeNumber, t0  # unbox op1's double
    fq2d t0, ft0
    doubleCompare(ft0, ft1, .jumpTarget)
    dispatch(4)

.op2NotInt:
    ci2d t0, ft0  # op1 is int: convert to double
    btqz t1, tagTypeNumber, .slow
    addq tagTypeNumber, t1
    fq2d t1, ft1
    doubleCompare(ft0, ft1, .jumpTarget)
    dispatch(4)

.jumpTarget:
    dispatchIntIndirect(3)

.slow:
    callSlowPath(slowPath)
    dispatch(0)
end
+
+
# switch on an int32 immediate via the CodeBlock's SimpleJumpTable. Operands:
# 1 = jump-table index, 2 = default target, 3 = scrutinee. Doubles and
# anything else non-int go through the slow path / fall-through.
_llint_op_switch_imm:
    traceExecution()
    loadisFromInstruction(3, t2)
    loadisFromInstruction(1, t3)
    loadConstantOrVariable(t2, t1)
    loadp CodeBlock[cfr], t2
    loadp CodeBlock::m_rareData[t2], t2
    muli sizeof SimpleJumpTable, t3 # FIXME: would be nice to peephole this!
    loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
    addp t3, t2  # t2 = &switchJumpTables[tableIndex]
    bqb t1, tagTypeNumber, .opSwitchImmNotInt
    subi SimpleJumpTable::min[t2], t1  # rebase scrutinee onto table range
    biaeq t1, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchImmFallThrough
    loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t3
    loadis [t3, t1, 4], t1
    btiz t1, .opSwitchImmFallThrough  # zero offset == no case
    dispatch(t1)

.opSwitchImmNotInt:
    btqnz t1, tagTypeNumber, .opSwitchImmSlow # Go slow if it's a double.
.opSwitchImmFallThrough:
    dispatchIntIndirect(2)

.opSwitchImmSlow:
    callSlowPath(_llint_slow_path_switch_imm)
    dispatch(0)
+
+
# switch on a single-character string via a SimpleJumpTable. Operands as in
# switch_imm. Only length-1, non-rope strings take the fast path; both 8-bit
# and 16-bit string buffers are handled; ropes go to the slow path.
_llint_op_switch_char:
    traceExecution()
    loadisFromInstruction(3, t2)
    loadisFromInstruction(1, t3)
    loadConstantOrVariable(t2, t1)
    loadp CodeBlock[cfr], t2
    loadp CodeBlock::m_rareData[t2], t2
    muli sizeof SimpleJumpTable, t3
    loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
    addp t3, t2
    btqnz t1, tagMask, .opSwitchCharFallThrough  # not a cell
    bbneq JSCell::m_type[t1], StringType, .opSwitchCharFallThrough
    bineq JSString::m_length[t1], 1, .opSwitchCharFallThrough
    loadp JSString::m_value[t1], t0
    btpz t0, .opSwitchOnRope  # null impl pointer means rope
    loadp StringImpl::m_data8[t0], t1
    btinz StringImpl::m_hashAndFlags[t0], HashFlags8BitBuffer, .opSwitchChar8Bit
    loadh [t1], t0  # 16-bit character
    jmp .opSwitchCharReady
.opSwitchChar8Bit:
    loadb [t1], t0
.opSwitchCharReady:
    subi SimpleJumpTable::min[t2], t0
    biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchCharFallThrough
    loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t2
    loadis [t2, t0, 4], t1
    btiz t1, .opSwitchCharFallThrough
    dispatch(t1)

.opSwitchCharFallThrough:
    dispatchIntIndirect(2)

.opSwitchOnRope:
    callSlowPath(_llint_slow_path_switch_char)
    dispatch(0)
+
+
# Records the structure of the 'this' argument of an upcoming call into the
# call's ArrayProfile (instruction slot CallOpCodeSize - 2). Operand 4 is the
# register offset used to locate 'this'; non-cell 'this' is not profiled.
macro arrayProfileForCall()
    loadisFromInstruction(4, t3)
    negp t3
    loadq ThisArgumentOffset[cfr, t3, 8], t0
    btqnz t0, tagMask, .done
    loadpFromInstruction((CallOpCodeSize - 2), t1)
    loadi JSCell::m_structureID[t0], t3
    storei t3, ArrayProfile::m_lastSeenStructureID[t1]
.done:
end
+
# Shared body for call-like opcodes. Operands: 2 = callee, 3 = argument
# count, 4 = register offset, 5 = LLIntCallLinkInfo. Fast path requires the
# callee to match the link info's cached callee; otherwise slowPathForCall
# performs (re)linking. Sets up the callee frame header before the call.
macro doCall(slowPath)
    loadisFromInstruction(2, t0)
    loadpFromInstruction(5, t1)
    loadp LLIntCallLinkInfo::callee[t1], t2
    loadConstantOrVariable(t0, t3)
    bqneq t3, t2, .opCallSlow  # call-link cache miss
    loadisFromInstruction(4, t3)
    lshifti 3, t3
    negp t3
    addp cfr, t3  # t3 = start of the new (callee) frame
    storeq t2, Callee[t3]
    loadisFromInstruction(3, t2)
    storei PC, ArgumentCount + TagOffset[cfr]  # save caller PC for return
    storei t2, ArgumentCount + PayloadOffset[t3]
    addp CallerFrameAndPCSize, t3
    callTargetFunction(t1, t3)

.opCallSlow:
    slowPathForCall(slowPath)
end
+
+
# Return operand 1's value to the caller.
_llint_op_ret:
    traceExecution()
    checkSwitchToJITForEpilogue()
    loadisFromInstruction(1, t2)
    loadConstantOrVariable(t2, t0)  # return value expected in t0 by doReturn
    doReturn()
+
+
# ToPrimitive: operand 2 = src, operand 1 = dst; length 3. Non-object values
# (immediates and non-object cells such as strings) pass through unchanged;
# objects need the full protocol and go to the slow path.
_llint_op_to_primitive:
    traceExecution()
    loadisFromInstruction(2, t2)
    loadisFromInstruction(1, t3)
    loadConstantOrVariable(t2, t0)
    btqnz t0, tagMask, .opToPrimitiveIsImm
    bbaeq JSCell::m_type[t0], ObjectType, .opToPrimitiveSlowCase
.opToPrimitiveIsImm:
    storeq t0, [cfr, t3, 8]
    dispatch(3)

.opToPrimitiveSlowCase:
    callSlowPath(_slow_path_to_primitive)
    dispatch(3)
+
+
# Exception landing pad. Recovers the VM via the Callee cell's MarkedBlock,
# restores cfr/PC from the VM's throw bookkeeping, and stores the Exception
# (operand 1) and its thrown value (operand 2) into the frame; length 3.
_llint_op_catch:
    # Gotta restore the tag registers. We could be throwing from FTL, which may
    # clobber them.
    move TagTypeNumber, tagTypeNumber
    move TagMask, tagMask

    # This is where we end up from the JIT's throw trampoline (because the
    # machine code return address will be set to _llint_op_catch), and from
    # the interpreter's throw trampoline (see _llint_throw_trampoline).
    # The throwing code must have known that we were throwing to the interpreter,
    # and have set VM::targetInterpreterPCForThrow.
    loadp Callee[cfr], t3
    andp MarkedBlockMask, t3  # MarkedBlock base of the callee cell
    loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3  # t3 = VM*
    loadp VM::callFrameForThrow[t3], cfr
    loadp VM::vmEntryFrameForThrow[t3], t0
    storep t0, VM::topVMEntryFrame[t3]
    restoreStackPointerAfterCall()

    # Rebuild PB/PC: PC becomes the instruction index of the handler.
    loadp CodeBlock[cfr], PB
    loadp CodeBlock::m_instructions[PB], PB
    loadp VM::targetInterpreterPCForThrow[t3], PC
    subp PB, PC
    rshiftp 3, PC

    # Take the exception out of the VM and expose it to the handler.
    loadq VM::m_exception[t3], t0
    storeq 0, VM::m_exception[t3]
    loadisFromInstruction(1, t2)
    storeq t0, [cfr, t2, 8]

    loadq Exception::m_value[t0], t3
    loadisFromInstruction(2, t2)
    storeq t3, [cfr, t2, 8]

    traceExecution()
    dispatch(3)
+
+
# Terminates program/eval code, returning operand 1's value (never a
# constant) to the caller.
_llint_op_end:
    traceExecution()
    checkSwitchToJITForEpilogue()
    loadisFromInstruction(1, t0)
    assertNotConstant(t0)
    loadq [cfr, t0, 8], t0
    doReturn()
+
+
# Entered after a slow path raised an exception: let the C++ handler pick
# the target, then jump to the machine-code throw target stored in the VM.
_llint_throw_from_slow_path_trampoline:
    callSlowPath(_llint_slow_path_handle_exception)

    # When throwing from the interpreter (i.e. throwing from LLIntSlowPaths), so
    # the throw target is not necessarily interpreted code, we come to here.
    # This essentially emulates the JIT's throwing protocol.
    loadp Callee[cfr], t1
    andp MarkedBlockMask, t1
    loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1  # t1 = VM*
    jmp VM::targetMachinePCForThrow[t1]
+
+
# Throw variant used mid-call: pop the return address first so the frame
# looks like a normal slow-path throw.
_llint_throw_during_call_trampoline:
    preserveReturnAddressAfterCall(t2)
    jmp _llint_throw_from_slow_path_trampoline
+
+
# Trampoline for calling a host (C/C++) function. Recovers the VM from the
# Callee cell, publishes the call frame in VM::topCallFrame, and calls the
# native entry point found at executableOffsetToFunction inside the callee's
# executable. Per-ABI register choices are selected below; X86_64_WIN also
# reserves the 32-byte shadow space the Windows calling convention requires.
# On return, a pending VM::m_exception diverts to the throw trampoline.
macro nativeCallTrampoline(executableOffsetToFunction)

    functionPrologue()
    storep 0, CodeBlock[cfr]  # native frames have no CodeBlock
    if X86_64 or X86_64_WIN
        if X86_64
            const arg1 = t4 # t4 = rdi
            const arg2 = t5 # t5 = rsi
            const temp = t1
        elsif X86_64_WIN
            const arg1 = t2 # t2 = rcx
            const arg2 = t1 # t1 = rdx
            const temp = t0
        end
        loadp Callee[cfr], t0
        andp MarkedBlockMask, t0, t1
        loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1  # t1 = VM*
        storep cfr, VM::topCallFrame[t1]
        move cfr, arg1  # native convention: arg1 = ExecState*
        loadp Callee[cfr], arg2
        loadp JSFunction::m_executable[arg2], temp
        checkStackPointerAlignment(t3, 0xdead0001)
        if X86_64_WIN
            subp 32, sp  # Windows x64 shadow space
        end
        call executableOffsetToFunction[temp]
        if X86_64_WIN
            addp 32, sp
        end
        loadp Callee[cfr], t3
        andp MarkedBlockMask, t3
        loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
    elsif ARM64 or C_LOOP
        loadp Callee[cfr], t0
        andp MarkedBlockMask, t0, t1
        loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1
        storep cfr, VM::topCallFrame[t1]
        preserveReturnAddressAfterCall(t3)
        storep t3, ReturnPC[cfr]
        move cfr, t0
        loadp Callee[cfr], t1
        loadp JSFunction::m_executable[t1], t1
        if C_LOOP
            cloopCallNative executableOffsetToFunction[t1]
        else
            call executableOffsetToFunction[t1]
        end
        restoreReturnAddressBeforeReturn(t3)
        loadp Callee[cfr], t3
        andp MarkedBlockMask, t3
        loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
    else
        error  # unsupported 64-bit backend
    end

    functionEpilogue()

    # If the native callee raised an exception, route it to the interpreter's
    # throw machinery instead of returning normally.
    btqnz VM::m_exception[t3], .handleException
    ret

.handleException:
    storep cfr, VM::topCallFrame[t3]
    restoreStackPointerAfterCall()
    jmp _llint_throw_from_slow_path_trampoline
end
+
+
# Stores the frame's JSGlobalObject into the register named by instruction
# operand 'dst'.
macro getGlobalObject(dst)
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_globalObject[t0], t0
    loadisFromInstruction(dst, t1)
    storeq t0, [cfr, t1, 8]
end
+
# Bails to slowPath if the global object's var-injection watchpoint has
# fired (i.e. scope variables may have been injected dynamically).
macro varInjectionCheck(slowPath)
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_globalObject[t0], t0
    loadp JSGlobalObject::m_varInjectionWatchpoint[t0], t0
    bbeq WatchpointSet::m_state[t0], IsInvalidated, slowPath
end
+
# Walks operand-5 many JSScope::m_next links starting from the scope in
# operand 2 and stores the resulting scope into operand 1.
macro resolveScope()
    loadisFromInstruction(5, t2)  # scope-chain depth to walk
    loadisFromInstruction(2, t0)
    loadp [cfr, t0, 8], t0
    btiz t2, .resolveScopeLoopEnd

.resolveScopeLoop:
    loadp JSScope::m_next[t0], t0
    subi 1, t2
    btinz t2, .resolveScopeLoop

.resolveScopeLoopEnd:
    loadisFromInstruction(1, t1)
    storeq t0, [cfr, t1, 8]
end
+
+
# Resolves the scope holding a variable. Operand 4 holds the resolve type;
# Global* types answer with the global object, ClosureVar types walk the
# scope chain, and the *WithVarInjectionChecks types first verify that no
# variables were injected. Unknown/dynamic types go to C++; length 7.
_llint_op_resolve_scope:
    traceExecution()
    loadisFromInstruction(4, t0)

#rGlobalProperty:
    bineq t0, GlobalProperty, .rGlobalVar
    getGlobalObject(1)
    dispatch(7)

.rGlobalVar:
    bineq t0, GlobalVar, .rClosureVar
    getGlobalObject(1)
    dispatch(7)

.rClosureVar:
    bineq t0, ClosureVar, .rGlobalPropertyWithVarInjectionChecks
    resolveScope()
    dispatch(7)

.rGlobalPropertyWithVarInjectionChecks:
    bineq t0, GlobalPropertyWithVarInjectionChecks, .rGlobalVarWithVarInjectionChecks
    varInjectionCheck(.rDynamic)
    getGlobalObject(1)
    dispatch(7)

.rGlobalVarWithVarInjectionChecks:
    bineq t0, GlobalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks
    varInjectionCheck(.rDynamic)
    getGlobalObject(1)
    dispatch(7)

.rClosureVarWithVarInjectionChecks:
    bineq t0, ClosureVarWithVarInjectionChecks, .rDynamic
    varInjectionCheck(.rDynamic)
    resolveScope()
    dispatch(7)

.rDynamic:
    callSlowPath(_llint_slow_path_resolve_scope)
    dispatch(7)
+
+
# Loads the cell named by the given instruction operand into t0 and verifies
# its structure against the one cached in operand 5, else jumps to slowPath.
macro loadWithStructureCheck(operand, slowPath)
    loadisFromInstruction(operand, t0)
    loadq [cfr, t0, 8], t0
    loadStructureWithScratch(t0, t2, t1)
    loadpFromInstruction(5, t1)
    bpneq t2, t1, slowPath
end
+
# get_from_scope helper: reads the property of the object in t0 at the
# offset in operand 6, value-profiles it (slot 7), and stores to operand 1.
macro getProperty()
    loadisFromInstruction(6, t1)
    loadPropertyAtVariableOffset(t1, t0, t2)
    valueProfile(t2, 7, t0)
    loadisFromInstruction(1, t0)
    storeq t2, [cfr, t0, 8]
end
+
# get_from_scope helper: loads a global variable through the absolute
# address cached in operand 6, profiles it, and stores to operand 1.
macro getGlobalVar()
    loadpFromInstruction(6, t0)
    loadq [t0], t0
    valueProfile(t0, 7, t1)
    loadisFromInstruction(1, t1)
    storeq t0, [cfr, t1, 8]
end
+
# get_from_scope helper: reads slot operand-6 of the environment record in
# t0, profiles it, and stores to operand 1.
macro getClosureVar()
    loadisFromInstruction(6, t1)
    loadq JSEnvironmentRecord_variables[t0, t1, 8], t0
    valueProfile(t0, 7, t1)
    loadisFromInstruction(1, t1)
    storeq t0, [cfr, t1, 8]
end
+
# Reads a variable from a resolved scope. Operand 4's low bits (masked by
# ResolveModeMask) select the resolve type; operand 2 is the scope. Property
# reads need a structure check, var-injection variants an extra watchpoint
# check; anything else goes to C++; length 8.
_llint_op_get_from_scope:
    traceExecution()
    loadisFromInstruction(4, t0)
    andi ResolveModeMask, t0

#gGlobalProperty:
    bineq t0, GlobalProperty, .gGlobalVar
    loadWithStructureCheck(2, .gDynamic)
    getProperty()
    dispatch(8)

.gGlobalVar:
    bineq t0, GlobalVar, .gClosureVar
    getGlobalVar()
    dispatch(8)

.gClosureVar:
    bineq t0, ClosureVar, .gGlobalPropertyWithVarInjectionChecks
    loadVariable(2, t0)
    getClosureVar()
    dispatch(8)

.gGlobalPropertyWithVarInjectionChecks:
    bineq t0, GlobalPropertyWithVarInjectionChecks, .gGlobalVarWithVarInjectionChecks
    loadWithStructureCheck(2, .gDynamic)
    getProperty()
    dispatch(8)

.gGlobalVarWithVarInjectionChecks:
    bineq t0, GlobalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks
    varInjectionCheck(.gDynamic)
    getGlobalVar()
    dispatch(8)

.gClosureVarWithVarInjectionChecks:
    bineq t0, ClosureVarWithVarInjectionChecks, .gDynamic
    varInjectionCheck(.gDynamic)
    loadVariable(2, t0)
    getClosureVar()
    dispatch(8)

.gDynamic:
    callSlowPath(_llint_slow_path_get_from_scope)
    dispatch(8)
+
+
# put_to_scope helper: stores operand 3's value into the object in t0 at
# the property offset in operand 6.
macro putProperty()
    loadisFromInstruction(3, t1)
    loadConstantOrVariable(t1, t2)
    loadisFromInstruction(6, t1)
    storePropertyAtVariableOffset(t1, t0, t2)
end
+
# put_to_scope helper: fires the variable's WatchpointSet (operand 5) and
# writes operand 3's value through the absolute address in operand 6.
# notifyWrite bails to .pDynamic if the set cannot be handled inline.
macro putGlobalVar()
    loadisFromInstruction(3, t0)
    loadConstantOrVariable(t0, t1)
    loadpFromInstruction(5, t2)
    loadpFromInstruction(6, t0)
    notifyWrite(t2, .pDynamic)
    storeq t1, [t0]
end
+
# put_to_scope helper: stores operand 3's value into slot operand-6 of the
# environment record in t0.
macro putClosureVar()
    loadisFromInstruction(3, t1)
    loadConstantOrVariable(t1, t2)
    loadisFromInstruction(6, t1)
    storeq t2, JSEnvironmentRecord_variables[t0, t1, 8]
end
+
# Like putClosureVar, but for LocalClosureVar: operand 5 may carry an
# optional per-variable WatchpointSet that must be notified before writing.
macro putLocalClosureVar()
    loadisFromInstruction(3, t1)
    loadConstantOrVariable(t1, t2)
    loadpFromInstruction(5, t3)
    btpz t3, .noVariableWatchpointSet  # no watchpoint set for this variable
    notifyWrite(t3, .pDynamic)
.noVariableWatchpointSet:
    loadisFromInstruction(6, t1)
    storeq t2, JSEnvironmentRecord_variables[t0, t1, 8]
end
+
+
# Writes a variable into a resolved scope; mirror image of get_from_scope.
# Operand 4's ResolveModeMask bits pick the resolve type; operand 1 is the
# scope, operand 3 the value. Every store path runs the appropriate GC write
# barrier first; dynamic cases go to C++; length 7.
_llint_op_put_to_scope:
    traceExecution()
    loadisFromInstruction(4, t0)
    andi ResolveModeMask, t0

#pLocalClosureVar:
    bineq t0, LocalClosureVar, .pGlobalProperty
    writeBarrierOnOperands(1, 3)
    loadVariable(1, t0)
    putLocalClosureVar()
    dispatch(7)

.pGlobalProperty:
    bineq t0, GlobalProperty, .pGlobalVar
    writeBarrierOnOperands(1, 3)
    loadWithStructureCheck(1, .pDynamic)
    putProperty()
    dispatch(7)

.pGlobalVar:
    bineq t0, GlobalVar, .pClosureVar
    writeBarrierOnGlobalObject(3)
    putGlobalVar()
    dispatch(7)

.pClosureVar:
    bineq t0, ClosureVar, .pGlobalPropertyWithVarInjectionChecks
    writeBarrierOnOperands(1, 3)
    loadVariable(1, t0)
    putClosureVar()
    dispatch(7)

.pGlobalPropertyWithVarInjectionChecks:
    bineq t0, GlobalPropertyWithVarInjectionChecks, .pGlobalVarWithVarInjectionChecks
    writeBarrierOnOperands(1, 3)
    loadWithStructureCheck(1, .pDynamic)
    putProperty()
    dispatch(7)

.pGlobalVarWithVarInjectionChecks:
    bineq t0, GlobalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks
    writeBarrierOnGlobalObject(3)
    varInjectionCheck(.pDynamic)
    putGlobalVar()
    dispatch(7)

.pClosureVarWithVarInjectionChecks:
    bineq t0, ClosureVarWithVarInjectionChecks, .pDynamic
    writeBarrierOnOperands(1, 3)
    varInjectionCheck(.pDynamic)
    loadVariable(1, t0)
    putClosureVar()
    dispatch(7)

.pDynamic:
    callSlowPath(_llint_slow_path_put_to_scope)
    dispatch(7)
+
+
# Reads slot operand-3 of the DirectArguments object in operand 2, profiles
# the value (slot 4), and stores to operand 1; length 5.
_llint_op_get_from_arguments:
    traceExecution()
    loadVariable(2, t0)
    loadi 24[PB, PC, 8], t1  # operand 3 (index), via raw byte offset 3*8
    loadq DirectArguments_storage[t0, t1, 8], t0
    valueProfile(t0, 4, t1)
    loadisFromInstruction(1, t1)
    storeq t0, [cfr, t1, 8]
    dispatch(5)
+
+
# Stores operand 3's value into slot operand-2 of the DirectArguments object
# in operand 1, with a write barrier; length 4.
_llint_op_put_to_arguments:
    traceExecution()
    writeBarrierOnOperands(1, 3)
    loadVariable(1, t0)
    loadi 16[PB, PC, 8], t1  # operand 2 (index), via raw byte offset 2*8
    loadisFromInstruction(3, t3)
    loadConstantOrVariable(t3, t2)
    storeq t2, DirectArguments_storage[t0, t1, 8]
    dispatch(4)
+
+
# Stores the parent (JSScope::m_next) of the scope in operand 2 into
# operand 1; length 3.
_llint_op_get_parent_scope:
    traceExecution()
    loadVariable(2, t0)
    loadp JSScope::m_next[t0], t0
    loadisFromInstruction(1, t1)
    storeq t0, [cfr, t1, 8]
    dispatch(3)
+
+
# Appends a (value, TypeLocation, structureID) record for operand 1's value
# to the VM's TypeProfilerLog; operand 2 is the TypeLocation. When the log
# fills up, the slow path flushes it. Empty values are not logged; length 6.
_llint_op_profile_type:
    traceExecution()
    loadp CodeBlock[cfr], t1
    loadp CodeBlock::m_vm[t1], t1
    # t1 is holding the pointer to the typeProfilerLog.
    loadp VM::m_typeProfilerLog[t1], t1
    # t2 is holding the pointer to the current log entry.
    loadp TypeProfilerLog::m_currentLogEntryPtr[t1], t2

    # t0 is holding the JSValue argument.
    loadisFromInstruction(1, t3)
    loadConstantOrVariable(t3, t0)

    bqeq t0, ValueEmpty, .opProfileTypeDone
    # Store the JSValue onto the log entry.
    storeq t0, TypeProfilerLog::LogEntry::value[t2]

    # Store the TypeLocation onto the log entry.
    loadpFromInstruction(2, t3)
    storep t3, TypeProfilerLog::LogEntry::location[t2]

    # Record the structure only for cells; immediates get structureID 0.
    btqz t0, tagMask, .opProfileTypeIsCell
    storei 0, TypeProfilerLog::LogEntry::structureID[t2]
    jmp .opProfileTypeSkipIsCell
.opProfileTypeIsCell:
    loadi JSCell::m_structureID[t0], t3
    storei t3, TypeProfilerLog::LogEntry::structureID[t2]
.opProfileTypeSkipIsCell:

    # Increment the current log entry.
    addp sizeof TypeProfilerLog::LogEntry, t2
    storep t2, TypeProfilerLog::m_currentLogEntryPtr[t1]

    # If the log is now full, have C++ process and clear it.
    loadp TypeProfilerLog::m_logEndPtr[t1], t1
    bpneq t2, t1, .opProfileTypeDone
    callSlowPath(_slow_path_profile_type_clear_log)

.opProfileTypeDone:
    dispatch(6)