path: root/Source/JavaScriptCore/llint
author     Simon Hausmann <simon.hausmann@nokia.com>   2012-09-10 19:10:20 +0200
committer  Simon Hausmann <simon.hausmann@nokia.com>   2012-09-10 19:10:20 +0200
commit     284837daa07b29d6a63a748544a90b1f5842ac5c (patch)
tree       ecd258180bde91fe741e0cfd2638beb3c6da7e8e /Source/JavaScriptCore/llint
parent     2e2ba8ff45915f40ed3e014101269c175f2a89a0 (diff)
download   qtwebkit-284837daa07b29d6a63a748544a90b1f5842ac5c.tar.gz
Imported WebKit commit 68645295d2e3e09af2c942f092556f06aa5f8b0d (http://svn.webkit.org/repository/webkit/trunk@128073)
New snapshot
Diffstat (limited to 'Source/JavaScriptCore/llint')
-rw-r--r--  Source/JavaScriptCore/llint/LLIntCLoop.cpp                    76
-rw-r--r--  Source/JavaScriptCore/llint/LLIntCLoop.h                      59
-rw-r--r--  Source/JavaScriptCore/llint/LLIntData.cpp                     31
-rw-r--r--  Source/JavaScriptCore/llint/LLIntData.h                       96
-rw-r--r--  Source/JavaScriptCore/llint/LLIntEntrypoints.cpp              10
-rw-r--r--  Source/JavaScriptCore/llint/LLIntExceptions.cpp                9
-rw-r--r--  Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h           33
-rw-r--r--  Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp          4
-rw-r--r--  Source/JavaScriptCore/llint/LLIntOpcode.h                     81
-rw-r--r--  Source/JavaScriptCore/llint/LLIntSlowPaths.cpp               146
-rw-r--r--  Source/JavaScriptCore/llint/LLIntSlowPaths.h                  45
-rw-r--r--  Source/JavaScriptCore/llint/LLIntThunks.cpp                    6
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter.asm           92
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter.cpp          431
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter.h             62
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm      59
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter64.asm        119
17 files changed, 1102 insertions(+), 257 deletions(-)
diff --git a/Source/JavaScriptCore/llint/LLIntCLoop.cpp b/Source/JavaScriptCore/llint/LLIntCLoop.cpp
new file mode 100644
index 000000000..14fc04930
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntCLoop.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LLIntCLoop.h"
+
+#include "Instruction.h"
+
+namespace JSC {
+
+namespace LLInt {
+
+#if ENABLE(LLINT_C_LOOP)
+
+void CLoop::initialize()
+{
+ execute(0, llint_unused, true);
+}
+
+void* CLoop::catchRoutineFor(Instruction* catchPCForInterpreter)
+{
+ return reinterpret_cast<Instruction*>(catchPCForInterpreter->u.opcode);
+}
+
+MacroAssemblerCodePtr CLoop::hostCodeEntryFor(CodeSpecializationKind kind)
+{
+ MacroAssemblerCodePtr codePtr;
+ codePtr = (kind == CodeForCall) ?
+ MacroAssemblerCodePtr::createLLIntCodePtr(llint_native_call_trampoline) :
+ MacroAssemblerCodePtr::createLLIntCodePtr(llint_native_construct_trampoline);
+ return codePtr;
+}
+
+MacroAssemblerCodePtr CLoop::jsCodeEntryWithArityCheckFor(CodeSpecializationKind kind)
+{
+ MacroAssemblerCodePtr codePtr;
+ codePtr = (kind == CodeForCall) ?
+ MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_call_arity_check) :
+ MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_construct_arity_check);
+ return codePtr;
+}
+
+MacroAssemblerCodePtr CLoop::jsCodeEntryFor(CodeSpecializationKind kind)
+{
+ MacroAssemblerCodePtr codePtr;
+ codePtr = (kind == CodeForCall) ?
+ MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_call_prologue) :
+ MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_construct_prologue);
+ return codePtr;
+}
+
+#endif // ENABLE(LLINT_C_LOOP)
+
+} } // namespace JSC::LLInt
diff --git a/Source/JavaScriptCore/llint/LLIntCLoop.h b/Source/JavaScriptCore/llint/LLIntCLoop.h
new file mode 100644
index 000000000..3a9c77b6d
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntCLoop.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LLIntCLoop_h
+#define LLIntCLoop_h
+
+#if ENABLE(LLINT_C_LOOP)
+
+#include "CodeSpecializationKind.h"
+#include "JSValue.h"
+#include "MacroAssemblerCodeRef.h"
+#include "Opcode.h"
+#include "Register.h"
+
+namespace JSC {
+
+namespace LLInt {
+
+const OpcodeID llint_unused = llint_end;
+
+class CLoop {
+public:
+ static void initialize();
+ static JSValue execute(CallFrame*, OpcodeID bootstrapOpcodeId, bool isInitializationPass = false);
+
+ static void* catchRoutineFor(Instruction* catchPCForInterpreter);
+
+ static MacroAssemblerCodePtr hostCodeEntryFor(CodeSpecializationKind);
+ static MacroAssemblerCodePtr jsCodeEntryWithArityCheckFor(CodeSpecializationKind);
+ static MacroAssemblerCodePtr jsCodeEntryFor(CodeSpecializationKind);
+};
+
+} } // namespace JSC::LLInt
+
+#endif // ENABLE(LLINT_C_LOOP)
+
+#endif // LLIntCLoop_h
diff --git a/Source/JavaScriptCore/llint/LLIntData.cpp b/Source/JavaScriptCore/llint/LLIntData.cpp
index 983a7d706..c7fd741d6 100644
--- a/Source/JavaScriptCore/llint/LLIntData.cpp
+++ b/Source/JavaScriptCore/llint/LLIntData.cpp
@@ -31,20 +31,31 @@
#include "BytecodeConventions.h"
#include "CodeType.h"
#include "Instruction.h"
-#include "LowLevelInterpreter.h"
+#include "LLIntCLoop.h"
#include "Opcode.h"
namespace JSC { namespace LLInt {
-Data::Data()
- : m_exceptionInstructions(new Instruction[maxOpcodeLength + 1])
- , m_opcodeMap(new Opcode[numOpcodeIDs])
+Instruction* Data::s_exceptionInstructions = 0;
+Opcode* Data::s_opcodeMap = 0;
+
+void initialize()
{
+ Data::s_exceptionInstructions = new Instruction[maxOpcodeLength + 1];
+ Data::s_opcodeMap = new Opcode[numOpcodeIDs];
+
+ #if ENABLE(LLINT_C_LOOP)
+ CLoop::initialize();
+
+ #else // !ENABLE(LLINT_C_LOOP)
for (int i = 0; i < maxOpcodeLength + 1; ++i)
- m_exceptionInstructions[i].u.pointer = bitwise_cast<void*>(&llint_throw_from_slow_path_trampoline);
-#define OPCODE_ENTRY(opcode, length) m_opcodeMap[opcode] = bitwise_cast<void*>(&llint_##opcode);
+ Data::s_exceptionInstructions[i].u.pointer =
+ LLInt::getCodePtr(llint_throw_from_slow_path_trampoline);
+ #define OPCODE_ENTRY(opcode, length) \
+ Data::s_opcodeMap[opcode] = LLInt::getCodePtr(llint_##opcode);
FOR_EACH_OPCODE_ID(OPCODE_ENTRY);
-#undef OPCODE_ENTRY
+ #undef OPCODE_ENTRY
+ #endif // !ENABLE(LLINT_C_LOOP)
}
#if COMPILER(CLANG)
@@ -120,12 +131,6 @@ void Data::performAssertions(JSGlobalData& globalData)
#pragma clang diagnostic pop
#endif
-Data::~Data()
-{
- delete[] m_exceptionInstructions;
- delete[] m_opcodeMap;
-}
-
} } // namespace JSC::LLInt
#endif // ENABLE(LLINT)
diff --git a/Source/JavaScriptCore/llint/LLIntData.h b/Source/JavaScriptCore/llint/LLIntData.h
index ba8daedf1..3b3b6027f 100644
--- a/Source/JavaScriptCore/llint/LLIntData.h
+++ b/Source/JavaScriptCore/llint/LLIntData.h
@@ -26,6 +26,7 @@
#ifndef LLIntData_h
#define LLIntData_h
+#include "JSValue.h"
#include "Opcode.h"
#include <wtf/Platform.h>
@@ -34,30 +35,59 @@ namespace JSC {
class JSGlobalData;
struct Instruction;
+#if ENABLE(LLINT_C_LOOP)
+typedef OpcodeID LLIntCode;
+#else
+typedef void (*LLIntCode)();
+#endif
+
namespace LLInt {
#if ENABLE(LLINT)
+
class Data {
public:
- Data();
- ~Data();
-
- void performAssertions(JSGlobalData&);
-
- Instruction* exceptionInstructions()
- {
- return m_exceptionInstructions;
- }
-
- Opcode* opcodeMap()
- {
- return m_opcodeMap;
- }
+ static void performAssertions(JSGlobalData&);
+
private:
- Instruction* m_exceptionInstructions;
- Opcode* m_opcodeMap;
+ static Instruction* s_exceptionInstructions;
+ static Opcode* s_opcodeMap;
+
+ friend void initialize();
+
+ friend Instruction* exceptionInstructions();
+ friend Opcode* opcodeMap();
+ friend Opcode getOpcode(OpcodeID);
+ friend void* getCodePtr(OpcodeID);
};
-#else // ENABLE(LLINT)
+
+void initialize();
+
+inline Instruction* exceptionInstructions()
+{
+ return Data::s_exceptionInstructions;
+}
+
+inline Opcode* opcodeMap()
+{
+ return Data::s_opcodeMap;
+}
+
+inline Opcode getOpcode(OpcodeID id)
+{
+#if ENABLE(COMPUTED_GOTO_OPCODES)
+ return Data::s_opcodeMap[id];
+#else
+ return static_cast<Opcode>(id);
+#endif
+}
+
+ALWAYS_INLINE void* getCodePtr(OpcodeID id)
+{
+ return reinterpret_cast<void*>(getOpcode(id));
+}
+
+#else // !ENABLE(LLINT)
#if COMPILER(CLANG)
#pragma clang diagnostic push
@@ -66,26 +96,30 @@ private:
class Data {
public:
- void performAssertions(JSGlobalData&) { }
-
- Instruction* exceptionInstructions()
- {
- ASSERT_NOT_REACHED();
- return 0;
- }
-
- Opcode* opcodeMap()
- {
- ASSERT_NOT_REACHED();
- return 0;
- }
+ static void performAssertions(JSGlobalData&) { }
};
#if COMPILER(CLANG)
#pragma clang diagnostic pop
#endif
-#endif // ENABLE(LLINT)
+#endif // !ENABLE(LLINT)
+
+ALWAYS_INLINE void* getOpcode(void llintOpcode())
+{
+ return bitwise_cast<void*>(llintOpcode);
+}
+
+ALWAYS_INLINE void* getCodePtr(void glueHelper())
+{
+ return bitwise_cast<void*>(glueHelper);
+}
+
+ALWAYS_INLINE void* getCodePtr(JSC::EncodedJSValue glueHelper())
+{
+ return bitwise_cast<void*>(glueHelper);
+}
+
} } // namespace JSC::LLInt
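
Note: the following standalone sketch is illustration only and not part of the imported commit; every name in it is hypothetical. It sketches the idea behind the new LLInt::getOpcode()/getCodePtr() helpers above: with computed-goto opcodes an "opcode" is a handler address looked up in the static map, otherwise it is just the numeric OpcodeID.

// Toy model of the getOpcode()/getCodePtr() split (hypothetical names).
#include <cstdint>
#include <cstdio>

enum ToyOpcodeID { toy_op_add = 0, toy_op_ret = 1, toy_numOpcodes = 2 };
typedef const void* ToyOpcode;                    // stands in for JSC::Opcode

static ToyOpcode s_toyOpcodeMap[toy_numOpcodes];  // stands in for Data::s_opcodeMap

#define TOY_COMPUTED_GOTO_OPCODES 1

inline ToyOpcode toyGetOpcode(ToyOpcodeID id)
{
#if TOY_COMPUTED_GOTO_OPCODES
    return s_toyOpcodeMap[id];                    // dispatch address from the map
#else
    return reinterpret_cast<ToyOpcode>(static_cast<std::intptr_t>(id)); // the ID itself
#endif
}

inline void* toyGetCodePtr(ToyOpcodeID id)
{
    return const_cast<void*>(toyGetOpcode(id));
}

int main()
{
    // A real interpreter fills the map with &&label addresses during a
    // one-time initialization pass; string literals stand in for them here.
    s_toyOpcodeMap[toy_op_add] = "handler:add";
    s_toyOpcodeMap[toy_op_ret] = "handler:ret";
    std::printf("%s\n", static_cast<const char*>(toyGetCodePtr(toy_op_add)));
    return 0;
}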
diff --git a/Source/JavaScriptCore/llint/LLIntEntrypoints.cpp b/Source/JavaScriptCore/llint/LLIntEntrypoints.cpp
index dd7d9433d..be79134b7 100644
--- a/Source/JavaScriptCore/llint/LLIntEntrypoints.cpp
+++ b/Source/JavaScriptCore/llint/LLIntEntrypoints.cpp
@@ -33,7 +33,7 @@
#include "JSObject.h"
#include "LLIntThunks.h"
#include "LowLevelInterpreter.h"
-#include "ScopeChain.h"
+
namespace JSC { namespace LLInt {
@@ -52,6 +52,7 @@ void getFunctionEntrypoint(JSGlobalData& globalData, CodeSpecializationKind kind
return;
}
+#if ENABLE(JIT)
if (kind == CodeForCall) {
jitCode = JITCode(globalData.getCTIStub(functionForCallEntryThunkGenerator), JITCode::InterpreterThunk);
arityCheck = globalData.getCTIStub(functionForCallArityCheckThunkGenerator).code();
@@ -61,6 +62,7 @@ void getFunctionEntrypoint(JSGlobalData& globalData, CodeSpecializationKind kind
ASSERT(kind == CodeForConstruct);
jitCode = JITCode(globalData.getCTIStub(functionForConstructEntryThunkGenerator), JITCode::InterpreterThunk);
arityCheck = globalData.getCTIStub(functionForConstructArityCheckThunkGenerator).code();
+#endif // ENABLE(JIT)
}
void getEvalEntrypoint(JSGlobalData& globalData, JITCode& jitCode)
@@ -69,8 +71,9 @@ void getEvalEntrypoint(JSGlobalData& globalData, JITCode& jitCode)
jitCode = JITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_eval_prologue), JITCode::InterpreterThunk);
return;
}
-
+#if ENABLE(JIT)
jitCode = JITCode(globalData.getCTIStub(evalEntryThunkGenerator), JITCode::InterpreterThunk);
+#endif
}
void getProgramEntrypoint(JSGlobalData& globalData, JITCode& jitCode)
@@ -79,8 +82,9 @@ void getProgramEntrypoint(JSGlobalData& globalData, JITCode& jitCode)
jitCode = JITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_program_prologue), JITCode::InterpreterThunk);
return;
}
-
+#if ENABLE(JIT)
jitCode = JITCode(globalData.getCTIStub(programEntryThunkGenerator), JITCode::InterpreterThunk);
+#endif
}
} } // namespace JSC::LLInt
diff --git a/Source/JavaScriptCore/llint/LLIntExceptions.cpp b/Source/JavaScriptCore/llint/LLIntExceptions.cpp
index a915c42e3..80ca732ad 100644
--- a/Source/JavaScriptCore/llint/LLIntExceptions.cpp
+++ b/Source/JavaScriptCore/llint/LLIntExceptions.cpp
@@ -60,7 +60,8 @@ void interpreterThrowInCaller(ExecState* exec, ReturnAddressPtr pc)
Instruction* returnToThrowForThrownException(ExecState* exec)
{
- return exec->globalData().llintData.exceptionInstructions();
+ UNUSED_PARAM(exec);
+ return LLInt::exceptionInstructions();
}
Instruction* returnToThrow(ExecState* exec, Instruction* pc)
@@ -73,7 +74,7 @@ Instruction* returnToThrow(ExecState* exec, Instruction* pc)
fixupPCforExceptionIfNeeded(exec);
genericThrow(globalData, exec, globalData->exception, pc - exec->codeBlock()->instructions().begin());
- return globalData->llintData.exceptionInstructions();
+ return LLInt::exceptionInstructions();
}
void* callToThrow(ExecState* exec, Instruction* pc)
@@ -85,8 +86,8 @@ void* callToThrow(ExecState* exec, Instruction* pc)
#endif
fixupPCforExceptionIfNeeded(exec);
genericThrow(globalData, exec, globalData->exception, pc - exec->codeBlock()->instructions().begin());
-
- return bitwise_cast<void*>(&llint_throw_during_call_trampoline);
+
+ return LLInt::getCodePtr(llint_throw_during_call_trampoline);
}
} } // namespace JSC::LLInt
diff --git a/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h b/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
index 9a1539576..63488aa0b 100644
--- a/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
+++ b/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
@@ -31,6 +31,17 @@
#include <wtf/InlineASM.h>
#include <wtf/Platform.h>
+
+#if ENABLE(LLINT_C_LOOP)
+#define OFFLINE_ASM_C_LOOP 1
+#define OFFLINE_ASM_X86 0
+#define OFFLINE_ASM_ARMv7 0
+#define OFFLINE_ASM_X86_64 0
+
+#else // !ENABLE(LLINT_C_LOOP)
+
+#define OFFLINE_ASM_C_LOOP 0
+
#if CPU(X86)
#define OFFLINE_ASM_X86 1
#else
@@ -49,6 +60,8 @@
#define OFFLINE_ASM_X86_64 0
#endif
+#endif // !ENABLE(LLINT_C_LOOP)
+
#if USE(JSVALUE64)
#define OFFLINE_ASM_JSVALUE64 1
#else
@@ -91,24 +104,4 @@
#define OFFLINE_ASM_VALUE_PROFILER 0
#endif
-// These are for building an interpreter from generated assembly code:
-#define OFFLINE_ASM_BEGIN asm (
-#define OFFLINE_ASM_END );
-
-#if CPU(ARM_THUMB2)
-#define OFFLINE_ASM_GLOBAL_LABEL(label) \
- ".globl " SYMBOL_STRING(label) "\n" \
- HIDE_SYMBOL(label) "\n" \
- ".thumb\n" \
- ".thumb_func " THUMB_FUNC_PARAM(label) "\n" \
- SYMBOL_STRING(label) ":\n"
-#else
-#define OFFLINE_ASM_GLOBAL_LABEL(label) \
- ".globl " SYMBOL_STRING(label) "\n" \
- HIDE_SYMBOL(label) "\n" \
- SYMBOL_STRING(label) ":\n"
-#endif
-
-#define OFFLINE_ASM_LOCAL_LABEL(label) LOCAL_LABEL_STRING(label) ":\n"
-
#endif // LLIntOfflineAsmConfig_h
diff --git a/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
index cbb4258d0..3ed6d6d2f 100644
--- a/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
+++ b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
@@ -45,7 +45,7 @@
#include "LLIntOfflineAsmConfig.h"
#include "MarkedSpace.h"
#include "RegisterFile.h"
-#include "ScopeChain.h"
+
#include "Structure.h"
#include "StructureChain.h"
#include "ValueProfile.h"
@@ -62,7 +62,7 @@ public:
const unsigned* LLIntOffsetsExtractor::dummy()
{
-#if ENABLE(JIT)
+#if ENABLE(LLINT)
// This is a file generated by offlineasm/generate_offsets_extractor.rb, and contains code
// to create a table of offsets, sizes, and a header identifying what combination of
// Platform.h macros we have set. We include it inside of a method on LLIntOffsetsExtractor
diff --git a/Source/JavaScriptCore/llint/LLIntOpcode.h b/Source/JavaScriptCore/llint/LLIntOpcode.h
new file mode 100644
index 000000000..3588f4ff3
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntOpcode.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LLIntOpcode_h
+#define LLIntOpcode_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(LLINT)
+
+#if ENABLE(LLINT_C_LOOP)
+
+#define FOR_EACH_LLINT_NOJIT_NATIVE_HELPER(macro) \
+ macro(getHostCallReturnValue, 1) \
+ macro(ctiOpThrowNotCaught, 1)
+
+#else // !ENABLE(LLINT_C_LOOP)
+
+#define FOR_EACH_LLINT_NOJIT_NATIVE_HELPER(macro) \
+ // Nothing to do here. Use the JIT impl instead.
+
+#endif // !ENABLE(LLINT_C_LOOP)
+
+
+#define FOR_EACH_LLINT_NATIVE_HELPER(macro) \
+ FOR_EACH_LLINT_NOJIT_NATIVE_HELPER(macro) \
+ \
+ macro(llint_begin, 1) \
+ \
+ macro(llint_program_prologue, 1) \
+ macro(llint_eval_prologue, 1) \
+ macro(llint_function_for_call_prologue, 1) \
+ macro(llint_function_for_construct_prologue, 1) \
+ macro(llint_function_for_call_arity_check, 1) \
+ macro(llint_function_for_construct_arity_check, 1) \
+ macro(llint_generic_return_point, 1) \
+ macro(llint_throw_from_slow_path_trampoline, 1) \
+ macro(llint_throw_during_call_trampoline, 1) \
+ \
+ /* Native call trampolines */ \
+ macro(llint_native_call_trampoline, 1) \
+ macro(llint_native_construct_trampoline, 1) \
+ \
+ macro(llint_end, 1)
+
+
+#if ENABLE(LLINT_C_LOOP)
+#define FOR_EACH_LLINT_OPCODE_EXTENSION(macro) FOR_EACH_LLINT_NATIVE_HELPER(macro)
+#else
+#define FOR_EACH_LLINT_OPCODE_EXTENSION(macro) // Nothing to add.
+#endif
+
+#else // !ENABLE(LLINT)
+
+#define FOR_EACH_LLINT_OPCODE_EXTENSION(macro) // Nothing to add.
+
+#endif // !ENABLE(LLINT)
+
+#endif // LLIntOpcode_h
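
Note: the following standalone sketch is illustration only and not part of the imported commit; the names are hypothetical. It shows the X-macro pattern that FOR_EACH_LLINT_NATIVE_HELPER above relies on: one list macro is expanded with different per-entry macros to generate an enum of IDs and a parallel table of names.

// Minimal X-macro demo in the style of FOR_EACH_LLINT_NATIVE_HELPER.
#include <cstdio>

#define FOR_EACH_TOY_HELPER(macro) \
    macro(toy_prologue, 1) \
    macro(toy_epilogue, 1)

// Expansion 1: an enum of helper IDs.
#define TOY_ENUM_ENTRY(name, length) name,
enum ToyHelperID { FOR_EACH_TOY_HELPER(TOY_ENUM_ENTRY) toy_numHelpers };
#undef TOY_ENUM_ENTRY

// Expansion 2: a parallel table of printable names.
#define TOY_NAME_ENTRY(name, length) #name,
static const char* const toyHelperNames[] = { FOR_EACH_TOY_HELPER(TOY_NAME_ENTRY) };
#undef TOY_NAME_ENTRY

int main()
{
    for (int i = 0; i < toy_numHelpers; ++i)
        std::printf("%d -> %s\n", i, toyHelperNames[i]);
    return 0;
}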
diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
index eef54ac7b..1a34a09d4 100644
--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
@@ -38,10 +38,11 @@
#include "JITDriver.h"
#include "JSActivation.h"
#include "JSGlobalObjectFunctions.h"
+#include "JSNameScope.h"
#include "JSPropertyNameIterator.h"
-#include "JSStaticScopeObject.h"
#include "JSString.h"
#include "JSValue.h"
+#include "JSWithScope.h"
#include "LLIntCommon.h"
#include "LLIntExceptions.h"
#include "LowLevelInterpreter.h"
@@ -236,7 +237,7 @@ LLINT_SLOW_PATH_DECL(trace)
exec,
static_cast<intptr_t>(pc - exec->codeBlock()->instructions().begin()),
opcodeNames[exec->globalData().interpreter->getOpcodeID(pc[0].u.opcode)],
- exec->scopeChain());
+ exec->scope());
if (exec->globalData().interpreter->getOpcodeID(pc[0].u.opcode) == op_ret) {
dataLog("Will be returning to %p\n", exec->returnPC().value());
dataLog("The new cfr will be %p\n", exec->callerFrame());
@@ -255,6 +256,7 @@ LLINT_SLOW_PATH_DECL(special_trace)
LLINT_END_IMPL();
}
+#if ENABLE(JIT)
inline bool shouldJIT(ExecState* exec)
{
// You can modify this to turn off JITting without rebuilding the world.
@@ -390,6 +392,7 @@ LLINT_SLOW_PATH_DECL(replace)
codeBlock->dontJITAnytimeSoon();
LLINT_END_IMPL();
}
+#endif // ENABLE(JIT)
LLINT_SLOW_PATH_DECL(register_file_check)
{
@@ -447,7 +450,7 @@ LLINT_SLOW_PATH_DECL(slow_path_create_activation)
dataLog("Creating an activation, exec = %p!\n", exec);
#endif
JSActivation* activation = JSActivation::create(globalData, exec, static_cast<FunctionExecutable*>(exec->codeBlock()->ownerExecutable()));
- exec->setScopeChain(exec->scopeChain()->push(activation));
+ exec->setScope(activation);
LLINT_RETURN(JSValue(activation));
}
@@ -757,7 +760,7 @@ LLINT_SLOW_PATH_DECL(slow_path_in)
LLINT_SLOW_PATH_DECL(slow_path_resolve)
{
LLINT_BEGIN();
- LLINT_RETURN_PROFILED(op_resolve, CommonSlowPaths::opResolve(exec, exec->codeBlock()->identifier(pc[2].u.operand)));
+ LLINT_RETURN_PROFILED(op_resolve, JSScope::resolve(exec, exec->codeBlock()->identifier(pc[2].u.operand)));
}
LLINT_SLOW_PATH_DECL(slow_path_resolve_skip)
@@ -765,57 +768,31 @@ LLINT_SLOW_PATH_DECL(slow_path_resolve_skip)
LLINT_BEGIN();
LLINT_RETURN_PROFILED(
op_resolve_skip,
- CommonSlowPaths::opResolveSkip(
+ JSScope::resolveSkip(
exec,
exec->codeBlock()->identifier(pc[2].u.operand),
pc[3].u.operand));
}
-static JSValue resolveGlobal(ExecState* exec, Instruction* pc)
-{
- CodeBlock* codeBlock = exec->codeBlock();
- JSGlobalObject* globalObject = codeBlock->globalObject();
- ASSERT(globalObject->isGlobalObject());
- int property = pc[2].u.operand;
- Structure* structure = pc[3].u.structure.get();
-
- ASSERT_UNUSED(structure, structure != globalObject->structure());
-
- Identifier& ident = codeBlock->identifier(property);
- PropertySlot slot(globalObject);
-
- if (globalObject->getPropertySlot(exec, ident, slot)) {
- JSValue result = slot.getValue(exec, ident);
- if (slot.isCacheableValue() && !globalObject->structure()->isUncacheableDictionary()
- && slot.slotBase() == globalObject) {
- pc[3].u.structure.set(
- exec->globalData(), codeBlock->ownerExecutable(), globalObject->structure());
- pc[4] = slot.cachedOffset();
- }
-
- return result;
- }
-
- exec->globalData().exception = createUndefinedVariableError(exec, ident);
- return JSValue();
-}
-
LLINT_SLOW_PATH_DECL(slow_path_resolve_global)
{
LLINT_BEGIN();
- LLINT_RETURN_PROFILED(op_resolve_global, resolveGlobal(exec, pc));
+ Identifier& ident = exec->codeBlock()->identifier(pc[2].u.operand);
+ LLINT_RETURN_PROFILED(op_resolve_global, JSScope::resolveGlobal(exec, ident, exec->lexicalGlobalObject(), &pc[3].u.structure, &pc[4].u.operand));
}
LLINT_SLOW_PATH_DECL(slow_path_resolve_global_dynamic)
{
+ // FIXME: <rdar://problem/12185487> LLInt resolve_global_dynamic doesn't check intervening scopes for modification
LLINT_BEGIN();
- LLINT_RETURN_PROFILED(op_resolve_global_dynamic, resolveGlobal(exec, pc));
+ Identifier& ident = exec->codeBlock()->identifier(pc[2].u.operand);
+ LLINT_RETURN_PROFILED(op_resolve_global_dynamic, JSScope::resolveGlobal(exec, ident, exec->lexicalGlobalObject(), &pc[3].u.structure, &pc[4].u.operand));
}
LLINT_SLOW_PATH_DECL(slow_path_resolve_for_resolve_global_dynamic)
{
LLINT_BEGIN();
- LLINT_RETURN_PROFILED(op_resolve_global_dynamic, CommonSlowPaths::opResolve(exec, exec->codeBlock()->identifier(pc[2].u.operand)));
+ LLINT_RETURN_PROFILED(op_resolve_global_dynamic, JSScope::resolve(exec, exec->codeBlock()->identifier(pc[2].u.operand)));
}
LLINT_SLOW_PATH_DECL(slow_path_resolve_base)
@@ -823,13 +800,12 @@ LLINT_SLOW_PATH_DECL(slow_path_resolve_base)
LLINT_BEGIN();
Identifier& ident = exec->codeBlock()->identifier(pc[2].u.operand);
if (pc[3].u.operand) {
- JSValue base = JSC::resolveBase(exec, ident, exec->scopeChain(), true);
- if (!base)
- LLINT_THROW(createErrorForInvalidGlobalAssignment(exec, ident.ustring()));
- LLINT_RETURN(base);
+ if (JSValue result = JSScope::resolveBase(exec, ident, true))
+ LLINT_RETURN(result);
+ LLINT_THROW(globalData.exception);
}
-
- LLINT_RETURN_PROFILED(op_resolve_base, JSC::resolveBase(exec, ident, exec->scopeChain(), false));
+
+ LLINT_RETURN_PROFILED(op_resolve_base, JSScope::resolveBase(exec, ident, false));
}
LLINT_SLOW_PATH_DECL(slow_path_ensure_property_exists)
@@ -839,14 +815,14 @@ LLINT_SLOW_PATH_DECL(slow_path_ensure_property_exists)
PropertySlot slot(object);
Identifier& ident = exec->codeBlock()->identifier(pc[2].u.operand);
if (!object->getPropertySlot(exec, ident, slot))
- LLINT_THROW(createErrorForInvalidGlobalAssignment(exec, ident.ustring()));
+ LLINT_THROW(createErrorForInvalidGlobalAssignment(exec, ident.string()));
LLINT_END();
}
LLINT_SLOW_PATH_DECL(slow_path_resolve_with_base)
{
LLINT_BEGIN();
- JSValue result = CommonSlowPaths::opResolveWithBase(exec, exec->codeBlock()->identifier(pc[3].u.operand), LLINT_OP(1));
+ JSValue result = JSScope::resolveWithBase(exec, exec->codeBlock()->identifier(pc[3].u.operand), &LLINT_OP(1));
LLINT_CHECK_EXCEPTION();
LLINT_OP(2) = result;
// FIXME: technically should have profiling, but we don't do it because the DFG won't use it.
@@ -856,7 +832,7 @@ LLINT_SLOW_PATH_DECL(slow_path_resolve_with_base)
LLINT_SLOW_PATH_DECL(slow_path_resolve_with_this)
{
LLINT_BEGIN();
- JSValue result = CommonSlowPaths::opResolveWithThis(exec, exec->codeBlock()->identifier(pc[3].u.operand), LLINT_OP(1));
+ JSValue result = JSScope::resolveWithThis(exec, exec->codeBlock()->identifier(pc[3].u.operand), &LLINT_OP(1));
LLINT_CHECK_EXCEPTION();
LLINT_OP(2) = result;
// FIXME: technically should have profiling, but we don't do it because the DFG won't use it.
@@ -897,10 +873,10 @@ LLINT_SLOW_PATH_DECL(slow_path_get_by_id)
pc[4].u.structure.set(
globalData, codeBlock->ownerExecutable(), structure);
if (isInlineOffset(slot.cachedOffset())) {
- pc[0].u.opcode = bitwise_cast<void*>(&llint_op_get_by_id);
+ pc[0].u.opcode = LLInt::getOpcode(llint_op_get_by_id);
pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage();
} else {
- pc[0].u.opcode = bitwise_cast<void*>(&llint_op_get_by_id_out_of_line);
+ pc[0].u.opcode = LLInt::getOpcode(llint_op_get_by_id_out_of_line);
pc[5].u.operand = offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue);
}
}
@@ -953,7 +929,7 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_id)
// This is needed because some of the methods we call
// below may GC.
- pc[0].u.opcode = bitwise_cast<void*>(&llint_op_put_by_id);
+ pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id);
normalizePrototypeChain(exec, baseCell);
@@ -973,24 +949,24 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_id)
if (pc[8].u.operand) {
if (isInlineOffset(slot.cachedOffset()))
- pc[0].u.opcode = bitwise_cast<void*>(&llint_op_put_by_id_transition_direct);
+ pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_direct);
else
- pc[0].u.opcode = bitwise_cast<void*>(&llint_op_put_by_id_transition_direct_out_of_line);
+ pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_direct_out_of_line);
} else {
if (isInlineOffset(slot.cachedOffset()))
- pc[0].u.opcode = bitwise_cast<void*>(&llint_op_put_by_id_transition_normal);
+ pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_normal);
else
- pc[0].u.opcode = bitwise_cast<void*>(&llint_op_put_by_id_transition_normal_out_of_line);
+ pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_normal_out_of_line);
}
}
} else {
pc[4].u.structure.set(
globalData, codeBlock->ownerExecutable(), structure);
if (isInlineOffset(slot.cachedOffset())) {
- pc[0].u.opcode = bitwise_cast<void*>(&llint_op_put_by_id);
+ pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id);
pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage();
} else {
- pc[0].u.opcode = bitwise_cast<void*>(&llint_op_put_by_id_out_of_line);
+ pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_out_of_line);
pc[5].u.operand = offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue);
}
}
@@ -1128,7 +1104,7 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_index)
LLINT_BEGIN();
JSValue arrayValue = LLINT_OP_C(1).jsValue();
ASSERT(isJSArray(arrayValue));
- asArray(arrayValue)->putDirectIndex(exec, pc[2].u.operand, LLINT_OP_C(3).jsValue(), false);
+ asArray(arrayValue)->putDirectIndex(exec, pc[2].u.operand, LLINT_OP_C(3).jsValue());
LLINT_END();
}
@@ -1162,10 +1138,10 @@ LLINT_SLOW_PATH_DECL(slow_path_jmp_scopes)
{
LLINT_BEGIN();
unsigned count = pc[1].u.operand;
- ScopeChainNode* tmp = exec->scopeChain();
+ JSScope* tmp = exec->scope();
while (count--)
- tmp = tmp->pop();
- exec->setScopeChain(tmp);
+ tmp = tmp->next();
+ exec->setScope(tmp);
pc += pc[2].u.operand;
LLINT_END();
}
@@ -1284,7 +1260,7 @@ LLINT_SLOW_PATH_DECL(slow_path_new_func)
#if LLINT_SLOW_PATH_TRACING
dataLog("Creating function!\n");
#endif
- LLINT_RETURN(codeBlock->functionDecl(pc[2].u.operand)->make(exec, exec->scopeChain()));
+ LLINT_RETURN(JSFunction::create(exec, codeBlock->functionDecl(pc[2].u.operand), exec->scope()));
}
LLINT_SLOW_PATH_DECL(slow_path_new_func_exp)
@@ -1292,12 +1268,7 @@ LLINT_SLOW_PATH_DECL(slow_path_new_func_exp)
LLINT_BEGIN();
CodeBlock* codeBlock = exec->codeBlock();
FunctionExecutable* function = codeBlock->functionExpr(pc[2].u.operand);
- JSFunction* func = function->make(exec, exec->scopeChain());
-
- if (!function->name().isNull()) {
- JSStaticScopeObject* functionScopeObject = JSStaticScopeObject::create(exec, function->name(), func, ReadOnly | DontDelete);
- func->setScope(globalData, func->scope()->push(functionScopeObject));
- }
+ JSFunction* func = JSFunction::create(exec, function, exec->scope());
LLINT_RETURN(func);
}
@@ -1307,7 +1278,7 @@ static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc,
ExecState* exec = execCallee->callerFrame();
JSGlobalData& globalData = exec->globalData();
- execCallee->setScopeChain(exec->scopeChain());
+ execCallee->setScope(exec->scope());
execCallee->setCodeBlock(0);
execCallee->clearReturnPC();
@@ -1322,7 +1293,7 @@ static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc,
execCallee->setCallee(asObject(callee));
globalData.hostCallReturnValue = JSValue::decode(callData.native.function(execCallee));
- LLINT_CALL_RETURN(execCallee, pc, reinterpret_cast<void*>(getHostCallReturnValue));
+ LLINT_CALL_RETURN(execCallee, pc, LLInt::getCodePtr(getHostCallReturnValue));
}
#if LLINT_SLOW_PATH_TRACING
@@ -1345,7 +1316,7 @@ static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc,
execCallee->setCallee(asObject(callee));
globalData.hostCallReturnValue = JSValue::decode(constructData.native.function(execCallee));
- LLINT_CALL_RETURN(execCallee, pc, reinterpret_cast<void*>(getHostCallReturnValue));
+ LLINT_CALL_RETURN(execCallee, pc, LLInt::getCodePtr(getHostCallReturnValue));
}
#if LLINT_SLOW_PATH_TRACING
@@ -1367,15 +1338,15 @@ inline SlowPathReturnType setUpCall(ExecState* execCallee, Instruction* pc, Code
return handleHostCall(execCallee, pc, calleeAsValue, kind);
JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell);
- ScopeChainNode* scope = callee->scopeUnchecked();
- JSGlobalData& globalData = *scope->globalData;
- execCallee->setScopeChain(scope);
+ JSScope* scope = callee->scopeUnchecked();
+ JSGlobalData& globalData = *scope->globalData();
+ execCallee->setScope(scope);
ExecutableBase* executable = callee->executable();
MacroAssemblerCodePtr codePtr;
CodeBlock* codeBlock = 0;
if (executable->isHostFunction())
- codePtr = executable->generatedJITCodeFor(kind).addressForCall();
+ codePtr = executable->hostCodeEntryFor(kind);
else {
FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
JSObject* error = functionExecutable->compileFor(execCallee, callee->scope(), kind);
@@ -1384,9 +1355,9 @@ inline SlowPathReturnType setUpCall(ExecState* execCallee, Instruction* pc, Code
codeBlock = &functionExecutable->generatedBytecodeFor(kind);
ASSERT(codeBlock);
if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()))
- codePtr = functionExecutable->generatedJITCodeWithArityCheckFor(kind);
+ codePtr = functionExecutable->jsCodeWithArityCheckEntryFor(kind);
else
- codePtr = functionExecutable->generatedJITCodeFor(kind).addressForCall();
+ codePtr = functionExecutable->jsCodeEntryFor(kind);
}
if (callLinkInfo) {
@@ -1399,7 +1370,7 @@ inline SlowPathReturnType setUpCall(ExecState* execCallee, Instruction* pc, Code
if (codeBlock)
codeBlock->linkIncomingCall(callLinkInfo);
}
-
+
LLINT_CALL_RETURN(execCallee, pc, codePtr.executableAddress());
}
@@ -1467,8 +1438,8 @@ LLINT_SLOW_PATH_DECL(slow_path_call_eval)
execCallee->setArgumentCountIncludingThis(pc[2].u.operand);
execCallee->setCallerFrame(exec);
execCallee->uncheckedR(RegisterFile::Callee) = calleeAsValue;
- execCallee->setScopeChain(exec->scopeChain());
- execCallee->setReturnPC(bitwise_cast<Instruction*>(&llint_generic_return_point));
+ execCallee->setScope(exec->scope());
+ execCallee->setReturnPC(LLInt::getCodePtr(llint_generic_return_point));
execCallee->setCodeBlock(0);
exec->setCurrentVPC(pc + OPCODE_LENGTH(op_call_eval));
@@ -1476,7 +1447,7 @@ LLINT_SLOW_PATH_DECL(slow_path_call_eval)
return setUpCall(execCallee, pc, CodeForCall, calleeAsValue);
globalData.hostCallReturnValue = eval(execCallee);
- LLINT_CALL_RETURN(execCallee, pc, reinterpret_cast<void*>(getHostCallReturnValue));
+ LLINT_CALL_RETURN(execCallee, pc, LLInt::getCodePtr(getHostCallReturnValue));
}
LLINT_SLOW_PATH_DECL(slow_path_tear_off_activation)
@@ -1554,15 +1525,14 @@ LLINT_SLOW_PATH_DECL(slow_path_next_pname)
LLINT_END();
}
-LLINT_SLOW_PATH_DECL(slow_path_push_scope)
+LLINT_SLOW_PATH_DECL(slow_path_push_with_scope)
{
LLINT_BEGIN();
- JSValue v = LLINT_OP(1).jsValue();
+ JSValue v = LLINT_OP_C(1).jsValue();
JSObject* o = v.toObject(exec);
LLINT_CHECK_EXCEPTION();
- LLINT_OP(1) = o;
- exec->setScopeChain(exec->scopeChain()->push(o));
+ exec->setScope(JSWithScope::create(exec, o));
LLINT_END();
}
@@ -1570,17 +1540,17 @@ LLINT_SLOW_PATH_DECL(slow_path_push_scope)
LLINT_SLOW_PATH_DECL(slow_path_pop_scope)
{
LLINT_BEGIN();
- exec->setScopeChain(exec->scopeChain()->pop());
+ exec->setScope(exec->scope()->next());
LLINT_END();
}
-LLINT_SLOW_PATH_DECL(slow_path_push_new_scope)
+LLINT_SLOW_PATH_DECL(slow_path_push_name_scope)
{
LLINT_BEGIN();
CodeBlock* codeBlock = exec->codeBlock();
- JSObject* scope = JSStaticScopeObject::create(exec, codeBlock->identifier(pc[2].u.operand), LLINT_OP(3).jsValue(), DontDelete);
- exec->setScopeChain(exec->scopeChain()->push(scope));
- LLINT_RETURN(scope);
+ JSNameScope* scope = JSNameScope::create(exec, codeBlock->identifier(pc[1].u.operand), LLINT_OP(2).jsValue(), pc[3].u.operand);
+ exec->setScope(scope);
+ LLINT_END();
}
LLINT_SLOW_PATH_DECL(slow_path_throw)
diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.h b/Source/JavaScriptCore/llint/LLIntSlowPaths.h
index 2e069d073..fe897d4a4 100644
--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.h
+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.h
@@ -44,33 +44,50 @@ namespace LLInt {
// warnings, or worse, a change in the ABI used to return these types.
struct SlowPathReturnType {
void* a;
- void* b;
+ ExecState* b;
};
-inline SlowPathReturnType encodeResult(void* a, void* b)
+inline SlowPathReturnType encodeResult(void* a, ExecState* b)
{
SlowPathReturnType result;
result.a = a;
result.b = b;
return result;
}
-#else
+
+inline void decodeResult(SlowPathReturnType result, void*& a, ExecState*& b)
+{
+ a = result.a;
+ b = result.b;
+}
+
+#else // USE(JSVALUE32_64)
typedef int64_t SlowPathReturnType;
-inline SlowPathReturnType encodeResult(void* a, void* b)
+typedef union {
+ struct {
+ void* a;
+ ExecState* b;
+ } pair;
+ int64_t i;
+} SlowPathReturnTypeEncoding;
+
+inline SlowPathReturnType encodeResult(void* a, ExecState* b)
{
- union {
- struct {
- void* a;
- void* b;
- } pair;
- int64_t i;
- } u;
+ SlowPathReturnTypeEncoding u;
u.pair.a = a;
u.pair.b = b;
return u.i;
}
-#endif
+
+inline void decodeResult(SlowPathReturnType result, void*& a, ExecState*& b)
+{
+ SlowPathReturnTypeEncoding u;
+ u.i = result;
+ a = u.pair.a;
+ b = u.pair.b;
+}
+#endif // USE(JSVALUE32_64)
extern "C" SlowPathReturnType llint_trace_operand(ExecState*, Instruction*, int fromWhere, int operand);
extern "C" SlowPathReturnType llint_trace_value(ExecState*, Instruction*, int fromWhere, int operand);
@@ -185,9 +202,9 @@ LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_strcat);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_to_primitive);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_pnames);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_next_pname);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_push_scope);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_push_with_scope);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_pop_scope);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_push_new_scope);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_push_name_scope);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_throw);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_throw_reference_error);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_debug);
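
Note: the following standalone sketch is illustration only and not part of the imported commit. On JSVALUE32_64 builds the slow path above returns two pointers as a single int64_t by punning through a union, and decodeResult() unpacks them the same way. The sketch repeats that round trip with plain uint32_t values (hypothetical names) so it also runs on 64-bit hosts; the union punning mirrors what the real code does and is accepted by the compilers WebKit targets.

// Round-trip demo of the 32-bit SlowPathReturnType encoding scheme.
#include <cassert>
#include <cstdint>

union ToyPairEncoding {
    struct { uint32_t a; uint32_t b; } pair;
    int64_t i;
};

inline int64_t toyEncodeResult(uint32_t a, uint32_t b)
{
    ToyPairEncoding u;
    u.pair.a = a;
    u.pair.b = b;
    return u.i;          // both halves travel back in one 64-bit return value
}

inline void toyDecodeResult(int64_t encoded, uint32_t& a, uint32_t& b)
{
    ToyPairEncoding u;
    u.i = encoded;
    a = u.pair.a;
    b = u.pair.b;
}

int main()
{
    uint32_t a = 0, b = 0;
    toyDecodeResult(toyEncodeResult(0xbbadbeefu, 42u), a, b);
    assert(a == 0xbbadbeefu && b == 42u);
    return 0;
}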
diff --git a/Source/JavaScriptCore/llint/LLIntThunks.cpp b/Source/JavaScriptCore/llint/LLIntThunks.cpp
index 6a6a579a3..ef19c766d 100644
--- a/Source/JavaScriptCore/llint/LLIntThunks.cpp
+++ b/Source/JavaScriptCore/llint/LLIntThunks.cpp
@@ -32,10 +32,12 @@
#include "JSObject.h"
#include "LinkBuffer.h"
#include "LowLevelInterpreter.h"
-#include "ScopeChain.h"
+
namespace JSC { namespace LLInt {
+#if !ENABLE(LLINT_C_LOOP)
+
static MacroAssemblerCodeRef generateThunkWithJumpTo(JSGlobalData* globalData, void (*target)(), const char *thunkKind)
{
JSInterfaceJIT jit;
@@ -78,6 +80,8 @@ MacroAssemblerCodeRef programEntryThunkGenerator(JSGlobalData* globalData)
return generateThunkWithJumpTo(globalData, llint_program_prologue, "program");
}
+#endif // !ENABLE(LLINT_C_LOOP)
+
} } // namespace JSC::LLInt
#endif // ENABLE(LLINT)
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
index 10a5aaeab..db4b71dfd 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
@@ -110,9 +110,13 @@ end
# Some common utilities.
macro crash()
- storei t0, 0xbbadbeef[]
- move 0, t0
- call t0
+ if C_LOOP
+ cloopCrash
+ else
+ storei t0, 0xbbadbeef[]
+ move 0, t0
+ call t0
+ end
end
macro assert(assertion)
@@ -124,7 +128,10 @@ macro assert(assertion)
end
macro preserveReturnAddressAfterCall(destinationRegister)
- if ARMv7
+ if C_LOOP
+ # In our case, we're only preserving the bytecode vPC.
+ move lr, destinationRegister
+ elsif ARMv7
move lr, destinationRegister
elsif X86 or X86_64
pop destinationRegister
@@ -134,7 +141,10 @@ macro preserveReturnAddressAfterCall(destinationRegister)
end
macro restoreReturnAddressBeforeReturn(sourceRegister)
- if ARMv7
+ if C_LOOP
+ # In our case, we're only restoring the bytecode vPC.
+ move sourceRegister, lr
+ elsif ARMv7
move sourceRegister, lr
elsif X86 or X86_64
push sourceRegister
@@ -149,13 +159,26 @@ macro traceExecution()
end
end
+macro callTargetFunction(callLinkInfo)
+ if C_LOOP
+ cloopCallJSFunction LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
+ else
+ call LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
+ dispatchAfterCall()
+ end
+end
+
macro slowPathForCall(advance, slowPath)
callCallSlowPath(
advance,
slowPath,
macro (callee)
- call callee
- dispatchAfterCall()
+ if C_LOOP
+ cloopCallJSFunction callee
+ else
+ call callee
+ dispatchAfterCall()
+ end
end)
end
@@ -532,7 +555,12 @@ _llint_op_jmp_scopes:
_llint_op_loop_if_true:
- nop
+ traceExecution()
+ jumpTrueOrFalse(
+ macro (value, target) btinz value, target end,
+ _llint_slow_path_jtrue)
+
+
_llint_op_jtrue:
traceExecution()
jumpTrueOrFalse(
@@ -541,7 +569,12 @@ _llint_op_jtrue:
_llint_op_loop_if_false:
- nop
+ traceExecution()
+ jumpTrueOrFalse(
+ macro (value, target) btiz value, target end,
+ _llint_slow_path_jfalse)
+
+
_llint_op_jfalse:
traceExecution()
jumpTrueOrFalse(
@@ -550,7 +583,13 @@ _llint_op_jfalse:
_llint_op_loop_if_less:
- nop
+ traceExecution()
+ compare(
+ macro (left, right, target) bilt left, right, target end,
+ macro (left, right, target) bdlt left, right, target end,
+ _llint_slow_path_jless)
+
+
_llint_op_jless:
traceExecution()
compare(
@@ -568,7 +607,13 @@ _llint_op_jnless:
_llint_op_loop_if_greater:
- nop
+ traceExecution()
+ compare(
+ macro (left, right, target) bigt left, right, target end,
+ macro (left, right, target) bdgt left, right, target end,
+ _llint_slow_path_jgreater)
+
+
_llint_op_jgreater:
traceExecution()
compare(
@@ -586,7 +631,13 @@ _llint_op_jngreater:
_llint_op_loop_if_lesseq:
- nop
+ traceExecution()
+ compare(
+ macro (left, right, target) bilteq left, right, target end,
+ macro (left, right, target) bdlteq left, right, target end,
+ _llint_slow_path_jlesseq)
+
+
_llint_op_jlesseq:
traceExecution()
compare(
@@ -604,7 +655,13 @@ _llint_op_jnlesseq:
_llint_op_loop_if_greatereq:
- nop
+ traceExecution()
+ compare(
+ macro (left, right, target) bigteq left, right, target end,
+ macro (left, right, target) bdgteq left, right, target end,
+ _llint_slow_path_jgreatereq)
+
+
_llint_op_jgreatereq:
traceExecution()
compare(
@@ -641,6 +698,7 @@ _llint_op_new_func_exp:
_llint_op_call:
traceExecution()
+ arrayProfileForCall()
doCall(_llint_slow_path_call)
@@ -715,9 +773,9 @@ _llint_op_get_pnames:
dispatch(0) # The slow_path either advances the PC or jumps us to somewhere else.
-_llint_op_push_scope:
+_llint_op_push_with_scope:
traceExecution()
- callSlowPath(_llint_slow_path_push_scope)
+ callSlowPath(_llint_slow_path_push_with_scope)
dispatch(2)
@@ -727,9 +785,9 @@ _llint_op_pop_scope:
dispatch(1)
-_llint_op_push_new_scope:
+_llint_op_push_name_scope:
traceExecution()
- callSlowPath(_llint_slow_path_push_new_scope)
+ callSlowPath(_llint_slow_path_push_name_scope)
dispatch(4)
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
index b95a50082..ebfdadfdb 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
@@ -31,8 +31,439 @@
#include "LLIntOfflineAsmConfig.h"
#include <wtf/InlineASM.h>
+#if ENABLE(LLINT_C_LOOP)
+#include "CodeBlock.h"
+#include "LLIntCLoop.h"
+#include "LLIntSlowPaths.h"
+#include "VMInspector.h"
+#include <wtf/Assertions.h>
+#include <wtf/MathExtras.h>
+
+using namespace JSC::LLInt;
+
+// LLInt C Loop opcodes
+// ====================
+// In the implementation of the C loop, the LLint trampoline glue functions
+// (e.g. llint_program_prologue, llint_eval_prologue, etc) are addressed as
+// if they are bytecode handlers. That means the names of the trampoline
+// functions will be added to the OpcodeID list via the
+// FOR_EACH_LLINT_OPCODE_EXTENSION() macro that FOR_EACH_OPCODE_ID()
+// includes.
+//
+// In addition, some JIT trampoline functions which are needed by LLInt
+// (e.g. getHostCallReturnValue, ctiOpThrowNotCaught) are also added as
+// bytecodes, and the CLoop will provide bytecode handlers for them.
+//
+// In the CLoop, we can only dispatch indirectly to these bytecodes
+// (including the LLInt and JIT extensions). All other dispatches
+// (i.e. goto's) must be to a known label (i.e. local / global labels).
+
+
+// How are the opcodes named?
+// ==========================
+// Here is a table to show examples of how each manifestation of an
+// opcode is named:
+//
+// Type: Opcode Trampoline Glue
+// ====== ===============
+// [In the llint .asm files]
+// llint labels: llint_op_enter llint_program_prologue
+//
+// OpcodeID: op_enter llint_program
+// [in Opcode.h] [in LLIntOpcode.h]
+//
+// When using a switch statement dispatch in the CLoop, each "opcode" is
+// a case statement:
+// Opcode: case op_enter: case llint_program_prologue:
+//
+// When using a computed goto dispatch in the CLoop, each opcode is a label:
+// Opcode: op_enter: llint_program_prologue:
+
+
+//============================================================================
+// Define the opcode dispatch mechanism when using the C loop:
+//
+
+// These are for building a C Loop interpreter:
+#define OFFLINE_ASM_BEGIN
+#define OFFLINE_ASM_END
+
+
+#define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode)
+#if ENABLE(COMPUTED_GOTO_OPCODES)
+ #define OFFLINE_ASM_GLUE_LABEL(label) label:
+#else
+ #define OFFLINE_ASM_GLUE_LABEL(label) case label: label:
+#endif
+
+#define OFFLINE_ASM_LOCAL_LABEL(label) label:
+
+
+//============================================================================
+// Some utilities:
+//
+
+namespace JSC {
+namespace LLInt {
+
+#if USE(JSVALUE32_64)
+static double Ints2Double(uint32_t lo, uint32_t hi)
+{
+ union {
+ double dval;
+ uint64_t ival64;
+ } u;
+ u.ival64 = (static_cast<uint64_t>(hi) << 32) | lo;
+ return u.dval;
+}
+#endif // USE(JSVALUE32_64)
+
+} // namespace LLInt
+
+
+//============================================================================
+// The llint C++ interpreter loop:
+//
+
+JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId,
+ bool isInitializationPass)
+{
+ #define CAST reinterpret_cast
+ #define SIGN_BIT32(x) ((x) & 0x80000000)
+
+ // One-time initialization of our address tables. We have to put this code
+ // here because our labels are only in scope inside this function. The
+ // caller (or one of its ancestors) is responsible for ensuring that this
+ // is only called once during the initialization of the VM before threads
+ // are at play.
+ if (UNLIKELY(isInitializationPass)) {
+#if ENABLE(COMPUTED_GOTO_OPCODES)
+ Opcode* opcodeMap = LLInt::opcodeMap();
+ #define OPCODE_ENTRY(__opcode, length) \
+ opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);
+ FOR_EACH_OPCODE_ID(OPCODE_ENTRY)
+ #undef OPCODE_ENTRY
+
+ #define LLINT_OPCODE_ENTRY(__opcode, length) \
+ opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);
+
+ FOR_EACH_LLINT_NATIVE_HELPER(LLINT_OPCODE_ENTRY)
+ #undef LLINT_OPCODE_ENTRY
+#endif
+ // Note: we can only set the exceptionInstructions after we have
+ // initialized the opcodeMap above. This is because getCodePtr()
+ // can depend on the opcodeMap.
+ Instruction* exceptionInstructions = LLInt::exceptionInstructions();
+ for (int i = 0; i < maxOpcodeLength + 1; ++i)
+ exceptionInstructions[i].u.pointer =
+ LLInt::getCodePtr(llint_throw_from_slow_path_trampoline);
+
+ return JSValue();
+ }
+
+ ASSERT(callFrame->globalData().topCallFrame == callFrame);
+
+ // Define the pseudo registers used by the LLINT C Loop backend:
+ union CLoopRegister {
+ intptr_t i;
+ uintptr_t u;
+ int32_t i32;
+ uint32_t u32;
+ int8_t i8;
+ uint8_t u8;
+ int8_t* i8p;
+ void* vp;
+ ExecState* execState;
+ void* instruction;
+ NativeFunction nativeFunc;
+#if USE(JSVALUE64)
+ int64_t i64;
+ EncodedJSValue encodedJSValue;
+ double castToDouble;
+#endif
+ Opcode opcode;
+ };
+ union CLoopDoubleRegister {
+ double d;
+#if USE(JSVALUE64)
+ void* castToVoidPtr;
+#endif
+ };
+
+ // The CLoop llint backend is initially based on the ARMv7 backend, and
+ // then further enhanced with a few instructions from the x86 backend to
+ // support building for X64 targets. Hence, the shape of the generated
+ // code and the usage convention of registers will look a lot like the
+ // ARMv7 backend's.
+ //
+ // For example, on a 32-bit build:
+ // 1. Outgoing args will be set up as follows:
+ // arg1 in t0 (r0 on ARM)
+ // arg2 in t1 (r1 on ARM)
+ // 2. 32 bit return values will be in t0 (r0 on ARM).
+ // 3. 64 bit return values (e.g. doubles) will be in t0,t1 (r0,r1 on ARM).
+ //
+ // But instead of naming these simulator registers based on their ARM
+ // counterparts, we'll name them based on their original llint asm names.
+ // This will make it easier to correlate the generated code with the
+ // original llint asm code.
+ //
+    // On a 64-bit build, it is more like x64 in that the registers are 64 bit.
+ // Hence:
+ // 1. Outgoing args are still the same: arg1 in t0, arg2 in t1, etc.
+ // 2. 32 bit result values will be in the low 32-bit of t0.
+ // 3. 64 bit result values will be in t0.
+
+ CLoopRegister t0, t1, t2, t3;
+#if USE(JSVALUE64)
+ CLoopRegister rBasePC, tagTypeNumber, tagMask;
+#endif
+ CLoopRegister rRetVPC;
+ CLoopDoubleRegister d0, d1;
+
+#if COMPILER(MSVC)
+ // Keep the compiler happy. We don't really need this, but the compiler
+ // will complain. This makes the warning go away.
+ t0.i = 0;
+ t1.i = 0;
+#endif
+
+ // Instantiate the pseudo JIT stack frame used by the LLINT C Loop backend:
+ JITStackFrame jitStackFrame;
+
+ // The llint expects the native stack pointer, sp, to be pointing to the
+ // jitStackFrame (which is the simulation of the native stack frame):
+ JITStackFrame* const sp = &jitStackFrame;
+ sp->globalData = &callFrame->globalData();
+
+ // Set up an alias for the globalData ptr in the JITStackFrame:
+ JSGlobalData* &globalData = sp->globalData;
+
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ Instruction* vPC;
+
+ // rPC is an alias for vPC. Set up the alias:
+ CLoopRegister& rPC = *CAST<CLoopRegister*>(&vPC);
+
+#if USE(JSVALUE32_64)
+ vPC = codeBlock->instructions().begin();
+#else // USE(JSVALUE64)
+ vPC = 0;
+ rBasePC.vp = codeBlock->instructions().begin();
+
+ // For the ASM llint, JITStubs takes care of this initialization. We do
+ // it explicitly here for the C loop:
+ tagTypeNumber.i = 0xFFFF000000000000;
+ tagMask.i = 0xFFFF000000000002;
+#endif // USE(JSVALUE64)
+
+ // cfr is an alias for callFrame. Set up this alias:
+ CLoopRegister& cfr = *CAST<CLoopRegister*>(&callFrame);
+
+ // Simulate a native return PC which should never be used:
+ rRetVPC.i = 0xbbadbeef;
+
+ // Interpreter variables for value passing between opcodes and/or helpers:
+ NativeFunction nativeFunc = 0;
+ JSValue functionReturnValue;
+ Opcode opcode;
+
+ opcode = LLInt::getOpcode(bootstrapOpcodeId);
+
+ #if ENABLE(OPCODE_STATS)
+ #define RECORD_OPCODE_STATS(__opcode) \
+ OpcodeStats::recordInstruction(__opcode)
+ #else
+ #define RECORD_OPCODE_STATS(__opcode)
+ #endif
+
+ #if USE(JSVALUE32_64)
+ #define FETCH_OPCODE() vPC->u.opcode
+ #else // USE(JSVALUE64)
+ #define FETCH_OPCODE() *bitwise_cast<Opcode*>(rBasePC.i8p + rPC.i * 8)
+ #endif // USE(JSVALUE64)
+
+ #define NEXT_INSTRUCTION() \
+ do { \
+ opcode = FETCH_OPCODE(); \
+ DISPATCH_OPCODE(); \
+ } while (false)
+
+#if ENABLE(COMPUTED_GOTO_OPCODES)
+
+ //========================================================================
+ // Loop dispatch mechanism using computed goto statements:
+
+ #define DISPATCH_OPCODE() goto *opcode
+
+ #define DEFINE_OPCODE(__opcode) \
+ __opcode: \
+ RECORD_OPCODE_STATS(__opcode);
+
+ // Dispatch to the current PC's bytecode:
+ DISPATCH_OPCODE();
+
+#else // !ENABLE(COMPUTED_GOTO_OPCODES)
+ //========================================================================
+ // Loop dispatch mechanism using a C switch statement:
+
+ #define DISPATCH_OPCODE() goto dispatchOpcode
+
+ #define DEFINE_OPCODE(__opcode) \
+ case __opcode: \
+ RECORD_OPCODE_STATS(__opcode);
+
+ // Dispatch to the current PC's bytecode:
+ dispatchOpcode:
+ switch (opcode)
+
+#endif // !ENABLE(COMPUTED_GOTO_OPCODES)
+
+ //========================================================================
+ // Bytecode handlers:
+ {
+ // This is the file generated by offlineasm, which contains all of the
+ // bytecode handlers for the interpreter, as compiled from
+ // LowLevelInterpreter.asm and its peers.
+
+ #include "LLIntAssembly.h"
+
+ // In the ASM llint, getHostCallReturnValue() is a piece of glue
+ // function provided by the JIT (see dfg/DFGOperations.cpp).
+    // We simulate it here with a pseudo-opcode handler.
+ OFFLINE_ASM_GLUE_LABEL(getHostCallReturnValue)
+ {
+ // The ASM part pops the frame:
+ callFrame = callFrame->callerFrame();
+
+ // The part in getHostCallReturnValueWithExecState():
+ JSValue result = globalData->hostCallReturnValue;
+#if USE(JSVALUE32_64)
+ t1.i = result.tag();
+ t0.i = result.payload();
+#else
+ t0.encodedJSValue = JSValue::encode(result);
+#endif
+ goto doReturnHelper;
+ }
+
+ OFFLINE_ASM_GLUE_LABEL(ctiOpThrowNotCaught)
+ {
+ return globalData->exception;
+ }
+
+#if !ENABLE(COMPUTED_GOTO_OPCODES)
+ default:
+ ASSERT(false);
+#endif
+
+ } // END bytecode handler cases.
+
+ //========================================================================
+ // Bytecode helpers:
+
+ doReturnHelper: {
+ ASSERT(!!callFrame);
+ if (callFrame->hasHostCallFrameFlag()) {
+#if USE(JSVALUE32_64)
+ return JSValue(t1.i, t0.i); // returning JSValue(tag, payload);
+#else
+ return JSValue::decode(t0.encodedJSValue);
+#endif
+ }
+
+ // The normal ASM llint call implementation returns to the caller as
+ // recorded in rRetVPC, and the caller would fetch the return address
+ // from ArgumentCount.tag() (see the dispatchAfterCall() macro used in
+ // the callTargetFunction() macro in the llint asm files).
+ //
+    // For the C loop, we don't have the JIT stub to do this work for us.
+ // So, we need to implement the equivalent of dispatchAfterCall() here
+ // before dispatching to the PC.
+
+ vPC = callFrame->currentVPC();
+
+#if USE(JSVALUE64)
+ // Based on LowLevelInterpreter64.asm's dispatchAfterCall():
+
+ // When returning from a native trampoline call, unlike the assembly
+ // LLInt, we can't simply return to the caller. In our case, we grab
+ // the caller's VPC and resume execution there. However, the caller's
+ // VPC returned by callFrame->currentVPC() is in the form of the real
+ // address of the target bytecode, but the 64-bit llint expects the
+ // VPC to be a bytecode offset. Hence, we need to map it back to a
+ // bytecode offset before we dispatch via the usual dispatch mechanism
+ // i.e. NEXT_INSTRUCTION():
+
+ codeBlock = callFrame->codeBlock();
+ ASSERT(codeBlock);
+ rPC.vp = callFrame->currentVPC();
+ rPC.i = rPC.i8p - reinterpret_cast<int8_t*>(codeBlock->instructions().begin());
+ rPC.i >>= 3;
+
+ rBasePC.vp = codeBlock->instructions().begin();
+#endif // USE(JSVALUE64)
+
+ NEXT_INSTRUCTION();
+
+ } // END doReturnHelper.
+
+
+ // Keep the compiler happy so that it doesn't complain about unused
+ // labels for the LLInt trampoline glue. The labels are automatically
+ // emitted by label macros above, and some of them are referenced by
+ // the llint generated code. Since we can't tell ahead of time which
+    // will be referenced and which will not be, we'll just pacify the
+ // compiler on all such labels:
+ #define LLINT_OPCODE_ENTRY(__opcode, length) \
+ UNUSED_LABEL(__opcode);
+ FOR_EACH_LLINT_NATIVE_HELPER(LLINT_OPCODE_ENTRY)
+ #undef LLINT_OPCODE_ENTRY
+
+
+ #undef NEXT_INSTRUCTION
+ #undef DEFINE_OPCODE
+ #undef CHECK_FOR_TIMEOUT
+ #undef CAST
+ #undef SIGN_BIT32
+
+} // Interpreter::llintCLoopExecute()
+
+} // namespace JSC
+
+#else // !ENABLE(LLINT_C_LOOP)
+
+//============================================================================
+// Define the opcode dispatch mechanism when using an ASM loop:
+//
+
+// These are for building an interpreter from generated assembly code:
+#define OFFLINE_ASM_BEGIN asm (
+#define OFFLINE_ASM_END );
+
+#define OFFLINE_ASM_OPCODE_LABEL(__opcode) OFFLINE_ASM_GLOBAL_LABEL(llint_##__opcode)
+#define OFFLINE_ASM_GLUE_LABEL(__opcode) OFFLINE_ASM_GLOBAL_LABEL(__opcode)
+
+#if CPU(ARM_THUMB2)
+#define OFFLINE_ASM_GLOBAL_LABEL(label) \
+ ".globl " SYMBOL_STRING(label) "\n" \
+ HIDE_SYMBOL(label) "\n" \
+ ".thumb\n" \
+ ".thumb_func " THUMB_FUNC_PARAM(label) "\n" \
+ SYMBOL_STRING(label) ":\n"
+#else
+#define OFFLINE_ASM_GLOBAL_LABEL(label) \
+ ".globl " SYMBOL_STRING(label) "\n" \
+ HIDE_SYMBOL(label) "\n" \
+ SYMBOL_STRING(label) ":\n"
+#endif
+
+#define OFFLINE_ASM_LOCAL_LABEL(label) LOCAL_LABEL_STRING(label) ":\n"
+
// This is a file generated by offlineasm, which contains all of the assembly code
// for the interpreter, as compiled from LowLevelInterpreter.asm.
#include "LLIntAssembly.h"
+#endif // !ENABLE(LLINT_C_LOOP)
+
#endif // ENABLE(LLINT)
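
For readers following the 64-bit C loop return path above, here is a minimal C++ sketch of the VPC-to-bytecode-offset mapping that doReturnHelper performs. It assumes, as that code does, that an Instruction slot is pointer-sized (8 bytes on 64-bit), so a byte delta becomes a bytecode offset via a right shift by 3. The Instruction stand-in and the helper name are illustrative only, not part of the patch.

    #include <cstddef> // ptrdiff_t

    struct Instruction { void* opcode; }; // stand-in; pointer-sized, i.e. 8 bytes on 64-bit

    ptrdiff_t bytecodeOffsetFor(const Instruction* vpc, const Instruction* instructionsBegin)
    {
        // Byte distance from the start of the CodeBlock's instruction stream ...
        ptrdiff_t byteDelta = reinterpret_cast<const char*>(vpc)
            - reinterpret_cast<const char*>(instructionsBegin);
        // ... mapped to an instruction index: one Instruction slot is 8 bytes.
        return byteDelta >> 3;
    }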
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.h b/Source/JavaScriptCore/llint/LowLevelInterpreter.h
index 6383757cf..76c950a8c 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.h
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.h
@@ -32,25 +32,57 @@
#include "Opcode.h"
+#if ENABLE(LLINT_C_LOOP)
+
+namespace JSC {
+
+// The following is a minimal set of aliases for the opcode names. This is
+// needed because there is code (e.g. in GetByIdStatus.cpp and
+// PutByIdStatus.cpp) which refers to the opcodes expecting them to be
+// prefixed with "llint_". In the CLoop implementation, the two are
+// equivalent. Hence, we set up these aliases here.
+//
+// Note: we don't just do this for all opcodes because we only need a few,
+// and currently, FOR_EACH_OPCODE_ID() includes the llint and JIT opcode
+// extensions, for which we definitely don't want to add aliases. With some
+// minor refactoring, we could use FOR_EACH_OPCODE_ID() to automatically
+// generate an llint_ alias for every opcode, but that is not needed at this
+// time.
+
+const OpcodeID llint_op_call = op_call;
+const OpcodeID llint_op_call_eval = op_call_eval;
+const OpcodeID llint_op_call_varargs = op_call_varargs;
+const OpcodeID llint_op_construct = op_construct;
+const OpcodeID llint_op_catch = op_catch;
+const OpcodeID llint_op_get_by_id = op_get_by_id;
+const OpcodeID llint_op_get_by_id_out_of_line = op_get_by_id_out_of_line;
+const OpcodeID llint_op_put_by_id = op_put_by_id;
+const OpcodeID llint_op_put_by_id_out_of_line = op_put_by_id_out_of_line;
+
+const OpcodeID llint_op_put_by_id_transition_direct =
+ op_put_by_id_transition_direct;
+const OpcodeID llint_op_put_by_id_transition_direct_out_of_line =
+ op_put_by_id_transition_direct_out_of_line;
+const OpcodeID llint_op_put_by_id_transition_normal =
+ op_put_by_id_transition_normal;
+const OpcodeID llint_op_put_by_id_transition_normal_out_of_line =
+ op_put_by_id_transition_normal_out_of_line;
+
+const OpcodeID llint_op_method_check = op_method_check;
+
+} // namespace JSC
+
+#else // !ENABLE(LLINT_C_LOOP)
+
#define LLINT_INSTRUCTION_DECL(opcode, length) extern "C" void llint_##opcode();
FOR_EACH_OPCODE_ID(LLINT_INSTRUCTION_DECL);
#undef LLINT_INSTRUCTION_DECL
-extern "C" void llint_begin();
-extern "C" void llint_end();
-extern "C" void llint_program_prologue();
-extern "C" void llint_eval_prologue();
-extern "C" void llint_function_for_call_prologue();
-extern "C" void llint_function_for_construct_prologue();
-extern "C" void llint_function_for_call_arity_check();
-extern "C" void llint_function_for_construct_arity_check();
-extern "C" void llint_generic_return_point();
-extern "C" void llint_throw_from_slow_path_trampoline();
-extern "C" void llint_throw_during_call_trampoline();
-
-// Native call trampolines
-extern "C" void llint_native_call_trampoline();
-extern "C" void llint_native_construct_trampoline();
+#define DECLARE_LLINT_NATIVE_HELPER(name, length) extern "C" void name();
+ FOR_EACH_LLINT_NATIVE_HELPER(DECLARE_LLINT_NATIVE_HELPER)
+#undef DECLARE_LLINT_NATIVE_HELPER
+
+#endif // !ENABLE(LLINT_C_LOOP)
#endif // ENABLE(LLINT)
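
As a purely hypothetical sketch of the "minor refactoring" mentioned in the comment above: if a FOR_EACH-style macro covering only the core bytecodes existed (FOR_EACH_CORE_OPCODE_ID below is an assumed name, not a macro in this tree), the hand-written aliases could be generated mechanically.

    // Hypothetical sketch only; FOR_EACH_CORE_OPCODE_ID is an assumed macro name.
    #define DEFINE_LLINT_OPCODE_ALIAS(opcode, length) \
        const OpcodeID llint_##opcode = opcode;
    // FOR_EACH_CORE_OPCODE_ID(DEFINE_LLINT_OPCODE_ALIAS)
    #undef DEFINE_LLINT_OPCODE_ALIAS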
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
index c0f136889..b011c5425 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
@@ -108,13 +108,16 @@ macro cCall2(function, arg1, arg2)
if ARMv7
move arg1, t0
move arg2, t1
+ call function
elsif X86
poke arg1, 0
poke arg2, 1
+ call function
+ elsif C_LOOP
+ cloopCallSlowPath function, arg1, arg2
else
error
end
- call function
end
# This barely works. arg3 and arg4 should probably be immediates.
@@ -124,15 +127,18 @@ macro cCall4(function, arg1, arg2, arg3, arg4)
move arg2, t1
move arg3, t2
move arg4, t3
+ call function
elsif X86
poke arg1, 0
poke arg2, 1
poke arg3, 2
poke arg4, 3
+ call function
+ elsif C_LOOP
+ error
else
error
end
- call function
end
macro callSlowPath(slowPath)
@@ -1006,14 +1012,14 @@ macro getScope(deBruijinIndexOperand, scopeCheck)
# Need to conditionally skip over one scope.
bieq TagOffset[cfr, t1, 8], EmptyValueTag, .noActivation
scopeCheck(t0, t1)
- loadp ScopeChainNode::next[t0], t0
+ loadp JSScope::m_next[t0], t0
.noActivation:
subi 1, t2
btiz t2, .done
.loop:
scopeCheck(t0, t1)
- loadp ScopeChainNode::next[t0], t0
+ loadp JSScope::m_next[t0], t0
subi 1, t2
btinz t2, .loop
@@ -1022,13 +1028,13 @@ end
_llint_op_resolve_global_dynamic:
traceExecution()
- loadp JITStackFrame::globalData[sp], t3
- loadp JSGlobalData::activationStructure[t3], t3
+ loadp CodeBlock[cfr], t3
+ loadp CodeBlock::m_globalObject[t3], t3
+ loadp JSGlobalObject::m_activationStructure[t3], t3
getScope(
20[PC],
macro (scope, scratch)
- loadp ScopeChainNode::object[scope], scratch
- bpneq JSCell::m_structure[scratch], t3, .opResolveGlobalDynamicSuperSlow
+ bpneq JSCell::m_structure[scope], t3, .opResolveGlobalDynamicSuperSlow
end)
resolveGlobal(7, .opResolveGlobalDynamicSlow)
dispatch(7)
@@ -1051,7 +1057,6 @@ _llint_op_get_scoped_var:
getScope(12[PC], macro (scope, scratch) end)
loadi 4[PC], t1
loadi 8[PC], t2
- loadp ScopeChainNode::object[t0], t0
loadp JSVariableObject::m_registers[t0], t0
loadi TagOffset[t0, t2, 8], t3
loadi PayloadOffset[t0, t2, 8], t0
@@ -1069,7 +1074,6 @@ _llint_op_put_scoped_var:
loadConstantOrVariable(t1, t3, t2)
loadi 4[PC], t1
writeBarrier(t3, t2)
- loadp ScopeChainNode::object[t0], t0
loadp JSVariableObject::m_registers[t0], t0
storei t3, TagOffset[t0, t1, 8]
storei t2, PayloadOffset[t0, t1, 8]
@@ -1397,7 +1401,10 @@ _llint_op_put_by_val:
_llint_op_loop:
- nop
+ traceExecution()
+ dispatchBranch(4[PC])
+
+
_llint_op_jmp:
traceExecution()
dispatchBranch(4[PC])
@@ -1590,6 +1597,18 @@ _llint_op_new_func:
dispatch(4)
+macro arrayProfileForCall()
+ if VALUE_PROFILER
+ loadi 12[PC], t3
+ bineq ThisArgumentOffset + TagOffset[cfr, t3, 8], CellTag, .done
+ loadi ThisArgumentOffset + PayloadOffset[cfr, t3, 8], t0
+ loadp JSCell::m_structure[t0], t0
+ loadp 20[PC], t1
+ storep t0, ArrayProfile::m_lastSeenStructure[t1]
+ .done:
+ end
+end
+
macro doCall(slowPath)
loadi 4[PC], t0
loadi 16[PC], t1
@@ -1600,7 +1619,7 @@ macro doCall(slowPath)
addp 24, PC
lshifti 3, t3
addp cfr, t3 # t3 contains the new value of cfr
- loadp JSFunction::m_scopeChain[t2], t0
+ loadp JSFunction::m_scope[t2], t0
storei t2, Callee + PayloadOffset[t3]
storei t0, ScopeChain + PayloadOffset[t3]
loadi 8 - 24[PC], t2
@@ -1610,8 +1629,7 @@ macro doCall(slowPath)
storei CellTag, Callee + TagOffset[t3]
storei CellTag, ScopeChain + TagOffset[t3]
move t3, cfr
- call LLIntCallLinkInfo::machineCodeTarget[t1]
- dispatchAfterCall()
+ callTargetFunction(t1)
.opCallSlow:
slowPathForCall(6, slowPath)
@@ -1811,6 +1829,19 @@ macro nativeCallTrampoline(executableOffsetToFunction)
call executableOffsetToFunction[t1]
restoreReturnAddressBeforeReturn(t3)
loadp JITStackFrame::globalData[sp], t3
+ elsif C_LOOP
+ loadp JITStackFrame::globalData[sp], t3
+ storep cfr, JSGlobalData::topCallFrame[t3]
+ move t0, t2
+ preserveReturnAddressAfterCall(t3)
+ storep t3, ReturnPC[cfr]
+ move cfr, t0
+ loadi Callee + PayloadOffset[cfr], t1
+ loadp JSFunction::m_executable[t1], t1
+ move t2, cfr
+ cloopCallNative executableOffsetToFunction[t1]
+ restoreReturnAddressBeforeReturn(t3)
+ loadp JITStackFrame::globalData[sp], t3
else
error
end
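
For reference, a small C++ sketch (with stand-in types, not JSC source) of what the new arrayProfileForCall() macro records on both the 32_64 and 64-bit paths: when value profiling is enabled and the call's 'this' argument is a cell, its Structure is stored into the call instruction's ArrayProfile.

    struct Structure;                                 // opaque stand-in
    struct JSCellStandIn { Structure* m_structure; }; // stand-in for JSCell

    struct JSValueStandIn {
        bool isCell;         // true when the value holds a cell pointer
        JSCellStandIn* cell; // valid only when isCell is true
    };

    struct ArrayProfile { Structure* m_lastSeenStructure; };

    inline void recordLastSeenStructure(const JSValueStandIn& thisArgument, ArrayProfile* profile)
    {
        if (!thisArgument.isCell)
            return; // only cells carry a Structure worth profiling
        profile->m_lastSeenStructure = thisArgument.cell->m_structure;
    }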
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
index 73e8613de..d429542e7 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
@@ -51,18 +51,30 @@ macro dispatchAfterCall()
end
macro cCall2(function, arg1, arg2)
- move arg1, t5
- move arg2, t4
- call function
+ if X86_64
+ move arg1, t5
+ move arg2, t4
+ call function
+ elsif C_LOOP
+ cloopCallSlowPath function, arg1, arg2
+ else
+ error
+ end
end
# This barely works. arg3 and arg4 should probably be immediates.
macro cCall4(function, arg1, arg2, arg3, arg4)
- move arg1, t5
- move arg2, t4
- move arg3, t1
- move arg4, t2
- call function
+ if X86_64
+ move arg1, t5
+ move arg2, t4
+ move arg3, t1
+ move arg4, t2
+ call function
+ elsif C_LOOP
+ error
+ else
+ error
+ end
end
macro prepareStateForCCall()
@@ -857,14 +869,14 @@ macro getScope(deBruijinIndexOperand, scopeCheck)
# Need to conditionally skip over one scope.
btpz [cfr, t1, 8], .noActivation
scopeCheck(t0, t1)
- loadp ScopeChainNode::next[t0], t0
+ loadp JSScope::m_next[t0], t0
.noActivation:
subi 1, t2
btiz t2, .done
.loop:
scopeCheck(t0, t1)
- loadp ScopeChainNode::next[t0], t0
+ loadp JSScope::m_next[t0], t0
subi 1, t2
btinz t2, .loop
@@ -873,13 +885,13 @@ end
_llint_op_resolve_global_dynamic:
traceExecution()
- loadp JITStackFrame::globalData[sp], t3
- loadp JSGlobalData::activationStructure[t3], t3
+ loadp CodeBlock[cfr], t3
+ loadp CodeBlock::m_globalObject[t3], t3
+ loadp JSGlobalObject::m_activationStructure[t3], t3
getScope(
40[PB, PC, 8],
macro (scope, scratch)
- loadp ScopeChainNode::object[scope], scratch
- bpneq JSCell::m_structure[scratch], t3, .opResolveGlobalDynamicSuperSlow
+ bpneq JSCell::m_structure[scope], t3, .opResolveGlobalDynamicSuperSlow
end)
resolveGlobal(7, .opResolveGlobalDynamicSlow)
dispatch(7)
@@ -902,7 +914,6 @@ _llint_op_get_scoped_var:
getScope(24[PB, PC, 8], macro (scope, scratch) end)
loadis 8[PB, PC, 8], t1
loadis 16[PB, PC, 8], t2
- loadp ScopeChainNode::object[t0], t0
loadp JSVariableObject::m_registers[t0], t0
loadp [t0, t2, 8], t3
storep t3, [cfr, t1, 8]
@@ -918,7 +929,6 @@ _llint_op_put_scoped_var:
loadConstantOrVariable(t1, t3)
loadis 8[PB, PC, 8], t1
writeBarrier(t3)
- loadp ScopeChainNode::object[t0], t0
loadp JSVariableObject::m_registers[t0], t0
storep t3, [t0, t1, 8]
dispatch(4)
@@ -1234,7 +1244,10 @@ _llint_op_put_by_val:
_llint_op_loop:
- nop
+ traceExecution()
+ dispatchInt(8[PB, PC, 8])
+
+
_llint_op_jmp:
traceExecution()
dispatchInt(8[PB, PC, 8])
@@ -1430,6 +1443,18 @@ _llint_op_new_func:
dispatch(4)
+macro arrayProfileForCall()
+ if VALUE_PROFILER
+ loadis 24[PB, PC, 8], t3
+ loadp ThisArgumentOffset[cfr, t3, 8], t0
+ btpnz t0, tagMask, .done
+ loadp JSCell::m_structure[t0], t0
+ loadp 40[PB, PC, 8], t1
+ storep t0, ArrayProfile::m_lastSeenStructure[t1]
+ .done:
+ end
+end
+
macro doCall(slowPath)
loadis 8[PB, PC, 8], t0
loadp 32[PB, PC, 8], t1
@@ -1440,7 +1465,7 @@ macro doCall(slowPath)
addi 6, PC
lshifti 3, t3
addp cfr, t3
- loadp JSFunction::m_scopeChain[t2], t0
+ loadp JSFunction::m_scope[t2], t0
storep t2, Callee[t3]
storep t0, ScopeChain[t3]
loadis 16 - 48[PB, PC, 8], t2
@@ -1448,8 +1473,7 @@ macro doCall(slowPath)
storep cfr, CallerFrame[t3]
storei t2, ArgumentCount + PayloadOffset[t3]
move t3, cfr
- call LLIntCallLinkInfo::machineCodeTarget[t1]
- dispatchAfterCall()
+ callTargetFunction(t1)
.opCallSlow:
slowPathForCall(6, slowPath)
@@ -1620,21 +1644,46 @@ _llint_throw_during_call_trampoline:
macro nativeCallTrampoline(executableOffsetToFunction)
storep 0, CodeBlock[cfr]
- loadp JITStackFrame::globalData + 8[sp], t0
- storep cfr, JSGlobalData::topCallFrame[t0]
- loadp CallerFrame[cfr], t0
- loadp ScopeChain[t0], t1
- storep t1, ScopeChain[cfr]
- peek 0, t1
- storep t1, ReturnPC[cfr]
- move cfr, t5 # t5 = rdi
- subp 16 - 8, sp
- loadp Callee[cfr], t4 # t4 = rsi
- loadp JSFunction::m_executable[t4], t1
- move t0, cfr # Restore cfr to avoid loading from stack
- call executableOffsetToFunction[t1]
- addp 16 - 8, sp
- loadp JITStackFrame::globalData + 8[sp], t3
+ if X86_64
+ loadp JITStackFrame::globalData + 8[sp], t0
+ storep cfr, JSGlobalData::topCallFrame[t0]
+ loadp CallerFrame[cfr], t0
+ loadp ScopeChain[t0], t1
+ storep t1, ScopeChain[cfr]
+ peek 0, t1
+ storep t1, ReturnPC[cfr]
+ move cfr, t5 # t5 = rdi
+ subp 16 - 8, sp
+ loadp Callee[cfr], t4 # t4 = rsi
+ loadp JSFunction::m_executable[t4], t1
+ move t0, cfr # Restore cfr to avoid loading from stack
+ call executableOffsetToFunction[t1]
+ addp 16 - 8, sp
+ loadp JITStackFrame::globalData + 8[sp], t3
+
+ elsif C_LOOP
+ loadp CallerFrame[cfr], t0
+ loadp ScopeChain[t0], t1
+ storep t1, ScopeChain[cfr]
+
+ loadp JITStackFrame::globalData[sp], t3
+ storep cfr, JSGlobalData::topCallFrame[t3]
+
+ move t0, t2
+ preserveReturnAddressAfterCall(t3)
+ storep t3, ReturnPC[cfr]
+ move cfr, t0
+ loadp Callee[cfr], t1
+ loadp JSFunction::m_executable[t1], t1
+ move t2, cfr
+ cloopCallNative executableOffsetToFunction[t1]
+
+ restoreReturnAddressBeforeReturn(t3)
+ loadp JITStackFrame::globalData[sp], t3
+ else
+ error
+ end
+
btpnz JSGlobalData::exception[t3], .exception
ret
.exception: