author     Lorry Tar Creator <lorry-tar-importer@lorry>    2017-06-27 06:07:23 +0000
committer  Lorry Tar Creator <lorry-tar-importer@lorry>    2017-06-27 06:07:23 +0000
commit     1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c
tree       46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/llint
parent     32761a6cee1d0dee366b885b7b9c777e67885688
Diffstat (limited to 'Source/JavaScriptCore/llint')
-rw-r--r--  Source/JavaScriptCore/llint/LLIntCLoop.cpp                |    6
-rw-r--r--  Source/JavaScriptCore/llint/LLIntCLoop.h                  |   14
-rw-r--r--  Source/JavaScriptCore/llint/LLIntCommon.h                 |   11
-rw-r--r--  Source/JavaScriptCore/llint/LLIntData.cpp                 |  382
-rw-r--r--  Source/JavaScriptCore/llint/LLIntData.h                   |   63
-rw-r--r--  Source/JavaScriptCore/llint/LLIntEntrypoint.cpp           |   55
-rw-r--r--  Source/JavaScriptCore/llint/LLIntEntrypoint.h             |   11
-rw-r--r--  Source/JavaScriptCore/llint/LLIntExceptions.cpp           |   20
-rw-r--r--  Source/JavaScriptCore/llint/LLIntExceptions.h             |   11
-rw-r--r--  Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h       |   63
-rw-r--r--  Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp     |   29
-rw-r--r--  Source/JavaScriptCore/llint/LLIntOpcode.h                 |   48
-rw-r--r--  Source/JavaScriptCore/llint/LLIntPCRanges.h               |   51
-rw-r--r--  Source/JavaScriptCore/llint/LLIntSlowPaths.cpp            | 1176
-rw-r--r--  Source/JavaScriptCore/llint/LLIntSlowPaths.h              |   58
-rw-r--r--  Source/JavaScriptCore/llint/LLIntThunks.cpp               |   85
-rw-r--r--  Source/JavaScriptCore/llint/LLIntThunks.h                 |   24
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter.asm       | 1518
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter.cpp       |  226
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter.h         |   25
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm  | 2024
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter64.asm     | 1925
22 files changed, 4787 insertions(+), 3038 deletions(-)
diff --git a/Source/JavaScriptCore/llint/LLIntCLoop.cpp b/Source/JavaScriptCore/llint/LLIntCLoop.cpp
index 18c2b99c1..e3c6c6ce9 100644
--- a/Source/JavaScriptCore/llint/LLIntCLoop.cpp
+++ b/Source/JavaScriptCore/llint/LLIntCLoop.cpp
@@ -26,7 +26,7 @@
#include "config.h"
#include "LLIntCLoop.h"
-#if ENABLE(LLINT_C_LOOP)
+#if !ENABLE(JIT)
#include "LLIntData.h"
@@ -35,10 +35,10 @@ namespace LLInt {
void CLoop::initialize()
{
- execute(0, getOpcode(llint_unused), true);
+ execute(llint_entry, 0, 0, 0, true);
}
} // namespace LLInt
} // namespace JSC
-#endif // ENABLE(LLINT_C_LOOP)
+#endif // !ENABLE(JIT)
diff --git a/Source/JavaScriptCore/llint/LLIntCLoop.h b/Source/JavaScriptCore/llint/LLIntCLoop.h
index 8759571f3..886fe1a1b 100644
--- a/Source/JavaScriptCore/llint/LLIntCLoop.h
+++ b/Source/JavaScriptCore/llint/LLIntCLoop.h
@@ -23,30 +23,26 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef LLIntCLoop_h
-#define LLIntCLoop_h
+#pragma once
-#if ENABLE(LLINT_C_LOOP)
+#if !ENABLE(JIT)
#include "CallFrame.h"
#include "JSCJSValue.h"
#include "Opcode.h"
+#include "ProtoCallFrame.h"
namespace JSC {
namespace LLInt {
-const OpcodeID llint_unused = llint_end;
-
class CLoop {
public:
static void initialize();
- static JSValue execute(CallFrame*, Opcode entryOpcode, bool isInitializationPass = false);
+ static JSValue execute(OpcodeID entryOpcodeID, void* executableAddress, VM*, ProtoCallFrame*, bool isInitializationPass = false);
};
} } // namespace JSC::LLInt
using JSC::LLInt::CLoop;
-#endif // ENABLE(LLINT_C_LOOP)
-
-#endif // LLIntCLoop_h
+#endif // !ENABLE(JIT)
diff --git a/Source/JavaScriptCore/llint/LLIntCommon.h b/Source/JavaScriptCore/llint/LLIntCommon.h
index d32a264e5..5da516a37 100644
--- a/Source/JavaScriptCore/llint/LLIntCommon.h
+++ b/Source/JavaScriptCore/llint/LLIntCommon.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2013, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,8 +23,10 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef LLIntCommon_h
-#define LLIntCommon_h
+#pragma once
+
+// Enables LLINT stats collection.
+#define ENABLE_LLINT_STATS 0
// Print every instruction executed.
#define LLINT_EXECUTION_TRACING 0
@@ -42,6 +44,3 @@
// Disable inline caching of get_by_id and put_by_id.
#define LLINT_ALWAYS_ACCESS_SLOW 0
-
-#endif // LLIntCommon_h
-
diff --git a/Source/JavaScriptCore/llint/LLIntData.cpp b/Source/JavaScriptCore/llint/LLIntData.cpp
index ac77836c0..8b615a1a3 100644
--- a/Source/JavaScriptCore/llint/LLIntData.cpp
+++ b/Source/JavaScriptCore/llint/LLIntData.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,38 +26,53 @@
#include "config.h"
#include "LLIntData.h"
-#if ENABLE(LLINT)
-
+#include "ArithProfile.h"
#include "BytecodeConventions.h"
+#include "CodeBlock.h"
#include "CodeType.h"
+#include "InitializeThreading.h"
#include "Instruction.h"
#include "JSScope.h"
#include "LLIntCLoop.h"
+#include "LLIntCommon.h"
+#include "MaxFrameExtentForSlowPathCall.h"
#include "Opcode.h"
#include "PropertyOffset.h"
+#include "ShadowChicken.h"
+#include "WriteBarrier.h"
+#include <string>
+#include <wtf/NeverDestroyed.h>
+
+#define STATIC_ASSERT(cond) static_assert(cond, "LLInt assumes " #cond)
namespace JSC { namespace LLInt {
Instruction* Data::s_exceptionInstructions = 0;
-Opcode* Data::s_opcodeMap = 0;
+Opcode Data::s_opcodeMap[numOpcodeIDs] = { };
+OpcodeStatsArray* Data::s_opcodeStatsArray = nullptr;
+
+#if ENABLE(JIT)
+extern "C" void llint_entry(void*);
+#endif
void initialize()
{
Data::s_exceptionInstructions = new Instruction[maxOpcodeLength + 1];
- Data::s_opcodeMap = new Opcode[numOpcodeIDs];
- #if ENABLE(LLINT_C_LOOP)
+#if !ENABLE(JIT)
CLoop::initialize();
- #else // !ENABLE(LLINT_C_LOOP)
+#else // ENABLE(JIT)
+ llint_entry(&Data::s_opcodeMap);
+
for (int i = 0; i < maxOpcodeLength + 1; ++i)
Data::s_exceptionInstructions[i].u.pointer =
LLInt::getCodePtr(llint_throw_from_slow_path_trampoline);
- #define OPCODE_ENTRY(opcode, length) \
- Data::s_opcodeMap[opcode] = LLInt::getCodePtr(llint_##opcode);
- FOR_EACH_OPCODE_ID(OPCODE_ENTRY);
- #undef OPCODE_ENTRY
- #endif // !ENABLE(LLINT_C_LOOP)
+#endif // ENABLE(JIT)
+
+#if ENABLE(LLINT_STATS)
+ Data::ensureStats();
+#endif
}
#if COMPILER(CLANG)
@@ -71,31 +86,29 @@ void Data::performAssertions(VM& vm)
// Assertions to match LowLevelInterpreter.asm. If you change any of this code, be
// prepared to change LowLevelInterpreter.asm as well!!
-#ifndef NDEBUG
#if USE(JSVALUE64)
const ptrdiff_t PtrSize = 8;
- const ptrdiff_t CallFrameHeaderSlots = 6;
+ const ptrdiff_t CallFrameHeaderSlots = 5;
#else // USE(JSVALUE64) // i.e. 32-bit version
const ptrdiff_t PtrSize = 4;
- const ptrdiff_t CallFrameHeaderSlots = 5;
+ const ptrdiff_t CallFrameHeaderSlots = 4;
#endif
const ptrdiff_t SlotSize = 8;
-#endif
- ASSERT(sizeof(void*) == PtrSize);
- ASSERT(sizeof(Register) == SlotSize);
- ASSERT(JSStack::CallFrameHeaderSize == CallFrameHeaderSlots);
+ STATIC_ASSERT(sizeof(void*) == PtrSize);
+ STATIC_ASSERT(sizeof(Register) == SlotSize);
+ STATIC_ASSERT(CallFrame::headerSizeInRegisters == CallFrameHeaderSlots);
ASSERT(!CallFrame::callerFrameOffset());
+ STATIC_ASSERT(CallerFrameAndPC::sizeInRegisters == (PtrSize * 2) / SlotSize);
ASSERT(CallFrame::returnPCOffset() == CallFrame::callerFrameOffset() + PtrSize);
- ASSERT(JSStack::CodeBlock * sizeof(Register) == CallFrame::returnPCOffset() + PtrSize);
- ASSERT(JSStack::ScopeChain * sizeof(Register) == JSStack::CodeBlock * sizeof(Register) + SlotSize);
- ASSERT(JSStack::Callee * sizeof(Register) == JSStack::ScopeChain * sizeof(Register) + SlotSize);
- ASSERT(JSStack::ArgumentCount * sizeof(Register) == JSStack::Callee * sizeof(Register) + SlotSize);
- ASSERT(JSStack::ThisArgument * sizeof(Register) == JSStack::ArgumentCount * sizeof(Register) + SlotSize);
- ASSERT(JSStack::CallFrameHeaderSize == JSStack::ThisArgument);
+ ASSERT(CallFrameSlot::codeBlock * sizeof(Register) == CallFrame::returnPCOffset() + PtrSize);
+ STATIC_ASSERT(CallFrameSlot::callee * sizeof(Register) == CallFrameSlot::codeBlock * sizeof(Register) + SlotSize);
+ STATIC_ASSERT(CallFrameSlot::argumentCount * sizeof(Register) == CallFrameSlot::callee * sizeof(Register) + SlotSize);
+ STATIC_ASSERT(CallFrameSlot::thisArgument * sizeof(Register) == CallFrameSlot::argumentCount * sizeof(Register) + SlotSize);
+ STATIC_ASSERT(CallFrame::headerSizeInRegisters == CallFrameSlot::thisArgument);
- ASSERT(CallFrame::argumentOffsetIncludingThis(0) == JSStack::ThisArgument);
+ ASSERT(CallFrame::argumentOffsetIncludingThis(0) == CallFrameSlot::thisArgument);
#if CPU(BIG_ENDIAN)
ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag) == 0);
@@ -105,46 +118,114 @@ void Data::performAssertions(VM& vm)
ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload) == 0);
#endif
#if USE(JSVALUE32_64)
- ASSERT(JSValue::Int32Tag == static_cast<unsigned>(-1));
- ASSERT(JSValue::BooleanTag == static_cast<unsigned>(-2));
- ASSERT(JSValue::NullTag == static_cast<unsigned>(-3));
- ASSERT(JSValue::UndefinedTag == static_cast<unsigned>(-4));
- ASSERT(JSValue::CellTag == static_cast<unsigned>(-5));
- ASSERT(JSValue::EmptyValueTag == static_cast<unsigned>(-6));
- ASSERT(JSValue::DeletedValueTag == static_cast<unsigned>(-7));
- ASSERT(JSValue::LowestTag == static_cast<unsigned>(-7));
+ STATIC_ASSERT(JSValue::Int32Tag == static_cast<unsigned>(-1));
+ STATIC_ASSERT(JSValue::BooleanTag == static_cast<unsigned>(-2));
+ STATIC_ASSERT(JSValue::NullTag == static_cast<unsigned>(-3));
+ STATIC_ASSERT(JSValue::UndefinedTag == static_cast<unsigned>(-4));
+ STATIC_ASSERT(JSValue::CellTag == static_cast<unsigned>(-5));
+ STATIC_ASSERT(JSValue::EmptyValueTag == static_cast<unsigned>(-6));
+ STATIC_ASSERT(JSValue::DeletedValueTag == static_cast<unsigned>(-7));
+ STATIC_ASSERT(JSValue::LowestTag == static_cast<unsigned>(-7));
#else
- ASSERT(TagBitTypeOther == 0x2);
- ASSERT(TagBitBool == 0x4);
- ASSERT(TagBitUndefined == 0x8);
- ASSERT(ValueEmpty == 0x0);
- ASSERT(ValueFalse == (TagBitTypeOther | TagBitBool));
- ASSERT(ValueTrue == (TagBitTypeOther | TagBitBool | 1));
- ASSERT(ValueUndefined == (TagBitTypeOther | TagBitUndefined));
- ASSERT(ValueNull == TagBitTypeOther);
+ STATIC_ASSERT(TagBitTypeOther == 0x2);
+ STATIC_ASSERT(TagBitBool == 0x4);
+ STATIC_ASSERT(TagBitUndefined == 0x8);
+ STATIC_ASSERT(ValueEmpty == 0x0);
+ STATIC_ASSERT(ValueFalse == (TagBitTypeOther | TagBitBool));
+ STATIC_ASSERT(ValueTrue == (TagBitTypeOther | TagBitBool | 1));
+ STATIC_ASSERT(ValueUndefined == (TagBitTypeOther | TagBitUndefined));
+ STATIC_ASSERT(ValueNull == TagBitTypeOther);
+#endif
+#if (CPU(X86_64) && !OS(WINDOWS)) || CPU(ARM64) || !ENABLE(JIT)
+ STATIC_ASSERT(!maxFrameExtentForSlowPathCall);
+#elif CPU(ARM)
+ STATIC_ASSERT(maxFrameExtentForSlowPathCall == 24);
+#elif CPU(X86) || CPU(MIPS)
+ STATIC_ASSERT(maxFrameExtentForSlowPathCall == 40);
+#elif CPU(X86_64) && OS(WINDOWS)
+ STATIC_ASSERT(maxFrameExtentForSlowPathCall == 64);
+#endif
+
+#if !ENABLE(JIT) || USE(JSVALUE32_64)
+ ASSERT(!CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters());
+#elif (CPU(X86_64) && !OS(WINDOWS)) || CPU(ARM64)
+ ASSERT(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters() == 3);
+#elif (CPU(X86_64) && OS(WINDOWS))
+ ASSERT(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters() == 3);
#endif
- ASSERT(StringType == 5);
- ASSERT(ObjectType == 17);
- ASSERT(FinalObjectType == 18);
- ASSERT(MasqueradesAsUndefined == 1);
- ASSERT(ImplementsHasInstance == 2);
- ASSERT(ImplementsDefaultHasInstance == 8);
- ASSERT(FirstConstantRegisterIndex == 0x40000000);
- ASSERT(GlobalCode == 0);
- ASSERT(EvalCode == 1);
- ASSERT(FunctionCode == 2);
-
- ASSERT(GlobalProperty == 0);
- ASSERT(GlobalVar == 1);
- ASSERT(ClosureVar == 2);
- ASSERT(GlobalPropertyWithVarInjectionChecks == 3);
- ASSERT(GlobalVarWithVarInjectionChecks == 4);
- ASSERT(ClosureVarWithVarInjectionChecks == 5);
- ASSERT(Dynamic == 6);
- ASSERT(ResolveModeAndType::mask == 0xffff);
+ STATIC_ASSERT(StringType == 6);
+ STATIC_ASSERT(SymbolType == 7);
+ STATIC_ASSERT(ObjectType == 23);
+ STATIC_ASSERT(FinalObjectType == 24);
+ STATIC_ASSERT(JSFunctionType == 26);
+ STATIC_ASSERT(ArrayType == 34);
+ STATIC_ASSERT(DerivedArrayType == 35);
+ STATIC_ASSERT(ProxyObjectType == 53);
+ STATIC_ASSERT(Int8ArrayType == 36);
+ STATIC_ASSERT(Int16ArrayType == 37);
+ STATIC_ASSERT(Int32ArrayType == 38);
+ STATIC_ASSERT(Uint8ArrayType == 39);
+ STATIC_ASSERT(Uint8ClampedArrayType == 40);
+ STATIC_ASSERT(Uint16ArrayType == 41);
+ STATIC_ASSERT(Uint32ArrayType == 42);
+ STATIC_ASSERT(Float32ArrayType == 43);
+ STATIC_ASSERT(Float64ArrayType == 44);
+ STATIC_ASSERT(MasqueradesAsUndefined == 1);
+ STATIC_ASSERT(ImplementsDefaultHasInstance == 2);
+ STATIC_ASSERT(FirstConstantRegisterIndex == 0x40000000);
+ STATIC_ASSERT(GlobalCode == 0);
+ STATIC_ASSERT(EvalCode == 1);
+ STATIC_ASSERT(FunctionCode == 2);
+ STATIC_ASSERT(ModuleCode == 3);
+
+ STATIC_ASSERT(IsArray == 0x01);
+ STATIC_ASSERT(IndexingShapeMask == 0x0E);
+ STATIC_ASSERT(NoIndexingShape == 0x00);
+ STATIC_ASSERT(Int32Shape == 0x04);
+ STATIC_ASSERT(DoubleShape == 0x06);
+ STATIC_ASSERT(ContiguousShape == 0x08);
+ STATIC_ASSERT(ArrayStorageShape == 0x0A);
+ STATIC_ASSERT(SlowPutArrayStorageShape == 0x0C);
- ASSERT(MarkedBlock::blockMask == ~static_cast<decltype(MarkedBlock::blockMask)>(0xffff));
+ ASSERT(!(reinterpret_cast<ptrdiff_t>((reinterpret_cast<WriteBarrier<JSCell>*>(0x4000)->slot())) - 0x4000));
+ static_assert(PutByIdPrimaryTypeMask == 0x6, "LLInt assumes PutByIdPrimaryTypeMask is == 0x6");
+ static_assert(PutByIdPrimaryTypeSecondary == 0x0, "LLInt assumes PutByIdPrimaryTypeSecondary is == 0x0");
+ static_assert(PutByIdPrimaryTypeObjectWithStructure == 0x2, "LLInt assumes PutByIdPrimaryTypeObjectWithStructure is == 0x2");
+ static_assert(PutByIdPrimaryTypeObjectWithStructureOrOther == 0x4, "LLInt assumes PutByIdPrimaryTypeObjectWithStructureOrOther is == 0x4");
+ static_assert(PutByIdSecondaryTypeMask == -0x8, "LLInt assumes PutByIdSecondaryTypeMask is == -0x8");
+ static_assert(PutByIdSecondaryTypeBottom == 0x0, "LLInt assumes PutByIdSecondaryTypeBottom is == 0x0");
+ static_assert(PutByIdSecondaryTypeBoolean == 0x8, "LLInt assumes PutByIdSecondaryTypeBoolean is == 0x8");
+ static_assert(PutByIdSecondaryTypeOther == 0x10, "LLInt assumes PutByIdSecondaryTypeOther is == 0x10");
+ static_assert(PutByIdSecondaryTypeInt32 == 0x18, "LLInt assumes PutByIdSecondaryTypeInt32 is == 0x18");
+ static_assert(PutByIdSecondaryTypeNumber == 0x20, "LLInt assumes PutByIdSecondaryTypeNumber is == 0x20");
+ static_assert(PutByIdSecondaryTypeString == 0x28, "LLInt assumes PutByIdSecondaryTypeString is == 0x28");
+ static_assert(PutByIdSecondaryTypeSymbol == 0x30, "LLInt assumes PutByIdSecondaryTypeSymbol is == 0x30");
+ static_assert(PutByIdSecondaryTypeObject == 0x38, "LLInt assumes PutByIdSecondaryTypeObject is == 0x38");
+ static_assert(PutByIdSecondaryTypeObjectOrOther == 0x40, "LLInt assumes PutByIdSecondaryTypeObjectOrOther is == 0x40");
+ static_assert(PutByIdSecondaryTypeTop == 0x48, "LLInt assumes PutByIdSecondaryTypeTop is == 0x48");
+
+ static_assert(GlobalProperty == 0, "LLInt assumes GlobalProperty ResultType is == 0");
+ static_assert(GlobalVar == 1, "LLInt assumes GlobalVar ResultType is == 1");
+ static_assert(GlobalLexicalVar == 2, "LLInt assumes GlobalLexicalVar ResultType is == 2");
+ static_assert(ClosureVar == 3, "LLInt assumes ClosureVar ResultType is == 3");
+ static_assert(LocalClosureVar == 4, "LLInt assumes LocalClosureVar ResultType is == 4");
+ static_assert(ModuleVar == 5, "LLInt assumes ModuleVar ResultType is == 5");
+ static_assert(GlobalPropertyWithVarInjectionChecks == 6, "LLInt assumes GlobalPropertyWithVarInjectionChecks ResultType is == 6");
+ static_assert(GlobalVarWithVarInjectionChecks == 7, "LLInt assumes GlobalVarWithVarInjectionChecks ResultType is == 7");
+ static_assert(GlobalLexicalVarWithVarInjectionChecks == 8, "LLInt assumes GlobalLexicalVarWithVarInjectionChecks ResultType is == 8");
+ static_assert(ClosureVarWithVarInjectionChecks == 9, "LLInt assumes ClosureVarWithVarInjectionChecks ResultType is == 9");
+
+ static_assert(static_cast<unsigned>(InitializationMode::NotInitialization) == 2, "LLInt assumes that InitializationMode::NotInitialization is 0");
+
+ STATIC_ASSERT(GetPutInfo::typeBits == 0x3ff);
+ STATIC_ASSERT(GetPutInfo::initializationShift == 10);
+ STATIC_ASSERT(GetPutInfo::initializationBits == 0xffc00);
+
+ STATIC_ASSERT(MarkedBlock::blockSize == 16 * 1024);
+ STATIC_ASSERT(blackThreshold == 0);
+
+ ASSERT(bitwise_cast<uintptr_t>(ShadowChicken::Packet::tailMarker()) == static_cast<uintptr_t>(0x7a11));
// FIXME: make these assertions less horrible.
#if !ASSERT_DISABLED
@@ -154,12 +235,183 @@ void Data::performAssertions(VM& vm)
ASSERT(bitwise_cast<int**>(&testVector)[0] == testVector.begin());
#endif
- ASSERT(StringImpl::s_hashFlag8BitBuffer == 32);
+ ASSERT(StringImpl::s_hashFlag8BitBuffer == 8);
+
+ {
+ uint32_t bits = 0x120000;
+ UNUSED_PARAM(bits);
+ ArithProfile arithProfile;
+ arithProfile.lhsSawInt32();
+ arithProfile.rhsSawInt32();
+ ASSERT(arithProfile.bits() == bits);
+ ASSERT(ArithProfile::fromInt(bits).lhsObservedType().isOnlyInt32());
+ ASSERT(ArithProfile::fromInt(bits).rhsObservedType().isOnlyInt32());
+ }
+ {
+ uint32_t bits = 0x220000;
+ UNUSED_PARAM(bits);
+ ArithProfile arithProfile;
+ arithProfile.lhsSawNumber();
+ arithProfile.rhsSawInt32();
+ ASSERT(arithProfile.bits() == bits);
+ ASSERT(ArithProfile::fromInt(bits).lhsObservedType().isOnlyNumber());
+ ASSERT(ArithProfile::fromInt(bits).rhsObservedType().isOnlyInt32());
+ }
+ {
+ uint32_t bits = 0x240000;
+ UNUSED_PARAM(bits);
+ ArithProfile arithProfile;
+ arithProfile.lhsSawNumber();
+ arithProfile.rhsSawNumber();
+ ASSERT(arithProfile.bits() == bits);
+ ASSERT(ArithProfile::fromInt(bits).lhsObservedType().isOnlyNumber());
+ ASSERT(ArithProfile::fromInt(bits).rhsObservedType().isOnlyNumber());
+ }
+ {
+ uint32_t bits = 0x140000;
+ UNUSED_PARAM(bits);
+ ArithProfile arithProfile;
+ arithProfile.lhsSawInt32();
+ arithProfile.rhsSawNumber();
+ ASSERT(arithProfile.bits() == bits);
+ ASSERT(ArithProfile::fromInt(bits).lhsObservedType().isOnlyInt32());
+ ASSERT(ArithProfile::fromInt(bits).rhsObservedType().isOnlyNumber());
+ }
}
#if COMPILER(CLANG)
#pragma clang diagnostic pop
#endif
-} } // namespace JSC::LLInt
+void Data::finalizeStats()
+{
+#if ENABLE(LLINT_STATS)
+ if (!Options::reportLLIntStats())
+ return;
+
+ if (Options::llintStatsFile())
+ saveStats();
+
+ dumpStats();
+#endif
+}
+
+#if ENABLE(LLINT_STATS)
+static const bool verboseStats = false;
+
+static bool compareStats(const OpcodeStats& a, const OpcodeStats& b)
+{
+ if (a.count > b.count)
+ return true;
+ if (a.count < b.count)
+ return false;
+ return a.slowPathCount > b.slowPathCount;
+}
+
+void Data::dumpStats()
+{
+ ASSERT(Options::reportLLIntStats());
+ auto statsCopy = *s_opcodeStatsArray;
+ std::sort(statsCopy.begin(), statsCopy.end(), compareStats);
+
+ dataLog("Opcode stats:\n");
+ unsigned i = 0;
+ for (auto& stats : statsCopy) {
+ if (stats.count || stats.slowPathCount)
+ dataLog(" [", i++, "]: fast:", stats.count, " slow:", stats.slowPathCount, " ", opcodeNames[stats.id], "\n");
+ }
+}
+
+void Data::ensureStats()
+{
+ static std::once_flag initializeOptionsOnceFlag;
+ std::call_once(initializeOptionsOnceFlag, [] {
+ s_opcodeStatsArray = new OpcodeStatsArray();
+ resetStats();
+ });
+}
+
+void Data::loadStats()
+{
+ static NeverDestroyed<std::string> installedStatsFile;
+ if (!Options::llintStatsFile() || !installedStatsFile.get().compare(Options::llintStatsFile()))
+ return;
-#endif // ENABLE(LLINT)
+ Options::reportLLIntStats() = true; // Force stats collection.
+ installedStatsFile.get() = Options::llintStatsFile();
+
+ ensureStats();
+
+ const char* filename = Options::llintStatsFile();
+ FILE* file = fopen(filename, "r");
+ if (!file) {
+ dataLogF("Failed to open file %s. Did you add the file-read-write-data entitlement to WebProcess.sb?\n", filename);
+ return;
+ }
+
+ resetStats();
+
+ OpcodeStats loaded;
+ unsigned index;
+ char opcodeName[100];
+ while (fscanf(file, "[%u]: fast:%zu slow:%zu id:%u %s\n", &index, &loaded.count, &loaded.slowPathCount, &loaded.id, opcodeName) != EOF) {
+ if (verboseStats)
+ dataLogF("loaded [%u]: fast %zu slow %zu id:%u %s\n", index, loaded.count, loaded.slowPathCount, loaded.id, opcodeName);
+
+ OpcodeStats& stats = opcodeStats(loaded.id);
+ stats.count = loaded.count;
+ stats.slowPathCount = loaded.slowPathCount;
+ }
+
+ if (verboseStats) {
+ dataLogF("After loading from %s, ", filename);
+ dumpStats();
+ }
+
+ int result = fclose(file);
+ if (result)
+ dataLogF("Failed to close file %s: %s\n", filename, strerror(errno));
+}
+
+void Data::resetStats()
+{
+ unsigned i = 0;
+ for (auto& stats : *s_opcodeStatsArray) {
+ stats.id = static_cast<OpcodeID>(i++);
+ stats.count = 0;
+ stats.slowPathCount = 0;
+ }
+}
+
+void Data::saveStats()
+{
+ ASSERT(Options::reportLLIntStats() && Options::llintStatsFile());
+ const char* filename = Options::llintStatsFile();
+
+ FILE* file = fopen(filename, "w");
+ if (!file) {
+ dataLogF("Failed to open file %s. Did you add the file-read-write-data entitlement to WebProcess.sb?\n", filename);
+ return;
+ }
+
+ auto statsCopy = *s_opcodeStatsArray;
+ std::sort(statsCopy.begin(), statsCopy.end(), compareStats);
+
+ int index = 0;
+ for (auto& stats : statsCopy) {
+ if (!stats.count && !stats.slowPathCount)
+ break; // stats are sorted. If we encountered 0 counts, then there are no more non-zero counts.
+
+ if (verboseStats)
+ dataLogF("saved [%u]: fast:%zu slow:%zu id:%u %s\n", index, stats.count, stats.slowPathCount, stats.id, opcodeNames[stats.id]);
+
+ fprintf(file, "[%u]: fast:%zu slow:%zu id:%u %s\n", index, stats.count, stats.slowPathCount, stats.id, opcodeNames[stats.id]);
+ index++;
+ }
+
+ int result = fclose(file);
+ if (result)
+ dataLogF("Failed to close file %s: %s\n", filename, strerror(errno));
+}
+#endif
+
+} } // namespace JSC::LLInt
diff --git a/Source/JavaScriptCore/llint/LLIntData.h b/Source/JavaScriptCore/llint/LLIntData.h
index 8ed2bceda..441a2cfc6 100644
--- a/Source/JavaScriptCore/llint/LLIntData.h
+++ b/Source/JavaScriptCore/llint/LLIntData.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,19 +23,18 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef LLIntData_h
-#define LLIntData_h
+#pragma once
#include "JSCJSValue.h"
#include "Opcode.h"
-#include <wtf/Platform.h>
+#include <array>
namespace JSC {
class VM;
struct Instruction;
-#if ENABLE(LLINT_C_LOOP)
+#if !ENABLE(JIT)
typedef OpcodeID LLIntCode;
#else
typedef void (*LLIntCode)();
@@ -43,15 +42,32 @@ typedef void (*LLIntCode)();
namespace LLInt {
-#if ENABLE(LLINT)
+struct OpcodeStats {
+ OpcodeID id;
+ size_t count { 0 };
+ size_t slowPathCount { 0 };
+};
+typedef std::array<OpcodeStats, numOpcodeIDs> OpcodeStatsArray;
class Data {
public:
+
static void performAssertions(VM&);
+ static OpcodeStats& opcodeStats(OpcodeID id) { return (*s_opcodeStatsArray)[id]; }
+
+ JS_EXPORT_PRIVATE static void finalizeStats();
+
+ static void dumpStats();
+ static void loadStats();
private:
+ static void ensureStats();
+ static void resetStats();
+ static void saveStats();
+
static Instruction* s_exceptionInstructions;
- static Opcode* s_opcodeMap;
+ static Opcode s_opcodeMap[numOpcodeIDs];
+ static OpcodeStatsArray* s_opcodeStatsArray;
friend void initialize();
@@ -87,41 +103,16 @@ ALWAYS_INLINE void* getCodePtr(OpcodeID id)
return reinterpret_cast<void*>(getOpcode(id));
}
-#else // !ENABLE(LLINT)
-
-#if COMPILER(CLANG)
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wmissing-noreturn"
-#endif
-
-class Data {
-public:
- static void performAssertions(VM&) { }
-};
-
-#if COMPILER(CLANG)
-#pragma clang diagnostic pop
-#endif
-
-#endif // !ENABLE(LLINT)
-
-ALWAYS_INLINE void* getOpcode(void llintOpcode())
+#if ENABLE(JIT)
+ALWAYS_INLINE LLIntCode getCodeFunctionPtr(OpcodeID codeId)
{
- return bitwise_cast<void*>(llintOpcode);
-}
-
-ALWAYS_INLINE void* getCodePtr(void glueHelper())
-{
- return bitwise_cast<void*>(glueHelper);
+ return reinterpret_cast<LLIntCode>(getCodePtr(codeId));
}
+#endif
ALWAYS_INLINE void* getCodePtr(JSC::EncodedJSValue glueHelper())
{
return bitwise_cast<void*>(glueHelper);
}
-
} } // namespace JSC::LLInt
-
-#endif // LLIntData_h
-
diff --git a/Source/JavaScriptCore/llint/LLIntEntrypoint.cpp b/Source/JavaScriptCore/llint/LLIntEntrypoint.cpp
index 993ec67b9..f7d761dc1 100644
--- a/Source/JavaScriptCore/llint/LLIntEntrypoint.cpp
+++ b/Source/JavaScriptCore/llint/LLIntEntrypoint.cpp
@@ -25,14 +25,15 @@
#include "config.h"
#include "LLIntEntrypoint.h"
-
-#if ENABLE(LLINT)
-
#include "CodeBlock.h"
+#include "HeapInlines.h"
#include "JITCode.h"
+#include "JSCellInlines.h"
#include "JSObject.h"
#include "LLIntThunks.h"
#include "LowLevelInterpreter.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "StackAlignment.h"
#include "VM.h"
namespace JSC { namespace LLInt {
@@ -45,14 +46,12 @@ static void setFunctionEntrypoint(VM& vm, CodeBlock* codeBlock)
if (vm.canUseJIT()) {
if (kind == CodeForCall) {
codeBlock->setJITCode(
- adoptRef(new DirectJITCode(vm.getCTIStub(functionForCallEntryThunkGenerator), JITCode::InterpreterThunk)),
- vm.getCTIStub(functionForCallArityCheckThunkGenerator).code());
+ adoptRef(*new DirectJITCode(vm.getCTIStub(functionForCallEntryThunkGenerator), vm.getCTIStub(functionForCallArityCheckThunkGenerator).code(), JITCode::InterpreterThunk)));
return;
}
ASSERT(kind == CodeForConstruct);
codeBlock->setJITCode(
- adoptRef(new DirectJITCode(vm.getCTIStub(functionForConstructEntryThunkGenerator), JITCode::InterpreterThunk)),
- vm.getCTIStub(functionForConstructArityCheckThunkGenerator).code());
+ adoptRef(*new DirectJITCode(vm.getCTIStub(functionForConstructEntryThunkGenerator), vm.getCTIStub(functionForConstructArityCheckThunkGenerator).code(), JITCode::InterpreterThunk)));
return;
}
#endif // ENABLE(JIT)
@@ -60,14 +59,12 @@ static void setFunctionEntrypoint(VM& vm, CodeBlock* codeBlock)
UNUSED_PARAM(vm);
if (kind == CodeForCall) {
codeBlock->setJITCode(
- adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_call_prologue), JITCode::InterpreterThunk)),
- MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_call_arity_check));
+ adoptRef(*new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_call_prologue), MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_call_arity_check), JITCode::InterpreterThunk)));
return;
}
ASSERT(kind == CodeForConstruct);
codeBlock->setJITCode(
- adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_construct_prologue), JITCode::InterpreterThunk)),
- MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_construct_arity_check));
+ adoptRef(*new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_construct_prologue), MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_construct_arity_check), JITCode::InterpreterThunk)));
}
static void setEvalEntrypoint(VM& vm, CodeBlock* codeBlock)
@@ -75,16 +72,14 @@ static void setEvalEntrypoint(VM& vm, CodeBlock* codeBlock)
#if ENABLE(JIT)
if (vm.canUseJIT()) {
codeBlock->setJITCode(
- adoptRef(new DirectJITCode(vm.getCTIStub(evalEntryThunkGenerator), JITCode::InterpreterThunk)),
- MacroAssemblerCodePtr());
+ adoptRef(*new DirectJITCode(vm.getCTIStub(evalEntryThunkGenerator), MacroAssemblerCodePtr(), JITCode::InterpreterThunk)));
return;
}
#endif // ENABLE(JIT)
UNUSED_PARAM(vm);
codeBlock->setJITCode(
- adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_eval_prologue), JITCode::InterpreterThunk)),
- MacroAssemblerCodePtr());
+ adoptRef(*new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_eval_prologue), MacroAssemblerCodePtr(), JITCode::InterpreterThunk)));
}
static void setProgramEntrypoint(VM& vm, CodeBlock* codeBlock)
@@ -92,16 +87,29 @@ static void setProgramEntrypoint(VM& vm, CodeBlock* codeBlock)
#if ENABLE(JIT)
if (vm.canUseJIT()) {
codeBlock->setJITCode(
- adoptRef(new DirectJITCode(vm.getCTIStub(programEntryThunkGenerator), JITCode::InterpreterThunk)),
- MacroAssemblerCodePtr());
+ adoptRef(*new DirectJITCode(vm.getCTIStub(programEntryThunkGenerator), MacroAssemblerCodePtr(), JITCode::InterpreterThunk)));
return;
}
#endif // ENABLE(JIT)
UNUSED_PARAM(vm);
codeBlock->setJITCode(
- adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_program_prologue), JITCode::InterpreterThunk)),
- MacroAssemblerCodePtr());
+ adoptRef(*new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_program_prologue), MacroAssemblerCodePtr(), JITCode::InterpreterThunk)));
+}
+
+static void setModuleProgramEntrypoint(VM& vm, CodeBlock* codeBlock)
+{
+#if ENABLE(JIT)
+ if (vm.canUseJIT()) {
+ codeBlock->setJITCode(
+ adoptRef(*new DirectJITCode(vm.getCTIStub(moduleProgramEntryThunkGenerator), MacroAssemblerCodePtr(), JITCode::InterpreterThunk)));
+ return;
+ }
+#endif // ENABLE(JIT)
+
+ UNUSED_PARAM(vm);
+ codeBlock->setJITCode(
+ adoptRef(*new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_module_program_prologue), MacroAssemblerCodePtr(), JITCode::InterpreterThunk)));
}
void setEntrypoint(VM& vm, CodeBlock* codeBlock)
@@ -110,6 +118,9 @@ void setEntrypoint(VM& vm, CodeBlock* codeBlock)
case GlobalCode:
setProgramEntrypoint(vm, codeBlock);
return;
+ case ModuleCode:
+ setModuleProgramEntrypoint(vm, codeBlock);
+ return;
case EvalCode:
setEvalEntrypoint(vm, codeBlock);
return;
@@ -123,9 +134,9 @@ void setEntrypoint(VM& vm, CodeBlock* codeBlock)
unsigned frameRegisterCountFor(CodeBlock* codeBlock)
{
- return codeBlock->m_numCalleeRegisters;
+ ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeLocals) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeLocals)));
+
+ return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeLocals + maxFrameExtentForSlowPathCallInRegisters);
}
} } // namespace JSC::LLInt
-
-#endif // ENABLE(LLINT)
diff --git a/Source/JavaScriptCore/llint/LLIntEntrypoint.h b/Source/JavaScriptCore/llint/LLIntEntrypoint.h
index 4b687c6de..392a0b9be 100644
--- a/Source/JavaScriptCore/llint/LLIntEntrypoint.h
+++ b/Source/JavaScriptCore/llint/LLIntEntrypoint.h
@@ -23,12 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef LLIntEntrypoint_h
-#define LLIntEntrypoint_h
-
-#include <wtf/Platform.h>
-
-#if ENABLE(LLINT)
+#pragma once
#include "CodeSpecializationKind.h"
@@ -44,7 +39,3 @@ void setEntrypoint(VM&, CodeBlock*);
unsigned frameRegisterCountFor(CodeBlock*);
} } // namespace JSC::LLInt
-
-#endif // ENABLE(LLINT)
-
-#endif // LLIntEntrypoint_h
diff --git a/Source/JavaScriptCore/llint/LLIntExceptions.cpp b/Source/JavaScriptCore/llint/LLIntExceptions.cpp
index ddacb5016..e0fbf8b7e 100644
--- a/Source/JavaScriptCore/llint/LLIntExceptions.cpp
+++ b/Source/JavaScriptCore/llint/LLIntExceptions.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -25,15 +25,17 @@
#include "config.h"
#include "LLIntExceptions.h"
-
-#if ENABLE(LLINT)
-
#include "CallFrame.h"
#include "CodeBlock.h"
#include "Instruction.h"
#include "LLIntCommon.h"
+#include "LLIntData.h"
#include "LowLevelInterpreter.h"
-#include "Operations.h"
+#include "JSCInlines.h"
+
+#if LLINT_SLOW_PATH_TRACING
+#include "Exception.h"
+#endif
namespace JSC { namespace LLInt {
@@ -48,7 +50,8 @@ Instruction* returnToThrow(ExecState* exec)
UNUSED_PARAM(exec);
#if LLINT_SLOW_PATH_TRACING
VM* vm = &exec->vm();
- dataLog("Throwing exception ", vm->exception(), " (returnToThrow).\n");
+ auto scope = DECLARE_THROW_SCOPE(*vm);
+ dataLog("Throwing exception ", JSValue(scope.exception()), " (returnToThrow).\n");
#endif
return LLInt::exceptionInstructions();
}
@@ -58,11 +61,10 @@ void* callToThrow(ExecState* exec)
UNUSED_PARAM(exec);
#if LLINT_SLOW_PATH_TRACING
VM* vm = &exec->vm();
- dataLog("Throwing exception ", vm->exception(), " (callToThrow).\n");
+ auto scope = DECLARE_THROW_SCOPE(*vm);
+ dataLog("Throwing exception ", JSValue(scope.exception()), " (callToThrow).\n");
#endif
return LLInt::getCodePtr(llint_throw_during_call_trampoline);
}
} } // namespace JSC::LLInt
-
-#endif // ENABLE(LLINT)
diff --git a/Source/JavaScriptCore/llint/LLIntExceptions.h b/Source/JavaScriptCore/llint/LLIntExceptions.h
index bf18feed3..f471ff89d 100644
--- a/Source/JavaScriptCore/llint/LLIntExceptions.h
+++ b/Source/JavaScriptCore/llint/LLIntExceptions.h
@@ -23,14 +23,9 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef LLIntExceptions_h
-#define LLIntExceptions_h
+#pragma once
-#include <wtf/Platform.h>
#include <wtf/StdLibExtras.h>
-
-#if ENABLE(LLINT)
-
#include "MacroAssemblerCodeRef.h"
namespace JSC {
@@ -53,7 +48,3 @@ Instruction* returnToThrow(ExecState*);
void* callToThrow(ExecState*);
} } // namespace JSC::LLInt
-
-#endif // ENABLE(LLINT)
-
-#endif // LLIntExceptions_h
diff --git a/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h b/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
index 07a91bb73..fce22550a 100644
--- a/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
+++ b/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
@@ -23,37 +23,48 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef LLIntOfflineAsmConfig_h
-#define LLIntOfflineAsmConfig_h
+#pragma once
#include "LLIntCommon.h"
#include <wtf/Assertions.h>
#include <wtf/InlineASM.h>
-#include <wtf/Platform.h>
-
-#if ENABLE(LLINT_C_LOOP)
+#if !ENABLE(JIT)
#define OFFLINE_ASM_C_LOOP 1
#define OFFLINE_ASM_X86 0
+#define OFFLINE_ASM_X86_WIN 0
#define OFFLINE_ASM_ARM 0
#define OFFLINE_ASM_ARMv7 0
#define OFFLINE_ASM_ARMv7_TRADITIONAL 0
#define OFFLINE_ASM_ARM64 0
#define OFFLINE_ASM_X86_64 0
+#define OFFLINE_ASM_X86_64_WIN 0
+#define OFFLINE_ASM_ARMv7k 0
#define OFFLINE_ASM_ARMv7s 0
#define OFFLINE_ASM_MIPS 0
-#define OFFLINE_ASM_SH4 0
-#else // !ENABLE(LLINT_C_LOOP)
+#else // ENABLE(JIT)
#define OFFLINE_ASM_C_LOOP 0
-#if CPU(X86)
+#if CPU(X86) && !PLATFORM(WIN)
#define OFFLINE_ASM_X86 1
#else
#define OFFLINE_ASM_X86 0
#endif
+#if CPU(X86) && PLATFORM(WIN)
+#define OFFLINE_ASM_X86_WIN 1
+#else
+#define OFFLINE_ASM_X86_WIN 0
+#endif
+
+#ifdef __ARM_ARCH_7K__
+#define OFFLINE_ASM_ARMv7k 1
+#else
+#define OFFLINE_ASM_ARMv7k 0
+#endif
+
#ifdef __ARM_ARCH_7S__
#define OFFLINE_ASM_ARMv7s 1
#else
@@ -79,22 +90,22 @@
#define OFFLINE_ASM_ARM 0
#endif
-#if CPU(X86_64)
+#if CPU(X86_64) && !PLATFORM(WIN)
#define OFFLINE_ASM_X86_64 1
#else
#define OFFLINE_ASM_X86_64 0
#endif
-#if CPU(MIPS)
-#define OFFLINE_ASM_MIPS 1
+#if CPU(X86_64) && PLATFORM(WIN)
+#define OFFLINE_ASM_X86_64_WIN 1
#else
-#define OFFLINE_ASM_MIPS 0
+#define OFFLINE_ASM_X86_64_WIN 0
#endif
-#if CPU(SH4)
-#define OFFLINE_ASM_SH4 1
+#if CPU(MIPS)
+#define OFFLINE_ASM_MIPS 1
#else
-#define OFFLINE_ASM_SH4 0
+#define OFFLINE_ASM_MIPS 0
#endif
#if CPU(ARM64)
@@ -116,7 +127,7 @@
#endif
#endif
-#endif // !ENABLE(LLINT_C_LOOP)
+#endif // ENABLE(JIT)
#if USE(JSVALUE64)
#define OFFLINE_ASM_JSVALUE64 1
@@ -136,22 +147,14 @@
#define OFFLINE_ASM_BIG_ENDIAN 0
#endif
-#if LLINT_EXECUTION_TRACING
-#define OFFLINE_ASM_EXECUTION_TRACING 1
+#if ENABLE(LLINT_STATS)
+#define OFFLINE_ASM_COLLECT_STATS 1
#else
-#define OFFLINE_ASM_EXECUTION_TRACING 0
+#define OFFLINE_ASM_COLLECT_STATS 0
#endif
-#if LLINT_ALWAYS_ALLOCATE_SLOW
-#define OFFLINE_ASM_ALWAYS_ALLOCATE_SLOW 1
-#else
-#define OFFLINE_ASM_ALWAYS_ALLOCATE_SLOW 0
-#endif
-
-#if ENABLE(GGC)
-#define OFFLINE_ASM_GGC 1
+#if LLINT_EXECUTION_TRACING
+#define OFFLINE_ASM_EXECUTION_TRACING 1
#else
-#define OFFLINE_ASM_GGC 0
+#define OFFLINE_ASM_EXECUTION_TRACING 0
#endif
-
-#endif // LLIntOfflineAsmConfig_h
diff --git a/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
index a0cbfa0ba..90e48b4bc 100644
--- a/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
+++ b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2015-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,29 +27,38 @@
#include "ArrayProfile.h"
#include "CodeBlock.h"
-#include "Debugger.h"
-#include "Executable.h"
+#include "CommonSlowPaths.h"
+#include "DirectArguments.h"
+#include "DirectEvalExecutable.h"
+#include "EvalExecutable.h"
+#include "Exception.h"
#include "Heap.h"
+#include "IndirectEvalExecutable.h"
#include "Interpreter.h"
-#include "JITStubs.h"
#include "JSArray.h"
+#include "JSArrayBufferView.h"
#include "JSCell.h"
+#include "JSEnvironmentRecord.h"
#include "JSFunction.h"
-#include "VM.h"
#include "JSGlobalObject.h"
+#include "JSModuleRecord.h"
#include "JSObject.h"
-#include "JSPropertyNameIterator.h"
-#include "JSStack.h"
#include "JSString.h"
#include "JSTypeInfo.h"
-#include "JSVariableObject.h"
#include "JumpTable.h"
#include "LLIntOfflineAsmConfig.h"
#include "MarkedSpace.h"
+#include "NativeExecutable.h"
#include "ProtoCallFrame.h"
+#include "ShadowChicken.h"
#include "Structure.h"
#include "StructureChain.h"
+#include "TypeProfiler.h"
+#include "TypeProfilerLog.h"
+#include "VM.h"
+#include "VMEntryRecord.h"
#include "ValueProfile.h"
+#include "Watchdog.h"
#include <wtf/text/StringImpl.h>
namespace JSC {
@@ -63,7 +72,6 @@ public:
const unsigned* LLIntOffsetsExtractor::dummy()
{
-#if ENABLE(LLINT)
// This is a file generated by offlineasm/generate_offsets_extractor.rb, and contains code
// to create a table of offsets, sizes, and a header identifying what combination of
// Platform.h macros we have set. We include it inside of a method on LLIntOffsetsExtractor
@@ -72,9 +80,6 @@ const unsigned* LLIntOffsetsExtractor::dummy()
// compiler to kindly step aside and yield to our best intentions.
#include "LLIntDesiredOffsets.h"
return extractorTable;
-#else
- return 0;
-#endif
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/llint/LLIntOpcode.h b/Source/JavaScriptCore/llint/LLIntOpcode.h
index 7ee53df82..85905e370 100644
--- a/Source/JavaScriptCore/llint/LLIntOpcode.h
+++ b/Source/JavaScriptCore/llint/LLIntOpcode.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,59 +23,25 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef LLIntOpcode_h
-#define LLIntOpcode_h
+#pragma once
-#include <wtf/Platform.h>
-
-#if ENABLE(LLINT)
-
-#if ENABLE(LLINT_C_LOOP)
+#if !ENABLE(JIT)
#define FOR_EACH_LLINT_NOJIT_NATIVE_HELPER(macro) \
- macro(getHostCallReturnValue, 1) \
- macro(returnFromJavaScript, 1)
+ FOR_EACH_CLOOP_BYTECODE_HELPER_ID(macro)
-#else // !ENABLE(LLINT_C_LOOP)
+#else // ENABLE(JIT)
#define FOR_EACH_LLINT_NOJIT_NATIVE_HELPER(macro) \
// Nothing to do here. Use the JIT impl instead.
-#endif // !ENABLE(LLINT_C_LOOP)
+#endif // !ENABLE(JIT)
#define FOR_EACH_LLINT_NATIVE_HELPER(macro) \
FOR_EACH_LLINT_NOJIT_NATIVE_HELPER(macro) \
\
- macro(llint_begin, 1) \
- \
- macro(llint_program_prologue, 1) \
- macro(llint_eval_prologue, 1) \
- macro(llint_function_for_call_prologue, 1) \
- macro(llint_function_for_construct_prologue, 1) \
- macro(llint_function_for_call_arity_check, 1) \
- macro(llint_function_for_construct_arity_check, 1) \
- macro(llint_generic_return_point, 1) \
- macro(llint_throw_from_slow_path_trampoline, 1) \
- macro(llint_throw_during_call_trampoline, 1) \
- \
- /* Native call trampolines */ \
- macro(llint_native_call_trampoline, 1) \
- macro(llint_native_construct_trampoline, 1) \
- \
- macro(llint_end, 1)
+ FOR_EACH_BYTECODE_HELPER_ID(macro)
-#if ENABLE(LLINT_C_LOOP)
#define FOR_EACH_LLINT_OPCODE_EXTENSION(macro) FOR_EACH_LLINT_NATIVE_HELPER(macro)
-#else
-#define FOR_EACH_LLINT_OPCODE_EXTENSION(macro) // Nothing to add.
-#endif
-
-#else // !ENABLE(LLINT)
-
-#define FOR_EACH_LLINT_OPCODE_EXTENSION(macro) // Nothing to add.
-
-#endif // !ENABLE(LLINT)
-
-#endif // LLIntOpcode_h
diff --git a/Source/JavaScriptCore/llint/LLIntPCRanges.h b/Source/JavaScriptCore/llint/LLIntPCRanges.h
new file mode 100644
index 000000000..a24a9c0e9
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntPCRanges.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace JSC {
+
+namespace LLInt {
+
+// These are used just to denote where LLInt code begins and where it ends.
+extern "C" {
+ void llintPCRangeStart();
+ void llintPCRangeEnd();
+}
+
+ALWAYS_INLINE bool isLLIntPC(void* pc)
+{
+ uintptr_t pcAsInt = bitwise_cast<uintptr_t>(pc);
+ uintptr_t llintStart = bitwise_cast<uintptr_t>(llintPCRangeStart);
+ uintptr_t llintEnd = bitwise_cast<uintptr_t>(llintPCRangeEnd);
+ RELEASE_ASSERT(llintStart < llintEnd);
+ return llintStart <= pcAsInt && pcAsInt <= llintEnd;
+}
+
+#if ENABLE(JIT)
+static const GPRReg LLIntPC = GPRInfo::regT4;
+#endif
+
+} } // namespace JSC::LLInt
diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
index b2a7b8ea1..80d164083 100644
--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,48 +26,63 @@
#include "config.h"
#include "LLIntSlowPaths.h"
-#if ENABLE(LLINT)
-
-#include "Arguments.h"
#include "ArrayConstructor.h"
#include "CallFrame.h"
#include "CommonSlowPaths.h"
#include "CommonSlowPathsExceptions.h"
+#include "Error.h"
+#include "ErrorHandlingScope.h"
+#include "EvalCodeBlock.h"
+#include "Exception.h"
+#include "ExceptionFuzz.h"
+#include "FunctionCodeBlock.h"
+#include "FunctionWhitelist.h"
#include "GetterSetter.h"
#include "HostCallReturnValue.h"
#include "Interpreter.h"
+#include "IteratorOperations.h"
#include "JIT.h"
#include "JITExceptions.h"
-#include "JSActivation.h"
+#include "JITWorklist.h"
+#include "JSAsyncFunction.h"
+#include "JSCInlines.h"
#include "JSCJSValue.h"
+#include "JSGeneratorFunction.h"
#include "JSGlobalObjectFunctions.h"
-#include "JSNameScope.h"
-#include "JSPropertyNameIterator.h"
-#include "JSStackInlines.h"
+#include "JSLexicalEnvironment.h"
#include "JSString.h"
#include "JSWithScope.h"
#include "LLIntCommon.h"
+#include "LLIntData.h"
#include "LLIntExceptions.h"
#include "LowLevelInterpreter.h"
+#include "ModuleProgramCodeBlock.h"
#include "ObjectConstructor.h"
-#include "Operations.h"
+#include "ObjectPropertyConditionSet.h"
+#include "ProgramCodeBlock.h"
+#include "ProtoCallFrame.h"
+#include "RegExpObject.h"
+#include "ShadowChicken.h"
#include "StructureRareDataInlines.h"
+#include "VMInlines.h"
+#include <wtf/NeverDestroyed.h>
#include <wtf/StringPrintStream.h>
namespace JSC { namespace LLInt {
#define LLINT_BEGIN_NO_SET_PC() \
VM& vm = exec->vm(); \
- NativeCallFrameTracer tracer(&vm, exec)
+ NativeCallFrameTracer tracer(&vm, exec); \
+ auto throwScope = DECLARE_THROW_SCOPE(vm)
#ifndef NDEBUG
#define LLINT_SET_PC_FOR_STUBS() do { \
exec->codeBlock()->bytecodeOffset(pc); \
- exec->setCurrentVPC(pc + 1); \
+ exec->setCurrentVPC(pc); \
} while (false)
#else
#define LLINT_SET_PC_FOR_STUBS() do { \
- exec->setCurrentVPC(pc + 1); \
+ exec->setCurrentVPC(pc); \
} while (false)
#endif
@@ -82,16 +97,17 @@ namespace JSC { namespace LLInt {
return encodeResult(first, second); \
} while (false)
-#define LLINT_END_IMPL() LLINT_RETURN_TWO(pc, exec)
+#define LLINT_END_IMPL() LLINT_RETURN_TWO(pc, 0)
#define LLINT_THROW(exceptionToThrow) do { \
- vm.throwException(exec, exceptionToThrow); \
+ throwException(exec, throwScope, exceptionToThrow); \
pc = returnToThrow(exec); \
LLINT_END_IMPL(); \
} while (false)
#define LLINT_CHECK_EXCEPTION() do { \
- if (UNLIKELY(vm.exception())) { \
+ doExceptionFuzzingIfEnabled(exec, throwScope, "LLIntSlowPaths", pc); \
+ if (UNLIKELY(throwScope.exception())) { \
pc = returnToThrow(exec); \
LLINT_END_IMPL(); \
} \
@@ -119,6 +135,14 @@ namespace JSC { namespace LLInt {
LLINT_END_IMPL(); \
} while (false)
+#define LLINT_RETURN_WITH_PC_ADJUSTMENT(value, pcAdjustment) do { \
+ JSValue __r_returnValue = (value); \
+ LLINT_CHECK_EXCEPTION(); \
+ LLINT_OP(1) = __r_returnValue; \
+ pc += (pcAdjustment); \
+ LLINT_END_IMPL(); \
+ } while (false)
+
#define LLINT_RETURN_PROFILED(opcode, value) do { \
JSValue __rp_returnValue = (value); \
LLINT_CHECK_EXCEPTION(); \
@@ -136,23 +160,31 @@ namespace JSC { namespace LLInt {
#define LLINT_CALL_THROW(exec, exceptionToThrow) do { \
ExecState* __ct_exec = (exec); \
- vm.throwException(__ct_exec, exceptionToThrow); \
- LLINT_CALL_END_IMPL(__ct_exec, callToThrow(__ct_exec)); \
+ throwException(__ct_exec, throwScope, exceptionToThrow); \
+ LLINT_CALL_END_IMPL(0, callToThrow(__ct_exec)); \
} while (false)
-#define LLINT_CALL_CHECK_EXCEPTION(exec) do { \
+#define LLINT_CALL_CHECK_EXCEPTION(exec, execCallee) do { \
ExecState* __cce_exec = (exec); \
- if (UNLIKELY(vm.exception())) \
- LLINT_CALL_END_IMPL(__cce_exec, callToThrow(__cce_exec)); \
+ ExecState* __cce_execCallee = (execCallee); \
+ doExceptionFuzzingIfEnabled(__cce_exec, throwScope, "LLIntSlowPaths/call", nullptr); \
+ if (UNLIKELY(throwScope.exception())) \
+ LLINT_CALL_END_IMPL(0, callToThrow(__cce_execCallee)); \
} while (false)
-#define LLINT_CALL_RETURN(exec, callTarget) do { \
+#define LLINT_CALL_RETURN(exec, execCallee, callTarget) do { \
ExecState* __cr_exec = (exec); \
+ ExecState* __cr_execCallee = (execCallee); \
void* __cr_callTarget = (callTarget); \
- LLINT_CALL_CHECK_EXCEPTION(__cr_exec->callerFrame()); \
- LLINT_CALL_END_IMPL(__cr_exec, __cr_callTarget); \
+ LLINT_CALL_CHECK_EXCEPTION(__cr_exec, __cr_execCallee); \
+ LLINT_CALL_END_IMPL(__cr_execCallee, __cr_callTarget); \
} while (false)
+#define LLINT_RETURN_CALLEE_FRAME(execCallee) do { \
+ ExecState* __rcf_exec = (execCallee); \
+ LLINT_RETURN_TWO(pc, __rcf_exec); \
+ } while (false)
+
extern "C" SlowPathReturnType llint_trace_operand(ExecState* exec, Instruction* pc, int fromWhere, int operand)
{
LLINT_BEGIN();
@@ -195,19 +227,20 @@ extern "C" SlowPathReturnType llint_trace_value(ExecState* exec, Instruction* pc
LLINT_SLOW_PATH_DECL(trace_prologue)
{
- dataLogF("%p / %p: in prologue.\n", exec->codeBlock(), exec);
+ dataLogF("%p / %p: in prologue of ", exec->codeBlock(), exec);
+ dataLog(*exec->codeBlock(), "\n");
LLINT_END_IMPL();
}
static void traceFunctionPrologue(ExecState* exec, const char* comment, CodeSpecializationKind kind)
{
- JSFunction* callee = jsCast<JSFunction*>(exec->callee());
+ JSFunction* callee = jsCast<JSFunction*>(exec->jsCallee());
FunctionExecutable* executable = callee->jsExecutable();
CodeBlock* codeBlock = executable->codeBlockFor(kind);
- dataLogF("%p / %p: in %s of function %p, executable %p; numVars = %u, numParameters = %u, numCalleeRegisters = %u, caller = %p.\n",
- codeBlock, exec, comment, callee, executable,
- codeBlock->m_numVars, codeBlock->numParameters(), codeBlock->m_numCalleeRegisters,
- exec->callerFrame());
+ dataLogF("%p / %p: in %s of ", codeBlock, exec, comment);
+ dataLog(*codeBlock);
+ dataLogF(" function %p, executable %p; numVars = %u, numParameters = %u, numCalleeLocals = %u, caller = %p.\n",
+ callee, executable, codeBlock->m_numVars, codeBlock->numParameters(), codeBlock->m_numCalleeLocals, exec->callerFrame());
}
LLINT_SLOW_PATH_DECL(trace_prologue_function_for_call)
@@ -236,12 +269,15 @@ LLINT_SLOW_PATH_DECL(trace_arityCheck_for_construct)
LLINT_SLOW_PATH_DECL(trace)
{
- dataLogF("%p / %p: executing bc#%zu, %s, scope %p, pc = %p\n",
+ dataLogF("%p / %p: executing bc#%zu, %s, pc = %p\n",
exec->codeBlock(),
exec,
static_cast<intptr_t>(pc - exec->codeBlock()->instructions().begin()),
- opcodeNames[exec->vm().interpreter->getOpcodeID(pc[0].u.opcode)],
- exec->scope(), pc);
+ opcodeNames[exec->vm().interpreter->getOpcodeID(pc[0].u.opcode)], pc);
+ if (exec->vm().interpreter->getOpcodeID(pc[0].u.opcode) == op_enter) {
+ dataLogF("Frame will eventually return to %p\n", exec->returnPC().value());
+ *bitwise_cast<volatile char*>(exec->returnPC().value());
+ }
if (exec->vm().interpreter->getOpcodeID(pc[0].u.opcode) == op_ret) {
dataLogF("Will be returning to %p\n", exec->returnPC().value());
dataLogF("The new cfr will be %p\n", exec->callerFrame());
@@ -263,26 +299,44 @@ LLINT_SLOW_PATH_DECL(special_trace)
enum EntryKind { Prologue, ArityCheck };
#if ENABLE(JIT)
-inline bool shouldJIT(ExecState* exec)
+static FunctionWhitelist& ensureGlobalJITWhitelist()
+{
+ static LazyNeverDestroyed<FunctionWhitelist> baselineWhitelist;
+ static std::once_flag initializeWhitelistFlag;
+ std::call_once(initializeWhitelistFlag, [] {
+ const char* functionWhitelistFile = Options::jitWhitelist();
+ baselineWhitelist.construct(functionWhitelistFile);
+ });
+ return baselineWhitelist;
+}
+
+inline bool shouldJIT(ExecState* exec, CodeBlock* codeBlock)
{
+ if (!Options::bytecodeRangeToJITCompile().isInRange(codeBlock->instructionCount())
+ || !ensureGlobalJITWhitelist().contains(codeBlock))
+ return false;
+
// You can modify this to turn off JITting without rebuilding the world.
return exec->vm().canUseJIT();
}
// Returns true if we should try to OSR.
-inline bool jitCompileAndSetHeuristics(CodeBlock* codeBlock, ExecState* exec)
+inline bool jitCompileAndSetHeuristics(CodeBlock* codeBlock, ExecState* exec, unsigned loopOSREntryBytecodeOffset = 0)
{
VM& vm = exec->vm();
DeferGCForAWhile deferGC(vm.heap); // My callers don't set top callframe, so we don't want to GC here at all.
codeBlock->updateAllValueProfilePredictions();
-
+
if (!codeBlock->checkIfJITThresholdReached()) {
+ CODEBLOCK_LOG_EVENT(codeBlock, "delayJITCompile", ("threshold not reached, counter = ", codeBlock->llintExecuteCounter()));
if (Options::verboseOSR())
dataLogF(" JIT threshold should be lifted.\n");
return false;
}
+ JITWorklist::instance()->poll(vm);
+
switch (codeBlock->jitType()) {
case JITCode::BaselineJIT: {
if (Options::verboseOSR())
@@ -291,25 +345,11 @@ inline bool jitCompileAndSetHeuristics(CodeBlock* codeBlock, ExecState* exec)
return true;
}
case JITCode::InterpreterThunk: {
- CompilationResult result = JIT::compile(&vm, codeBlock, JITCompilationCanFail);
- switch (result) {
- case CompilationFailed:
- if (Options::verboseOSR())
- dataLogF(" JIT compilation failed.\n");
- codeBlock->dontJITAnytimeSoon();
- return false;
- case CompilationSuccessful:
- if (Options::verboseOSR())
- dataLogF(" JIT compilation successful.\n");
- codeBlock->install();
- codeBlock->jitSoon();
- return true;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return false;
- }
+ JITWorklist::instance()->compileLater(codeBlock, loopOSREntryBytecodeOffset);
+ return codeBlock->jitType() == JITCode::BaselineJIT;
}
default:
+ dataLog("Unexpected code block in LLInt: ", *codeBlock, "\n");
RELEASE_ASSERT_NOT_REACHED();
return false;
}
@@ -323,17 +363,19 @@ static SlowPathReturnType entryOSR(ExecState* exec, Instruction*, CodeBlock* cod
codeBlock->llintExecuteCounter(), "\n");
}
- if (!shouldJIT(exec)) {
+ if (!shouldJIT(exec, codeBlock)) {
codeBlock->dontJITAnytimeSoon();
- LLINT_RETURN_TWO(0, exec);
+ LLINT_RETURN_TWO(0, 0);
}
if (!jitCompileAndSetHeuristics(codeBlock, exec))
- LLINT_RETURN_TWO(0, exec);
+ LLINT_RETURN_TWO(0, 0);
+
+ CODEBLOCK_LOG_EVENT(codeBlock, "OSR entry", ("in prologue"));
if (kind == Prologue)
- LLINT_RETURN_TWO(codeBlock->jitCode()->executableAddress(), exec);
+ LLINT_RETURN_TWO(codeBlock->jitCode()->executableAddress(), 0);
ASSERT(kind == ArityCheck);
- LLINT_RETURN_TWO(codeBlock->jitCodeWithArityCheck().executableAddress(), exec);
+ LLINT_RETURN_TWO(codeBlock->jitCode()->addressForCall(MustCheckArity).executableAddress(), 0);
}
#else // ENABLE(JIT)
static SlowPathReturnType entryOSR(ExecState* exec, Instruction*, CodeBlock* codeBlock, const char*, EntryKind)
@@ -350,22 +392,22 @@ LLINT_SLOW_PATH_DECL(entry_osr)
LLINT_SLOW_PATH_DECL(entry_osr_function_for_call)
{
- return entryOSR(exec, pc, jsCast<JSFunction*>(exec->callee())->jsExecutable()->codeBlockForCall(), "entry_osr_function_for_call", Prologue);
+ return entryOSR(exec, pc, jsCast<JSFunction*>(exec->jsCallee())->jsExecutable()->codeBlockForCall(), "entry_osr_function_for_call", Prologue);
}
LLINT_SLOW_PATH_DECL(entry_osr_function_for_construct)
{
- return entryOSR(exec, pc, jsCast<JSFunction*>(exec->callee())->jsExecutable()->codeBlockForConstruct(), "entry_osr_function_for_construct", Prologue);
+ return entryOSR(exec, pc, jsCast<JSFunction*>(exec->jsCallee())->jsExecutable()->codeBlockForConstruct(), "entry_osr_function_for_construct", Prologue);
}
LLINT_SLOW_PATH_DECL(entry_osr_function_for_call_arityCheck)
{
- return entryOSR(exec, pc, jsCast<JSFunction*>(exec->callee())->jsExecutable()->codeBlockForCall(), "entry_osr_function_for_call_arityCheck", ArityCheck);
+ return entryOSR(exec, pc, jsCast<JSFunction*>(exec->jsCallee())->jsExecutable()->codeBlockForCall(), "entry_osr_function_for_call_arityCheck", ArityCheck);
}
LLINT_SLOW_PATH_DECL(entry_osr_function_for_construct_arityCheck)
{
- return entryOSR(exec, pc, jsCast<JSFunction*>(exec->callee())->jsExecutable()->codeBlockForConstruct(), "entry_osr_function_for_construct_arityCheck", ArityCheck);
+ return entryOSR(exec, pc, jsCast<JSFunction*>(exec->jsCallee())->jsExecutable()->codeBlockForConstruct(), "entry_osr_function_for_construct_arityCheck", ArityCheck);
}
LLINT_SLOW_PATH_DECL(loop_osr)
@@ -379,14 +421,18 @@ LLINT_SLOW_PATH_DECL(loop_osr)
codeBlock->llintExecuteCounter(), "\n");
}
- if (!shouldJIT(exec)) {
+ unsigned loopOSREntryBytecodeOffset = pc - codeBlock->instructions().begin();
+
+ if (!shouldJIT(exec, codeBlock)) {
codeBlock->dontJITAnytimeSoon();
- LLINT_RETURN_TWO(0, exec);
+ LLINT_RETURN_TWO(0, 0);
}
- if (!jitCompileAndSetHeuristics(codeBlock, exec))
- LLINT_RETURN_TWO(0, exec);
+ if (!jitCompileAndSetHeuristics(codeBlock, exec, loopOSREntryBytecodeOffset))
+ LLINT_RETURN_TWO(0, 0);
+ CODEBLOCK_LOG_EVENT(codeBlock, "osrEntry", ("at bc#", pc - codeBlock->instructions().begin()));
+
ASSERT(codeBlock->jitType() == JITCode::BaselineJIT);
Vector<BytecodeAndMachineOffset> map;
@@ -398,10 +444,11 @@ LLINT_SLOW_PATH_DECL(loop_osr)
void* jumpTarget = codeBlock->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);
ASSERT(jumpTarget);
- LLINT_RETURN_TWO(jumpTarget, exec);
+ LLINT_RETURN_TWO(jumpTarget, exec->topOfFrame());
#else // ENABLE(JIT)
+ UNUSED_PARAM(pc);
codeBlock->dontJITAnytimeSoon();
- LLINT_RETURN_TWO(0, exec);
+ LLINT_RETURN_TWO(0, 0);
#endif // ENABLE(JIT)
}
@@ -416,7 +463,7 @@ LLINT_SLOW_PATH_DECL(replace)
codeBlock->llintExecuteCounter(), "\n");
}
- if (shouldJIT(exec))
+ if (shouldJIT(exec, codeBlock))
jitCompileAndSetHeuristics(codeBlock, exec);
else
codeBlock->dontJITAnytimeSoon();
@@ -429,32 +476,48 @@ LLINT_SLOW_PATH_DECL(replace)
LLINT_SLOW_PATH_DECL(stack_check)
{
- LLINT_BEGIN();
+ VM& vm = exec->vm();
+ auto throwScope = DECLARE_THROW_SCOPE(vm);
+
+ VMEntryFrame* vmEntryFrame = vm.topVMEntryFrame;
+ CallFrame* callerFrame = exec->callerFrame(vmEntryFrame);
+ if (!callerFrame) {
+ callerFrame = exec;
+ vmEntryFrame = vm.topVMEntryFrame;
+ }
+ NativeCallFrameTracerWithRestore tracer(&vm, vmEntryFrame, callerFrame);
+
+ LLINT_SET_PC_FOR_STUBS();
+
#if LLINT_SLOW_PATH_TRACING
dataLogF("Checking stack height with exec = %p.\n", exec);
- dataLogF("CodeBlock = %p.\n", exec->codeBlock());
- dataLogF("Num callee registers = %u.\n", exec->codeBlock()->m_numCalleeRegisters);
+ dataLog("CodeBlock = ", *exec->codeBlock(), "\n");
+ dataLogF("Num callee registers = %u.\n", exec->codeBlock()->m_numCalleeLocals);
dataLogF("Num vars = %u.\n", exec->codeBlock()->m_numVars);
- dataLogF("Current end is at %p.\n", exec->vm().interpreter->stack().end());
+
+ dataLogF("Current OS stack end is at %p.\n", vm.softStackLimit());
+#if !ENABLE(JIT)
+ dataLogF("Current C Loop stack end is at %p.\n", vm.cloopStackLimit());
#endif
- ASSERT(!exec->vm().interpreter->stack().containsAddress(&exec->registers()[virtualRegisterForLocal(exec->codeBlock()->m_numCalleeRegisters).offset()]));
- if (UNLIKELY(!vm.interpreter->stack().grow(&exec->registers()[virtualRegisterForLocal(exec->codeBlock()->m_numCalleeRegisters).offset()]))) {
- exec = exec->callerFrame();
- CommonSlowPaths::interpreterThrowInCaller(exec, createStackOverflowError(exec));
- pc = returnToThrowForThrownException(exec);
- }
- LLINT_END_IMPL();
-}
-LLINT_SLOW_PATH_DECL(slow_path_create_activation)
-{
- LLINT_BEGIN();
-#if LLINT_SLOW_PATH_TRACING
- dataLogF("Creating an activation, exec = %p!\n", exec);
#endif
- JSActivation* activation = JSActivation::create(vm, exec, exec->codeBlock());
- exec->setScope(activation);
- LLINT_RETURN(JSValue(activation));
+    // If the stack check succeeds and we don't need to throw the error, then
+    // we'll return 0 (rather than the frame) as the second value. The prologue
+    // checks for a non-zero second value when determining whether to set the
+    // callFrame or not.
+
+    // For JIT-enabled builds, which use the C stack, the stack is not growable.
+    // Hence, if we get here, we know a stack overflow is imminent. So, just
+    // throw the StackOverflowError unconditionally.
+#if !ENABLE(JIT)
+ ASSERT(!vm.interpreter->cloopStack().containsAddress(exec->topOfFrame()));
+ if (LIKELY(vm.ensureStackCapacityFor(exec->topOfFrame())))
+ LLINT_RETURN_TWO(pc, 0);
+#endif
+
+ ErrorHandlingScope errorScope(vm);
+ throwStackOverflowError(callerFrame, throwScope);
+ pc = returnToThrow(callerFrame);
+ LLINT_RETURN_TWO(pc, exec);
}
LLINT_SLOW_PATH_DECL(slow_path_new_object)
@@ -486,43 +549,110 @@ LLINT_SLOW_PATH_DECL(slow_path_new_regexp)
LLINT_BEGIN();
RegExp* regExp = exec->codeBlock()->regexp(pc[2].u.operand);
if (!regExp->isValid())
- LLINT_THROW(createSyntaxError(exec, "Invalid flag supplied to RegExp constructor."));
+ LLINT_THROW(createSyntaxError(exec, regExp->errorMessage()));
LLINT_RETURN(RegExpObject::create(vm, exec->lexicalGlobalObject()->regExpStructure(), regExp));
}
-LLINT_SLOW_PATH_DECL(slow_path_check_has_instance)
+LLINT_SLOW_PATH_DECL(slow_path_instanceof)
{
LLINT_BEGIN();
-
JSValue value = LLINT_OP_C(2).jsValue();
- JSValue baseVal = LLINT_OP_C(3).jsValue();
- if (baseVal.isObject()) {
- JSObject* baseObject = asObject(baseVal);
- ASSERT(!baseObject->structure()->typeInfo().implementsDefaultHasInstance());
- if (baseObject->structure()->typeInfo().implementsHasInstance()) {
- pc += pc[4].u.operand;
- LLINT_RETURN(jsBoolean(baseObject->methodTable()->customHasInstance(baseObject, exec, value)));
- }
- }
- LLINT_THROW(createInvalidParameterError(exec, "instanceof", baseVal));
+ JSValue proto = LLINT_OP_C(3).jsValue();
+ LLINT_RETURN(jsBoolean(JSObject::defaultHasInstance(exec, value, proto)));
}
-LLINT_SLOW_PATH_DECL(slow_path_instanceof)
+LLINT_SLOW_PATH_DECL(slow_path_instanceof_custom)
{
LLINT_BEGIN();
+
JSValue value = LLINT_OP_C(2).jsValue();
- JSValue proto = LLINT_OP_C(3).jsValue();
- ASSERT(!value.isObject() || !proto.isObject());
- LLINT_RETURN(jsBoolean(JSObject::defaultHasInstance(exec, value, proto)));
+ JSValue constructor = LLINT_OP_C(3).jsValue();
+ JSValue hasInstanceValue = LLINT_OP_C(4).jsValue();
+
+ ASSERT(constructor.isObject());
+ ASSERT(hasInstanceValue != exec->lexicalGlobalObject()->functionProtoHasInstanceSymbolFunction() || !constructor.getObject()->structure()->typeInfo().implementsDefaultHasInstance());
+
+ JSValue result = jsBoolean(constructor.getObject()->hasInstance(exec, value, hasInstanceValue));
+ LLINT_RETURN(result);
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_try_get_by_id)
+{
+ LLINT_BEGIN();
+ CodeBlock* codeBlock = exec->codeBlock();
+ const Identifier& ident = codeBlock->identifier(pc[3].u.operand);
+ JSValue baseValue = LLINT_OP_C(2).jsValue();
+    PropertySlot slot(baseValue, PropertySlot::InternalMethodType::VMInquiry);
+
+ baseValue.getPropertySlot(exec, ident, slot);
+ JSValue result = slot.getPureResult();
+
+ LLINT_RETURN_PROFILED(op_try_get_by_id, result);
+}
+
+static void setupGetByIdPrototypeCache(ExecState* exec, VM& vm, Instruction* pc, JSCell* baseCell, PropertySlot& slot, const Identifier& ident)
+{
+ CodeBlock* codeBlock = exec->codeBlock();
+ Structure* structure = baseCell->structure();
+
+ if (structure->typeInfo().prohibitsPropertyCaching())
+ return;
+
+ if (structure->needImpurePropertyWatchpoint())
+ return;
+
+ if (structure->isDictionary()) {
+ if (structure->hasBeenFlattenedBefore())
+ return;
+ structure->flattenDictionaryStructure(vm, jsCast<JSObject*>(baseCell));
+ }
+
+ ObjectPropertyConditionSet conditions;
+ if (slot.isUnset())
+ conditions = generateConditionsForPropertyMiss(vm, codeBlock, exec, structure, ident.impl());
+ else
+ conditions = generateConditionsForPrototypePropertyHit(vm, codeBlock, exec, structure, slot.slotBase(), ident.impl());
+
+ if (!conditions.isValid())
+ return;
+
+ PropertyOffset offset = invalidOffset;
+ CodeBlock::StructureWatchpointMap& watchpointMap = codeBlock->llintGetByIdWatchpointMap();
+ auto result = watchpointMap.add(structure, Bag<LLIntPrototypeLoadAdaptiveStructureWatchpoint>());
+ for (ObjectPropertyCondition condition : conditions) {
+ if (!condition.isWatchable())
+ return;
+ if (condition.condition().kind() == PropertyCondition::Presence)
+ offset = condition.condition().offset();
+ result.iterator->value.add(condition, pc)->install();
+ }
+ ASSERT((offset == invalidOffset) == slot.isUnset());
+
+ ConcurrentJSLocker locker(codeBlock->m_lock);
+
+ if (slot.isUnset()) {
+ pc[0].u.opcode = LLInt::getOpcode(op_get_by_id_unset);
+ pc[4].u.structureID = structure->id();
+ return;
+ }
+ ASSERT(slot.isValue());
+
+ pc[0].u.opcode = LLInt::getOpcode(op_get_by_id_proto_load);
+ pc[4].u.structureID = structure->id();
+ pc[5].u.operand = offset;
+    // We know that this pointer will remain valid because it will be cleared either by a watchpoint
+    // firing or during GC, when we clear the LLInt caches.
+ pc[6].u.pointer = slot.slotBase();
}
+
LLINT_SLOW_PATH_DECL(slow_path_get_by_id)
{
LLINT_BEGIN();
CodeBlock* codeBlock = exec->codeBlock();
const Identifier& ident = codeBlock->identifier(pc[3].u.operand);
JSValue baseValue = LLINT_OP_C(2).jsValue();
- PropertySlot slot(baseValue);
+    PropertySlot slot(baseValue, PropertySlot::InternalMethodType::Get);
JSValue result = baseValue.get(exec, ident, slot);
LLINT_CHECK_EXCEPTION();
@@ -530,36 +660,43 @@ LLINT_SLOW_PATH_DECL(slow_path_get_by_id)
if (!LLINT_ALWAYS_ACCESS_SLOW
&& baseValue.isCell()
- && slot.isCacheable()
- && slot.slotBase() == baseValue
- && slot.isCacheableValue()) {
-
+ && slot.isCacheable()) {
+
JSCell* baseCell = baseValue.asCell();
Structure* structure = baseCell->structure();
+ if (slot.isValue() && slot.slotBase() == baseValue) {
+ // Start out by clearing out the old cache.
+ pc[0].u.opcode = LLInt::getOpcode(op_get_by_id);
+ pc[4].u.pointer = nullptr; // old structure
+ pc[5].u.pointer = nullptr; // offset
+
+            // Prevent the prototype cache from ever being set up.
+ pc[7].u.operand = 0;
- if (!structure->isUncacheableDictionary()
- && !structure->typeInfo().prohibitsPropertyCaching()) {
- ConcurrentJITLocker locker(codeBlock->m_lock);
-
- pc[4].u.structure.set(
- vm, codeBlock->ownerExecutable(), structure);
- if (isInlineOffset(slot.cachedOffset())) {
- pc[0].u.opcode = LLInt::getOpcode(llint_op_get_by_id);
- pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage();
- } else {
- pc[0].u.opcode = LLInt::getOpcode(llint_op_get_by_id_out_of_line);
- pc[5].u.operand = offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue);
+ if (structure->propertyAccessesAreCacheable()) {
+ vm.heap.writeBarrier(codeBlock);
+
+ ConcurrentJSLocker locker(codeBlock->m_lock);
+
+ pc[4].u.structureID = structure->id();
+ pc[5].u.operand = slot.cachedOffset();
}
- }
- }
+ } else if (UNLIKELY(pc[7].u.operand && (slot.isValue() || slot.isUnset()))) {
+ ASSERT(slot.slotBase() != baseValue);
- if (!LLINT_ALWAYS_ACCESS_SLOW
+ if (!(--pc[7].u.operand))
+ setupGetByIdPrototypeCache(exec, vm, pc, baseCell, slot, ident);
+ }
+ } else if (!LLINT_ALWAYS_ACCESS_SLOW
&& isJSArray(baseValue)
&& ident == exec->propertyNames().length) {
- pc[0].u.opcode = LLInt::getOpcode(llint_op_get_array_length);
+ pc[0].u.opcode = LLInt::getOpcode(op_get_array_length);
ArrayProfile* arrayProfile = codeBlock->getOrAddArrayProfile(pc - codeBlock->instructions().begin());
arrayProfile->observeStructure(baseValue.asCell()->structure());
pc[4].u.arrayProfile = arrayProfile;
+
+ // Prevent the prototype cache from ever happening.
+        // Prevent the prototype cache from ever being set up.
}
pc[OPCODE_LENGTH(op_get_by_id) - 1].u.profile->m_buckets[0] = JSValue::encode(result);
@@ -572,7 +709,7 @@ LLINT_SLOW_PATH_DECL(slow_path_get_arguments_length)
CodeBlock* codeBlock = exec->codeBlock();
const Identifier& ident = codeBlock->identifier(pc[3].u.operand);
JSValue baseValue = LLINT_OP(2).jsValue();
- PropertySlot slot(baseValue);
+ PropertySlot slot(baseValue, PropertySlot::InternalMethodType::Get);
LLINT_RETURN(baseValue.get(exec, ident, slot));
}
@@ -584,15 +721,23 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_id)
JSValue baseValue = LLINT_OP_C(1).jsValue();
PutPropertySlot slot(baseValue, codeBlock->isStrictMode(), codeBlock->putByIdContext());
- if (pc[8].u.operand)
+ if (pc[8].u.putByIdFlags & PutByIdIsDirect)
asObject(baseValue)->putDirect(vm, ident, LLINT_OP_C(3).jsValue(), slot);
else
- baseValue.put(exec, ident, LLINT_OP_C(3).jsValue(), slot);
+ baseValue.putInline(exec, ident, LLINT_OP_C(3).jsValue(), slot);
LLINT_CHECK_EXCEPTION();
if (!LLINT_ALWAYS_ACCESS_SLOW
&& baseValue.isCell()
- && slot.isCacheable()) {
+ && slot.isCacheablePut()) {
+
+ // Start out by clearing out the old cache.
+ pc[4].u.pointer = nullptr; // old structure
+ pc[5].u.pointer = nullptr; // offset
+ pc[6].u.pointer = nullptr; // new structure
+ pc[7].u.pointer = nullptr; // structure chain
+ pc[8].u.putByIdFlags =
+ static_cast<PutByIdFlags>(pc[8].u.putByIdFlags & PutByIdPersistentFlagsMask);
JSCell* baseCell = baseValue.asCell();
Structure* structure = baseCell->structure();
@@ -600,55 +745,38 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_id)
if (!structure->isUncacheableDictionary()
&& !structure->typeInfo().prohibitsPropertyCaching()
&& baseCell == slot.base()) {
+
+ vm.heap.writeBarrier(codeBlock);
if (slot.type() == PutPropertySlot::NewProperty) {
- GCSafeConcurrentJITLocker locker(codeBlock->m_lock, vm.heap);
+ GCSafeConcurrentJSLocker locker(codeBlock->m_lock, vm.heap);
if (!structure->isDictionary() && structure->previousID()->outOfLineCapacity() == structure->outOfLineCapacity()) {
ASSERT(structure->previousID()->transitionWatchpointSetHasBeenInvalidated());
-
- // This is needed because some of the methods we call
- // below may GC.
- pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id);
- if (normalizePrototypeChain(exec, baseCell) != InvalidPrototypeChain) {
+ if (normalizePrototypeChain(exec, structure) != InvalidPrototypeChain) {
ASSERT(structure->previousID()->isObject());
- pc[4].u.structure.set(
- vm, codeBlock->ownerExecutable(), structure->previousID());
- if (isInlineOffset(slot.cachedOffset()))
- pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage();
- else
- pc[5].u.operand = offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue);
- pc[6].u.structure.set(
- vm, codeBlock->ownerExecutable(), structure);
- StructureChain* chain = structure->prototypeChain(exec);
- ASSERT(chain);
- pc[7].u.structureChain.set(
- vm, codeBlock->ownerExecutable(), chain);
-
- if (pc[8].u.operand) {
- if (isInlineOffset(slot.cachedOffset()))
- pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_direct);
- else
- pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_direct_out_of_line);
- } else {
- if (isInlineOffset(slot.cachedOffset()))
- pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_normal);
- else
- pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_normal_out_of_line);
+ pc[4].u.structureID = structure->previousID()->id();
+ pc[5].u.operand = slot.cachedOffset();
+ pc[6].u.structureID = structure->id();
+ if (!(pc[8].u.putByIdFlags & PutByIdIsDirect)) {
+ StructureChain* chain = structure->prototypeChain(exec);
+ ASSERT(chain);
+ pc[7].u.structureChain.set(
+ vm, codeBlock, chain);
}
+ pc[8].u.putByIdFlags = static_cast<PutByIdFlags>(
+ pc[8].u.putByIdFlags |
+ structure->inferredTypeDescriptorFor(ident.impl()).putByIdFlags());
}
}
} else {
- pc[4].u.structure.set(
- vm, codeBlock->ownerExecutable(), structure);
- if (isInlineOffset(slot.cachedOffset())) {
- pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id);
- pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage();
- } else {
- pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_out_of_line);
- pc[5].u.operand = offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue);
- }
+ structure->didCachePropertyReplacement(vm, slot.cachedOffset());
+ pc[4].u.structureID = structure->id();
+ pc[5].u.operand = slot.cachedOffset();
+ pc[8].u.putByIdFlags = static_cast<PutByIdFlags>(
+ pc[8].u.putByIdFlags |
+ structure->inferredTypeDescriptorFor(ident.impl()).putByIdFlags());
}
}
}
@@ -661,59 +789,48 @@ LLINT_SLOW_PATH_DECL(slow_path_del_by_id)
LLINT_BEGIN();
CodeBlock* codeBlock = exec->codeBlock();
JSObject* baseObject = LLINT_OP_C(2).jsValue().toObject(exec);
+ LLINT_CHECK_EXCEPTION();
bool couldDelete = baseObject->methodTable()->deleteProperty(baseObject, exec, codeBlock->identifier(pc[3].u.operand));
LLINT_CHECK_EXCEPTION();
if (!couldDelete && codeBlock->isStrictMode())
- LLINT_THROW(createTypeError(exec, "Unable to delete property."));
+ LLINT_THROW(createTypeError(exec, UnableToDeletePropertyError));
LLINT_RETURN(jsBoolean(couldDelete));
}
-inline JSValue getByVal(ExecState* exec, JSValue baseValue, JSValue subscript)
+static ALWAYS_INLINE JSValue getByVal(VM& vm, ExecState* exec, JSValue baseValue, JSValue subscript)
{
+ auto scope = DECLARE_THROW_SCOPE(vm);
+
if (LIKELY(baseValue.isCell() && subscript.isString())) {
- if (JSValue result = baseValue.asCell()->fastGetOwnProperty(exec, asString(subscript)->value(exec)))
- return result;
+ Structure& structure = *baseValue.asCell()->structure(vm);
+ if (JSCell::canUseFastGetOwnProperty(structure)) {
+ if (RefPtr<AtomicStringImpl> existingAtomicString = asString(subscript)->toExistingAtomicString(exec)) {
+ if (JSValue result = baseValue.asCell()->fastGetOwnProperty(vm, structure, existingAtomicString.get()))
+ return result;
+ }
+ }
}
if (subscript.isUInt32()) {
uint32_t i = subscript.asUInt32();
if (isJSString(baseValue) && asString(baseValue)->canGetIndex(i))
return asString(baseValue)->getIndex(exec, i);
-
+ scope.release();
return baseValue.get(exec, i);
}
- if (isName(subscript))
- return baseValue.get(exec, jsCast<NameInstance*>(subscript.asCell())->privateName());
-
- Identifier property(exec, subscript.toString(exec)->value(exec));
+ baseValue.requireObjectCoercible(exec);
+ RETURN_IF_EXCEPTION(scope, JSValue());
+ auto property = subscript.toPropertyKey(exec);
+ RETURN_IF_EXCEPTION(scope, JSValue());
+ scope.release();
return baseValue.get(exec, property);
}
LLINT_SLOW_PATH_DECL(slow_path_get_by_val)
{
LLINT_BEGIN();
- LLINT_RETURN_PROFILED(op_get_by_val, getByVal(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue()));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_get_argument_by_val)
-{
- LLINT_BEGIN();
- JSValue arguments = LLINT_OP(2).jsValue();
- if (!arguments) {
- arguments = Arguments::create(vm, exec);
- LLINT_CHECK_EXCEPTION();
- LLINT_OP(2) = arguments;
- exec->uncheckedR(unmodifiedArgumentsRegister(VirtualRegister(pc[2].u.operand)).offset()) = arguments;
- }
-
- LLINT_RETURN_PROFILED(op_get_argument_by_val, getByVal(exec, arguments, LLINT_OP_C(3).jsValue()));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_get_by_pname)
-{
- LLINT_BEGIN();
- LLINT_RETURN(getByVal(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue()));
+ LLINT_RETURN_PROFILED(op_get_by_val, getByVal(vm, exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue()));
}
LLINT_SLOW_PATH_DECL(slow_path_put_by_val)
@@ -723,6 +840,7 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_val)
JSValue baseValue = LLINT_OP_C(1).jsValue();
JSValue subscript = LLINT_OP_C(2).jsValue();
JSValue value = LLINT_OP_C(3).jsValue();
+ bool isStrictMode = exec->codeBlock()->isStrictMode();
if (LIKELY(subscript.isUInt32())) {
uint32_t i = subscript.asUInt32();
@@ -731,22 +849,16 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_val)
if (object->canSetIndexQuickly(i))
object->setIndexQuickly(vm, i, value);
else
- object->methodTable()->putByIndex(object, exec, i, value, exec->codeBlock()->isStrictMode());
+ object->methodTable()->putByIndex(object, exec, i, value, isStrictMode);
LLINT_END();
}
- baseValue.putByIndex(exec, i, value, exec->codeBlock()->isStrictMode());
- LLINT_END();
- }
-
- if (isName(subscript)) {
- PutPropertySlot slot(baseValue, exec->codeBlock()->isStrictMode());
- baseValue.put(exec, jsCast<NameInstance*>(subscript.asCell())->privateName(), value, slot);
+ baseValue.putByIndex(exec, i, value, isStrictMode);
LLINT_END();
}
- Identifier property(exec, subscript.toString(exec)->value(exec));
+ auto property = subscript.toPropertyKey(exec);
LLINT_CHECK_EXCEPTION();
- PutPropertySlot slot(baseValue, exec->codeBlock()->isStrictMode());
+ PutPropertySlot slot(baseValue, isStrictMode);
baseValue.put(exec, property, value, slot);
LLINT_END();
}
@@ -760,19 +872,34 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_val_direct)
JSValue value = LLINT_OP_C(3).jsValue();
RELEASE_ASSERT(baseValue.isObject());
JSObject* baseObject = asObject(baseValue);
+ bool isStrictMode = exec->codeBlock()->isStrictMode();
if (LIKELY(subscript.isUInt32())) {
- uint32_t i = subscript.asUInt32();
- baseObject->putDirectIndex(exec, i, value);
- } else if (isName(subscript)) {
- PutPropertySlot slot(baseObject, exec->codeBlock()->isStrictMode());
- baseObject->putDirect(exec->vm(), jsCast<NameInstance*>(subscript.asCell())->privateName(), value, slot);
- } else {
- Identifier property(exec, subscript.toString(exec)->value(exec));
- if (!exec->vm().exception()) { // Don't put to an object if toString threw an exception.
- PutPropertySlot slot(baseObject, exec->codeBlock()->isStrictMode());
- baseObject->putDirect(exec->vm(), property, value, slot);
+ // Despite its name, JSValue::isUInt32 will return true only for positive boxed int32_t; all those values are valid array indices.
+ ASSERT(isIndex(subscript.asUInt32()));
+ baseObject->putDirectIndex(exec, subscript.asUInt32(), value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow);
+ LLINT_END();
+ }
+
+ if (subscript.isDouble()) {
+ double subscriptAsDouble = subscript.asDouble();
+ uint32_t subscriptAsUInt32 = static_cast<uint32_t>(subscriptAsDouble);
+ if (subscriptAsDouble == subscriptAsUInt32 && isIndex(subscriptAsUInt32)) {
+ baseObject->putDirectIndex(exec, subscriptAsUInt32, value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow);
+ LLINT_END();
}
}
+
+ // Don't put to an object if toString threw an exception.
+ auto property = subscript.toPropertyKey(exec);
+ if (UNLIKELY(throwScope.exception()))
+ LLINT_END();
+
+ if (std::optional<uint32_t> index = parseIndex(property))
+ baseObject->putDirectIndex(exec, index.value(), value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow);
+ else {
+ PutPropertySlot slot(baseObject, isStrictMode);
+ baseObject->putDirect(exec->vm(), property, value, slot);
+ }
LLINT_END();
}
@@ -781,7 +908,8 @@ LLINT_SLOW_PATH_DECL(slow_path_del_by_val)
LLINT_BEGIN();
JSValue baseValue = LLINT_OP_C(2).jsValue();
JSObject* baseObject = baseValue.toObject(exec);
-
+ LLINT_CHECK_EXCEPTION();
+
JSValue subscript = LLINT_OP_C(3).jsValue();
bool couldDelete;
@@ -789,17 +917,15 @@ LLINT_SLOW_PATH_DECL(slow_path_del_by_val)
uint32_t i;
if (subscript.getUInt32(i))
couldDelete = baseObject->methodTable()->deletePropertyByIndex(baseObject, exec, i);
- else if (isName(subscript))
- couldDelete = baseObject->methodTable()->deleteProperty(baseObject, exec, jsCast<NameInstance*>(subscript.asCell())->privateName());
else {
LLINT_CHECK_EXCEPTION();
- Identifier property(exec, subscript.toString(exec)->value(exec));
+ auto property = subscript.toPropertyKey(exec);
LLINT_CHECK_EXCEPTION();
couldDelete = baseObject->methodTable()->deleteProperty(baseObject, exec, property);
}
if (!couldDelete && exec->codeBlock()->isStrictMode())
- LLINT_THROW(createTypeError(exec, "Unable to delete property."));
+ LLINT_THROW(createTypeError(exec, UnableToDeletePropertyError));
LLINT_RETURN(jsBoolean(couldDelete));
}
@@ -813,29 +939,97 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_index)
LLINT_END();
}
-LLINT_SLOW_PATH_DECL(slow_path_put_getter_setter)
+LLINT_SLOW_PATH_DECL(slow_path_put_getter_by_id)
+{
+ LLINT_BEGIN();
+ ASSERT(LLINT_OP(1).jsValue().isObject());
+ JSObject* baseObj = asObject(LLINT_OP(1).jsValue());
+
+ unsigned options = pc[3].u.operand;
+
+ JSValue getter = LLINT_OP(4).jsValue();
+ ASSERT(getter.isObject());
+
+ baseObj->putGetter(exec, exec->codeBlock()->identifier(pc[2].u.operand), asObject(getter), options);
+ LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_put_setter_by_id)
+{
+ LLINT_BEGIN();
+ ASSERT(LLINT_OP(1).jsValue().isObject());
+ JSObject* baseObj = asObject(LLINT_OP(1).jsValue());
+
+ unsigned options = pc[3].u.operand;
+
+ JSValue setter = LLINT_OP(4).jsValue();
+ ASSERT(setter.isObject());
+
+ baseObj->putSetter(exec, exec->codeBlock()->identifier(pc[2].u.operand), asObject(setter), options);
+ LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_put_getter_setter_by_id)
{
LLINT_BEGIN();
ASSERT(LLINT_OP(1).jsValue().isObject());
JSObject* baseObj = asObject(LLINT_OP(1).jsValue());
- GetterSetter* accessor = GetterSetter::create(vm);
+ GetterSetter* accessor = GetterSetter::create(vm, exec->lexicalGlobalObject());
LLINT_CHECK_EXCEPTION();
-
- JSValue getter = LLINT_OP(3).jsValue();
- JSValue setter = LLINT_OP(4).jsValue();
+
+ JSValue getter = LLINT_OP(4).jsValue();
+ JSValue setter = LLINT_OP(5).jsValue();
ASSERT(getter.isObject() || getter.isUndefined());
ASSERT(setter.isObject() || setter.isUndefined());
ASSERT(getter.isObject() || setter.isObject());
if (!getter.isUndefined())
- accessor->setGetter(vm, asObject(getter));
+ accessor->setGetter(vm, exec->lexicalGlobalObject(), asObject(getter));
if (!setter.isUndefined())
- accessor->setSetter(vm, asObject(setter));
+ accessor->setSetter(vm, exec->lexicalGlobalObject(), asObject(setter));
baseObj->putDirectAccessor(
exec,
exec->codeBlock()->identifier(pc[2].u.operand),
- accessor, Accessor);
+ accessor, pc[3].u.operand);
+ LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_put_getter_by_val)
+{
+ LLINT_BEGIN();
+ ASSERT(LLINT_OP(1).jsValue().isObject());
+ JSObject* baseObj = asObject(LLINT_OP(1).jsValue());
+ JSValue subscript = LLINT_OP_C(2).jsValue();
+
+ unsigned options = pc[3].u.operand;
+
+ JSValue getter = LLINT_OP(4).jsValue();
+ ASSERT(getter.isObject());
+
+ auto property = subscript.toPropertyKey(exec);
+ LLINT_CHECK_EXCEPTION();
+
+ baseObj->putGetter(exec, property, asObject(getter), options);
+ LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_put_setter_by_val)
+{
+ LLINT_BEGIN();
+ ASSERT(LLINT_OP(1).jsValue().isObject());
+ JSObject* baseObj = asObject(LLINT_OP(1).jsValue());
+ JSValue subscript = LLINT_OP_C(2).jsValue();
+
+ unsigned options = pc[3].u.operand;
+
+ JSValue setter = LLINT_OP(4).jsValue();
+ ASSERT(setter.isObject());
+
+ auto property = subscript.toPropertyKey(exec);
+ LLINT_CHECK_EXCEPTION();
+
+ baseObj->putSetter(exec, property, asObject(setter), options);
LLINT_END();
}
@@ -947,23 +1141,75 @@ LLINT_SLOW_PATH_DECL(slow_path_new_func)
{
LLINT_BEGIN();
CodeBlock* codeBlock = exec->codeBlock();
- ASSERT(codeBlock->codeType() != FunctionCode
- || !codeBlock->needsFullScopeChain()
- || exec->uncheckedR(codeBlock->activationRegister().offset()).jsValue());
+ JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope();
#if LLINT_SLOW_PATH_TRACING
dataLogF("Creating function!\n");
#endif
- LLINT_RETURN(JSFunction::create(vm, codeBlock->functionDecl(pc[2].u.operand), exec->scope()));
+ LLINT_RETURN(JSFunction::create(vm, codeBlock->functionDecl(pc[3].u.operand), scope));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_new_generator_func)
+{
+ LLINT_BEGIN();
+ CodeBlock* codeBlock = exec->codeBlock();
+ JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope();
+#if LLINT_SLOW_PATH_TRACING
+    dataLogF("Creating generator function!\n");
+#endif
+ LLINT_RETURN(JSGeneratorFunction::create(vm, codeBlock->functionDecl(pc[3].u.operand), scope));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_new_async_func)
+{
+ LLINT_BEGIN();
+ CodeBlock* codeBlock = exec->codeBlock();
+ JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope();
+#if LLINT_SLOW_PATH_TRACING
+ dataLogF("Creating async function!\n");
+#endif
+ LLINT_RETURN(JSAsyncFunction::create(vm, codeBlock->functionDecl(pc[3].u.operand), scope));
}
LLINT_SLOW_PATH_DECL(slow_path_new_func_exp)
{
LLINT_BEGIN();
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope();
+ FunctionExecutable* executable = codeBlock->functionExpr(pc[3].u.operand);
+
+ LLINT_RETURN(JSFunction::create(vm, executable, scope));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_new_generator_func_exp)
+{
+ LLINT_BEGIN();
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope();
+ FunctionExecutable* executable = codeBlock->functionExpr(pc[3].u.operand);
+
+ LLINT_RETURN(JSGeneratorFunction::create(vm, executable, scope));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_new_async_func_exp)
+{
+ LLINT_BEGIN();
+
CodeBlock* codeBlock = exec->codeBlock();
- FunctionExecutable* function = codeBlock->functionExpr(pc[2].u.operand);
- JSFunction* func = JSFunction::create(vm, function, exec->scope());
+ JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope();
+ FunctionExecutable* executable = codeBlock->functionExpr(pc[3].u.operand);
- LLINT_RETURN(func);
+ LLINT_RETURN(JSAsyncFunction::create(vm, executable, scope));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_set_function_name)
+{
+ LLINT_BEGIN();
+ JSFunction* func = jsCast<JSFunction*>(LLINT_OP(1).Register::unboxedCell());
+ JSValue name = LLINT_OP_C(2).Register::jsValue();
+ func->setFunctionName(exec, name);
+ LLINT_END();
}
static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc, JSValue callee, CodeSpecializationKind kind)
@@ -976,8 +1222,8 @@ static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc,
ExecState* exec = execCallee->callerFrame();
VM& vm = exec->vm();
+ auto throwScope = DECLARE_THROW_SCOPE(vm);
- execCallee->setScope(exec->scope());
execCallee->setCodeBlock(0);
execCallee->clearReturnPC();
@@ -985,21 +1231,21 @@ static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc,
CallData callData;
CallType callType = getCallData(callee, callData);
- ASSERT(callType != CallTypeJS);
+ ASSERT(callType != CallType::JS);
- if (callType == CallTypeHost) {
+ if (callType == CallType::Host) {
NativeCallFrameTracer tracer(&vm, execCallee);
execCallee->setCallee(asObject(callee));
vm.hostCallReturnValue = JSValue::decode(callData.native.function(execCallee));
- LLINT_CALL_RETURN(execCallee, LLInt::getCodePtr(getHostCallReturnValue));
+ LLINT_CALL_RETURN(execCallee, execCallee, LLInt::getCodePtr(getHostCallReturnValue));
}
#if LLINT_SLOW_PATH_TRACING
dataLog("Call callee is not a function: ", callee, "\n");
#endif
- ASSERT(callType == CallTypeNone);
+ ASSERT(callType == CallType::None);
LLINT_CALL_THROW(exec, createNotAFunctionError(exec, callee));
}
@@ -1008,74 +1254,85 @@ static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc,
ConstructData constructData;
ConstructType constructType = getConstructData(callee, constructData);
- ASSERT(constructType != ConstructTypeJS);
+ ASSERT(constructType != ConstructType::JS);
- if (constructType == ConstructTypeHost) {
+ if (constructType == ConstructType::Host) {
NativeCallFrameTracer tracer(&vm, execCallee);
execCallee->setCallee(asObject(callee));
vm.hostCallReturnValue = JSValue::decode(constructData.native.function(execCallee));
- LLINT_CALL_RETURN(execCallee, LLInt::getCodePtr(getHostCallReturnValue));
+ LLINT_CALL_RETURN(execCallee, execCallee, LLInt::getCodePtr(getHostCallReturnValue));
}
#if LLINT_SLOW_PATH_TRACING
dataLog("Constructor callee is not a function: ", callee, "\n");
#endif
- ASSERT(constructType == ConstructTypeNone);
+ ASSERT(constructType == ConstructType::None);
LLINT_CALL_THROW(exec, createNotAConstructorError(exec, callee));
}
inline SlowPathReturnType setUpCall(ExecState* execCallee, Instruction* pc, CodeSpecializationKind kind, JSValue calleeAsValue, LLIntCallLinkInfo* callLinkInfo = 0)
{
+ ExecState* exec = execCallee->callerFrame();
+ VM& vm = exec->vm();
+ auto throwScope = DECLARE_THROW_SCOPE(vm);
+
#if LLINT_SLOW_PATH_TRACING
- dataLogF("Performing call with recorded PC = %p\n", execCallee->callerFrame()->currentVPC());
+ dataLogF("Performing call with recorded PC = %p\n", exec->currentVPC());
#endif
-
+
JSCell* calleeAsFunctionCell = getJSFunction(calleeAsValue);
- if (!calleeAsFunctionCell)
+ if (!calleeAsFunctionCell) {
+ throwScope.release();
return handleHostCall(execCallee, pc, calleeAsValue, kind);
-
+ }
JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell);
JSScope* scope = callee->scopeUnchecked();
- VM& vm = *scope->vm();
- execCallee->setScope(scope);
ExecutableBase* executable = callee->executable();
-
+
MacroAssemblerCodePtr codePtr;
CodeBlock* codeBlock = 0;
- if (executable->isHostFunction())
- codePtr = executable->hostCodeEntryFor(kind);
- else {
+ if (executable->isHostFunction()) {
+ codePtr = executable->entrypointFor(kind, MustCheckArity);
+ } else {
FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
- JSObject* error = functionExecutable->prepareForExecution(execCallee, callee->scope(), kind);
- if (error)
- LLINT_CALL_THROW(execCallee->callerFrame(), error);
- codeBlock = functionExecutable->codeBlockFor(kind);
+
+ if (!isCall(kind) && functionExecutable->constructAbility() == ConstructAbility::CannotConstruct)
+ LLINT_CALL_THROW(exec, createNotAConstructorError(exec, callee));
+
+ CodeBlock** codeBlockSlot = execCallee->addressOfCodeBlock();
+ JSObject* error = functionExecutable->prepareForExecution<FunctionExecutable>(vm, callee, scope, kind, *codeBlockSlot);
+ ASSERT(throwScope.exception() == error);
+ if (UNLIKELY(error))
+ LLINT_CALL_THROW(exec, error);
+ codeBlock = *codeBlockSlot;
ASSERT(codeBlock);
+ ArityCheckMode arity;
if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()))
- codePtr = functionExecutable->jsCodeWithArityCheckEntryFor(kind);
+ arity = MustCheckArity;
else
- codePtr = functionExecutable->jsCodeEntryFor(kind);
+ arity = ArityCheckNotRequired;
+ codePtr = functionExecutable->entrypointFor(kind, arity);
}
+
+ ASSERT(!!codePtr);
if (!LLINT_ALWAYS_ACCESS_SLOW && callLinkInfo) {
- ExecState* execCaller = execCallee->callerFrame();
-
- CodeBlock* callerCodeBlock = execCaller->codeBlock();
+ CodeBlock* callerCodeBlock = exec->codeBlock();
- ConcurrentJITLocker locker(callerCodeBlock->m_lock);
+ ConcurrentJSLocker locker(callerCodeBlock->m_lock);
if (callLinkInfo->isOnList())
callLinkInfo->remove();
- callLinkInfo->callee.set(vm, callerCodeBlock->ownerExecutable(), callee);
- callLinkInfo->lastSeenCallee.set(vm, callerCodeBlock->ownerExecutable(), callee);
+ callLinkInfo->callee.set(vm, callerCodeBlock, callee);
+ callLinkInfo->lastSeenCallee.set(vm, callerCodeBlock, callee);
callLinkInfo->machineCodeTarget = codePtr;
if (codeBlock)
- codeBlock->linkIncomingCall(execCaller, callLinkInfo);
+ codeBlock->linkIncomingCall(exec, callLinkInfo);
}
- LLINT_CALL_RETURN(execCallee, codePtr.executableAddress());
+ LLINT_CALL_RETURN(exec, execCallee, codePtr.executableAddress());
}
inline SlowPathReturnType genericCall(ExecState* exec, Instruction* pc, CodeSpecializationKind kind)
@@ -1091,7 +1348,7 @@ inline SlowPathReturnType genericCall(ExecState* exec, Instruction* pc, CodeSpec
ExecState* execCallee = exec - pc[4].u.operand;
execCallee->setArgumentCountIncludingThis(pc[3].u.operand);
- execCallee->uncheckedR(JSStack::Callee) = calleeAsValue;
+ execCallee->uncheckedR(CallFrameSlot::callee) = calleeAsValue;
execCallee->setCallerFrame(exec);
ASSERT(pc[5].u.callLinkInfo);
@@ -1101,51 +1358,100 @@ inline SlowPathReturnType genericCall(ExecState* exec, Instruction* pc, CodeSpec
LLINT_SLOW_PATH_DECL(slow_path_call)
{
LLINT_BEGIN_NO_SET_PC();
+ throwScope.release();
return genericCall(exec, pc, CodeForCall);
}
LLINT_SLOW_PATH_DECL(slow_path_construct)
{
LLINT_BEGIN_NO_SET_PC();
+ throwScope.release();
return genericCall(exec, pc, CodeForConstruct);
}
-LLINT_SLOW_PATH_DECL(slow_path_size_and_alloc_frame_for_varargs)
+LLINT_SLOW_PATH_DECL(slow_path_size_frame_for_varargs)
{
LLINT_BEGIN();
// This needs to:
// - Set up a call frame while respecting the variable arguments.
- ExecState* execCallee = sizeAndAllocFrameForVarargs(exec, &vm.interpreter->stack(),
- LLINT_OP_C(4).jsValue(), pc[5].u.operand);
- LLINT_CALL_CHECK_EXCEPTION(exec);
+ unsigned numUsedStackSlots = -pc[5].u.operand;
+ unsigned length = sizeFrameForVarargs(exec, vm,
+ LLINT_OP_C(4).jsValue(), numUsedStackSlots, pc[6].u.operand);
+ LLINT_CALL_CHECK_EXCEPTION(exec, exec);
+ ExecState* execCallee = calleeFrameForVarargs(exec, numUsedStackSlots, length + 1);
+ vm.varargsLength = length;
vm.newCallFrameReturnValue = execCallee;
- LLINT_END();
+ LLINT_RETURN_CALLEE_FRAME(execCallee);
}
-LLINT_SLOW_PATH_DECL(slow_path_call_varargs)
+LLINT_SLOW_PATH_DECL(slow_path_size_frame_for_forward_arguments)
+{
+ LLINT_BEGIN();
+ // This needs to:
+ // - Set up a call frame with the same arguments as the current frame.
+
+ unsigned numUsedStackSlots = -pc[5].u.operand;
+
+ unsigned arguments = sizeFrameForForwardArguments(exec, vm, numUsedStackSlots);
+ LLINT_CALL_CHECK_EXCEPTION(exec, exec);
+
+ ExecState* execCallee = calleeFrameForVarargs(exec, numUsedStackSlots, arguments + 1);
+
+ vm.varargsLength = arguments;
+ vm.newCallFrameReturnValue = execCallee;
+
+ LLINT_RETURN_CALLEE_FRAME(execCallee);
+}
+
+enum class SetArgumentsWith {
+ Object,
+ CurrentArguments
+};
+
+inline SlowPathReturnType varargsSetup(ExecState* exec, Instruction* pc, CodeSpecializationKind kind, SetArgumentsWith set)
{
LLINT_BEGIN_NO_SET_PC();
// This needs to:
// - Figure out what to call and compile it if necessary.
// - Return a tuple of machine code address to call and the new call frame.
-
+
JSValue calleeAsValue = LLINT_OP_C(2).jsValue();
-
+
ExecState* execCallee = vm.newCallFrameReturnValue;
- loadVarargs(exec, execCallee, LLINT_OP_C(3).jsValue(), LLINT_OP_C(4).jsValue());
- LLINT_CALL_CHECK_EXCEPTION(exec);
-
- execCallee->uncheckedR(JSStack::Callee) = calleeAsValue;
+ if (set == SetArgumentsWith::Object) {
+ setupVarargsFrameAndSetThis(exec, execCallee, LLINT_OP_C(3).jsValue(), LLINT_OP_C(4).jsValue(), pc[6].u.operand, vm.varargsLength);
+ LLINT_CALL_CHECK_EXCEPTION(exec, exec);
+ } else
+ setupForwardArgumentsFrameAndSetThis(exec, execCallee, LLINT_OP_C(3).jsValue(), vm.varargsLength);
+
execCallee->setCallerFrame(exec);
+ execCallee->uncheckedR(CallFrameSlot::callee) = calleeAsValue;
exec->setCurrentVPC(pc);
-
- return setUpCall(execCallee, pc, CodeForCall, calleeAsValue);
+
+ throwScope.release();
+ return setUpCall(execCallee, pc, kind, calleeAsValue);
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_call_varargs)
+{
+ return varargsSetup(exec, pc, CodeForCall, SetArgumentsWith::Object);
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_tail_call_forward_arguments)
+{
+ return varargsSetup(exec, pc, CodeForCall, SetArgumentsWith::CurrentArguments);
}
+LLINT_SLOW_PATH_DECL(slow_path_construct_varargs)
+{
+ return varargsSetup(exec, pc, CodeForConstruct, SetArgumentsWith::Object);
+}
+
+
LLINT_SLOW_PATH_DECL(slow_path_call_eval)
{
LLINT_BEGIN_NO_SET_PC();
@@ -1155,37 +1461,18 @@ LLINT_SLOW_PATH_DECL(slow_path_call_eval)
execCallee->setArgumentCountIncludingThis(pc[3].u.operand);
execCallee->setCallerFrame(exec);
- execCallee->uncheckedR(JSStack::Callee) = calleeAsValue;
- execCallee->setScope(exec->scope());
+ execCallee->uncheckedR(CallFrameSlot::callee) = calleeAsValue;
execCallee->setReturnPC(LLInt::getCodePtr(llint_generic_return_point));
execCallee->setCodeBlock(0);
exec->setCurrentVPC(pc);
- if (!isHostFunction(calleeAsValue, globalFuncEval))
+ if (!isHostFunction(calleeAsValue, globalFuncEval)) {
+ throwScope.release();
return setUpCall(execCallee, pc, CodeForCall, calleeAsValue);
+ }
vm.hostCallReturnValue = eval(execCallee);
- LLINT_CALL_RETURN(execCallee, LLInt::getCodePtr(getHostCallReturnValue));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_tear_off_activation)
-{
- LLINT_BEGIN();
- ASSERT(exec->codeBlock()->needsFullScopeChain());
- jsCast<JSActivation*>(LLINT_OP(1).jsValue())->tearOff(vm);
- LLINT_END();
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_tear_off_arguments)
-{
- LLINT_BEGIN();
- ASSERT(exec->codeBlock()->usesArguments());
- Arguments* arguments = jsCast<Arguments*>(exec->uncheckedR(unmodifiedArgumentsRegister(VirtualRegister(pc[1].u.operand)).offset()).jsValue());
- if (JSValue activationValue = LLINT_OP_C(2).jsValue())
- arguments->didTearOffActivation(exec, jsCast<JSActivation*>(activationValue));
- else
- arguments->tearOff(exec);
- LLINT_END();
+ LLINT_CALL_RETURN(exec, execCallee, LLInt::getCodePtr(getHostCallReturnValue));
}
LLINT_SLOW_PATH_DECL(slow_path_strcat)
@@ -1200,199 +1487,198 @@ LLINT_SLOW_PATH_DECL(slow_path_to_primitive)
LLINT_RETURN(LLINT_OP_C(2).jsValue().toPrimitive(exec));
}
-LLINT_SLOW_PATH_DECL(slow_path_get_pnames)
+LLINT_SLOW_PATH_DECL(slow_path_throw)
{
LLINT_BEGIN();
- JSValue v = LLINT_OP(2).jsValue();
- if (v.isUndefinedOrNull()) {
- pc += pc[5].u.operand;
- LLINT_END();
- }
-
- JSObject* o = v.toObject(exec);
- Structure* structure = o->structure();
- JSPropertyNameIterator* jsPropertyNameIterator = structure->enumerationCache();
- if (!jsPropertyNameIterator || jsPropertyNameIterator->cachedPrototypeChain() != structure->prototypeChain(exec))
- jsPropertyNameIterator = JSPropertyNameIterator::create(exec, o);
-
- LLINT_OP(1) = JSValue(jsPropertyNameIterator);
- LLINT_OP(2) = JSValue(o);
- LLINT_OP(3) = Register::withInt(0);
- LLINT_OP(4) = Register::withInt(jsPropertyNameIterator->size());
-
- pc += OPCODE_LENGTH(op_get_pnames);
- LLINT_END();
+ LLINT_THROW(LLINT_OP_C(1).jsValue());
}
-LLINT_SLOW_PATH_DECL(slow_path_next_pname)
+LLINT_SLOW_PATH_DECL(slow_path_handle_watchdog_timer)
{
- LLINT_BEGIN();
- JSObject* base = asObject(LLINT_OP(2).jsValue());
- JSString* property = asString(LLINT_OP(1).jsValue());
- if (base->hasProperty(exec, Identifier(exec, property->value(exec)))) {
- // Go to target.
- pc += pc[6].u.operand;
- } // Else, don't change the PC, so the interpreter will reloop.
- LLINT_END();
+ LLINT_BEGIN_NO_SET_PC();
+ ASSERT(vm.watchdog());
+ if (UNLIKELY(vm.shouldTriggerTermination(exec)))
+ LLINT_THROW(createTerminatedExecutionException(&vm));
+ LLINT_RETURN_TWO(0, exec);
}
-LLINT_SLOW_PATH_DECL(slow_path_push_with_scope)
+LLINT_SLOW_PATH_DECL(slow_path_debug)
{
LLINT_BEGIN();
- JSValue v = LLINT_OP_C(1).jsValue();
- JSObject* o = v.toObject(exec);
- LLINT_CHECK_EXCEPTION();
-
- exec->setScope(JSWithScope::create(exec, o));
+ int debugHookType = pc[1].u.operand;
+ vm.interpreter->debug(exec, static_cast<DebugHookType>(debugHookType));
LLINT_END();
}
-LLINT_SLOW_PATH_DECL(slow_path_pop_scope)
+LLINT_SLOW_PATH_DECL(slow_path_handle_exception)
{
- LLINT_BEGIN();
- exec->setScope(exec->scope()->next());
- LLINT_END();
+ LLINT_BEGIN_NO_SET_PC();
+ UNUSED_PARAM(throwScope);
+ genericUnwind(&vm, exec);
+ LLINT_END_IMPL();
}
-LLINT_SLOW_PATH_DECL(slow_path_push_name_scope)
+LLINT_SLOW_PATH_DECL(slow_path_get_from_scope)
{
LLINT_BEGIN();
- CodeBlock* codeBlock = exec->codeBlock();
- JSNameScope* scope = JSNameScope::create(exec, codeBlock->identifier(pc[1].u.operand), LLINT_OP(2).jsValue(), pc[3].u.operand);
- exec->setScope(scope);
- LLINT_END();
-}
+ const Identifier& ident = exec->codeBlock()->identifier(pc[3].u.operand);
+ JSObject* scope = jsCast<JSObject*>(LLINT_OP(2).jsValue());
+ GetPutInfo getPutInfo(pc[4].u.operand);
-LLINT_SLOW_PATH_DECL(slow_path_throw)
-{
- LLINT_BEGIN();
- LLINT_THROW(LLINT_OP_C(1).jsValue());
-}
+ // ModuleVar is always converted to ClosureVar for get_from_scope.
+ ASSERT(getPutInfo.resolveType() != ModuleVar);
-LLINT_SLOW_PATH_DECL(slow_path_throw_static_error)
-{
- LLINT_BEGIN();
- if (pc[2].u.operand)
- LLINT_THROW(createReferenceError(exec, errorDescriptionForValue(exec, LLINT_OP_C(1).jsValue())->value(exec)));
- else
- LLINT_THROW(createTypeError(exec, errorDescriptionForValue(exec, LLINT_OP_C(1).jsValue())->value(exec)));
-}
+ LLINT_RETURN(scope->getPropertySlot(exec, ident, [&] (bool found, PropertySlot& slot) -> JSValue {
+ if (!found) {
+ if (getPutInfo.resolveMode() == ThrowIfNotFound)
+ return throwException(exec, throwScope, createUndefinedVariableError(exec, ident));
+ return jsUndefined();
+ }
-LLINT_SLOW_PATH_DECL(slow_path_handle_watchdog_timer)
-{
- LLINT_BEGIN_NO_SET_PC();
- if (UNLIKELY(vm.watchdog.didFire(exec)))
- LLINT_THROW(createTerminatedExecutionException(&vm));
- LLINT_RETURN_TWO(0, exec);
+ JSValue result = JSValue();
+ if (scope->isGlobalLexicalEnvironment()) {
+ // When we can't statically prove we need a TDZ check, we must perform the check on the slow path.
+ result = slot.getValue(exec, ident);
+ if (result == jsTDZValue())
+ return throwException(exec, throwScope, createTDZError(exec));
+ }
+
+ CommonSlowPaths::tryCacheGetFromScopeGlobal(exec, vm, pc, scope, slot, ident);
+
+ if (!result)
+ return slot.getValue(exec, ident);
+ return result;
+ }));
}
-LLINT_SLOW_PATH_DECL(slow_path_debug)
+LLINT_SLOW_PATH_DECL(slow_path_put_to_scope)
{
LLINT_BEGIN();
- int debugHookID = pc[1].u.operand;
- vm.interpreter->debug(exec, static_cast<DebugHookID>(debugHookID));
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ const Identifier& ident = codeBlock->identifier(pc[2].u.operand);
+ JSObject* scope = jsCast<JSObject*>(LLINT_OP(1).jsValue());
+ JSValue value = LLINT_OP_C(3).jsValue();
+ GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
+ if (getPutInfo.resolveType() == LocalClosureVar) {
+ JSLexicalEnvironment* environment = jsCast<JSLexicalEnvironment*>(scope);
+ environment->variableAt(ScopeOffset(pc[6].u.operand)).set(vm, environment, value);
+
+ // Have to do this *after* the write, because if this puts the set into IsWatched, then we need
+ // to have already changed the value of the variable. Otherwise we might watch and constant-fold
+ // to the Undefined value from before the assignment.
+ if (WatchpointSet* set = pc[5].u.watchpointSet)
+ set->touch(vm, "Executed op_put_scope<LocalClosureVar>");
+ LLINT_END();
+ }
+
+ bool hasProperty = scope->hasProperty(exec, ident);
+ LLINT_CHECK_EXCEPTION();
+ if (hasProperty
+ && scope->isGlobalLexicalEnvironment()
+ && !isInitialization(getPutInfo.initializationMode())) {
+ // When we can't statically prove we need a TDZ check, we must perform the check on the slow path.
+ PropertySlot slot(scope, PropertySlot::InternalMethodType::Get);
+ JSGlobalLexicalEnvironment::getOwnPropertySlot(scope, exec, ident, slot);
+ if (slot.getValue(exec, ident) == jsTDZValue())
+ LLINT_THROW(createTDZError(exec));
+ }
+
+ if (getPutInfo.resolveMode() == ThrowIfNotFound && !hasProperty)
+ LLINT_THROW(createUndefinedVariableError(exec, ident));
+
+ PutPropertySlot slot(scope, codeBlock->isStrictMode(), PutPropertySlot::UnknownContext, isInitialization(getPutInfo.initializationMode()));
+ scope->methodTable()->put(scope, exec, ident, value, slot);
+ CommonSlowPaths::tryCachePutToScopeGlobal(exec, codeBlock, pc, scope, getPutInfo, slot, ident);
+
LLINT_END();
}
-LLINT_SLOW_PATH_DECL(slow_path_profile_will_call)
+LLINT_SLOW_PATH_DECL(slow_path_check_if_exception_is_uncatchable_and_notify_profiler)
{
LLINT_BEGIN();
- if (LegacyProfiler* profiler = vm.enabledProfiler())
- profiler->willExecute(exec, LLINT_OP(1).jsValue());
- LLINT_END();
+ RELEASE_ASSERT(!!throwScope.exception());
+
+ if (isTerminatedExecutionException(vm, throwScope.exception()))
+ LLINT_RETURN_TWO(pc, bitwise_cast<void*>(static_cast<uintptr_t>(1)));
+ LLINT_RETURN_TWO(pc, 0);
}
-LLINT_SLOW_PATH_DECL(slow_path_profile_did_call)
+LLINT_SLOW_PATH_DECL(slow_path_log_shadow_chicken_prologue)
{
LLINT_BEGIN();
- if (LegacyProfiler* profiler = vm.enabledProfiler())
- profiler->didExecute(exec, LLINT_OP(1).jsValue());
+
+ JSScope* scope = exec->uncheckedR(pc[1].u.operand).Register::scope();
+ vm.shadowChicken().log(vm, exec, ShadowChicken::Packet::prologue(exec->jsCallee(), exec, exec->callerFrame(), scope));
+
LLINT_END();
}
-LLINT_SLOW_PATH_DECL(throw_from_native_call)
+LLINT_SLOW_PATH_DECL(slow_path_log_shadow_chicken_tail)
{
LLINT_BEGIN();
- ASSERT(vm.exception());
+
+ JSValue thisValue = LLINT_OP(1).jsValue();
+ JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope();
+
+#if USE(JSVALUE64)
+ CallSiteIndex callSiteIndex(exec->codeBlock()->bytecodeOffset(pc));
+#else
+ CallSiteIndex callSiteIndex(pc);
+#endif
+ vm.shadowChicken().log(vm, exec, ShadowChicken::Packet::tail(exec, thisValue, scope, exec->codeBlock(), callSiteIndex));
+
LLINT_END();
}
-LLINT_SLOW_PATH_DECL(slow_path_handle_exception)
+extern "C" SlowPathReturnType llint_throw_stack_overflow_error(VM* vm, ProtoCallFrame* protoFrame)
{
- LLINT_BEGIN_NO_SET_PC();
- ASSERT(vm.exception());
- genericUnwind(&vm, exec, vm.exception());
- LLINT_END_IMPL();
+ ExecState* exec = vm->topCallFrame;
+ auto scope = DECLARE_THROW_SCOPE(*vm);
+
+ if (!exec)
+ exec = protoFrame->callee()->globalObject()->globalExec();
+ throwStackOverflowError(exec, scope);
+ return encodeResult(0, 0);
}
-LLINT_SLOW_PATH_DECL(slow_path_resolve_scope)
+#if !ENABLE(JIT)
+extern "C" SlowPathReturnType llint_stack_check_at_vm_entry(VM* vm, Register* newTopOfStack)
{
- LLINT_BEGIN();
- const Identifier& ident = exec->codeBlock()->identifier(pc[2].u.operand);
- LLINT_RETURN(JSScope::resolve(exec, exec->scope(), ident));
+ bool success = vm->ensureStackCapacityFor(newTopOfStack);
+ return encodeResult(reinterpret_cast<void*>(success), 0);
}
+#endif
-LLINT_SLOW_PATH_DECL(slow_path_get_from_scope)
+extern "C" void llint_write_barrier_slow(ExecState* exec, JSCell* cell)
{
- LLINT_BEGIN();
- const Identifier& ident = exec->codeBlock()->identifier(pc[3].u.operand);
- JSObject* scope = jsCast<JSObject*>(LLINT_OP(2).jsValue());
- ResolveModeAndType modeAndType(pc[4].u.operand);
-
- PropertySlot slot(scope);
- if (!scope->getPropertySlot(exec, ident, slot)) {
- if (modeAndType.mode() == ThrowIfNotFound)
- LLINT_RETURN(exec->vm().throwException(exec, createUndefinedVariableError(exec, ident)));
- LLINT_RETURN(jsUndefined());
- }
-
- // Covers implicit globals. Since they don't exist until they first execute, we didn't know how to cache them at compile time.
- if (slot.isCacheableValue() && slot.slotBase() == scope && scope->structure()->propertyAccessesAreCacheable()) {
- if (modeAndType.type() == GlobalProperty || modeAndType.type() == GlobalPropertyWithVarInjectionChecks) {
- CodeBlock* codeBlock = exec->codeBlock();
- ConcurrentJITLocker locker(codeBlock->m_lock);
- pc[5].u.structure.set(exec->vm(), codeBlock->ownerExecutable(), scope->structure());
- pc[6].u.pointer = reinterpret_cast<void*>(slot.cachedOffset());
- }
- }
-
- LLINT_RETURN(slot.getValue(exec, ident));
+ VM& vm = exec->vm();
+ vm.heap.writeBarrier(cell);
}
-LLINT_SLOW_PATH_DECL(slow_path_put_to_scope)
+extern "C" NO_RETURN_DUE_TO_CRASH void llint_crash()
{
- LLINT_BEGIN();
- CodeBlock* codeBlock = exec->codeBlock();
- const Identifier& ident = codeBlock->identifier(pc[2].u.operand);
- JSObject* scope = jsCast<JSObject*>(LLINT_OP(1).jsValue());
- JSValue value = LLINT_OP_C(3).jsValue();
- ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
-
- if (modeAndType.mode() == ThrowIfNotFound && !scope->hasProperty(exec, ident))
- LLINT_THROW(createUndefinedVariableError(exec, ident));
-
- PutPropertySlot slot(scope, codeBlock->isStrictMode());
- scope->methodTable()->put(scope, exec, ident, value, slot);
+ CRASH();
+}
- // Covers implicit globals. Since they don't exist until they first execute, we didn't know how to cache them at compile time.
- if (modeAndType.type() == GlobalProperty || modeAndType.type() == GlobalPropertyWithVarInjectionChecks) {
- if (slot.isCacheable() && slot.base() == scope && scope->structure()->propertyAccessesAreCacheable()) {
- ConcurrentJITLocker locker(codeBlock->m_lock);
- pc[5].u.structure.set(exec->vm(), codeBlock->ownerExecutable(), scope->structure());
- pc[6].u.pointer = reinterpret_cast<void*>(slot.cachedOffset());
- }
- }
+#if ENABLE(LLINT_STATS)
- LLINT_END();
+LLINT_SLOW_PATH_DECL(count_opcode)
+{
+ OpcodeID opcodeID = exec->vm().interpreter->getOpcodeID(pc[0].u.opcode);
+ Data::opcodeStats(opcodeID).count++;
+ LLINT_END_IMPL();
}
-extern "C" void llint_write_barrier_slow(ExecState*, JSCell* cell)
+LLINT_SLOW_PATH_DECL(count_opcode_slow_path)
{
- Heap::writeBarrier(cell);
+ OpcodeID opcodeID = exec->vm().interpreter->getOpcodeID(pc[0].u.opcode);
+ Data::opcodeStats(opcodeID).slowPathCount++;
+ LLINT_END_IMPL();
}
-} } // namespace JSC::LLInt
+#endif // ENABLE(LLINT_STATS)
-#endif // ENABLE(LLINT)
+} } // namespace JSC::LLInt
diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.h b/Source/JavaScriptCore/llint/LLIntSlowPaths.h
index 8d60afa24..d76138c12 100644
--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.h
+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2014, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,25 +23,22 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef LLIntSlowPaths_h
-#define LLIntSlowPaths_h
+#pragma once
#include "CommonSlowPaths.h"
-#include <wtf/Platform.h>
#include <wtf/StdLibExtras.h>
-#if ENABLE(LLINT)
-
namespace JSC {
class ExecState;
struct Instruction;
+struct ProtoCallFrame;
namespace LLInt {
extern "C" SlowPathReturnType llint_trace_operand(ExecState*, Instruction*, int fromWhere, int operand);
extern "C" SlowPathReturnType llint_trace_value(ExecState*, Instruction*, int fromWhere, int operand);
-extern "C" void llint_write_barrier_slow(ExecState*, JSCell*);
+extern "C" void llint_write_barrier_slow(ExecState*, JSCell*) WTF_INTERNAL;
#define LLINT_SLOW_PATH_DECL(name) \
extern "C" SlowPathReturnType llint_##name(ExecState* exec, Instruction* pc)
@@ -56,6 +53,8 @@ LLINT_SLOW_PATH_HIDDEN_DECL(trace_arityCheck_for_call);
LLINT_SLOW_PATH_HIDDEN_DECL(trace_arityCheck_for_construct);
LLINT_SLOW_PATH_HIDDEN_DECL(trace);
LLINT_SLOW_PATH_HIDDEN_DECL(special_trace);
+LLINT_SLOW_PATH_HIDDEN_DECL(count_opcode);
+LLINT_SLOW_PATH_HIDDEN_DECL(count_opcode_slow_path);
LLINT_SLOW_PATH_HIDDEN_DECL(entry_osr);
LLINT_SLOW_PATH_HIDDEN_DECL(entry_osr_function_for_call);
LLINT_SLOW_PATH_HIDDEN_DECL(entry_osr_function_for_construct);
@@ -64,26 +63,29 @@ LLINT_SLOW_PATH_HIDDEN_DECL(entry_osr_function_for_construct_arityCheck);
LLINT_SLOW_PATH_HIDDEN_DECL(loop_osr);
LLINT_SLOW_PATH_HIDDEN_DECL(replace);
LLINT_SLOW_PATH_HIDDEN_DECL(stack_check);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_create_activation);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_object);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_array);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_array_with_size);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_array_buffer);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_regexp);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_check_has_instance);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_instanceof);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_instanceof_custom);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_try_get_by_id);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_by_id);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_arguments_length);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_id);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_del_by_id);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_by_val);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_argument_by_val);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_by_pname);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_val);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_val_direct);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_del_by_val);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_index);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_getter_setter);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_getter_by_id);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_setter_by_id);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_getter_setter_by_id);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_getter_by_val);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_setter_by_val);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jtrue);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jfalse);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jless);
@@ -99,35 +101,35 @@ LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_switch_char);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_switch_string);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_func);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_func_exp);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_generator_func);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_generator_func_exp);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_async_func);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_async_func_exp);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_set_function_name);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_construct);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_size_and_alloc_frame_for_varargs);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_size_frame_for_varargs);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_size_frame_for_forward_arguments);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call_varargs);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_tail_call_forward_arguments);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_construct_varargs);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call_eval);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_tear_off_activation);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_tear_off_arguments);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_strcat);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_to_primitive);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_pnames);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_next_pname);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_push_with_scope);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_pop_scope);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_push_name_scope);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_throw);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_throw_static_error);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_handle_watchdog_timer);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_debug);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_profile_will_call);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_profile_did_call);
-LLINT_SLOW_PATH_HIDDEN_DECL(throw_from_native_call);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_handle_exception);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_resolve_scope);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_from_scope);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_to_scope);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_check_if_exception_is_uncatchable_and_notify_profiler);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_log_shadow_chicken_prologue);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_log_shadow_chicken_tail);
+extern "C" SlowPathReturnType llint_throw_stack_overflow_error(VM*, ProtoCallFrame*) WTF_INTERNAL;
+#if !ENABLE(JIT)
+extern "C" SlowPathReturnType llint_stack_check_at_vm_entry(VM*, Register*) WTF_INTERNAL;
+#endif
+extern "C" NO_RETURN_DUE_TO_CRASH void llint_crash() WTF_INTERNAL;
} } // namespace JSC::LLInt
-
-#endif // ENABLE(LLINT)
-
-#endif // LLIntSlowPaths_h
-
diff --git a/Source/JavaScriptCore/llint/LLIntThunks.cpp b/Source/JavaScriptCore/llint/LLIntThunks.cpp
index 9429e6cb5..a9fa7dd65 100644
--- a/Source/JavaScriptCore/llint/LLIntThunks.cpp
+++ b/Source/JavaScriptCore/llint/LLIntThunks.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2013, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,19 +29,25 @@
#include "CallData.h"
#include "ExceptionHelpers.h"
#include "Interpreter.h"
+#include "JSCJSValueInlines.h"
#include "JSInterfaceJIT.h"
#include "JSObject.h"
-#include "JSStackInlines.h"
#include "LLIntCLoop.h"
+#include "LLIntData.h"
#include "LinkBuffer.h"
#include "LowLevelInterpreter.h"
#include "ProtoCallFrame.h"
+#include "StackAlignment.h"
#include "VM.h"
namespace JSC {
+EncodedJSValue JS_EXPORT_PRIVATE vmEntryToWasm(void* code, VM* vm, ProtoCallFrame* frame)
+{
+ return vmEntryToJavaScript(code, vm, frame);
+}
+
#if ENABLE(JIT)
-#if ENABLE(LLINT)
namespace LLInt {
@@ -53,100 +59,71 @@ static MacroAssemblerCodeRef generateThunkWithJumpTo(VM* vm, void (*target)(), c
jit.move(JSInterfaceJIT::TrustedImmPtr(bitwise_cast<void*>(target)), JSInterfaceJIT::regT0);
jit.jump(JSInterfaceJIT::regT0);
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("LLInt %s prologue thunk", thunkKind));
}
MacroAssemblerCodeRef functionForCallEntryThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(vm, llint_function_for_call_prologue, "function for call");
+ return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_function_for_call_prologue), "function for call");
}
MacroAssemblerCodeRef functionForConstructEntryThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(vm, llint_function_for_construct_prologue, "function for construct");
+ return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_function_for_construct_prologue), "function for construct");
}
MacroAssemblerCodeRef functionForCallArityCheckThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(vm, llint_function_for_call_arity_check, "function for call with arity check");
+ return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_function_for_call_arity_check), "function for call with arity check");
}
MacroAssemblerCodeRef functionForConstructArityCheckThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(vm, llint_function_for_construct_arity_check, "function for construct with arity check");
+ return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_function_for_construct_arity_check), "function for construct with arity check");
}
MacroAssemblerCodeRef evalEntryThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(vm, llint_eval_prologue, "eval");
+ return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_eval_prologue), "eval");
}
MacroAssemblerCodeRef programEntryThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(vm, llint_program_prologue, "program");
+ return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_program_prologue), "program");
+}
+
+MacroAssemblerCodeRef moduleProgramEntryThunkGenerator(VM* vm)
+{
+ return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_module_program_prologue), "module_program");
}
} // namespace LLInt
-#endif // ENABLE(LLINT)
#else // ENABLE(JIT)
// Non-JIT (i.e. C Loop LLINT) case:
-typedef JSValue (*ExecuteCode) (CallFrame*, void* executableAddress);
-
-template<ExecuteCode execute>
-EncodedJSValue doCallToJavaScript(void* executableAddress, ProtoCallFrame* protoCallFrame)
+EncodedJSValue vmEntryToJavaScript(void* executableAddress, VM* vm, ProtoCallFrame* protoCallFrame)
{
- CodeBlock* codeBlock = protoCallFrame->codeBlock();
- JSScope* scope = protoCallFrame->scope();
- JSObject* callee = protoCallFrame->callee();
- int argCountIncludingThis = protoCallFrame->argumentCountIncludingThis();
- int argCount = protoCallFrame->argumentCount();
- JSValue thisValue = protoCallFrame->thisValue();
- JSStack& stack = scope->vm()->interpreter->stack();
-
- CallFrame* newCallFrame = stack.pushFrame(codeBlock, scope, argCountIncludingThis, callee);
- if (UNLIKELY(!newCallFrame)) {
- JSGlobalObject* globalObject = scope->globalObject();
- ExecState* exec = globalObject->globalExec();
- return JSValue::encode(throwStackOverflowError(exec));
- }
-
- // Set the arguments for the callee:
- newCallFrame->setThisValue(thisValue);
- for (int i = 0; i < argCount; ++i)
- newCallFrame->setArgument(i, protoCallFrame->argument(i));
-
- JSValue result = execute(newCallFrame, executableAddress);
-
- stack.popFrame(newCallFrame);
-
+ JSValue result = CLoop::execute(llint_vm_entry_to_javascript, executableAddress, vm, protoCallFrame);
return JSValue::encode(result);
}
-static inline JSValue executeJS(CallFrame* newCallFrame, void* executableAddress)
-{
- Opcode entryOpcode = *reinterpret_cast<Opcode*>(&executableAddress);
- return CLoop::execute(newCallFrame, entryOpcode);
-}
-
-EncodedJSValue callToJavaScript(void* executableAddress, ExecState**, ProtoCallFrame* protoCallFrame, Register*)
+EncodedJSValue vmEntryToNative(void* executableAddress, VM* vm, ProtoCallFrame* protoCallFrame)
{
- return doCallToJavaScript<executeJS>(executableAddress, protoCallFrame);
+ JSValue result = CLoop::execute(llint_vm_entry_to_native, executableAddress, vm, protoCallFrame);
+ return JSValue::encode(result);
}
-static inline JSValue executeNative(CallFrame* newCallFrame, void* executableAddress)
+extern "C" VMEntryRecord* vmEntryRecord(VMEntryFrame* entryFrame)
{
- NativeFunction function = reinterpret_cast<NativeFunction>(executableAddress);
- return JSValue::decode(function(newCallFrame));
+ // The C Loop doesn't have any callee save registers, so the VMEntryRecord is allocated at the base of the frame.
+ intptr_t stackAlignment = stackAlignmentBytes();
+ intptr_t VMEntryTotalFrameSize = (sizeof(VMEntryRecord) + (stackAlignment - 1)) & ~(stackAlignment - 1);
+ return reinterpret_cast<VMEntryRecord*>(reinterpret_cast<char*>(entryFrame) - VMEntryTotalFrameSize);
}
-EncodedJSValue callToNativeFunction(void* executableAddress, ExecState**, ProtoCallFrame* protoCallFrame, Register*)
-{
- return doCallToJavaScript<executeNative>(executableAddress, protoCallFrame);
-}
#endif // ENABLE(JIT)
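
The C Loop vmEntryRecord() above uses the standard round-up-to-alignment idiom: add (alignment - 1), then mask the low bits off. A minimal standalone sketch of that arithmetic (the 40-byte record size is a made-up value for illustration; only the idiom matches the code above):

    #include <cstdint>
    #include <cstdio>

    // Round size up to the next multiple of alignment (a power of two).
    static std::intptr_t roundUpToAlignment(std::intptr_t size, std::intptr_t alignment)
    {
        return (size + (alignment - 1)) & ~(alignment - 1);
    }

    int main()
    {
        // Hypothetical 40-byte record with 16-byte stack alignment.
        std::printf("%ld\n", static_cast<long>(roundUpToAlignment(40, 16))); // prints 48
        return 0;
    }

With the rounded size in hand, the record sits at entryFrame minus that size, which is exactly what the function above returns.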
diff --git a/Source/JavaScriptCore/llint/LLIntThunks.h b/Source/JavaScriptCore/llint/LLIntThunks.h
index 8a894aa41..fc9742c42 100644
--- a/Source/JavaScriptCore/llint/LLIntThunks.h
+++ b/Source/JavaScriptCore/llint/LLIntThunks.h
@@ -23,30 +23,23 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef LLIntThunks_h
-#define LLIntThunks_h
-
-#include <wtf/Platform.h>
-
-#if ENABLE(LLINT)
+#pragma once
#include "MacroAssemblerCodeRef.h"
namespace JSC {
-class ExecState;
-class Register;
class VM;
struct ProtoCallFrame;
+typedef int64_t EncodedJSValue;
extern "C" {
- EncodedJSValue callToJavaScript(void*, ExecState**, ProtoCallFrame*, Register*);
- EncodedJSValue callToNativeFunction(void*, ExecState**, ProtoCallFrame*, Register*);
-#if ENABLE(JIT)
- void returnFromJavaScript();
-#endif
+ EncodedJSValue vmEntryToJavaScript(void*, VM*, ProtoCallFrame*);
+ EncodedJSValue vmEntryToNative(void*, VM*, ProtoCallFrame*);
}
+EncodedJSValue JS_EXPORT_PRIVATE vmEntryToWasm(void*, VM*, ProtoCallFrame*);
+
namespace LLInt {
MacroAssemblerCodeRef functionForCallEntryThunkGenerator(VM*);
@@ -55,9 +48,6 @@ MacroAssemblerCodeRef functionForCallArityCheckThunkGenerator(VM*);
MacroAssemblerCodeRef functionForConstructArityCheckThunkGenerator(VM*);
MacroAssemblerCodeRef evalEntryThunkGenerator(VM*);
MacroAssemblerCodeRef programEntryThunkGenerator(VM*);
+MacroAssemblerCodeRef moduleProgramEntryThunkGenerator(VM*);
} } // namespace JSC::LLInt
-
-#endif // ENABLE(LLINT)
-
-#endif // LLIntThunks_h
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
index 8f21f6d89..6b4996406 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
@@ -1,4 +1,4 @@
-# Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
+# Copyright (C) 2011-2016 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -21,75 +21,271 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
+# Crash course on the language that this is written in (which I just call
+# "assembly" even though it's more than that):
+#
+# - Mostly gas-style operand ordering. The last operand tends to be the
+# destination. So "a := b" is written as "mov b, a". But unlike gas,
+# comparisons are in-order, so "if (a < b)" is written as
+# "bilt a, b, ...".
+#
+# - "b" = byte, "h" = 16-bit word, "i" = 32-bit word, "p" = pointer.
+# For 32-bit, "i" and "p" are interchangeable except when an op supports one
+# but not the other.
+#
+# - In general, valid operands for macro invocations and instructions are
+# registers (eg "t0"), addresses (eg "4[t0]"), base-index addresses
+# (eg "7[t0, t1, 2]"), absolute addresses (eg "0xa0000000[]"), or labels
+# (eg "_foo" or ".foo"). Macro invocations can also take anonymous
+# macros as operands. Instructions cannot take anonymous macros.
+#
+# - Labels must have names that begin with either "_" or ".". A "." label
+# is local and gets renamed before code gen to minimize namespace
+# pollution. A "_" label is an extern symbol (i.e. ".globl"). The "_"
+# may or may not be removed during code gen depending on whether the asm
+# conventions for C name mangling on the target platform mandate a "_"
+# prefix.
+#
+# - A "macro" is a lambda expression, which may be either anonymous or
+# named. But this has caveats. "macro" can take zero or more arguments,
+# which may be macros or any valid operands, but it can only return
+# code. But you can do Turing-complete things via continuation passing
+# style: "macro foo (a, b) b(a, a) end foo(foo, foo)". Actually, don't do
+# that, since you'll just crash the assembler.
+#
+# - An "if" is a conditional on settings. Any identifier supplied in the
+# predicate of an "if" is assumed to be a #define that is available
+# during code gen. So you can't use "if" for computation in a macro, but
+# you can use it to select different pieces of code for different
+# platforms.
+#
+# - Arguments to macros follow lexical scoping rather than dynamic scoping.
+# Const's also follow lexical scoping and may override (hide) arguments
+# or other consts. All variables (arguments and constants) can be bound
+# to operands. Additionally, arguments (but not constants) can be bound
+# to macros.
+
+# The following general-purpose registers are available:
+#
+# - cfr and sp hold the call frame and (native) stack pointer respectively.
+# They are callee-save registers, and guaranteed to be distinct from all other
+# registers on all architectures.
+#
+# - lr is defined on non-X86 architectures (ARM64, ARMv7, ARM,
+# ARMv7_TRADITIONAL, MIPS and CLOOP) and holds the return PC
+#
+# - pc holds the (native) program counter on 32-bit ARM architectures (ARM,
+# ARMv7, ARMv7_TRADITIONAL)
+#
+# - t0, t1, t2, t3, t4 and optionally t5 are temporary registers that can get trashed on
+# calls, and are pairwise distinct registers. t4 holds the JS program counter, so use
+# with caution in opcodes (actually, don't use it in opcodes at all, except as PC).
+#
+# - r0 and r1 are the platform's customary return registers, and thus are
+# two distinct registers
+#
+# - a0, a1, a2 and a3 are the platform's customary argument registers, and
+# thus are pairwise distinct registers. Be mindful that:
+# + On X86, there are no argument registers. a0 and a1 are edx and
+# ecx following the fastcall convention, but you should still use the stack
+# to pass your arguments. The cCall2 and cCall4 macros do this for you.
+# + On X86_64_WIN, you should allocate space on the stack for the arguments,
+# and the return convention is weird for > 8 bytes types. The only place we
+# use > 8 bytes return values is on a cCall, and cCall2 and cCall4 handle
+# this for you.
+#
+# - The only registers guaranteed to be caller-saved are r0, r1, a0, a1 and a2, and
+# you should be mindful of that in functions that are called directly from C.
+# If you need more registers, you should push and pop them like a good
+# assembly citizen, because any other register will be callee-saved on X86.
+#
+# You can additionally assume:
+#
+# - a3, t2, t3, t4 and t5 are never return registers; t0, t1, a0, a1 and a2
+# can be return registers.
+#
+# - t4 and t5 are never argument registers, t3 can only be a3, t1 can only be
+# a1; but t0 and t2 can be either a0 or a2.
+#
+# - On 64 bits, there are callee-save registers named csr0, csr1, ... csrN.
+#   The last three csr registers are used to store the PC base and
+# two special tag values. Don't use them for anything else.
+#
+# Additional platform-specific details (you shouldn't rely on this remaining
+# true):
+#
+# - For consistency with the baseline JIT, t0 is always r0 (and t1 is always
+# r1 on 32 bits platforms). You should use the r version when you need return
+# registers, and the t version otherwise: code using t0 (or t1) should still
+# work if swapped with e.g. t3, while code using r0 (or r1) should not. There
+# *may* be legacy code relying on this.
+#
+# - On all platforms other than X86, t0 can only be a0 and t2 can only be a2.
+#
+# - On all platforms other than X86 and X86_64, a2 is not a return register.
+# a2 is r0 on X86 (because we have so few registers) and r1 on X86_64 (because
+# the ABI enforces it).
+#
+# The following floating-point registers are available:
+#
+# - ft0-ft5 are temporary floating-point registers that get trashed on calls,
+# and are pairwise distinct.
+#
+# - fa0 and fa1 are the platform's customary floating-point argument
+#   registers, and are both distinct. On 64-bit platforms, fa2 and fa3 are
+# additional floating-point argument registers.
+#
+# - fr is the platform's customary floating-point return register
+#
+# You can assume that ft1-ft5 or fa1-fa3 are never fr, and that ftX is never
+# faY if X != Y.
+
# First come the common protocols that both interpreters use. Note that each
# of these must have an ASSERT() in LLIntData.cpp
-# Work-around for the fact that the toolchain's awareness of armv7s results in
-# a separate slab in the fat binary, yet the offlineasm doesn't know to expect
-# it.
+# Work-around for the fact that the toolchain's awareness of armv7k / armv7s
+# results in a separate slab in the fat binary, yet the offlineasm doesn't know
+# to expect it.
+if ARMv7k
+end
if ARMv7s
end
# These declarations must match interpreter/JSStack.h.
if JSVALUE64
-const PtrSize = 8
-const CallFrameHeaderSlots = 6
+ const PtrSize = 8
+ const CallFrameHeaderSlots = 5
else
-const PtrSize = 4
-const CallFrameHeaderSlots = 5
+ const PtrSize = 4
+ const CallFrameHeaderSlots = 4
+ const CallFrameAlignSlots = 1
end
const SlotSize = 8
+const JSEnvironmentRecord_variables = (sizeof JSEnvironmentRecord + SlotSize - 1) & ~(SlotSize - 1)
+const DirectArguments_storage = (sizeof DirectArguments + SlotSize - 1) & ~(SlotSize - 1)
+
+const StackAlignment = 16
+const StackAlignmentSlots = 2
+const StackAlignmentMask = StackAlignment - 1
+
+const CallerFrameAndPCSize = 2 * PtrSize
+
const CallerFrame = 0
const ReturnPC = CallerFrame + PtrSize
const CodeBlock = ReturnPC + PtrSize
-const ScopeChain = CodeBlock + SlotSize
-const Callee = ScopeChain + SlotSize
+const Callee = CodeBlock + SlotSize
const ArgumentCount = Callee + SlotSize
const ThisArgumentOffset = ArgumentCount + SlotSize
+const FirstArgumentOffset = ThisArgumentOffset + SlotSize
const CallFrameHeaderSize = ThisArgumentOffset
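
To make the header layout above concrete, here is a small sketch that evaluates the same offset chain for a 64-bit (JSVALUE64) build, where PtrSize and SlotSize are both 8. The numbers follow mechanically from the constants above and are shown only for orientation:

    #include <cstdio>

    int main()
    {
        const int PtrSize = 8, SlotSize = 8; // JSVALUE64 sizes
        const int CallerFrame = 0;
        const int ReturnPC = CallerFrame + PtrSize;                    // 8
        const int CodeBlock = ReturnPC + PtrSize;                      // 16
        const int Callee = CodeBlock + SlotSize;                       // 24
        const int ArgumentCount = Callee + SlotSize;                   // 32
        const int ThisArgumentOffset = ArgumentCount + SlotSize;       // 40
        const int FirstArgumentOffset = ThisArgumentOffset + SlotSize; // 48
        const int CallFrameHeaderSize = ThisArgumentOffset;            // "this" is not part of the header
        std::printf("header=%d firstArg=%d\n", CallFrameHeaderSize, FirstArgumentOffset);
        return 0;
    }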
# Some value representation constants.
if JSVALUE64
-const TagBitTypeOther = 0x2
-const TagBitBool = 0x4
-const TagBitUndefined = 0x8
-const ValueEmpty = 0x0
-const ValueFalse = TagBitTypeOther | TagBitBool
-const ValueTrue = TagBitTypeOther | TagBitBool | 1
-const ValueUndefined = TagBitTypeOther | TagBitUndefined
-const ValueNull = TagBitTypeOther
+ const TagBitTypeOther = 0x2
+ const TagBitBool = 0x4
+ const TagBitUndefined = 0x8
+ const ValueEmpty = 0x0
+ const ValueFalse = TagBitTypeOther | TagBitBool
+ const ValueTrue = TagBitTypeOther | TagBitBool | 1
+ const ValueUndefined = TagBitTypeOther | TagBitUndefined
+ const ValueNull = TagBitTypeOther
+ const TagTypeNumber = 0xffff000000000000
+ const TagMask = TagTypeNumber | TagBitTypeOther
else
-const Int32Tag = -1
-const BooleanTag = -2
-const NullTag = -3
-const UndefinedTag = -4
-const CellTag = -5
-const EmptyValueTag = -6
-const DeletedValueTag = -7
-const LowestTag = DeletedValueTag
+ const Int32Tag = -1
+ const BooleanTag = -2
+ const NullTag = -3
+ const UndefinedTag = -4
+ const CellTag = -5
+ const EmptyValueTag = -6
+ const DeletedValueTag = -7
+ const LowestTag = DeletedValueTag
end
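
The JSVALUE64 constants above pack the whole classification into the top 16 bits plus two low tag bits. A hedged sketch of the resulting bit tests; it mirrors only the masks above, not JSC's actual JSValue class:

    #include <cstdint>
    #include <cstdio>

    const std::uint64_t TagBitTypeOther = 0x2;
    const std::uint64_t TagBitBool      = 0x4;
    const std::uint64_t TagTypeNumber   = 0xffff000000000000ull;
    const std::uint64_t TagMask         = TagTypeNumber | TagBitTypeOther;

    // Classify a raw 64-bit encoding using only the constants above.
    const char* classify(std::uint64_t bits)
    {
        if (bits & TagTypeNumber)
            return "number (int32 or boxed double)";
        if (!(bits & TagMask))
            return bits ? "cell pointer" : "empty";
        if (bits & TagBitBool)
            return "boolean";
        return "undefined or null";
    }

    int main()
    {
        std::printf("%s\n", classify(0x7));                // ValueTrue  -> boolean
        std::printf("%s\n", classify(TagTypeNumber | 42)); // int32 42   -> number
        return 0;
    }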
+# NOTE: The values below must be in sync with what is in PutByIdFlags.h.
+const PutByIdPrimaryTypeMask = 0x6
+const PutByIdPrimaryTypeSecondary = 0x0
+const PutByIdPrimaryTypeObjectWithStructure = 0x2
+const PutByIdPrimaryTypeObjectWithStructureOrOther = 0x4
+const PutByIdSecondaryTypeMask = -0x8
+const PutByIdSecondaryTypeBottom = 0x0
+const PutByIdSecondaryTypeBoolean = 0x8
+const PutByIdSecondaryTypeOther = 0x10
+const PutByIdSecondaryTypeInt32 = 0x18
+const PutByIdSecondaryTypeNumber = 0x20
+const PutByIdSecondaryTypeString = 0x28
+const PutByIdSecondaryTypeSymbol = 0x30
+const PutByIdSecondaryTypeObject = 0x38
+const PutByIdSecondaryTypeObjectOrOther = 0x40
+const PutByIdSecondaryTypeTop = 0x48
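
The PutByIdFlags values above form a two-level bitfield: bits 1-2 select the primary type and the bits from 3 upward select the secondary type. A tiny sketch of how a flags word splits under those masks (inferred from the constants above rather than quoted from PutByIdFlags.h):

    #include <cstdio>

    const int PutByIdPrimaryTypeMask   = 0x6;
    const int PutByIdSecondaryTypeMask = -0x8; // same as ~0x7

    int main()
    {
        int flags = 0x2 | 0x18; // ObjectWithStructure primary, Int32 secondary, per the table above
        std::printf("primary=0x%x secondary=0x%x\n",
            flags & PutByIdPrimaryTypeMask, flags & PutByIdSecondaryTypeMask);
        return 0;
    }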
+
+const CallOpCodeSize = 9
+
+if X86_64 or ARM64 or C_LOOP
+ const maxFrameExtentForSlowPathCall = 0
+elsif ARM or ARMv7_TRADITIONAL or ARMv7
+ const maxFrameExtentForSlowPathCall = 24
+elsif X86 or X86_WIN
+ const maxFrameExtentForSlowPathCall = 40
+elsif MIPS
+ const maxFrameExtentForSlowPathCall = 40
+elsif X86_64_WIN
+ const maxFrameExtentForSlowPathCall = 64
+end
+
+if X86_64 or X86_64_WIN or ARM64
+ const CalleeSaveSpaceAsVirtualRegisters = 3
+else
+ const CalleeSaveSpaceAsVirtualRegisters = 0
+end
+
+const CalleeSaveSpaceStackAligned = (CalleeSaveSpaceAsVirtualRegisters * SlotSize + StackAlignment - 1) & ~StackAlignmentMask
+
+
# Watchpoint states
const ClearWatchpoint = 0
const IsWatched = 1
const IsInvalidated = 2
+# ShadowChicken data
+const ShadowChickenTailMarker = 0x7a11
+
+# ArithProfile data
+const ArithProfileInt = 0x100000
+const ArithProfileIntInt = 0x120000
+const ArithProfileNumber = 0x200000
+const ArithProfileNumberInt = 0x220000
+const ArithProfileNumberNumber = 0x240000
+const ArithProfileIntNumber = 0x140000
+
# Some register conventions.
if JSVALUE64
# - Use a pair of registers to represent the PC: one register for the
# base of the bytecodes, and one register for the index.
- # - The PC base (or PB for short) should be stored in the csr. It will
- # get clobbered on calls to other JS code, but will get saved on calls
- # to C functions.
+ # - The PC base (or PB for short) must be stored in a callee-save register.
# - C calls are still given the Instruction* rather than the PC index.
# This requires an add before the call, and a sub after.
- const PC = t4
- const PB = t6
- const tagTypeNumber = csr1
- const tagMask = csr2
-
+ const PC = t4 # When changing this, make sure LLIntPC is up to date in LLIntPCRanges.h
+ if ARM64
+ const PB = csr7
+ const tagTypeNumber = csr8
+ const tagMask = csr9
+ elsif X86_64
+ const PB = csr2
+ const tagTypeNumber = csr3
+ const tagMask = csr4
+ elsif X86_64_WIN
+ const PB = csr4
+ const tagTypeNumber = csr5
+ const tagMask = csr6
+ elsif C_LOOP
+ const PB = csr0
+ const tagTypeNumber = csr1
+ const tagMask = csr2
+ end
+
macro loadisFromInstruction(offset, dest)
loadis offset * 8[PB, PC, 8], dest
end
@@ -98,12 +294,16 @@ if JSVALUE64
loadp offset * 8[PB, PC, 8], dest
end
+ macro storeisToInstruction(value, offset)
+ storei value, offset * 8[PB, PC, 8]
+ end
+
macro storepToInstruction(value, offset)
storep value, offset * 8[PB, PC, 8]
end
else
- const PC = t4
+ const PC = t4 # When changing this, make sure LLIntPC is up to date in LLIntPCRanges.h
macro loadisFromInstruction(offset, dest)
loadis offset * 4[PC], dest
end
@@ -111,6 +311,16 @@ else
macro loadpFromInstruction(offset, dest)
loadp offset * 4[PC], dest
end
+
+ macro storeisToInstruction(value, offset)
+ storei value, offset * 4[PC]
+ end
+end
+
+if X86_64_WIN
+ const extraTempReg = t0
+else
+ const extraTempReg = t5
end
# Constants for reasoning about value representation.
@@ -123,24 +333,43 @@ else
end
# Constant for reasoning about butterflies.
-const IsArray = 1
-const IndexingShapeMask = 30
-const NoIndexingShape = 0
-const Int32Shape = 20
-const DoubleShape = 22
-const ContiguousShape = 26
-const ArrayStorageShape = 28
-const SlowPutArrayStorageShape = 30
+const IsArray = 0x01
+const IndexingShapeMask = 0x0E
+const NoIndexingShape = 0x00
+const Int32Shape = 0x04
+const DoubleShape = 0x06
+const ContiguousShape = 0x08
+const ArrayStorageShape = 0x0A
+const SlowPutArrayStorageShape = 0x0C
# Type constants.
-const StringType = 5
-const ObjectType = 17
-const FinalObjectType = 18
+const StringType = 6
+const SymbolType = 7
+const ObjectType = 23
+const FinalObjectType = 24
+const JSFunctionType = 26
+const ArrayType = 34
+const DerivedArrayType = 35
+const ProxyObjectType = 53
+
+# The typed array types need to be numbered in a particular order because of the manually written
+# switch statement in get_by_val and put_by_val.
+const Int8ArrayType = 36
+const Int16ArrayType = 37
+const Int32ArrayType = 38
+const Uint8ArrayType = 39
+const Uint8ClampedArrayType = 40
+const Uint16ArrayType = 41
+const Uint32ArrayType = 42
+const Float32ArrayType = 43
+const Float64ArrayType = 44
+
+const FirstArrayType = Int8ArrayType
+const LastArrayType = Float64ArrayType
# Type flags constants.
const MasqueradesAsUndefined = 1
-const ImplementsHasInstance = 2
-const ImplementsDefaultHasInstance = 8
+const ImplementsDefaultHasInstance = 2
# Bytecode operand constants.
const FirstConstantRegisterIndex = 0x40000000
@@ -149,12 +378,13 @@ const FirstConstantRegisterIndex = 0x40000000
const GlobalCode = 0
const EvalCode = 1
const FunctionCode = 2
+const ModuleCode = 3
# The interpreter steals the tag word of the argument count.
const LLIntReturnPC = ArgumentCount + TagOffset
# String flags.
-const HashFlags8BitBuffer = 32
+const HashFlags8BitBuffer = 8
# Copied from PropertyOffset.h
const firstOutOfLineOffset = 100
@@ -162,19 +392,24 @@ const firstOutOfLineOffset = 100
# ResolveType
const GlobalProperty = 0
const GlobalVar = 1
-const ClosureVar = 2
-const GlobalPropertyWithVarInjectionChecks = 3
-const GlobalVarWithVarInjectionChecks = 4
-const ClosureVarWithVarInjectionChecks = 5
-const Dynamic = 6
-
-const ResolveModeMask = 0xffff
-
-const MarkedBlockSize = 64 * 1024
+const GlobalLexicalVar = 2
+const ClosureVar = 3
+const LocalClosureVar = 4
+const ModuleVar = 5
+const GlobalPropertyWithVarInjectionChecks = 6
+const GlobalVarWithVarInjectionChecks = 7
+const GlobalLexicalVarWithVarInjectionChecks = 8
+const ClosureVarWithVarInjectionChecks = 9
+
+const ResolveTypeMask = 0x3ff
+const InitializationModeMask = 0xffc00
+const InitializationModeShift = 10
+const NotInitialization = 2
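
ResolveTypeMask and InitializationModeMask above describe a packed operand: the low 10 bits carry the ResolveType and the next 10 bits carry the initialization mode. A small sketch of that decomposition, using only the constants above:

    #include <cstdio>

    const unsigned ResolveTypeMask         = 0x3ff;
    const unsigned InitializationModeMask  = 0xffc00;
    const unsigned InitializationModeShift = 10;
    const unsigned NotInitialization       = 2;

    int main()
    {
        // ClosureVar (3 above) combined with NotInitialization.
        unsigned operand = (NotInitialization << InitializationModeShift) | 3;
        unsigned resolveType = operand & ResolveTypeMask;
        unsigned initializationMode = (operand & InitializationModeMask) >> InitializationModeShift;
        std::printf("resolveType=%u initializationMode=%u\n", resolveType, initializationMode);
        return 0;
    }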
+
+const MarkedBlockSize = 16 * 1024
const MarkedBlockMask = ~(MarkedBlockSize - 1)
-# Constants for checking mark bits.
-const AtomNumberShift = 3
-const BitMapWordShift = 4
+
+const BlackThreshold = 0
# Allocation constants
if JSVALUE64
@@ -196,9 +431,7 @@ macro crash()
if C_LOOP
cloopCrash
else
- storei t0, 0xbbadbeef[]
- move 0, t0
- call t0
+ call _llint_crash
end
end
@@ -210,78 +443,450 @@ macro assert(assertion)
end
end
+# The probe macro can be used to insert some debugging code without perturbing scalar
+# registers. Presently, the probe macro only preserves scalar registers. Hence, the
+# C probe callback function should not trash floating point registers.
+#
+# The macro you pass to probe() can pass whatever registers you like to your probe
+# callback function. However, you need to be mindful of which of the registers are
+# also used as argument registers, and ensure that you don't trash the register value
+# before storing it in the probe callback argument register that you desire.
+#
+# Here's an example of how it's used:
+#
+# probe(
+# macro()
+# move cfr, a0 # pass the ExecState* as arg0.
+# move t0, a1 # pass the value of register t0 as arg1.
+# call _cProbeCallbackFunction # to do whatever you want.
+# end
+# )
+#
+if X86_64
+ macro probe(action)
+ # save all the registers that the LLInt may use.
+ push a0, a1
+ push a2, a3
+ push t0, t1
+ push t2, t3
+ push t4, t5
+
+ action()
+
+ # restore all the registers we saved previously.
+ pop t5, t4
+ pop t3, t2
+ pop t1, t0
+ pop a3, a2
+ pop a1, a0
+ end
+end
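
On the C side, the probe callback is an ordinary extern "C" function that receives whatever the anonymous macro loaded into the argument registers. A hedged sketch matching the comment's example (both the function name and the signature are illustrative, not part of JSC):

    #include <cstdint>
    #include <cstdio>

    // Receives cfr (passed in a0) and the raw contents of t0 (passed in a1),
    // exactly as the example macro above set them up.
    extern "C" void cProbeCallbackFunction(void* execState, std::uintptr_t t0Value)
    {
        std::fprintf(stderr, "probe: exec=%p t0=0x%llx\n",
            execState, static_cast<unsigned long long>(t0Value));
    }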
+
+macro checkStackPointerAlignment(tempReg, location)
+ if ARM64 or C_LOOP
+ # ARM64 will check for us!
+ # C_LOOP does not need the alignment, and can use a little perf
+ # improvement from avoiding useless work.
+ else
+ if ARM or ARMv7 or ARMv7_TRADITIONAL
+ # ARM can't do logical ops with the sp as a source
+ move sp, tempReg
+ andp StackAlignmentMask, tempReg
+ else
+ andp sp, StackAlignmentMask, tempReg
+ end
+ btpz tempReg, .stackPointerOkay
+ move location, tempReg
+ break
+ .stackPointerOkay:
+ end
+end
+
+if C_LOOP or ARM64 or X86_64 or X86_64_WIN
+ const CalleeSaveRegisterCount = 0
+elsif ARM or ARMv7_TRADITIONAL or ARMv7
+ const CalleeSaveRegisterCount = 7
+elsif MIPS
+ const CalleeSaveRegisterCount = 1
+elsif X86 or X86_WIN
+ const CalleeSaveRegisterCount = 3
+end
+
+const CalleeRegisterSaveSize = CalleeSaveRegisterCount * PtrSize
+
+# VMEntryTotalFrameSize includes the space for struct VMEntryRecord and the
+# callee save registers rounded up to keep the stack aligned
+const VMEntryTotalFrameSize = (CalleeRegisterSaveSize + sizeof VMEntryRecord + StackAlignment - 1) & ~StackAlignmentMask
+
+macro pushCalleeSaves()
+ if C_LOOP or ARM64 or X86_64 or X86_64_WIN
+ elsif ARM or ARMv7_TRADITIONAL
+ emit "push {r4-r10}"
+ elsif ARMv7
+ emit "push {r4-r6, r8-r11}"
+ elsif MIPS
+ emit "addiu $sp, $sp, -4"
+ emit "sw $s4, 0($sp)"
+ # save $gp to $s4 so that we can restore it after a function call
+ emit "move $s4, $gp"
+ elsif X86
+ emit "push %esi"
+ emit "push %edi"
+ emit "push %ebx"
+ elsif X86_WIN
+ emit "push esi"
+ emit "push edi"
+ emit "push ebx"
+ end
+end
+
+macro popCalleeSaves()
+ if C_LOOP or ARM64 or X86_64 or X86_64_WIN
+ elsif ARM or ARMv7_TRADITIONAL
+ emit "pop {r4-r10}"
+ elsif ARMv7
+ emit "pop {r4-r6, r8-r11}"
+ elsif MIPS
+ emit "lw $s4, 0($sp)"
+ emit "addiu $sp, $sp, 4"
+ elsif X86
+ emit "pop %ebx"
+ emit "pop %edi"
+ emit "pop %esi"
+ elsif X86_WIN
+ emit "pop ebx"
+ emit "pop edi"
+ emit "pop esi"
+ end
+end
+
+macro preserveCallerPCAndCFR()
+ if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
+ push lr
+ push cfr
+ elsif X86 or X86_WIN or X86_64 or X86_64_WIN
+ push cfr
+ elsif ARM64
+ push cfr, lr
+ else
+ error
+ end
+ move sp, cfr
+end
+
+macro restoreCallerPCAndCFR()
+ move cfr, sp
+ if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
+ pop cfr
+ pop lr
+ elsif X86 or X86_WIN or X86_64 or X86_64_WIN
+ pop cfr
+ elsif ARM64
+ pop lr, cfr
+ end
+end
+
+macro preserveCalleeSavesUsedByLLInt()
+ subp CalleeSaveSpaceStackAligned, sp
+ if C_LOOP
+ elsif ARM or ARMv7_TRADITIONAL
+ elsif ARMv7
+ elsif ARM64
+ emit "stp x27, x28, [x29, #-16]"
+ emit "stp xzr, x26, [x29, #-32]"
+ elsif MIPS
+ elsif X86
+ elsif X86_WIN
+ elsif X86_64
+ storep csr4, -8[cfr]
+ storep csr3, -16[cfr]
+ storep csr2, -24[cfr]
+ elsif X86_64_WIN
+ storep csr6, -8[cfr]
+ storep csr5, -16[cfr]
+ storep csr4, -24[cfr]
+ end
+end
+
+macro restoreCalleeSavesUsedByLLInt()
+ if C_LOOP
+ elsif ARM or ARMv7_TRADITIONAL
+ elsif ARMv7
+ elsif ARM64
+ emit "ldp xzr, x26, [x29, #-32]"
+ emit "ldp x27, x28, [x29, #-16]"
+ elsif MIPS
+ elsif X86
+ elsif X86_WIN
+ elsif X86_64
+ loadp -24[cfr], csr2
+ loadp -16[cfr], csr3
+ loadp -8[cfr], csr4
+ elsif X86_64_WIN
+ loadp -24[cfr], csr4
+ loadp -16[cfr], csr5
+ loadp -8[cfr], csr6
+ end
+end
+
+macro copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(vm, temp)
+ if ARM64 or X86_64 or X86_64_WIN
+ loadp VM::topVMEntryFrame[vm], temp
+ vmEntryRecord(temp, temp)
+ leap VMEntryRecord::calleeSaveRegistersBuffer[temp], temp
+ if ARM64
+ storep csr0, [temp]
+ storep csr1, 8[temp]
+ storep csr2, 16[temp]
+ storep csr3, 24[temp]
+ storep csr4, 32[temp]
+ storep csr5, 40[temp]
+ storep csr6, 48[temp]
+ storep csr7, 56[temp]
+ storep csr8, 64[temp]
+ storep csr9, 72[temp]
+ stored csfr0, 80[temp]
+ stored csfr1, 88[temp]
+ stored csfr2, 96[temp]
+ stored csfr3, 104[temp]
+ stored csfr4, 112[temp]
+ stored csfr5, 120[temp]
+ stored csfr6, 128[temp]
+ stored csfr7, 136[temp]
+ elsif X86_64
+ storep csr0, [temp]
+ storep csr1, 8[temp]
+ storep csr2, 16[temp]
+ storep csr3, 24[temp]
+ storep csr4, 32[temp]
+ elsif X86_64_WIN
+ storep csr0, [temp]
+ storep csr1, 8[temp]
+ storep csr2, 16[temp]
+ storep csr3, 24[temp]
+ storep csr4, 32[temp]
+ storep csr5, 40[temp]
+ storep csr6, 48[temp]
+ end
+ end
+end
+
+macro restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(vm, temp)
+ if ARM64 or X86_64 or X86_64_WIN
+ loadp VM::topVMEntryFrame[vm], temp
+ vmEntryRecord(temp, temp)
+ leap VMEntryRecord::calleeSaveRegistersBuffer[temp], temp
+ if ARM64
+ loadp [temp], csr0
+ loadp 8[temp], csr1
+ loadp 16[temp], csr2
+ loadp 24[temp], csr3
+ loadp 32[temp], csr4
+ loadp 40[temp], csr5
+ loadp 48[temp], csr6
+ loadp 56[temp], csr7
+ loadp 64[temp], csr8
+ loadp 72[temp], csr9
+ loadd 80[temp], csfr0
+ loadd 88[temp], csfr1
+ loadd 96[temp], csfr2
+ loadd 104[temp], csfr3
+ loadd 112[temp], csfr4
+ loadd 120[temp], csfr5
+ loadd 128[temp], csfr6
+ loadd 136[temp], csfr7
+ elsif X86_64
+ loadp [temp], csr0
+ loadp 8[temp], csr1
+ loadp 16[temp], csr2
+ loadp 24[temp], csr3
+ loadp 32[temp], csr4
+ elsif X86_64_WIN
+ loadp [temp], csr0
+ loadp 8[temp], csr1
+ loadp 16[temp], csr2
+ loadp 24[temp], csr3
+ loadp 32[temp], csr4
+ loadp 40[temp], csr5
+ loadp 48[temp], csr6
+ end
+ end
+end
+
macro preserveReturnAddressAfterCall(destinationRegister)
- if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
+ if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS
# In C_LOOP case, we're only preserving the bytecode vPC.
move lr, destinationRegister
- elsif X86 or X86_64
+ elsif X86 or X86_WIN or X86_64 or X86_64_WIN
pop destinationRegister
else
error
end
end
-macro restoreReturnAddressBeforeReturn(sourceRegister)
- if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
- # In C_LOOP case, we're only restoring the bytecode vPC.
- move sourceRegister, lr
- elsif X86 or X86_64
- push sourceRegister
+macro functionPrologue()
+ if X86 or X86_WIN or X86_64 or X86_64_WIN
+ push cfr
+ elsif ARM64
+ push cfr, lr
+ elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
+ push lr
+ push cfr
+ end
+ move sp, cfr
+end
+
+macro functionEpilogue()
+ if X86 or X86_WIN or X86_64 or X86_64_WIN
+ pop cfr
+ elsif ARM64
+ pop lr, cfr
+ elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
+ pop cfr
+ pop lr
+ end
+end
+
+macro vmEntryRecord(entryFramePointer, resultReg)
+ subp entryFramePointer, VMEntryTotalFrameSize, resultReg
+end
+
+macro getFrameRegisterSizeForCodeBlock(codeBlock, size)
+ loadi CodeBlock::m_numCalleeLocals[codeBlock], size
+ lshiftp 3, size
+ addp maxFrameExtentForSlowPathCall, size
+end
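
In plain C++ the macro above is a shift and an add: each callee local is an 8-byte slot, and maxFrameExtentForSlowPathCall reserves the platform's slow-path scratch area. A small sketch (40 is the X86/MIPS figure from the table above; the function name is ours):

    #include <cstdio>

    // Same computation as getFrameRegisterSizeForCodeBlock().
    int frameRegisterSize(int numCalleeLocals, int maxFrameExtentForSlowPathCall)
    {
        return (numCalleeLocals << 3) + maxFrameExtentForSlowPathCall;
    }

    int main()
    {
        std::printf("%d\n", frameRegisterSize(20, 40)); // 20 locals -> 200 bytes
        return 0;
    }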
+
+macro restoreStackPointerAfterCall()
+ loadp CodeBlock[cfr], t2
+ getFrameRegisterSizeForCodeBlock(t2, t2)
+ if ARMv7
+ subp cfr, t2, t2
+ move t2, sp
else
- error
+ subp cfr, t2, sp
end
end
macro traceExecution()
+ if COLLECT_STATS
+ callSlowPath(_llint_count_opcode)
+ end
if EXECUTION_TRACING
callSlowPath(_llint_trace)
end
end
-macro callTargetFunction(callLinkInfo)
+macro traceSlowPathExecution()
+ if COLLECT_STATS
+ callSlowPath(_llint_count_opcode_slow_path)
+ end
+end
+
+macro callOpcodeSlowPath(slowPath)
+ traceSlowPathExecution()
+ callSlowPath(slowPath)
+end
+
+macro callTargetFunction(callee)
if C_LOOP
- cloopCallJSFunction LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
+ cloopCallJSFunction callee
else
- call LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
- dispatchAfterCall()
+ call callee
end
+ restoreStackPointerAfterCall()
+ dispatchAfterCall()
+end
+
+macro prepareForRegularCall(callee, temp1, temp2, temp3)
+ addp CallerFrameAndPCSize, sp
+end
+
+# sp points to the new frame
+macro prepareForTailCall(callee, temp1, temp2, temp3)
+ restoreCalleeSavesUsedByLLInt()
+
+ loadi PayloadOffset + ArgumentCount[cfr], temp2
+ loadp CodeBlock[cfr], temp1
+ loadp CodeBlock::m_numParameters[temp1], temp1
+ bilteq temp1, temp2, .noArityFixup
+ move temp1, temp2
+
+.noArityFixup:
+ # We assume < 2^28 arguments
+ muli SlotSize, temp2
+ addi StackAlignment - 1 + CallFrameHeaderSize, temp2
+ andi ~StackAlignmentMask, temp2
+
+ move cfr, temp1
+ addp temp2, temp1
+
+ loadi PayloadOffset + ArgumentCount[sp], temp2
+ # We assume < 2^28 arguments
+ muli SlotSize, temp2
+ addi StackAlignment - 1 + CallFrameHeaderSize, temp2
+ andi ~StackAlignmentMask, temp2
+
+ if ARM or ARMv7_TRADITIONAL or ARMv7 or ARM64 or C_LOOP or MIPS
+ addp 2 * PtrSize, sp
+ subi 2 * PtrSize, temp2
+ loadp PtrSize[cfr], lr
+ else
+ addp PtrSize, sp
+ subi PtrSize, temp2
+ loadp PtrSize[cfr], temp3
+ storep temp3, [sp]
+ end
+
+ subp temp2, temp1
+ loadp [cfr], cfr
+
+.copyLoop:
+ subi PtrSize, temp2
+ loadp [sp, temp2, 1], temp3
+ storep temp3, [temp1, temp2, 1]
+ btinz temp2, .copyLoop
+
+ move temp1, sp
+ jmp callee
end
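
Both size computations in prepareForTailCall() follow the same recipe: argument slots plus the call frame header, rounded up to the 16-byte stack alignment. A sketch of that arithmetic under JSVALUE64 sizes (SlotSize 8, 40-byte header); the helper name is ours:

    #include <cstdio>

    int alignedFrameSize(int argumentCountIncludingThis)
    {
        const int SlotSize = 8, CallFrameHeaderSize = 40;
        const int StackAlignment = 16, StackAlignmentMask = StackAlignment - 1;
        // Mirrors: muli SlotSize; addi StackAlignment - 1 + CallFrameHeaderSize; andi ~StackAlignmentMask.
        int size = argumentCountIncludingThis * SlotSize + StackAlignment - 1 + CallFrameHeaderSize;
        return size & ~StackAlignmentMask;
    }

    int main()
    {
        std::printf("%d\n", alignedFrameSize(3)); // 3 slots + 40-byte header -> 64
        return 0;
    }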
-macro slowPathForCall(slowPath)
+macro slowPathForCall(slowPath, prepareCall)
+ traceSlowPathExecution()
callCallSlowPath(
slowPath,
- macro (callee)
- if C_LOOP
- cloopCallJSFunction callee
- else
- call callee
- dispatchAfterCall()
- end
+ # Those are r0 and r1
+ macro (callee, calleeFramePtr)
+ btpz calleeFramePtr, .dontUpdateSP
+ move calleeFramePtr, sp
+ prepareCall(callee, t2, t3, t4)
+ .dontUpdateSP:
+ callTargetFunction(callee)
end)
end
-macro arrayProfile(structureAndIndexingType, profile, scratch)
- const structure = structureAndIndexingType
- const indexingType = structureAndIndexingType
- storep structure, ArrayProfile::m_lastSeenStructure[profile]
- loadb Structure::m_indexingType[structure], indexingType
+macro arrayProfile(cellAndIndexingType, profile, scratch)
+ const cell = cellAndIndexingType
+ const indexingType = cellAndIndexingType
+ loadi JSCell::m_structureID[cell], scratch
+ storei scratch, ArrayProfile::m_lastSeenStructureID[profile]
+ loadb JSCell::m_indexingTypeAndMisc[cell], indexingType
end
-macro checkMarkByte(cell, scratch1, scratch2, continuation)
- move cell, scratch1
- move cell, scratch2
-
- andp MarkedBlockMask, scratch1
- andp ~MarkedBlockMask, scratch2
+macro skipIfIsRememberedOrInEden(cell, slowPath)
+ memfence
+ bba JSCell::m_cellState[cell], BlackThreshold, .done
+ slowPath()
+.done:
+end
- rshiftp AtomNumberShift + BitMapWordShift, scratch2
- loadb MarkedBlock::m_marks[scratch1, scratch2, 1], scratch1
- continuation(scratch1)
+macro notifyWrite(set, slow)
+ bbneq WatchpointSet::m_state[set], IsInvalidated, slow
end
macro checkSwitchToJIT(increment, action)
loadp CodeBlock[cfr], t0
- baddis increment, CodeBlock::m_llintExecuteCounter + ExecutionCounter::m_counter[t0], .continue
+ baddis increment, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t0], .continue
action()
.continue:
end
@@ -290,7 +895,7 @@ macro checkSwitchToJITForEpilogue()
checkSwitchToJIT(
10,
macro ()
- callSlowPath(_llint_replace)
+ callOpcodeSlowPath(_llint_replace)
end)
end
@@ -333,26 +938,51 @@ end
# Do the bare minimum required to execute code. Sets up the PC, leave the CodeBlock*
# in t1. May also trigger prologue entry OSR.
macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
- preserveReturnAddressAfterCall(t2)
-
# Set up the call frame and check if we should OSR.
- storep t2, ReturnPC[cfr]
+ preserveCallerPCAndCFR()
+
if EXECUTION_TRACING
+ subp maxFrameExtentForSlowPathCall, sp
callSlowPath(traceSlowPath)
+ addp maxFrameExtentForSlowPathCall, sp
end
codeBlockGetter(t1)
- baddis 5, CodeBlock::m_llintExecuteCounter + ExecutionCounter::m_counter[t1], .continue
- cCall2(osrSlowPath, cfr, PC)
- move t1, cfr
- btpz t0, .recover
- loadp ReturnPC[cfr], t2
- restoreReturnAddressBeforeReturn(t2)
- jmp t0
-.recover:
- codeBlockGetter(t1)
-.continue:
+ if not C_LOOP
+ baddis 5, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t1], .continue
+ if JSVALUE64
+ move cfr, a0
+ move PC, a1
+ cCall2(osrSlowPath)
+ else
+ # We are after the function prologue, but before we have set up sp from the CodeBlock.
+ # Temporarily align stack pointer for this call.
+ subp 8, sp
+ move cfr, a0
+ move PC, a1
+ cCall2(osrSlowPath)
+ addp 8, sp
+ end
+ btpz r0, .recover
+ move cfr, sp # restore the previous sp
+ # pop the callerFrame since we will jump to a function that wants to save it
+ if ARM64
+ pop lr, cfr
+ elsif ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
+ pop cfr
+ pop lr
+ else
+ pop cfr
+ end
+ jmp r0
+ .recover:
+ codeBlockGetter(t1)
+ .continue:
+ end
+
codeBlockSetter(t1)
-
+
+ preserveCalleeSavesUsedByLLInt()
+
# Set up the PC.
if JSVALUE64
loadp CodeBlock::m_instructions[t1], PB
@@ -360,6 +990,39 @@ macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
else
loadp CodeBlock::m_instructions[t1], PC
end
+
+ # Get new sp in t0 and check stack height.
+ getFrameRegisterSizeForCodeBlock(t1, t0)
+ subp cfr, t0, t0
+ loadp CodeBlock::m_vm[t1], t2
+ if C_LOOP
+ bpbeq VM::m_cloopStackLimit[t2], t0, .stackHeightOK
+ else
+ bpbeq VM::m_softStackLimit[t2], t0, .stackHeightOK
+ end
+
+ # Stack height check failed - need to call a slow_path.
+ # Set up temporary stack pointer for call including callee saves
+ subp maxFrameExtentForSlowPathCall, sp
+ callSlowPath(_llint_stack_check)
+ bpeq r1, 0, .stackHeightOKGetCodeBlock
+ move r1, cfr
+ dispatch(0) # Go to exception handler in PC
+
+.stackHeightOKGetCodeBlock:
+ # Stack check slow path returned that the stack was ok.
+ # Since they were clobbered, need to get CodeBlock and new sp
+ codeBlockGetter(t1)
+ getFrameRegisterSizeForCodeBlock(t1, t0)
+ subp cfr, t0, t0
+
+.stackHeightOK:
+ move t0, sp
+
+ if JSVALUE64
+ move TagTypeNumber, tagTypeNumber
+ addp TagBitTypeOther, tagTypeNumber, tagMask
+ end
end
# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
@@ -394,73 +1057,158 @@ macro functionInitialization(profileArgSkip)
end
baddpnz -8, t0, .argumentProfileLoop
.argumentProfileDone:
-
- # Check stack height.
- loadi CodeBlock::m_numCalleeRegisters[t1], t0
- addi 1, t0 # Account that local0 goes at slot -1
- loadp CodeBlock::m_vm[t1], t2
- lshiftp 3, t0
- subp cfr, t0, t0
- bpbeq VM::m_jsStackLimit[t2], t0, .stackHeightOK
+end
- # Stack height check failed - need to call a slow_path.
- callSlowPath(_llint_stack_check)
-.stackHeightOK:
+macro doReturn()
+ restoreCalleeSavesUsedByLLInt()
+ restoreCallerPCAndCFR()
+ ret
end
-macro allocateJSObject(allocator, structure, result, scratch1, slowCase)
- if ALWAYS_ALLOCATE_SLOW
- jmp slowCase
- else
- const offsetOfFirstFreeCell =
- MarkedAllocator::m_freeList +
- MarkedBlock::FreeList::head
-
- # Get the object from the free list.
- loadp offsetOfFirstFreeCell[allocator], result
- btpz result, slowCase
-
- # Remove the object from the free list.
- loadp [result], scratch1
- storep scratch1, offsetOfFirstFreeCell[allocator]
-
- # Initialize the object.
- storep structure, JSCell::m_structure[result]
- storep 0, JSObject::m_butterfly[result]
- end
+# stub to call into JavaScript or Native functions
+# EncodedJSValue vmEntryToJavaScript(void* code, VM* vm, ProtoCallFrame* protoFrame)
+# EncodedJSValue vmEntryToNativeFunction(void* code, VM* vm, ProtoCallFrame* protoFrame)
+
+if C_LOOP
+ _llint_vm_entry_to_javascript:
+else
+ global _vmEntryToJavaScript
+ _vmEntryToJavaScript:
end
+ doVMEntry(makeJavaScriptCall)
-macro doReturn()
- loadp ReturnPC[cfr], t2
- loadp CallerFrame[cfr], cfr
- restoreReturnAddressBeforeReturn(t2)
- ret
+
+if C_LOOP
+ _llint_vm_entry_to_native:
+else
+ global _vmEntryToNative
+ _vmEntryToNative:
+end
+ doVMEntry(makeHostFunctionCall)
+
+
+if not C_LOOP
+ # void sanitizeStackForVMImpl(VM* vm)
+ global _sanitizeStackForVMImpl
+ _sanitizeStackForVMImpl:
+ # We need three non-aliased caller-save registers. We are guaranteed
+ # this for a0, a1 and a2 on all architectures.
+ if X86 or X86_WIN
+ loadp 4[sp], a0
+ end
+ const vm = a0
+ const address = a1
+ const zeroValue = a2
+
+ loadp VM::m_lastStackTop[vm], address
+ bpbeq sp, address, .zeroFillDone
+
+ move 0, zeroValue
+ .zeroFillLoop:
+ storep zeroValue, [address]
+ addp PtrSize, address
+ bpa sp, address, .zeroFillLoop
+
+ .zeroFillDone:
+ move sp, address
+ storep address, VM::m_lastStackTop[vm]
+ ret
+
+ # VMEntryRecord* vmEntryRecord(const VMEntryFrame* entryFrame)
+ global _vmEntryRecord
+ _vmEntryRecord:
+ if X86 or X86_WIN
+ loadp 4[sp], a0
+ end
+
+ vmEntryRecord(a0, r0)
+ ret
end
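
The sanitizeStackForVMImpl stub above scrubs the stale region between the lowest point the stack previously reached (VM::m_lastStackTop) and the current, higher stack pointer, then records the new top. A hedged C++ rendering of the same loop, with a plain pointer standing in for the VM field:

    #include <cstdint>

    // Zero word-by-word from the old stack top up to (not including) the current
    // stack pointer, then remember the new top. Sketch only; the real code walks
    // the actual machine stack.
    void sanitizeStackSketch(char*& lastStackTop, char* currentStackPointer)
    {
        for (char* p = lastStackTop; p < currentStackPointer; p += sizeof(std::intptr_t))
            *reinterpret_cast<std::intptr_t*>(p) = 0;
        lastStackTop = currentStackPointer;
    }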
if C_LOOP
+ # Dummy entry point the C Loop uses to initialize.
+ _llint_entry:
+ crash()
else
-# stub to call into JavaScript or Native functions
-# EncodedJSValue callToJavaScript(void* code, ExecState** vm, ProtoCallFrame* protoFrame, Register* topOfStack)
-# EncodedJSValue callToNativeFunction(void* code, ExecState** vm, ProtoCallFrame* protoFrame, Register* topOfStack)
-# Note, if these stubs or one of their related macros are changed, make the
-# equivalent changes in jit/JITStubsX86.h and/or jit/JITStubsMSVC64.asm
-_callToJavaScript:
- doCallToJavaScript(makeJavaScriptCall, doReturnFromJavaScript)
+ macro initPCRelative(pcBase)
+ if X86_64 or X86_64_WIN or X86 or X86_WIN
+ call _relativePCBase
+ _relativePCBase:
+ pop pcBase
+ elsif ARM64
+ elsif ARMv7
+ _relativePCBase:
+ move pc, pcBase
+ subp 3, pcBase # Need to back up the PC and set the Thumb2 bit
+ elsif ARM or ARMv7_TRADITIONAL
+ _relativePCBase:
+ move pc, pcBase
+ subp 8, pcBase
+ elsif MIPS
+ la _relativePCBase, pcBase
+ setcallreg pcBase # needed to set $t9 to the right value for the .cpload created by the label.
+ _relativePCBase:
+ end
+end
-_callToNativeFunction:
- doCallToJavaScript(makeHostFunctionCall, doReturnFromHostFunction)
+# The PC base is in t1, as this is what _llint_entry leaves behind through
+# initPCRelative(t1)
+macro setEntryAddress(index, label)
+ if X86_64 or X86_64_WIN
+ leap (label - _relativePCBase)[t1], t3
+ move index, t4
+ storep t3, [a0, t4, 8]
+ elsif X86 or X86_WIN
+ leap (label - _relativePCBase)[t1], t3
+ move index, t4
+ storep t3, [a0, t4, 4]
+ elsif ARM64
+ pcrtoaddr label, t1
+ move index, t4
+ storep t1, [a0, t4, 8]
+ elsif ARM or ARMv7 or ARMv7_TRADITIONAL
+ mvlbl (label - _relativePCBase), t4
+ addp t4, t1, t4
+ move index, t3
+ storep t4, [a0, t3, 4]
+ elsif MIPS
+ la label, t4
+ la _relativePCBase, t3
+ subp t3, t4
+ addp t4, t1, t4
+ move index, t3
+ storep t4, [a0, t3, 4]
+ end
end
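
Conceptually, initPCRelative() plus the setEntryAddress() calls generated into InitBytecodes just turn a set of label offsets into absolute addresses at runtime and store them into the opcode table passed in a0. A hedged sketch of that idea; the table and offset-array names are ours, not JSC's:

    #include <cstddef>

    // For every opcode, convert an offset relative to _relativePCBase into an
    // absolute address and record it in the opcode map.
    void initializeOpcodeMap(void** opcodeMap, char* relativePCBase,
                             const std::ptrdiff_t* labelOffsets, int count)
    {
        for (int index = 0; index < count; ++index)
            opcodeMap[index] = relativePCBase + labelOffsets[index];
    }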
-# Indicate the beginning of LLInt.
-_llint_begin:
- crash()
+global _llint_entry
+# Entry point for the llint to initialize.
+_llint_entry:
+ functionPrologue()
+ pushCalleeSaves()
+ if X86 or X86_WIN
+ loadp 20[sp], a0
+ end
+ initPCRelative(t1)
+ # Include generated bytecode initialization file.
+ include InitBytecodes
+
+ popCalleeSaves()
+ functionEpilogue()
+ ret
+end
_llint_program_prologue:
prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
dispatch(0)
+_llint_module_program_prologue:
+ prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
+ dispatch(0)
+
+
_llint_eval_prologue:
prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
dispatch(0)
@@ -468,14 +1216,12 @@ _llint_eval_prologue:
_llint_function_for_call_prologue:
prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call)
-.functionForCallBegin:
functionInitialization(0)
dispatch(0)
_llint_function_for_construct_prologue:
prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct)
-.functionForConstructBegin:
functionInitialization(1)
dispatch(0)
@@ -483,11 +1229,17 @@ _llint_function_for_construct_prologue:
_llint_function_for_call_arity_check:
prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call)
functionArityCheck(.functionForCallBegin, _slow_path_call_arityCheck)
+.functionForCallBegin:
+ functionInitialization(0)
+ dispatch(0)
_llint_function_for_construct_arity_check:
prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct)
functionArityCheck(.functionForConstructBegin, _slow_path_construct_arityCheck)
+.functionForConstructBegin:
+ functionInitialization(1)
+ dispatch(0)
# Value-representation-specific code.
@@ -499,128 +1251,215 @@ end
# Value-representation-agnostic code.
-_llint_op_touch_entry:
+_llint_op_create_direct_arguments:
traceExecution()
- callSlowPath(_slow_path_touch_entry)
- dispatch(1)
+ callOpcodeSlowPath(_slow_path_create_direct_arguments)
+ dispatch(2)
+
+
+_llint_op_create_scoped_arguments:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_create_scoped_arguments)
+ dispatch(3)
+
+
+_llint_op_create_cloned_arguments:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_create_cloned_arguments)
+ dispatch(2)
+
+
+_llint_op_create_this:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_create_this)
+ dispatch(5)
+
+
+_llint_op_new_object:
+ traceExecution()
+ callOpcodeSlowPath(_llint_slow_path_new_object)
+ dispatch(4)
+
+
+_llint_op_new_func:
+ traceExecution()
+ callOpcodeSlowPath(_llint_slow_path_new_func)
+ dispatch(4)
+
+
+_llint_op_new_generator_func:
+ traceExecution()
+ callOpcodeSlowPath(_llint_slow_path_new_generator_func)
+ dispatch(4)
+
+
+_llint_op_new_async_func:
+ traceExecution()
+ callSlowPath(_llint_slow_path_new_async_func)
+ dispatch(4)
_llint_op_new_array:
traceExecution()
- callSlowPath(_llint_slow_path_new_array)
+ callOpcodeSlowPath(_llint_slow_path_new_array)
dispatch(5)
+_llint_op_new_array_with_spread:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_new_array_with_spread)
+ dispatch(5)
+
+
+_llint_op_spread:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_spread)
+ dispatch(3)
+
+
_llint_op_new_array_with_size:
traceExecution()
- callSlowPath(_llint_slow_path_new_array_with_size)
+ callOpcodeSlowPath(_llint_slow_path_new_array_with_size)
dispatch(4)
_llint_op_new_array_buffer:
traceExecution()
- callSlowPath(_llint_slow_path_new_array_buffer)
+ callOpcodeSlowPath(_llint_slow_path_new_array_buffer)
dispatch(5)
_llint_op_new_regexp:
traceExecution()
- callSlowPath(_llint_slow_path_new_regexp)
+ callOpcodeSlowPath(_llint_slow_path_new_regexp)
dispatch(3)
_llint_op_less:
traceExecution()
- callSlowPath(_slow_path_less)
+ callOpcodeSlowPath(_slow_path_less)
dispatch(4)
_llint_op_lesseq:
traceExecution()
- callSlowPath(_slow_path_lesseq)
+ callOpcodeSlowPath(_slow_path_lesseq)
dispatch(4)
_llint_op_greater:
traceExecution()
- callSlowPath(_slow_path_greater)
+ callOpcodeSlowPath(_slow_path_greater)
dispatch(4)
_llint_op_greatereq:
traceExecution()
- callSlowPath(_slow_path_greatereq)
+ callOpcodeSlowPath(_slow_path_greatereq)
dispatch(4)
_llint_op_mod:
traceExecution()
- callSlowPath(_slow_path_mod)
+ callOpcodeSlowPath(_slow_path_mod)
+ dispatch(4)
+
+
+_llint_op_pow:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_pow)
dispatch(4)
_llint_op_typeof:
traceExecution()
- callSlowPath(_slow_path_typeof)
+ callOpcodeSlowPath(_slow_path_typeof)
dispatch(3)
-_llint_op_is_object:
+_llint_op_is_object_or_null:
traceExecution()
- callSlowPath(_slow_path_is_object)
+ callOpcodeSlowPath(_slow_path_is_object_or_null)
dispatch(3)
-
_llint_op_is_function:
traceExecution()
- callSlowPath(_slow_path_is_function)
+ callOpcodeSlowPath(_slow_path_is_function)
dispatch(3)
_llint_op_in:
traceExecution()
- callSlowPath(_slow_path_in)
- dispatch(4)
+ callOpcodeSlowPath(_slow_path_in)
+ dispatch(5)
-macro withInlineStorage(object, propertyStorage, continuation)
- # Indicate that the object is the property storage, and that the
- # property storage register is unused.
- continuation(object, propertyStorage)
-end
-macro withOutOfLineStorage(object, propertyStorage, continuation)
- loadp JSObject::m_butterfly[object], propertyStorage
- # Indicate that the propertyStorage register now points to the
- # property storage, and that the object register may be reused
- # if the object pointer is not needed anymore.
- continuation(propertyStorage, object)
-end
+_llint_op_try_get_by_id:
+ traceExecution()
+ callOpcodeSlowPath(_llint_slow_path_try_get_by_id)
+ dispatch(5)
_llint_op_del_by_id:
traceExecution()
- callSlowPath(_llint_slow_path_del_by_id)
+ callOpcodeSlowPath(_llint_slow_path_del_by_id)
dispatch(4)
_llint_op_del_by_val:
traceExecution()
- callSlowPath(_llint_slow_path_del_by_val)
+ callOpcodeSlowPath(_llint_slow_path_del_by_val)
dispatch(4)
_llint_op_put_by_index:
traceExecution()
- callSlowPath(_llint_slow_path_put_by_index)
+ callOpcodeSlowPath(_llint_slow_path_put_by_index)
dispatch(4)
-_llint_op_put_getter_setter:
+_llint_op_put_getter_by_id:
+ traceExecution()
+ callOpcodeSlowPath(_llint_slow_path_put_getter_by_id)
+ dispatch(5)
+
+
+_llint_op_put_setter_by_id:
traceExecution()
- callSlowPath(_llint_slow_path_put_getter_setter)
+ callOpcodeSlowPath(_llint_slow_path_put_setter_by_id)
dispatch(5)
+_llint_op_put_getter_setter_by_id:
+ traceExecution()
+ callOpcodeSlowPath(_llint_slow_path_put_getter_setter_by_id)
+ dispatch(6)
+
+
+_llint_op_put_getter_by_val:
+ traceExecution()
+ callOpcodeSlowPath(_llint_slow_path_put_getter_by_val)
+ dispatch(5)
+
+
+_llint_op_put_setter_by_val:
+ traceExecution()
+ callOpcodeSlowPath(_llint_slow_path_put_setter_by_val)
+ dispatch(5)
+
+
+_llint_op_define_data_property:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_define_data_property)
+ dispatch(5)
+
+
+_llint_op_define_accessor_property:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_define_accessor_property)
+ dispatch(6)
+
+
_llint_op_jtrue:
traceExecution()
jumpTrueOrFalse(
@@ -701,47 +1540,122 @@ _llint_op_jngreatereq:
_llint_op_loop_hint:
traceExecution()
+ checkSwitchToJITForLoop()
+ dispatch(1)
+
+
+_llint_op_watchdog:
+ traceExecution()
loadp CodeBlock[cfr], t1
loadp CodeBlock::m_vm[t1], t1
- loadb VM::watchdog+Watchdog::m_timerDidFire[t1], t0
- btbnz t0, .handleWatchdogTimer
+ loadp VM::m_watchdog[t1], t0
+ btpnz t0, .handleWatchdogTimer
.afterWatchdogTimerCheck:
- checkSwitchToJITForLoop()
dispatch(1)
.handleWatchdogTimer:
+ loadb Watchdog::m_timerDidFire[t0], t0
+ btbz t0, .afterWatchdogTimerCheck
callWatchdogTimerHandler(.throwHandler)
jmp .afterWatchdogTimerCheck
.throwHandler:
jmp _llint_throw_from_slow_path_trampoline
+
+# Returns the packet pointer in t0.
+macro acquireShadowChickenPacket(slow)
+ loadp CodeBlock[cfr], t1
+ loadp CodeBlock::m_vm[t1], t1
+ loadp VM::m_shadowChicken[t1], t2
+ loadp ShadowChicken::m_logCursor[t2], t0
+ bpaeq t0, ShadowChicken::m_logEnd[t2], slow
+ addp sizeof ShadowChicken::Packet, t0, t1
+ storep t1, ShadowChicken::m_logCursor[t2]
+end
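
acquireShadowChickenPacket() is a bump allocation out of the ShadowChicken log: take the current cursor, bail out when the log is full, and advance the cursor by one packet. A sketch of the same pattern with simplified types (the real code branches to a slow path rather than returning null):

    struct Packet { /* payload omitted in this sketch */ };

    // Bump-allocate one packet between logCursor and logEnd.
    Packet* acquirePacket(Packet*& logCursor, Packet* logEnd)
    {
        if (logCursor >= logEnd)
            return nullptr; // the interpreter takes the slow path here
        return logCursor++;
    }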
+
+
_llint_op_switch_string:
traceExecution()
- callSlowPath(_llint_slow_path_switch_string)
+ callOpcodeSlowPath(_llint_slow_path_switch_string)
dispatch(0)
_llint_op_new_func_exp:
traceExecution()
- callSlowPath(_llint_slow_path_new_func_exp)
- dispatch(3)
+ callOpcodeSlowPath(_llint_slow_path_new_func_exp)
+ dispatch(4)
+
+_llint_op_new_generator_func_exp:
+ traceExecution()
+ callOpcodeSlowPath(_llint_slow_path_new_generator_func_exp)
+ dispatch(4)
+
+_llint_op_new_async_func_exp:
+ traceExecution()
+ callSlowPath(_llint_slow_path_new_async_func_exp)
+ dispatch(4)
+_llint_op_set_function_name:
+ traceExecution()
+ callOpcodeSlowPath(_llint_slow_path_set_function_name)
+ dispatch(3)
+
_llint_op_call:
traceExecution()
arrayProfileForCall()
- doCall(_llint_slow_path_call)
+ doCall(_llint_slow_path_call, prepareForRegularCall)
+_llint_op_tail_call:
+ traceExecution()
+ arrayProfileForCall()
+ checkSwitchToJITForEpilogue()
+ doCall(_llint_slow_path_call, prepareForTailCall)
_llint_op_construct:
traceExecution()
- doCall(_llint_slow_path_construct)
+ doCall(_llint_slow_path_construct, prepareForRegularCall)
+macro doCallVarargs(frameSlowPath, slowPath, prepareCall)
+ callOpcodeSlowPath(frameSlowPath)
+ branchIfException(_llint_throw_from_slow_path_trampoline)
+ # calleeFrame in r1
+ if JSVALUE64
+ move r1, sp
+ else
+ # The calleeFrame is not stack aligned, move down by CallerFrameAndPCSize to align
+ if ARMv7
+ subp r1, CallerFrameAndPCSize, t2
+ move t2, sp
+ else
+ subp r1, CallerFrameAndPCSize, sp
+ end
+ end
+ slowPathForCall(slowPath, prepareCall)
+end
_llint_op_call_varargs:
traceExecution()
- callSlowPath(_llint_slow_path_size_and_alloc_frame_for_varargs)
- branchIfException(_llint_throw_from_slow_path_trampoline)
- slowPathForCall(_llint_slow_path_call_varargs)
+ doCallVarargs(_llint_slow_path_size_frame_for_varargs, _llint_slow_path_call_varargs, prepareForRegularCall)
+
+_llint_op_tail_call_varargs:
+ traceExecution()
+ checkSwitchToJITForEpilogue()
+ # We lie and perform the tail call instead of preparing it since we can't
+ # prepare the frame for a call opcode
+ doCallVarargs(_llint_slow_path_size_frame_for_varargs, _llint_slow_path_call_varargs, prepareForTailCall)
+
+
+_llint_op_tail_call_forward_arguments:
+ traceExecution()
+ checkSwitchToJITForEpilogue()
+ # We lie and perform the tail call instead of preparing it since we can't
+ # prepare the frame for a call opcode
+ doCallVarargs(_llint_slow_path_size_frame_for_forward_arguments, _llint_slow_path_tail_call_forward_arguments, prepareForTailCall)
+
+
+_llint_op_construct_varargs:
+ traceExecution()
+ doCallVarargs(_llint_slow_path_size_frame_for_varargs, _llint_slow_path_construct_varargs, prepareForRegularCall)
_llint_op_call_eval:
@@ -780,7 +1694,7 @@ _llint_op_call_eval:
# and a PC to call, and that PC may be a dummy thunk that just
# returns the JS value that the eval returned.
- slowPathForCall(_llint_slow_path_call_eval)
+ slowPathForCall(_llint_slow_path_call_eval, prepareForRegularCall)
_llint_generic_return_point:
@@ -789,74 +1703,50 @@ _llint_generic_return_point:
_llint_op_strcat:
traceExecution()
- callSlowPath(_slow_path_strcat)
+ callOpcodeSlowPath(_slow_path_strcat)
dispatch(4)
-_llint_op_get_pnames:
+_llint_op_push_with_scope:
traceExecution()
- callSlowPath(_llint_slow_path_get_pnames)
- dispatch(0) # The slow_path either advances the PC or jumps us to somewhere else.
+ callOpcodeSlowPath(_slow_path_push_with_scope)
+ dispatch(4)
-_llint_op_push_with_scope:
+_llint_op_assert:
traceExecution()
- callSlowPath(_llint_slow_path_push_with_scope)
- dispatch(2)
+ callOpcodeSlowPath(_slow_path_assert)
+ dispatch(3)
-_llint_op_pop_scope:
- traceExecution()
- callSlowPath(_llint_slow_path_pop_scope)
- dispatch(1)
+_llint_op_yield:
+ notSupported()
-_llint_op_push_name_scope:
+_llint_op_create_lexical_environment:
traceExecution()
- callSlowPath(_llint_slow_path_push_name_scope)
- dispatch(4)
+ callOpcodeSlowPath(_slow_path_create_lexical_environment)
+ dispatch(5)
_llint_op_throw:
traceExecution()
- callSlowPath(_llint_slow_path_throw)
+ callOpcodeSlowPath(_llint_slow_path_throw)
dispatch(2)
_llint_op_throw_static_error:
traceExecution()
- callSlowPath(_llint_slow_path_throw_static_error)
+ callOpcodeSlowPath(_slow_path_throw_static_error)
dispatch(3)
-_llint_op_profile_will_call:
- traceExecution()
- loadp CodeBlock[cfr], t0
- loadp CodeBlock::m_vm[t0], t0
- loadi VM::m_enabledProfiler[t0], t0
- btpz t0, .opProfilerWillCallDone
- callSlowPath(_llint_slow_path_profile_will_call)
-.opProfilerWillCallDone:
- dispatch(2)
-
-
-_llint_op_profile_did_call:
- traceExecution()
- loadp CodeBlock[cfr], t0
- loadp CodeBlock::m_vm[t0], t0
- loadi VM::m_enabledProfiler[t0], t0
- btpz t0, .opProfilerDidCallDone
- callSlowPath(_llint_slow_path_profile_did_call)
-.opProfilerDidCallDone:
- dispatch(2)
-
-
_llint_op_debug:
traceExecution()
loadp CodeBlock[cfr], t0
loadi CodeBlock::m_debuggerRequests[t0], t0
btiz t0, .opDebugDone
- callSlowPath(_llint_slow_path_debug)
+ callOpcodeSlowPath(_llint_slow_path_debug)
.opDebugDone:
dispatch(3)
@@ -868,6 +1758,80 @@ _llint_native_call_trampoline:
_llint_native_construct_trampoline:
nativeCallTrampoline(NativeExecutable::m_constructor)
+_llint_op_get_enumerable_length:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_get_enumerable_length)
+ dispatch(3)
+
+_llint_op_has_indexed_property:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_has_indexed_property)
+ dispatch(5)
+
+_llint_op_has_structure_property:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_has_structure_property)
+ dispatch(5)
+
+_llint_op_has_generic_property:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_has_generic_property)
+ dispatch(4)
+
+_llint_op_get_direct_pname:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_get_direct_pname)
+ dispatch(7)
+
+_llint_op_get_property_enumerator:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_get_property_enumerator)
+ dispatch(3)
+
+_llint_op_enumerator_structure_pname:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_next_structure_enumerator_pname)
+ dispatch(4)
+
+_llint_op_enumerator_generic_pname:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_next_generic_enumerator_pname)
+ dispatch(4)
+
+_llint_op_to_index_string:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_to_index_string)
+ dispatch(3)
+
+_llint_op_create_rest:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_create_rest)
+ dispatch(4)
+
+_llint_op_instanceof:
+ traceExecution()
+ callOpcodeSlowPath(_llint_slow_path_instanceof)
+ dispatch(4)
+
+_llint_op_get_by_id_with_this:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_get_by_id_with_this)
+ dispatch(6)
+
+_llint_op_get_by_val_with_this:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_get_by_val_with_this)
+ dispatch(6)
+
+_llint_op_put_by_id_with_this:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_put_by_id_with_this)
+ dispatch(5)
+
+_llint_op_put_by_val_with_this:
+ traceExecution()
+ callOpcodeSlowPath(_slow_path_put_by_val_with_this)
+ dispatch(5)
# Lastly, make sure that we can link even though we don't support all opcodes.
# These opcodes should never arise when using LLInt or either JIT. We assert
@@ -886,53 +1850,3 @@ macro notSupported()
break
end
end
-
-_llint_op_get_by_id_chain:
- notSupported()
-
-_llint_op_get_by_id_custom_chain:
- notSupported()
-
-_llint_op_get_by_id_custom_proto:
- notSupported()
-
-_llint_op_get_by_id_custom_self:
- notSupported()
-
-_llint_op_get_by_id_generic:
- notSupported()
-
-_llint_op_get_by_id_getter_chain:
- notSupported()
-
-_llint_op_get_by_id_getter_proto:
- notSupported()
-
-_llint_op_get_by_id_getter_self:
- notSupported()
-
-_llint_op_get_by_id_proto:
- notSupported()
-
-_llint_op_get_by_id_self:
- notSupported()
-
-_llint_op_get_string_length:
- notSupported()
-
-_llint_op_put_by_id_generic:
- notSupported()
-
-_llint_op_put_by_id_replace:
- notSupported()
-
-_llint_op_put_by_id_transition:
- notSupported()
-
-_llint_op_init_global_const_nop:
- dispatch(5)
-
-# Indicate the end of LLInt.
-_llint_end:
- crash()
-
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
index 48148c6f4..59a250fc7 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,18 +26,18 @@
#include "config.h"
#include "LowLevelInterpreter.h"
-#if ENABLE(LLINT)
-
#include "LLIntOfflineAsmConfig.h"
#include <wtf/InlineASM.h>
-#if ENABLE(LLINT_C_LOOP)
+#if !ENABLE(JIT)
+#include "CLoopStackInlines.h"
#include "CodeBlock.h"
#include "CommonSlowPaths.h"
+#include "Interpreter.h"
#include "LLIntCLoop.h"
+#include "LLIntData.h"
#include "LLIntSlowPaths.h"
-#include "Operations.h"
-#include "VMInspector.h"
+#include "JSCInlines.h"
#include <wtf/Assertions.h>
#include <wtf/MathExtras.h>
@@ -90,6 +90,12 @@ using namespace JSC::LLInt;
#define OFFLINE_ASM_BEGIN
#define OFFLINE_ASM_END
+#if ENABLE(OPCODE_TRACING)
+#define TRACE_OPCODE(opcode) dataLogF(" op %s\n", #opcode)
+#else
+#define TRACE_OPCODE(opcode)
+#endif
+
// To keep compilers happy in case of unused labels, force usage of the label:
#define USE_LABEL(label) \
do { \
@@ -97,7 +103,9 @@ using namespace JSC::LLInt;
goto label; \
} while (false)
-#define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode) USE_LABEL(opcode);
+#define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode) USE_LABEL(opcode); TRACE_OPCODE(opcode);
+
+#define OFFLINE_ASM_GLOBAL_LABEL(label) OFFLINE_ASM_GLUE_LABEL(label)
#if ENABLE(COMPUTED_GOTO_OPCODES)
#define OFFLINE_ASM_GLUE_LABEL(label) label: USE_LABEL(label);
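
When ENABLE(COMPUTED_GOTO_OPCODES) is set, these label macros feed the GCC/Clang labels-as-values dispatch used by the C loop. A minimal, self-contained sketch of that dispatch style with toy opcodes (not the real LLInt table or handlers):

    #include <cstdio>

    // Toy bytecode: op 0 = load immediate, op 1 = add immediate, op 2 = halt.
    static int run(const int* pc)
    {
        static void* const dispatchTable[] = { &&op_load, &&op_add, &&op_halt };
        int acc = 0;
    #define NEXT() goto *dispatchTable[*pc]   // GCC/Clang labels-as-values extension
        NEXT();
    op_load:
        acc = pc[1]; pc += 2; NEXT();
    op_add:
        acc += pc[1]; pc += 2; NEXT();
    op_halt:
        return acc;
    #undef NEXT
    }

    int main()
    {
        const int program[] = { 0, 40, 1, 2, 2 };
        std::printf("%d\n", run(program)); // prints 42
    }

Each handler ends by jumping through the table rather than returning to a central switch, which is the property the OFFLINE_ASM_OPCODE_LABEL/OFFLINE_ASM_GLUE_LABEL definitions preserve.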
@@ -147,6 +155,7 @@ static void Double2Ints(double val, uint32_t& lo, uint32_t& hi)
// pseudo register, as well as hides endianness differences.
struct CLoopRegister {
+ CLoopRegister() { i = static_cast<intptr_t>(0xbadbeef0baddbeef); }
union {
intptr_t i;
uintptr_t u;
@@ -212,10 +221,15 @@ struct CLoopRegister {
#endif // !CPU(BIG_ENDIAN)
#endif // !USE(JSVALUE64)
+ intptr_t* ip;
int8_t* i8p;
void* vp;
+ CallFrame* callFrame;
ExecState* execState;
void* instruction;
+ VM* vm;
+ JSCell* cell;
+ ProtoCallFrame* protoCallFrame;
NativeFunction nativeFunc;
#if USE(JSVALUE64)
int64_t i64;
@@ -226,6 +240,13 @@ struct CLoopRegister {
Opcode opcode;
};
+ operator ExecState*() { return execState; }
+ operator Instruction*() { return reinterpret_cast<Instruction*>(instruction); }
+ operator VM*() { return vm; }
+ operator ProtoCallFrame*() { return protoCallFrame; }
+ operator Register*() { return reinterpret_cast<Register*>(vp); }
+ operator JSCell*() { return cell; }
+
#if USE(JSVALUE64)
inline void clearHighWord() { i32padding = 0; }
#else
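
A compact sketch of the pattern the CLoopRegister struct above follows: one machine-word union per pseudo register, a poison value written in the constructor, and implicit conversions so a register can be handed straight to helpers expecting a typed pointer. The Reg and Frame names are illustrative stand-ins, not JSC types.

    #include <cstdint>

    struct Frame;

    struct Reg {
        union {
            intptr_t i;
            uintptr_t u;
            void* vp;
            Frame* frame;
            int8_t* i8p;
        };
        Reg() { i = static_cast<intptr_t>(0xbadbeef0); } // poison for easier debugging
        operator Frame*() const { return frame; }        // lets callers write f(reg) directly
        operator void*() const { return vp; }
    };

    static_assert(sizeof(Reg) == sizeof(intptr_t), "a pseudo register is exactly one machine word");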
@@ -237,7 +258,7 @@ struct CLoopRegister {
// The llint C++ interpreter loop:
//
-JSValue CLoop::execute(CallFrame* callFrame, Opcode entryOpcode, bool isInitializationPass)
+JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, ProtoCallFrame* protoCallFrame, bool isInitializationPass)
{
#define CAST reinterpret_cast
#define SIGN_BIT32(x) ((x) & 0x80000000)
@@ -272,8 +293,6 @@ JSValue CLoop::execute(CallFrame* callFrame, Opcode entryOpcode, bool isInitiali
return JSValue();
}
- ASSERT(callFrame->vm().topCallFrame == callFrame);
-
// Define the pseudo registers used by the LLINT C Loop backend:
ASSERT(sizeof(CLoopRegister) == sizeof(intptr_t));
@@ -308,69 +327,66 @@ JSValue CLoop::execute(CallFrame* callFrame, Opcode entryOpcode, bool isInitiali
// 2. 32 bit result values will be in the low 32-bit of t0.
// 3. 64 bit result values will be in t0.
- CLoopRegister t0, t1, t2, t3;
+ CLoopRegister t0, t1, t2, t3, t5, t7, sp, cfr, lr, pc;
#if USE(JSVALUE64)
- CLoopRegister rBasePC, tagTypeNumber, tagMask;
+ CLoopRegister pcBase, tagTypeNumber, tagMask;
#endif
- CLoopRegister rRetVPC;
CLoopDoubleRegister d0, d1;
- // Keep the compiler happy. We don't really need this, but the compiler
- // will complain. This makes the warning go away.
- t0.i = 0;
- t1.i = 0;
-
- VM* vm = &callFrame->vm();
-
- CodeBlock* codeBlock = callFrame->codeBlock();
- Instruction* vPC;
-
- // rPC is an alias for vPC. Set up the alias:
- CLoopRegister& rPC = *CAST<CLoopRegister*>(&vPC);
+ lr.opcode = getOpcode(llint_return_to_host);
+ sp.vp = vm->interpreter->cloopStack().topOfStack() + 1;
+ cfr.callFrame = vm->topCallFrame;
+#ifndef NDEBUG
+ void* startSP = sp.vp;
+ CallFrame* startCFR = cfr.callFrame;
+#endif
-#if USE(JSVALUE32_64)
- vPC = codeBlock->instructions().begin();
-#else // USE(JSVALUE64)
- vPC = 0;
- rBasePC.vp = codeBlock->instructions().begin();
+ // Initialize the incoming args for doVMEntryToJavaScript:
+ t0.vp = executableAddress;
+ t1.vm = vm;
+ t2.protoCallFrame = protoCallFrame;
+#if USE(JSVALUE64)
// For the ASM llint, JITStubs takes care of this initialization. We do
// it explicitly here for the C loop:
tagTypeNumber.i = 0xFFFF000000000000;
tagMask.i = 0xFFFF000000000002;
#endif // USE(JSVALUE64)
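
The two constants above are the 64-bit value tags. A hedged sketch of how they partition the encoding; the helper names are illustrative and the double-encoding offset is omitted.

    #include <cstdint>

    constexpr uint64_t tagTypeNumber = 0xFFFF000000000000ull; // any number has some of these bits set
    constexpr uint64_t tagMask       = tagTypeNumber | 0x2;   // numbers plus the "other" immediates

    constexpr uint64_t boxInt32(int32_t i) { return tagTypeNumber | static_cast<uint32_t>(i); }
    constexpr bool isInt32(uint64_t v)  { return (v & tagTypeNumber) == tagTypeNumber; }
    constexpr bool isNumber(uint64_t v) { return v & tagTypeNumber; }
    constexpr bool isCell(uint64_t v)   { return !(v & tagMask); }

    static_assert(isInt32(boxInt32(-1)), "an all-ones payload still carries the int32 tag");
    static_assert(isCell(0x7f0000001000ull), "plain cell pointers have no tag bits set");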
- // cfr is an alias for callFrame. Set up this alias:
- CLoopRegister& cfr = *CAST<CLoopRegister*>(&callFrame);
-
- // Simulate a native return PC which should never be used:
- rRetVPC.i = 0xbbadbeef;
-
// Interpreter variables for value passing between opcodes and/or helpers:
NativeFunction nativeFunc = 0;
JSValue functionReturnValue;
- Opcode opcode;
+ Opcode opcode = getOpcode(entryOpcodeID);
- opcode = entryOpcode;
+#define PUSH(cloopReg) \
+ do { \
+ sp.ip--; \
+ *sp.ip = cloopReg.i; \
+ } while (false)
- #if ENABLE(OPCODE_STATS)
- #define RECORD_OPCODE_STATS(__opcode) \
- OpcodeStats::recordInstruction(__opcode)
- #else
- #define RECORD_OPCODE_STATS(__opcode)
- #endif
+#define POP(cloopReg) \
+ do { \
+ cloopReg.i = *sp.ip; \
+ sp.ip++; \
+ } while (false)
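
A small C++ sketch of what these PUSH/POP macros simulate: sp.ip is a word pointer into the C loop's own stack, the stack grows toward lower addresses, and each push or pop moves the pointer by one word. Names are illustrative.

    #include <cstdint>

    struct SimulatedStack {
        intptr_t* sp; // points at the most recently pushed word

        void push(intptr_t value) { *--sp = value; }          // grow down, then store
        intptr_t pop() { intptr_t v = *sp; ++sp; return v; }  // load, then shrink
    };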
- #if USE(JSVALUE32_64)
- #define FETCH_OPCODE() vPC->u.opcode
- #else // USE(JSVALUE64)
- #define FETCH_OPCODE() *bitwise_cast<Opcode*>(rBasePC.i8p + rPC.i * 8)
- #endif // USE(JSVALUE64)
+#if ENABLE(OPCODE_STATS)
+#define RECORD_OPCODE_STATS(__opcode) OpcodeStats::recordInstruction(__opcode)
+#else
+#define RECORD_OPCODE_STATS(__opcode)
+#endif
- #define NEXT_INSTRUCTION() \
- do { \
- opcode = FETCH_OPCODE(); \
- DISPATCH_OPCODE(); \
- } while (false)
+#if USE(JSVALUE32_64)
+#define FETCH_OPCODE() pc.opcode
+#else // USE(JSVALUE64)
+#define FETCH_OPCODE() *bitwise_cast<Opcode*>(pcBase.i8p + pc.i * 8)
+#endif // USE(JSVALUE64)
+
+#define NEXT_INSTRUCTION() \
+ do { \
+ opcode = FETCH_OPCODE(); \
+ DISPATCH_OPCODE(); \
+ } while (false)
#if ENABLE(COMPUTED_GOTO_OPCODES)
@@ -412,14 +428,22 @@ JSValue CLoop::execute(CallFrame* callFrame, Opcode entryOpcode, bool isInitiali
#include "LLIntAssembly.h"
+ OFFLINE_ASM_GLUE_LABEL(llint_return_to_host)
+ {
+ ASSERT(startSP == sp.vp);
+ ASSERT(startCFR == cfr.callFrame);
+#if USE(JSVALUE32_64)
+ return JSValue(t1.i, t0.i); // returning JSValue(tag, payload);
+#else
+ return JSValue::decode(t0.encodedJSValue);
+#endif
+ }
+
// In the ASM llint, getHostCallReturnValue() is a piece of glue
- // function provided by the JIT (see dfg/DFGOperations.cpp).
+ // function provided by the JIT (see jit/JITOperations.cpp).
    // We simulate it here with a pseudo-opcode handler.
OFFLINE_ASM_GLUE_LABEL(getHostCallReturnValue)
{
- // The ASM part pops the frame:
- callFrame = callFrame->callerFrame();
-
// The part in getHostCallReturnValueWithExecState():
JSValue result = vm->hostCallReturnValue;
#if USE(JSVALUE32_64)
@@ -428,12 +452,8 @@ JSValue CLoop::execute(CallFrame* callFrame, Opcode entryOpcode, bool isInitiali
#else
t0.encodedJSValue = JSValue::encode(result);
#endif
- goto doReturnHelper;
- }
-
- OFFLINE_ASM_GLUE_LABEL(returnFromJavaScript)
- {
- return vm->exception();
+ opcode = lr.opcode;
+ DISPATCH_OPCODE();
}
#if !ENABLE(COMPUTED_GOTO_OPCODES)
@@ -443,55 +463,6 @@ JSValue CLoop::execute(CallFrame* callFrame, Opcode entryOpcode, bool isInitiali
} // END bytecode handler cases.
- //========================================================================
- // Bytecode helpers:
-
- doReturnHelper: {
- ASSERT(!!callFrame);
- if (callFrame->isVMEntrySentinel()) {
-#if USE(JSVALUE32_64)
- return JSValue(t1.i, t0.i); // returning JSValue(tag, payload);
-#else
- return JSValue::decode(t0.encodedJSValue);
-#endif
- }
-
- // The normal ASM llint call implementation returns to the caller as
- // recorded in rRetVPC, and the caller would fetch the return address
- // from ArgumentCount.tag() (see the dispatchAfterCall() macro used in
- // the callTargetFunction() macro in the llint asm files).
- //
- // For the C loop, we don't have the JIT stub to do this work for us. So,
- // we jump to llint_generic_return_point.
-
- vPC = callFrame->currentVPC();
-
-#if USE(JSVALUE64)
- // Based on LowLevelInterpreter64.asm's dispatchAfterCall():
-
- // When returning from a native trampoline call, unlike the assembly
- // LLInt, we can't simply return to the caller. In our case, we grab
- // the caller's VPC and resume execution there. However, the caller's
- // VPC returned by callFrame->currentVPC() is in the form of the real
- // address of the target bytecode, but the 64-bit llint expects the
- // VPC to be a bytecode offset. Hence, we need to map it back to a
- // bytecode offset before we dispatch via the usual dispatch mechanism
- // i.e. NEXT_INSTRUCTION():
-
- codeBlock = callFrame->codeBlock();
- ASSERT(codeBlock);
- rPC.vp = callFrame->currentVPC();
- rPC.i = rPC.i8p - reinterpret_cast<int8_t*>(codeBlock->instructions().begin());
- rPC.i >>= 3;
-
- rBasePC.vp = codeBlock->instructions().begin();
-#endif // USE(JSVALUE64)
-
- goto llint_generic_return_point;
-
- } // END doReturnHelper.
-
-
#if ENABLE(COMPUTED_GOTO_OPCODES)
// Keep the compiler happy so that it doesn't complain about unused
// labels for the LLInt trampoline glue. The labels are automatically
@@ -511,49 +482,40 @@ JSValue CLoop::execute(CallFrame* callFrame, Opcode entryOpcode, bool isInitiali
#undef CAST
#undef SIGN_BIT32
+ return JSValue(); // to suppress a compiler warning.
} // Interpreter::llintCLoopExecute()
} // namespace JSC
-#else // !ENABLE(LLINT_C_LOOP)
+#elif !COMPILER(MSVC)
//============================================================================
// Define the opcode dispatch mechanism when using an ASM loop:
//
// These are for building an interpreter from generated assembly code:
-#if CPU(X86_64) && COMPILER(CLANG)
-#define OFFLINE_ASM_BEGIN asm ( \
- ".cfi_startproc\n"
-
-#define OFFLINE_ASM_END \
- ".cfi_endproc\n" \
-);
-#else
#define OFFLINE_ASM_BEGIN asm (
#define OFFLINE_ASM_END );
-#endif
-#define OFFLINE_ASM_OPCODE_LABEL(__opcode) OFFLINE_ASM_GLOBAL_LABEL(llint_##__opcode)
-#define OFFLINE_ASM_GLUE_LABEL(__opcode) OFFLINE_ASM_GLOBAL_LABEL(__opcode)
+#define OFFLINE_ASM_OPCODE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(llint_##__opcode)
+#define OFFLINE_ASM_GLUE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(__opcode)
#if CPU(ARM_THUMB2)
#define OFFLINE_ASM_GLOBAL_LABEL(label) \
".text\n" \
+ ".align 4\n" \
".globl " SYMBOL_STRING(label) "\n" \
HIDE_SYMBOL(label) "\n" \
".thumb\n" \
".thumb_func " THUMB_FUNC_PARAM(label) "\n" \
SYMBOL_STRING(label) ":\n"
-#elif CPU(X86_64) && COMPILER(CLANG)
+#elif CPU(ARM64)
#define OFFLINE_ASM_GLOBAL_LABEL(label) \
".text\n" \
+ ".align 4\n" \
".globl " SYMBOL_STRING(label) "\n" \
HIDE_SYMBOL(label) "\n" \
- SYMBOL_STRING(label) ":\n" \
- ".cfi_def_cfa rbp, 0\n" \
- ".cfi_offset 16, 8\n" \
- ".cfi_offset 6, 0\n"
+ SYMBOL_STRING(label) ":\n"
#else
#define OFFLINE_ASM_GLOBAL_LABEL(label) \
".text\n" \
@@ -568,6 +530,4 @@ JSValue CLoop::execute(CallFrame* callFrame, Opcode entryOpcode, bool isInitiali
// for the interpreter, as compiled from LowLevelInterpreter.asm.
#include "LLIntAssembly.h"
-#endif // !ENABLE(LLINT_C_LOOP)
-
-#endif // ENABLE(LLINT)
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.h b/Source/JavaScriptCore/llint/LowLevelInterpreter.h
index f45a07303..83008e122 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.h
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.h
@@ -23,16 +23,11 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef LowLevelInterpreter_h
-#define LowLevelInterpreter_h
-
-#include <wtf/Platform.h>
-
-#if ENABLE(LLINT)
+#pragma once
#include "Opcode.h"
-#if ENABLE(LLINT_C_LOOP)
+#if !ENABLE(JIT)
namespace JSC {
@@ -49,18 +44,4 @@ FOR_EACH_CORE_OPCODE_ID(LLINT_OPCODE_ALIAS)
} // namespace JSC
-#else // !ENABLE(LLINT_C_LOOP)
-
-#define LLINT_INSTRUCTION_DECL(opcode, length) extern "C" void llint_##opcode();
- FOR_EACH_OPCODE_ID(LLINT_INSTRUCTION_DECL);
-#undef LLINT_INSTRUCTION_DECL
-
-#define DECLARE_LLINT_NATIVE_HELPER(name, length) extern "C" void name();
- FOR_EACH_LLINT_NATIVE_HELPER(DECLARE_LLINT_NATIVE_HELPER)
-#undef DECLARE_LLINT_NATIVE_HELPER
-
-#endif // !ENABLE(LLINT_C_LOOP)
-
-#endif // ENABLE(LLINT)
-
-#endif // LowLevelInterpreter_h
+#endif // !ENABLE(JIT)
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
index 9689edf01..d9fed6bcd 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
@@ -1,4 +1,4 @@
-# Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+# Copyright (C) 2011-2016 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -22,54 +22,6 @@
# THE POSSIBILITY OF SUCH DAMAGE.
-# Crash course on the language that this is written in (which I just call
-# "assembly" even though it's more than that):
-#
-# - Mostly gas-style operand ordering. The last operand tends to be the
-# destination. So "a := b" is written as "mov b, a". But unlike gas,
-# comparisons are in-order, so "if (a < b)" is written as
-# "bilt a, b, ...".
-#
-# - "b" = byte, "h" = 16-bit word, "i" = 32-bit word, "p" = pointer.
-# Currently this is just 32-bit so "i" and "p" are interchangeable
-# except when an op supports one but not the other.
-#
-# - In general, valid operands for macro invocations and instructions are
-# registers (eg "t0"), addresses (eg "4[t0]"), base-index addresses
-# (eg "7[t0, t1, 2]"), absolute addresses (eg "0xa0000000[]"), or labels
-# (eg "_foo" or ".foo"). Macro invocations can also take anonymous
-# macros as operands. Instructions cannot take anonymous macros.
-#
-# - Labels must have names that begin with either "_" or ".". A "." label
-# is local and gets renamed before code gen to minimize namespace
-# pollution. A "_" label is an extern symbol (i.e. ".globl"). The "_"
-# may or may not be removed during code gen depending on whether the asm
-# conventions for C name mangling on the target platform mandate a "_"
-# prefix.
-#
-# - A "macro" is a lambda expression, which may be either anonymous or
-# named. But this has caveats. "macro" can take zero or more arguments,
-# which may be macros or any valid operands, but it can only return
-# code. But you can do Turing-complete things via continuation passing
-# style: "macro foo (a, b) b(a) end foo(foo, foo)". Actually, don't do
-# that, since you'll just crash the assembler.
-#
-# - An "if" is a conditional on settings. Any identifier supplied in the
-# predicate of an "if" is assumed to be a #define that is available
-# during code gen. So you can't use "if" for computation in a macro, but
-# you can use it to select different pieces of code for different
-# platforms.
-#
-# - Arguments to macros follow lexical scoping rather than dynamic scoping.
-# Const's also follow lexical scoping and may override (hide) arguments
-# or other consts. All variables (arguments and constants) can be bound
-# to operands. Additionally, arguments (but not constants) can be bound
-# to macros.
-
-
-# Below we have a bunch of constant declarations. Each constant must have
-# a corresponding ASSERT() in LLIntData.cpp.
-
# Utilities
macro dispatch(advance)
addp advance * 4, PC
@@ -89,49 +41,47 @@ end
macro dispatchAfterCall()
loadi ArgumentCount + TagOffset[cfr], PC
- loadi 4[PC], t2
- storei t1, TagOffset[cfr, t2, 8]
- storei t0, PayloadOffset[cfr, t2, 8]
- valueProfile(t1, t0, 28, t3)
- dispatch(8)
+ loadi 4[PC], t3
+ storei r1, TagOffset[cfr, t3, 8]
+ storei r0, PayloadOffset[cfr, t3, 8]
+ valueProfile(r1, r0, 4 * (CallOpCodeSize - 1), t3)
+ dispatch(CallOpCodeSize)
end
-macro cCall2(function, arg1, arg2)
+macro cCall2(function)
if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
- move arg1, a0
- move arg2, a1
- call function
- elsif X86
- poke arg1, 0
- poke arg2, 1
call function
- elsif SH4
- setargs arg1, arg2
+ elsif X86 or X86_WIN
+ subp 8, sp
+ push a1
+ push a0
call function
+ addp 16, sp
elsif C_LOOP
- cloopCallSlowPath function, arg1, arg2
+ cloopCallSlowPath function, a0, a1
else
error
end
end
-# This barely works. arg3 and arg4 should probably be immediates.
-macro cCall4(function, arg1, arg2, arg3, arg4)
+macro cCall2Void(function)
+ if C_LOOP
+ cloopCallSlowPathVoid function, a0, a1
+ else
+ cCall2(function)
+ end
+end
+
+macro cCall4(function)
if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
- move arg1, a0
- move arg2, a1
- move arg3, a2
- move arg4, a3
- call function
- elsif X86
- poke arg1, 0
- poke arg2, 1
- poke arg3, 2
- poke arg4, 3
call function
- elsif SH4
- setargs arg1, arg2, arg3, arg4
+ elsif X86 or X86_WIN
+ push a3
+ push a2
+ push a1
+ push a0
call function
+ addp 16, sp
elsif C_LOOP
error
else
@@ -140,214 +90,251 @@ macro cCall4(function, arg1, arg2, arg3, arg4)
end
macro callSlowPath(slowPath)
- cCall2(slowPath, cfr, PC)
- move t0, PC
- move t1, cfr
+ move cfr, a0
+ move PC, a1
+ cCall2(slowPath)
+ move r0, PC
end
-macro functionPrologue(extraStackSpace)
- if X86
- push cfr
- move sp, cfr
+macro doVMEntry(makeCall)
+ functionPrologue()
+ pushCalleeSaves()
+
+ # x86 needs to load arguments from the stack
+ if X86 or X86_WIN
+ loadp 16[cfr], a2
+ loadp 12[cfr], a1
+ loadp 8[cfr], a0
end
- pushCalleeSaves
- if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
- push cfr
- push lr
+
+ const entry = a0
+ const vm = a1
+ const protoCallFrame = a2
+
+ # We are using t3, t4 and t5 as temporaries through the function.
+ # Since we have the guarantee that tX != aY when X != Y, we are safe from
+ # aliasing problems with our arguments.
+
+ if ARMv7
+ vmEntryRecord(cfr, t3)
+ move t3, sp
+ else
+ vmEntryRecord(cfr, sp)
end
- subp extraStackSpace, sp
-end
-macro functionEpilogue(extraStackSpace)
- addp extraStackSpace, sp
- if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
- pop lr
- pop cfr
+ storep vm, VMEntryRecord::m_vm[sp]
+ loadp VM::topCallFrame[vm], t4
+ storep t4, VMEntryRecord::m_prevTopCallFrame[sp]
+ loadp VM::topVMEntryFrame[vm], t4
+ storep t4, VMEntryRecord::m_prevTopVMEntryFrame[sp]
+
+ # Align stack pointer
+ if X86_WIN or MIPS
+ addp CallFrameAlignSlots * SlotSize, sp, t3
+ andp ~StackAlignmentMask, t3
+ subp t3, CallFrameAlignSlots * SlotSize, sp
+ elsif ARM or ARMv7 or ARMv7_TRADITIONAL
+ addp CallFrameAlignSlots * SlotSize, sp, t3
+ clrbp t3, StackAlignmentMask, t3
+ if ARMv7
+ subp t3, CallFrameAlignSlots * SlotSize, t3
+ move t3, sp
+ else
+ subp t3, CallFrameAlignSlots * SlotSize, sp
+ end
end
- popCalleeSaves
- if X86
- pop cfr
+
+ loadi ProtoCallFrame::paddedArgCount[protoCallFrame], t4
+ addp CallFrameHeaderSlots, t4, t4
+ lshiftp 3, t4
+ subp sp, t4, t3
+
+ # Ensure that we have enough additional stack capacity for the incoming args,
+ # and the frame for the JS code we're executing. We need to do this check
+ # before we start copying the args from the protoCallFrame below.
+ if C_LOOP
+ bpaeq t3, VM::m_cloopStackLimit[vm], .stackHeightOK
+ else
+ bpaeq t3, VM::m_softStackLimit[vm], .stackHeightOK
end
-end
-macro doCallToJavaScript(makeCall, doReturn)
- if X86
- const entry = t5
- const vmTopCallFrame = t2
- const protoCallFrame = t4
-
- const extraStackSpace = 28
- const previousCFR = t0
- const previousPC = t1
- const temp1 = t0 # Same as previousCFR
- const temp2 = t1 # Same as previousPC
- const temp3 = t2 # same as vmTopCallFrame
- const temp4 = t3
- elsif ARM or ARMv7_TRADITIONAL
- const entry = a0
- const vmTopCallFrame = a1
- const protoCallFrame = a2
- const topOfStack = a3
-
- const extraStackSpace = 16
- const previousCFR = t3
- const previousPC = lr
- const temp1 = t3 # Same as previousCFR
- const temp2 = a3 # Same as topOfStack
- const temp3 = t4
- const temp4 = t5
- elsif ARMv7
- const entry = a0
- const vmTopCallFrame = a1
- const protoCallFrame = a2
- const topOfStack = a3
-
- const extraStackSpace = 28
- const previousCFR = t3
- const previousPC = lr
- const temp1 = t3 # Same as previousCFR
- const temp2 = a3 # Same as topOfStack
- const temp3 = t4
- const temp4 = t5
- elsif MIPS
- const entry = a0
- const vmTopCallFrame = a1
- const protoCallFrame = a2
- const topOfStack = a3
-
- const extraStackSpace = 36
- const previousCFR = t2
- const previousPC = lr
- const temp1 = t3
- const temp2 = t4
- const temp3 = t5
- const temp4 = t6
- elsif SH4
- const entry = a0
- const vmTopCallFrame = a1
- const protoCallFrame = a2
- const topOfStack = a3
-
- const extraStackSpace = 20
- const previousCFR = t3
- const previousPC = lr
- const temp1 = t3 # Same as previousCFR
- const temp2 = a3 # Same as topOfStack
- const temp3 = t8
- const temp4 = t9
+ if C_LOOP
+ move entry, t4
+ move vm, t5
+ cloopCallSlowPath _llint_stack_check_at_vm_entry, vm, t3
+ bpeq t0, 0, .stackCheckFailed
+ move t4, entry
+ move t5, vm
+ jmp .stackHeightOK
+
+.stackCheckFailed:
+ move t4, entry
+ move t5, vm
end
- if X86
- loadp [sp], previousPC
- move cfr, previousCFR
+ subp 8, sp # Align stack for cCall2() to make a call.
+ move vm, a0
+ move protoCallFrame, a1
+ cCall2(_llint_throw_stack_overflow_error)
+
+ if ARMv7
+ vmEntryRecord(cfr, t3)
+ move t3, sp
+ else
+ vmEntryRecord(cfr, sp)
end
- functionPrologue(extraStackSpace)
- if X86
- loadp extraStackSpace+20[sp], entry
- loadp extraStackSpace+24[sp], vmTopCallFrame
- loadp extraStackSpace+28[sp], protoCallFrame
- loadp extraStackSpace+32[sp], cfr
+
+ loadp VMEntryRecord::m_vm[sp], t5
+ loadp VMEntryRecord::m_prevTopCallFrame[sp], t4
+ storep t4, VM::topCallFrame[t5]
+ loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], t4
+ storep t4, VM::topVMEntryFrame[t5]
+
+ if ARMv7
+ subp cfr, CalleeRegisterSaveSize, t5
+ move t5, sp
else
- move cfr, previousCFR
- move topOfStack, cfr
+ subp cfr, CalleeRegisterSaveSize, sp
end
- subp (CallFrameHeaderSlots-1)*8, cfr
- storep 0, ArgumentCount+4[cfr]
- storep 0, ArgumentCount[cfr]
- storep 0, Callee+4[cfr]
- storep vmTopCallFrame, Callee[cfr]
- loadp [vmTopCallFrame], temp4
- storep 0, ScopeChain+4[cfr]
- storep temp4, ScopeChain[cfr]
- storep 0, CodeBlock+4[cfr]
- storep 1, CodeBlock[cfr]
- storep previousPC, ReturnPC[cfr]
- storep previousCFR, CallerFrame[cfr]
- move cfr, temp1
-
- loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp2
- addp CallFrameHeaderSlots, temp2, temp2
- lshiftp 3, temp2
- subp temp2, cfr
- storep temp1, CallerFrame[cfr]
-
- move 5, temp1
+ popCalleeSaves()
+ functionEpilogue()
+ ret
+
+.stackHeightOK:
+ move t3, sp
+ move 4, t3
.copyHeaderLoop:
- subi 1, temp1
- loadp [protoCallFrame, temp1, 8], temp3
- storep temp3, CodeBlock[cfr, temp1, 8]
- loadp 4[protoCallFrame, temp1, 8], temp3
- storep temp3, CodeBlock+4[cfr, temp1, 8]
- btinz temp1, .copyHeaderLoop
-
- loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], temp2
- subi 1, temp2
- loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp3
- subi 1, temp3
-
- bieq temp2, temp3, .copyArgs
- move 0, temp1
- move UndefinedTag, temp4
+ subi 1, t3
+ loadi TagOffset[protoCallFrame, t3, 8], t5
+ storei t5, TagOffset + CodeBlock[sp, t3, 8]
+ loadi PayloadOffset[protoCallFrame, t3, 8], t5
+ storei t5, PayloadOffset + CodeBlock[sp, t3, 8]
+ btinz t3, .copyHeaderLoop
+
+ loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], t4
+ subi 1, t4
+ loadi ProtoCallFrame::paddedArgCount[protoCallFrame], t5
+ subi 1, t5
+
+ bieq t4, t5, .copyArgs
.fillExtraArgsLoop:
- subi 1, temp3
- storep temp1, ThisArgumentOffset+8+PayloadOffset[cfr, temp3, 8]
- storep temp4, ThisArgumentOffset+8+TagOffset[cfr, temp3, 8]
- bineq temp2, temp3, .fillExtraArgsLoop
+ subi 1, t5
+ storei UndefinedTag, ThisArgumentOffset + 8 + TagOffset[sp, t5, 8]
+ storei 0, ThisArgumentOffset + 8 + PayloadOffset[sp, t5, 8]
+ bineq t4, t5, .fillExtraArgsLoop
.copyArgs:
- loadp ProtoCallFrame::args[protoCallFrame], temp1
+ loadp ProtoCallFrame::args[protoCallFrame], t3
.copyArgsLoop:
- btiz temp2, .copyArgsDone
- subi 1, temp2
- loadp PayloadOffset[temp1, temp2, 8], temp3
- loadp TagOffset[temp1, temp2, 8], temp4
- storep temp3, ThisArgumentOffset+8+PayloadOffset[cfr, temp2, 8]
- storep temp4, ThisArgumentOffset+8+TagOffset[cfr, temp2, 8]
+ btiz t4, .copyArgsDone
+ subi 1, t4
+ loadi TagOffset[t3, t4, 8], t5
+ storei t5, ThisArgumentOffset + 8 + TagOffset[sp, t4, 8]
+ loadi PayloadOffset[t3, t4, 8], t5
+ storei t5, ThisArgumentOffset + 8 + PayloadOffset[sp, t4, 8]
jmp .copyArgsLoop
.copyArgsDone:
- if X86
- loadp extraStackSpace+24[sp], vmTopCallFrame
- end
- storep cfr, [vmTopCallFrame]
+ storep sp, VM::topCallFrame[vm]
+ storep cfr, VM::topVMEntryFrame[vm]
- makeCall(entry, temp1)
+ makeCall(entry, t3, t4)
- bpeq CodeBlock[cfr], 1, .calleeFramePopped
- loadp CallerFrame[cfr], cfr
+ if ARMv7
+ vmEntryRecord(cfr, t3)
+ move t3, sp
+ else
+ vmEntryRecord(cfr, sp)
+ end
-.calleeFramePopped:
- loadp Callee + PayloadOffset[cfr], temp3 # VM.topCallFrame
- loadp ScopeChain + PayloadOffset[cfr], temp4
- storep temp4, [temp3]
+ loadp VMEntryRecord::m_vm[sp], t5
+ loadp VMEntryRecord::m_prevTopCallFrame[sp], t4
+ storep t4, VM::topCallFrame[t5]
+ loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], t4
+ storep t4, VM::topVMEntryFrame[t5]
- doReturn(extraStackSpace)
-end
+ if ARMv7
+ subp cfr, CalleeRegisterSaveSize, t5
+ move t5, sp
+ else
+ subp cfr, CalleeRegisterSaveSize, sp
+ end
-macro makeJavaScriptCall(entry, temp)
- call entry
+ popCalleeSaves()
+ functionEpilogue()
+ ret
end
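
A hedged C++ sketch of the capacity check doVMEntry performs before copying the header and arguments out of the ProtoCallFrame: eight bytes per slot (tag plus payload on this 32-bit port), and a prospective stack pointer below the VM's limit diverts to the stack-overflow path. Names are illustrative.

    #include <cstddef>
    #include <cstdint>

    bool haveRoomForEntryFrame(char* sp, char* stackLimit, uint32_t paddedArgCount, uint32_t callFrameHeaderSlots)
    {
        std::size_t bytesNeeded = (std::size_t(paddedArgCount) + callFrameHeaderSlots) * 8;
        char* prospectiveSP = sp - bytesNeeded;
        return prospectiveSP >= stackLimit; // false => take the _llint_throw_stack_overflow_error path
    }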
-macro makeHostFunctionCall(entry, temp)
- move entry, temp
- if X86
- # Put cfr on stack as arg0, also put it in ecx for "fastcall" targets
- poke cfr, 0
- move cfr, t2
+macro makeJavaScriptCall(entry, temp, unused)
+ addp CallerFrameAndPCSize, sp
+ checkStackPointerAlignment(temp, 0xbad0dc02)
+ if C_LOOP
+ cloopCallJSFunction entry
else
- move cfr, a0
+ call entry
+ end
+ checkStackPointerAlignment(temp, 0xbad0dc03)
+ subp CallerFrameAndPCSize, sp
+end
+
+macro makeHostFunctionCall(entry, temp1, temp2)
+ move entry, temp1
+ storep cfr, [sp]
+ if C_LOOP
+ move sp, a0
+ storep lr, PtrSize[sp]
+ cloopCallNative temp1
+ elsif X86 or X86_WIN
+ # Put callee frame pointer on stack as arg0, also put it in ecx for "fastcall" targets
+ move 0, temp2
+ move temp2, 4[sp] # put 0 in ReturnPC
+ move sp, a0 # a0 is ecx
+ push temp2 # Push dummy arg1
+ push a0
+ call temp1
+ addp 8, sp
+ else
+ move sp, a0
+ call temp1
end
- call temp
end
-macro doReturnFromJavaScript(extraStackSpace)
-_returnFromJavaScript:
- functionEpilogue(extraStackSpace)
+_handleUncaughtException:
+ loadp Callee + PayloadOffset[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_vm[t3], t3
+ restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(t3, t0)
+ loadp VM::callFrameForCatch[t3], cfr
+ storep 0, VM::callFrameForCatch[t3]
+
+ loadp CallerFrame[cfr], cfr
+
+ if ARMv7
+ vmEntryRecord(cfr, t3)
+ move t3, sp
+ else
+ vmEntryRecord(cfr, sp)
+ end
+
+ loadp VMEntryRecord::m_vm[sp], t3
+ loadp VMEntryRecord::m_prevTopCallFrame[sp], t5
+ storep t5, VM::topCallFrame[t3]
+ loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], t5
+ storep t5, VM::topVMEntryFrame[t3]
+
+ if ARMv7
+ subp cfr, CalleeRegisterSaveSize, t3
+ move t3, sp
+ else
+ subp cfr, CalleeRegisterSaveSize, sp
+ end
+
+ popCalleeSaves()
+ functionEpilogue()
ret
-end
macro doReturnFromHostFunction(extraStackSpace)
functionEpilogue(extraStackSpace)
@@ -359,33 +346,43 @@ end
# debugging from. operand should likewise be an immediate, and should identify the operand
# in the instruction stream you'd like to print out.
macro traceOperand(fromWhere, operand)
- cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand)
- move t0, PC
- move t1, cfr
+ move fromWhere, a2
+ move operand, a3
+ move cfr, a0
+ move PC, a1
+ cCall4(_llint_trace_operand)
+ move r0, PC
+ move r1, cfr
end
# Debugging operation if you'd like to print the value of an operand in the instruction
# stream. Same as traceOperand(), but assumes that the operand is a register, and prints its
# value.
macro traceValue(fromWhere, operand)
- cCall4(_llint_trace_value, cfr, PC, fromWhere, operand)
- move t0, PC
- move t1, cfr
+ move fromWhere, a2
+ move operand, a3
+ move cfr, a0
+ move PC, a1
+ cCall4(_llint_trace_value)
+ move r0, PC
+ move r1, cfr
end
# Call a slowPath for call opcodes.
macro callCallSlowPath(slowPath, action)
storep PC, ArgumentCount + TagOffset[cfr]
- cCall2(slowPath, cfr, PC)
- move t1, cfr
- action(t0)
+ move cfr, a0
+ move PC, a1
+ cCall2(slowPath)
+ action(r0, r1)
end
macro callWatchdogTimerHandler(throwHandler)
storei PC, ArgumentCount + TagOffset[cfr]
- cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC)
- move t1, cfr
- btpnz t0, throwHandler
+ move cfr, a0
+ move PC, a1
+ cCall2(_llint_slow_path_handle_watchdog_timer)
+ btpnz r0, throwHandler
loadi ArgumentCount + TagOffset[cfr], PC
end
@@ -394,10 +391,12 @@ macro checkSwitchToJITForLoop()
1,
macro ()
storei PC, ArgumentCount + TagOffset[cfr]
- cCall2(_llint_loop_osr, cfr, PC)
- move t1, cfr
- btpz t0, .recover
- jmp t0
+ move cfr, a0
+ move PC, a1
+ cCall2(_llint_loop_osr)
+ btpz r0, .recover
+ move r1, sp
+ jmp r0
.recover:
loadi ArgumentCount + TagOffset[cfr], PC
end)
@@ -492,55 +491,69 @@ macro loadConstantOrVariablePayloadUnchecked(index, payload)
end
macro writeBarrierOnOperand(cellOperand)
- if GGC
- loadisFromInstruction(cellOperand, t1)
- loadConstantOrVariablePayload(t1, CellTag, t2, .writeBarrierDone)
- checkMarkByte(t2, t1, t3,
- macro(marked)
- btbz marked, .writeBarrierDone
- push cfr, PC
- # We make two extra slots because cCall2 will poke.
- subp 8, sp
- cCall2(_llint_write_barrier_slow, cfr, t2)
- addp 8, sp
- pop PC, cfr
- end
- )
- .writeBarrierDone:
- end
+ loadisFromInstruction(cellOperand, t1)
+ loadConstantOrVariablePayload(t1, CellTag, t2, .writeBarrierDone)
+ skipIfIsRememberedOrInEden(
+ t2,
+ macro()
+ push cfr, PC
+ # We make two extra slots because cCall2 will poke.
+ subp 8, sp
+ move t2, a1 # t2 can be a0 on x86
+ move cfr, a0
+ cCall2Void(_llint_write_barrier_slow)
+ addp 8, sp
+ pop PC, cfr
+ end)
+.writeBarrierDone:
end
macro writeBarrierOnOperands(cellOperand, valueOperand)
- if GGC
- loadisFromInstruction(valueOperand, t1)
- loadConstantOrVariableTag(t1, t0)
- bineq t0, CellTag, .writeBarrierDone
-
- writeBarrierOnOperand(cellOperand)
- .writeBarrierDone:
- end
+ loadisFromInstruction(valueOperand, t1)
+ loadConstantOrVariableTag(t1, t0)
+ bineq t0, CellTag, .writeBarrierDone
+
+ writeBarrierOnOperand(cellOperand)
+.writeBarrierDone:
+end
+
+macro writeBarrierOnGlobal(valueOperand, loadHelper)
+ loadisFromInstruction(valueOperand, t1)
+ loadConstantOrVariableTag(t1, t0)
+ bineq t0, CellTag, .writeBarrierDone
+
+ loadHelper(t3)
+
+ skipIfIsRememberedOrInEden(
+ t3,
+ macro()
+ push cfr, PC
+ # We make two extra slots because cCall2 will poke.
+ subp 8, sp
+ move cfr, a0
+ move t3, a1
+ cCall2Void(_llint_write_barrier_slow)
+ addp 8, sp
+ pop PC, cfr
+ end)
+.writeBarrierDone:
end
macro writeBarrierOnGlobalObject(valueOperand)
- if GGC
- loadisFromInstruction(valueOperand, t1)
- bineq t0, CellTag, .writeBarrierDone
-
- loadp CodeBlock[cfr], t3
- loadp CodeBlock::m_globalObject[t3], t3
- checkMarkByte(t3, t1, t2,
- macro(marked)
- btbz marked, .writeBarrierDone
- push cfr, PC
- # We make two extra slots because cCall2 will poke.
- subp 8, sp
- cCall2(_llint_write_barrier_slow, cfr, t3)
- addp 8, sp
- pop PC, cfr
- end
- )
- .writeBarrierDone:
- end
+ writeBarrierOnGlobal(valueOperand,
+ macro(registerToStoreGlobal)
+ loadp CodeBlock[cfr], registerToStoreGlobal
+ loadp CodeBlock::m_globalObject[registerToStoreGlobal], registerToStoreGlobal
+ end)
+end
+
+macro writeBarrierOnGlobalLexicalEnvironment(valueOperand)
+ writeBarrierOnGlobal(valueOperand,
+ macro(registerToStoreGlobal)
+ loadp CodeBlock[cfr], registerToStoreGlobal
+ loadp CodeBlock::m_globalObject[registerToStoreGlobal], registerToStoreGlobal
+ loadp JSGlobalObject::m_globalLexicalEnvironment[registerToStoreGlobal], registerToStoreGlobal
+ end)
end
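
A minimal sketch of the policy these write-barrier macros implement: only cell values need a barrier, and the skipIfIsRememberedOrInEden check lets stores into young or already-remembered owners skip the slow path. The CellState names below are illustrative, not the exact JSC fields.

    #include <cstdint>

    enum class CellState : uint8_t { Young, OldRemembered, OldNeedsBarrier };

    struct Cell { CellState state; };

    void writeBarrier(Cell* owner, bool storedValueIsCell, void (*slowPath)(Cell*))
    {
        if (!storedValueIsCell)
            return;                                  // non-cells never need a barrier
        if (owner->state != CellState::OldNeedsBarrier)
            return;                                  // eden or already-remembered owner: skip
        slowPath(owner);                             // old object may now point at a young one
    }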
macro valueProfile(tag, payload, operand, scratch)
@@ -553,22 +566,57 @@ end
# Entrypoints into the interpreter
# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
-macro functionArityCheck(doneLabel, slow_path)
+macro functionArityCheck(doneLabel, slowPath)
loadi PayloadOffset + ArgumentCount[cfr], t0
biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
- cCall2(slow_path, cfr, PC) # This slow_path has a simple protocol: t0 = 0 => no error, t0 != 0 => error
- btiz t0, .isArityFixupNeeded
- move t1, cfr # t1 contains caller frame
+ move cfr, a0
+ move PC, a1
+ cCall2(slowPath) # This slowPath has a simple protocol: t0 = 0 => no error, t0 != 0 => error
+ btiz r0, .noError
+ move r1, cfr # r1 contains caller frame
jmp _llint_throw_from_slow_path_trampoline
-.isArityFixupNeeded:
+.noError:
+ # r1 points to ArityCheckData.
+ loadp CommonSlowPaths::ArityCheckData::thunkToCall[r1], t3
+ btpz t3, .proceedInline
+
+ loadp CommonSlowPaths::ArityCheckData::paddedStackSpace[r1], a0
+ call t3
+ if ASSERT_ENABLED
+ loadp ReturnPC[cfr], t0
+ loadp [t0], t0
+ end
+ jmp .continue
+
+.proceedInline:
+ loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[r1], t1
btiz t1, .continue
+ loadi PayloadOffset + ArgumentCount[cfr], t2
+ addi CallFrameHeaderSlots, t2
- // Move frame up "t1" slots
+ // Check if there are some unaligned slots we can use
+ move t1, t3
+ andi StackAlignmentSlots - 1, t3
+ btiz t3, .noExtraSlot
+.fillExtraSlots:
+ move 0, t0
+ storei t0, PayloadOffset[cfr, t2, 8]
+ move UndefinedTag, t0
+ storei t0, TagOffset[cfr, t2, 8]
+ addi 1, t2
+ bsubinz 1, t3, .fillExtraSlots
+ andi ~(StackAlignmentSlots - 1), t1
+ btiz t1, .continue
+
+.noExtraSlot:
+ // Move frame up t1 slots
negi t1
move cfr, t3
- loadi PayloadOffset + ArgumentCount[cfr], t2
- addi CallFrameHeaderSlots, t2
+ move t1, t0
+ lshiftp 3, t0
+ addp t0, cfr
+ addp t0, sp
.copyLoop:
loadi PayloadOffset[t3], t0
storei t0, PayloadOffset[t3, t1, 8]
@@ -587,8 +635,6 @@ macro functionArityCheck(doneLabel, slow_path)
addp 8, t3
baddinz 1, t2, .fillLoop
- lshiftp 3, t1
- addp t1, cfr
.continue:
# Reload CodeBlock and PC, since the slow_path clobbered it.
loadp CodeBlock[cfr], t1
@@ -596,12 +642,11 @@ macro functionArityCheck(doneLabel, slow_path)
jmp doneLabel
end
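
A hedged sketch of the inline arity-fixup arithmetic above: the slow path reports how many extra slots the callee expects (paddedStackSpace); the remainder modulo the stack-alignment quantum is absorbed by appending undefined slots in place, and only the aligned part requires sliding the whole frame. Types and names are illustrative.

    #include <cstdint>

    struct ArityFixupPlan {
        uint32_t undefinedSlotsToAppend; // filled in place above the existing arguments
        uint32_t slotsToSlideFrame;      // aligned amount the frame actually moves
    };

    ArityFixupPlan planArityFixup(uint32_t neededSlots, uint32_t stackAlignmentSlots)
    {
        uint32_t unaligned = neededSlots & (stackAlignmentSlots - 1); // alignment is a power of two
        return { unaligned, neededSlots & ~(stackAlignmentSlots - 1) };
    }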
-
macro branchIfException(label)
- loadp ScopeChain + PayloadOffset[cfr], t3
+ loadp Callee + PayloadOffset[cfr], t3
andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- bieq VM::m_exception + TagOffset[t3], EmptyValueTag, .noException
+ loadp MarkedBlock::m_vm[t3], t3
+ btiz VM::m_exception[t3], .noException
jmp label
.noException:
end
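
A hedged sketch of the MarkedBlockMask trick branchIfException relies on: a callee cell lives inside an aligned MarkedBlock, so masking its pointer with the block alignment recovers the block, whose header carries the owning VM and thus the exception slot being tested. The block size and field names below are assumptions for illustration only.

    #include <cstdint>

    struct VM { void* exception; };
    struct MarkedBlockHeader { VM* vm; };

    // Assumed geometry: cells live in power-of-two-aligned MarkedBlocks, so
    // masking any cell pointer with the alignment recovers the block header.
    constexpr std::uintptr_t markedBlockSize = 16 * 1024;
    constexpr std::uintptr_t markedBlockMask = ~(markedBlockSize - 1);

    VM* vmForCell(const void* cell)
    {
        auto* block = reinterpret_cast<const MarkedBlockHeader*>(
            reinterpret_cast<std::uintptr_t>(cell) & markedBlockMask);
        return block->vm;
    }

    bool hasPendingException(const void* calleeCell)
    {
        return vmForCell(calleeCell)->exception != nullptr;
    }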
@@ -611,6 +656,7 @@ end
_llint_op_enter:
traceExecution()
+ checkStackPointerAlignment(t2, 0xdead00e1)
loadp CodeBlock[cfr], t2 // t2<CodeBlock> = cfr.CodeBlock
loadi CodeBlock::m_numVars[t2], t2 // t2<size_t> = t2<CodeBlock>.m_numVars
btiz t2, .opEnterDone
@@ -623,98 +669,75 @@ _llint_op_enter:
addi 1, t2
btinz t2, .opEnterLoop
.opEnterDone:
- callSlowPath(_slow_path_enter)
+ callOpcodeSlowPath(_slow_path_enter)
dispatch(1)
-_llint_op_create_activation:
+_llint_op_get_argument:
traceExecution()
- loadi 4[PC], t0
- bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateActivationDone
- callSlowPath(_llint_slow_path_create_activation)
-.opCreateActivationDone:
- dispatch(2)
-
+ loadisFromInstruction(1, t1)
+ loadisFromInstruction(2, t2)
+ loadi PayloadOffset + ArgumentCount[cfr], t0
+ bilteq t0, t2, .opGetArgumentOutOfBounds
+ loadi ThisArgumentOffset + TagOffset[cfr, t2, 8], t0
+ loadi ThisArgumentOffset + PayloadOffset[cfr, t2, 8], t3
+ storei t0, TagOffset[cfr, t1, 8]
+ storei t3, PayloadOffset[cfr, t1, 8]
+ valueProfile(t0, t3, 12, t1)
+ dispatch(4)
-_llint_op_init_lazy_reg:
- traceExecution()
- loadi 4[PC], t0
- storei EmptyValueTag, TagOffset[cfr, t0, 8]
- storei 0, PayloadOffset[cfr, t0, 8]
- dispatch(2)
+.opGetArgumentOutOfBounds:
+ storei UndefinedTag, TagOffset[cfr, t1, 8]
+ storei 0, PayloadOffset[cfr, t1, 8]
+ valueProfile(UndefinedTag, 0, 12, t1)
+ dispatch(4)
-_llint_op_create_arguments:
+_llint_op_argument_count:
traceExecution()
- loadi 4[PC], t0
- bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateArgumentsDone
- callSlowPath(_slow_path_create_arguments)
-.opCreateArgumentsDone:
+ loadisFromInstruction(1, t2)
+ loadi PayloadOffset + ArgumentCount[cfr], t0
+ subi 1, t0
+ move Int32Tag, t1
+ storei t1, TagOffset[cfr, t2, 8]
+ storei t0, PayloadOffset[cfr, t2, 8]
dispatch(2)
-_llint_op_create_this:
+_llint_op_get_scope:
traceExecution()
- loadi 8[PC], t0
- loadp PayloadOffset[cfr, t0, 8], t0
- loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_allocator[t0], t1
- loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_structure[t0], t2
- btpz t1, .opCreateThisSlow
- allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow)
- loadi 4[PC], t1
+ loadi Callee + PayloadOffset[cfr], t0
+ loadi JSCallee::m_scope[t0], t0
+ loadisFromInstruction(1, t1)
storei CellTag, TagOffset[cfr, t1, 8]
storei t0, PayloadOffset[cfr, t1, 8]
- dispatch(4)
-
-.opCreateThisSlow:
- callSlowPath(_slow_path_create_this)
- dispatch(4)
+ dispatch(2)
-_llint_op_get_callee:
- traceExecution()
- loadi 4[PC], t0
- loadp PayloadOffset + Callee[cfr], t1
- loadpFromInstruction(2, t2)
- bpneq t1, t2, .opGetCalleeSlow
- storei CellTag, TagOffset[cfr, t0, 8]
- storei t1, PayloadOffset[cfr, t0, 8]
- dispatch(3)
-
-.opGetCalleeSlow:
- callSlowPath(_slow_path_get_callee)
- dispatch(3)
-
_llint_op_to_this:
traceExecution()
loadi 4[PC], t0
bineq TagOffset[cfr, t0, 8], CellTag, .opToThisSlow
loadi PayloadOffset[cfr, t0, 8], t0
- loadp JSCell::m_structure[t0], t0
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t0], FinalObjectType, .opToThisSlow
+ bbneq JSCell::m_type[t0], FinalObjectType, .opToThisSlow
loadpFromInstruction(2, t2)
- bpneq t0, t2, .opToThisSlow
- dispatch(3)
+ bpneq JSCell::m_structureID[t0], t2, .opToThisSlow
+ dispatch(4)
.opToThisSlow:
- callSlowPath(_slow_path_to_this)
- dispatch(3)
+ callOpcodeSlowPath(_slow_path_to_this)
+ dispatch(4)
-_llint_op_new_object:
+_llint_op_check_tdz:
traceExecution()
- loadpFromInstruction(3, t0)
- loadp ObjectAllocationProfile::m_allocator[t0], t1
- loadp ObjectAllocationProfile::m_structure[t0], t2
- allocateJSObject(t1, t2, t0, t3, .opNewObjectSlow)
- loadi 4[PC], t1
- storei CellTag, TagOffset[cfr, t1, 8]
- storei t0, PayloadOffset[cfr, t1, 8]
- dispatch(4)
+ loadisFromInstruction(1, t0)
+ loadConstantOrVariableTag(t0, t1)
+ bineq t1, EmptyValueTag, .opNotTDZ
+ callOpcodeSlowPath(_slow_path_throw_tdz_error)
-.opNewObjectSlow:
- callSlowPath(_llint_slow_path_new_object)
- dispatch(4)
+.opNotTDZ:
+ dispatch(2)
_llint_op_mov:
@@ -727,45 +750,6 @@ _llint_op_mov:
dispatch(3)
-macro notifyWrite(set, valueTag, valuePayload, scratch, slow)
- loadb VariableWatchpointSet::m_state[set], scratch
- bieq scratch, IsInvalidated, .done
- bineq scratch, ClearWatchpoint, .overwrite
- storei valueTag, VariableWatchpointSet::m_inferredValue + TagOffset[set]
- storei valuePayload, VariableWatchpointSet::m_inferredValue + PayloadOffset[set]
- storeb IsWatched, VariableWatchpointSet::m_state[set]
- jmp .done
-
-.overwrite:
- bineq valuePayload, VariableWatchpointSet::m_inferredValue + PayloadOffset[set], .definitelyDifferent
- bieq valueTag, VariableWatchpointSet::m_inferredValue + TagOffset[set], .done
-.definitelyDifferent:
- btbnz VariableWatchpointSet::m_setIsNotEmpty[set], slow
- storei EmptyValueTag, VariableWatchpointSet::m_inferredValue + TagOffset[set]
- storei 0, VariableWatchpointSet::m_inferredValue + PayloadOffset[set]
- storeb IsInvalidated, VariableWatchpointSet::m_state[set]
-
-.done:
-end
-
-_llint_op_captured_mov:
- traceExecution()
- loadi 8[PC], t1
- loadConstantOrVariable(t1, t2, t3)
- loadpFromInstruction(3, t0)
- btpz t0, .opCapturedMovReady
- notifyWrite(t0, t2, t3, t1, .opCapturedMovSlow)
-.opCapturedMovReady:
- loadi 4[PC], t0
- storei t2, TagOffset[cfr, t0, 8]
- storei t3, PayloadOffset[cfr, t0, 8]
- dispatch(4)
-
-.opCapturedMovSlow:
- callSlowPath(_slow_path_captured_mov)
- dispatch(4)
-
-
_llint_op_not:
traceExecution()
loadi 8[PC], t0
@@ -778,7 +762,7 @@ _llint_op_not:
dispatch(3)
.opNotSlow:
- callSlowPath(_slow_path_not)
+ callOpcodeSlowPath(_slow_path_not)
dispatch(3)
@@ -798,7 +782,7 @@ _llint_op_eq:
dispatch(4)
.opEqSlow:
- callSlowPath(_slow_path_eq)
+ callOpcodeSlowPath(_slow_path_eq)
dispatch(4)
@@ -810,11 +794,11 @@ _llint_op_eq_null:
loadi TagOffset[cfr, t0, 8], t1
loadi PayloadOffset[cfr, t0, 8], t0
bineq t1, CellTag, .opEqNullImmediate
- loadp JSCell::m_structure[t0], t1
- btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opEqNullMasqueradesAsUndefined
+ btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .opEqNullMasqueradesAsUndefined
move 0, t1
jmp .opEqNullNotImmediate
.opEqNullMasqueradesAsUndefined:
+ loadp JSCell::m_structureID[t0], t1
loadp CodeBlock[cfr], t0
loadp CodeBlock::m_globalObject[t0], t0
cpeq Structure::m_globalObject[t1], t0, t1
@@ -845,7 +829,7 @@ _llint_op_neq:
dispatch(4)
.opNeqSlow:
- callSlowPath(_slow_path_neq)
+ callOpcodeSlowPath(_slow_path_neq)
dispatch(4)
@@ -857,11 +841,11 @@ _llint_op_neq_null:
loadi TagOffset[cfr, t0, 8], t1
loadi PayloadOffset[cfr, t0, 8], t0
bineq t1, CellTag, .opNeqNullImmediate
- loadp JSCell::m_structure[t0], t1
- btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opNeqNullMasqueradesAsUndefined
+ btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .opNeqNullMasqueradesAsUndefined
move 1, t1
jmp .opNeqNullNotImmediate
.opNeqNullMasqueradesAsUndefined:
+ loadp JSCell::m_structureID[t0], t1
loadp CodeBlock[cfr], t0
loadp CodeBlock::m_globalObject[t0], t0
cpneq Structure::m_globalObject[t1], t0, t1
@@ -883,12 +867,10 @@ macro strictEq(equalityOperation, slowPath)
loadConstantOrVariable2Reg(t0, t2, t0)
bineq t2, t3, .slow
bib t2, LowestTag, .slow
- bineq t2, CellTag, .notString
- loadp JSCell::m_structure[t0], t2
- loadp JSCell::m_structure[t1], t3
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .notString
- bbeq Structure::m_typeInfo + TypeInfo::m_type[t3], StringType, .slow
-.notString:
+ bineq t2, CellTag, .notStringOrSymbol
+ bbaeq JSCell::m_type[t0], ObjectType, .notStringOrSymbol
+ bbb JSCell::m_type[t1], ObjectType, .slow
+.notStringOrSymbol:
loadi 4[PC], t2
equalityOperation(t0, t1, t0)
storei BooleanTag, TagOffset[cfr, t2, 8]
@@ -896,7 +878,7 @@ macro strictEq(equalityOperation, slowPath)
dispatch(4)
.slow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(4)
end
@@ -920,7 +902,7 @@ _llint_op_inc:
dispatch(2)
.opIncSlow:
- callSlowPath(_slow_path_inc)
+ callOpcodeSlowPath(_slow_path_inc)
dispatch(2)
@@ -934,7 +916,7 @@ _llint_op_dec:
dispatch(2)
.opDecSlow:
- callSlowPath(_slow_path_dec)
+ callOpcodeSlowPath(_slow_path_dec)
dispatch(2)
@@ -948,10 +930,28 @@ _llint_op_to_number:
.opToNumberIsInt:
storei t2, TagOffset[cfr, t1, 8]
storei t3, PayloadOffset[cfr, t1, 8]
- dispatch(3)
+ valueProfile(t2, t3, 12, t1)
+ dispatch(4)
.opToNumberSlow:
- callSlowPath(_slow_path_to_number)
+ callOpcodeSlowPath(_slow_path_to_number)
+ dispatch(4)
+
+
+_llint_op_to_string:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 4[PC], t1
+ loadConstantOrVariable(t0, t2, t3)
+ bineq t2, CellTag, .opToStringSlow
+ bbneq JSCell::m_type[t3], StringType, .opToStringSlow
+.opToStringIsString:
+ storei t2, TagOffset[cfr, t1, 8]
+ storei t3, PayloadOffset[cfr, t1, 8]
+ dispatch(3)
+
+.opToStringSlow:
+ callOpcodeSlowPath(_slow_path_to_string)
dispatch(3)
@@ -960,22 +960,27 @@ _llint_op_negate:
loadi 8[PC], t0
loadi 4[PC], t3
loadConstantOrVariable(t0, t1, t2)
+ loadisFromInstruction(3, t0)
bineq t1, Int32Tag, .opNegateSrcNotInt
btiz t2, 0x7fffffff, .opNegateSlow
negi t2
+ ori ArithProfileInt, t0
storei Int32Tag, TagOffset[cfr, t3, 8]
+ storeisToInstruction(t0, 3)
storei t2, PayloadOffset[cfr, t3, 8]
- dispatch(3)
+ dispatch(4)
.opNegateSrcNotInt:
bia t1, LowestTag, .opNegateSlow
xori 0x80000000, t1
- storei t1, TagOffset[cfr, t3, 8]
+ ori ArithProfileNumber, t0
storei t2, PayloadOffset[cfr, t3, 8]
- dispatch(3)
+ storeisToInstruction(t0, 3)
+ storei t1, TagOffset[cfr, t3, 8]
+ dispatch(4)
.opNegateSlow:
- callSlowPath(_slow_path_negate)
- dispatch(3)
+ callOpcodeSlowPath(_slow_path_negate)
+ dispatch(4)
macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
@@ -985,6 +990,9 @@ macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
loadConstantOrVariable2Reg(t0, t2, t0)
bineq t2, Int32Tag, .op1NotInt
bineq t3, Int32Tag, .op2NotInt
+ loadisFromInstruction(4, t5)
+ ori ArithProfileIntInt, t5
+ storeisToInstruction(t5, 4)
loadi 4[PC], t2
integerOperationAndStore(t3, t1, t0, .slow, t2)
dispatch(5)
@@ -994,10 +1002,16 @@ macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
bia t2, LowestTag, .slow
bib t3, LowestTag, .op1NotIntOp2Double
bineq t3, Int32Tag, .slow
+ loadisFromInstruction(4, t5)
+ ori ArithProfileNumberInt, t5
+ storeisToInstruction(t5, 4)
ci2d t1, ft1
jmp .op1NotIntReady
.op1NotIntOp2Double:
fii2d t1, t3, ft1
+ loadisFromInstruction(4, t5)
+ ori ArithProfileNumberNumber, t5
+ storeisToInstruction(t5, 4)
.op1NotIntReady:
loadi 4[PC], t1
fii2d t0, t2, ft0
@@ -1009,6 +1023,9 @@ macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
# First operand is definitely an int, the second operand is definitely not.
loadi 4[PC], t2
bia t3, LowestTag, .slow
+ loadisFromInstruction(4, t5)
+ ori ArithProfileIntNumber, t5
+ storeisToInstruction(t5, 4)
ci2d t0, ft0
fii2d t1, t3, ft1
doubleOperation(ft1, ft0)
@@ -1016,7 +1033,7 @@ macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
dispatch(5)
.slow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(5)
end
@@ -1097,7 +1114,7 @@ macro bitOp(operation, slowPath, advance)
dispatch(advance)
.slow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(advance)
end
@@ -1135,7 +1152,7 @@ _llint_op_unsigned:
storei Int32Tag, TagOffset[cfr, t0, 8]
dispatch(3)
.opUnsignedSlow:
- callSlowPath(_slow_path_unsigned)
+ callOpcodeSlowPath(_slow_path_unsigned)
dispatch(3)
@@ -1163,48 +1180,52 @@ _llint_op_bitor:
5)
-_llint_op_check_has_instance:
+_llint_op_overrides_has_instance:
traceExecution()
- loadi 12[PC], t1
- loadConstantOrVariablePayload(t1, CellTag, t0, .opCheckHasInstanceSlow)
- loadp JSCell::m_structure[t0], t0
- btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsDefaultHasInstance, .opCheckHasInstanceSlow
- dispatch(5)
-
-.opCheckHasInstanceSlow:
- callSlowPath(_llint_slow_path_check_has_instance)
- dispatch(0)
+ loadisFromInstruction(1, t3)
+ storei BooleanTag, TagOffset[cfr, t3, 8]
-_llint_op_instanceof:
- traceExecution()
- # Actually do the work.
- loadi 12[PC], t0
- loadi 4[PC], t3
- loadConstantOrVariablePayload(t0, CellTag, t1, .opInstanceofSlow)
- loadp JSCell::m_structure[t1], t2
- bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opInstanceofSlow
- loadi 8[PC], t0
- loadConstantOrVariablePayload(t0, CellTag, t2, .opInstanceofSlow)
-
- # Register state: t1 = prototype, t2 = value
- move 1, t0
-.opInstanceofLoop:
- loadp JSCell::m_structure[t2], t2
- loadi Structure::m_prototype + PayloadOffset[t2], t2
- bpeq t2, t1, .opInstanceofDone
- btinz t2, .opInstanceofLoop
+ # First check if hasInstanceValue is the one on Function.prototype[Symbol.hasInstance]
+ loadisFromInstruction(3, t0)
+ loadConstantOrVariablePayload(t0, CellTag, t2, .opOverrideshasInstanceValueNotCell)
+ loadConstantOrVariable(t0, t1, t2)
+ bineq t1, CellTag, .opOverrideshasInstanceValueNotCell
- move 0, t0
-.opInstanceofDone:
- storei BooleanTag, TagOffset[cfr, t3, 8]
+ # We don't need hasInstanceValue's tag register anymore.
+ loadp CodeBlock[cfr], t1
+ loadp CodeBlock::m_globalObject[t1], t1
+ loadp JSGlobalObject::m_functionProtoHasInstanceSymbolFunction[t1], t1
+ bineq t1, t2, .opOverrideshasInstanceValueNotDefault
+
+ # We know the constructor is a cell.
+ loadisFromInstruction(2, t0)
+ loadConstantOrVariablePayloadUnchecked(t0, t1)
+ tbz JSCell::m_flags[t1], ImplementsDefaultHasInstance, t0
storei t0, PayloadOffset[cfr, t3, 8]
dispatch(4)
-.opInstanceofSlow:
- callSlowPath(_llint_slow_path_instanceof)
+.opOverrideshasInstanceValueNotCell:
+.opOverrideshasInstanceValueNotDefault:
+ storei 1, PayloadOffset[cfr, t3, 8]
dispatch(4)
+_llint_op_instanceof_custom:
+ traceExecution()
+ callOpcodeSlowPath(_llint_slow_path_instanceof_custom)
+ dispatch(5)
+
+
+_llint_op_is_empty:
+ traceExecution()
+ loadi 8[PC], t1
+ loadi 4[PC], t0
+ loadConstantOrVariable(t1, t2, t3)
+ cieq t2, EmptyValueTag, t3
+ storei BooleanTag, TagOffset[cfr, t0, 8]
+ storei t3, PayloadOffset[cfr, t0, 8]
+ dispatch(3)
+
_llint_op_is_undefined:
traceExecution()
@@ -1217,12 +1238,12 @@ _llint_op_is_undefined:
storei t3, PayloadOffset[cfr, t0, 8]
dispatch(3)
.opIsUndefinedCell:
- loadp JSCell::m_structure[t3], t1
- btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opIsUndefinedMasqueradesAsUndefined
+ btbnz JSCell::m_flags[t3], MasqueradesAsUndefined, .opIsUndefinedMasqueradesAsUndefined
move 0, t1
storei t1, PayloadOffset[cfr, t0, 8]
dispatch(3)
.opIsUndefinedMasqueradesAsUndefined:
+ loadp JSCell::m_structureID[t3], t1
loadp CodeBlock[cfr], t3
loadp CodeBlock::m_globalObject[t3], t3
cpeq Structure::m_globalObject[t1], t3, t1
@@ -1253,18 +1274,33 @@ _llint_op_is_number:
dispatch(3)
-_llint_op_is_string:
+_llint_op_is_cell_with_type:
traceExecution()
loadi 8[PC], t1
loadi 4[PC], t2
loadConstantOrVariable(t1, t0, t3)
storei BooleanTag, TagOffset[cfr, t2, 8]
- bineq t0, CellTag, .opIsStringNotCell
- loadp JSCell::m_structure[t3], t0
- cbeq Structure::m_typeInfo + TypeInfo::m_type[t0], StringType, t1
+ bineq t0, CellTag, .notCellCase
+ loadi 12[PC], t0
+ cbeq JSCell::m_type[t3], t0, t1
+ storei t1, PayloadOffset[cfr, t2, 8]
+ dispatch(4)
+.notCellCase:
+ storep 0, PayloadOffset[cfr, t2, 8]
+ dispatch(4)
+
+
+_llint_op_is_object:
+ traceExecution()
+ loadi 8[PC], t1
+ loadi 4[PC], t2
+ loadConstantOrVariable(t1, t0, t3)
+ storei BooleanTag, TagOffset[cfr, t2, 8]
+ bineq t0, CellTag, .opIsObjectNotCell
+ cbaeq JSCell::m_type[t3], ObjectType, t1
storei t1, PayloadOffset[cfr, t2, 8]
dispatch(3)
-.opIsStringNotCell:
+.opIsObjectNotCell:
storep 0, PayloadOffset[cfr, t2, 8]
dispatch(3)
@@ -1302,54 +1338,68 @@ macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, tag,
end
-_llint_op_init_global_const:
- traceExecution()
- writeBarrierOnGlobalObject(2)
- loadi 8[PC], t1
- loadi 4[PC], t0
- loadConstantOrVariable(t1, t2, t3)
- storei t2, TagOffset[t0]
- storei t3, PayloadOffset[t0]
- dispatch(5)
-
-
# We only do monomorphic get_by_id caching for now, and we do not modify the
-# opcode. We do, however, allow for the cache to change anytime if fails, since
-# ping-ponging is free. At best we get lucky and the get_by_id will continue
+# opcode for own properties. We also allow for the cache to change anytime it fails,
+# since ping-ponging is free. At best we get lucky and the get_by_id will continue
# to take fast path on the new cache. At worst we take slow path, which is what
-# we would have been doing anyway.
+# we would have been doing anyway. For prototype/unset properties, we will attempt to
+# convert opcode into a get_by_id_proto_load/get_by_id_unset, respectively, after an
+# execution counter hits zero.
-macro getById(getPropertyStorage)
+_llint_op_get_by_id:
traceExecution()
loadi 8[PC], t0
loadi 16[PC], t1
loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdSlow)
loadi 20[PC], t2
- getPropertyStorage(
- t3,
- t0,
- macro (propertyStorage, scratch)
- bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow
- loadi 4[PC], t1
- loadi TagOffset[propertyStorage, t2], scratch
- loadi PayloadOffset[propertyStorage, t2], t2
- storei scratch, TagOffset[cfr, t1, 8]
- storei t2, PayloadOffset[cfr, t1, 8]
- valueProfile(scratch, t2, 32, t1)
- dispatch(9)
- end)
+ bineq JSCell::m_structureID[t3], t1, .opGetByIdSlow
+ loadPropertyAtVariableOffset(t2, t3, t0, t1)
+ loadi 4[PC], t2
+ storei t0, TagOffset[cfr, t2, 8]
+ storei t1, PayloadOffset[cfr, t2, 8]
+ valueProfile(t0, t1, 32, t2)
+ dispatch(9)
- .opGetByIdSlow:
- callSlowPath(_llint_slow_path_get_by_id)
- dispatch(9)
-end
+.opGetByIdSlow:
+ callOpcodeSlowPath(_llint_slow_path_get_by_id)
+ dispatch(9)
-_llint_op_get_by_id:
- getById(withInlineStorage)
+_llint_op_get_by_id_proto_load:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 16[PC], t1
+ loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdProtoSlow)
+ loadi 20[PC], t2
+ bineq JSCell::m_structureID[t3], t1, .opGetByIdProtoSlow
+ loadpFromInstruction(6, t3)
+ loadPropertyAtVariableOffset(t2, t3, t0, t1)
+ loadi 4[PC], t2
+ storei t0, TagOffset[cfr, t2, 8]
+ storei t1, PayloadOffset[cfr, t2, 8]
+ valueProfile(t0, t1, 32, t2)
+ dispatch(9)
+
+.opGetByIdProtoSlow:
+ callOpcodeSlowPath(_llint_slow_path_get_by_id)
+ dispatch(9)
-_llint_op_get_by_id_out_of_line:
- getById(withOutOfLineStorage)
+
+_llint_op_get_by_id_unset:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 16[PC], t1
+ loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdUnsetSlow)
+ bineq JSCell::m_structureID[t3], t1, .opGetByIdUnsetSlow
+ loadi 4[PC], t2
+ storei UndefinedTag, TagOffset[cfr, t2, 8]
+ storei 0, PayloadOffset[cfr, t2, 8]
+ valueProfile(UndefinedTag, 0, 32, t2)
+ dispatch(9)
+
+.opGetByIdUnsetSlow:
+ callOpcodeSlowPath(_llint_slow_path_get_by_id)
+ dispatch(9)
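
As the comment before the fast path notes, the cache is monomorphic: one structure ID and one property offset per instruction. A rough C++ analogue of the check, with placeholder types standing in for the real CodeBlock/Structure machinery (the real loadPropertyAtVariableOffset also distinguishes inline from out-of-line storage, which this sketch glosses over):

    #include <cstdint>

    struct Value  { uint32_t tag, payload; };
    struct Object { uint32_t structureID; Value* storage; };   // simplified property storage

    struct GetByIdCache {
        uint32_t expectedStructureID;   // written by the slow path after a successful lookup
        unsigned offsetInStorage;
    };

    Value getById(Object* base, GetByIdCache& cache, Value (*slowPath)(Object*, GetByIdCache&))
    {
        if (base->structureID != cache.expectedStructureID)
            return slowPath(base, cache);             // .opGetByIdSlow: may repatch or rewrite the opcode
        return base->storage[cache.offsetInStorage];  // loadPropertyAtVariableOffset
    }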
_llint_op_get_array_length:
@@ -1357,7 +1407,7 @@ _llint_op_get_array_length:
loadi 8[PC], t0
loadp 16[PC], t1
loadConstantOrVariablePayload(t0, CellTag, t3, .opGetArrayLengthSlow)
- loadp JSCell::m_structure[t3], t2
+ move t3, t2
arrayProfile(t2, t1, t0)
btiz t2, IsArray, .opGetArrayLengthSlow
btiz t2, IndexingShapeMask, .opGetArrayLengthSlow
@@ -1371,123 +1421,152 @@ _llint_op_get_array_length:
dispatch(9)
.opGetArrayLengthSlow:
- callSlowPath(_llint_slow_path_get_by_id)
+ callOpcodeSlowPath(_llint_slow_path_get_by_id)
dispatch(9)
-_llint_op_get_arguments_length:
- traceExecution()
- loadi 8[PC], t0
- loadi 4[PC], t1
- bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentsLengthSlow
- loadi ArgumentCount + PayloadOffset[cfr], t2
- subi 1, t2
- storei Int32Tag, TagOffset[cfr, t1, 8]
- storei t2, PayloadOffset[cfr, t1, 8]
- dispatch(4)
-
-.opGetArgumentsLengthSlow:
- callSlowPath(_llint_slow_path_get_arguments_length)
- dispatch(4)
-
-
-macro putById(getPropertyStorage)
+_llint_op_put_by_id:
traceExecution()
writeBarrierOnOperands(1, 3)
loadi 4[PC], t3
- loadi 16[PC], t1
loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
- loadi 12[PC], t2
- getPropertyStorage(
- t0,
- t3,
- macro (propertyStorage, scratch)
- bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
- loadi 20[PC], t1
- loadConstantOrVariable2Reg(t2, scratch, t2)
- storei scratch, TagOffset[propertyStorage, t1]
- storei t2, PayloadOffset[propertyStorage, t1]
- dispatch(9)
- end)
-end
+ loadi JSCell::m_structureID[t0], t2
+ bineq t2, 16[PC], .opPutByIdSlow
-_llint_op_put_by_id:
- putById(withInlineStorage)
+ # At this point, we have:
+ # t2 -> currentStructureID
+ # t0 -> object base
+ # We will lose currentStructureID in the shenanigans below.
-.opPutByIdSlow:
- callSlowPath(_llint_slow_path_put_by_id)
+ loadi 12[PC], t1
+ loadConstantOrVariable(t1, t2, t3)
+ loadi 32[PC], t1
+
+ # At this point, we have:
+ # t0 -> object base
+ # t1 -> put by id flags
+ # t2 -> value tag
+ # t3 -> value payload
+
+ btinz t1, PutByIdPrimaryTypeMask, .opPutByIdTypeCheckObjectWithStructureOrOther
+
+ # We have one of the non-structure type checks. Find out which one.
+ andi PutByIdSecondaryTypeMask, t1
+ bilt t1, PutByIdSecondaryTypeString, .opPutByIdTypeCheckLessThanString
+
+ # We are one of the following: String, Symbol, Object, ObjectOrOther, Top
+ bilt t1, PutByIdSecondaryTypeObjectOrOther, .opPutByIdTypeCheckLessThanObjectOrOther
+
+ # We are either ObjectOrOther or Top.
+ bieq t1, PutByIdSecondaryTypeTop, .opPutByIdDoneCheckingTypes
+
+ # Check if we are ObjectOrOther.
+ bieq t2, CellTag, .opPutByIdTypeCheckObject
+.opPutByIdTypeCheckOther:
+ bieq t2, NullTag, .opPutByIdDoneCheckingTypes
+ bieq t2, UndefinedTag, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckLessThanObjectOrOther:
+ # We are either String, Symbol or Object.
+ bineq t2, CellTag, .opPutByIdSlow
+ bieq t1, PutByIdSecondaryTypeObject, .opPutByIdTypeCheckObject
+ bieq t1, PutByIdSecondaryTypeSymbol, .opPutByIdTypeCheckSymbol
+ bbeq JSCell::m_type[t3], StringType, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+.opPutByIdTypeCheckObject:
+ bbaeq JSCell::m_type[t3], ObjectType, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+.opPutByIdTypeCheckSymbol:
+ bbeq JSCell::m_type[t3], SymbolType, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckLessThanString:
+ # We are one of the following: Bottom, Boolean, Other, Int32, Number.
+ bilt t1, PutByIdSecondaryTypeInt32, .opPutByIdTypeCheckLessThanInt32
+
+ # We are either Int32 or Number.
+ bieq t1, PutByIdSecondaryTypeNumber, .opPutByIdTypeCheckNumber
+
+ bieq t2, Int32Tag, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckNumber:
+ bib t2, LowestTag + 1, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckLessThanInt32:
+ # We are one of the following: Bottom, Boolean, Other
+ bineq t1, PutByIdSecondaryTypeBoolean, .opPutByIdTypeCheckBottomOrOther
+ bieq t2, BooleanTag, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckBottomOrOther:
+ bieq t1, PutByIdSecondaryTypeOther, .opPutByIdTypeCheckOther
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckObjectWithStructureOrOther:
+ bieq t2, CellTag, .opPutByIdTypeCheckObjectWithStructure
+ btinz t1, PutByIdPrimaryTypeObjectWithStructureOrOther, .opPutByIdTypeCheckOther
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckObjectWithStructure:
+ andi PutByIdSecondaryTypeMask, t1
+ bineq t1, JSCell::m_structureID[t3], .opPutByIdSlow
+
+.opPutByIdDoneCheckingTypes:
+ loadi 24[PC], t1
+
+ btiz t1, .opPutByIdNotTransition
+
+ # This is the transition case. t1 holds the new Structure*. If we have a chain, we need to
+ # check it. t0 is the base. We may clobber t1 to use it as scratch.
+ loadp 28[PC], t3
+ btpz t3, .opPutByIdTransitionDirect
+
+ loadi 16[PC], t2 # Need old structure again.
+ loadp StructureChain::m_vector[t3], t3
+ assert(macro (ok) btpnz t3, ok end)
+
+ loadp Structure::m_prototype[t2], t2
+ btpz t2, .opPutByIdTransitionChainDone
+.opPutByIdTransitionChainLoop:
+ loadp [t3], t1
+ bpneq t1, JSCell::m_structureID[t2], .opPutByIdSlow
+ addp 4, t3
+ loadp Structure::m_prototype[t1], t2
+ btpnz t2, .opPutByIdTransitionChainLoop
+
+.opPutByIdTransitionChainDone:
+ loadi 24[PC], t1
+
+.opPutByIdTransitionDirect:
+ storei t1, JSCell::m_structureID[t0]
+ loadi 12[PC], t1
+ loadConstantOrVariable(t1, t2, t3)
+ loadi 20[PC], t1
+ storePropertyAtVariableOffset(t1, t0, t2, t3)
+ writeBarrierOnOperand(1)
dispatch(9)
-
-_llint_op_put_by_id_out_of_line:
- putById(withOutOfLineStorage)
-
-
-macro putByIdTransition(additionalChecks, getPropertyStorage)
- traceExecution()
- writeBarrierOnOperand(1)
- loadi 4[PC], t3
- loadi 16[PC], t1
- loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
- loadi 12[PC], t2
- bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
- additionalChecks(t1, t3)
+.opPutByIdNotTransition:
+ # The only thing live right now is t0, which holds the base.
+ loadi 12[PC], t1
+ loadConstantOrVariable(t1, t2, t3)
loadi 20[PC], t1
- getPropertyStorage(
- t0,
- t3,
- macro (propertyStorage, scratch)
- addp t1, propertyStorage, t3
- loadConstantOrVariable2Reg(t2, t1, t2)
- storei t1, TagOffset[t3]
- loadi 24[PC], t1
- storei t2, PayloadOffset[t3]
- storep t1, JSCell::m_structure[t0]
- dispatch(9)
- end)
-end
-
-macro noAdditionalChecks(oldStructure, scratch)
-end
-
-macro structureChainChecks(oldStructure, scratch)
- const protoCell = oldStructure # Reusing the oldStructure register for the proto
-
- loadp 28[PC], scratch
- assert(macro (ok) btpnz scratch, ok end)
- loadp StructureChain::m_vector[scratch], scratch
- assert(macro (ok) btpnz scratch, ok end)
- bieq Structure::m_prototype + TagOffset[oldStructure], NullTag, .done
-.loop:
- loadi Structure::m_prototype + PayloadOffset[oldStructure], protoCell
- loadp JSCell::m_structure[protoCell], oldStructure
- bpneq oldStructure, [scratch], .opPutByIdSlow
- addp 4, scratch
- bineq Structure::m_prototype + TagOffset[oldStructure], NullTag, .loop
-.done:
-end
-
-_llint_op_put_by_id_transition_direct:
- putByIdTransition(noAdditionalChecks, withInlineStorage)
-
-
-_llint_op_put_by_id_transition_direct_out_of_line:
- putByIdTransition(noAdditionalChecks, withOutOfLineStorage)
-
-
-_llint_op_put_by_id_transition_normal:
- putByIdTransition(structureChainChecks, withInlineStorage)
-
+ storePropertyAtVariableOffset(t1, t0, t2, t3)
+ dispatch(9)
-_llint_op_put_by_id_transition_normal_out_of_line:
- putByIdTransition(structureChainChecks, withOutOfLineStorage)
+.opPutByIdSlow:
+ callOpcodeSlowPath(_llint_slow_path_put_by_id)
+ dispatch(9)
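
The transition half of the code above does three things: verify the old structure, optionally verify every prototype on a cached structure chain, then write the new structure ID and store the value at the cached offset. The block of PutByIdPrimaryType/SecondaryType branches before it enforces the value type recorded for the property and is omitted here. A hedged C++ sketch with invented field names:

    #include <cstdint>
    #include <cstddef>

    struct Cell { uint32_t structureID; };

    struct PutByIdCache {
        uint32_t oldStructureID;
        uint32_t newStructureID;   // non-zero only for the transition case
        const uint32_t* chain;     // expected structure IDs of each prototype, or nullptr
        unsigned offset;
    };

    // prototypes: the base object's current prototype chain, null-terminated.
    // Returns false where the interpreter would jump to .opPutByIdSlow.
    bool tryPutByIdTransition(Cell* base, Cell* const* prototypes, const PutByIdCache& cache)
    {
        if (base->structureID != cache.oldStructureID)
            return false;
        if (cache.chain) {
            size_t i = 0;
            for (Cell* const* p = prototypes; *p; ++p, ++i) {
                if ((*p)->structureID != cache.chain[i])
                    return false;          // chain no longer matches: slow path
            }
        }
        base->structureID = cache.newStructureID;   // commit, then store at cache.offset
        return true;
    }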
_llint_op_get_by_val:
traceExecution()
loadi 8[PC], t2
loadConstantOrVariablePayload(t2, CellTag, t0, .opGetByValSlow)
- loadp JSCell::m_structure[t0], t2
+ move t0, t2
loadp 16[PC], t3
arrayProfile(t2, t3, t1)
loadi 12[PC], t3
@@ -1533,65 +1612,10 @@ _llint_op_get_by_val:
loadpFromInstruction(4, t0)
storeb 1, ArrayProfile::m_outOfBounds[t0]
.opGetByValSlow:
- callSlowPath(_llint_slow_path_get_by_val)
+ callOpcodeSlowPath(_llint_slow_path_get_by_val)
dispatch(6)
-_llint_op_get_argument_by_val:
- # FIXME: At some point we should array profile this. Right now it isn't necessary
- # since the DFG will never turn a get_argument_by_val into a GetByVal.
- traceExecution()
- loadi 8[PC], t0
- loadi 12[PC], t1
- bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentByValSlow
- loadConstantOrVariablePayload(t1, Int32Tag, t2, .opGetArgumentByValSlow)
- addi 1, t2
- loadi ArgumentCount + PayloadOffset[cfr], t1
- biaeq t2, t1, .opGetArgumentByValSlow
- loadi 4[PC], t3
- loadi ThisArgumentOffset + TagOffset[cfr, t2, 8], t0
- loadi ThisArgumentOffset + PayloadOffset[cfr, t2, 8], t1
- storei t0, TagOffset[cfr, t3, 8]
- storei t1, PayloadOffset[cfr, t3, 8]
- valueProfile(t0, t1, 20, t2)
- dispatch(6)
-
-.opGetArgumentByValSlow:
- callSlowPath(_llint_slow_path_get_argument_by_val)
- dispatch(6)
-
-
-_llint_op_get_by_pname:
- traceExecution()
- loadi 12[PC], t0
- loadConstantOrVariablePayload(t0, CellTag, t1, .opGetByPnameSlow)
- loadi 16[PC], t0
- bpneq t1, PayloadOffset[cfr, t0, 8], .opGetByPnameSlow
- loadi 8[PC], t0
- loadConstantOrVariablePayload(t0, CellTag, t2, .opGetByPnameSlow)
- loadi 20[PC], t0
- loadi PayloadOffset[cfr, t0, 8], t3
- loadp JSCell::m_structure[t2], t0
- bpneq t0, JSPropertyNameIterator::m_cachedStructure[t3], .opGetByPnameSlow
- loadi 24[PC], t0
- loadi PayloadOffset[cfr, t0, 8], t0
- subi 1, t0
- biaeq t0, JSPropertyNameIterator::m_numCacheableSlots[t3], .opGetByPnameSlow
- bilt t0, JSPropertyNameIterator::m_cachedStructureInlineCapacity[t3], .opGetByPnameInlineProperty
- addi firstOutOfLineOffset, t0
- subi JSPropertyNameIterator::m_cachedStructureInlineCapacity[t3], t0
-.opGetByPnameInlineProperty:
- loadPropertyAtVariableOffset(t0, t2, t1, t3)
- loadi 4[PC], t0
- storei t1, TagOffset[cfr, t0, 8]
- storei t3, PayloadOffset[cfr, t0, 8]
- dispatch(7)
-
-.opGetByPnameSlow:
- callSlowPath(_llint_slow_path_get_by_pname)
- dispatch(7)
-
-
macro contiguousPutByVal(storeCallback)
biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds
.storeResult:
@@ -1608,12 +1632,12 @@ macro contiguousPutByVal(storeCallback)
jmp .storeResult
end
-macro putByVal(holeCheck, slowPath)
+macro putByVal(slowPath)
traceExecution()
writeBarrierOnOperands(1, 3)
loadi 4[PC], t0
loadConstantOrVariablePayload(t0, CellTag, t1, .opPutByValSlow)
- loadp JSCell::m_structure[t1], t2
+ move t1, t2
loadp 16[PC], t3
arrayProfile(t2, t3, t0)
loadi 8[PC], t0
@@ -1659,7 +1683,7 @@ macro putByVal(holeCheck, slowPath)
.opPutByValNotContiguous:
bineq t2, ArrayStorageShape, .opPutByValSlow
biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
- holeCheck(ArrayStorage::m_vector + TagOffset[t0, t3, 8], .opPutByValArrayStorageEmpty)
+ bieq ArrayStorage::m_vector + TagOffset[t0, t3, 8], EmptyValueTag, .opPutByValArrayStorageEmpty
.opPutByValArrayStorageStoreResult:
loadi 12[PC], t2
loadConstantOrVariable2Reg(t2, t1, t2)
@@ -1680,18 +1704,15 @@ macro putByVal(holeCheck, slowPath)
loadpFromInstruction(4, t0)
storeb 1, ArrayProfile::m_outOfBounds[t0]
.opPutByValSlow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(5)
end
_llint_op_put_by_val:
- putByVal(macro(addr, slowPath)
- bieq addr, EmptyValueTag, slowPath
- end, _llint_slow_path_put_by_val)
+ putByVal(_llint_slow_path_put_by_val)
_llint_op_put_by_val_direct:
- putByVal(macro(addr, slowPath)
- end, _llint_slow_path_put_by_val_direct)
+ putByVal(_llint_slow_path_put_by_val_direct)
_llint_op_jmp:
traceExecution()
@@ -1708,7 +1729,7 @@ macro jumpTrueOrFalse(conditionOp, slow)
dispatchBranch(8[PC])
.slow:
- callSlowPath(slow)
+ callOpcodeSlowPath(slow)
dispatch(0)
end
@@ -1719,8 +1740,8 @@ macro equalNull(cellHandler, immediateHandler)
loadi TagOffset[cfr, t0, 8], t1
loadi PayloadOffset[cfr, t0, 8], t0
bineq t1, CellTag, .immediate
- loadp JSCell::m_structure[t0], t2
- cellHandler(t2, Structure::m_typeInfo + TypeInfo::m_flags[t2], .target)
+ loadp JSCell::m_structureID[t0], t2
+ cellHandler(t2, JSCell::m_flags[t0], .target)
dispatch(3)
.target:
@@ -1767,9 +1788,10 @@ _llint_op_jneq_ptr:
loadp JSGlobalObject::m_specialPointers[t2, t1, 4], t1
bpeq PayloadOffset[cfr, t0, 8], t1, .opJneqPtrFallThrough
.opJneqPtrBranch:
+ storei 1, 16[PC]
dispatchBranch(12[PC])
.opJneqPtrFallThrough:
- dispatch(4)
+ dispatch(5)
macro compare(integerCompare, doubleCompare, slowPath)
@@ -1806,7 +1828,7 @@ macro compare(integerCompare, doubleCompare, slowPath)
dispatchBranch(12[PC])
.slow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(0)
end
@@ -1835,7 +1857,7 @@ _llint_op_switch_imm:
dispatchBranch(8[PC])
.opSwitchImmSlow:
- callSlowPath(_llint_slow_path_switch_imm)
+ callOpcodeSlowPath(_llint_slow_path_switch_imm)
dispatch(0)
@@ -1850,8 +1872,7 @@ _llint_op_switch_char:
loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
addp t3, t2
bineq t1, CellTag, .opSwitchCharFallThrough
- loadp JSCell::m_structure[t0], t1
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t1], StringType, .opSwitchCharFallThrough
+ bbneq JSCell::m_type[t0], StringType, .opSwitchCharFallThrough
bineq JSString::m_length[t0], 1, .opSwitchCharFallThrough
loadp JSString::m_value[t0], t0
btpz t0, .opSwitchOnRope
@@ -1873,39 +1894,22 @@ _llint_op_switch_char:
dispatchBranch(8[PC])
.opSwitchOnRope:
- callSlowPath(_llint_slow_path_switch_char)
+ callOpcodeSlowPath(_llint_slow_path_switch_char)
dispatch(0)
-_llint_op_new_func:
- traceExecution()
- btiz 12[PC], .opNewFuncUnchecked
- loadi 4[PC], t1
- bineq TagOffset[cfr, t1, 8], EmptyValueTag, .opNewFuncDone
-.opNewFuncUnchecked:
- callSlowPath(_llint_slow_path_new_func)
-.opNewFuncDone:
- dispatch(4)
-
-
-_llint_op_new_captured_func:
- traceExecution()
- callSlowPath(_slow_path_new_captured_func)
- dispatch(4)
-
-
macro arrayProfileForCall()
loadi 16[PC], t3
negi t3
bineq ThisArgumentOffset + TagOffset[cfr, t3, 8], CellTag, .done
loadi ThisArgumentOffset + PayloadOffset[cfr, t3, 8], t0
- loadp JSCell::m_structure[t0], t0
- loadp 24[PC], t1
- storep t0, ArrayProfile::m_lastSeenStructure[t1]
+ loadp JSCell::m_structureID[t0], t0
+ loadpFromInstruction(CallOpCodeSize - 2, t1)
+ storep t0, ArrayProfile::m_lastSeenStructureID[t1]
.done:
end
-macro doCall(slowPath)
+macro doCall(slowPath, prepareCall)
loadi 8[PC], t0
loadi 20[PC], t1
loadp LLIntCallLinkInfo::callee[t1], t2
@@ -1915,42 +1919,19 @@ macro doCall(slowPath)
lshifti 3, t3
negi t3
addp cfr, t3 # t3 contains the new value of cfr
- loadp JSFunction::m_scope[t2], t0
storei t2, Callee + PayloadOffset[t3]
- storei t0, ScopeChain + PayloadOffset[t3]
loadi 12[PC], t2
storei PC, ArgumentCount + TagOffset[cfr]
- storep cfr, CallerFrame[t3]
storei t2, ArgumentCount + PayloadOffset[t3]
storei CellTag, Callee + TagOffset[t3]
- storei CellTag, ScopeChain + TagOffset[t3]
- move t3, cfr
- callTargetFunction(t1)
+ move t3, sp
+ prepareCall(LLIntCallLinkInfo::machineCodeTarget[t1], t2, t3, t4)
+ callTargetFunction(LLIntCallLinkInfo::machineCodeTarget[t1])
.opCallSlow:
- slowPathForCall(slowPath)
+ slowPathForCall(slowPath, prepareCall)
end
-
-_llint_op_tear_off_activation:
- traceExecution()
- loadi 4[PC], t0
- bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffActivationNotCreated
- callSlowPath(_llint_slow_path_tear_off_activation)
-.opTearOffActivationNotCreated:
- dispatch(2)
-
-
-_llint_op_tear_off_arguments:
- traceExecution()
- loadi 4[PC], t0
- addi 1, t0 # Get the unmodifiedArgumentsRegister
- bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffArgumentsNotCreated
- callSlowPath(_llint_slow_path_tear_off_arguments)
-.opTearOffArgumentsNotCreated:
- dispatch(3)
-
-
_llint_op_ret:
traceExecution()
checkSwitchToJITForEpilogue()
@@ -1959,135 +1940,62 @@ _llint_op_ret:
doReturn()
-_llint_op_ret_object_or_this:
- traceExecution()
- checkSwitchToJITForEpilogue()
- loadi 4[PC], t2
- loadConstantOrVariable(t2, t1, t0)
- bineq t1, CellTag, .opRetObjectOrThisNotObject
- loadp JSCell::m_structure[t0], t2
- bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opRetObjectOrThisNotObject
- doReturn()
-
-.opRetObjectOrThisNotObject:
- loadi 8[PC], t2
- loadConstantOrVariable(t2, t1, t0)
- doReturn()
-
-
_llint_op_to_primitive:
traceExecution()
loadi 8[PC], t2
loadi 4[PC], t3
loadConstantOrVariable(t2, t1, t0)
bineq t1, CellTag, .opToPrimitiveIsImm
- loadp JSCell::m_structure[t0], t2
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .opToPrimitiveSlowCase
+ bbaeq JSCell::m_type[t0], ObjectType, .opToPrimitiveSlowCase
.opToPrimitiveIsImm:
storei t1, TagOffset[cfr, t3, 8]
storei t0, PayloadOffset[cfr, t3, 8]
dispatch(3)
.opToPrimitiveSlowCase:
- callSlowPath(_slow_path_to_primitive)
+ callOpcodeSlowPath(_slow_path_to_primitive)
dispatch(3)
-_llint_op_next_pname:
- traceExecution()
- loadi 12[PC], t1
- loadi 16[PC], t2
- loadi PayloadOffset[cfr, t1, 8], t0
- bieq t0, PayloadOffset[cfr, t2, 8], .opNextPnameEnd
- loadi 20[PC], t2
- loadi PayloadOffset[cfr, t2, 8], t2
- loadp JSPropertyNameIterator::m_jsStrings[t2], t3
- loadi PayloadOffset[t3, t0, 8], t3
- addi 1, t0
- storei t0, PayloadOffset[cfr, t1, 8]
- loadi 4[PC], t1
- storei CellTag, TagOffset[cfr, t1, 8]
- storei t3, PayloadOffset[cfr, t1, 8]
- loadi 8[PC], t3
- loadi PayloadOffset[cfr, t3, 8], t3
- loadp JSCell::m_structure[t3], t1
- bpneq t1, JSPropertyNameIterator::m_cachedStructure[t2], .opNextPnameSlow
- loadp JSPropertyNameIterator::m_cachedPrototypeChain[t2], t0
- loadp StructureChain::m_vector[t0], t0
- btpz [t0], .opNextPnameTarget
-.opNextPnameCheckPrototypeLoop:
- bieq Structure::m_prototype + TagOffset[t1], NullTag, .opNextPnameSlow
- loadp Structure::m_prototype + PayloadOffset[t1], t2
- loadp JSCell::m_structure[t2], t1
- bpneq t1, [t0], .opNextPnameSlow
- addp 4, t0
- btpnz [t0], .opNextPnameCheckPrototypeLoop
-.opNextPnameTarget:
- dispatchBranch(24[PC])
-
-.opNextPnameEnd:
- dispatch(7)
-
-.opNextPnameSlow:
- callSlowPath(_llint_slow_path_next_pname) # This either keeps the PC where it was (causing us to loop) or sets it to target.
- dispatch(0)
-
-
_llint_op_catch:
# This is where we end up from the JIT's throw trampoline (because the
# machine code return address will be set to _llint_op_catch), and from
# the interpreter's throw trampoline (see _llint_throw_trampoline).
# The throwing code must have known that we were throwing to the interpreter,
# and have set VM::targetInterpreterPCForThrow.
- loadp ScopeChain + PayloadOffset[cfr], t3
+ loadp Callee + PayloadOffset[cfr], t3
andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- loadp VM::callFrameForThrow[t3], cfr
- loadi VM::targetInterpreterPCForThrow[t3], PC
- loadi VM::m_exception + PayloadOffset[t3], t0
- loadi VM::m_exception + TagOffset[t3], t1
- storei 0, VM::m_exception + PayloadOffset[t3]
- storei EmptyValueTag, VM::m_exception + TagOffset[t3]
- loadi 4[PC], t2
- storei t0, PayloadOffset[cfr, t2, 8]
- storei t1, TagOffset[cfr, t2, 8]
- traceExecution() # This needs to be here because we don't want to clobber t0, t1, t2, t3 above.
- dispatch(2)
-
-
-# Gives you the scope in t0, while allowing you to optionally perform additional checks on the
-# scopes as they are traversed. scopeCheck() is called with two arguments: the register
-# holding the scope, and a register that can be used for scratch. Note that this does not
-# use t3, so you can hold stuff in t3 if need be.
-macro getDeBruijnScope(deBruijinIndexOperand, scopeCheck)
- loadp ScopeChain + PayloadOffset[cfr], t0
- loadi deBruijinIndexOperand, t2
-
- btiz t2, .done
+ loadp MarkedBlock::m_vm[t3], t3
+ restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(t3, t0)
+ loadp VM::callFrameForCatch[t3], cfr
+ storep 0, VM::callFrameForCatch[t3]
+ restoreStackPointerAfterCall()
- loadp CodeBlock[cfr], t1
- bineq CodeBlock::m_codeType[t1], FunctionCode, .loop
- btbz CodeBlock::m_needsActivation[t1], .loop
+ loadi VM::targetInterpreterPCForThrow[t3], PC
- loadi CodeBlock::m_activationRegister[t1], t1
+ callOpcodeSlowPath(_llint_slow_path_check_if_exception_is_uncatchable_and_notify_profiler)
+ bpeq r1, 0, .isCatchableException
+ jmp _llint_throw_from_slow_path_trampoline
- # Need to conditionally skip over one scope.
- bieq TagOffset[cfr, t1, 8], EmptyValueTag, .noActivation
- scopeCheck(t0, t1)
- loadp JSScope::m_next[t0], t0
-.noActivation:
- subi 1, t2
+.isCatchableException:
+ loadp Callee + PayloadOffset[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_vm[t3], t3
- btiz t2, .done
-.loop:
- scopeCheck(t0, t1)
- loadp JSScope::m_next[t0], t0
- subi 1, t2
- btinz t2, .loop
+ loadi VM::m_exception[t3], t0
+ storei 0, VM::m_exception[t3]
+ loadi 4[PC], t2
+ storei t0, PayloadOffset[cfr, t2, 8]
+ storei CellTag, TagOffset[cfr, t2, 8]
-.done:
+ loadi Exception::m_value + TagOffset[t0], t1
+ loadi Exception::m_value + PayloadOffset[t0], t0
+ loadi 8[PC], t2
+ storei t0, PayloadOffset[cfr, t2, 8]
+ storei t1, TagOffset[cfr, t2, 8]
-end
+ traceExecution() # This needs to be here because we don't want to clobber t0, t1, t2, t3 above.
+ dispatch(3)
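
op_catch has no CodeBlock it can trust at this point, so it recovers the VM from the callee cell itself: cells live inside aligned MarkedBlocks, so masking any cell pointer yields its block header, which points back at the VM. A sketch of that trick with an assumed block size:

    #include <cstdint>

    struct VM;
    struct MarkedBlock { VM* vm; /* ... */ };

    // Assumed alignment for illustration; the real value comes from MarkedBlock's block size.
    constexpr uintptr_t MarkedBlockMask = ~(uintptr_t(16 * 1024) - 1);

    VM* vmForCell(void* anyCellInABlock)
    {
        auto* block = reinterpret_cast<MarkedBlock*>(
            reinterpret_cast<uintptr_t>(anyCellInABlock) & MarkedBlockMask);
        return block->vm;   // loadp MarkedBlock::m_vm[t3], t3
    }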
_llint_op_end:
traceExecution()
@@ -2105,8 +2013,10 @@ _llint_throw_from_slow_path_trampoline:
# When throwing from the interpreter (i.e. throwing from LLIntSlowPaths), so
# the throw target is not necessarily interpreted code, we come to here.
# This essentially emulates the JIT's throwing protocol.
- loadp CodeBlock[cfr], t1
- loadp CodeBlock::m_vm[t1], t1
+ loadp Callee[cfr], t1
+ andp MarkedBlockMask, t1
+ loadp MarkedBlock::m_vm[t1], t1
+ copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(t1, t2)
jmp VM::targetMachinePCForThrow[t1]
@@ -2116,81 +2026,62 @@ _llint_throw_during_call_trampoline:
macro nativeCallTrampoline(executableOffsetToFunction)
+
+ functionPrologue()
storep 0, CodeBlock[cfr]
- loadp CallerFrame[cfr], t0
- loadi ScopeChain + PayloadOffset[t0], t1
- storei CellTag, ScopeChain + TagOffset[cfr]
- storei t1, ScopeChain + PayloadOffset[cfr]
- if X86
- loadp PayloadOffset + ScopeChain[cfr], t3
- andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+ loadi Callee + PayloadOffset[cfr], t1
+ // Callee is still in t1 for code below
+ if X86 or X86_WIN
+ subp 8, sp # align stack pointer
+ andp MarkedBlockMask, t1
+ loadp MarkedBlock::m_vm[t1], t3
storep cfr, VM::topCallFrame[t3]
- peek 0, t1
- storep t1, ReturnPC[cfr]
- move cfr, t2 # t2 = ecx
- subp 16 - 4, sp
+ move cfr, a0 # a0 = ecx
+ storep a0, [sp]
loadi Callee + PayloadOffset[cfr], t1
loadp JSFunction::m_executable[t1], t1
- move t0, cfr
+ checkStackPointerAlignment(t3, 0xdead0001)
call executableOffsetToFunction[t1]
- addp 16 - 4, sp
- loadp PayloadOffset + ScopeChain[cfr], t3
+ loadp Callee + PayloadOffset[cfr], t3
andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- elsif ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
- loadp PayloadOffset + ScopeChain[cfr], t3
- andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- storep cfr, VM::topCallFrame[t3]
- move t0, t2
- preserveReturnAddressAfterCall(t3)
- storep t3, ReturnPC[cfr]
- move cfr, t0
+ loadp MarkedBlock::m_vm[t3], t3
+ addp 8, sp
+ elsif ARM or ARMv7 or ARMv7_TRADITIONAL or C_LOOP or MIPS
+ subp 8, sp # align stack pointer
+ # t1 already contains the Callee.
+ andp MarkedBlockMask, t1
+ loadp MarkedBlock::m_vm[t1], t1
+ storep cfr, VM::topCallFrame[t1]
+ move cfr, a0
loadi Callee + PayloadOffset[cfr], t1
loadp JSFunction::m_executable[t1], t1
- move t2, cfr
- if MIPS or SH4
- move t0, a0
+ checkStackPointerAlignment(t3, 0xdead0001)
+ if C_LOOP
+ cloopCallNative executableOffsetToFunction[t1]
+ else
+ call executableOffsetToFunction[t1]
end
- call executableOffsetToFunction[t1]
- restoreReturnAddressBeforeReturn(t3)
- loadp PayloadOffset + ScopeChain[cfr], t3
- andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- elsif C_LOOP
- loadp PayloadOffset + ScopeChain[cfr], t3
- andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- storep cfr, VM::topCallFrame[t3]
- move t0, t2
- preserveReturnAddressAfterCall(t3)
- storep t3, ReturnPC[cfr]
- move cfr, t0
- loadi Callee + PayloadOffset[cfr], t1
- loadp JSFunction::m_executable[t1], t1
- move t2, cfr
- cloopCallNative executableOffsetToFunction[t1]
- restoreReturnAddressBeforeReturn(t3)
- loadp PayloadOffset + ScopeChain[cfr], t3
+ loadp Callee + PayloadOffset[cfr], t3
andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+ loadp MarkedBlock::m_vm[t3], t3
+ addp 8, sp
else
error
end
- bineq VM::m_exception + TagOffset[t3], EmptyValueTag, .exception
+
+ btinz VM::m_exception[t3], .handleException
+
+ functionEpilogue()
ret
-.exception:
- preserveReturnAddressAfterCall(t1) # This is really only needed on X86
- loadi ArgumentCount + TagOffset[cfr], PC
- callSlowPath(_llint_throw_from_native_call)
+
+.handleException:
+ storep cfr, VM::topCallFrame[t3]
jmp _llint_throw_from_slow_path_trampoline
end
-macro getGlobalObject(dst)
- loadp CodeBlock[cfr], t0
- loadp CodeBlock::m_globalObject[t0], t0
+macro getConstantScope(dst)
+ loadpFromInstruction(6, t0)
loadisFromInstruction(dst, t1)
storei CellTag, TagOffset[cfr, t1, 8]
storei t0, PayloadOffset[cfr, t1, 8]
@@ -2205,14 +2096,10 @@ end
macro resolveScope()
loadp CodeBlock[cfr], t0
- loadisFromInstruction(4, t2)
- btbz CodeBlock::m_needsActivation[t0], .resolveScopeAfterActivationCheck
- loadis CodeBlock::m_activationRegister[t0], t1
- btpz PayloadOffset[cfr, t1, 8], .resolveScopeAfterActivationCheck
- addi 1, t2
+ loadisFromInstruction(5, t2)
-.resolveScopeAfterActivationCheck:
- loadp ScopeChain + PayloadOffset[cfr], t0
+ loadisFromInstruction(2, t0)
+ loadp PayloadOffset[cfr, t0, 8], t0
btiz t2, .resolveScopeLoopEnd
.resolveScopeLoop:
@@ -2229,55 +2116,71 @@ end
_llint_op_resolve_scope:
traceExecution()
- loadisFromInstruction(3, t0)
+ loadisFromInstruction(4, t0)
#rGlobalProperty:
bineq t0, GlobalProperty, .rGlobalVar
- getGlobalObject(1)
- dispatch(6)
+ getConstantScope(1)
+ dispatch(7)
.rGlobalVar:
- bineq t0, GlobalVar, .rClosureVar
- getGlobalObject(1)
- dispatch(6)
+ bineq t0, GlobalVar, .rGlobalLexicalVar
+ getConstantScope(1)
+ dispatch(7)
+
+.rGlobalLexicalVar:
+ bineq t0, GlobalLexicalVar, .rClosureVar
+ getConstantScope(1)
+ dispatch(7)
.rClosureVar:
- bineq t0, ClosureVar, .rGlobalPropertyWithVarInjectionChecks
+ bineq t0, ClosureVar, .rModuleVar
resolveScope()
- dispatch(6)
+ dispatch(7)
+
+.rModuleVar:
+ bineq t0, ModuleVar, .rGlobalPropertyWithVarInjectionChecks
+ getConstantScope(1)
+ dispatch(7)
.rGlobalPropertyWithVarInjectionChecks:
bineq t0, GlobalPropertyWithVarInjectionChecks, .rGlobalVarWithVarInjectionChecks
varInjectionCheck(.rDynamic)
- getGlobalObject(1)
- dispatch(6)
+ getConstantScope(1)
+ dispatch(7)
.rGlobalVarWithVarInjectionChecks:
- bineq t0, GlobalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks
+ bineq t0, GlobalVarWithVarInjectionChecks, .rGlobalLexicalVarWithVarInjectionChecks
varInjectionCheck(.rDynamic)
- getGlobalObject(1)
- dispatch(6)
+ getConstantScope(1)
+ dispatch(7)
+
+.rGlobalLexicalVarWithVarInjectionChecks:
+ bineq t0, GlobalLexicalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks
+ varInjectionCheck(.rDynamic)
+ getConstantScope(1)
+ dispatch(7)
.rClosureVarWithVarInjectionChecks:
bineq t0, ClosureVarWithVarInjectionChecks, .rDynamic
varInjectionCheck(.rDynamic)
resolveScope()
- dispatch(6)
+ dispatch(7)
.rDynamic:
- callSlowPath(_llint_slow_path_resolve_scope)
- dispatch(6)
+ callOpcodeSlowPath(_slow_path_resolve_scope)
+ dispatch(7)
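
A compact C++ sketch (illustrative enum values, not the real ResolveType encoding) of the dispatch op_resolve_scope performs above: the resolve type selects between the constant scope stored in the instruction, a walk up the scope chain, or the slow path, with the *WithVarInjectionChecks variants performing a watchpoint-style check first:

    enum ResolveType { GlobalProperty, GlobalVar, GlobalLexicalVar, ClosureVar, ModuleVar, Dynamic };

    struct Scope { Scope* next; };

    Scope* resolveScope(ResolveType type, Scope* constantScope, Scope* start, unsigned depth)
    {
        switch (type) {
        case GlobalProperty:
        case GlobalVar:
        case GlobalLexicalVar:
        case ModuleVar:
            return constantScope;          // getConstantScope(1)
        case ClosureVar: {
            Scope* s = start;              // resolveScope(): follow 'depth' parent links
            while (depth--)
                s = s->next;
            return s;
        }
        default:
            return nullptr;                // .rDynamic -> slow path
        }
    }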
macro loadWithStructureCheck(operand, slowPath)
loadisFromInstruction(operand, t0)
loadp PayloadOffset[cfr, t0, 8], t0
loadpFromInstruction(5, t1)
- bpneq JSCell::m_structure[t0], t1, slowPath
+ bpneq JSCell::m_structureID[t0], t1, slowPath
end
macro getProperty()
- loadpFromInstruction(6, t3)
+ loadisFromInstruction(6, t3)
loadPropertyAtVariableOffset(t3, t0, t1, t2)
valueProfile(t1, t2, 28, t0)
loadisFromInstruction(1, t0)
@@ -2285,10 +2188,11 @@ macro getProperty()
storei t2, PayloadOffset[cfr, t0, 8]
end
-macro getGlobalVar()
+macro getGlobalVar(tdzCheckIfNecessary)
loadpFromInstruction(6, t0)
loadp TagOffset[t0], t1
loadp PayloadOffset[t0], t2
+ tdzCheckIfNecessary(t1)
valueProfile(t1, t2, 28, t0)
loadisFromInstruction(1, t0)
storei t1, TagOffset[cfr, t0, 8]
@@ -2296,10 +2200,9 @@ macro getGlobalVar()
end
macro getClosureVar()
- loadp JSVariableObject::m_registers[t0], t0
- loadpFromInstruction(6, t3)
- loadp TagOffset[t0, t3, 8], t1
- loadp PayloadOffset[t0, t3, 8], t2
+ loadisFromInstruction(6, t3)
+ loadp JSEnvironmentRecord_variables + TagOffset[t0, t3, 8], t1
+ loadp JSEnvironmentRecord_variables + PayloadOffset[t0, t3, 8], t2
valueProfile(t1, t2, 28, t0)
loadisFromInstruction(1, t0)
storei t1, TagOffset[cfr, t0, 8]
@@ -2309,7 +2212,7 @@ end
_llint_op_get_from_scope:
traceExecution()
loadisFromInstruction(4, t0)
- andi ResolveModeMask, t0
+ andi ResolveTypeMask, t0
#gGlobalProperty:
bineq t0, GlobalProperty, .gGlobalVar
@@ -2318,8 +2221,16 @@ _llint_op_get_from_scope:
dispatch(8)
.gGlobalVar:
- bineq t0, GlobalVar, .gClosureVar
- getGlobalVar()
+ bineq t0, GlobalVar, .gGlobalLexicalVar
+ getGlobalVar(macro(t) end)
+ dispatch(8)
+
+.gGlobalLexicalVar:
+ bineq t0, GlobalLexicalVar, .gClosureVar
+ getGlobalVar(
+ macro(tag)
+ bieq tag, EmptyValueTag, .gDynamic
+ end)
dispatch(8)
.gClosureVar:
@@ -2335,10 +2246,18 @@ _llint_op_get_from_scope:
dispatch(8)
.gGlobalVarWithVarInjectionChecks:
- bineq t0, GlobalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks
+ bineq t0, GlobalVarWithVarInjectionChecks, .gGlobalLexicalVarWithVarInjectionChecks
varInjectionCheck(.gDynamic)
- loadVariable(2, t2, t1, t0)
- getGlobalVar()
+ getGlobalVar(macro(t) end)
+ dispatch(8)
+
+.gGlobalLexicalVarWithVarInjectionChecks:
+ bineq t0, GlobalLexicalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks
+ varInjectionCheck(.gDynamic)
+ getGlobalVar(
+ macro(tag)
+ bieq tag, EmptyValueTag, .gDynamic
+ end)
dispatch(8)
.gClosureVarWithVarInjectionChecks:
@@ -2349,22 +2268,22 @@ _llint_op_get_from_scope:
dispatch(8)
.gDynamic:
- callSlowPath(_llint_slow_path_get_from_scope)
+ callOpcodeSlowPath(_llint_slow_path_get_from_scope)
dispatch(8)
macro putProperty()
loadisFromInstruction(3, t1)
loadConstantOrVariable(t1, t2, t3)
- loadpFromInstruction(6, t1)
+ loadisFromInstruction(6, t1)
storePropertyAtVariableOffset(t1, t0, t2, t3)
end
-macro putGlobalVar()
+macro putGlobalVariable()
loadisFromInstruction(3, t0)
loadConstantOrVariable(t0, t1, t2)
loadpFromInstruction(5, t3)
- notifyWrite(t3, t1, t2, t0, .pDynamic)
+ notifyWrite(t3, .pDynamic)
loadpFromInstruction(6, t0)
storei t1, TagOffset[t0]
storei t2, PayloadOffset[t0]
@@ -2373,19 +2292,37 @@ end
macro putClosureVar()
loadisFromInstruction(3, t1)
loadConstantOrVariable(t1, t2, t3)
- loadp JSVariableObject::m_registers[t0], t0
- loadpFromInstruction(6, t1)
- storei t2, TagOffset[t0, t1, 8]
- storei t3, PayloadOffset[t0, t1, 8]
+ loadisFromInstruction(6, t1)
+ storei t2, JSEnvironmentRecord_variables + TagOffset[t0, t1, 8]
+ storei t3, JSEnvironmentRecord_variables + PayloadOffset[t0, t1, 8]
+end
+
+macro putLocalClosureVar()
+ loadisFromInstruction(3, t1)
+ loadConstantOrVariable(t1, t2, t3)
+ loadpFromInstruction(5, t5)
+ btpz t5, .noVariableWatchpointSet
+ notifyWrite(t5, .pDynamic)
+.noVariableWatchpointSet:
+ loadisFromInstruction(6, t1)
+ storei t2, JSEnvironmentRecord_variables + TagOffset[t0, t1, 8]
+ storei t3, JSEnvironmentRecord_variables + PayloadOffset[t0, t1, 8]
end
_llint_op_put_to_scope:
traceExecution()
loadisFromInstruction(4, t0)
- andi ResolveModeMask, t0
+ andi ResolveTypeMask, t0
-#pGlobalProperty:
+#pLocalClosureVar:
+ bineq t0, LocalClosureVar, .pGlobalProperty
+ writeBarrierOnOperands(1, 3)
+ loadVariable(1, t2, t1, t0)
+ putLocalClosureVar()
+ dispatch(7)
+
+.pGlobalProperty:
bineq t0, GlobalProperty, .pGlobalVar
writeBarrierOnOperands(1, 3)
loadWithStructureCheck(1, .pDynamic)
@@ -2393,9 +2330,15 @@ _llint_op_put_to_scope:
dispatch(7)
.pGlobalVar:
- bineq t0, GlobalVar, .pClosureVar
+ bineq t0, GlobalVar, .pGlobalLexicalVar
writeBarrierOnGlobalObject(3)
- putGlobalVar()
+ putGlobalVariable()
+ dispatch(7)
+
+.pGlobalLexicalVar:
+ bineq t0, GlobalLexicalVar, .pClosureVar
+ writeBarrierOnGlobalLexicalEnvironment(3)
+ putGlobalVariable()
dispatch(7)
.pClosureVar:
@@ -2413,20 +2356,179 @@ _llint_op_put_to_scope:
dispatch(7)
.pGlobalVarWithVarInjectionChecks:
- bineq t0, GlobalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks
+ bineq t0, GlobalVarWithVarInjectionChecks, .pGlobalLexicalVarWithVarInjectionChecks
writeBarrierOnGlobalObject(3)
varInjectionCheck(.pDynamic)
- putGlobalVar()
+ putGlobalVariable()
+ dispatch(7)
+
+.pGlobalLexicalVarWithVarInjectionChecks:
+ bineq t0, GlobalLexicalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks
+ writeBarrierOnGlobalLexicalEnvironment(3)
+ varInjectionCheck(.pDynamic)
+ putGlobalVariable()
dispatch(7)
.pClosureVarWithVarInjectionChecks:
- bineq t0, ClosureVarWithVarInjectionChecks, .pDynamic
+ bineq t0, ClosureVarWithVarInjectionChecks, .pModuleVar
writeBarrierOnOperands(1, 3)
varInjectionCheck(.pDynamic)
loadVariable(1, t2, t1, t0)
putClosureVar()
dispatch(7)
+.pModuleVar:
+ bineq t0, ModuleVar, .pDynamic
+ callOpcodeSlowPath(_slow_path_throw_strict_mode_readonly_property_write_error)
+ dispatch(7)
+
.pDynamic:
- callSlowPath(_llint_slow_path_put_to_scope)
+ callOpcodeSlowPath(_llint_slow_path_put_to_scope)
dispatch(7)
+
+
+_llint_op_get_from_arguments:
+ traceExecution()
+ loadisFromInstruction(2, t0)
+ loadi PayloadOffset[cfr, t0, 8], t0
+ loadi 12[PC], t1
+ loadi DirectArguments_storage + TagOffset[t0, t1, 8], t2
+ loadi DirectArguments_storage + PayloadOffset[t0, t1, 8], t3
+ loadisFromInstruction(1, t1)
+ valueProfile(t2, t3, 16, t0)
+ storei t2, TagOffset[cfr, t1, 8]
+ storei t3, PayloadOffset[cfr, t1, 8]
+ dispatch(5)
+
+
+_llint_op_put_to_arguments:
+ traceExecution()
+ writeBarrierOnOperands(1, 3)
+ loadisFromInstruction(1, t0)
+ loadi PayloadOffset[cfr, t0, 8], t0
+ loadisFromInstruction(3, t1)
+ loadConstantOrVariable(t1, t2, t3)
+ loadi 8[PC], t1
+ storei t2, DirectArguments_storage + TagOffset[t0, t1, 8]
+ storei t3, DirectArguments_storage + PayloadOffset[t0, t1, 8]
+ dispatch(4)
+
+
+_llint_op_get_parent_scope:
+ traceExecution()
+ loadisFromInstruction(2, t0)
+ loadp PayloadOffset[cfr, t0, 8], t0
+ loadp JSScope::m_next[t0], t0
+ loadisFromInstruction(1, t1)
+ storei CellTag, TagOffset[cfr, t1, 8]
+ storei t0, PayloadOffset[cfr, t1, 8]
+ dispatch(3)
+
+
+_llint_op_profile_type:
+ traceExecution()
+ loadp CodeBlock[cfr], t1
+ loadp CodeBlock::m_vm[t1], t1
+ # t1 is holding the pointer to the typeProfilerLog.
+ loadp VM::m_typeProfilerLog[t1], t1
+
+ # t0 is holding the payload, t5 is holding the tag.
+ loadisFromInstruction(1, t2)
+ loadConstantOrVariable(t2, t5, t0)
+
+ bieq t5, EmptyValueTag, .opProfileTypeDone
+
+ # t2 is holding the pointer to the current log entry.
+ loadp TypeProfilerLog::m_currentLogEntryPtr[t1], t2
+
+ # Store the JSValue onto the log entry.
+ storei t5, TypeProfilerLog::LogEntry::value + TagOffset[t2]
+ storei t0, TypeProfilerLog::LogEntry::value + PayloadOffset[t2]
+
+ # Store the TypeLocation onto the log entry.
+ loadpFromInstruction(2, t3)
+ storep t3, TypeProfilerLog::LogEntry::location[t2]
+
+ bieq t5, CellTag, .opProfileTypeIsCell
+ storei 0, TypeProfilerLog::LogEntry::structureID[t2]
+ jmp .opProfileTypeSkipIsCell
+.opProfileTypeIsCell:
+ loadi JSCell::m_structureID[t0], t3
+ storei t3, TypeProfilerLog::LogEntry::structureID[t2]
+.opProfileTypeSkipIsCell:
+
+ # Increment the current log entry.
+ addp sizeof TypeProfilerLog::LogEntry, t2
+ storep t2, TypeProfilerLog::m_currentLogEntryPtr[t1]
+
+ loadp TypeProfilerLog::m_logEndPtr[t1], t1
+ bpneq t2, t1, .opProfileTypeDone
+ callOpcodeSlowPath(_slow_path_profile_type_clear_log)
+
+.opProfileTypeDone:
+ dispatch(6)
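
op_profile_type appends a (value, location, structureID) record to a log buffer and calls a slow path to flush it once the cursor reaches the end. Schematically, in C++ with invented names:

    #include <cstdint>

    struct LogEntry { uint64_t value; void* location; uint32_t structureID; };

    struct TypeProfilerLog {
        LogEntry* current;
        LogEntry* end;
        void (*flush)(TypeProfilerLog&);   // stands in for the clear-log slow path

        void record(uint64_t value, void* location, uint32_t structureID)
        {
            *current = { value, location, structureID };
            if (++current == end)
                flush(*this);              // the slow path resets 'current' to the start
        }
    };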
+
+
+_llint_op_profile_control_flow:
+ traceExecution()
+ loadpFromInstruction(1, t0)
+ loadi BasicBlockLocation::m_executionCount[t0], t1
+ addi 1, t1
+ bieq t1, 0, .done # We overflowed.
+ storei t1, BasicBlockLocation::m_executionCount[t0]
+.done:
+ dispatch(2)
+
+
+_llint_op_get_rest_length:
+ traceExecution()
+ loadi PayloadOffset + ArgumentCount[cfr], t0
+ subi 1, t0
+ loadisFromInstruction(2, t1)
+ bilteq t0, t1, .storeZero
+ subi t1, t0
+ jmp .finish
+.storeZero:
+ move 0, t0
+.finish:
+ loadisFromInstruction(1, t1)
+ storei t0, PayloadOffset[cfr, t1, 8]
+ storei Int32Tag, TagOffset[cfr, t1, 8]
+ dispatch(3)
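
The rest-parameter length above is just saturating arithmetic on the call's argument count; the equivalent C++ is:

    // numParametersToSkip comes from the instruction stream (operand 2 above).
    unsigned restLength(unsigned argumentCountIncludingThis, unsigned numParametersToSkip)
    {
        unsigned args = argumentCountIncludingThis - 1;   // drop |this|
        return args > numParametersToSkip ? args - numParametersToSkip : 0;
    }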
+
+
+_llint_op_log_shadow_chicken_prologue:
+ traceExecution()
+ acquireShadowChickenPacket(.opLogShadowChickenPrologueSlow)
+ storep cfr, ShadowChicken::Packet::frame[t0]
+ loadp CallerFrame[cfr], t1
+ storep t1, ShadowChicken::Packet::callerFrame[t0]
+ loadp Callee + PayloadOffset[cfr], t1
+ storep t1, ShadowChicken::Packet::callee[t0]
+ loadisFromInstruction(1, t1)
+ loadi PayloadOffset[cfr, t1, 8], t1
+ storep t1, ShadowChicken::Packet::scope[t0]
+ dispatch(2)
+.opLogShadowChickenPrologueSlow:
+ callOpcodeSlowPath(_llint_slow_path_log_shadow_chicken_prologue)
+ dispatch(2)
+
+
+_llint_op_log_shadow_chicken_tail:
+ traceExecution()
+ acquireShadowChickenPacket(.opLogShadowChickenTailSlow)
+ storep cfr, ShadowChicken::Packet::frame[t0]
+ storep ShadowChickenTailMarker, ShadowChicken::Packet::callee[t0]
+ loadVariable(1, t3, t2, t1)
+ storei t2, TagOffset + ShadowChicken::Packet::thisValue[t0]
+ storei t1, PayloadOffset + ShadowChicken::Packet::thisValue[t0]
+ loadisFromInstruction(2, t1)
+ loadi PayloadOffset[cfr, t1, 8], t1
+ storep t1, ShadowChicken::Packet::scope[t0]
+ loadp CodeBlock[cfr], t1
+ storep t1, ShadowChicken::Packet::codeBlock[t0]
+ storei PC, ShadowChicken::Packet::callSiteIndex[t0]
+ dispatch(3)
+.opLogShadowChickenTailSlow:
+ callOpcodeSlowPath(_llint_slow_path_log_shadow_chicken_tail)
+ dispatch(3)
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
index abfec65a4..0881d3721 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
@@ -1,4 +1,4 @@
-# Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+# Copyright (C) 2011-2016 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -46,225 +46,294 @@ macro dispatchAfterCall()
loadp CodeBlock[cfr], PB
loadp CodeBlock::m_instructions[PB], PB
loadisFromInstruction(1, t1)
- storeq t0, [cfr, t1, 8]
- valueProfile(t0, 7, t2)
- dispatch(8)
+ storeq r0, [cfr, t1, 8]
+ valueProfile(r0, (CallOpCodeSize - 1), t3)
+ dispatch(CallOpCodeSize)
end
-macro cCall2(function, arg1, arg2)
- if X86_64
- move arg1, t5
- move arg2, t4
+macro cCall2(function)
+ checkStackPointerAlignment(t4, 0xbad0c002)
+ if X86_64 or ARM64
call function
- elsif ARM64
- move arg1, t0
- move arg2, t1
+ elsif X86_64_WIN
+ # Note: this implementation is only correct if the return type size is > 8 bytes.
+ # See macro cCall2Void for an implementation when the return type <= 8 bytes.
+ # On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value.
+    # On entry, rcx (a0) should contain a pointer to this stack space. The other parameters are shifted to the right,
+ # rdx (a1) should contain the first argument, and r8 (a2) should contain the second argument.
+ # On return, rax contains a pointer to this stack value, and we then need to copy the 16 byte return value into rax (r0) and rdx (r1)
+ # since the return value is expected to be split between the two.
+ # See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx
+ move a1, a2
+ move a0, a1
+ subp 48, sp
+ move sp, a0
+ addp 32, a0
call function
+ addp 48, sp
+ move 8[r0], r1
+ move [r0], r0
elsif C_LOOP
- cloopCallSlowPath function, arg1, arg2
+ cloopCallSlowPath function, a0, a1
else
error
end
end
+macro cCall2Void(function)
+ if C_LOOP
+ cloopCallSlowPathVoid function, a0, a1
+ elsif X86_64_WIN
+ # Note: we cannot use the cCall2 macro for Win64 in this case,
+    # as the Win64 cCall2 implementation is only correct when the return type size is > 8 bytes.
+ # On Win64, rcx and rdx are used for passing the first two parameters.
+ # We also need to make room on the stack for all four parameter registers.
+ # See http://msdn.microsoft.com/en-us/library/ms235286.aspx
+ subp 32, sp
+ call function
+ addp 32, sp
+ else
+ cCall2(function)
+ end
+end
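
The Win64 comments above describe the MS x64 convention for aggregate returns larger than 8 bytes: the caller passes a hidden pointer to space it allocated, the named arguments shift right by one register, and the callee hands that pointer back in rax. The LLInt slow paths effectively return such a two-word pair, which is what r0/r1 hold after cCall2. A C++ illustration of the shape of such a call (illustrative types, not the actual slow-path return type):

    #include <cstdint>

    // A two-word result, as the slow paths return (r0/r1 in the asm).
    struct TwoWordResult { void* first; void* second; };

    // On Win64 the compiler passes a hidden pointer to a TwoWordResult as the
    // first argument; cCall2 reproduces that by hand, which is why it reserves
    // stack space and shuffles a0/a1 before the call.
    TwoWordResult exampleSlowPath(void* callFrame, void* pc)
    {
        return { pc, callFrame };
    }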
+
# This barely works. arg3 and arg4 should probably be immediates.
-macro cCall4(function, arg1, arg2, arg3, arg4)
- if X86_64
- move arg1, t5
- move arg2, t4
- move arg3, t1
- move arg4, t2
+macro cCall4(function)
+ checkStackPointerAlignment(t4, 0xbad0c004)
+ if X86_64 or ARM64
call function
- elsif ARM64
- move arg1, t0
- move arg2, t1
- move arg3, t2
- move arg4, t3
+ elsif X86_64_WIN
+ # On Win64, rcx, rdx, r8, and r9 are used for passing the first four parameters.
+ # We also need to make room on the stack for all four parameter registers.
+ # See http://msdn.microsoft.com/en-us/library/ms235286.aspx
+ subp 64, sp
call function
- elsif C_LOOP
- error
+ addp 64, sp
else
error
end
end
-macro functionPrologue(extraStackSpace)
- if X86_64
- push cfr
- move sp, cfr
- elsif ARM64
- pushLRAndFP
- end
- pushCalleeSaves
- if X86_64
- subp extraStackSpace, sp
- end
-end
+macro doVMEntry(makeCall)
+ functionPrologue()
+ pushCalleeSaves()
-macro functionEpilogue(extraStackSpace)
- if X86_64
- addp extraStackSpace, sp
- end
- popCalleeSaves
- if X86_64
- pop cfr
- elsif ARM64
- popLRAndFP
- end
-end
+ const entry = a0
+ const vm = a1
+ const protoCallFrame = a2
+
+ vmEntryRecord(cfr, sp)
+
+ checkStackPointerAlignment(t4, 0xbad0dc01)
+
+ storep vm, VMEntryRecord::m_vm[sp]
+ loadp VM::topCallFrame[vm], t4
+ storep t4, VMEntryRecord::m_prevTopCallFrame[sp]
+ loadp VM::topVMEntryFrame[vm], t4
+ storep t4, VMEntryRecord::m_prevTopVMEntryFrame[sp]
-macro doCallToJavaScript(makeCall, doReturn)
- if X86_64
- const entry = t5
- const vmTopCallFrame = t4
- const protoCallFrame = t1
- const topOfStack = t2
-
- const extraStackSpace = 8
- const previousCFR = t0
- const previousPC = t6
- const temp1 = t0
- const temp2 = t3
- const temp3 = t6
- elsif ARM64
- const entry = a0
- const vmTopCallFrame = a1
- const protoCallFrame = a2
- const topOfStack = a3
-
- const extraStackSpace = 0
- const previousCFR = t4
- const previousPC = lr
- const temp1 = t3
- const temp2 = t5
- const temp3 = t6
+ loadi ProtoCallFrame::paddedArgCount[protoCallFrame], t4
+ addp CallFrameHeaderSlots, t4, t4
+ lshiftp 3, t4
+ subp sp, t4, t3
+
+ # Ensure that we have enough additional stack capacity for the incoming args,
+ # and the frame for the JS code we're executing. We need to do this check
+ # before we start copying the args from the protoCallFrame below.
+ if C_LOOP
+ bpaeq t3, VM::m_cloopStackLimit[vm], .stackHeightOK
+ else
+ bpaeq t3, VM::m_softStackLimit[vm], .stackHeightOK
end
- functionPrologue(extraStackSpace)
-
- move topOfStack, cfr
- subp (CallFrameHeaderSlots-1)*8, cfr
- storep 0, ArgumentCount[cfr]
- storep vmTopCallFrame, Callee[cfr]
- loadp [vmTopCallFrame], temp1
- storep temp1, ScopeChain[cfr]
- storep 1, CodeBlock[cfr]
- if X86_64
- loadp 7*8[sp], previousPC
- loadp 6*8[sp], previousCFR
+ if C_LOOP
+ move entry, t4
+ move vm, t5
+ cloopCallSlowPath _llint_stack_check_at_vm_entry, vm, t3
+ bpeq t0, 0, .stackCheckFailed
+ move t4, entry
+ move t5, vm
+ jmp .stackHeightOK
+
+.stackCheckFailed:
+ move t4, entry
+ move t5, vm
end
- storep previousPC, ReturnPC[cfr]
- storep previousCFR, CallerFrame[cfr]
- move cfr, temp1
- loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp2
- addp CallFrameHeaderSlots, temp2, temp2
- lshiftp 3, temp2
- subp temp2, cfr
- storep temp1, CallerFrame[cfr]
+ move vm, a0
+ move protoCallFrame, a1
+ cCall2(_llint_throw_stack_overflow_error)
+
+ vmEntryRecord(cfr, t4)
- move 5, temp1
+ loadp VMEntryRecord::m_vm[t4], vm
+ loadp VMEntryRecord::m_prevTopCallFrame[t4], extraTempReg
+ storep extraTempReg, VM::topCallFrame[vm]
+ loadp VMEntryRecord::m_prevTopVMEntryFrame[t4], extraTempReg
+ storep extraTempReg, VM::topVMEntryFrame[vm]
+
+ subp cfr, CalleeRegisterSaveSize, sp
+
+ popCalleeSaves()
+ functionEpilogue()
+ ret
+
+.stackHeightOK:
+ move t3, sp
+ move 4, t3
.copyHeaderLoop:
- subi 1, temp1
- loadp [protoCallFrame, temp1, 8], temp3
- storep temp3, CodeBlock[cfr, temp1, 8]
- btinz temp1, .copyHeaderLoop
-
- loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], temp2
- subi 1, temp2
- loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp3
- subi 1, temp3
-
- bieq temp2, temp3, .copyArgs
- move ValueUndefined, temp1
+ # Copy the CodeBlock/Callee/ArgumentCount/|this| from protoCallFrame into the callee frame.
+ subi 1, t3
+ loadq [protoCallFrame, t3, 8], extraTempReg
+ storeq extraTempReg, CodeBlock[sp, t3, 8]
+ btinz t3, .copyHeaderLoop
+
+ loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], t4
+ subi 1, t4
+ loadi ProtoCallFrame::paddedArgCount[protoCallFrame], extraTempReg
+ subi 1, extraTempReg
+
+ bieq t4, extraTempReg, .copyArgs
+ move ValueUndefined, t3
.fillExtraArgsLoop:
- subi 1, temp3
- storep temp1, ThisArgumentOffset+8[cfr, temp3, 8]
- bineq temp2, temp3, .fillExtraArgsLoop
+ subi 1, extraTempReg
+ storeq t3, ThisArgumentOffset + 8[sp, extraTempReg, 8]
+ bineq t4, extraTempReg, .fillExtraArgsLoop
.copyArgs:
- loadp ProtoCallFrame::args[protoCallFrame], temp1
+ loadp ProtoCallFrame::args[protoCallFrame], t3
.copyArgsLoop:
- btiz temp2, .copyArgsDone
- subi 1, temp2
- loadp [temp1, temp2, 8], temp3
- storep temp3, ThisArgumentOffset+8[cfr, temp2, 8]
+ btiz t4, .copyArgsDone
+ subi 1, t4
+ loadq [t3, t4, 8], extraTempReg
+ storeq extraTempReg, ThisArgumentOffset + 8[sp, t4, 8]
jmp .copyArgsLoop
.copyArgsDone:
- storep cfr, [vmTopCallFrame]
+ if ARM64
+ move sp, t4
+ storep t4, VM::topCallFrame[vm]
+ else
+ storep sp, VM::topCallFrame[vm]
+ end
+ storep cfr, VM::topVMEntryFrame[vm]
- move 0xffff000000000000, csr1
- addp 2, csr1, csr2
+ checkStackPointerAlignment(extraTempReg, 0xbad0dc02)
- makeCall(entry, temp1)
+ makeCall(entry, t3)
- bpeq CodeBlock[cfr], 1, .calleeFramePopped
- loadp CallerFrame[cfr], cfr
+ # We may have just made a call into a JS function, so we can't rely on sp
+ # for anything but the fact that our own locals (ie the VMEntryRecord) are
+ # not below it. It also still has to be aligned, though.
+ checkStackPointerAlignment(t2, 0xbad0dc03)
+
+ vmEntryRecord(cfr, t4)
-.calleeFramePopped:
- loadp Callee[cfr], temp2 # VM.topCallFrame
- loadp ScopeChain[cfr], temp3
- storep temp3, [temp2]
+ loadp VMEntryRecord::m_vm[t4], vm
+ loadp VMEntryRecord::m_prevTopCallFrame[t4], t2
+ storep t2, VM::topCallFrame[vm]
+ loadp VMEntryRecord::m_prevTopVMEntryFrame[t4], t2
+ storep t2, VM::topVMEntryFrame[vm]
- doReturn(extraStackSpace)
+ subp cfr, CalleeRegisterSaveSize, sp
+
+ popCalleeSaves()
+ functionEpilogue()
+
+ ret
end
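
doVMEntry sizes the callee frame from the proto call frame before copying anything: header slots plus padded argument count, times eight bytes, checked against the stack limit. A small C++ sketch of that computation (the constants are placeholders, not the real ABI definitions):

    #include <cstdint>
    #include <cstddef>

    constexpr size_t CallFrameHeaderSlots = 5;   // illustrative value

    struct ProtoCallFrameLite {
        uint32_t paddedArgCount;   // argument count rounded up for stack alignment
    };

    // Returns true when entry may proceed (.stackHeightOK), false when the
    // interpreter would instead throw a stack-overflow error.
    bool frameFits(uintptr_t sp, uintptr_t stackLimit, const ProtoCallFrameLite& proto)
    {
        size_t bytesNeeded = (proto.paddedArgCount + CallFrameHeaderSlots) * sizeof(uint64_t);
        return sp - bytesNeeded >= stackLimit;   // bpaeq t3, VM::m_softStackLimit[vm], ...
    }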
+
macro makeJavaScriptCall(entry, temp)
- call entry
+ addp 16, sp
+ if C_LOOP
+ cloopCallJSFunction entry
+ else
+ call entry
+ end
+ subp 16, sp
end
+
macro makeHostFunctionCall(entry, temp)
move entry, temp
- if X86_64
- move cfr, t5
- elsif ARM64 or C_LOOP
- move cfr, a0
+ storep cfr, [sp]
+ move sp, a0
+ if C_LOOP
+ storep lr, 8[sp]
+ cloopCallNative temp
+ elsif X86_64_WIN
+ # We need to allocate 32 bytes on the stack for the shadow space.
+ subp 32, sp
+ call temp
+ addp 32, sp
+ else
+ call temp
end
- call temp
end
-macro doReturnFromJavaScript(extraStackSpace)
-_returnFromJavaScript:
- functionEpilogue(extraStackSpace)
- ret
-end
-macro doReturnFromHostFunction(extraStackSpace)
- functionEpilogue(extraStackSpace)
+_handleUncaughtException:
+ loadp Callee[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_vm[t3], t3
+ restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(t3, t0)
+ loadp VM::callFrameForCatch[t3], cfr
+ storep 0, VM::callFrameForCatch[t3]
+
+ loadp CallerFrame[cfr], cfr
+ vmEntryRecord(cfr, t2)
+
+ loadp VMEntryRecord::m_vm[t2], t3
+ loadp VMEntryRecord::m_prevTopCallFrame[t2], extraTempReg
+ storep extraTempReg, VM::topCallFrame[t3]
+ loadp VMEntryRecord::m_prevTopVMEntryFrame[t2], extraTempReg
+ storep extraTempReg, VM::topVMEntryFrame[t3]
+
+ subp cfr, CalleeRegisterSaveSize, sp
+
+ popCalleeSaves()
+ functionEpilogue()
ret
-end
+
macro prepareStateForCCall()
leap [PB, PC, 8], PC
- move PB, t3
end
macro restoreStateAfterCCall()
- move t0, PC
- move t1, cfr
- move t3, PB
+ move r0, PC
subp PB, PC
rshiftp 3, PC
end
macro callSlowPath(slowPath)
prepareStateForCCall()
- cCall2(slowPath, cfr, PC)
+ move cfr, a0
+ move PC, a1
+ cCall2(slowPath)
restoreStateAfterCCall()
end
macro traceOperand(fromWhere, operand)
prepareStateForCCall()
- cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand)
+ move fromWhere, a2
+ move operand, a3
+ move cfr, a0
+ move PC, a1
+ cCall4(_llint_trace_operand)
restoreStateAfterCCall()
end
macro traceValue(fromWhere, operand)
prepareStateForCCall()
- cCall4(_llint_trace_value, cfr, PC, fromWhere, operand)
+ move fromWhere, a2
+ move operand, a3
+ move cfr, a0
+ move PC, a1
+ cCall4(_llint_trace_value)
restoreStateAfterCCall()
end
@@ -272,18 +341,19 @@ end
macro callCallSlowPath(slowPath, action)
storei PC, ArgumentCount + TagOffset[cfr]
prepareStateForCCall()
- cCall2(slowPath, cfr, PC)
- move t1, cfr
- action(t0)
+ move cfr, a0
+ move PC, a1
+ cCall2(slowPath)
+ action(r0, r1)
end
macro callWatchdogTimerHandler(throwHandler)
storei PC, ArgumentCount + TagOffset[cfr]
prepareStateForCCall()
- cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC)
- move t1, cfr
- btpnz t0, throwHandler
- move t3, PB
+ move cfr, a0
+ move PC, a1
+ cCall2(_llint_slow_path_handle_watchdog_timer)
+ btpnz r0, throwHandler
loadi ArgumentCount + TagOffset[cfr], PC
end
@@ -293,12 +363,13 @@ macro checkSwitchToJITForLoop()
macro()
storei PC, ArgumentCount + TagOffset[cfr]
prepareStateForCCall()
- cCall2(_llint_loop_osr, cfr, PC)
- move t1, cfr
- btpz t0, .recover
- jmp t0
+ move cfr, a0
+ move PC, a1
+ cCall2(_llint_loop_osr)
+ btpz r0, .recover
+ move r1, sp
+ jmp r0
.recover:
- move t3, PB
loadi ArgumentCount + TagOffset[cfr], PC
end)
end
@@ -332,50 +403,63 @@ macro loadConstantOrVariableCell(index, value, slow)
end
macro writeBarrierOnOperand(cellOperand)
- if GGC
- loadisFromInstruction(cellOperand, t1)
- loadConstantOrVariableCell(t1, t2, .writeBarrierDone)
- checkMarkByte(t2, t1, t3,
- macro(marked)
- btbz marked, .writeBarrierDone
- push PB, PC
- cCall2(_llint_write_barrier_slow, cfr, t2)
- pop PC, PB
- end
- )
- .writeBarrierDone:
- end
+ loadisFromInstruction(cellOperand, t1)
+ loadConstantOrVariableCell(t1, t2, .writeBarrierDone)
+ skipIfIsRememberedOrInEden(
+ t2,
+ macro()
+ push PB, PC
+ move t2, a1 # t2 can be a0 (not on 64 bits, but better safe than sorry)
+ move cfr, a0
+ cCall2Void(_llint_write_barrier_slow)
+ pop PC, PB
+ end)
+.writeBarrierDone:
end
macro writeBarrierOnOperands(cellOperand, valueOperand)
- if GGC
- loadisFromInstruction(valueOperand, t1)
- loadConstantOrVariable(t1, t0)
- btpz t0, .writeBarrierDone
-
- writeBarrierOnOperand(cellOperand)
- .writeBarrierDone:
- end
+ loadisFromInstruction(valueOperand, t1)
+ loadConstantOrVariableCell(t1, t0, .writeBarrierDone)
+ btpz t0, .writeBarrierDone
+
+ writeBarrierOnOperand(cellOperand)
+.writeBarrierDone:
+end
+
+macro writeBarrierOnGlobal(valueOperand, loadHelper)
+ loadisFromInstruction(valueOperand, t1)
+ loadConstantOrVariableCell(t1, t0, .writeBarrierDone)
+ btpz t0, .writeBarrierDone
+
+ loadHelper(t3)
+ skipIfIsRememberedOrInEden(
+ t3,
+ macro()
+ push PB, PC
+ move cfr, a0
+ move t3, a1
+ cCall2Void(_llint_write_barrier_slow)
+ pop PC, PB
+ end
+ )
+.writeBarrierDone:
end
macro writeBarrierOnGlobalObject(valueOperand)
- if GGC
- loadisFromInstruction(valueOperand, t1)
- loadConstantOrVariable(t1, t0)
- btpz t0, .writeBarrierDone
-
- loadp CodeBlock[cfr], t3
- loadp CodeBlock::m_globalObject[t3], t3
- checkMarkByte(t3, t1, t2,
- macro(marked)
- btbz marked, .writeBarrierDone
- push PB, PC
- cCall2(_llint_write_barrier_slow, cfr, t3)
- pop PC, PB
- end
- )
- .writeBarrierDone:
- end
+ writeBarrierOnGlobal(valueOperand,
+ macro(registerToStoreGlobal)
+ loadp CodeBlock[cfr], registerToStoreGlobal
+ loadp CodeBlock::m_globalObject[registerToStoreGlobal], registerToStoreGlobal
+ end)
+end
+
+macro writeBarrierOnGlobalLexicalEnvironment(valueOperand)
+ writeBarrierOnGlobal(valueOperand,
+ macro(registerToStoreGlobal)
+ loadp CodeBlock[cfr], registerToStoreGlobal
+ loadp CodeBlock::m_globalObject[registerToStoreGlobal], registerToStoreGlobal
+ loadp JSGlobalObject::m_globalLexicalEnvironment[registerToStoreGlobal], registerToStoreGlobal
+ end)
end
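
All of the write-barrier macros above follow the same generational pattern: if the mutated cell does not need remembering (it is young, or already in the remembered set), nothing happens; otherwise the slow path records it. A simplified C++ sketch of that filter, with invented cell-state names rather than the real GC state encoding:

    #include <cstdint>

    enum class CellState : uint8_t { Young, OldRemembered, OldUnremembered };   // illustrative

    struct Cell { CellState state; };

    void writeBarrier(Cell* owner, void (*slowPath)(Cell*))
    {
        // skipIfIsRememberedOrInEden: only old, not-yet-remembered cells need work.
        if (owner->state == CellState::OldUnremembered)
            slowPath(owner);   // _llint_write_barrier_slow adds owner to the remembered set
    }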
macro valueProfile(value, operand, scratch)
@@ -383,27 +467,68 @@ macro valueProfile(value, operand, scratch)
storeq value, ValueProfile::m_buckets[scratch]
end
+macro structureIDToStructureWithScratch(structureIDThenStructure, scratch)
+ loadp CodeBlock[cfr], scratch
+ loadp CodeBlock::m_vm[scratch], scratch
+ loadp VM::heap + Heap::m_structureIDTable + StructureIDTable::m_table[scratch], scratch
+ loadp [scratch, structureIDThenStructure, 8], structureIDThenStructure
+end
+
+macro loadStructureWithScratch(cell, structure, scratch)
+ loadi JSCell::m_structureID[cell], structure
+ structureIDToStructureWithScratch(structure, scratch)
+end
+
+macro loadStructureAndClobberFirstArg(cell, structure)
+ loadi JSCell::m_structureID[cell], structure
+ loadp CodeBlock[cfr], cell
+ loadp CodeBlock::m_vm[cell], cell
+ loadp VM::heap + Heap::m_structureIDTable + StructureIDTable::m_table[cell], cell
+ loadp [cell, structure, 8], structure
+end
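
These helpers exist because a cell no longer stores a Structure pointer, only a 32-bit structure ID; the pointer is recovered through a per-VM table. A minimal sketch of that indirection:

    #include <cstdint>

    struct Structure;

    struct StructureIDTable {
        Structure** table;                                        // indexed by structure ID
        Structure* get(uint32_t id) const { return table[id]; }
    };

    struct Cell { uint32_t structureID; };

    Structure* structureOf(const Cell* cell, const StructureIDTable& vmTable)
    {
        // loadi JSCell::m_structureID[cell]; loadp ...m_table[scratch];
        // loadp [scratch, structureID, 8]
        return vmTable.get(cell->structureID);
    }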
# Entrypoints into the interpreter.
# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
-macro functionArityCheck(doneLabel, slow_path)
+macro functionArityCheck(doneLabel, slowPath)
loadi PayloadOffset + ArgumentCount[cfr], t0
biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
prepareStateForCCall()
- cCall2(slow_path, cfr, PC) # This slow_path has a simple protocol: t0 = 0 => no error, t0 != 0 => error
- btiz t0, .isArityFixupNeeded
- move t1, cfr # t1 contains caller frame
+ move cfr, a0
+ move PC, a1
+ cCall2(slowPath) # This slowPath has the protocol: r0 = 0 => no error, r0 != 0 => error
+ btiz r0, .noError
+ move r1, cfr # r1 contains caller frame
jmp _llint_throw_from_slow_path_trampoline
-.isArityFixupNeeded:
+.noError:
+ loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[r1], t1
btiz t1, .continue
+ loadi PayloadOffset + ArgumentCount[cfr], t2
+ addi CallFrameHeaderSlots, t2
- // Move frame up "t1" slots
+ // Check if there are some unaligned slots we can use
+ move t1, t3
+ andi StackAlignmentSlots - 1, t3
+ btiz t3, .noExtraSlot
+ move ValueUndefined, t0
+.fillExtraSlots:
+ storeq t0, [cfr, t2, 8]
+ addi 1, t2
+ bsubinz 1, t3, .fillExtraSlots
+ andi ~(StackAlignmentSlots - 1), t1
+ btiz t1, .continue
+
+.noExtraSlot:
+ // Move frame up t1 slots
negq t1
move cfr, t3
- loadi PayloadOffset + ArgumentCount[cfr], t2
- addi CallFrameHeaderSlots, t2
+ subp CalleeSaveSpaceAsVirtualRegisters * 8, t3
+ addi CalleeSaveSpaceAsVirtualRegisters, t2
+ move t1, t0
+ lshiftp 3, t0
+ addp t0, cfr
+ addp t0, sp
.copyLoop:
loadq [t3], t0
storeq t0, [t3, t1, 8]
@@ -418,9 +543,6 @@ macro functionArityCheck(doneLabel, slow_path)
addp 8, t3
baddinz 1, t2, .fillLoop
- lshiftp 3, t1
- addp t1, cfr
-
.continue:
# Reload CodeBlock and reset PC, since the slow_path clobbered them.
loadp CodeBlock[cfr], t1
@@ -429,11 +551,10 @@ macro functionArityCheck(doneLabel, slow_path)
jmp doneLabel
end
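The rewritten arity check pads the missing-argument fixup to the machine stack alignment: the remainder modulo StackAlignmentSlots is filled with undefined in place (the .fillExtraSlots loop), and only the aligned portion actually slides the frame (the .copyLoop/.fillLoop part). A rough C++ model of the slot arithmetic; the alignment constant here is illustrative, the real value comes from the target ABI:

#include <cstdio>

constexpr int stackAlignmentSlots = 2;   // hypothetical value for the sketch

struct ArityFixupPlan {
    int slotsFilledInPlace;   // undefined values appended to the existing frame
    int slotsFrameIsMovedBy;  // aligned amount the whole frame is shifted up
};

ArityFixupPlan planArityFixup(int paddedStackSpace)
{
    ArityFixupPlan plan;
    plan.slotsFilledInPlace  = paddedStackSpace % stackAlignmentSlots;      // ".fillExtraSlots"
    plan.slotsFrameIsMovedBy = paddedStackSpace - plan.slotsFilledInPlace;  // ".copyLoop"/".fillLoop"
    return plan;
}

int main()
{
    ArityFixupPlan plan = planArityFixup(5);
    std::printf("fill %d slot(s) in place, move frame by %d slot(s)\n",
                plan.slotsFilledInPlace, plan.slotsFrameIsMovedBy);
}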
-
macro branchIfException(label)
- loadp ScopeChain[cfr], t3
+ loadp Callee[cfr], t3
andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+ loadp MarkedBlock::m_vm[t3], t3
btqz VM::m_exception[t3], .noException
jmp label
.noException:
@@ -441,108 +562,88 @@ end
# Instruction implementations
-
_llint_op_enter:
traceExecution()
+ checkStackPointerAlignment(t2, 0xdead00e1)
loadp CodeBlock[cfr], t2 // t2<CodeBlock> = cfr.CodeBlock
loadi CodeBlock::m_numVars[t2], t2 // t2<size_t> = t2<CodeBlock>.m_numVars
+ subq CalleeSaveSpaceAsVirtualRegisters, t2
+ move cfr, t1
+ subq CalleeSaveSpaceAsVirtualRegisters * 8, t1
btiz t2, .opEnterDone
move ValueUndefined, t0
negi t2
sxi2q t2, t2
.opEnterLoop:
- storeq t0, [cfr, t2, 8]
+ storeq t0, [t1, t2, 8]
addq 1, t2
btqnz t2, .opEnterLoop
.opEnterDone:
- callSlowPath(_slow_path_enter)
+ callOpcodeSlowPath(_slow_path_enter)
dispatch(1)
-_llint_op_create_activation:
+_llint_op_get_argument:
traceExecution()
- loadisFromInstruction(1, t0)
- bqneq [cfr, t0, 8], ValueEmpty, .opCreateActivationDone
- callSlowPath(_llint_slow_path_create_activation)
-.opCreateActivationDone:
- dispatch(2)
-
+ loadisFromInstruction(1, t1)
+ loadisFromInstruction(2, t2)
+ loadi PayloadOffset + ArgumentCount[cfr], t0
+ bilteq t0, t2, .opGetArgumentOutOfBounds
+ loadq ThisArgumentOffset[cfr, t2, 8], t0
+ storeq t0, [cfr, t1, 8]
+ valueProfile(t0, 3, t2)
+ dispatch(4)
-_llint_op_init_lazy_reg:
- traceExecution()
- loadisFromInstruction(1, t0)
- storeq ValueEmpty, [cfr, t0, 8]
- dispatch(2)
+.opGetArgumentOutOfBounds:
+ storeq ValueUndefined, [cfr, t1, 8]
+ valueProfile(ValueUndefined, 3, t2)
+ dispatch(4)
-_llint_op_create_arguments:
+_llint_op_argument_count:
traceExecution()
- loadisFromInstruction(1, t0)
- bqneq [cfr, t0, 8], ValueEmpty, .opCreateArgumentsDone
- callSlowPath(_slow_path_create_arguments)
-.opCreateArgumentsDone:
+ loadisFromInstruction(1, t1)
+ loadi PayloadOffset + ArgumentCount[cfr], t0
+ subi 1, t0
+ orq TagTypeNumber, t0
+ storeq t0, [cfr, t1, 8]
dispatch(2)
-_llint_op_create_this:
+_llint_op_get_scope:
traceExecution()
- loadisFromInstruction(2, t0)
- loadp [cfr, t0, 8], t0
- loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_allocator[t0], t1
- loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_structure[t0], t2
- btpz t1, .opCreateThisSlow
- allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow)
+ loadp Callee[cfr], t0
+ loadp JSCallee::m_scope[t0], t0
loadisFromInstruction(1, t1)
storeq t0, [cfr, t1, 8]
- dispatch(4)
-
-.opCreateThisSlow:
- callSlowPath(_slow_path_create_this)
- dispatch(4)
-
-
-_llint_op_get_callee:
- traceExecution()
- loadisFromInstruction(1, t0)
- loadp Callee[cfr], t1
- loadpFromInstruction(2, t2)
- bpneq t1, t2, .opGetCalleeSlow
- storep t1, [cfr, t0, 8]
- dispatch(3)
+ dispatch(2)
-.opGetCalleeSlow:
- callSlowPath(_slow_path_get_callee)
- dispatch(3)
_llint_op_to_this:
traceExecution()
loadisFromInstruction(1, t0)
loadq [cfr, t0, 8], t0
btqnz t0, tagMask, .opToThisSlow
- loadp JSCell::m_structure[t0], t0
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t0], FinalObjectType, .opToThisSlow
+ bbneq JSCell::m_type[t0], FinalObjectType, .opToThisSlow
+ loadStructureWithScratch(t0, t1, t2)
loadpFromInstruction(2, t2)
- bpneq t0, t2, .opToThisSlow
- dispatch(3)
+ bpneq t1, t2, .opToThisSlow
+ dispatch(4)
.opToThisSlow:
- callSlowPath(_slow_path_to_this)
- dispatch(3)
+ callOpcodeSlowPath(_slow_path_to_this)
+ dispatch(4)
-_llint_op_new_object:
+_llint_op_check_tdz:
traceExecution()
- loadpFromInstruction(3, t0)
- loadp ObjectAllocationProfile::m_allocator[t0], t1
- loadp ObjectAllocationProfile::m_structure[t0], t2
- allocateJSObject(t1, t2, t0, t3, .opNewObjectSlow)
- loadisFromInstruction(1, t1)
- storeq t0, [cfr, t1, 8]
- dispatch(4)
+ loadisFromInstruction(1, t0)
+ loadConstantOrVariable(t0, t1)
+ bqneq t1, ValueEmpty, .opNotTDZ
+ callOpcodeSlowPath(_slow_path_throw_tdz_error)
-.opNewObjectSlow:
- callSlowPath(_llint_slow_path_new_object)
- dispatch(4)
+.opNotTDZ:
+ dispatch(2)
_llint_op_mov:
@@ -554,40 +655,6 @@ _llint_op_mov:
dispatch(3)
-macro notifyWrite(set, value, scratch, slow)
- loadb VariableWatchpointSet::m_state[set], scratch
- bieq scratch, IsInvalidated, .done
- bineq scratch, ClearWatchpoint, .overwrite
- storeq value, VariableWatchpointSet::m_inferredValue[set]
- storeb IsWatched, VariableWatchpointSet::m_state[set]
- jmp .done
-
-.overwrite:
- bqeq value, VariableWatchpointSet::m_inferredValue[set], .done
- btbnz VariableWatchpointSet::m_setIsNotEmpty[set], slow
- storeq 0, VariableWatchpointSet::m_inferredValue[set]
- storeb IsInvalidated, VariableWatchpointSet::m_state[set]
-
-.done:
-end
-
-_llint_op_captured_mov:
- traceExecution()
- loadisFromInstruction(2, t1)
- loadConstantOrVariable(t1, t2)
- loadpFromInstruction(3, t0)
- btpz t0, .opCapturedMovReady
- notifyWrite(t0, t2, t1, .opCapturedMovSlow)
-.opCapturedMovReady:
- loadisFromInstruction(1, t0)
- storeq t2, [cfr, t0, 8]
- dispatch(4)
-
-.opCapturedMovSlow:
- callSlowPath(_slow_path_captured_mov)
- dispatch(4)
-
-
_llint_op_not:
traceExecution()
loadisFromInstruction(2, t0)
@@ -600,7 +667,7 @@ _llint_op_not:
dispatch(3)
.opNotSlow:
- callSlowPath(_slow_path_not)
+ callOpcodeSlowPath(_slow_path_not)
dispatch(3)
@@ -617,7 +684,7 @@ macro equalityComparison(integerComparison, slowPath)
dispatch(4)
.slow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(4)
end
@@ -637,11 +704,11 @@ macro equalNullComparison()
loadisFromInstruction(2, t0)
loadq [cfr, t0, 8], t0
btqnz t0, tagMask, .immediate
- loadp JSCell::m_structure[t0], t2
- btbnz Structure::m_typeInfo + TypeInfo::m_flags[t2], MasqueradesAsUndefined, .masqueradesAsUndefined
+ btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined
move 0, t0
jmp .done
.masqueradesAsUndefined:
+ loadStructureWithScratch(t0, t2, t1)
loadp CodeBlock[cfr], t0
loadp CodeBlock::m_globalObject[t0], t0
cpeq Structure::m_globalObject[t2], t0, t0
@@ -692,7 +759,7 @@ macro strictEq(equalityOperation, slowPath)
dispatch(4)
.slow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(4)
end
@@ -719,7 +786,7 @@ macro preOp(arithmeticOperation, slowPath)
dispatch(2)
.slow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(2)
end
@@ -744,10 +811,27 @@ _llint_op_to_number:
btqz t2, tagTypeNumber, .opToNumberSlow
.opToNumberIsImmediate:
storeq t2, [cfr, t1, 8]
- dispatch(3)
+ valueProfile(t2, 3, t0)
+ dispatch(4)
.opToNumberSlow:
- callSlowPath(_slow_path_to_number)
+ callOpcodeSlowPath(_slow_path_to_number)
+ dispatch(4)
+
+
+_llint_op_to_string:
+ traceExecution()
+ loadisFromInstruction(2, t1)
+ loadisFromInstruction(1, t2)
+ loadConstantOrVariable(t1, t0)
+ btqnz t0, tagMask, .opToStringSlow
+ bbneq JSCell::m_type[t0], StringType, .opToStringSlow
+.opToStringIsString:
+ storeq t0, [cfr, t2, 8]
+ dispatch(3)
+
+.opToStringSlow:
+ callOpcodeSlowPath(_slow_path_to_string)
dispatch(3)
@@ -755,22 +839,27 @@ _llint_op_negate:
traceExecution()
loadisFromInstruction(2, t0)
loadisFromInstruction(1, t1)
- loadConstantOrVariable(t0, t2)
- bqb t2, tagTypeNumber, .opNegateNotInt
- btiz t2, 0x7fffffff, .opNegateSlow
- negi t2
- orq tagTypeNumber, t2
- storeq t2, [cfr, t1, 8]
- dispatch(3)
+ loadConstantOrVariable(t0, t3)
+ loadisFromInstruction(3, t2)
+ bqb t3, tagTypeNumber, .opNegateNotInt
+ btiz t3, 0x7fffffff, .opNegateSlow
+ negi t3
+ ori ArithProfileInt, t2
+ orq tagTypeNumber, t3
+ storeisToInstruction(t2, 3)
+ storeq t3, [cfr, t1, 8]
+ dispatch(4)
.opNegateNotInt:
- btqz t2, tagTypeNumber, .opNegateSlow
- xorq 0x8000000000000000, t2
- storeq t2, [cfr, t1, 8]
- dispatch(3)
+ btqz t3, tagTypeNumber, .opNegateSlow
+ xorq 0x8000000000000000, t3
+ ori ArithProfileNumber, t2
+ storeq t3, [cfr, t1, 8]
+ storeisToInstruction(t2, 3)
+ dispatch(4)
.opNegateSlow:
- callSlowPath(_slow_path_negate)
- dispatch(3)
+ callOpcodeSlowPath(_slow_path_negate)
+ dispatch(4)
macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
@@ -782,6 +871,9 @@ macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
bqb t1, tagTypeNumber, .op2NotInt
loadisFromInstruction(1, t2)
integerOperationAndStore(t1, t0, .slow, t2)
+ loadisFromInstruction(4, t1)
+ ori ArithProfileIntInt, t1
+ storeisToInstruction(t1, 4)
dispatch(5)
.op1NotInt:
@@ -791,8 +883,14 @@ macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
btqz t1, tagTypeNumber, .slow
addq tagTypeNumber, t1
fq2d t1, ft1
+ loadisFromInstruction(4, t2)
+ ori ArithProfileNumberNumber, t2
+ storeisToInstruction(t2, 4)
jmp .op1NotIntReady
.op1NotIntOp2Int:
+ loadisFromInstruction(4, t2)
+ ori ArithProfileNumberInt, t2
+ storeisToInstruction(t2, 4)
ci2d t1, ft1
.op1NotIntReady:
loadisFromInstruction(1, t2)
@@ -808,6 +906,9 @@ macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
# First operand is definitely an int, the second is definitely not.
loadisFromInstruction(1, t2)
btqz t1, tagTypeNumber, .slow
+ loadisFromInstruction(4, t3)
+ ori ArithProfileIntNumber, t3
+ storeisToInstruction(t3, 4)
ci2d t0, ft0
addq tagTypeNumber, t1
fq2d t1, ft1
@@ -818,7 +919,7 @@ macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
dispatch(5)
.slow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(5)
end
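The negate and binary arithmetic opcodes above now update an ArithProfile word stored in the instruction stream, OR-ing in which operand kinds were observed (int/int, int/number, and so on) so upper tiers can specialize later. A small C++ sketch of that pattern; the flag names and values are hypothetical stand-ins, not JSC's real ArithProfile bits:

#include <cstdint>

enum ObservedArithFlags : uint32_t {
    ObservedIntInt       = 1u << 0,
    ObservedIntNumber    = 1u << 1,
    ObservedNumberInt    = 1u << 2,
    ObservedNumberNumber = 1u << 3,
};

// The LLInt pattern: load the profile word from the instruction, OR in the
// observation for this execution, store it back (loadisFromInstruction / ori /
// storeisToInstruction in the asm above).
inline void observe(uint32_t& profileWord, bool lhsIsInt, bool rhsIsInt)
{
    if (lhsIsInt && rhsIsInt)
        profileWord |= ObservedIntInt;
    else if (lhsIsInt)
        profileWord |= ObservedIntNumber;
    else if (rhsIsInt)
        profileWord |= ObservedNumberInt;
    else
        profileWord |= ObservedNumberNumber;
}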
@@ -868,7 +969,7 @@ _llint_op_sub:
_llint_op_div:
traceExecution()
- if X86_64
+ if X86_64 or X86_64_WIN
binaryOpCustomStore(
macro (left, right, slow, index)
# Assume t3 is scratchable.
@@ -890,7 +991,7 @@ _llint_op_div:
macro (left, right) divd left, right end,
_slow_path_div)
else
- callSlowPath(_slow_path_div)
+ callOpcodeSlowPath(_slow_path_div)
dispatch(5)
end
@@ -909,7 +1010,7 @@ macro bitOp(operation, slowPath, advance)
dispatch(advance)
.slow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(advance)
end
@@ -946,7 +1047,7 @@ _llint_op_unsigned:
storeq t2, [cfr, t0, 8]
dispatch(3)
.opUnsignedSlow:
- callSlowPath(_slow_path_unsigned)
+ callOpcodeSlowPath(_slow_path_unsigned)
dispatch(3)
@@ -974,47 +1075,44 @@ _llint_op_bitor:
5)
-_llint_op_check_has_instance:
+_llint_op_overrides_has_instance:
traceExecution()
+ loadisFromInstruction(1, t3)
+
loadisFromInstruction(3, t1)
- loadConstantOrVariableCell(t1, t0, .opCheckHasInstanceSlow)
- loadp JSCell::m_structure[t0], t0
- btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsDefaultHasInstance, .opCheckHasInstanceSlow
- dispatch(5)
+ loadConstantOrVariable(t1, t0)
+ loadp CodeBlock[cfr], t2
+ loadp CodeBlock::m_globalObject[t2], t2
+ loadp JSGlobalObject::m_functionProtoHasInstanceSymbolFunction[t2], t2
+ bqneq t0, t2, .opOverridesHasInstanceNotDefaultSymbol
-.opCheckHasInstanceSlow:
- callSlowPath(_llint_slow_path_check_has_instance)
- dispatch(0)
+ loadisFromInstruction(2, t1)
+ loadConstantOrVariable(t1, t0)
+ tbz JSCell::m_flags[t0], ImplementsDefaultHasInstance, t1
+ orq ValueFalse, t1
+ storeq t1, [cfr, t3, 8]
+ dispatch(4)
+
+.opOverridesHasInstanceNotDefaultSymbol:
+ storeq ValueTrue, [cfr, t3, 8]
+ dispatch(4)
-_llint_op_instanceof:
+_llint_op_instanceof_custom:
traceExecution()
- # Actually do the work.
- loadisFromInstruction(3, t0)
- loadisFromInstruction(1, t3)
- loadConstantOrVariableCell(t0, t1, .opInstanceofSlow)
- loadp JSCell::m_structure[t1], t2
- bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opInstanceofSlow
- loadisFromInstruction(2, t0)
- loadConstantOrVariableCell(t0, t2, .opInstanceofSlow)
-
- # Register state: t1 = prototype, t2 = value
- move 1, t0
-.opInstanceofLoop:
- loadp JSCell::m_structure[t2], t2
- loadq Structure::m_prototype[t2], t2
- bqeq t2, t1, .opInstanceofDone
- btqz t2, tagMask, .opInstanceofLoop
+ callOpcodeSlowPath(_llint_slow_path_instanceof_custom)
+ dispatch(5)
- move 0, t0
-.opInstanceofDone:
- orq ValueFalse, t0
- storeq t0, [cfr, t3, 8]
- dispatch(4)
-.opInstanceofSlow:
- callSlowPath(_llint_slow_path_instanceof)
- dispatch(4)
+_llint_op_is_empty:
+ traceExecution()
+ loadisFromInstruction(2, t1)
+ loadisFromInstruction(1, t2)
+ loadConstantOrVariable(t1, t0)
+ cqeq t0, ValueEmpty, t3
+ orq ValueFalse, t3
+ storeq t3, [cfr, t2, 8]
+ dispatch(3)
_llint_op_is_undefined:
@@ -1028,17 +1126,17 @@ _llint_op_is_undefined:
storeq t3, [cfr, t2, 8]
dispatch(3)
.opIsUndefinedCell:
- loadp JSCell::m_structure[t0], t0
- btbnz Structure::m_typeInfo + TypeInfo::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined
+ btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined
move ValueFalse, t1
storeq t1, [cfr, t2, 8]
dispatch(3)
.masqueradesAsUndefined:
+ loadStructureWithScratch(t0, t3, t1)
loadp CodeBlock[cfr], t1
loadp CodeBlock::m_globalObject[t1], t1
- cpeq Structure::m_globalObject[t0], t1, t3
- orq ValueFalse, t3
- storeq t3, [cfr, t2, 8]
+ cpeq Structure::m_globalObject[t3], t1, t0
+ orq ValueFalse, t0
+ storeq t0, [cfr, t2, 8]
dispatch(3)
@@ -1065,18 +1163,33 @@ _llint_op_is_number:
dispatch(3)
-_llint_op_is_string:
+_llint_op_is_cell_with_type:
+ traceExecution()
+ loadisFromInstruction(3, t0)
+ loadisFromInstruction(2, t1)
+ loadisFromInstruction(1, t2)
+ loadConstantOrVariable(t1, t3)
+ btqnz t3, tagMask, .notCellCase
+ cbeq JSCell::m_type[t3], t0, t1
+ orq ValueFalse, t1
+ storeq t1, [cfr, t2, 8]
+ dispatch(4)
+.notCellCase:
+ storeq ValueFalse, [cfr, t2, 8]
+ dispatch(4)
+
+
+_llint_op_is_object:
traceExecution()
loadisFromInstruction(2, t1)
loadisFromInstruction(1, t2)
loadConstantOrVariable(t1, t0)
- btqnz t0, tagMask, .opIsStringNotCell
- loadp JSCell::m_structure[t0], t0
- cbeq Structure::m_typeInfo + TypeInfo::m_type[t0], StringType, t1
+ btqnz t0, tagMask, .opIsObjectNotCell
+ cbaeq JSCell::m_type[t0], ObjectType, t1
orq ValueFalse, t1
storeq t1, [cfr, t2, 8]
dispatch(3)
-.opIsStringNotCell:
+.opIsObjectNotCell:
storeq ValueFalse, [cfr, t2, 8]
dispatch(3)
@@ -1106,50 +1219,60 @@ macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value
storeq value, (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8]
end
-_llint_op_init_global_const:
+_llint_op_get_by_id:
traceExecution()
- writeBarrierOnGlobalObject(2)
- loadisFromInstruction(2, t1)
- loadpFromInstruction(1, t0)
- loadConstantOrVariable(t1, t2)
- storeq t2, [t0]
- dispatch(5)
+ loadisFromInstruction(2, t0)
+ loadConstantOrVariableCell(t0, t3, .opGetByIdSlow)
+ loadi JSCell::m_structureID[t3], t1
+ loadisFromInstruction(4, t2)
+ bineq t2, t1, .opGetByIdSlow
+ loadisFromInstruction(5, t1)
+ loadisFromInstruction(1, t2)
+ loadPropertyAtVariableOffset(t1, t3, t0)
+ storeq t0, [cfr, t2, 8]
+ valueProfile(t0, 8, t1)
+ dispatch(9)
+
+.opGetByIdSlow:
+ callOpcodeSlowPath(_llint_slow_path_get_by_id)
+ dispatch(9)
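The rewritten op_get_by_id is a monomorphic inline cache: the instruction caches an expected structure ID and a property offset, so the fast path is just a structure-ID compare followed by a load from that offset, falling back to the slow path on any mismatch. A simplified C++ sketch of the check, with stand-in types (not JSC's real ones, and the offset is assumed to lie within the sketch object's storage):

#include <cstdint>
#include <optional>

struct FakeObject {
    uint32_t structureID;     // analogous to JSCell::m_structureID
    uint64_t inlineSlots[4];  // property storage for the sketch
};

struct GetByIdCache {
    uint32_t expectedStructureID; // written by the slow path when it caches a hit
    uint32_t offset;              // property offset to load on a hit
};

// Fast path: structure check + direct load. Returns nullopt to mean "take the slow path".
std::optional<uint64_t> tryGetByIdFast(const FakeObject& base, const GetByIdCache& cache)
{
    if (base.structureID != cache.expectedStructureID)
        return std::nullopt;                     // .opGetByIdSlow
    return base.inlineSlots[cache.offset];       // loadPropertyAtVariableOffset
}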
-macro getById(getPropertyStorage)
+_llint_op_get_by_id_proto_load:
traceExecution()
- # We only do monomorphic get_by_id caching for now, and we do not modify the
- # opcode. We do, however, allow for the cache to change anytime if fails, since
- # ping-ponging is free. At best we get lucky and the get_by_id will continue
- # to take fast path on the new cache. At worst we take slow path, which is what
- # we would have been doing anyway.
loadisFromInstruction(2, t0)
- loadpFromInstruction(4, t1)
- loadConstantOrVariableCell(t0, t3, .opGetByIdSlow)
- loadisFromInstruction(5, t2)
- getPropertyStorage(
- t3,
- t0,
- macro (propertyStorage, scratch)
- bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow
- loadisFromInstruction(1, t1)
- loadq [propertyStorage, t2], scratch
- storeq scratch, [cfr, t1, 8]
- valueProfile(scratch, 8, t1)
- dispatch(9)
- end)
-
- .opGetByIdSlow:
- callSlowPath(_llint_slow_path_get_by_id)
- dispatch(9)
-end
+ loadConstantOrVariableCell(t0, t3, .opGetByIdProtoSlow)
+ loadi JSCell::m_structureID[t3], t1
+ loadisFromInstruction(4, t2)
+ bineq t2, t1, .opGetByIdProtoSlow
+ loadisFromInstruction(5, t1)
+ loadpFromInstruction(6, t3)
+ loadisFromInstruction(1, t2)
+ loadPropertyAtVariableOffset(t1, t3, t0)
+ storeq t0, [cfr, t2, 8]
+ valueProfile(t0, 8, t1)
+ dispatch(9)
+
+.opGetByIdProtoSlow:
+ callOpcodeSlowPath(_llint_slow_path_get_by_id)
+ dispatch(9)
-_llint_op_get_by_id:
- getById(withInlineStorage)
+_llint_op_get_by_id_unset:
+ traceExecution()
+ loadisFromInstruction(2, t0)
+ loadConstantOrVariableCell(t0, t3, .opGetByIdUnsetSlow)
+ loadi JSCell::m_structureID[t3], t1
+ loadisFromInstruction(4, t2)
+ bineq t2, t1, .opGetByIdUnsetSlow
+ loadisFromInstruction(1, t2)
+ storeq ValueUndefined, [cfr, t2, 8]
+ valueProfile(ValueUndefined, 8, t1)
+ dispatch(9)
-_llint_op_get_by_id_out_of_line:
- getById(withOutOfLineStorage)
+.opGetByIdUnsetSlow:
+ callOpcodeSlowPath(_llint_slow_path_get_by_id)
+ dispatch(9)
_llint_op_get_array_length:
@@ -1157,7 +1280,7 @@ _llint_op_get_array_length:
loadisFromInstruction(2, t0)
loadpFromInstruction(4, t1)
loadConstantOrVariableCell(t0, t3, .opGetArrayLengthSlow)
- loadp JSCell::m_structure[t3], t2
+ move t3, t2
arrayProfile(t2, t1, t0)
btiz t2, IsArray, .opGetArrayLengthSlow
btiz t2, IndexingShapeMask, .opGetArrayLengthSlow
@@ -1171,121 +1294,176 @@ _llint_op_get_array_length:
dispatch(9)
.opGetArrayLengthSlow:
- callSlowPath(_llint_slow_path_get_by_id)
+ callOpcodeSlowPath(_llint_slow_path_get_by_id)
dispatch(9)
-_llint_op_get_arguments_length:
- traceExecution()
- loadisFromInstruction(2, t0)
- loadisFromInstruction(1, t1)
- btqnz [cfr, t0, 8], .opGetArgumentsLengthSlow
- loadi ArgumentCount + PayloadOffset[cfr], t2
- subi 1, t2
- orq tagTypeNumber, t2
- storeq t2, [cfr, t1, 8]
- dispatch(4)
-
-.opGetArgumentsLengthSlow:
- callSlowPath(_llint_slow_path_get_arguments_length)
- dispatch(4)
-
-
-macro putById(getPropertyStorage)
+_llint_op_put_by_id:
traceExecution()
- writeBarrierOnOperands(1, 3)
loadisFromInstruction(1, t3)
- loadpFromInstruction(4, t1)
loadConstantOrVariableCell(t3, t0, .opPutByIdSlow)
- loadisFromInstruction(3, t2)
- getPropertyStorage(
- t0,
- t3,
- macro (propertyStorage, scratch)
- bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
- loadisFromInstruction(5, t1)
- loadConstantOrVariable(t2, scratch)
- storeq scratch, [propertyStorage, t1]
- dispatch(9)
- end)
-end
+ loadisFromInstruction(4, t2)
+ bineq t2, JSCell::m_structureID[t0], .opPutByIdSlow
-_llint_op_put_by_id:
- putById(withInlineStorage)
+ # At this point, we have:
+ # t2 -> current structure ID
+ # t0 -> object base
-.opPutByIdSlow:
- callSlowPath(_llint_slow_path_put_by_id)
- dispatch(9)
+ loadisFromInstruction(3, t1)
+ loadConstantOrVariable(t1, t3)
+
+ loadpFromInstruction(8, t1)
+
+ # At this point, we have:
+ # t0 -> object base
+ # t1 -> put by id flags
+ # t2 -> current structure ID
+ # t3 -> value to put
+
+ btpnz t1, PutByIdPrimaryTypeMask, .opPutByIdTypeCheckObjectWithStructureOrOther
+
+ # We have one of the non-structure type checks. Find out which one.
+ andp PutByIdSecondaryTypeMask, t1
+ bplt t1, PutByIdSecondaryTypeString, .opPutByIdTypeCheckLessThanString
+
+ # We are one of the following: String, Symbol, Object, ObjectOrOther, Top
+ bplt t1, PutByIdSecondaryTypeObjectOrOther, .opPutByIdTypeCheckLessThanObjectOrOther
+
+ # We are either ObjectOrOther or Top.
+ bpeq t1, PutByIdSecondaryTypeTop, .opPutByIdDoneCheckingTypes
+
+ # Check if we are ObjectOrOther.
+ btqz t3, tagMask, .opPutByIdTypeCheckObject
+.opPutByIdTypeCheckOther:
+ andq ~TagBitUndefined, t3
+ bqeq t3, ValueNull, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckLessThanObjectOrOther:
+ # We are either String, Symbol or Object.
+ btqnz t3, tagMask, .opPutByIdSlow
+ bpeq t1, PutByIdSecondaryTypeObject, .opPutByIdTypeCheckObject
+ bpeq t1, PutByIdSecondaryTypeSymbol, .opPutByIdTypeCheckSymbol
+ bbeq JSCell::m_type[t3], StringType, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+.opPutByIdTypeCheckObject:
+ bbaeq JSCell::m_type[t3], ObjectType, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+.opPutByIdTypeCheckSymbol:
+ bbeq JSCell::m_type[t3], SymbolType, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckLessThanString:
+ # We are one of the following: Bottom, Boolean, Other, Int32, Number
+ bplt t1, PutByIdSecondaryTypeInt32, .opPutByIdTypeCheckLessThanInt32
+
+ # We are either Int32 or Number.
+ bpeq t1, PutByIdSecondaryTypeNumber, .opPutByIdTypeCheckNumber
+
+ bqaeq t3, tagTypeNumber, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckNumber:
+ btqnz t3, tagTypeNumber, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckLessThanInt32:
+ # We are one of the following: Bottom, Boolean, Other.
+ bpneq t1, PutByIdSecondaryTypeBoolean, .opPutByIdTypeCheckBottomOrOther
+ xorq ValueFalse, t3
+ btqz t3, ~1, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckBottomOrOther:
+ bpeq t1, PutByIdSecondaryTypeOther, .opPutByIdTypeCheckOther
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckObjectWithStructureOrOther:
+ btqz t3, tagMask, .opPutByIdTypeCheckObjectWithStructure
+ btpnz t1, PutByIdPrimaryTypeObjectWithStructureOrOther, .opPutByIdTypeCheckOther
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckObjectWithStructure:
+ urshiftp 3, t1
+ bineq t1, JSCell::m_structureID[t3], .opPutByIdSlow
+
+.opPutByIdDoneCheckingTypes:
+ loadisFromInstruction(6, t1)
+
+ btiz t1, .opPutByIdNotTransition
+ # This is the transition case. t1 holds the new structureID. t2 holds the old structure ID.
+ # If we have a chain, we need to check it. t0 is the base. We may clobber t1 to use it as
+ # scratch.
+ loadpFromInstruction(7, t3)
+ btpz t3, .opPutByIdTransitionDirect
-_llint_op_put_by_id_out_of_line:
- putById(withOutOfLineStorage)
+ loadp StructureChain::m_vector[t3], t3
+ assert(macro (ok) btpnz t3, ok end)
+
+ structureIDToStructureWithScratch(t2, t1)
+ loadq Structure::m_prototype[t2], t2
+ bqeq t2, ValueNull, .opPutByIdTransitionChainDone
+.opPutByIdTransitionChainLoop:
+ # At this point, t2 contains a prototype, and [t3] contains the Structure* that we want that
+ # prototype to have. We don't want to have to load the Structure* for t2. Instead, we load
+ # the Structure* from [t3], and then we compare its id to the id in the header of t2.
+ loadp [t3], t1
+ loadi JSCell::m_structureID[t2], t2
+ # Now, t1 has the Structure* and t2 has the StructureID that we want that Structure* to have.
+ bineq t2, Structure::m_blob + StructureIDBlob::u.fields.structureID[t1], .opPutByIdSlow
+ addp 8, t3
+ loadq Structure::m_prototype[t1], t2
+ bqneq t2, ValueNull, .opPutByIdTransitionChainLoop
+.opPutByIdTransitionChainDone:
+ # Reload the new structure, since we clobbered it above.
+ loadisFromInstruction(6, t1)
-macro putByIdTransition(additionalChecks, getPropertyStorage)
- traceExecution()
+.opPutByIdTransitionDirect:
+ storei t1, JSCell::m_structureID[t0]
writeBarrierOnOperand(1)
- loadisFromInstruction(1, t3)
- loadpFromInstruction(4, t1)
- loadConstantOrVariableCell(t3, t0, .opPutByIdSlow)
- loadisFromInstruction(3, t2)
- bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
- additionalChecks(t1, t3)
+ # Reload base into t0
+ loadisFromInstruction(1, t1)
+ loadConstantOrVariable(t1, t0)
+
+.opPutByIdNotTransition:
+ # The only thing live right now is t0, which holds the base.
+ loadisFromInstruction(3, t1)
+ loadConstantOrVariable(t1, t2)
loadisFromInstruction(5, t1)
- getPropertyStorage(
- t0,
- t3,
- macro (propertyStorage, scratch)
- addp t1, propertyStorage, t3
- loadConstantOrVariable(t2, t1)
- storeq t1, [t3]
- loadpFromInstruction(6, t1)
- storep t1, JSCell::m_structure[t0]
- dispatch(9)
- end)
-end
+ storePropertyAtVariableOffset(t1, t0, t2)
+ writeBarrierOnOperands(1, 3)
+ dispatch(9)
-macro noAdditionalChecks(oldStructure, scratch)
-end
+.opPutByIdSlow:
+ callOpcodeSlowPath(_llint_slow_path_put_by_id)
+ dispatch(9)
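The new op_put_by_id fast path first filters the incoming value against an inferred type encoded in the instruction's flags, and for cached transitions it re-validates the prototype chain against a stored StructureChain by comparing structure IDs (the .opPutByIdTransitionChainLoop above). A C++ sketch of that chain verification, using simplified stand-in types rather than JSC's real Structure/StructureChain:

#include <cstdint>
#include <vector>

struct SketchStructure {
    uint32_t structureID;
    struct SketchObject* prototype;   // nullptr plays the role of ValueNull here
};

struct SketchObject {
    SketchStructure* structure;
};

// Each cached chain entry must match the structure of the corresponding prototype;
// the next prototype is taken from the cached structure, as in the asm loop.
bool prototypeChainStillMatches(SketchStructure* oldStructure,
                                const std::vector<SketchStructure*>& cachedChain)
{
    SketchObject* proto = oldStructure->prototype;
    size_t i = 0;
    while (proto) {
        if (i >= cachedChain.size())
            return false;                                      // defensive, for the sketch only
        if (proto->structure->structureID != cachedChain[i]->structureID)
            return false;                                      // .opPutByIdSlow
        proto = cachedChain[i]->prototype;
        ++i;
    }
    return true;                                               // .opPutByIdTransitionChainDone
}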
-macro structureChainChecks(oldStructure, scratch)
- const protoCell = oldStructure # Reusing the oldStructure register for the proto
- loadpFromInstruction(7, scratch)
- assert(macro (ok) btpnz scratch, ok end)
- loadp StructureChain::m_vector[scratch], scratch
- assert(macro (ok) btpnz scratch, ok end)
- bqeq Structure::m_prototype[oldStructure], ValueNull, .done
-.loop:
- loadq Structure::m_prototype[oldStructure], protoCell
- loadp JSCell::m_structure[protoCell], oldStructure
- bpneq oldStructure, [scratch], .opPutByIdSlow
- addp 8, scratch
- bqneq Structure::m_prototype[oldStructure], ValueNull, .loop
-.done:
+macro finishGetByVal(result, scratch)
+ loadisFromInstruction(1, scratch)
+ storeq result, [cfr, scratch, 8]
+ valueProfile(result, 5, scratch)
+ dispatch(6)
end
-_llint_op_put_by_id_transition_direct:
- putByIdTransition(noAdditionalChecks, withInlineStorage)
-
-
-_llint_op_put_by_id_transition_direct_out_of_line:
- putByIdTransition(noAdditionalChecks, withOutOfLineStorage)
-
-
-_llint_op_put_by_id_transition_normal:
- putByIdTransition(structureChainChecks, withInlineStorage)
-
-
-_llint_op_put_by_id_transition_normal_out_of_line:
- putByIdTransition(structureChainChecks, withOutOfLineStorage)
+macro finishIntGetByVal(result, scratch)
+ orq tagTypeNumber, result
+ finishGetByVal(result, scratch)
+end
+macro finishDoubleGetByVal(result, scratch1, scratch2)
+ fd2q result, scratch1
+ subq tagTypeNumber, scratch1
+ finishGetByVal(scratch1, scratch2)
+end
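The new finishers encode results with JSC's 64-bit NaN-boxing scheme as the asm assumes it: an int32 result is OR'd with tagTypeNumber, and a double result is re-encoded by subtracting tagTypeNumber from its raw bits, which modulo 2^64 is the same as adding the double-encode offset 2^48. A short C++ illustration (treat the constant as illustrative of that scheme):

#include <cstdint>
#include <cstring>

constexpr uint64_t tagTypeNumber = 0xffff000000000000ull;

// finishIntGetByVal: a 32-bit int becomes tagTypeNumber | zero-extended int.
uint64_t boxInt32(int32_t value)
{
    return tagTypeNumber | static_cast<uint32_t>(value);
}

// finishDoubleGetByVal: take the raw bits and subtract tagTypeNumber, which is
// equivalent (mod 2^64) to adding the 2^48 double-encode offset.
uint64_t boxDouble(double value)
{
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    return bits - tagTypeNumber;
}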
_llint_op_get_by_val:
traceExecution()
loadisFromInstruction(2, t2)
loadConstantOrVariableCell(t2, t0, .opGetByValSlow)
- loadp JSCell::m_structure[t0], t2
loadpFromInstruction(4, t3)
+ move t0, t2
arrayProfile(t2, t3, t1)
loadisFromInstruction(3, t3)
loadConstantOrVariableInt32(t3, t1, .opGetByValSlow)
@@ -1314,7 +1492,7 @@ _llint_op_get_by_val:
.opGetByValNotDouble:
subi ArrayStorageShape, t2
- bia t2, SlowPutArrayStorageShape - ArrayStorageShape, .opGetByValSlow
+ bia t2, SlowPutArrayStorageShape - ArrayStorageShape, .opGetByValNotIndexedStorage
biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t3], .opGetByValOutOfBounds
loadisFromInstruction(1, t0)
loadq ArrayStorage::m_vector[t3, t1, 8], t2
@@ -1328,64 +1506,78 @@ _llint_op_get_by_val:
.opGetByValOutOfBounds:
loadpFromInstruction(4, t0)
storeb 1, ArrayProfile::m_outOfBounds[t0]
-.opGetByValSlow:
- callSlowPath(_llint_slow_path_get_by_val)
- dispatch(6)
+ jmp .opGetByValSlow
+
+.opGetByValNotIndexedStorage:
+ # First let's check if we even have a typed array. This lets us do some boilerplate up front.
+ loadb JSCell::m_type[t0], t2
+ subi FirstArrayType, t2
+ bia t2, LastArrayType - FirstArrayType, .opGetByValSlow
+
+ # Sweet, now we know that we have a typed array. Do some basic things now.
+ loadp JSArrayBufferView::m_vector[t0], t3
+ biaeq t1, JSArrayBufferView::m_length[t0], .opGetByValSlow
+
+ # Now bisect through the various types. Note that we can treat Uint8ArrayType and
+ # Uint8ClampedArrayType the same.
+ bia t2, Uint8ClampedArrayType - FirstArrayType, .opGetByValAboveUint8ClampedArray
+
+ # We have one of Int8ArrayType .. Uint8ClampedArrayType.
+ bia t2, Int16ArrayType - FirstArrayType, .opGetByValInt32ArrayOrUint8Array
+
+ # We have one of Int8ArrayType or Int16ArrayType
+ bineq t2, Int8ArrayType - FirstArrayType, .opGetByValInt16Array
+
+ # We have Int8ArrayType
+ loadbs [t3, t1], t0
+ finishIntGetByVal(t0, t1)
+.opGetByValInt16Array:
+ loadhs [t3, t1, 2], t0
+ finishIntGetByVal(t0, t1)
-_llint_op_get_argument_by_val:
- # FIXME: At some point we should array profile this. Right now it isn't necessary
- # since the DFG will never turn a get_argument_by_val into a GetByVal.
- traceExecution()
- loadisFromInstruction(2, t0)
- loadisFromInstruction(3, t1)
- btqnz [cfr, t0, 8], .opGetArgumentByValSlow
- loadConstantOrVariableInt32(t1, t2, .opGetArgumentByValSlow)
- addi 1, t2
- loadi ArgumentCount + PayloadOffset[cfr], t1
- biaeq t2, t1, .opGetArgumentByValSlow
- loadisFromInstruction(1, t3)
- loadpFromInstruction(5, t1)
- loadq ThisArgumentOffset[cfr, t2, 8], t0
- storeq t0, [cfr, t3, 8]
- valueProfile(t0, 5, t1)
- dispatch(6)
+.opGetByValInt32ArrayOrUint8Array:
+ # We have one of Int32Array, Uint8Array, or Uint8ClampedArray.
+ bieq t2, Int32ArrayType - FirstArrayType, .opGetByValInt32Array
+
+ # We have either Uint8Array or Uint8ClampedArray. They behave the same so that's cool.
+ loadb [t3, t1], t0
+ finishIntGetByVal(t0, t1)
-.opGetArgumentByValSlow:
- callSlowPath(_llint_slow_path_get_argument_by_val)
- dispatch(6)
+.opGetByValInt32Array:
+ loadi [t3, t1, 4], t0
+ finishIntGetByVal(t0, t1)
+.opGetByValAboveUint8ClampedArray:
+ # We have one of Uint16ArrayType .. Float64ArrayType.
+ bia t2, Uint32ArrayType - FirstArrayType, .opGetByValAboveUint32Array
+
+ # We have either Uint16ArrayType or Uint32ArrayType.
+ bieq t2, Uint32ArrayType - FirstArrayType, .opGetByValUint32Array
-_llint_op_get_by_pname:
- traceExecution()
- loadisFromInstruction(3, t1)
- loadConstantOrVariable(t1, t0)
- loadisFromInstruction(4, t1)
- assertNotConstant(t1)
- bqneq t0, [cfr, t1, 8], .opGetByPnameSlow
- loadisFromInstruction(2, t2)
- loadisFromInstruction(5, t3)
- loadConstantOrVariableCell(t2, t0, .opGetByPnameSlow)
- assertNotConstant(t3)
- loadq [cfr, t3, 8], t1
- loadp JSCell::m_structure[t0], t2
- bpneq t2, JSPropertyNameIterator::m_cachedStructure[t1], .opGetByPnameSlow
- loadisFromInstruction(6, t3)
- loadi PayloadOffset[cfr, t3, 8], t3
- subi 1, t3
- biaeq t3, JSPropertyNameIterator::m_numCacheableSlots[t1], .opGetByPnameSlow
- bilt t3, JSPropertyNameIterator::m_cachedStructureInlineCapacity[t1], .opGetByPnameInlineProperty
- addi firstOutOfLineOffset, t3
- subi JSPropertyNameIterator::m_cachedStructureInlineCapacity[t1], t3
-.opGetByPnameInlineProperty:
- loadPropertyAtVariableOffset(t3, t0, t0)
- loadisFromInstruction(1, t1)
- storeq t0, [cfr, t1, 8]
- dispatch(7)
+ # We have Uint16ArrayType.
+ loadh [t3, t1, 2], t0
+ finishIntGetByVal(t0, t1)
-.opGetByPnameSlow:
- callSlowPath(_llint_slow_path_get_by_pname)
- dispatch(7)
+.opGetByValUint32Array:
+ # This is the hardest part because of large unsigned values.
+ loadi [t3, t1, 4], t0
+ bilt t0, 0, .opGetByValSlow # This case is still awkward to implement in LLInt.
+ finishIntGetByVal(t0, t1)
+
+.opGetByValAboveUint32Array:
+ # We have one of Float32ArrayType or Float64ArrayType. Sadly, we cannot handle Float32Array
+ # inline yet. That would require some offlineasm changes.
+ bieq t2, Float32ArrayType - FirstArrayType, .opGetByValSlow
+
+ # We have Float64ArrayType.
+ loadd [t3, t1, 8], ft0
+ bdnequn ft0, ft0, .opGetByValSlow
+ finishDoubleGetByVal(ft0, t0, t1)
+
+.opGetByValSlow:
+ callOpcodeSlowPath(_llint_slow_path_get_by_val)
+ dispatch(6)
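The typed-array portion of op_get_by_val added above bisects over the element type, loads the element with the right width and signedness, and boxes it; Uint32 values that do not fit in a signed int32, Float32 elements, and NaN doubles still punt to the slow path. A C++ sketch of the per-type load with a plain enum instead of JSC's JSType bytes (index is assumed already bounds-checked, as it is in the asm by that point):

#include <cstdint>
#include <cstring>
#include <optional>

enum class ElemType { Int8, Int16, Int32, Uint8, Uint8Clamped, Uint16, Uint32, Float64 };

// Returns the loaded element, or nullopt when the LLInt would take the slow path.
std::optional<double> loadTypedArrayElement(ElemType type, const uint8_t* vector, uint32_t index)
{
    switch (type) {
    case ElemType::Int8:         return static_cast<int8_t>(vector[index]);
    case ElemType::Uint8:
    case ElemType::Uint8Clamped: return vector[index];
    case ElemType::Int16:  { int16_t v;  std::memcpy(&v, vector + 2 * index, 2); return v; }
    case ElemType::Uint16: { uint16_t v; std::memcpy(&v, vector + 2 * index, 2); return v; }
    case ElemType::Int32:  { int32_t v;  std::memcpy(&v, vector + 4 * index, 4); return v; }
    case ElemType::Uint32: {
        uint32_t v; std::memcpy(&v, vector + 4 * index, 4);
        if (v > 0x7fffffffu)
            return std::nullopt;            // cannot box as int32 -> .opGetByValSlow
        return v;
    }
    case ElemType::Float64: {
        double v; std::memcpy(&v, vector + 8 * index, 8);
        if (v != v)
            return std::nullopt;            // NaN: the bdnequn check falls back to the slow path
        return v;
    }
    }
    return std::nullopt;
}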
macro contiguousPutByVal(storeCallback)
@@ -1404,13 +1596,12 @@ macro contiguousPutByVal(storeCallback)
jmp .storeResult
end
-macro putByVal(holeCheck, slowPath)
+macro putByVal(slowPath)
traceExecution()
- writeBarrierOnOperands(1, 3)
loadisFromInstruction(1, t0)
loadConstantOrVariableCell(t0, t1, .opPutByValSlow)
- loadp JSCell::m_structure[t1], t2
loadpFromInstruction(4, t3)
+ move t1, t2
arrayProfile(t2, t3, t0)
loadisFromInstruction(2, t0)
loadConstantOrVariableInt32(t0, t3, .opPutByValSlow)
@@ -1423,6 +1614,7 @@ macro putByVal(holeCheck, slowPath)
loadConstantOrVariable(operand, scratch)
bpb scratch, tagTypeNumber, .opPutByValSlow
storep scratch, address
+ writeBarrierOnOperands(1, 3)
end)
.opPutByValNotInt32:
@@ -1439,6 +1631,7 @@ macro putByVal(holeCheck, slowPath)
bdnequn ft0, ft0, .opPutByValSlow
.ready:
stored ft0, address
+ writeBarrierOnOperands(1, 3)
end)
.opPutByValNotDouble:
@@ -1447,16 +1640,18 @@ macro putByVal(holeCheck, slowPath)
macro (operand, scratch, address)
loadConstantOrVariable(operand, scratch)
storep scratch, address
+ writeBarrierOnOperands(1, 3)
end)
.opPutByValNotContiguous:
bineq t2, ArrayStorageShape, .opPutByValSlow
biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
- holeCheck(ArrayStorage::m_vector[t0, t3, 8], .opPutByValArrayStorageEmpty)
+ btqz ArrayStorage::m_vector[t0, t3, 8], .opPutByValArrayStorageEmpty
.opPutByValArrayStorageStoreResult:
loadisFromInstruction(3, t2)
loadConstantOrVariable(t2, t1)
storeq t1, ArrayStorage::m_vector[t0, t3, 8]
+ writeBarrierOnOperands(1, 3)
dispatch(5)
.opPutByValArrayStorageEmpty:
@@ -1472,18 +1667,15 @@ macro putByVal(holeCheck, slowPath)
loadpFromInstruction(4, t0)
storeb 1, ArrayProfile::m_outOfBounds[t0]
.opPutByValSlow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(5)
end
_llint_op_put_by_val:
- putByVal(macro(slot, slowPath)
- btqz slot, slowPath
- end, _llint_slow_path_put_by_val)
+ putByVal(_llint_slow_path_put_by_val)
_llint_op_put_by_val_direct:
- putByVal(macro(slot, slowPath)
- end, _llint_slow_path_put_by_val_direct)
+ putByVal(_llint_slow_path_put_by_val_direct)
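Note that the hunks above move writeBarrierOnOperands after the store on each path that actually writes, instead of running it unconditionally up front. The barrier's job is the usual generational one: once a reference has been written into a cell the collector may already have visited, the cell must be remembered for rescanning. A minimal C++ model of that shape, with stand-in types rather than JSC's Heap machinery:

#include <vector>

struct BarrierCell {
    bool marked = false;        // stands in for the per-cell mark/cell-state byte
    const void* slot = nullptr;
};

struct RememberedSet {
    std::vector<BarrierCell*> cells;
    void remember(BarrierCell* cell) { cells.push_back(cell); }  // the slow call
};

void storeWithBarrier(BarrierCell& base, const void* value, RememberedSet& rset)
{
    base.slot = value;          // the actual put_by_val store
    if (base.marked)            // fast check; only already-marked cells take the slow path
        rset.remember(&base);
}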
_llint_op_jmp:
@@ -1503,7 +1695,7 @@ macro jumpTrueOrFalse(conditionOp, slow)
dispatchIntIndirect(2)
.slow:
- callSlowPath(slow)
+ callOpcodeSlowPath(slow)
dispatch(0)
end
@@ -1513,8 +1705,8 @@ macro equalNull(cellHandler, immediateHandler)
assertNotConstant(t0)
loadq [cfr, t0, 8], t0
btqnz t0, tagMask, .immediate
- loadp JSCell::m_structure[t0], t2
- cellHandler(t2, Structure::m_typeInfo + TypeInfo::m_flags[t2], .target)
+ loadStructureWithScratch(t0, t2, t1)
+ cellHandler(t2, JSCell::m_flags[t0], .target)
dispatch(3)
.target:
@@ -1559,9 +1751,10 @@ _llint_op_jneq_ptr:
loadp CodeBlock::m_globalObject[t2], t2
loadp JSGlobalObject::m_specialPointers[t2, t1, 8], t1
bpneq t1, [cfr, t0, 8], .opJneqPtrTarget
- dispatch(4)
+ dispatch(5)
.opJneqPtrTarget:
+ storei 1, 32[PB, PC, 8]
dispatchIntIndirect(3)
@@ -1602,7 +1795,7 @@ macro compare(integerCompare, doubleCompare, slowPath)
dispatchIntIndirect(3)
.slow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(0)
end
@@ -1631,7 +1824,7 @@ _llint_op_switch_imm:
dispatchIntIndirect(2)
.opSwitchImmSlow:
- callSlowPath(_llint_slow_path_switch_imm)
+ callOpcodeSlowPath(_llint_slow_path_switch_imm)
dispatch(0)
@@ -1646,8 +1839,7 @@ _llint_op_switch_char:
loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
addp t3, t2
btqnz t1, tagMask, .opSwitchCharFallThrough
- loadp JSCell::m_structure[t1], t0
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t0], StringType, .opSwitchCharFallThrough
+ bbneq JSCell::m_type[t1], StringType, .opSwitchCharFallThrough
bineq JSString::m_length[t1], 1, .opSwitchCharFallThrough
loadp JSString::m_value[t1], t0
btpz t0, .opSwitchOnRope
@@ -1669,40 +1861,22 @@ _llint_op_switch_char:
dispatchIntIndirect(2)
.opSwitchOnRope:
- callSlowPath(_llint_slow_path_switch_char)
+ callOpcodeSlowPath(_llint_slow_path_switch_char)
dispatch(0)
-_llint_op_new_func:
- traceExecution()
- loadisFromInstruction(3, t2)
- btiz t2, .opNewFuncUnchecked
- loadisFromInstruction(1, t1)
- btqnz [cfr, t1, 8], .opNewFuncDone
-.opNewFuncUnchecked:
- callSlowPath(_llint_slow_path_new_func)
-.opNewFuncDone:
- dispatch(4)
-
-
-_llint_op_new_captured_func:
- traceExecution()
- callSlowPath(_slow_path_new_captured_func)
- dispatch(4)
-
-
macro arrayProfileForCall()
loadisFromInstruction(4, t3)
negp t3
loadq ThisArgumentOffset[cfr, t3, 8], t0
btqnz t0, tagMask, .done
- loadp JSCell::m_structure[t0], t0
- loadpFromInstruction(6, t1)
- storep t0, ArrayProfile::m_lastSeenStructure[t1]
+ loadpFromInstruction((CallOpCodeSize - 2), t1)
+ loadi JSCell::m_structureID[t0], t3
+ storei t3, ArrayProfile::m_lastSeenStructureID[t1]
.done:
end
-macro doCall(slowPath)
+macro doCall(slowPath, prepareCall)
loadisFromInstruction(2, t0)
loadpFromInstruction(5, t1)
loadp LLIntCallLinkInfo::callee[t1], t2
@@ -1712,61 +1886,23 @@ macro doCall(slowPath)
lshifti 3, t3
negp t3
addp cfr, t3
- loadp JSFunction::m_scope[t2], t0
storeq t2, Callee[t3]
- storeq t0, ScopeChain[t3]
loadisFromInstruction(3, t2)
storei PC, ArgumentCount + TagOffset[cfr]
- storeq cfr, CallerFrame[t3]
storei t2, ArgumentCount + PayloadOffset[t3]
- move t3, cfr
- callTargetFunction(t1)
+ move t3, sp
+ prepareCall(LLIntCallLinkInfo::machineCodeTarget[t1], t2, t3, t4)
+ callTargetFunction(LLIntCallLinkInfo::machineCodeTarget[t1])
.opCallSlow:
- slowPathForCall(slowPath)
+ slowPathForCall(slowPath, prepareCall)
end
-
-_llint_op_tear_off_activation:
- traceExecution()
- loadisFromInstruction(1, t0)
- btqz [cfr, t0, 8], .opTearOffActivationNotCreated
- callSlowPath(_llint_slow_path_tear_off_activation)
-.opTearOffActivationNotCreated:
- dispatch(2)
-
-
-_llint_op_tear_off_arguments:
- traceExecution()
- loadisFromInstruction(1, t0)
- addq 1, t0 # Get the unmodifiedArgumentsRegister
- btqz [cfr, t0, 8], .opTearOffArgumentsNotCreated
- callSlowPath(_llint_slow_path_tear_off_arguments)
-.opTearOffArgumentsNotCreated:
- dispatch(3)
-
-
_llint_op_ret:
traceExecution()
checkSwitchToJITForEpilogue()
loadisFromInstruction(1, t2)
- loadConstantOrVariable(t2, t0)
- doReturn()
-
-
-_llint_op_ret_object_or_this:
- traceExecution()
- checkSwitchToJITForEpilogue()
- loadisFromInstruction(1, t2)
- loadConstantOrVariable(t2, t0)
- btqnz t0, tagMask, .opRetObjectOrThisNotObject
- loadp JSCell::m_structure[t0], t2
- bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opRetObjectOrThisNotObject
- doReturn()
-
-.opRetObjectOrThisNotObject:
- loadisFromInstruction(2, t2)
- loadConstantOrVariable(t2, t0)
+ loadConstantOrVariable(t2, r0)
doReturn()
@@ -1776,81 +1912,56 @@ _llint_op_to_primitive:
loadisFromInstruction(1, t3)
loadConstantOrVariable(t2, t0)
btqnz t0, tagMask, .opToPrimitiveIsImm
- loadp JSCell::m_structure[t0], t2
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .opToPrimitiveSlowCase
+ bbaeq JSCell::m_type[t0], ObjectType, .opToPrimitiveSlowCase
.opToPrimitiveIsImm:
storeq t0, [cfr, t3, 8]
dispatch(3)
.opToPrimitiveSlowCase:
- callSlowPath(_slow_path_to_primitive)
+ callOpcodeSlowPath(_slow_path_to_primitive)
dispatch(3)
-_llint_op_next_pname:
- traceExecution()
- loadisFromInstruction(3, t1)
- loadisFromInstruction(4, t2)
- assertNotConstant(t1)
- assertNotConstant(t2)
- loadi PayloadOffset[cfr, t1, 8], t0
- bieq t0, PayloadOffset[cfr, t2, 8], .opNextPnameEnd
- loadisFromInstruction(5, t2)
- assertNotConstant(t2)
- loadp [cfr, t2, 8], t2
- loadp JSPropertyNameIterator::m_jsStrings[t2], t3
- loadq [t3, t0, 8], t3
- addi 1, t0
- storei t0, PayloadOffset[cfr, t1, 8]
- loadisFromInstruction(1, t1)
- storeq t3, [cfr, t1, 8]
- loadisFromInstruction(2, t3)
- assertNotConstant(t3)
- loadq [cfr, t3, 8], t3
- loadp JSCell::m_structure[t3], t1
- bpneq t1, JSPropertyNameIterator::m_cachedStructure[t2], .opNextPnameSlow
- loadp JSPropertyNameIterator::m_cachedPrototypeChain[t2], t0
- loadp StructureChain::m_vector[t0], t0
- btpz [t0], .opNextPnameTarget
-.opNextPnameCheckPrototypeLoop:
- bqeq Structure::m_prototype[t1], ValueNull, .opNextPnameSlow
- loadq Structure::m_prototype[t1], t2
- loadp JSCell::m_structure[t2], t1
- bpneq t1, [t0], .opNextPnameSlow
- addp 8, t0
- btpnz [t0], .opNextPnameCheckPrototypeLoop
-.opNextPnameTarget:
- dispatchIntIndirect(6)
-
-.opNextPnameEnd:
- dispatch(7)
-
-.opNextPnameSlow:
- callSlowPath(_llint_slow_path_next_pname) # This either keeps the PC where it was (causing us to loop) or sets it to target.
- dispatch(0)
-
-
_llint_op_catch:
# This is where we end up from the JIT's throw trampoline (because the
# machine code return address will be set to _llint_op_catch), and from
# the interpreter's throw trampoline (see _llint_throw_trampoline).
# The throwing code must have known that we were throwing to the interpreter,
# and have set VM::targetInterpreterPCForThrow.
- loadp ScopeChain[cfr], t3
+ loadp Callee[cfr], t3
andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- loadp VM::callFrameForThrow[t3], cfr
+ loadp MarkedBlock::m_vm[t3], t3
+ restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(t3, t0)
+ loadp VM::callFrameForCatch[t3], cfr
+ storep 0, VM::callFrameForCatch[t3]
+ restoreStackPointerAfterCall()
+
loadp CodeBlock[cfr], PB
loadp CodeBlock::m_instructions[PB], PB
loadp VM::targetInterpreterPCForThrow[t3], PC
subp PB, PC
rshiftp 3, PC
+
+ callOpcodeSlowPath(_llint_slow_path_check_if_exception_is_uncatchable_and_notify_profiler)
+ bpeq r1, 0, .isCatchableException
+ jmp _llint_throw_from_slow_path_trampoline
+
+.isCatchableException:
+ loadp Callee[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_vm[t3], t3
+
loadq VM::m_exception[t3], t0
storeq 0, VM::m_exception[t3]
loadisFromInstruction(1, t2)
storeq t0, [cfr, t2, 8]
+
+ loadq Exception::m_value[t0], t3
+ loadisFromInstruction(2, t2)
+ storeq t3, [cfr, t2, 8]
+
traceExecution()
- dispatch(2)
+ dispatch(3)
_llint_op_end:
@@ -1858,18 +1969,24 @@ _llint_op_end:
checkSwitchToJITForEpilogue()
loadisFromInstruction(1, t0)
assertNotConstant(t0)
- loadq [cfr, t0, 8], t0
+ loadq [cfr, t0, 8], r0
doReturn()
_llint_throw_from_slow_path_trampoline:
+ loadp Callee[cfr], t1
+ andp MarkedBlockMask, t1
+ loadp MarkedBlock::m_vm[t1], t1
+ copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(t1, t2)
+
callSlowPath(_llint_slow_path_handle_exception)
# When throwing from the interpreter (i.e. throwing from LLIntSlowPaths), so
# the throw target is not necessarily interpreted code, we come to here.
# This essentially emulates the JIT's throwing protocol.
- loadp CodeBlock[cfr], t1
- loadp CodeBlock::m_vm[t1], t1
+ loadp Callee[cfr], t1
+ andp MarkedBlockMask, t1
+ loadp MarkedBlock::m_vm[t1], t1
jmp VM::targetMachinePCForThrow[t1]
@@ -1879,90 +1996,48 @@ _llint_throw_during_call_trampoline:
macro nativeCallTrampoline(executableOffsetToFunction)
+
+ functionPrologue()
storep 0, CodeBlock[cfr]
- if X86_64
- loadp ScopeChain[cfr], t0
- andp MarkedBlockMask, t0
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t0], t0
- storep cfr, VM::topCallFrame[t0]
- loadp CallerFrame[cfr], t0
- loadq ScopeChain[t0], t1
- storeq t1, ScopeChain[cfr]
- peek 0, t1
- storep t1, ReturnPC[cfr]
- move cfr, t5 # t5 = rdi
- subp 16 - 8, sp
- loadp Callee[cfr], t4 # t4 = rsi
- loadp JSFunction::m_executable[t4], t1
- move t0, cfr # Restore cfr to avoid loading from stack
- call executableOffsetToFunction[t1]
- addp 16 - 8, sp
- loadp ScopeChain[cfr], t3
- andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- elsif ARM64
- loadp ScopeChain[cfr], t0
- andp MarkedBlockMask, t0
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t0], t0
- storep cfr, VM::topCallFrame[t0]
- loadp CallerFrame[cfr], t2
- loadp ScopeChain[t2], t1
- storep t1, ScopeChain[cfr]
- preserveReturnAddressAfterCall(t3)
- storep t3, ReturnPC[cfr]
- move cfr, t0
- loadp Callee[cfr], t1
- loadp JSFunction::m_executable[t1], t1
- move t2, cfr # Restore cfr to avoid loading from stack
- call executableOffsetToFunction[t1]
- restoreReturnAddressBeforeReturn(t3)
- loadp ScopeChain[cfr], t3
- andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- elsif C_LOOP
- loadp CallerFrame[cfr], t0
- loadp ScopeChain[t0], t1
- storep t1, ScopeChain[cfr]
-
- loadp ScopeChain[cfr], t3
- andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- storep cfr, VM::topCallFrame[t3]
-
- move t0, t2
- preserveReturnAddressAfterCall(t3)
- storep t3, ReturnPC[cfr]
- move cfr, t0
- loadp Callee[cfr], t1
- loadp JSFunction::m_executable[t1], t1
- move t2, cfr
+ loadp Callee[cfr], t0
+ andp MarkedBlockMask, t0, t1
+ loadp MarkedBlock::m_vm[t1], t1
+ storep cfr, VM::topCallFrame[t1]
+ if ARM64 or C_LOOP
+ storep lr, ReturnPC[cfr]
+ end
+ move cfr, a0
+ loadp Callee[cfr], t1
+ loadp JSFunction::m_executable[t1], t1
+ checkStackPointerAlignment(t3, 0xdead0001)
+ if C_LOOP
cloopCallNative executableOffsetToFunction[t1]
-
- restoreReturnAddressBeforeReturn(t3)
- loadp ScopeChain[cfr], t3
- andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
else
- error
+ if X86_64_WIN
+ subp 32, sp
+ end
+ call executableOffsetToFunction[t1]
+ if X86_64_WIN
+ addp 32, sp
+ end
end
- btqnz VM::m_exception[t3], .exception
+ loadp Callee[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_vm[t3], t3
+
+ btqnz VM::m_exception[t3], .handleException
+
+ functionEpilogue()
ret
-.exception:
- preserveReturnAddressAfterCall(t1) # This is really only needed on X86_64
- loadi ArgumentCount + TagOffset[cfr], PC
- loadp CodeBlock[cfr], PB
- loadp CodeBlock::m_vm[PB], t0
- loadp CodeBlock::m_instructions[PB], PB
- storep cfr, VM::topCallFrame[t0]
- callSlowPath(_llint_throw_from_native_call)
+
+.handleException:
+ storep cfr, VM::topCallFrame[t3]
jmp _llint_throw_from_slow_path_trampoline
end
-
-macro getGlobalObject(dst)
- loadp CodeBlock[cfr], t0
- loadp CodeBlock::m_globalObject[t0], t0
+macro getConstantScope(dst)
+ loadpFromInstruction(6, t0)
loadisFromInstruction(dst, t1)
storeq t0, [cfr, t1, 8]
end
@@ -1975,15 +2050,9 @@ macro varInjectionCheck(slowPath)
end
macro resolveScope()
- loadp CodeBlock[cfr], t0
- loadisFromInstruction(4, t2)
- btbz CodeBlock::m_needsActivation[t0], .resolveScopeAfterActivationCheck
- loadis CodeBlock::m_activationRegister[t0], t1
- btpz [cfr, t1, 8], .resolveScopeAfterActivationCheck
- addi 1, t2
-
-.resolveScopeAfterActivationCheck:
- loadp ScopeChain[cfr], t0
+ loadisFromInstruction(5, t2)
+ loadisFromInstruction(2, t0)
+ loadp [cfr, t0, 8], t0
btiz t2, .resolveScopeLoopEnd
.resolveScopeLoop:
@@ -1999,73 +2068,90 @@ end
_llint_op_resolve_scope:
traceExecution()
- loadisFromInstruction(3, t0)
+ loadisFromInstruction(4, t0)
#rGlobalProperty:
bineq t0, GlobalProperty, .rGlobalVar
- getGlobalObject(1)
- dispatch(6)
+ getConstantScope(1)
+ dispatch(7)
.rGlobalVar:
- bineq t0, GlobalVar, .rClosureVar
- getGlobalObject(1)
- dispatch(6)
+ bineq t0, GlobalVar, .rGlobalLexicalVar
+ getConstantScope(1)
+ dispatch(7)
+
+.rGlobalLexicalVar:
+ bineq t0, GlobalLexicalVar, .rClosureVar
+ getConstantScope(1)
+ dispatch(7)
.rClosureVar:
- bineq t0, ClosureVar, .rGlobalPropertyWithVarInjectionChecks
+ bineq t0, ClosureVar, .rModuleVar
resolveScope()
- dispatch(6)
+ dispatch(7)
+
+.rModuleVar:
+ bineq t0, ModuleVar, .rGlobalPropertyWithVarInjectionChecks
+ getConstantScope(1)
+ dispatch(7)
.rGlobalPropertyWithVarInjectionChecks:
bineq t0, GlobalPropertyWithVarInjectionChecks, .rGlobalVarWithVarInjectionChecks
varInjectionCheck(.rDynamic)
- getGlobalObject(1)
- dispatch(6)
+ getConstantScope(1)
+ dispatch(7)
.rGlobalVarWithVarInjectionChecks:
- bineq t0, GlobalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks
+ bineq t0, GlobalVarWithVarInjectionChecks, .rGlobalLexicalVarWithVarInjectionChecks
varInjectionCheck(.rDynamic)
- getGlobalObject(1)
- dispatch(6)
+ getConstantScope(1)
+ dispatch(7)
+
+.rGlobalLexicalVarWithVarInjectionChecks:
+ bineq t0, GlobalLexicalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks
+ varInjectionCheck(.rDynamic)
+ getConstantScope(1)
+ dispatch(7)
.rClosureVarWithVarInjectionChecks:
bineq t0, ClosureVarWithVarInjectionChecks, .rDynamic
varInjectionCheck(.rDynamic)
resolveScope()
- dispatch(6)
+ dispatch(7)
.rDynamic:
- callSlowPath(_llint_slow_path_resolve_scope)
- dispatch(6)
+ callOpcodeSlowPath(_slow_path_resolve_scope)
+ dispatch(7)
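The rewritten op_resolve_scope dispatches on a ResolveType operand: the global-ish cases return a constant scope cached in the instruction, the closure cases hop a statically known number of scope links, and the var-injection variants first consult a watchpoint before doing either. A compact C++ sketch of that dispatch; the enumerators mirror the asm labels but the enum itself is a model, not JSC's real one:

struct Scope { Scope* next = nullptr; };

enum class ResolveType {
    GlobalProperty, GlobalVar, GlobalLexicalVar, ClosureVar, ModuleVar,
    GlobalPropertyWithVarInjectionChecks, GlobalVarWithVarInjectionChecks,
    GlobalLexicalVarWithVarInjectionChecks, ClosureVarWithVarInjectionChecks,
    Dynamic
};

// resolveScope(): hop a fixed number of scope links.
Scope* walkScopeChain(Scope* start, unsigned depth)
{
    Scope* scope = start;
    for (unsigned i = 0; i < depth; ++i)
        scope = scope->next;
    return scope;
}

// nullptr means "take the slow path" (.rDynamic).
Scope* resolveScope(ResolveType type, Scope* constantScope, Scope* currentScope,
                    unsigned depth, bool varInjectionWatchpointFired)
{
    switch (type) {
    case ResolveType::GlobalProperty:
    case ResolveType::GlobalVar:
    case ResolveType::GlobalLexicalVar:
    case ResolveType::ModuleVar:
        return constantScope;                          // getConstantScope(1)
    case ResolveType::ClosureVar:
        return walkScopeChain(currentScope, depth);    // resolveScope()
    case ResolveType::GlobalPropertyWithVarInjectionChecks:
    case ResolveType::GlobalVarWithVarInjectionChecks:
    case ResolveType::GlobalLexicalVarWithVarInjectionChecks:
        return varInjectionWatchpointFired ? nullptr : constantScope;
    case ResolveType::ClosureVarWithVarInjectionChecks:
        return varInjectionWatchpointFired ? nullptr : walkScopeChain(currentScope, depth);
    case ResolveType::Dynamic:
    default:
        return nullptr;
    }
}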
macro loadWithStructureCheck(operand, slowPath)
loadisFromInstruction(operand, t0)
loadq [cfr, t0, 8], t0
+ loadStructureWithScratch(t0, t2, t1)
loadpFromInstruction(5, t1)
- bpneq JSCell::m_structure[t0], t1, slowPath
+ bpneq t2, t1, slowPath
end
macro getProperty()
- loadpFromInstruction(6, t1)
+ loadisFromInstruction(6, t1)
loadPropertyAtVariableOffset(t1, t0, t2)
valueProfile(t2, 7, t0)
loadisFromInstruction(1, t0)
storeq t2, [cfr, t0, 8]
end
-macro getGlobalVar()
+macro getGlobalVar(tdzCheckIfNecessary)
loadpFromInstruction(6, t0)
loadq [t0], t0
+ tdzCheckIfNecessary(t0)
valueProfile(t0, 7, t1)
loadisFromInstruction(1, t1)
storeq t0, [cfr, t1, 8]
end
macro getClosureVar()
- loadp JSVariableObject::m_registers[t0], t0
- loadpFromInstruction(6, t1)
- loadq [t0, t1, 8], t0
+ loadisFromInstruction(6, t1)
+ loadq JSEnvironmentRecord_variables[t0, t1, 8], t0
valueProfile(t0, 7, t1)
loadisFromInstruction(1, t1)
storeq t0, [cfr, t1, 8]
@@ -2074,7 +2160,7 @@ end
_llint_op_get_from_scope:
traceExecution()
loadisFromInstruction(4, t0)
- andi ResolveModeMask, t0
+ andi ResolveTypeMask, t0
#gGlobalProperty:
bineq t0, GlobalProperty, .gGlobalVar
@@ -2083,8 +2169,16 @@ _llint_op_get_from_scope:
dispatch(8)
.gGlobalVar:
- bineq t0, GlobalVar, .gClosureVar
- getGlobalVar()
+ bineq t0, GlobalVar, .gGlobalLexicalVar
+ getGlobalVar(macro(v) end)
+ dispatch(8)
+
+.gGlobalLexicalVar:
+ bineq t0, GlobalLexicalVar, .gClosureVar
+ getGlobalVar(
+ macro (value)
+ bqeq value, ValueEmpty, .gDynamic
+ end)
dispatch(8)
.gClosureVar:
@@ -2100,10 +2194,18 @@ _llint_op_get_from_scope:
dispatch(8)
.gGlobalVarWithVarInjectionChecks:
- bineq t0, GlobalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks
+ bineq t0, GlobalVarWithVarInjectionChecks, .gGlobalLexicalVarWithVarInjectionChecks
varInjectionCheck(.gDynamic)
- loadVariable(2, t0)
- getGlobalVar()
+ getGlobalVar(macro(v) end)
+ dispatch(8)
+
+.gGlobalLexicalVarWithVarInjectionChecks:
+ bineq t0, GlobalLexicalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks
+ varInjectionCheck(.gDynamic)
+ getGlobalVar(
+ macro (value)
+ bqeq value, ValueEmpty, .gDynamic
+ end)
dispatch(8)
.gClosureVarWithVarInjectionChecks:
@@ -2114,82 +2216,259 @@ _llint_op_get_from_scope:
dispatch(8)
.gDynamic:
- callSlowPath(_llint_slow_path_get_from_scope)
+ callOpcodeSlowPath(_llint_slow_path_get_from_scope)
dispatch(8)
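The new GlobalLexicalVar cases of op_get_from_scope read directly through a pointer to the variable slot but first check for the empty value: an empty slot means the binding is still in its temporal dead zone, so the slow path must throw a ReferenceError. A tiny C++ model of that check, where 0 stands in for ValueEmpty:

#include <cstdint>
#include <stdexcept>

constexpr uint64_t valueEmpty = 0;   // stand-in for ValueEmpty in this sketch

uint64_t getGlobalLexicalVar(const uint64_t* variableSlot)
{
    uint64_t value = *variableSlot;              // loadq [t0], t0
    if (value == valueEmpty)                     // bqeq value, ValueEmpty, .gDynamic
        throw std::runtime_error("TDZ: binding read before initialization");  // slow path throws
    return value;
}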
macro putProperty()
loadisFromInstruction(3, t1)
loadConstantOrVariable(t1, t2)
- loadpFromInstruction(6, t1)
+ loadisFromInstruction(6, t1)
storePropertyAtVariableOffset(t1, t0, t2)
end
-macro putGlobalVar()
+macro putGlobalVariable()
loadisFromInstruction(3, t0)
loadConstantOrVariable(t0, t1)
loadpFromInstruction(5, t2)
- notifyWrite(t2, t1, t0, .pDynamic)
loadpFromInstruction(6, t0)
+ notifyWrite(t2, .pDynamic)
storeq t1, [t0]
end
macro putClosureVar()
loadisFromInstruction(3, t1)
loadConstantOrVariable(t1, t2)
- loadp JSVariableObject::m_registers[t0], t0
- loadpFromInstruction(6, t1)
- storeq t2, [t0, t1, 8]
+ loadisFromInstruction(6, t1)
+ storeq t2, JSEnvironmentRecord_variables[t0, t1, 8]
+end
+
+macro putLocalClosureVar()
+ loadisFromInstruction(3, t1)
+ loadConstantOrVariable(t1, t2)
+ loadpFromInstruction(5, t3)
+ btpz t3, .noVariableWatchpointSet
+ notifyWrite(t3, .pDynamic)
+.noVariableWatchpointSet:
+ loadisFromInstruction(6, t1)
+ storeq t2, JSEnvironmentRecord_variables[t0, t1, 8]
+end
+
+macro checkTDZInGlobalPutToScopeIfNecessary()
+ loadisFromInstruction(4, t0)
+ andi InitializationModeMask, t0
+ rshifti InitializationModeShift, t0
+ bineq t0, NotInitialization, .noNeedForTDZCheck
+ loadpFromInstruction(6, t0)
+ loadq [t0], t0
+ bqeq t0, ValueEmpty, .pDynamic
+.noNeedForTDZCheck:
end
_llint_op_put_to_scope:
traceExecution()
loadisFromInstruction(4, t0)
- andi ResolveModeMask, t0
+ andi ResolveTypeMask, t0
-#pGlobalProperty:
- bineq t0, GlobalProperty, .pGlobalVar
+#pLocalClosureVar:
+ bineq t0, LocalClosureVar, .pGlobalProperty
+ loadVariable(1, t0)
+ putLocalClosureVar()
writeBarrierOnOperands(1, 3)
+ dispatch(7)
+
+.pGlobalProperty:
+ bineq t0, GlobalProperty, .pGlobalVar
loadWithStructureCheck(1, .pDynamic)
putProperty()
+ writeBarrierOnOperands(1, 3)
dispatch(7)
.pGlobalVar:
- bineq t0, GlobalVar, .pClosureVar
+ bineq t0, GlobalVar, .pGlobalLexicalVar
writeBarrierOnGlobalObject(3)
- putGlobalVar()
+ putGlobalVariable()
+ dispatch(7)
+
+.pGlobalLexicalVar:
+ bineq t0, GlobalLexicalVar, .pClosureVar
+ writeBarrierOnGlobalLexicalEnvironment(3)
+ checkTDZInGlobalPutToScopeIfNecessary()
+ putGlobalVariable()
dispatch(7)
.pClosureVar:
bineq t0, ClosureVar, .pGlobalPropertyWithVarInjectionChecks
- writeBarrierOnOperands(1, 3)
loadVariable(1, t0)
putClosureVar()
+ writeBarrierOnOperands(1, 3)
dispatch(7)
.pGlobalPropertyWithVarInjectionChecks:
bineq t0, GlobalPropertyWithVarInjectionChecks, .pGlobalVarWithVarInjectionChecks
- writeBarrierOnOperands(1, 3)
loadWithStructureCheck(1, .pDynamic)
putProperty()
+ writeBarrierOnOperands(1, 3)
dispatch(7)
.pGlobalVarWithVarInjectionChecks:
- bineq t0, GlobalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks
+ bineq t0, GlobalVarWithVarInjectionChecks, .pGlobalLexicalVarWithVarInjectionChecks
writeBarrierOnGlobalObject(3)
varInjectionCheck(.pDynamic)
- putGlobalVar()
+ putGlobalVariable()
+ dispatch(7)
+
+.pGlobalLexicalVarWithVarInjectionChecks:
+ bineq t0, GlobalLexicalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks
+ writeBarrierOnGlobalLexicalEnvironment(3)
+ varInjectionCheck(.pDynamic)
+ checkTDZInGlobalPutToScopeIfNecessary()
+ putGlobalVariable()
dispatch(7)
.pClosureVarWithVarInjectionChecks:
- bineq t0, ClosureVarWithVarInjectionChecks, .pDynamic
- writeBarrierOnOperands(1, 3)
+ bineq t0, ClosureVarWithVarInjectionChecks, .pModuleVar
varInjectionCheck(.pDynamic)
loadVariable(1, t0)
putClosureVar()
+ writeBarrierOnOperands(1, 3)
+ dispatch(7)
+
+.pModuleVar:
+ bineq t0, ModuleVar, .pDynamic
+ callOpcodeSlowPath(_slow_path_throw_strict_mode_readonly_property_write_error)
dispatch(7)
.pDynamic:
- callSlowPath(_llint_slow_path_put_to_scope)
+ callOpcodeSlowPath(_llint_slow_path_put_to_scope)
dispatch(7)
+
+
+_llint_op_get_from_arguments:
+ traceExecution()
+ loadVariable(2, t0)
+ loadi 24[PB, PC, 8], t1
+ loadq DirectArguments_storage[t0, t1, 8], t0
+ valueProfile(t0, 4, t1)
+ loadisFromInstruction(1, t1)
+ storeq t0, [cfr, t1, 8]
+ dispatch(5)
+
+
+_llint_op_put_to_arguments:
+ traceExecution()
+ loadVariable(1, t0)
+ loadi 16[PB, PC, 8], t1
+ loadisFromInstruction(3, t3)
+ loadConstantOrVariable(t3, t2)
+ storeq t2, DirectArguments_storage[t0, t1, 8]
+ writeBarrierOnOperands(1, 3)
+ dispatch(4)
+
+
+_llint_op_get_parent_scope:
+ traceExecution()
+ loadVariable(2, t0)
+ loadp JSScope::m_next[t0], t0
+ loadisFromInstruction(1, t1)
+ storeq t0, [cfr, t1, 8]
+ dispatch(3)
+
+
+_llint_op_profile_type:
+ traceExecution()
+ loadp CodeBlock[cfr], t1
+ loadp CodeBlock::m_vm[t1], t1
+ # t1 is holding the pointer to the typeProfilerLog.
+ loadp VM::m_typeProfilerLog[t1], t1
+ # t2 is holding the pointer to the current log entry.
+ loadp TypeProfilerLog::m_currentLogEntryPtr[t1], t2
+
+ # t0 is holding the JSValue argument.
+ loadisFromInstruction(1, t3)
+ loadConstantOrVariable(t3, t0)
+
+ bqeq t0, ValueEmpty, .opProfileTypeDone
+ # Store the JSValue onto the log entry.
+ storeq t0, TypeProfilerLog::LogEntry::value[t2]
+
+ # Store the TypeLocation onto the log entry.
+ loadpFromInstruction(2, t3)
+ storep t3, TypeProfilerLog::LogEntry::location[t2]
+
+ btqz t0, tagMask, .opProfileTypeIsCell
+ storei 0, TypeProfilerLog::LogEntry::structureID[t2]
+ jmp .opProfileTypeSkipIsCell
+.opProfileTypeIsCell:
+ loadi JSCell::m_structureID[t0], t3
+ storei t3, TypeProfilerLog::LogEntry::structureID[t2]
+.opProfileTypeSkipIsCell:
+
+ # Increment the current log entry.
+ addp sizeof TypeProfilerLog::LogEntry, t2
+ storep t2, TypeProfilerLog::m_currentLogEntryPtr[t1]
+
+ loadp TypeProfilerLog::m_logEndPtr[t1], t1
+ bpneq t2, t1, .opProfileTypeDone
+ callOpcodeSlowPath(_slow_path_profile_type_clear_log)
+
+.opProfileTypeDone:
+ dispatch(6)
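op_profile_type appends an entry (the value, its TypeLocation, and a structure ID for cells) to a preallocated log and only calls the slow path to process it when the cursor reaches the end. A C++ sketch of that bump-pointer log; the type and member names are illustrative, not JSC's:

#include <cstdint>
#include <vector>

struct TypeLogEntry {
    uint64_t value;
    const void* location;    // the TypeLocation for this bytecode
    uint32_t structureID;    // 0 for non-cell values
};

class TypeLog {
public:
    explicit TypeLog(size_t capacity) : m_entries(capacity), m_cursor(0) {}

    void record(uint64_t value, const void* location, uint32_t structureID)
    {
        TypeLogEntry& entry = m_entries[m_cursor++];   // store, then bump the cursor
        entry.value = value;
        entry.location = location;
        entry.structureID = structureID;
        if (m_cursor == m_entries.size())              // end reached -> flush
            flush();                                   // _slow_path_profile_type_clear_log
    }

private:
    void flush() { m_cursor = 0; /* the real slow path processes the entries first */ }

    std::vector<TypeLogEntry> m_entries;
    size_t m_cursor;
};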
+
+_llint_op_profile_control_flow:
+ traceExecution()
+ loadpFromInstruction(1, t0)
+ addq 1, BasicBlockLocation::m_executionCount[t0]
+ dispatch(2)
+
+
+_llint_op_get_rest_length:
+ traceExecution()
+ loadi PayloadOffset + ArgumentCount[cfr], t0
+ subi 1, t0
+ loadisFromInstruction(2, t1)
+ bilteq t0, t1, .storeZero
+ subi t1, t0
+ jmp .boxUp
+.storeZero:
+ move 0, t0
+.boxUp:
+ orq tagTypeNumber, t0
+ loadisFromInstruction(1, t1)
+ storeq t0, [cfr, t1, 8]
+ dispatch(3)
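op_get_rest_length computes how many arguments fall into a rest parameter: the actual argument count (which includes |this|, hence the initial subtraction) minus the number of named parameters to skip, clamped at zero, then boxed as an int32. The same arithmetic in C++:

#include <cstdint>

int32_t restLength(int32_t argumentCountIncludingThis, int32_t numParametersToSkip)
{
    int32_t count = argumentCountIncludingThis - 1;
    if (count <= numParametersToSkip)
        return 0;                          // .storeZero
    return count - numParametersToSkip;    // .boxUp, then or'd with tagTypeNumber
}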
+
+
+_llint_op_log_shadow_chicken_prologue:
+ traceExecution()
+ acquireShadowChickenPacket(.opLogShadowChickenPrologueSlow)
+ storep cfr, ShadowChicken::Packet::frame[t0]
+ loadp CallerFrame[cfr], t1
+ storep t1, ShadowChicken::Packet::callerFrame[t0]
+ loadp Callee[cfr], t1
+ storep t1, ShadowChicken::Packet::callee[t0]
+ loadVariable(1, t1)
+ storep t1, ShadowChicken::Packet::scope[t0]
+ dispatch(2)
+.opLogShadowChickenPrologueSlow:
+ callOpcodeSlowPath(_llint_slow_path_log_shadow_chicken_prologue)
+ dispatch(2)
+
+
+_llint_op_log_shadow_chicken_tail:
+ traceExecution()
+ acquireShadowChickenPacket(.opLogShadowChickenTailSlow)
+ storep cfr, ShadowChicken::Packet::frame[t0]
+ storep ShadowChickenTailMarker, ShadowChicken::Packet::callee[t0]
+ loadVariable(1, t1)
+ storep t1, ShadowChicken::Packet::thisValue[t0]
+ loadVariable(2, t1)
+ storep t1, ShadowChicken::Packet::scope[t0]
+ loadp CodeBlock[cfr], t1
+ storep t1, ShadowChicken::Packet::codeBlock[t0]
+ storei PC, ShadowChicken::Packet::callSiteIndex[t0]
+ dispatch(3)
+.opLogShadowChickenTailSlow:
+ callOpcodeSlowPath(_llint_slow_path_log_shadow_chicken_tail)
+ dispatch(3)
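The shadow chicken opcodes record a small packet per prologue and per tail call into a bounded log so the debugger can later reconstruct frames deleted by tail calls; acquireShadowChickenPacket hands out the next free slot and branches to a slow path that flushes the log when it is full. A rough C++ model of that packet buffer (the packet fields here are a subset of what the asm actually stores, and the names are illustrative):

#include <vector>

struct ShadowPacket {
    const void* frame = nullptr;
    const void* callee = nullptr;   // or a tail marker, as in the tail opcode above
    const void* scope = nullptr;
    bool isTailMarker = false;
};

class ShadowLog {
public:
    explicit ShadowLog(size_t capacity) : m_packets(capacity), m_cursor(0) {}

    // acquireShadowChickenPacket: nullptr means "take the slow path", which
    // flushes the log and retries.
    ShadowPacket* tryAcquire()
    {
        if (m_cursor == m_packets.size())
            return nullptr;
        return &m_packets[m_cursor++];
    }

    void flush() { m_cursor = 0; /* the real implementation consumes the packets first */ }

private:
    std::vector<ShadowPacket> m_packets;
    size_t m_cursor;
};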